From 0e1437eb7eaedc8bac3ac78574a693fac605f0fd Mon Sep 17 00:00:00 2001 From: Labrys Date: Tue, 31 May 2016 11:52:43 -0400 Subject: [PATCH 01/82] Add encoding declaration --- core/autoProcess/autoProcessComics.py | 2 ++ core/autoProcess/autoProcessGames.py | 2 ++ core/autoProcess/autoProcessMovie.py | 2 ++ core/autoProcess/autoProcessMusic.py | 2 ++ core/autoProcess/autoProcessTV.py | 2 ++ 5 files changed, 10 insertions(+) diff --git a/core/autoProcess/autoProcessComics.py b/core/autoProcess/autoProcessComics.py index 89060b62..7ca93957 100644 --- a/core/autoProcess/autoProcessComics.py +++ b/core/autoProcess/autoProcessComics.py @@ -1,3 +1,5 @@ +# coding=utf-8 + import os import time import core diff --git a/core/autoProcess/autoProcessGames.py b/core/autoProcess/autoProcessGames.py index 7fa14227..7119a64b 100644 --- a/core/autoProcess/autoProcessGames.py +++ b/core/autoProcess/autoProcessGames.py @@ -1,3 +1,5 @@ +# coding=utf-8 + import core import requests import shutil diff --git a/core/autoProcess/autoProcessMovie.py b/core/autoProcess/autoProcessMovie.py index 87dc2604..bd9b774d 100644 --- a/core/autoProcess/autoProcessMovie.py +++ b/core/autoProcess/autoProcessMovie.py @@ -1,3 +1,5 @@ +# coding=utf-8 + import os import time import requests diff --git a/core/autoProcess/autoProcessMusic.py b/core/autoProcess/autoProcessMusic.py index 98fc390f..a60f3236 100644 --- a/core/autoProcess/autoProcessMusic.py +++ b/core/autoProcess/autoProcessMusic.py @@ -1,3 +1,5 @@ +# coding=utf-8 + import os import time import requests diff --git a/core/autoProcess/autoProcessTV.py b/core/autoProcess/autoProcessTV.py index 7fa9abd5..76432fb2 100644 --- a/core/autoProcess/autoProcessTV.py +++ b/core/autoProcess/autoProcessTV.py @@ -1,3 +1,5 @@ +# coding=utf-8 + import copy import os import time From b7c7ec000b4d63879bb819cc37ea996b8ff0f415 Mon Sep 17 00:00:00 2001 From: Labrys Date: Tue, 31 May 2016 11:55:36 -0400 Subject: [PATCH 02/82] Add missing os import --- 
core/autoProcess/autoProcessGames.py | 1 + 1 file changed, 1 insertion(+) diff --git a/core/autoProcess/autoProcessGames.py b/core/autoProcess/autoProcessGames.py index 7119a64b..cdf972a8 100644 --- a/core/autoProcess/autoProcessGames.py +++ b/core/autoProcess/autoProcessGames.py @@ -1,5 +1,6 @@ # coding=utf-8 +import os import core import requests import shutil From d15fa76fc979a2b31f404d9667d2d11920bec1f8 Mon Sep 17 00:00:00 2001 From: labrys Date: Tue, 31 May 2016 04:26:09 -0400 Subject: [PATCH 03/82] Remove unused imports, unused variables, and redundant parentheses --- core/autoProcess/autoProcessComics.py | 3 --- core/autoProcess/autoProcessGames.py | 1 - core/autoProcess/autoProcessMovie.py | 2 +- core/autoProcess/autoProcessMusic.py | 2 +- core/autoProcess/autoProcessTV.py | 20 ++++++++------------ 5 files changed, 10 insertions(+), 18 deletions(-) diff --git a/core/autoProcess/autoProcessComics.py b/core/autoProcess/autoProcessComics.py index 7ca93957..d7b19089 100644 --- a/core/autoProcess/autoProcessComics.py +++ b/core/autoProcess/autoProcessComics.py @@ -1,13 +1,10 @@ # coding=utf-8 import os -import time import core import requests -import time from core.nzbToMediaUtil import convert_to_ascii, remoteDir, server_responding -from core.nzbToMediaSceneExceptions import process_all_exceptions from core import logger requests.packages.urllib3.disable_warnings() diff --git a/core/autoProcess/autoProcessGames.py b/core/autoProcess/autoProcessGames.py index cdf972a8..3c6d705b 100644 --- a/core/autoProcess/autoProcessGames.py +++ b/core/autoProcess/autoProcessGames.py @@ -6,7 +6,6 @@ import requests import shutil from core.nzbToMediaUtil import convert_to_ascii, server_responding -from core.nzbToMediaSceneExceptions import process_all_exceptions from core import logger requests.packages.urllib3.disable_warnings() diff --git a/core/autoProcess/autoProcessMovie.py b/core/autoProcess/autoProcessMovie.py index bd9b774d..df191a97 100644 --- 
a/core/autoProcess/autoProcessMovie.py +++ b/core/autoProcess/autoProcessMovie.py @@ -325,7 +325,7 @@ class autoProcessMovie: # we will now check to see if CPS has finished renaming before returning to TorrentToMedia and unpausing. timeout = time.time() + 60 * wait_for - while (time.time() < timeout): # only wait 2 (default) minutes, then return. + while time.time() < timeout: # only wait 2 (default) minutes, then return. logger.postprocess("Checking for status change, please stand by ...", section) release = self.get_release(baseURL, imdbid, download_id, release_id) if release: diff --git a/core/autoProcess/autoProcessMusic.py b/core/autoProcess/autoProcessMusic.py index a60f3236..cd9ce649 100644 --- a/core/autoProcess/autoProcessMusic.py +++ b/core/autoProcess/autoProcessMusic.py @@ -132,7 +132,7 @@ class autoProcessMusic: # we will now wait for this album to be processed before returning to TorrentToMedia and unpausing. timeout = time.time() + 60 * wait_for - while (time.time() < timeout): + while time.time() < timeout: current_status = self.get_status(url, apikey, dirName) if current_status is not None and current_status != release_status: # Something has changed. CPS must have processed this movie. 
logger.postprocess("SUCCESS: This release is now marked as status [%s]" % (current_status),section) diff --git a/core/autoProcess/autoProcessTV.py b/core/autoProcess/autoProcessTV.py index 76432fb2..cdc081ef 100644 --- a/core/autoProcess/autoProcessTV.py +++ b/core/autoProcess/autoProcessTV.py @@ -18,7 +18,6 @@ requests.packages.urllib3.disable_warnings() class autoProcessTV: def command_complete(self, url, params, headers, section): - r = None try: r = requests.get(url, params=params, headers=headers, stream=True, verify=False, timeout=(30, 60)) except requests.ConnectionError: @@ -36,7 +35,6 @@ class autoProcessTV: return None def CDH(self, url2, headers): - r = None try: r = requests.get(url2, params={}, headers=headers, stream=True, verify=False, timeout=(30, 60)) except requests.ConnectionError: @@ -150,8 +148,8 @@ class autoProcessTV: inputName, dirName = convert_to_ascii(inputName, dirName) if listMediaFiles(dirName, media=True, audio=False, meta=False, archives=False): # Check that a video exists. if not, assume failed. - flatten(dirName) - + flatten(dirName) + # Check video files for corruption status = int(failed) good_files = 0 @@ -161,7 +159,7 @@ class autoProcessTV: if transcoder.isVideoGood(video, status): good_files += 1 import_subs(video) - if num_files > 0: + if num_files > 0: if good_files == num_files and not status == 0: logger.info('Found Valid Videos. 
Setting status Success') status = 0 @@ -263,7 +261,7 @@ class autoProcessTV: url = "%s%s:%s%s/api/command" % (protocol, host, port, web_root) url2 = "%s%s:%s%s/api/config/downloadClient" % (protocol, host, port, web_root) headers = {"X-Api-Key": apikey} - params = {'sortKey': 'series.title', 'page': 1, 'pageSize': 1, 'sortDir': 'asc'} + # params = {'sortKey': 'series.title', 'page': 1, 'pageSize': 1, 'sortDir': 'asc'} if remote_path: logger.debug("remote_path: %s" % (remoteDir(dirName)),section) data = {"name": "DownloadedEpisodesScan", "path": remoteDir(dirName), "downloadClientId": download_id} @@ -273,11 +271,10 @@ class autoProcessTV: if not download_id: data.pop("downloadClientId") data = json.dumps(data) - + try: if section == "SickBeard": logger.debug("Opening URL: %s with params: %s" % (url, str(fork_params)), section) - r = None s = requests.Session() login = "%s%s:%s%s/login" % (protocol,host,port,web_root) login_params = {'username': username, 'password': password} @@ -285,7 +282,6 @@ class autoProcessTV: r = s.get(url, auth=(username, password), params=fork_params, stream=True, verify=False, timeout=(30, 1800)) elif section == "NzbDrone": logger.debug("Opening URL: %s with data: %s" % (url, str(data)), section) - r = None r = requests.post(url, data=data, headers=headers, stream=True, verify=False, timeout=(30, 1800)) except requests.ConnectionError: logger.error("Unable to open URL: %s" % (url), section) @@ -299,7 +295,7 @@ class autoProcessTV: Started = False if section == "SickBeard": for line in r.iter_lines(): - if line: + if line: logger.postprocess("%s" % (line), section) if "Moving file from" in line: inputName = os.path.split(line)[1] @@ -329,7 +325,7 @@ class autoProcessTV: while n < 6: # set up wait_for minutes to see if command completes.. 
time.sleep(10 * wait_for) command_status = self.command_complete(url, params, headers, section) - if command_status and command_status in ['completed', 'failed']: + if command_status and command_status in ['completed', 'failed']: break n += 1 if command_status: @@ -345,7 +341,7 @@ class autoProcessTV: #return [1, "%s: Failed to post-process %s" % (section, inputName) ] if self.CDH(url2, headers): logger.debug("The Scan command did not return status completed, but complete Download Handling is enabled. Passing back to %s." % (section), section) - return [status, "%s: Complete DownLoad Handling is enabled. Passing back to %s" % (section, section) ] + return [status, "%s: Complete DownLoad Handling is enabled. Passing back to %s" % (section, section) ] else: logger.warning("The Scan command did not return a valid status. Renaming was not successful.", section) return [1, "%s: Failed to post-process %s" % (section, inputName) ] From 230cc794d0f7a488d7826a821aa5539bf5203ab7 Mon Sep 17 00:00:00 2001 From: labrys Date: Tue, 31 May 2016 03:44:44 -0400 Subject: [PATCH 04/82] Fix spelling --- core/autoProcess/autoProcessComics.py | 2 +- core/autoProcess/autoProcessMovie.py | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/core/autoProcess/autoProcessComics.py b/core/autoProcess/autoProcessComics.py index d7b19089..5a6b5039 100644 --- a/core/autoProcess/autoProcessComics.py +++ b/core/autoProcess/autoProcessComics.py @@ -44,7 +44,7 @@ class autoProcessComics: inputName, dirName = convert_to_ascii(inputName, dirName) clean_name, ext = os.path.splitext(inputName) - if len(ext) == 4: # we assume this was a standrard extension. + if len(ext) == 4: # we assume this was a standard extension. 
inputName = clean_name params = {} diff --git a/core/autoProcess/autoProcessMovie.py b/core/autoProcess/autoProcessMovie.py index df191a97..d5652bb3 100644 --- a/core/autoProcess/autoProcessMovie.py +++ b/core/autoProcess/autoProcessMovie.py @@ -17,7 +17,7 @@ class autoProcessMovie: results = {} params = {} - # determin cmd and params to send to CouchPotato to get our results + # determine cmd and params to send to CouchPotato to get our results section = 'movies' cmd = "/media.list" if release_id or imdbid: @@ -319,7 +319,7 @@ class autoProcessMovie: logger.postprocess("SUCCESS: Unable to find a new release to snatch now. CP will keep searching!", section) return [0, "%s: No new release found now. %s will keep searching" % (section, section) ] - # Added a releease that was not in the wanted list so confirm rename successful by finding this movie media.list. + # Added a release that was not in the wanted list so confirm rename successful by finding this movie media.list. if not release: download_id = None # we don't want to filter new releases based on this. @@ -354,7 +354,7 @@ class autoProcessMovie: # pause and let CouchPotatoServer catch its breath time.sleep(10 * wait_for) - # The status hasn't changed. we have waited 2 minutes which is more than enough. uTorrent can resule seeding now. + # The status hasn't changed. we have waited 2 minutes which is more than enough. uTorrent can resume seeding now. logger.warning( "%s does not appear to have changed status after %s minutes, Please check your logs." 
% (inputName, wait_for), section) From c7defa37daaf0c0f79006c6eac62c324040036ec Mon Sep 17 00:00:00 2001 From: Labrys Date: Tue, 31 May 2016 11:15:14 -0400 Subject: [PATCH 05/82] Fix faulty logic in conditional --- core/autoProcess/autoProcessComics.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/autoProcess/autoProcessComics.py b/core/autoProcess/autoProcessComics.py index 5a6b5039..a6329de2 100644 --- a/core/autoProcess/autoProcessComics.py +++ b/core/autoProcess/autoProcessComics.py @@ -66,7 +66,7 @@ class autoProcessComics: return [1, "%s: Failed to post-process - Unable to connect to %s" % (section, section) ] for line in r.iter_lines(): if line: logger.postprocess("%s" % (line), section) - if ("Post Processing SUCCESSFUL!" or "Post Processing SUCCESSFULL!")in line: success = True + if "Post Processing SUCCESSFUL" in line: success = True if not r.status_code in [requests.codes.ok, requests.codes.created, requests.codes.accepted]: logger.error("Server returned status %s" % (str(r.status_code)), section) From 0a1fe843062e2aa8dc1f9af5c52b786cb1f474b3 Mon Sep 17 00:00:00 2001 From: labrys Date: Tue, 31 May 2016 03:38:54 -0400 Subject: [PATCH 06/82] Convert to new-style class by inheriting from object --- core/autoProcess/autoProcessComics.py | 3 ++- core/autoProcess/autoProcessGames.py | 3 ++- core/autoProcess/autoProcessMovie.py | 7 ++++--- core/autoProcess/autoProcessMusic.py | 5 +++-- core/autoProcess/autoProcessTV.py | 3 ++- 5 files changed, 13 insertions(+), 8 deletions(-) diff --git a/core/autoProcess/autoProcessComics.py b/core/autoProcess/autoProcessComics.py index a6329de2..528eff25 100644 --- a/core/autoProcess/autoProcessComics.py +++ b/core/autoProcess/autoProcessComics.py @@ -9,7 +9,8 @@ from core import logger requests.packages.urllib3.disable_warnings() -class autoProcessComics: + +class autoProcessComics(object): def processEpisode(self, section, dirName, inputName=None, status=0, clientAgent='manual', inputCategory=None): 
if int(status) != 0: logger.warning("FAILED DOWNLOAD DETECTED, nothing to process.",section) diff --git a/core/autoProcess/autoProcessGames.py b/core/autoProcess/autoProcessGames.py index 3c6d705b..1bce13aa 100644 --- a/core/autoProcess/autoProcessGames.py +++ b/core/autoProcess/autoProcessGames.py @@ -10,7 +10,8 @@ from core import logger requests.packages.urllib3.disable_warnings() -class autoProcessGames: + +class autoProcessGames(object): def process(self, section, dirName, inputName=None, status=0, clientAgent='manual', inputCategory=None): status = int(status) diff --git a/core/autoProcess/autoProcessMovie.py b/core/autoProcess/autoProcessMovie.py index d5652bb3..6f62096e 100644 --- a/core/autoProcess/autoProcessMovie.py +++ b/core/autoProcess/autoProcessMovie.py @@ -12,7 +12,8 @@ from core.transcoder import transcoder requests.packages.urllib3.disable_warnings() -class autoProcessMovie: + +class autoProcessMovie(object): def get_release(self, baseURL, imdbid=None, download_id=None, release_id=None): results = {} params = {} @@ -46,7 +47,7 @@ class autoProcessMovie: if 'error' in result: logger.error(str(result['error'])) else: - logger.error("no media found for id %s" % (params['id'])) + logger.error("no media found for id %s" % (params['id'])) return results # Gather release info and return it back, no need to narrow results @@ -210,7 +211,7 @@ class autoProcessMovie: else: logger.error("Transcoding failed for files in %s" % (dirName), section) return [1, "%s: Failed to post-process - Transcoding failed" % (section) ] - for video in listMediaFiles(dirName, media=True, audio=False, meta=False, archives=False): + for video in listMediaFiles(dirName, media=True, audio=False, meta=False, archives=False): if not release and not ".cp(tt" in video and imdbid: videoName, videoExt = os.path.splitext(video) video2 = "%s.cp(%s)%s" % (videoName, imdbid, videoExt) diff --git a/core/autoProcess/autoProcessMusic.py b/core/autoProcess/autoProcessMusic.py index 
cd9ce649..ccebe177 100644 --- a/core/autoProcess/autoProcessMusic.py +++ b/core/autoProcess/autoProcessMusic.py @@ -11,7 +11,8 @@ from core import logger requests.packages.urllib3.disable_warnings() -class autoProcessMusic: + +class autoProcessMusic(object): def get_status(self, url, apikey, dirName): logger.debug("Attempting to get current status for release:%s" % (os.path.basename(dirName))) @@ -121,7 +122,7 @@ class autoProcessMusic: logger.error("Server returned status %s" % (str(r.status_code)), section) return [1, "%s: Failed to post-process - Server returned status %s" % (section, str(r.status_code)) ] elif r.text == "OK": - logger.postprocess("SUCCESS: Post-Processing started for %s in folder %s ..." % (inputName, dirName),section) + logger.postprocess("SUCCESS: Post-Processing started for %s in folder %s ..." % (inputName, dirName),section) else: logger.error("FAILED: Post-Processing has NOT started for %s in folder %s. exiting!" % (inputName, dirName),section) return [1, "%s: Failed to post-process - Returned log from %s was not as expected." 
% (section, section) ] diff --git a/core/autoProcess/autoProcessTV.py b/core/autoProcess/autoProcessTV.py index cdc081ef..4237c101 100644 --- a/core/autoProcess/autoProcessTV.py +++ b/core/autoProcess/autoProcessTV.py @@ -16,7 +16,8 @@ from core.transcoder import transcoder requests.packages.urllib3.disable_warnings() -class autoProcessTV: + +class autoProcessTV(object): def command_complete(self, url, params, headers, section): try: r = requests.get(url, params=params, headers=headers, stream=True, verify=False, timeout=(30, 60)) From 38ed3350ac43240b01d0f40785995cf960063d4a Mon Sep 17 00:00:00 2001 From: Labrys Date: Tue, 31 May 2016 11:59:25 -0400 Subject: [PATCH 07/82] PEP8: Fix formatting * Remove redundant backslash between brackets * Fix multiple statements on one line * Fix missing/excess whitespace * Fix comments not starting with a single `#` and a space * Convert tabs to spaces --- core/autoProcess/autoProcessComics.py | 24 ++++++----- core/autoProcess/autoProcessGames.py | 26 ++++++------ core/autoProcess/autoProcessMovie.py | 45 +++++++++++---------- core/autoProcess/autoProcessMusic.py | 39 +++++++++--------- core/autoProcess/autoProcessTV.py | 58 +++++++++++++-------------- 5 files changed, 97 insertions(+), 95 deletions(-) diff --git a/core/autoProcess/autoProcessComics.py b/core/autoProcess/autoProcessComics.py index 528eff25..f2f75f50 100644 --- a/core/autoProcess/autoProcessComics.py +++ b/core/autoProcess/autoProcessComics.py @@ -13,8 +13,8 @@ requests.packages.urllib3.disable_warnings() class autoProcessComics(object): def processEpisode(self, section, dirName, inputName=None, status=0, clientAgent='manual', inputCategory=None): if int(status) != 0: - logger.warning("FAILED DOWNLOAD DETECTED, nothing to process.",section) - return [1, "%s: Failed to post-process. %s does not support failed downloads" % (section, section) ] + logger.warning("FAILED DOWNLOAD DETECTED, nothing to process.", section) + return [1, "%s: Failed to post-process. 
%s does not support failed downloads" % (section, section)] host = core.CFG[section][inputCategory]["host"] port = core.CFG[section][inputCategory]["port"] @@ -41,7 +41,7 @@ class autoProcessComics(object): url = "%s%s:%s%s/post_process" % (protocol, host, port, web_root) if not server_responding(url): logger.error("Server did not respond. Exiting", section) - return [1, "%s: Failed to post-process - %s did not respond." % (section, section) ] + return [1, "%s: Failed to post-process - %s did not respond." % (section, section)] inputName, dirName = convert_to_ascii(inputName, dirName) clean_name, ext = os.path.splitext(inputName) @@ -64,18 +64,20 @@ class autoProcessComics(object): r = requests.get(url, auth=(username, password), params=params, stream=True, verify=False, timeout=(30, 300)) except requests.ConnectionError: logger.error("Unable to open URL", section) - return [1, "%s: Failed to post-process - Unable to connect to %s" % (section, section) ] + return [1, "%s: Failed to post-process - Unable to connect to %s" % (section, section)] for line in r.iter_lines(): - if line: logger.postprocess("%s" % (line), section) - if "Post Processing SUCCESSFUL" in line: success = True + if line: + logger.postprocess("%s" % (line), section) + if "Post Processing SUCCESSFUL" in line: + success = True if not r.status_code in [requests.codes.ok, requests.codes.created, requests.codes.accepted]: logger.error("Server returned status %s" % (str(r.status_code)), section) - return [1, "%s: Failed to post-process - Server returned status %s" % (section, str(r.status_code)) ] + return [1, "%s: Failed to post-process - Server returned status %s" % (section, str(r.status_code))] if success: - logger.postprocess("SUCCESS: This issue has been processed successfully",section) - return [0, "%s: Successfully post-processed %s" % (section, inputName) ] + logger.postprocess("SUCCESS: This issue has been processed successfully", section) + return [0, "%s: Successfully post-processed %s" % 
(section, inputName)] else: - logger.warning("The issue does not appear to have successfully processed. Please check your Logs",section) - return [1, "%s: Failed to post-process - Returned log from %s was not as expected." % (section, section) ] + logger.warning("The issue does not appear to have successfully processed. Please check your Logs", section) + return [1, "%s: Failed to post-process - Returned log from %s was not as expected." % (section, section)] diff --git a/core/autoProcess/autoProcessGames.py b/core/autoProcess/autoProcessGames.py index 1bce13aa..2056cbbb 100644 --- a/core/autoProcess/autoProcessGames.py +++ b/core/autoProcess/autoProcessGames.py @@ -39,13 +39,13 @@ class autoProcessGames(object): url = "%s%s:%s%s/api" % (protocol, host, port, web_root) if not server_responding(url): logger.error("Server did not respond. Exiting", section) - return [1, "%s: Failed to post-process - %s did not respond." % (section, section) ] + return [1, "%s: Failed to post-process - %s did not respond." 
% (section, section)] inputName, dirName = convert_to_ascii(inputName, dirName) fields = inputName.split("-") - gamezID = fields[0].replace("[","").replace("]","").replace(" ","") + gamezID = fields[0].replace("[", "").replace("]", "").replace(" ", "") downloadStatus = 'Wanted' if status == 0: @@ -57,33 +57,33 @@ class autoProcessGames(object): params['db_id'] = gamezID params['status'] = downloadStatus - logger.debug("Opening URL: %s" % (url),section) + logger.debug("Opening URL: %s" % (url), section) try: r = requests.get(url, params=params, verify=False, timeout=(30, 300)) except requests.ConnectionError: logger.error("Unable to open URL") - return [1, "%s: Failed to post-process - Unable to connect to %s" % (section, section) ] + return [1, "%s: Failed to post-process - Unable to connect to %s" % (section, section)] result = r.json() - logger.postprocess("%s" % (result),section) + logger.postprocess("%s" % (result), section) if library: - logger.postprocess("moving files to library: %s" % (library),section) + logger.postprocess("moving files to library: %s" % (library), section) try: shutil.move(dirName, os.path.join(library, inputName)) except: logger.error("Unable to move %s to %s" % (dirName, os.path.join(library, inputName)), section) - return [1, "%s: Failed to post-process - Unable to move files" % (section) ] + return [1, "%s: Failed to post-process - Unable to move files" % (section)] else: logger.error("No library specified to move files to. 
Please edit your configuration.", section) - return [1, "%s: Failed to post-process - No library defined in %s" % (section, section) ] + return [1, "%s: Failed to post-process - No library defined in %s" % (section, section)] if not r.status_code in [requests.codes.ok, requests.codes.created, requests.codes.accepted]: logger.error("Server returned status %s" % (str(r.status_code)), section) - return [1, "%s: Failed to post-process - Server returned status %s" % (section, str(r.status_code)) ] + return [1, "%s: Failed to post-process - Server returned status %s" % (section, str(r.status_code))] elif result['success']: - logger.postprocess("SUCCESS: Status for %s has been set to %s in Gamez" % (gamezID, downloadStatus),section) - return [0, "%s: Successfully post-processed %s" % (section, inputName) ] + logger.postprocess("SUCCESS: Status for %s has been set to %s in Gamez" % (gamezID, downloadStatus), section) + return [0, "%s: Successfully post-processed %s" % (section, inputName)] else: - logger.error("FAILED: Status for %s has NOT been updated in Gamez" % (gamezID),section) - return [1, "%s: Failed to post-process - Returned log from %s was not as expected." % (section, section) ] + logger.error("FAILED: Status for %s has NOT been updated in Gamez" % (gamezID), section) + return [1, "%s: Failed to post-process - Returned log from %s was not as expected." 
% (section, section)] diff --git a/core/autoProcess/autoProcessMovie.py b/core/autoProcess/autoProcessMovie.py index 6f62096e..42af0787 100644 --- a/core/autoProcess/autoProcessMovie.py +++ b/core/autoProcess/autoProcessMovie.py @@ -40,7 +40,7 @@ class autoProcessMovie(object): except: logger.error("CouchPotato returned the following non-json data") for line in r.iter_lines(): - logger.error("%s" %(line)) + logger.error("%s" % (line)) return results if not result['success']: @@ -56,7 +56,8 @@ class autoProcessMovie(object): id = result[section]['_id'] results[id] = result[section] return results - except:pass + except: + pass # Gather release info and proceed with trying to narrow results to one release choice @@ -135,7 +136,7 @@ class autoProcessMovie(object): baseURL = "%s%s:%s%s/api/%s" % (protocol, host, port, web_root, apikey) if not server_responding(baseURL): logger.error("Server did not respond. Exiting", section) - return [1, "%s: Failed to post-process - %s did not respond." % (section, section) ] + return [1, "%s: Failed to post-process - %s did not respond." % (section, section)] imdbid = find_imdbid(dirName, inputName) release = self.get_release(baseURL, imdbid, download_id) @@ -155,7 +156,7 @@ class autoProcessMovie(object): except: pass - if not os.path.isdir(dirName) and os.path.isfile(dirName): # If the input directory is a file, assume single file download and split dir/name. + if not os.path.isdir(dirName) and os.path.isfile(dirName): # If the input directory is a file, assume single file download and split dir/name. 
dirName = os.path.split(os.path.normpath(dirName))[0] SpecificPath = os.path.join(dirName, str(inputName)) @@ -210,7 +211,7 @@ class autoProcessMovie(object): dirName = newDirName else: logger.error("Transcoding failed for files in %s" % (dirName), section) - return [1, "%s: Failed to post-process - Transcoding failed" % (section) ] + return [1, "%s: Failed to post-process - Transcoding failed" % (section)] for video in listMediaFiles(dirName, media=True, audio=False, meta=False, archives=False): if not release and not ".cp(tt" in video and imdbid: videoName, videoExt = os.path.splitext(video) @@ -244,20 +245,20 @@ class autoProcessMovie(object): r = requests.get(url, params=params, verify=False, timeout=(30, 1800)) except requests.ConnectionError: logger.error("Unable to open URL", section) - return [1, "%s: Failed to post-process - Unable to connect to %s" % (section, section) ] + return [1, "%s: Failed to post-process - Unable to connect to %s" % (section, section)] result = r.json() if not r.status_code in [requests.codes.ok, requests.codes.created, requests.codes.accepted]: logger.error("Server returned status %s" % (str(r.status_code)), section) - return [1, "%s: Failed to post-process - Server returned status %s" % (section, str(r.status_code)) ] + return [1, "%s: Failed to post-process - Server returned status %s" % (section, str(r.status_code))] elif result['success']: logger.postprocess("SUCCESS: Finished %s scan for folder %s" % (method, dirName), section) if method == "manage": - return [0, "%s: Successfully post-processed %s" % (section, inputName) ] + return [0, "%s: Successfully post-processed %s" % (section, inputName)] else: logger.error("FAILED: %s scan was unable to finish for folder %s. exiting!" 
% (method, dirName), section) - return [1, "%s: Failed to post-process - Server did not return success" % (section) ] + return [1, "%s: Failed to post-process - Server did not return success" % (section)] else: core.FAILED = True @@ -272,7 +273,7 @@ class autoProcessMovie(object): if not release_id and not media_id: logger.error("Could not find a downloaded movie in the database matching %s, exiting!" % inputName, section) - return [1, "%s: Failed to post-process - Failed download not found in %s" % (section, section) ] + return [1, "%s: Failed to post-process - Failed download not found in %s" % (section, section)] if release_id: logger.postprocess("Setting failed release %s to ignored ..." % (inputName), section) @@ -286,17 +287,17 @@ class autoProcessMovie(object): r = requests.get(url, params=params, verify=False, timeout=(30, 120)) except requests.ConnectionError: logger.error("Unable to open URL %s" % (url), section) - return [1, "%s: Failed to post-process - Unable to connect to %s" % (section, section) ] + return [1, "%s: Failed to post-process - Unable to connect to %s" % (section, section)] result = r.json() if not r.status_code in [requests.codes.ok, requests.codes.created, requests.codes.accepted]: logger.error("Server returned status %s" % (str(r.status_code)), section) - return [1, "%s: Failed to post-process - Server returned status %s" % (section, str(r.status_code)) ] + return [1, "%s: Failed to post-process - Server returned status %s" % (section, str(r.status_code))] elif result['success']: logger.postprocess("SUCCESS: %s has been set to ignored ..." % (inputName), section) else: logger.warning("FAILED: Unable to set %s to ignored!" 
% (inputName), section) - return [1, "%s: Failed to post-process - Unable to set %s to ignored" % (section, inputName) ] + return [1, "%s: Failed to post-process - Unable to set %s to ignored" % (section, inputName)] logger.postprocess("Trying to snatch the next highest ranked release.", section) @@ -307,18 +308,18 @@ class autoProcessMovie(object): r = requests.get(url, params={'media_id': media_id}, verify=False, timeout=(30, 600)) except requests.ConnectionError: logger.error("Unable to open URL %s" % (url), section) - return [1, "%s: Failed to post-process - Unable to connect to %s" % (section, section) ] + return [1, "%s: Failed to post-process - Unable to connect to %s" % (section, section)] result = r.json() if not r.status_code in [requests.codes.ok, requests.codes.created, requests.codes.accepted]: logger.error("Server returned status %s" % (str(r.status_code)), section) - return [1, "%s: Failed to post-process - Server returned status %s" % (section, str(r.status_code)) ] + return [1, "%s: Failed to post-process - Server returned status %s" % (section, str(r.status_code))] elif result['success']: logger.postprocess("SUCCESS: Snatched the next highest release ...", section) - return [0, "%s: Successfully snatched next highest release" % (section) ] + return [0, "%s: Successfully snatched next highest release" % (section)] else: logger.postprocess("SUCCESS: Unable to find a new release to snatch now. CP will keep searching!", section) - return [0, "%s: No new release found now. %s will keep searching" % (section, section) ] + return [0, "%s: No new release found now. %s will keep searching" % (section, section)] # Added a release that was not in the wanted list so confirm rename successful by finding this movie media.list. if not release: @@ -333,24 +334,24 @@ class autoProcessMovie(object): try: if release_id is None and release_status_old is None: # we didn't have a release before, but now we do. 
logger.postprocess("SUCCESS: Movie %s has now been added to CouchPotato" % (imdbid), section) - return [0, "%s: Successfully post-processed %s" % (section, inputName) ] + return [0, "%s: Successfully post-processed %s" % (section, inputName)] release_status_new = release[release_id]['status'] if release_status_new != release_status_old: logger.postprocess("SUCCESS: Release %s has now been marked with a status of [%s]" % ( inputName, str(release_status_new).upper()), section) - return [0, "%s: Successfully post-processed %s" % (section, inputName) ] + return [0, "%s: Successfully post-processed %s" % (section, inputName)] except: pass if not os.path.isdir(dirName): logger.postprocess("SUCCESS: Input Directory [%s] has been processed and removed" % ( dirName), section) - return [0, "%s: Successfully post-processed %s" % (section, inputName) ] + return [0, "%s: Successfully post-processed %s" % (section, inputName)] elif not listMediaFiles(dirName, media=True, audio=False, meta=False, archives=True): logger.postprocess("SUCCESS: Input Directory [%s] has no remaining media files. This has been fully processed." % ( dirName), section) - return [0, "%s: Successfully post-processed %s" % (section, inputName) ] + return [0, "%s: Successfully post-processed %s" % (section, inputName)] # pause and let CouchPotatoServer catch its breath time.sleep(10 * wait_for) @@ -359,4 +360,4 @@ class autoProcessMovie(object): logger.warning( "%s does not appear to have changed status after %s minutes, Please check your logs." 
% (inputName, wait_for), section) - return [1, "%s: Failed to post-process - No change in status" % (section) ] + return [1, "%s: Failed to post-process - No change in status" % (section)] diff --git a/core/autoProcess/autoProcessMusic.py b/core/autoProcess/autoProcessMusic.py index ccebe177..2936f9d4 100644 --- a/core/autoProcess/autoProcessMusic.py +++ b/core/autoProcess/autoProcessMusic.py @@ -32,7 +32,7 @@ class autoProcessMusic(object): result = r.json() for album in result: if os.path.basename(dirName) == album['FolderName']: - return album["Status"].lower() + return album["Status"].lower() except: return None @@ -66,13 +66,12 @@ class autoProcessMusic(object): else: protocol = "http://" - - url = "%s%s:%s%s/api" % (protocol,host,port,web_root) + url = "%s%s:%s%s/api" % (protocol, host, port, web_root) if not server_responding(url): logger.error("Server did not respond. Exiting", section) - return [1, "%s: Failed to post-process - %s did not respond." % (section, section) ] + return [1, "%s: Failed to post-process - %s did not respond." % (section, section)] - if not os.path.isdir(dirName) and os.path.isfile(dirName): # If the input directory is a file, assume single file download and split dir/name. + if not os.path.isdir(dirName) and os.path.isfile(dirName): # If the input directory is a file, assume single file download and split dir/name. dirName = os.path.split(os.path.normpath(dirName))[0] SpecificPath = os.path.join(dirName, str(inputName)) @@ -106,43 +105,43 @@ class autoProcessMusic(object): release_status = self.get_status(url, apikey, dirName) if not release_status: - logger.error("Could not find a status for %s, is it in the wanted list ?" % (inputName),section) + logger.error("Could not find a status for %s, is it in the wanted list ?" 
% (inputName), section) logger.debug("Opening URL: %s with PARAMS: %s" % (url, params), section) try: r = requests.get(url, params=params, verify=False, timeout=(30, 300)) except requests.ConnectionError: - logger.error("Unable to open URL %s" % (url) ,section) - return [1, "%s: Failed to post-process - Unable to connect to %s" % (section, section) ] + logger.error("Unable to open URL %s" % (url), section) + return [1, "%s: Failed to post-process - Unable to connect to %s" % (section, section)] - logger.debug("Result: %s" % (r.text),section) + logger.debug("Result: %s" % (r.text), section) if not r.status_code in [requests.codes.ok, requests.codes.created, requests.codes.accepted]: logger.error("Server returned status %s" % (str(r.status_code)), section) - return [1, "%s: Failed to post-process - Server returned status %s" % (section, str(r.status_code)) ] + return [1, "%s: Failed to post-process - Server returned status %s" % (section, str(r.status_code))] elif r.text == "OK": - logger.postprocess("SUCCESS: Post-Processing started for %s in folder %s ..." % (inputName, dirName),section) + logger.postprocess("SUCCESS: Post-Processing started for %s in folder %s ..." % (inputName, dirName), section) else: - logger.error("FAILED: Post-Processing has NOT started for %s in folder %s. exiting!" % (inputName, dirName),section) - return [1, "%s: Failed to post-process - Returned log from %s was not as expected." % (section, section) ] + logger.error("FAILED: Post-Processing has NOT started for %s in folder %s. exiting!" % (inputName, dirName), section) + return [1, "%s: Failed to post-process - Returned log from %s was not as expected." % (section, section)] else: logger.warning("FAILED DOWNLOAD DETECTED", section) - return [1, "%s: Failed to post-process. %s does not support failed downloads" % (section, section) ] + return [1, "%s: Failed to post-process. 
%s does not support failed downloads" % (section, section)] # we will now wait for this album to be processed before returning to TorrentToMedia and unpausing. timeout = time.time() + 60 * wait_for while time.time() < timeout: current_status = self.get_status(url, apikey, dirName) if current_status is not None and current_status != release_status: # Something has changed. CPS must have processed this movie. - logger.postprocess("SUCCESS: This release is now marked as status [%s]" % (current_status),section) - return [0, "%s: Successfully post-processed %s" % (section, inputName) ] + logger.postprocess("SUCCESS: This release is now marked as status [%s]" % (current_status), section) + return [0, "%s: Successfully post-processed %s" % (section, inputName)] if not os.path.isdir(dirName): - logger.postprocess("SUCCESS: The input directory %s has been removed Processing must have finished." % (dirName),section) - return [0, "%s: Successfully post-processed %s" % (section, inputName) ] + logger.postprocess("SUCCESS: The input directory %s has been removed Processing must have finished." % (dirName), section) + return [0, "%s: Successfully post-processed %s" % (section, inputName)] time.sleep(10 * wait_for) # The status hasn't changed. uTorrent can resume seeding now. - logger.warning("The music album does not appear to have changed status after %s minutes. Please check your Logs" % (wait_for),section) - return [1, "%s: Failed to post-process - No change in wanted status" % (section) ] + logger.warning("The music album does not appear to have changed status after %s minutes. 
Please check your Logs" % (wait_for), section) + return [1, "%s: Failed to post-process - No change in wanted status" % (section)] diff --git a/core/autoProcess/autoProcessTV.py b/core/autoProcess/autoProcessTV.py index 4237c101..ac3005a6 100644 --- a/core/autoProcess/autoProcessTV.py +++ b/core/autoProcess/autoProcessTV.py @@ -51,7 +51,7 @@ class autoProcessTV(object): except: return False - def processEpisode(self, section, dirName, inputName=None, failed=False, clientAgent = "manual", download_id=None, inputCategory=None, failureLink=None): + def processEpisode(self, section, dirName, inputName=None, failed=False, clientAgent="manual", download_id=None, inputCategory=None, failureLink=None): host = core.CFG[section][inputCategory]["host"] port = core.CFG[section][inputCategory]["port"] try: @@ -66,9 +66,9 @@ class autoProcessTV(object): web_root = core.CFG[section][inputCategory]["web_root"] except: web_root = "" - if not server_responding("%s%s:%s%s" % (protocol,host,port,web_root)): + if not server_responding("%s%s:%s%s" % (protocol, host, port, web_root)): logger.error("Server did not respond. Exiting", section) - return [1, "%s: Failed to post-process - %s did not respond." % (section, section) ] + return [1, "%s: Failed to post-process - %s did not respond." % (section, section)] # auto-detect correct fork fork, fork_params = autoFork(section, inputCategory) @@ -116,7 +116,7 @@ class autoProcessTV(object): except: extract = 0 - if not os.path.isdir(dirName) and os.path.isfile(dirName): # If the input directory is a file, assume single file download and split dir/name. + if not os.path.isdir(dirName) and os.path.isfile(dirName): # If the input directory is a file, assume single file download and split dir/name. 
dirName = os.path.split(os.path.normpath(dirName))[0] SpecificPath = os.path.join(dirName, str(inputName)) @@ -136,7 +136,7 @@ class autoProcessTV(object): if e.errno != errno.EEXIST: raise - if not 'process_method' in fork_params or (clientAgent in ['nzbget','sabnzbd'] and nzbExtractionBy != "Destination"): + if not 'process_method' in fork_params or (clientAgent in ['nzbget', 'sabnzbd'] and nzbExtractionBy != "Destination"): if inputName: process_all_exceptions(inputName, dirName) inputName, dirName = convert_to_ascii(inputName, dirName) @@ -175,7 +175,7 @@ class autoProcessTV(object): failureLink = failureLink + '&corrupt=true' elif clientAgent == "manual": logger.warning("No media files found in directory %s to manually process." % (dirName), section) - return [0, ""] # Success (as far as this script is concerned) + return [0, ""] # Success (as far as this script is concerned) elif nzbExtractionBy == "Destination": logger.info("Check for media files ignored because nzbExtractionBy is set to Destination.") if int(failed) == 0: @@ -183,9 +183,9 @@ class autoProcessTV(object): status = 0 failed = 0 else: - logger.info("Downloader reported an error during download or verification. Processing this as a failed download.") - status = 1 - failed = 1 + logger.info("Downloader reported an error during download or verification. Processing this as a failed download.") + status = 1 + failed = 1 else: logger.warning("No media files found in directory %s. 
Processing this as a failed download" % (dirName), section) status = 1 @@ -193,14 +193,14 @@ class autoProcessTV(object): if os.environ.has_key('NZBOP_VERSION') and os.environ['NZBOP_VERSION'][0:5] >= '14.0': print('[NZB] MARK=BAD') - if status == 0 and core.TRANSCODE == 1: # only transcode successful downloads + if status == 0 and core.TRANSCODE == 1: # only transcode successful downloads result, newDirName = transcoder.Transcode_directory(dirName) if result == 0: logger.debug("SUCCESS: Transcoding succeeded for files in %s" % (dirName), section) dirName = newDirName else: logger.error("FAILED: Transcoding failed for files in %s" % (dirName), section) - return [1, "%s: Failed to post-process - Transcoding failed" % (section) ] + return [1, "%s: Failed to post-process - Transcoding failed" % (section)] # configure SB params to pass fork_params['quiet'] = 1 @@ -235,7 +235,7 @@ class autoProcessTV(object): del fork_params[param] # delete any unused params so we don't pass them to SB by mistake - [fork_params.pop(k) for k,v in fork_params.items() if v is None] + [fork_params.pop(k) for k, v in fork_params.items() if v is None] if status == 0: logger.postprocess("SUCCESS: The download succeeded, sending a post-process request", section) @@ -247,27 +247,27 @@ class autoProcessTV(object): logger.postprocess("FAILED: The download failed. Sending 'failed' process request to %s branch" % (fork), section) elif section == "NzbDrone": logger.postprocess("FAILED: The download failed. Sending failed download to %s for CDH processing" % (fork), section) - return [1, "%s: Downlaod Failed. Sending back to %s" % (section, section) ] # Return as failed to flag this in the downloader. + return [1, "%s: Downlaod Failed. Sending back to %s" % (section, section)] # Return as failed to flag this in the downloader. else: logger.postprocess("FAILED: The download failed. %s branch does not handle failed downloads. 
Nothing to process" % (fork), section) if delete_failed and os.path.isdir(dirName) and not os.path.dirname(dirName) == dirName: logger.postprocess("Deleting failed files and folder %s" % (dirName), section) rmDir(dirName) - return [1, "%s: Failed to post-process. %s does not support failed downloads" % (section, section) ] # Return as failed to flag this in the downloader. + return [1, "%s: Failed to post-process. %s does not support failed downloads" % (section, section)] # Return as failed to flag this in the downloader. url = None if section == "SickBeard": - url = "%s%s:%s%s/home/postprocess/processEpisode" % (protocol,host,port,web_root) + url = "%s%s:%s%s/home/postprocess/processEpisode" % (protocol, host, port, web_root) elif section == "NzbDrone": url = "%s%s:%s%s/api/command" % (protocol, host, port, web_root) url2 = "%s%s:%s%s/api/config/downloadClient" % (protocol, host, port, web_root) headers = {"X-Api-Key": apikey} # params = {'sortKey': 'series.title', 'page': 1, 'pageSize': 1, 'sortDir': 'asc'} if remote_path: - logger.debug("remote_path: %s" % (remoteDir(dirName)),section) + logger.debug("remote_path: %s" % (remoteDir(dirName)), section) data = {"name": "DownloadedEpisodesScan", "path": remoteDir(dirName), "downloadClientId": download_id} else: - logger.debug("path: %s" % (dirName),section) + logger.debug("path: %s" % (dirName), section) data = {"name": "DownloadedEpisodesScan", "path": dirName, "downloadClientId": download_id} if not download_id: data.pop("downloadClientId") @@ -277,7 +277,7 @@ class autoProcessTV(object): if section == "SickBeard": logger.debug("Opening URL: %s with params: %s" % (url, str(fork_params)), section) s = requests.Session() - login = "%s%s:%s%s/login" % (protocol,host,port,web_root) + login = "%s%s:%s%s/login" % (protocol, host, port, web_root) login_params = {'username': username, 'password': password} s.post(login, data=login_params, stream=True, verify=False, timeout=(30, 60)) r = s.get(url, auth=(username, 
password), params=fork_params, stream=True, verify=False, timeout=(30, 1800)) @@ -286,11 +286,11 @@ class autoProcessTV(object): r = requests.post(url, data=data, headers=headers, stream=True, verify=False, timeout=(30, 1800)) except requests.ConnectionError: logger.error("Unable to open URL: %s" % (url), section) - return [1, "%s: Failed to post-process - Unable to connect to %s" % (section, section) ] + return [1, "%s: Failed to post-process - Unable to connect to %s" % (section, section)] if not r.status_code in [requests.codes.ok, requests.codes.created, requests.codes.accepted]: logger.error("Server returned status %s" % (str(r.status_code)), section) - return [1, "%s: Failed to post-process - Server returned status %s" % (section, str(r.status_code)) ] + return [1, "%s: Failed to post-process - Server returned status %s" % (section, str(r.status_code))] Success = False Started = False @@ -314,11 +314,11 @@ class autoProcessTV(object): Started = False if status != 0 and delete_failed and not os.path.dirname(dirName) == dirName: - logger.postprocess("Deleting failed files and folder %s" % (dirName),section) + logger.postprocess("Deleting failed files and folder %s" % (dirName), section) rmDir(dirName) if Success: - return [0, "%s: Successfully post-processed %s" % (section, inputName) ] + return [0, "%s: Successfully post-processed %s" % (section, inputName)] elif section == "NzbDrone" and Started: n = 0 params = {} @@ -327,24 +327,24 @@ class autoProcessTV(object): time.sleep(10 * wait_for) command_status = self.command_complete(url, params, headers, section) if command_status and command_status in ['completed', 'failed']: - break + break n += 1 if command_status: logger.debug("The Scan command return status: %s" % (command_status), section) if not os.path.exists(dirName): logger.debug("The directory %s has been removed. Renaming was successful." 
% (dirName), section) - return [0, "%s: Successfully post-processed %s" % (section, inputName) ] + return [0, "%s: Successfully post-processed %s" % (section, inputName)] elif command_status and command_status in ['completed']: logger.debug("The Scan command has completed successfully. Renaming was successful.", section) - return [0, "%s: Successfully post-processed %s" % (section, inputName) ] + return [0, "%s: Successfully post-processed %s" % (section, inputName)] elif command_status and command_status in ['failed']: logger.debug("The Scan command has failed. Renaming was not successful.", section) - #return [1, "%s: Failed to post-process %s" % (section, inputName) ] + # return [1, "%s: Failed to post-process %s" % (section, inputName) ] if self.CDH(url2, headers): logger.debug("The Scan command did not return status completed, but complete Download Handling is enabled. Passing back to %s." % (section), section) - return [status, "%s: Complete DownLoad Handling is enabled. Passing back to %s" % (section, section) ] + return [status, "%s: Complete DownLoad Handling is enabled. Passing back to %s" % (section, section)] else: logger.warning("The Scan command did not return a valid status. Renaming was not successful.", section) - return [1, "%s: Failed to post-process %s" % (section, inputName) ] + return [1, "%s: Failed to post-process %s" % (section, inputName)] else: - return [1, "%s: Failed to post-process - Returned log from %s was not as expected." % (section, section) ] # We did not receive Success confirmation. + return [1, "%s: Failed to post-process - Returned log from %s was not as expected." % (section, section)] # We did not receive Success confirmation. 
From 061a167b56722ef4b42ea4caaa86c2beb1b97edd Mon Sep 17 00:00:00 2001 From: labrys Date: Tue, 31 May 2016 03:25:16 -0400 Subject: [PATCH 08/82] PEP8: Test for membership should be 'not in' --- core/autoProcess/autoProcessComics.py | 2 +- core/autoProcess/autoProcessGames.py | 2 +- core/autoProcess/autoProcessMovie.py | 8 ++++---- core/autoProcess/autoProcessMusic.py | 2 +- core/autoProcess/autoProcessTV.py | 8 ++++---- 5 files changed, 11 insertions(+), 11 deletions(-) diff --git a/core/autoProcess/autoProcessComics.py b/core/autoProcess/autoProcessComics.py index f2f75f50..10fe6ec9 100644 --- a/core/autoProcess/autoProcessComics.py +++ b/core/autoProcess/autoProcessComics.py @@ -71,7 +71,7 @@ class autoProcessComics(object): if "Post Processing SUCCESSFUL" in line: success = True - if not r.status_code in [requests.codes.ok, requests.codes.created, requests.codes.accepted]: + if r.status_code not in [requests.codes.ok, requests.codes.created, requests.codes.accepted]: logger.error("Server returned status %s" % (str(r.status_code)), section) return [1, "%s: Failed to post-process - Server returned status %s" % (section, str(r.status_code))] diff --git a/core/autoProcess/autoProcessGames.py b/core/autoProcess/autoProcessGames.py index 2056cbbb..a08a8675 100644 --- a/core/autoProcess/autoProcessGames.py +++ b/core/autoProcess/autoProcessGames.py @@ -78,7 +78,7 @@ class autoProcessGames(object): logger.error("No library specified to move files to. 
Please edit your configuration.", section) return [1, "%s: Failed to post-process - No library defined in %s" % (section, section)] - if not r.status_code in [requests.codes.ok, requests.codes.created, requests.codes.accepted]: + if r.status_code not in [requests.codes.ok, requests.codes.created, requests.codes.accepted]: logger.error("Server returned status %s" % (str(r.status_code)), section) return [1, "%s: Failed to post-process - Server returned status %s" % (section, str(r.status_code))] elif result['success']: diff --git a/core/autoProcess/autoProcessMovie.py b/core/autoProcess/autoProcessMovie.py index 42af0787..773fda93 100644 --- a/core/autoProcess/autoProcessMovie.py +++ b/core/autoProcess/autoProcessMovie.py @@ -213,7 +213,7 @@ class autoProcessMovie(object): logger.error("Transcoding failed for files in %s" % (dirName), section) return [1, "%s: Failed to post-process - Transcoding failed" % (section)] for video in listMediaFiles(dirName, media=True, audio=False, meta=False, archives=False): - if not release and not ".cp(tt" in video and imdbid: + if not release and ".cp(tt" not in video and imdbid: videoName, videoExt = os.path.splitext(video) video2 = "%s.cp(%s)%s" % (videoName, imdbid, videoExt) if not (clientAgent in [core.TORRENT_CLIENTAGENT, 'manual'] and core.USELINK == 'move-sym'): @@ -248,7 +248,7 @@ class autoProcessMovie(object): return [1, "%s: Failed to post-process - Unable to connect to %s" % (section, section)] result = r.json() - if not r.status_code in [requests.codes.ok, requests.codes.created, requests.codes.accepted]: + if r.status_code not in [requests.codes.ok, requests.codes.created, requests.codes.accepted]: logger.error("Server returned status %s" % (str(r.status_code)), section) return [1, "%s: Failed to post-process - Server returned status %s" % (section, str(r.status_code))] elif result['success']: @@ -290,7 +290,7 @@ class autoProcessMovie(object): return [1, "%s: Failed to post-process - Unable to connect to %s" % 
(section, section)] result = r.json() - if not r.status_code in [requests.codes.ok, requests.codes.created, requests.codes.accepted]: + if r.status_code not in [requests.codes.ok, requests.codes.created, requests.codes.accepted]: logger.error("Server returned status %s" % (str(r.status_code)), section) return [1, "%s: Failed to post-process - Server returned status %s" % (section, str(r.status_code))] elif result['success']: @@ -311,7 +311,7 @@ class autoProcessMovie(object): return [1, "%s: Failed to post-process - Unable to connect to %s" % (section, section)] result = r.json() - if not r.status_code in [requests.codes.ok, requests.codes.created, requests.codes.accepted]: + if r.status_code not in [requests.codes.ok, requests.codes.created, requests.codes.accepted]: logger.error("Server returned status %s" % (str(r.status_code)), section) return [1, "%s: Failed to post-process - Server returned status %s" % (section, str(r.status_code))] elif result['success']: diff --git a/core/autoProcess/autoProcessMusic.py b/core/autoProcess/autoProcessMusic.py index 2936f9d4..d5b79134 100644 --- a/core/autoProcess/autoProcessMusic.py +++ b/core/autoProcess/autoProcessMusic.py @@ -117,7 +117,7 @@ class autoProcessMusic(object): logger.debug("Result: %s" % (r.text), section) - if not r.status_code in [requests.codes.ok, requests.codes.created, requests.codes.accepted]: + if r.status_code not in [requests.codes.ok, requests.codes.created, requests.codes.accepted]: logger.error("Server returned status %s" % (str(r.status_code)), section) return [1, "%s: Failed to post-process - Server returned status %s" % (section, str(r.status_code))] elif r.text == "OK": diff --git a/core/autoProcess/autoProcessTV.py b/core/autoProcess/autoProcessTV.py index ac3005a6..a701f688 100644 --- a/core/autoProcess/autoProcessTV.py +++ b/core/autoProcess/autoProcessTV.py @@ -24,7 +24,7 @@ class autoProcessTV(object): except requests.ConnectionError: logger.error("Unable to open URL: %s" % (url1), 
section) return None - if not r.status_code in [requests.codes.ok, requests.codes.created, requests.codes.accepted]: + if r.status_code not in [requests.codes.ok, requests.codes.created, requests.codes.accepted]: logger.error("Server returned status %s" % (str(r.status_code)), section) return None else: @@ -41,7 +41,7 @@ class autoProcessTV(object): except requests.ConnectionError: logger.error("Unable to open URL: %s" % (url2), section) return False - if not r.status_code in [requests.codes.ok, requests.codes.created, requests.codes.accepted]: + if r.status_code not in [requests.codes.ok, requests.codes.created, requests.codes.accepted]: logger.error("Server returned status %s" % (str(r.status_code)), section) return False else: @@ -136,7 +136,7 @@ class autoProcessTV(object): if e.errno != errno.EEXIST: raise - if not 'process_method' in fork_params or (clientAgent in ['nzbget', 'sabnzbd'] and nzbExtractionBy != "Destination"): + if 'process_method' not in fork_params or (clientAgent in ['nzbget', 'sabnzbd'] and nzbExtractionBy != "Destination"): if inputName: process_all_exceptions(inputName, dirName) inputName, dirName = convert_to_ascii(inputName, dirName) @@ -288,7 +288,7 @@ class autoProcessTV(object): logger.error("Unable to open URL: %s" % (url), section) return [1, "%s: Failed to post-process - Unable to connect to %s" % (section, section)] - if not r.status_code in [requests.codes.ok, requests.codes.created, requests.codes.accepted]: + if r.status_code not in [requests.codes.ok, requests.codes.created, requests.codes.accepted]: logger.error("Server returned status %s" % (str(r.status_code)), section) return [1, "%s: Failed to post-process - Server returned status %s" % (section, str(r.status_code))] From 948ead5408b1fc080f7dbbccc2cd5a405f727578 Mon Sep 17 00:00:00 2001 From: labrys Date: Tue, 31 May 2016 03:26:59 -0400 Subject: [PATCH 09/82] PEP8: comparison to None should be 'if cond is not None:' --- core/autoProcess/autoProcessComics.py | 2 +- 1 file 
changed, 1 insertion(+), 1 deletion(-) diff --git a/core/autoProcess/autoProcessComics.py b/core/autoProcess/autoProcessComics.py index 10fe6ec9..9ac9e9f2 100644 --- a/core/autoProcess/autoProcessComics.py +++ b/core/autoProcess/autoProcessComics.py @@ -54,7 +54,7 @@ class autoProcessComics(object): if remote_path: params['nzb_folder'] = remoteDir(dirName) - if inputName != None: + if inputName is not None: params['nzb_name'] = inputName success = False From c5c5279a8b111beb7b720cff2e4a5ad2bb123769 Mon Sep 17 00:00:00 2001 From: labrys Date: Tue, 31 May 2016 03:33:50 -0400 Subject: [PATCH 10/82] PEP8: .has_key() is deprecated, use 'in' --- core/autoProcess/autoProcessMovie.py | 4 ++-- core/autoProcess/autoProcessTV.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/core/autoProcess/autoProcessMovie.py b/core/autoProcess/autoProcessMovie.py index 773fda93..79c01604 100644 --- a/core/autoProcess/autoProcessMovie.py +++ b/core/autoProcess/autoProcessMovie.py @@ -189,7 +189,7 @@ class autoProcessMovie(object): status = 0 elif num_files > 0 and good_files < num_files: logger.info("Status shown as success from Downloader, but corrupt video files found. Setting as failed.", section) - if os.environ.has_key('NZBOP_VERSION') and os.environ['NZBOP_VERSION'][0:5] >= '14.0': + if 'NZBOP_VERSION' in os.environ and os.environ['NZBOP_VERSION'][0:5] >= '14.0': print('[NZB] MARK=BAD') if failureLink: failureLink = failureLink + '&corrupt=true' @@ -200,7 +200,7 @@ class autoProcessMovie(object): else: logger.warning("No media files found in directory %s. 
Processing this as a failed download" % (dirName), section) status = 1 - if os.environ.has_key('NZBOP_VERSION') and os.environ['NZBOP_VERSION'][0:5] >= '14.0': + if 'NZBOP_VERSION' in os.environ and os.environ['NZBOP_VERSION'][0:5] >= '14.0': print('[NZB] MARK=BAD') if status == 0: diff --git a/core/autoProcess/autoProcessTV.py b/core/autoProcess/autoProcessTV.py index a701f688..ab8a4247 100644 --- a/core/autoProcess/autoProcessTV.py +++ b/core/autoProcess/autoProcessTV.py @@ -169,7 +169,7 @@ class autoProcessTV(object): logger.info('Found corrupt videos. Setting status Failed') status = 1 failed = 1 - if os.environ.has_key('NZBOP_VERSION') and os.environ['NZBOP_VERSION'][0:5] >= '14.0': + if 'NZBOP_VERSION' in os.environ and os.environ['NZBOP_VERSION'][0:5] >= '14.0': print('[NZB] MARK=BAD') if failureLink: failureLink = failureLink + '&corrupt=true' @@ -190,7 +190,7 @@ class autoProcessTV(object): logger.warning("No media files found in directory %s. Processing this as a failed download" % (dirName), section) status = 1 failed = 1 - if os.environ.has_key('NZBOP_VERSION') and os.environ['NZBOP_VERSION'][0:5] >= '14.0': + if 'NZBOP_VERSION' in os.environ and os.environ['NZBOP_VERSION'][0:5] >= '14.0': print('[NZB] MARK=BAD') if status == 0 and core.TRANSCODE == 1: # only transcode successful downloads From 03fa8bc973303ee0fcf1b286c5d7b38efe312f0e Mon Sep 17 00:00:00 2001 From: labrys Date: Tue, 31 May 2016 03:35:25 -0400 Subject: [PATCH 11/82] Replace assignment with augmented assignment --- core/autoProcess/autoProcessMovie.py | 2 +- core/autoProcess/autoProcessTV.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/core/autoProcess/autoProcessMovie.py b/core/autoProcess/autoProcessMovie.py index 79c01604..0166db2e 100644 --- a/core/autoProcess/autoProcessMovie.py +++ b/core/autoProcess/autoProcessMovie.py @@ -192,7 +192,7 @@ class autoProcessMovie(object): if 'NZBOP_VERSION' in os.environ and os.environ['NZBOP_VERSION'][0:5] >= '14.0': 
print('[NZB] MARK=BAD') if failureLink: - failureLink = failureLink + '&corrupt=true' + failureLink += '&corrupt=true' status = 1 elif clientAgent == "manual": logger.warning("No media files found in directory %s to manually process." % (dirName), section) diff --git a/core/autoProcess/autoProcessTV.py b/core/autoProcess/autoProcessTV.py index ab8a4247..1e7b7fbd 100644 --- a/core/autoProcess/autoProcessTV.py +++ b/core/autoProcess/autoProcessTV.py @@ -172,7 +172,7 @@ class autoProcessTV(object): if 'NZBOP_VERSION' in os.environ and os.environ['NZBOP_VERSION'][0:5] >= '14.0': print('[NZB] MARK=BAD') if failureLink: - failureLink = failureLink + '&corrupt=true' + failureLink += '&corrupt=true' elif clientAgent == "manual": logger.warning("No media files found in directory %s to manually process." % (dirName), section) return [0, ""] # Success (as far as this script is concerned) From 4a4087180b64601a5564738395b3baa5c2913ff1 Mon Sep 17 00:00:00 2001 From: labrys Date: Tue, 31 May 2016 03:40:30 -0400 Subject: [PATCH 12/82] Python 3: Convert 'except exceptClass, Target' to 'except exceptClass as Target' --- core/autoProcess/autoProcessMusic.py | 2 +- core/autoProcess/autoProcessTV.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/core/autoProcess/autoProcessMusic.py b/core/autoProcess/autoProcessMusic.py index d5b79134..2255ac60 100644 --- a/core/autoProcess/autoProcessMusic.py +++ b/core/autoProcess/autoProcessMusic.py @@ -24,7 +24,7 @@ class autoProcessMusic(object): try: r = requests.get(url, params=params, verify=False, timeout=(30, 120)) - except Exception, e: + except Exception as e: logger.error("Unable to open URL") return None diff --git a/core/autoProcess/autoProcessTV.py b/core/autoProcess/autoProcessTV.py index 1e7b7fbd..00d89538 100644 --- a/core/autoProcess/autoProcessTV.py +++ b/core/autoProcess/autoProcessTV.py @@ -131,7 +131,7 @@ class autoProcessTV(object): # won't process the directory because it doesn't exist. 
try: os.makedirs(dirName) # Attempt to create the directory - except OSError, e: + except OSError as e: # Re-raise the error if it wasn't about the directory not existing if e.errno != errno.EEXIST: raise From 8235134fad52af05ec26ed8a7492c6f9481a4efd Mon Sep 17 00:00:00 2001 From: labrys Date: Tue, 31 May 2016 03:44:33 -0400 Subject: [PATCH 13/82] Rewrite dictionary creation as a dictionary literal --- core/autoProcess/autoProcessComics.py | 8 +++----- core/autoProcess/autoProcessGames.py | 11 ++++++----- core/autoProcess/autoProcessMusic.py | 19 +++++++++---------- 3 files changed, 18 insertions(+), 20 deletions(-) diff --git a/core/autoProcess/autoProcessComics.py b/core/autoProcess/autoProcessComics.py index 9ac9e9f2..74ab1a78 100644 --- a/core/autoProcess/autoProcessComics.py +++ b/core/autoProcess/autoProcessComics.py @@ -48,11 +48,9 @@ class autoProcessComics(object): if len(ext) == 4: # we assume this was a standard extension. inputName = clean_name - params = {} - params['nzb_folder'] = dirName - - if remote_path: - params['nzb_folder'] = remoteDir(dirName) + params = { + 'nzb_folder': remoteDir(dirName) if remote_path else dirName, + } if inputName is not None: params['nzb_name'] = inputName diff --git a/core/autoProcess/autoProcessGames.py b/core/autoProcess/autoProcessGames.py index a08a8675..17c4ccf6 100644 --- a/core/autoProcess/autoProcessGames.py +++ b/core/autoProcess/autoProcessGames.py @@ -51,11 +51,12 @@ class autoProcessGames(object): if status == 0: downloadStatus = 'Downloaded' - params = {} - params['api_key'] = apikey - params['mode'] = 'UPDATEREQUESTEDSTATUS' - params['db_id'] = gamezID - params['status'] = downloadStatus + params = { + 'api_key': apikey, + 'mode': 'UPDATEREQUESTEDSTATUS', + 'db_id': gamezID, + 'status': downloadStatus + } logger.debug("Opening URL: %s" % (url), section) diff --git a/core/autoProcess/autoProcessMusic.py b/core/autoProcess/autoProcessMusic.py index 2255ac60..501aa0f3 100644 --- 
a/core/autoProcess/autoProcessMusic.py +++ b/core/autoProcess/autoProcessMusic.py @@ -16,9 +16,10 @@ class autoProcessMusic(object): def get_status(self, url, apikey, dirName): logger.debug("Attempting to get current status for release:%s" % (os.path.basename(dirName))) - params = {} - params['apikey'] = apikey - params['cmd'] = "getHistory" + params = { + 'apikey': apikey, + 'cmd': "getHistory" + } logger.debug("Opening URL: %s with PARAMS: %s" % (url, params)) @@ -95,13 +96,11 @@ class autoProcessMusic(object): if status == 0: - params = {} - params['apikey'] = apikey - params['cmd'] = "forceProcess" - - params['dir'] = os.path.dirname(dirName) - if remote_path: - params['dir'] = remoteDir(os.path.dirname(dirName)) + params = { + 'apikey': apikey, + 'cmd': "forceProcess", + 'dir': remoteDir(os.path.dirname(dirName)) if remote_path else os.path.dirname(dirName) + } release_status = self.get_status(url, apikey, dirName) if not release_status: From 8880d11e8af761177e3811eacf633425e4225689 Mon Sep 17 00:00:00 2001 From: labrys Date: Tue, 31 May 2016 03:49:34 -0400 Subject: [PATCH 14/82] Too broad exceptions. * Use .get() with default value instead. * Use ValueError to catch JSONDecodeError from simplejson and ValueError from json standard lib * Use request.RequestException instead. 
--- core/autoProcess/autoProcessComics.py | 15 ++---- core/autoProcess/autoProcessGames.py | 15 ++---- core/autoProcess/autoProcessMovie.py | 24 +++------ core/autoProcess/autoProcessMusic.py | 33 ++++-------- core/autoProcess/autoProcessTV.py | 77 ++++++++------------------- 5 files changed, 44 insertions(+), 120 deletions(-) diff --git a/core/autoProcess/autoProcessComics.py b/core/autoProcess/autoProcessComics.py index 74ab1a78..f7262985 100644 --- a/core/autoProcess/autoProcessComics.py +++ b/core/autoProcess/autoProcessComics.py @@ -20,18 +20,9 @@ class autoProcessComics(object): port = core.CFG[section][inputCategory]["port"] username = core.CFG[section][inputCategory]["username"] password = core.CFG[section][inputCategory]["password"] - try: - ssl = int(core.CFG[section][inputCategory]["ssl"]) - except: - ssl = 0 - try: - web_root = core.CFG[section][inputCategory]["web_root"] - except: - web_root = "" - try: - remote_path = int(core.CFG[section][inputCategory]["remote_path"]) - except: - remote_path = 0 + ssl = int(core.CFG[section][inputCategory].get("ssl", 0)) + web_root = core.CFG[section][inputCategory].get("web_root", "") + remote_path = int(core.CFG[section][inputCategory].get("remote_path"), 0) if ssl: protocol = "https://" diff --git a/core/autoProcess/autoProcessGames.py b/core/autoProcess/autoProcessGames.py index 17c4ccf6..fe0bc890 100644 --- a/core/autoProcess/autoProcessGames.py +++ b/core/autoProcess/autoProcessGames.py @@ -18,18 +18,9 @@ class autoProcessGames(object): host = core.CFG[section][inputCategory]["host"] port = core.CFG[section][inputCategory]["port"] apikey = core.CFG[section][inputCategory]["apikey"] - try: - library = core.CFG[section][inputCategory]["library"] - except: - library = None - try: - ssl = int(core.CFG[section][inputCategory]["ssl"]) - except: - ssl = 0 - try: - web_root = core.CFG[section][inputCategory]["web_root"] - except: - web_root = "" + library = core.CFG[section][inputCategory].get("library") + ssl = 
int(core.CFG[section][inputCategory].get("ssl", 0)) + web_root = core.CFG[section][inputCategory].get("web_root", "") if ssl: protocol = "https://" diff --git a/core/autoProcess/autoProcessMovie.py b/core/autoProcess/autoProcessMovie.py index 0166db2e..93180a71 100644 --- a/core/autoProcess/autoProcessMovie.py +++ b/core/autoProcess/autoProcessMovie.py @@ -37,7 +37,8 @@ class autoProcessMovie(object): try: result = r.json() - except: + except ValueError: + # ValueError catches simplejson's JSONDecodeError and json's ValueError logger.error("CouchPotato returned the following non-json data") for line in r.iter_lines(): logger.error("%s" % (line)) @@ -110,23 +111,10 @@ class autoProcessMovie(object): method = core.CFG[section][inputCategory]["method"] delete_failed = int(core.CFG[section][inputCategory]["delete_failed"]) wait_for = int(core.CFG[section][inputCategory]["wait_for"]) - - try: - ssl = int(core.CFG[section][inputCategory]["ssl"]) - except: - ssl = 0 - try: - web_root = core.CFG[section][inputCategory]["web_root"] - except: - web_root = "" - try: - remote_path = int(core.CFG[section][inputCategory]["remote_path"]) - except: - remote_path = 0 - try: - extract = int(section[inputCategory]["extract"]) - except: - extract = 0 + ssl = int(core.CFG[section][inputCategory].get("ssl", 0)) + web_root = core.CFG[section][inputCategory].get("web_root", "") + remote_path = int(core.CFG[section][inputCategory].get("remote_path", 0)) + extract = int(section[inputCategory].get("extract", 0)) if ssl: protocol = "https://" diff --git a/core/autoProcess/autoProcessMusic.py b/core/autoProcess/autoProcessMusic.py index 501aa0f3..65294eeb 100644 --- a/core/autoProcess/autoProcessMusic.py +++ b/core/autoProcess/autoProcessMusic.py @@ -25,18 +25,20 @@ class autoProcessMusic(object): try: r = requests.get(url, params=params, verify=False, timeout=(30, 120)) - except Exception as e: + except requests.RequestException: logger.error("Unable to open URL") return None try: result = 
r.json() - for album in result: - if os.path.basename(dirName) == album['FolderName']: - return album["Status"].lower() - except: + except ValueError: + # ValueError catches simplejson's JSONDecodeError and json's ValueError return None + for album in result: + if os.path.basename(dirName) == album['FolderName']: + return album["Status"].lower() + def process(self, section, dirName, inputName=None, status=0, clientAgent="manual", inputCategory=None): status = int(status) @@ -44,23 +46,10 @@ class autoProcessMusic(object): port = core.CFG[section][inputCategory]["port"] apikey = core.CFG[section][inputCategory]["apikey"] wait_for = int(core.CFG[section][inputCategory]["wait_for"]) - - try: - ssl = int(core.CFG[section][inputCategory]["ssl"]) - except: - ssl = 0 - try: - web_root = core.CFG[section][inputCategory]["web_root"] - except: - web_root = "" - try: - remote_path = int(core.CFG[section][inputCategory]["remote_path"]) - except: - remote_path = 0 - try: - extract = int(section[inputCategory]["extract"]) - except: - extract = 0 + ssl = int(core.CFG[section][inputCategory].get("ssl", 0)) + web_root = core.CFG[section][inputCategory].get("web_root", "") + remote_path = int(core.CFG[section][inputCategory].get("remote_path", 0)) + extract = int(section[inputCategory].get("extract", 0)) if ssl: protocol = "https://" diff --git a/core/autoProcess/autoProcessTV.py b/core/autoProcess/autoProcessTV.py index 00d89538..42b3d5ab 100644 --- a/core/autoProcess/autoProcessTV.py +++ b/core/autoProcess/autoProcessTV.py @@ -29,9 +29,9 @@ class autoProcessTV(object): return None else: try: - res = json.loads(r.content) - return res['state'] - except: + return r.json()['state'] + except (ValueError, KeyError): + # ValueError catches simplejson's JSONDecodeError and json's ValueError logger.error("%s did not return expected json data." 
% section, section) return None @@ -46,26 +46,22 @@ class autoProcessTV(object): return False else: try: - res = json.loads(r.content) - return res["enableCompletedDownloadHandling"] - except: + return r.json().get("enableCompletedDownloadHandling", False) + except ValueError: + # ValueError catches simplejson's JSONDecodeError and json's ValueError return False def processEpisode(self, section, dirName, inputName=None, failed=False, clientAgent="manual", download_id=None, inputCategory=None, failureLink=None): host = core.CFG[section][inputCategory]["host"] port = core.CFG[section][inputCategory]["port"] - try: - ssl = int(core.CFG[section][inputCategory]["ssl"]) - except: - ssl = 0 + ssl = int(core.CFG[section][inputCategory].get("ssl", 0)) + web_root = core.CFG[section][inputCategory].get("web_root", "") + if ssl: protocol = "https://" else: protocol = "http://" - try: - web_root = core.CFG[section][inputCategory]["web_root"] - except: - web_root = "" + if not server_responding("%s%s:%s%s" % (protocol, host, port, web_root)): logger.error("Server did not respond. Exiting", section) return [1, "%s: Failed to post-process - %s did not respond." 
% (section, section)] @@ -73,48 +69,17 @@ class autoProcessTV(object): # auto-detect correct fork fork, fork_params = autoFork(section, inputCategory) - try: - username = core.CFG[section][inputCategory]["username"] - password = core.CFG[section][inputCategory]["password"] - except: - username = "" - password = "" - try: - apikey = core.CFG[section][inputCategory]["apikey"] - except: - apikey = "" - try: - delete_failed = int(core.CFG[section][inputCategory]["delete_failed"]) - except: - delete_failed = 0 - try: - nzbExtractionBy = core.CFG[section][inputCategory]["nzbExtractionBy"] - except: - nzbExtractionBy = "Downloader" - try: - process_method = core.CFG[section][inputCategory]["process_method"] - except: - process_method = None - try: - remote_path = int(core.CFG[section][inputCategory]["remote_path"]) - except: - remote_path = 0 - try: - wait_for = int(core.CFG[section][inputCategory]["wait_for"]) - except: - wait_for = 2 - try: - force = int(core.CFG[section][inputCategory]["force"]) - except: - force = 0 - try: - delete_on = int(core.CFG[section][inputCategory]["delete_on"]) - except: - delete_on = 0 - try: - extract = int(section[inputCategory]["extract"]) - except: - extract = 0 + username = core.CFG[section][inputCategory].get("username", "") + password = core.CFG[section][inputCategory].get("password", "") + apikey = core.CFG[section][inputCategory].get("apikey", "") + delete_failed = int(core.CFG[section][inputCategory].get("delete_failed", 0)) + nzbExtractionBy = core.CFG[section][inputCategory].get("nzbExtractionBy", "Downloader") + process_method = core.CFG[section][inputCategory].get("process_method") + remote_path = int(core.CFG[section][inputCategory].get("remote_path", 0)) + wait_for = int(core.CFG[section][inputCategory].get("wait_for", 2)) + force = int(core.CFG[section][inputCategory].get("force", 0)) + delete_on = int(core.CFG[section][inputCategory].get("delete_on", 0)) + extract = int(section[inputCategory].get("extract", 0)) if not 
os.path.isdir(dirName) and os.path.isfile(dirName): # If the input directory is a file, assume single file download and split dir/name. dirName = os.path.split(os.path.normpath(dirName))[0] From 88c9d742065e6c8ce379a28ac5a91b4f916af42f Mon Sep 17 00:00:00 2001 From: labrys Date: Tue, 31 May 2016 04:09:14 -0400 Subject: [PATCH 15/82] Fix log message: * Renamed url1 to url * Added `section` argument to CDH * Removed undefined variable `good_files` --- core/autoProcess/autoProcessMusic.py | 2 +- core/autoProcess/autoProcessTV.py | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/core/autoProcess/autoProcessMusic.py b/core/autoProcess/autoProcessMusic.py index 65294eeb..be0be9c1 100644 --- a/core/autoProcess/autoProcessMusic.py +++ b/core/autoProcess/autoProcessMusic.py @@ -80,7 +80,7 @@ class autoProcessMusic(object): inputName, dirName = convert_to_ascii(inputName, dirName) if listMediaFiles(dirName, media=False, audio=True, meta=False, archives=False) and status: - logger.info("Status shown as failed from Downloader, but %s valid video files found. Setting as successful." % (str(good_files)), section) + logger.info("Status shown as failed from Downloader, but valid video files found. 
Setting as successful.", section) status = 0 if status == 0: diff --git a/core/autoProcess/autoProcessTV.py b/core/autoProcess/autoProcessTV.py index 42b3d5ab..e261435e 100644 --- a/core/autoProcess/autoProcessTV.py +++ b/core/autoProcess/autoProcessTV.py @@ -22,7 +22,7 @@ class autoProcessTV(object): try: r = requests.get(url, params=params, headers=headers, stream=True, verify=False, timeout=(30, 60)) except requests.ConnectionError: - logger.error("Unable to open URL: %s" % (url1), section) + logger.error("Unable to open URL: %s" % url, section) return None if r.status_code not in [requests.codes.ok, requests.codes.created, requests.codes.accepted]: logger.error("Server returned status %s" % (str(r.status_code)), section) @@ -35,7 +35,7 @@ class autoProcessTV(object): logger.error("%s did not return expected json data." % section, section) return None - def CDH(self, url2, headers): + def CDH(self, url2, headers, section="MAIN"): try: r = requests.get(url2, params={}, headers=headers, stream=True, verify=False, timeout=(30, 60)) except requests.ConnectionError: @@ -305,8 +305,8 @@ class autoProcessTV(object): elif command_status and command_status in ['failed']: logger.debug("The Scan command has failed. Renaming was not successful.", section) # return [1, "%s: Failed to post-process %s" % (section, inputName) ] - if self.CDH(url2, headers): - logger.debug("The Scan command did not return status completed, but complete Download Handling is enabled. Passing back to %s." % (section), section) + if self.CDH(url2, headers, section=section): + logger.debug("The Scan command did not return status completed, but complete Download Handling is enabled. Passing back to %s." % section, section) return [status, "%s: Complete DownLoad Handling is enabled. Passing back to %s" % (section, section)] else: logger.warning("The Scan command did not return a valid status. 
Renaming was not successful.", section) From 0c459613820bf3b971a70e5fc457bb7c52908cad Mon Sep 17 00:00:00 2001 From: labrys Date: Tue, 31 May 2016 04:42:49 -0400 Subject: [PATCH 16/82] Streamline variable assignment --- core/autoProcess/autoProcessComics.py | 6 +----- core/autoProcess/autoProcessGames.py | 10 ++-------- core/autoProcess/autoProcessMovie.py | 10 ++-------- core/autoProcess/autoProcessMusic.py | 6 +----- core/autoProcess/autoProcessTV.py | 6 +----- 5 files changed, 7 insertions(+), 31 deletions(-) diff --git a/core/autoProcess/autoProcessComics.py b/core/autoProcess/autoProcessComics.py index f7262985..1a0fb43f 100644 --- a/core/autoProcess/autoProcessComics.py +++ b/core/autoProcess/autoProcessComics.py @@ -23,11 +23,7 @@ class autoProcessComics(object): ssl = int(core.CFG[section][inputCategory].get("ssl", 0)) web_root = core.CFG[section][inputCategory].get("web_root", "") remote_path = int(core.CFG[section][inputCategory].get("remote_path"), 0) - - if ssl: - protocol = "https://" - else: - protocol = "http://" + protocol = "https://" if ssl else "http://" url = "%s%s:%s%s/post_process" % (protocol, host, port, web_root) if not server_responding(url): diff --git a/core/autoProcess/autoProcessGames.py b/core/autoProcess/autoProcessGames.py index fe0bc890..336c48b3 100644 --- a/core/autoProcess/autoProcessGames.py +++ b/core/autoProcess/autoProcessGames.py @@ -21,11 +21,7 @@ class autoProcessGames(object): library = core.CFG[section][inputCategory].get("library") ssl = int(core.CFG[section][inputCategory].get("ssl", 0)) web_root = core.CFG[section][inputCategory].get("web_root", "") - - if ssl: - protocol = "https://" - else: - protocol = "http://" + protocol = "https://" if ssl else "http://" url = "%s%s:%s%s/api" % (protocol, host, port, web_root) if not server_responding(url): @@ -38,9 +34,7 @@ class autoProcessGames(object): gamezID = fields[0].replace("[", "").replace("]", "").replace(" ", "") - downloadStatus = 'Wanted' - if status == 0: - 
downloadStatus = 'Downloaded' + downloadStatus = 'Downloaded' if status == 0 else 'Wanted' params = { 'api_key': apikey, diff --git a/core/autoProcess/autoProcessMovie.py b/core/autoProcess/autoProcessMovie.py index 93180a71..3ded9d24 100644 --- a/core/autoProcess/autoProcessMovie.py +++ b/core/autoProcess/autoProcessMovie.py @@ -115,11 +115,7 @@ class autoProcessMovie(object): web_root = core.CFG[section][inputCategory].get("web_root", "") remote_path = int(core.CFG[section][inputCategory].get("remote_path", 0)) extract = int(section[inputCategory].get("extract", 0)) - - if ssl: - protocol = "https://" - else: - protocol = "http://" + protocol = "https://" if ssl else "http://" baseURL = "%s%s:%s%s/api/%s" % (protocol, host, port, web_root, apikey) if not server_responding(baseURL): @@ -213,9 +209,7 @@ class autoProcessMovie(object): params['downloader'] = downloader or clientAgent params['download_id'] = download_id - params['media_folder'] = dirName - if remote_path: - params['media_folder'] = remoteDir(dirName) + params['media_folder'] = remoteDir(dirName) if remote_path else dirName if method == "manage": command = "/manage.update" diff --git a/core/autoProcess/autoProcessMusic.py b/core/autoProcess/autoProcessMusic.py index be0be9c1..8ea22a3c 100644 --- a/core/autoProcess/autoProcessMusic.py +++ b/core/autoProcess/autoProcessMusic.py @@ -50,11 +50,7 @@ class autoProcessMusic(object): web_root = core.CFG[section][inputCategory].get("web_root", "") remote_path = int(core.CFG[section][inputCategory].get("remote_path", 0)) extract = int(section[inputCategory].get("extract", 0)) - - if ssl: - protocol = "https://" - else: - protocol = "http://" + protocol = "https://" if ssl else "http://" url = "%s%s:%s%s/api" % (protocol, host, port, web_root) if not server_responding(url): diff --git a/core/autoProcess/autoProcessTV.py b/core/autoProcess/autoProcessTV.py index e261435e..34d10df2 100644 --- a/core/autoProcess/autoProcessTV.py +++ 
b/core/autoProcess/autoProcessTV.py @@ -56,11 +56,7 @@ class autoProcessTV(object): port = core.CFG[section][inputCategory]["port"] ssl = int(core.CFG[section][inputCategory].get("ssl", 0)) web_root = core.CFG[section][inputCategory].get("web_root", "") - - if ssl: - protocol = "https://" - else: - protocol = "http://" + protocol = "https://" if ssl else "http://" if not server_responding("%s%s:%s%s" % (protocol, host, port, web_root)): logger.error("Server did not respond. Exiting", section) From 51d2c730543028d85c60c9fd0bad53995f392d9e Mon Sep 17 00:00:00 2001 From: Labrys Date: Tue, 31 May 2016 12:02:40 -0400 Subject: [PATCH 17/82] Use `format()` instead of `%` for string formatting --- core/autoProcess/autoProcessComics.py | 20 ++--- core/autoProcess/autoProcessGames.py | 30 +++---- core/autoProcess/autoProcessMovie.py | 112 +++++++++++++------------- core/autoProcess/autoProcessMusic.py | 44 +++++----- core/autoProcess/autoProcessTV.py | 90 ++++++++++----------- 5 files changed, 148 insertions(+), 148 deletions(-) diff --git a/core/autoProcess/autoProcessComics.py b/core/autoProcess/autoProcessComics.py index 1a0fb43f..b6e5b6ee 100644 --- a/core/autoProcess/autoProcessComics.py +++ b/core/autoProcess/autoProcessComics.py @@ -14,7 +14,7 @@ class autoProcessComics(object): def processEpisode(self, section, dirName, inputName=None, status=0, clientAgent='manual', inputCategory=None): if int(status) != 0: logger.warning("FAILED DOWNLOAD DETECTED, nothing to process.", section) - return [1, "%s: Failed to post-process. %s does not support failed downloads" % (section, section)] + return [1, "{0}: Failed to post-process. 
{1} does not support failed downloads".format(section, section)] host = core.CFG[section][inputCategory]["host"] port = core.CFG[section][inputCategory]["port"] @@ -25,10 +25,10 @@ class autoProcessComics(object): remote_path = int(core.CFG[section][inputCategory].get("remote_path"), 0) protocol = "https://" if ssl else "http://" - url = "%s%s:%s%s/post_process" % (protocol, host, port, web_root) + url = "{0}{1}:{2}{3}/post_process".format(protocol, host, port, web_root) if not server_responding(url): logger.error("Server did not respond. Exiting", section) - return [1, "%s: Failed to post-process - %s did not respond." % (section, section)] + return [1, "{0}: Failed to post-process - {1} did not respond.".format(section, section)] inputName, dirName = convert_to_ascii(inputName, dirName) clean_name, ext = os.path.splitext(inputName) @@ -44,25 +44,25 @@ class autoProcessComics(object): success = False - logger.debug("Opening URL: %s" % (url), section) + logger.debug("Opening URL: {0}".format(url), section) try: r = requests.get(url, auth=(username, password), params=params, stream=True, verify=False, timeout=(30, 300)) except requests.ConnectionError: logger.error("Unable to open URL", section) - return [1, "%s: Failed to post-process - Unable to connect to %s" % (section, section)] + return [1, "{0}: Failed to post-process - Unable to connect to {1}".format(section, section)] for line in r.iter_lines(): if line: - logger.postprocess("%s" % (line), section) + logger.postprocess("{0}".format(line), section) if "Post Processing SUCCESSFUL" in line: success = True if r.status_code not in [requests.codes.ok, requests.codes.created, requests.codes.accepted]: - logger.error("Server returned status %s" % (str(r.status_code)), section) - return [1, "%s: Failed to post-process - Server returned status %s" % (section, str(r.status_code))] + logger.error("Server returned status {0}".format(r.status_code), section) + return [1, "{0}: Failed to post-process - Server returned 
status {1}".format(section, r.status_code)] if success: logger.postprocess("SUCCESS: This issue has been processed successfully", section) - return [0, "%s: Successfully post-processed %s" % (section, inputName)] + return [0, "{0}: Successfully post-processed {1}".format(section, inputName)] else: logger.warning("The issue does not appear to have successfully processed. Please check your Logs", section) - return [1, "%s: Failed to post-process - Returned log from %s was not as expected." % (section, section)] + return [1, "{0}: Failed to post-process - Returned log from {1} was not as expected.".format(section, section)] diff --git a/core/autoProcess/autoProcessGames.py b/core/autoProcess/autoProcessGames.py index 336c48b3..ada32b11 100644 --- a/core/autoProcess/autoProcessGames.py +++ b/core/autoProcess/autoProcessGames.py @@ -23,10 +23,10 @@ class autoProcessGames(object): web_root = core.CFG[section][inputCategory].get("web_root", "") protocol = "https://" if ssl else "http://" - url = "%s%s:%s%s/api" % (protocol, host, port, web_root) + url = "{0}{1}:{2}{3}/api".format(protocol, host, port, web_root) if not server_responding(url): logger.error("Server did not respond. Exiting", section) - return [1, "%s: Failed to post-process - %s did not respond." 
% (section, section)] + return [1, "{0}: Failed to post-process - {1} did not respond.".format(section, section)] inputName, dirName = convert_to_ascii(inputName, dirName) @@ -43,33 +43,33 @@ class autoProcessGames(object): 'status': downloadStatus } - logger.debug("Opening URL: %s" % (url), section) + logger.debug("Opening URL: {0}".format(url), section) try: r = requests.get(url, params=params, verify=False, timeout=(30, 300)) except requests.ConnectionError: logger.error("Unable to open URL") - return [1, "%s: Failed to post-process - Unable to connect to %s" % (section, section)] + return [1, "{0}: Failed to post-process - Unable to connect to {1}".format(section, section)] result = r.json() - logger.postprocess("%s" % (result), section) + logger.postprocess("{0}".format(result), section) if library: - logger.postprocess("moving files to library: %s" % (library), section) + logger.postprocess("moving files to library: {0}".format(library), section) try: shutil.move(dirName, os.path.join(library, inputName)) except: - logger.error("Unable to move %s to %s" % (dirName, os.path.join(library, inputName)), section) - return [1, "%s: Failed to post-process - Unable to move files" % (section)] + logger.error("Unable to move {0} to {1}".format(dirName, os.path.join(library, inputName)), section) + return [1, "{0}: Failed to post-process - Unable to move files".format(section)] else: logger.error("No library specified to move files to. 
Please edit your configuration.", section) - return [1, "%s: Failed to post-process - No library defined in %s" % (section, section)] + return [1, "{0}: Failed to post-process - No library defined in {1}".format(section, section)] if r.status_code not in [requests.codes.ok, requests.codes.created, requests.codes.accepted]: - logger.error("Server returned status %s" % (str(r.status_code)), section) - return [1, "%s: Failed to post-process - Server returned status %s" % (section, str(r.status_code))] + logger.error("Server returned status {0}".format(r.status_code), section) + return [1, "{0}: Failed to post-process - Server returned status {1}".format(section, r.status_code)] elif result['success']: - logger.postprocess("SUCCESS: Status for %s has been set to %s in Gamez" % (gamezID, downloadStatus), section) - return [0, "%s: Successfully post-processed %s" % (section, inputName)] + logger.postprocess("SUCCESS: Status for {0} has been set to {1} in Gamez".format(gamezID, downloadStatus), section) + return [0, "{0}: Successfully post-processed {1}".format(section, inputName)] else: - logger.error("FAILED: Status for %s has NOT been updated in Gamez" % (gamezID), section) - return [1, "%s: Failed to post-process - Returned log from %s was not as expected." 
% (section, section)] + logger.error("FAILED: Status for {0} has NOT been updated in Gamez".format(gamezID), section) + return [1, "{0}: Failed to post-process - Returned log from {1} was not as expected.".format(section, section)] diff --git a/core/autoProcess/autoProcessMovie.py b/core/autoProcess/autoProcessMovie.py index 3ded9d24..2c7dce17 100644 --- a/core/autoProcess/autoProcessMovie.py +++ b/core/autoProcess/autoProcessMovie.py @@ -27,12 +27,12 @@ class autoProcessMovie(object): params['id'] = release_id or imdbid url = baseURL + cmd - logger.debug("Opening URL: %s with PARAMS: %s" % (url, params)) + logger.debug("Opening URL: {0} with PARAMS: {1}".format(url, params)) try: r = requests.get(url, params=params, verify=False, timeout=(30, 60)) except requests.ConnectionError: - logger.error("Unable to open URL %s" % url) + logger.error("Unable to open URL {0}".format(url)) return results try: @@ -41,14 +41,14 @@ class autoProcessMovie(object): # ValueError catches simplejson's JSONDecodeError and json's ValueError logger.error("CouchPotato returned the following non-json data") for line in r.iter_lines(): - logger.error("%s" % (line)) + logger.error("{0}".format(line)) return results if not result['success']: if 'error' in result: logger.error(str(result['error'])) else: - logger.error("no media found for id %s" % (params['id'])) + logger.error("no media found for id {0}".format(params['id'])) return results # Gather release info and return it back, no need to narrow results @@ -117,10 +117,10 @@ class autoProcessMovie(object): extract = int(section[inputCategory].get("extract", 0)) protocol = "https://" if ssl else "http://" - baseURL = "%s%s:%s%s/api/%s" % (protocol, host, port, web_root, apikey) + baseURL = "{0}{1}:{2}{3}/api/{4}".format(protocol, host, port, web_root, apikey) if not server_responding(baseURL): logger.error("Server did not respond. Exiting", section) - return [1, "%s: Failed to post-process - %s did not respond." 
% (section, section)] + return [1, "{0}: Failed to post-process - {1} did not respond.".format(section, section)] imdbid = find_imdbid(dirName, inputName) release = self.get_release(baseURL, imdbid, download_id) @@ -154,7 +154,7 @@ class autoProcessMovie(object): inputName, dirName = convert_to_ascii(inputName, dirName) if not listMediaFiles(dirName, media=True, audio=False, meta=False, archives=False) and listMediaFiles(dirName, media=False, audio=False, meta=False, archives=True) and extract: - logger.debug('Checking for archives to extract in directory: %s' % (dirName)) + logger.debug('Checking for archives to extract in directory: {0}'.format(dirName)) core.extractFiles(dirName) inputName, dirName = convert_to_ascii(inputName, dirName) @@ -169,7 +169,7 @@ class autoProcessMovie(object): good_files += 1 if num_files > 0 and good_files == num_files: if status: - logger.info("Status shown as failed from Downloader, but %s valid video files found. Setting as success." % (str(good_files)), section) + logger.info("Status shown as failed from Downloader, but {0} valid video files found. Setting as success.".format(good_files), section) status = 0 elif num_files > 0 and good_files < num_files: logger.info("Status shown as success from Downloader, but corrupt video files found. Setting as failed.", section) @@ -179,10 +179,10 @@ class autoProcessMovie(object): failureLink += '&corrupt=true' status = 1 elif clientAgent == "manual": - logger.warning("No media files found in directory %s to manually process." % (dirName), section) + logger.warning("No media files found in directory {0} to manually process.".format(dirName), section) return [0, ""] # Success (as far as this script is concerned) else: - logger.warning("No media files found in directory %s. Processing this as a failed download" % (dirName), section) + logger.warning("No media files found in directory {0}. 
Processing this as a failed download".format(dirName), section) status = 1 if 'NZBOP_VERSION' in os.environ and os.environ['NZBOP_VERSION'][0:5] >= '14.0': print('[NZB] MARK=BAD') @@ -191,17 +191,17 @@ class autoProcessMovie(object): if core.TRANSCODE == 1: result, newDirName = transcoder.Transcode_directory(dirName) if result == 0: - logger.debug("Transcoding succeeded for files in %s" % (dirName), section) + logger.debug("Transcoding succeeded for files in {0}".format(dirName), section) dirName = newDirName else: - logger.error("Transcoding failed for files in %s" % (dirName), section) - return [1, "%s: Failed to post-process - Transcoding failed" % (section)] + logger.error("Transcoding failed for files in {0}".format(dirName), section) + return [1, "{0}: Failed to post-process - Transcoding failed".format(section)] for video in listMediaFiles(dirName, media=True, audio=False, meta=False, archives=False): if not release and ".cp(tt" not in video and imdbid: videoName, videoExt = os.path.splitext(video) - video2 = "%s.cp(%s)%s" % (videoName, imdbid, videoExt) + video2 = "{0}.cp({1}){2}".format(videoName, imdbid, videoExt) if not (clientAgent in [core.TORRENT_CLIENTAGENT, 'manual'] and core.USELINK == 'move-sym'): - logger.debug('Renaming: %s to: %s' % (video, video2)) + logger.debug('Renaming: {0} to: {1}'.format(video, video2)) os.rename(video, video2) params = {} @@ -217,91 +217,91 @@ class autoProcessMovie(object): else: command = "/renamer.scan" - url = "%s%s" % (baseURL, command) + url = "{0}{1}".format(baseURL, command) - logger.debug("Opening URL: %s with PARAMS: %s" % (url, params), section) + logger.debug("Opening URL: {0} with PARAMS: {1}".format(url, params), section) - logger.postprocess("Starting %s scan for %s" % (method, inputName), section) + logger.postprocess("Starting {0} scan for {1}".format(method, inputName), section) try: r = requests.get(url, params=params, verify=False, timeout=(30, 1800)) except requests.ConnectionError: 
logger.error("Unable to open URL", section) - return [1, "%s: Failed to post-process - Unable to connect to %s" % (section, section)] + return [1, "{0}: Failed to post-process - Unable to connect to {1}".format(section, section)] result = r.json() if r.status_code not in [requests.codes.ok, requests.codes.created, requests.codes.accepted]: - logger.error("Server returned status %s" % (str(r.status_code)), section) - return [1, "%s: Failed to post-process - Server returned status %s" % (section, str(r.status_code))] + logger.error("Server returned status {0}".format(r.status_code), section) + return [1, "{0}: Failed to post-process - Server returned status {1}".format(section, r.status_code)] elif result['success']: - logger.postprocess("SUCCESS: Finished %s scan for folder %s" % (method, dirName), section) + logger.postprocess("SUCCESS: Finished {0} scan for folder {1}".format(method, dirName), section) if method == "manage": - return [0, "%s: Successfully post-processed %s" % (section, inputName)] + return [0, "{0}: Successfully post-processed {1}".format(section, inputName)] else: - logger.error("FAILED: %s scan was unable to finish for folder %s. exiting!" % (method, dirName), + logger.error("FAILED: {0} scan was unable to finish for folder {1}. 
exiting!".format(method, dirName), section) - return [1, "%s: Failed to post-process - Server did not return success" % (section)] + return [1, "{0}: Failed to post-process - Server did not return success".format(section)] else: core.FAILED = True - logger.postprocess("FAILED DOWNLOAD DETECTED FOR %s" % (inputName), section) + logger.postprocess("FAILED DOWNLOAD DETECTED FOR {0}".format(inputName), section) if failureLink: reportNzb(failureLink, clientAgent) if delete_failed and os.path.isdir(dirName) and not os.path.dirname(dirName) == dirName: - logger.postprocess("Deleting failed files and folder %s" % dirName, section) + logger.postprocess("Deleting failed files and folder {0}".format(dirName), section) rmDir(dirName) if not release_id and not media_id: - logger.error("Could not find a downloaded movie in the database matching %s, exiting!" % inputName, + logger.error("Could not find a downloaded movie in the database matching {0}, exiting!".format(inputName), section) - return [1, "%s: Failed to post-process - Failed download not found in %s" % (section, section)] + return [1, "{0}: Failed to post-process - Failed download not found in {1}".format(section, section)] if release_id: - logger.postprocess("Setting failed release %s to ignored ..." 
% (inputName), section) + logger.postprocess("Setting failed release {0} to ignored ...".format(inputName), section) url = baseURL + "/release.ignore" params = {'id': release_id} - logger.debug("Opening URL: %s with PARAMS: %s" % (url, params), section) + logger.debug("Opening URL: {0} with PARAMS: {1}".format(url, params), section) try: r = requests.get(url, params=params, verify=False, timeout=(30, 120)) except requests.ConnectionError: - logger.error("Unable to open URL %s" % (url), section) - return [1, "%s: Failed to post-process - Unable to connect to %s" % (section, section)] + logger.error("Unable to open URL {0}".format(url), section) + return [1, "{0}: Failed to post-process - Unable to connect to {1}".format(section, section)] result = r.json() if r.status_code not in [requests.codes.ok, requests.codes.created, requests.codes.accepted]: - logger.error("Server returned status %s" % (str(r.status_code)), section) - return [1, "%s: Failed to post-process - Server returned status %s" % (section, str(r.status_code))] + logger.error("Server returned status {0}".format(r.status_code), section) + return [1, "{0}: Failed to post-process - Server returned status {1}".format(section, r.status_code)] elif result['success']: - logger.postprocess("SUCCESS: %s has been set to ignored ..." % (inputName), section) + logger.postprocess("SUCCESS: {0} has been set to ignored ...".format(inputName), section) else: - logger.warning("FAILED: Unable to set %s to ignored!" 
% (inputName), section) - return [1, "%s: Failed to post-process - Unable to set %s to ignored" % (section, inputName)] + logger.warning("FAILED: Unable to set {0} to ignored!".format(inputName), section) + return [1, "{0}: Failed to post-process - Unable to set {1} to ignored".format(section, inputName)] logger.postprocess("Trying to snatch the next highest ranked release.", section) - url = "%s/movie.searcher.try_next" % (baseURL) - logger.debug("Opening URL: %s" % (url), section) + url = "{0}/movie.searcher.try_next".format(baseURL) + logger.debug("Opening URL: {0}".format(url), section) try: r = requests.get(url, params={'media_id': media_id}, verify=False, timeout=(30, 600)) except requests.ConnectionError: - logger.error("Unable to open URL %s" % (url), section) - return [1, "%s: Failed to post-process - Unable to connect to %s" % (section, section)] + logger.error("Unable to open URL {0}".format(url), section) + return [1, "{0}: Failed to post-process - Unable to connect to {1}".format(section, section)] result = r.json() if r.status_code not in [requests.codes.ok, requests.codes.created, requests.codes.accepted]: - logger.error("Server returned status %s" % (str(r.status_code)), section) - return [1, "%s: Failed to post-process - Server returned status %s" % (section, str(r.status_code))] + logger.error("Server returned status {0}".format(r.status_code), section) + return [1, "{0}: Failed to post-process - Server returned status {1}".format(section, r.status_code)] elif result['success']: logger.postprocess("SUCCESS: Snatched the next highest release ...", section) - return [0, "%s: Successfully snatched next highest release" % (section)] + return [0, "{0}: Successfully snatched next highest release".format(section)] else: logger.postprocess("SUCCESS: Unable to find a new release to snatch now. CP will keep searching!", section) - return [0, "%s: No new release found now. 
%s will keep searching" % (section, section)] + return [0, "{0}: No new release found now. {1} will keep searching".format(section, section)] # Added a release that was not in the wanted list so confirm rename successful by finding this movie media.list. if not release: @@ -315,31 +315,31 @@ class autoProcessMovie(object): if release: try: if release_id is None and release_status_old is None: # we didn't have a release before, but now we do. - logger.postprocess("SUCCESS: Movie %s has now been added to CouchPotato" % (imdbid), section) - return [0, "%s: Successfully post-processed %s" % (section, inputName)] + logger.postprocess("SUCCESS: Movie {0} has now been added to CouchPotato".format(imdbid), section) + return [0, "{0}: Successfully post-processed {1}".format(section, inputName)] release_status_new = release[release_id]['status'] if release_status_new != release_status_old: - logger.postprocess("SUCCESS: Release %s has now been marked with a status of [%s]" % ( + logger.postprocess("SUCCESS: Release {0} has now been marked with a status of [{1}]".format( inputName, str(release_status_new).upper()), section) - return [0, "%s: Successfully post-processed %s" % (section, inputName)] + return [0, "{0}: Successfully post-processed {1}".format(section, inputName)] except: pass if not os.path.isdir(dirName): - logger.postprocess("SUCCESS: Input Directory [%s] has been processed and removed" % ( + logger.postprocess("SUCCESS: Input Directory [{0}] has been processed and removed".format( dirName), section) - return [0, "%s: Successfully post-processed %s" % (section, inputName)] + return [0, "{0}: Successfully post-processed {1}".format(section, inputName)] elif not listMediaFiles(dirName, media=True, audio=False, meta=False, archives=True): - logger.postprocess("SUCCESS: Input Directory [%s] has no remaining media files. This has been fully processed." % ( + logger.postprocess("SUCCESS: Input Directory [{0}] has no remaining media files. 
This has been fully processed.".format( dirName), section) - return [0, "%s: Successfully post-processed %s" % (section, inputName)] + return [0, "{0}: Successfully post-processed {1}".format(section, inputName)] # pause and let CouchPotatoServer catch its breath time.sleep(10 * wait_for) # The status hasn't changed. we have waited 2 minutes which is more than enough. uTorrent can resume seeding now. logger.warning( - "%s does not appear to have changed status after %s minutes, Please check your logs." % (inputName, wait_for), + "{0} does not appear to have changed status after {1} minutes, Please check your logs.".format(inputName, wait_for), section) - return [1, "%s: Failed to post-process - No change in status" % (section)] + return [1, "{0}: Failed to post-process - No change in status".format(section)] diff --git a/core/autoProcess/autoProcessMusic.py b/core/autoProcess/autoProcessMusic.py index 8ea22a3c..f4ec185b 100644 --- a/core/autoProcess/autoProcessMusic.py +++ b/core/autoProcess/autoProcessMusic.py @@ -14,14 +14,14 @@ requests.packages.urllib3.disable_warnings() class autoProcessMusic(object): def get_status(self, url, apikey, dirName): - logger.debug("Attempting to get current status for release:%s" % (os.path.basename(dirName))) + logger.debug("Attempting to get current status for release:{0}".format(os.path.basename(dirName))) params = { 'apikey': apikey, 'cmd': "getHistory" } - logger.debug("Opening URL: %s with PARAMS: %s" % (url, params)) + logger.debug("Opening URL: {0} with PARAMS: {1}".format(url, params)) try: r = requests.get(url, params=params, verify=False, timeout=(30, 120)) @@ -52,10 +52,10 @@ class autoProcessMusic(object): extract = int(section[inputCategory].get("extract", 0)) protocol = "https://" if ssl else "http://" - url = "%s%s:%s%s/api" % (protocol, host, port, web_root) + url = "{0}{1}:{2}{3}/api".format(protocol, host, port, web_root) if not server_responding(url): logger.error("Server did not respond. 
Exiting", section) - return [1, "%s: Failed to post-process - %s did not respond." % (section, section)] + return [1, "{0}: Failed to post-process - {1} did not respond.".format(section, section)] if not os.path.isdir(dirName) and os.path.isfile(dirName): # If the input directory is a file, assume single file download and split dir/name. dirName = os.path.split(os.path.normpath(dirName))[0] @@ -71,7 +71,7 @@ class autoProcessMusic(object): inputName, dirName = convert_to_ascii(inputName, dirName) if not listMediaFiles(dirName, media=False, audio=True, meta=False, archives=False) and listMediaFiles(dirName, media=False, audio=False, meta=False, archives=True) and extract: - logger.debug('Checking for archives to extract in directory: %s' % (dirName)) + logger.debug('Checking for archives to extract in directory: {0}'.format(dirName)) core.extractFiles(dirName) inputName, dirName = convert_to_ascii(inputName, dirName) @@ -89,43 +89,43 @@ class autoProcessMusic(object): release_status = self.get_status(url, apikey, dirName) if not release_status: - logger.error("Could not find a status for %s, is it in the wanted list ?" 
% (inputName), section) + logger.error("Could not find a status for {0}, is it in the wanted list ?".format(inputName), section) - logger.debug("Opening URL: %s with PARAMS: %s" % (url, params), section) + logger.debug("Opening URL: {0} with PARAMS: {1}".format(url, params), section) try: r = requests.get(url, params=params, verify=False, timeout=(30, 300)) except requests.ConnectionError: - logger.error("Unable to open URL %s" % (url), section) - return [1, "%s: Failed to post-process - Unable to connect to %s" % (section, section)] + logger.error("Unable to open URL {0}".format(url), section) + return [1, "{0}: Failed to post-process - Unable to connect to {1}".format(section, section)] - logger.debug("Result: %s" % (r.text), section) + logger.debug("Result: {0}".format(r.text), section) if r.status_code not in [requests.codes.ok, requests.codes.created, requests.codes.accepted]: - logger.error("Server returned status %s" % (str(r.status_code)), section) - return [1, "%s: Failed to post-process - Server returned status %s" % (section, str(r.status_code))] + logger.error("Server returned status {0}".format(r.status_code), section) + return [1, "{0}: Failed to post-process - Server returned status {1}".format(section, r.status_code)] elif r.text == "OK": - logger.postprocess("SUCCESS: Post-Processing started for %s in folder %s ..." % (inputName, dirName), section) + logger.postprocess("SUCCESS: Post-Processing started for {0} in folder {1} ...".format(inputName, dirName), section) else: - logger.error("FAILED: Post-Processing has NOT started for %s in folder %s. exiting!" % (inputName, dirName), section) - return [1, "%s: Failed to post-process - Returned log from %s was not as expected." % (section, section)] + logger.error("FAILED: Post-Processing has NOT started for {0} in folder {1}. 
exiting!".format(inputName, dirName), section) + return [1, "{0}: Failed to post-process - Returned log from {1} was not as expected.".format(section, section)] else: logger.warning("FAILED DOWNLOAD DETECTED", section) - return [1, "%s: Failed to post-process. %s does not support failed downloads" % (section, section)] + return [1, "{0}: Failed to post-process. {1} does not support failed downloads".format(section, section)] # we will now wait for this album to be processed before returning to TorrentToMedia and unpausing. timeout = time.time() + 60 * wait_for while time.time() < timeout: current_status = self.get_status(url, apikey, dirName) if current_status is not None and current_status != release_status: # Something has changed. CPS must have processed this movie. - logger.postprocess("SUCCESS: This release is now marked as status [%s]" % (current_status), section) - return [0, "%s: Successfully post-processed %s" % (section, inputName)] + logger.postprocess("SUCCESS: This release is now marked as status [{0}]".format(current_status), section) + return [0, "{0}: Successfully post-processed {1}".format(section, inputName)] if not os.path.isdir(dirName): - logger.postprocess("SUCCESS: The input directory %s has been removed Processing must have finished." % (dirName), section) - return [0, "%s: Successfully post-processed %s" % (section, inputName)] + logger.postprocess("SUCCESS: The input directory {0} has been removed Processing must have finished.".format(dirName), section) + return [0, "{0}: Successfully post-processed {1}".format(section, inputName)] time.sleep(10 * wait_for) # The status hasn't changed. uTorrent can resume seeding now. - logger.warning("The music album does not appear to have changed status after %s minutes. Please check your Logs" % (wait_for), section) - return [1, "%s: Failed to post-process - No change in wanted status" % (section)] + logger.warning("The music album does not appear to have changed status after {0} minutes. 
Please check your Logs".format(wait_for), section) + return [1, "{0}: Failed to post-process - No change in wanted status".format(section)] diff --git a/core/autoProcess/autoProcessTV.py b/core/autoProcess/autoProcessTV.py index 34d10df2..030588a9 100644 --- a/core/autoProcess/autoProcessTV.py +++ b/core/autoProcess/autoProcessTV.py @@ -22,27 +22,27 @@ class autoProcessTV(object): try: r = requests.get(url, params=params, headers=headers, stream=True, verify=False, timeout=(30, 60)) except requests.ConnectionError: - logger.error("Unable to open URL: %s" % url, section) + logger.error("Unable to open URL: {0}".format(url), section) return None if r.status_code not in [requests.codes.ok, requests.codes.created, requests.codes.accepted]: - logger.error("Server returned status %s" % (str(r.status_code)), section) + logger.error("Server returned status {0}".format(r.status_code), section) return None else: try: return r.json()['state'] except (ValueError, KeyError): # ValueError catches simplejson's JSONDecodeError and json's ValueError - logger.error("%s did not return expected json data." 
% section, section) + logger.error("{0} did not return expected json data.".format(section), section) return None def CDH(self, url2, headers, section="MAIN"): try: r = requests.get(url2, params={}, headers=headers, stream=True, verify=False, timeout=(30, 60)) except requests.ConnectionError: - logger.error("Unable to open URL: %s" % (url2), section) + logger.error("Unable to open URL: {0}".format(url2), section) return False if r.status_code not in [requests.codes.ok, requests.codes.created, requests.codes.accepted]: - logger.error("Server returned status %s" % (str(r.status_code)), section) + logger.error("Server returned status {0}".format(r.status_code), section) return False else: try: @@ -58,9 +58,9 @@ class autoProcessTV(object): web_root = core.CFG[section][inputCategory].get("web_root", "") protocol = "https://" if ssl else "http://" - if not server_responding("%s%s:%s%s" % (protocol, host, port, web_root)): + if not server_responding("{0}{1}:{2}{3}".format(protocol, host, port, web_root)): logger.error("Server did not respond. Exiting", section) - return [1, "%s: Failed to post-process - %s did not respond." % (section, section)] + return [1, "{0}: Failed to post-process - {1} did not respond.".format(section, section)] # auto-detect correct fork fork, fork_params = autoFork(section, inputCategory) @@ -105,7 +105,7 @@ class autoProcessTV(object): # Now check if tv files exist in destination. 
if not listMediaFiles(dirName, media=True, audio=False, meta=False, archives=False): if listMediaFiles(dirName, media=False, audio=False, meta=False, archives=True) and extract: - logger.debug('Checking for archives to extract in directory: %s' % (dirName)) + logger.debug('Checking for archives to extract in directory: {0}'.format(dirName)) core.extractFiles(dirName) inputName, dirName = convert_to_ascii(inputName, dirName) @@ -135,7 +135,7 @@ class autoProcessTV(object): if failureLink: failureLink += '&corrupt=true' elif clientAgent == "manual": - logger.warning("No media files found in directory %s to manually process." % (dirName), section) + logger.warning("No media files found in directory {0} to manually process.".format(dirName), section) return [0, ""] # Success (as far as this script is concerned) elif nzbExtractionBy == "Destination": logger.info("Check for media files ignored because nzbExtractionBy is set to Destination.") @@ -148,7 +148,7 @@ class autoProcessTV(object): status = 1 failed = 1 else: - logger.warning("No media files found in directory %s. Processing this as a failed download" % (dirName), section) + logger.warning("No media files found in directory {0}. 
Processing this as a failed download".format(dirName), section) status = 1 failed = 1 if 'NZBOP_VERSION' in os.environ and os.environ['NZBOP_VERSION'][0:5] >= '14.0': @@ -157,11 +157,11 @@ class autoProcessTV(object): if status == 0 and core.TRANSCODE == 1: # only transcode successful downloads result, newDirName = transcoder.Transcode_directory(dirName) if result == 0: - logger.debug("SUCCESS: Transcoding succeeded for files in %s" % (dirName), section) + logger.debug("SUCCESS: Transcoding succeeded for files in {0}".format(dirName), section) dirName = newDirName else: - logger.error("FAILED: Transcoding failed for files in %s" % (dirName), section) - return [1, "%s: Failed to post-process - Transcoding failed" % (section)] + logger.error("FAILED: Transcoding failed for files in {0}".format(dirName), section) + return [1, "{0}: Failed to post-process - Transcoding failed".format(section)] # configure SB params to pass fork_params['quiet'] = 1 @@ -205,30 +205,30 @@ class autoProcessTV(object): if failureLink: reportNzb(failureLink, clientAgent) if 'failed' in fork_params: - logger.postprocess("FAILED: The download failed. Sending 'failed' process request to %s branch" % (fork), section) + logger.postprocess("FAILED: The download failed. Sending 'failed' process request to {0} branch".format(fork), section) elif section == "NzbDrone": - logger.postprocess("FAILED: The download failed. Sending failed download to %s for CDH processing" % (fork), section) - return [1, "%s: Downlaod Failed. Sending back to %s" % (section, section)] # Return as failed to flag this in the downloader. + logger.postprocess("FAILED: The download failed. Sending failed download to {0} for CDH processing".format(fork), section) + return [1, "{0}: Downlaod Failed. Sending back to {1}".format(section, section)] # Return as failed to flag this in the downloader. else: - logger.postprocess("FAILED: The download failed. %s branch does not handle failed downloads. 
Nothing to process" % (fork), section) + logger.postprocess("FAILED: The download failed. {0} branch does not handle failed downloads. Nothing to process".format(fork), section) if delete_failed and os.path.isdir(dirName) and not os.path.dirname(dirName) == dirName: - logger.postprocess("Deleting failed files and folder %s" % (dirName), section) + logger.postprocess("Deleting failed files and folder {0}".format(dirName), section) rmDir(dirName) - return [1, "%s: Failed to post-process. %s does not support failed downloads" % (section, section)] # Return as failed to flag this in the downloader. + return [1, "{0}: Failed to post-process. {1} does not support failed downloads".format(section, section)] # Return as failed to flag this in the downloader. url = None if section == "SickBeard": - url = "%s%s:%s%s/home/postprocess/processEpisode" % (protocol, host, port, web_root) + url = "{0}{1}:{2}{3}/home/postprocess/processEpisode".format(protocol, host, port, web_root) elif section == "NzbDrone": - url = "%s%s:%s%s/api/command" % (protocol, host, port, web_root) - url2 = "%s%s:%s%s/api/config/downloadClient" % (protocol, host, port, web_root) + url = "{0}{1}:{2}{3}/api/command".format(protocol, host, port, web_root) + url2 = "{0}{1}:{2}{3}/api/config/downloadClient".format(protocol, host, port, web_root) headers = {"X-Api-Key": apikey} # params = {'sortKey': 'series.title', 'page': 1, 'pageSize': 1, 'sortDir': 'asc'} if remote_path: - logger.debug("remote_path: %s" % (remoteDir(dirName)), section) + logger.debug("remote_path: {0}".format(remoteDir(dirName)), section) data = {"name": "DownloadedEpisodesScan", "path": remoteDir(dirName), "downloadClientId": download_id} else: - logger.debug("path: %s" % (dirName), section) + logger.debug("path: {0}".format(dirName), section) data = {"name": "DownloadedEpisodesScan", "path": dirName, "downloadClientId": download_id} if not download_id: data.pop("downloadClientId") @@ -236,29 +236,29 @@ class autoProcessTV(object): try: 
if section == "SickBeard": - logger.debug("Opening URL: %s with params: %s" % (url, str(fork_params)), section) + logger.debug("Opening URL: {0} with params: {1}".format(url, fork_params), section) s = requests.Session() - login = "%s%s:%s%s/login" % (protocol, host, port, web_root) + login = "{0}{1}:{2}{3}/login".format(protocol, host, port, web_root) login_params = {'username': username, 'password': password} s.post(login, data=login_params, stream=True, verify=False, timeout=(30, 60)) r = s.get(url, auth=(username, password), params=fork_params, stream=True, verify=False, timeout=(30, 1800)) elif section == "NzbDrone": - logger.debug("Opening URL: %s with data: %s" % (url, str(data)), section) + logger.debug("Opening URL: {0} with data: {1}".format(url, data), section) r = requests.post(url, data=data, headers=headers, stream=True, verify=False, timeout=(30, 1800)) except requests.ConnectionError: - logger.error("Unable to open URL: %s" % (url), section) - return [1, "%s: Failed to post-process - Unable to connect to %s" % (section, section)] + logger.error("Unable to open URL: {0}".format(url), section) + return [1, "{0}: Failed to post-process - Unable to connect to {1}".format(section, section)] if r.status_code not in [requests.codes.ok, requests.codes.created, requests.codes.accepted]: - logger.error("Server returned status %s" % (str(r.status_code)), section) - return [1, "%s: Failed to post-process - Server returned status %s" % (section, str(r.status_code))] + logger.error("Server returned status {0}".format(r.status_code), section) + return [1, "{0}: Failed to post-process - Server returned status {1}".format(section, r.status_code)] Success = False Started = False if section == "SickBeard": for line in r.iter_lines(): if line: - logger.postprocess("%s" % (line), section) + logger.postprocess("{0}".format(line), section) if "Moving file from" in line: inputName = os.path.split(line)[1] if "Processing succeeded" in line or "Successfully processed" in 
line: @@ -267,23 +267,23 @@ class autoProcessTV(object): try: res = json.loads(r.content) scan_id = int(res['id']) - logger.debug("Scan started with id: %s" % (str(scan_id)), section) + logger.debug("Scan started with id: {0}".format(scan_id), section) Started = True except Exception as e: - logger.warning("No scan id was returned due to: %s" % (e), section) + logger.warning("No scan id was returned due to: {0}".format(e), section) scan_id = None Started = False if status != 0 and delete_failed and not os.path.dirname(dirName) == dirName: - logger.postprocess("Deleting failed files and folder %s" % (dirName), section) + logger.postprocess("Deleting failed files and folder {0}".format(dirName), section) rmDir(dirName) if Success: - return [0, "%s: Successfully post-processed %s" % (section, inputName)] + return [0, "{0}: Successfully post-processed {1}".format(section, inputName)] elif section == "NzbDrone" and Started: n = 0 params = {} - url = url + "/" + str(scan_id) + url = "{0}/{1}".format(url, scan_id) while n < 6: # set up wait_for minutes to see if command completes.. time.sleep(10 * wait_for) command_status = self.command_complete(url, params, headers, section) @@ -291,21 +291,21 @@ class autoProcessTV(object): break n += 1 if command_status: - logger.debug("The Scan command return status: %s" % (command_status), section) + logger.debug("The Scan command return status: {0}".format(command_status), section) if not os.path.exists(dirName): - logger.debug("The directory %s has been removed. Renaming was successful." % (dirName), section) - return [0, "%s: Successfully post-processed %s" % (section, inputName)] + logger.debug("The directory {0} has been removed. Renaming was successful.".format(dirName), section) + return [0, "{0}: Successfully post-processed {1}".format(section, inputName)] elif command_status and command_status in ['completed']: logger.debug("The Scan command has completed successfully. 
Renaming was successful.", section) - return [0, "%s: Successfully post-processed %s" % (section, inputName)] + return [0, "{0}: Successfully post-processed {1}".format(section, inputName)] elif command_status and command_status in ['failed']: logger.debug("The Scan command has failed. Renaming was not successful.", section) # return [1, "%s: Failed to post-process %s" % (section, inputName) ] if self.CDH(url2, headers, section=section): - logger.debug("The Scan command did not return status completed, but complete Download Handling is enabled. Passing back to %s." % section, section) - return [status, "%s: Complete DownLoad Handling is enabled. Passing back to %s" % (section, section)] + logger.debug("The Scan command did not return status completed, but complete Download Handling is enabled. Passing back to {0}.".format(section), section) + return [status, "{0}: Complete DownLoad Handling is enabled. Passing back to {1}".format(section, section)] else: logger.warning("The Scan command did not return a valid status. Renaming was not successful.", section) - return [1, "%s: Failed to post-process %s" % (section, inputName)] + return [1, "{0}: Failed to post-process {1}".format(section, inputName)] else: - return [1, "%s: Failed to post-process - Returned log from %s was not as expected." % (section, section)] # We did not receive Success confirmation. + return [1, "{0}: Failed to post-process - Returned log from {1} was not as expected.".format(section, section)] # We did not receive Success confirmation. 
From b4541d323611a742ed82aaeaa08f1312fee33e73 Mon Sep 17 00:00:00 2001 From: Labrys Date: Sun, 5 Jun 2016 01:40:13 -0400 Subject: [PATCH 18/82] Fix config option `extract` never being used --- core/autoProcess/autoProcessMovie.py | 2 +- core/autoProcess/autoProcessMusic.py | 2 +- core/autoProcess/autoProcessTV.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/core/autoProcess/autoProcessMovie.py b/core/autoProcess/autoProcessMovie.py index 2c7dce17..0264abc6 100644 --- a/core/autoProcess/autoProcessMovie.py +++ b/core/autoProcess/autoProcessMovie.py @@ -114,7 +114,7 @@ class autoProcessMovie(object): ssl = int(core.CFG[section][inputCategory].get("ssl", 0)) web_root = core.CFG[section][inputCategory].get("web_root", "") remote_path = int(core.CFG[section][inputCategory].get("remote_path", 0)) - extract = int(section[inputCategory].get("extract", 0)) + extract = int(core.CFG[section][inputCategory].get("extract", 0)) protocol = "https://" if ssl else "http://" baseURL = "{0}{1}:{2}{3}/api/{4}".format(protocol, host, port, web_root, apikey) diff --git a/core/autoProcess/autoProcessMusic.py b/core/autoProcess/autoProcessMusic.py index f4ec185b..42c23311 100644 --- a/core/autoProcess/autoProcessMusic.py +++ b/core/autoProcess/autoProcessMusic.py @@ -49,7 +49,7 @@ class autoProcessMusic(object): ssl = int(core.CFG[section][inputCategory].get("ssl", 0)) web_root = core.CFG[section][inputCategory].get("web_root", "") remote_path = int(core.CFG[section][inputCategory].get("remote_path", 0)) - extract = int(section[inputCategory].get("extract", 0)) + extract = int(core.CFG[section][inputCategory].get("extract", 0)) protocol = "https://" if ssl else "http://" url = "{0}{1}:{2}{3}/api".format(protocol, host, port, web_root) diff --git a/core/autoProcess/autoProcessTV.py b/core/autoProcess/autoProcessTV.py index 030588a9..293b0568 100644 --- a/core/autoProcess/autoProcessTV.py +++ b/core/autoProcess/autoProcessTV.py @@ -75,7 +75,7 @@ class 
autoProcessTV(object): wait_for = int(core.CFG[section][inputCategory].get("wait_for", 2)) force = int(core.CFG[section][inputCategory].get("force", 0)) delete_on = int(core.CFG[section][inputCategory].get("delete_on", 0)) - extract = int(section[inputCategory].get("extract", 0)) + extract = int(core.CFG[section][inputCategory].get("extract", 0)) if not os.path.isdir(dirName) and os.path.isfile(dirName): # If the input directory is a file, assume single file download and split dir/name. dirName = os.path.split(os.path.normpath(dirName))[0] From a4fd80e6957498035295004ce71f46e077c78a95 Mon Sep 17 00:00:00 2001 From: Labrys Date: Sun, 5 Jun 2016 01:34:24 -0400 Subject: [PATCH 19/82] Fix TypeError for missing keys by type-casting config to dict --- core/autoProcess/autoProcessComics.py | 16 +++++++------ core/autoProcess/autoProcessGames.py | 14 +++++++----- core/autoProcess/autoProcessMovie.py | 22 ++++++++++-------- core/autoProcess/autoProcessMusic.py | 18 ++++++++------- core/autoProcess/autoProcessTV.py | 33 +++++++++++++++------------ 5 files changed, 57 insertions(+), 46 deletions(-) diff --git a/core/autoProcess/autoProcessComics.py b/core/autoProcess/autoProcessComics.py index b6e5b6ee..70ebee03 100644 --- a/core/autoProcess/autoProcessComics.py +++ b/core/autoProcess/autoProcessComics.py @@ -16,13 +16,15 @@ class autoProcessComics(object): logger.warning("FAILED DOWNLOAD DETECTED, nothing to process.", section) return [1, "{0}: Failed to post-process. 
{1} does not support failed downloads".format(section, section)] - host = core.CFG[section][inputCategory]["host"] - port = core.CFG[section][inputCategory]["port"] - username = core.CFG[section][inputCategory]["username"] - password = core.CFG[section][inputCategory]["password"] - ssl = int(core.CFG[section][inputCategory].get("ssl", 0)) - web_root = core.CFG[section][inputCategory].get("web_root", "") - remote_path = int(core.CFG[section][inputCategory].get("remote_path"), 0) + cfg = dict(core.CFG[section][inputCategory]) + + host = cfg["host"] + port = cfg["port"] + username = cfg["username"] + password = cfg["password"] + ssl = int(cfg.get("ssl", 0)) + web_root = cfg.get("web_root", "") + remote_path = int(cfg.get("remote_path"), 0) protocol = "https://" if ssl else "http://" url = "{0}{1}:{2}{3}/post_process".format(protocol, host, port, web_root) diff --git a/core/autoProcess/autoProcessGames.py b/core/autoProcess/autoProcessGames.py index ada32b11..ef7e7cb9 100644 --- a/core/autoProcess/autoProcessGames.py +++ b/core/autoProcess/autoProcessGames.py @@ -15,12 +15,14 @@ class autoProcessGames(object): def process(self, section, dirName, inputName=None, status=0, clientAgent='manual', inputCategory=None): status = int(status) - host = core.CFG[section][inputCategory]["host"] - port = core.CFG[section][inputCategory]["port"] - apikey = core.CFG[section][inputCategory]["apikey"] - library = core.CFG[section][inputCategory].get("library") - ssl = int(core.CFG[section][inputCategory].get("ssl", 0)) - web_root = core.CFG[section][inputCategory].get("web_root", "") + cfg = dict(core.CFG[section][inputCategory]) + + host = cfg["host"] + port = cfg["port"] + apikey = cfg["apikey"] + library = cfg.get("library") + ssl = int(cfg.get("ssl", 0)) + web_root = cfg.get("web_root", "") protocol = "https://" if ssl else "http://" url = "{0}{1}:{2}{3}/api".format(protocol, host, port, web_root) diff --git a/core/autoProcess/autoProcessMovie.py 
b/core/autoProcess/autoProcessMovie.py index 0264abc6..a5846e14 100644 --- a/core/autoProcess/autoProcessMovie.py +++ b/core/autoProcess/autoProcessMovie.py @@ -105,16 +105,18 @@ class autoProcessMovie(object): def process(self, section, dirName, inputName=None, status=0, clientAgent="manual", download_id="", inputCategory=None, failureLink=None): - host = core.CFG[section][inputCategory]["host"] - port = core.CFG[section][inputCategory]["port"] - apikey = core.CFG[section][inputCategory]["apikey"] - method = core.CFG[section][inputCategory]["method"] - delete_failed = int(core.CFG[section][inputCategory]["delete_failed"]) - wait_for = int(core.CFG[section][inputCategory]["wait_for"]) - ssl = int(core.CFG[section][inputCategory].get("ssl", 0)) - web_root = core.CFG[section][inputCategory].get("web_root", "") - remote_path = int(core.CFG[section][inputCategory].get("remote_path", 0)) - extract = int(core.CFG[section][inputCategory].get("extract", 0)) + cfg = dict(core.CFG[section][inputCategory]) + + host = cfg["host"] + port = cfg["port"] + apikey = cfg["apikey"] + method = cfg["method"] + delete_failed = int(cfg["delete_failed"]) + wait_for = int(cfg["wait_for"]) + ssl = int(cfg.get("ssl", 0)) + web_root = cfg.get("web_root", "") + remote_path = int(cfg.get("remote_path", 0)) + extract = int(cfg.get("extract", 0)) protocol = "https://" if ssl else "http://" baseURL = "{0}{1}:{2}{3}/api/{4}".format(protocol, host, port, web_root, apikey) diff --git a/core/autoProcess/autoProcessMusic.py b/core/autoProcess/autoProcessMusic.py index 42c23311..48bbea69 100644 --- a/core/autoProcess/autoProcessMusic.py +++ b/core/autoProcess/autoProcessMusic.py @@ -42,14 +42,16 @@ class autoProcessMusic(object): def process(self, section, dirName, inputName=None, status=0, clientAgent="manual", inputCategory=None): status = int(status) - host = core.CFG[section][inputCategory]["host"] - port = core.CFG[section][inputCategory]["port"] - apikey = 
core.CFG[section][inputCategory]["apikey"] - wait_for = int(core.CFG[section][inputCategory]["wait_for"]) - ssl = int(core.CFG[section][inputCategory].get("ssl", 0)) - web_root = core.CFG[section][inputCategory].get("web_root", "") - remote_path = int(core.CFG[section][inputCategory].get("remote_path", 0)) - extract = int(core.CFG[section][inputCategory].get("extract", 0)) + cfg = dict(core.CFG[section][inputCategory]) + + host = cfg["host"] + port = cfg["port"] + apikey = cfg["apikey"] + wait_for = int(cfg["wait_for"]) + ssl = int(cfg.get("ssl", 0)) + web_root = cfg.get("web_root", "") + remote_path = int(cfg.get("remote_path", 0)) + extract = int(cfg.get("extract", 0)) protocol = "https://" if ssl else "http://" url = "{0}{1}:{2}{3}/api".format(protocol, host, port, web_root) diff --git a/core/autoProcess/autoProcessTV.py b/core/autoProcess/autoProcessTV.py index 293b0568..153439ff 100644 --- a/core/autoProcess/autoProcessTV.py +++ b/core/autoProcess/autoProcessTV.py @@ -52,10 +52,13 @@ class autoProcessTV(object): return False def processEpisode(self, section, dirName, inputName=None, failed=False, clientAgent="manual", download_id=None, inputCategory=None, failureLink=None): - host = core.CFG[section][inputCategory]["host"] - port = core.CFG[section][inputCategory]["port"] - ssl = int(core.CFG[section][inputCategory].get("ssl", 0)) - web_root = core.CFG[section][inputCategory].get("web_root", "") + + cfg = dict(core.CFG[section][inputCategory]) + + host = cfg["host"] + port = cfg["port"] + ssl = int(cfg.get("ssl", 0)) + web_root = cfg.get("web_root", "") protocol = "https://" if ssl else "http://" if not server_responding("{0}{1}:{2}{3}".format(protocol, host, port, web_root)): @@ -65,17 +68,17 @@ class autoProcessTV(object): # auto-detect correct fork fork, fork_params = autoFork(section, inputCategory) - username = core.CFG[section][inputCategory].get("username", "") - password = core.CFG[section][inputCategory].get("password", "") - apikey = 
core.CFG[section][inputCategory].get("apikey", "") - delete_failed = int(core.CFG[section][inputCategory].get("delete_failed", 0)) - nzbExtractionBy = core.CFG[section][inputCategory].get("nzbExtractionBy", "Downloader") - process_method = core.CFG[section][inputCategory].get("process_method") - remote_path = int(core.CFG[section][inputCategory].get("remote_path", 0)) - wait_for = int(core.CFG[section][inputCategory].get("wait_for", 2)) - force = int(core.CFG[section][inputCategory].get("force", 0)) - delete_on = int(core.CFG[section][inputCategory].get("delete_on", 0)) - extract = int(core.CFG[section][inputCategory].get("extract", 0)) + username = cfg.get("username", "") + password = cfg.get("password", "") + apikey = cfg.get("apikey", "") + delete_failed = int(cfg.get("delete_failed", 0)) + nzbExtractionBy = cfg.get("nzbExtractionBy", "Downloader") + process_method = cfg.get("process_method") + remote_path = int(cfg.get("remote_path", 0)) + wait_for = int(cfg.get("wait_for", 2)) + force = int(cfg.get("force", 0)) + delete_on = int(cfg.get("delete_on", 0)) + extract = int(cfg.get("extract", 0)) if not os.path.isdir(dirName) and os.path.isfile(dirName): # If the input directory is a file, assume single file download and split dir/name. 
dirName = os.path.split(os.path.normpath(dirName))[0] From 81ffe0456d46b6b2160e3f6aacdb9e6238a25bb7 Mon Sep 17 00:00:00 2001 From: Labrys Date: Sat, 4 Jun 2016 21:36:03 -0400 Subject: [PATCH 20/82] Add encoding declaration --- core/__init__.py | 1 + core/autoProcess/__init__.py | 1 + core/databases/__init__.py | 1 + core/databases/mainDB.py | 2 ++ core/extractor/__init__.py | 1 + core/extractor/extractor.py | 1 + core/gh_api.py | 1 + core/linktastic/__init__.py | 1 + core/linktastic/linktastic.py | 1 + core/logger.py | 1 + core/nzbToMediaAutoFork.py | 1 + core/nzbToMediaConfig.py | 1 + core/nzbToMediaDB.py | 1 + core/nzbToMediaSceneExceptions.py | 1 + core/nzbToMediaUserScript.py | 1 + core/nzbToMediaUtil.py | 1 + core/synchronousdeluge/__init__.py | 1 + core/synchronousdeluge/client.py | 1 + core/synchronousdeluge/exceptions.py | 1 + core/synchronousdeluge/protocol.py | 1 + core/synchronousdeluge/rencode.py | 2 +- core/synchronousdeluge/transfer.py | 1 + core/transcoder/__init__.py | 1 + core/transcoder/transcoder.py | 1 + core/transmissionrpc/six.py | 1 + core/utorrent/__init__.py | 1 + core/utorrent/upload.py | 1 + core/versionCheck.py | 1 + 28 files changed, 29 insertions(+), 1 deletion(-) diff --git a/core/__init__.py b/core/__init__.py index a9d2a1e9..50ffc928 100644 --- a/core/__init__.py +++ b/core/__init__.py @@ -1,3 +1,4 @@ +# coding=utf-8 import locale import os import re diff --git a/core/autoProcess/__init__.py b/core/autoProcess/__init__.py index e69de29b..bf893c06 100644 --- a/core/autoProcess/__init__.py +++ b/core/autoProcess/__init__.py @@ -0,0 +1 @@ +# coding=utf-8 \ No newline at end of file diff --git a/core/databases/__init__.py b/core/databases/__init__.py index 96661806..737828fb 100644 --- a/core/databases/__init__.py +++ b/core/databases/__init__.py @@ -1 +1,2 @@ +# coding=utf-8 __all__ = ["mainDB"] \ No newline at end of file diff --git a/core/databases/mainDB.py b/core/databases/mainDB.py index 89b89529..71c1e3b2 100644 --- 
a/core/databases/mainDB.py +++ b/core/databases/mainDB.py @@ -1,3 +1,4 @@ +# coding=utf-8 import core from core import logger, nzbToMediaDB from core.nzbToMediaUtil import backupVersionedFile @@ -5,6 +6,7 @@ from core.nzbToMediaUtil import backupVersionedFile MIN_DB_VERSION = 1 # oldest db version we support migrating from MAX_DB_VERSION = 2 + def backupDatabase(version): logger.info("Backing up database before upgrade") if not backupVersionedFile(nzbToMediaDB.dbFilename(), version): diff --git a/core/extractor/__init__.py b/core/extractor/__init__.py index e69de29b..bf893c06 100644 --- a/core/extractor/__init__.py +++ b/core/extractor/__init__.py @@ -0,0 +1 @@ +# coding=utf-8 \ No newline at end of file diff --git a/core/extractor/extractor.py b/core/extractor/extractor.py index 92faf7be..68d5e6df 100644 --- a/core/extractor/extractor.py +++ b/core/extractor/extractor.py @@ -1,3 +1,4 @@ +# coding=utf-8 import os import platform import shutil diff --git a/core/gh_api.py b/core/gh_api.py index abdf8b1d..8da2a794 100644 --- a/core/gh_api.py +++ b/core/gh_api.py @@ -1,3 +1,4 @@ +# coding=utf-8 import json import requests diff --git a/core/linktastic/__init__.py b/core/linktastic/__init__.py index e69de29b..bf893c06 100644 --- a/core/linktastic/__init__.py +++ b/core/linktastic/__init__.py @@ -0,0 +1 @@ +# coding=utf-8 \ No newline at end of file diff --git a/core/linktastic/linktastic.py b/core/linktastic/linktastic.py index 408bbc2e..9d981b57 100644 --- a/core/linktastic/linktastic.py +++ b/core/linktastic/linktastic.py @@ -1,3 +1,4 @@ +# coding=utf-8 # Linktastic Module # - A python2/3 compatible module that can create hardlinks/symlinks on windows-based systems # diff --git a/core/logger.py b/core/logger.py index 671bfc04..0b8a5446 100644 --- a/core/logger.py +++ b/core/logger.py @@ -1,3 +1,4 @@ +# coding=utf-8 from __future__ import with_statement import os diff --git a/core/nzbToMediaAutoFork.py b/core/nzbToMediaAutoFork.py index 966b453f..f5140228 100644 --- 
a/core/nzbToMediaAutoFork.py +++ b/core/nzbToMediaAutoFork.py @@ -1,3 +1,4 @@ +# coding=utf-8 import urllib import core import requests diff --git a/core/nzbToMediaConfig.py b/core/nzbToMediaConfig.py index c5c14512..40a48b2b 100644 --- a/core/nzbToMediaConfig.py +++ b/core/nzbToMediaConfig.py @@ -1,3 +1,4 @@ +# coding=utf-8 import os import shutil import copy diff --git a/core/nzbToMediaDB.py b/core/nzbToMediaDB.py index 2c3335e7..9ca856d5 100644 --- a/core/nzbToMediaDB.py +++ b/core/nzbToMediaDB.py @@ -1,3 +1,4 @@ +# coding=utf-8 from __future__ import with_statement import re diff --git a/core/nzbToMediaSceneExceptions.py b/core/nzbToMediaSceneExceptions.py index 91796582..7eedd7a0 100644 --- a/core/nzbToMediaSceneExceptions.py +++ b/core/nzbToMediaSceneExceptions.py @@ -1,3 +1,4 @@ +# coding=utf-8 import os import re import core diff --git a/core/nzbToMediaUserScript.py b/core/nzbToMediaUserScript.py index 5d07ab0b..6acc0169 100644 --- a/core/nzbToMediaUserScript.py +++ b/core/nzbToMediaUserScript.py @@ -1,3 +1,4 @@ +# coding=utf-8 import os import core from subprocess import Popen diff --git a/core/nzbToMediaUtil.py b/core/nzbToMediaUtil.py index 3dad6577..53bdf27a 100644 --- a/core/nzbToMediaUtil.py +++ b/core/nzbToMediaUtil.py @@ -1,3 +1,4 @@ +# coding=utf-8 from __future__ import unicode_literals import os import re diff --git a/core/synchronousdeluge/__init__.py b/core/synchronousdeluge/__init__.py index d7edfe22..6155881f 100644 --- a/core/synchronousdeluge/__init__.py +++ b/core/synchronousdeluge/__init__.py @@ -1,3 +1,4 @@ +# coding=utf-8 """A synchronous implementation of the Deluge RPC protocol based on gevent-deluge by Christopher Rosell. 
diff --git a/core/synchronousdeluge/client.py b/core/synchronousdeluge/client.py index f35663e4..b4228d83 100644 --- a/core/synchronousdeluge/client.py +++ b/core/synchronousdeluge/client.py @@ -1,3 +1,4 @@ +# coding=utf-8 import os import platform diff --git a/core/synchronousdeluge/exceptions.py b/core/synchronousdeluge/exceptions.py index da6cf022..ff622cb1 100644 --- a/core/synchronousdeluge/exceptions.py +++ b/core/synchronousdeluge/exceptions.py @@ -1,3 +1,4 @@ +# coding=utf-8 __all__ = ["DelugeRPCError"] class DelugeRPCError(Exception): diff --git a/core/synchronousdeluge/protocol.py b/core/synchronousdeluge/protocol.py index 756d4dfc..9af38b4d 100644 --- a/core/synchronousdeluge/protocol.py +++ b/core/synchronousdeluge/protocol.py @@ -1,3 +1,4 @@ +# coding=utf-8 __all__ = ["DelugeRPCRequest", "DelugeRPCResponse"] class DelugeRPCRequest(object): diff --git a/core/synchronousdeluge/rencode.py b/core/synchronousdeluge/rencode.py index a0a6eec3..0f6ca1ec 100644 --- a/core/synchronousdeluge/rencode.py +++ b/core/synchronousdeluge/rencode.py @@ -1,4 +1,4 @@ - +# coding=utf-8 """ rencode -- Web safe object pickling/unpickling. 
diff --git a/core/synchronousdeluge/transfer.py b/core/synchronousdeluge/transfer.py index 27982fab..0f39bcab 100644 --- a/core/synchronousdeluge/transfer.py +++ b/core/synchronousdeluge/transfer.py @@ -1,3 +1,4 @@ +# coding=utf-8 import zlib import struct import socket diff --git a/core/transcoder/__init__.py b/core/transcoder/__init__.py index 1f47cffe..b1629751 100644 --- a/core/transcoder/__init__.py +++ b/core/transcoder/__init__.py @@ -1 +1,2 @@ +# coding=utf-8 __author__ = 'Justin' diff --git a/core/transcoder/transcoder.py b/core/transcoder/transcoder.py index a3e536f5..622b5f08 100644 --- a/core/transcoder/transcoder.py +++ b/core/transcoder/transcoder.py @@ -1,3 +1,4 @@ +# coding=utf-8 import errno import os import platform diff --git a/core/transmissionrpc/six.py b/core/transmissionrpc/six.py index 836d516c..b73b777a 100644 --- a/core/transmissionrpc/six.py +++ b/core/transmissionrpc/six.py @@ -1,3 +1,4 @@ +# coding=utf-8 """Utilities for writing code that runs on Python 2 and 3""" # Copyright (c) 2010-2013 Benjamin Peterson diff --git a/core/utorrent/__init__.py b/core/utorrent/__init__.py index e69de29b..bf893c06 100644 --- a/core/utorrent/__init__.py +++ b/core/utorrent/__init__.py @@ -0,0 +1 @@ +# coding=utf-8 \ No newline at end of file diff --git a/core/utorrent/upload.py b/core/utorrent/upload.py index 9886c3ec..8a72306a 100644 --- a/core/utorrent/upload.py +++ b/core/utorrent/upload.py @@ -1,3 +1,4 @@ +# coding=utf-8 #code copied from http://www.doughellmann.com/PyMOTW/urllib2/ import itertools diff --git a/core/versionCheck.py b/core/versionCheck.py index 5066a3e9..773a7f25 100644 --- a/core/versionCheck.py +++ b/core/versionCheck.py @@ -1,3 +1,4 @@ +# coding=utf-8 # Author: Nic Wolfe # Modified by: echel0n From 8cd0e76ef82c8abe44559b4e936dccbd01387e4e Mon Sep 17 00:00:00 2001 From: Labrys Date: Sat, 4 Jun 2016 22:07:03 -0400 Subject: [PATCH 21/82] PEP8: Fix formatting * Remove redundant backslash between brackets * Fix multiple statements on 
one line * Fix missing/excess whitespace * Fix comments not starting with a single # and a space * Convert tabs to spaces * Use triple-quoted docstring --- core/__init__.py | 469 +++++++++++++++---------- core/autoProcess/__init__.py | 2 +- core/databases/__init__.py | 2 +- core/databases/mainDB.py | 9 +- core/extractor/__init__.py | 2 +- core/extractor/extractor.py | 29 +- core/gh_api.py | 1 + core/linktastic/__init__.py | 2 +- core/linktastic/linktastic.py | 32 +- core/logger.py | 18 +- core/nzbToMediaAutoFork.py | 17 +- core/nzbToMediaConfig.py | 84 +++-- core/nzbToMediaDB.py | 5 +- core/nzbToMediaSceneExceptions.py | 51 +-- core/nzbToMediaUserScript.py | 10 +- core/nzbToMediaUtil.py | 219 +++++++----- core/synchronousdeluge/client.py | 27 +- core/synchronousdeluge/exceptions.py | 2 +- core/synchronousdeluge/protocol.py | 3 +- core/synchronousdeluge/rencode.py | 191 ++++++---- core/synchronousdeluge/transfer.py | 4 +- core/transcoder/transcoder.py | 233 +++++++----- core/transmissionrpc/__init__.py | 12 +- core/transmissionrpc/client.py | 101 +++--- core/transmissionrpc/constants.py | 507 ++++++++++++++------------- core/transmissionrpc/error.py | 12 +- core/transmissionrpc/httphandler.py | 4 + core/transmissionrpc/session.py | 1 + core/transmissionrpc/six.py | 52 ++- core/transmissionrpc/torrent.py | 39 ++- core/transmissionrpc/utils.py | 21 +- core/utorrent/__init__.py | 2 +- core/utorrent/client.py | 58 +-- core/utorrent/upload.py | 39 +-- core/versionCheck.py | 29 +- 35 files changed, 1342 insertions(+), 947 deletions(-) diff --git a/core/__init__.py b/core/__init__.py index 50ffc928..21864078 100644 --- a/core/__init__.py +++ b/core/__init__.py @@ -34,14 +34,14 @@ from core.autoProcess.autoProcessTV import autoProcessTV from core import logger, versionCheck, nzbToMediaDB from core.nzbToMediaConfig import config from core.nzbToMediaUtil import category_search, sanitizeName, copy_link, parse_args, flatten, getDirs, \ - rmReadOnly,rmDir, pause_torrent, 
resume_torrent, remove_torrent, listMediaFiles, \ + rmReadOnly, rmDir, pause_torrent, resume_torrent, remove_torrent, listMediaFiles, \ extractFiles, cleanDir, update_downloadInfoStatus, get_downloadInfo, WakeUp, makeDir, cleanDir, \ create_torrent_class, listMediaFiles, RunningProcess from core.transcoder import transcoder from core.databases import mainDB # Client Agents -NZB_CLIENTS = ['sabnzbd','nzbget'] +NZB_CLIENTS = ['sabnzbd', 'nzbget'] TORRENT_CLIENTS = ['transmission', 'deluge', 'utorrent', 'rtorrent', 'other'] # sabnzbd constants @@ -62,7 +62,8 @@ FORKS[FORK_FAILED_TORRENT] = {"dir": None, "failed": None, "process_method": Non FORKS[FORK_SICKRAGETV] = {"proc_dir": None, "failed": None, "process_method": None, "force": None, "delete_on": None} FORKS[FORK_SICKRAGE] = {"proc_dir": None, "failed": None, "process_method": None, "force": None, "delete_on": None} FORKS[FORK_SICKGEAR] = {"dir": None, "failed": None, "process_method": None, "force": None} -ALL_FORKS = {"dir": None, "dirName": None, "proc_dir": None, "failed": None, "process_method": None, "force": None, "delete_on": None} +ALL_FORKS = {"dir": None, "dirName": None, "proc_dir": None, "failed": None, "process_method": None, "force": None, + "delete_on": None} # NZBGet Exit Codes NZBGET_POSTPROCESS_PARCHECK = 92 @@ -202,6 +203,7 @@ USER_SCRIPT_RUNONCE = None __INITIALIZED__ = False + def initialize(section=None): global NZBGET_POSTPROCESS_ERROR, NZBGET_POSTPROCESS_NONE, NZBGET_POSTPROCESS_PARCHECK, NZBGET_POSTPROCESS_SUCCESS, \ NZBTOMEDIA_TIMEOUT, FORKS, FORK_DEFAULT, FORK_FAILED_TORRENT, FORK_FAILED, \ @@ -224,7 +226,7 @@ def initialize(section=None): if __INITIALIZED__: return False - + if os.environ.has_key('NTM_LOGFILE'): LOG_FILE = os.environ['NTM_LOGFILE'] LOG_DIR = os.path.split(LOG_FILE)[0] @@ -316,7 +318,8 @@ def initialize(section=None): # restart nzbToMedia try: del MYAPP - except: pass + except: + pass restart() else: logger.error("Update wasn't successful, not restarting. 
Check your log for more information.") @@ -334,8 +337,10 @@ def initialize(section=None): SABNZBDAPIKEY = CFG["Nzb"]["sabnzbd_apikey"] NZB_DEFAULTDIR = CFG["Nzb"]["default_downloadDirectory"] GROUPS = CFG["Custom"]["remove_group"] - if isinstance(GROUPS, str): GROUPS = GROUPS.split(',') - if GROUPS == ['']: GROUPS = None + if isinstance(GROUPS, str): + GROUPS = GROUPS.split(',') + if GROUPS == ['']: + GROUPS = None TORRENT_CLIENTAGENT = CFG["Torrent"]["clientAgent"] # utorrent | deluge | transmission | rtorrent | vuze |other USELINK = CFG["Torrent"]["useLink"] # no | hard | sym @@ -343,8 +348,10 @@ def initialize(section=None): TORRENT_DEFAULTDIR = CFG["Torrent"]["default_downloadDirectory"] CATEGORIES = (CFG["Torrent"]["categories"]) # music,music_videos,pictures,software NOFLATTEN = (CFG["Torrent"]["noFlatten"]) - if isinstance(NOFLATTEN, str): NOFLATTEN = NOFLATTEN.split(',') - if isinstance(CATEGORIES, str): CATEGORIES = CATEGORIES.split(',') + if isinstance(NOFLATTEN, str): + NOFLATTEN = NOFLATTEN.split(',') + if isinstance(CATEGORIES, str): + CATEGORIES = CATEGORIES.split(',') DELETE_ORIGINAL = int(CFG["Torrent"]["deleteOriginal"]) TORRENT_CHMOD_DIRECTORY = int(str(CFG["Torrent"]["chmodDirectory"]), 8) TORRENT_RESUME_ON_FAILURE = int(CFG["Torrent"]["resumeOnFailure"]) @@ -365,9 +372,12 @@ def initialize(section=None): REMOTEPATHS = CFG["Network"]["mount_points"] or [] if REMOTEPATHS: - if isinstance(REMOTEPATHS, list): REMOTEPATHS = ','.join(REMOTEPATHS) # fix in case this imported as list. - REMOTEPATHS = [ tuple(item.split(',')) for item in REMOTEPATHS.split('|') ] # /volume1/Public/,E:\|/volume2/share/,\\NAS\ - REMOTEPATHS = [ (local.strip(), remote.strip()) for local, remote in REMOTEPATHS ] # strip trailing and leading whitespaces + if isinstance(REMOTEPATHS, list): + REMOTEPATHS = ','.join(REMOTEPATHS) # fix in case this imported as list. 
+ REMOTEPATHS = [tuple(item.split(',')) for item in + REMOTEPATHS.split('|')] # /volume1/Public/,E:\|/volume2/share/,\\NAS\ + REMOTEPATHS = [(local.strip(), remote.strip()) for local, remote in + REMOTEPATHS] # strip trailing and leading whitespaces PLEXSSL = int(CFG["Plex"]["plex_ssl"]) PLEXHOST = CFG["Plex"]["plex_host"] @@ -375,62 +385,79 @@ def initialize(section=None): PLEXTOKEN = CFG["Plex"]["plex_token"] PLEXSEC = CFG["Plex"]["plex_sections"] or [] if PLEXSEC: - if isinstance(PLEXSEC, list): PLEXSEC = ','.join(PLEXSEC) # fix in case this imported as list. - PLEXSEC = [ tuple(item.split(',')) for item in PLEXSEC.split('|') ] + if isinstance(PLEXSEC, list): + PLEXSEC = ','.join(PLEXSEC) # fix in case this imported as list. + PLEXSEC = [tuple(item.split(',')) for item in PLEXSEC.split('|')] devnull = open(os.devnull, 'w') try: subprocess.Popen(["nice"], stdout=devnull, stderr=devnull).communicate() NICENESS.extend(['nice', '-n%s' % (int(CFG["Posix"]["niceness"]))]) - except: pass + except: + pass try: subprocess.Popen(["ionice"], stdout=devnull, stderr=devnull).communicate() try: NICENESS.extend(['ionice', '-c%s' % (int(CFG["Posix"]["ionice_class"]))]) - except: pass + except: + pass try: if 'ionice' in NICENESS: NICENESS.extend(['-n%s' % (int(CFG["Posix"]["ionice_classdata"]))]) else: NICENESS.extend(['ionice', '-n%s' % (int(CFG["Posix"]["ionice_classdata"]))]) - except: pass - except: pass + except: + pass + except: + pass devnull.close() COMPRESSEDCONTAINER = [re.compile('.r\d{2}$', re.I), - re.compile('.part\d+.rar$', re.I), - re.compile('.rar$', re.I)] + re.compile('.part\d+.rar$', re.I), + re.compile('.rar$', re.I)] COMPRESSEDCONTAINER += [re.compile('%s$' % ext, re.I) for ext in CFG["Extensions"]["compressedExtensions"]] MEDIACONTAINER = CFG["Extensions"]["mediaExtensions"] AUDIOCONTAINER = CFG["Extensions"]["audioExtensions"] METACONTAINER = CFG["Extensions"]["metaExtensions"] # .nfo,.sub,.srt - if isinstance(COMPRESSEDCONTAINER, str): 
COMPRESSEDCONTAINER = COMPRESSEDCONTAINER.split(',') - if isinstance(MEDIACONTAINER, str): MEDIACONTAINER = MEDIACONTAINER.split(',') - if isinstance(AUDIOCONTAINER, str): AUDIOCONTAINER = AUDIOCONTAINER.split(',') - if isinstance(METACONTAINER, str): METACONTAINER = METACONTAINER.split(',') + if isinstance(COMPRESSEDCONTAINER, str): + COMPRESSEDCONTAINER = COMPRESSEDCONTAINER.split(',') + if isinstance(MEDIACONTAINER, str): + MEDIACONTAINER = MEDIACONTAINER.split(',') + if isinstance(AUDIOCONTAINER, str): + AUDIOCONTAINER = AUDIOCONTAINER.split(',') + if isinstance(METACONTAINER, str): + METACONTAINER = METACONTAINER.split(',') GETSUBS = int(CFG["Transcoder"]["getSubs"]) TRANSCODE = int(CFG["Transcoder"]["transcode"]) DUPLICATE = int(CFG["Transcoder"]["duplicate"]) CONCAT = int(CFG["Transcoder"]["concat"]) IGNOREEXTENSIONS = (CFG["Transcoder"]["ignoreExtensions"]) - if isinstance(IGNOREEXTENSIONS, str): IGNOREEXTENSIONS = IGNOREEXTENSIONS.split(',') + if isinstance(IGNOREEXTENSIONS, str): + IGNOREEXTENSIONS = IGNOREEXTENSIONS.split(',') OUTPUTFASTSTART = int(CFG["Transcoder"]["outputFastStart"]) GENERALOPTS = (CFG["Transcoder"]["generalOptions"]) - if isinstance(GENERALOPTS, str): GENERALOPTS = GENERALOPTS.split(',') - if GENERALOPTS == ['']: GENERALOPTS = [] - if not '-fflags' in GENERALOPTS: GENERALOPTS.append('-fflags') - if not '+genpts' in GENERALOPTS: GENERALOPTS.append('+genpts') + if isinstance(GENERALOPTS, str): + GENERALOPTS = GENERALOPTS.split(',') + if GENERALOPTS == ['']: + GENERALOPTS = [] + if not '-fflags' in GENERALOPTS: + GENERALOPTS.append('-fflags') + if not '+genpts' in GENERALOPTS: + GENERALOPTS.append('+genpts') try: OUTPUTQUALITYPERCENT = int(CFG["Transcoder"]["outputQualityPercent"]) - except: pass + except: + pass OUTPUTVIDEOPATH = CFG["Transcoder"]["outputVideoPath"] PROCESSOUTPUT = int(CFG["Transcoder"]["processOutput"]) ALANGUAGE = CFG["Transcoder"]["audioLanguage"] AINCLUDE = int(CFG["Transcoder"]["allAudioLanguages"]) SLANGUAGES = 
CFG["Transcoder"]["subLanguages"] - if isinstance(SLANGUAGES, str): SLANGUAGES = SLANGUAGES.split(',') - if SLANGUAGES == ['']: SLANGUAGES = [] + if isinstance(SLANGUAGES, str): + SLANGUAGES = SLANGUAGES.split(',') + if SLANGUAGES == ['']: + SLANGUAGES = [] SINCLUDE = int(CFG["Transcoder"]["allSubLanguages"]) SEXTRACT = int(CFG["Transcoder"]["extractSubs"]) SEMBED = int(CFG["Transcoder"]["embedSubs"]) @@ -438,169 +465,215 @@ def initialize(section=None): VEXTENSION = CFG["Transcoder"]["outputVideoExtension"].strip() VCODEC = CFG["Transcoder"]["outputVideoCodec"].strip() VCODEC_ALLOW = CFG["Transcoder"]["VideoCodecAllow"].strip() - if isinstance(VCODEC_ALLOW, str): VCODEC_ALLOW = VCODEC_ALLOW.split(',') - if VCODEC_ALLOW == ['']: VCODEC_ALLOW = [] + if isinstance(VCODEC_ALLOW, str): + VCODEC_ALLOW = VCODEC_ALLOW.split(',') + if VCODEC_ALLOW == ['']: + VCODEC_ALLOW = [] VPRESET = CFG["Transcoder"]["outputVideoPreset"].strip() try: VFRAMERATE = float(CFG["Transcoder"]["outputVideoFramerate"].strip()) - except: pass + except: + pass try: VCRF = int(CFG["Transcoder"]["outputVideoCRF"].strip()) - except: pass + except: + pass try: VLEVEL = CFG["Transcoder"]["outputVideoLevel"].strip() - except: pass + except: + pass try: - VBITRATE = int((CFG["Transcoder"]["outputVideoBitrate"].strip()).replace('k','000')) - except: pass + VBITRATE = int((CFG["Transcoder"]["outputVideoBitrate"].strip()).replace('k', '000')) + except: + pass VRESOLUTION = CFG["Transcoder"]["outputVideoResolution"] ACODEC = CFG["Transcoder"]["outputAudioCodec"].strip() ACODEC_ALLOW = CFG["Transcoder"]["AudioCodecAllow"].strip() - if isinstance(ACODEC_ALLOW, str): ACODEC_ALLOW = ACODEC_ALLOW.split(',') - if ACODEC_ALLOW == ['']: ACODEC_ALLOW = [] + if isinstance(ACODEC_ALLOW, str): + ACODEC_ALLOW = ACODEC_ALLOW.split(',') + if ACODEC_ALLOW == ['']: + ACODEC_ALLOW = [] try: ACHANNELS = int(CFG["Transcoder"]["outputAudioChannels"].strip()) - except: pass + except: + pass try: - ABITRATE = 
int((CFG["Transcoder"]["outputAudioBitrate"].strip()).replace('k','000')) - except: pass + ABITRATE = int((CFG["Transcoder"]["outputAudioBitrate"].strip()).replace('k', '000')) + except: + pass ACODEC2 = CFG["Transcoder"]["outputAudioTrack2Codec"].strip() ACODEC2_ALLOW = CFG["Transcoder"]["AudioCodec2Allow"].strip() - if isinstance(ACODEC2_ALLOW, str): ACODEC2_ALLOW = ACODEC2_ALLOW.split(',') - if ACODEC2_ALLOW == ['']: ACODEC2_ALLOW = [] + if isinstance(ACODEC2_ALLOW, str): + ACODEC2_ALLOW = ACODEC2_ALLOW.split(',') + if ACODEC2_ALLOW == ['']: + ACODEC2_ALLOW = [] try: ACHANNELS2 = int(CFG["Transcoder"]["outputAudioTrack2Channels"].strip()) - except: pass + except: + pass try: - ABITRATE2 = int((CFG["Transcoder"]["outputAudioTrack2Bitrate"].strip()).replace('k','000')) - except: pass + ABITRATE2 = int((CFG["Transcoder"]["outputAudioTrack2Bitrate"].strip()).replace('k', '000')) + except: + pass ACODEC3 = CFG["Transcoder"]["outputAudioOtherCodec"].strip() ACODEC3_ALLOW = CFG["Transcoder"]["AudioOtherCodecAllow"].strip() - if isinstance(ACODEC3_ALLOW, str): ACODEC3_ALLOW = ACODEC3_ALLOW.split(',') - if ACODEC3_ALLOW == ['']: ACODEC3_ALLOW = [] + if isinstance(ACODEC3_ALLOW, str): + ACODEC3_ALLOW = ACODEC3_ALLOW.split(',') + if ACODEC3_ALLOW == ['']: + ACODEC3_ALLOW = [] try: ACHANNELS3 = int(CFG["Transcoder"]["outputAudioOtherChannels"].strip()) - except: pass + except: + pass try: - ABITRATE3 = int((CFG["Transcoder"]["outputAudioOtherBitrate"].strip()).replace('k','000')) - except: pass + ABITRATE3 = int((CFG["Transcoder"]["outputAudioOtherBitrate"].strip()).replace('k', '000')) + except: + pass SCODEC = CFG["Transcoder"]["outputSubtitleCodec"].strip() BURN = int(CFG["Transcoder"]["burnInSubtitle"].strip()) DEFAULTS = CFG["Transcoder"]["outputDefault"].strip() HWACCEL = int(CFG["Transcoder"]["hwAccel"]) - allow_subs = ['.mkv','.mp4', '.m4v', 'asf', 'wma', 'wmv'] + allow_subs = ['.mkv', '.mp4', '.m4v', 'asf', 'wma', 'wmv'] codec_alias = { - 'libx264':['libx264', 
'h264', 'h.264', 'AVC', 'MPEG-4'], - 'libmp3lame':['libmp3lame', 'mp3'], - 'libfaac':['libfaac', 'aac', 'faac'] - } + 'libx264': ['libx264', 'h264', 'h.264', 'AVC', 'MPEG-4'], + 'libmp3lame': ['libmp3lame', 'mp3'], + 'libfaac': ['libfaac', 'aac', 'faac'] + } transcode_defaults = { - 'iPad':{ - 'VEXTENSION':'.mp4','VCODEC':'libx264','VPRESET':None,'VFRAMERATE':None,'VBITRATE':None,'VCRF':None,'VLEVEL':None, - 'VRESOLUTION':None,'VCODEC_ALLOW':['libx264', 'h264', 'h.264', 'AVC', 'avc', 'mpeg4', 'msmpeg4', 'MPEG-4'], - 'ACODEC':'aac','ACODEC_ALLOW':['libfaac'],'ABITRATE':None, 'ACHANNELS':2, - 'ACODEC2':'ac3','ACODEC2_ALLOW':['ac3'],'ABITRATE2':None, 'ACHANNELS2':6, - 'ACODEC3':None,'ACODEC3_ALLOW':[],'ABITRATE3':None, 'ACHANNELS3':None, - 'SCODEC':'mov_text' - }, - 'iPad-1080p':{ - 'VEXTENSION':'.mp4','VCODEC':'libx264','VPRESET':None,'VFRAMERATE':None,'VBITRATE':None,'VCRF':None,'VLEVEL':None, - 'VRESOLUTION':'1920:1080','VCODEC_ALLOW':['libx264', 'h264', 'h.264', 'AVC', 'avc', 'mpeg4', 'msmpeg4', 'MPEG-4'], - 'ACODEC':'aac','ACODEC_ALLOW':['libfaac'],'ABITRATE':None, 'ACHANNELS':2, - 'ACODEC2':'ac3','ACODEC2_ALLOW':['ac3'],'ABITRATE2':None, 'ACHANNELS2':6, - 'ACODEC3':None,'ACODEC3_ALLOW':[],'ABITRATE3':None, 'ACHANNELS3':None, - 'SCODEC':'mov_text' - }, - 'iPad-720p':{ - 'VEXTENSION':'.mp4','VCODEC':'libx264','VPRESET':None,'VFRAMERATE':None,'VBITRATE':None,'VCRF':None,'VLEVEL':None, - 'VRESOLUTION':'1280:720','VCODEC_ALLOW':['libx264', 'h264', 'h.264', 'AVC', 'avc', 'mpeg4', 'msmpeg4', 'MPEG-4'], - 'ACODEC':'aac','ACODEC_ALLOW':['libfaac'],'ABITRATE':None, 'ACHANNELS':2, - 'ACODEC2':'ac3','ACODEC2_ALLOW':['ac3'],'ABITRATE2':None, 'ACHANNELS2':6, - 'ACODEC3':None,'ACODEC3_ALLOW':[],'ABITRATE3':None, 'ACHANNELS3':None, - 'SCODEC':'mov_text' - }, - 'Apple-TV':{ - 'VEXTENSION':'.mp4','VCODEC':'libx264','VPRESET':None,'VFRAMERATE':None,'VBITRATE':None,'VCRF':None,'VLEVEL':None, - 'VRESOLUTION':'1280:720','VCODEC_ALLOW':['libx264', 'h264', 'h.264', 'AVC', 'avc', 
'mpeg4', 'msmpeg4', 'MPEG-4'], - 'ACODEC':'ac3','ACODEC_ALLOW':['ac3'],'ABITRATE':None, 'ACHANNELS':6, - 'ACODEC2':'aac','ACODEC2_ALLOW':['libfaac'],'ABITRATE2':None, 'ACHANNELS2':2, - 'ACODEC3':None,'ACODEC3_ALLOW':[],'ABITRATE3':None, 'ACHANNELS3':None, - 'SCODEC':'mov_text' - }, - 'iPod':{ - 'VEXTENSION':'.mp4','VCODEC':'libx264','VPRESET':None,'VFRAMERATE':None,'VBITRATE':None,'VCRF':None,'VLEVEL':None, - 'VRESOLUTION':'1280:720','VCODEC_ALLOW':['libx264', 'h264', 'h.264', 'AVC', 'avc', 'mpeg4', 'msmpeg4', 'MPEG-4'], - 'ACODEC':'aac','ACODEC_ALLOW':['libfaac'],'ABITRATE':128000, 'ACHANNELS':2, - 'ACODEC2':None,'ACODEC2_ALLOW':[],'ABITRATE2':None, 'ACHANNELS2':None, - 'ACODEC3':None,'ACODEC3_ALLOW':[],'ABITRATE3':None, 'ACHANNELS3':None, - 'SCODEC':'mov_text' - }, - 'iPhone':{ - 'VEXTENSION':'.mp4','VCODEC':'libx264','VPRESET':None,'VFRAMERATE':None,'VBITRATE':None,'VCRF':None,'VLEVEL':None, - 'VRESOLUTION':'460:320','VCODEC_ALLOW':['libx264', 'h264', 'h.264', 'AVC', 'avc', 'mpeg4', 'msmpeg4', 'MPEG-4'], - 'ACODEC':'aac','ACODEC_ALLOW':['libfaac'],'ABITRATE':128000, 'ACHANNELS':2, - 'ACODEC2':None,'ACODEC2_ALLOW':[],'ABITRATE2':None, 'ACHANNELS2':None, - 'ACODEC3':None,'ACODEC3_ALLOW':[],'ABITRATE3':None, 'ACHANNELS3':None, - 'SCODEC':'mov_text' - }, - 'PS3':{ - 'VEXTENSION':'.mp4','VCODEC':'libx264','VPRESET':None,'VFRAMERATE':None,'VBITRATE':None,'VCRF':None,'VLEVEL':None, - 'VRESOLUTION':None,'VCODEC_ALLOW':['libx264', 'h264', 'h.264', 'AVC', 'avc', 'mpeg4', 'msmpeg4', 'MPEG-4'], - 'ACODEC':'ac3','ACODEC_ALLOW':['ac3'],'ABITRATE':None, 'ACHANNELS':6, - 'ACODEC2':'aac','ACODEC2_ALLOW':['libfaac'],'ABITRATE2':None, 'ACHANNELS2':2, - 'ACODEC3':None,'ACODEC3_ALLOW':[],'ABITRATE3':None, 'ACHANNELS3':None, - 'SCODEC':'mov_text' - }, - 'xbox':{ - 'VEXTENSION':'.mp4','VCODEC':'libx264','VPRESET':None,'VFRAMERATE':None,'VBITRATE':None,'VCRF':None,'VLEVEL':None, - 'VRESOLUTION':None,'VCODEC_ALLOW':['libx264', 'h264', 'h.264', 'AVC', 'avc', 'mpeg4', 'msmpeg4', 
'MPEG-4'], - 'ACODEC':'ac3','ACODEC_ALLOW':['ac3'],'ABITRATE':None, 'ACHANNELS':6, - 'ACODEC2':None,'ACODEC2_ALLOW':[],'ABITRATE2':None, 'ACHANNELS2':None, - 'ACODEC3':None,'ACODEC3_ALLOW':[],'ABITRATE3':None, 'ACHANNELS3':None, - 'SCODEC':'mov_text' - }, - 'Roku-480p':{ - 'VEXTENSION':'.mp4','VCODEC':'libx264','VPRESET':None,'VFRAMERATE':None,'VBITRATE':None,'VCRF':None,'VLEVEL':None, - 'VRESOLUTION':None,'VCODEC_ALLOW':['libx264', 'h264', 'h.264', 'AVC', 'avc', 'mpeg4', 'msmpeg4', 'MPEG-4'], - 'ACODEC':'aac','ACODEC_ALLOW':['libfaac'],'ABITRATE':128000, 'ACHANNELS':2, - 'ACODEC2':'ac3','ACODEC2_ALLOW':['ac3'],'ABITRATE2':None, 'ACHANNELS2':6, - 'ACODEC3':None,'ACODEC3_ALLOW':[],'ABITRATE3':None, 'ACHANNELS3':None, - 'SCODEC':'mov_text' - }, - 'Roku-720p':{ - 'VEXTENSION':'.mp4','VCODEC':'libx264','VPRESET':None,'VFRAMERATE':None,'VBITRATE':None,'VCRF':None,'VLEVEL':None, - 'VRESOLUTION':None,'VCODEC_ALLOW':['libx264', 'h264', 'h.264', 'AVC', 'avc', 'mpeg4', 'msmpeg4', 'MPEG-4'], - 'ACODEC':'aac','ACODEC_ALLOW':['libfaac'],'ABITRATE':128000, 'ACHANNELS':2, - 'ACODEC2':'ac3','ACODEC2_ALLOW':['ac3'],'ABITRATE2':None, 'ACHANNELS2':6, - 'ACODEC3':None,'ACODEC3_ALLOW':[],'ABITRATE3':None, 'ACHANNELS3':None, - 'SCODEC':'mov_text' - }, - 'Roku-1080p':{ - 'VEXTENSION':'.mp4','VCODEC':'libx264','VPRESET':None,'VFRAMERATE':None,'VBITRATE':None,'VCRF':None,'VLEVEL':None, - 'VRESOLUTION':None,'VCODEC_ALLOW':['libx264', 'h264', 'h.264', 'AVC', 'avc', 'mpeg4', 'msmpeg4', 'MPEG-4'], - 'ACODEC':'aac','ACODEC_ALLOW':['libfaac'],'ABITRATE':160000, 'ACHANNELS':2, - 'ACODEC2':'ac3','ACODEC2_ALLOW':['ac3'],'ABITRATE2':None, 'ACHANNELS2':6, - 'ACODEC3':None,'ACODEC3_ALLOW':[],'ABITRATE3':None, 'ACHANNELS3':None, - 'SCODEC':'mov_text' - }, - 'mkv':{ - 'VEXTENSION':'.mkv','VCODEC':'libx264','VPRESET':None,'VFRAMERATE':None,'VBITRATE':None,'VCRF':None,'VLEVEL':None, - 'VRESOLUTION':None,'VCODEC_ALLOW':['libx264', 'h264', 'h.264', 'AVC', 'avc', 'mpeg4', 'msmpeg4', 'MPEG-4', 'mpeg2video'], 
- 'ACODEC':'dts','ACODEC_ALLOW':['libfaac', 'dts', 'ac3', 'mp2', 'mp3'],'ABITRATE':None, 'ACHANNELS':8, - 'ACODEC2':None,'ACODEC2_ALLOW':[],'ABITRATE2':None, 'ACHANNELS2':None, - 'ACODEC3':'ac3','ACODEC3_ALLOW':['libfaac', 'dts', 'ac3', 'mp2', 'mp3'],'ABITRATE3':None, 'ACHANNELS3':8, - 'SCODEC':'mov_text' - }, - 'mp4-scene-release':{ - 'VEXTENSION':'.mp4','VCODEC':'libx264','VPRESET':None,'VFRAMERATE':None,'VBITRATE':None,'VCRF':19,'VLEVEL':'3.1', - 'VRESOLUTION':None,'VCODEC_ALLOW':['libx264', 'h264', 'h.264', 'AVC', 'avc', 'mpeg4', 'msmpeg4', 'MPEG-4', 'mpeg2video'], - 'ACODEC':'dts','ACODEC_ALLOW':['libfaac', 'dts', 'ac3', 'mp2', 'mp3'],'ABITRATE':None, 'ACHANNELS':8, - 'ACODEC2':None,'ACODEC2_ALLOW':[],'ABITRATE2':None, 'ACHANNELS2':None, - 'ACODEC3':'ac3','ACODEC3_ALLOW':['libfaac', 'dts', 'ac3', 'mp2', 'mp3'],'ABITRATE3':None, 'ACHANNELS3':8, - 'SCODEC':'mov_text' - } + 'iPad': { + 'VEXTENSION': '.mp4', 'VCODEC': 'libx264', 'VPRESET': None, 'VFRAMERATE': None, 'VBITRATE': None, + 'VCRF': None, 'VLEVEL': None, + 'VRESOLUTION': None, + 'VCODEC_ALLOW': ['libx264', 'h264', 'h.264', 'AVC', 'avc', 'mpeg4', 'msmpeg4', 'MPEG-4'], + 'ACODEC': 'aac', 'ACODEC_ALLOW': ['libfaac'], 'ABITRATE': None, 'ACHANNELS': 2, + 'ACODEC2': 'ac3', 'ACODEC2_ALLOW': ['ac3'], 'ABITRATE2': None, 'ACHANNELS2': 6, + 'ACODEC3': None, 'ACODEC3_ALLOW': [], 'ABITRATE3': None, 'ACHANNELS3': None, + 'SCODEC': 'mov_text' + }, + 'iPad-1080p': { + 'VEXTENSION': '.mp4', 'VCODEC': 'libx264', 'VPRESET': None, 'VFRAMERATE': None, 'VBITRATE': None, + 'VCRF': None, 'VLEVEL': None, + 'VRESOLUTION': '1920:1080', + 'VCODEC_ALLOW': ['libx264', 'h264', 'h.264', 'AVC', 'avc', 'mpeg4', 'msmpeg4', 'MPEG-4'], + 'ACODEC': 'aac', 'ACODEC_ALLOW': ['libfaac'], 'ABITRATE': None, 'ACHANNELS': 2, + 'ACODEC2': 'ac3', 'ACODEC2_ALLOW': ['ac3'], 'ABITRATE2': None, 'ACHANNELS2': 6, + 'ACODEC3': None, 'ACODEC3_ALLOW': [], 'ABITRATE3': None, 'ACHANNELS3': None, + 'SCODEC': 'mov_text' + }, + 'iPad-720p': { + 'VEXTENSION': 
'.mp4', 'VCODEC': 'libx264', 'VPRESET': None, 'VFRAMERATE': None, 'VBITRATE': None, + 'VCRF': None, 'VLEVEL': None, + 'VRESOLUTION': '1280:720', + 'VCODEC_ALLOW': ['libx264', 'h264', 'h.264', 'AVC', 'avc', 'mpeg4', 'msmpeg4', 'MPEG-4'], + 'ACODEC': 'aac', 'ACODEC_ALLOW': ['libfaac'], 'ABITRATE': None, 'ACHANNELS': 2, + 'ACODEC2': 'ac3', 'ACODEC2_ALLOW': ['ac3'], 'ABITRATE2': None, 'ACHANNELS2': 6, + 'ACODEC3': None, 'ACODEC3_ALLOW': [], 'ABITRATE3': None, 'ACHANNELS3': None, + 'SCODEC': 'mov_text' + }, + 'Apple-TV': { + 'VEXTENSION': '.mp4', 'VCODEC': 'libx264', 'VPRESET': None, 'VFRAMERATE': None, 'VBITRATE': None, + 'VCRF': None, 'VLEVEL': None, + 'VRESOLUTION': '1280:720', + 'VCODEC_ALLOW': ['libx264', 'h264', 'h.264', 'AVC', 'avc', 'mpeg4', 'msmpeg4', 'MPEG-4'], + 'ACODEC': 'ac3', 'ACODEC_ALLOW': ['ac3'], 'ABITRATE': None, 'ACHANNELS': 6, + 'ACODEC2': 'aac', 'ACODEC2_ALLOW': ['libfaac'], 'ABITRATE2': None, 'ACHANNELS2': 2, + 'ACODEC3': None, 'ACODEC3_ALLOW': [], 'ABITRATE3': None, 'ACHANNELS3': None, + 'SCODEC': 'mov_text' + }, + 'iPod': { + 'VEXTENSION': '.mp4', 'VCODEC': 'libx264', 'VPRESET': None, 'VFRAMERATE': None, 'VBITRATE': None, + 'VCRF': None, 'VLEVEL': None, + 'VRESOLUTION': '1280:720', + 'VCODEC_ALLOW': ['libx264', 'h264', 'h.264', 'AVC', 'avc', 'mpeg4', 'msmpeg4', 'MPEG-4'], + 'ACODEC': 'aac', 'ACODEC_ALLOW': ['libfaac'], 'ABITRATE': 128000, 'ACHANNELS': 2, + 'ACODEC2': None, 'ACODEC2_ALLOW': [], 'ABITRATE2': None, 'ACHANNELS2': None, + 'ACODEC3': None, 'ACODEC3_ALLOW': [], 'ABITRATE3': None, 'ACHANNELS3': None, + 'SCODEC': 'mov_text' + }, + 'iPhone': { + 'VEXTENSION': '.mp4', 'VCODEC': 'libx264', 'VPRESET': None, 'VFRAMERATE': None, 'VBITRATE': None, + 'VCRF': None, 'VLEVEL': None, + 'VRESOLUTION': '460:320', + 'VCODEC_ALLOW': ['libx264', 'h264', 'h.264', 'AVC', 'avc', 'mpeg4', 'msmpeg4', 'MPEG-4'], + 'ACODEC': 'aac', 'ACODEC_ALLOW': ['libfaac'], 'ABITRATE': 128000, 'ACHANNELS': 2, + 'ACODEC2': None, 'ACODEC2_ALLOW': [], 'ABITRATE2': None, 
'ACHANNELS2': None, + 'ACODEC3': None, 'ACODEC3_ALLOW': [], 'ABITRATE3': None, 'ACHANNELS3': None, + 'SCODEC': 'mov_text' + }, + 'PS3': { + 'VEXTENSION': '.mp4', 'VCODEC': 'libx264', 'VPRESET': None, 'VFRAMERATE': None, 'VBITRATE': None, + 'VCRF': None, 'VLEVEL': None, + 'VRESOLUTION': None, + 'VCODEC_ALLOW': ['libx264', 'h264', 'h.264', 'AVC', 'avc', 'mpeg4', 'msmpeg4', 'MPEG-4'], + 'ACODEC': 'ac3', 'ACODEC_ALLOW': ['ac3'], 'ABITRATE': None, 'ACHANNELS': 6, + 'ACODEC2': 'aac', 'ACODEC2_ALLOW': ['libfaac'], 'ABITRATE2': None, 'ACHANNELS2': 2, + 'ACODEC3': None, 'ACODEC3_ALLOW': [], 'ABITRATE3': None, 'ACHANNELS3': None, + 'SCODEC': 'mov_text' + }, + 'xbox': { + 'VEXTENSION': '.mp4', 'VCODEC': 'libx264', 'VPRESET': None, 'VFRAMERATE': None, 'VBITRATE': None, + 'VCRF': None, 'VLEVEL': None, + 'VRESOLUTION': None, + 'VCODEC_ALLOW': ['libx264', 'h264', 'h.264', 'AVC', 'avc', 'mpeg4', 'msmpeg4', 'MPEG-4'], + 'ACODEC': 'ac3', 'ACODEC_ALLOW': ['ac3'], 'ABITRATE': None, 'ACHANNELS': 6, + 'ACODEC2': None, 'ACODEC2_ALLOW': [], 'ABITRATE2': None, 'ACHANNELS2': None, + 'ACODEC3': None, 'ACODEC3_ALLOW': [], 'ABITRATE3': None, 'ACHANNELS3': None, + 'SCODEC': 'mov_text' + }, + 'Roku-480p': { + 'VEXTENSION': '.mp4', 'VCODEC': 'libx264', 'VPRESET': None, 'VFRAMERATE': None, 'VBITRATE': None, + 'VCRF': None, 'VLEVEL': None, + 'VRESOLUTION': None, + 'VCODEC_ALLOW': ['libx264', 'h264', 'h.264', 'AVC', 'avc', 'mpeg4', 'msmpeg4', 'MPEG-4'], + 'ACODEC': 'aac', 'ACODEC_ALLOW': ['libfaac'], 'ABITRATE': 128000, 'ACHANNELS': 2, + 'ACODEC2': 'ac3', 'ACODEC2_ALLOW': ['ac3'], 'ABITRATE2': None, 'ACHANNELS2': 6, + 'ACODEC3': None, 'ACODEC3_ALLOW': [], 'ABITRATE3': None, 'ACHANNELS3': None, + 'SCODEC': 'mov_text' + }, + 'Roku-720p': { + 'VEXTENSION': '.mp4', 'VCODEC': 'libx264', 'VPRESET': None, 'VFRAMERATE': None, 'VBITRATE': None, + 'VCRF': None, 'VLEVEL': None, + 'VRESOLUTION': None, + 'VCODEC_ALLOW': ['libx264', 'h264', 'h.264', 'AVC', 'avc', 'mpeg4', 'msmpeg4', 'MPEG-4'], + 'ACODEC': 'aac', 
'ACODEC_ALLOW': ['libfaac'], 'ABITRATE': 128000, 'ACHANNELS': 2, + 'ACODEC2': 'ac3', 'ACODEC2_ALLOW': ['ac3'], 'ABITRATE2': None, 'ACHANNELS2': 6, + 'ACODEC3': None, 'ACODEC3_ALLOW': [], 'ABITRATE3': None, 'ACHANNELS3': None, + 'SCODEC': 'mov_text' + }, + 'Roku-1080p': { + 'VEXTENSION': '.mp4', 'VCODEC': 'libx264', 'VPRESET': None, 'VFRAMERATE': None, 'VBITRATE': None, + 'VCRF': None, 'VLEVEL': None, + 'VRESOLUTION': None, + 'VCODEC_ALLOW': ['libx264', 'h264', 'h.264', 'AVC', 'avc', 'mpeg4', 'msmpeg4', 'MPEG-4'], + 'ACODEC': 'aac', 'ACODEC_ALLOW': ['libfaac'], 'ABITRATE': 160000, 'ACHANNELS': 2, + 'ACODEC2': 'ac3', 'ACODEC2_ALLOW': ['ac3'], 'ABITRATE2': None, 'ACHANNELS2': 6, + 'ACODEC3': None, 'ACODEC3_ALLOW': [], 'ABITRATE3': None, 'ACHANNELS3': None, + 'SCODEC': 'mov_text' + }, + 'mkv': { + 'VEXTENSION': '.mkv', 'VCODEC': 'libx264', 'VPRESET': None, 'VFRAMERATE': None, 'VBITRATE': None, + 'VCRF': None, 'VLEVEL': None, + 'VRESOLUTION': None, + 'VCODEC_ALLOW': ['libx264', 'h264', 'h.264', 'AVC', 'avc', 'mpeg4', 'msmpeg4', 'MPEG-4', 'mpeg2video'], + 'ACODEC': 'dts', 'ACODEC_ALLOW': ['libfaac', 'dts', 'ac3', 'mp2', 'mp3'], 'ABITRATE': None, 'ACHANNELS': 8, + 'ACODEC2': None, 'ACODEC2_ALLOW': [], 'ABITRATE2': None, 'ACHANNELS2': None, + 'ACODEC3': 'ac3', 'ACODEC3_ALLOW': ['libfaac', 'dts', 'ac3', 'mp2', 'mp3'], 'ABITRATE3': None, + 'ACHANNELS3': 8, + 'SCODEC': 'mov_text' + }, + 'mp4-scene-release': { + 'VEXTENSION': '.mp4', 'VCODEC': 'libx264', 'VPRESET': None, 'VFRAMERATE': None, 'VBITRATE': None, + 'VCRF': 19, 'VLEVEL': '3.1', + 'VRESOLUTION': None, + 'VCODEC_ALLOW': ['libx264', 'h264', 'h.264', 'AVC', 'avc', 'mpeg4', 'msmpeg4', 'MPEG-4', 'mpeg2video'], + 'ACODEC': 'dts', 'ACODEC_ALLOW': ['libfaac', 'dts', 'ac3', 'mp2', 'mp3'], 'ABITRATE': None, 'ACHANNELS': 8, + 'ACODEC2': None, 'ACODEC2_ALLOW': [], 'ABITRATE2': None, 'ACHANNELS2': None, + 'ACODEC3': 'ac3', 'ACODEC3_ALLOW': ['libfaac', 'dts', 'ac3', 'mp2', 'mp3'], 'ABITRATE3': None, + 'ACHANNELS3': 8, + 'SCODEC': 
'mov_text' } + } if DEFAULTS and DEFAULTS in transcode_defaults: VEXTENSION = transcode_defaults[DEFAULTS]['VEXTENSION'] VCODEC = transcode_defaults[DEFAULTS]['VCODEC'] @@ -630,25 +703,29 @@ def initialize(section=None): if VEXTENSION in allow_subs: ALLOWSUBS = 1 - if not VCODEC_ALLOW and VCODEC: VCODEC_ALLOW.extend([VCODEC]) + if not VCODEC_ALLOW and VCODEC: + VCODEC_ALLOW.extend([VCODEC]) for codec in VCODEC_ALLOW: if codec in codec_alias: - extra = [ item for item in codec_alias[codec] if item not in VCODEC_ALLOW ] + extra = [item for item in codec_alias[codec] if item not in VCODEC_ALLOW] VCODEC_ALLOW.extend(extra) - if not ACODEC_ALLOW and ACODEC: ACODEC_ALLOW.extend([ACODEC]) + if not ACODEC_ALLOW and ACODEC: + ACODEC_ALLOW.extend([ACODEC]) for codec in ACODEC_ALLOW: if codec in codec_alias: - extra = [ item for item in codec_alias[codec] if item not in ACODEC_ALLOW ] + extra = [item for item in codec_alias[codec] if item not in ACODEC_ALLOW] ACODEC_ALLOW.extend(extra) - if not ACODEC2_ALLOW and ACODEC2: ACODEC2_ALLOW.extend([ACODEC2]) + if not ACODEC2_ALLOW and ACODEC2: + ACODEC2_ALLOW.extend([ACODEC2]) for codec in ACODEC2_ALLOW: if codec in codec_alias: - extra = [ item for item in codec_alias[codec] if item not in ACODEC2_ALLOW ] + extra = [item for item in codec_alias[codec] if item not in ACODEC2_ALLOW] ACODEC2_ALLOW.extend(extra) - if not ACODEC3_ALLOW and ACODEC3: ACODEC3_ALLOW.extend([ACODEC3]) + if not ACODEC3_ALLOW and ACODEC3: + ACODEC3_ALLOW.extend([ACODEC3]) for codec in ACODEC3_ALLOW: if codec in codec_alias: - extra = [ item for item in codec_alias[codec] if item not in ACODEC3_ALLOW ] + extra = [item for item in codec_alias[codec] if item not in ACODEC3_ALLOW] ACODEC3_ALLOW.extend(extra) codec_alias = {} # clear memory @@ -674,47 +751,59 @@ def initialize(section=None): else: try: SEVENZIP = subprocess.Popen(['which', '7z'], stdout=subprocess.PIPE).communicate()[0].strip() - except: pass - if not SEVENZIP: + except: + pass + if not SEVENZIP: 
try: SEVENZIP = subprocess.Popen(['which', '7zr'], stdout=subprocess.PIPE).communicate()[0].strip() - except: pass - if not SEVENZIP: + except: + pass + if not SEVENZIP: try: SEVENZIP = subprocess.Popen(['which', '7za'], stdout=subprocess.PIPE).communicate()[0].strip() - except: pass + except: + pass if not SEVENZIP: SEVENZIP = None - logger.warning("Failed to locate 7zip. Transcosing of disk images and extraction of .7z files will not be possible!") - if os.path.isfile(os.path.join(FFMPEG_PATH, 'ffmpeg')) or os.access(os.path.join(FFMPEG_PATH, 'ffmpeg'), os.X_OK): + logger.warning( + "Failed to locate 7zip. Transcosing of disk images and extraction of .7z files will not be possible!") + if os.path.isfile(os.path.join(FFMPEG_PATH, 'ffmpeg')) or os.access(os.path.join(FFMPEG_PATH, 'ffmpeg'), + os.X_OK): FFMPEG = os.path.join(FFMPEG_PATH, 'ffmpeg') - elif os.path.isfile(os.path.join(FFMPEG_PATH, 'avconv')) or os.access(os.path.join(FFMPEG_PATH, 'avconv'), os.X_OK): + elif os.path.isfile(os.path.join(FFMPEG_PATH, 'avconv')) or os.access(os.path.join(FFMPEG_PATH, 'avconv'), + os.X_OK): FFMPEG = os.path.join(FFMPEG_PATH, 'avconv') else: try: FFMPEG = subprocess.Popen(['which', 'ffmpeg'], stdout=subprocess.PIPE).communicate()[0].strip() - except: pass - if not FFMPEG: + except: + pass + if not FFMPEG: try: FFMPEG = subprocess.Popen(['which', 'avconv'], stdout=subprocess.PIPE).communicate()[0].strip() - except: pass + except: + pass if not FFMPEG: FFMPEG = None logger.warning("Failed to locate ffmpeg. 
Transcoding disabled!") logger.warning("Install ffmpeg with x264 support to enable this feature ...") - if os.path.isfile(os.path.join(FFMPEG_PATH, 'ffprobe')) or os.access(os.path.join(FFMPEG_PATH, 'ffprobe'), os.X_OK): + if os.path.isfile(os.path.join(FFMPEG_PATH, 'ffprobe')) or os.access(os.path.join(FFMPEG_PATH, 'ffprobe'), + os.X_OK): FFPROBE = os.path.join(FFMPEG_PATH, 'ffprobe') - elif os.path.isfile(os.path.join(FFMPEG_PATH, 'avprobe')) or os.access(os.path.join(FFMPEG_PATH, 'avprobe'), os.X_OK): + elif os.path.isfile(os.path.join(FFMPEG_PATH, 'avprobe')) or os.access(os.path.join(FFMPEG_PATH, 'avprobe'), + os.X_OK): FFPROBE = os.path.join(FFMPEG_PATH, 'avprobe') else: try: FFPROBE = subprocess.Popen(['which', 'ffprobe'], stdout=subprocess.PIPE).communicate()[0].strip() - except: pass - if not FFPROBE: + except: + pass + if not FFPROBE: try: FFPROBE = subprocess.Popen(['which', 'avprobe'], stdout=subprocess.PIPE).communicate()[0].strip() - except: pass + except: + pass if not FFPROBE: FFPROBE = None if CHECK_MEDIA: @@ -723,7 +812,7 @@ def initialize(section=None): # check for script-defied section and if None set to allow sections SECTIONS = CFG[tuple(x for x in CFG if CFG[x].sections and CFG[x].isenabled()) if not section else (section,)] - for section,subsections in SECTIONS.items(): + for section, subsections in SECTIONS.items(): CATEGORIES.extend([subsection for subsection in subsections if CFG[section][subsection].isenabled()]) CATEGORIES = list(set(CATEGORIES)) @@ -733,6 +822,7 @@ def initialize(section=None): # finished initalizing return True + def restart(): install_type = versionCheck.CheckVersion().install_type @@ -752,11 +842,12 @@ def restart(): os._exit(status) + def rchmod(path, mod): logger.log("Changing file mode of %s to %s" % (path, oct(mod))) os.chmod(path, mod) if not os.path.isdir(path): - return # Skip files + return # Skip files for root, dirs, files in os.walk(path): for d in dirs: diff --git a/core/autoProcess/__init__.py 
b/core/autoProcess/__init__.py index bf893c06..9bad5790 100644 --- a/core/autoProcess/__init__.py +++ b/core/autoProcess/__init__.py @@ -1 +1 @@ -# coding=utf-8 \ No newline at end of file +# coding=utf-8 diff --git a/core/databases/__init__.py b/core/databases/__init__.py index 737828fb..14f97982 100644 --- a/core/databases/__init__.py +++ b/core/databases/__init__.py @@ -1,2 +1,2 @@ # coding=utf-8 -__all__ = ["mainDB"] \ No newline at end of file +__all__ = ["mainDB"] diff --git a/core/databases/mainDB.py b/core/databases/mainDB.py index 71c1e3b2..0d27b526 100644 --- a/core/databases/mainDB.py +++ b/core/databases/mainDB.py @@ -14,6 +14,7 @@ def backupDatabase(version): else: logger.info("Proceeding with upgrade") + # ====================== # = Main DB Migrations = # ====================== @@ -45,21 +46,21 @@ class InitialSchema(nzbToMediaDB.SchemaUpgrade): cur_db_version) + ") is too old to migrate from what this version of nzbToMedia supports (" + \ str(MIN_DB_VERSION) + ").\n" + \ "Please remove nzbtomedia.db file to begin fresh." - ) + ) if cur_db_version > MAX_DB_VERSION: logger.log_error_and_exit("Your database version (" + str( cur_db_version) + ") has been incremented past what this version of nzbToMedia supports (" + \ str(MAX_DB_VERSION) + ").\n" + \ "If you have used other forks of nzbToMedia, your database may be unusable due to their modifications." - ) + ) if cur_db_version < MAX_DB_VERSION: # We need to upgrade. 
queries = [ "CREATE TABLE downloads2 (input_directory TEXT, input_name TEXT, input_hash TEXT, input_id TEXT, client_agent TEXT, status INTEGER, last_update NUMERIC, CONSTRAINT pk_downloadID PRIMARY KEY (input_directory, input_name));", "INSERT INTO downloads2 SELECT * FROM downloads;", "DROP TABLE IF EXISTS downloads;", - "ALTER TABLE downloads2 RENAME TO downloads;", + "ALTER TABLE downloads2 RENAME TO downloads;", "INSERT INTO db_version (db_version) VALUES (2);" ] for query in queries: - self.connection.action(query) \ No newline at end of file + self.connection.action(query) diff --git a/core/extractor/__init__.py b/core/extractor/__init__.py index bf893c06..9bad5790 100644 --- a/core/extractor/__init__.py +++ b/core/extractor/__init__.py @@ -1 +1 @@ -# coding=utf-8 \ No newline at end of file +# coding=utf-8 diff --git a/core/extractor/extractor.py b/core/extractor/extractor.py index 68d5e6df..865802ba 100644 --- a/core/extractor/extractor.py +++ b/core/extractor/extractor.py @@ -8,6 +8,7 @@ import core from subprocess import call, Popen import subprocess + def extract(filePath, outputDestination): success = 0 # Using Windows @@ -22,9 +23,9 @@ def extract(filePath, outputDestination): # Using unix else: required_cmds = ["unrar", "unzip", "tar", "unxz", "unlzma", "7zr", "bunzip2"] - ## Possible future suport: + # ## Possible future suport: # gunzip: gz (cmd will delete original archive) - ## the following do not extract to dest dir + # ## the following do not extract to dest dir # ".xz": ["xz", "-d --keep"], # ".lzma": ["xz", "-d --format=lzma --keep"], # ".bz2": ["bzip2", "-d --keep"], @@ -43,12 +44,13 @@ def extract(filePath, outputDestination): if not os.getenv('TR_TORRENT_DIR'): devnull = open(os.devnull, 'w') for cmd in required_cmds: - if call(['which', cmd], stdout=devnull, stderr=devnull): #note, returns 0 if exists, or 1 if doesn't exist. + if call(['which', cmd], stdout=devnull, + stderr=devnull): # note, returns 0 if exists, or 1 if doesn't exist. 
if cmd == "7zr" and not call(["which", "7z"]): # we do have "7z" command EXTRACT_COMMANDS[".7z"] = ["7z", "x"] elif cmd == "7zr" and not call(["which", "7za"]): # we do have "7za" command EXTRACT_COMMANDS[".7z"] = ["7za", "x"] - else: + else: for k, v in EXTRACT_COMMANDS.items(): if cmd in v[0]: core.logger.error("EXTRACTOR: %s not found, disabling support for %s" % (cmd, k)) @@ -77,7 +79,7 @@ def extract(filePath, outputDestination): core.logger.debug("EXTRACTOR: Unknown file type: %s" % ext[1]) return False - # Create outputDestination folder + # Create outputDestination folder core.makeDir(outputDestination) if core.PASSWORDSFILE != "" and os.path.isfile(os.path.normpath(core.PASSWORDSFILE)): @@ -99,7 +101,7 @@ def extract(filePath, outputDestination): pwd = os.getcwd() # Get our Present Working Directory os.chdir(outputDestination) # Not all unpack commands accept full paths, so just extract into this directory devnull = open(os.devnull, 'w') - + try: # now works same for nt and *nix info = None cmd.append(filePath) # add filePath to final cmd arg. @@ -112,7 +114,8 @@ def extract(filePath, outputDestination): cmd2.append("-p-") # don't prompt for password. p = Popen(cmd2, stdout=devnull, stderr=devnull, startupinfo=info) # should extract files fine. res = p.wait() - if (res >= 0 and os.name == 'nt') or res == 0: # for windows chp returns process id if successful or -1*Error code. Linux returns 0 for successful. + if ( + res >= 0 and os.name == 'nt') or res == 0: # for windows chp returns process id if successful or -1*Error code. Linux returns 0 for successful. core.logger.info("EXTRACTOR: Extraction was successful for %s to %s" % (filePath, outputDestination)) success = 1 elif len(passwords) > 0: @@ -121,14 +124,14 @@ def extract(filePath, outputDestination): if password == "": # if edited in windows or otherwise if blank lines. continue cmd2 = cmd - #append password here. + # append password here. 
passcmd = "-p" + password cmd2.append(passcmd) p = Popen(cmd2, stdout=devnull, stderr=devnull, startupinfo=info) # should extract files fine. res = p.wait() if (res >= 0 and platform == 'Windows') or res == 0: core.logger.info("EXTRACTOR: Extraction was successful for %s to %s using password: %s" % ( - filePath, outputDestination, password)) + filePath, outputDestination, password)) success = 1 break else: @@ -142,19 +145,21 @@ def extract(filePath, outputDestination): os.chdir(pwd) # Go back to our Original Working Directory if success: # sleep to let files finish writing to disk - sleep (3) + sleep(3) perms = stat.S_IMODE(os.lstat(os.path.split(filePath)[0]).st_mode) for dir, subdirs, files in os.walk(outputDestination): for subdir in subdirs: if not os.path.join(dir, subdir) in origFiles: try: os.chmod(os.path.join(dir, subdir), perms) - except: pass + except: + pass for file in files: if not os.path.join(dir, file) in origFiles: try: shutil.copymode(filePath, os.path.join(dir, file)) - except: pass + except: + pass return True else: core.logger.error("EXTRACTOR: Extraction failed for %s. Result was %s" % (filePath, res)) diff --git a/core/gh_api.py b/core/gh_api.py index 8da2a794..95faf10e 100644 --- a/core/gh_api.py +++ b/core/gh_api.py @@ -2,6 +2,7 @@ import json import requests + class GitHub(object): """ Simple api wrapper for the Github API v3. diff --git a/core/linktastic/__init__.py b/core/linktastic/__init__.py index bf893c06..9bad5790 100644 --- a/core/linktastic/__init__.py +++ b/core/linktastic/__init__.py @@ -1 +1 @@ -# coding=utf-8 \ No newline at end of file +# coding=utf-8 diff --git a/core/linktastic/linktastic.py b/core/linktastic/linktastic.py index 9d981b57..af690158 100644 --- a/core/linktastic/linktastic.py +++ b/core/linktastic/linktastic.py @@ -30,6 +30,7 @@ if os.name == 'nt': info = subprocess.STARTUPINFO() info.dwFlags |= subprocess.STARTF_USESHOWWINDOW + # Prevent spaces from messing with us! 
def _escape_param(param): return '"%s"' % param @@ -45,9 +46,9 @@ def _link_windows(src, dest): raise IOError(err.output.decode('utf-8')) - # TODO, find out what kind of messages Windows sends us from mklink - # print(stdout) - # assume if they ret-coded 0 we're good + # TODO, find out what kind of messages Windows sends us from mklink + # print(stdout) + # assume if they ret-coded 0 we're good def _symlink_windows(src, dest): @@ -58,9 +59,10 @@ def _symlink_windows(src, dest): except CalledProcessError as err: raise IOError(err.output.decode('utf-8')) - # TODO, find out what kind of messages Windows sends us from mklink - # print(stdout) - # assume if they ret-coded 0 we're good + # TODO, find out what kind of messages Windows sends us from mklink + # print(stdout) + # assume if they ret-coded 0 we're good + def _dirlink_windows(src, dest): try: @@ -70,9 +72,10 @@ def _dirlink_windows(src, dest): except CalledProcessError as err: raise IOError(err.output.decode('utf-8')) - # TODO, find out what kind of messages Windows sends us from mklink - # print(stdout) - # assume if they ret-coded 0 we're good + # TODO, find out what kind of messages Windows sends us from mklink + # print(stdout) + # assume if they ret-coded 0 we're good + def _junctionlink_windows(src, dest): try: @@ -82,9 +85,10 @@ def _junctionlink_windows(src, dest): except CalledProcessError as err: raise IOError(err.output.decode('utf-8')) - # TODO, find out what kind of messages Windows sends us from mklink - # print(stdout) - # assume if they ret-coded 0 we're good + # TODO, find out what kind of messages Windows sends us from mklink + # print(stdout) + # assume if they ret-coded 0 we're good + # Create a hard link to src named as dest # This version of link, unlike os.link, supports nt systems as well @@ -102,6 +106,7 @@ def symlink(src, dest): else: os.symlink(src, dest) + # Create a symlink to src named as dest, but don't fail if you're on nt def dirlink(src, dest): if os.name == 'nt': @@ -109,9 
+114,10 @@ def dirlink(src, dest): else: os.symlink(src, dest) + # Create a symlink to src named as dest, but don't fail if you're on nt def junctionlink(src, dest): if os.name == 'nt': _junctionlink_windows(src, dest) else: - os.symlink(src, dest) \ No newline at end of file + os.symlink(src, dest) diff --git a/core/logger.py b/core/logger.py index 0b8a5446..324248d4 100644 --- a/core/logger.py +++ b/core/logger.py @@ -27,6 +27,7 @@ reverseNames = {u'ERROR': ERROR, u'POSTPROCESS': POSTPROCESS, u'DB': DB} + class NTMRotatingLogHandler(object): def __init__(self, log_file, num_files, num_bytes): self.num_files = num_files @@ -68,7 +69,7 @@ class NTMRotatingLogHandler(object): if self.cur_handler: old_handler = self.cur_handler else: - #Add a new logging levels + # Add a new logging levels logging.addLevelName(21, 'POSTPROCESS') logging.addLevelName(5, 'DB') @@ -85,7 +86,7 @@ class NTMRotatingLogHandler(object): {'nzbtomedia': logging.Formatter('[%(asctime)s] [%(levelname)s]::%(message)s', '%H:%M:%S'), 'postprocess': logging.Formatter('[%(asctime)s] [%(levelname)s]::%(message)s', '%H:%M:%S'), 'db': logging.Formatter('[%(asctime)s] [%(levelname)s]::%(message)s', '%H:%M:%S') - }, + }, logging.Formatter('%(message)s'), )) # add the handler to the root logger @@ -122,7 +123,7 @@ class NTMRotatingLogHandler(object): {'nzbtomedia': logging.Formatter('%(asctime)s %(levelname)-8s::%(message)s', '%Y-%m-%d %H:%M:%S'), 'postprocess': logging.Formatter('%(asctime)s %(levelname)-8s::%(message)s', '%Y-%m-%d %H:%M:%S'), 'db': logging.Formatter('%(asctime)s %(levelname)-8s::%(message)s', '%Y-%m-%d %H:%M:%S') - }, + }, logging.Formatter('%(message)s'), )) return file_handler @@ -234,6 +235,7 @@ class NTMRotatingLogHandler(object): else: sys.exit(1) + class DispatchingFormatter: def __init__(self, formatters, default_formatter): self._formatters = formatters @@ -243,31 +245,41 @@ class DispatchingFormatter: formatter = self._formatters.get(record.name, self._default_formatter) return 
formatter.format(record) + ntm_log_instance = NTMRotatingLogHandler(core.LOG_FILE, NUM_LOGS, LOG_SIZE) + def log(toLog, logLevel=MESSAGE, section='MAIN'): ntm_log_instance.log(toLog, logLevel, section) + def info(toLog, section='MAIN'): log(toLog, MESSAGE, section) + def error(toLog, section='MAIN'): log(toLog, ERROR, section) + def warning(toLog, section='MAIN'): log(toLog, WARNING, section) + def debug(toLog, section='MAIN'): log(toLog, DEBUG, section) + def postprocess(toLog, section='POSTPROCESS'): log(toLog, POSTPROCESS, section) + def db(toLog, section='DB'): log(toLog, DB, section) + def log_error_and_exit(error_msg): ntm_log_instance.log_error_and_exit(error_msg) + def close(): ntm_log_instance.close_log() diff --git a/core/nzbToMediaAutoFork.py b/core/nzbToMediaAutoFork.py index f5140228..d1c122c1 100644 --- a/core/nzbToMediaAutoFork.py +++ b/core/nzbToMediaAutoFork.py @@ -4,6 +4,7 @@ import core import requests from core import logger + def autoFork(section, inputCategory): # auto-detect correct section # config settings @@ -49,13 +50,13 @@ def autoFork(section, inputCategory): detected = False if section == "NzbDrone": logger.info("Attempting to verify %s fork" % inputCategory) - url = "%s%s:%s%s/api/rootfolder" % (protocol,host,port,web_root) - headers={"X-Api-Key": apikey} + url = "%s%s:%s%s/api/rootfolder" % (protocol, host, port, web_root) + headers = {"X-Api-Key": apikey} try: r = requests.get(url, headers=headers, stream=True, verify=False) except requests.ConnectionError: logger.warning("Could not connect to %s:%s to verify fork!" % (section, inputCategory)) - + if not r.ok: logger.warning("Connection to %s:%s failed! Check your configuration" % (section, inputCategory)) @@ -67,12 +68,12 @@ def autoFork(section, inputCategory): logger.info("Attempting to auto-detect %s fork" % inputCategory) # define the order to test. Default must be first since the default fork doesn't reject parameters. # then in order of most unique parameters. 
- url = "%s%s:%s%s/home/postprocess/" % (protocol,host,port,web_root) + url = "%s%s:%s%s/home/postprocess/" % (protocol, host, port, web_root) # attempting to auto-detect fork try: if username and password: s = requests.Session() - login = "%s%s:%s%s/login" % (protocol,host,port,web_root) + login = "%s%s:%s%s/login" % (protocol, host, port, web_root) login_params = {'username': username, 'password': password} s.post(login, data=login_params, stream=True, verify=False) r = s.get(url, auth=(username, password), verify=False) @@ -83,10 +84,10 @@ def autoFork(section, inputCategory): r = [] if r and r.ok: for param in params: - if not 'name="%s"' %(param) in r.text: + if not 'name="%s"' % (param) in r.text: rem_params.append(param) for param in rem_params: - params.pop(param) + params.pop(param) for fork in sorted(core.FORKS.iteritems(), reverse=False): if params == fork[1]: detected = True @@ -101,4 +102,4 @@ def autoFork(section, inputCategory): fork = core.FORKS.items()[core.FORKS.keys().index(core.FORK_DEFAULT)] logger.info("%s:%s fork set to %s" % (section, inputCategory, fork[0])) - return fork[0], fork[1] \ No newline at end of file + return fork[0], fork[1] diff --git a/core/nzbToMediaConfig.py b/core/nzbToMediaConfig.py index 40a48b2b..e0bb8172 100644 --- a/core/nzbToMediaConfig.py +++ b/core/nzbToMediaConfig.py @@ -8,13 +8,15 @@ from core import logger from itertools import chain + class Section(configobj.Section): def isenabled(section): # checks if subsection enabled, returns true/false if subsection specified otherwise returns true/false in {} if not section.sections: try: value = list(ConfigObj.find_key(section, 'enabled'))[0] - except:value = 0 + except: + value = 0 if int(value) == 1: return section else: @@ -23,7 +25,8 @@ class Section(configobj.Section): for subsection in subsections: try: value = list(ConfigObj.find_key(subsections, 'enabled'))[0] - except:value = 0 + except: + value = 0 if int(value) != 1: del to_return[section_name][subsection] @@ 
-39,7 +42,8 @@ class Section(configobj.Section): for subsection in to_return: try: value = list(ConfigObj.find_key(to_return[subsection], key))[0] - except:value = None + except: + value = None if not value: del to_return[subsection] @@ -80,6 +84,7 @@ class Section(configobj.Section): return to_return + class ConfigObj(configobj.ConfigObj, Section): def __init__(self, *args, **kw): if len(args) == 0: @@ -190,7 +195,8 @@ class ConfigObj(configobj.ConfigObj, Section): if not list(ConfigObj.find_key(CFG_NEW, option)): try: values.pop(option) - except: pass + except: + pass return values @@ -221,7 +227,7 @@ class ConfigObj(configobj.ConfigObj, Section): subsection = None if section in list(chain.from_iterable(subsections.values())): subsection = section - section = ''.join([k for k,v in subsections.iteritems() if subsection in v]) + section = ''.join([k for k, v in subsections.iteritems() if subsection in v]) process_section(section, subsection) elif section in subsections.keys(): subsection = subsections[section] @@ -247,7 +253,8 @@ class ConfigObj(configobj.ConfigObj, Section): try: if os.environ.has_key('NZBPO_NDCATEGORY') and os.environ.has_key('NZBPO_SBCATEGORY'): if os.environ['NZBPO_NDCATEGORY'] == os.environ['NZBPO_SBCATEGORY']: - logger.warning("%s category is set for SickBeard and NzbDrone. Please check your config in NZBGet" % (os.environ['NZBPO_NDCATEGORY'])) + logger.warning("%s category is set for SickBeard and NzbDrone. 
" + "Please check your config in NZBGet" % (os.environ['NZBPO_NDCATEGORY'])) section = "Nzb" key = 'NZBOP_DESTDIR' @@ -274,12 +281,14 @@ class ConfigObj(configobj.ConfigObj, Section): if os.environ.has_key(key): option = cfgKeys[index] value = os.environ[key] - CFG_NEW[section][option] = value + CFG_NEW[section][option] = value section = "CouchPotato" envCatKey = 'NZBPO_CPSCATEGORY' - envKeys = ['ENABLED', 'APIKEY', 'HOST', 'PORT', 'SSL', 'WEB_ROOT', 'METHOD', 'DELETE_FAILED', 'REMOTE_PATH', 'WAIT_FOR', 'WATCH_DIR'] - cfgKeys = ['enabled', 'apikey', 'host', 'port', 'ssl', 'web_root', 'method', 'delete_failed', 'remote_path', 'wait_for', 'watch_dir'] + envKeys = ['ENABLED', 'APIKEY', 'HOST', 'PORT', 'SSL', 'WEB_ROOT', 'METHOD', 'DELETE_FAILED', 'REMOTE_PATH', + 'WAIT_FOR', 'WATCH_DIR'] + cfgKeys = ['enabled', 'apikey', 'host', 'port', 'ssl', 'web_root', 'method', 'delete_failed', 'remote_path', + 'wait_for', 'watch_dir'] if os.environ.has_key(envCatKey): for index in range(len(envKeys)): key = 'NZBPO_CPS' + envKeys[index] @@ -293,8 +302,10 @@ class ConfigObj(configobj.ConfigObj, Section): section = "SickBeard" envCatKey = 'NZBPO_SBCATEGORY' - envKeys = ['ENABLED', 'HOST', 'PORT', 'USERNAME', 'PASSWORD', 'SSL', 'WEB_ROOT', 'WATCH_DIR', 'FORK', 'DELETE_FAILED', 'TORRENT_NOLINK', 'NZBEXTRACTIONBY', 'REMOTE_PATH', 'PROCESS_METHOD'] - cfgKeys = ['enabled', 'host', 'port', 'username', 'password', 'ssl', 'web_root', 'watch_dir', 'fork', 'delete_failed', 'Torrent_NoLink', 'nzbExtractionBy', 'remote_path', 'process_method'] + envKeys = ['ENABLED', 'HOST', 'PORT', 'USERNAME', 'PASSWORD', 'SSL', 'WEB_ROOT', 'WATCH_DIR', 'FORK', + 'DELETE_FAILED', 'TORRENT_NOLINK', 'NZBEXTRACTIONBY', 'REMOTE_PATH', 'PROCESS_METHOD'] + cfgKeys = ['enabled', 'host', 'port', 'username', 'password', 'ssl', 'web_root', 'watch_dir', 'fork', + 'delete_failed', 'Torrent_NoLink', 'nzbExtractionBy', 'remote_path', 'process_method'] if os.environ.has_key(envCatKey): for index in range(len(envKeys)): key = 
'NZBPO_SB' + envKeys[index] @@ -325,8 +336,10 @@ class ConfigObj(configobj.ConfigObj, Section): section = "Mylar" envCatKey = 'NZBPO_MYCATEGORY' - envKeys = ['ENABLED', 'HOST', 'PORT', 'USERNAME', 'PASSWORD', 'APIKEY', 'SSL', 'WEB_ROOT', 'WATCH_DIR', 'REMOTE_PATH'] - cfgKeys = ['enabled', 'host', 'port', 'username', 'password', 'apikey', 'ssl', 'web_root', 'watch_dir', 'remote_path'] + envKeys = ['ENABLED', 'HOST', 'PORT', 'USERNAME', 'PASSWORD', 'APIKEY', 'SSL', 'WEB_ROOT', 'WATCH_DIR', + 'REMOTE_PATH'] + cfgKeys = ['enabled', 'host', 'port', 'username', 'password', 'apikey', 'ssl', 'web_root', 'watch_dir', + 'remote_path'] if os.environ.has_key(envCatKey): for index in range(len(envKeys)): key = 'NZBPO_MY' + envKeys[index] @@ -355,8 +368,10 @@ class ConfigObj(configobj.ConfigObj, Section): section = "NzbDrone" envCatKey = 'NZBPO_NDCATEGORY' - envKeys = ['ENABLED', 'HOST', 'APIKEY', 'PORT', 'SSL', 'WEB_ROOT', 'WATCH_DIR', 'FORK', 'DELETE_FAILED', 'TORRENT_NOLINK', 'NZBEXTRACTIONBY', 'WAIT_FOR', 'DELETE_FAILED', 'REMOTE_PATH'] - cfgKeys = ['enabled', 'host', 'apikey', 'port', 'ssl', 'web_root', 'watch_dir', 'fork', 'delete_failed', 'Torrent_NoLink', 'nzbExtractionBy', 'wait_for', 'delete_failed', 'remote_path'] + envKeys = ['ENABLED', 'HOST', 'APIKEY', 'PORT', 'SSL', 'WEB_ROOT', 'WATCH_DIR', 'FORK', 'DELETE_FAILED', + 'TORRENT_NOLINK', 'NZBEXTRACTIONBY', 'WAIT_FOR', 'DELETE_FAILED', 'REMOTE_PATH'] + cfgKeys = ['enabled', 'host', 'apikey', 'port', 'ssl', 'web_root', 'watch_dir', 'fork', 'delete_failed', + 'Torrent_NoLink', 'nzbExtractionBy', 'wait_for', 'delete_failed', 'remote_path'] if os.environ.has_key(envCatKey): for index in range(len(envKeys)): key = 'NZBPO_ND' + envKeys[index] @@ -391,16 +406,26 @@ class ConfigObj(configobj.ConfigObj, Section): CFG_NEW[section][option] = value section = "Transcoder" - envKeys = ['TRANSCODE', 'DUPLICATE', 'IGNOREEXTENSIONS', 'OUTPUTFASTSTART', 'OUTPUTVIDEOPATH', 'PROCESSOUTPUT', 'AUDIOLANGUAGE', 'ALLAUDIOLANGUAGES', 
'SUBLANGUAGES', - 'ALLSUBLANGUAGES', 'EMBEDSUBS', 'BURNINSUBTITLE', 'EXTRACTSUBS', 'EXTERNALSUBDIR', 'OUTPUTDEFAULT', 'OUTPUTVIDEOEXTENSION', 'OUTPUTVIDEOCODEC', 'VIDEOCODECALLOW', - 'OUTPUTVIDEOPRESET', 'OUTPUTVIDEOFRAMERATE', 'OUTPUTVIDEOBITRATE', 'OUTPUTAUDIOCODEC', 'AUDIOCODECALLOW', 'OUTPUTAUDIOBITRATE', 'OUTPUTQUALITYPERCENT', 'GETSUBS', - 'OUTPUTAUDIOTRACK2CODEC', 'AUDIOCODEC2ALLOW', 'OUTPUTAUDIOTRACK2BITRATE', 'OUTPUTAUDIOOTHERCODEC', 'AUDIOOTHERCODECALLOW', 'OUTPUTAUDIOOTHERBITRATE', - 'OUTPUTSUBTITLECODEC', 'OUTPUTAUDIOCHANNELS', 'OUTPUTAUDIOTRACK2CHANNELS', 'OUTPUTAUDIOOTHERCHANNELS'] - cfgKeys = ['transcode', 'duplicate', 'ignoreExtensions', 'outputFastStart', 'outputVideoPath', 'processOutput', 'audioLanguage', 'allAudioLanguages', 'subLanguages', - 'allSubLanguages', 'embedSubs', 'burnInSubtitle', 'extractSubs', 'externalSubDir', 'outputDefault', 'outputVideoExtension', 'outputVideoCodec', 'VideoCodecAllow', - 'outputVideoPreset', 'outputVideoFramerate', 'outputVideoBitrate', 'outputAudioCodec', 'AudioCodecAllow', 'outputAudioBitrate', 'outputQualityPercent', 'getSubs', - 'outputAudioTrack2Codec', 'AudioCodec2Allow', 'outputAudioTrack2Bitrate', 'outputAudioOtherCodec', 'AudioOtherCodecAllow', 'outputAudioOtherBitrate', - 'outputSubtitleCodec', 'outputAudioChannels', 'outputAudioTrack2Channels', 'outputAudioOtherChannels'] + envKeys = ['TRANSCODE', 'DUPLICATE', 'IGNOREEXTENSIONS', 'OUTPUTFASTSTART', 'OUTPUTVIDEOPATH', + 'PROCESSOUTPUT', 'AUDIOLANGUAGE', 'ALLAUDIOLANGUAGES', 'SUBLANGUAGES', + 'ALLSUBLANGUAGES', 'EMBEDSUBS', 'BURNINSUBTITLE', 'EXTRACTSUBS', 'EXTERNALSUBDIR', + 'OUTPUTDEFAULT', 'OUTPUTVIDEOEXTENSION', 'OUTPUTVIDEOCODEC', 'VIDEOCODECALLOW', + 'OUTPUTVIDEOPRESET', 'OUTPUTVIDEOFRAMERATE', 'OUTPUTVIDEOBITRATE', 'OUTPUTAUDIOCODEC', + 'AUDIOCODECALLOW', 'OUTPUTAUDIOBITRATE', 'OUTPUTQUALITYPERCENT', 'GETSUBS', + 'OUTPUTAUDIOTRACK2CODEC', 'AUDIOCODEC2ALLOW', 'OUTPUTAUDIOTRACK2BITRATE', + 'OUTPUTAUDIOOTHERCODEC', 'AUDIOOTHERCODECALLOW', 
'OUTPUTAUDIOOTHERBITRATE', + 'OUTPUTSUBTITLECODEC', 'OUTPUTAUDIOCHANNELS', 'OUTPUTAUDIOTRACK2CHANNELS', + 'OUTPUTAUDIOOTHERCHANNELS'] + cfgKeys = ['transcode', 'duplicate', 'ignoreExtensions', 'outputFastStart', 'outputVideoPath', + 'processOutput', 'audioLanguage', 'allAudioLanguages', 'subLanguages', + 'allSubLanguages', 'embedSubs', 'burnInSubtitle', 'extractSubs', 'externalSubDir', + 'outputDefault', 'outputVideoExtension', 'outputVideoCodec', 'VideoCodecAllow', + 'outputVideoPreset', 'outputVideoFramerate', 'outputVideoBitrate', 'outputAudioCodec', + 'AudioCodecAllow', 'outputAudioBitrate', 'outputQualityPercent', 'getSubs', + 'outputAudioTrack2Codec', 'AudioCodec2Allow', 'outputAudioTrack2Bitrate', + 'outputAudioOtherCodec', 'AudioOtherCodecAllow', 'outputAudioOtherBitrate', + 'outputSubtitleCodec', 'outputAudioChannels', 'outputAudioTrack2Channels', + 'outputAudioOtherChannels'] for index in range(len(envKeys)): key = 'NZBPO_' + envKeys[index] if os.environ.has_key(key): @@ -420,8 +445,10 @@ class ConfigObj(configobj.ConfigObj, Section): section = "UserScript" envCatKey = 'NZBPO_USCATEGORY' - envKeys = ['USER_SCRIPT_MEDIAEXTENSIONS', 'USER_SCRIPT_PATH', 'USER_SCRIPT_PARAM', 'USER_SCRIPT_RUNONCE', 'USER_SCRIPT_SUCCESSCODES', 'USER_SCRIPT_CLEAN', 'USDELAY', 'USREMOTE_PATH'] - cfgKeys = ['user_script_mediaExtensions', 'user_script_path', 'user_script_param', 'user_script_runOnce', 'user_script_successCodes', 'user_script_clean', 'delay', 'remote_path'] + envKeys = ['USER_SCRIPT_MEDIAEXTENSIONS', 'USER_SCRIPT_PATH', 'USER_SCRIPT_PARAM', 'USER_SCRIPT_RUNONCE', + 'USER_SCRIPT_SUCCESSCODES', 'USER_SCRIPT_CLEAN', 'USDELAY', 'USREMOTE_PATH'] + cfgKeys = ['user_script_mediaExtensions', 'user_script_path', 'user_script_param', 'user_script_runOnce', + 'user_script_successCodes', 'user_script_clean', 'delay', 'remote_path'] if os.environ.has_key(envCatKey): for index in range(len(envKeys)): key = 'NZBPO_' + envKeys[index] @@ -441,10 +468,11 @@ class 
ConfigObj(configobj.ConfigObj, Section): CFG_NEW.filename = core.CONFIG_FILE CFG_NEW.write() except Exception, e: - logger.debug("Error %s when writing changes to .cfg" % (e)) + logger.debug("Error %s when writing changes to .cfg" % (e)) return CFG_NEW + configobj.Section = Section configobj.ConfigObj = ConfigObj config = ConfigObj diff --git a/core/nzbToMediaDB.py b/core/nzbToMediaDB.py index 9ca856d5..a34b8662 100644 --- a/core/nzbToMediaDB.py +++ b/core/nzbToMediaDB.py @@ -8,6 +8,7 @@ import time import core from core import logger + def dbFilename(filename="nzbtomedia.db", suffix=None): """ @param filename: The sqlite database filename to use. If not specified, @@ -153,7 +154,6 @@ class DBConnection: return sqlResult - def select(self, query, args=None): sqlResults = self.action(query, args).fetchall() @@ -244,7 +244,7 @@ class SchemaUpgrade(object): self.connection = connection def hasTable(self, tableName): - return len(self.connection.action("SELECT 1 FROM sqlite_master WHERE name = ?;", (tableName, )).fetchall()) > 0 + return len(self.connection.action("SELECT 1 FROM sqlite_master WHERE name = ?;", (tableName,)).fetchall()) > 0 def hasColumn(self, tableName, column): return column in self.connection.tableInfo(tableName) @@ -264,4 +264,3 @@ class SchemaUpgrade(object): new_version = self.checkDBVersion() + 1 self.connection.action("UPDATE db_version SET db_version = ?", [new_version]) return new_version - diff --git a/core/nzbToMediaSceneExceptions.py b/core/nzbToMediaSceneExceptions.py index 7eedd7a0..b37ec268 100644 --- a/core/nzbToMediaSceneExceptions.py +++ b/core/nzbToMediaSceneExceptions.py @@ -2,23 +2,28 @@ import os import re import core -import shlex +import shlex from core import logger from core.nzbToMediaUtil import listMediaFiles -reverse_list = [r"\.\d{2}e\d{2}s\.", r"\.[pi]0801\.", r"\.p027\.", r"\.[pi]675\.", r"\.[pi]084\.", r"\.p063\.", r"\b[45]62[xh]\.", r"\.yarulb\.", r"\.vtd[hp]\.", - r"\.ld[.-]?bew\.", r"\.pir.?(dov|dvd|bew|db|rb)\.", 
r"\brdvd\.", r"\.vts\.", r"\.reneercs\.", r"\.dcv\.", r"\b(pir|mac)dh\b", r"\.reporp\.", r"\.kcaper\.", +reverse_list = [r"\.\d{2}e\d{2}s\.", r"\.[pi]0801\.", r"\.p027\.", r"\.[pi]675\.", r"\.[pi]084\.", r"\.p063\.", + r"\b[45]62[xh]\.", r"\.yarulb\.", r"\.vtd[hp]\.", + r"\.ld[.-]?bew\.", r"\.pir.?(dov|dvd|bew|db|rb)\.", r"\brdvd\.", r"\.vts\.", r"\.reneercs\.", + r"\.dcv\.", r"\b(pir|mac)dh\b", r"\.reporp\.", r"\.kcaper\.", r"\.lanretni\.", r"\b3ca\b", r"\.cstn\."] reverse_pattern = re.compile('|'.join(reverse_list), flags=re.IGNORECASE) season_pattern = re.compile(r"(.*\.\d{2}e\d{2}s\.)(.*)", flags=re.IGNORECASE) word_pattern = re.compile(r"([^A-Z0-9]*[A-Z0-9]+)") -media_list = [r"\.s\d{2}e\d{2}\.", r"\.1080[pi]\.", r"\.720p\.", r"\.576[pi]", r"\.480[pi]\.", r"\.360p\.", r"\.[xh]26[45]\b", r"\.bluray\.", r"\.[hp]dtv\.", - r"\.web[.-]?dl\.", r"\.(vod|dvd|web|bd|br).?rip\.", r"\.dvdr\b", r"\.stv\.", r"\.screener\.", r"\.vcd\.", r"\bhd(cam|rip)\b", r"\.proper\.", r"\.repack\.", +media_list = [r"\.s\d{2}e\d{2}\.", r"\.1080[pi]\.", r"\.720p\.", r"\.576[pi]", r"\.480[pi]\.", r"\.360p\.", + r"\.[xh]26[45]\b", r"\.bluray\.", r"\.[hp]dtv\.", + r"\.web[.-]?dl\.", r"\.(vod|dvd|web|bd|br).?rip\.", r"\.dvdr\b", r"\.stv\.", r"\.screener\.", r"\.vcd\.", + r"\bhd(cam|rip)\b", r"\.proper\.", r"\.repack\.", r"\.internal\.", r"\bac3\b", r"\.ntsc\.", r"\.pal\.", r"\.secam\.", r"\bdivx\b", r"\bxvid\b"] media_pattern = re.compile('|'.join(media_list), flags=re.IGNORECASE) garbage_name = re.compile(r"^[a-zA-Z0-9]*$") -char_replace = [[r"(\w)1\.(\w)",r"\1i\2"] -] +char_replace = [[r"(\w)1\.(\w)", r"\1i\2"] + ] + def process_all_exceptions(name, dirname): rename_script(dirname) @@ -27,7 +32,7 @@ def process_all_exceptions(name, dirname): parentDir = os.path.dirname(filename) head, fileExtension = os.path.splitext(os.path.basename(filename)) if reverse_pattern.search(head) is not None: - exception = reverse_filename + exception = reverse_filename elif garbage_name.search(head) is not 
None: exception = replace_filename else: @@ -38,7 +43,8 @@ def process_all_exceptions(name, dirname): if core.GROUPS: newfilename = strip_groups(newfilename) if newfilename != filename: - rename_file(filename, newfilename) + rename_file(filename, newfilename) + def strip_groups(filename): if not core.GROUPS: @@ -48,33 +54,36 @@ def strip_groups(filename): newname = head.replace(' ', '.') for group in core.GROUPS: newname = newname.replace(group, '') - newname = newname.replace('[]', '') + newname = newname.replace('[]', '') newfile = newname + fileExtension newfilePath = os.path.join(dirname, newfile) return newfilePath + def rename_file(filename, newfilePath): logger.debug("Replacing file name %s with download name %s" % (filename, newfilePath), "EXCEPTION") try: os.rename(filename, newfilePath) - except Exception,e: + except Exception, e: logger.error("Unable to rename file due to: %s" % (str(e)), "EXCEPTION") + def replace_filename(filename, dirname, name): head, fileExtension = os.path.splitext(os.path.basename(filename)) - if media_pattern.search(os.path.basename(dirname).replace(' ','.')) is not None: + if media_pattern.search(os.path.basename(dirname).replace(' ', '.')) is not None: newname = os.path.basename(dirname).replace(' ', '.') logger.debug("Replacing file name %s with directory name %s" % (head, newname), "EXCEPTION") - elif media_pattern.search(name.replace(' ','.').lower()) is not None: + elif media_pattern.search(name.replace(' ', '.').lower()) is not None: newname = name.replace(' ', '.') logger.debug("Replacing file name %s with download name %s" % (head, newname), "EXCEPTION") else: logger.warning("No name replacement determined for %s" % (head), "EXCEPTION") - newname = name + newname = name newfile = newname + fileExtension newfilePath = os.path.join(dirname, newfile) return newfilePath + def reverse_filename(filename, dirname, name): head, fileExtension = os.path.splitext(os.path.basename(filename)) na_parts = season_pattern.search(head) @@ 
-85,11 +94,11 @@ def reverse_filename(filename, dirname, name): for wp in word_p: if wp[0] == ".": new_words += "." - new_words += re.sub(r"\W","",wp) + new_words += re.sub(r"\W", "", wp) else: new_words = na_parts.group(2) for cr in char_replace: - new_words = re.sub(cr[0],cr[1],new_words) + new_words = re.sub(cr[0], cr[1], new_words) newname = new_words[::-1] + na_parts.group(1)[::-1] else: newname = head[::-1].title() @@ -99,15 +108,16 @@ def reverse_filename(filename, dirname, name): newfilePath = os.path.join(dirname, newfile) return newfilePath + def rename_script(dirname): rename_file = "" for dir, dirs, files in os.walk(dirname): for file in files: - if re.search('(rename\S*\.(sh|bat)$)',file,re.IGNORECASE): + if re.search('(rename\S*\.(sh|bat)$)', file, re.IGNORECASE): rename_file = os.path.join(dir, file) dirname = dir break - if rename_file: + if rename_file: rename_lines = [line.strip() for line in open(rename_file)] for line in rename_lines: if re.search('^(mv|Move)', line, re.IGNORECASE): @@ -122,10 +132,9 @@ def rename_script(dirname): logger.debug("Renaming file %s to %s" % (orig, dest), "EXCEPTION") try: os.rename(orig, dest) - except Exception,e: + except Exception, e: logger.error("Unable to rename file due to: %s" % (str(e)), "EXCEPTION") # dict for custom groups # we can add more to this list -#__customgroups__ = {'Q o Q': process_qoq, '-ECI': process_eci} - +# _customgroups = {'Q o Q': process_qoq, '-ECI': process_eci} diff --git a/core/nzbToMediaUserScript.py b/core/nzbToMediaUserScript.py index 6acc0169..23c3c5de 100644 --- a/core/nzbToMediaUserScript.py +++ b/core/nzbToMediaUserScript.py @@ -6,12 +6,14 @@ from core.transcoder import transcoder from core.nzbToMediaUtil import import_subs, listMediaFiles, rmDir from core import logger + def external_script(outputDestination, torrentName, torrentLabel, settings): final_result = 0 # start at 0. 
num_files = 0 try: core.USER_SCRIPT_MEDIAEXTENSIONS = settings["user_script_mediaExtensions"] - if isinstance(core.USER_SCRIPT_MEDIAEXTENSIONS, str): core.USER_SCRIPT_MEDIAEXTENSIONS = core.USER_SCRIPT_MEDIAEXTENSIONS.split(',') + if isinstance(core.USER_SCRIPT_MEDIAEXTENSIONS, str): + core.USER_SCRIPT_MEDIAEXTENSIONS = core.USER_SCRIPT_MEDIAEXTENSIONS.split(',') except: core.USER_SCRIPT_MEDIAEXTENSIONS = [] try: @@ -22,12 +24,14 @@ def external_script(outputDestination, torrentName, torrentLabel, settings): return [0, ""] try: core.USER_SCRIPT_PARAM = settings["user_script_param"] - if isinstance(core.USER_SCRIPT_PARAM, str): core.USER_SCRIPT_PARAM = core.USER_SCRIPT_PARAM.split(',') + if isinstance(core.USER_SCRIPT_PARAM, str): + core.USER_SCRIPT_PARAM = core.USER_SCRIPT_PARAM.split(',') except: core.USER_SCRIPT_PARAM = [] try: core.USER_SCRIPT_SUCCESSCODES = settings["user_script_successCodes"] - if isinstance(core.USER_SCRIPT_SUCCESSCODES, str): core.USER_SCRIPT_SUCCESSCODES = core.USER_SCRIPT_SUCCESSCODES.split(',') + if isinstance(core.USER_SCRIPT_SUCCESSCODES, str): + core.USER_SCRIPT_SUCCESSCODES = core.USER_SCRIPT_SUCCESSCODES.split(',') except: core.USER_SCRIPT_SUCCESSCODES = 0 try: diff --git a/core/nzbToMediaUtil.py b/core/nzbToMediaUtil.py index 53bdf27a..3899766d 100644 --- a/core/nzbToMediaUtil.py +++ b/core/nzbToMediaUtil.py @@ -14,7 +14,7 @@ import beets import requests import core from babelfish import Language -import subliminal +import subliminal from core.extractor import extractor from core.linktastic import linktastic @@ -25,13 +25,14 @@ from core import logger, nzbToMediaDB requests.packages.urllib3.disable_warnings() + def reportNzb(failure_link, clientAgent): # Contact indexer site logger.info("Sending failure notification to indexer site") if clientAgent == 'nzbget': - headers = {'User-Agent' : 'NZBGet / nzbToMedia.py'} + headers = {'User-Agent': 'NZBGet / nzbToMedia.py'} elif clientAgent == 'sabnzbd': - headers = {'User-Agent' : 'SABnzbd 
/ nzbToMedia.py'} + headers = {'User-Agent': 'SABnzbd / nzbToMedia.py'} else: return try: @@ -40,8 +41,9 @@ def reportNzb(failure_link, clientAgent): logger.error("Unable to open URL %s due to %s" % (failure_link, e)) return + def sanitizeName(name): - ''' + """ >>> sanitizeName('a/b/c') 'a-b-c' >>> sanitizeName('abc') @@ -50,7 +52,7 @@ def sanitizeName(name): 'ab' >>> sanitizeName('.a.b..') 'a.b' - ''' + """ # remove bad chars from the filename name = re.sub(r'[\\\/*]', '-', name) @@ -60,10 +62,12 @@ def sanitizeName(name): name = name.strip(' .') try: name = name.encode(core.SYS_ENCODING) - except: pass + except: + pass return name - + + def makeDir(path): if not os.path.isdir(path): try: @@ -72,12 +76,13 @@ def makeDir(path): return False return True + def remoteDir(path): if not core.REMOTEPATHS: return path - for local,remote in core.REMOTEPATHS: + for local, remote in core.REMOTEPATHS: if local in path: - base_dirs = path.replace(local,"").split(os.sep) + base_dirs = path.replace(local, "").split(os.sep) if '/' in remote: remote_sep = '/' else: @@ -89,22 +94,25 @@ def remoteDir(path): return new_path return path + def category_search(inputDirectory, inputName, inputCategory, root, categories): tordir = False try: inputName = inputName.encode(core.SYS_ENCODING) - except: pass + except: + pass try: inputDirectory = inputDirectory.encode(core.SYS_ENCODING) - except: pass + except: + pass if inputDirectory is None: # =Nothing to process here. 
return inputDirectory, inputName, inputCategory, root pathlist = os.path.normpath(inputDirectory).split(os.sep) - if inputCategory and inputCategory in pathlist: + if inputCategory and inputCategory in pathlist: logger.debug("SEARCH: Found the Category: %s in directory structure" % (inputCategory)) elif inputCategory: logger.debug("SEARCH: Could not find the category: %s in the directory structure" % (inputCategory)) @@ -116,7 +124,8 @@ def category_search(inputDirectory, inputName, inputCategory, root, categories): inputCategory = "" logger.debug("SEARCH: Could not find a category in the directory structure") if not os.path.isdir(inputDirectory) and os.path.isfile(inputDirectory): # If the input directory is a file - if not inputName: inputName = os.path.split(os.path.normpath(inputDirectory))[1] + if not inputName: + inputName = os.path.split(os.path.normpath(inputDirectory))[1] return inputDirectory, inputName, inputCategory, root if inputCategory and os.path.isdir(os.path.join(inputDirectory, inputCategory)): @@ -158,7 +167,8 @@ def category_search(inputDirectory, inputName, inputCategory, root, categories): if index + 1 < len(pathlist): tordir = True logger.info("SEARCH: Found a unique directory %s in the category directory" % (pathlist[index + 1])) - if not inputName: inputName = pathlist[index + 1] + if not inputName: + inputName = pathlist[index + 1] except ValueError: pass @@ -177,15 +187,17 @@ def category_search(inputDirectory, inputName, inputCategory, root, categories): return inputDirectory, inputName, inputCategory, root + def getDirSize(inputPath): - from functools import partial - prepend = partial(os.path.join, inputPath) - return sum([(os.path.getsize(f) if os.path.isfile(f) else getDirSize(f)) for f in map(prepend, os.listdir(inputPath))]) + from functools import partial + prepend = partial(os.path.join, inputPath) + return sum( + [(os.path.getsize(f) if os.path.isfile(f) else getDirSize(f)) for f in map(prepend, os.listdir(inputPath))]) + def 
is_minSize(inputName, minSize): fileName, fileExt = os.path.splitext(os.path.basename(inputName)) - # audio files we need to check directory size not file size inputSize = os.path.getsize(inputName) if fileExt in (core.AUDIOCONTAINER): @@ -199,11 +211,13 @@ def is_minSize(inputName, minSize): if inputSize > minSize * 1048576: return True + def is_sample(inputName): # Ignore 'sample' in files if re.search('(^|[\W_])sample\d*[\W_]', inputName.lower()): return True + def copy_link(src, targetLink, useLink): logger.info("MEDIAFILE: [%s]" % (os.path.basename(targetLink)), 'COPYLINK') logger.info("SOURCE FOLDER: [%s]" % (os.path.dirname(src)), 'COPYLINK') @@ -254,6 +268,7 @@ def copy_link(src, targetLink, useLink): return True + def replace_links(link): n = 0 target = link @@ -277,6 +292,7 @@ def replace_links(link): os.unlink(link) linktastic.symlink(target, link) + def flatten(outputDestination): logger.info("FLATTEN: Flattening directory: %s" % (outputDestination)) for outputFile in listMediaFiles(outputDestination): @@ -295,29 +311,31 @@ def flatten(outputDestination): removeEmptyFolders(outputDestination) # Cleanup empty directories + def removeEmptyFolders(path, removeRoot=True): - 'Function to remove empty folders' - if not os.path.isdir(path): - return + """Function to remove empty folders""" + if not os.path.isdir(path): + return - # remove empty subfolders - logger.debug("Checking for empty folders in:%s" % (path)) - files = os.listdir(path) - if len(files): - for f in files: - fullpath = os.path.join(path, f) - if os.path.isdir(fullpath): - removeEmptyFolders(fullpath) + # remove empty subfolders + logger.debug("Checking for empty folders in:%s" % (path)) + files = os.listdir(path) + if len(files): + for f in files: + fullpath = os.path.join(path, f) + if os.path.isdir(fullpath): + removeEmptyFolders(fullpath) + + # if folder empty, delete it + files = os.listdir(path) + if len(files) == 0 and removeRoot: + logger.debug("Removing empty folder:%s" % (path)) + 
os.rmdir(path) - # if folder empty, delete it - files = os.listdir(path) - if len(files) == 0 and removeRoot: - logger.debug("Removing empty folder:%s" % (path)) - os.rmdir(path) def rmReadOnly(filename): if os.path.isfile(filename): - #check first the read-only attribute + # check first the read-only attribute file_attribute = os.stat(filename)[0] if (not file_attribute & stat.S_IWRITE): # File is read-only, so make it writeable @@ -327,7 +345,8 @@ def rmReadOnly(filename): except: logger.warning('Cannot change permissions of ' + filename, logger.WARNING) -#Wake function + +# Wake function def WakeOnLan(ethernet_address): addr_byte = ethernet_address.split(':') hw_addr = struct.pack(b'BBBBBB', int(addr_byte[0], 16), @@ -349,7 +368,7 @@ def WakeOnLan(ethernet_address): ss.close() -#Test Connection function +# Test Connection function def TestCon(host, port): try: socket.create_connection((host, port)) @@ -372,7 +391,7 @@ def WakeUp(): if TestCon(host, port) == "Down": # final check. logger.warning("System with mac: %s has not woken after 3 attempts. Continuing with the rest of the script." % ( - mac)) + mac)) else: logger.info("System with mac: %s has been woken. Continuing with the rest of the script." % (mac)) @@ -392,7 +411,8 @@ def CharReplace(Name): # /!\ detection is done 2char by 2char for UTF-8 special character if (len(Name) != 1) & (Idx < (len(Name) - 1)): # Detect UTF-8 - if ((Name[Idx] == '\xC2') | (Name[Idx] == '\xC3')) & ((Name[Idx+1] >= '\xA0') & (Name[Idx+1] <= '\xFF')): + if ((Name[Idx] == '\xC2') | (Name[Idx] == '\xC3')) & ( + (Name[Idx + 1] >= '\xA0') & (Name[Idx + 1] <= '\xFF')): encoding = 'utf-8' break # Detect CP850 @@ -433,7 +453,7 @@ def convert_to_ascii(inputName, dirName): if encoded: dirName = os.path.join(dir, base2) logger.info("Renaming directory to: %s." 
% (base2), 'ENCODER') - os.rename(os.path.join(dir,base), dirName) + os.rename(os.path.join(dir, base), dirName) if os.environ.has_key('NZBOP_SCRIPTDIR'): print "[NZB] DIRECTORY=%s" % (dirName) # Return the new directory to NZBGet. @@ -576,23 +596,23 @@ def parse_args(clientAgent, args): return None, None, None, None, None -def getDirs(section, subsection, link = 'hard'): +def getDirs(section, subsection, link='hard'): to_return = [] def processDir(path): folders = [] logger.info("Searching %s for mediafiles to post-process ..." % (path)) - sync = [ o for o in os.listdir(path) if os.path.splitext(o)[1] in ['.!sync','.bts'] ] + sync = [o for o in os.listdir(path) if os.path.splitext(o)[1] in ['.!sync', '.bts']] # search for single files and move them into their own folder for post-processing - for mediafile in [ os.path.join(path, o) for o in os.listdir(path) if - os.path.isfile(os.path.join(path, o)) ]: + for mediafile in [os.path.join(path, o) for o in os.listdir(path) if + os.path.isfile(os.path.join(path, o))]: if len(sync) > 0: break if os.path.split(mediafile)[1] in ['Thumbs.db', 'thumbs.db']: continue try: - logger.debug("Found file %s in root directory %s." % (os.path.split(mediafile)[1], path)) + logger.debug("Found file %s in root directory %s." % (os.path.split(mediafile)[1], path)) newPath = None fileExt = os.path.splitext(mediafile)[1] try: @@ -627,8 +647,9 @@ def getDirs(section, subsection, link = 'hard'): newPath = os.path.join(path, sanitizeName(title)) try: - newPath = newPath.encode(core.SYS_ENCODING) - except: pass + newPath = newPath.encode(core.SYS_ENCODING) + except: + pass # Just fail-safe incase we already have afile with this clean-name (was actually a bug from earlier code, but let's be safe). 
if os.path.isfile(newPath): @@ -642,19 +663,20 @@ def getDirs(section, subsection, link = 'hard'): newfile = os.path.join(newPath, sanitizeName(os.path.split(mediafile)[1])) try: newfile = newfile.encode(core.SYS_ENCODING) - except: pass + except: + pass # link file to its new path copy_link(mediafile, newfile, link) except Exception as e: logger.error("Failed to move %s to its own directory: %s" % (os.path.split(mediafile)[1], e)) - #removeEmptyFolders(path, removeRoot=False) + # removeEmptyFolders(path, removeRoot=False) if os.listdir(path): for dir in [os.path.join(path, o) for o in os.listdir(path) if - os.path.isdir(os.path.join(path, o))]: - sync = [ o for o in os.listdir(dir) if os.path.splitext(o)[1] in ['.!sync','.bts'] ] + os.path.isdir(os.path.join(path, o))]: + sync = [o for o in os.listdir(dir) if os.path.splitext(o)[1] in ['.!sync', '.bts']] if len(sync) > 0 or len(os.listdir(dir)) == 0: continue folders.extend([dir]) @@ -667,7 +689,8 @@ def getDirs(section, subsection, link = 'hard'): elif os.path.exists(core.CFG[section][subsection]["watch_dir"]): to_return.extend(processDir(core.CFG[section][subsection]["watch_dir"])) except Exception as e: - logger.error("Failed to add directories from %s for post-processing: %s" % (core.CFG[section][subsection]["watch_dir"], e)) + logger.error("Failed to add directories from %s for post-processing: %s" % ( + core.CFG[section][subsection]["watch_dir"], e)) if core.USELINK == 'move': try: @@ -678,10 +701,11 @@ def getDirs(section, subsection, link = 'hard'): logger.error("Failed to add directories from %s for post-processing: %s" % (core.OUTPUTDIRECTORY, e)) if not to_return: - logger.debug("No directories identified in %s:%s for post-processing" % (section,subsection)) + logger.debug("No directories identified in %s:%s for post-processing" % (section, subsection)) return list(set(to_return)) + def onerror(func, path, exc_info): """ Error handler for ``shutil.rmtree``. 
@@ -700,6 +724,7 @@ def onerror(func, path, exc_info): else: raise + def rmDir(dirName): logger.info("Deleting %s" % (dirName)) try: @@ -707,6 +732,7 @@ def rmDir(dirName): except: logger.error("Unable to delete folder %s" % (dirName)) + def cleanDir(path, section, subsection): if not os.path.exists(path): logger.info('Directory %s has been processed and removed ...' % (path), 'CLEANDIR') @@ -717,10 +743,12 @@ def cleanDir(path, section, subsection): return try: minSize = int(core.CFG[section][subsection]['minSize']) - except:minSize = 0 + except: + minSize = 0 try: delete_ignored = int(core.CFG[section][subsection]['delete_ignored']) - except:delete_ignored = 0 + except: + delete_ignored = 0 try: num_files = len(listMediaFiles(path, minSize=minSize, delete_ignored=delete_ignored)) except: @@ -737,6 +765,7 @@ def cleanDir(path, section, subsection): except: logger.error("Unable to delete directory %s" % (path)) + def create_torrent_class(clientAgent): # Hardlink solution for Torrents tc = None @@ -753,8 +782,8 @@ def create_torrent_class(clientAgent): logger.debug("Connecting to %s: http://%s:%s" % ( clientAgent, core.TRANSMISSIONHOST, core.TRANSMISSIONPORT)) tc = TransmissionClient(core.TRANSMISSIONHOST, core.TRANSMISSIONPORT, - core.TRANSMISSIONUSR, - core.TRANSMISSIONPWD) + core.TRANSMISSIONUSR, + core.TRANSMISSIONPWD) except: logger.error("Failed to connect to Transmission") @@ -763,12 +792,13 @@ def create_torrent_class(clientAgent): logger.debug("Connecting to %s: http://%s:%s" % (clientAgent, core.DELUGEHOST, core.DELUGEPORT)) tc = DelugeClient() tc.connect(host=core.DELUGEHOST, port=core.DELUGEPORT, username=core.DELUGEUSR, - password=core.DELUGEPWD) + password=core.DELUGEPWD) except: logger.error("Failed to connect to Deluge") return tc + def pause_torrent(clientAgent, inputHash, inputID, inputName): logger.debug("Stopping torrent %s in %s while processing" % (inputName, clientAgent)) try: @@ -782,6 +812,7 @@ def pause_torrent(clientAgent, inputHash, 
inputID, inputName): except: logger.warning("Failed to stop torrent %s in %s" % (inputName, clientAgent)) + def resume_torrent(clientAgent, inputHash, inputID, inputName): if not core.TORRENT_RESUME == 1: return @@ -797,6 +828,7 @@ def resume_torrent(clientAgent, inputHash, inputID, inputName): except: logger.warning("Failed to start torrent %s in %s" % (inputName, clientAgent)) + def remove_torrent(clientAgent, inputHash, inputID, inputName): if core.DELETE_ORIGINAL == 1 or core.USELINK == 'move': logger.debug("Deleting torrent %s from %s" % (inputName, clientAgent)) @@ -811,9 +843,10 @@ def remove_torrent(clientAgent, inputHash, inputID, inputName): time.sleep(5) except: logger.warning("Failed to delete torrent %s in %s" % (inputName, clientAgent)) - else: + else: resume_torrent(clientAgent, inputHash, inputID, inputName) + def find_download(clientAgent, download_id): logger.debug("Searching for Download on %s ..." % (clientAgent)) if clientAgent == 'utorrent': @@ -851,6 +884,7 @@ def find_download(clientAgent, download_id): return True return False + def get_nzoid(inputName): nzoid = None slots = [] @@ -923,6 +957,7 @@ def is_archive_file(filename): return regext.split(filename)[0] return False + def isMediaFile(mediafile, media=True, audio=True, meta=True, archives=True): fileName, fileExt = os.path.splitext(mediafile) @@ -933,17 +968,18 @@ def isMediaFile(mediafile, media=True, audio=True, meta=True, archives=True): except: pass - if (media and fileExt.lower() in core.MEDIACONTAINER)\ - or (audio and fileExt.lower() in core.AUDIOCONTAINER)\ - or (meta and fileExt.lower() in core.METACONTAINER)\ - or (archives and is_archive_file(mediafile)): + if (media and fileExt.lower() in core.MEDIACONTAINER) \ + or (audio and fileExt.lower() in core.AUDIOCONTAINER) \ + or (meta and fileExt.lower() in core.METACONTAINER) \ + or (archives and is_archive_file(mediafile)): return True else: return False + def listMediaFiles(path, minSize=0, delete_ignored=0, media=True, 
audio=True, meta=True, archives=True): files = [] - if not os.path.isdir(path): + if not os.path.isdir(path): if os.path.isfile(path): # Single file downloads. curFile = os.path.split(path)[1] if isMediaFile(curFile, media, audio, meta, archives): @@ -953,7 +989,8 @@ def listMediaFiles(path, minSize=0, delete_ignored=0, media=True, audio=True, me try: os.unlink(path) logger.debug('Ignored file %s has been removed ...' % (curFile)) - except:pass + except: + pass else: files.append(path) @@ -973,12 +1010,14 @@ def listMediaFiles(path, minSize=0, delete_ignored=0, media=True, audio=True, me try: os.unlink(fullCurFile) logger.debug('Ignored file %s has been removed ...' % (curFile)) - except:pass + except: + pass continue files.append(fullCurFile) - return sorted(files,key=len) + return sorted(files, key=len) + def find_imdbid(dirName, inputName): imdbid = None @@ -987,7 +1026,7 @@ def find_imdbid(dirName, inputName): # find imdbid in dirName logger.info('Searching folder and file names for imdbID ...') - m = re.search('(tt\d{7})', dirName+inputName) + m = re.search('(tt\d{7})', dirName + inputName) if m: imdbid = m.group(1) logger.info("Found imdbID [%s]" % imdbid) @@ -1000,14 +1039,14 @@ def find_imdbid(dirName, inputName): logger.info("Found imdbID [%s] via file name" % imdbid) return imdbid if os.environ.has_key('NZBPR__DNZB_MOREINFO'): - dnzb_more_info=os.environ.get('NZBPR__DNZB_MOREINFO', '') + dnzb_more_info = os.environ.get('NZBPR__DNZB_MOREINFO', '') if dnzb_more_info != '': regex = re.compile(r'^http://www.imdb.com/title/(tt[0-9]+)/$', re.IGNORECASE) m = regex.match(dnzb_more_info) if m: imdbid = m.group(1) logger.info("Found imdbID [%s] from DNZB-MoreInfo" % imdbid) - return imdbid + return imdbid logger.info('Searching IMDB for imdbID ...') guess = guessit.guess_movie_info(inputName) if guess: @@ -1045,7 +1084,8 @@ def find_imdbid(dirName, inputName): logger.warning('Unable to find a imdbID for %s' % (inputName)) return imdbid -def extractFiles(src, 
dst=None, keep_archive = None): + +def extractFiles(src, dst=None, keep_archive=None): extracted_folder = [] extracted_archive = [] @@ -1081,13 +1121,14 @@ def extractFiles(src, dst=None, keep_archive = None): except Exception as e: logger.error("Unable to remove file %s due to: %s" % (inputFile, e)) + def import_subs(filename): if not core.GETSUBS: return try: subliminal.cache_region.configure('dogpile.cache.memory') except: - pass + pass languages = set() for item in core.SLANGUAGES: @@ -1098,13 +1139,14 @@ def import_subs(filename): if not languages: return - logger.debug("Attempting to download subtitles for %s" %(filename), 'SUBTITLES') + logger.debug("Attempting to download subtitles for %s" % (filename), 'SUBTITLES') try: video = subliminal.scan_video(filename, subtitles=True, embedded_subtitles=True) subtitles = subliminal.download_best_subtitles([video], languages, hearing_impaired=False) subliminal.save_subtitles(subtitles) except Exception as e: - logger.error("Failed to download subtitles for %s due to: %s" %(filename, e), 'SUBTITLES') + logger.error("Failed to download subtitles for %s due to: %s" % (filename, e), 'SUBTITLES') + def server_responding(baseURL): try: @@ -1113,6 +1155,7 @@ def server_responding(baseURL): except (requests.ConnectionError, requests.exceptions.Timeout): return False + def plex_update(category): if core.FAILED: return @@ -1124,7 +1167,7 @@ def plex_update(category): section = None if not core.PLEXSEC: return - logger.debug("Attempting to update Plex Library for category %s." %(category), 'PLEX') + logger.debug("Attempting to update Plex Library for category %s." 
% (category), 'PLEX') for item in core.PLEXSEC: if item[0] == category: section = item[1] @@ -1136,6 +1179,7 @@ def plex_update(category): else: logger.debug("Could not identify section for plex update", 'PLEX') + def backupVersionedFile(old_file, version): numTries = 0 @@ -1152,7 +1196,8 @@ def backupVersionedFile(old_file, version): logger.log(u"Backup done", logger.DEBUG) break except Exception, e: - logger.log(u"Error while trying to back up " + old_file + " to " + new_file + " : " + str(e), logger.WARNING) + logger.log(u"Error while trying to back up " + old_file + " to " + new_file + " : " + str(e), + logger.WARNING) numTries += 1 time.sleep(1) logger.log(u"Trying again.", logger.DEBUG) @@ -1181,6 +1226,7 @@ def get_downloadInfo(inputName, status): return sqlResults + class RunningProcess(): """ Limits application to single instance """ @@ -1193,13 +1239,13 @@ class RunningProcess(): def alreadyrunning(self): return self.process.alreadyrunning() - #def __del__(self): - # self.process.__del__() + # def __del__(self): + # self.process.__del__() + class WindowsProcess(): - def __init__(self): - self.mutexname = "nzbtomedia_" + core.PID_FILE.replace('\\','/') # {D0E858DF-985E-4907-B7FB-8D732C3FC3B9}" + self.mutexname = "nzbtomedia_" + core.PID_FILE.replace('\\', '/') # {D0E858DF-985E-4907-B7FB-8D732C3FC3B9}" if platform.system() == 'Windows': from win32event import CreateMutex from win32api import CloseHandle, GetLastError @@ -1208,7 +1254,7 @@ class WindowsProcess(): self.CloseHandle = CloseHandle self.GetLastError = GetLastError self.ERROR_ALREADY_EXISTS = ERROR_ALREADY_EXISTS - + def alreadyrunning(self): self.mutex = self.CreateMutex(None, 0, self.mutexname) self.lasterror = self.GetLastError() @@ -1217,14 +1263,13 @@ class WindowsProcess(): return True else: return False - def __del__(self): if self.mutex: self.CloseHandle(self.mutex) -class PosixProcess(): +class PosixProcess(): def __init__(self): self.pidpath = core.PID_FILE self.lock_socket = None @@ 
-1239,7 +1284,8 @@ class PosixProcess(): if "Address already in use" in e: self.lasterror = True return self.lasterror - except AttributeError: pass + except AttributeError: + pass if os.path.exists(self.pidpath): # Make sure it is not a "stale" pidFile try: @@ -1256,7 +1302,7 @@ class PosixProcess(): else: self.lasterror = False else: - self.lasterror = False + self.lasterror = False if not self.lasterror: # Write my pid into pidFile to keep multiple copies of program from running @@ -1264,7 +1310,8 @@ class PosixProcess(): fp = open(self.pidpath, 'w') fp.write(str(os.getpid())) fp.close() - except: pass + except: + pass return self.lasterror diff --git a/core/synchronousdeluge/client.py b/core/synchronousdeluge/client.py index b4228d83..af2c740c 100644 --- a/core/synchronousdeluge/client.py +++ b/core/synchronousdeluge/client.py @@ -8,10 +8,8 @@ from exceptions import DelugeRPCError from protocol import DelugeRPCRequest, DelugeRPCResponse from transfer import DelugeTransfer - __all__ = ["DelugeClient"] - RPC_RESPONSE = 1 RPC_ERROR = 2 RPC_EVENT = 3 @@ -31,7 +29,8 @@ class DelugeClient(object): appDataPath = os.environ.get("APPDATA") if not appDataPath: import _winreg - hkey = _winreg.OpenKey(_winreg.HKEY_CURRENT_USER, "Software\\Microsoft\\Windows\\CurrentVersion\\Explorer\\Shell Folders") + hkey = _winreg.OpenKey(_winreg.HKEY_CURRENT_USER, + "Software\\Microsoft\\Windows\\CurrentVersion\\Explorer\\Shell Folders") appDataReg = _winreg.QueryValueEx(hkey, "AppData") appDataPath = appDataReg[0] _winreg.CloseKey(hkey) @@ -44,7 +43,6 @@ class DelugeClient(object): except OSError, e: return username, password - if os.path.exists(auth_file): for line in open(auth_file): if line.startswith("#"): @@ -108,20 +106,20 @@ class DelugeClient(object): message_type = message[0] -# if message_type == RPC_EVENT: -# event = message[1] -# values = message[2] -# -# if event in self._event_handlers: -# for handler in self._event_handlers[event]: -# gevent.spawn(handler, *values) -# -# 
elif message_type in (RPC_RESPONSE, RPC_ERROR): + # if message_type == RPC_EVENT: + # event = message[1] + # values = message[2] + # + # if event in self._event_handlers: + # for handler in self._event_handlers[event]: + # gevent.spawn(handler, *values) + # + # elif message_type in (RPC_RESPONSE, RPC_ERROR): if message_type in (RPC_RESPONSE, RPC_ERROR): request_id = message[1] value = message[2] - if request_id == self._request_counter : + if request_id == self._request_counter: if message_type == RPC_RESPONSE: response.set(value) elif message_type == RPC_ERROR: @@ -160,4 +158,3 @@ class DelugeClient(object): def disconnect(self): """Disconnects from the daemon.""" self.transfer.disconnect() - diff --git a/core/synchronousdeluge/exceptions.py b/core/synchronousdeluge/exceptions.py index ff622cb1..95bf7f04 100644 --- a/core/synchronousdeluge/exceptions.py +++ b/core/synchronousdeluge/exceptions.py @@ -1,6 +1,7 @@ # coding=utf-8 __all__ = ["DelugeRPCError"] + class DelugeRPCError(Exception): def __init__(self, name, msg, traceback): self.name = name @@ -9,4 +10,3 @@ class DelugeRPCError(Exception): def __str__(self): return "{0}: {1}: {2}".format(self.__class__.__name__, self.name, self.msg) - diff --git a/core/synchronousdeluge/protocol.py b/core/synchronousdeluge/protocol.py index 9af38b4d..2cb1a73e 100644 --- a/core/synchronousdeluge/protocol.py +++ b/core/synchronousdeluge/protocol.py @@ -1,6 +1,7 @@ # coding=utf-8 __all__ = ["DelugeRPCRequest", "DelugeRPCResponse"] + class DelugeRPCRequest(object): def __init__(self, request_id, method, *args, **kwargs): self.request_id = request_id @@ -11,6 +12,7 @@ class DelugeRPCRequest(object): def format(self): return (self.request_id, self.method, self.args, self.kwargs) + class DelugeRPCResponse(object): def __init__(self): self.value = None @@ -36,4 +38,3 @@ class DelugeRPCResponse(object): return self.value else: raise self._exception - diff --git a/core/synchronousdeluge/rencode.py b/core/synchronousdeluge/rencode.py 
index 0f6ca1ec..655f903b 100644 --- a/core/synchronousdeluge/rencode.py +++ b/core/synchronousdeluge/rencode.py @@ -9,9 +9,9 @@ BitTorrent project. For complex, heterogeneous data structures with many small elements, r-encodings take up significantly less space than b-encodings: - >>> len(rencode.dumps({'a':0, 'b':[1,2], 'c':99})) + >>> len(rencode.dumps({'a': 0, 'b': [1, 2], 'c': 99})) 13 - >>> len(bencode.bencode({'a':0, 'b':[1,2], 'c':99})) + >>> len(bencode.bencode({'a': 0, 'b': [1, 2], 'c': 99})) 26 The rencode format is not standardized, and may change with different @@ -73,19 +73,19 @@ MAX_INT_LENGTH = 64 # The bencode 'typecodes' such as i, d, etc have been extended and # relocated on the base-256 character set. -CHR_LIST = chr(59) -CHR_DICT = chr(60) -CHR_INT = chr(61) -CHR_INT1 = chr(62) -CHR_INT2 = chr(63) -CHR_INT4 = chr(64) -CHR_INT8 = chr(65) +CHR_LIST = chr(59) +CHR_DICT = chr(60) +CHR_INT = chr(61) +CHR_INT1 = chr(62) +CHR_INT2 = chr(63) +CHR_INT4 = chr(64) +CHR_INT8 = chr(65) CHR_FLOAT32 = chr(66) CHR_FLOAT64 = chr(44) -CHR_TRUE = chr(67) -CHR_FALSE = chr(68) -CHR_NONE = chr(69) -CHR_TERM = chr(127) +CHR_TRUE = chr(67) +CHR_FALSE = chr(68) +CHR_NONE = chr(69) +CHR_TERM = chr(127) # Positive integers with value embedded in typecode. INT_POS_FIXED_START = 0 @@ -104,9 +104,10 @@ STR_FIXED_START = 128 STR_FIXED_COUNT = 64 # Lists with length embedded in typecode. 
-LIST_FIXED_START = STR_FIXED_START+STR_FIXED_COUNT +LIST_FIXED_START = STR_FIXED_START + STR_FIXED_COUNT LIST_FIXED_COUNT = 64 + def decode_int(x, f): f += 1 newf = x.index(CHR_TERM, f) @@ -119,35 +120,42 @@ def decode_int(x, f): if x[f] == '-': if x[f + 1] == '0': raise ValueError - elif x[f] == '0' and newf != f+1: + elif x[f] == '0' and newf != f + 1: raise ValueError - return (n, newf+1) + return (n, newf + 1) + def decode_intb(x, f): f += 1 - return (struct.unpack('!b', x[f:f+1])[0], f+1) + return (struct.unpack('!b', x[f:f + 1])[0], f + 1) + def decode_inth(x, f): f += 1 - return (struct.unpack('!h', x[f:f+2])[0], f+2) + return (struct.unpack('!h', x[f:f + 2])[0], f + 2) + def decode_intl(x, f): f += 1 - return (struct.unpack('!l', x[f:f+4])[0], f+4) + return (struct.unpack('!l', x[f:f + 4])[0], f + 4) + def decode_intq(x, f): f += 1 - return (struct.unpack('!q', x[f:f+8])[0], f+8) + return (struct.unpack('!q', x[f:f + 8])[0], f + 8) + def decode_float32(x, f): f += 1 - n = struct.unpack('!f', x[f:f+4])[0] - return (n, f+4) + n = struct.unpack('!f', x[f:f + 4])[0] + return (n, f + 4) + def decode_float64(x, f): f += 1 - n = struct.unpack('!d', x[f:f+8])[0] - return (n, f+8) + n = struct.unpack('!d', x[f:f + 8])[0] + return (n, f + 8) + def decode_string(x, f): colon = x.index(':', f) @@ -155,40 +163,46 @@ def decode_string(x, f): n = int(x[f:colon]) except (OverflowError, ValueError): n = long(x[f:colon]) - if x[f] == '0' and colon != f+1: + if x[f] == '0' and colon != f + 1: raise ValueError colon += 1 - s = x[colon:colon+n] + s = x[colon:colon + n] try: t = s.decode("utf8") if len(t) != len(s): s = t except UnicodeDecodeError: pass - return (s, colon+n) + return (s, colon + n) + def decode_list(x, f): - r, f = [], f+1 + r, f = [], f + 1 while x[f] != CHR_TERM: v, f = decode_func[x[f]](x, f) r.append(v) return (tuple(r), f + 1) + def decode_dict(x, f): - r, f = {}, f+1 + r, f = {}, f + 1 while x[f] != CHR_TERM: k, f = decode_func[x[f]](x, f) r[k], f = 
decode_func[x[f]](x, f) return (r, f + 1) + def decode_true(x, f): - return (True, f+1) + return (True, f + 1) + def decode_false(x, f): - return (False, f+1) + return (False, f + 1) + def decode_none(x, f): - return (None, f+1) + return (None, f + 1) + decode_func = {} decode_func['0'] = decode_string @@ -201,77 +215,94 @@ decode_func['6'] = decode_string decode_func['7'] = decode_string decode_func['8'] = decode_string decode_func['9'] = decode_string -decode_func[CHR_LIST ] = decode_list -decode_func[CHR_DICT ] = decode_dict -decode_func[CHR_INT ] = decode_int -decode_func[CHR_INT1 ] = decode_intb -decode_func[CHR_INT2 ] = decode_inth -decode_func[CHR_INT4 ] = decode_intl -decode_func[CHR_INT8 ] = decode_intq +decode_func[CHR_LIST] = decode_list +decode_func[CHR_DICT] = decode_dict +decode_func[CHR_INT] = decode_int +decode_func[CHR_INT1] = decode_intb +decode_func[CHR_INT2] = decode_inth +decode_func[CHR_INT4] = decode_intl +decode_func[CHR_INT8] = decode_intq decode_func[CHR_FLOAT32] = decode_float32 decode_func[CHR_FLOAT64] = decode_float64 -decode_func[CHR_TRUE ] = decode_true -decode_func[CHR_FALSE ] = decode_false -decode_func[CHR_NONE ] = decode_none +decode_func[CHR_TRUE] = decode_true +decode_func[CHR_FALSE] = decode_false +decode_func[CHR_NONE] = decode_none + def make_fixed_length_string_decoders(): def make_decoder(slen): def f(x, f): - s = x[f+1:f+1+slen] + s = x[f + 1:f + 1 + slen] try: t = s.decode("utf8") if len(t) != len(s): s = t except UnicodeDecodeError: pass - return (s, f+1+slen) + return (s, f + 1 + slen) + return f + for i in range(STR_FIXED_COUNT): - decode_func[chr(STR_FIXED_START+i)] = make_decoder(i) + decode_func[chr(STR_FIXED_START + i)] = make_decoder(i) + make_fixed_length_string_decoders() + def make_fixed_length_list_decoders(): def make_decoder(slen): def f(x, f): - r, f = [], f+1 + r, f = [], f + 1 for i in range(slen): v, f = decode_func[x[f]](x, f) r.append(v) return (tuple(r), f) + return f + for i in 
range(LIST_FIXED_COUNT): - decode_func[chr(LIST_FIXED_START+i)] = make_decoder(i) + decode_func[chr(LIST_FIXED_START + i)] = make_decoder(i) + make_fixed_length_list_decoders() + def make_fixed_length_int_decoders(): def make_decoder(j): def f(x, f): - return (j, f+1) + return (j, f + 1) + return f + for i in range(INT_POS_FIXED_COUNT): - decode_func[chr(INT_POS_FIXED_START+i)] = make_decoder(i) + decode_func[chr(INT_POS_FIXED_START + i)] = make_decoder(i) for i in range(INT_NEG_FIXED_COUNT): - decode_func[chr(INT_NEG_FIXED_START+i)] = make_decoder(-1-i) + decode_func[chr(INT_NEG_FIXED_START + i)] = make_decoder(-1 - i) + make_fixed_length_int_decoders() + def make_fixed_length_dict_decoders(): def make_decoder(slen): def f(x, f): - r, f = {}, f+1 + r, f = {}, f + 1 for j in range(slen): k, f = decode_func[x[f]](x, f) r[k], f = decode_func[x[f]](x, f) return (r, f) + return f + for i in range(DICT_FIXED_COUNT): - decode_func[chr(DICT_FIXED_START+i)] = make_decoder(i) + decode_func[chr(DICT_FIXED_START + i)] = make_decoder(i) + make_fixed_length_dict_decoders() -def encode_dict(x,r): + +def encode_dict(x, r): r.append(CHR_DICT) for k, v in x.items(): encode_func[type(k)](k, r) @@ -288,13 +319,15 @@ def loads(x): raise ValueError return r + from types import StringType, IntType, LongType, DictType, ListType, TupleType, FloatType, NoneType, UnicodeType + def encode_int(x, r): if 0 <= x < INT_POS_FIXED_COUNT: - r.append(chr(INT_POS_FIXED_START+x)) + r.append(chr(INT_POS_FIXED_START + x)) elif -INT_NEG_FIXED_COUNT <= x < 0: - r.append(chr(INT_NEG_FIXED_START-1-x)) + r.append(chr(INT_NEG_FIXED_START - 1 - x)) elif -128 <= x < 128: r.extend((CHR_INT1, struct.pack('!b', x))) elif -32768 <= x < 32768: @@ -309,27 +342,34 @@ def encode_int(x, r): raise ValueError('overflow') r.extend((CHR_INT, s, CHR_TERM)) + def encode_float32(x, r): r.extend((CHR_FLOAT32, struct.pack('!f', x))) + def encode_float64(x, r): r.extend((CHR_FLOAT64, struct.pack('!d', x))) + def encode_bool(x, 
r): r.extend({False: CHR_FALSE, True: CHR_TRUE}[bool(x)]) + def encode_none(x, r): r.extend(CHR_NONE) + def encode_string(x, r): if len(x) < STR_FIXED_COUNT: r.extend((chr(STR_FIXED_START + len(x)), x)) else: r.extend((str(len(x)), ':', x)) + def encode_unicode(x, r): encode_string(x.encode("utf8"), r) + def encode_list(x, r): if len(x) < LIST_FIXED_COUNT: r.append(chr(LIST_FIXED_START + len(x))) @@ -341,7 +381,8 @@ def encode_list(x, r): encode_func[type(i)](i, r) r.append(CHR_TERM) -def encode_dict(x,r): + +def encode_dict(x, r): if len(x) < DICT_FIXED_COUNT: r.append(chr(DICT_FIXED_START + len(x))) for k, v in x.items(): @@ -354,6 +395,7 @@ def encode_dict(x,r): encode_func[type(v)](v, r) r.append(CHR_TERM) + encode_func = {} encode_func[IntType] = encode_int encode_func[LongType] = encode_int @@ -368,10 +410,12 @@ lock = Lock() try: from types import BooleanType + encode_func[BooleanType] = encode_bool except ImportError: pass + def dumps(x, float_bits=DEFAULT_FLOAT_BITS): """ Dump data structure to str. 
@@ -392,41 +436,46 @@ def dumps(x, float_bits=DEFAULT_FLOAT_BITS): lock.release() return ''.join(r) + def test(): f1 = struct.unpack('!f', struct.pack('!f', 25.5))[0] f2 = struct.unpack('!f', struct.pack('!f', 29.3))[0] f3 = struct.unpack('!f', struct.pack('!f', -0.6))[0] - L = (({'a':15, 'bb':f1, 'ccc':f2, '':(f3,(),False,True,'')},('a',10**20),tuple(range(-100000,100000)),'b'*31,'b'*62,'b'*64,2**30,2**33,2**62,2**64,2**30,2**33,2**62,2**64,False,False, True, -1, 2, 0),) + L = (({'a': 15, 'bb': f1, 'ccc': f2, '': (f3, (), False, True, '')}, ('a', 10 ** 20), tuple(range(-100000, 100000)), + 'b' * 31, 'b' * 62, 'b' * 64, 2 ** 30, 2 ** 33, 2 ** 62, 2 ** 64, 2 ** 30, 2 ** 33, 2 ** 62, 2 ** 64, False, + False, True, -1, 2, 0),) assert loads(dumps(L)) == L - d = dict(zip(range(-100000,100000),range(-100000,100000))) - d.update({'a':20, 20:40, 40:41, f1:f2, f2:f3, f3:False, False:True, True:False}) - L = (d, {}, {5:6}, {7:7,True:8}, {9:10, 22:39, 49:50, 44: ''}) + d = dict(zip(range(-100000, 100000), range(-100000, 100000))) + d.update({'a': 20, 20: 40, 40: 41, f1: f2, f2: f3, f3: False, False: True, True: False}) + L = (d, {}, {5: 6}, {7: 7, True: 8}, {9: 10, 22: 39, 49: 50, 44: ''}) assert loads(dumps(L)) == L - L = ('', 'a'*10, 'a'*100, 'a'*1000, 'a'*10000, 'a'*100000, 'a'*1000000, 'a'*10000000) + L = ('', 'a' * 10, 'a' * 100, 'a' * 1000, 'a' * 10000, 'a' * 100000, 'a' * 1000000, 'a' * 10000000) assert loads(dumps(L)) == L - L = tuple([dict(zip(range(n),range(n))) for n in range(100)]) + ('b',) + L = tuple([dict(zip(range(n), range(n))) for n in range(100)]) + ('b',) assert loads(dumps(L)) == L - L = tuple([dict(zip(range(n),range(-n,0))) for n in range(100)]) + ('b',) + L = tuple([dict(zip(range(n), range(-n, 0))) for n in range(100)]) + ('b',) assert loads(dumps(L)) == L L = tuple([tuple(range(n)) for n in range(100)]) + ('b',) assert loads(dumps(L)) == L - L = tuple(['a'*n for n in range(1000)]) + ('b',) + L = tuple(['a' * n for n in range(1000)]) + ('b',) assert 
loads(dumps(L)) == L - L = tuple(['a'*n for n in range(1000)]) + (None,True,None) + L = tuple(['a' * n for n in range(1000)]) + (None, True, None) assert loads(dumps(L)) == L assert loads(dumps(None)) == None - assert loads(dumps({None:None})) == {None:None} - assert 1e-10 0 and len(audStreams) > 0): disable = True - logger.info("DISABLED: ffprobe failed to analyse streams from test file. Stopping corruption check.", 'TRANSCODER') + logger.info("DISABLED: ffprobe failed to analyse streams from test file. Stopping corruption check.", + 'TRANSCODER') if disable: if status: # if the download was "failed", assume bad. If it was successful, assume good. return False @@ -51,9 +53,11 @@ def isVideoGood(videofile, status): logger.info("SUCCESS: [%s] has no corruption." % (fileNameExt), 'TRANSCODER') return True else: - logger.info("FAILED: [%s] has %s video streams and %s audio streams. Assume corruption." % (fileNameExt, str(len(videoStreams)), str(len(audioStreams))), 'TRANSCODER') + logger.info("FAILED: [%s] has %s video streams and %s audio streams. Assume corruption." 
% ( + fileNameExt, str(len(videoStreams)), str(len(audioStreams))), 'TRANSCODER') return False + def zip_out(file, img, bitbucket): procin = None cmd = [core.SEVENZIP, '-so', 'e', img, file] @@ -63,6 +67,7 @@ def zip_out(file, img, bitbucket): logger.error("Extracting [%s] has failed" % (file), 'TRANSCODER') return procin + def getVideoDetails(videofile, img=None, bitbucket=None): video_details = {} result = 1 @@ -76,7 +81,8 @@ def getVideoDetails(videofile, img=None, bitbucket=None): try: if img: videofile = '-' - command = [core.FFPROBE, '-v', 'quiet', print_format, 'json', '-show_format', '-show_streams', '-show_error', videofile] + command = [core.FFPROBE, '-v', 'quiet', print_format, 'json', '-show_format', '-show_streams', '-show_error', + videofile] print_cmd(command) if img: procin = zip_out(file, img, bitbucket) @@ -87,7 +93,8 @@ def getVideoDetails(videofile, img=None, bitbucket=None): out, err = proc.communicate() result = proc.returncode video_details = json.loads(out) - except: pass + except: + pass if not video_details: try: command = [core.FFPROBE, '-v', 'quiet', print_format, 'json', '-show_format', '-show_streams', videofile] @@ -104,6 +111,7 @@ def getVideoDetails(videofile, img=None, bitbucket=None): logger.error("Checking [%s] has failed" % (file), 'TRANSCODER') return video_details, result + def buildCommands(file, newDir, movieName, bitbucket): if isinstance(file, str): inputFile = file @@ -119,8 +127,8 @@ def buildCommands(file, newDir, movieName, bitbucket): name = ('%s.cd%s' % (movieName, check.groups()[0])) elif core.CONCAT and re.match("(.+)[cC][dD][0-9]", name): name = re.sub("([\ \.\-\_\=\:]+[cC][dD][0-9])", "", name) - if ext == core.VEXTENSION and newDir == dir: # we need to change the name to prevent overwriting itself. - core.VEXTENSION = '-transcoded' + core.VEXTENSION # adds '-transcoded.ext' + if ext == core.VEXTENSION and newDir == dir: # we need to change the name to prevent overwriting itself. 
+ core.VEXTENSION = '-transcoded' + core.VEXTENSION # adds '-transcoded.ext' else: img, data = file.iteritems().next() name = data['name'] @@ -139,7 +147,8 @@ def buildCommands(file, newDir, movieName, bitbucket): meta_cmd = [] other_cmd = [] - if not video_details or not video_details.get("streams"): # we couldn't read streams with ffprobe. Set defaults to try transcoding. + if not video_details or not video_details.get( + "streams"): # we couldn't read streams with ffprobe. Set defaults to try transcoding. videoStreams = [] audioStreams = [] subStreams = [] @@ -166,12 +175,13 @@ def buildCommands(file, newDir, movieName, bitbucket): if core.ACODEC: audio_cmd.extend(['-c:a', core.ACODEC]) - if core.ACODEC in ['aac', 'dts']: # Allow users to use the experimental AAC codec that's built into recent versions of ffmpeg + if core.ACODEC in ['aac', + 'dts']: # Allow users to use the experimental AAC codec that's built into recent versions of ffmpeg audio_cmd.extend(['-strict', '-2']) else: audio_cmd.extend(['-c:a', 'copy']) if core.ACHANNELS: - audio_cmd.extend(['-ac', str(core.ACHANNELS)]) + audio_cmd.extend(['-ac', str(core.ACHANNELS)]) if core.ABITRATE: audio_cmd.extend(['-b:a', str(core.ABITRATE)]) if core.OUTPUTQUALITYPERCENT: @@ -183,7 +193,7 @@ def buildCommands(file, newDir, movieName, bitbucket): sub_cmd.extend(['-c:s', 'copy']) else: # http://en.wikibooks.org/wiki/FFMPEG_An_Intermediate_Guide/subtitle_options sub_cmd.extend(['-sn']) # Don't copy the subtitles over - + if core.OUTPUTFASTSTART: other_cmd.extend(['-movflags', '+faststart']) @@ -192,23 +202,29 @@ def buildCommands(file, newDir, movieName, bitbucket): audioStreams = [item for item in video_details["streams"] if item["codec_type"] == "audio"] subStreams = [item for item in video_details["streams"] if item["codec_type"] == "subtitle"] if core.VEXTENSION not in ['.mkv', '.mpegts']: - subStreams = [item for item in video_details["streams"] if item["codec_type"] == "subtitle" and item["codec_name"] != 
"hdmv_pgs_subtitle" and item["codec_name"] != "pgssub"] + subStreams = [item for item in video_details["streams"] if + item["codec_type"] == "subtitle" and item["codec_name"] != "hdmv_pgs_subtitle" and item[ + "codec_name"] != "pgssub"] for video in videoStreams: codec = video["codec_name"] try: fr = video["avg_frame_rate"] - except: fr = 0 + except: + fr = 0 try: width = video["width"] - except: width = 0 + except: + width = 0 try: height = video["height"] - except: height = 0 + except: + height = 0 scale = core.VRESOLUTION try: - framerate = float(fr.split('/')[0])/float(fr.split('/')[1]) - except: framerate = 0 + framerate = float(fr.split('/')[0]) / float(fr.split('/')[1]) + except: + framerate = 0 if codec in core.VCODEC_ALLOW or not core.VCODEC: video_cmd.extend(['-c:v', 'copy']) else: @@ -216,16 +232,16 @@ def buildCommands(file, newDir, movieName, bitbucket): if core.VFRAMERATE and not (core.VFRAMERATE * 0.999 <= fr <= core.VFRAMERATE * 1.001): video_cmd.extend(['-r', str(core.VFRAMERATE)]) if scale: - w_scale = width/float(scale.split(':')[0]) - h_scale = height/float(scale.split(':')[1]) - if w_scale > h_scale: # widescreen, Scale by width only. - scale = scale.split(':')[0] + ":" + str(int((height/w_scale)/2)*2) - if w_scale > 1: - video_cmd.extend(['-vf', 'scale=' + scale]) + w_scale = width / float(scale.split(':')[0]) + h_scale = height / float(scale.split(':')[1]) + if w_scale > h_scale: # widescreen, Scale by width only. + scale = scale.split(':')[0] + ":" + str(int((height / w_scale) / 2) * 2) + if w_scale > 1: + video_cmd.extend(['-vf', 'scale=' + scale]) else: # lower or mathcing ratio, scale by height only. 
- scale = str(int((width/h_scale)/2)*2) + ":" + scale.split(':')[1] - if h_scale > 1: - video_cmd.extend(['-vf', 'scale=' + scale]) + scale = str(int((width / h_scale) / 2) * 2) + ":" + scale.split(':')[1] + if h_scale > 1: + video_cmd.extend(['-vf', 'scale=' + scale]) if core.VBITRATE: video_cmd.extend(['-b:v', str(core.VBITRATE)]) if core.VPRESET: @@ -238,7 +254,7 @@ def buildCommands(file, newDir, movieName, bitbucket): if video_cmd[1] == 'copy' and any(i in video_cmd for i in no_copy): video_cmd[1] = core.VCODEC if core.VCODEC == 'copy': # force copy. therefore ignore all other video transcoding. - video_cmd = ['-c:v', 'copy'] + video_cmd = ['-c:v', 'copy'] map_cmd.extend(['-map', '0:' + str(video["index"])]) break # Only one video needed @@ -246,12 +262,12 @@ def buildCommands(file, newDir, movieName, bitbucket): a_mapped = [] if audioStreams: try: - audio1 = [ item for item in audioStreams if item["tags"]["language"] == core.ALANGUAGE ] + audio1 = [item for item in audioStreams if item["tags"]["language"] == core.ALANGUAGE] except: # no language tags. Assume only 1 language. audio1 = audioStreams - audio2 = [ item for item in audio1 if item["codec_name"] in core.ACODEC_ALLOW ] + audio2 = [item for item in audio1 if item["codec_name"] in core.ACODEC_ALLOW] try: - audio3 = [ item for item in audioStreams if item["tags"]["language"] != core.ALANGUAGE ] + audio3 = [item for item in audioStreams if item["tags"]["language"] != core.ALANGUAGE] except: audio3 = [] @@ -259,21 +275,25 @@ def buildCommands(file, newDir, movieName, bitbucket): map_cmd.extend(['-map', '0:' + str(audio2[0]["index"])]) a_mapped.extend([audio2[0]["index"]]) try: - bitrate = int(audio2[0]["bit_rate"])/1000 - except: bitrate = 0 + bitrate = int(audio2[0]["bit_rate"]) / 1000 + except: + bitrate = 0 try: channels = int(audio2[0]["channels"]) - except: channels = 0 + except: + channels = 0 audio_cmd.extend(['-c:a:' + str(used_audio), 'copy']) elif audio1: # right language wrong codec. 
map_cmd.extend(['-map', '0:' + str(audio1[0]["index"])]) a_mapped.extend([audio1[0]["index"]]) try: - bitrate = int(audio1[0]["bit_rate"])/1000 - except: bitrate = 0 + bitrate = int(audio1[0]["bit_rate"]) / 1000 + except: + bitrate = 0 try: channels = int(audio1[0]["channels"]) - except: channels = 0 + except: + channels = 0 if core.ACODEC: audio_cmd.extend(['-c:a:' + str(used_audio), core.ACODEC]) else: @@ -282,11 +302,13 @@ def buildCommands(file, newDir, movieName, bitbucket): map_cmd.extend(['-map', '0:' + str(audio3[0]["index"])]) a_mapped.extend([audio3[0]["index"]]) try: - bitrate = int(audio3[0]["bit_rate"])/1000 - except: bitrate = 0 + bitrate = int(audio3[0]["bit_rate"]) / 1000 + except: + bitrate = 0 try: channels = int(audio3[0]["channels"]) - except: channels = 0 + except: + channels = 0 if core.ACODEC: audio_cmd.extend(['-c:a:' + str(used_audio), core.ACODEC]) else: @@ -309,26 +331,30 @@ def buildCommands(file, newDir, movieName, bitbucket): if core.ACODEC2_ALLOW: used_audio += 1 - audio4 = [ item for item in audio1 if item["codec_name"] in core.ACODEC2_ALLOW ] + audio4 = [item for item in audio1 if item["codec_name"] in core.ACODEC2_ALLOW] if audio4: # right language and codec. map_cmd.extend(['-map', '0:' + str(audio4[0]["index"])]) a_mapped.extend([audio4[0]["index"]]) try: - bitrate = int(audio4[0]["bit_rate"])/1000 - except: bitrate = 0 + bitrate = int(audio4[0]["bit_rate"]) / 1000 + except: + bitrate = 0 try: channels = int(audio4[0]["channels"]) - except: channels = 0 + except: + channels = 0 audio_cmd2.extend(['-c:a:' + str(used_audio), 'copy']) elif audio1: # right language wrong codec. 
map_cmd.extend(['-map', '0:' + str(audio1[0]["index"])]) a_mapped.extend([audio1[0]["index"]]) try: - bitrate = int(audio1[0]["bit_rate"])/1000 - except: bitrate = 0 + bitrate = int(audio1[0]["bit_rate"]) / 1000 + except: + bitrate = 0 try: channels = int(audio1[0]["channels"]) - except: channels = 0 + except: + channels = 0 if core.ACODEC2: audio_cmd2.extend(['-c:a:' + str(used_audio), core.ACODEC2]) else: @@ -337,11 +363,13 @@ def buildCommands(file, newDir, movieName, bitbucket): map_cmd.extend(['-map', '0:' + str(audio3[0]["index"])]) a_mapped.extend([audio3[0]["index"]]) try: - bitrate = int(audio3[0]["bit_rate"])/1000 - except: bitrate = 0 + bitrate = int(audio3[0]["bit_rate"]) / 1000 + except: + bitrate = 0 try: channels = int(audio3[0]["channels"]) - except: channels = 0 + except: + channels = 0 if core.ACODEC2: audio_cmd2.extend(['-c:a:' + str(used_audio), core.ACODEC2]) else: @@ -371,11 +399,13 @@ def buildCommands(file, newDir, movieName, bitbucket): map_cmd.extend(['-map', '0:' + str(audio["index"])]) audio_cmd3 = [] try: - bitrate = int(audio["bit_rate"])/1000 - except: bitrate = 0 + bitrate = int(audio["bit_rate"]) / 1000 + except: + bitrate = 0 try: channels = int(audio["channels"]) - except: channels = 0 + except: + channels = 0 if audio["codec_name"] in core.ACODEC3_ALLOW: audio_cmd3.extend(['-c:a:' + str(used_audio), 'copy']) else: @@ -406,8 +436,9 @@ def buildCommands(file, newDir, movieName, bitbucket): n = 0 for lan in core.SLANGUAGES: try: - subs1 = [ item for item in subStreams if item["tags"]["language"] == lan ] - except: subs1 = [] + subs1 = [item for item in subStreams if item["tags"]["language"] == lan] + except: + subs1 = [] if core.BURN and not subs1 and not burnt and os.path.isfile(file): for subfile in get_subs(file): if lan in os.path.split(subfile)[1]: @@ -426,7 +457,7 @@ def buildCommands(file, newDir, movieName, bitbucket): break map_cmd.extend(['-map', '0:' + str(sub["index"])]) s_mapped.extend([sub["index"]]) - + if 
core.SINCLUDE: for sub in subStreams: if not core.ALLOWSUBS: @@ -434,7 +465,7 @@ def buildCommands(file, newDir, movieName, bitbucket): if sub["index"] in s_mapped: continue map_cmd.extend(['-map', '0:' + str(sub["index"])]) - s_mapped.extend([sub["index"]]) + s_mapped.extend([sub["index"]]) if core.OUTPUTFASTSTART: other_cmd.extend(['-movflags', '+faststart']) @@ -446,7 +477,7 @@ def buildCommands(file, newDir, movieName, bitbucket): if core.GENERALOPTS: command.extend(core.GENERALOPTS) - command.extend([ '-i', inputFile]) + command.extend(['-i', inputFile]) if core.SEMBED and os.path.isfile(file): for subfile in get_subs(file): @@ -461,7 +492,7 @@ def buildCommands(file, newDir, movieName, bitbucket): if not core.ALLOWSUBS or (not s_mapped and not n): sub_cmd.extend(['-sn']) - else: + else: if core.SCODEC: sub_cmd.extend(['-c:s', core.SCODEC]) else: @@ -478,6 +509,7 @@ def buildCommands(file, newDir, movieName, bitbucket): command = core.NICENESS + command return command + def get_subs(file): filepaths = [] subExt = ['.srt', '.sub', '.idx'] @@ -486,9 +518,10 @@ def get_subs(file): for dirname, dirs, filenames in os.walk(dir): for filename in filenames: filepaths.extend([os.path.join(dirname, filename)]) - subfiles = [ item for item in filepaths if os.path.splitext(item)[1] in subExt and name in item ] + subfiles = [item for item in filepaths if os.path.splitext(item)[1] in subExt and name in item] return subfiles + def extract_subs(file, newfilePath, bitbucket): video_details, result = getVideoDetails(file) if not video_details: @@ -501,34 +534,39 @@ def extract_subs(file, newfilePath, bitbucket): name = os.path.splitext(os.path.split(newfilePath)[1])[0] try: - subStreams = [item for item in video_details["streams"] if item["codec_type"] == "subtitle" and item["tags"]["language"] in core.SLANGUAGES and item["codec_name"] != "hdmv_pgs_subtitle" and item["codec_name"] != "pgssub"] + subStreams = [item for item in video_details["streams"] if + item["codec_type"] == 
"subtitle" and item["tags"]["language"] in core.SLANGUAGES and item[ + "codec_name"] != "hdmv_pgs_subtitle" and item["codec_name"] != "pgssub"] except: - subStreams = [item for item in video_details["streams"] if item["codec_type"] == "subtitle" and item["codec_name"] != "hdmv_pgs_subtitle" and item["codec_name"] != "pgssub"] + subStreams = [item for item in video_details["streams"] if + item["codec_type"] == "subtitle" and item["codec_name"] != "hdmv_pgs_subtitle" and item[ + "codec_name"] != "pgssub"] num = len(subStreams) for n in range(num): sub = subStreams[n] idx = sub["index"] try: - lan = sub["tags"]["language"] + lan = sub["tags"]["language"] except: - lan = "unk" + lan = "unk" if num == 1: - outputFile = os.path.join(subdir, "%s.srt" %(name)) - if os.path.isfile(outputFile): - outputFile = os.path.join(subdir, "%s.%s.srt" %(name, n)) + outputFile = os.path.join(subdir, "%s.srt" % (name)) + if os.path.isfile(outputFile): + outputFile = os.path.join(subdir, "%s.%s.srt" % (name, n)) else: - outputFile = os.path.join(subdir, "%s.%s.srt" %(name, lan)) - if os.path.isfile(outputFile): - outputFile = os.path.join(subdir, "%s.%s.%s.srt" %(name, lan, n)) + outputFile = os.path.join(subdir, "%s.%s.srt" % (name, lan)) + if os.path.isfile(outputFile): + outputFile = os.path.join(subdir, "%s.%s.%s.srt" % (name, lan, n)) - command = [core.FFMPEG, '-loglevel', 'warning', '-i', file, '-vn', '-an', '-codec:' + str(idx), 'srt', outputFile] + command = [core.FFMPEG, '-loglevel', 'warning', '-i', file, '-vn', '-an', '-codec:' + str(idx), 'srt', + outputFile] if platform.system() != 'Windows': command = core.NICENESS + command logger.info("Extracting %s subtitle from: %s" % (lan, file)) print_cmd(command) - result = 1 # set result to failed in case call fails. + result = 1 # set result to failed in case call fails. 
try: proc = subprocess.Popen(command, stdout=bitbucket, stderr=bitbucket) proc.communicate() @@ -539,11 +577,13 @@ def extract_subs(file, newfilePath, bitbucket): if result == 0: try: shutil.copymode(file, outputFile) - except: pass + except: + pass logger.info("Extracting %s subtitle from %s has succeeded" % (lan, file)) else: logger.error("Extracting subtitles has failed") + def processList(List, newDir, bitbucket): remList = [] newList = [] @@ -562,7 +602,7 @@ def processList(List, newDir, bitbucket): logger.debug("Found VIDEO_TS image file: %s" % (item), "TRANSCODER") if not vtsPath: try: - vtsPath = re.match("(.+VIDEO_TS)",item).groups()[0] + vtsPath = re.match("(.+VIDEO_TS)", item).groups()[0] except: vtsPath = os.path.split(item)[0] remList.append(item) @@ -571,7 +611,8 @@ def processList(List, newDir, bitbucket): elif core.CONCAT and re.match(".+[cC][dD][0-9].", item): remList.append(item) combine.append(item) - else: continue + else: + continue if vtsPath: newList.extend(combineVTS(vtsPath)) if combine: @@ -589,7 +630,8 @@ def processList(List, newDir, bitbucket): newList = [] remList = [] logger.error("Failed extracting .vob files from disk image. 
Stopping transcoding.", "TRANSCODER") - return List, remList, newList, success + return List, remList, newList, success + def ripISO(item, newDir, bitbucket): newFiles = [] @@ -606,13 +648,14 @@ def ripISO(item, newDir, bitbucket): proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=bitbucket) out, err = proc.communicate() result = proc.returncode - fileList = [ re.match(".+(VIDEO_TS[\\\/]VTS_[0-9][0-9]_[0-9].[Vv][Oo][Bb])", line).groups()[0] for line in out.splitlines() if re.match(".+VIDEO_TS[\\\/]VTS_[0-9][0-9]_[0-9].[Vv][Oo][Bb]", line) ] + fileList = [re.match(".+(VIDEO_TS[\\\/]VTS_[0-9][0-9]_[0-9].[Vv][Oo][Bb])", line).groups()[0] for line in + out.splitlines() if re.match(".+VIDEO_TS[\\\/]VTS_[0-9][0-9]_[0-9].[Vv][Oo][Bb]", line)] combined = [] for n in range(99): concat = [] m = 1 while True: - vtsName = 'VIDEO_TS%sVTS_%02d_%d.VOB' % (os.sep, n+1, m) + vtsName = 'VIDEO_TS%sVTS_%02d_%d.VOB' % (os.sep, n + 1, m) if vtsName in fileList: concat.append(vtsName) m += 1 @@ -623,11 +666,11 @@ def ripISO(item, newDir, bitbucket): if core.CONCAT: combined.extend(concat) continue - name = '%s.cd%s' % (os.path.splitext(os.path.split(item)[1])[0] ,str(n+1)) - newFiles.append({item: {'name': name , 'files': concat}}) + name = '%s.cd%s' % (os.path.splitext(os.path.split(item)[1])[0], str(n + 1)) + newFiles.append({item: {'name': name, 'files': concat}}) if core.CONCAT: name = os.path.splitext(os.path.split(item)[1])[0] - newFiles.append({item: {'name': name , 'files': combined}}) + newFiles.append({item: {'name': name, 'files': combined}}) if not newFiles: logger.error("No VIDEO_TS folder found in image file %s" % (item), "TRANSCODER") newFiles = [failure_dir] @@ -636,6 +679,7 @@ def ripISO(item, newDir, bitbucket): newFiles = [failure_dir] return newFiles + def combineVTS(vtsPath): newFiles = [] combined = '' @@ -643,7 +687,7 @@ def combineVTS(vtsPath): concat = '' m = 1 while True: - vtsName = 'VTS_%02d_%d.VOB' % (n+1, m) + vtsName = 'VTS_%02d_%d.VOB' % (n + 1, 
m) if os.path.isfile(os.path.join(vtsPath, vtsName)): concat = concat + os.path.join(vtsPath, vtsName) + '|' m += 1 @@ -659,12 +703,14 @@ def combineVTS(vtsPath): newFiles.append('concat:%s' % combined[:-1]) return newFiles + def combineCD(combine): newFiles = [] - for item in set([ re.match("(.+)[cC][dD][0-9].",item).groups()[0] for item in combine ]): + for item in set([re.match("(.+)[cC][dD][0-9].", item).groups()[0] for item in combine]): concat = '' for n in range(99): - files = [ file for file in combine if n+1 == int(re.match(".+[cC][dD]([0-9]+).",file).groups()[0]) and item in file ] + files = [file for file in combine if + n + 1 == int(re.match(".+[cC][dD]([0-9]+).", file).groups()[0]) and item in file] if files: concat = concat + files[0] + '|' else: @@ -673,17 +719,19 @@ def combineCD(combine): newFiles.append('concat:%s' % concat[:-1]) return newFiles + def print_cmd(command): cmd = "" for item in command: cmd = cmd + " " + str(item) logger.debug("calling command:%s" % (cmd)) + def Transcode_directory(dirName): if not core.FFMPEG: return 1, dirName logger.info("Checking for files to be transcoded") - final_result = 0 # initialize as successful + final_result = 0 # initialize as successful if core.OUTPUTVIDEOPATH: newDir = core.OUTPUTVIDEOPATH makeDir(newDir) @@ -713,17 +761,17 @@ def Transcode_directory(dirName): if core.SEXTRACT and isinstance(file, str): extract_subs(file, newfilePath, bitbucket) - try: # Try to remove the file that we're transcoding to just in case. (ffmpeg will return an error if it already exists for some reason) + try: # Try to remove the file that we're transcoding to just in case. 
(ffmpeg will return an error if it already exists for some reason) os.remove(newfilePath) except OSError, e: - if e.errno != errno.ENOENT: # Ignore the error if it's just telling us that the file doesn't exist + if e.errno != errno.ENOENT: # Ignore the error if it's just telling us that the file doesn't exist logger.debug("Error when removing transcoding target: %s" % (e)) except Exception, e: logger.debug("Error when removing transcoding target: %s" % (e)) logger.info("Transcoding video: %s" % (newfilePath)) print_cmd(command) - result = 1 # set result to failed in case call fails. + result = 1 # set result to failed in case call fails. try: if isinstance(file, str): proc = subprocess.Popen(command, stdout=bitbucket, stderr=bitbucket) @@ -752,12 +800,14 @@ def Transcode_directory(dirName): if result == 0: try: shutil.copymode(file, newfilePath) - except: pass + except: + pass logger.info("Transcoding of video to %s succeeded" % (newfilePath)) if os.path.isfile(newfilePath) and (file in newList or not core.DUPLICATE): try: os.unlink(file) - except: pass + except: + pass else: logger.error("Transcoding of video to %s failed with result %s" % (newfilePath, str(result))) # this will be 0 (successful) it all are successful, else will return a positive integer for failure. @@ -766,8 +816,9 @@ def Transcode_directory(dirName): for file in remList: try: os.unlink(file) - except: pass - if not os.listdir(newDir): #this is an empty directory and we didn't transcode into it. + except: + pass + if not os.listdir(newDir): # this is an empty directory and we didn't transcode into it. 
os.rmdir(newDir) newDir = dirName if not core.PROCESSOUTPUT and core.DUPLICATE: # We postprocess the original files to CP/SB diff --git a/core/transmissionrpc/__init__.py b/core/transmissionrpc/__init__.py index 7cc02cfd..c0ced381 100644 --- a/core/transmissionrpc/__init__.py +++ b/core/transmissionrpc/__init__.py @@ -10,9 +10,9 @@ from core.transmissionrpc.session import Session from core.transmissionrpc.client import Client from core.transmissionrpc.utils import add_stdout_logger, add_file_logger -__author__ = 'Erik Svensson ' -__version_major__ = 0 -__version_minor__ = 11 -__version__ = '{0}.{1}'.format(__version_major__, __version_minor__) -__copyright__ = 'Copyright (c) 2008-2013 Erik Svensson' -__license__ = 'MIT' +__author__ = 'Erik Svensson ' +__version_major__ = 0 +__version_minor__ = 11 +__version__ = '{0}.{1}'.format(__version_major__, __version_minor__) +__copyright__ = 'Copyright (c) 2008-2013 Erik Svensson' +__license__ = 'MIT' diff --git a/core/transmissionrpc/client.py b/core/transmissionrpc/client.py index 803b59ae..461be0ad 100644 --- a/core/transmissionrpc/client.py +++ b/core/transmissionrpc/client.py @@ -18,7 +18,6 @@ from core.transmissionrpc.torrent import Torrent from core.transmissionrpc.session import Session from six import PY3, integer_types, string_types, iteritems - if PY3: from urllib.parse import urlparse from urllib.request import urlopen @@ -26,6 +25,7 @@ else: from urlparse import urlparse from urllib2 import urlopen + def debug_httperror(error): """ Log the Transmission RPC HTTP error. 
@@ -49,6 +49,7 @@ def debug_httperror(error): ) ) + def parse_torrent_id(arg): """Parse an torrent id or torrent hashString.""" torrent_id = None @@ -62,7 +63,7 @@ def parse_torrent_id(arg): elif isinstance(arg, string_types): try: torrent_id = int(arg) - if torrent_id >= 2**31: + if torrent_id >= 2 ** 31: torrent_id = None except (ValueError, TypeError): pass @@ -75,6 +76,7 @@ def parse_torrent_id(arg): pass return torrent_id + def parse_torrent_ids(args): """ Take things and make them valid torrent identifiers @@ -115,6 +117,7 @@ def parse_torrent_ids(args): ids = [torrent_id] return ids + """ Torrent ids @@ -129,12 +132,14 @@ possible to provide a argument called ``timeout``. Timeout is only effective when using Python 2.6 or later and the default timeout is 30 seconds. """ + class Client(object): """ Client is the class handling the Transmission JSON-RPC client protocol. """ - def __init__(self, address='localhost', port=DEFAULT_PORT, user=None, password=None, http_handler=None, timeout=None): + def __init__(self, address='localhost', port=DEFAULT_PORT, user=None, password=None, http_handler=None, + timeout=None): if isinstance(timeout, (integer_types, float)): self._query_timeout = float(timeout) else: @@ -204,7 +209,8 @@ class Client(object): if timeout is None: timeout = self._query_timeout while True: - LOGGER.debug(json.dumps({'url': self.url, 'headers': headers, 'query': query, 'timeout': timeout}, indent=2)) + LOGGER.debug( + json.dumps({'url': self.url, 'headers': headers, 'query': query, 'timeout': timeout}, indent=2)) try: result = self.http_handler.request(self.url, query, headers, timeout) break @@ -244,8 +250,7 @@ class Client(object): elif require_ids: raise ValueError('request require ids') - query = json.dumps({'tag': self._sequence, 'method': method - , 'arguments': arguments}) + query = json.dumps({'tag': self._sequence, 'method': method, 'arguments': arguments}) self._sequence += 1 start = time.time() http_data = self._http_query(query, 
timeout) @@ -348,7 +353,7 @@ class Client(object): """ if self.rpc_version < version: LOGGER.warning('Using feature not supported by server. RPC version for server %d, feature introduced in %d.' - % (self.rpc_version, version)) + % (self.rpc_version, version)) def add_torrent(self, torrent, timeout=None, **kwargs): """ @@ -476,7 +481,7 @@ class Client(object): """ self._rpc_version_warning(3) self._request('torrent-remove', - {'delete-local-data':rpc_bool(delete_data)}, ids, True, timeout=timeout) + {'delete-local-data': rpc_bool(delete_data)}, ids, True, timeout=timeout) def remove(self, ids, delete_data=False, timeout=None): """ @@ -606,34 +611,34 @@ class Client(object): the new methods. list returns a dictionary indexed by torrent id. """ warnings.warn('list has been deprecated, please use get_torrent or get_torrents instead.', DeprecationWarning) - fields = ['id', 'hashString', 'name', 'sizeWhenDone', 'leftUntilDone' - , 'eta', 'status', 'rateUpload', 'rateDownload', 'uploadedEver' - , 'downloadedEver', 'uploadRatio', 'queuePosition'] + fields = ['id', 'hashString', 'name', 'sizeWhenDone', 'leftUntilDone', + 'eta', 'status', 'rateUpload', 'rateDownload', 'uploadedEver', + 'downloadedEver', 'uploadRatio', 'queuePosition'] return self._request('torrent-get', {'fields': fields}, timeout=timeout) def get_files(self, ids=None, timeout=None): """ - Get list of files for provided torrent id(s). If ids is empty, - information for all torrents are fetched. This function returns a dictionary - for each requested torrent id holding the information about the files. + Get list of files for provided torrent id(s). If ids is empty, + information for all torrents are fetched. This function returns a dictionary + for each requested torrent id holding the information about the files. - :: + :: - { - : { - : { - 'name': , - 'size': , - 'completed': , - 'priority': , - 'selected': - } + { + : { + : { + 'name': , + 'size': , + 'completed': , + 'priority': , + 'selected': + } - ... 
- } + ... + } - ... - } + ... + } """ fields = ['id', 'name', 'hashString', 'files', 'priorities', 'wanted'] request_result = self._request('torrent-get', {'fields': fields}, ids, timeout=timeout) @@ -645,22 +650,22 @@ class Client(object): def set_files(self, items, timeout=None): """ Set file properties. Takes a dictionary with similar contents as the result - of `get_files`. + of `get_files`. - :: + :: - { - : { - : { - 'priority': , - 'selected': - } + { + : { + : { + 'priority': , + 'selected': + } - ... - } + ... + } - ... - } + ... + } """ if not isinstance(items, dict): raise ValueError('Invalid file description') @@ -703,8 +708,8 @@ class Client(object): def change_torrent(self, ids, timeout=None, **kwargs): """ - Change torrent parameters for the torrent(s) with the supplied id's. The - parameters are: + Change torrent parameters for the torrent(s) with the supplied id's. The + parameters are: ============================ ===== =============== ======================================================================================= Argument RPC Replaced by Description @@ -736,13 +741,13 @@ class Client(object): ``uploadLimited`` 5 - Enable upload speed limiter. ============================ ===== =============== ======================================================================================= - .. NOTE:: - transmissionrpc will try to automatically fix argument errors. + .. NOTE:: + transmissionrpc will try to automatically fix argument errors. 
""" args = {} for key, value in iteritems(kwargs): argument = make_rpc_name(key) - (arg, val) = argument_value_convert('torrent-set' , argument, value, self.rpc_version) + (arg, val) = argument_value_convert('torrent-set', argument, value, self.rpc_version) args[arg] = val if len(args) > 0: @@ -814,7 +819,7 @@ class Client(object): """Move transfer to the bottom of the queue.""" self._rpc_version_warning(14) self._request('queue-move-bottom', ids=ids, require_ids=True, timeout=timeout) - + def queue_up(self, ids, timeout=None): """Move transfer up in the queue.""" self._rpc_version_warning(14) @@ -888,14 +893,14 @@ class Client(object): ================================ ===== ================= ========================================================================================================================== .. NOTE:: - transmissionrpc will try to automatically fix argument errors. + transmissionrpc will try to automatically fix argument errors. """ args = {} for key, value in iteritems(kwargs): if key == 'encryption' and value not in ['required', 'preferred', 'tolerated']: raise ValueError('Invalid encryption value') argument = make_rpc_name(key) - (arg, val) = argument_value_convert('session-set' , argument, value, self.rpc_version) + (arg, val) = argument_value_convert('session-set', argument, value, self.rpc_version) args[arg] = val if len(args) > 0: self._request('session-set', args, timeout=timeout) diff --git a/core/transmissionrpc/constants.py b/core/transmissionrpc/constants.py index 5237fac0..78e61dd5 100644 --- a/core/transmissionrpc/constants.py +++ b/core/transmissionrpc/constants.py @@ -6,10 +6,10 @@ import logging from core.transmissionrpc.six import iteritems - LOGGER = logging.getLogger('transmissionrpc') LOGGER.setLevel(logging.ERROR) + def mirror_dict(source): """ Creates a dictionary with all values as keys and all keys as values. 
@@ -17,38 +17,39 @@ def mirror_dict(source): source.update(dict((value, key) for key, value in iteritems(source))) return source + DEFAULT_PORT = 9091 DEFAULT_TIMEOUT = 30.0 -TR_PRI_LOW = -1 -TR_PRI_NORMAL = 0 -TR_PRI_HIGH = 1 +TR_PRI_LOW = -1 +TR_PRI_NORMAL = 0 +TR_PRI_HIGH = 1 PRIORITY = mirror_dict({ - 'low' : TR_PRI_LOW, - 'normal' : TR_PRI_NORMAL, - 'high' : TR_PRI_HIGH + 'low': TR_PRI_LOW, + 'normal': TR_PRI_NORMAL, + 'high': TR_PRI_HIGH }) -TR_RATIOLIMIT_GLOBAL = 0 # follow the global settings -TR_RATIOLIMIT_SINGLE = 1 # override the global settings, seeding until a certain ratio -TR_RATIOLIMIT_UNLIMITED = 2 # override the global settings, seeding regardless of ratio +TR_RATIOLIMIT_GLOBAL = 0 # follow the global settings +TR_RATIOLIMIT_SINGLE = 1 # override the global settings, seeding until a certain ratio +TR_RATIOLIMIT_UNLIMITED = 2 # override the global settings, seeding regardless of ratio RATIO_LIMIT = mirror_dict({ - 'global' : TR_RATIOLIMIT_GLOBAL, - 'single' : TR_RATIOLIMIT_SINGLE, - 'unlimited' : TR_RATIOLIMIT_UNLIMITED + 'global': TR_RATIOLIMIT_GLOBAL, + 'single': TR_RATIOLIMIT_SINGLE, + 'unlimited': TR_RATIOLIMIT_UNLIMITED }) -TR_IDLELIMIT_GLOBAL = 0 # follow the global settings -TR_IDLELIMIT_SINGLE = 1 # override the global settings, seeding until a certain idle time -TR_IDLELIMIT_UNLIMITED = 2 # override the global settings, seeding regardless of activity +TR_IDLELIMIT_GLOBAL = 0 # follow the global settings +TR_IDLELIMIT_SINGLE = 1 # override the global settings, seeding until a certain idle time +TR_IDLELIMIT_UNLIMITED = 2 # override the global settings, seeding regardless of activity IDLE_LIMIT = mirror_dict({ - 'global' : TR_RATIOLIMIT_GLOBAL, - 'single' : TR_RATIOLIMIT_SINGLE, - 'unlimited' : TR_RATIOLIMIT_UNLIMITED + 'global': TR_RATIOLIMIT_GLOBAL, + 'single': TR_RATIOLIMIT_SINGLE, + 'unlimited': TR_RATIOLIMIT_UNLIMITED }) # A note on argument maps @@ -62,236 +63,266 @@ IDLE_LIMIT = mirror_dict({ # Arguments for torrent methods 
TORRENT_ARGS = { - 'get' : { - 'activityDate': ('number', 1, None, None, None, 'Last time of upload or download activity.'), - 'addedDate': ('number', 1, None, None, None, 'The date when this torrent was first added.'), - 'announceResponse': ('string', 1, 7, None, None, 'The announce message from the tracker.'), - 'announceURL': ('string', 1, 7, None, None, 'Current announce URL.'), - 'bandwidthPriority': ('number', 5, None, None, None, 'Bandwidth priority. Low (-1), Normal (0) or High (1).'), - 'comment': ('string', 1, None, None, None, 'Torrent comment.'), - 'corruptEver': ('number', 1, None, None, None, 'Number of bytes of corrupt data downloaded.'), - 'creator': ('string', 1, None, None, None, 'Torrent creator.'), - 'dateCreated': ('number', 1, None, None, None, 'Torrent creation date.'), - 'desiredAvailable': ('number', 1, None, None, None, 'Number of bytes avalable and left to be downloaded.'), - 'doneDate': ('number', 1, None, None, None, 'The date when the torrent finished downloading.'), - 'downloadDir': ('string', 4, None, None, None, 'The directory path where the torrent is downloaded to.'), - 'downloadedEver': ('number', 1, None, None, None, 'Number of bytes of good data downloaded.'), - 'downloaders': ('number', 4, 7, None, None, 'Number of downloaders.'), - 'downloadLimit': ('number', 1, None, None, None, 'Download limit in Kbps.'), - 'downloadLimited': ('boolean', 5, None, None, None, 'Download limit is enabled'), - 'downloadLimitMode': ('number', 1, 5, None, None, 'Download limit mode. 0 means global, 1 means signle, 2 unlimited.'), - 'error': ('number', 1, None, None, None, 'Kind of error. 0 means OK, 1 means tracker warning, 2 means tracker error, 3 means local error.'), - 'errorString': ('number', 1, None, None, None, 'Error message.'), - 'eta': ('number', 1, None, None, None, 'Estimated number of seconds left when downloading or seeding. 
-1 means not available and -2 means unknown.'), - 'etaIdle': ('number', 15, None, None, None, 'Estimated number of seconds left until the idle time limit is reached. -1 means not available and -2 means unknown.'), - 'files': ('array', 1, None, None, None, 'Array of file object containing key, bytesCompleted, length and name.'), - 'fileStats': ('array', 5, None, None, None, 'Aray of file statistics containing bytesCompleted, wanted and priority.'), - 'hashString': ('string', 1, None, None, None, 'Hashstring unique for the torrent even between sessions.'), - 'haveUnchecked': ('number', 1, None, None, None, 'Number of bytes of partial pieces.'), - 'haveValid': ('number', 1, None, None, None, 'Number of bytes of checksum verified data.'), - 'honorsSessionLimits': ('boolean', 5, None, None, None, 'True if session upload limits are honored'), - 'id': ('number', 1, None, None, None, 'Session unique torrent id.'), - 'isFinished': ('boolean', 9, None, None, None, 'True if the torrent is finished. Downloaded and seeded.'), - 'isPrivate': ('boolean', 1, None, None, None, 'True if the torrent is private.'), - 'isStalled': ('boolean', 14, None, None, None, 'True if the torrent has stalled (been idle for a long time).'), - 'lastAnnounceTime': ('number', 1, 7, None, None, 'The time of the last announcement.'), - 'lastScrapeTime': ('number', 1, 7, None, None, 'The time af the last successful scrape.'), - 'leechers': ('number', 1, 7, None, None, 'Number of leechers.'), - 'leftUntilDone': ('number', 1, None, None, None, 'Number of bytes left until the download is done.'), - 'magnetLink': ('string', 7, None, None, None, 'The magnet link for this torrent.'), - 'manualAnnounceTime': ('number', 1, None, None, None, 'The time until you manually ask for more peers.'), - 'maxConnectedPeers': ('number', 1, None, None, None, 'Maximum of connected peers.'), - 'metadataPercentComplete': ('number', 7, None, None, None, 'Download progress of metadata. 
0.0 to 1.0.'), - 'name': ('string', 1, None, None, None, 'Torrent name.'), - 'nextAnnounceTime': ('number', 1, 7, None, None, 'Next announce time.'), - 'nextScrapeTime': ('number', 1, 7, None, None, 'Next scrape time.'), - 'peer-limit': ('number', 5, None, None, None, 'Maximum number of peers.'), - 'peers': ('array', 2, None, None, None, 'Array of peer objects.'), - 'peersConnected': ('number', 1, None, None, None, 'Number of peers we are connected to.'), - 'peersFrom': ('object', 1, None, None, None, 'Object containing download peers counts for different peer types.'), - 'peersGettingFromUs': ('number', 1, None, None, None, 'Number of peers we are sending data to.'), - 'peersKnown': ('number', 1, 13, None, None, 'Number of peers that the tracker knows.'), - 'peersSendingToUs': ('number', 1, None, None, None, 'Number of peers sending to us'), - 'percentDone': ('double', 5, None, None, None, 'Download progress of selected files. 0.0 to 1.0.'), - 'pieces': ('string', 5, None, None, None, 'String with base64 encoded bitfield indicating finished pieces.'), - 'pieceCount': ('number', 1, None, None, None, 'Number of pieces.'), - 'pieceSize': ('number', 1, None, None, None, 'Number of bytes in a piece.'), - 'priorities': ('array', 1, None, None, None, 'Array of file priorities.'), - 'queuePosition': ('number', 14, None, None, None, 'The queue position.'), - 'rateDownload': ('number', 1, None, None, None, 'Download rate in bps.'), - 'rateUpload': ('number', 1, None, None, None, 'Upload rate in bps.'), - 'recheckProgress': ('double', 1, None, None, None, 'Progress of recheck. 
0.0 to 1.0.'), - 'secondsDownloading': ('number', 15, None, None, None, ''), - 'secondsSeeding': ('number', 15, None, None, None, ''), - 'scrapeResponse': ('string', 1, 7, None, None, 'Scrape response message.'), - 'scrapeURL': ('string', 1, 7, None, None, 'Current scrape URL'), - 'seeders': ('number', 1, 7, None, None, 'Number of seeders reported by the tracker.'), - 'seedIdleLimit': ('number', 10, None, None, None, 'Idle limit in minutes.'), - 'seedIdleMode': ('number', 10, None, None, None, 'Use global (0), torrent (1), or unlimited (2) limit.'), - 'seedRatioLimit': ('double', 5, None, None, None, 'Seed ratio limit.'), - 'seedRatioMode': ('number', 5, None, None, None, 'Use global (0), torrent (1), or unlimited (2) limit.'), - 'sizeWhenDone': ('number', 1, None, None, None, 'Size of the torrent download in bytes.'), - 'startDate': ('number', 1, None, None, None, 'The date when the torrent was last started.'), - 'status': ('number', 1, None, None, None, 'Current status, see source'), - 'swarmSpeed': ('number', 1, 7, None, None, 'Estimated speed in Kbps in the swarm.'), - 'timesCompleted': ('number', 1, 7, None, None, 'Number of successful downloads reported by the tracker.'), - 'trackers': ('array', 1, None, None, None, 'Array of tracker objects.'), - 'trackerStats': ('object', 7, None, None, None, 'Array of object containing tracker statistics.'), - 'totalSize': ('number', 1, None, None, None, 'Total size of the torrent in bytes'), - 'torrentFile': ('string', 5, None, None, None, 'Path to .torrent file.'), - 'uploadedEver': ('number', 1, None, None, None, 'Number of bytes uploaded, ever.'), - 'uploadLimit': ('number', 1, None, None, None, 'Upload limit in Kbps'), - 'uploadLimitMode': ('number', 1, 5, None, None, 'Upload limit mode. 
0 means global, 1 means signle, 2 unlimited.'), - 'uploadLimited': ('boolean', 5, None, None, None, 'Upload limit enabled.'), - 'uploadRatio': ('double', 1, None, None, None, 'Seed ratio.'), - 'wanted': ('array', 1, None, None, None, 'Array of booleans indicated wanted files.'), - 'webseeds': ('array', 1, None, None, None, 'Array of webseeds objects'), - 'webseedsSendingToUs': ('number', 1, None, None, None, 'Number of webseeds seeding to us.'), + 'get': { + 'activityDate': ('number', 1, None, None, None, 'Last time of upload or download activity.'), + 'addedDate': ('number', 1, None, None, None, 'The date when this torrent was first added.'), + 'announceResponse': ('string', 1, 7, None, None, 'The announce message from the tracker.'), + 'announceURL': ('string', 1, 7, None, None, 'Current announce URL.'), + 'bandwidthPriority': ('number', 5, None, None, None, 'Bandwidth priority. Low (-1), Normal (0) or High (1).'), + 'comment': ('string', 1, None, None, None, 'Torrent comment.'), + 'corruptEver': ('number', 1, None, None, None, 'Number of bytes of corrupt data downloaded.'), + 'creator': ('string', 1, None, None, None, 'Torrent creator.'), + 'dateCreated': ('number', 1, None, None, None, 'Torrent creation date.'), + 'desiredAvailable': ('number', 1, None, None, None, 'Number of bytes avalable and left to be downloaded.'), + 'doneDate': ('number', 1, None, None, None, 'The date when the torrent finished downloading.'), + 'downloadDir': ('string', 4, None, None, None, 'The directory path where the torrent is downloaded to.'), + 'downloadedEver': ('number', 1, None, None, None, 'Number of bytes of good data downloaded.'), + 'downloaders': ('number', 4, 7, None, None, 'Number of downloaders.'), + 'downloadLimit': ('number', 1, None, None, None, 'Download limit in Kbps.'), + 'downloadLimited': ('boolean', 5, None, None, None, 'Download limit is enabled'), + 'downloadLimitMode': ( + 'number', 1, 5, None, None, 'Download limit mode. 
0 means global, 1 means signle, 2 unlimited.'), + 'error': ('number', 1, None, None, None, + 'Kind of error. 0 means OK, 1 means tracker warning, 2 means tracker error, 3 means local error.'), + 'errorString': ('number', 1, None, None, None, 'Error message.'), + 'eta': ('number', 1, None, None, None, + 'Estimated number of seconds left when downloading or seeding. -1 means not available and -2 means unknown.'), + 'etaIdle': ('number', 15, None, None, None, + 'Estimated number of seconds left until the idle time limit is reached. -1 means not available and -2 means unknown.'), + 'files': ( + 'array', 1, None, None, None, 'Array of file object containing key, bytesCompleted, length and name.'), + 'fileStats': ( + 'array', 5, None, None, None, 'Aray of file statistics containing bytesCompleted, wanted and priority.'), + 'hashString': ('string', 1, None, None, None, 'Hashstring unique for the torrent even between sessions.'), + 'haveUnchecked': ('number', 1, None, None, None, 'Number of bytes of partial pieces.'), + 'haveValid': ('number', 1, None, None, None, 'Number of bytes of checksum verified data.'), + 'honorsSessionLimits': ('boolean', 5, None, None, None, 'True if session upload limits are honored'), + 'id': ('number', 1, None, None, None, 'Session unique torrent id.'), + 'isFinished': ('boolean', 9, None, None, None, 'True if the torrent is finished. 
Downloaded and seeded.'), + 'isPrivate': ('boolean', 1, None, None, None, 'True if the torrent is private.'), + 'isStalled': ('boolean', 14, None, None, None, 'True if the torrent has stalled (been idle for a long time).'), + 'lastAnnounceTime': ('number', 1, 7, None, None, 'The time of the last announcement.'), + 'lastScrapeTime': ('number', 1, 7, None, None, 'The time af the last successful scrape.'), + 'leechers': ('number', 1, 7, None, None, 'Number of leechers.'), + 'leftUntilDone': ('number', 1, None, None, None, 'Number of bytes left until the download is done.'), + 'magnetLink': ('string', 7, None, None, None, 'The magnet link for this torrent.'), + 'manualAnnounceTime': ('number', 1, None, None, None, 'The time until you manually ask for more peers.'), + 'maxConnectedPeers': ('number', 1, None, None, None, 'Maximum of connected peers.'), + 'metadataPercentComplete': ('number', 7, None, None, None, 'Download progress of metadata. 0.0 to 1.0.'), + 'name': ('string', 1, None, None, None, 'Torrent name.'), + 'nextAnnounceTime': ('number', 1, 7, None, None, 'Next announce time.'), + 'nextScrapeTime': ('number', 1, 7, None, None, 'Next scrape time.'), + 'peer-limit': ('number', 5, None, None, None, 'Maximum number of peers.'), + 'peers': ('array', 2, None, None, None, 'Array of peer objects.'), + 'peersConnected': ('number', 1, None, None, None, 'Number of peers we are connected to.'), + 'peersFrom': ( + 'object', 1, None, None, None, 'Object containing download peers counts for different peer types.'), + 'peersGettingFromUs': ('number', 1, None, None, None, 'Number of peers we are sending data to.'), + 'peersKnown': ('number', 1, 13, None, None, 'Number of peers that the tracker knows.'), + 'peersSendingToUs': ('number', 1, None, None, None, 'Number of peers sending to us'), + 'percentDone': ('double', 5, None, None, None, 'Download progress of selected files. 
0.0 to 1.0.'), + 'pieces': ('string', 5, None, None, None, 'String with base64 encoded bitfield indicating finished pieces.'), + 'pieceCount': ('number', 1, None, None, None, 'Number of pieces.'), + 'pieceSize': ('number', 1, None, None, None, 'Number of bytes in a piece.'), + 'priorities': ('array', 1, None, None, None, 'Array of file priorities.'), + 'queuePosition': ('number', 14, None, None, None, 'The queue position.'), + 'rateDownload': ('number', 1, None, None, None, 'Download rate in bps.'), + 'rateUpload': ('number', 1, None, None, None, 'Upload rate in bps.'), + 'recheckProgress': ('double', 1, None, None, None, 'Progress of recheck. 0.0 to 1.0.'), + 'secondsDownloading': ('number', 15, None, None, None, ''), + 'secondsSeeding': ('number', 15, None, None, None, ''), + 'scrapeResponse': ('string', 1, 7, None, None, 'Scrape response message.'), + 'scrapeURL': ('string', 1, 7, None, None, 'Current scrape URL'), + 'seeders': ('number', 1, 7, None, None, 'Number of seeders reported by the tracker.'), + 'seedIdleLimit': ('number', 10, None, None, None, 'Idle limit in minutes.'), + 'seedIdleMode': ('number', 10, None, None, None, 'Use global (0), torrent (1), or unlimited (2) limit.'), + 'seedRatioLimit': ('double', 5, None, None, None, 'Seed ratio limit.'), + 'seedRatioMode': ('number', 5, None, None, None, 'Use global (0), torrent (1), or unlimited (2) limit.'), + 'sizeWhenDone': ('number', 1, None, None, None, 'Size of the torrent download in bytes.'), + 'startDate': ('number', 1, None, None, None, 'The date when the torrent was last started.'), + 'status': ('number', 1, None, None, None, 'Current status, see source'), + 'swarmSpeed': ('number', 1, 7, None, None, 'Estimated speed in Kbps in the swarm.'), + 'timesCompleted': ('number', 1, 7, None, None, 'Number of successful downloads reported by the tracker.'), + 'trackers': ('array', 1, None, None, None, 'Array of tracker objects.'), + 'trackerStats': ('object', 7, None, None, None, 'Array of object 
containing tracker statistics.'), + 'totalSize': ('number', 1, None, None, None, 'Total size of the torrent in bytes'), + 'torrentFile': ('string', 5, None, None, None, 'Path to .torrent file.'), + 'uploadedEver': ('number', 1, None, None, None, 'Number of bytes uploaded, ever.'), + 'uploadLimit': ('number', 1, None, None, None, 'Upload limit in Kbps'), + 'uploadLimitMode': ( + 'number', 1, 5, None, None, 'Upload limit mode. 0 means global, 1 means signle, 2 unlimited.'), + 'uploadLimited': ('boolean', 5, None, None, None, 'Upload limit enabled.'), + 'uploadRatio': ('double', 1, None, None, None, 'Seed ratio.'), + 'wanted': ('array', 1, None, None, None, 'Array of booleans indicated wanted files.'), + 'webseeds': ('array', 1, None, None, None, 'Array of webseeds objects'), + 'webseedsSendingToUs': ('number', 1, None, None, None, 'Number of webseeds seeding to us.'), }, 'set': { - 'bandwidthPriority': ('number', 5, None, None, None, 'Priority for this transfer.'), - 'downloadLimit': ('number', 5, None, 'speed-limit-down', None, 'Set the speed limit for download in Kib/s.'), - 'downloadLimited': ('boolean', 5, None, 'speed-limit-down-enabled', None, 'Enable download speed limiter.'), - 'files-wanted': ('array', 1, None, None, None, "A list of file id's that should be downloaded."), - 'files-unwanted': ('array', 1, None, None, None, "A list of file id's that shouldn't be downloaded."), - 'honorsSessionLimits': ('boolean', 5, None, None, None, "Enables or disables the transfer to honour the upload limit set in the session."), - 'location': ('array', 1, None, None, None, 'Local download location.'), - 'peer-limit': ('number', 1, None, None, None, 'The peer limit for the torrents.'), - 'priority-high': ('array', 1, None, None, None, "A list of file id's that should have high priority."), - 'priority-low': ('array', 1, None, None, None, "A list of file id's that should have normal priority."), - 'priority-normal': ('array', 1, None, None, None, "A list of file id's that 
should have low priority."), - 'queuePosition': ('number', 14, None, None, None, 'Position of this transfer in its queue.'), - 'seedIdleLimit': ('number', 10, None, None, None, 'Seed inactivity limit in minutes.'), - 'seedIdleMode': ('number', 10, None, None, None, 'Seed inactivity mode. 0 = Use session limit, 1 = Use transfer limit, 2 = Disable limit.'), - 'seedRatioLimit': ('double', 5, None, None, None, 'Seeding ratio.'), - 'seedRatioMode': ('number', 5, None, None, None, 'Which ratio to use. 0 = Use session limit, 1 = Use transfer limit, 2 = Disable limit.'), - 'speed-limit-down': ('number', 1, 5, None, 'downloadLimit', 'Set the speed limit for download in Kib/s.'), - 'speed-limit-down-enabled': ('boolean', 1, 5, None, 'downloadLimited', 'Enable download speed limiter.'), - 'speed-limit-up': ('number', 1, 5, None, 'uploadLimit', 'Set the speed limit for upload in Kib/s.'), - 'speed-limit-up-enabled': ('boolean', 1, 5, None, 'uploadLimited', 'Enable upload speed limiter.'), - 'trackerAdd': ('array', 10, None, None, None, 'Array of string with announce URLs to add.'), - 'trackerRemove': ('array', 10, None, None, None, 'Array of ids of trackers to remove.'), - 'trackerReplace': ('array', 10, None, None, None, 'Array of (id, url) tuples where the announce URL should be replaced.'), - 'uploadLimit': ('number', 5, None, 'speed-limit-up', None, 'Set the speed limit for upload in Kib/s.'), - 'uploadLimited': ('boolean', 5, None, 'speed-limit-up-enabled', None, 'Enable upload speed limiter.'), + 'bandwidthPriority': ('number', 5, None, None, None, 'Priority for this transfer.'), + 'downloadLimit': ('number', 5, None, 'speed-limit-down', None, 'Set the speed limit for download in Kib/s.'), + 'downloadLimited': ('boolean', 5, None, 'speed-limit-down-enabled', None, 'Enable download speed limiter.'), + 'files-wanted': ('array', 1, None, None, None, "A list of file id's that should be downloaded."), + 'files-unwanted': ('array', 1, None, None, None, "A list of file id's 
that shouldn't be downloaded."), + 'honorsSessionLimits': ('boolean', 5, None, None, None, + "Enables or disables the transfer to honour the upload limit set in the session."), + 'location': ('array', 1, None, None, None, 'Local download location.'), + 'peer-limit': ('number', 1, None, None, None, 'The peer limit for the torrents.'), + 'priority-high': ('array', 1, None, None, None, "A list of file id's that should have high priority."), + 'priority-low': ('array', 1, None, None, None, "A list of file id's that should have normal priority."), + 'priority-normal': ('array', 1, None, None, None, "A list of file id's that should have low priority."), + 'queuePosition': ('number', 14, None, None, None, 'Position of this transfer in its queue.'), + 'seedIdleLimit': ('number', 10, None, None, None, 'Seed inactivity limit in minutes.'), + 'seedIdleMode': ('number', 10, None, None, None, + 'Seed inactivity mode. 0 = Use session limit, 1 = Use transfer limit, 2 = Disable limit.'), + 'seedRatioLimit': ('double', 5, None, None, None, 'Seeding ratio.'), + 'seedRatioMode': ('number', 5, None, None, None, + 'Which ratio to use. 
0 = Use session limit, 1 = Use transfer limit, 2 = Disable limit.'), + 'speed-limit-down': ('number', 1, 5, None, 'downloadLimit', 'Set the speed limit for download in Kib/s.'), + 'speed-limit-down-enabled': ('boolean', 1, 5, None, 'downloadLimited', 'Enable download speed limiter.'), + 'speed-limit-up': ('number', 1, 5, None, 'uploadLimit', 'Set the speed limit for upload in Kib/s.'), + 'speed-limit-up-enabled': ('boolean', 1, 5, None, 'uploadLimited', 'Enable upload speed limiter.'), + 'trackerAdd': ('array', 10, None, None, None, 'Array of string with announce URLs to add.'), + 'trackerRemove': ('array', 10, None, None, None, 'Array of ids of trackers to remove.'), + 'trackerReplace': ( + 'array', 10, None, None, None, 'Array of (id, url) tuples where the announce URL should be replaced.'), + 'uploadLimit': ('number', 5, None, 'speed-limit-up', None, 'Set the speed limit for upload in Kib/s.'), + 'uploadLimited': ('boolean', 5, None, 'speed-limit-up-enabled', None, 'Enable upload speed limiter.'), }, 'add': { - 'bandwidthPriority': ('number', 8, None, None, None, 'Priority for this transfer.'), - 'download-dir': ('string', 1, None, None, None, 'The directory where the downloaded contents will be saved in.'), - 'cookies': ('string', 13, None, None, None, 'One or more HTTP cookie(s).'), - 'filename': ('string', 1, None, None, None, "A file path or URL to a torrent file or a magnet link."), - 'files-wanted': ('array', 1, None, None, None, "A list of file id's that should be downloaded."), - 'files-unwanted': ('array', 1, None, None, None, "A list of file id's that shouldn't be downloaded."), - 'metainfo': ('string', 1, None, None, None, 'The content of a torrent file, base64 encoded.'), - 'paused': ('boolean', 1, None, None, None, 'If True, does not start the transfer when added.'), - 'peer-limit': ('number', 1, None, None, None, 'Maximum number of peers allowed.'), - 'priority-high': ('array', 1, None, None, None, "A list of file id's that should have high 
priority."), - 'priority-low': ('array', 1, None, None, None, "A list of file id's that should have low priority."), - 'priority-normal': ('array', 1, None, None, None, "A list of file id's that should have normal priority."), + 'bandwidthPriority': ('number', 8, None, None, None, 'Priority for this transfer.'), + 'download-dir': ( + 'string', 1, None, None, None, 'The directory where the downloaded contents will be saved in.'), + 'cookies': ('string', 13, None, None, None, 'One or more HTTP cookie(s).'), + 'filename': ('string', 1, None, None, None, "A file path or URL to a torrent file or a magnet link."), + 'files-wanted': ('array', 1, None, None, None, "A list of file id's that should be downloaded."), + 'files-unwanted': ('array', 1, None, None, None, "A list of file id's that shouldn't be downloaded."), + 'metainfo': ('string', 1, None, None, None, 'The content of a torrent file, base64 encoded.'), + 'paused': ('boolean', 1, None, None, None, 'If True, does not start the transfer when added.'), + 'peer-limit': ('number', 1, None, None, None, 'Maximum number of peers allowed.'), + 'priority-high': ('array', 1, None, None, None, "A list of file id's that should have high priority."), + 'priority-low': ('array', 1, None, None, None, "A list of file id's that should have low priority."), + 'priority-normal': ('array', 1, None, None, None, "A list of file id's that should have normal priority."), } } # Arguments for session methods SESSION_ARGS = { 'get': { - "alt-speed-down": ('number', 5, None, None, None, 'Alternate session download speed limit (in Kib/s).'), - "alt-speed-enabled": ('boolean', 5, None, None, None, 'True if alternate global download speed limiter is ebabled.'), - "alt-speed-time-begin": ('number', 5, None, None, None, 'Time when alternate speeds should be enabled. 
Minutes after midnight.'), - "alt-speed-time-enabled": ('boolean', 5, None, None, None, 'True if alternate speeds scheduling is enabled.'), - "alt-speed-time-end": ('number', 5, None, None, None, 'Time when alternate speeds should be disabled. Minutes after midnight.'), - "alt-speed-time-day": ('number', 5, None, None, None, 'Days alternate speeds scheduling is enabled.'), - "alt-speed-up": ('number', 5, None, None, None, 'Alternate session upload speed limit (in Kib/s)'), - "blocklist-enabled": ('boolean', 5, None, None, None, 'True when blocklist is enabled.'), - "blocklist-size": ('number', 5, None, None, None, 'Number of rules in the blocklist'), - "blocklist-url": ('string', 11, None, None, None, 'Location of the block list. Updated with blocklist-update.'), - "cache-size-mb": ('number', 10, None, None, None, 'The maximum size of the disk cache in MB'), - "config-dir": ('string', 8, None, None, None, 'location of transmissions configuration directory'), - "dht-enabled": ('boolean', 6, None, None, None, 'True if DHT enabled.'), - "download-dir": ('string', 1, None, None, None, 'The download directory.'), - "download-dir-free-space": ('number', 12, None, None, None, 'Free space in the download directory, in bytes'), - "download-queue-size": ('number', 14, None, None, None, 'Number of slots in the download queue.'), - "download-queue-enabled": ('boolean', 14, None, None, None, 'True if the download queue is enabled.'), - "encryption": ('string', 1, None, None, None, 'Encryption mode, one of ``required``, ``preferred`` or ``tolerated``.'), - "idle-seeding-limit": ('number', 10, None, None, None, 'Seed inactivity limit in minutes.'), - "idle-seeding-limit-enabled": ('boolean', 10, None, None, None, 'True if the seed activity limit is enabled.'), - "incomplete-dir": ('string', 7, None, None, None, 'The path to the directory for incomplete torrent transfer data.'), - "incomplete-dir-enabled": ('boolean', 7, None, None, None, 'True if the incomplete dir is enabled.'), 
- "lpd-enabled": ('boolean', 9, None, None, None, 'True if local peer discovery is enabled.'), - "peer-limit": ('number', 1, 5, None, 'peer-limit-global', 'Maximum number of peers.'), - "peer-limit-global": ('number', 5, None, 'peer-limit', None, 'Maximum number of peers.'), - "peer-limit-per-torrent": ('number', 5, None, None, None, 'Maximum number of peers per transfer.'), - "pex-allowed": ('boolean', 1, 5, None, 'pex-enabled', 'True if PEX is allowed.'), - "pex-enabled": ('boolean', 5, None, 'pex-allowed', None, 'True if PEX is enabled.'), - "port": ('number', 1, 5, None, 'peer-port', 'Peer port.'), - "peer-port": ('number', 5, None, 'port', None, 'Peer port.'), - "peer-port-random-on-start": ('boolean', 5, None, None, None, 'Enables randomized peer port on start of Transmission.'), - "port-forwarding-enabled": ('boolean', 1, None, None, None, 'True if port forwarding is enabled.'), - "queue-stalled-minutes": ('number', 14, None, None, None, 'Number of minutes of idle that marks a transfer as stalled.'), - "queue-stalled-enabled": ('boolean', 14, None, None, None, 'True if stalled tracking of transfers is enabled.'), - "rename-partial-files": ('boolean', 8, None, None, None, 'True if ".part" is appended to incomplete files'), - "rpc-version": ('number', 4, None, None, None, 'Transmission RPC API Version.'), - "rpc-version-minimum": ('number', 4, None, None, None, 'Minimum accepted RPC API Version.'), - "script-torrent-done-enabled": ('boolean', 9, None, None, None, 'True if the done script is enabled.'), - "script-torrent-done-filename": ('string', 9, None, None, None, 'Filename of the script to run when the transfer is done.'), - "seedRatioLimit": ('double', 5, None, None, None, 'Seed ratio limit. 
1.0 means 1:1 download and upload ratio.'), - "seedRatioLimited": ('boolean', 5, None, None, None, 'True if seed ration limit is enabled.'), - "seed-queue-size": ('number', 14, None, None, None, 'Number of slots in the upload queue.'), - "seed-queue-enabled": ('boolean', 14, None, None, None, 'True if upload queue is enabled.'), - "speed-limit-down": ('number', 1, None, None, None, 'Download speed limit (in Kib/s).'), - "speed-limit-down-enabled": ('boolean', 1, None, None, None, 'True if the download speed is limited.'), - "speed-limit-up": ('number', 1, None, None, None, 'Upload speed limit (in Kib/s).'), - "speed-limit-up-enabled": ('boolean', 1, None, None, None, 'True if the upload speed is limited.'), - "start-added-torrents": ('boolean', 9, None, None, None, 'When true uploaded torrents will start right away.'), - "trash-original-torrent-files": ('boolean', 9, None, None, None, 'When true added .torrent files will be deleted.'), - 'units': ('object', 10, None, None, None, 'An object containing units for size and speed.'), - 'utp-enabled': ('boolean', 13, None, None, None, 'True if Micro Transport Protocol (UTP) is enabled.'), - "version": ('string', 3, None, None, None, 'Transmission version.'), + "alt-speed-down": ('number', 5, None, None, None, 'Alternate session download speed limit (in Kib/s).'), + "alt-speed-enabled": ( + 'boolean', 5, None, None, None, 'True if alternate global download speed limiter is ebabled.'), + "alt-speed-time-begin": ( + 'number', 5, None, None, None, 'Time when alternate speeds should be enabled. Minutes after midnight.'), + "alt-speed-time-enabled": ('boolean', 5, None, None, None, 'True if alternate speeds scheduling is enabled.'), + "alt-speed-time-end": ( + 'number', 5, None, None, None, 'Time when alternate speeds should be disabled. 
Minutes after midnight.'), + "alt-speed-time-day": ('number', 5, None, None, None, 'Days alternate speeds scheduling is enabled.'), + "alt-speed-up": ('number', 5, None, None, None, 'Alternate session upload speed limit (in Kib/s)'), + "blocklist-enabled": ('boolean', 5, None, None, None, 'True when blocklist is enabled.'), + "blocklist-size": ('number', 5, None, None, None, 'Number of rules in the blocklist'), + "blocklist-url": ('string', 11, None, None, None, 'Location of the block list. Updated with blocklist-update.'), + "cache-size-mb": ('number', 10, None, None, None, 'The maximum size of the disk cache in MB'), + "config-dir": ('string', 8, None, None, None, 'location of transmissions configuration directory'), + "dht-enabled": ('boolean', 6, None, None, None, 'True if DHT enabled.'), + "download-dir": ('string', 1, None, None, None, 'The download directory.'), + "download-dir-free-space": ('number', 12, None, None, None, 'Free space in the download directory, in bytes'), + "download-queue-size": ('number', 14, None, None, None, 'Number of slots in the download queue.'), + "download-queue-enabled": ('boolean', 14, None, None, None, 'True if the download queue is enabled.'), + "encryption": ( + 'string', 1, None, None, None, 'Encryption mode, one of ``required``, ``preferred`` or ``tolerated``.'), + "idle-seeding-limit": ('number', 10, None, None, None, 'Seed inactivity limit in minutes.'), + "idle-seeding-limit-enabled": ('boolean', 10, None, None, None, 'True if the seed activity limit is enabled.'), + "incomplete-dir": ( + 'string', 7, None, None, None, 'The path to the directory for incomplete torrent transfer data.'), + "incomplete-dir-enabled": ('boolean', 7, None, None, None, 'True if the incomplete dir is enabled.'), + "lpd-enabled": ('boolean', 9, None, None, None, 'True if local peer discovery is enabled.'), + "peer-limit": ('number', 1, 5, None, 'peer-limit-global', 'Maximum number of peers.'), + "peer-limit-global": ('number', 5, None, 
'peer-limit', None, 'Maximum number of peers.'), + "peer-limit-per-torrent": ('number', 5, None, None, None, 'Maximum number of peers per transfer.'), + "pex-allowed": ('boolean', 1, 5, None, 'pex-enabled', 'True if PEX is allowed.'), + "pex-enabled": ('boolean', 5, None, 'pex-allowed', None, 'True if PEX is enabled.'), + "port": ('number', 1, 5, None, 'peer-port', 'Peer port.'), + "peer-port": ('number', 5, None, 'port', None, 'Peer port.'), + "peer-port-random-on-start": ( + 'boolean', 5, None, None, None, 'Enables randomized peer port on start of Transmission.'), + "port-forwarding-enabled": ('boolean', 1, None, None, None, 'True if port forwarding is enabled.'), + "queue-stalled-minutes": ( + 'number', 14, None, None, None, 'Number of minutes of idle that marks a transfer as stalled.'), + "queue-stalled-enabled": ('boolean', 14, None, None, None, 'True if stalled tracking of transfers is enabled.'), + "rename-partial-files": ('boolean', 8, None, None, None, 'True if ".part" is appended to incomplete files'), + "rpc-version": ('number', 4, None, None, None, 'Transmission RPC API Version.'), + "rpc-version-minimum": ('number', 4, None, None, None, 'Minimum accepted RPC API Version.'), + "script-torrent-done-enabled": ('boolean', 9, None, None, None, 'True if the done script is enabled.'), + "script-torrent-done-filename": ( + 'string', 9, None, None, None, 'Filename of the script to run when the transfer is done.'), + "seedRatioLimit": ('double', 5, None, None, None, 'Seed ratio limit. 
1.0 means 1:1 download and upload ratio.'), + "seedRatioLimited": ('boolean', 5, None, None, None, 'True if seed ration limit is enabled.'), + "seed-queue-size": ('number', 14, None, None, None, 'Number of slots in the upload queue.'), + "seed-queue-enabled": ('boolean', 14, None, None, None, 'True if upload queue is enabled.'), + "speed-limit-down": ('number', 1, None, None, None, 'Download speed limit (in Kib/s).'), + "speed-limit-down-enabled": ('boolean', 1, None, None, None, 'True if the download speed is limited.'), + "speed-limit-up": ('number', 1, None, None, None, 'Upload speed limit (in Kib/s).'), + "speed-limit-up-enabled": ('boolean', 1, None, None, None, 'True if the upload speed is limited.'), + "start-added-torrents": ('boolean', 9, None, None, None, 'When true uploaded torrents will start right away.'), + "trash-original-torrent-files": ( + 'boolean', 9, None, None, None, 'When true added .torrent files will be deleted.'), + 'units': ('object', 10, None, None, None, 'An object containing units for size and speed.'), + 'utp-enabled': ('boolean', 13, None, None, None, 'True if Micro Transport Protocol (UTP) is enabled.'), + "version": ('string', 3, None, None, None, 'Transmission version.'), }, 'set': { - "alt-speed-down": ('number', 5, None, None, None, 'Alternate session download speed limit (in Kib/s).'), - "alt-speed-enabled": ('boolean', 5, None, None, None, 'Enables alternate global download speed limiter.'), - "alt-speed-time-begin": ('number', 5, None, None, None, 'Time when alternate speeds should be enabled. Minutes after midnight.'), - "alt-speed-time-enabled": ('boolean', 5, None, None, None, 'Enables alternate speeds scheduling.'), - "alt-speed-time-end": ('number', 5, None, None, None, 'Time when alternate speeds should be disabled. 
Minutes after midnight.'), - "alt-speed-time-day": ('number', 5, None, None, None, 'Enables alternate speeds scheduling these days.'), - "alt-speed-up": ('number', 5, None, None, None, 'Alternate session upload speed limit (in Kib/s).'), - "blocklist-enabled": ('boolean', 5, None, None, None, 'Enables the block list'), - "blocklist-url": ('string', 11, None, None, None, 'Location of the block list. Updated with blocklist-update.'), - "cache-size-mb": ('number', 10, None, None, None, 'The maximum size of the disk cache in MB'), - "dht-enabled": ('boolean', 6, None, None, None, 'Enables DHT.'), - "download-dir": ('string', 1, None, None, None, 'Set the session download directory.'), - "download-queue-size": ('number', 14, None, None, None, 'Number of slots in the download queue.'), - "download-queue-enabled": ('boolean', 14, None, None, None, 'Enables download queue.'), - "encryption": ('string', 1, None, None, None, 'Set the session encryption mode, one of ``required``, ``preferred`` or ``tolerated``.'), - "idle-seeding-limit": ('number', 10, None, None, None, 'The default seed inactivity limit in minutes.'), - "idle-seeding-limit-enabled": ('boolean', 10, None, None, None, 'Enables the default seed inactivity limit'), - "incomplete-dir": ('string', 7, None, None, None, 'The path to the directory of incomplete transfer data.'), - "incomplete-dir-enabled": ('boolean', 7, None, None, None, 'Enables the incomplete transfer data directory. 
Otherwise data for incomplete transfers are stored in the download target.'), - "lpd-enabled": ('boolean', 9, None, None, None, 'Enables local peer discovery for public torrents.'), - "peer-limit": ('number', 1, 5, None, 'peer-limit-global', 'Maximum number of peers.'), - "peer-limit-global": ('number', 5, None, 'peer-limit', None, 'Maximum number of peers.'), - "peer-limit-per-torrent": ('number', 5, None, None, None, 'Maximum number of peers per transfer.'), - "pex-allowed": ('boolean', 1, 5, None, 'pex-enabled', 'Allowing PEX in public torrents.'), - "pex-enabled": ('boolean', 5, None, 'pex-allowed', None, 'Allowing PEX in public torrents.'), - "port": ('number', 1, 5, None, 'peer-port', 'Peer port.'), - "peer-port": ('number', 5, None, 'port', None, 'Peer port.'), - "peer-port-random-on-start": ('boolean', 5, None, None, None, 'Enables randomized peer port on start of Transmission.'), - "port-forwarding-enabled": ('boolean', 1, None, None, None, 'Enables port forwarding.'), - "rename-partial-files": ('boolean', 8, None, None, None, 'Appends ".part" to incomplete files'), - "queue-stalled-minutes": ('number', 14, None, None, None, 'Number of minutes of idle that marks a transfer as stalled.'), - "queue-stalled-enabled": ('boolean', 14, None, None, None, 'Enable tracking of stalled transfers.'), - "script-torrent-done-enabled": ('boolean', 9, None, None, None, 'Whether or not to call the "done" script.'), - "script-torrent-done-filename": ('string', 9, None, None, None, 'Filename of the script to run when the transfer is done.'), - "seed-queue-size": ('number', 14, None, None, None, 'Number of slots in the upload queue.'), - "seed-queue-enabled": ('boolean', 14, None, None, None, 'Enables upload queue.'), - "seedRatioLimit": ('double', 5, None, None, None, 'Seed ratio limit. 
1.0 means 1:1 download and upload ratio.'), - "seedRatioLimited": ('boolean', 5, None, None, None, 'Enables seed ration limit.'), - "speed-limit-down": ('number', 1, None, None, None, 'Download speed limit (in Kib/s).'), - "speed-limit-down-enabled": ('boolean', 1, None, None, None, 'Enables download speed limiting.'), - "speed-limit-up": ('number', 1, None, None, None, 'Upload speed limit (in Kib/s).'), - "speed-limit-up-enabled": ('boolean', 1, None, None, None, 'Enables upload speed limiting.'), - "start-added-torrents": ('boolean', 9, None, None, None, 'Added torrents will be started right away.'), - "trash-original-torrent-files": ('boolean', 9, None, None, None, 'The .torrent file of added torrents will be deleted.'), - 'utp-enabled': ('boolean', 13, None, None, None, 'Enables Micro Transport Protocol (UTP).'), + "alt-speed-down": ('number', 5, None, None, None, 'Alternate session download speed limit (in Kib/s).'), + "alt-speed-enabled": ('boolean', 5, None, None, None, 'Enables alternate global download speed limiter.'), + "alt-speed-time-begin": ( + 'number', 5, None, None, None, 'Time when alternate speeds should be enabled. Minutes after midnight.'), + "alt-speed-time-enabled": ('boolean', 5, None, None, None, 'Enables alternate speeds scheduling.'), + "alt-speed-time-end": ( + 'number', 5, None, None, None, 'Time when alternate speeds should be disabled. Minutes after midnight.'), + "alt-speed-time-day": ('number', 5, None, None, None, 'Enables alternate speeds scheduling these days.'), + "alt-speed-up": ('number', 5, None, None, None, 'Alternate session upload speed limit (in Kib/s).'), + "blocklist-enabled": ('boolean', 5, None, None, None, 'Enables the block list'), + "blocklist-url": ('string', 11, None, None, None, 'Location of the block list. 
Updated with blocklist-update.'), + "cache-size-mb": ('number', 10, None, None, None, 'The maximum size of the disk cache in MB'), + "dht-enabled": ('boolean', 6, None, None, None, 'Enables DHT.'), + "download-dir": ('string', 1, None, None, None, 'Set the session download directory.'), + "download-queue-size": ('number', 14, None, None, None, 'Number of slots in the download queue.'), + "download-queue-enabled": ('boolean', 14, None, None, None, 'Enables download queue.'), + "encryption": ('string', 1, None, None, None, + 'Set the session encryption mode, one of ``required``, ``preferred`` or ``tolerated``.'), + "idle-seeding-limit": ('number', 10, None, None, None, 'The default seed inactivity limit in minutes.'), + "idle-seeding-limit-enabled": ('boolean', 10, None, None, None, 'Enables the default seed inactivity limit'), + "incomplete-dir": ('string', 7, None, None, None, 'The path to the directory of incomplete transfer data.'), + "incomplete-dir-enabled": ('boolean', 7, None, None, None, + 'Enables the incomplete transfer data directory. 
Otherwise data for incomplete transfers are stored in the download target.'), + "lpd-enabled": ('boolean', 9, None, None, None, 'Enables local peer discovery for public torrents.'), + "peer-limit": ('number', 1, 5, None, 'peer-limit-global', 'Maximum number of peers.'), + "peer-limit-global": ('number', 5, None, 'peer-limit', None, 'Maximum number of peers.'), + "peer-limit-per-torrent": ('number', 5, None, None, None, 'Maximum number of peers per transfer.'), + "pex-allowed": ('boolean', 1, 5, None, 'pex-enabled', 'Allowing PEX in public torrents.'), + "pex-enabled": ('boolean', 5, None, 'pex-allowed', None, 'Allowing PEX in public torrents.'), + "port": ('number', 1, 5, None, 'peer-port', 'Peer port.'), + "peer-port": ('number', 5, None, 'port', None, 'Peer port.'), + "peer-port-random-on-start": ( + 'boolean', 5, None, None, None, 'Enables randomized peer port on start of Transmission.'), + "port-forwarding-enabled": ('boolean', 1, None, None, None, 'Enables port forwarding.'), + "rename-partial-files": ('boolean', 8, None, None, None, 'Appends ".part" to incomplete files'), + "queue-stalled-minutes": ( + 'number', 14, None, None, None, 'Number of minutes of idle that marks a transfer as stalled.'), + "queue-stalled-enabled": ('boolean', 14, None, None, None, 'Enable tracking of stalled transfers.'), + "script-torrent-done-enabled": ('boolean', 9, None, None, None, 'Whether or not to call the "done" script.'), + "script-torrent-done-filename": ( + 'string', 9, None, None, None, 'Filename of the script to run when the transfer is done.'), + "seed-queue-size": ('number', 14, None, None, None, 'Number of slots in the upload queue.'), + "seed-queue-enabled": ('boolean', 14, None, None, None, 'Enables upload queue.'), + "seedRatioLimit": ('double', 5, None, None, None, 'Seed ratio limit. 
1.0 means 1:1 download and upload ratio.'), + "seedRatioLimited": ('boolean', 5, None, None, None, 'Enables seed ration limit.'), + "speed-limit-down": ('number', 1, None, None, None, 'Download speed limit (in Kib/s).'), + "speed-limit-down-enabled": ('boolean', 1, None, None, None, 'Enables download speed limiting.'), + "speed-limit-up": ('number', 1, None, None, None, 'Upload speed limit (in Kib/s).'), + "speed-limit-up-enabled": ('boolean', 1, None, None, None, 'Enables upload speed limiting.'), + "start-added-torrents": ('boolean', 9, None, None, None, 'Added torrents will be started right away.'), + "trash-original-torrent-files": ( + 'boolean', 9, None, None, None, 'The .torrent file of added torrents will be deleted.'), + 'utp-enabled': ('boolean', 13, None, None, None, 'Enables Micro Transport Protocol (UTP).'), }, } diff --git a/core/transmissionrpc/error.py b/core/transmissionrpc/error.py index fed65a43..6b44bf32 100644 --- a/core/transmissionrpc/error.py +++ b/core/transmissionrpc/error.py @@ -4,11 +4,13 @@ from core.transmissionrpc.six import string_types, integer_types + class TransmissionError(Exception): """ - This exception is raised when there has occurred an error related to - communication with Transmission. It is a subclass of Exception. + This exception is raised when there has occurred an error related to + communication with Transmission. It is a subclass of Exception. """ + def __init__(self, message='', original=None): Exception.__init__(self) self.message = message @@ -21,11 +23,13 @@ class TransmissionError(Exception): else: return self.message + class HTTPHandlerError(Exception): """ - This exception is raised when there has occurred an error related to - the HTTP handler. It is a subclass of Exception. + This exception is raised when there has occurred an error related to + the HTTP handler. It is a subclass of Exception. 
""" + def __init__(self, httpurl=None, httpcode=None, httpmsg=None, httpheaders=None, httpdata=None): Exception.__init__(self) self.url = '' diff --git a/core/transmissionrpc/httphandler.py b/core/transmissionrpc/httphandler.py index 0904206f..1e884399 100644 --- a/core/transmissionrpc/httphandler.py +++ b/core/transmissionrpc/httphandler.py @@ -18,10 +18,12 @@ else: from urllib2 import HTTPError, URLError from httplib import BadStatusLine + class HTTPHandler(object): """ Prototype for HTTP handling. """ + def set_authentication(self, uri, login, password): """ Transmission use basic authentication in earlier versions and digest @@ -44,10 +46,12 @@ class HTTPHandler(object): """ raise NotImplementedError("Bad HTTPHandler, failed to implement request.") + class DefaultHTTPHandler(HTTPHandler): """ The default HTTP handler provided with transmissionrpc. """ + def __init__(self): HTTPHandler.__init__(self) self.http_opener = build_opener() diff --git a/core/transmissionrpc/session.py b/core/transmissionrpc/session.py index bd2c4e2e..6b620373 100644 --- a/core/transmissionrpc/session.py +++ b/core/transmissionrpc/session.py @@ -6,6 +6,7 @@ from core.transmissionrpc.utils import Field from core.transmissionrpc.six import iteritems, integer_types + class Session(object): """ Session is a class holding the session data for a Transmission daemon. diff --git a/core/transmissionrpc/six.py b/core/transmissionrpc/six.py index b73b777a..0554cddc 100644 --- a/core/transmissionrpc/six.py +++ b/core/transmissionrpc/six.py @@ -28,7 +28,6 @@ import types __author__ = "Benjamin Peterson " __version__ = "1.4.1" - # Useful for very coarse version differentiation. 
PY2 = sys.version_info[0] == 2 PY3 = sys.version_info[0] == 3 @@ -56,6 +55,8 @@ else: class X(object): def __len__(self): return 1 << 31 + + try: len(X()) except OverflowError: @@ -79,7 +80,6 @@ def _import_module(name): class _LazyDescr(object): - def __init__(self, name): self.name = name @@ -92,7 +92,6 @@ class _LazyDescr(object): class MovedModule(_LazyDescr): - def __init__(self, name, old, new=None): super(MovedModule, self).__init__(name) if PY3: @@ -107,7 +106,6 @@ class MovedModule(_LazyDescr): class MovedAttribute(_LazyDescr): - def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None): super(MovedAttribute, self).__init__(name) if PY3: @@ -131,7 +129,6 @@ class MovedAttribute(_LazyDescr): return getattr(module, self.attr) - class _MovedItems(types.ModuleType): """Lazy loading of moved objects""" @@ -199,7 +196,6 @@ del attr moves = sys.modules[__name__ + ".moves"] = _MovedItems(__name__ + ".moves") - class Module_six_moves_urllib_parse(types.ModuleType): """Lazy loading of moved objects in six.moves.urllib_parse""" @@ -320,8 +316,10 @@ for attr in _urllib_robotparser_moved_attributes: setattr(Module_six_moves_urllib_robotparser, attr.name, attr) del attr -sys.modules[__name__ + ".moves.urllib_robotparser"] = Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib_robotparser") -sys.modules[__name__ + ".moves.urllib.robotparser"] = Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser") +sys.modules[__name__ + ".moves.urllib_robotparser"] = Module_six_moves_urllib_robotparser( + __name__ + ".moves.urllib_robotparser") +sys.modules[__name__ + ".moves.urllib.robotparser"] = Module_six_moves_urllib_robotparser( + __name__ + ".moves.urllib.robotparser") class Module_six_moves_urllib(types.ModuleType): @@ -379,7 +377,6 @@ else: _iteritems = "iteritems" _iterlists = "iterlists" - try: advance_iterator = next except NameError: @@ -387,18 +384,17 @@ except NameError: return it.next() next = advance_iterator - try: callable 
= callable except NameError: def callable(obj): return any("__call__" in klass.__dict__ for klass in type(obj).__mro__) - if PY3: def get_unbound_function(unbound): return unbound + create_bound_method = types.MethodType Iterator = object @@ -406,19 +402,21 @@ else: def get_unbound_function(unbound): return unbound.im_func + def create_bound_method(func, obj): return types.MethodType(func, obj, obj.__class__) + class Iterator(object): def next(self): return type(self).__next__(self) + callable = callable _add_doc(get_unbound_function, """Get the function out of a possibly unbound function""") - get_method_function = operator.attrgetter(_meth_func) get_method_self = operator.attrgetter(_meth_self) get_function_closure = operator.attrgetter(_func_closure) @@ -431,14 +429,17 @@ def iterkeys(d, **kw): """Return an iterator over the keys of a dictionary.""" return iter(getattr(d, _iterkeys)(**kw)) + def itervalues(d, **kw): """Return an iterator over the values of a dictionary.""" return iter(getattr(d, _itervalues)(**kw)) + def iteritems(d, **kw): """Return an iterator over the (key, value) pairs of a dictionary.""" return iter(getattr(d, _iteritems)(**kw)) + def iterlists(d, **kw): """Return an iterator over the (key, [values]) pairs of a dictionary.""" return iter(getattr(d, _iterlists)(**kw)) @@ -447,8 +448,12 @@ def iterlists(d, **kw): if PY3: def b(s): return s.encode("latin-1") + + def u(s): return s + + unichr = chr if sys.version_info[1] <= 1: def int2byte(i): @@ -460,29 +465,43 @@ if PY3: indexbytes = operator.getitem iterbytes = iter import io + StringIO = io.StringIO BytesIO = io.BytesIO else: def b(s): return s + + def u(s): return unicode(s, "unicode_escape") + + unichr = unichr int2byte = chr + + def byte2int(bs): return ord(bs[0]) + + def indexbytes(buf, i): return ord(buf[i]) + + def iterbytes(buf): return (ord(byte) for byte in buf) + + import StringIO + StringIO = BytesIO = StringIO.StringIO _add_doc(b, """Byte literal""") _add_doc(u, """Text 
literal""") - if PY3: import builtins + exec_ = getattr(builtins, "exec") @@ -506,7 +525,7 @@ else: del frame elif _locs_ is None: _locs_ = _globs_ - exec("""exec _code_ in _globs_, _locs_""") + exec ("""exec _code_ in _globs_, _locs_""") exec_("""def reraise(tp, value, tb=None): @@ -519,10 +538,12 @@ else: fp = kwargs.pop("file", sys.stdout) if fp is None: return + def write(data): if not isinstance(data, basestring): data = str(data) fp.write(data) + want_unicode = False sep = kwargs.pop("sep", None) if sep is not None: @@ -566,8 +587,10 @@ def with_metaclass(meta, *bases): """Create a base class with a metaclass.""" return meta("NewBase", bases, {}) + def add_metaclass(metaclass): """Class decorator for creating a class with a metaclass.""" + def wrapper(cls): orig_vars = cls.__dict__.copy() orig_vars.pop('__dict__', None) @@ -575,4 +598,5 @@ def add_metaclass(metaclass): for slots_var in orig_vars.get('__slots__', ()): orig_vars.pop(slots_var) return metaclass(cls.__name__, cls.__bases__, orig_vars) + return wrapper diff --git a/core/transmissionrpc/torrent.py b/core/transmissionrpc/torrent.py index eaf7a52d..a0813464 100644 --- a/core/transmissionrpc/torrent.py +++ b/core/transmissionrpc/torrent.py @@ -13,14 +13,15 @@ from six import integer_types, string_types, text_type, iteritems def get_status_old(code): """Get the torrent status using old status codes""" mapping = { - (1<<0): 'check pending', - (1<<1): 'checking', - (1<<2): 'downloading', - (1<<3): 'seeding', - (1<<4): 'stopped', + (1 << 0): 'check pending', + (1 << 1): 'checking', + (1 << 2): 'downloading', + (1 << 3): 'seeding', + (1 << 4): 'stopped', } return mapping[code] + def get_status_new(code): """Get the torrent status using new status codes""" mapping = { @@ -34,6 +35,7 @@ def get_status_new(code): } return mapping[code] + class Torrent(object): """ Torrent is a class holding the data received from Transmission regarding a bittorrent transfer. 
@@ -99,8 +101,9 @@ class Torrent(object): def _dirty_fields(self): """Enumerate changed fields""" - outgoing_keys = ['bandwidthPriority', 'downloadLimit', 'downloadLimited', 'peer_limit', 'queuePosition' - , 'seedIdleLimit', 'seedIdleMode', 'seedRatioLimit', 'seedRatioMode', 'uploadLimit', 'uploadLimited'] + outgoing_keys = ['bandwidthPriority', 'downloadLimit', 'downloadLimited', 'peer_limit', 'queuePosition', + 'seedIdleLimit', 'seedIdleMode', 'seedRatioLimit', 'seedRatioMode', 'uploadLimit', + 'uploadLimited'] fields = [] for key in outgoing_keys: if key in self._fields and self._fields[key].dirty: @@ -131,7 +134,7 @@ class Torrent(object): else: raise ValueError('Cannot update with supplied data') self._incoming_pending = False - + def _status(self): """Get the torrent status""" code = self._fields['status'].value @@ -270,7 +273,8 @@ class Torrent(object): else: raise ValueError("Not a valid limit") - download_limit = property(_get_download_limit, _set_download_limit, None, "Download limit in Kbps or None. This is a mutator.") + download_limit = property(_get_download_limit, _set_download_limit, None, + "Download limit in Kbps or None. This is a mutator.") def _get_peer_limit(self): """ @@ -307,7 +311,7 @@ class Torrent(object): self._push() priority = property(_get_priority, _set_priority, None - , "Bandwidth priority as string. Can be one of 'low', 'normal', 'high'. This is a mutator.") + , "Bandwidth priority as string. Can be one of 'low', 'normal', 'high'. This is a mutator.") def _get_seed_idle_limit(self): """ @@ -326,7 +330,7 @@ class Torrent(object): raise ValueError("Not a valid limit") seed_idle_limit = property(_get_seed_idle_limit, _set_seed_idle_limit, None - , "Torrent seed idle limit in minutes. Also see seed_idle_mode. This is a mutator.") + , "Torrent seed idle limit in minutes. Also see seed_idle_mode. 
This is a mutator.") def _get_seed_idle_mode(self): """ @@ -345,7 +349,7 @@ class Torrent(object): raise ValueError("Not a valid limit") seed_idle_mode = property(_get_seed_idle_mode, _set_seed_idle_mode, None, - """ + """ Seed idle mode as string. Can be one of 'global', 'single' or 'unlimited'. * global, use session seed idle limit. @@ -354,7 +358,7 @@ class Torrent(object): This is a mutator. """ - ) + ) def _get_seed_ratio_limit(self): """ @@ -373,7 +377,7 @@ class Torrent(object): raise ValueError("Not a valid limit") seed_ratio_limit = property(_get_seed_ratio_limit, _set_seed_ratio_limit, None - , "Torrent seed ratio limit as float. Also see seed_ratio_mode. This is a mutator.") + , "Torrent seed ratio limit as float. Also see seed_ratio_mode. This is a mutator.") def _get_seed_ratio_mode(self): """ @@ -392,7 +396,7 @@ class Torrent(object): raise ValueError("Not a valid limit") seed_ratio_mode = property(_get_seed_ratio_mode, _set_seed_ratio_mode, None, - """ + """ Seed ratio mode as string. Can be one of 'global', 'single' or 'unlimited'. * global, use session seed ratio limit. @@ -401,7 +405,7 @@ class Torrent(object): This is a mutator. """ - ) + ) def _get_upload_limit(self): """ @@ -428,7 +432,8 @@ class Torrent(object): else: raise ValueError("Not a valid limit") - upload_limit = property(_get_upload_limit, _set_upload_limit, None, "Upload limit in Kbps or None. This is a mutator.") + upload_limit = property(_get_upload_limit, _set_upload_limit, None, + "Upload limit in Kbps or None. This is a mutator.") def _get_queue_position(self): """Get the queue position for this torrent.""" diff --git a/core/transmissionrpc/utils.py b/core/transmissionrpc/utils.py index d67a3d06..223921d6 100644 --- a/core/transmissionrpc/utils.py +++ b/core/transmissionrpc/utils.py @@ -10,6 +10,7 @@ from six import string_types, iteritems UNITS = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB'] + def format_size(size): """ Format byte size into IEC prefixes, B, KiB, MiB ... 
@@ -21,6 +22,7 @@ def format_size(size): size /= 1024.0 return (size, UNITS[i]) + def format_speed(size): """ Format bytes per second speed into IEC prefixes, B/s, KiB/s, MiB/s ... @@ -28,6 +30,7 @@ def format_speed(size): (size, unit) = format_size(size) return (size, unit + '/s') + def format_timedelta(delta): """ Format datetime.timedelta into ::. @@ -36,6 +39,7 @@ def format_timedelta(delta): hours, minutes = divmod(minutes, 60) return '%d %02d:%02d:%02d' % (delta.days, hours, minutes, seconds) + def format_timestamp(timestamp, utc=False): """ Format unix timestamp into ISO date format. @@ -49,12 +53,14 @@ def format_timestamp(timestamp, utc=False): else: return '-' + class INetAddressError(Exception): """ Error parsing / generating a internet address. """ pass + def inet_address(address, default_port, default_address='localhost'): """ Parse internet address. @@ -84,6 +90,7 @@ def inet_address(address, default_port, default_address='localhost'): raise INetAddressError('Cannot look up address "%s".' % address) return (addr, port) + def rpc_bool(arg): """ Convert between Python boolean and Transmission RPC boolean. @@ -95,27 +102,31 @@ def rpc_bool(arg): arg = arg.lower() in ['true', 'yes'] return 1 if bool(arg) else 0 + TR_TYPE_MAP = { - 'number' : int, - 'string' : str, + 'number': int, + 'string': str, 'double': float, - 'boolean' : rpc_bool, + 'boolean': rpc_bool, 'array': list, 'object': dict } + def make_python_name(name): """ Convert Transmission RPC name to python compatible name. """ return name.replace('-', '_') + def make_rpc_name(name): """ Convert python compatible name to Transmission RPC name. """ return name.replace('_', '-') + def argument_value_convert(method, argument, value, rpc_version): """ Check and fix Transmission RPC issues with regards to methods, arguments and values. 
@@ -154,6 +165,7 @@ def argument_value_convert(method, argument, value, rpc_version): raise ValueError('Argument "%s" does not exists for method "%s".', (argument, method)) + def get_arguments(method, rpc_version): """ Get arguments for method in specified Transmission RPC version. @@ -175,6 +187,7 @@ def get_arguments(method, rpc_version): accessible.append(argument) return accessible + def add_stdout_logger(level='debug'): """ Add a stdout target for the transmissionrpc logging. @@ -189,6 +202,7 @@ def add_stdout_logger(level='debug'): loghandler.setLevel(loglevel) trpc_logger.addHandler(loghandler) + def add_file_logger(filepath, level='debug'): """ Add a stdout target for the transmissionrpc logging. @@ -203,4 +217,5 @@ def add_file_logger(filepath, level='debug'): loghandler.setLevel(loglevel) trpc_logger.addHandler(loghandler) + Field = namedtuple('Field', ['value', 'dirty']) diff --git a/core/utorrent/__init__.py b/core/utorrent/__init__.py index bf893c06..9bad5790 100644 --- a/core/utorrent/__init__.py +++ b/core/utorrent/__init__.py @@ -1 +1 @@ -# coding=utf-8 \ No newline at end of file +# coding=utf-8 diff --git a/core/utorrent/client.py b/core/utorrent/client.py index 2f8e385e..ae5d4634 100644 --- a/core/utorrent/client.py +++ b/core/utorrent/client.py @@ -1,29 +1,31 @@ -#coding=utf8 +# coding=utf8 + import urllib import urllib2 import urlparse import cookielib import re import StringIO + try: - import json + import json except ImportError: import simplejson as json from core.utorrent.upload import MultiPartForm -class UTorrentClient(object): +class UTorrentClient(object): def __init__(self, base_url, username, password): self.base_url = base_url self.username = username self.password = password self.opener = self._make_opener('uTorrent', base_url, username, password) self.token = self._get_token() - #TODO refresh token, when necessary + # TODO refresh token, when necessary def _make_opener(self, realm, base_url, username, password): - '''uTorrent API 
need HTTP Basic Auth and cookie support for token verify.''' + """uTorrent API need HTTP Basic Auth and cookie support for token verify.""" auth_handler = urllib2.HTTPBasicAuthHandler() auth_handler.add_password(realm=realm, @@ -31,7 +33,7 @@ class UTorrentClient(object): user=username, passwd=password) opener = urllib2.build_opener(auth_handler) - urllib2.install_opener(opener) + urllib2.install_opener(opener) cookie_jar = cookielib.CookieJar() cookie_handler = urllib2.HTTPCookieProcessor(cookie_jar) @@ -47,69 +49,68 @@ class UTorrentClient(object): match = re.search(token_re, response.read()) return match.group(1) - def list(self, **kwargs): params = [('list', '1')] params += kwargs.items() return self._action(params) def start(self, *hashes): - params = [('action', 'start'),] + params = [('action', 'start'), ] for hash in hashes: params.append(('hash', hash)) return self._action(params) - + def stop(self, *hashes): - params = [('action', 'stop'),] + params = [('action', 'stop'), ] for hash in hashes: params.append(('hash', hash)) return self._action(params) - + def pause(self, *hashes): - params = [('action', 'pause'),] + params = [('action', 'pause'), ] for hash in hashes: params.append(('hash', hash)) return self._action(params) - + def forcestart(self, *hashes): - params = [('action', 'forcestart'),] + params = [('action', 'forcestart'), ] for hash in hashes: params.append(('hash', hash)) return self._action(params) - + def remove(self, *hashes): - params = [('action', 'remove'),] + params = [('action', 'remove'), ] for hash in hashes: params.append(('hash', hash)) return self._action(params) - + def removedata(self, *hashes): - params = [('action', 'removedata'),] + params = [('action', 'removedata'), ] for hash in hashes: params.append(('hash', hash)) return self._action(params) - + def recheck(self, *hashes): - params = [('action', 'recheck'),] + params = [('action', 'recheck'), ] for hash in hashes: params.append(('hash', hash)) return 
self._action(params) - + def getfiles(self, hash): params = [('action', 'getfiles'), ('hash', hash)] return self._action(params) - + def getprops(self, hash): params = [('action', 'getprops'), ('hash', hash)] return self._action(params) - + def setprio(self, hash, priority, *files): params = [('action', 'setprio'), ('hash', hash), ('p', str(priority))] for file_index in files: params.append(('f', str(file_index))) return self._action(params) - + def addfile(self, filename, filepath=None, bytes=None): params = [('action', 'add-file')] @@ -118,13 +119,13 @@ class UTorrentClient(object): file_handler = open(filepath) else: file_handler = StringIO.StringIO(bytes) - + form.add_file('torrent_file', filename.encode('utf-8'), file_handler) return self._action(params, str(form), form.get_content_type()) def _action(self, params, body=None, content_type=None): - #about token, see https://github.com/bittorrent/webui/wiki/TokenSystem + # about token, see https://github.com/bittorrent/webui/wiki/TokenSystem url = self.base_url + '?token=' + self.token + '&' + urllib.urlencode(params) request = urllib2.Request(url) @@ -137,6 +138,5 @@ class UTorrentClient(object): try: response = self.opener.open(request) return response.code, json.loads(response.read()) - except urllib2.HTTPError,e: - raise - + except urllib2.HTTPError, e: + raise diff --git a/core/utorrent/upload.py b/core/utorrent/upload.py index 8a72306a..de149efc 100644 --- a/core/utorrent/upload.py +++ b/core/utorrent/upload.py @@ -1,5 +1,5 @@ # coding=utf-8 -#code copied from http://www.doughellmann.com/PyMOTW/urllib2/ +# code copied from http://www.doughellmann.com/PyMOTW/urllib2/ import itertools import mimetools @@ -14,7 +14,7 @@ class MultiPartForm(object): self.files = [] self.boundary = mimetools.choose_boundary() return - + def get_content_type(self): return 'multipart/form-data; boundary=%s' % self.boundary @@ -30,7 +30,7 @@ class MultiPartForm(object): mimetype = mimetypes.guess_type(filename)[0] or 
'application/octet-stream' self.files.append((fieldname, filename, mimetype, body)) return - + def __str__(self): """Return a string representing the form data, including attached files.""" # Build a list of lists, each containing "lines" of the @@ -39,29 +39,28 @@ class MultiPartForm(object): # line is separated by '\r\n'. parts = [] part_boundary = '--' + self.boundary - + # Add the form fields parts.extend( - [ part_boundary, - 'Content-Disposition: form-data; name="%s"' % name, - '', - value, - ] + [part_boundary, + 'Content-Disposition: form-data; name="%s"' % name, + '', + value, + ] for name, value in self.form_fields - ) - + ) + # Add the files to upload parts.extend( - [ part_boundary, - 'Content-Disposition: file; name="%s"; filename="%s"' % \ - (field_name, filename), - 'Content-Type: %s' % content_type, - '', - body, - ] + [part_boundary, + 'Content-Disposition: file; name="%s"; filename="%s"' % (field_name, filename), + 'Content-Type: %s' % content_type, + '', + body, + ] for field_name, filename, content_type, body in self.files - ) - + ) + # Flatten the list and add closing boundary marker, # then return CR+LF separated data flattened = list(itertools.chain(*parts)) diff --git a/core/versionCheck.py b/core/versionCheck.py index 773a7f25..b71f903c 100644 --- a/core/versionCheck.py +++ b/core/versionCheck.py @@ -16,6 +16,7 @@ import gh_api as github import core from core import logger + class CheckVersion(): """ Version check class meant to run as a thread object with the SB scheduler. 
@@ -80,6 +81,7 @@ class CheckVersion(): if self.updater.need_update(): return self.updater.update() + class UpdateManager(): def get_github_repo_user(self): return core.GIT_USER @@ -90,6 +92,7 @@ class UpdateManager(): def get_github_branch(self): return core.GIT_BRANCH + class GitUpdateManager(UpdateManager): def __init__(self): self._git_path = self._find_working_git() @@ -103,7 +106,8 @@ class GitUpdateManager(UpdateManager): self._num_commits_ahead = 0 def _git_error(self): - logger.debug('Unable to find your git executable - Set git_path in your autoProcessMedia.cfg OR delete your .git folder and run from source to enable updates.') + logger.debug( + 'Unable to find your git executable - Set git_path in your autoProcessMedia.cfg OR delete your .git folder and run from source to enable updates.') def _find_working_git(self): test_cmd = 'version' @@ -148,7 +152,8 @@ class GitUpdateManager(UpdateManager): logger.log(u"Not using: " + cur_git, logger.DEBUG) # Still haven't found a working git - logger.debug('Unable to find your git executable - Set git_path in your autoProcessMedia.cfg OR delete your .git folder and run from source to enable updates.') + logger.debug( + 'Unable to find your git executable - Set git_path in your autoProcessMedia.cfg OR delete your .git folder and run from source to enable updates.') return None @@ -279,9 +284,10 @@ class GitUpdateManager(UpdateManager): logger.log(u"git didn't return numbers for behind and ahead, not using it", logger.DEBUG) return - logger.log(u"cur_commit = " + str(self._cur_commit_hash) + u" % (newest_commit)= " + str(self._newest_commit_hash) - + u", num_commits_behind = " + str(self._num_commits_behind) + u", num_commits_ahead = " + str( - self._num_commits_ahead), logger.DEBUG) + logger.log( + u"cur_commit = " + str(self._cur_commit_hash) + u" % (newest_commit)= " + str(self._newest_commit_hash) + + u", num_commits_behind = " + str(self._num_commits_behind) + u", num_commits_ahead = " + + 
str(self._num_commits_ahead), logger.DEBUG) def set_newest_text(self): if self._num_commits_ahead: @@ -411,8 +417,9 @@ class SourceUpdateManager(UpdateManager): # when _cur_commit_hash doesn't match anything _num_commits_behind == 100 self._num_commits_behind += 1 - logger.log(u"cur_commit = " + str(self._cur_commit_hash) + u" % (newest_commit)= " + str(self._newest_commit_hash) - + u", num_commits_behind = " + str(self._num_commits_behind), logger.DEBUG) + logger.log( + u"cur_commit = " + str(self._cur_commit_hash) + u" % (newest_commit)= " + str(self._newest_commit_hash) + + u", num_commits_behind = " + str(self._num_commits_behind), logger.DEBUG) def set_newest_text(self): @@ -489,9 +496,9 @@ class SourceUpdateManager(UpdateManager): old_path = os.path.join(content_dir, dirname, curfile) new_path = os.path.join(core.PROGRAM_DIR, dirname, curfile) - #Avoid DLL access problem on WIN32/64 - #These files needing to be updated manually - #or find a way to kill the access from memory + # Avoid DLL access problem on WIN32/64 + # These files needing to be updated manually + # or find a way to kill the access from memory if curfile in ('unrar.dll', 'unrar64.dll'): try: os.chmod(new_path, stat.S_IWRITE) @@ -519,4 +526,4 @@ class SourceUpdateManager(UpdateManager): logger.log(u"Traceback: " + traceback.format_exc(), logger.DEBUG) return False - return True \ No newline at end of file + return True From 92ae85251331db432a1dafdc0bd45b5cb26de1e1 Mon Sep 17 00:00:00 2001 From: Labrys Date: Sat, 4 Jun 2016 22:16:52 -0400 Subject: [PATCH 22/82] PEP8: comparison to `None`, `True`, or `False` should use `is`/`is not` --- core/nzbToMediaDB.py | 12 ++++++------ core/nzbToMediaUtil.py | 2 +- core/synchronousdeluge/rencode.py | 2 +- core/transmissionrpc/client.py | 2 +- core/transmissionrpc/torrent.py | 4 ++-- 5 files changed, 11 insertions(+), 11 deletions(-) diff --git a/core/nzbToMediaDB.py b/core/nzbToMediaDB.py index a34b8662..e2ff20f6 100644 --- a/core/nzbToMediaDB.py +++ 
b/core/nzbToMediaDB.py @@ -46,7 +46,7 @@ class DBConnection: return 0 def fetch(self, query, args=None): - if query == None: + if query is None: return sqlResult = None @@ -54,7 +54,7 @@ class DBConnection: while attempt < 5: try: - if args == None: + if args is None: logger.log(self.filename + ": " + query, logger.DB) cursor = self.connection.cursor() cursor.execute(query) @@ -82,7 +82,7 @@ class DBConnection: return sqlResult def mass_action(self, querylist, logTransaction=False): - if querylist == None: + if querylist is None: return sqlResult = [] @@ -123,7 +123,7 @@ class DBConnection: return sqlResult def action(self, query, args=None): - if query == None: + if query is None: return sqlResult = None @@ -131,7 +131,7 @@ class DBConnection: while attempt < 5: try: - if args == None: + if args is None: logger.log(self.filename + ": " + query, logger.DB) sqlResult = self.connection.execute(query) else: @@ -158,7 +158,7 @@ class DBConnection: sqlResults = self.action(query, args).fetchall() - if sqlResults == None: + if sqlResults is None: return [] return sqlResults diff --git a/core/nzbToMediaUtil.py b/core/nzbToMediaUtil.py index 3899766d..c359ecda 100644 --- a/core/nzbToMediaUtil.py +++ b/core/nzbToMediaUtil.py @@ -1110,7 +1110,7 @@ def extractFiles(src, dst=None, keep_archive=None): fullFileName = os.path.basename(inputFile) archiveName = os.path.splitext(fullFileName)[0] archiveName = re.sub(r"part[0-9]+", "", archiveName) - if not archiveName in extracted_archive or keep_archive == True: + if not archiveName in extracted_archive or keep_archive is True: continue # don't remove if we haven't extracted this archive, or if we want to preserve them. logger.info("Removing extracted archive %s from folder %s ..." 
% (fullFileName, folder)) try: diff --git a/core/synchronousdeluge/rencode.py b/core/synchronousdeluge/rencode.py index 655f903b..62e22b08 100644 --- a/core/synchronousdeluge/rencode.py +++ b/core/synchronousdeluge/rencode.py @@ -461,7 +461,7 @@ def test(): assert loads(dumps(L)) == L L = tuple(['a' * n for n in range(1000)]) + (None, True, None) assert loads(dumps(L)) == L - assert loads(dumps(None)) == None + assert loads(dumps(None)) is None assert loads(dumps({None: None})) == {None: None} assert 1e-10 < abs(loads(dumps(1.1)) - 1.1) < 1e-6 assert 1e-10 < abs(loads(dumps(1.1, 32)) - 1.1) < 1e-6 diff --git a/core/transmissionrpc/client.py b/core/transmissionrpc/client.py index 461be0ad..c451e685 100644 --- a/core/transmissionrpc/client.py +++ b/core/transmissionrpc/client.py @@ -111,7 +111,7 @@ def parse_torrent_ids(args): ids.extend(parse_torrent_ids(item)) else: torrent_id = parse_torrent_id(args) - if torrent_id == None: + if torrent_id is None: raise ValueError('Invalid torrent id') else: ids = [torrent_id] diff --git a/core/transmissionrpc/torrent.py b/core/transmissionrpc/torrent.py index a0813464..5fd033db 100644 --- a/core/transmissionrpc/torrent.py +++ b/core/transmissionrpc/torrent.py @@ -267,7 +267,7 @@ class Torrent(object): self._fields['downloadLimited'] = Field(True, True) self._fields['downloadLimit'] = Field(limit, True) self._push() - elif limit == None: + elif limit is None: self._fields['downloadLimited'] = Field(False, True) self._push() else: @@ -426,7 +426,7 @@ class Torrent(object): self._fields['uploadLimited'] = Field(True, True) self._fields['uploadLimit'] = Field(limit, True) self._push() - elif limit == None: + elif limit is None: self._fields['uploadLimited'] = Field(False, True) self._push() else: From 1fd904eb5bb738a237c5552af5dba83c006f6c01 Mon Sep 17 00:00:00 2001 From: Labrys Date: Sat, 4 Jun 2016 22:20:45 -0400 Subject: [PATCH 23/82] PEP8: Tests for membership should use `in`/`not in` * .has_key() is deprecated, use `in` --- 
core/__init__.py | 12 ++++----- core/logger.py | 2 +- core/nzbToMediaConfig.py | 46 +++++++++++++++++------------------ core/nzbToMediaUtil.py | 8 +++--- core/transcoder/transcoder.py | 6 ++--- 5 files changed, 37 insertions(+), 37 deletions(-) diff --git a/core/__init__.py b/core/__init__.py index 21864078..12c413f1 100644 --- a/core/__init__.py +++ b/core/__init__.py @@ -227,7 +227,7 @@ def initialize(section=None): if __INITIALIZED__: return False - if os.environ.has_key('NTM_LOGFILE'): + if 'NTM_LOGFILE' in os.environ: LOG_FILE = os.environ['NTM_LOGFILE'] LOG_DIR = os.path.split(LOG_FILE)[0] @@ -259,7 +259,7 @@ def initialize(section=None): except: print 'Sorry, you MUST add the nzbToMedia folder to the PYTHONPATH environment variable' print 'or find another way to force Python to use ' + SYS_ENCODING + ' for string encoding.' - if os.environ.has_key('NZBOP_SCRIPTDIR'): + if 'NZBOP_SCRIPTDIR' in os.environ: sys.exit(NZBGET_POSTPROCESS_ERROR) else: sys.exit(1) @@ -270,13 +270,13 @@ def initialize(section=None): # run migrate to convert old cfg to new style cfg plus fix any cfg missing values/options. if not config.migrate(): logger.error("Unable to migrate config file %s, exiting ..." % (CONFIG_FILE)) - if os.environ.has_key('NZBOP_SCRIPTDIR'): + if 'NZBOP_SCRIPTDIR' in os.environ: pass # We will try and read config from Environment. 
else: sys.exit(-1) # run migrate to convert NzbGet data from old cfg style to new cfg style - if os.environ.has_key('NZBOP_SCRIPTDIR'): + if 'NZBOP_SCRIPTDIR' in os.environ: CFG = config.addnzbget() else: # load newly migrated config @@ -441,9 +441,9 @@ def initialize(section=None): GENERALOPTS = GENERALOPTS.split(',') if GENERALOPTS == ['']: GENERALOPTS = [] - if not '-fflags' in GENERALOPTS: + if '-fflags' not in GENERALOPTS: GENERALOPTS.append('-fflags') - if not '+genpts' in GENERALOPTS: + if '+genpts' not in GENERALOPTS: GENERALOPTS.append('+genpts') try: OUTPUTQUALITYPERCENT = int(CFG["Transcoder"]["outputQualityPercent"]) diff --git a/core/logger.py b/core/logger.py index 324248d4..b3800a98 100644 --- a/core/logger.py +++ b/core/logger.py @@ -228,7 +228,7 @@ class NTMRotatingLogHandler(object): def log_error_and_exit(self, error_msg): log(error_msg, ERROR) - if os.environ.has_key('NZBOP_SCRIPTDIR'): + if 'NZBOP_SCRIPTDIR' in os.environ: sys.exit(core.NZBGET_POSTPROCESS_ERROR) elif not self.console_logging: sys.exit(error_msg.encode(core.SYS_ENCODING, 'xmlcharrefreplace')) diff --git a/core/nzbToMediaConfig.py b/core/nzbToMediaConfig.py index e0bb8172..75d8ad19 100644 --- a/core/nzbToMediaConfig.py +++ b/core/nzbToMediaConfig.py @@ -186,7 +186,7 @@ class ConfigObj(configobj.ConfigObj, Section): CFG_NEW['Posix'][option] = value values.pop(option) if option == "remote_path": - if value and not value in ['0', '1', 0, 1]: + if value and value not in ['0', '1', 0, 1]: value = 1 elif not value: value = 0 @@ -251,14 +251,14 @@ class ConfigObj(configobj.ConfigObj, Section): CFG_NEW = config() try: - if os.environ.has_key('NZBPO_NDCATEGORY') and os.environ.has_key('NZBPO_SBCATEGORY'): + if 'NZBPO_NDCATEGORY' in os.environ and 'NZBPO_SBCATEGORY' in os.environ: if os.environ['NZBPO_NDCATEGORY'] == os.environ['NZBPO_SBCATEGORY']: logger.warning("%s category is set for SickBeard and NzbDrone. 
" "Please check your config in NZBGet" % (os.environ['NZBPO_NDCATEGORY'])) section = "Nzb" key = 'NZBOP_DESTDIR' - if os.environ.has_key(key): + if key in os.environ: option = 'default_downloadDirectory' value = os.environ[key] CFG_NEW[section][option] = value @@ -268,7 +268,7 @@ class ConfigObj(configobj.ConfigObj, Section): cfgKeys = ['auto_update', 'check_media', 'safe_mode'] for index in range(len(envKeys)): key = 'NZBPO_' + envKeys[index] - if os.environ.has_key(key): + if key in os.environ: option = cfgKeys[index] value = os.environ[key] CFG_NEW[section][option] = value @@ -278,7 +278,7 @@ class ConfigObj(configobj.ConfigObj, Section): cfgKeys = ['mount_points'] for index in range(len(envKeys)): key = 'NZBPO_' + envKeys[index] - if os.environ.has_key(key): + if key in os.environ: option = cfgKeys[index] value = os.environ[key] CFG_NEW[section][option] = value @@ -289,10 +289,10 @@ class ConfigObj(configobj.ConfigObj, Section): 'WAIT_FOR', 'WATCH_DIR'] cfgKeys = ['enabled', 'apikey', 'host', 'port', 'ssl', 'web_root', 'method', 'delete_failed', 'remote_path', 'wait_for', 'watch_dir'] - if os.environ.has_key(envCatKey): + if envCatKey in os.environ: for index in range(len(envKeys)): key = 'NZBPO_CPS' + envKeys[index] - if os.environ.has_key(key): + if key in os.environ: option = cfgKeys[index] value = os.environ[key] if os.environ[envCatKey] not in CFG_NEW[section].sections: @@ -306,10 +306,10 @@ class ConfigObj(configobj.ConfigObj, Section): 'DELETE_FAILED', 'TORRENT_NOLINK', 'NZBEXTRACTIONBY', 'REMOTE_PATH', 'PROCESS_METHOD'] cfgKeys = ['enabled', 'host', 'port', 'username', 'password', 'ssl', 'web_root', 'watch_dir', 'fork', 'delete_failed', 'Torrent_NoLink', 'nzbExtractionBy', 'remote_path', 'process_method'] - if os.environ.has_key(envCatKey): + if envCatKey in os.environ: for index in range(len(envKeys)): key = 'NZBPO_SB' + envKeys[index] - if os.environ.has_key(key): + if key in os.environ: option = cfgKeys[index] value = os.environ[key] if 
os.environ[envCatKey] not in CFG_NEW[section].sections: @@ -323,10 +323,10 @@ class ConfigObj(configobj.ConfigObj, Section): envCatKey = 'NZBPO_HPCATEGORY' envKeys = ['ENABLED', 'APIKEY', 'HOST', 'PORT', 'SSL', 'WEB_ROOT', 'WAIT_FOR', 'WATCH_DIR', 'REMOTE_PATH'] cfgKeys = ['enabled', 'apikey', 'host', 'port', 'ssl', 'web_root', 'wait_for', 'watch_dir', 'remote_path'] - if os.environ.has_key(envCatKey): + if envCatKey in os.environ: for index in range(len(envKeys)): key = 'NZBPO_HP' + envKeys[index] - if os.environ.has_key(key): + if key in os.environ: option = cfgKeys[index] value = os.environ[key] if os.environ[envCatKey] not in CFG_NEW[section].sections: @@ -340,10 +340,10 @@ class ConfigObj(configobj.ConfigObj, Section): 'REMOTE_PATH'] cfgKeys = ['enabled', 'host', 'port', 'username', 'password', 'apikey', 'ssl', 'web_root', 'watch_dir', 'remote_path'] - if os.environ.has_key(envCatKey): + if envCatKey in os.environ: for index in range(len(envKeys)): key = 'NZBPO_MY' + envKeys[index] - if os.environ.has_key(key): + if key in os.environ: option = cfgKeys[index] value = os.environ[key] if os.environ[envCatKey] not in CFG_NEW[section].sections: @@ -355,10 +355,10 @@ class ConfigObj(configobj.ConfigObj, Section): envCatKey = 'NZBPO_GZCATEGORY' envKeys = ['ENABLED', 'APIKEY', 'HOST', 'PORT', 'SSL', 'WEB_ROOT', 'WATCH_DIR', 'LIBRARY', 'REMOTE_PATH'] cfgKeys = ['enabled', 'apikey', 'host', 'port', 'ssl', 'web_root', 'watch_dir', 'library', 'remote_path'] - if os.environ.has_key(envCatKey): + if envCatKey in os.environ: for index in range(len(envKeys)): key = 'NZBPO_GZ' + envKeys[index] - if os.environ.has_key(key): + if key in os.environ: option = cfgKeys[index] value = os.environ[key] if os.environ[envCatKey] not in CFG_NEW[section].sections: @@ -372,10 +372,10 @@ class ConfigObj(configobj.ConfigObj, Section): 'TORRENT_NOLINK', 'NZBEXTRACTIONBY', 'WAIT_FOR', 'DELETE_FAILED', 'REMOTE_PATH'] cfgKeys = ['enabled', 'host', 'apikey', 'port', 'ssl', 'web_root', 'watch_dir', 
'fork', 'delete_failed', 'Torrent_NoLink', 'nzbExtractionBy', 'wait_for', 'delete_failed', 'remote_path'] - if os.environ.has_key(envCatKey): + if envCatKey in os.environ: for index in range(len(envKeys)): key = 'NZBPO_ND' + envKeys[index] - if os.environ.has_key(key): + if key in os.environ: option = cfgKeys[index] value = os.environ[key] if os.environ[envCatKey] not in CFG_NEW[section].sections: @@ -390,7 +390,7 @@ class ConfigObj(configobj.ConfigObj, Section): cfgKeys = ['compressedExtensions', 'mediaExtensions', 'metaExtensions'] for index in range(len(envKeys)): key = 'NZBPO_' + envKeys[index] - if os.environ.has_key(key): + if key in os.environ: option = cfgKeys[index] value = os.environ[key] CFG_NEW[section][option] = value @@ -400,7 +400,7 @@ class ConfigObj(configobj.ConfigObj, Section): cfgKeys = ['niceness', 'ionice_class', 'ionice_classdata'] for index in range(len(envKeys)): key = 'NZBPO_' + envKeys[index] - if os.environ.has_key(key): + if key in os.environ: option = cfgKeys[index] value = os.environ[key] CFG_NEW[section][option] = value @@ -428,7 +428,7 @@ class ConfigObj(configobj.ConfigObj, Section): 'outputAudioOtherChannels'] for index in range(len(envKeys)): key = 'NZBPO_' + envKeys[index] - if os.environ.has_key(key): + if key in os.environ: option = cfgKeys[index] value = os.environ[key] CFG_NEW[section][option] = value @@ -438,7 +438,7 @@ class ConfigObj(configobj.ConfigObj, Section): cfgKeys = ['wake', 'host', 'port', 'mac'] for index in range(len(envKeys)): key = 'NZBPO_WOL' + envKeys[index] - if os.environ.has_key(key): + if key in os.environ: option = cfgKeys[index] value = os.environ[key] CFG_NEW[section][option] = value @@ -449,10 +449,10 @@ class ConfigObj(configobj.ConfigObj, Section): 'USER_SCRIPT_SUCCESSCODES', 'USER_SCRIPT_CLEAN', 'USDELAY', 'USREMOTE_PATH'] cfgKeys = ['user_script_mediaExtensions', 'user_script_path', 'user_script_param', 'user_script_runOnce', 'user_script_successCodes', 'user_script_clean', 'delay', 
'remote_path'] - if os.environ.has_key(envCatKey): + if envCatKey in os.environ: for index in range(len(envKeys)): key = 'NZBPO_' + envKeys[index] - if os.environ.has_key(key): + if key in os.environ: option = cfgKeys[index] value = os.environ[key] if os.environ[envCatKey] not in CFG_NEW[section].sections: diff --git a/core/nzbToMediaUtil.py b/core/nzbToMediaUtil.py index c359ecda..9e5eeaa5 100644 --- a/core/nzbToMediaUtil.py +++ b/core/nzbToMediaUtil.py @@ -157,7 +157,7 @@ def category_search(inputDirectory, inputName, inputCategory, root, categories): tordir = True imdbid = [item for item in pathlist if '.cp(tt' in item] # This looks for the .cp(tt imdb id in the path. - if imdbid and not '.cp(tt' in inputName: + if imdbid and '.cp(tt' not in inputName: inputName = imdbid[0] # This ensures the imdb id is preserved and passed to CP tordir = True @@ -454,7 +454,7 @@ def convert_to_ascii(inputName, dirName): dirName = os.path.join(dir, base2) logger.info("Renaming directory to: %s." % (base2), 'ENCODER') os.rename(os.path.join(dir, base), dirName) - if os.environ.has_key('NZBOP_SCRIPTDIR'): + if 'NZBOP_SCRIPTDIR' in os.environ: print "[NZB] DIRECTORY=%s" % (dirName) # Return the new directory to NZBGet. 
for dirname, dirnames, filenames in os.walk(dirName, topdown=False): @@ -1038,7 +1038,7 @@ def find_imdbid(dirName, inputName): imdbid = m.group(1) logger.info("Found imdbID [%s] via file name" % imdbid) return imdbid - if os.environ.has_key('NZBPR__DNZB_MOREINFO'): + if 'NZBPR__DNZB_MOREINFO' in os.environ: dnzb_more_info = os.environ.get('NZBPR__DNZB_MOREINFO', '') if dnzb_more_info != '': regex = re.compile(r'^http://www.imdb.com/title/(tt[0-9]+)/$', re.IGNORECASE) @@ -1110,7 +1110,7 @@ def extractFiles(src, dst=None, keep_archive=None): fullFileName = os.path.basename(inputFile) archiveName = os.path.splitext(fullFileName)[0] archiveName = re.sub(r"part[0-9]+", "", archiveName) - if not archiveName in extracted_archive or keep_archive is True: + if archiveName not in extracted_archive or keep_archive is True: continue # don't remove if we haven't extracted this archive, or if we want to preserve them. logger.info("Removing extracted archive %s from folder %s ..." % (fullFileName, folder)) try: diff --git a/core/transcoder/transcoder.py b/core/transcoder/transcoder.py index 1eed54b6..d31ef25e 100644 --- a/core/transcoder/transcoder.py +++ b/core/transcoder/transcoder.py @@ -594,11 +594,11 @@ def processList(List, newDir, bitbucket): for item in List: newfile = None ext = os.path.splitext(item)[1].lower() - if ext in ['.iso', '.bin', '.img'] and not ext in core.IGNOREEXTENSIONS: + if ext in ['.iso', '.bin', '.img'] and ext not in core.IGNOREEXTENSIONS: logger.debug("Attempting to rip disk image: %s" % (item), "TRANSCODER") newList.extend(ripISO(item, newDir, bitbucket)) remList.append(item) - elif re.match(".+VTS_[0-9][0-9]_[0-9].[Vv][Oo][Bb]", item) and not '.vob' in core.IGNOREEXTENSIONS: + elif re.match(".+VTS_[0-9][0-9]_[0-9].[Vv][Oo][Bb]", item) and '.vob' not in core.IGNOREEXTENSIONS: logger.debug("Found VIDEO_TS image file: %s" % (item), "TRANSCODER") if not vtsPath: try: @@ -618,7 +618,7 @@ def processList(List, newDir, bitbucket): if combine: 
newList.extend(combineCD(combine)) for file in newList: - if isinstance(file, str) and not 'concat:' in file and not os.path.isfile(file): + if isinstance(file, str) and 'concat:' not in file and not os.path.isfile(file): success = False break if success and newList: From 3acaf29f1e2024e6e9d23c3f6ee22d9651dc756e Mon Sep 17 00:00:00 2001 From: Labrys Date: Sun, 5 Jun 2016 00:36:12 -0400 Subject: [PATCH 24/82] Update six to 1.10.0 --- core/transmissionrpc/six.py | 468 ++++++++++++++++++++++++++++-------- libs/six.py | 372 ++++++++++++++++++++++------ 2 files changed, 671 insertions(+), 169 deletions(-) diff --git a/core/transmissionrpc/six.py b/core/transmissionrpc/six.py index 0554cddc..190c0239 100644 --- a/core/transmissionrpc/six.py +++ b/core/transmissionrpc/six.py @@ -1,7 +1,6 @@ -# coding=utf-8 """Utilities for writing code that runs on Python 2 and 3""" -# Copyright (c) 2010-2013 Benjamin Peterson +# Copyright (c) 2010-2015 Benjamin Peterson # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal @@ -21,16 +20,22 @@ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. +from __future__ import absolute_import + +import functools +import itertools import operator import sys import types __author__ = "Benjamin Peterson " -__version__ = "1.4.1" +__version__ = "1.10.0" + # Useful for very coarse version differentiation. PY2 = sys.version_info[0] == 2 PY3 = sys.version_info[0] == 3 +PY34 = sys.version_info[0:2] >= (3, 4) if PY3: string_types = str, @@ -53,10 +58,9 @@ else: else: # It's possible to have sizeof(long) != sizeof(Py_ssize_t). 
class X(object): + def __len__(self): return 1 << 31 - - try: len(X()) except OverflowError: @@ -80,18 +84,24 @@ def _import_module(name): class _LazyDescr(object): + def __init__(self, name): self.name = name def __get__(self, obj, tp): result = self._resolve() - setattr(obj, self.name, result) - # This is a bit ugly, but it avoids running this again. - delattr(tp, self.name) + setattr(obj, self.name, result) # Invokes __set__. + try: + # This is a bit ugly, but it avoids running this again by + # removing this descriptor. + delattr(obj.__class__, self.name) + except AttributeError: + pass return result class MovedModule(_LazyDescr): + def __init__(self, name, old, new=None): super(MovedModule, self).__init__(name) if PY3: @@ -104,8 +114,30 @@ class MovedModule(_LazyDescr): def _resolve(self): return _import_module(self.mod) + def __getattr__(self, attr): + _module = self._resolve() + value = getattr(_module, attr) + setattr(self, attr, value) + return value + + +class _LazyModule(types.ModuleType): + + def __init__(self, name): + super(_LazyModule, self).__init__(name) + self.__doc__ = self.__class__.__doc__ + + def __dir__(self): + attrs = ["__doc__", "__name__"] + attrs += [attr.name for attr in self._moved_attributes] + return attrs + + # Subclasses should override this + _moved_attributes = [] + class MovedAttribute(_LazyDescr): + def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None): super(MovedAttribute, self).__init__(name) if PY3: @@ -129,8 +161,75 @@ class MovedAttribute(_LazyDescr): return getattr(module, self.attr) -class _MovedItems(types.ModuleType): +class _SixMetaPathImporter(object): + + """ + A meta path importer to import six.moves and its submodules. + + This class implements a PEP302 finder and loader. 
It should be compatible + with Python 2.5 and all existing versions of Python3 + """ + + def __init__(self, six_module_name): + self.name = six_module_name + self.known_modules = {} + + def _add_module(self, mod, *fullnames): + for fullname in fullnames: + self.known_modules[self.name + "." + fullname] = mod + + def _get_module(self, fullname): + return self.known_modules[self.name + "." + fullname] + + def find_module(self, fullname, path=None): + if fullname in self.known_modules: + return self + return None + + def __get_module(self, fullname): + try: + return self.known_modules[fullname] + except KeyError: + raise ImportError("This loader does not know module " + fullname) + + def load_module(self, fullname): + try: + # in case of a reload + return sys.modules[fullname] + except KeyError: + pass + mod = self.__get_module(fullname) + if isinstance(mod, MovedModule): + mod = mod._resolve() + else: + mod.__loader__ = self + sys.modules[fullname] = mod + return mod + + def is_package(self, fullname): + """ + Return true, if the named module is a package. 
+ + We need this method to get correct spec objects with + Python 3.4 (see PEP451) + """ + return hasattr(self.__get_module(fullname), "__path__") + + def get_code(self, fullname): + """Return None + + Required, if is_package is implemented""" + self.__get_module(fullname) # eventually raises ImportError + return None + get_source = get_code # same as get_code + +_importer = _SixMetaPathImporter(__name__) + + +class _MovedItems(_LazyModule): + """Lazy loading of moved objects""" + __path__ = [] # mark as package _moved_attributes = [ @@ -138,25 +237,33 @@ _moved_attributes = [ MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"), MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"), MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"), + MovedAttribute("intern", "__builtin__", "sys"), MovedAttribute("map", "itertools", "builtins", "imap", "map"), + MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"), + MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"), MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"), - MovedAttribute("reload_module", "__builtin__", "imp", "reload"), + MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"), MovedAttribute("reduce", "__builtin__", "functools"), + MovedAttribute("shlex_quote", "pipes", "shlex", "quote"), MovedAttribute("StringIO", "StringIO", "io"), + MovedAttribute("UserDict", "UserDict", "collections"), + MovedAttribute("UserList", "UserList", "collections"), MovedAttribute("UserString", "UserString", "collections"), MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"), MovedAttribute("zip", "itertools", "builtins", "izip", "zip"), MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"), - MovedModule("builtins", "__builtin__"), - MovedModule("config", "config"), + MovedModule("configparser", "ConfigParser"), MovedModule("copyreg", "copy_reg"), 
+ MovedModule("dbm_gnu", "gdbm", "dbm.gnu"), + MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"), MovedModule("http_cookiejar", "cookielib", "http.cookiejar"), MovedModule("http_cookies", "Cookie", "http.cookies"), MovedModule("html_entities", "htmlentitydefs", "html.entities"), MovedModule("html_parser", "HTMLParser", "html.parser"), MovedModule("http_client", "httplib", "http.client"), MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"), + MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"), MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"), MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"), MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"), @@ -166,12 +273,14 @@ _moved_attributes = [ MovedModule("queue", "Queue"), MovedModule("reprlib", "repr"), MovedModule("socketserver", "SocketServer"), + MovedModule("_thread", "thread", "_thread"), MovedModule("tkinter", "Tkinter"), MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"), MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"), MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"), MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"), MovedModule("tkinter_tix", "Tix", "tkinter.tix"), + MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"), MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"), MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"), MovedModule("tkinter_colorchooser", "tkColorChooser", @@ -187,21 +296,35 @@ _moved_attributes = [ MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"), MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"), MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"), - MovedModule("winreg", "_winreg"), + MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"), + MovedModule("xmlrpc_server", 
"SimpleXMLRPCServer", "xmlrpc.server"), ] +# Add windows specific modules. +if sys.platform == "win32": + _moved_attributes += [ + MovedModule("winreg", "_winreg"), + ] + for attr in _moved_attributes: setattr(_MovedItems, attr.name, attr) + if isinstance(attr, MovedModule): + _importer._add_module(attr, "moves." + attr.name) del attr -moves = sys.modules[__name__ + ".moves"] = _MovedItems(__name__ + ".moves") +_MovedItems._moved_attributes = _moved_attributes + +moves = _MovedItems(__name__ + ".moves") +_importer._add_module(moves, "moves") -class Module_six_moves_urllib_parse(types.ModuleType): +class Module_six_moves_urllib_parse(_LazyModule): + """Lazy loading of moved objects in six.moves.urllib_parse""" _urllib_parse_moved_attributes = [ MovedAttribute("ParseResult", "urlparse", "urllib.parse"), + MovedAttribute("SplitResult", "urlparse", "urllib.parse"), MovedAttribute("parse_qs", "urlparse", "urllib.parse"), MovedAttribute("parse_qsl", "urlparse", "urllib.parse"), MovedAttribute("urldefrag", "urlparse", "urllib.parse"), @@ -215,16 +338,27 @@ _urllib_parse_moved_attributes = [ MovedAttribute("unquote", "urllib", "urllib.parse"), MovedAttribute("unquote_plus", "urllib", "urllib.parse"), MovedAttribute("urlencode", "urllib", "urllib.parse"), + MovedAttribute("splitquery", "urllib", "urllib.parse"), + MovedAttribute("splittag", "urllib", "urllib.parse"), + MovedAttribute("splituser", "urllib", "urllib.parse"), + MovedAttribute("uses_fragment", "urlparse", "urllib.parse"), + MovedAttribute("uses_netloc", "urlparse", "urllib.parse"), + MovedAttribute("uses_params", "urlparse", "urllib.parse"), + MovedAttribute("uses_query", "urlparse", "urllib.parse"), + MovedAttribute("uses_relative", "urlparse", "urllib.parse"), ] for attr in _urllib_parse_moved_attributes: setattr(Module_six_moves_urllib_parse, attr.name, attr) del attr -sys.modules[__name__ + ".moves.urllib_parse"] = Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse") -sys.modules[__name__ + 
".moves.urllib.parse"] = Module_six_moves_urllib_parse(__name__ + ".moves.urllib.parse") +Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes + +_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"), + "moves.urllib_parse", "moves.urllib.parse") -class Module_six_moves_urllib_error(types.ModuleType): +class Module_six_moves_urllib_error(_LazyModule): + """Lazy loading of moved objects in six.moves.urllib_error""" @@ -237,11 +371,14 @@ for attr in _urllib_error_moved_attributes: setattr(Module_six_moves_urllib_error, attr.name, attr) del attr -sys.modules[__name__ + ".moves.urllib_error"] = Module_six_moves_urllib_error(__name__ + ".moves.urllib_error") -sys.modules[__name__ + ".moves.urllib.error"] = Module_six_moves_urllib_error(__name__ + ".moves.urllib.error") +Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes + +_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"), + "moves.urllib_error", "moves.urllib.error") -class Module_six_moves_urllib_request(types.ModuleType): +class Module_six_moves_urllib_request(_LazyModule): + """Lazy loading of moved objects in six.moves.urllib_request""" @@ -278,16 +415,20 @@ _urllib_request_moved_attributes = [ MovedAttribute("urlcleanup", "urllib", "urllib.request"), MovedAttribute("URLopener", "urllib", "urllib.request"), MovedAttribute("FancyURLopener", "urllib", "urllib.request"), + MovedAttribute("proxy_bypass", "urllib", "urllib.request"), ] for attr in _urllib_request_moved_attributes: setattr(Module_six_moves_urllib_request, attr.name, attr) del attr -sys.modules[__name__ + ".moves.urllib_request"] = Module_six_moves_urllib_request(__name__ + ".moves.urllib_request") -sys.modules[__name__ + ".moves.urllib.request"] = Module_six_moves_urllib_request(__name__ + ".moves.urllib.request") +Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes + 
+_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"), + "moves.urllib_request", "moves.urllib.request") -class Module_six_moves_urllib_response(types.ModuleType): +class Module_six_moves_urllib_response(_LazyModule): + """Lazy loading of moved objects in six.moves.urllib_response""" @@ -301,11 +442,14 @@ for attr in _urllib_response_moved_attributes: setattr(Module_six_moves_urllib_response, attr.name, attr) del attr -sys.modules[__name__ + ".moves.urllib_response"] = Module_six_moves_urllib_response(__name__ + ".moves.urllib_response") -sys.modules[__name__ + ".moves.urllib.response"] = Module_six_moves_urllib_response(__name__ + ".moves.urllib.response") +Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes + +_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"), + "moves.urllib_response", "moves.urllib.response") -class Module_six_moves_urllib_robotparser(types.ModuleType): +class Module_six_moves_urllib_robotparser(_LazyModule): + """Lazy loading of moved objects in six.moves.urllib_robotparser""" @@ -316,22 +460,27 @@ for attr in _urllib_robotparser_moved_attributes: setattr(Module_six_moves_urllib_robotparser, attr.name, attr) del attr -sys.modules[__name__ + ".moves.urllib_robotparser"] = Module_six_moves_urllib_robotparser( - __name__ + ".moves.urllib_robotparser") -sys.modules[__name__ + ".moves.urllib.robotparser"] = Module_six_moves_urllib_robotparser( - __name__ + ".moves.urllib.robotparser") +Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes + +_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"), + "moves.urllib_robotparser", "moves.urllib.robotparser") class Module_six_moves_urllib(types.ModuleType): + """Create a six.moves.urllib namespace that resembles the Python 3 namespace""" - parse = sys.modules[__name__ + ".moves.urllib_parse"] - error = 
sys.modules[__name__ + ".moves.urllib_error"] - request = sys.modules[__name__ + ".moves.urllib_request"] - response = sys.modules[__name__ + ".moves.urllib_response"] - robotparser = sys.modules[__name__ + ".moves.urllib_robotparser"] + __path__ = [] # mark as package + parse = _importer._get_module("moves.urllib_parse") + error = _importer._get_module("moves.urllib_error") + request = _importer._get_module("moves.urllib_request") + response = _importer._get_module("moves.urllib_response") + robotparser = _importer._get_module("moves.urllib_robotparser") + def __dir__(self): + return ['parse', 'error', 'request', 'response', 'robotparser'] -sys.modules[__name__ + ".moves.urllib"] = Module_six_moves_urllib(__name__ + ".moves.urllib") +_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"), + "moves.urllib") def add_move(move): @@ -358,11 +507,6 @@ if PY3: _func_code = "__code__" _func_defaults = "__defaults__" _func_globals = "__globals__" - - _iterkeys = "keys" - _itervalues = "values" - _iteritems = "items" - _iterlists = "lists" else: _meth_func = "im_func" _meth_self = "im_self" @@ -372,10 +516,6 @@ else: _func_defaults = "func_defaults" _func_globals = "func_globals" - _iterkeys = "iterkeys" - _itervalues = "itervalues" - _iteritems = "iteritems" - _iterlists = "iterlists" try: advance_iterator = next @@ -384,39 +524,44 @@ except NameError: return it.next() next = advance_iterator + try: callable = callable except NameError: def callable(obj): return any("__call__" in klass.__dict__ for klass in type(obj).__mro__) + if PY3: def get_unbound_function(unbound): return unbound - create_bound_method = types.MethodType + def create_unbound_method(func, cls): + return func + Iterator = object else: def get_unbound_function(unbound): return unbound.im_func - def create_bound_method(func, obj): return types.MethodType(func, obj, obj.__class__) + def create_unbound_method(func, cls): + return types.MethodType(func, None, cls) class Iterator(object): 
def next(self): return type(self).__next__(self) - callable = callable _add_doc(get_unbound_function, """Get the function out of a possibly unbound function""") + get_method_function = operator.attrgetter(_meth_func) get_method_self = operator.attrgetter(_meth_self) get_function_closure = operator.attrgetter(_func_closure) @@ -425,95 +570,121 @@ get_function_defaults = operator.attrgetter(_func_defaults) get_function_globals = operator.attrgetter(_func_globals) -def iterkeys(d, **kw): - """Return an iterator over the keys of a dictionary.""" - return iter(getattr(d, _iterkeys)(**kw)) +if PY3: + def iterkeys(d, **kw): + return iter(d.keys(**kw)) + def itervalues(d, **kw): + return iter(d.values(**kw)) -def itervalues(d, **kw): - """Return an iterator over the values of a dictionary.""" - return iter(getattr(d, _itervalues)(**kw)) + def iteritems(d, **kw): + return iter(d.items(**kw)) + def iterlists(d, **kw): + return iter(d.lists(**kw)) -def iteritems(d, **kw): - """Return an iterator over the (key, value) pairs of a dictionary.""" - return iter(getattr(d, _iteritems)(**kw)) + viewkeys = operator.methodcaller("keys") + viewvalues = operator.methodcaller("values") -def iterlists(d, **kw): - """Return an iterator over the (key, [values]) pairs of a dictionary.""" - return iter(getattr(d, _iterlists)(**kw)) + viewitems = operator.methodcaller("items") +else: + def iterkeys(d, **kw): + return d.iterkeys(**kw) + + def itervalues(d, **kw): + return d.itervalues(**kw) + + def iteritems(d, **kw): + return d.iteritems(**kw) + + def iterlists(d, **kw): + return d.iterlists(**kw) + + viewkeys = operator.methodcaller("viewkeys") + + viewvalues = operator.methodcaller("viewvalues") + + viewitems = operator.methodcaller("viewitems") + +_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.") +_add_doc(itervalues, "Return an iterator over the values of a dictionary.") +_add_doc(iteritems, + "Return an iterator over the (key, value) pairs of a dictionary.") 
+_add_doc(iterlists, + "Return an iterator over the (key, [values]) pairs of a dictionary.") if PY3: def b(s): return s.encode("latin-1") - def u(s): return s - - unichr = chr - if sys.version_info[1] <= 1: - def int2byte(i): - return bytes((i,)) - else: - # This is about 2x faster than the implementation above on 3.2+ - int2byte = operator.methodcaller("to_bytes", 1, "big") + import struct + int2byte = struct.Struct(">B").pack + del struct byte2int = operator.itemgetter(0) indexbytes = operator.getitem iterbytes = iter import io - StringIO = io.StringIO BytesIO = io.BytesIO + _assertCountEqual = "assertCountEqual" + if sys.version_info[1] <= 1: + _assertRaisesRegex = "assertRaisesRegexp" + _assertRegex = "assertRegexpMatches" + else: + _assertRaisesRegex = "assertRaisesRegex" + _assertRegex = "assertRegex" else: def b(s): return s - + # Workaround for standalone backslash def u(s): - return unicode(s, "unicode_escape") - - + return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape") unichr = unichr int2byte = chr - def byte2int(bs): return ord(bs[0]) - def indexbytes(buf, i): return ord(buf[i]) - - - def iterbytes(buf): - return (ord(byte) for byte in buf) - - + iterbytes = functools.partial(itertools.imap, ord) import StringIO - StringIO = BytesIO = StringIO.StringIO + _assertCountEqual = "assertItemsEqual" + _assertRaisesRegex = "assertRaisesRegexp" + _assertRegex = "assertRegexpMatches" _add_doc(b, """Byte literal""") _add_doc(u, """Text literal""") + +def assertCountEqual(self, *args, **kwargs): + return getattr(self, _assertCountEqual)(*args, **kwargs) + + +def assertRaisesRegex(self, *args, **kwargs): + return getattr(self, _assertRaisesRegex)(*args, **kwargs) + + +def assertRegex(self, *args, **kwargs): + return getattr(self, _assertRegex)(*args, **kwargs) + + if PY3: - import builtins - - exec_ = getattr(builtins, "exec") - + exec_ = getattr(moves.builtins, "exec") def reraise(tp, value, tb=None): + if value is None: + value = tp() if value.__traceback__ 
is not tb: raise value.with_traceback(tb) raise value - - print_ = getattr(builtins, "print") - del builtins - else: def exec_(_code_, _globs_=None, _locs_=None): """Execute code in a namespace.""" @@ -525,16 +696,32 @@ else: del frame elif _locs_ is None: _locs_ = _globs_ - exec ("""exec _code_ in _globs_, _locs_""") - + exec("""exec _code_ in _globs_, _locs_""") exec_("""def reraise(tp, value, tb=None): raise tp, value, tb """) +if sys.version_info[:2] == (3, 2): + exec_("""def raise_from(value, from_value): + if from_value is None: + raise value + raise value from from_value +""") +elif sys.version_info[:2] > (3, 2): + exec_("""def raise_from(value, from_value): + raise value from from_value +""") +else: + def raise_from(value, from_value): + raise value + + +print_ = getattr(moves.builtins, "print", None) +if print_ is None: def print_(*args, **kwargs): - """The new-style print function.""" + """The new-style print function for Python 2.4 and 2.5.""" fp = kwargs.pop("file", sys.stdout) if fp is None: return @@ -542,8 +729,15 @@ else: def write(data): if not isinstance(data, basestring): data = str(data) + # If the file has an encoding, encode unicode with it. 
+ if (isinstance(fp, file) and + isinstance(data, unicode) and + fp.encoding is not None): + errors = getattr(fp, "errors", None) + if errors is None: + errors = "strict" + data = data.encode(fp.encoding, errors) fp.write(data) - want_unicode = False sep = kwargs.pop("sep", None) if sep is not None: @@ -579,24 +773,96 @@ else: write(sep) write(arg) write(end) +if sys.version_info[:2] < (3, 3): + _print = print_ + + def print_(*args, **kwargs): + fp = kwargs.get("file", sys.stdout) + flush = kwargs.pop("flush", False) + _print(*args, **kwargs) + if flush and fp is not None: + fp.flush() _add_doc(reraise, """Reraise an exception.""") +if sys.version_info[0:2] < (3, 4): + def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS, + updated=functools.WRAPPER_UPDATES): + def wrapper(f): + f = functools.wraps(wrapped, assigned, updated)(f) + f.__wrapped__ = wrapped + return f + return wrapper +else: + wraps = functools.wraps + def with_metaclass(meta, *bases): """Create a base class with a metaclass.""" - return meta("NewBase", bases, {}) + # This requires a bit of explanation: the basic idea is to make a dummy + # metaclass for one level of class instantiation that replaces itself with + # the actual metaclass. 
+ class metaclass(meta): + + def __new__(cls, name, this_bases, d): + return meta(name, bases, d) + return type.__new__(metaclass, 'temporary_class', (), {}) def add_metaclass(metaclass): """Class decorator for creating a class with a metaclass.""" - def wrapper(cls): orig_vars = cls.__dict__.copy() + slots = orig_vars.get('__slots__') + if slots is not None: + if isinstance(slots, str): + slots = [slots] + for slots_var in slots: + orig_vars.pop(slots_var) orig_vars.pop('__dict__', None) orig_vars.pop('__weakref__', None) - for slots_var in orig_vars.get('__slots__', ()): - orig_vars.pop(slots_var) return metaclass(cls.__name__, cls.__bases__, orig_vars) - return wrapper + + +def python_2_unicode_compatible(klass): + """ + A decorator that defines __unicode__ and __str__ methods under Python 2. + Under Python 3 it does nothing. + + To support Python 2 and 3 with a single code base, define a __str__ method + returning text and apply this decorator to the class. + """ + if PY2: + if '__str__' not in klass.__dict__: + raise ValueError("@python_2_unicode_compatible cannot be applied " + "to %s because it doesn't define __str__()." % + klass.__name__) + klass.__unicode__ = klass.__str__ + klass.__str__ = lambda self: self.__unicode__().encode('utf-8') + return klass + + +# Complete the moves implementation. +# This code is at the end of this module to speed up module loading. +# Turn this module into a package. +__path__ = [] # required for PEP 302 and PEP 451 +__package__ = __name__ # see PEP 366 @ReservedAssignment +if globals().get("__spec__") is not None: + __spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable +# Remove other six meta path importers, since they cause problems. This can +# happen if six is removed from sys.modules and then reloaded. (Setuptools does +# this for some reason.) 
+if sys.meta_path: + for i, importer in enumerate(sys.meta_path): + # Here's some real nastiness: Another "instance" of the six module might + # be floating around. Therefore, we can't use isinstance() to check for + # the six meta path importer, since the other six instance will have + # inserted an importer with different class. + if (type(importer).__name__ == "_SixMetaPathImporter" and + importer.name == __name__): + del sys.meta_path[i] + break + del i, importer +# Finally, add the importer to the meta path import hook. +sys.meta_path.append(_importer) diff --git a/libs/six.py b/libs/six.py index ab2c7e85..190c0239 100644 --- a/libs/six.py +++ b/libs/six.py @@ -1,6 +1,6 @@ """Utilities for writing code that runs on Python 2 and 3""" -# Copyright (c) 2010-2014 Benjamin Peterson +# Copyright (c) 2010-2015 Benjamin Peterson # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal @@ -20,17 +20,22 @@ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. +from __future__ import absolute_import + +import functools +import itertools import operator import sys import types __author__ = "Benjamin Peterson " -__version__ = "1.5.2" +__version__ = "1.10.0" # Useful for very coarse version differentiation. PY2 = sys.version_info[0] == 2 PY3 = sys.version_info[0] == 3 +PY34 = sys.version_info[0:2] >= (3, 4) if PY3: string_types = str, @@ -53,6 +58,7 @@ else: else: # It's possible to have sizeof(long) != sizeof(Py_ssize_t). class X(object): + def __len__(self): return 1 << 31 try: @@ -84,9 +90,13 @@ class _LazyDescr(object): def __get__(self, obj, tp): result = self._resolve() - setattr(obj, self.name, result) # Invokes __set__. - # This is a bit ugly, but it avoids running this again. - delattr(obj.__class__, self.name) + setattr(obj, self.name, result) # Invokes __set__. 
+ try: + # This is a bit ugly, but it avoids running this again by + # removing this descriptor. + delattr(obj.__class__, self.name) + except AttributeError: + pass return result @@ -105,14 +115,6 @@ class MovedModule(_LazyDescr): return _import_module(self.mod) def __getattr__(self, attr): - # Hack around the Django autoreloader. The reloader tries to get - # __file__ or __name__ of every module in sys.modules. This doesn't work - # well if this MovedModule is for an module that is unavailable on this - # machine (like winreg on Unix systems). Thus, we pretend __file__ and - # __name__ don't exist if the module hasn't been loaded yet. See issues - # #51 and #53. - if attr in ("__file__", "__name__") and self.mod not in sys.modules: - raise AttributeError _module = self._resolve() value = getattr(_module, attr) setattr(self, attr, value) @@ -159,9 +161,75 @@ class MovedAttribute(_LazyDescr): return getattr(module, self.attr) +class _SixMetaPathImporter(object): + + """ + A meta path importer to import six.moves and its submodules. + + This class implements a PEP302 finder and loader. It should be compatible + with Python 2.5 and all existing versions of Python3 + """ + + def __init__(self, six_module_name): + self.name = six_module_name + self.known_modules = {} + + def _add_module(self, mod, *fullnames): + for fullname in fullnames: + self.known_modules[self.name + "." + fullname] = mod + + def _get_module(self, fullname): + return self.known_modules[self.name + "." 
+ fullname] + + def find_module(self, fullname, path=None): + if fullname in self.known_modules: + return self + return None + + def __get_module(self, fullname): + try: + return self.known_modules[fullname] + except KeyError: + raise ImportError("This loader does not know module " + fullname) + + def load_module(self, fullname): + try: + # in case of a reload + return sys.modules[fullname] + except KeyError: + pass + mod = self.__get_module(fullname) + if isinstance(mod, MovedModule): + mod = mod._resolve() + else: + mod.__loader__ = self + sys.modules[fullname] = mod + return mod + + def is_package(self, fullname): + """ + Return true, if the named module is a package. + + We need this method to get correct spec objects with + Python 3.4 (see PEP451) + """ + return hasattr(self.__get_module(fullname), "__path__") + + def get_code(self, fullname): + """Return None + + Required, if is_package is implemented""" + self.__get_module(fullname) # eventually raises ImportError + return None + get_source = get_code # same as get_code + +_importer = _SixMetaPathImporter(__name__) + class _MovedItems(_LazyModule): + """Lazy loading of moved objects""" + __path__ = [] # mark as package _moved_attributes = [ @@ -169,26 +237,33 @@ _moved_attributes = [ MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"), MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"), MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"), + MovedAttribute("intern", "__builtin__", "sys"), MovedAttribute("map", "itertools", "builtins", "imap", "map"), + MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"), + MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"), MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"), - MovedAttribute("reload_module", "__builtin__", "imp", "reload"), + MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"), MovedAttribute("reduce", 
"__builtin__", "functools"), + MovedAttribute("shlex_quote", "pipes", "shlex", "quote"), MovedAttribute("StringIO", "StringIO", "io"), + MovedAttribute("UserDict", "UserDict", "collections"), + MovedAttribute("UserList", "UserList", "collections"), MovedAttribute("UserString", "UserString", "collections"), MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"), MovedAttribute("zip", "itertools", "builtins", "izip", "zip"), MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"), - MovedModule("builtins", "__builtin__"), MovedModule("configparser", "ConfigParser"), MovedModule("copyreg", "copy_reg"), MovedModule("dbm_gnu", "gdbm", "dbm.gnu"), + MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"), MovedModule("http_cookiejar", "cookielib", "http.cookiejar"), MovedModule("http_cookies", "Cookie", "http.cookies"), MovedModule("html_entities", "htmlentitydefs", "html.entities"), MovedModule("html_parser", "HTMLParser", "html.parser"), MovedModule("http_client", "httplib", "http.client"), MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"), + MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"), MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"), MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"), MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"), @@ -222,25 +297,34 @@ _moved_attributes = [ MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"), MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"), MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"), - MovedModule("winreg", "_winreg"), + MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"), ] +# Add windows specific modules. 
+if sys.platform == "win32": + _moved_attributes += [ + MovedModule("winreg", "_winreg"), + ] + for attr in _moved_attributes: setattr(_MovedItems, attr.name, attr) if isinstance(attr, MovedModule): - sys.modules[__name__ + ".moves." + attr.name] = attr + _importer._add_module(attr, "moves." + attr.name) del attr _MovedItems._moved_attributes = _moved_attributes -moves = sys.modules[__name__ + ".moves"] = _MovedItems(__name__ + ".moves") +moves = _MovedItems(__name__ + ".moves") +_importer._add_module(moves, "moves") class Module_six_moves_urllib_parse(_LazyModule): + """Lazy loading of moved objects in six.moves.urllib_parse""" _urllib_parse_moved_attributes = [ MovedAttribute("ParseResult", "urlparse", "urllib.parse"), + MovedAttribute("SplitResult", "urlparse", "urllib.parse"), MovedAttribute("parse_qs", "urlparse", "urllib.parse"), MovedAttribute("parse_qsl", "urlparse", "urllib.parse"), MovedAttribute("urldefrag", "urlparse", "urllib.parse"), @@ -254,6 +338,14 @@ _urllib_parse_moved_attributes = [ MovedAttribute("unquote", "urllib", "urllib.parse"), MovedAttribute("unquote_plus", "urllib", "urllib.parse"), MovedAttribute("urlencode", "urllib", "urllib.parse"), + MovedAttribute("splitquery", "urllib", "urllib.parse"), + MovedAttribute("splittag", "urllib", "urllib.parse"), + MovedAttribute("splituser", "urllib", "urllib.parse"), + MovedAttribute("uses_fragment", "urlparse", "urllib.parse"), + MovedAttribute("uses_netloc", "urlparse", "urllib.parse"), + MovedAttribute("uses_params", "urlparse", "urllib.parse"), + MovedAttribute("uses_query", "urlparse", "urllib.parse"), + MovedAttribute("uses_relative", "urlparse", "urllib.parse"), ] for attr in _urllib_parse_moved_attributes: setattr(Module_six_moves_urllib_parse, attr.name, attr) @@ -261,10 +353,12 @@ del attr Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes -sys.modules[__name__ + ".moves.urllib_parse"] = sys.modules[__name__ + ".moves.urllib.parse"] = 
Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse") +_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"), + "moves.urllib_parse", "moves.urllib.parse") class Module_six_moves_urllib_error(_LazyModule): + """Lazy loading of moved objects in six.moves.urllib_error""" @@ -279,10 +373,12 @@ del attr Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes -sys.modules[__name__ + ".moves.urllib_error"] = sys.modules[__name__ + ".moves.urllib.error"] = Module_six_moves_urllib_error(__name__ + ".moves.urllib.error") +_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"), + "moves.urllib_error", "moves.urllib.error") class Module_six_moves_urllib_request(_LazyModule): + """Lazy loading of moved objects in six.moves.urllib_request""" @@ -327,10 +423,12 @@ del attr Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes -sys.modules[__name__ + ".moves.urllib_request"] = sys.modules[__name__ + ".moves.urllib.request"] = Module_six_moves_urllib_request(__name__ + ".moves.urllib.request") +_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"), + "moves.urllib_request", "moves.urllib.request") class Module_six_moves_urllib_response(_LazyModule): + """Lazy loading of moved objects in six.moves.urllib_response""" @@ -346,10 +444,12 @@ del attr Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes -sys.modules[__name__ + ".moves.urllib_response"] = sys.modules[__name__ + ".moves.urllib.response"] = Module_six_moves_urllib_response(__name__ + ".moves.urllib.response") +_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"), + "moves.urllib_response", "moves.urllib.response") class Module_six_moves_urllib_robotparser(_LazyModule): + """Lazy loading of moved objects in six.moves.urllib_robotparser""" @@ -362,22 +462,25 @@ del attr 
Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes -sys.modules[__name__ + ".moves.urllib_robotparser"] = sys.modules[__name__ + ".moves.urllib.robotparser"] = Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser") +_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"), + "moves.urllib_robotparser", "moves.urllib.robotparser") class Module_six_moves_urllib(types.ModuleType): + """Create a six.moves.urllib namespace that resembles the Python 3 namespace""" - parse = sys.modules[__name__ + ".moves.urllib_parse"] - error = sys.modules[__name__ + ".moves.urllib_error"] - request = sys.modules[__name__ + ".moves.urllib_request"] - response = sys.modules[__name__ + ".moves.urllib_response"] - robotparser = sys.modules[__name__ + ".moves.urllib_robotparser"] + __path__ = [] # mark as package + parse = _importer._get_module("moves.urllib_parse") + error = _importer._get_module("moves.urllib_error") + request = _importer._get_module("moves.urllib_request") + response = _importer._get_module("moves.urllib_response") + robotparser = _importer._get_module("moves.urllib_robotparser") def __dir__(self): return ['parse', 'error', 'request', 'response', 'robotparser'] - -sys.modules[__name__ + ".moves.urllib"] = Module_six_moves_urllib(__name__ + ".moves.urllib") +_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"), + "moves.urllib") def add_move(move): @@ -404,11 +507,6 @@ if PY3: _func_code = "__code__" _func_defaults = "__defaults__" _func_globals = "__globals__" - - _iterkeys = "keys" - _itervalues = "values" - _iteritems = "items" - _iterlists = "lists" else: _meth_func = "im_func" _meth_self = "im_self" @@ -418,11 +516,6 @@ else: _func_defaults = "func_defaults" _func_globals = "func_globals" - _iterkeys = "iterkeys" - _itervalues = "itervalues" - _iteritems = "iteritems" - _iterlists = "iterlists" - try: advance_iterator = next @@ -445,6 +538,9 @@ 
if PY3: create_bound_method = types.MethodType + def create_unbound_method(func, cls): + return func + Iterator = object else: def get_unbound_function(unbound): @@ -453,6 +549,9 @@ else: def create_bound_method(func, obj): return types.MethodType(func, obj, obj.__class__) + def create_unbound_method(func, cls): + return types.MethodType(func, None, cls) + class Iterator(object): def next(self): @@ -471,66 +570,117 @@ get_function_defaults = operator.attrgetter(_func_defaults) get_function_globals = operator.attrgetter(_func_globals) -def iterkeys(d, **kw): - """Return an iterator over the keys of a dictionary.""" - return iter(getattr(d, _iterkeys)(**kw)) +if PY3: + def iterkeys(d, **kw): + return iter(d.keys(**kw)) -def itervalues(d, **kw): - """Return an iterator over the values of a dictionary.""" - return iter(getattr(d, _itervalues)(**kw)) + def itervalues(d, **kw): + return iter(d.values(**kw)) -def iteritems(d, **kw): - """Return an iterator over the (key, value) pairs of a dictionary.""" - return iter(getattr(d, _iteritems)(**kw)) + def iteritems(d, **kw): + return iter(d.items(**kw)) -def iterlists(d, **kw): - """Return an iterator over the (key, [values]) pairs of a dictionary.""" - return iter(getattr(d, _iterlists)(**kw)) + def iterlists(d, **kw): + return iter(d.lists(**kw)) + + viewkeys = operator.methodcaller("keys") + + viewvalues = operator.methodcaller("values") + + viewitems = operator.methodcaller("items") +else: + def iterkeys(d, **kw): + return d.iterkeys(**kw) + + def itervalues(d, **kw): + return d.itervalues(**kw) + + def iteritems(d, **kw): + return d.iteritems(**kw) + + def iterlists(d, **kw): + return d.iterlists(**kw) + + viewkeys = operator.methodcaller("viewkeys") + + viewvalues = operator.methodcaller("viewvalues") + + viewitems = operator.methodcaller("viewitems") + +_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.") +_add_doc(itervalues, "Return an iterator over the values of a dictionary.") 
+_add_doc(iteritems, + "Return an iterator over the (key, value) pairs of a dictionary.") +_add_doc(iterlists, + "Return an iterator over the (key, [values]) pairs of a dictionary.") if PY3: def b(s): return s.encode("latin-1") + def u(s): return s unichr = chr - if sys.version_info[1] <= 1: - def int2byte(i): - return bytes((i,)) - else: - # This is about 2x faster than the implementation above on 3.2+ - int2byte = operator.methodcaller("to_bytes", 1, "big") + import struct + int2byte = struct.Struct(">B").pack + del struct byte2int = operator.itemgetter(0) indexbytes = operator.getitem iterbytes = iter import io StringIO = io.StringIO BytesIO = io.BytesIO + _assertCountEqual = "assertCountEqual" + if sys.version_info[1] <= 1: + _assertRaisesRegex = "assertRaisesRegexp" + _assertRegex = "assertRegexpMatches" + else: + _assertRaisesRegex = "assertRaisesRegex" + _assertRegex = "assertRegex" else: def b(s): return s # Workaround for standalone backslash + def u(s): return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape") unichr = unichr int2byte = chr + def byte2int(bs): return ord(bs[0]) + def indexbytes(buf, i): return ord(buf[i]) - def iterbytes(buf): - return (ord(byte) for byte in buf) + iterbytes = functools.partial(itertools.imap, ord) import StringIO StringIO = BytesIO = StringIO.StringIO + _assertCountEqual = "assertItemsEqual" + _assertRaisesRegex = "assertRaisesRegexp" + _assertRegex = "assertRegexpMatches" _add_doc(b, """Byte literal""") _add_doc(u, """Text literal""") +def assertCountEqual(self, *args, **kwargs): + return getattr(self, _assertCountEqual)(*args, **kwargs) + + +def assertRaisesRegex(self, *args, **kwargs): + return getattr(self, _assertRaisesRegex)(*args, **kwargs) + + +def assertRegex(self, *args, **kwargs): + return getattr(self, _assertRegex)(*args, **kwargs) + + if PY3: exec_ = getattr(moves.builtins, "exec") - def reraise(tp, value, tb=None): + if value is None: + value = tp() if value.__traceback__ is not tb: raise 
value.with_traceback(tb) raise value @@ -548,12 +698,26 @@ else: _locs_ = _globs_ exec("""exec _code_ in _globs_, _locs_""") - exec_("""def reraise(tp, value, tb=None): raise tp, value, tb """) +if sys.version_info[:2] == (3, 2): + exec_("""def raise_from(value, from_value): + if from_value is None: + raise value + raise value from from_value +""") +elif sys.version_info[:2] > (3, 2): + exec_("""def raise_from(value, from_value): + raise value from from_value +""") +else: + def raise_from(value, from_value): + raise value + + print_ = getattr(moves.builtins, "print", None) if print_ is None: def print_(*args, **kwargs): @@ -561,13 +725,14 @@ if print_ is None: fp = kwargs.pop("file", sys.stdout) if fp is None: return + def write(data): if not isinstance(data, basestring): data = str(data) # If the file has an encoding, encode unicode with it. if (isinstance(fp, file) and - isinstance(data, unicode) and - fp.encoding is not None): + isinstance(data, unicode) and + fp.encoding is not None): errors = getattr(fp, "errors", None) if errors is None: errors = "strict" @@ -608,25 +773,96 @@ if print_ is None: write(sep) write(arg) write(end) +if sys.version_info[:2] < (3, 3): + _print = print_ + + def print_(*args, **kwargs): + fp = kwargs.get("file", sys.stdout) + flush = kwargs.pop("flush", False) + _print(*args, **kwargs) + if flush and fp is not None: + fp.flush() _add_doc(reraise, """Reraise an exception.""") +if sys.version_info[0:2] < (3, 4): + def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS, + updated=functools.WRAPPER_UPDATES): + def wrapper(f): + f = functools.wraps(wrapped, assigned, updated)(f) + f.__wrapped__ = wrapped + return f + return wrapper +else: + wraps = functools.wraps + def with_metaclass(meta, *bases): """Create a base class with a metaclass.""" - return meta("NewBase", bases, {}) + # This requires a bit of explanation: the basic idea is to make a dummy + # metaclass for one level of class instantiation that replaces itself with + # the 
actual metaclass. + class metaclass(meta): + + def __new__(cls, name, this_bases, d): + return meta(name, bases, d) + return type.__new__(metaclass, 'temporary_class', (), {}) + def add_metaclass(metaclass): """Class decorator for creating a class with a metaclass.""" def wrapper(cls): orig_vars = cls.__dict__.copy() - orig_vars.pop('__dict__', None) - orig_vars.pop('__weakref__', None) slots = orig_vars.get('__slots__') if slots is not None: if isinstance(slots, str): slots = [slots] for slots_var in slots: orig_vars.pop(slots_var) + orig_vars.pop('__dict__', None) + orig_vars.pop('__weakref__', None) return metaclass(cls.__name__, cls.__bases__, orig_vars) - return wrapper \ No newline at end of file + return wrapper + + +def python_2_unicode_compatible(klass): + """ + A decorator that defines __unicode__ and __str__ methods under Python 2. + Under Python 3 it does nothing. + + To support Python 2 and 3 with a single code base, define a __str__ method + returning text and apply this decorator to the class. + """ + if PY2: + if '__str__' not in klass.__dict__: + raise ValueError("@python_2_unicode_compatible cannot be applied " + "to %s because it doesn't define __str__()." % + klass.__name__) + klass.__unicode__ = klass.__str__ + klass.__str__ = lambda self: self.__unicode__().encode('utf-8') + return klass + + +# Complete the moves implementation. +# This code is at the end of this module to speed up module loading. +# Turn this module into a package. +__path__ = [] # required for PEP 302 and PEP 451 +__package__ = __name__ # see PEP 366 @ReservedAssignment +if globals().get("__spec__") is not None: + __spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable +# Remove other six meta path importers, since they cause problems. This can +# happen if six is removed from sys.modules and then reloaded. (Setuptools does +# this for some reason.) 
+if sys.meta_path: + for i, importer in enumerate(sys.meta_path): + # Here's some real nastiness: Another "instance" of the six module might + # be floating around. Therefore, we can't use isinstance() to check for + # the six meta path importer, since the other six instance will have + # inserted an importer with different class. + if (type(importer).__name__ == "_SixMetaPathImporter" and + importer.name == __name__): + del sys.meta_path[i] + break + del i, importer +# Finally, add the importer to the meta path import hook. +sys.meta_path.append(_importer) From 382d108db238f8ac8c8b63295f88b2f696295e84 Mon Sep 17 00:00:00 2001 From: Labrys Date: Sat, 4 Jun 2016 22:10:26 -0400 Subject: [PATCH 25/82] Optimize imports * PEP8: Fix module level import not at top of file * Remove unused imports * Remove simplejson * Replace mimetools with email --- core/__init__.py | 10 ++++++---- core/databases/mainDB.py | 2 +- core/gh_api.py | 2 +- core/nzbToMediaAutoFork.py | 5 +++-- core/synchronousdeluge/__init__.py | 5 ++--- core/synchronousdeluge/rencode.py | 7 ++++--- core/transcoder/transcoder.py | 2 -- core/transmissionrpc/utils.py | 7 +++++-- core/utorrent/client.py | 6 +----- core/utorrent/upload.py | 4 ++-- 10 files changed, 25 insertions(+), 25 deletions(-) diff --git a/core/__init__.py b/core/__init__.py index 12c413f1..9f9fe812 100644 --- a/core/__init__.py +++ b/core/__init__.py @@ -33,10 +33,12 @@ from core.autoProcess.autoProcessMusic import autoProcessMusic from core.autoProcess.autoProcessTV import autoProcessTV from core import logger, versionCheck, nzbToMediaDB from core.nzbToMediaConfig import config -from core.nzbToMediaUtil import category_search, sanitizeName, copy_link, parse_args, flatten, getDirs, \ - rmReadOnly, rmDir, pause_torrent, resume_torrent, remove_torrent, listMediaFiles, \ - extractFiles, cleanDir, update_downloadInfoStatus, get_downloadInfo, WakeUp, makeDir, cleanDir, \ - create_torrent_class, listMediaFiles, RunningProcess +from 
core.nzbToMediaUtil import ( + category_search, sanitizeName, copy_link, parse_args, flatten, getDirs, + rmReadOnly, rmDir, pause_torrent, resume_torrent, remove_torrent, listMediaFiles, + extractFiles, cleanDir, update_downloadInfoStatus, get_downloadInfo, WakeUp, makeDir, cleanDir, + create_torrent_class, listMediaFiles, RunningProcess, + ) from core.transcoder import transcoder from core.databases import mainDB diff --git a/core/databases/mainDB.py b/core/databases/mainDB.py index 0d27b526..e32e6dae 100644 --- a/core/databases/mainDB.py +++ b/core/databases/mainDB.py @@ -1,5 +1,5 @@ # coding=utf-8 -import core + from core import logger, nzbToMediaDB from core.nzbToMediaUtil import backupVersionedFile diff --git a/core/gh_api.py b/core/gh_api.py index 95faf10e..1db7faf7 100644 --- a/core/gh_api.py +++ b/core/gh_api.py @@ -1,5 +1,5 @@ # coding=utf-8 -import json + import requests diff --git a/core/nzbToMediaAutoFork.py b/core/nzbToMediaAutoFork.py index d1c122c1..cfb4e60c 100644 --- a/core/nzbToMediaAutoFork.py +++ b/core/nzbToMediaAutoFork.py @@ -1,7 +1,8 @@ # coding=utf-8 -import urllib -import core + import requests + +import core from core import logger diff --git a/core/synchronousdeluge/__init__.py b/core/synchronousdeluge/__init__.py index 6155881f..9d4d8c77 100644 --- a/core/synchronousdeluge/__init__.py +++ b/core/synchronousdeluge/__init__.py @@ -15,10 +15,9 @@ Example usage: download_location = client.core.get_config_value("download_location").get() """ +from core.synchronousdeluge.exceptions import DelugeRPCError + __title__ = "synchronous-deluge" __version__ = "0.1" __author__ = "Christian Dale" - -from core.synchronousdeluge.exceptions import DelugeRPCError - diff --git a/core/synchronousdeluge/rencode.py b/core/synchronousdeluge/rencode.py index 62e22b08..0d960255 100644 --- a/core/synchronousdeluge/rencode.py +++ b/core/synchronousdeluge/rencode.py @@ -19,6 +19,10 @@ rencode module versions, so you should check that you are using the same rencode 
version throughout your project. """ +import struct +from threading import Lock + + __version__ = '1.0.1' __all__ = ['dumps', 'loads'] @@ -62,9 +66,6 @@ __all__ = ['dumps', 'loads'] # (The rencode module is licensed under the above license as well). # -import struct -from threading import Lock - # Default number of bits for serialized floats, either 32 or 64 (also a parameter for dumps()). DEFAULT_FLOAT_BITS = 32 diff --git a/core/transcoder/transcoder.py b/core/transcoder/transcoder.py index d31ef25e..9065c78a 100644 --- a/core/transcoder/transcoder.py +++ b/core/transcoder/transcoder.py @@ -3,8 +3,6 @@ import errno import os import platform import subprocess -import urllib2 -import traceback import core import json import shutil diff --git a/core/transmissionrpc/utils.py b/core/transmissionrpc/utils.py index 223921d6..c2bca855 100644 --- a/core/transmissionrpc/utils.py +++ b/core/transmissionrpc/utils.py @@ -2,10 +2,13 @@ # Copyright (c) 2008-2013 Erik Svensson # Licensed under the MIT license. 
-import socket, datetime, logging, constants +import constants +import datetime +import logging +import socket from collections import namedtuple -from constants import LOGGER +from constants import LOGGER from six import string_types, iteritems UNITS = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB'] diff --git a/core/utorrent/client.py b/core/utorrent/client.py index ae5d4634..f8989acf 100644 --- a/core/utorrent/client.py +++ b/core/utorrent/client.py @@ -4,14 +4,10 @@ import urllib import urllib2 import urlparse import cookielib +import json import re import StringIO -try: - import json -except ImportError: - import simplejson as json - from core.utorrent.upload import MultiPartForm diff --git a/core/utorrent/upload.py b/core/utorrent/upload.py index de149efc..ddf228cc 100644 --- a/core/utorrent/upload.py +++ b/core/utorrent/upload.py @@ -1,8 +1,8 @@ # coding=utf-8 # code copied from http://www.doughellmann.com/PyMOTW/urllib2/ +from email.generator import _make_boundary as make_boundary import itertools -import mimetools import mimetypes @@ -12,7 +12,7 @@ class MultiPartForm(object): def __init__(self): self.form_fields = [] self.files = [] - self.boundary = mimetools.choose_boundary() + self.boundary = make_boundary() return def get_content_type(self): From cf1ae938fccbc85228aa72f60737e0b40733a815 Mon Sep 17 00:00:00 2001 From: Labrys Date: Sat, 4 Jun 2016 23:53:26 -0400 Subject: [PATCH 26/82] Use six to standardize imports between Python 2 and Python 3 --- core/__init__.py | 4 +++- core/transmissionrpc/client.py | 8 ++----- core/transmissionrpc/httphandler.py | 20 +++++++---------- core/utorrent/client.py | 35 ++++++++++++++++------------- 4 files changed, 33 insertions(+), 34 deletions(-) diff --git a/core/__init__.py b/core/__init__.py index 9f9fe812..46eef2c3 100644 --- a/core/__init__.py +++ b/core/__init__.py @@ -1,4 +1,6 @@ # coding=utf-8 + +from six.moves import reload_module import locale import os import re @@ -252,7 +254,7 @@ def 
initialize(section=None): SYS_ENCODING = 'UTF-8' if not hasattr(sys, "setdefaultencoding"): - reload(sys) + reload_module(sys) try: # pylint: disable=E1101 diff --git a/core/transmissionrpc/client.py b/core/transmissionrpc/client.py index c451e685..b64b709d 100644 --- a/core/transmissionrpc/client.py +++ b/core/transmissionrpc/client.py @@ -18,12 +18,8 @@ from core.transmissionrpc.torrent import Torrent from core.transmissionrpc.session import Session from six import PY3, integer_types, string_types, iteritems -if PY3: - from urllib.parse import urlparse - from urllib.request import urlopen -else: - from urlparse import urlparse - from urllib2 import urlopen +from six.moves.urllib_parse import urlparse +from six.moves.urllib_request import urlopen def debug_httperror(error): diff --git a/core/transmissionrpc/httphandler.py b/core/transmissionrpc/httphandler.py index 1e884399..2968762e 100644 --- a/core/transmissionrpc/httphandler.py +++ b/core/transmissionrpc/httphandler.py @@ -4,19 +4,15 @@ import sys -from core.transmissionrpc.error import HTTPHandlerError -from six import PY3 +from six.moves.urllib_request import ( + build_opener, install_opener, + HTTPBasicAuthHandler, HTTPDigestAuthHandler, HTTPPasswordMgrWithDefaultRealm, + Request, +) +from six.moves.urllib_error import HTTPError, URLError +from six.moves.http_client import BadStatusLine -if PY3: - from urllib.request import Request, build_opener, \ - HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler, HTTPDigestAuthHandler - from urllib.error import HTTPError, URLError - from http.client import BadStatusLine -else: - from urllib2 import Request, build_opener, \ - HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler, HTTPDigestAuthHandler - from urllib2 import HTTPError, URLError - from httplib import BadStatusLine +from core.transmissionrpc.error import HTTPHandlerError class HTTPHandler(object): diff --git a/core/utorrent/client.py b/core/utorrent/client.py index f8989acf..f8ddc80d 100644 --- 
a/core/utorrent/client.py +++ b/core/utorrent/client.py @@ -1,12 +1,17 @@ # coding=utf8 -import urllib -import urllib2 -import urlparse -import cookielib import json import re -import StringIO + +from six import StringIO +from six.moves.http_cookiejar import CookieJar +from six.moves.urllib_error import HTTPError +from six.moves.urllib_parse import urljoin, urlencode +from six.moves.urllib_request import ( + build_opener, install_opener, + HTTPBasicAuthHandler, HTTPCookieProcessor, + Request, +) from core.utorrent.upload import MultiPartForm @@ -23,23 +28,23 @@ class UTorrentClient(object): def _make_opener(self, realm, base_url, username, password): """uTorrent API need HTTP Basic Auth and cookie support for token verify.""" - auth_handler = urllib2.HTTPBasicAuthHandler() + auth_handler = HTTPBasicAuthHandler() auth_handler.add_password(realm=realm, uri=base_url, user=username, passwd=password) - opener = urllib2.build_opener(auth_handler) - urllib2.install_opener(opener) + opener = build_opener(auth_handler) + install_opener(opener) - cookie_jar = cookielib.CookieJar() - cookie_handler = urllib2.HTTPCookieProcessor(cookie_jar) + cookie_jar = CookieJar() + cookie_handler = HTTPCookieProcessor(cookie_jar) handlers = [auth_handler, cookie_handler] - opener = urllib2.build_opener(*handlers) + opener = build_opener(*handlers) return opener def _get_token(self): - url = urlparse.urljoin(self.base_url, 'token.html') + url = urljoin(self.base_url, 'token.html') response = self.opener.open(url) token_re = "" match = re.search(token_re, response.read()) @@ -122,8 +127,8 @@ class UTorrentClient(object): def _action(self, params, body=None, content_type=None): # about token, see https://github.com/bittorrent/webui/wiki/TokenSystem - url = self.base_url + '?token=' + self.token + '&' + urllib.urlencode(params) - request = urllib2.Request(url) + url = self.base_url + '?token=' + self.token + '&' + urlencode(params) + request = Request(url) if body: request.add_data(body) @@ 
-134,5 +139,5 @@ class UTorrentClient(object): try: response = self.opener.open(request) return response.code, json.loads(response.read()) - except urllib2.HTTPError, e: + except HTTPError: raise From d4e5809a290168448c1030bb4ccf38551bb4372d Mon Sep 17 00:00:00 2001 From: Labrys Date: Sat, 4 Jun 2016 23:24:54 -0400 Subject: [PATCH 27/82] Use print_function to standardize printing between Python 2 and Python 3 --- core/__init__.py | 9 ++++++--- core/nzbToMediaDB.py | 5 +++-- core/nzbToMediaUtil.py | 6 ++++-- 3 files changed, 13 insertions(+), 7 deletions(-) diff --git a/core/__init__.py b/core/__init__.py index 46eef2c3..6b3ca265 100644 --- a/core/__init__.py +++ b/core/__init__.py @@ -1,6 +1,7 @@ # coding=utf-8 -from six.moves import reload_module +from __future__ import print_function + import locale import os import re @@ -9,6 +10,8 @@ import sys import platform import time +from six.moves import reload_module + # init libs PROGRAM_DIR = os.path.dirname(os.path.normpath(os.path.abspath(os.path.join(__file__, os.pardir)))) LIBS_DIR = os.path.join(PROGRAM_DIR, 'libs') @@ -261,8 +264,8 @@ def initialize(section=None): # On non-unicode builds this will raise an AttributeError, if encoding type is not valid it throws a LookupError sys.setdefaultencoding(SYS_ENCODING) except: - print 'Sorry, you MUST add the nzbToMedia folder to the PYTHONPATH environment variable' - print 'or find another way to force Python to use ' + SYS_ENCODING + ' for string encoding.' 
+ print('Sorry, you MUST add the nzbToMedia folder to the PYTHONPATH environment variable') + print('or find another way to force Python to use ' + SYS_ENCODING + ' for string encoding.') if 'NZBOP_SCRIPTDIR' in os.environ: sys.exit(NZBGET_POSTPROCESS_ERROR) else: diff --git a/core/nzbToMediaDB.py b/core/nzbToMediaDB.py index e2ff20f6..0e1af3a9 100644 --- a/core/nzbToMediaDB.py +++ b/core/nzbToMediaDB.py @@ -1,5 +1,6 @@ # coding=utf-8 -from __future__ import with_statement + +from __future__ import print_function, with_statement import re import sqlite3 @@ -228,7 +229,7 @@ def _processUpgrade(connection, upgradeClass): try: instance.execute() except sqlite3.DatabaseError, e: - print "Error in " + str(upgradeClass.__name__) + ": " + str(e) + print("Error in " + str(upgradeClass.__name__) + ": " + str(e)) raise logger.log(upgradeClass.__name__ + " upgrade completed", logger.DEBUG) else: diff --git a/core/nzbToMediaUtil.py b/core/nzbToMediaUtil.py index 9e5eeaa5..6883b926 100644 --- a/core/nzbToMediaUtil.py +++ b/core/nzbToMediaUtil.py @@ -1,5 +1,7 @@ # coding=utf-8 -from __future__ import unicode_literals + +from __future__ import print_function, unicode_literals + import os import re import socket @@ -455,7 +457,7 @@ def convert_to_ascii(inputName, dirName): logger.info("Renaming directory to: %s." % (base2), 'ENCODER') os.rename(os.path.join(dir, base), dirName) if 'NZBOP_SCRIPTDIR' in os.environ: - print "[NZB] DIRECTORY=%s" % (dirName) # Return the new directory to NZBGet. 
+ print("[NZB] DIRECTORY=%s" % (dirName)) for dirname, dirnames, filenames in os.walk(dirName, topdown=False): for subdirname in dirnames: From abf63d6bbea8712d07bf1af97b5090c488635f2d Mon Sep 17 00:00:00 2001 From: Labrys Date: Sun, 5 Jun 2016 08:58:17 -0400 Subject: [PATCH 28/82] Use six.iteritems helper * Standardizes dict.iteritems between Python 2 and Python 3 --- core/nzbToMediaAutoFork.py | 4 +++- core/nzbToMediaConfig.py | 6 ++++-- core/transcoder/transcoder.py | 6 ++++-- 3 files changed, 11 insertions(+), 5 deletions(-) diff --git a/core/nzbToMediaAutoFork.py b/core/nzbToMediaAutoFork.py index cfb4e60c..ce1949c5 100644 --- a/core/nzbToMediaAutoFork.py +++ b/core/nzbToMediaAutoFork.py @@ -2,6 +2,8 @@ import requests +from six import iteritems + import core from core import logger @@ -89,7 +91,7 @@ def autoFork(section, inputCategory): rem_params.append(param) for param in rem_params: params.pop(param) - for fork in sorted(core.FORKS.iteritems(), reverse=False): + for fork in sorted(iteritems(core.FORKS), reverse=False): if params == fork[1]: detected = True break diff --git a/core/nzbToMediaConfig.py b/core/nzbToMediaConfig.py index 75d8ad19..bb1202ce 100644 --- a/core/nzbToMediaConfig.py +++ b/core/nzbToMediaConfig.py @@ -1,4 +1,6 @@ # coding=utf-8 + +from six import iteritems import os import shutil import copy @@ -150,7 +152,7 @@ class ConfigObj(configobj.ConfigObj, Section): continue def cleanup_values(values, section): - for option, value in values.iteritems(): + for option, value in iteritems(values): if section in ['CouchPotato']: if option == ['outputDirectory']: CFG_NEW['Torrent'][option] = os.path.split(os.path.normpath(value))[0] @@ -227,7 +229,7 @@ class ConfigObj(configobj.ConfigObj, Section): subsection = None if section in list(chain.from_iterable(subsections.values())): subsection = section - section = ''.join([k for k, v in subsections.iteritems() if subsection in v]) + section = ''.join([k for k, v in iteritems(subsections) if subsection 
in v]) process_section(section, subsection) elif section in subsections.keys(): subsection = subsections[section] diff --git a/core/transcoder/transcoder.py b/core/transcoder/transcoder.py index 9065c78a..730ce77e 100644 --- a/core/transcoder/transcoder.py +++ b/core/transcoder/transcoder.py @@ -1,4 +1,6 @@ # coding=utf-8 + +from six import iteritems import errno import os import platform @@ -128,7 +130,7 @@ def buildCommands(file, newDir, movieName, bitbucket): if ext == core.VEXTENSION and newDir == dir: # we need to change the name to prevent overwriting itself. core.VEXTENSION = '-transcoded' + core.VEXTENSION # adds '-transcoded.ext' else: - img, data = file.iteritems().next() + img, data = iteritems(file).next() name = data['name'] video_details, result = getVideoDetails(data['files'][0], img, bitbucket) inputFile = '-' @@ -774,7 +776,7 @@ def Transcode_directory(dirName): if isinstance(file, str): proc = subprocess.Popen(command, stdout=bitbucket, stderr=bitbucket) else: - img, data = file.iteritems().next() + img, data = iteritems(file).next() proc = subprocess.Popen(command, stdout=bitbucket, stderr=bitbucket, stdin=subprocess.PIPE) for vob in data['files']: procin = zip_out(vob, img, bitbucket) From ec2fc8d5380f81366e9499b92afb6f5b666ca3c2 Mon Sep 17 00:00:00 2001 From: Labrys Date: Sat, 4 Jun 2016 23:32:32 -0400 Subject: [PATCH 29/82] Use six.text_type * Standarizes unicode function between Python 2 and Python 3 --- core/nzbToMediaUtil.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/core/nzbToMediaUtil.py b/core/nzbToMediaUtil.py index 6883b926..897ac36a 100644 --- a/core/nzbToMediaUtil.py +++ b/core/nzbToMediaUtil.py @@ -1,7 +1,7 @@ # coding=utf-8 from __future__ import print_function, unicode_literals - +from six import text_type import os import re import socket @@ -1216,7 +1216,7 @@ def update_downloadInfoStatus(inputName, status): myDB = nzbToMediaDB.DBConnection() myDB.action("UPDATE downloads SET status=?, last_update=? 
WHERE input_name=?", - [status, datetime.date.today().toordinal(), unicode(inputName)]) + [status, datetime.date.today().toordinal(), text_type(inputName)]) def get_downloadInfo(inputName, status): @@ -1224,7 +1224,7 @@ def get_downloadInfo(inputName, status): myDB = nzbToMediaDB.DBConnection() sqlResults = myDB.select("SELECT * FROM downloads WHERE input_name=? AND status=?", - [unicode(inputName), status]) + [text_type(inputName), status]) return sqlResults From 5903538ae50019e30fa01e145decaebe68cb6062 Mon Sep 17 00:00:00 2001 From: Labrys Date: Sat, 4 Jun 2016 23:34:56 -0400 Subject: [PATCH 30/82] Python 3: Make long = int --- core/synchronousdeluge/rencode.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/core/synchronousdeluge/rencode.py b/core/synchronousdeluge/rencode.py index 0d960255..843be62d 100644 --- a/core/synchronousdeluge/rencode.py +++ b/core/synchronousdeluge/rencode.py @@ -21,7 +21,10 @@ same rencode version throughout your project. import struct from threading import Lock +from six import PY3 +if PY3: + long = int __version__ = '1.0.1' __all__ = ['dumps', 'loads'] From ec71e7806d39cdd940f352e48db599851453bfdc Mon Sep 17 00:00:00 2001 From: Labrys Date: Sat, 4 Jun 2016 23:17:50 -0400 Subject: [PATCH 31/82] Python 3: Convert except ExceptClass, Target: to except ExceptClass as Target: --- core/nzbToMediaConfig.py | 8 ++++---- core/nzbToMediaDB.py | 16 ++++++++-------- core/nzbToMediaSceneExceptions.py | 4 ++-- core/nzbToMediaUtil.py | 8 ++++---- core/synchronousdeluge/client.py | 4 ++-- core/transcoder/transcoder.py | 4 ++-- core/versionCheck.py | 12 ++++++------ 7 files changed, 28 insertions(+), 28 deletions(-) diff --git a/core/nzbToMediaConfig.py b/core/nzbToMediaConfig.py index bb1202ce..6303a698 100644 --- a/core/nzbToMediaConfig.py +++ b/core/nzbToMediaConfig.py @@ -118,7 +118,7 @@ class ConfigObj(configobj.ConfigObj, Section): if not os.path.isfile(core.CONFIG_FILE): shutil.copyfile(core.CONFIG_SPEC_FILE, core.CONFIG_FILE) CFG_OLD = 
config(core.CONFIG_FILE) - except Exception, e: + except Exception as e: logger.debug("Error %s when copying to .cfg" % (e)) try: @@ -126,7 +126,7 @@ class ConfigObj(configobj.ConfigObj, Section): if not os.path.isfile(core.CONFIG_SPEC_FILE): shutil.copyfile(core.CONFIG_FILE, core.CONFIG_SPEC_FILE) CFG_NEW = config(core.CONFIG_SPEC_FILE) - except Exception, e: + except Exception as e: logger.debug("Error %s when copying to .spec" % (e)) # check for autoProcessMedia.cfg and autoProcessMedia.cfg.spec and if they don't exist return and fail @@ -462,14 +462,14 @@ class ConfigObj(configobj.ConfigObj, Section): CFG_NEW[section][os.environ[envCatKey]][option] = value CFG_NEW[section][os.environ[envCatKey]]['enabled'] = 1 - except Exception, e: + except Exception as e: logger.debug("Error %s when applying NZBGet config" % (e)) try: # write our new config to autoProcessMedia.cfg CFG_NEW.filename = core.CONFIG_FILE CFG_NEW.write() - except Exception, e: + except Exception as e: logger.debug("Error %s when writing changes to .cfg" % (e)) return CFG_NEW diff --git a/core/nzbToMediaDB.py b/core/nzbToMediaDB.py index 0e1af3a9..e11b7dbb 100644 --- a/core/nzbToMediaDB.py +++ b/core/nzbToMediaDB.py @@ -37,7 +37,7 @@ class DBConnection: result = None try: result = self.select("SELECT db_version FROM db_version") - except sqlite3.OperationalError, e: + except sqlite3.OperationalError as e: if "no such table: db_version" in e.args[0]: return 0 @@ -68,7 +68,7 @@ class DBConnection: # get out of the connection attempt loop since we were successful break - except sqlite3.OperationalError, e: + except sqlite3.OperationalError as e: if "unable to open database file" in e.args[0] or "database is locked" in e.args[0]: logger.log(u"DB error: " + str(e), logger.WARNING) attempt += 1 @@ -76,7 +76,7 @@ class DBConnection: else: logger.log(u"DB error: " + str(e), logger.ERROR) raise - except sqlite3.DatabaseError, e: + except sqlite3.DatabaseError as e: logger.log(u"Fatal error executing query: " 
+ str(e), logger.ERROR) raise @@ -103,7 +103,7 @@ class DBConnection: self.connection.commit() logger.log(u"Transaction with " + str(len(querylist)) + u" query's executed", logger.DEBUG) return sqlResult - except sqlite3.OperationalError, e: + except sqlite3.OperationalError as e: sqlResult = [] if self.connection: self.connection.rollback() @@ -114,7 +114,7 @@ class DBConnection: else: logger.log(u"DB error: " + str(e), logger.ERROR) raise - except sqlite3.DatabaseError, e: + except sqlite3.DatabaseError as e: sqlResult = [] if self.connection: self.connection.rollback() @@ -141,7 +141,7 @@ class DBConnection: self.connection.commit() # get out of the connection attempt loop since we were successful break - except sqlite3.OperationalError, e: + except sqlite3.OperationalError as e: if "unable to open database file" in e.args[0] or "database is locked" in e.args[0]: logger.log(u"DB error: " + str(e), logger.WARNING) attempt += 1 @@ -149,7 +149,7 @@ class DBConnection: else: logger.log(u"DB error: " + str(e), logger.ERROR) raise - except sqlite3.DatabaseError, e: + except sqlite3.DatabaseError as e: logger.log(u"Fatal error executing query: " + str(e), logger.ERROR) raise @@ -228,7 +228,7 @@ def _processUpgrade(connection, upgradeClass): logger.log(u"Database upgrade required: " + prettyName(upgradeClass.__name__), logger.MESSAGE) try: instance.execute() - except sqlite3.DatabaseError, e: + except sqlite3.DatabaseError as e: print("Error in " + str(upgradeClass.__name__) + ": " + str(e)) raise logger.log(upgradeClass.__name__ + " upgrade completed", logger.DEBUG) diff --git a/core/nzbToMediaSceneExceptions.py b/core/nzbToMediaSceneExceptions.py index b37ec268..40700961 100644 --- a/core/nzbToMediaSceneExceptions.py +++ b/core/nzbToMediaSceneExceptions.py @@ -64,7 +64,7 @@ def rename_file(filename, newfilePath): logger.debug("Replacing file name %s with download name %s" % (filename, newfilePath), "EXCEPTION") try: os.rename(filename, newfilePath) - except Exception, 
e: + except Exception as e: logger.error("Unable to rename file due to: %s" % (str(e)), "EXCEPTION") @@ -132,7 +132,7 @@ def rename_script(dirname): logger.debug("Renaming file %s to %s" % (orig, dest), "EXCEPTION") try: os.rename(orig, dest) - except Exception, e: + except Exception as e: logger.error("Unable to rename file due to: %s" % (str(e)), "EXCEPTION") # dict for custom groups diff --git a/core/nzbToMediaUtil.py b/core/nzbToMediaUtil.py index 897ac36a..0c81cbe8 100644 --- a/core/nzbToMediaUtil.py +++ b/core/nzbToMediaUtil.py @@ -74,7 +74,7 @@ def makeDir(path): if not os.path.isdir(path): try: os.makedirs(path) - except Exception, e: + except Exception: return False return True @@ -262,7 +262,7 @@ def copy_link(src, targetLink, useLink): logger.info("Moving SOURCE MEDIAFILE -> TARGET FOLDER", 'COPYLINK') shutil.move(src, targetLink) return True - except Exception, e: + except Exception as e: logger.warning("Error: %s, copying instead ... " % (e), 'COPYLINK') logger.info("Copying SOURCE MEDIAFILE -> TARGET FOLDER", 'COPYLINK') @@ -1104,7 +1104,7 @@ def extractFiles(src, dst=None, keep_archive=None): if extractor.extract(inputFile, dst or dirPath): extracted_folder.append(dst or dirPath) extracted_archive.append(archiveName) - except Exception, e: + except Exception: logger.error("Extraction failed for: %s" % (fullFileName)) for folder in extracted_folder: @@ -1197,7 +1197,7 @@ def backupVersionedFile(old_file, version): shutil.copy(old_file, new_file) logger.log(u"Backup done", logger.DEBUG) break - except Exception, e: + except Exception as e: logger.log(u"Error while trying to back up " + old_file + " to " + new_file + " : " + str(e), logger.WARNING) numTries += 1 diff --git a/core/synchronousdeluge/client.py b/core/synchronousdeluge/client.py index af2c740c..fa80fb46 100644 --- a/core/synchronousdeluge/client.py +++ b/core/synchronousdeluge/client.py @@ -40,7 +40,7 @@ class DelugeClient(object): from xdg.BaseDirectory import save_config_path try: 
auth_file = os.path.join(save_config_path("deluge"), "auth") - except OSError, e: + except OSError: return username, password if os.path.exists(auth_file): @@ -51,7 +51,7 @@ class DelugeClient(object): line = line.strip() try: lsplit = line.split(":") - except Exception, e: + except Exception: continue if len(lsplit) == 2: diff --git a/core/transcoder/transcoder.py b/core/transcoder/transcoder.py index 730ce77e..46e95896 100644 --- a/core/transcoder/transcoder.py +++ b/core/transcoder/transcoder.py @@ -763,10 +763,10 @@ def Transcode_directory(dirName): try: # Try to remove the file that we're transcoding to just in case. (ffmpeg will return an error if it already exists for some reason) os.remove(newfilePath) - except OSError, e: + except OSError as e: if e.errno != errno.ENOENT: # Ignore the error if it's just telling us that the file doesn't exist logger.debug("Error when removing transcoding target: %s" % (e)) - except Exception, e: + except Exception as e: logger.debug("Error when removing transcoding target: %s" % (e)) logger.info("Transcoding video: %s" % (newfilePath)) diff --git a/core/versionCheck.py b/core/versionCheck.py index b71f903c..f3cd3c1e 100644 --- a/core/versionCheck.py +++ b/core/versionCheck.py @@ -312,7 +312,7 @@ class GitUpdateManager(UpdateManager): else: try: self._check_github_for_update() - except Exception, e: + except Exception as e: logger.log(u"Unable to contact github, can't check for update: " + repr(e), logger.ERROR) return False @@ -356,7 +356,7 @@ class SourceUpdateManager(UpdateManager): try: with open(version_file, 'r') as fp: self._cur_commit_hash = fp.read().strip(' \n\r') - except EnvironmentError, e: + except EnvironmentError as e: logger.log(u"Unable to open 'version.txt': " + str(e), logger.DEBUG) if not self._cur_commit_hash: @@ -370,7 +370,7 @@ class SourceUpdateManager(UpdateManager): try: self._check_github_for_update() - except Exception, e: + except Exception as e: logger.log(u"Unable to contact github, can't 
check for update: " + repr(e), logger.ERROR) return False @@ -504,7 +504,7 @@ class SourceUpdateManager(UpdateManager): os.chmod(new_path, stat.S_IWRITE) os.remove(new_path) os.renames(old_path, new_path) - except Exception, e: + except Exception as e: logger.log(u"Unable to update " + new_path + ': ' + str(e), logger.DEBUG) os.remove(old_path) # Trash the updated file without moving in new path continue @@ -517,11 +517,11 @@ class SourceUpdateManager(UpdateManager): try: with open(version_path, 'w') as ver_file: ver_file.write(self._newest_commit_hash) - except EnvironmentError, e: + except EnvironmentError as e: logger.log(u"Unable to write version file, update not complete: " + str(e), logger.ERROR) return False - except Exception, e: + except Exception as e: logger.log(u"Error while trying to update: " + str(e), logger.ERROR) logger.log(u"Traceback: " + traceback.format_exc(), logger.DEBUG) return False From 8434fd54193c8dadca31e70d87f3042468e0ea5e Mon Sep 17 00:00:00 2001 From: Labrys Date: Sun, 5 Jun 2016 09:35:33 -0400 Subject: [PATCH 32/82] Python 3: Fix relative imports --- core/synchronousdeluge/client.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/core/synchronousdeluge/client.py b/core/synchronousdeluge/client.py index fa80fb46..050d414e 100644 --- a/core/synchronousdeluge/client.py +++ b/core/synchronousdeluge/client.py @@ -4,9 +4,9 @@ import platform from collections import defaultdict from itertools import imap -from exceptions import DelugeRPCError -from protocol import DelugeRPCRequest, DelugeRPCResponse -from transfer import DelugeTransfer +from .exceptions import DelugeRPCError +from .protocol import DelugeRPCRequest, DelugeRPCResponse +from .transfer import DelugeTransfer __all__ = ["DelugeClient"] From 76a00b249316bc577d52cb385534cfff257a103b Mon Sep 17 00:00:00 2001 From: Labrys Date: Sat, 4 Jun 2016 23:01:15 -0400 Subject: [PATCH 33/82] Convert to new-style class by inheriting from object --- core/logger.py | 2 +- 
core/nzbToMediaConfig.py | 2 +- core/nzbToMediaDB.py | 2 +- core/nzbToMediaUtil.py | 6 +++--- core/versionCheck.py | 4 ++-- 5 files changed, 8 insertions(+), 8 deletions(-) diff --git a/core/logger.py b/core/logger.py index b3800a98..86772c15 100644 --- a/core/logger.py +++ b/core/logger.py @@ -236,7 +236,7 @@ class NTMRotatingLogHandler(object): sys.exit(1) -class DispatchingFormatter: +class DispatchingFormatter(object): def __init__(self, formatters, default_formatter): self._formatters = formatters self._default_formatter = default_formatter diff --git a/core/nzbToMediaConfig.py b/core/nzbToMediaConfig.py index 6303a698..badc597f 100644 --- a/core/nzbToMediaConfig.py +++ b/core/nzbToMediaConfig.py @@ -11,7 +11,7 @@ from core import logger from itertools import chain -class Section(configobj.Section): +class Section(configobj.Section, object): def isenabled(section): # checks if subsection enabled, returns true/false if subsection specified otherwise returns true/false in {} if not section.sections: diff --git a/core/nzbToMediaDB.py b/core/nzbToMediaDB.py index e11b7dbb..c8a22318 100644 --- a/core/nzbToMediaDB.py +++ b/core/nzbToMediaDB.py @@ -23,7 +23,7 @@ def dbFilename(filename="nzbtomedia.db", suffix=None): return core.os.path.join(core.PROGRAM_DIR, filename) -class DBConnection: +class DBConnection(object): def __init__(self, filename="nzbtomedia.db", suffix=None, row_type=None): self.filename = filename diff --git a/core/nzbToMediaUtil.py b/core/nzbToMediaUtil.py index 0c81cbe8..3f8f0f7e 100644 --- a/core/nzbToMediaUtil.py +++ b/core/nzbToMediaUtil.py @@ -1229,7 +1229,7 @@ def get_downloadInfo(inputName, status): return sqlResults -class RunningProcess(): +class RunningProcess(object): """ Limits application to single instance """ def __init__(self): @@ -1245,7 +1245,7 @@ class RunningProcess(): # self.process.__del__() -class WindowsProcess(): +class WindowsProcess(object): def __init__(self): self.mutexname = "nzbtomedia_" + core.PID_FILE.replace('\\', 
'/') # {D0E858DF-985E-4907-B7FB-8D732C3FC3B9}" if platform.system() == 'Windows': @@ -1271,7 +1271,7 @@ class WindowsProcess(): self.CloseHandle(self.mutex) -class PosixProcess(): +class PosixProcess(object): def __init__(self): self.pidpath = core.PID_FILE self.lock_socket = None diff --git a/core/versionCheck.py b/core/versionCheck.py index f3cd3c1e..fd58af45 100644 --- a/core/versionCheck.py +++ b/core/versionCheck.py @@ -17,7 +17,7 @@ import core from core import logger -class CheckVersion(): +class CheckVersion(object): """ Version check class meant to run as a thread object with the SB scheduler. """ @@ -82,7 +82,7 @@ class CheckVersion(): return self.updater.update() -class UpdateManager(): +class UpdateManager(object): def get_github_repo_user(self): return core.GIT_USER From 22daf021670e56d6dd7857e8489320d9042b281a Mon Sep 17 00:00:00 2001 From: Labrys Date: Sat, 4 Jun 2016 23:35:11 -0400 Subject: [PATCH 34/82] Python 3: Raise with no arguments can only be used in an except block --- core/nzbToMediaUtil.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/nzbToMediaUtil.py b/core/nzbToMediaUtil.py index 3f8f0f7e..178dc784 100644 --- a/core/nzbToMediaUtil.py +++ b/core/nzbToMediaUtil.py @@ -724,7 +724,7 @@ def onerror(func, path, exc_info): os.chmod(path, stat.S_IWUSR) func(path) else: - raise + raise Exception def rmDir(dirName): From eb1ee8b5f57f85ede15ba22724fb49be947cb42d Mon Sep 17 00:00:00 2001 From: Labrys Date: Sun, 5 Jun 2016 09:28:47 -0400 Subject: [PATCH 35/82] Set expected, but passing list --- core/nzbToMediaUtil.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/nzbToMediaUtil.py b/core/nzbToMediaUtil.py index 178dc784..30bbf794 100644 --- a/core/nzbToMediaUtil.py +++ b/core/nzbToMediaUtil.py @@ -1144,7 +1144,7 @@ def import_subs(filename): logger.debug("Attempting to download subtitles for %s" % (filename), 'SUBTITLES') try: video = subliminal.scan_video(filename, subtitles=True, 
embedded_subtitles=True) - subtitles = subliminal.download_best_subtitles([video], languages, hearing_impaired=False) + subtitles = subliminal.download_best_subtitles({video}, languages, hearing_impaired=False) subliminal.save_subtitles(subtitles) except Exception as e: logger.error("Failed to download subtitles for %s due to: %s" % (filename, e), 'SUBTITLES') From a983c6c7bead63cb9cbcf5126764d6971364ca64 Mon Sep 17 00:00:00 2001 From: Labrys Date: Sun, 5 Jun 2016 09:49:59 -0400 Subject: [PATCH 36/82] Streamline conditional statements * Non-zero ints evaluate True. --- core/autoProcess/autoProcessMovie.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/core/autoProcess/autoProcessMovie.py b/core/autoProcess/autoProcessMovie.py index a5846e14..a4e4b0ba 100644 --- a/core/autoProcess/autoProcessMovie.py +++ b/core/autoProcess/autoProcessMovie.py @@ -169,11 +169,11 @@ class autoProcessMovie(object): if transcoder.isVideoGood(video, status): import_subs(video) good_files += 1 - if num_files > 0 and good_files == num_files: + if num_files and good_files == num_files: if status: logger.info("Status shown as failed from Downloader, but {0} valid video files found. Setting as success.".format(good_files), section) status = 0 - elif num_files > 0 and good_files < num_files: + elif num_files and good_files < num_files: logger.info("Status shown as success from Downloader, but corrupt video files found. 
Setting as failed.", section) if 'NZBOP_VERSION' in os.environ and os.environ['NZBOP_VERSION'][0:5] >= '14.0': print('[NZB] MARK=BAD') From 47c585d81c81ee456b21e719f1eb0adea3ac249b Mon Sep 17 00:00:00 2001 From: Labrys Date: Sun, 5 Jun 2016 09:55:56 -0400 Subject: [PATCH 37/82] Rewrite dictionary creation as a dictionary literal --- core/nzbToMediaUtil.py | 20 +++++----- core/synchronousdeluge/rencode.py | 66 ++++++++++++++++--------------- 2 files changed, 45 insertions(+), 41 deletions(-) diff --git a/core/nzbToMediaUtil.py b/core/nzbToMediaUtil.py index 30bbf794..f1098330 100644 --- a/core/nzbToMediaUtil.py +++ b/core/nzbToMediaUtil.py @@ -870,11 +870,12 @@ def find_download(clientAgent, download_id): else: baseURL = "http://%s:%s/api" % (core.SABNZBDHOST, core.SABNZBDPORT) url = baseURL - params = {} - params['apikey'] = core.SABNZBDAPIKEY - params['mode'] = "get_files" - params['output'] = 'json' - params['value'] = download_id + params = { + 'apikey': core.SABNZBDAPIKEY, + 'mode': "get_files", + 'output': 'json', + 'value': download_id, + } try: r = requests.get(url, params=params, verify=False, timeout=(30, 120)) except requests.ConnectionError: @@ -896,10 +897,11 @@ def get_nzoid(inputName): else: baseURL = "http://%s:%s/api" % (core.SABNZBDHOST, core.SABNZBDPORT) url = baseURL - params = {} - params['apikey'] = core.SABNZBDAPIKEY - params['mode'] = "queue" - params['output'] = 'json' + params = { + 'apikey': core.SABNZBDAPIKEY, + 'mode': "queue", + 'output': 'json', + } try: r = requests.get(url, params=params, verify=False, timeout=(30, 120)) except requests.ConnectionError: diff --git a/core/synchronousdeluge/rencode.py b/core/synchronousdeluge/rencode.py index 843be62d..f0fcc69e 100644 --- a/core/synchronousdeluge/rencode.py +++ b/core/synchronousdeluge/rencode.py @@ -208,29 +208,30 @@ def decode_none(x, f): return (None, f + 1) -decode_func = {} -decode_func['0'] = decode_string -decode_func['1'] = decode_string -decode_func['2'] = decode_string 
-decode_func['3'] = decode_string -decode_func['4'] = decode_string -decode_func['5'] = decode_string -decode_func['6'] = decode_string -decode_func['7'] = decode_string -decode_func['8'] = decode_string -decode_func['9'] = decode_string -decode_func[CHR_LIST] = decode_list -decode_func[CHR_DICT] = decode_dict -decode_func[CHR_INT] = decode_int -decode_func[CHR_INT1] = decode_intb -decode_func[CHR_INT2] = decode_inth -decode_func[CHR_INT4] = decode_intl -decode_func[CHR_INT8] = decode_intq -decode_func[CHR_FLOAT32] = decode_float32 -decode_func[CHR_FLOAT64] = decode_float64 -decode_func[CHR_TRUE] = decode_true -decode_func[CHR_FALSE] = decode_false -decode_func[CHR_NONE] = decode_none +decode_func = { + '0': decode_string, + '1': decode_string, + '2': decode_string, + '3': decode_string, + '4': decode_string, + '5': decode_string, + '6': decode_string, + '7': decode_string, + '8': decode_string, + '9': decode_string, + CHR_LIST: decode_list, + CHR_DICT: decode_dict, + CHR_INT: decode_int, + CHR_INT1: decode_intb, + CHR_INT2: decode_inth, + CHR_INT4: decode_intl, + CHR_INT8: decode_intq, + CHR_FLOAT32: decode_float32, + CHR_FLOAT64: decode_float64, + CHR_TRUE: decode_true, + CHR_FALSE: decode_false, + CHR_NONE: decode_none, +} def make_fixed_length_string_decoders(): @@ -400,15 +401,16 @@ def encode_dict(x, r): r.append(CHR_TERM) -encode_func = {} -encode_func[IntType] = encode_int -encode_func[LongType] = encode_int -encode_func[StringType] = encode_string -encode_func[ListType] = encode_list -encode_func[TupleType] = encode_list -encode_func[DictType] = encode_dict -encode_func[NoneType] = encode_none -encode_func[UnicodeType] = encode_unicode +encode_func = { + IntType: encode_int, + LongType: encode_int, + StringType: encode_string, + ListType: encode_list, + TupleType: encode_list, + DictType: encode_dict, + NoneType: encode_none, + UnicodeType: encode_unicode, +} lock = Lock() From 94e8a45c6249e0e5736d2fe95d132b0120730336 Mon Sep 17 00:00:00 2001 From: Labrys 
Date: Sun, 5 Jun 2016 02:31:04 -0400 Subject: [PATCH 38/82] Code cleanup * Streamline variable assignment * Replace assignment with augmented assignment * Remove unused variables and redundant parentheses --- core/nzbToMediaDB.py | 1 - core/nzbToMediaUserScript.py | 7 +++--- core/nzbToMediaUtil.py | 14 +++++------- core/synchronousdeluge/client.py | 5 ++--- core/synchronousdeluge/protocol.py | 2 +- core/synchronousdeluge/rencode.py | 34 +++++++++++++++--------------- core/transcoder/transcoder.py | 9 -------- core/transmissionrpc/client.py | 9 +++----- core/transmissionrpc/torrent.py | 1 - core/transmissionrpc/utils.py | 8 +++---- core/versionCheck.py | 7 +++--- 11 files changed, 39 insertions(+), 58 deletions(-) diff --git a/core/nzbToMediaDB.py b/core/nzbToMediaDB.py index c8a22318..4445b6e1 100644 --- a/core/nzbToMediaDB.py +++ b/core/nzbToMediaDB.py @@ -115,7 +115,6 @@ class DBConnection(object): logger.log(u"DB error: " + str(e), logger.ERROR) raise except sqlite3.DatabaseError as e: - sqlResult = [] if self.connection: self.connection.rollback() logger.log(u"Fatal error executing query: " + str(e), logger.ERROR) diff --git a/core/nzbToMediaUserScript.py b/core/nzbToMediaUserScript.py index 23c3c5de..fe6a453e 100644 --- a/core/nzbToMediaUserScript.py +++ b/core/nzbToMediaUserScript.py @@ -58,7 +58,7 @@ def external_script(outputDestination, torrentName, torrentLabel, settings): fileName, fileExtension = os.path.splitext(file) if fileExtension in core.USER_SCRIPT_MEDIAEXTENSIONS or "ALL" in core.USER_SCRIPT_MEDIAEXTENSIONS: - num_files = num_files + 1 + num_files += 1 if core.USER_SCRIPT_RUNONCE == 1 and num_files > 1: # we have already run once, so just continue to get number of files. 
continue command = [core.USER_SCRIPT] @@ -103,16 +103,15 @@ def external_script(outputDestination, torrentName, torrentLabel, settings): except: logger.error("UserScript %s has failed" % (command[0]), "USERSCRIPT") result = int(1) - final_result = final_result + result + final_result += result num_files_new = 0 for dirpath, dirnames, filenames in os.walk(outputDestination): for file in filenames: - filePath = core.os.path.join(dirpath, file) fileName, fileExtension = os.path.splitext(file) if fileExtension in core.USER_SCRIPT_MEDIAEXTENSIONS or core.USER_SCRIPT_MEDIAEXTENSIONS == "ALL": - num_files_new = num_files_new + 1 + num_files_new += 1 if core.USER_SCRIPT_CLEAN == int(1) and num_files_new == 0 and final_result == 0: logger.info("All files have been processed. Cleaning outputDirectory %s" % (outputDestination)) diff --git a/core/nzbToMediaUtil.py b/core/nzbToMediaUtil.py index f1098330..bbb58346 100644 --- a/core/nzbToMediaUtil.py +++ b/core/nzbToMediaUtil.py @@ -38,7 +38,7 @@ def reportNzb(failure_link, clientAgent): else: return try: - r = requests.post(failure_link, headers=headers, timeout=(30, 300)) + requests.post(failure_link, headers=headers, timeout=(30, 300)) except Exception as e: logger.error("Unable to open URL %s due to %s" % (failure_link, e)) return @@ -202,7 +202,7 @@ def is_minSize(inputName, minSize): # audio files we need to check directory size not file size inputSize = os.path.getsize(inputName) - if fileExt in (core.AUDIOCONTAINER): + if fileExt in core.AUDIOCONTAINER: try: inputSize = getDirSize(os.path.dirname(inputName)) except: @@ -339,7 +339,7 @@ def rmReadOnly(filename): if os.path.isfile(filename): # check first the read-only attribute file_attribute = os.stat(filename)[0] - if (not file_attribute & stat.S_IWRITE): + if not file_attribute & stat.S_IWRITE: # File is read-only, so make it writeable logger.debug('Read only mode on file ' + filename + ' Will try to make it writeable') try: @@ -631,11 +631,7 @@ def getDirs(section, 
subsection, link='hard'): f = guessit.guess_video_info(mediafile) # get title - title = None - try: - title = f['series'] - except: - title = f['title'] + title = f.get('series') or f.get('title') if not title: title = os.path.splitext(os.path.basename(mediafile))[0] @@ -1262,7 +1258,7 @@ class WindowsProcess(object): def alreadyrunning(self): self.mutex = self.CreateMutex(None, 0, self.mutexname) self.lasterror = self.GetLastError() - if (self.lasterror == self.ERROR_ALREADY_EXISTS): + if self.lasterror == self.ERROR_ALREADY_EXISTS: self.CloseHandle(self.mutex) return True else: diff --git a/core/synchronousdeluge/client.py b/core/synchronousdeluge/client.py index 050d414e..cecb2a88 100644 --- a/core/synchronousdeluge/client.py +++ b/core/synchronousdeluge/client.py @@ -23,7 +23,6 @@ class DelugeClient(object): self._request_counter = 0 def _get_local_auth(self): - auth_file = "" username = password = "" if platform.system() in ('Windows', 'Microsoft'): appDataPath = os.environ.get("APPDATA") @@ -62,9 +61,9 @@ class DelugeClient(object): continue if username == "localclient": - return (username, password) + return username, password - return ("", "") + return "", "" def _create_module_method(self, module, method): fullname = "{0}.{1}".format(module, method) diff --git a/core/synchronousdeluge/protocol.py b/core/synchronousdeluge/protocol.py index 2cb1a73e..98084d4f 100644 --- a/core/synchronousdeluge/protocol.py +++ b/core/synchronousdeluge/protocol.py @@ -10,7 +10,7 @@ class DelugeRPCRequest(object): self.kwargs = kwargs def format(self): - return (self.request_id, self.method, self.args, self.kwargs) + return self.request_id, self.method, self.args, self.kwargs class DelugeRPCResponse(object): diff --git a/core/synchronousdeluge/rencode.py b/core/synchronousdeluge/rencode.py index f0fcc69e..f27c3304 100644 --- a/core/synchronousdeluge/rencode.py +++ b/core/synchronousdeluge/rencode.py @@ -126,39 +126,39 @@ def decode_int(x, f): raise ValueError elif x[f] == '0' 
and newf != f + 1: raise ValueError - return (n, newf + 1) + return n, newf + 1 def decode_intb(x, f): f += 1 - return (struct.unpack('!b', x[f:f + 1])[0], f + 1) + return struct.unpack('!b', x[f:f + 1])[0], f + 1 def decode_inth(x, f): f += 1 - return (struct.unpack('!h', x[f:f + 2])[0], f + 2) + return struct.unpack('!h', x[f:f + 2])[0], f + 2 def decode_intl(x, f): f += 1 - return (struct.unpack('!l', x[f:f + 4])[0], f + 4) + return struct.unpack('!l', x[f:f + 4])[0], f + 4 def decode_intq(x, f): f += 1 - return (struct.unpack('!q', x[f:f + 8])[0], f + 8) + return struct.unpack('!q', x[f:f + 8])[0], f + 8 def decode_float32(x, f): f += 1 n = struct.unpack('!f', x[f:f + 4])[0] - return (n, f + 4) + return n, f + 4 def decode_float64(x, f): f += 1 n = struct.unpack('!d', x[f:f + 8])[0] - return (n, f + 8) + return n, f + 8 def decode_string(x, f): @@ -177,7 +177,7 @@ def decode_string(x, f): s = t except UnicodeDecodeError: pass - return (s, colon + n) + return s, colon + n def decode_list(x, f): @@ -185,7 +185,7 @@ def decode_list(x, f): while x[f] != CHR_TERM: v, f = decode_func[x[f]](x, f) r.append(v) - return (tuple(r), f + 1) + return tuple(r), f + 1 def decode_dict(x, f): @@ -193,19 +193,19 @@ def decode_dict(x, f): while x[f] != CHR_TERM: k, f = decode_func[x[f]](x, f) r[k], f = decode_func[x[f]](x, f) - return (r, f + 1) + return r, f + 1 def decode_true(x, f): - return (True, f + 1) + return True, f + 1 def decode_false(x, f): - return (False, f + 1) + return False, f + 1 def decode_none(x, f): - return (None, f + 1) + return None, f + 1 decode_func = { @@ -244,7 +244,7 @@ def make_fixed_length_string_decoders(): s = t except UnicodeDecodeError: pass - return (s, f + 1 + slen) + return s, f + 1 + slen return f @@ -262,7 +262,7 @@ def make_fixed_length_list_decoders(): for i in range(slen): v, f = decode_func[x[f]](x, f) r.append(v) - return (tuple(r), f) + return tuple(r), f return f @@ -276,7 +276,7 @@ make_fixed_length_list_decoders() def 
make_fixed_length_int_decoders(): def make_decoder(j): def f(x, f): - return (j, f + 1) + return j, f + 1 return f @@ -296,7 +296,7 @@ def make_fixed_length_dict_decoders(): for j in range(slen): k, f = decode_func[x[f]](x, f) r[k], f = decode_func[x[f]](x, f) - return (r, f) + return r, f return f diff --git a/core/transcoder/transcoder.py b/core/transcoder/transcoder.py index 46e95896..9731f689 100644 --- a/core/transcoder/transcoder.py +++ b/core/transcoder/transcoder.py @@ -142,7 +142,6 @@ def buildCommands(file, newDir, movieName, bitbucket): video_cmd = [] audio_cmd = [] audio_cmd2 = [] - audio_cmd3 = [] sub_cmd = [] meta_cmd = [] other_cmd = [] @@ -221,10 +220,6 @@ def buildCommands(file, newDir, movieName, bitbucket): except: height = 0 scale = core.VRESOLUTION - try: - framerate = float(fr.split('/')[0]) / float(fr.split('/')[1]) - except: - framerate = 0 if codec in core.VCODEC_ALLOW or not core.VCODEC: video_cmd.extend(['-c:v', 'copy']) else: @@ -431,7 +426,6 @@ def buildCommands(file, newDir, movieName, bitbucket): audio_cmd.extend(audio_cmd3) s_mapped = [] - subs1 = [] burnt = 0 n = 0 for lan in core.SLANGUAGES: @@ -587,12 +581,10 @@ def extract_subs(file, newfilePath, bitbucket): def processList(List, newDir, bitbucket): remList = [] newList = [] - delList = [] combine = [] vtsPath = None success = True for item in List: - newfile = None ext = os.path.splitext(item)[1].lower() if ext in ['.iso', '.bin', '.img'] and ext not in core.IGNOREEXTENSIONS: logger.debug("Attempting to rip disk image: %s" % (item), "TRANSCODER") @@ -647,7 +639,6 @@ def ripISO(item, newDir, bitbucket): print_cmd(cmd) proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=bitbucket) out, err = proc.communicate() - result = proc.returncode fileList = [re.match(".+(VIDEO_TS[\\\/]VTS_[0-9][0-9]_[0-9].[Vv][Oo][Bb])", line).groups()[0] for line in out.splitlines() if re.match(".+VIDEO_TS[\\\/]VTS_[0-9][0-9]_[0-9].[Vv][Oo][Bb]", line)] combined = [] diff --git 
a/core/transmissionrpc/client.py b/core/transmissionrpc/client.py index b64b709d..6379e595 100644 --- a/core/transmissionrpc/client.py +++ b/core/transmissionrpc/client.py @@ -409,11 +409,8 @@ class Client(object): pass if might_be_base64: torrent_data = torrent - args = {} - if torrent_data: - args = {'metainfo': torrent_data} - else: - args = {'filename': torrent} + + args = {'metainfo': torrent_data} if torrent_data else {'filename': torrent} for key, value in iteritems(kwargs): argument = make_rpc_name(key) (arg, val) = argument_value_convert('torrent-add', argument, value, self.rpc_version) @@ -804,7 +801,7 @@ class Client(object): raise ValueError("Target name cannot contain a path delimiter") args = {'path': location, 'name': name} result = self._request('torrent-rename-path', args, torrent_id, True, timeout=timeout) - return (result['path'], result['name']) + return result['path'], result['name'] def queue_top(self, ids, timeout=None): """Move transfer to the top of the queue.""" diff --git a/core/transmissionrpc/torrent.py b/core/transmissionrpc/torrent.py index 5fd033db..54ee2a2d 100644 --- a/core/transmissionrpc/torrent.py +++ b/core/transmissionrpc/torrent.py @@ -124,7 +124,6 @@ class Torrent(object): """ Update the torrent data from a Transmission JSON-RPC arguments dictionary """ - fields = None if isinstance(other, dict): for key, value in iteritems(other): self._fields[key.replace('-', '_')] = Field(value, False) diff --git a/core/transmissionrpc/utils.py b/core/transmissionrpc/utils.py index c2bca855..0ac2a32a 100644 --- a/core/transmissionrpc/utils.py +++ b/core/transmissionrpc/utils.py @@ -23,7 +23,7 @@ def format_size(size): while size >= 1024.0 and i < len(UNITS): i += 1 size /= 1024.0 - return (size, UNITS[i]) + return size, UNITS[i] def format_speed(size): @@ -31,7 +31,7 @@ def format_speed(size): Format bytes per second speed into IEC prefixes, B/s, KiB/s, MiB/s ... 
""" (size, unit) = format_size(size) - return (size, unit + '/s') + return size, unit + '/s' def format_timedelta(delta): @@ -91,7 +91,7 @@ def inet_address(address, default_port, default_address='localhost'): socket.getaddrinfo(addr, port, socket.AF_INET, socket.SOCK_STREAM) except socket.gaierror: raise INetAddressError('Cannot look up address "%s".' % address) - return (addr, port) + return addr, port def rpc_bool(arg): @@ -163,7 +163,7 @@ def argument_value_convert(method, argument, value, rpc_version): raise ValueError( 'Method "%s" Argument "%s" does not exist in version %d.' % (method, argument, rpc_version)) - return (argument, TR_TYPE_MAP[info[0]](value)) + return argument, TR_TYPE_MAP[info[0]](value) else: raise ValueError('Argument "%s" does not exists for method "%s".', (argument, method)) diff --git a/core/versionCheck.py b/core/versionCheck.py index fd58af45..84e14947 100644 --- a/core/versionCheck.py +++ b/core/versionCheck.py @@ -159,12 +159,13 @@ class GitUpdateManager(UpdateManager): def _run_git(self, git_path, args): - output = err = exit_status = None + output = None + err = None if not git_path: logger.log(u"No git specified, can't use git commands", logger.DEBUG) exit_status = 1 - return (output, err, exit_status) + return output, err, exit_status cmd = git_path + ' ' + args @@ -203,7 +204,7 @@ class GitUpdateManager(UpdateManager): logger.log(cmd + u" returned : " + output + u", treat as error for now", logger.DEBUG) exit_status = 1 - return (output, err, exit_status) + return output, err, exit_status def _find_installed_version(self): """ From 1cd073cd52bd2622c51c95fc0451f37a37b89173 Mon Sep 17 00:00:00 2001 From: Labrys Date: Tue, 31 May 2016 08:20:06 -0400 Subject: [PATCH 39/82] Use `format()` instead of `%` for string formatting --- TorrentToMedia.py | 64 +++++----- core/__init__.py | 18 +-- core/extractor/extractor.py | 35 ++++-- core/linktastic/linktastic.py | 10 +- core/logger.py | 4 +- core/nzbToMediaAutoFork.py | 37 +++--- 
core/nzbToMediaConfig.py | 21 ++-- core/nzbToMediaDB.py | 8 +- core/nzbToMediaSceneExceptions.py | 24 ++-- core/nzbToMediaUserScript.py | 28 ++--- core/nzbToMediaUtil.py | 173 ++++++++++++++-------------- core/synchronousdeluge/rencode.py | 2 +- core/transcoder/transcoder.py | 71 ++++++------ core/transmissionrpc/client.py | 15 +-- core/transmissionrpc/error.py | 8 +- core/transmissionrpc/httphandler.py | 4 +- core/transmissionrpc/session.py | 4 +- core/transmissionrpc/torrent.py | 8 +- core/transmissionrpc/utils.py | 18 ++- core/utorrent/upload.py | 8 +- nzbToMedia.py | 44 +++---- 21 files changed, 317 insertions(+), 287 deletions(-) diff --git a/TorrentToMedia.py b/TorrentToMedia.py index f82b9b10..b1a317ae 100755 --- a/TorrentToMedia.py +++ b/TorrentToMedia.py @@ -18,7 +18,7 @@ def processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID, uniquePath = 1 if clientAgent != 'manual' and not core.DOWNLOADINFO: - logger.debug('Adding TORRENT download info for directory %s to database' % (inputDirectory)) + logger.debug('Adding TORRENT download info for directory {0} to database'.format(inputDirectory)) myDB = nzbToMediaDB.DBConnection() @@ -42,7 +42,7 @@ def processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID, } myDB.upsert("downloads", newValueDict, controlValueDict) - logger.debug("Received Directory: %s | Name: %s | Category: %s" % (inputDirectory, inputName, inputCategory)) + logger.debug("Received Directory: {0} | Name: {1} | Category: {2}".format(inputDirectory, inputName, inputCategory)) inputDirectory, inputName, inputCategory, root = core.category_search(inputDirectory, inputName, inputCategory, root, @@ -58,7 +58,7 @@ def processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID, inputDirectory = inputDirectory.encode(core.SYS_ENCODING) except: pass - logger.debug("Determined Directory: %s | Name: %s | Category: %s" % (inputDirectory, inputName, inputCategory)) + logger.debug("Determined 
Directory: {0} | Name: {1} | Category: {2}".format(inputDirectory, inputName, inputCategory)) # auto-detect section section = core.CFG.findsection(inputCategory).isenabled() @@ -66,7 +66,7 @@ def processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID, section = core.CFG.findsection("ALL").isenabled() if section is None: logger.error( - 'Category:[%s] is not defined or is not enabled. Please rename it or ensure it is enabled for the appropriate section in your autoProcessMedia.cfg and try again.' % ( + 'Category:[{0}] is not defined or is not enabled. Please rename it or ensure it is enabled for the appropriate section in your autoProcessMedia.cfg and try again.'.format( inputCategory)) return [-1, ""] else: @@ -74,15 +74,15 @@ def processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID, if len(section) > 1: logger.error( - 'Category:[%s] is not unique, %s are using it. Please rename it or disable all other sections using the same category name in your autoProcessMedia.cfg and try again.' % ( + 'Category:[{0}] is not unique, {1} are using it. Please rename it or disable all other sections using the same category name in your autoProcessMedia.cfg and try again.'.format( usercat, section.keys())) return [-1, ""] if section: sectionName = section.keys()[0] - logger.info('Auto-detected SECTION:%s' % (sectionName)) + logger.info('Auto-detected SECTION:{0}'.format(sectionName)) else: - logger.error("Unable to locate a section with subsection:%s enabled in your autoProcessMedia.cfg, exiting!" 
% ( + logger.error("Unable to locate a section with subsection:{0} enabled in your autoProcessMedia.cfg, exiting!".format( inputCategory)) return [-1, ""] @@ -129,15 +129,15 @@ def processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID, if outputDestination in inputDirectory: outputDestination = inputDirectory - logger.info("Output directory set to: %s" % (outputDestination)) + logger.info("Output directory set to: {0}".format(outputDestination)) if core.SAFE_MODE and outputDestination == core.TORRENT_DEFAULTDIR: logger.error( - 'The output directory:[%s] is the Download Directory. Edit outputDirectory in autoProcessMedia.cfg. Exiting' % ( + 'The output directory:[{0}] is the Download Directory. Edit outputDirectory in autoProcessMedia.cfg. Exiting'.format( inputDirectory)) return [-1, ""] - logger.debug("Scanning files in directory: %s" % (inputDirectory)) + logger.debug("Scanning files in directory: {0}".format(inputDirectory)) if sectionName == 'HeadPhones': core.NOFLATTEN.extend( @@ -149,7 +149,7 @@ def processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID, inputFiles = core.listMediaFiles(inputDirectory, archives=False) else: inputFiles = core.listMediaFiles(inputDirectory) - logger.debug("Found %s files in %s" % (str(len(inputFiles)), inputDirectory)) + logger.debug("Found {0} files in {1}".format(len(inputFiles), inputDirectory)) for inputFile in inputFiles: filePath = os.path.dirname(inputFile) fileName, fileExt = os.path.splitext(os.path.basename(inputFile)) @@ -161,17 +161,17 @@ def processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID, targetFile = core.os.path.join( core.os.path.join(outputDestination, os.path.basename(filePath)), fullFileName) logger.debug( - "Setting outputDestination to %s to preserve folder structure" % (os.path.dirname(targetFile))) + "Setting outputDestination to {0} to preserve folder structure".format(os.path.dirname(targetFile))) try: targetFile = 
targetFile.encode(core.SYS_ENCODING) except: pass if root == 1: if not foundFile: - logger.debug("Looking for %s in: %s" % (inputName, inputFile)) + logger.debug("Looking for {0} in: {1}".format(inputName, inputFile)) if (core.sanitizeName(inputName) in core.sanitizeName(inputFile)) or ( core.sanitizeName(fileName) in core.sanitizeName(inputName)): foundFile = True - logger.debug("Found file %s that matches Torrent Name %s" % (fullFileName, inputName)) + logger.debug("Found file {0} that matches Torrent Name {1}".format(fullFileName, inputName)) else: continue @@ -183,7 +183,7 @@ def processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID, logger.debug("Looking for files with modified/created dates less than 5 minutes old.") if (mtime_lapse < datetime.timedelta(minutes=5)) or (ctime_lapse < datetime.timedelta(minutes=5)): foundFile = True - logger.debug("Found file %s with date modifed/created less than 5 minutes ago." % (fullFileName)) + logger.debug("Found file {0} with date modifed/created less than 5 minutes ago.".format(fullFileName)) else: continue # This file has not been recently moved or created, skip it @@ -192,12 +192,12 @@ def processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID, core.copy_link(inputFile, targetFile, core.USELINK) core.rmReadOnly(targetFile) except: - logger.error("Failed to link: %s to %s" % (inputFile, targetFile)) + logger.error("Failed to link: {0} to {1}".format(inputFile, targetFile)) inputName, outputDestination = convert_to_ascii(inputName, outputDestination) if extract == 1: - logger.debug('Checking for archives to extract in directory: %s' % (inputDirectory)) + logger.debug('Checking for archives to extract in directory: {0}'.format(inputDirectory)) core.extractFiles(inputDirectory, outputDestination, keep_archive) if not inputCategory in core.NOFLATTEN: #don't flatten hp in case multi cd albums, and we need to copy this back later. 
@@ -208,19 +208,19 @@ def processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID, numVideos = len( core.listMediaFiles(outputDestination, media=True, audio=False, meta=False, archives=False)) if numVideos > 0: - logger.info("Found %s media files in %s" % (numVideos, outputDestination)) + logger.info("Found {0} media files in {1}".format(numVideos, outputDestination)) status = 0 elif extract != 1: - logger.info("Found no media files in %s. Sending to %s to process" % (outputDestination, sectionName)) + logger.info("Found no media files in {0}. Sending to {1} to process".format(outputDestination, sectionName)) status = 0 else: - logger.warning("Found no media files in %s" % outputDestination) + logger.warning("Found no media files in {0}".format(outputDestination)) # Only these sections can handling failed downloads so make sure everything else gets through without the check for failed if not sectionName in ['CouchPotato', 'SickBeard', 'NzbDrone']: status = 0 - logger.info("Calling %s:%s to post-process:%s" % (sectionName, usercat, inputName)) + logger.info("Calling {0}:{1} to post-process:{2}".format(sectionName, usercat, inputName)) if core.TORRENT_CHMOD_DIRECTORY: core.rchmod(outputDestination, core.TORRENT_CHMOD_DIRECTORY) @@ -262,10 +262,10 @@ def processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID, # remove torrent if core.USELINK == 'move-sym' and not core.DELETE_ORIGINAL == 1: - logger.debug('Checking for sym-links to re-direct in: %s' % (inputDirectory)) + logger.debug('Checking for sym-links to re-direct in: {0}'.format(inputDirectory)) for dirpath, dirs, files in os.walk(inputDirectory): for file in files: - logger.debug('Checking symlink: %s' % (os.path.join(dirpath,file))) + logger.debug('Checking symlink: {0}'.format(os.path.join(dirpath,file))) core.replace_links(os.path.join(dirpath,file)) core.remove_torrent(clientAgent, inputHash, inputID, inputName) @@ -284,11 +284,11 @@ def main(args): clientAgent = 
core.TORRENT_CLIENTAGENT logger.info("#########################################################") - logger.info("## ..::[%s]::.. ##" % os.path.basename(__file__)) + logger.info("## ..::[{0}]::.. ##".format(os.path.basename(__file__))) logger.info("#########################################################") # debug command line options - logger.debug("Options passed into TorrentToMedia: %s" % (args)) + logger.debug("Options passed into TorrentToMedia: {0}".format(args)) # Post-Processing Result result = [ 0, "" ] @@ -310,16 +310,16 @@ def main(args): if not core.CFG[section][subsection].isenabled(): continue for dirName in core.getDirs(section, subsection, link='hard'): - logger.info("Starting manual run for %s:%s - Folder:%s" % (section, subsection, dirName)) + logger.info("Starting manual run for {0}:{1} - Folder:{2}".format(section, subsection, dirName)) - logger.info("Checking database for download info for %s ..." % (os.path.basename(dirName))) + logger.info("Checking database for download info for {0} ...".format(os.path.basename(dirName))) core.DOWNLOADINFO = core.get_downloadInfo(os.path.basename(dirName), 0) if core.DOWNLOADINFO: logger.info( - "Found download info for %s, setting variables now ..." % (os.path.basename(dirName))) + "Found download info for {0}, setting variables now ...".format(os.path.basename(dirName))) else: logger.info( - 'Unable to locate download info for %s, continuing to try and process this release ...' % ( + 'Unable to locate download info for {0}, continuing to try and process this release ...'.format( os.path.basename(dirName)) ) @@ -350,14 +350,14 @@ def main(args): results = processTorrent(dirName, inputName, subsection, inputHash, inputID, clientAgent) if results[0] != 0: - logger.error("A problem was reported when trying to perform a manual run for %s:%s." 
% ( + logger.error("A problem was reported when trying to perform a manual run for {0}:{1}.".format( section, subsection)) result = results if result[0] == 0: - logger.info("The %s script completed successfully." % (args[0])) + logger.info("The {0} script completed successfully.".format(args[0])) else: - logger.error("A problem was reported in the %s script." % (args[0])) + logger.error("A problem was reported in the {0} script.".format(args[0])) del core.MYAPP return result[0] diff --git a/core/__init__.py b/core/__init__.py index 6b3ca265..b71a3b53 100644 --- a/core/__init__.py +++ b/core/__init__.py @@ -276,7 +276,7 @@ def initialize(section=None): # run migrate to convert old cfg to new style cfg plus fix any cfg missing values/options. if not config.migrate(): - logger.error("Unable to migrate config file %s, exiting ..." % (CONFIG_FILE)) + logger.error("Unable to migrate config file {0}, exiting ...".format(CONFIG_FILE)) if 'NZBOP_SCRIPTDIR' in os.environ: pass # We will try and read config from Environment. 
else: @@ -287,7 +287,7 @@ def initialize(section=None): CFG = config.addnzbget() else: # load newly migrated config - logger.info("Loading config from [%s]" % (CONFIG_FILE)) + logger.info("Loading config from [{0}]".format(CONFIG_FILE)) CFG = config() # Enable/Disable DEBUG Logging @@ -298,7 +298,7 @@ def initialize(section=None): if LOG_ENV: for item in os.environ: - logger.info("%s: %s" % (item, os.environ[item]), "ENVIRONMENT") + logger.info("{0}: {1}".format(item, os.environ[item]), "ENVIRONMENT") # initialize the main SB database nzbToMediaDB.upgradeDatabase(nzbToMediaDB.DBConnection(), mainDB.InitialSchema) @@ -399,20 +399,20 @@ def initialize(section=None): devnull = open(os.devnull, 'w') try: subprocess.Popen(["nice"], stdout=devnull, stderr=devnull).communicate() - NICENESS.extend(['nice', '-n%s' % (int(CFG["Posix"]["niceness"]))]) + NICENESS.extend(['nice', '-n{0}'.format(int(CFG["Posix"]["niceness"]))]) except: pass try: subprocess.Popen(["ionice"], stdout=devnull, stderr=devnull).communicate() try: - NICENESS.extend(['ionice', '-c%s' % (int(CFG["Posix"]["ionice_class"]))]) + NICENESS.extend(['ionice', '-c{0}'.format(int(CFG["Posix"]["ionice_class"]))]) except: pass try: if 'ionice' in NICENESS: - NICENESS.extend(['-n%s' % (int(CFG["Posix"]["ionice_classdata"]))]) + NICENESS.extend(['-n{0}'.format(int(CFG["Posix"]["ionice_classdata"]))]) else: - NICENESS.extend(['ionice', '-n%s' % (int(CFG["Posix"]["ionice_classdata"]))]) + NICENESS.extend(['ionice', '-n{0}'.format(int(CFG["Posix"]["ionice_classdata"]))]) except: pass except: @@ -422,7 +422,7 @@ def initialize(section=None): COMPRESSEDCONTAINER = [re.compile('.r\d{2}$', re.I), re.compile('.part\d+.rar$', re.I), re.compile('.rar$', re.I)] - COMPRESSEDCONTAINER += [re.compile('%s$' % ext, re.I) for ext in CFG["Extensions"]["compressedExtensions"]] + COMPRESSEDCONTAINER += [re.compile('{0}$'.format(ext), re.I) for ext in CFG["Extensions"]["compressedExtensions"]] MEDIACONTAINER = 
CFG["Extensions"]["mediaExtensions"] AUDIOCONTAINER = CFG["Extensions"]["audioExtensions"] METACONTAINER = CFG["Extensions"]["metaExtensions"] # .nfo,.sub,.srt @@ -851,7 +851,7 @@ def restart(): def rchmod(path, mod): - logger.log("Changing file mode of %s to %s" % (path, oct(mod))) + logger.log("Changing file mode of {0} to {1}".format(path, oct(mod))) os.chmod(path, mod) if not os.path.isdir(path): return # Skip files diff --git a/core/extractor/extractor.py b/core/extractor/extractor.py index 865802ba..08011706 100644 --- a/core/extractor/extractor.py +++ b/core/extractor/extractor.py @@ -1,4 +1,5 @@ # coding=utf-8 + import os import platform import shutil @@ -53,7 +54,9 @@ def extract(filePath, outputDestination): else: for k, v in EXTRACT_COMMANDS.items(): if cmd in v[0]: - core.logger.error("EXTRACTOR: %s not found, disabling support for %s" % (cmd, k)) + core.logger.error("EXTRACTOR: {cmd} not found, " + "disabling support for {feature}".format + (cmd=cmd, feature=k)) del EXTRACT_COMMANDS[k] devnull.close() else: @@ -76,10 +79,11 @@ def extract(filePath, outputDestination): if ext[1] in EXTRACT_COMMANDS: cmd = EXTRACT_COMMANDS[ext[1]] else: - core.logger.debug("EXTRACTOR: Unknown file type: %s" % ext[1]) + core.logger.debug("EXTRACTOR: Unknown file type: {ext}".format + (ext=ext[1])) return False - # Create outputDestination folder + # Create outputDestination folder core.makeDir(outputDestination) if core.PASSWORDSFILE != "" and os.path.isfile(os.path.normpath(core.PASSWORDSFILE)): @@ -87,8 +91,10 @@ def extract(filePath, outputDestination): else: passwords = [] - core.logger.info("Extracting %s to %s" % (filePath, outputDestination)) - core.logger.debug("Extracting %s %s %s" % (cmd, filePath, outputDestination)) + core.logger.info("Extracting {file} to {destination}".format + (file=filePath, destination=outputDestination)) + core.logger.debug("Extracting {cmd} {file} {destination}".format + (cmd=cmd, file=filePath, destination=outputDestination)) origFiles 
= [] origDirs = [] @@ -114,9 +120,9 @@ def extract(filePath, outputDestination): cmd2.append("-p-") # don't prompt for password. p = Popen(cmd2, stdout=devnull, stderr=devnull, startupinfo=info) # should extract files fine. res = p.wait() - if ( - res >= 0 and os.name == 'nt') or res == 0: # for windows chp returns process id if successful or -1*Error code. Linux returns 0 for successful. - core.logger.info("EXTRACTOR: Extraction was successful for %s to %s" % (filePath, outputDestination)) + if (res >= 0 and os.name == 'nt') or res == 0: # for windows chp returns process id if successful or -1*Error code. Linux returns 0 for successful. + core.logger.info("EXTRACTOR: Extraction was successful for {file} to {destination}".format + (file=filePath, destination=outputDestination)) success = 1 elif len(passwords) > 0: core.logger.info("EXTRACTOR: Attempting to extract with passwords") @@ -130,14 +136,17 @@ def extract(filePath, outputDestination): p = Popen(cmd2, stdout=devnull, stderr=devnull, startupinfo=info) # should extract files fine. res = p.wait() if (res >= 0 and platform == 'Windows') or res == 0: - core.logger.info("EXTRACTOR: Extraction was successful for %s to %s using password: %s" % ( - filePath, outputDestination, password)) + core.logger.info("EXTRACTOR: Extraction was successful " + "for {file} to {destination} using password: {pwd}".format + (file=filePath, destination=outputDestination, pwd=password)) success = 1 break else: continue except: - core.logger.error("EXTRACTOR: Extraction failed for %s. Could not call command %s" % (filePath, cmd)) + core.logger.error("EXTRACTOR: Extraction failed for {file}. " + "Could not call command {cmd}".format + (file=filePath, cmd=cmd)) os.chdir(pwd) return False @@ -162,5 +171,7 @@ def extract(filePath, outputDestination): pass return True else: - core.logger.error("EXTRACTOR: Extraction failed for %s. Result was %s" % (filePath, res)) + core.logger.error("EXTRACTOR: Extraction failed for {file}. 
" + "Result was {result}".format + (file=filePath, result=res)) return False diff --git a/core/linktastic/linktastic.py b/core/linktastic/linktastic.py index af690158..95d2f8c6 100644 --- a/core/linktastic/linktastic.py +++ b/core/linktastic/linktastic.py @@ -33,14 +33,14 @@ if os.name == 'nt': # Prevent spaces from messing with us! def _escape_param(param): - return '"%s"' % param + return '"{0}"'.format(param) # Private function to create link on nt-based systems def _link_windows(src, dest): try: subprocess.check_output( - 'cmd /C mklink /H %s %s' % (_escape_param(dest), _escape_param(src)), + 'cmd /C mklink /H {0} {1}'.format(_escape_param(dest), _escape_param(src)), stderr=subprocess.STDOUT, startupinfo=info) except CalledProcessError as err: @@ -54,7 +54,7 @@ def _link_windows(src, dest): def _symlink_windows(src, dest): try: subprocess.check_output( - 'cmd /C mklink %s %s' % (_escape_param(dest), _escape_param(src)), + 'cmd /C mklink {0} {1}'.format(_escape_param(dest), _escape_param(src)), stderr=subprocess.STDOUT, startupinfo=info) except CalledProcessError as err: raise IOError(err.output.decode('utf-8')) @@ -67,7 +67,7 @@ def _symlink_windows(src, dest): def _dirlink_windows(src, dest): try: subprocess.check_output( - 'cmd /C mklink /J %s %s' % (_escape_param(dest), _escape_param(src)), + 'cmd /C mklink /J {0} {1}'.format(_escape_param(dest), _escape_param(src)), stderr=subprocess.STDOUT, startupinfo=info) except CalledProcessError as err: raise IOError(err.output.decode('utf-8')) @@ -80,7 +80,7 @@ def _dirlink_windows(src, dest): def _junctionlink_windows(src, dest): try: subprocess.check_output( - 'cmd /C mklink /D %s %s' % (_escape_param(dest), _escape_param(src)), + 'cmd /C mklink /D {0} {1}'.format(_escape_param(dest), _escape_param(src)), stderr=subprocess.STDOUT, startupinfo=info) except CalledProcessError as err: raise IOError(err.output.decode('utf-8')) diff --git a/core/logger.py b/core/logger.py index 86772c15..94d1764f 100644 --- 
a/core/logger.py +++ b/core/logger.py @@ -193,9 +193,9 @@ class NTMRotatingLogHandler(object): self.writes_since_check += 1 try: - message = u"%s: %s" % (str(section).upper(), toLog) + message = u"{0}: {1}".format(str(section).upper(), toLog) except: - message = u"%s: Message contains non-utf-8 string" % (str(section).upper()) + message = u"{0}: Message contains non-utf-8 string".format(str(section).upper()) out_line = message diff --git a/core/nzbToMediaAutoFork.py b/core/nzbToMediaAutoFork.py index ce1949c5..76a11204 100644 --- a/core/nzbToMediaAutoFork.py +++ b/core/nzbToMediaAutoFork.py @@ -52,42 +52,49 @@ def autoFork(section, inputCategory): detected = False if section == "NzbDrone": - logger.info("Attempting to verify %s fork" % inputCategory) - url = "%s%s:%s%s/api/rootfolder" % (protocol, host, port, web_root) + logger.info("Attempting to verify {category} fork".format + (category=inputCategory)) + url = "{protocol}{host}:{port}{root}/api/rootfolder".format( + protocol=protocol, host=host, port=port, root=web_root) headers = {"X-Api-Key": apikey} try: r = requests.get(url, headers=headers, stream=True, verify=False) except requests.ConnectionError: - logger.warning("Could not connect to %s:%s to verify fork!" % (section, inputCategory)) + logger.warning("Could not connect to {0}:{1} to verify fork!".format(section, inputCategory)) if not r.ok: - logger.warning("Connection to %s:%s failed! Check your configuration" % (section, inputCategory)) + logger.warning("Connection to {section}:{category} failed! " + "Check your configuration".format + (section=section, category=inputCategory)) fork = ['default', {}] elif fork == "auto": params = core.ALL_FORKS rem_params = [] - logger.info("Attempting to auto-detect %s fork" % inputCategory) + logger.info("Attempting to auto-detect {category} fork".format(category=inputCategory)) # define the order to test. Default must be first since the default fork doesn't reject parameters. 
# then in order of most unique parameters. - url = "%s%s:%s%s/home/postprocess/" % (protocol, host, port, web_root) + url = "{protocol}{host}:{port}{root}/home/postprocess/".format( + protocol=protocol, host=host, port=port, root=web_root) # attempting to auto-detect fork try: if username and password: s = requests.Session() - login = "%s%s:%s%s/login" % (protocol, host, port, web_root) + login = "{protocol}{host}:{port}{root}/login".format( + protocol=protocol, host=host, port=port, root=web_root) login_params = {'username': username, 'password': password} s.post(login, data=login_params, stream=True, verify=False) r = s.get(url, auth=(username, password), verify=False) else: r = requests.get(url, verify=False) except requests.ConnectionError: - logger.info("Could not connect to %s:%s to perform auto-fork detection!" % (section, inputCategory)) + logger.info("Could not connect to {section}:{category} to perform auto-fork detection!".format + (section=section, category=inputCategory)) r = [] if r and r.ok: for param in params: - if not 'name="%s"' % (param) in r.text: + if not 'name={param!r}'.format(param=param) in r.text: rem_params.append(param) for param in rem_params: params.pop(param) @@ -96,13 +103,17 @@ def autoFork(section, inputCategory): detected = True break if detected: - logger.info("%s:%s fork auto-detection successful ..." 
% (section, inputCategory)) + logger.info("{section}:{category} fork auto-detection successful ...".format + (section=section, category=inputCategory)) elif rem_params: - logger.info("%s:%s fork auto-detection found custom params %s" % (section, inputCategory, params)) + logger.info("{section}:{category} fork auto-detection found custom params {params}".format + (section=section, category=inputCategory, params=params)) fork = ['custom', params] else: - logger.info("%s:%s fork auto-detection failed" % (section, inputCategory)) + logger.info("{section}:{category} fork auto-detection failed".format + (section=section, category=inputCategory)) fork = core.FORKS.items()[core.FORKS.keys().index(core.FORK_DEFAULT)] - logger.info("%s:%s fork set to %s" % (section, inputCategory, fork[0])) + logger.info("{section}:{category} fork set to {fork}".format + (section=section, category=inputCategory, fork=fork[0])) return fork[0], fork[1] diff --git a/core/nzbToMediaConfig.py b/core/nzbToMediaConfig.py index badc597f..00aed989 100644 --- a/core/nzbToMediaConfig.py +++ b/core/nzbToMediaConfig.py @@ -118,16 +118,16 @@ class ConfigObj(configobj.ConfigObj, Section): if not os.path.isfile(core.CONFIG_FILE): shutil.copyfile(core.CONFIG_SPEC_FILE, core.CONFIG_FILE) CFG_OLD = config(core.CONFIG_FILE) - except Exception as e: - logger.debug("Error %s when copying to .cfg" % (e)) + except Exception as error: + logger.debug("Error {msg} when copying to .cfg".format(msg=error)) try: # check for autoProcessMedia.cfg.spec and create if it does not exist if not os.path.isfile(core.CONFIG_SPEC_FILE): shutil.copyfile(core.CONFIG_FILE, core.CONFIG_SPEC_FILE) CFG_NEW = config(core.CONFIG_SPEC_FILE) - except Exception as e: - logger.debug("Error %s when copying to .spec" % (e)) + except Exception as error: + logger.debug("Error {msg} when copying to .spec".format(msg=error)) # check for autoProcessMedia.cfg and autoProcessMedia.cfg.spec and if they don't exist return and fail if CFG_NEW is None or 
CFG_OLD is None: @@ -255,8 +255,9 @@ class ConfigObj(configobj.ConfigObj, Section): try: if 'NZBPO_NDCATEGORY' in os.environ and 'NZBPO_SBCATEGORY' in os.environ: if os.environ['NZBPO_NDCATEGORY'] == os.environ['NZBPO_SBCATEGORY']: - logger.warning("%s category is set for SickBeard and NzbDrone. " - "Please check your config in NZBGet" % (os.environ['NZBPO_NDCATEGORY'])) + logger.warning("{x} category is set for SickBeard and NzbDrone. " + "Please check your config in NZBGet".format + (x=os.environ['NZBPO_NDCATEGORY'])) section = "Nzb" key = 'NZBOP_DESTDIR' @@ -462,15 +463,15 @@ class ConfigObj(configobj.ConfigObj, Section): CFG_NEW[section][os.environ[envCatKey]][option] = value CFG_NEW[section][os.environ[envCatKey]]['enabled'] = 1 - except Exception as e: - logger.debug("Error %s when applying NZBGet config" % (e)) + except Exception as error: + logger.debug("Error {msg} when applying NZBGet config".format(msg=error)) try: # write our new config to autoProcessMedia.cfg CFG_NEW.filename = core.CONFIG_FILE CFG_NEW.write() - except Exception as e: - logger.debug("Error %s when writing changes to .cfg" % (e)) + except Exception as error: + logger.debug("Error {msg} when writing changes to .cfg".format(msg=error)) return CFG_NEW diff --git a/core/nzbToMediaDB.py b/core/nzbToMediaDB.py index 4445b6e1..de7fd825 100644 --- a/core/nzbToMediaDB.py +++ b/core/nzbToMediaDB.py @@ -19,7 +19,7 @@ def dbFilename(filename="nzbtomedia.db", suffix=None): @return: the correct location of the database file. """ if suffix: - filename = "%s.%s" % (filename, suffix) + filename = "{0}.{1}".format(filename, suffix) return core.os.path.join(core.PROGRAM_DIR, filename) @@ -181,7 +181,7 @@ class DBConnection(object): def tableInfo(self, tableName): # FIXME ? 
binding is not supported here, but I cannot find a way to escape a string manually - cursor = self.connection.execute("PRAGMA table_info(%s)" % tableName) + cursor = self.connection.execute("PRAGMA table_info({0})".format(tableName)) columns = {} for column in cursor: columns[column['name']] = {'type': column['type']} @@ -250,8 +250,8 @@ class SchemaUpgrade(object): return column in self.connection.tableInfo(tableName) def addColumn(self, table, column, type="NUMERIC", default=0): - self.connection.action("ALTER TABLE %s ADD %s %s" % (table, column, type)) - self.connection.action("UPDATE %s SET %s = ?" % (table, column), (default,)) + self.connection.action("ALTER TABLE {0} ADD {1} {2}".format(table, column, type)) + self.connection.action("UPDATE {0} SET {1} = ?".format(table, column), (default,)) def checkDBVersion(self): result = self.connection.select("SELECT db_version FROM db_version") diff --git a/core/nzbToMediaSceneExceptions.py b/core/nzbToMediaSceneExceptions.py index 40700961..71ac28b2 100644 --- a/core/nzbToMediaSceneExceptions.py +++ b/core/nzbToMediaSceneExceptions.py @@ -61,23 +61,25 @@ def strip_groups(filename): def rename_file(filename, newfilePath): - logger.debug("Replacing file name %s with download name %s" % (filename, newfilePath), "EXCEPTION") + logger.debug("Replacing file name {old} with download name {new}".format + (old=filename, new=newfilePath), "EXCEPTION") try: os.rename(filename, newfilePath) - except Exception as e: - logger.error("Unable to rename file due to: %s" % (str(e)), "EXCEPTION") + except Exception as error: + logger.error("Unable to rename file due to: {error}".format(error=error), "EXCEPTION") def replace_filename(filename, dirname, name): head, fileExtension = os.path.splitext(os.path.basename(filename)) if media_pattern.search(os.path.basename(dirname).replace(' ', '.')) is not None: newname = os.path.basename(dirname).replace(' ', '.') - logger.debug("Replacing file name %s with directory name %s" % (head, 
newname), "EXCEPTION") + logger.debug("Replacing file name {old} with directory name {new}".format(old=head, new=newname), "EXCEPTION") elif media_pattern.search(name.replace(' ', '.').lower()) is not None: newname = name.replace(' ', '.') - logger.debug("Replacing file name %s with download name %s" % (head, newname), "EXCEPTION") + logger.debug("Replacing file name {old} with download name {new}".format + (old=head, new=newname), "EXCEPTION") else: - logger.warning("No name replacement determined for %s" % (head), "EXCEPTION") + logger.warning("No name replacement determined for {name}".format(name=head), "EXCEPTION") newname = name newfile = newname + fileExtension newfilePath = os.path.join(dirname, newfile) @@ -103,7 +105,8 @@ def reverse_filename(filename, dirname, name): else: newname = head[::-1].title() newname = newname.replace(' ', '.') - logger.debug("Reversing filename %s to %s" % (head, newname), "EXCEPTION") + logger.debug("Reversing filename {old} to {new}".format + (old=head, new=newname), "EXCEPTION") newfile = newname + fileExtension newfilePath = os.path.join(dirname, newfile) return newfilePath @@ -129,11 +132,12 @@ def rename_script(dirname): dest = os.path.join(dirname, cmd[1].split('\\')[-1].split('/')[-1]) if os.path.isfile(dest): continue - logger.debug("Renaming file %s to %s" % (orig, dest), "EXCEPTION") + logger.debug("Renaming file {source} to {destination}".format + (source=orig, destination=dest), "EXCEPTION") try: os.rename(orig, dest) - except Exception as e: - logger.error("Unable to rename file due to: %s" % (str(e)), "EXCEPTION") + except Exception as error: + logger.error("Unable to rename file due to: {error}".format(error=error), "EXCEPTION") # dict for custom groups # we can add more to this list diff --git a/core/nzbToMediaUserScript.py b/core/nzbToMediaUserScript.py index fe6a453e..d05273be 100644 --- a/core/nzbToMediaUserScript.py +++ b/core/nzbToMediaUserScript.py @@ -48,7 +48,7 @@ def external_script(outputDestination, 
torrentName, torrentLabel, settings): if transcoder.isVideoGood(video, 0): import_subs(video) else: - logger.info("Corrupt video file found %s. Deleting." % (video), "USERSCRIPT") + logger.info("Corrupt video file found {0}. Deleting.".format(video), "USERSCRIPT") os.unlink(video) for dirpath, dirnames, filenames in os.walk(outputDestination): @@ -64,22 +64,22 @@ def external_script(outputDestination, torrentName, torrentLabel, settings): command = [core.USER_SCRIPT] for param in core.USER_SCRIPT_PARAM: if param == "FN": - command.append('%s' % file) + command.append('{0}'.format(file)) continue elif param == "FP": - command.append('%s' % filePath) + command.append('{0}'.format(filePath)) continue elif param == "TN": - command.append('%s' % torrentName) + command.append('{0}'.format(torrentName)) continue elif param == "TL": - command.append('%s' % torrentLabel) + command.append('{0}'.format(torrentLabel)) continue elif param == "DN": if core.USER_SCRIPT_RUNONCE == 1: - command.append('%s' % outputDestination) + command.append('{0}'.format(outputDestination)) else: - command.append('%s' % dirpath) + command.append('{0}'.format(dirpath)) continue else: command.append(param) @@ -87,21 +87,21 @@ def external_script(outputDestination, torrentName, torrentLabel, settings): cmd = "" for item in command: cmd = cmd + " " + item - logger.info("Running script %s on file %s." % (cmd, filePath), "USERSCRIPT") + logger.info("Running script {0} on file {1}.".format(cmd, filePath), "USERSCRIPT") try: p = Popen(command) res = p.wait() if str(res) in core.USER_SCRIPT_SUCCESSCODES: # Linux returns 0 for successful. 
- logger.info("UserScript %s was successfull" % (command[0])) + logger.info("UserScript {0} was successfull".format(command[0])) result = 0 else: - logger.error("UserScript %s has failed with return code: %s" % (command[0], res), "USERSCRIPT") + logger.error("UserScript {0} has failed with return code: {1}".format(command[0], res), "USERSCRIPT") logger.info( - "If the UserScript completed successfully you should add %s to the user_script_successCodes" % ( + "If the UserScript completed successfully you should add {0} to the user_script_successCodes".format( res), "USERSCRIPT") result = int(1) except: - logger.error("UserScript %s has failed" % (command[0]), "USERSCRIPT") + logger.error("UserScript {0} has failed".format(command[0]), "USERSCRIPT") result = int(1) final_result += result @@ -114,9 +114,9 @@ def external_script(outputDestination, torrentName, torrentLabel, settings): num_files_new += 1 if core.USER_SCRIPT_CLEAN == int(1) and num_files_new == 0 and final_result == 0: - logger.info("All files have been processed. Cleaning outputDirectory %s" % (outputDestination)) + logger.info("All files have been processed. Cleaning outputDirectory {0}".format(outputDestination)) rmDir(outputDestination) elif core.USER_SCRIPT_CLEAN == int(1) and num_files_new != 0: - logger.info("%s files were processed, but %s still remain. outputDirectory will not be cleaned." % ( + logger.info("{0} files were processed, but {1} still remain. 
outputDirectory will not be cleaned.".format( num_files, num_files_new)) return [final_result, ''] diff --git a/core/nzbToMediaUtil.py b/core/nzbToMediaUtil.py index bbb58346..75c3571a 100644 --- a/core/nzbToMediaUtil.py +++ b/core/nzbToMediaUtil.py @@ -40,7 +40,7 @@ def reportNzb(failure_link, clientAgent): try: requests.post(failure_link, headers=headers, timeout=(30, 300)) except Exception as e: - logger.error("Unable to open URL %s due to %s" % (failure_link, e)) + logger.error("Unable to open URL {0} due to {1}".format(failure_link, e)) return @@ -115,13 +115,13 @@ def category_search(inputDirectory, inputName, inputCategory, root, categories): pathlist = os.path.normpath(inputDirectory).split(os.sep) if inputCategory and inputCategory in pathlist: - logger.debug("SEARCH: Found the Category: %s in directory structure" % (inputCategory)) + logger.debug("SEARCH: Found the Category: {0} in directory structure".format(inputCategory)) elif inputCategory: - logger.debug("SEARCH: Could not find the category: %s in the directory structure" % (inputCategory)) + logger.debug("SEARCH: Could not find the category: {0} in the directory structure".format(inputCategory)) else: try: inputCategory = list(set(pathlist) & set(categories))[-1] # assume last match is most relevant category. 
- logger.debug("SEARCH: Found Category: %s in directory structure" % (inputCategory)) + logger.debug("SEARCH: Found Category: {0} in directory structure".format(inputCategory)) except IndexError: inputCategory = "" logger.debug("SEARCH: Could not find a category in the directory structure") @@ -132,30 +132,30 @@ def category_search(inputDirectory, inputName, inputCategory, root, categories): if inputCategory and os.path.isdir(os.path.join(inputDirectory, inputCategory)): logger.info( - "SEARCH: Found category directory %s in input directory directory %s" % (inputCategory, inputDirectory)) + "SEARCH: Found category directory {0} in input directory directory {1}".format(inputCategory, inputDirectory)) inputDirectory = os.path.join(inputDirectory, inputCategory) - logger.info("SEARCH: Setting inputDirectory to %s" % (inputDirectory)) + logger.info("SEARCH: Setting inputDirectory to {0}".format(inputDirectory)) if inputName and os.path.isdir(os.path.join(inputDirectory, inputName)): - logger.info("SEARCH: Found torrent directory %s in input directory directory %s" % (inputName, inputDirectory)) + logger.info("SEARCH: Found torrent directory {0} in input directory directory {1}".format(inputName, inputDirectory)) inputDirectory = os.path.join(inputDirectory, inputName) - logger.info("SEARCH: Setting inputDirectory to %s" % (inputDirectory)) + logger.info("SEARCH: Setting inputDirectory to {0}".format(inputDirectory)) tordir = True elif inputName and os.path.isdir(os.path.join(inputDirectory, sanitizeName(inputName))): - logger.info("SEARCH: Found torrent directory %s in input directory directory %s" % ( + logger.info("SEARCH: Found torrent directory {0} in input directory directory {1}".format( sanitizeName(inputName), inputDirectory)) inputDirectory = os.path.join(inputDirectory, sanitizeName(inputName)) - logger.info("SEARCH: Setting inputDirectory to %s" % (inputDirectory)) + logger.info("SEARCH: Setting inputDirectory to {0}".format(inputDirectory)) tordir = True 
elif inputName and os.path.isfile(os.path.join(inputDirectory, inputName)): - logger.info("SEARCH: Found torrent file %s in input directory directory %s" % (inputName, inputDirectory)) + logger.info("SEARCH: Found torrent file {0} in input directory directory {1}".format(inputName, inputDirectory)) inputDirectory = os.path.join(inputDirectory, inputName) - logger.info("SEARCH: Setting inputDirectory to %s" % (inputDirectory)) + logger.info("SEARCH: Setting inputDirectory to {0}".format(inputDirectory)) tordir = True elif inputName and os.path.isfile(os.path.join(inputDirectory, sanitizeName(inputName))): - logger.info("SEARCH: Found torrent file %s in input directory directory %s" % ( + logger.info("SEARCH: Found torrent file {0} in input directory directory {1}".format( sanitizeName(inputName), inputDirectory)) inputDirectory = os.path.join(inputDirectory, sanitizeName(inputName)) - logger.info("SEARCH: Setting inputDirectory to %s" % (inputDirectory)) + logger.info("SEARCH: Setting inputDirectory to {0}".format(inputDirectory)) tordir = True imdbid = [item for item in pathlist if '.cp(tt' in item] # This looks for the .cp(tt imdb id in the path. 
@@ -168,7 +168,8 @@ def category_search(inputDirectory, inputName, inputCategory, root, categories): index = pathlist.index(inputCategory) if index + 1 < len(pathlist): tordir = True - logger.info("SEARCH: Found a unique directory %s in the category directory" % (pathlist[index + 1])) + logger.info("SEARCH: Found a unique directory {0} in the category directory".format + (pathlist[index + 1])) if not inputName: inputName = pathlist[index + 1] except ValueError: @@ -176,7 +177,7 @@ def category_search(inputDirectory, inputName, inputCategory, root, categories): if inputName and not tordir: if inputName in pathlist or sanitizeName(inputName) in pathlist: - logger.info("SEARCH: Found torrent directory %s in the directory structure" % (inputName)) + logger.info("SEARCH: Found torrent directory {0} in the directory structure".format(inputName)) tordir = True else: root = 1 @@ -206,7 +207,7 @@ def is_minSize(inputName, minSize): try: inputSize = getDirSize(os.path.dirname(inputName)) except: - logger.error("Failed to get file size for %s" % (inputName), 'MINSIZE') + logger.error("Failed to get file size for {0}".format(inputName), 'MINSIZE') return True # Ignore files under a certain size @@ -221,9 +222,9 @@ def is_sample(inputName): def copy_link(src, targetLink, useLink): - logger.info("MEDIAFILE: [%s]" % (os.path.basename(targetLink)), 'COPYLINK') - logger.info("SOURCE FOLDER: [%s]" % (os.path.dirname(src)), 'COPYLINK') - logger.info("TARGET FOLDER: [%s]" % (os.path.dirname(targetLink)), 'COPYLINK') + logger.info("MEDIAFILE: [{0}]".format(os.path.basename(targetLink)), 'COPYLINK') + logger.info("SOURCE FOLDER: [{0}]".format(os.path.dirname(src)), 'COPYLINK') + logger.info("TARGET FOLDER: [{0}]".format(os.path.dirname(targetLink)), 'COPYLINK') if src != targetLink and os.path.exists(targetLink): logger.info("MEDIAFILE already exists in the TARGET folder, skipping ...", 'COPYLINK') @@ -263,7 +264,7 @@ def copy_link(src, targetLink, useLink): shutil.move(src, targetLink) 
return True except Exception as e: - logger.warning("Error: %s, copying instead ... " % (e), 'COPYLINK') + logger.warning("Error: {0}, copying instead ... ".format(e), 'COPYLINK') logger.info("Copying SOURCE MEDIAFILE -> TARGET FOLDER", 'COPYLINK') shutil.copy(src, targetLink) @@ -277,26 +278,26 @@ def replace_links(link): if os.name == 'nt': import jaraco if not jaraco.windows.filesystem.islink(link): - logger.debug('%s is not a link' % (link)) + logger.debug('{0} is not a link'.format(link)) return while jaraco.windows.filesystem.islink(target): target = jaraco.windows.filesystem.readlink(target) n = n + 1 else: if not os.path.islink(link): - logger.debug('%s is not a link' % (link)) + logger.debug('{0} is not a link'.format(link)) return while os.path.islink(target): target = os.readlink(target) n = n + 1 if n > 1: - logger.info("Changing sym-link: %s to point directly to file: %s" % (link, target), 'COPYLINK') + logger.info("Changing sym-link: {0} to point directly to file: {1}".format(link, target), 'COPYLINK') os.unlink(link) linktastic.symlink(target, link) def flatten(outputDestination): - logger.info("FLATTEN: Flattening directory: %s" % (outputDestination)) + logger.info("FLATTEN: Flattening directory: {0}".format(outputDestination)) for outputFile in listMediaFiles(outputDestination): dirPath = os.path.dirname(outputFile) fileName = os.path.basename(outputFile) @@ -309,7 +310,7 @@ def flatten(outputDestination): try: shutil.move(outputFile, target) except: - logger.error("Could not flatten %s" % (outputFile), 'FLATTEN') + logger.error("Could not flatten {0}".format(outputFile), 'FLATTEN') removeEmptyFolders(outputDestination) # Cleanup empty directories @@ -320,7 +321,7 @@ def removeEmptyFolders(path, removeRoot=True): return # remove empty subfolders - logger.debug("Checking for empty folders in:%s" % (path)) + logger.debug("Checking for empty folders in:{0}".format(path)) files = os.listdir(path) if len(files): for f in files: @@ -331,7 +332,7 @@ def 
removeEmptyFolders(path, removeRoot=True): # if folder empty, delete it files = os.listdir(path) if len(files) == 0 and removeRoot: - logger.debug("Removing empty folder:%s" % (path)) + logger.debug("Removing empty folder:{}".format(path)) os.rmdir(path) @@ -386,16 +387,16 @@ def WakeUp(): i = 1 while TestCon(host, port) == "Down" and i < 4: - logger.info(("Sending WakeOnLan Magic Packet for mac: %s" % (mac))) + logger.info(("Sending WakeOnLan Magic Packet for mac: {0}".format(mac))) WakeOnLan(mac) time.sleep(20) i = i + 1 if TestCon(host, port) == "Down": # final check. - logger.warning("System with mac: %s has not woken after 3 attempts. Continuing with the rest of the script." % ( - mac)) + logger.warning("System with mac: {0} has not woken after 3 attempts. " + "Continuing with the rest of the script.".format(mac)) else: - logger.info("System with mac: %s has been woken. Continuing with the rest of the script." % (mac)) + logger.info("System with mac: {0} has been woken. Continuing with the rest of the script.".format(mac)) def CharReplace(Name): @@ -454,23 +455,23 @@ def convert_to_ascii(inputName, dirName): encoded, base2 = CharReplace(base) if encoded: dirName = os.path.join(dir, base2) - logger.info("Renaming directory to: %s." % (base2), 'ENCODER') + logger.info("Renaming directory to: {0}.".format(base2), 'ENCODER') os.rename(os.path.join(dir, base), dirName) if 'NZBOP_SCRIPTDIR' in os.environ: - print("[NZB] DIRECTORY=%s" % (dirName)) + print("[NZB] DIRECTORY={0}".format(dirName)) for dirname, dirnames, filenames in os.walk(dirName, topdown=False): for subdirname in dirnames: encoded, subdirname2 = CharReplace(subdirname) if encoded: - logger.info("Renaming directory to: %s." 
% (subdirname2), 'ENCODER') + logger.info("Renaming directory to: {0}.".format(subdirname2), 'ENCODER') os.rename(os.path.join(dirname, subdirname), os.path.join(dirname, subdirname2)) for dirname, dirnames, filenames in os.walk(dirName): for filename in filenames: encoded, filename2 = CharReplace(filename) if encoded: - logger.info("Renaming file to: %s." % (filename2), 'ENCODER') + logger.info("Renaming file to: {0}.".format(filename2), 'ENCODER') os.rename(os.path.join(dirname, filename), os.path.join(dirname, filename2)) return inputName, dirName @@ -604,7 +605,7 @@ def getDirs(section, subsection, link='hard'): def processDir(path): folders = [] - logger.info("Searching %s for mediafiles to post-process ..." % (path)) + logger.info("Searching {0} for mediafiles to post-process ...".format(path)) sync = [o for o in os.listdir(path) if os.path.splitext(o)[1] in ['.!sync', '.bts']] # search for single files and move them into their own folder for post-processing for mediafile in [os.path.join(path, o) for o in os.listdir(path) if @@ -614,7 +615,7 @@ def getDirs(section, subsection, link='hard'): if os.path.split(mediafile)[1] in ['Thumbs.db', 'thumbs.db']: continue try: - logger.debug("Found file %s in root directory %s." 
% (os.path.split(mediafile)[1], path)) + logger.debug("Found file {0} in root directory {1}.".format(os.path.split(mediafile)[1], path)) newPath = None fileExt = os.path.splitext(mediafile)[1] try: @@ -626,7 +627,7 @@ def getDirs(section, subsection, link='hard'): album = f.album # create new path - newPath = os.path.join(path, "%s - %s" % (sanitizeName(artist), sanitizeName(album))) + newPath = os.path.join(path, "{0} - {1}".format(sanitizeName(artist), sanitizeName(album))) elif fileExt in core.MEDIACONTAINER: f = guessit.guess_video_info(mediafile) @@ -638,7 +639,7 @@ def getDirs(section, subsection, link='hard'): newPath = os.path.join(path, sanitizeName(title)) except Exception as e: - logger.error("Exception parsing name for media file: %s: %s" % (os.path.split(mediafile)[1], e)) + logger.error("Exception parsing name for media file: {0}: {1}".format(os.path.split(mediafile)[1], e)) if not newPath: title = os.path.splitext(os.path.basename(mediafile))[0] @@ -667,7 +668,7 @@ def getDirs(section, subsection, link='hard'): # link file to its new path copy_link(mediafile, newfile, link) except Exception as e: - logger.error("Failed to move %s to its own directory: %s" % (os.path.split(mediafile)[1], e)) + logger.error("Failed to move {0} to its own directory: {1}".format(os.path.split(mediafile)[1], e)) # removeEmptyFolders(path, removeRoot=False) @@ -687,8 +688,8 @@ def getDirs(section, subsection, link='hard'): elif os.path.exists(core.CFG[section][subsection]["watch_dir"]): to_return.extend(processDir(core.CFG[section][subsection]["watch_dir"])) except Exception as e: - logger.error("Failed to add directories from %s for post-processing: %s" % ( - core.CFG[section][subsection]["watch_dir"], e)) + logger.error("Failed to add directories from {0} for post-processing: {1}".format + (core.CFG[section][subsection]["watch_dir"], e)) if core.USELINK == 'move': try: @@ -696,10 +697,10 @@ def getDirs(section, subsection, link='hard'): if 
os.path.exists(outputDirectory): to_return.extend(processDir(outputDirectory)) except Exception as e: - logger.error("Failed to add directories from %s for post-processing: %s" % (core.OUTPUTDIRECTORY, e)) + logger.error("Failed to add directories from {0} for post-processing: {1}".format(core.OUTPUTDIRECTORY, e)) if not to_return: - logger.debug("No directories identified in %s:%s for post-processing" % (section, subsection)) + logger.debug("No directories identified in {0}:{1} for post-processing".format(section, subsection)) return list(set(to_return)) @@ -724,19 +725,19 @@ def onerror(func, path, exc_info): def rmDir(dirName): - logger.info("Deleting %s" % (dirName)) + logger.info("Deleting {0}".format(dirName)) try: shutil.rmtree(dirName, onerror=onerror) except: - logger.error("Unable to delete folder %s" % (dirName)) + logger.error("Unable to delete folder {0}".format(dirName)) def cleanDir(path, section, subsection): if not os.path.exists(path): - logger.info('Directory %s has been processed and removed ...' % (path), 'CLEANDIR') + logger.info('Directory {0} has been processed and removed ...'.format(path), 'CLEANDIR') return if core.FORCE_CLEAN and not core.FAILED: - logger.info('Doing Forceful Clean of %s' % (path), 'CLEANDIR') + logger.info('Doing Forceful Clean of {0}'.format(path), 'CLEANDIR') rmDir(path) return try: @@ -753,15 +754,15 @@ def cleanDir(path, section, subsection): num_files = 'unknown' if num_files > 0: logger.info( - "Directory %s still contains %s unprocessed file(s), skipping ..." % (path, num_files), + "Directory {0} still contains {1} unprocessed file(s), skipping ...".format(path, num_files), 'CLEANDIRS') return - logger.info("Directory %s has been processed, removing ..." 
% (path), 'CLEANDIRS') + logger.info("Directory {0} has been processed, removing ...".format(path), 'CLEANDIRS') try: shutil.rmtree(path, onerror=onerror) except: - logger.error("Unable to delete directory %s" % (path)) + logger.error("Unable to delete directory {0}".format(path)) def create_torrent_class(clientAgent): @@ -770,14 +771,14 @@ def create_torrent_class(clientAgent): if clientAgent == 'utorrent': try: - logger.debug("Connecting to %s: %s" % (clientAgent, core.UTORRENTWEBUI)) + logger.debug("Connecting to {0}: {1}".format(clientAgent, core.UTORRENTWEBUI)) tc = UTorrentClient(core.UTORRENTWEBUI, core.UTORRENTUSR, core.UTORRENTPWD) except: logger.error("Failed to connect to uTorrent") if clientAgent == 'transmission': try: - logger.debug("Connecting to %s: http://%s:%s" % ( + logger.debug("Connecting to {0}: http://{1}:{2}".format( clientAgent, core.TRANSMISSIONHOST, core.TRANSMISSIONPORT)) tc = TransmissionClient(core.TRANSMISSIONHOST, core.TRANSMISSIONPORT, core.TRANSMISSIONUSR, @@ -787,7 +788,7 @@ def create_torrent_class(clientAgent): if clientAgent == 'deluge': try: - logger.debug("Connecting to %s: http://%s:%s" % (clientAgent, core.DELUGEHOST, core.DELUGEPORT)) + logger.debug("Connecting to {0}: http://{1}:{2}".format(clientAgent, core.DELUGEHOST, core.DELUGEPORT)) tc = DelugeClient() tc.connect(host=core.DELUGEHOST, port=core.DELUGEPORT, username=core.DELUGEUSR, password=core.DELUGEPWD) @@ -798,7 +799,7 @@ def create_torrent_class(clientAgent): def pause_torrent(clientAgent, inputHash, inputID, inputName): - logger.debug("Stopping torrent %s in %s while processing" % (inputName, clientAgent)) + logger.debug("Stopping torrent {0} in {1} while processing".format(inputName, clientAgent)) try: if clientAgent == 'utorrent' and core.TORRENT_CLASS != "": core.TORRENT_CLASS.stop(inputHash) @@ -808,13 +809,13 @@ def pause_torrent(clientAgent, inputHash, inputID, inputName): core.TORRENT_CLASS.core.pause_torrent([inputID]) time.sleep(5) except: - 
logger.warning("Failed to stop torrent %s in %s" % (inputName, clientAgent)) + logger.warning("Failed to stop torrent {0} in {1}".format(inputName, clientAgent)) def resume_torrent(clientAgent, inputHash, inputID, inputName): if not core.TORRENT_RESUME == 1: return - logger.debug("Starting torrent %s in %s" % (inputName, clientAgent)) + logger.debug("Starting torrent {0} in {1}".format(inputName, clientAgent)) try: if clientAgent == 'utorrent' and core.TORRENT_CLASS != "": core.TORRENT_CLASS.start(inputHash) @@ -824,12 +825,12 @@ def resume_torrent(clientAgent, inputHash, inputID, inputName): core.TORRENT_CLASS.core.resume_torrent([inputID]) time.sleep(5) except: - logger.warning("Failed to start torrent %s in %s" % (inputName, clientAgent)) + logger.warning("Failed to start torrent {0} in {1}".format(inputName, clientAgent)) def remove_torrent(clientAgent, inputHash, inputID, inputName): if core.DELETE_ORIGINAL == 1 or core.USELINK == 'move': - logger.debug("Deleting torrent %s from %s" % (inputName, clientAgent)) + logger.debug("Deleting torrent {0} from {1}".format(inputName, clientAgent)) try: if clientAgent == 'utorrent' and core.TORRENT_CLASS != "": core.TORRENT_CLASS.removedata(inputHash) @@ -840,13 +841,13 @@ def remove_torrent(clientAgent, inputHash, inputID, inputName): core.TORRENT_CLASS.core.remove_torrent(inputID, True) time.sleep(5) except: - logger.warning("Failed to delete torrent %s in %s" % (inputName, clientAgent)) + logger.warning("Failed to delete torrent {0} in {1}".format(inputName, clientAgent)) else: resume_torrent(clientAgent, inputHash, inputID, inputName) def find_download(clientAgent, download_id): - logger.debug("Searching for Download on %s ..." 
% (clientAgent)) + logger.debug("Searching for Download on {0} ...".format(clientAgent)) if clientAgent == 'utorrent': torrents = core.TORRENT_CLASS.list()[1]['torrents'] for torrent in torrents: @@ -862,9 +863,9 @@ def find_download(clientAgent, download_id): return False if clientAgent == 'sabnzbd': if "http" in core.SABNZBDHOST: - baseURL = "%s:%s/api" % (core.SABNZBDHOST, core.SABNZBDPORT) + baseURL = "{0}:{1}/api".format(core.SABNZBDHOST, core.SABNZBDPORT) else: - baseURL = "http://%s:%s/api" % (core.SABNZBDHOST, core.SABNZBDPORT) + baseURL = "http://{0}:{1}/api".format(core.SABNZBDHOST, core.SABNZBDPORT) url = baseURL params = { 'apikey': core.SABNZBDAPIKEY, @@ -889,9 +890,9 @@ def get_nzoid(inputName): slots = [] logger.debug("Searching for nzoid from SAbnzbd ...") if "http" in core.SABNZBDHOST: - baseURL = "%s:%s/api" % (core.SABNZBDHOST, core.SABNZBDPORT) + baseURL = "{0}:{1}/api".format(core.SABNZBDHOST, core.SABNZBDPORT) else: - baseURL = "http://%s:%s/api" % (core.SABNZBDHOST, core.SABNZBDPORT) + baseURL = "http://{0}:{1}/api".format(core.SABNZBDHOST, core.SABNZBDPORT) url = baseURL params = { 'apikey': core.SABNZBDAPIKEY, @@ -925,7 +926,7 @@ def get_nzoid(inputName): for nzo_id, name in slots: if name in [inputName, cleanName]: nzoid = nzo_id - logger.debug("Found nzoid: %s" % nzoid) + logger.debug("Found nzoid: {0}".format(nzoid)) break except: logger.warning("Data from SABnzbd could not be parsed") @@ -988,7 +989,8 @@ def listMediaFiles(path, minSize=0, delete_ignored=0, media=True, audio=True, me if delete_ignored == 1: try: os.unlink(path) - logger.debug('Ignored file %s has been removed ...' % (curFile)) + logger.debug('Ignored file {0} has been removed ...'.format + (curFile)) except: pass else: @@ -1009,7 +1011,8 @@ def listMediaFiles(path, minSize=0, delete_ignored=0, media=True, audio=True, me if delete_ignored == 1: try: os.unlink(fullCurFile) - logger.debug('Ignored file %s has been removed ...' 
% (curFile)) + logger.debug('Ignored file {0} has been removed ...'.format + (curFile)) except: pass continue @@ -1022,21 +1025,21 @@ def listMediaFiles(path, minSize=0, delete_ignored=0, media=True, audio=True, me def find_imdbid(dirName, inputName): imdbid = None - logger.info('Attemping imdbID lookup for %s' % (inputName)) + logger.info('Attemping imdbID lookup for {0}'.format(inputName)) # find imdbid in dirName logger.info('Searching folder and file names for imdbID ...') m = re.search('(tt\d{7})', dirName + inputName) if m: imdbid = m.group(1) - logger.info("Found imdbID [%s]" % imdbid) + logger.info("Found imdbID [{0}]".format(imdbid)) return imdbid if os.path.isdir(dirName): for file in os.listdir(dirName): m = re.search('(tt\d{7})', file) if m: imdbid = m.group(1) - logger.info("Found imdbID [%s] via file name" % imdbid) + logger.info("Found imdbID [{0}] via file name".format(imdbid)) return imdbid if 'NZBPR__DNZB_MOREINFO' in os.environ: dnzb_more_info = os.environ.get('NZBPR__DNZB_MOREINFO', '') @@ -1045,7 +1048,7 @@ def find_imdbid(dirName, inputName): m = regex.match(dnzb_more_info) if m: imdbid = m.group(1) - logger.info("Found imdbID [%s] from DNZB-MoreInfo" % imdbid) + logger.info("Found imdbID [{0}] from DNZB-MoreInfo".format(imdbid)) return imdbid logger.info('Searching IMDB for imdbID ...') guess = guessit.guess_movie_info(inputName) @@ -1062,12 +1065,12 @@ def find_imdbid(dirName, inputName): url = "http://www.omdbapi.com" - logger.debug("Opening URL: %s" % url) + logger.debug("Opening URL: {0}".format(url)) try: r = requests.get(url, params={'y': year, 't': title}, verify=False, timeout=(60, 300)) except requests.ConnectionError: - logger.error("Unable to open URL %s" % url) + logger.error("Unable to open URL {0}".format(url)) return results = r.json() @@ -1078,10 +1081,10 @@ def find_imdbid(dirName, inputName): pass if imdbid: - logger.info("Found imdbID [%s]" % imdbid) + logger.info("Found imdbID [{0}]".format(imdbid)) return imdbid - 
logger.warning('Unable to find a imdbID for %s' % (inputName)) + logger.warning('Unable to find a imdbID for {0}'.format(inputName)) return imdbid @@ -1103,7 +1106,7 @@ def extractFiles(src, dst=None, keep_archive=None): extracted_folder.append(dst or dirPath) extracted_archive.append(archiveName) except Exception: - logger.error("Extraction failed for: %s" % (fullFileName)) + logger.error("Extraction failed for: {0}".format(fullFileName)) for folder in extracted_folder: for inputFile in listMediaFiles(folder, media=False, audio=False, meta=False, archives=True): @@ -1112,14 +1115,14 @@ def extractFiles(src, dst=None, keep_archive=None): archiveName = re.sub(r"part[0-9]+", "", archiveName) if archiveName not in extracted_archive or keep_archive is True: continue # don't remove if we haven't extracted this archive, or if we want to preserve them. - logger.info("Removing extracted archive %s from folder %s ..." % (fullFileName, folder)) + logger.info("Removing extracted archive {0} from folder {1} ...".format(fullFileName, folder)) try: if not os.access(inputFile, os.W_OK): os.chmod(inputFile, stat.S_IWUSR) os.remove(inputFile) time.sleep(1) except Exception as e: - logger.error("Unable to remove file %s due to: %s" % (inputFile, e)) + logger.error("Unable to remove file {0} due to: {1}".format(inputFile, e)) def import_subs(filename): @@ -1139,13 +1142,13 @@ def import_subs(filename): if not languages: return - logger.debug("Attempting to download subtitles for %s" % (filename), 'SUBTITLES') + logger.debug("Attempting to download subtitles for {0}".format(filename), 'SUBTITLES') try: video = subliminal.scan_video(filename, subtitles=True, embedded_subtitles=True) subtitles = subliminal.download_best_subtitles({video}, languages, hearing_impaired=False) subliminal.save_subtitles(subtitles) except Exception as e: - logger.error("Failed to download subtitles for %s due to: %s" % (filename, e), 'SUBTITLES') + logger.error("Failed to download subtitles for {0} due to: 
{1}".format(filename, e), 'SUBTITLES') def server_responding(baseURL): @@ -1167,7 +1170,7 @@ def plex_update(category): section = None if not core.PLEXSEC: return - logger.debug("Attempting to update Plex Library for category %s." % (category), 'PLEX') + logger.debug("Attempting to update Plex Library for category {0}.".format(category), 'PLEX') for item in core.PLEXSEC: if item[0] == category: section = item[1] @@ -1210,7 +1213,7 @@ def backupVersionedFile(old_file, version): def update_downloadInfoStatus(inputName, status): - logger.db("Updating status of our download %s in the DB to %s" % (inputName, status)) + logger.db("Updating status of our download {0} in the DB to {1}".format(inputName, status)) myDB = nzbToMediaDB.DBConnection() myDB.action("UPDATE downloads SET status=?, last_update=? WHERE input_name=?", @@ -1218,7 +1221,7 @@ def update_downloadInfoStatus(inputName, status): def get_downloadInfo(inputName, status): - logger.db("Getting download info for %s from the DB" % (inputName)) + logger.db("Getting download info for {0} from the DB".format(inputName)) myDB = nzbToMediaDB.DBConnection() sqlResults = myDB.select("SELECT * FROM downloads WHERE input_name=? 
AND status=?", diff --git a/core/synchronousdeluge/rencode.py b/core/synchronousdeluge/rencode.py index f27c3304..8ab01375 100644 --- a/core/synchronousdeluge/rencode.py +++ b/core/synchronousdeluge/rencode.py @@ -435,7 +435,7 @@ def dumps(x, float_bits=DEFAULT_FLOAT_BITS): elif float_bits == 64: encode_func[FloatType] = encode_float64 else: - raise ValueError('Float bits (%d) is not 32 or 64' % float_bits) + raise ValueError('Float bits ({0:d}) is not 32 or 64'.format(float_bits)) r = [] encode_func[type(x)](x, r) finally: diff --git a/core/transcoder/transcoder.py b/core/transcoder/transcoder.py index 9731f689..a557c808 100644 --- a/core/transcoder/transcoder.py +++ b/core/transcoder/transcoder.py @@ -37,24 +37,25 @@ def isVideoGood(videofile, status): else: return True - logger.info('Checking [%s] for corruption, please stand by ...' % (fileNameExt), 'TRANSCODER') + logger.info('Checking [{0}] for corruption, please stand by ...'.format(fileNameExt), 'TRANSCODER') video_details, result = getVideoDetails(videofile) if result != 0: - logger.error("FAILED: [%s] is corrupted!" % (fileNameExt), 'TRANSCODER') + logger.error("FAILED: [{0}] is corrupted!".format(fileNameExt), 'TRANSCODER') return False if video_details.get("error"): - logger.info("FAILED: [%s] returned error [%s]." % (fileNameExt, str(video_details.get("error"))), 'TRANSCODER') + logger.info("FAILED: [{0}] returned error [{1}].".format(fileNameExt, video_details.get("error")), 'TRANSCODER') return False if video_details.get("streams"): videoStreams = [item for item in video_details["streams"] if item["codec_type"] == "video"] audioStreams = [item for item in video_details["streams"] if item["codec_type"] == "audio"] if len(videoStreams) > 0 and len(audioStreams) > 0: - logger.info("SUCCESS: [%s] has no corruption." 
% (fileNameExt), 'TRANSCODER') + logger.info("SUCCESS: [{0}] has no corruption.".format(fileNameExt), 'TRANSCODER') return True else: - logger.info("FAILED: [%s] has %s video streams and %s audio streams. Assume corruption." % ( - fileNameExt, str(len(videoStreams)), str(len(audioStreams))), 'TRANSCODER') + logger.info("FAILED: [{0}] has {1} video streams and {2} audio streams. " + "Assume corruption.".format + (fileNameExt, len(videoStreams), len(audioStreams)), 'TRANSCODER') return False @@ -64,7 +65,7 @@ def zip_out(file, img, bitbucket): try: procin = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=bitbucket) except: - logger.error("Extracting [%s] has failed" % (file), 'TRANSCODER') + logger.error("Extracting [{0}] has failed".format(file), 'TRANSCODER') return procin @@ -108,7 +109,7 @@ def getVideoDetails(videofile, img=None, bitbucket=None): result = proc.returncode video_details = json.loads(out) except: - logger.error("Checking [%s] has failed" % (file), 'TRANSCODER') + logger.error("Checking [{0}] has failed".format(file), 'TRANSCODER') return video_details, result @@ -124,7 +125,7 @@ def buildCommands(file, newDir, movieName, bitbucket): if check and core.CONCAT: name = movieName elif check: - name = ('%s.cd%s' % (movieName, check.groups()[0])) + name = ('{0}.cd{1}'.format(movieName, check.groups()[0])) elif core.CONCAT and re.match("(.+)[cC][dD][0-9]", name): name = re.sub("([\ \.\-\_\=\:]+[cC][dD][0-9])", "", name) if ext == core.VEXTENSION and newDir == dir: # we need to change the name to prevent overwriting itself. 
@@ -545,20 +546,20 @@ def extract_subs(file, newfilePath, bitbucket): lan = "unk" if num == 1: - outputFile = os.path.join(subdir, "%s.srt" % (name)) + outputFile = os.path.join(subdir, "{0}.srt".format(name)) if os.path.isfile(outputFile): - outputFile = os.path.join(subdir, "%s.%s.srt" % (name, n)) + outputFile = os.path.join(subdir, "{0}.{1}.srt".format(name, n)) else: - outputFile = os.path.join(subdir, "%s.%s.srt" % (name, lan)) + outputFile = os.path.join(subdir, "{0}.{1}.srt".format(name, lan)) if os.path.isfile(outputFile): - outputFile = os.path.join(subdir, "%s.%s.%s.srt" % (name, lan, n)) + outputFile = os.path.join(subdir, "{0}.{1}.{2}.srt".format(name, lan, n)) command = [core.FFMPEG, '-loglevel', 'warning', '-i', file, '-vn', '-an', '-codec:' + str(idx), 'srt', outputFile] if platform.system() != 'Windows': command = core.NICENESS + command - logger.info("Extracting %s subtitle from: %s" % (lan, file)) + logger.info("Extracting {0} subtitle from: {1}".format(lan, file)) print_cmd(command) result = 1 # set result to failed in case call fails. 
try: @@ -573,7 +574,7 @@ def extract_subs(file, newfilePath, bitbucket): shutil.copymode(file, outputFile) except: pass - logger.info("Extracting %s subtitle from %s has succeeded" % (lan, file)) + logger.info("Extracting {0} subtitle from {1} has succeeded".format(lan, file)) else: logger.error("Extracting subtitles has failed") @@ -587,11 +588,11 @@ def processList(List, newDir, bitbucket): for item in List: ext = os.path.splitext(item)[1].lower() if ext in ['.iso', '.bin', '.img'] and ext not in core.IGNOREEXTENSIONS: - logger.debug("Attempting to rip disk image: %s" % (item), "TRANSCODER") + logger.debug("Attempting to rip disk image: {0}".format(item), "TRANSCODER") newList.extend(ripISO(item, newDir, bitbucket)) remList.append(item) elif re.match(".+VTS_[0-9][0-9]_[0-9].[Vv][Oo][Bb]", item) and '.vob' not in core.IGNOREEXTENSIONS: - logger.debug("Found VIDEO_TS image file: %s" % (item), "TRANSCODER") + logger.debug("Found VIDEO_TS image file: {0}".format(item), "TRANSCODER") if not vtsPath: try: vtsPath = re.match("(.+VIDEO_TS)", item).groups()[0] @@ -617,7 +618,7 @@ def processList(List, newDir, bitbucket): List.extend(newList) for item in remList: List.remove(item) - logger.debug("Successfully extracted .vob file %s from disk image" % (newList[0]), "TRANSCODER") + logger.debug("Successfully extracted .vob file {0} from disk image".format(newList[0]), "TRANSCODER") elif newList and not success: newList = [] remList = [] @@ -630,12 +631,12 @@ def ripISO(item, newDir, bitbucket): failure_dir = 'failure' # Mount the ISO in your OS and call combineVTS. if not core.SEVENZIP: - logger.error("No 7zip installed. Can't extract image file %s" % (item), "TRANSCODER") + logger.error("No 7zip installed. 
Can't extract image file {0}".format(item), "TRANSCODER") newFiles = [failure_dir] return newFiles cmd = [core.SEVENZIP, 'l', item] try: - logger.debug("Attempting to extract .vob from image file %s" % (item), "TRANSCODER") + logger.debug("Attempting to extract .vob from image file {0}".format(item), "TRANSCODER") print_cmd(cmd) proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=bitbucket) out, err = proc.communicate() @@ -646,7 +647,7 @@ def ripISO(item, newDir, bitbucket): concat = [] m = 1 while True: - vtsName = 'VIDEO_TS%sVTS_%02d_%d.VOB' % (os.sep, n + 1, m) + vtsName = 'VIDEO_TS{0}VTS_{1:02d}_{2:d}.VOB'.format(os.sep, n + 1, m) if vtsName in fileList: concat.append(vtsName) m += 1 @@ -657,16 +658,16 @@ def ripISO(item, newDir, bitbucket): if core.CONCAT: combined.extend(concat) continue - name = '%s.cd%s' % (os.path.splitext(os.path.split(item)[1])[0], str(n + 1)) + name = '{0}.cd{1}'.format(os.path.splitext(os.path.split(item)[1])[0], str(n + 1)) newFiles.append({item: {'name': name, 'files': concat}}) if core.CONCAT: name = os.path.splitext(os.path.split(item)[1])[0] newFiles.append({item: {'name': name, 'files': combined}}) if not newFiles: - logger.error("No VIDEO_TS folder found in image file %s" % (item), "TRANSCODER") + logger.error("No VIDEO_TS folder found in image file {0}".format(item), "TRANSCODER") newFiles = [failure_dir] except: - logger.error("Failed to extract from image file %s" % (item), "TRANSCODER") + logger.error("Failed to extract from image file {0}".format(item), "TRANSCODER") newFiles = [failure_dir] return newFiles @@ -678,7 +679,7 @@ def combineVTS(vtsPath): concat = '' m = 1 while True: - vtsName = 'VTS_%02d_%d.VOB' % (n + 1, m) + vtsName = 'VTS_{0:02d}_{1:d}.VOB'.format(n + 1, m) if os.path.isfile(os.path.join(vtsPath, vtsName)): concat = concat + os.path.join(vtsPath, vtsName) + '|' m += 1 @@ -689,9 +690,9 @@ def combineVTS(vtsPath): if core.CONCAT: combined = combined + concat + '|' continue - 
newFiles.append('concat:%s' % concat[:-1]) + newFiles.append('concat:{0}'.format(concat[:-1])) if core.CONCAT: - newFiles.append('concat:%s' % combined[:-1]) + newFiles.append('concat:{0}'.format(combined[:-1])) return newFiles @@ -707,7 +708,7 @@ def combineCD(combine): else: break if concat: - newFiles.append('concat:%s' % concat[:-1]) + newFiles.append('concat:{0}'.format(concat[:-1])) return newFiles @@ -715,7 +716,7 @@ def print_cmd(command): cmd = "" for item in command: cmd = cmd + " " + str(item) - logger.debug("calling command:%s" % (cmd)) + logger.debug("calling command:{0}".format(cmd)) def Transcode_directory(dirName): @@ -756,11 +757,11 @@ def Transcode_directory(dirName): os.remove(newfilePath) except OSError as e: if e.errno != errno.ENOENT: # Ignore the error if it's just telling us that the file doesn't exist - logger.debug("Error when removing transcoding target: %s" % (e)) + logger.debug("Error when removing transcoding target: {0}".format(e)) except Exception as e: - logger.debug("Error when removing transcoding target: %s" % (e)) + logger.debug("Error when removing transcoding target: {0}".format(e)) - logger.info("Transcoding video: %s" % (newfilePath)) + logger.info("Transcoding video: {0}".format(newfilePath)) print_cmd(command) result = 1 # set result to failed in case call fails. 
try: @@ -777,7 +778,7 @@ def Transcode_directory(dirName): proc.communicate() result = proc.returncode except: - logger.error("Transcoding of video %s has failed" % (newfilePath)) + logger.error("Transcoding of video {0} has failed".format(newfilePath)) if core.SUBSDIR and result == 0 and isinstance(file, str): for sub in get_subs(file): @@ -793,14 +794,14 @@ def Transcode_directory(dirName): shutil.copymode(file, newfilePath) except: pass - logger.info("Transcoding of video to %s succeeded" % (newfilePath)) + logger.info("Transcoding of video to {0} succeeded".format(newfilePath)) if os.path.isfile(newfilePath) and (file in newList or not core.DUPLICATE): try: os.unlink(file) except: pass else: - logger.error("Transcoding of video to %s failed with result %s" % (newfilePath, str(result))) + logger.error("Transcoding of video to {0} failed with result {1}".format(newfilePath, result)) # this will be 0 (successful) it all are successful, else will return a positive integer for failure. final_result = final_result + result if final_result == 0 and not core.DUPLICATE: diff --git a/core/transmissionrpc/client.py b/core/transmissionrpc/client.py index 6379e595..04c85ac0 100644 --- a/core/transmissionrpc/client.py +++ b/core/transmissionrpc/client.py @@ -100,7 +100,7 @@ def parse_torrent_ids(args): except ValueError: pass if not addition: - raise ValueError('Invalid torrent id, \"%s\"' % item) + raise ValueError('Invalid torrent id, {item!r}'.format(item=item)) ids.extend(addition) elif isinstance(args, (list, tuple)): for item in args: @@ -251,20 +251,20 @@ class Client(object): start = time.time() http_data = self._http_query(query, timeout) elapsed = time.time() - start - LOGGER.info('http request took %.3f s' % (elapsed)) + LOGGER.info('http request took {time:.3f} s'.format(time=elapsed)) try: data = json.loads(http_data) except ValueError as error: LOGGER.error('Error: ' + str(error)) - LOGGER.error('Request: \"%s\"' % (query)) - LOGGER.error('HTTP data: \"%s\"' % 
(http_data)) + LOGGER.error('Request: {request!r}'.format(request=query)) + LOGGER.error('HTTP data: {data!r}'.format(data=http_data)) raise LOGGER.debug(json.dumps(data, indent=2)) if 'result' in data: if data['result'] != 'success': - raise TransmissionError('Query failed with result \"%s\".' % (data['result'])) + raise TransmissionError('Query failed with result {result!r}.'.format(result=data['result'])) else: raise TransmissionError('Query failed without result.') @@ -348,8 +348,9 @@ class Client(object): Add a warning to the log if the Transmission RPC version is lower then the provided version. """ if self.rpc_version < version: - LOGGER.warning('Using feature not supported by server. RPC version for server %d, feature introduced in %d.' - % (self.rpc_version, version)) + LOGGER.warning('Using feature not supported by server. ' + 'RPC version for server {x}, feature introduced in {y}.'.format + (x=self.rpc_version, y=version)) def add_torrent(self, torrent, timeout=None, **kwargs): """ diff --git a/core/transmissionrpc/error.py b/core/transmissionrpc/error.py index 6b44bf32..ecd6bf11 100644 --- a/core/transmissionrpc/error.py +++ b/core/transmissionrpc/error.py @@ -19,7 +19,7 @@ class TransmissionError(Exception): def __str__(self): if self.original: original_name = type(self.original).__name__ - return '%s Original exception: %s, "%s"' % (self.message, original_name, str(self.original)) + return '{0} Original exception: {1}, "{2}"'.format(self.message, original_name, str(self.original)) else: return self.message @@ -49,10 +49,10 @@ class HTTPHandlerError(Exception): self.data = httpdata def __repr__(self): - return '' % (self.code, self.message) + return ''.format(self.code, self.message) def __str__(self): - return 'HTTPHandlerError %d: %s' % (self.code, self.message) + return 'HTTPHandlerError {0:d}: {1}'.format(self.code, self.message) def __unicode__(self): - return 'HTTPHandlerError %d: %s' % (self.code, self.message) + return 'HTTPHandlerError {0:d}: 
{1}'.format(self.code, self.message) diff --git a/core/transmissionrpc/httphandler.py b/core/transmissionrpc/httphandler.py index 2968762e..02d65fa7 100644 --- a/core/transmissionrpc/httphandler.py +++ b/core/transmissionrpc/httphandler.py @@ -75,7 +75,7 @@ class DefaultHTTPHandler(HTTPHandler): if hasattr(error.reason, 'args') and isinstance(error.reason.args, tuple) and len(error.reason.args) == 2: raise HTTPHandlerError(httpcode=error.reason.args[0], httpmsg=error.reason.args[1]) else: - raise HTTPHandlerError(httpmsg='urllib2.URLError: %s' % (error.reason)) + raise HTTPHandlerError(httpmsg='urllib2.URLError: {error.reason}'.format(error=error)) except BadStatusLine as error: - raise HTTPHandlerError(httpmsg='httplib.BadStatusLine: %s' % (error.line)) + raise HTTPHandlerError(httpmsg='httplib.BadStatusLine: {error.line}'.format(error=error)) return response.read().decode('utf-8') diff --git a/core/transmissionrpc/session.py b/core/transmissionrpc/session.py index 6b620373..bb2c1560 100644 --- a/core/transmissionrpc/session.py +++ b/core/transmissionrpc/session.py @@ -27,12 +27,12 @@ class Session(object): try: return self._fields[name].value except KeyError: - raise AttributeError('No attribute %s' % name) + raise AttributeError('No attribute {0}'.format(name)) def __str__(self): text = '' for key in sorted(self._fields.keys()): - text += "% 32s: %s\n" % (key[-32:], self._fields[key].value) + text += "{0:32}: {1}\n".format(key[-32:], self._fields[key].value) return text def _update_fields(self, other): diff --git a/core/transmissionrpc/torrent.py b/core/transmissionrpc/torrent.py index 54ee2a2d..21d4f367 100644 --- a/core/transmissionrpc/torrent.py +++ b/core/transmissionrpc/torrent.py @@ -73,14 +73,14 @@ class Torrent(object): tid = self._fields['id'].value name = self._get_name_string() if isinstance(name, str): - return '' % (tid, name) + return ''.format(tid, name) else: - return '' % (tid) + return ''.format(tid) def __str__(self): name = 
self._get_name_string() if isinstance(name, str): - return 'Torrent \"%s\"' % (name) + return 'Torrent \"{0}\"'.format(name) else: return 'Torrent' @@ -91,7 +91,7 @@ class Torrent(object): try: return self._fields[name].value except KeyError: - raise AttributeError('No attribute %s' % name) + raise AttributeError('No attribute {0}'.format(name)) def _rpc_version(self): """Get the Transmission RPC API version.""" diff --git a/core/transmissionrpc/utils.py b/core/transmissionrpc/utils.py index 0ac2a32a..9381edac 100644 --- a/core/transmissionrpc/utils.py +++ b/core/transmissionrpc/utils.py @@ -40,7 +40,7 @@ def format_timedelta(delta): """ minutes, seconds = divmod(delta.seconds, 60) hours, minutes = divmod(minutes, 60) - return '%d %02d:%02d:%02d' % (delta.days, hours, minutes, seconds) + return '{0:d} {1:02d}:{2:02d}:{3:02d}'.format(delta.days, hours, minutes, seconds) def format_timestamp(timestamp, utc=False): @@ -80,17 +80,17 @@ def inet_address(address, default_port, default_address='localhost'): try: port = int(addr[1]) except ValueError: - raise INetAddressError('Invalid address "%s".' % address) + raise INetAddressError('Invalid address "{0}".'.format(address)) if len(addr[0]) == 0: addr = default_address else: addr = addr[0] else: - raise INetAddressError('Invalid address "%s".' % address) + raise INetAddressError('Invalid address "{0}".'.format(address)) try: socket.getaddrinfo(addr, port, socket.AF_INET, socket.SOCK_STREAM) except socket.gaierror: - raise INetAddressError('Cannot look up address "%s".' 
% address) + raise INetAddressError('Cannot look up address "{0}".'.format(address)) return addr, port @@ -139,7 +139,7 @@ def argument_value_convert(method, argument, value, rpc_version): elif method in ('session-get', 'session-set'): args = constants.SESSION_ARGS[method[-3:]] else: - return ValueError('Method "%s" not supported' % (method)) + return ValueError('Method "{0}" not supported'.format(method)) if argument in args: info = args[argument] invalid_version = True @@ -155,14 +155,12 @@ def argument_value_convert(method, argument, value, rpc_version): if invalid_version: if replacement: LOGGER.warning( - 'Replacing requested argument "%s" with "%s".' - % (argument, replacement)) + 'Replacing requested argument "{0}" with "{1}".'.format(argument, replacement)) argument = replacement info = args[argument] else: raise ValueError( - 'Method "%s" Argument "%s" does not exist in version %d.' - % (method, argument, rpc_version)) + 'Method "{0}" Argument "{1}" does not exist in version {2:d}.'.format(method, argument, rpc_version)) return argument, TR_TYPE_MAP[info[0]](value) else: raise ValueError('Argument "%s" does not exists for method "%s".', @@ -178,7 +176,7 @@ def get_arguments(method, rpc_version): elif method in ('session-get', 'session-set'): args = constants.SESSION_ARGS[method[-3:]] else: - return ValueError('Method "%s" not supported' % (method)) + return ValueError('Method "{0}" not supported'.format(method)) accessible = [] for argument, info in iteritems(args): valid_version = True diff --git a/core/utorrent/upload.py b/core/utorrent/upload.py index ddf228cc..f8db659c 100644 --- a/core/utorrent/upload.py +++ b/core/utorrent/upload.py @@ -16,7 +16,7 @@ class MultiPartForm(object): return def get_content_type(self): - return 'multipart/form-data; boundary=%s' % self.boundary + return 'multipart/form-data; boundary={0}'.format(self.boundary) def add_field(self, name, value): """Add a simple field to the form data.""" @@ -43,7 +43,7 @@ class 
MultiPartForm(object): # Add the form fields parts.extend( [part_boundary, - 'Content-Disposition: form-data; name="%s"' % name, + 'Content-Disposition: form-data; name="{0}"'.format(name), '', value, ] @@ -53,8 +53,8 @@ class MultiPartForm(object): # Add the files to upload parts.extend( [part_boundary, - 'Content-Disposition: file; name="%s"; filename="%s"' % (field_name, filename), - 'Content-Type: %s' % content_type, + 'Content-Disposition: file; name="{0}"; filename="{1}"'.format(field_name, filename), + 'Content-Type: {0}'.format(content_type), '', body, ] diff --git a/nzbToMedia.py b/nzbToMedia.py index 7c11c35a..c599e217 100755 --- a/nzbToMedia.py +++ b/nzbToMedia.py @@ -514,7 +514,7 @@ from core import logger, nzbToMediaDB def process(inputDirectory, inputName=None, status=0, clientAgent='manual', download_id=None, inputCategory=None, failureLink=None): if core.SAFE_MODE and inputDirectory == core.NZB_DEFAULTDIR: logger.error( - 'The input directory:[%s] is the Default Download Directory. Please configure category directories to prevent processing of other media.' % ( + 'The input directory:[{0}] is the Default Download Directory. Please configure category directories to prevent processing of other media.'.format( inputDirectory)) return [-1, ""] @@ -522,7 +522,7 @@ def process(inputDirectory, inputName=None, status=0, clientAgent='manual', down download_id = get_nzoid(inputName) if clientAgent != 'manual' and not core.DOWNLOADINFO: - logger.debug('Adding NZB download info for directory %s to database' % (inputDirectory)) + logger.debug('Adding NZB download info for directory {0} to database'.format(inputDirectory)) myDB = nzbToMediaDB.DBConnection() @@ -555,7 +555,7 @@ def process(inputDirectory, inputName=None, status=0, clientAgent='manual', down section = core.CFG.findsection("ALL").isenabled() if section is None: logger.error( - 'Category:[%s] is not defined or is not enabled. 
Please rename it or ensure it is enabled for the appropriate section in your autoProcessMedia.cfg and try again.' % ( + 'Category:[{0}] is not defined or is not enabled. Please rename it or ensure it is enabled for the appropriate section in your autoProcessMedia.cfg and try again.'.format( inputCategory)) return [-1, ""] else: @@ -563,15 +563,15 @@ def process(inputDirectory, inputName=None, status=0, clientAgent='manual', down if len(section) > 1: logger.error( - 'Category:[%s] is not unique, %s are using it. Please rename it or disable all other sections using the same category name in your autoProcessMedia.cfg and try again.' % ( + 'Category:[{0}] is not unique, {1} are using it. Please rename it or disable all other sections using the same category name in your autoProcessMedia.cfg and try again.'.format( inputCategory, section.keys())) return [-1, ""] if section: sectionName = section.keys()[0] - logger.info('Auto-detected SECTION:%s' % (sectionName)) + logger.info('Auto-detected SECTION:{0}'.format(sectionName)) else: - logger.error("Unable to locate a section with subsection:%s enabled in your autoProcessMedia.cfg, exiting!" % ( + logger.error("Unable to locate a section with subsection:{0} enabled in your autoProcessMedia.cfg, exiting!".format( inputCategory)) return [-1, ""] @@ -582,20 +582,20 @@ def process(inputDirectory, inputName=None, status=0, clientAgent='manual', down try: if int(section[usercat]['remote_path']) and not core.REMOTEPATHS: - logger.error('Remote Path is enabled for %s:%s but no Network mount points are defined. Please check your autoProcessMedia.cfg, exiting!' % ( + logger.error('Remote Path is enabled for {0}:{1} but no Network mount points are defined. Please check your autoProcessMedia.cfg, exiting!'.format( sectionName, inputCategory)) return [-1, ""] except: - logger.error('Remote Path %s is not valid for %s:%s Please set this to either 0 to disable or 1 to enable!' 
% ( + logger.error('Remote Path {0} is not valid for {1}:{2} Please set this to either 0 to disable or 1 to enable!'.format( section[usercat]['remote_path'], sectionName, inputCategory)) inputName, inputDirectory = convert_to_ascii(inputName, inputDirectory) if extract == 1: - logger.debug('Checking for archives to extract in directory: %s' % (inputDirectory)) + logger.debug('Checking for archives to extract in directory: {0}'.format(inputDirectory)) extractFiles(inputDirectory) - logger.info("Calling %s:%s to post-process:%s" % (sectionName, inputCategory, inputName)) + logger.info("Calling {0}:{1} to post-process:{2}".format(sectionName, inputCategory, inputName)) if sectionName == "CouchPotato": result = autoProcessMovie().process(sectionName, inputDirectory, inputName, status, clientAgent, download_id, @@ -636,11 +636,11 @@ def main(args, section=None): clientAgent = core.NZB_CLIENTAGENT logger.info("#########################################################") - logger.info("## ..::[%s]::.. ##" % os.path.basename(__file__)) + logger.info("## ..::[{0}]::.. ##".format(os.path.basename(__file__))) logger.info("#########################################################") # debug command line options - logger.debug("Options passed into nzbToMedia: %s" % args) + logger.debug("Options passed into nzbToMedia: {0}".format(args)) # Post-Processing Result result = [0, ""] @@ -650,15 +650,15 @@ def main(args, section=None): if os.environ.has_key('NZBOP_SCRIPTDIR'): # Check if the script is called from nzbget 11.0 or later if os.environ['NZBOP_VERSION'][0:5] < '11.0': - logger.error("NZBGet Version %s is not supported. Please update NZBGet." %(str(os.environ['NZBOP_VERSION']))) + logger.error("NZBGet Version {0} is not supported. Please update NZBGet.".format(os.environ['NZBOP_VERSION'])) sys.exit(core.NZBGET_POSTPROCESS_ERROR) - logger.info("Script triggered from NZBGet Version %s." 
%(str(os.environ['NZBOP_VERSION']))) + logger.info("Script triggered from NZBGet Version {0}.".format(os.environ['NZBOP_VERSION'])) # Check if the script is called from nzbget 13.0 or later if os.environ.has_key('NZBPP_TOTALSTATUS'): if not os.environ['NZBPP_TOTALSTATUS'] == 'SUCCESS': - logger.info("Download failed with status %s." %(os.environ['NZBPP_STATUS'])) + logger.info("Download failed with status {0}.".format(os.environ['NZBPP_STATUS'])) status = 1 else: @@ -745,16 +745,16 @@ def main(args, section=None): if not core.CFG[section][subsection].isenabled(): continue for dirName in getDirs(section, subsection, link = 'move'): - logger.info("Starting manual run for %s:%s - Folder:%s" % (section, subsection, dirName)) + logger.info("Starting manual run for {0}:{1} - Folder:{2}".format(section, subsection, dirName)) - logger.info("Checking database for download info for %s ..." % (os.path.basename(dirName))) + logger.info("Checking database for download info for {0} ...".format(os.path.basename(dirName))) core.DOWNLOADINFO = get_downloadInfo(os.path.basename(dirName), 0) if core.DOWNLOADINFO: logger.info( - "Found download info for %s, setting variables now ..." % (os.path.basename(dirName))) + "Found download info for {0}, setting variables now ...".format(os.path.basename(dirName))) else: logger.info( - 'Unable to locate download info for %s, continuing to try and process this release ...' % ( + 'Unable to locate download info for {0}, continuing to try and process this release ...'.format( os.path.basename(dirName)) ) @@ -781,19 +781,19 @@ def main(args, section=None): results = process(dirName, inputName, 0, clientAgent=clientAgent, download_id=download_id, inputCategory=subsection) if results[0] != 0: - logger.error("A problem was reported when trying to perform a manual run for %s:%s." 
% ( + logger.error("A problem was reported when trying to perform a manual run for {0}:{1}.".format( section, subsection)) result = results if result[0] == 0: - logger.info("The %s script completed successfully." % args[0]) + logger.info("The {0} script completed successfully.".format(args[0])) if result[1]: print result[1] + "!" # For SABnzbd Status display. if os.environ.has_key('NZBOP_SCRIPTDIR'): # return code for nzbget v11 del core.MYAPP return (core.NZBGET_POSTPROCESS_SUCCESS) else: - logger.error("A problem was reported in the %s script." % args[0]) + logger.error("A problem was reported in the {0} script.".format(args[0])) if result[1]: print result[1] + "!" # For SABnzbd Status display. if os.environ.has_key('NZBOP_SCRIPTDIR'): # return code for nzbget v11 From df8c6bc20f34f5b0ee390f1cef57dc9ee179ed3b Mon Sep 17 00:00:00 2001 From: Labrys Date: Sun, 5 Jun 2016 13:35:46 -0400 Subject: [PATCH 40/82] Too broad exceptions. * Use .get() with default value instead. --- core/nzbToMediaAutoFork.py | 46 ++++-------------- core/nzbToMediaUserScript.py | 20 +++----- core/nzbToMediaUtil.py | 10 +--- core/transcoder/transcoder.py | 90 +++++++---------------------------- 4 files changed, 37 insertions(+), 129 deletions(-) diff --git a/core/nzbToMediaAutoFork.py b/core/nzbToMediaAutoFork.py index 76a11204..3e213fd8 100644 --- a/core/nzbToMediaAutoFork.py +++ b/core/nzbToMediaAutoFork.py @@ -11,44 +11,18 @@ from core import logger def autoFork(section, inputCategory): # auto-detect correct section # config settings - try: - host = core.CFG[section][inputCategory]["host"] - port = core.CFG[section][inputCategory]["port"] - except: - host = None - port = None - try: - username = core.CFG[section][inputCategory]["username"] - password = core.CFG[section][inputCategory]["password"] - except: - username = None - password = None + cfg = core.CFG[section][inputCategory] - try: - apikey = core.CFG[section][inputCategory]["apikey"] - except: - apikey = None - - try: - ssl = 
int(core.CFG[section][inputCategory]["ssl"]) - except: - ssl = 0 - - try: - web_root = core.CFG[section][inputCategory]["web_root"] - except: - web_root = "" - - try: - fork = core.FORKS.items()[core.FORKS.keys().index(core.CFG[section][inputCategory]["fork"])] - except: - fork = "auto" - - if ssl: - protocol = "https://" - else: - protocol = "http://" + host = cfg.get("host") + port = cfg.get("port") + username = cfg.get("username") + password = cfg.get("password") + apikey = cfg.get("apikey") + ssl = int(cfg.get("ssl", 0)) + web_root = cfg.get("web_root", "") + fork = core.FORKS.items()[core.FORKS.keys().index(cfg.get("fork", "auto"))] + protocol = "https://" if ssl else "http://" detected = False if section == "NzbDrone": diff --git a/core/nzbToMediaUserScript.py b/core/nzbToMediaUserScript.py index d05273be..a3a75dac 100644 --- a/core/nzbToMediaUserScript.py +++ b/core/nzbToMediaUserScript.py @@ -16,11 +16,10 @@ def external_script(outputDestination, torrentName, torrentLabel, settings): core.USER_SCRIPT_MEDIAEXTENSIONS = core.USER_SCRIPT_MEDIAEXTENSIONS.split(',') except: core.USER_SCRIPT_MEDIAEXTENSIONS = [] - try: - core.USER_SCRIPT = settings["user_script_path"] - except: - core.USER_SCRIPT = None - if core.USER_SCRIPT is None or core.USER_SCRIPT == "None": # do nothing and return success. + + core.USER_SCRIPT = settings.get("user_script_path") + + if not core.USER_SCRIPT or core.USER_SCRIPT == "None": # do nothing and return success. 
return [0, ""] try: core.USER_SCRIPT_PARAM = settings["user_script_param"] @@ -34,14 +33,9 @@ def external_script(outputDestination, torrentName, torrentLabel, settings): core.USER_SCRIPT_SUCCESSCODES = core.USER_SCRIPT_SUCCESSCODES.split(',') except: core.USER_SCRIPT_SUCCESSCODES = 0 - try: - core.USER_SCRIPT_CLEAN = int(settings["user_script_clean"]) - except: - core.USER_SCRIPT_CLEAN = 1 - try: - core.USER_SCRIPT_RUNONCE = int(settings["user_script_runOnce"]) - except: - core.USER_SCRIPT_RUNONCE = 1 + + core.USER_SCRIPT_CLEAN = int(settings.get("user_script_clean", 1)) + core.USER_SCRIPT_RUNONCE = int(settings.get("user_script_runOnce", 1)) if core.CHECK_MEDIA: for video in listMediaFiles(outputDestination, media=True, audio=False, meta=False, archives=False): diff --git a/core/nzbToMediaUtil.py b/core/nzbToMediaUtil.py index 75c3571a..d600eceb 100644 --- a/core/nzbToMediaUtil.py +++ b/core/nzbToMediaUtil.py @@ -740,14 +740,8 @@ def cleanDir(path, section, subsection): logger.info('Doing Forceful Clean of {0}'.format(path), 'CLEANDIR') rmDir(path) return - try: - minSize = int(core.CFG[section][subsection]['minSize']) - except: - minSize = 0 - try: - delete_ignored = int(core.CFG[section][subsection]['delete_ignored']) - except: - delete_ignored = 0 + minSize = int(core.CFG[section][subsection].get('minSize', 0)) + delete_ignored = int(core.CFG[section][subsection].get('delete_ignored', 0)) try: num_files = len(listMediaFiles(path, minSize=minSize, delete_ignored=delete_ignored)) except: diff --git a/core/transcoder/transcoder.py b/core/transcoder/transcoder.py index a557c808..c5cb43a6 100644 --- a/core/transcoder/transcoder.py +++ b/core/transcoder/transcoder.py @@ -208,18 +208,9 @@ def buildCommands(file, newDir, movieName, bitbucket): for video in videoStreams: codec = video["codec_name"] - try: - fr = video["avg_frame_rate"] - except: - fr = 0 - try: - width = video["width"] - except: - width = 0 - try: - height = video["height"] - except: - height = 0 + fr 
= video.get("avg_frame_rate", 0) + width = video.get("width", 0) + height = video.get("height", 0) scale = core.VRESOLUTION if codec in core.VCODEC_ALLOW or not core.VCODEC: video_cmd.extend(['-c:v', 'copy']) @@ -270,26 +261,14 @@ def buildCommands(file, newDir, movieName, bitbucket): if audio2: # right language and codec... map_cmd.extend(['-map', '0:' + str(audio2[0]["index"])]) a_mapped.extend([audio2[0]["index"]]) - try: - bitrate = int(audio2[0]["bit_rate"]) / 1000 - except: - bitrate = 0 - try: - channels = int(audio2[0]["channels"]) - except: - channels = 0 + bitrate = int(audio2[0].get("bit_rate", 0)) / 1000 + channels = int(audio2[0].get("channels", 0)) audio_cmd.extend(['-c:a:' + str(used_audio), 'copy']) elif audio1: # right language wrong codec. map_cmd.extend(['-map', '0:' + str(audio1[0]["index"])]) a_mapped.extend([audio1[0]["index"]]) - try: - bitrate = int(audio1[0]["bit_rate"]) / 1000 - except: - bitrate = 0 - try: - channels = int(audio1[0]["channels"]) - except: - channels = 0 + bitrate = int(audio1[0].get("bit_rate", 0)) / 1000 + channels = int(audio1[0].get("channels", 0)) if core.ACODEC: audio_cmd.extend(['-c:a:' + str(used_audio), core.ACODEC]) else: @@ -297,14 +276,8 @@ def buildCommands(file, newDir, movieName, bitbucket): elif audio3: # just pick the default audio track map_cmd.extend(['-map', '0:' + str(audio3[0]["index"])]) a_mapped.extend([audio3[0]["index"]]) - try: - bitrate = int(audio3[0]["bit_rate"]) / 1000 - except: - bitrate = 0 - try: - channels = int(audio3[0]["channels"]) - except: - channels = 0 + bitrate = int(audio3[0].get("bit_rate", 0)) / 1000 + channels = int(audio3[0].get("channels", 0)) if core.ACODEC: audio_cmd.extend(['-c:a:' + str(used_audio), core.ACODEC]) else: @@ -331,26 +304,14 @@ def buildCommands(file, newDir, movieName, bitbucket): if audio4: # right language and codec. 
map_cmd.extend(['-map', '0:' + str(audio4[0]["index"])]) a_mapped.extend([audio4[0]["index"]]) - try: - bitrate = int(audio4[0]["bit_rate"]) / 1000 - except: - bitrate = 0 - try: - channels = int(audio4[0]["channels"]) - except: - channels = 0 + bitrate = int(audio4[0].get("bit_rate", 0)) / 1000 + channels = int(audio4[0].get("channels", 0)) audio_cmd2.extend(['-c:a:' + str(used_audio), 'copy']) elif audio1: # right language wrong codec. map_cmd.extend(['-map', '0:' + str(audio1[0]["index"])]) a_mapped.extend([audio1[0]["index"]]) - try: - bitrate = int(audio1[0]["bit_rate"]) / 1000 - except: - bitrate = 0 - try: - channels = int(audio1[0]["channels"]) - except: - channels = 0 + bitrate = int(audio1[0].get("bit_rate", 0)) / 1000 + channels = int(audio1[0].get("channels", 0)) if core.ACODEC2: audio_cmd2.extend(['-c:a:' + str(used_audio), core.ACODEC2]) else: @@ -358,14 +319,8 @@ def buildCommands(file, newDir, movieName, bitbucket): elif audio3: # just pick the default audio track map_cmd.extend(['-map', '0:' + str(audio3[0]["index"])]) a_mapped.extend([audio3[0]["index"]]) - try: - bitrate = int(audio3[0]["bit_rate"]) / 1000 - except: - bitrate = 0 - try: - channels = int(audio3[0]["channels"]) - except: - channels = 0 + bitrate = int(audio3[0].get("bit_rate", 0)) / 1000 + channels = int(audio3[0].get("channels", 0)) if core.ACODEC2: audio_cmd2.extend(['-c:a:' + str(used_audio), core.ACODEC2]) else: @@ -394,14 +349,8 @@ def buildCommands(file, newDir, movieName, bitbucket): used_audio += 1 map_cmd.extend(['-map', '0:' + str(audio["index"])]) audio_cmd3 = [] - try: - bitrate = int(audio["bit_rate"]) / 1000 - except: - bitrate = 0 - try: - channels = int(audio["channels"]) - except: - channels = 0 + bitrate = int(audio.get("bit_rate", 0)) / 1000 + channels = int(audio.get("channels", 0)) if audio["codec_name"] in core.ACODEC3_ALLOW: audio_cmd3.extend(['-c:a:' + str(used_audio), 'copy']) else: @@ -540,10 +489,7 @@ def extract_subs(file, newfilePath, bitbucket): for n 
in range(num): sub = subStreams[n] idx = sub["index"] - try: - lan = sub["tags"]["language"] - except: - lan = "unk" + lan = sub.get("tags", {}).get("language", "unk") if num == 1: outputFile = os.path.join(subdir, "{0}.srt".format(name)) From f093fafd8d1b7c5ee0412fd67e2a266762f0737a Mon Sep 17 00:00:00 2001 From: clinton-hall Date: Mon, 6 Jun 2016 21:29:50 +0930 Subject: [PATCH 41/82] move import of six after addition of path. --- core/__init__.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/core/__init__.py b/core/__init__.py index b71a3b53..80444fea 100644 --- a/core/__init__.py +++ b/core/__init__.py @@ -10,7 +10,6 @@ import sys import platform import time -from six.moves import reload_module # init libs PROGRAM_DIR = os.path.dirname(os.path.normpath(os.path.abspath(os.path.join(__file__, os.pardir)))) @@ -31,6 +30,8 @@ CONFIG_TV_FILE = os.path.join(PROGRAM_DIR, 'autoProcessTv.cfg') TEST_FILE = os.path.join(os.path.join(PROGRAM_DIR, 'tests'), 'test.mp4') MYAPP = None +from six.moves import reload_module + from core.autoProcess.autoProcessComics import autoProcessComics from core.autoProcess.autoProcessGames import autoProcessGames from core.autoProcess.autoProcessMovie import autoProcessMovie From 1a3b1ce4b6548f3e27f9e28d19768525bcad425e Mon Sep 17 00:00:00 2001 From: labrys Date: Mon, 6 Jun 2016 03:12:02 -0400 Subject: [PATCH 42/82] Add encoding declaration --- TorrentToMedia.py | 1 + nzbToCouchPotato.py | 1 + nzbToGamez.py | 1 + nzbToHeadPhones.py | 1 + nzbToMedia.py | 1 + nzbToMylar.py | 1 + nzbToNzbDrone.py | 1 + nzbToSickBeard.py | 1 + 8 files changed, 8 insertions(+) diff --git a/TorrentToMedia.py b/TorrentToMedia.py index b1a317ae..9d19b945 100755 --- a/TorrentToMedia.py +++ b/TorrentToMedia.py @@ -1,4 +1,5 @@ #!/usr/bin/env python2 +# coding=utf-8 import datetime import os import time diff --git a/nzbToCouchPotato.py b/nzbToCouchPotato.py index c9957d4c..9bb81473 100755 --- a/nzbToCouchPotato.py +++ b/nzbToCouchPotato.py @@ -1,4
+1,5 @@ #!/usr/bin/env python2 +# coding=utf-8 # ############################################################################## ### NZBGET POST-PROCESSING SCRIPT ### diff --git a/nzbToGamez.py b/nzbToGamez.py index d85c2961..b6bca420 100755 --- a/nzbToGamez.py +++ b/nzbToGamez.py @@ -1,4 +1,5 @@ #!/usr/bin/env python2 +# coding=utf-8 # ############################################################################## ### NZBGET POST-PROCESSING SCRIPT ### diff --git a/nzbToHeadPhones.py b/nzbToHeadPhones.py index 9f24faf4..30be1dcc 100755 --- a/nzbToHeadPhones.py +++ b/nzbToHeadPhones.py @@ -1,4 +1,5 @@ #!/usr/bin/env python2 +# coding=utf-8 # ############################################################################## ### NZBGET POST-PROCESSING SCRIPT ### diff --git a/nzbToMedia.py b/nzbToMedia.py index c599e217..d4c819b2 100755 --- a/nzbToMedia.py +++ b/nzbToMedia.py @@ -1,4 +1,5 @@ #!/usr/bin/env python2 +# coding=utf-8 # ############################################################################## ### NZBGET POST-PROCESSING SCRIPT ### diff --git a/nzbToMylar.py b/nzbToMylar.py index 21d7e183..71f88bab 100755 --- a/nzbToMylar.py +++ b/nzbToMylar.py @@ -1,4 +1,5 @@ #!/usr/bin/env python2 +# coding=utf-8 # ############################################################################## ### NZBGET POST-PROCESSING SCRIPT ### diff --git a/nzbToNzbDrone.py b/nzbToNzbDrone.py index 8d4f3a49..63c64864 100755 --- a/nzbToNzbDrone.py +++ b/nzbToNzbDrone.py @@ -1,4 +1,5 @@ #!/usr/bin/env python2 +# coding=utf-8 # ############################################################################## ### NZBGET POST-PROCESSING SCRIPT ### diff --git a/nzbToSickBeard.py b/nzbToSickBeard.py index 69ff5397..73fefbfe 100755 --- a/nzbToSickBeard.py +++ b/nzbToSickBeard.py @@ -1,4 +1,5 @@ #!/usr/bin/env python2 +# coding=utf-8 # ############################################################################## ### NZBGET POST-PROCESSING SCRIPT ### From 58d439f3eb75397c576b35adf4e16436cb43ded0 Mon 
Sep 17 00:00:00 2001 From: labrys Date: Mon, 6 Jun 2016 03:11:38 -0400 Subject: [PATCH 43/82] Remove unused imports, variables, and redundant parentheses --- TorrentToMedia.py | 3 --- nzbToMedia.py | 10 +++------- 2 files changed, 3 insertions(+), 10 deletions(-) diff --git a/TorrentToMedia.py b/TorrentToMedia.py index 9d19b945..20b78771 100755 --- a/TorrentToMedia.py +++ b/TorrentToMedia.py @@ -2,12 +2,9 @@ # coding=utf-8 import datetime import os -import time -import shutil import sys import core -from subprocess import Popen from core import logger, nzbToMediaDB from core.nzbToMediaUtil import convert_to_ascii, CharReplace, plex_update from core.nzbToMediaUserScript import external_script diff --git a/nzbToMedia.py b/nzbToMedia.py index d4c819b2..833215b4 100755 --- a/nzbToMedia.py +++ b/nzbToMedia.py @@ -527,7 +527,6 @@ def process(inputDirectory, inputName=None, status=0, clientAgent='manual', down myDB = nzbToMediaDB.DBConnection() - encoded = False inputDirectory1 = inputDirectory inputName1 = inputName @@ -633,9 +632,6 @@ def main(args, section=None): # Initialize the config core.initialize(section) - # clientAgent for NZBs - clientAgent = core.NZB_CLIENTAGENT - logger.info("#########################################################") logger.info("## ..::[{0}]::.. ##".format(os.path.basename(__file__))) logger.info("#########################################################") @@ -792,16 +788,16 @@ def main(args, section=None): print result[1] + "!" # For SABnzbd Status display. if os.environ.has_key('NZBOP_SCRIPTDIR'): # return code for nzbget v11 del core.MYAPP - return (core.NZBGET_POSTPROCESS_SUCCESS) + return core.NZBGET_POSTPROCESS_SUCCESS else: logger.error("A problem was reported in the {0} script.".format(args[0])) if result[1]: print result[1] + "!" # For SABnzbd Status display. 
if os.environ.has_key('NZBOP_SCRIPTDIR'): # return code for nzbget v11 del core.MYAPP - return (core.NZBGET_POSTPROCESS_ERROR) + return core.NZBGET_POSTPROCESS_ERROR del core.MYAPP - return (result[0]) + return result[0] if __name__ == '__main__': From c2bf14f775aedbe486e74c5f53e87d3c06e89f25 Mon Sep 17 00:00:00 2001 From: labrys Date: Mon, 6 Jun 2016 03:25:06 -0400 Subject: [PATCH 44/82] PEP8: Fix formatting * Remove redundant backslash between brackets * Fix multiple statements on one line * Fix missing/excess whitespace * Fix comments not starting with a single # and a space --- TorrentToMedia.py | 59 +++++---- nzbToCouchPotato.py | 151 +++++++++++------------ nzbToGamez.py | 57 ++++----- nzbToHeadPhones.py | 65 +++++----- nzbToMedia.py | 291 ++++++++++++++++++++++---------------------- nzbToMylar.py | 67 +++++----- nzbToNzbDrone.py | 141 ++++++++++----------- nzbToSickBeard.py | 153 +++++++++++------------ 8 files changed, 501 insertions(+), 483 deletions(-) diff --git a/TorrentToMedia.py b/TorrentToMedia.py index 20b78771..b3f4e719 100755 --- a/TorrentToMedia.py +++ b/TorrentToMedia.py @@ -9,6 +9,7 @@ from core import logger, nzbToMediaDB from core.nzbToMediaUtil import convert_to_ascii, CharReplace, plex_update from core.nzbToMediaUserScript import external_script + def processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID, clientAgent): status = 1 # 1 = failed | 0 = success root = 0 @@ -37,24 +38,26 @@ def processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID, "client_agent": unicode(clientAgent), "status": 0, "last_update": datetime.date.today().toordinal() - } + } myDB.upsert("downloads", newValueDict, controlValueDict) logger.debug("Received Directory: {0} | Name: {1} | Category: {2}".format(inputDirectory, inputName, inputCategory)) inputDirectory, inputName, inputCategory, root = core.category_search(inputDirectory, inputName, - inputCategory, root, - core.CATEGORIES) # Confirm the category by parsing 
directory structure + inputCategory, root, + core.CATEGORIES) # Confirm the category by parsing directory structure if inputCategory == "": inputCategory = "UNCAT" usercat = inputCategory try: inputName = inputName.encode(core.SYS_ENCODING) - except: pass + except: + pass try: inputDirectory = inputDirectory.encode(core.SYS_ENCODING) - except: pass + except: + pass logger.debug("Determined Directory: {0} | Name: {1} | Category: {2}".format(inputDirectory, inputName, inputCategory)) @@ -122,7 +125,8 @@ def processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID, core.os.path.join(core.OUTPUTDIRECTORY, inputCategory)) try: outputDestination = outputDestination.encode(core.SYS_ENCODING) - except: pass + except: + pass if outputDestination in inputDirectory: outputDestination = inputDirectory @@ -132,7 +136,7 @@ def processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID, if core.SAFE_MODE and outputDestination == core.TORRENT_DEFAULTDIR: logger.error( 'The output directory:[{0}] is the Download Directory. Edit outputDirectory in autoProcessMedia.cfg. 
Exiting'.format( - inputDirectory)) + inputDirectory)) return [-1, ""] logger.debug("Scanning files in directory: {0}".format(inputDirectory)) @@ -162,12 +166,13 @@ def processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID, "Setting outputDestination to {0} to preserve folder structure".format(os.path.dirname(targetFile))) try: targetFile = targetFile.encode(core.SYS_ENCODING) - except: pass + except: + pass if root == 1: if not foundFile: logger.debug("Looking for {0} in: {1}".format(inputName, inputFile)) - if (core.sanitizeName(inputName) in core.sanitizeName(inputFile)) or ( - core.sanitizeName(fileName) in core.sanitizeName(inputName)): + if any([core.sanitizeName(inputName) in core.sanitizeName(inputFile), + core.sanitizeName(fileName) in core.sanitizeName(inputName)]): foundFile = True logger.debug("Found file {0} that matches Torrent Name {1}".format(fullFileName, inputName)) else: @@ -198,7 +203,7 @@ def processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID, logger.debug('Checking for archives to extract in directory: {0}'.format(inputDirectory)) core.extractFiles(inputDirectory, outputDestination, keep_archive) - if not inputCategory in core.NOFLATTEN: #don't flatten hp in case multi cd albums, and we need to copy this back later. + if not inputCategory in core.NOFLATTEN: # don't flatten hp in case multi cd albums, and we need to copy this back later. 
core.flatten(outputDestination) # Now check if video files exist in destination: @@ -223,25 +228,25 @@ def processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID, if core.TORRENT_CHMOD_DIRECTORY: core.rchmod(outputDestination, core.TORRENT_CHMOD_DIRECTORY) - result = [ 0, "" ] + result = [0, ""] if sectionName == 'UserScript': result = external_script(outputDestination, inputName, inputCategory, section[usercat]) elif sectionName == 'CouchPotato': - result = core.autoProcessMovie().process(sectionName,outputDestination, inputName, status, clientAgent, inputHash, - inputCategory) - elif sectionName in ['SickBeard','NzbDrone']: + result = core.autoProcessMovie().process(sectionName, outputDestination, inputName, status, clientAgent, inputHash, + inputCategory) + elif sectionName in ['SickBeard', 'NzbDrone']: if inputHash: inputHash = inputHash.upper() - result = core.autoProcessTV().processEpisode(sectionName,outputDestination, inputName, status, clientAgent, - inputHash, inputCategory) + result = core.autoProcessTV().processEpisode(sectionName, outputDestination, inputName, status, clientAgent, + inputHash, inputCategory) elif sectionName == 'HeadPhones': - result = core.autoProcessMusic().process(sectionName,outputDestination, inputName, status, clientAgent, inputCategory) + result = core.autoProcessMusic().process(sectionName, outputDestination, inputName, status, clientAgent, inputCategory) elif sectionName == 'Mylar': - result = core.autoProcessComics().processEpisode(sectionName,outputDestination, inputName, status, clientAgent, - inputCategory) + result = core.autoProcessComics().processEpisode(sectionName, outputDestination, inputName, status, clientAgent, + inputCategory) elif sectionName == 'Gamez': - result = core.autoProcessGames().process(sectionName,outputDestination, inputName, status, clientAgent, inputCategory) + result = core.autoProcessGames().process(sectionName, outputDestination, inputName, status, clientAgent, 
inputCategory) plex_update(inputCategory) @@ -263,8 +268,8 @@ def processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID, logger.debug('Checking for sym-links to re-direct in: {0}'.format(inputDirectory)) for dirpath, dirs, files in os.walk(inputDirectory): for file in files: - logger.debug('Checking symlink: {0}'.format(os.path.join(dirpath,file))) - core.replace_links(os.path.join(dirpath,file)) + logger.debug('Checking symlink: {0}'.format(os.path.join(dirpath, file))) + core.replace_links(os.path.join(dirpath, file)) core.remove_torrent(clientAgent, inputHash, inputID, inputName) if not sectionName == 'UserScript': # for user script, we assume this is cleaned by the script or option USER_SCRIPT_CLEAN @@ -289,7 +294,7 @@ def main(args): logger.debug("Options passed into TorrentToMedia: {0}".format(args)) # Post-Processing Result - result = [ 0, "" ] + result = [0, ""] try: inputDirectory, inputName, inputCategory, inputHash, inputID = core.parse_args(clientAgent, args) @@ -339,11 +344,13 @@ def main(args): try: dirName = dirName.encode(core.SYS_ENCODING) - except: pass + except: + pass inputName = os.path.basename(dirName) try: inputName = inputName.encode(core.SYS_ENCODING) - except: pass + except: + pass results = processTorrent(dirName, inputName, subsection, inputHash, inputID, clientAgent) diff --git a/nzbToCouchPotato.py b/nzbToCouchPotato.py index 9bb81473..8b8f7b46 100755 --- a/nzbToCouchPotato.py +++ b/nzbToCouchPotato.py @@ -1,8 +1,8 @@ #!/usr/bin/env python2 # coding=utf-8 -# -############################################################################## -### NZBGET POST-PROCESSING SCRIPT ### + +# ############################################################################## +# ### NZBGET POST-PROCESSING SCRIPT ### # Post-Process to CouchPotato, SickBeard, NzbDrone, Mylar, Gamez, HeadPhones. # @@ -10,242 +10,243 @@ # # NOTE: This script requires Python to be installed on your system. 
-############################################################################## -### OPTIONS ### +# ############################################################################## +# ### OPTIONS ### -## General +# ## General # Auto Update nzbToMedia (0, 1). # # Set to 1 if you want nzbToMedia to automatically check for and update to the latest version -#auto_update=0 +# auto_update=0 # Check Media for corruption (0, 1). # # Enable/Disable media file checking using ffprobe. -#check_media=1 +# check_media=1 # Safe Mode protection of DestDir (0, 1). # # Enable/Disable a safety check to ensure we don't process all downloads in the default_downloadDirectory by mistake. -#safe_mode=1 +# safe_mode=1 -## CouchPotato +# ## CouchPotato # CouchPotato script category. # # category that gets called for post-processing with CouchPotatoServer. -#cpsCategory=movie +# cpsCategory=movie # CouchPotato api key. -#cpsapikey= +# cpsapikey= # CouchPotato host. # # The ipaddress for your CouchPotato server. e.g For the Same system use localhost or 127.0.0.1 -#cpshost=localhost +# cpshost=localhost # CouchPotato port. -#cpsport=5050 +# cpsport=5050 # CouchPotato uses ssl (0, 1). # # Set to 1 if using ssl, else set to 0. -#cpsssl=0 +# cpsssl=0 # CouchPotato URL_Base # # set this if using a reverse proxy. -#cpsweb_root= +# cpsweb_root= # CouchPotato watch directory. # # set this to where your CouchPotato completed downloads are. -#cpswatch_dir= +# cpswatch_dir= # CouchPotato Postprocess Method (renamer, manage). # # use "renamer" for CPS renamer (default) or "manage" to call a manage update. -#cpsmethod=renamer +# cpsmethod=renamer # CouchPotato Delete Failed Downloads (0, 1). # # set to 1 to delete failed, or 0 to leave files in place. -#cpsdelete_failed=0 +# cpsdelete_failed=0 # CouchPotato wait_for # # Set the number of minutes to wait after calling the renamer, to check the movie has changed status. 
-#cpswait_for=2 +# cpswait_for=2 # CouchPotatoServer and NZBGet are a different system (0, 1). # # Enable to replace local path with the path as per the mountPoints below. -#cpsremote_path=0 +# cpsremote_path=0 -## Network +# ## Network # Network Mount Points (Needed for remote path above) # # Enter Mount points as LocalPath,RemotePath and separate each pair with '|' # e.g. mountPoints=/volume1/Public/,E:\|/volume2/share/,\\NAS\ -#mountPoints= +# mountPoints= -## Extensions +# ## Extensions # Media Extensions # # This is a list of media extensions that are used to verify that the download does contain valid media. -#mediaExtensions=.mkv,.avi,.divx,.xvid,.mov,.wmv,.mp4,.mpg,.mpeg,.vob,.iso,.ts +# mediaExtensions=.mkv,.avi,.divx,.xvid,.mov,.wmv,.mp4,.mpg,.mpeg,.vob,.iso,.ts -## Posix +# ## Posix # Niceness for external tasks Extractor and Transcoder. # # Set the Niceness value for the nice command. These range from -20 (most favorable to the process) to 19 (least favorable to the process). -#niceness=10 +# niceness=10 # ionice scheduling class (0, 1, 2, 3). # # Set the ionice scheduling class. 0 for none, 1 for real time, 2 for best-effort, 3 for idle. -#ionice_class=2 +# ionice_class=2 # ionice scheduling class data. # # Set the ionice scheduling class data. This defines the class data, if the class accepts an argument. For real time and best-effort, 0-7 is valid data. -#ionice_classdata=4 +# ionice_classdata=4 -## Transcoder +# ## Transcoder # getSubs (0, 1). # # set to 1 to download subtitles. -#getSubs=0 +# getSubs=0 # subLanguages. # # subLanguages. create a list of languages in the order you want them in your subtitles. -#subLanguages=eng,spa,fra +# subLanguages=eng,spa,fra # Transcode (0, 1). # # set to 1 to transcode, otherwise set to 0. -#transcode=0 +# transcode=0 # create a duplicate, or replace the original (0, 1). # # set to 1 to cretae a new file or 0 to replace the original -#duplicate=1 +# duplicate=1 # ignore extensions. 
# # list of extensions that won't be transcoded. -#ignoreExtensions=.avi,.mkv +# ignoreExtensions=.avi,.mkv # outputFastStart (0,1). # # outputFastStart. 1 will use -movflags + faststart. 0 will disable this from being used. -#outputFastStart=0 +# outputFastStart=0 # outputVideoPath. # # outputVideoPath. Set path you want transcoded videos moved to. Leave blank to disable. -#outputVideoPath= +# outputVideoPath= # processOutput (0,1). # # processOutput. 1 will send the outputVideoPath to SickBeard/CouchPotato. 0 will send original files. -#processOutput=0 +# processOutput=0 # audioLanguage. # # audioLanguage. set the 3 letter language code you want as your primary audio track. -#audioLanguage=eng +# audioLanguage=eng # allAudioLanguages (0,1). # # allAudioLanguages. 1 will keep all audio tracks (uses AudioCodec3) where available. -#allAudioLanguages=0 +# allAudioLanguages=0 # allSubLanguages (0,1). # # allSubLanguages. 1 will keep all exisiting sub languages. 0 will discare those not in your list above. -#allSubLanguages=0 +# allSubLanguages=0 # embedSubs (0,1). # # embedSubs. 1 will embded external sub/srt subs into your video if this is supported. -#embedSubs=1 +# embedSubs=1 # burnInSubtitle (0,1). # # burnInSubtitle. burns the default sub language into your video (needed for players that don't support subs) -#burnInSubtitle=0 +# burnInSubtitle=0 # extractSubs (0,1). # # extractSubs. 1 will extract subs from the video file and save these as external srt files. -#extractSubs=0 +# extractSubs=0 # externalSubDir. # # externalSubDir. set the directory where subs should be saved (if not the same directory as the video) -#externalSubDir= +# externalSubDir= # outputDefault (None, iPad, iPad-1080p, iPad-720p, Apple-TV2, iPod, iPhone, PS3, xbox, Roku-1080p, Roku-720p, Roku-480p, mkv, mp4-scene-release). # # outputDefault. Loads default configs for the selected device. The remaining options below are ignored. 
# If you want to use your own profile, set None and set the remaining options below. -#outputDefault=None +# outputDefault=None # hwAccel (0,1). # # hwAccel. 1 will set ffmpeg to enable hardware acceleration (this requires a recent ffmpeg). -#hwAccel=0 +# hwAccel=0 # ffmpeg output settings. -#outputVideoExtension=.mp4 -#outputVideoCodec=libx264 -#VideoCodecAllow= -#outputVideoPreset=medium -#outputVideoFramerate=24 -#outputVideoBitrate=800k -#outputAudioCodec=ac3 -#AudioCodecAllow= -#outputAudioChannels=6 -#outputAudioBitrate=640k -#outputQualityPercent= -#outputAudioTrack2Codec=libfaac -#AudioCodec2Allow= -#outputAudioTrack2Channels=2 -#outputAudioTrack2Bitrate=160k -#outputAudioOtherCodec=libmp3lame -#AudioOtherCodecAllow= -#outputAudioOtherChannels=2 -#outputAudioOtherBitrate=128k -#outputSubtitleCodec= +# outputVideoExtension=.mp4 +# outputVideoCodec=libx264 +# VideoCodecAllow= +# outputVideoPreset=medium +# outputVideoFramerate=24 +# outputVideoBitrate=800k +# outputAudioCodec=ac3 +# AudioCodecAllow= +# outputAudioChannels=6 +# outputAudioBitrate=640k +# outputQualityPercent= +# outputAudioTrack2Codec=libfaac +# AudioCodec2Allow= +# outputAudioTrack2Channels=2 +# outputAudioTrack2Bitrate=160k +# outputAudioOtherCodec=libmp3lame +# AudioOtherCodecAllow= +# outputAudioOtherChannels=2 +# outputAudioOtherBitrate=128k +# outputSubtitleCodec= -## WakeOnLan +# ## WakeOnLan # use WOL (0, 1). # # set to 1 to send WOL broadcast to the mac and test the server (e.g. xbmc) on the host and port specified. -#wolwake=0 +# wolwake=0 # WOL MAC # # enter the mac address of the system to be woken. -#wolmac=00:01:2e:2D:64:e1 +# wolmac=00:01:2e:2D:64:e1 # Set the Host and Port of a server to verify system has woken. 
-#wolhost=192.168.1.37 -#wolport=80 +# wolhost=192.168.1.37 +# wolport=80 + +# ### NZBGET POST-PROCESSING SCRIPT ### +# ############################################################################## -### NZBGET POST-PROCESSING SCRIPT ### -############################################################################## import sys import nzbToMedia section = "CouchPotato" result = nzbToMedia.main(sys.argv, section) -sys.exit(result) \ No newline at end of file +sys.exit(result) diff --git a/nzbToGamez.py b/nzbToGamez.py index b6bca420..9b1cb355 100755 --- a/nzbToGamez.py +++ b/nzbToGamez.py @@ -1,8 +1,8 @@ #!/usr/bin/env python2 # coding=utf-8 # -############################################################################## -### NZBGET POST-PROCESSING SCRIPT ### +# ############################################################################## +# ### NZBGET POST-PROCESSING SCRIPT ### # Post-Process to CouchPotato, SickBeard, NzbDrone, Mylar, Gamez, HeadPhones. # @@ -10,98 +10,99 @@ # # NOTE: This script requires Python to be installed on your system. -############################################################################## +# ############################################################################## # -### OPTIONS ### +# ### OPTIONS ### -## General +# ## General # Auto Update nzbToMedia (0, 1). # # Set to 1 if you want nzbToMedia to automatically check for and update to the latest version -#auto_update=0 +# auto_update=0 # Safe Mode protection of DestDir (0, 1). # # Enable/Disable a safety check to ensure we don't process all downloads in the default_downloadDirectory by mistake. -#safe_mode=1 +# safe_mode=1 -## Gamez +# ## Gamez # Gamez script category. # # category that gets called for post-processing with Gamez. -#gzCategory=games +# gzCategory=games # Gamez api key. -#gzapikey= +# gzapikey= # Gamez host. # # The ipaddress for your Gamez server. e.g For the Same system use localhost or 127.0.0.1 -#gzhost=localhost +# gzhost=localhost # Gamez port. 
-#gzport=8085 +# gzport=8085 # Gamez uses ssl (0, 1). # # Set to 1 if using ssl, else set to 0. -#gzssl=0 +# gzssl=0 # Gamez library # # move downloaded games here. -#gzlibrary +# gzlibrary # Gamez web_root # # set this if using a reverse proxy. -#gzweb_root= +# gzweb_root= # Gamez watch directory. # # set this to where your Gamez completed downloads are. -#gzwatch_dir= +# gzwatch_dir= -## Posix +# ## Posix # Niceness for external tasks Extractor and Transcoder. # # Set the Niceness value for the nice command. These range from -20 (most favorable to the process) to 19 (least favorable to the process). -#niceness=10 +# niceness=10 # ionice scheduling class (0, 1, 2, 3). # # Set the ionice scheduling class. 0 for none, 1 for real time, 2 for best-effort, 3 for idle. -#ionice_class=2 +# ionice_class=2 # ionice scheduling class data. # # Set the ionice scheduling class data. This defines the class data, if the class accepts an argument. For real time and best-effort, 0-7 is valid data. -#ionice_classdata=4 +# ionice_classdata=4 -## WakeOnLan +# ## WakeOnLan # use WOL (0, 1). # # set to 1 to send WOL broadcast to the mac and test the server (e.g. xbmc) on the host and port specified. -#wolwake=0 +# wolwake=0 # WOL MAC # # enter the mac address of the system to be woken. -#wolmac=00:01:2e:2D:64:e1 +# wolmac=00:01:2e:2D:64:e1 # Set the Host and Port of a server to verify system has woken. 
-#wolhost=192.168.1.37 -#wolport=80 +# wolhost=192.168.1.37 +# wolport=80 + +# ### NZBGET POST-PROCESSING SCRIPT ### +# ############################################################################## -### NZBGET POST-PROCESSING SCRIPT ### -############################################################################## import sys import nzbToMedia section = "Gamez" result = nzbToMedia.main(sys.argv, section) -sys.exit(result) \ No newline at end of file +sys.exit(result) diff --git a/nzbToHeadPhones.py b/nzbToHeadPhones.py index 30be1dcc..26cf2e3e 100755 --- a/nzbToHeadPhones.py +++ b/nzbToHeadPhones.py @@ -1,8 +1,8 @@ #!/usr/bin/env python2 # coding=utf-8 -# -############################################################################## -### NZBGET POST-PROCESSING SCRIPT ### + +# ############################################################################## +# ### NZBGET POST-PROCESSING SCRIPT ### # Post-Process to HeadPhones. # @@ -10,110 +10,111 @@ # # NOTE: This script requires Python to be installed on your system. -############################################################################## -### OPTIONS +# ############################################################################## +# ### OPTIONS -## General +# ## General # Auto Update nzbToMedia (0, 1). # # Set to 1 if you want nzbToMedia to automatically check for and update to the latest version -#auto_update=0 +# auto_update=0 # Safe Mode protection of DestDir (0, 1). # # Enable/Disable a safety check to ensure we don't process all downloads in the default_downloadDirectory by mistake. -#safe_mode=1 +# safe_mode=1 -## HeadPhones +# ## HeadPhones # HeadPhones script category. # # category that gets called for post-processing with HeadHones. -#hpCategory=music +# hpCategory=music # HeadPhones api key. -#hpapikey= +# hpapikey= # HeadPhones host. # # The ipaddress for your HeadPhones server. e.g For the Same system use localhost or 127.0.0.1 -#hphost=localhost +# hphost=localhost # HeadPhones port. 
-#hpport=8181 +# hpport=8181 # HeadPhones uses ssl (0, 1). # # Set to 1 if using ssl, else set to 0. -#hpssl=0 +# hpssl=0 # HeadPhones web_root # # set this if using a reverse proxy. -#hpweb_root= +# hpweb_root= # HeadPhones watch directory. # # set this to where your HeadPhones completed downloads are. -#hpwatch_dir= +# hpwatch_dir= # HeadPhones wait_for # # Set the number of minutes to wait after initiating HeadPhones post-processing to check if the album status has changed. -#hpwait_for=2 +# hpwait_for=2 # HeadPhones and NZBGet are a different system (0, 1). # # Enable to replace local path with the path as per the mountPoints below. -#hpremote_path=0 +# hpremote_path=0 -## Posix +# ## Posix # Niceness for external tasks Extractor and Transcoder. # # Set the Niceness value for the nice command. These range from -20 (most favorable to the process) to 19 (least favorable to the process). -#niceness=10 +# niceness=10 # ionice scheduling class (0, 1, 2, 3). # # Set the ionice scheduling class. 0 for none, 1 for real time, 2 for best-effort, 3 for idle. -#ionice_class=2 +# ionice_class=2 # ionice scheduling class data. # # Set the ionice scheduling class data. This defines the class data, if the class accepts an argument. For real time and best-effort, 0-7 is valid data. -#ionice_classdata=4 +# ionice_classdata=4 -## Network +# ## Network # Network Mount Points (Needed for remote path above) # # Enter Mount points as LocalPath,RemotePath and separate each pair with '|' # e.g. mountPoints=/volume1/Public/,E:\|/volume2/share/,\\NAS\ -#mountPoints= +# mountPoints= -## WakeOnLan +# ## WakeOnLan # use WOL (0, 1). # # set to 1 to send WOL broadcast to the mac and test the server (e.g. xbmc) on the host and port specified. -#wolwake=0 +# wolwake=0 # WOL MAC # # enter the mac address of the system to be woken. -#wolmac=00:01:2e:2D:64:e1 +# wolmac=00:01:2e:2D:64:e1 # Set the Host and Port of a server to verify system has woken. 
-#wolhost=192.168.1.37 -#wolport=80 +# wolhost=192.168.1.37 +# wolport=80 + +# ### NZBGET POST-PROCESSING SCRIPT ### +# ############################################################################## -### NZBGET POST-PROCESSING SCRIPT ### -############################################################################## import sys import nzbToMedia section = "HeadPhones" result = nzbToMedia.main(sys.argv, section) -sys.exit(result) \ No newline at end of file +sys.exit(result) diff --git a/nzbToMedia.py b/nzbToMedia.py index 833215b4..5e344fa8 100755 --- a/nzbToMedia.py +++ b/nzbToMedia.py @@ -1,8 +1,8 @@ #!/usr/bin/env python2 # coding=utf-8 # -############################################################################## -### NZBGET POST-PROCESSING SCRIPT ### +# ############################################################################## +# ### NZBGET POST-PROCESSING SCRIPT ### # Post-Process to CouchPotato, SickBeard, NzbDrone, Mylar, Gamez, HeadPhones. # @@ -10,494 +10,495 @@ # # NOTE: This script requires Python to be installed on your system. -############################################################################## -### OPTIONS ### +# ############################################################################## +# ### OPTIONS ### -## General +# ## General # Auto Update nzbToMedia (0, 1). # # Set to 1 if you want nzbToMedia to automatically check for and update to the latest version -#auto_update=0 +# auto_update=0 # Check Media for corruption (0, 1). # # Enable/Disable media file checking using ffprobe. -#check_media=1 +# check_media=1 # Safe Mode protection of DestDir (0, 1). # # Enable/Disable a safety check to ensure we don't process all downloads in the default_downloadDirectory by mistake. -#safe_mode=1 +# safe_mode=1 -## CouchPotato +# ## CouchPotato # CouchPotato script category. # # category that gets called for post-processing with CouchPotatoServer. -#cpsCategory=movie +# cpsCategory=movie # CouchPotato api key. 
-#cpsapikey= +# cpsapikey= # CouchPotato host. # # The ipaddress for your CouchPotato server. e.g For the Same system use localhost or 127.0.0.1 -#cpshost=localhost +# cpshost=localhost # CouchPotato port. -#cpsport=5050 +# cpsport=5050 # CouchPotato uses ssl (0, 1). # # Set to 1 if using ssl, else set to 0. -#cpsssl=0 +# cpsssl=0 # CouchPotato URL_Base # # set this if using a reverse proxy. -#cpsweb_root= +# cpsweb_root= # CouchPotato Postprocess Method (renamer, manage). # # use "renamer" for CPS renamer (default) or "manage" to call a manage update. -#cpsmethod=renamer +# cpsmethod=renamer # CouchPotato Delete Failed Downloads (0, 1). # # set to 1 to delete failed, or 0 to leave files in place. -#cpsdelete_failed=0 +# cpsdelete_failed=0 # CouchPotato wait_for # # Set the number of minutes to wait after calling the renamer, to check the movie has changed status. -#cpswait_for=2 +# cpswait_for=2 # Couchpotato and NZBGet are a different system (0, 1). # # Enable to replace local path with the path as per the mountPoints below. -#cpsremote_path=0 +# cpsremote_path=0 -## SickBeard +# ## SickBeard # SickBeard script category. # # category that gets called for post-processing with SickBeard. -#sbCategory=tv +# sbCategory=tv # SickBeard host. # # The ipaddress for your SickBeard/SickRage server. e.g For the Same system use localhost or 127.0.0.1 -#sbhost=localhost +# sbhost=localhost # SickBeard port. -#sbport=8081 +# sbport=8081 # SickBeard username. -#sbusername= +# sbusername= # SickBeard password. -#sbpassword= +# sbpassword= # SickBeard uses ssl (0, 1). # # Set to 1 if using ssl, else set to 0. -#sbssl=0 +# sbssl=0 # SickBeard web_root # # set this if using a reverse proxy. -#sbweb_root= +# sbweb_root= # SickBeard watch directory. # # set this if SickBeard and nzbGet are on different systems. -#sbwatch_dir= +# sbwatch_dir= # SickBeard fork. # # set to default or auto to auto-detect the custom fork type. 
-#sbfork=auto +# sbfork=auto # SickBeard Delete Failed Downloads (0, 1). # # set to 1 to delete failed, or 0 to leave files in place. -#sbdelete_failed=0 +# sbdelete_failed=0 # SickBeard process method. # # set this to move, copy, hardlink, symlink as appropriate if you want to over-ride SB defaults. Leave blank to use SB default. -#sbprocess_method= +# sbprocess_method= # SickBeard and NZBGet are a different system (0, 1). # # Enable to replace local path with the path as per the mountPoints below. -#sbremote_path=0 +# sbremote_path=0 -## NzbDrone +# ## NzbDrone # NzbDrone script category. # # category that gets called for post-processing with NzbDrone. -#ndCategory=tv2 +# ndCategory=tv2 # NzbDrone host. # # The ipaddress for your NzbDrone/Sonarr server. e.g For the Same system use localhost or 127.0.0.1 -#ndhost=localhost +# ndhost=localhost # NzbDrone port. -#ndport=8989 +# ndport=8989 # NzbDrone API key. -#ndapikey= +# ndapikey= # NzbDrone uses SSL (0, 1). # # Set to 1 if using SSL, else set to 0. -#ndssl=0 +# ndssl=0 # NzbDrone web root. # # set this if using a reverse proxy. -#ndweb_root= +# ndweb_root= # NzbDrone wait_for # # Set the number of minutes to wait after calling the renamer, to check the episode has changed status. -#ndwait_for=2 +# ndwait_for=2 # NzbDrone Delete Failed Downloads (0, 1). # # set to 1 to delete failed, or 0 to leave files in place. -#nddelete_failed=0 +# nddelete_failed=0 # NzbDrone and NZBGet are a different system (0, 1). # # Enable to replace local path with the path as per the mountPoints below. -#ndremote_path=0 +# ndremote_path=0 -## HeadPhones +# ## HeadPhones # HeadPhones script category. # # category that gets called for post-processing with HeadHones. -#hpCategory=music +# hpCategory=music # HeadPhones api key. -#hpapikey= +# hpapikey= # HeadPhones host. # # The ipaddress for your HeadPhones server. e.g For the Same system use localhost or 127.0.0.1 -#hphost=localhost +# hphost=localhost # HeadPhones port. 
-#hpport=8181 +# hpport=8181 # HeadPhones uses ssl (0, 1). # # Set to 1 if using ssl, else set to 0. -#hpssl=0 +# hpssl=0 # HeadPhones web_root # # set this if using a reverse proxy. -#hpweb_root= +# hpweb_root= # HeadPhones and NZBGet are a different system (0, 1). # # Enable to replace local path with the path as per the mountPoints below. -#hpremote_path=0 +# hpremote_path=0 -## Mylar +# ## Mylar # Mylar script category. # # category that gets called for post-processing with Mylar. -#myCategory=comics +# myCategory=comics # Mylar host. # # The ipaddress for your Mylar server. e.g For the Same system use localhost or 127.0.0.1 -#myhost=localhost +# myhost=localhost # Mylar port. -#myport=8090 +# myport=8090 # Mylar username. -#myusername= +# myusername= # Mylar password. -#mypassword= +# mypassword= # Mylar uses ssl (0, 1). # # Set to 1 if using ssl, else set to 0. -#myssl=0 +# myssl=0 # Mylar web_root # # set this if using a reverse proxy. -#myweb_root= +# myweb_root= # Mylar wait_for # # Set the number of minutes to wait after calling the force process, to check the issue has changed status. -#myswait_for=1 +# myswait_for=1 # Mylar and NZBGet are a different system (0, 1). # # Enable to replace local path with the path as per the mountPoints below. -#myremote_path=0 +# myremote_path=0 -## Gamez +# ## Gamez # Gamez script category. # # category that gets called for post-processing with Gamez. -#gzCategory=games +# gzCategory=games # Gamez api key. -#gzapikey= +# gzapikey= # Gamez host. # # The ipaddress for your Gamez server. e.g For the Same system use localhost or 127.0.0.1 -#gzhost=localhost +# gzhost=localhost # Gamez port. -#gzport=8085 +# gzport=8085 # Gamez uses ssl (0, 1). # # Set to 1 if using ssl, else set to 0. -#gzssl=0 +# gzssl=0 # Gamez library # # move downloaded games here. -#gzlibrary +# gzlibrary # Gamez web_root # # set this if using a reverse proxy. -#gzweb_root= +# gzweb_root= # Gamez and NZBGet are a different system (0, 1). 
# # Enable to replace local path with the path as per the mountPoints below. -#gzremote_path=0 +# gzremote_path=0 -## Network +# ## Network # Network Mount Points (Needed for remote path above) # # Enter Mount points as LocalPath,RemotePath and separate each pair with '|' # e.g. mountPoints=/volume1/Public/,E:\|/volume2/share/,\\NAS\ -#mountPoints= +# mountPoints= -## Extensions +# ## Extensions # Media Extensions # # This is a list of media extensions that are used to verify that the download does contain valid media. -#mediaExtensions=.mkv,.avi,.divx,.xvid,.mov,.wmv,.mp4,.mpg,.mpeg,.vob,.iso,.ts +# mediaExtensions=.mkv,.avi,.divx,.xvid,.mov,.wmv,.mp4,.mpg,.mpeg,.vob,.iso,.ts -## Posix +# ## Posix # Niceness for external tasks Extractor and Transcoder. # # Set the Niceness value for the nice command. These range from -20 (most favorable to the process) to 19 (least favorable to the process). -#niceness=10 +# niceness=10 # ionice scheduling class (0, 1, 2, 3). # # Set the ionice scheduling class. 0 for none, 1 for real time, 2 for best-effort, 3 for idle. -#ionice_class=2 +# ionice_class=2 # ionice scheduling class data. # # Set the ionice scheduling class data. This defines the class data, if the class accepts an argument. For real time and best-effort, 0-7 is valid data. -#ionice_classdata=4 +# ionice_classdata=4 -## Transcoder +# ## Transcoder # getSubs (0, 1). # # set to 1 to download subtitles. -#getSubs=0 +# getSubs=0 # subLanguages. # # subLanguages. create a list of languages in the order you want them in your subtitles. -#subLanguages=eng,spa,fra +# subLanguages=eng,spa,fra # Transcode (0, 1). # # set to 1 to transcode, otherwise set to 0. -#transcode=0 +# transcode=0 # create a duplicate, or replace the original (0, 1). # # set to 1 to cretae a new file or 0 to replace the original -#duplicate=1 +# duplicate=1 # ignore extensions. # # list of extensions that won't be transcoded. 
-#ignoreExtensions=.avi,.mkv +# ignoreExtensions=.avi,.mkv # outputFastStart (0,1). # # outputFastStart. 1 will use -movflags + faststart. 0 will disable this from being used. -#outputFastStart=0 +# outputFastStart=0 # outputVideoPath. # # outputVideoPath. Set path you want transcoded videos moved to. Leave blank to disable. -#outputVideoPath= +# outputVideoPath= # processOutput (0,1). # # processOutput. 1 will send the outputVideoPath to SickBeard/CouchPotato. 0 will send original files. -#processOutput=0 +# processOutput=0 # audioLanguage. # # audioLanguage. set the 3 letter language code you want as your primary audio track. -#audioLanguage=eng +# audioLanguage=eng # allAudioLanguages (0,1). # # allAudioLanguages. 1 will keep all audio tracks (uses AudioCodec3) where available. -#allAudioLanguages=0 +# allAudioLanguages=0 # allSubLanguages (0,1). # # allSubLanguages. 1 will keep all exisiting sub languages. 0 will discare those not in your list above. -#allSubLanguages=0 +# allSubLanguages=0 # embedSubs (0,1). # # embedSubs. 1 will embded external sub/srt subs into your video if this is supported. -#embedSubs=1 +# embedSubs=1 # burnInSubtitle (0,1). # # burnInSubtitle. burns the default sub language into your video (needed for players that don't support subs) -#burnInSubtitle=0 +# burnInSubtitle=0 # extractSubs (0,1). # # extractSubs. 1 will extract subs from the video file and save these as external srt files. -#extractSubs=0 +# extractSubs=0 # externalSubDir. # # externalSubDir. set the directory where subs should be saved (if not the same directory as the video) -#externalSubDir= +# externalSubDir= # outputDefault (None, iPad, iPad-1080p, iPad-720p, Apple-TV2, iPod, iPhone, PS3, xbox, Roku-1080p, Roku-720p, Roku-480p, mkv, mp4-scene-release). # # outputDefault. Loads default configs for the selected device. The remaining options below are ignored. # If you want to use your own profile, set None and set the remaining options below. 
-#outputDefault=None +# outputDefault=None # hwAccel (0,1). # # hwAccel. 1 will set ffmpeg to enable hardware acceleration (this requires a recent ffmpeg). -#hwAccel=0 +# hwAccel=0 # ffmpeg output settings. -#outputVideoExtension=.mp4 -#outputVideoCodec=libx264 -#VideoCodecAllow= -#outputVideoPreset=medium -#outputVideoFramerate=24 -#outputVideoBitrate=800k -#outputAudioCodec=ac3 -#AudioCodecAllow= -#outputAudioChannels=6 -#outputAudioBitrate=640k -#outputQualityPercent= -#outputAudioTrack2Codec=libfaac -#AudioCodec2Allow= -#outputAudioTrack2Channels=2 -#outputAudioTrack2Bitrate=160k -#outputAudioOtherCodec=libmp3lame -#AudioOtherCodecAllow= -#outputAudioOtherChannels=2 -#outputAudioOtherBitrate=128k -#outputSubtitleCodec= +# outputVideoExtension=.mp4 +# outputVideoCodec=libx264 +# VideoCodecAllow= +# outputVideoPreset=medium +# outputVideoFramerate=24 +# outputVideoBitrate=800k +# outputAudioCodec=ac3 +# AudioCodecAllow= +# outputAudioChannels=6 +# outputAudioBitrate=640k +# outputQualityPercent= +# outputAudioTrack2Codec=libfaac +# AudioCodec2Allow= +# outputAudioTrack2Channels=2 +# outputAudioTrack2Bitrate=160k +# outputAudioOtherCodec=libmp3lame +# AudioOtherCodecAllow= +# outputAudioOtherChannels=2 +# outputAudioOtherBitrate=128k +# outputSubtitleCodec= -## WakeOnLan +# ## WakeOnLan # use WOL (0, 1). # # set to 1 to send WOL broadcast to the mac and test the server (e.g. xbmc) on the host and port specified. -#wolwake=0 +# wolwake=0 # WOL MAC # # enter the mac address of the system to be woken. -#wolmac=00:01:2e:2D:64:e1 +# wolmac=00:01:2e:2D:64:e1 # Set the Host and Port of a server to verify system has woken. -#wolhost=192.168.1.37 -#wolport=80 +# wolhost=192.168.1.37 +# wolport=80 -## UserScript +# ## UserScript # User Script category. # # category that gets called for post-processing with user script (accepts "UNCAT", "ALL", or a defined category). -#usCategory=mine +# usCategory=mine # User Script Remote Path (0,1). 
# # Script calls commands on another system. -#usremote_path=0 +# usremote_path=0 # User Script extensions. # # What extension do you want to process? Specify all the extension, or use "ALL" to process all files. -#user_script_mediaExtensions=.mkv,.avi,.divx,.xvid,.mov,.wmv,.mp4,.mpg,.mpeg +# user_script_mediaExtensions=.mkv,.avi,.divx,.xvid,.mov,.wmv,.mp4,.mpg,.mpeg # User Script Path # # Specify the path to your custom script. -#user_script_path=/nzbToMedia/userscripts/script.sh +# user_script_path=/nzbToMedia/userscripts/script.sh # User Script arguments. # # Specify the argument(s) passed to script, comma separated in order. # for example FP,FN,DN, TN, TL for file path (absolute file name with path), file name, absolute directory name (with path), Torrent Name, Torrent Label/Category. # So the result is /media/test/script/script.sh FP FN DN TN TL. Add other arguments as needed eg -f, -r -#user_script_param=FN +# user_script_param=FN # User Script Run Once (0,1). # # Set user_script_runOnce = 0 to run for each file, or 1 to only run once (presumably on teh entire directory). -#user_script_runOnce=0 +# user_script_runOnce=0 # User Script Success Codes. # # Specify the successcodes returned by the user script as a comma separated list. Linux default is 0 -#user_script_successCodes=0 +# user_script_successCodes=0 # User Script Clean After (0,1). # # Clean after? Note that delay function is used to prevent possible mistake :) Delay is intended as seconds -#user_script_clean=1 +# user_script_clean=1 # User Script Delay. # # Delay in seconds after processing. 
-#usdelay=120 +# usdelay=120 + +# ### NZBGET POST-PROCESSING SCRIPT ### +# ############################################################################## -### NZBGET POST-PROCESSING SCRIPT ### -############################################################################## import os import sys import datetime @@ -510,17 +511,19 @@ from core.autoProcess.autoProcessTV import autoProcessTV from core.nzbToMediaUtil import getDirs, extractFiles, cleanDir, update_downloadInfoStatus, get_downloadInfo, CharReplace, convert_to_ascii, get_nzoid, plex_update from core.nzbToMediaUserScript import external_script from core import logger, nzbToMediaDB +from six import text_type + # post-processing def process(inputDirectory, inputName=None, status=0, clientAgent='manual', download_id=None, inputCategory=None, failureLink=None): if core.SAFE_MODE and inputDirectory == core.NZB_DEFAULTDIR: logger.error( 'The input directory:[{0}] is the Default Download Directory. Please configure category directories to prevent processing of other media.'.format( - inputDirectory)) + inputDirectory)) return [-1, ""] if not download_id and clientAgent == 'sabnzbd': - download_id = get_nzoid(inputName) + download_id = get_nzoid(inputName) if clientAgent != 'manual' and not core.DOWNLOADINFO: logger.debug('Adding NZB download info for directory {0} to database'.format(inputDirectory)) @@ -543,7 +546,7 @@ def process(inputDirectory, inputName=None, status=0, clientAgent='manual', down "client_agent": unicode(clientAgent), "status": 0, "last_update": datetime.date.today().toordinal() - } + } myDB.upsert("downloads", newValueDict, controlValueDict) # auto-detect section @@ -556,7 +559,7 @@ def process(inputDirectory, inputName=None, status=0, clientAgent='manual', down if section is None: logger.error( 'Category:[{0}] is not defined or is not enabled. 
Please rename it or ensure it is enabled for the appropriate section in your autoProcessMedia.cfg and try again.'.format( - inputCategory)) + inputCategory)) return [-1, ""] else: usercat = "ALL" @@ -564,7 +567,7 @@ def process(inputDirectory, inputName=None, status=0, clientAgent='manual', down if len(section) > 1: logger.error( 'Category:[{0}] is not unique, {1} are using it. Please rename it or disable all other sections using the same category name in your autoProcessMedia.cfg and try again.'.format( - inputCategory, section.keys())) + inputCategory, section.keys())) return [-1, ""] if section: @@ -728,7 +731,7 @@ def main(args, section=None): clientAgent = 'sabnzbd' logger.info("Script triggered from SABnzbd 0.7.17+") result = process(args[1], inputName=args[2], status=args[7], inputCategory=args[5], clientAgent=clientAgent, - download_id='', failureLink=''.join(args[8:])) + download_id='', failureLink=''.join(args[8:])) # Generic program elif len(args) > 5 and args[5] == 'generic': logger.info("Script triggered from generic program") @@ -741,7 +744,7 @@ def main(args, section=None): for subsection in subsections: if not core.CFG[section][subsection].isenabled(): continue - for dirName in getDirs(section, subsection, link = 'move'): + for dirName in getDirs(section, subsection, link='move'): logger.info("Starting manual run for {0}:{1} - Folder:{2}".format(section, subsection, dirName)) logger.info("Checking database for download info for {0} ...".format(os.path.basename(dirName))) @@ -769,17 +772,19 @@ def main(args, section=None): try: dirName = dirName.encode(core.SYS_ENCODING) - except: pass + except: + pass inputName = os.path.basename(dirName) try: inputName = inputName.encode(core.SYS_ENCODING) - except: pass + except: + pass results = process(dirName, inputName, 0, clientAgent=clientAgent, download_id=download_id, inputCategory=subsection) if results[0] != 0: - logger.error("A problem was reported when trying to perform a manual run for 
{0}:{1}.".format( - section, subsection)) + logger.error("A problem was reported when trying to perform a manual run for {0}:{1}.".format + (section, subsection)) result = results if result[0] == 0: diff --git a/nzbToMylar.py b/nzbToMylar.py index 71f88bab..eba87a71 100755 --- a/nzbToMylar.py +++ b/nzbToMylar.py @@ -1,8 +1,8 @@ #!/usr/bin/env python2 # coding=utf-8 -# -############################################################################## -### NZBGET POST-PROCESSING SCRIPT ### + +# ############################################################################## +# ### NZBGET POST-PROCESSING SCRIPT ### # Post-Process to Mylar. # @@ -10,114 +10,115 @@ # # NOTE: This script requires Python to be installed on your system. -############################################################################## +# ############################################################################## # -### OPTIONS +# ### OPTIONS -## General +# ## General # Auto Update nzbToMedia (0, 1). # # Set to 1 if you want nzbToMedia to automatically check for and update to the latest version -#auto_update=0 +# auto_update=0 # Safe Mode protection of DestDir (0, 1). # # Enable/Disable a safety check to ensure we don't process all downloads in the default_downloadDirectory by mistake. -#safe_mode=1 +# safe_mode=1 -## Mylar +# ## Mylar # Mylar script category. # # category that gets called for post-processing with Mylar. -#myCategory=comics +# myCategory=comics # Mylar host. # # The ipaddress for your Mylar server. e.g For the Same system use localhost or 127.0.0.1 -#myhost=localhost +# myhost=localhost # Mylar port. -#myport=8090 +# myport=8090 # Mylar username. -#myusername= +# myusername= # Mylar password. -#mypassword= +# mypassword= # Mylar uses ssl (0, 1). # # Set to 1 if using ssl, else set to 0. -#myssl=0 +# myssl=0 # Mylar web_root # # set this if using a reverse proxy. 
-#myweb_root= +# myweb_root= # Mylar wait_for # # Set the number of minutes to wait after calling the force process, to check the issue has changed status. -#myswait_for=1 +# myswait_for=1 # Mylar watch directory. # # set this to where your Mylar completed downloads are. -#mywatch_dir= +# mywatch_dir= # Mylar and NZBGet are a different system (0, 1). # # Enable to replace local path with the path as per the mountPoints below. -#myremote_path=0 +# myremote_path=0 -## Posix +# ## Posix # Niceness for external tasks Extractor and Transcoder. # # Set the Niceness value for the nice command. These range from -20 (most favorable to the process) to 19 (least favorable to the process). -#niceness=10 +# niceness=10 # ionice scheduling class (0, 1, 2, 3). # # Set the ionice scheduling class. 0 for none, 1 for real time, 2 for best-effort, 3 for idle. -#ionice_class=2 +# ionice_class=2 # ionice scheduling class data. # # Set the ionice scheduling class data. This defines the class data, if the class accepts an argument. For real time and best-effort, 0-7 is valid data. -#ionice_classdata=4 +# ionice_classdata=4 -## Network +# ## Network # Network Mount Points (Needed for remote path above) # # Enter Mount points as LocalPath,RemotePath and separate each pair with '|' # e.g. mountPoints=/volume1/Public/,E:\|/volume2/share/,\\NAS\ -#mountPoints= +# mountPoints= -## WakeOnLan +# ## WakeOnLan # use WOL (0, 1). # # set to 1 to send WOL broadcast to the mac and test the server (e.g. xbmc) on the host and port specified. -#wolwake=0 +# wolwake=0 # WOL MAC # # enter the mac address of the system to be woken. -#wolmac=00:01:2e:2D:64:e1 +# wolmac=00:01:2e:2D:64:e1 # Set the Host and Port of a server to verify system has woken. 
-#wolhost=192.168.1.37 -#wolport=80 +# wolhost=192.168.1.37 +# wolport=80 + +# ### NZBGET POST-PROCESSING SCRIPT ### +# ############################################################################## -### NZBGET POST-PROCESSING SCRIPT ### -############################################################################## import sys import nzbToMedia section = "Mylar" result = nzbToMedia.main(sys.argv, section) -sys.exit(result) \ No newline at end of file +sys.exit(result) diff --git a/nzbToNzbDrone.py b/nzbToNzbDrone.py index 63c64864..ac2e6d5f 100755 --- a/nzbToNzbDrone.py +++ b/nzbToNzbDrone.py @@ -1,8 +1,8 @@ #!/usr/bin/env python2 # coding=utf-8 -# -############################################################################## -### NZBGET POST-PROCESSING SCRIPT ### + +# ############################################################################## +# ### NZBGET POST-PROCESSING SCRIPT ### # Post-Process to NzbDrone. # @@ -10,229 +10,230 @@ # # NOTE: This script requires Python to be installed on your system. -############################################################################## -### OPTIONS ### +# ############################################################################## +# ### OPTIONS ### -## General +# ## General # Auto Update nzbToMedia (0, 1). # # Set to 1 if you want nzbToMedia to automatically check for and update to the latest version -#auto_update=0 +# auto_update=0 # Check Media for corruption (0, 1). # # Enable/Disable media file checking using ffprobe. -#check_media=1 +# check_media=1 # Safe Mode protection of DestDir (0, 1). # # Enable/Disable a safety check to ensure we don't process all downloads in the default_downloadDirectory by mistake. -#safe_mode=1 +# safe_mode=1 -## NzbDrone +# ## NzbDrone # NzbDrone script category. # # category that gets called for post-processing with NzbDrone. -#ndCategory=tv2 +# ndCategory=tv2 # NzbDrone host. # # The ipaddress for your NzbDrone/Sonarr server. 
e.g For the Same system use localhost or 127.0.0.1 -#ndhost=localhost +# ndhost=localhost # NzbDrone port. -#ndport=8989 +# ndport=8989 # NzbDrone API key. -#ndapikey= +# ndapikey= # NzbDrone uses ssl (0, 1). # # Set to 1 if using ssl, else set to 0. -#ndssl=0 +# ndssl=0 # NzbDrone web_root # # set this if using a reverse proxy. -#ndweb_root= +# ndweb_root= # NzbDrone wait_for # # Set the number of minutes to wait after calling the renamer, to check the episode has changed status. -#ndwait_for=2 +# ndwait_for=2 # NzbDrone Delete Failed Downloads (0, 1). # # set to 1 to delete failed, or 0 to leave files in place. -#nddelete_failed=0 +# nddelete_failed=0 # NzbDrone and NZBGet are a different system (0, 1). # # Enable to replace local path with the path as per the mountPoints below. -#ndremote_path=0 +# ndremote_path=0 -## Network +# ## Network # Network Mount Points (Needed for remote path above) # # Enter Mount points as LocalPath,RemotePath and separate each pair with '|' # e.g. mountPoints=/volume1/Public/,E:\|/volume2/share/,\\NAS\ -#mountPoints= +# mountPoints= -## Extensions +# ## Extensions # Media Extensions # # This is a list of media extensions that are used to verify that the download does contain valid media. -#mediaExtensions=.mkv,.avi,.divx,.xvid,.mov,.wmv,.mp4,.mpg,.mpeg,.vob,.iso,.ts +# mediaExtensions=.mkv,.avi,.divx,.xvid,.mov,.wmv,.mp4,.mpg,.mpeg,.vob,.iso,.ts -## Posix +# ## Posix # Niceness for external tasks Extractor and Transcoder. # # Set the Niceness value for the nice command. These range from -20 (most favorable to the process) to 19 (least favorable to the process). -#niceness=10 +# niceness=10 # ionice scheduling class (0, 1, 2, 3). # # Set the ionice scheduling class. 0 for none, 1 for real time, 2 for best-effort, 3 for idle. -#ionice_class=2 +# ionice_class=2 # ionice scheduling class data. # # Set the ionice scheduling class data. This defines the class data, if the class accepts an argument. 
For real time and best-effort, 0-7 is valid data. -#ionice_classdata=4 +# ionice_classdata=4 -## Transcoder +# ## Transcoder # getSubs (0, 1). # # set to 1 to download subtitles. -#getSubs = 0 +# getSubs = 0 # subLanguages. # # subLanguages. create a list of languages in the order you want them in your subtitles. -#subLanguages = eng,spa,fra +# subLanguages = eng,spa,fra # Transcode (0, 1). # # set to 1 to transcode, otherwise set to 0. -#transcode=0 +# transcode=0 # create a duplicate, or replace the original (0, 1). # # set to 1 to cretae a new file or 0 to replace the original -#duplicate=1 +# duplicate=1 # ignore extensions. # # list of extensions that won't be transcoded. -#ignoreExtensions=.avi,.mkv +# ignoreExtensions=.avi,.mkv # outputFastStart (0,1). # # outputFastStart. 1 will use -movflags + faststart. 0 will disable this from being used. -#outputFastStart = 0 +# outputFastStart = 0 # outputVideoPath. # # outputVideoPath. Set path you want transcoded videos moved to. Leave blank to disable. -#outputVideoPath = +# outputVideoPath = # processOutput (0,1). # # processOutput. 1 will send the outputVideoPath to SickBeard/CouchPotato. 0 will send original files. -#processOutput = 0 +# processOutput = 0 # audioLanguage. # # audioLanguage. set the 3 letter language code you want as your primary audio track. -#audioLanguage = eng +# audioLanguage = eng # allAudioLanguages (0,1). # # allAudioLanguages. 1 will keep all audio tracks (uses AudioCodec3) where available. -#allAudioLanguages = 0 +# allAudioLanguages = 0 # allSubLanguages (0,1). # # allSubLanguages. 1 will keep all exisiting sub languages. 0 will discare those not in your list above. -#allSubLanguages = 0 +# allSubLanguages = 0 # embedSubs (0,1). # # embedSubs. 1 will embded external sub/srt subs into your video if this is supported. -#embedSubs = 1 +# embedSubs = 1 # burnInSubtitle (0,1). # # burnInSubtitle. 
burns the default sub language into your video (needed for players that don't support subs) -#burnInSubtitle = 0 +# burnInSubtitle = 0 # extractSubs (0,1). # # extractSubs. 1 will extract subs from the video file and save these as external srt files. -#extractSubs = 0 +# extractSubs = 0 # externalSubDir. # # externalSubDir. set the directory where subs should be saved (if not the same directory as the video) -#externalSubDir = +# externalSubDir = # outputDefault (None, iPad, iPad-1080p, iPad-720p, Apple-TV2, iPod, iPhone, PS3, xbox, Roku-1080p, Roku-720p, Roku-480p, mkv, mp4-scene-release). # # outputDefault. Loads default configs for the selected device. The remaining options below are ignored. # If you want to use your own profile, set None and set the remaining options below. -#outputDefault = None +# outputDefault = None # hwAccel (0,1). # # hwAccel. 1 will set ffmpeg to enable hardware acceleration (this requires a recent ffmpeg). -#hwAccel=0 +# hwAccel=0 # ffmpeg output settings. -#outputVideoExtension=.mp4 -#outputVideoCodec=libx264 -#VideoCodecAllow = -#outputVideoPreset=medium -#outputVideoFramerate=24 -#outputVideoBitrate=800k -#outputAudioCodec=libmp3lame -#AudioCodecAllow = -#outputAudioBitrate=128k -#outputQualityPercent = 0 -#outputAudioTrack2Codec = libfaac -#AudioCodec2Allow = -#outputAudioTrack2Bitrate = 128k -#outputAudioOtherCodec = libmp3lame -#AudioOtherCodecAllow = -#outputAudioOtherBitrate = 128k -#outputSubtitleCodec = +# outputVideoExtension=.mp4 +# outputVideoCodec=libx264 +# VideoCodecAllow = +# outputVideoPreset=medium +# outputVideoFramerate=24 +# outputVideoBitrate=800k +# outputAudioCodec=libmp3lame +# AudioCodecAllow = +# outputAudioBitrate=128k +# outputQualityPercent = 0 +# outputAudioTrack2Codec = libfaac +# AudioCodec2Allow = +# outputAudioTrack2Bitrate = 128k +# outputAudioOtherCodec = libmp3lame +# AudioOtherCodecAllow = +# outputAudioOtherBitrate = 128k +# outputSubtitleCodec = -## WakeOnLan +# ## WakeOnLan # use WOL (0, 1). 
# # set to 1 to send WOL broadcast to the mac and test the server (e.g. xbmc) on the host and port specified. -#wolwake=0 +# wolwake=0 # WOL MAC # # enter the mac address of the system to be woken. -#wolmac=00:01:2e:2D:64:e1 +# wolmac=00:01:2e:2D:64:e1 # Set the Host and Port of a server to verify system has woken. -#wolhost=192.168.1.37 -#wolport=80 +# wolhost=192.168.1.37 +# wolport=80 + +# ### NZBGET POST-PROCESSING SCRIPT ### +# ############################################################################## -### NZBGET POST-PROCESSING SCRIPT ### -############################################################################## import sys import nzbToMedia section = "NzbDrone" result = nzbToMedia.main(sys.argv, section) -sys.exit(result) \ No newline at end of file +sys.exit(result) diff --git a/nzbToSickBeard.py b/nzbToSickBeard.py index 73fefbfe..92474ea5 100755 --- a/nzbToSickBeard.py +++ b/nzbToSickBeard.py @@ -1,8 +1,8 @@ #!/usr/bin/env python2 # coding=utf-8 -# -############################################################################## -### NZBGET POST-PROCESSING SCRIPT ### + +# ############################################################################## +# ### NZBGET POST-PROCESSING SCRIPT ### # Post-Process to SickBeard. # @@ -10,245 +10,246 @@ # # NOTE: This script requires Python to be installed on your system. -############################################################################## -### OPTIONS ### +# ############################################################################## +# ### OPTIONS ### -## General +# ## General # Auto Update nzbToMedia (0, 1). # # Set to 1 if you want nzbToMedia to automatically check for and update to the latest version -#auto_update=0 +# auto_update=0 # Check Media for corruption (0, 1). # # Enable/Disable media file checking using ffprobe. -#check_media=1 +# check_media=1 # Safe Mode protection of DestDir (0, 1). 
# # Enable/Disable a safety check to ensure we don't process all downloads in the default_downloadDirectory by mistake. -#safe_mode=1 +# safe_mode=1 -## SickBeard +# ## SickBeard # SickBeard script category. # # category that gets called for post-processing with SickBeard. -#sbCategory=tv +# sbCategory=tv # SickBeard host. # # The ipaddress for your SickBeard/SickRage server. e.g For the Same system use localhost or 127.0.0.1 -#sbhost=localhost +# sbhost=localhost # SickBeard port. -#sbport=8081 +# sbport=8081 # SickBeard username. -#sbusername= +# sbusername= # SickBeard password. -#sbpassword= +# sbpassword= # SickBeard uses ssl (0, 1). # # Set to 1 if using ssl, else set to 0. -#sbssl=0 +# sbssl=0 # SickBeard web_root # # set this if using a reverse proxy. -#sbweb_root= +# sbweb_root= # SickBeard watch directory. # # set this to where your SickBeard completed downloads are. -#sbwatch_dir= +# sbwatch_dir= # SickBeard fork. # # set to default or auto to auto-detect the custom fork type. -#sbfork=auto +# sbfork=auto # SickBeard Delete Failed Downloads (0, 1). # # set to 1 to delete failed, or 0 to leave files in place. -#sbdelete_failed=0 +# sbdelete_failed=0 # SickBeard process method. # # set this to move, copy, hardlink, symlink as appropriate if you want to over-ride SB defaults. Leave blank to use SB default. -#sbprocess_method= +# sbprocess_method= # SickBeard and NZBGet are a different system (0, 1). # # Enable to replace local path with the path as per the mountPoints below. -#sbremote_path=0 +# sbremote_path=0 -## Network +# ## Network # Network Mount Points (Needed for remote path above) # # Enter Mount points as LocalPath,RemotePath and separate each pair with '|' # e.g. mountPoints=/volume1/Public/,E:\|/volume2/share/,\\NAS\ -#mountPoints= +# mountPoints= -## Extensions +# ## Extensions # Media Extensions # # This is a list of media extensions that are used to verify that the download does contain valid media. 
-#mediaExtensions=.mkv,.avi,.divx,.xvid,.mov,.wmv,.mp4,.mpg,.mpeg,.vob,.iso,.ts +# mediaExtensions=.mkv,.avi,.divx,.xvid,.mov,.wmv,.mp4,.mpg,.mpeg,.vob,.iso,.ts -## Posix +# ## Posix # Niceness for external tasks Extractor and Transcoder. # # Set the Niceness value for the nice command. These range from -20 (most favorable to the process) to 19 (least favorable to the process). -#niceness=10 +# niceness=10 # ionice scheduling class (0, 1, 2, 3). # # Set the ionice scheduling class. 0 for none, 1 for real time, 2 for best-effort, 3 for idle. -#ionice_class=2 +# ionice_class=2 # ionice scheduling class data. # # Set the ionice scheduling class data. This defines the class data, if the class accepts an argument. For real time and best-effort, 0-7 is valid data. -#ionice_classdata=4 +# ionice_classdata=4 -## Transcoder +# ## Transcoder # getSubs (0, 1). # # set to 1 to download subtitles. -#getSubs=0 +# getSubs=0 # subLanguages. # # subLanguages. create a list of languages in the order you want them in your subtitles. -#subLanguages=eng,spa,fra +# subLanguages=eng,spa,fra # Transcode (0, 1). # # set to 1 to transcode, otherwise set to 0. -#transcode=0 +# transcode=0 # create a duplicate, or replace the original (0, 1). # # set to 1 to cretae a new file or 0 to replace the original -#duplicate=1 +# duplicate=1 # ignore extensions. # # list of extensions that won't be transcoded. -#ignoreExtensions=.avi,.mkv +# ignoreExtensions=.avi,.mkv # outputFastStart (0,1). # # outputFastStart. 1 will use -movflags + faststart. 0 will disable this from being used. -#outputFastStart=0 +# outputFastStart=0 # outputVideoPath. # # outputVideoPath. Set path you want transcoded videos moved to. Leave blank to disable. -#outputVideoPath= +# outputVideoPath= # processOutput (0,1). # # processOutput. 1 will send the outputVideoPath to SickBeard/CouchPotato. 0 will send original files. -#processOutput=0 +# processOutput=0 # audioLanguage. # # audioLanguage. 
set the 3 letter language code you want as your primary audio track. -#audioLanguage=eng +# audioLanguage=eng # allAudioLanguages (0,1). # # allAudioLanguages. 1 will keep all audio tracks (uses AudioCodec3) where available. -#allAudioLanguages=0 +# allAudioLanguages=0 # allSubLanguages (0,1). # # allSubLanguages. 1 will keep all exisiting sub languages. 0 will discare those not in your list above. -#allSubLanguages=0 +# allSubLanguages=0 # embedSubs (0,1). # # embedSubs. 1 will embded external sub/srt subs into your video if this is supported. -#embedSubs=1 +# embedSubs=1 # burnInSubtitle (0,1). # # burnInSubtitle. burns the default sub language into your video (needed for players that don't support subs) -#burnInSubtitle=0 +# burnInSubtitle=0 # extractSubs (0,1). # # extractSubs. 1 will extract subs from the video file and save these as external srt files. -#extractSubs=0 +# extractSubs=0 # externalSubDir. # # externalSubDir. set the directory where subs should be saved (if not the same directory as the video) -#externalSubDir= +# externalSubDir= # outputDefault (None, iPad, iPad-1080p, iPad-720p, Apple-TV2, iPod, iPhone, PS3, xbox, Roku-1080p, Roku-720p, Roku-480p, mkv, mp4-scene-release). # # outputDefault. Loads default configs for the selected device. The remaining options below are ignored. # If you want to use your own profile, set None and set the remaining options below. -#outputDefault=None +# outputDefault=None # hwAccel (0,1). # # hwAccel. 1 will set ffmpeg to enable hardware acceleration (this requires a recent ffmpeg). -#hwAccel=0 +# hwAccel=0 # ffmpeg output settings. 
-#outputVideoExtension=.mp4 -#outputVideoCodec=libx264 -#VideoCodecAllow= -#outputVideoPreset=medium -#outputVideoFramerate=24 -#outputVideoBitrate=800k -#outputAudioCodec=ac3 -#AudioCodecAllow= -#outputAudioChannels=6 -#outputAudioBitrate=640k -#outputQualityPercent= -#outputAudioTrack2Codec=libfaac -#AudioCodec2Allow= -#outputAudioTrack2Channels=2 -#outputAudioTrack2Bitrate=160k -#outputAudioOtherCodec=libmp3lame -#AudioOtherCodecAllow= -#outputAudioOtherChannels=2 -#outputAudioOtherBitrate=128k -#outputSubtitleCodec= +# outputVideoExtension=.mp4 +# outputVideoCodec=libx264 +# VideoCodecAllow= +# outputVideoPreset=medium +# outputVideoFramerate=24 +# outputVideoBitrate=800k +# outputAudioCodec=ac3 +# AudioCodecAllow= +# outputAudioChannels=6 +# outputAudioBitrate=640k +# outputQualityPercent= +# outputAudioTrack2Codec=libfaac +# AudioCodec2Allow= +# outputAudioTrack2Channels=2 +# outputAudioTrack2Bitrate=160k +# outputAudioOtherCodec=libmp3lame +# AudioOtherCodecAllow= +# outputAudioOtherChannels=2 +# outputAudioOtherBitrate=128k +# outputSubtitleCodec= -## WakeOnLan +# ## WakeOnLan # use WOL (0, 1). # # set to 1 to send WOL broadcast to the mac and test the server (e.g. xbmc) on the host and port specified. -#wolwake=0 +# wolwake=0 # WOL MAC # # enter the mac address of the system to be woken. -#wolmac=00:01:2e:2D:64:e1 +# wolmac=00:01:2e:2D:64:e1 # Set the Host and Port of a server to verify system has woken. 
-#wolhost=192.168.1.37 -#wolport=80 +# wolhost=192.168.1.37 +# wolport=80 + +# ### NZBGET POST-PROCESSING SCRIPT ### +# ############################################################################## -### NZBGET POST-PROCESSING SCRIPT ### -############################################################################## import sys import nzbToMedia section = "SickBeard" result = nzbToMedia.main(sys.argv, section) -sys.exit(result) \ No newline at end of file +sys.exit(result) From 84a89c62d5f3504babc7f9fc9e18fcebec52e0d3 Mon Sep 17 00:00:00 2001 From: labrys Date: Mon, 6 Jun 2016 03:25:34 -0400 Subject: [PATCH 45/82] PEP8: Tests for membership should use in/not in PEP8: .has_key() is deprecated, use in --- TorrentToMedia.py | 4 ++-- nzbToMedia.py | 18 +++++++++--------- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/TorrentToMedia.py b/TorrentToMedia.py index b3f4e719..55138674 100755 --- a/TorrentToMedia.py +++ b/TorrentToMedia.py @@ -203,7 +203,7 @@ def processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID, logger.debug('Checking for archives to extract in directory: {0}'.format(inputDirectory)) core.extractFiles(inputDirectory, outputDestination, keep_archive) - if not inputCategory in core.NOFLATTEN: # don't flatten hp in case multi cd albums, and we need to copy this back later. + if inputCategory not in core.NOFLATTEN: # don't flatten hp in case multi cd albums, and we need to copy this back later. 
core.flatten(outputDestination) # Now check if video files exist in destination: @@ -220,7 +220,7 @@ def processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID, logger.warning("Found no media files in {0}".format(outputDestination)) # Only these sections can handling failed downloads so make sure everything else gets through without the check for failed - if not sectionName in ['CouchPotato', 'SickBeard', 'NzbDrone']: + if sectionName not in ['CouchPotato', 'SickBeard', 'NzbDrone']: status = 0 logger.info("Calling {0}:{1} to post-process:{2}".format(sectionName, usercat, inputName)) diff --git a/nzbToMedia.py b/nzbToMedia.py index 5e344fa8..4997116b 100755 --- a/nzbToMedia.py +++ b/nzbToMedia.py @@ -624,7 +624,7 @@ def process(inputDirectory, inputName=None, status=0, clientAgent='manual', down if clientAgent != 'manual': # update download status in our DB update_downloadInfoStatus(inputName, 1) - if not sectionName in ['UserScript', 'NzbDrone']: + if sectionName not in ['UserScript', 'NzbDrone']: # cleanup our processing folders of any misc unwanted files and empty directories cleanDir(inputDirectory, sectionName, inputCategory) @@ -647,7 +647,7 @@ def main(args, section=None): status = 0 # NZBGet - if os.environ.has_key('NZBOP_SCRIPTDIR'): + if 'NZBOP_SCRIPTDIR' in os.environ: # Check if the script is called from nzbget 11.0 or later if os.environ['NZBOP_VERSION'][0:5] < '11.0': logger.error("NZBGet Version {0} is not supported. 
Please update NZBGet.".format(os.environ['NZBOP_VERSION'])) @@ -656,7 +656,7 @@ def main(args, section=None): logger.info("Script triggered from NZBGet Version {0}.".format(os.environ['NZBOP_VERSION'])) # Check if the script is called from nzbget 13.0 or later - if os.environ.has_key('NZBPP_TOTALSTATUS'): + if 'NZBPP_TOTALSTATUS' in os.environ: if not os.environ['NZBPP_TOTALSTATUS'] == 'SUCCESS': logger.info("Download failed with status {0}.".format(os.environ['NZBPP_STATUS'])) status = 1 @@ -689,13 +689,13 @@ def main(args, section=None): # Check for download_id to pass to CouchPotato download_id = "" failureLink = None - if os.environ.has_key('NZBPR_COUCHPOTATO'): + if 'NZBPR_COUCHPOTATO' in os.environ: download_id = os.environ['NZBPR_COUCHPOTATO'] - elif os.environ.has_key('NZBPR_DRONE'): + elif 'NZBPR_DRONE' in os.environ: download_id = os.environ['NZBPR_DRONE'] - elif os.environ.has_key('NZBPR_SONARR'): + elif 'NZBPR_SONARR' in os.environ: download_id = os.environ['NZBPR_SONARR'] - if os.environ.has_key('NZBPR__DNZB_FAILURE'): + if 'NZBPR__DNZB_FAILURE' in os.environ: failureLink = os.environ['NZBPR__DNZB_FAILURE'] # All checks done, now launching the script. @@ -791,14 +791,14 @@ def main(args, section=None): logger.info("The {0} script completed successfully.".format(args[0])) if result[1]: print result[1] + "!" # For SABnzbd Status display. - if os.environ.has_key('NZBOP_SCRIPTDIR'): # return code for nzbget v11 + if 'NZBOP_SCRIPTDIR' in os.environ: # return code for nzbget v11 del core.MYAPP return core.NZBGET_POSTPROCESS_SUCCESS else: logger.error("A problem was reported in the {0} script.".format(args[0])) if result[1]: print result[1] + "!" # For SABnzbd Status display. 
- if os.environ.has_key('NZBOP_SCRIPTDIR'): # return code for nzbget v11 + if 'NZBOP_SCRIPTDIR' in os.environ: # return code for nzbget v11 del core.MYAPP return core.NZBGET_POSTPROCESS_ERROR del core.MYAPP From 2671becddeb19cac47383404bea5facf48f1bba9 Mon Sep 17 00:00:00 2001 From: labrys Date: Mon, 6 Jun 2016 03:44:32 -0400 Subject: [PATCH 46/82] Use six.text_type to standardize unicode function --- TorrentToMedia.py | 11 ++++++----- nzbToMedia.py | 12 +++++++----- 2 files changed, 13 insertions(+), 10 deletions(-) diff --git a/TorrentToMedia.py b/TorrentToMedia.py index 55138674..1d0d1f87 100755 --- a/TorrentToMedia.py +++ b/TorrentToMedia.py @@ -5,6 +5,7 @@ import os import sys import core +from libs.six import text_type from core import logger, nzbToMediaDB from core.nzbToMediaUtil import convert_to_ascii, CharReplace, plex_update from core.nzbToMediaUserScript import external_script @@ -31,11 +32,11 @@ def processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID, except: pass - controlValueDict = {"input_directory": unicode(inputDirectory1)} - newValueDict = {"input_name": unicode(inputName1), - "input_hash": unicode(inputHash), - "input_id": unicode(inputID), - "client_agent": unicode(clientAgent), + controlValueDict = {"input_directory": text_type(inputDirectory1)} + newValueDict = {"input_name": text_type(inputName1), + "input_hash": text_type(inputHash), + "input_id": text_type(inputID), + "client_agent": text_type(clientAgent), "status": 0, "last_update": datetime.date.today().toordinal() } diff --git a/nzbToMedia.py b/nzbToMedia.py index 4997116b..7efa244f 100755 --- a/nzbToMedia.py +++ b/nzbToMedia.py @@ -502,6 +502,8 @@ import os import sys import datetime +from libs.six import text_type + import core from core.autoProcess.autoProcessComics import autoProcessComics from core.autoProcess.autoProcessGames import autoProcessGames @@ -539,11 +541,11 @@ def process(inputDirectory, inputName=None, status=0, clientAgent='manual', down 
except: pass - controlValueDict = {"input_directory": unicode(inputDirectory1)} - newValueDict = {"input_name": unicode(inputName1), - "input_hash": unicode(download_id), - "input_id": unicode(download_id), - "client_agent": unicode(clientAgent), + controlValueDict = {"input_directory": text_type(inputDirectory1)} + newValueDict = {"input_name": text_type(inputName1), + "input_hash": text_type(download_id), + "input_id": text_type(download_id), + "client_agent": text_type(clientAgent), "status": 0, "last_update": datetime.date.today().toordinal() } From 9f6ca4eaadfb95229351f062fa7df397233761df Mon Sep 17 00:00:00 2001 From: labrys Date: Mon, 6 Jun 2016 04:20:58 -0400 Subject: [PATCH 47/82] Fix unresolved reference to replace_links --- TorrentToMedia.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/TorrentToMedia.py b/TorrentToMedia.py index 1d0d1f87..6afbc984 100755 --- a/TorrentToMedia.py +++ b/TorrentToMedia.py @@ -7,7 +7,7 @@ import core from libs.six import text_type from core import logger, nzbToMediaDB -from core.nzbToMediaUtil import convert_to_ascii, CharReplace, plex_update +from core.nzbToMediaUtil import convert_to_ascii, CharReplace, plex_update, replace_links from core.nzbToMediaUserScript import external_script @@ -270,7 +270,7 @@ def processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID, for dirpath, dirs, files in os.walk(inputDirectory): for file in files: logger.debug('Checking symlink: {0}'.format(os.path.join(dirpath, file))) - core.replace_links(os.path.join(dirpath, file)) + replace_links(os.path.join(dirpath, file)) core.remove_torrent(clientAgent, inputHash, inputID, inputName) if not sectionName == 'UserScript': # for user script, we assume this is cleaned by the script or option USER_SCRIPT_CLEAN From ccdad818fd939a2a3b8529769dcc42bf45093e58 Mon Sep 17 00:00:00 2001 From: labrys Date: Mon, 6 Jun 2016 03:47:27 -0400 Subject: [PATCH 48/82] Use print_function to standardize printing --- 
nzbToMedia.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/nzbToMedia.py b/nzbToMedia.py index 7efa244f..cd42cf18 100755 --- a/nzbToMedia.py +++ b/nzbToMedia.py @@ -499,6 +499,8 @@ # ### NZBGET POST-PROCESSING SCRIPT ### # ############################################################################## +from __future__ import print_function + import os import sys import datetime @@ -513,7 +515,6 @@ from core.autoProcess.autoProcessTV import autoProcessTV from core.nzbToMediaUtil import getDirs, extractFiles, cleanDir, update_downloadInfoStatus, get_downloadInfo, CharReplace, convert_to_ascii, get_nzoid, plex_update from core.nzbToMediaUserScript import external_script from core import logger, nzbToMediaDB -from six import text_type # post-processing @@ -792,14 +793,14 @@ def main(args, section=None): if result[0] == 0: logger.info("The {0} script completed successfully.".format(args[0])) if result[1]: - print result[1] + "!" # For SABnzbd Status display. + print(result[1] + "!") if 'NZBOP_SCRIPTDIR' in os.environ: # return code for nzbget v11 del core.MYAPP return core.NZBGET_POSTPROCESS_SUCCESS else: logger.error("A problem was reported in the {0} script.".format(args[0])) if result[1]: - print result[1] + "!" # For SABnzbd Status display. + print(result[1] + "!") if 'NZBOP_SCRIPTDIR' in os.environ: # return code for nzbget v11 del core.MYAPP return core.NZBGET_POSTPROCESS_ERROR From a3a59af3f880394ee32382f7a512d868a16c4617 Mon Sep 17 00:00:00 2001 From: labrys Date: Mon, 6 Jun 2016 04:03:14 -0400 Subject: [PATCH 49/82] Too broad exceptions: * Use .get() with default value instead. 
* Use UnicodeError to catch .encode exceptions --- TorrentToMedia.py | 58 +++++++++++++---------------------------------- nzbToMedia.py | 25 +++++++------------- 2 files changed, 24 insertions(+), 59 deletions(-) diff --git a/TorrentToMedia.py b/TorrentToMedia.py index 6afbc984..6fa16974 100755 --- a/TorrentToMedia.py +++ b/TorrentToMedia.py @@ -15,14 +15,12 @@ def processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID, status = 1 # 1 = failed | 0 = success root = 0 foundFile = 0 - uniquePath = 1 if clientAgent != 'manual' and not core.DOWNLOADINFO: logger.debug('Adding TORRENT download info for directory {0} to database'.format(inputDirectory)) myDB = nzbToMediaDB.DBConnection() - encoded = False inputDirectory1 = inputDirectory inputName1 = inputName @@ -53,11 +51,11 @@ def processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID, usercat = inputCategory try: inputName = inputName.encode(core.SYS_ENCODING) - except: + except UnicodeError: pass try: inputDirectory = inputDirectory.encode(core.SYS_ENCODING) - except: + except UnicodeError: pass logger.debug("Determined Directory: {0} | Name: {1} | Category: {2}".format(inputDirectory, inputName, inputCategory)) @@ -88,25 +86,10 @@ def processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID, inputCategory)) return [-1, ""] - try: - Torrent_NoLink = int(section[usercat]["Torrent_NoLink"]) - except: - Torrent_NoLink = 0 - - try: - keep_archive = int(section[usercat]["keep_archive"]) - except: - keep_archive = 0 - - try: - extract = int(section[usercat]['extract']) - except: - extract = 0 - - try: - uniquePath = int(section[usercat]["unique_path"]) - except: - uniquePath = 1 + Torrent_NoLink = int(section[usercat].get("Torrent_NoLink", 0)) + keep_archive = int(section[usercat].get("keep_archive", 0)) + extract = int(section[usercat].get('extract', 0)) + uniquePath = int(section[usercat].get("unique_path", 1)) if clientAgent != 'manual': 
core.pause_torrent(clientAgent, inputHash, inputID, inputName) @@ -126,7 +109,7 @@ def processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID, core.os.path.join(core.OUTPUTDIRECTORY, inputCategory)) try: outputDestination = outputDestination.encode(core.SYS_ENCODING) - except: + except UnicodeError: pass if outputDestination in inputDirectory: @@ -167,7 +150,7 @@ def processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID, "Setting outputDestination to {0} to preserve folder structure".format(os.path.dirname(targetFile))) try: targetFile = targetFile.encode(core.SYS_ENCODING) - except: + except UnicodeError: pass if root == 1: if not foundFile: @@ -327,34 +310,25 @@ def main(args): os.path.basename(dirName)) ) - try: - clientAgent = str(core.DOWNLOADINFO[0]['client_agent']) - except: - clientAgent = 'manual' - try: - inputHash = str(core.DOWNLOADINFO[0]['input_hash']) - except: - inputHash = None - try: - inputID = str(core.DOWNLOADINFO[0]['input_id']) - except: - inputID = None + clientAgent = str(core.DOWNLOADINFO[0].get('client_agent', '')) + inputHash = str(core.DOWNLOADINFO[0].get('input_hash', '')) + inputID = str(core.DOWNLOADINFO[0].get('input_id', '')) - if clientAgent.lower() not in core.TORRENT_CLIENTS and clientAgent != 'manual': + if clientAgent and clientAgent.lower() not in core.TORRENT_CLIENTS: continue try: dirName = dirName.encode(core.SYS_ENCODING) - except: + except UnicodeError: pass inputName = os.path.basename(dirName) try: inputName = inputName.encode(core.SYS_ENCODING) - except: + except UnicodeError: pass - results = processTorrent(dirName, inputName, subsection, inputHash, inputID, - clientAgent) + results = processTorrent(dirName, inputName, subsection, inputHash or None, inputID or None, + clientAgent or 'manual') if results[0] != 0: logger.error("A problem was reported when trying to perform a manual run for {0}:{1}.".format( section, subsection)) diff --git a/nzbToMedia.py b/nzbToMedia.py index 
cd42cf18..a3e6363c 100755 --- a/nzbToMedia.py +++ b/nzbToMedia.py @@ -581,10 +581,7 @@ def process(inputDirectory, inputName=None, status=0, clientAgent='manual', down inputCategory)) return [-1, ""] - try: - extract = int(section[usercat]['extract']) - except: - extract = 0 + extract = int(section[usercat].get('extract', 0)) try: if int(section[usercat]['remote_path']) and not core.REMOTEPATHS: @@ -761,30 +758,24 @@ def main(args, section=None): os.path.basename(dirName)) ) - try: - clientAgent = str(core.DOWNLOADINFO[0]['client_agent']) - except: - clientAgent = 'manual' - try: - download_id = str(core.DOWNLOADINFO[0]['input_id']) - except: - download_id = None + clientAgent = str(core.DOWNLOADINFO[0].get('client_agent', '')) + download_id = str(core.DOWNLOADINFO[0].get('input_id', '')) - if clientAgent.lower() not in core.NZB_CLIENTS and clientAgent != 'manual': + if clientAgent and clientAgent.lower() not in core.NZB_CLIENTS: continue try: dirName = dirName.encode(core.SYS_ENCODING) - except: + except UnicodeError: pass inputName = os.path.basename(dirName) try: inputName = inputName.encode(core.SYS_ENCODING) - except: + except UnicodeError: pass - results = process(dirName, inputName, 0, clientAgent=clientAgent, - download_id=download_id, inputCategory=subsection) + results = process(dirName, inputName, 0, clientAgent=clientAgent or 'manual', + download_id=download_id or None, inputCategory=subsection) if results[0] != 0: logger.error("A problem was reported when trying to perform a manual run for {0}:{1}.".format (section, subsection)) From 2ad9f2e35f9294cd70381bd4df43d1f80bf2d1d7 Mon Sep 17 00:00:00 2001 From: labrys Date: Mon, 6 Jun 2016 05:45:42 -0400 Subject: [PATCH 50/82] Standardize string formatting to use .format instead of concat --- TorrentToMedia.py | 6 +- core/__init__.py | 12 ++- core/autoProcess/autoProcessMovie.py | 4 +- core/databases/mainDB.py | 19 ++-- core/extractor/extractor.py | 4 +- core/gh_api.py | 8 +- core/logger.py | 8 +- 
core/nzbToMediaConfig.py | 28 +++--- core/nzbToMediaDB.py | 94 +++++++++++------- core/nzbToMediaUserScript.py | 4 +- core/nzbToMediaUtil.py | 35 +++---- core/transcoder/transcoder.py | 109 ++++++++++---------- core/transmissionrpc/client.py | 13 ++- core/transmissionrpc/utils.py | 2 +- core/utorrent/client.py | 2 +- core/utorrent/upload.py | 4 +- core/versionCheck.py | 142 +++++++++++++-------------- nzbToMedia.py | 4 +- 18 files changed, 259 insertions(+), 239 deletions(-) diff --git a/TorrentToMedia.py b/TorrentToMedia.py index 6fa16974..ad61a6a2 100755 --- a/TorrentToMedia.py +++ b/TorrentToMedia.py @@ -310,9 +310,9 @@ def main(args): os.path.basename(dirName)) ) - clientAgent = str(core.DOWNLOADINFO[0].get('client_agent', '')) - inputHash = str(core.DOWNLOADINFO[0].get('input_hash', '')) - inputID = str(core.DOWNLOADINFO[0].get('input_id', '')) + clientAgent = text_type(core.DOWNLOADINFO[0].get('client_agent', '')) + inputHash = text_type(core.DOWNLOADINFO[0].get('input_hash', '')) + inputID = text_type(core.DOWNLOADINFO[0].get('input_id', '')) if clientAgent and clientAgent.lower() not in core.TORRENT_CLIENTS: continue diff --git a/core/__init__.py b/core/__init__.py index 80444fea..39b6500d 100644 --- a/core/__init__.py +++ b/core/__init__.py @@ -265,8 +265,9 @@ def initialize(section=None): # On non-unicode builds this will raise an AttributeError, if encoding type is not valid it throws a LookupError sys.setdefaultencoding(SYS_ENCODING) except: - print('Sorry, you MUST add the nzbToMedia folder to the PYTHONPATH environment variable') - print('or find another way to force Python to use ' + SYS_ENCODING + ' for string encoding.') + print('Sorry, you MUST add the nzbToMedia folder to the PYTHONPATH environment variable' + '\nor find another way to force Python to use {codec} for string encoding.'.format + (codec=SYS_ENCODING)) if 'NZBOP_SCRIPTDIR' in os.environ: sys.exit(NZBGET_POSTPROCESS_ERROR) else: @@ -333,8 +334,9 @@ def initialize(section=None): 
logger.error("Update wasn't successful, not restarting. Check your log for more information.") # Set Current Version - logger.info( - 'nzbToMedia Version:' + NZBTOMEDIA_VERSION + ' Branch:' + GIT_BRANCH + ' (' + platform.system() + ' ' + platform.release() + ')') + logger.info('nzbToMedia Version:{version} Branch:{branch} ({system} {release})'.format + (version=NZBTOMEDIA_VERSION, branch=GIT_BRANCH, + system=platform.system(), release=platform.release())) if int(CFG["WakeOnLan"]["wake"]) == 1: WakeUp() @@ -842,7 +844,7 @@ def restart(): if popen_list: popen_list += SYS_ARGV - logger.log(u"Restarting nzbToMedia with " + str(popen_list)) + logger.log(u"Restarting nzbToMedia with {args}".format(args=popen_list)) logger.close() p = subprocess.Popen(popen_list, cwd=os.getcwd()) p.wait() diff --git a/core/autoProcess/autoProcessMovie.py b/core/autoProcess/autoProcessMovie.py index a4e4b0ba..7b89e40f 100644 --- a/core/autoProcess/autoProcessMovie.py +++ b/core/autoProcess/autoProcessMovie.py @@ -46,7 +46,7 @@ class autoProcessMovie(object): if not result['success']: if 'error' in result: - logger.error(str(result['error'])) + logger.error('{0}'.format(result['error'])) else: logger.error("no media found for id {0}".format(params['id'])) return results @@ -262,7 +262,7 @@ class autoProcessMovie(object): if release_id: logger.postprocess("Setting failed release {0} to ignored ...".format(inputName), section) - url = baseURL + "/release.ignore" + url = "{url}/release.ignore".format(url=baseURL) params = {'id': release_id} logger.debug("Opening URL: {0} with PARAMS: {1}".format(url, params), section) diff --git a/core/databases/mainDB.py b/core/databases/mainDB.py index e32e6dae..d79033db 100644 --- a/core/databases/mainDB.py +++ b/core/databases/mainDB.py @@ -42,18 +42,17 @@ class InitialSchema(nzbToMediaDB.SchemaUpgrade): cur_db_version = self.checkDBVersion() if cur_db_version < MIN_DB_VERSION: - logger.log_error_and_exit("Your database version (" + str( - cur_db_version) 
+ ") is too old to migrate from what this version of nzbToMedia supports (" + \ - str(MIN_DB_VERSION) + ").\n" + \ - "Please remove nzbtomedia.db file to begin fresh." - ) + logger.log_error_and_exit(u"Your database version ({current}) is too old to migrate " + u"from what this version of nzbToMedia supports ({min})." + u"\nPlease remove nzbtomedia.db file to begin fresh.".format + (current=cur_db_version, min=MIN_DB_VERSION)) if cur_db_version > MAX_DB_VERSION: - logger.log_error_and_exit("Your database version (" + str( - cur_db_version) + ") has been incremented past what this version of nzbToMedia supports (" + \ - str(MAX_DB_VERSION) + ").\n" + \ - "If you have used other forks of nzbToMedia, your database may be unusable due to their modifications." - ) + logger.log_error_and_exit(u"Your database version ({current}) has been incremented " + u"past what this version of nzbToMedia supports ({max})." + u"\nIf you have used other forks of nzbToMedia, your database " + u"may be unusable due to their modifications.".format + (current=cur_db_version, max=MAX_DB_VERSION)) if cur_db_version < MAX_DB_VERSION: # We need to upgrade. 
queries = [ "CREATE TABLE downloads2 (input_directory TEXT, input_name TEXT, input_hash TEXT, input_id TEXT, client_agent TEXT, status INTEGER, last_update NUMERIC, CONSTRAINT pk_downloadID PRIMARY KEY (input_directory, input_name));", diff --git a/core/extractor/extractor.py b/core/extractor/extractor.py index 08011706..7cbd34b9 100644 --- a/core/extractor/extractor.py +++ b/core/extractor/extractor.py @@ -70,7 +70,7 @@ def extract(filePath, outputDestination): if ext[1] in (".gz", ".bz2", ".lzma"): # Check if this is a tar if os.path.splitext(ext[0])[1] == ".tar": - cmd = EXTRACT_COMMANDS[".tar" + ext[1]] + cmd = EXTRACT_COMMANDS[".tar{ext}".format(ext=ext[1])] elif ext[1] in (".1", ".01", ".001") and os.path.splitext(ext[0])[1] in (".rar", ".zip", ".7z"): cmd = EXTRACT_COMMANDS[os.path.splitext(ext[0])[1]] elif ext[1] in (".cb7", ".cba", ".cbr", ".cbt", ".cbz"): # don't extract these comic book archives. @@ -131,7 +131,7 @@ def extract(filePath, outputDestination): continue cmd2 = cmd # append password here. - passcmd = "-p" + password + passcmd = "-p{pwd}".format(pwd=password) cmd2.append(passcmd) p = Popen(cmd2, stdout=devnull, stderr=devnull, startupinfo=info) # should extract files fine. res = p.wait() diff --git a/core/gh_api.py b/core/gh_api.py index 1db7faf7..f1264c09 100644 --- a/core/gh_api.py +++ b/core/gh_api.py @@ -1,6 +1,7 @@ # coding=utf-8 import requests +from six import iteritems class GitHub(object): @@ -19,10 +20,11 @@ class GitHub(object): Access the API at the path given and with the optional params given. """ - url = 'https://api.github.com/' + '/'.join(path) + url = 'https://api.github.com/{path}'.format(path='/'.join(path)) if params and type(params) is dict: - url += '?' 
+ '&'.join([str(x) + '=' + str(params[x]) for x in params.keys()]) + url += '?{params}'.format(params='&'.join(['{key}={value}'.format(key=k, value=v) + for k, v in iteritems(params)])) data = requests.get(url, verify=False) @@ -59,6 +61,6 @@ class GitHub(object): Returns a deserialized json object containing the compare info. See http://developer.github.com/v3/repos/commits/ """ access_API = self._access_API( - ['repos', self.github_repo_user, self.github_repo, 'compare', base + '...' + head], + ['repos', self.github_repo_user, self.github_repo, 'compare', '{base}...{head}'.format(base=base, head=head)], params={'per_page': per_page}) return access_API diff --git a/core/logger.py b/core/logger.py index 94d1764f..5a555bf2 100644 --- a/core/logger.py +++ b/core/logger.py @@ -136,7 +136,7 @@ class NTMRotatingLogHandler(object): i: Log number to ues """ - return self.log_file_path + ('.' + str(i) if i else '') + return self.log_file_path + ('.{0}'.format(i) if i else '') def _num_logs(self): """ @@ -193,9 +193,9 @@ class NTMRotatingLogHandler(object): self.writes_since_check += 1 try: - message = u"{0}: {1}".format(str(section).upper(), toLog) - except: - message = u"{0}: Message contains non-utf-8 string".format(str(section).upper()) + message = u"{0}: {1}".format(section.upper(), toLog) + except UnicodeError: + message = u"{0}: Message contains non-utf-8 string".format(section.upper()) out_line = message diff --git a/core/nzbToMediaConfig.py b/core/nzbToMediaConfig.py index 00aed989..b40b1cf8 100644 --- a/core/nzbToMediaConfig.py +++ b/core/nzbToMediaConfig.py @@ -238,7 +238,7 @@ class ConfigObj(configobj.ConfigObj, Section): process_section(section, subsection) # create a backup of our old config - CFG_OLD.filename = core.CONFIG_FILE + ".old" + CFG_OLD.filename ="{config}.old".format(config=core.CONFIG_FILE) CFG_OLD.write() # write our new config to autoProcessMedia.cfg @@ -270,7 +270,7 @@ class ConfigObj(configobj.ConfigObj, Section): envKeys = ['AUTO_UPDATE', 
'CHECK_MEDIA', 'SAFE_MODE'] cfgKeys = ['auto_update', 'check_media', 'safe_mode'] for index in range(len(envKeys)): - key = 'NZBPO_' + envKeys[index] + key = 'NZBPO_{index}'.format(index=envKeys[index]) if key in os.environ: option = cfgKeys[index] value = os.environ[key] @@ -280,7 +280,7 @@ class ConfigObj(configobj.ConfigObj, Section): envKeys = ['MOUNTPOINTS'] cfgKeys = ['mount_points'] for index in range(len(envKeys)): - key = 'NZBPO_' + envKeys[index] + key = 'NZBPO_{index}'.format(index=envKeys[index]) if key in os.environ: option = cfgKeys[index] value = os.environ[key] @@ -294,7 +294,7 @@ class ConfigObj(configobj.ConfigObj, Section): 'wait_for', 'watch_dir'] if envCatKey in os.environ: for index in range(len(envKeys)): - key = 'NZBPO_CPS' + envKeys[index] + key = 'NZBPO_CPS{index}'.format(index=envKeys[index]) if key in os.environ: option = cfgKeys[index] value = os.environ[key] @@ -311,7 +311,7 @@ class ConfigObj(configobj.ConfigObj, Section): 'delete_failed', 'Torrent_NoLink', 'nzbExtractionBy', 'remote_path', 'process_method'] if envCatKey in os.environ: for index in range(len(envKeys)): - key = 'NZBPO_SB' + envKeys[index] + key = 'NZBPO_SB{index}'.format(index=envKeys[index]) if key in os.environ: option = cfgKeys[index] value = os.environ[key] @@ -328,7 +328,7 @@ class ConfigObj(configobj.ConfigObj, Section): cfgKeys = ['enabled', 'apikey', 'host', 'port', 'ssl', 'web_root', 'wait_for', 'watch_dir', 'remote_path'] if envCatKey in os.environ: for index in range(len(envKeys)): - key = 'NZBPO_HP' + envKeys[index] + key = 'NZBPO_HP{index}'.format(index=envKeys[index]) if key in os.environ: option = cfgKeys[index] value = os.environ[key] @@ -345,7 +345,7 @@ class ConfigObj(configobj.ConfigObj, Section): 'remote_path'] if envCatKey in os.environ: for index in range(len(envKeys)): - key = 'NZBPO_MY' + envKeys[index] + key = 'NZBPO_MY{index}'.format(index=envKeys[index]) if key in os.environ: option = cfgKeys[index] value = os.environ[key] @@ -360,7 +360,7 @@ 
class ConfigObj(configobj.ConfigObj, Section): cfgKeys = ['enabled', 'apikey', 'host', 'port', 'ssl', 'web_root', 'watch_dir', 'library', 'remote_path'] if envCatKey in os.environ: for index in range(len(envKeys)): - key = 'NZBPO_GZ' + envKeys[index] + key = 'NZBPO_GZ{index}'.format(index=envKeys[index]) if key in os.environ: option = cfgKeys[index] value = os.environ[key] @@ -377,7 +377,7 @@ class ConfigObj(configobj.ConfigObj, Section): 'Torrent_NoLink', 'nzbExtractionBy', 'wait_for', 'delete_failed', 'remote_path'] if envCatKey in os.environ: for index in range(len(envKeys)): - key = 'NZBPO_ND' + envKeys[index] + key = 'NZBPO_ND{index}'.format(index=envKeys[index]) if key in os.environ: option = cfgKeys[index] value = os.environ[key] @@ -392,7 +392,7 @@ class ConfigObj(configobj.ConfigObj, Section): envKeys = ['COMPRESSEDEXTENSIONS', 'MEDIAEXTENSIONS', 'METAEXTENSIONS'] cfgKeys = ['compressedExtensions', 'mediaExtensions', 'metaExtensions'] for index in range(len(envKeys)): - key = 'NZBPO_' + envKeys[index] + key = 'NZBPO_{index}'.format(index=envKeys[index]) if key in os.environ: option = cfgKeys[index] value = os.environ[key] @@ -402,7 +402,7 @@ class ConfigObj(configobj.ConfigObj, Section): envKeys = ['NICENESS', 'IONICE_CLASS', 'IONICE_CLASSDATA'] cfgKeys = ['niceness', 'ionice_class', 'ionice_classdata'] for index in range(len(envKeys)): - key = 'NZBPO_' + envKeys[index] + key = 'NZBPO_{index}'.format(index=envKeys[index]) if key in os.environ: option = cfgKeys[index] value = os.environ[key] @@ -430,7 +430,7 @@ class ConfigObj(configobj.ConfigObj, Section): 'outputSubtitleCodec', 'outputAudioChannels', 'outputAudioTrack2Channels', 'outputAudioOtherChannels'] for index in range(len(envKeys)): - key = 'NZBPO_' + envKeys[index] + key = 'NZBPO_{index}'.format(index=envKeys[index]) if key in os.environ: option = cfgKeys[index] value = os.environ[key] @@ -440,7 +440,7 @@ class ConfigObj(configobj.ConfigObj, Section): envKeys = ['WAKE', 'HOST', 'PORT', 'MAC'] 
cfgKeys = ['wake', 'host', 'port', 'mac'] for index in range(len(envKeys)): - key = 'NZBPO_WOL' + envKeys[index] + key = 'NZBPO_WOL{index}'.format(index=envKeys[index]) if key in os.environ: option = cfgKeys[index] value = os.environ[key] @@ -454,7 +454,7 @@ class ConfigObj(configobj.ConfigObj, Section): 'user_script_successCodes', 'user_script_clean', 'delay', 'remote_path'] if envCatKey in os.environ: for index in range(len(envKeys)): - key = 'NZBPO_' + envKeys[index] + key = 'NZBPO_{index}'.format(index=envKeys[index]) if key in os.environ: option = cfgKeys[index] value = os.environ[key] diff --git a/core/nzbToMediaDB.py b/core/nzbToMediaDB.py index de7fd825..de9e3c91 100644 --- a/core/nzbToMediaDB.py +++ b/core/nzbToMediaDB.py @@ -56,28 +56,29 @@ class DBConnection(object): while attempt < 5: try: if args is None: - logger.log(self.filename + ": " + query, logger.DB) + logger.log("{name}: {query}".format(name=self.filename, query=query), logger.DB) cursor = self.connection.cursor() cursor.execute(query) sqlResult = cursor.fetchone()[0] else: - logger.log(self.filename + ": " + query + " with args " + str(args), logger.DB) + logger.log("{name}: {query} with args {args}".format + (name=self.filename, query=query, args=args), logger.DB) cursor = self.connection.cursor() cursor.execute(query, args) sqlResult = cursor.fetchone()[0] # get out of the connection attempt loop since we were successful break - except sqlite3.OperationalError as e: - if "unable to open database file" in e.args[0] or "database is locked" in e.args[0]: - logger.log(u"DB error: " + str(e), logger.WARNING) + except sqlite3.OperationalError as error: + if "unable to open database file" in error.args[0] or "database is locked" in error.args[0]: + logger.log(u"DB error: {msg}".format(msg=error), logger.WARNING) attempt += 1 time.sleep(1) else: - logger.log(u"DB error: " + str(e), logger.ERROR) + logger.log(u"DB error: {msg}".format(msg=error), logger.ERROR) raise - except sqlite3.DatabaseError as 
e: - logger.log(u"Fatal error executing query: " + str(e), logger.ERROR) + except sqlite3.DatabaseError as error: + logger.log(u"Fatal error executing query: {msg}".format(msg=error), logger.ERROR) raise return sqlResult @@ -98,26 +99,26 @@ class DBConnection(object): sqlResult.append(self.connection.execute(qu[0])) elif len(qu) > 1: if logTransaction: - logger.log(qu[0] + " with args " + str(qu[1]), logger.DEBUG) + logger.log(u"{query} with args {args}".format(query=qu[0], args=qu[1]), logger.DEBUG) sqlResult.append(self.connection.execute(qu[0], qu[1])) self.connection.commit() - logger.log(u"Transaction with " + str(len(querylist)) + u" query's executed", logger.DEBUG) + logger.log(u"Transaction with {x} query's executed".format(x=len(querylist)), logger.DEBUG) return sqlResult - except sqlite3.OperationalError as e: + except sqlite3.OperationalError as error: sqlResult = [] if self.connection: self.connection.rollback() - if "unable to open database file" in e.args[0] or "database is locked" in e.args[0]: - logger.log(u"DB error: " + str(e), logger.WARNING) + if "unable to open database file" in error.args[0] or "database is locked" in error.args[0]: + logger.log(u"DB error: {msg}".format(msg=error), logger.WARNING) attempt += 1 time.sleep(1) else: - logger.log(u"DB error: " + str(e), logger.ERROR) + logger.log(u"DB error: {msg}".format(msg=error), logger.ERROR) raise - except sqlite3.DatabaseError as e: + except sqlite3.DatabaseError as error: if self.connection: self.connection.rollback() - logger.log(u"Fatal error executing query: " + str(e), logger.ERROR) + logger.log(u"Fatal error executing query: {msg}".format(msg=error), logger.ERROR) raise return sqlResult @@ -132,24 +133,25 @@ class DBConnection(object): while attempt < 5: try: if args is None: - logger.log(self.filename + ": " + query, logger.DB) + logger.log(u"{name}: {query}".format(name=self.filename, query=query), logger.DB) sqlResult = self.connection.execute(query) else: - 
logger.log(self.filename + ": " + query + " with args " + str(args), logger.DB) + logger.log(u"{name}: {query} with args {args}".format + (name=self.filename, query=query, args=args), logger.DB) sqlResult = self.connection.execute(query, args) self.connection.commit() # get out of the connection attempt loop since we were successful break - except sqlite3.OperationalError as e: - if "unable to open database file" in e.args[0] or "database is locked" in e.args[0]: - logger.log(u"DB error: " + str(e), logger.WARNING) + except sqlite3.OperationalError as error: + if "unable to open database file" in error.args[0] or "database is locked" in error.args[0]: + logger.log(u"DB error: {msg}".format(msg=error), logger.WARNING) attempt += 1 time.sleep(1) else: - logger.log(u"DB error: " + str(e), logger.ERROR) + logger.log(u"DB error: {msg}".format(msg=error), logger.ERROR) raise - except sqlite3.DatabaseError as e: - logger.log(u"Fatal error executing query: " + str(e), logger.ERROR) + except sqlite3.DatabaseError as error: + logger.log(u"Fatal error executing query: {msg}".format(msg=error), logger.ERROR) raise return sqlResult @@ -167,17 +169,28 @@ class DBConnection(object): changesBefore = self.connection.total_changes - genParams = lambda myDict: [x + " = ?" 
for x in myDict.keys()] + genParams = lambda myDict: ["{key} = ?".format(key=k) for k in myDict.keys()] - query = "UPDATE " + tableName + " SET " + ", ".join(genParams(valueDict)) + " WHERE " + " AND ".join( - genParams(keyDict)) - - self.action(query, valueDict.values() + keyDict.values()) + self.action( + "UPDATE {table} " + "SET {params} " + "WHERE {conditions}".format( + table=tableName, + params=", ".join(genParams(valueDict)), + conditions=" AND ".join(genParams(keyDict))), + valueDict.values() + keyDict.values() + ) if self.connection.total_changes == changesBefore: - query = "INSERT OR IGNORE INTO " + tableName + " (" + ", ".join(valueDict.keys() + keyDict.keys()) + ")" + \ - " VALUES (" + ", ".join(["?"] * len(valueDict.keys() + keyDict.keys())) + ")" - self.action(query, valueDict.values() + keyDict.values()) + self.action( + "INSERT OR IGNORE INTO {table} ({columns}) " + "VALUES ({values})".format( + table=tableName, + columns=", ".join(valueDict.keys() + keyDict.keys()), + values=", ".join(["?"] * len(valueDict.keys() + keyDict.keys())) + ) + , valueDict.values() + keyDict.values() + ) def tableInfo(self, tableName): # FIXME ? 
binding is not supported here, but I cannot find a way to escape a string manually @@ -222,17 +235,22 @@ def prettyName(class_name): def _processUpgrade(connection, upgradeClass): instance = upgradeClass(connection) - logger.log(u"Checking " + prettyName(upgradeClass.__name__) + " database upgrade", logger.DEBUG) + logger.log(u"Checking {name} database upgrade".format + (name=prettyName(upgradeClass.__name__)), logger.DEBUG) if not instance.test(): - logger.log(u"Database upgrade required: " + prettyName(upgradeClass.__name__), logger.MESSAGE) + logger.log(u"Database upgrade required: {name}".format + (name=prettyName(upgradeClass.__name__)), logger.MESSAGE) try: instance.execute() - except sqlite3.DatabaseError as e: - print("Error in " + str(upgradeClass.__name__) + ": " + str(e)) + except sqlite3.DatabaseError as error: + print(u"Error in {name}: {msg}".format + (name=upgradeClass.__name__, msg=error)) raise - logger.log(upgradeClass.__name__ + " upgrade completed", logger.DEBUG) + logger.log(u"{name} upgrade completed".format + (name=upgradeClass.__name__), logger.DEBUG) else: - logger.log(upgradeClass.__name__ + " upgrade not required", logger.DEBUG) + logger.log(u"{name} upgrade not required".format + (name=upgradeClass.__name__), logger.DEBUG) for upgradeSubClass in upgradeClass.__subclasses__(): _processUpgrade(connection, upgradeSubClass) diff --git a/core/nzbToMediaUserScript.py b/core/nzbToMediaUserScript.py index a3a75dac..028a396e 100644 --- a/core/nzbToMediaUserScript.py +++ b/core/nzbToMediaUserScript.py @@ -80,8 +80,8 @@ def external_script(outputDestination, torrentName, torrentLabel, settings): continue cmd = "" for item in command: - cmd = cmd + " " + item - logger.info("Running script {0} on file {1}.".format(cmd, filePath), "USERSCRIPT") + cmd = "{cmd} {item}".format(cmd=cmd, item=item) + logger.info("Running script {cmd} on file {path}.".format(cmd=cmd, path=filePath), "USERSCRIPT") try: p = Popen(command) res = p.wait() diff --git 
a/core/nzbToMediaUtil.py b/core/nzbToMediaUtil.py index d600eceb..204766a3 100644 --- a/core/nzbToMediaUtil.py +++ b/core/nzbToMediaUtil.py @@ -342,11 +342,12 @@ def rmReadOnly(filename): file_attribute = os.stat(filename)[0] if not file_attribute & stat.S_IWRITE: # File is read-only, so make it writeable - logger.debug('Read only mode on file ' + filename + ' Will try to make it writeable') + logger.debug('Read only mode on file {name}. Attempting to make it writeable'.format + (name=filename)) try: os.chmod(filename, stat.S_IWRITE) except: - logger.warning('Cannot change permissions of ' + filename, logger.WARNING) + logger.warning('Cannot change permissions of {file}'.format(file=filename), logger.WARNING) # Wake function @@ -1156,11 +1157,11 @@ def server_responding(baseURL): def plex_update(category): if core.FAILED: return - if core.PLEXSSL: - url = "https://" - else: - url = "http://" - url = url + core.PLEXHOST + ':' + core.PLEXPORT + '/library/sections/' + url = '{scheme}://{host}:{port}/library/sections/'.format( + scheme='https' if core.PLEXSSL else 'http', + host=core.PLEXHOST, + port=core.PLEXPORT, + ) section = None if not core.PLEXSEC: return @@ -1170,7 +1171,7 @@ def plex_update(category): section = item[1] if section: - url = url + section + '/refresh?X-Plex-Token=' + core.PLEXTOKEN + url = '{url}{section}/refresh?X-Plex-Token={token}'.format(url=url, section=section, token=core.PLEXTOKEN) requests.get(url, timeout=(60, 120), verify=False) logger.debug("Plex Library has been refreshed.", 'PLEX') else: @@ -1180,27 +1181,27 @@ def plex_update(category): def backupVersionedFile(old_file, version): numTries = 0 - new_file = old_file + '.' 
+ 'v' + str(version) + new_file = '{old}.v{version}'.format(old=old_file, version=version) while not os.path.isfile(new_file): if not os.path.isfile(old_file): - logger.log(u"Not creating backup, " + old_file + " doesn't exist", logger.DEBUG) + logger.log(u"Not creating backup, {file} doesn't exist".format(file=old_file), logger.DEBUG) break try: - logger.log(u"Trying to back up " + old_file + " to " + new_file, logger.DEBUG) + logger.log(u"Trying to back up {old} to {new}".format(old=old_file, new=new_file), logger.DEBUG) shutil.copy(old_file, new_file) logger.log(u"Backup done", logger.DEBUG) break - except Exception as e: - logger.log(u"Error while trying to back up " + old_file + " to " + new_file + " : " + str(e), - logger.WARNING) + except Exception as error: + logger.log(u"Error while trying to back up {old} to {new} : {msg}".format + (old=old_file, new=new_file, msg=error), logger.WARNING) numTries += 1 time.sleep(1) logger.log(u"Trying again.", logger.DEBUG) if numTries >= 10: - logger.log(u"Unable to back up " + old_file + " to " + new_file + " please do it manually.", logger.ERROR) + logger.log(u"Unable to back up {old} to {new} please do it manually.".format(old=old_file, new=new_file), logger.ERROR) return False return True @@ -1242,7 +1243,7 @@ class RunningProcess(object): class WindowsProcess(object): def __init__(self): - self.mutexname = "nzbtomedia_" + core.PID_FILE.replace('\\', '/') # {D0E858DF-985E-4907-B7FB-8D732C3FC3B9}" + self.mutexname = "nzbtomedia_{pid}".format(pid=core.PID_FILE.replace('\\', '/')) # {D0E858DF-985E-4907-B7FB-8D732C3FC3B9}" if platform.system() == 'Windows': from win32event import CreateMutex from win32api import CloseHandle, GetLastError @@ -1274,7 +1275,7 @@ class PosixProcess(object): def alreadyrunning(self): try: self.lock_socket = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM) - self.lock_socket.bind('\0' + self.pidpath) + self.lock_socket.bind('\0{path}'.format(path=self.pidpath)) self.lasterror = False return
self.lasterror except socket.error as e: diff --git a/core/transcoder/transcoder.py b/core/transcoder/transcoder.py index c5cb43a6..807516f4 100644 --- a/core/transcoder/transcoder.py +++ b/core/transcoder/transcoder.py @@ -129,7 +129,7 @@ def buildCommands(file, newDir, movieName, bitbucket): elif core.CONCAT and re.match("(.+)[cC][dD][0-9]", name): name = re.sub("([\ \.\-\_\=\:]+[cC][dD][0-9])", "", name) if ext == core.VEXTENSION and newDir == dir: # we need to change the name to prevent overwriting itself. - core.VEXTENSION = '-transcoded' + core.VEXTENSION # adds '-transcoded.ext' + core.VEXTENSION = '-transcoded{ext}'.format(ext=core.VEXTENSION) # adds '-transcoded.ext' else: img, data = iteritems(file).next() name = data['name'] @@ -165,7 +165,7 @@ def buildCommands(file, newDir, movieName, bitbucket): if core.VBITRATE: video_cmd.extend(['-b:v', str(core.VBITRATE)]) if core.VRESOLUTION: - video_cmd.extend(['-vf', 'scale=' + core.VRESOLUTION]) + video_cmd.extend(['-vf', 'scale={vres}'.format(vres=core.VRESOLUTION)]) if core.VPRESET: video_cmd.extend(['-preset', core.VPRESET]) if core.VCRF: @@ -222,13 +222,19 @@ def buildCommands(file, newDir, movieName, bitbucket): w_scale = width / float(scale.split(':')[0]) h_scale = height / float(scale.split(':')[1]) if w_scale > h_scale: # widescreen, Scale by width only. - scale = scale.split(':')[0] + ":" + str(int((height / w_scale) / 2) * 2) + scale = "{width}:{height}".format( + width=scale.split(':')[0], + height=int((height / w_scale) / 2) * 2, + ) if w_scale > 1: - video_cmd.extend(['-vf', 'scale=' + scale]) + video_cmd.extend(['-vf', 'scale={width}'.format(width=scale)]) else: # lower or mathcing ratio, scale by height only. 
- scale = str(int((width / h_scale) / 2) * 2) + ":" + scale.split(':')[1] + scale = "{width}:{height}".format( + width=int((width / h_scale) / 2) * 2, + height=scale.split(':')[1], + ) if h_scale > 1: - video_cmd.extend(['-vf', 'scale=' + scale]) + video_cmd.extend(['-vf', 'scale={height}'.format(height=scale)]) if core.VBITRATE: video_cmd.extend(['-b:v', str(core.VBITRATE)]) if core.VPRESET: @@ -242,7 +248,7 @@ def buildCommands(file, newDir, movieName, bitbucket): video_cmd[1] = core.VCODEC if core.VCODEC == 'copy': # force copy. therefore ignore all other video transcoding. video_cmd = ['-c:v', 'copy'] - map_cmd.extend(['-map', '0:' + str(video["index"])]) + map_cmd.extend(['-map', '0:{index}'.format(index=video["index"])]) break # Only one video needed used_audio = 0 @@ -259,40 +265,34 @@ def buildCommands(file, newDir, movieName, bitbucket): audio3 = [] if audio2: # right language and codec... - map_cmd.extend(['-map', '0:' + str(audio2[0]["index"])]) + map_cmd.extend(['-map', '0:{index}'.format(index=audio2[0]["index"])]) a_mapped.extend([audio2[0]["index"]]) bitrate = int(audio2[0].get("bit_rate", 0)) / 1000 channels = int(audio2[0].get("channels", 0)) - audio_cmd.extend(['-c:a:' + str(used_audio), 'copy']) + audio_cmd.extend(['-c:a:{0}'.format(used_audio), 'copy']) elif audio1: # right language wrong codec. 
- map_cmd.extend(['-map', '0:' + str(audio1[0]["index"])]) + map_cmd.extend(['-map', '0:{index}'.format(index=audio1[0]["index"])]) a_mapped.extend([audio1[0]["index"]]) bitrate = int(audio1[0].get("bit_rate", 0)) / 1000 channels = int(audio1[0].get("channels", 0)) - if core.ACODEC: - audio_cmd.extend(['-c:a:' + str(used_audio), core.ACODEC]) - else: - audio_cmd.extend(['-c:a:' + str(used_audio), 'copy']) + audio_cmd.extend(['-c:a:{0}'.format(used_audio), core.ACODEC if core.ACODEC else 'copy']) elif audio3: # just pick the default audio track - map_cmd.extend(['-map', '0:' + str(audio3[0]["index"])]) + map_cmd.extend(['-map', '0:{index}'.format(index=audio3[0]["index"])]) a_mapped.extend([audio3[0]["index"]]) bitrate = int(audio3[0].get("bit_rate", 0)) / 1000 channels = int(audio3[0].get("channels", 0)) - if core.ACODEC: - audio_cmd.extend(['-c:a:' + str(used_audio), core.ACODEC]) - else: - audio_cmd.extend(['-c:a:' + str(used_audio), 'copy']) + audio_cmd.extend(['-c:a:{0}'.format(used_audio), core.ACODEC if core.ACODEC else 'copy']) if core.ACHANNELS and channels and channels > core.ACHANNELS: - audio_cmd.extend(['-ac:a:' + str(used_audio), str(core.ACHANNELS)]) + audio_cmd.extend(['-ac:a:{0}'.format(used_audio), str(core.ACHANNELS)]) if audio_cmd[1] == 'copy': audio_cmd[1] = core.ACODEC if core.ABITRATE and not (core.ABITRATE * 0.9 < bitrate < core.ABITRATE * 1.1): - audio_cmd.extend(['-b:a:' + str(used_audio), str(core.ABITRATE)]) + audio_cmd.extend(['-b:a:{0}'.format(used_audio), str(core.ABITRATE)]) if audio_cmd[1] == 'copy': audio_cmd[1] = core.ACODEC if core.OUTPUTQUALITYPERCENT: - audio_cmd.extend(['-q:a:' + str(used_audio), str(core.OUTPUTQUALITYPERCENT)]) + audio_cmd.extend(['-q:a:{0}'.format(used_audio), str(core.OUTPUTQUALITYPERCENT)]) if audio_cmd[1] == 'copy': audio_cmd[1] = core.ACODEC if audio_cmd[1] in ['aac', 'dts']: @@ -302,40 +302,40 @@ def buildCommands(file, newDir, movieName, bitbucket): used_audio += 1 audio4 = [item for item in audio1 if 
item["codec_name"] in core.ACODEC2_ALLOW] if audio4: # right language and codec. - map_cmd.extend(['-map', '0:' + str(audio4[0]["index"])]) + map_cmd.extend(['-map', '0:{index}'.format(index=audio4[0]["index"])]) a_mapped.extend([audio4[0]["index"]]) bitrate = int(audio4[0].get("bit_rate", 0)) / 1000 channels = int(audio4[0].get("channels", 0)) - audio_cmd2.extend(['-c:a:' + str(used_audio), 'copy']) + audio_cmd2.extend(['-c:a:{0}'.format(used_audio), 'copy']) elif audio1: # right language wrong codec. - map_cmd.extend(['-map', '0:' + str(audio1[0]["index"])]) + map_cmd.extend(['-map', '0:{index}'.format(index=audio1[0]["index"])]) a_mapped.extend([audio1[0]["index"]]) bitrate = int(audio1[0].get("bit_rate", 0)) / 1000 channels = int(audio1[0].get("channels", 0)) if core.ACODEC2: - audio_cmd2.extend(['-c:a:' + str(used_audio), core.ACODEC2]) + audio_cmd2.extend(['-c:a:{0}'.format(used_audio), core.ACODEC2]) else: - audio_cmd2.extend(['-c:a:' + str(used_audio), 'copy']) + audio_cmd2.extend(['-c:a:{0}'.format(used_audio), 'copy']) elif audio3: # just pick the default audio track - map_cmd.extend(['-map', '0:' + str(audio3[0]["index"])]) + map_cmd.extend(['-map', '0:{index}'.format(index=audio3[0]["index"])]) a_mapped.extend([audio3[0]["index"]]) bitrate = int(audio3[0].get("bit_rate", 0)) / 1000 channels = int(audio3[0].get("channels", 0)) if core.ACODEC2: - audio_cmd2.extend(['-c:a:' + str(used_audio), core.ACODEC2]) + audio_cmd2.extend(['-c:a:{0}'.format(used_audio), core.ACODEC2]) else: - audio_cmd2.extend(['-c:a:' + str(used_audio), 'copy']) + audio_cmd2.extend(['-c:a:{0}'.format(used_audio), 'copy']) if core.ACHANNELS2 and channels and channels > core.ACHANNELS2: - audio_cmd2.extend(['-ac:a:' + str(used_audio), str(core.ACHANNELS2)]) + audio_cmd2.extend(['-ac:a:{0}'.format(used_audio), str(core.ACHANNELS2)]) if audio_cmd2[1] == 'copy': audio_cmd2[1] = core.ACODEC2 if core.ABITRATE2 and not (core.ABITRATE2 * 0.9 < bitrate < core.ABITRATE2 * 1.1): - 
audio_cmd2.extend(['-b:a:' + str(used_audio), str(core.ABITRATE2)]) + audio_cmd2.extend(['-b:a:{0}'.format(used_audio), str(core.ABITRATE2)]) if audio_cmd2[1] == 'copy': audio_cmd2[1] = core.ACODEC2 if core.OUTPUTQUALITYPERCENT: - audio_cmd2.extend(['-q:a:' + str(used_audio), str(core.OUTPUTQUALITYPERCENT)]) + audio_cmd2.extend(['-q:a:{0}'.format(used_audio), str(core.OUTPUTQUALITYPERCENT)]) if audio_cmd2[1] == 'copy': audio_cmd2[1] = core.ACODEC2 if audio_cmd2[1] in ['aac', 'dts']: @@ -347,28 +347,28 @@ def buildCommands(file, newDir, movieName, bitbucket): if audio["index"] in a_mapped: continue used_audio += 1 - map_cmd.extend(['-map', '0:' + str(audio["index"])]) + map_cmd.extend(['-map', '0:{index}'.format(index=audio["index"])]) audio_cmd3 = [] bitrate = int(audio.get("bit_rate", 0)) / 1000 channels = int(audio.get("channels", 0)) if audio["codec_name"] in core.ACODEC3_ALLOW: - audio_cmd3.extend(['-c:a:' + str(used_audio), 'copy']) + audio_cmd3.extend(['-c:a:{0}'.format(used_audio), 'copy']) else: if core.ACODEC3: - audio_cmd3.extend(['-c:a:' + str(used_audio), core.ACODEC3]) + audio_cmd3.extend(['-c:a:{0}'.format(used_audio), core.ACODEC3]) else: - audio_cmd3.extend(['-c:a:' + str(used_audio), 'copy']) + audio_cmd3.extend(['-c:a:{0}'.format(used_audio), 'copy']) if core.ACHANNELS3 and channels and channels > core.ACHANNELS3: - audio_cmd3.extend(['-ac:a:' + str(used_audio), str(core.ACHANNELS3)]) + audio_cmd3.extend(['-ac:a:{0}'.format(used_audio), str(core.ACHANNELS3)]) if audio_cmd3[1] == 'copy': audio_cmd3[1] = core.ACODEC3 if core.ABITRATE3 and not (core.ABITRATE3 * 0.9 < bitrate < core.ABITRATE3 * 1.1): - audio_cmd3.extend(['-b:a:' + str(used_audio), str(core.ABITRATE3)]) + audio_cmd3.extend(['-b:a:{0}'.format(used_audio), str(core.ABITRATE3)]) if audio_cmd3[1] == 'copy': audio_cmd3[1] = core.ACODEC3 if core.OUTPUTQUALITYPERCENT > 0: - audio_cmd3.extend(['-q:a:' + str(used_audio), str(core.OUTPUTQUALITYPERCENT)]) + 
audio_cmd3.extend(['-q:a:{0}'.format(used_audio), str(core.OUTPUTQUALITYPERCENT)]) if audio_cmd3[1] == 'copy': audio_cmd3[1] = core.ACODEC3 if audio_cmd3[1] in ['aac', 'dts']: @@ -386,7 +386,7 @@ def buildCommands(file, newDir, movieName, bitbucket): if core.BURN and not subs1 and not burnt and os.path.isfile(file): for subfile in get_subs(file): if lan in os.path.split(subfile)[1]: - video_cmd.extend(['-vf', 'subtitles=' + subfile]) + video_cmd.extend(['-vf', 'subtitles={subs}'.format(subs=subfile)]) burnt = 1 for sub in subs1: if core.BURN and not burnt and os.path.isfile(inputFile): @@ -395,11 +395,11 @@ def buildCommands(file, newDir, movieName, bitbucket): if subStreams[index]["index"] == sub["index"]: subloc = index break - video_cmd.extend(['-vf', 'subtitles=' + inputFile + ':si=' + str(subloc)]) + video_cmd.extend(['-vf', 'subtitles={sub}:si={loc}'.format(sub=inputFile, loc=subloc)]) burnt = 1 if not core.ALLOWSUBS: break - map_cmd.extend(['-map', '0:' + str(sub["index"])]) + map_cmd.extend(['-map', '0:{index}'.format(index=sub["index"])]) s_mapped.extend([sub["index"]]) if core.SINCLUDE: @@ -408,7 +408,7 @@ def buildCommands(file, newDir, movieName, bitbucket): break if sub["index"] in s_mapped: continue - map_cmd.extend(['-map', '0:' + str(sub["index"])]) + map_cmd.extend(['-map', '0:{index}'.format(index=sub["index"])]) s_mapped.extend([sub["index"]]) if core.OUTPUTFASTSTART: @@ -430,9 +430,10 @@ def buildCommands(file, newDir, movieName, bitbucket): continue lan = os.path.splitext(os.path.splitext(subfile)[0])[1] command.extend(['-i', subfile]) - meta_cmd.extend(['-metadata:s:s:' + str(len(s_mapped) + n), 'language=' + lan[1:]]) + meta_cmd.extend(['-metadata:s:s:{x}'.format(x=len(s_mapped) + n), + 'language={lang}'.format(lang=lan[1:])]) n += 1 - map_cmd.extend(['-map', str(n) + ':0']) + map_cmd.extend(['-map', '{x}:0'.format(x=n)]) if not core.ALLOWSUBS or (not s_mapped and not n): sub_cmd.extend(['-sn']) @@ -500,8 +501,8 @@ def extract_subs(file, 
newfilePath, bitbucket): if os.path.isfile(outputFile): outputFile = os.path.join(subdir, "{0}.{1}.{2}.srt".format(name, lan, n)) - command = [core.FFMPEG, '-loglevel', 'warning', '-i', file, '-vn', '-an', '-codec:' + str(idx), 'srt', - outputFile] + command = [core.FFMPEG, '-loglevel', 'warning', '-i', file, '-vn', '-an', + '-codec:{index}'.format(index=idx), 'srt', outputFile] if platform.system() != 'Windows': command = core.NICENESS + command @@ -604,7 +605,9 @@ def ripISO(item, newDir, bitbucket): if core.CONCAT: combined.extend(concat) continue - name = '{0}.cd{1}'.format(os.path.splitext(os.path.split(item)[1])[0], str(n + 1)) + name = '{name}.cd{x}'.format( + name=os.path.splitext(os.path.split(item)[1])[0], x=n + 1 + ) newFiles.append({item: {'name': name, 'files': concat}}) if core.CONCAT: name = os.path.splitext(os.path.split(item)[1])[0] @@ -627,14 +630,14 @@ def combineVTS(vtsPath): while True: vtsName = 'VTS_{0:02d}_{1:d}.VOB'.format(n + 1, m) if os.path.isfile(os.path.join(vtsPath, vtsName)): - concat = concat + os.path.join(vtsPath, vtsName) + '|' + concat += '{file}|'.format(file=os.path.join(vtsPath, vtsName)) m += 1 else: break if not concat: break if core.CONCAT: - combined = combined + concat + '|' + combined += '{files}|'.format(files=concat) continue newFiles.append('concat:{0}'.format(concat[:-1])) if core.CONCAT: @@ -650,7 +653,7 @@ def combineCD(combine): files = [file for file in combine if n + 1 == int(re.match(".+[cC][dD]([0-9]+).", file).groups()[0]) and item in file] if files: - concat = concat + files[0] + '|' + concat += '{file}|'.format(file=files[0]) else: break if concat: @@ -661,7 +664,7 @@ def combineCD(combine): def print_cmd(command): cmd = "" for item in command: - cmd = cmd + " " + str(item) + cmd = "{cmd} {item}".format(cmd=cmd, item=item) logger.debug("calling command:{0}".format(cmd)) diff --git a/core/transmissionrpc/client.py b/core/transmissionrpc/client.py index 04c85ac0..66353762 100644 --- 
a/core/transmissionrpc/client.py +++ b/core/transmissionrpc/client.py @@ -141,15 +141,14 @@ class Client(object): else: self._query_timeout = DEFAULT_TIMEOUT urlo = urlparse(address) - if urlo.scheme == '': - base_url = 'http://' + address + ':' + str(port) - self.url = base_url + '/transmission/rpc/' + if not urlo.scheme: + self.url = 'http://{host}:{port}/transmission/rpc/'.format(host=address, port=port) else: if urlo.port: - self.url = urlo.scheme + '://' + urlo.hostname + ':' + str(urlo.port) + urlo.path + self.url = '{url.scheme}://{url.hostname}:{url.port}{url.path}'.format(url=urlo) else: - self.url = urlo.scheme + '://' + urlo.hostname + urlo.path - LOGGER.info('Using custom URL "' + self.url + '".') + self.url = '{url.scheme}://{url.hostname}{url.path}'.format(url=urlo) + LOGGER.info('Using custom URL {url!r}.'.format(url=self.url)) if urlo.username and urlo.password: user = urlo.username password = urlo.password @@ -256,7 +255,7 @@ class Client(object): try: data = json.loads(http_data) except ValueError as error: - LOGGER.error('Error: ' + str(error)) + LOGGER.error('Error: {msg}'.format(msg=error)) LOGGER.error('Request: {request!r}'.format(request=query)) LOGGER.error('HTTP data: {data!r}'.format(data=http_data)) raise diff --git a/core/transmissionrpc/utils.py b/core/transmissionrpc/utils.py index 9381edac..cff602ee 100644 --- a/core/transmissionrpc/utils.py +++ b/core/transmissionrpc/utils.py @@ -31,7 +31,7 @@ def format_speed(size): Format bytes per second speed into IEC prefixes, B/s, KiB/s, MiB/s ... 
""" (size, unit) = format_size(size) - return size, unit + '/s' + return size, '{unit}/s'.format(unit=unit) def format_timedelta(delta): diff --git a/core/utorrent/client.py b/core/utorrent/client.py index f8ddc80d..02c29583 100644 --- a/core/utorrent/client.py +++ b/core/utorrent/client.py @@ -127,7 +127,7 @@ class UTorrentClient(object): def _action(self, params, body=None, content_type=None): # about token, see https://github.com/bittorrent/webui/wiki/TokenSystem - url = self.base_url + '?token=' + self.token + '&' + urlencode(params) + url = '{url}?token={token}&{params}'.format(url=self.base_url, token=self.token, params=urlencode(params)) request = Request(url) if body: diff --git a/core/utorrent/upload.py b/core/utorrent/upload.py index f8db659c..948e5491 100644 --- a/core/utorrent/upload.py +++ b/core/utorrent/upload.py @@ -38,7 +38,7 @@ class MultiPartForm(object): # Once the list is built, return a string where each # line is separated by '\r\n'. parts = [] - part_boundary = '--' + self.boundary + part_boundary = '--{boundary}'.format(boundary=self.boundary) # Add the form fields parts.extend( @@ -64,6 +64,6 @@ class MultiPartForm(object): # Flatten the list and add closing boundary marker, # then return CR+LF separated data flattened = list(itertools.chain(*parts)) - flattened.append('--' + self.boundary + '--') + flattened.append('--{boundary}--'.format(boundary=self.boundary)) flattened.append('') return '\r\n'.join(flattened) diff --git a/core/versionCheck.py b/core/versionCheck.py index 84e14947..79824df6 100644 --- a/core/versionCheck.py +++ b/core/versionCheck.py @@ -68,7 +68,7 @@ class CheckVersion(object): logger.log(u"Version checking is disabled, not checking for the newest version") return False - logger.log(u"Checking if " + self.install_type + " needs an update") + logger.log(u"Checking if {install} needs an update".format(install=self.install_type)) if not self.updater.need_update(): core.NEWEST_VERSION_STRING = None logger.log(u"No update
needed") @@ -113,18 +113,19 @@ class GitUpdateManager(UpdateManager): test_cmd = 'version' if core.GIT_PATH: - main_git = '"' + core.GIT_PATH + '"' + main_git = '"{git}"'.format(git=core.GIT_PATH) else: main_git = 'git' - logger.log(u"Checking if we can use git commands: " + main_git + ' ' + test_cmd, logger.DEBUG) + logger.log(u"Checking if we can use git commands: {git} {cmd}".format + (git=main_git, cmd=test_cmd), logger.DEBUG) output, err, exit_status = self._run_git(main_git, test_cmd) if exit_status == 0: - logger.log(u"Using: " + main_git, logger.DEBUG) + logger.log(u"Using: {git}".format(git=main_git), logger.DEBUG) return main_git else: - logger.log(u"Not using: " + main_git, logger.DEBUG) + logger.log(u"Not using: {git}".format(git=main_git), logger.DEBUG) # trying alternatives @@ -142,18 +143,20 @@ class GitUpdateManager(UpdateManager): logger.log(u"Trying known alternative git locations", logger.DEBUG) for cur_git in alternative_git: - logger.log(u"Checking if we can use git commands: " + cur_git + ' ' + test_cmd, logger.DEBUG) + logger.log(u"Checking if we can use git commands: {git} {cmd}".format + (git=cur_git, cmd=test_cmd), logger.DEBUG) output, err, exit_status = self._run_git(cur_git, test_cmd) if exit_status == 0: - logger.log(u"Using: " + cur_git, logger.DEBUG) + logger.log(u"Using: {git}".format(git=cur_git), logger.DEBUG) return cur_git else: - logger.log(u"Not using: " + cur_git, logger.DEBUG) + logger.log(u"Not using: {git}".format(git=cur_git), logger.DEBUG) # Still haven't found a working git - logger.debug( - 'Unable to find your git executable - Set git_path in your autoProcessMedia.cfg OR delete your .git folder and run from source to enable updates.') + logger.debug('Unable to find your git executable - ' + 'Set git_path in your autoProcessMedia.cfg OR ' + 'delete your .git folder and run from source to enable updates.') return None @@ -167,10 +170,11 @@ class GitUpdateManager(UpdateManager): exit_status = 1 return output, err, 
exit_status - cmd = git_path + ' ' + args + cmd = '{git} {args}'.format(git=git_path, args=args) try: - logger.log(u"Executing " + cmd + " with your shell in " + core.PROGRAM_DIR, logger.DEBUG) + logger.log(u"Executing {cmd} with your shell in {directory}".format + (cmd=cmd, directory=core.PROGRAM_DIR), logger.DEBUG) p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True, cwd=core.PROGRAM_DIR) output, err = p.communicate() @@ -179,29 +183,23 @@ class GitUpdateManager(UpdateManager): if output: output = output.strip() if core.LOG_GIT: - logger.log(u"git output: " + output, logger.DEBUG) + logger.log(u"git output: {output}".format(output=output), logger.DEBUG) except OSError: - logger.log(u"Command " + cmd + " didn't work") + logger.log(u"Command {cmd} didn't work".format(cmd=cmd)) exit_status = 1 + exit_status = 128 if ('fatal:' in output) or err else exit_status if exit_status == 0: - logger.log(cmd + u" : returned successful", logger.DEBUG) + logger.log(u"{cmd} : returned successful".format(cmd=cmd), logger.DEBUG) exit_status = 0 - - elif exit_status == 1: - if core.LOG_GIT: - logger.log(cmd + u" returned : " + output, logger.DEBUG) - exit_status = 1 - - elif exit_status == 128 or 'fatal:' in output or err: - if core.LOG_GIT: - logger.log(cmd + u" returned : " + output, logger.DEBUG) - exit_status = 128 - + elif core.LOG_GIT and exit_status in (1, 128): + logger.log(u"{cmd} returned : {output}".format + (cmd=cmd, output=output), logger.DEBUG) else: if core.LOG_GIT: - logger.log(cmd + u" returned : " + output + u", treat as error for now", logger.DEBUG) + logger.log(u"{cmd} returned : {output}, treat as error for now".format + (cmd=cmd, output=output), logger.DEBUG) exit_status = 1 return output, err, exit_status @@ -285,21 +283,18 @@ class GitUpdateManager(UpdateManager): logger.log(u"git didn't return numbers for behind and ahead, not using it", logger.DEBUG) return - logger.log( - u"cur_commit = " + 
str(self._cur_commit_hash) + u" % (newest_commit)= " + str(self._newest_commit_hash) + - u", num_commits_behind = " + str(self._num_commits_behind) + u", num_commits_ahead = " + - str(self._num_commits_ahead), logger.DEBUG) + logger.log(u"cur_commit = {current} % (newest_commit)= {new}, " + u"num_commits_behind = {x}, num_commits_ahead = {y}".format + (current=self._cur_commit_hash, new=self._newest_commit_hash, + x=self._num_commits_behind, y=self._num_commits_ahead), logger.DEBUG) def set_newest_text(self): if self._num_commits_ahead: - logger.log(u"Local branch is ahead of " + self.branch + ". Automatic update not possible.", logger.ERROR) - elif self._num_commits_behind > 0: - newest_text = 'There is a newer version available ' - newest_text += " (you're " + str(self._num_commits_behind) + " commit" - if self._num_commits_behind > 1: - newest_text += 's' - newest_text += ' behind)' - logger.log(newest_text, logger.MESSAGE) + logger.log(u"Local branch is ahead of {branch}. Automatic update not possible.".format + (branch=self.branch), logger.ERROR) + elif self._num_commits_behind: + logger.log(u"There is a newer version available (you're {x} commit{s} behind)".format + (x=self._num_commits_behind, s=u's' if self._num_commits_behind > 1 else u''), logger.MESSAGE) else: return @@ -313,8 +308,8 @@ class GitUpdateManager(UpdateManager): else: try: self._check_github_for_update() - except Exception as e: - logger.log(u"Unable to contact github, can't check for update: " + repr(e), logger.ERROR) + except Exception as error: + logger.log(u"Unable to contact github, can't check for update: {msg!r}".format(msg=error), logger.ERROR) return False if self._num_commits_behind > 0: @@ -328,7 +323,7 @@ class GitUpdateManager(UpdateManager): on the call's success. 
""" - output, err, exit_status = self._run_git(self._git_path, 'pull origin ' + self.branch) # @UnusedVariable + output, err, exit_status = self._run_git(self._git_path, 'pull origin {branch}'.format(branch=self.branch)) # @UnusedVariable if exit_status == 0: return True @@ -357,8 +352,8 @@ class SourceUpdateManager(UpdateManager): try: with open(version_file, 'r') as fp: self._cur_commit_hash = fp.read().strip(' \n\r') - except EnvironmentError as e: - logger.log(u"Unable to open 'version.txt': " + str(e), logger.DEBUG) + except EnvironmentError as error: + logger.log(u"Unable to open 'version.txt': {msg}".format(msg=error), logger.DEBUG) if not self._cur_commit_hash: self._cur_commit_hash = None @@ -371,8 +366,8 @@ class SourceUpdateManager(UpdateManager): try: self._check_github_for_update() - except Exception as e: - logger.log(u"Unable to contact github, can't check for update: " + repr(e), logger.ERROR) + except Exception as error: + logger.log(u"Unable to contact github, can't check for update: {msg!r}".format(msg=error), logger.ERROR) return False if not self._cur_commit_hash or self._num_commits_behind > 0: @@ -418,9 +413,8 @@ class SourceUpdateManager(UpdateManager): # when _cur_commit_hash doesn't match anything _num_commits_behind == 100 self._num_commits_behind += 1 - logger.log( - u"cur_commit = " + str(self._cur_commit_hash) + u" % (newest_commit)= " + str(self._newest_commit_hash) + - u", num_commits_behind = " + str(self._num_commits_behind), logger.DEBUG) + logger.log(u"cur_commit = {current} % (newest_commit)= {new}, num_commits_behind = {x}".format + (current=self._cur_commit_hash, new=self._newest_commit_hash, x=self._num_commits_behind), logger.DEBUG) def set_newest_text(self): @@ -430,12 +424,8 @@ class SourceUpdateManager(UpdateManager): if not self._cur_commit_hash: logger.log(u"Unknown current version number, don't know if we should update or not", logger.ERROR) elif self._num_commits_behind > 0: - newest_text = 'There is a newer version 
available' - newest_text += " (you're " + str(self._num_commits_behind) + " commit" - if self._num_commits_behind > 1: - newest_text += "s" - newest_text += " behind)" - logger.log(newest_text, logger.MESSAGE) + logger.log(u"There is a newer version available (you're {x} commit{s} behind)".format + (x=self._num_commits_behind, s=u's' if self._num_commits_behind > 1 else u''), logger.MESSAGE) else: return @@ -443,8 +433,8 @@ class SourceUpdateManager(UpdateManager): """ Downloads the latest source tarball from github and installs it over the existing version. """ - base_url = 'https://github.com/' + self.github_repo_user + '/' + self.github_repo - tar_download_url = base_url + '/tarball/' + self.branch + tar_download_url = 'https://github.com/{org}/{repo}/tarball/{branch}'.format( + org=self.github_repo_user, repo=self.github_repo, branch=self.branch) version_path = os.path.join(core.PROGRAM_DIR, u'version.txt') try: @@ -452,45 +442,48 @@ class SourceUpdateManager(UpdateManager): sb_update_dir = os.path.join(core.PROGRAM_DIR, u'sb-update') if os.path.isdir(sb_update_dir): - logger.log(u"Clearing out update folder " + sb_update_dir + " before extracting") + logger.log(u"Clearing out update folder {dir} before extracting".format(dir=sb_update_dir)) shutil.rmtree(sb_update_dir) - logger.log(u"Creating update folder " + sb_update_dir + " before extracting") + logger.log(u"Creating update folder {dir} before extracting".format(dir=sb_update_dir)) os.makedirs(sb_update_dir) # retrieve file - logger.log(u"Downloading update from " + repr(tar_download_url)) + logger.log(u"Downloading update from {url!r}".format(url=tar_download_url)) tar_download_path = os.path.join(sb_update_dir, u'nzbtomedia-update.tar') urllib.urlretrieve(tar_download_url, tar_download_path) if not os.path.isfile(tar_download_path): - logger.log(u"Unable to retrieve new version from " + tar_download_url + ", can't update", logger.ERROR) + logger.log(u"Unable to retrieve new version from {url}, can't 
update".format + (url=tar_download_url), logger.ERROR) return False if not tarfile.is_tarfile(tar_download_path): - logger.log(u"Retrieved version from " + tar_download_url + " is corrupt, can't update", logger.ERROR) + logger.log(u"Retrieved version from {url} is corrupt, can't update".format + (url=tar_download_url), logger.ERROR) return False # extract to sb-update dir - logger.log(u"Extracting file " + tar_download_path) + logger.log(u"Extracting file {path}".format(path=tar_download_path)) tar = tarfile.open(tar_download_path) tar.extractall(sb_update_dir) tar.close() # delete .tar.gz - logger.log(u"Deleting file " + tar_download_path) + logger.log(u"Deleting file {path}".format(path=tar_download_path)) os.remove(tar_download_path) # find update dir name update_dir_contents = [x for x in os.listdir(sb_update_dir) if os.path.isdir(os.path.join(sb_update_dir, x))] if len(update_dir_contents) != 1: - logger.log(u"Invalid update data, update failed: " + str(update_dir_contents), logger.ERROR) + logger.log(u"Invalid update data, update failed: {0}".format(update_dir_contents), logger.ERROR) return False content_dir = os.path.join(sb_update_dir, update_dir_contents[0]) # walk temp folder and move files to main folder - logger.log(u"Moving files from " + content_dir + " to " + core.PROGRAM_DIR) + logger.log(u"Moving files from {source} to {destination}".format + (source=content_dir, destination=core.PROGRAM_DIR)) for dirname, dirnames, filenames in os.walk(content_dir): # @UnusedVariable dirname = dirname[len(content_dir) + 1:] for curfile in filenames: @@ -505,8 +498,9 @@ class SourceUpdateManager(UpdateManager): os.chmod(new_path, stat.S_IWRITE) os.remove(new_path) os.renames(old_path, new_path) - except Exception as e: - logger.log(u"Unable to update " + new_path + ': ' + str(e), logger.DEBUG) + except Exception as error: + logger.log(u"Unable to update {path}: {msg}".format + (path=new_path, msg=error), logger.DEBUG) os.remove(old_path) # Trash the updated file 
without moving in new path continue @@ -518,13 +512,15 @@ class SourceUpdateManager(UpdateManager): try: with open(version_path, 'w') as ver_file: ver_file.write(self._newest_commit_hash) - except EnvironmentError as e: - logger.log(u"Unable to write version file, update not complete: " + str(e), logger.ERROR) + except EnvironmentError as error: + logger.log(u"Unable to write version file, update not complete: {msg}".format + (msg=error), logger.ERROR) return False - except Exception as e: - logger.log(u"Error while trying to update: " + str(e), logger.ERROR) - logger.log(u"Traceback: " + traceback.format_exc(), logger.DEBUG) + except Exception as error: + logger.log(u"Error while trying to update: {msg}".format + (msg=error), logger.ERROR) + logger.log(u"Traceback: {error}".format(error=traceback.format_exc()), logger.DEBUG) return False return True diff --git a/nzbToMedia.py b/nzbToMedia.py index a3e6363c..8be0110b 100755 --- a/nzbToMedia.py +++ b/nzbToMedia.py @@ -758,8 +758,8 @@ def main(args, section=None): os.path.basename(dirName)) ) - clientAgent = str(core.DOWNLOADINFO[0].get('client_agent', '')) - download_id = str(core.DOWNLOADINFO[0].get('input_id', '')) + clientAgent = text_type(core.DOWNLOADINFO[0].get('client_agent', '')) + download_id = text_type(core.DOWNLOADINFO[0].get('input_id', '')) if clientAgent and clientAgent.lower() not in core.NZB_CLIENTS: continue From c5b60adb33d091a57dd9bb7139baaeb0929ddb86 Mon Sep 17 00:00:00 2001 From: labrys Date: Mon, 6 Jun 2016 04:22:38 -0400 Subject: [PATCH 51/82] Fix spelling --- TorrentToMedia.py | 4 ++-- core/transcoder/transcoder.py | 2 +- nzbToCouchPotato.py | 6 +++--- nzbToMylar.py | 2 +- nzbToNzbDrone.py | 8 ++++---- nzbToSickBeard.py | 8 ++++---- 6 files changed, 15 insertions(+), 15 deletions(-) diff --git a/TorrentToMedia.py b/TorrentToMedia.py index ad61a6a2..2b0ef0c7 100755 --- a/TorrentToMedia.py +++ b/TorrentToMedia.py @@ -94,7 +94,7 @@ def processTorrent(inputDirectory, inputName, inputCategory, 
inputHash, inputID, if clientAgent != 'manual': core.pause_torrent(clientAgent, inputHash, inputID, inputName) - # Incase input is not directory, make sure to create one. + # In case input is not directory, make sure to create one. # This way Processing is isolated. if not os.path.isdir(os.path.join(inputDirectory, inputName)): basename = os.path.basename(inputDirectory) @@ -170,7 +170,7 @@ def processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID, logger.debug("Looking for files with modified/created dates less than 5 minutes old.") if (mtime_lapse < datetime.timedelta(minutes=5)) or (ctime_lapse < datetime.timedelta(minutes=5)): foundFile = True - logger.debug("Found file {0} with date modifed/created less than 5 minutes ago.".format(fullFileName)) + logger.debug("Found file {0} with date modified/created less than 5 minutes ago.".format(fullFileName)) else: continue # This file has not been recently moved or created, skip it diff --git a/core/transcoder/transcoder.py b/core/transcoder/transcoder.py index 807516f4..3356c6ef 100644 --- a/core/transcoder/transcoder.py +++ b/core/transcoder/transcoder.py @@ -228,7 +228,7 @@ def buildCommands(file, newDir, movieName, bitbucket): ) if w_scale > 1: video_cmd.extend(['-vf', 'scale={width}'.format(width=scale)]) - else: # lower or mathcing ratio, scale by height only. + else: # lower or matching ratio, scale by height only. scale = "{width}:{height}".format( width=int((width / h_scale) / 2) * 2, height=scale.split(':')[1], diff --git a/nzbToCouchPotato.py b/nzbToCouchPotato.py index 8b8f7b46..32416471 100755 --- a/nzbToCouchPotato.py +++ b/nzbToCouchPotato.py @@ -134,7 +134,7 @@ # create a duplicate, or replace the original (0, 1). # -# set to 1 to cretae a new file or 0 to replace the original +# set to 1 to create a new file or 0 to replace the original # duplicate=1 # ignore extensions. @@ -169,12 +169,12 @@ # allSubLanguages (0,1). # -# allSubLanguages. 1 will keep all exisiting sub languages. 
0 will discare those not in your list above. +# allSubLanguages. 1 will keep all existing sub languages. 0 will discard those not in your list above. # allSubLanguages=0 # embedSubs (0,1). # -# embedSubs. 1 will embded external sub/srt subs into your video if this is supported. +# embedSubs. 1 will embed external sub/srt subs into your video if this is supported. # embedSubs=1 # burnInSubtitle (0,1). diff --git a/nzbToMylar.py b/nzbToMylar.py index eba87a71..5d566123 100755 --- a/nzbToMylar.py +++ b/nzbToMylar.py @@ -35,7 +35,7 @@ # Mylar host. # -# The ipaddress for your Mylar server. e.g For the Same system use localhost or 127.0.0.1 +# The ip address for your Mylar server. e.g For the Same system use localhost or 127.0.0.1 # myhost=localhost # Mylar port. diff --git a/nzbToNzbDrone.py b/nzbToNzbDrone.py index ac2e6d5f..549bb9e3 100755 --- a/nzbToNzbDrone.py +++ b/nzbToNzbDrone.py @@ -39,7 +39,7 @@ # NzbDrone host. # -# The ipaddress for your NzbDrone/Sonarr server. e.g For the Same system use localhost or 127.0.0.1 +# The ip address for your NzbDrone/Sonarr server. e.g For the Same system use localhost or 127.0.0.1 # ndhost=localhost # NzbDrone port. @@ -124,7 +124,7 @@ # create a duplicate, or replace the original (0, 1). # -# set to 1 to cretae a new file or 0 to replace the original +# set to 1 to create a new file or 0 to replace the original # duplicate=1 # ignore extensions. @@ -159,12 +159,12 @@ # allSubLanguages (0,1). # -# allSubLanguages. 1 will keep all exisiting sub languages. 0 will discare those not in your list above. +# allSubLanguages. 1 will keep all existing sub languages. 0 will discard those not in your list above. # allSubLanguages = 0 # embedSubs (0,1). # -# embedSubs. 1 will embded external sub/srt subs into your video if this is supported. +# embedSubs. 1 will embed external sub/srt subs into your video if this is supported. # embedSubs = 1 # burnInSubtitle (0,1). 
diff --git a/nzbToSickBeard.py b/nzbToSickBeard.py index 92474ea5..5a0a03a6 100755 --- a/nzbToSickBeard.py +++ b/nzbToSickBeard.py @@ -39,7 +39,7 @@ # SickBeard host. # -# The ipaddress for your SickBeard/SickRage server. e.g For the Same system use localhost or 127.0.0.1 +# The ip address for your SickBeard/SickRage server. e.g For the Same system use localhost or 127.0.0.1 # sbhost=localhost # SickBeard port. @@ -137,7 +137,7 @@ # create a duplicate, or replace the original (0, 1). # -# set to 1 to cretae a new file or 0 to replace the original +# set to 1 to create a new file or 0 to replace the original # duplicate=1 # ignore extensions. @@ -172,12 +172,12 @@ # allSubLanguages (0,1). # -# allSubLanguages. 1 will keep all exisiting sub languages. 0 will discare those not in your list above. +# allSubLanguages. 1 will keep all existing sub languages. 0 will discard those not in your list above. # allSubLanguages=0 # embedSubs (0,1). # -# embedSubs. 1 will embded external sub/srt subs into your video if this is supported. +# embedSubs. 1 will embed external sub/srt subs into your video if this is supported. # embedSubs=1 # burnInSubtitle (0,1). 
From 1111074dc33b23d408283629c6a406bb829d637c Mon Sep 17 00:00:00 2001 From: Labrys Date: Mon, 6 Jun 2016 12:08:03 -0400 Subject: [PATCH 52/82] Update beets to 1.3.18: Dependencies: * PyYAML 3.11 * Unidecode 0.4.19 * beets 1.3.18 * colorama 0.3.7 * enum34 1.1.6 * jellyfish 0.5.4 * munkres 1.0.7 * musicbrainzngs 0.6 * mutagen 1.32 --- libs/beets/__init__.py | 32 +- libs/beets/art.py | 200 + libs/beets/autotag/__init__.py | 152 +- libs/beets/autotag/hooks.py | 119 +- libs/beets/autotag/match.py | 187 +- libs/beets/autotag/mb.py | 87 +- libs/beets/config_default.yaml | 34 +- libs/beets/dbcore/__init__.py | 11 +- libs/beets/dbcore/db.py | 384 +- libs/beets/dbcore/query.py | 430 +- libs/beets/dbcore/queryparse.py | 250 + libs/beets/dbcore/types.py | 133 +- libs/beets/importer.py | 1900 ++-- libs/beets/library.py | 1150 +- libs/beets/logging.py | 133 + libs/beets/mediafile.py | 920 +- libs/beets/plugins.py | 246 +- libs/beets/ui/__init__.py | 838 +- libs/beets/ui/commands.py | 1251 ++- libs/beets/ui/migrate.py | 401 - libs/beets/util/__init__.py | 422 +- libs/beets/util/artresizer.py | 178 +- libs/beets/util/bluelet.py | 40 +- libs/beets/util/confit.py | 833 +- libs/beets/util/enumeration.py | 179 +- libs/beets/util/functemplate.py | 50 +- libs/beets/util/hidden.py | 88 + libs/beets/util/pipeline.py | 111 +- libs/beets/vfs.py | 7 +- libs/beetsplug/__init__.py | 22 + libs/beetsplug/acousticbrainz.py | 165 + libs/beetsplug/badfiles.py | 120 + libs/beetsplug/bench.py | 109 + libs/beetsplug/bpd/__init__.py | 1193 +++ libs/beetsplug/bpd/gstplayer.py | 223 + libs/beetsplug/bpm.py | 87 + libs/beetsplug/bucket.py | 243 + libs/beetsplug/chroma.py | 308 + libs/beetsplug/convert.py | 449 + libs/beetsplug/cue.py | 57 + libs/beetsplug/discogs.py | 350 + libs/beetsplug/duplicates.py | 337 + libs/beetsplug/edit.py | 392 + libs/beetsplug/embedart.py | 154 + libs/beetsplug/embyupdate.py | 135 + libs/beetsplug/export.py | 151 + libs/beetsplug/fetchart.py | 861 ++ 
libs/beetsplug/filefilter.py | 78 + libs/beetsplug/freedesktop.py | 37 + libs/beetsplug/fromfilename.py | 173 + libs/beetsplug/ftintitle.py | 169 + libs/beetsplug/fuzzy.py | 48 + libs/beetsplug/hook.py | 108 + libs/beetsplug/ihate.py | 82 + libs/beetsplug/importadded.py | 134 + libs/beetsplug/importfeeds.py | 149 + libs/beetsplug/info.py | 241 + libs/beetsplug/inline.py | 124 + libs/beetsplug/ipfs.py | 286 + libs/beetsplug/keyfinder.py | 87 + libs/beetsplug/lastgenre/__init__.py | 425 + libs/beetsplug/lastgenre/genres-tree.yaml | 749 ++ libs/beetsplug/lastgenre/genres.txt | 1534 +++ libs/beetsplug/lastimport.py | 249 + libs/beetsplug/lyrics.py | 760 ++ libs/beetsplug/mbcollection.py | 114 + libs/beetsplug/mbsubmit.py | 60 + libs/beetsplug/mbsync.py | 167 + libs/beetsplug/metasync/__init__.py | 142 + libs/beetsplug/metasync/amarok.py | 110 + libs/beetsplug/metasync/itunes.py | 121 + libs/beetsplug/missing.py | 146 + libs/beetsplug/mpdstats.py | 368 + libs/beetsplug/mpdupdate.py | 128 + libs/beetsplug/permissions.py | 102 + libs/beetsplug/play.py | 155 + libs/beetsplug/plexupdate.py | 92 + libs/beetsplug/random.py | 81 + libs/beetsplug/replaygain.py | 953 ++ libs/beetsplug/rewrite.py | 75 + libs/beetsplug/scrub.py | 146 + libs/beetsplug/smartplaylist.py | 202 + libs/beetsplug/spotify.py | 176 + libs/beetsplug/the.py | 100 + libs/beetsplug/thumbnails.py | 289 + libs/beetsplug/types.py | 52 + libs/beetsplug/web/__init__.py | 328 + libs/beetsplug/web/static/backbone.js | 1158 ++ libs/beetsplug/web/static/beets.css | 160 + libs/beetsplug/web/static/beets.js | 314 + libs/beetsplug/web/static/jquery.js | 9266 +++++++++++++++++ libs/beetsplug/web/static/underscore.js | 977 ++ libs/beetsplug/web/templates/index.html | 98 + libs/beetsplug/zero.py | 140 + libs/colorama/__init__.py | 7 + libs/colorama/ansi.py | 102 + libs/colorama/ansitowin32.py | 236 + libs/colorama/initialise.py | 82 + libs/colorama/win32.py | 154 + libs/colorama/winterm.py | 162 + libs/enum/LICENSE | 32 + 
libs/enum/README | 3 + libs/enum/__init__.py | 837 ++ libs/jellyfish/__init__.py | 4 + libs/jellyfish/_jellyfish.py | 489 + libs/jellyfish/cjellyfish.pyd | Bin 0 -> 26624 bytes libs/jellyfish/compat.py | 13 + libs/jellyfish/porter.py | 218 + libs/jellyfish/test.py | 213 + libs/munkres.py | 786 ++ libs/musicbrainzngs/__init__.py | 2 + libs/musicbrainzngs/caa.py | 177 + libs/musicbrainzngs/compat.py | 62 + libs/musicbrainzngs/mbxml.py | 821 ++ libs/musicbrainzngs/musicbrainz.py | 1337 +++ libs/musicbrainzngs/util.py | 44 + libs/mutagen/__init__.py | 248 +- libs/mutagen/_compat.py | 86 + libs/mutagen/_constants.py | 2 + libs/mutagen/_file.py | 255 + libs/mutagen/_mp3util.py | 420 + libs/mutagen/_tags.py | 124 + libs/mutagen/_toolsutil.py | 231 + libs/mutagen/_util.py | 545 +- libs/mutagen/_vorbis.py | 156 +- libs/mutagen/aac.py | 410 + libs/mutagen/aiff.py | 357 + libs/mutagen/apev2.py | 437 +- libs/mutagen/asf.py | 704 -- libs/mutagen/asf/__init__.py | 319 + libs/mutagen/asf/_attrs.py | 438 + libs/mutagen/asf/_objects.py | 460 + libs/mutagen/asf/_util.py | 315 + libs/mutagen/easyid3.py | 52 +- libs/mutagen/easymp4.py | 47 +- libs/mutagen/flac.py | 289 +- libs/mutagen/{id3.py => id3/__init__.py} | 900 +- .../mutagen/{_id3frames.py => id3/_frames.py} | 372 +- libs/mutagen/{_id3specs.py => id3/_specs.py} | 377 +- libs/mutagen/{_id3util.py => id3/_util.py} | 121 +- libs/mutagen/m4a.py | 487 +- libs/mutagen/monkeysaudio.py | 18 +- libs/mutagen/mp3.py | 213 +- libs/mutagen/mp4.py | 822 -- libs/mutagen/mp4/__init__.py | 1023 ++ libs/mutagen/mp4/_as_entry.py | 542 + libs/mutagen/mp4/_atom.py | 194 + libs/mutagen/mp4/_util.py | 21 + libs/mutagen/musepack.py | 81 +- libs/mutagen/ogg.py | 197 +- libs/mutagen/oggflac.py | 90 +- libs/mutagen/oggopus.py | 71 +- libs/mutagen/oggspeex.py | 57 +- libs/mutagen/oggtheora.py | 60 +- libs/mutagen/oggvorbis.py | 58 +- libs/mutagen/optimfrog.py | 21 +- libs/mutagen/smf.py | 203 + libs/mutagen/trueaudio.py | 21 +- libs/mutagen/wavpack.py | 
88 +- libs/unidecode/__init__.py | 54 +- libs/unidecode/util.py | 58 + libs/unidecode/x000.py | 163 +- libs/unidecode/x020.py | 2 +- libs/unidecode/x021.py | 42 +- libs/unidecode/x04e.py | 2 +- 165 files changed, 48385 insertions(+), 7424 deletions(-) create mode 100644 libs/beets/art.py create mode 100644 libs/beets/dbcore/queryparse.py create mode 100644 libs/beets/logging.py delete mode 100644 libs/beets/ui/migrate.py create mode 100644 libs/beets/util/hidden.py create mode 100644 libs/beetsplug/__init__.py create mode 100644 libs/beetsplug/acousticbrainz.py create mode 100644 libs/beetsplug/badfiles.py create mode 100644 libs/beetsplug/bench.py create mode 100644 libs/beetsplug/bpd/__init__.py create mode 100644 libs/beetsplug/bpd/gstplayer.py create mode 100644 libs/beetsplug/bpm.py create mode 100644 libs/beetsplug/bucket.py create mode 100644 libs/beetsplug/chroma.py create mode 100644 libs/beetsplug/convert.py create mode 100644 libs/beetsplug/cue.py create mode 100644 libs/beetsplug/discogs.py create mode 100644 libs/beetsplug/duplicates.py create mode 100644 libs/beetsplug/edit.py create mode 100644 libs/beetsplug/embedart.py create mode 100644 libs/beetsplug/embyupdate.py create mode 100644 libs/beetsplug/export.py create mode 100644 libs/beetsplug/fetchart.py create mode 100644 libs/beetsplug/filefilter.py create mode 100644 libs/beetsplug/freedesktop.py create mode 100644 libs/beetsplug/fromfilename.py create mode 100644 libs/beetsplug/ftintitle.py create mode 100644 libs/beetsplug/fuzzy.py create mode 100644 libs/beetsplug/hook.py create mode 100644 libs/beetsplug/ihate.py create mode 100644 libs/beetsplug/importadded.py create mode 100644 libs/beetsplug/importfeeds.py create mode 100644 libs/beetsplug/info.py create mode 100644 libs/beetsplug/inline.py create mode 100644 libs/beetsplug/ipfs.py create mode 100644 libs/beetsplug/keyfinder.py create mode 100644 libs/beetsplug/lastgenre/__init__.py create mode 100644 
libs/beetsplug/lastgenre/genres-tree.yaml create mode 100644 libs/beetsplug/lastgenre/genres.txt create mode 100644 libs/beetsplug/lastimport.py create mode 100644 libs/beetsplug/lyrics.py create mode 100644 libs/beetsplug/mbcollection.py create mode 100644 libs/beetsplug/mbsubmit.py create mode 100644 libs/beetsplug/mbsync.py create mode 100644 libs/beetsplug/metasync/__init__.py create mode 100644 libs/beetsplug/metasync/amarok.py create mode 100644 libs/beetsplug/metasync/itunes.py create mode 100644 libs/beetsplug/missing.py create mode 100644 libs/beetsplug/mpdstats.py create mode 100644 libs/beetsplug/mpdupdate.py create mode 100644 libs/beetsplug/permissions.py create mode 100644 libs/beetsplug/play.py create mode 100644 libs/beetsplug/plexupdate.py create mode 100644 libs/beetsplug/random.py create mode 100644 libs/beetsplug/replaygain.py create mode 100644 libs/beetsplug/rewrite.py create mode 100644 libs/beetsplug/scrub.py create mode 100644 libs/beetsplug/smartplaylist.py create mode 100644 libs/beetsplug/spotify.py create mode 100644 libs/beetsplug/the.py create mode 100644 libs/beetsplug/thumbnails.py create mode 100644 libs/beetsplug/types.py create mode 100644 libs/beetsplug/web/__init__.py create mode 100644 libs/beetsplug/web/static/backbone.js create mode 100644 libs/beetsplug/web/static/beets.css create mode 100644 libs/beetsplug/web/static/beets.js create mode 100644 libs/beetsplug/web/static/jquery.js create mode 100644 libs/beetsplug/web/static/underscore.js create mode 100644 libs/beetsplug/web/templates/index.html create mode 100644 libs/beetsplug/zero.py create mode 100644 libs/colorama/__init__.py create mode 100644 libs/colorama/ansi.py create mode 100644 libs/colorama/ansitowin32.py create mode 100644 libs/colorama/initialise.py create mode 100644 libs/colorama/win32.py create mode 100644 libs/colorama/winterm.py create mode 100644 libs/enum/LICENSE create mode 100644 libs/enum/README create mode 100644 libs/enum/__init__.py create mode 
100644 libs/jellyfish/__init__.py create mode 100644 libs/jellyfish/_jellyfish.py create mode 100644 libs/jellyfish/cjellyfish.pyd create mode 100644 libs/jellyfish/compat.py create mode 100644 libs/jellyfish/porter.py create mode 100644 libs/jellyfish/test.py create mode 100644 libs/munkres.py create mode 100644 libs/musicbrainzngs/__init__.py create mode 100644 libs/musicbrainzngs/caa.py create mode 100644 libs/musicbrainzngs/compat.py create mode 100644 libs/musicbrainzngs/mbxml.py create mode 100644 libs/musicbrainzngs/musicbrainz.py create mode 100644 libs/musicbrainzngs/util.py create mode 100644 libs/mutagen/_compat.py create mode 100644 libs/mutagen/_file.py create mode 100644 libs/mutagen/_mp3util.py create mode 100644 libs/mutagen/_tags.py create mode 100644 libs/mutagen/_toolsutil.py create mode 100644 libs/mutagen/aac.py create mode 100644 libs/mutagen/aiff.py delete mode 100644 libs/mutagen/asf.py create mode 100644 libs/mutagen/asf/__init__.py create mode 100644 libs/mutagen/asf/_attrs.py create mode 100644 libs/mutagen/asf/_objects.py create mode 100644 libs/mutagen/asf/_util.py rename libs/mutagen/{id3.py => id3/__init__.py} (54%) rename libs/mutagen/{_id3frames.py => id3/_frames.py} (82%) rename libs/mutagen/{_id3specs.py => id3/_specs.py} (55%) rename libs/mutagen/{_id3util.py => id3/_util.py} (64%) delete mode 100644 libs/mutagen/mp4.py create mode 100644 libs/mutagen/mp4/__init__.py create mode 100644 libs/mutagen/mp4/_as_entry.py create mode 100644 libs/mutagen/mp4/_atom.py create mode 100644 libs/mutagen/mp4/_util.py create mode 100644 libs/mutagen/smf.py create mode 100644 libs/unidecode/util.py diff --git a/libs/beets/__init__.py b/libs/beets/__init__.py index 17651f4b..830477a9 100644 --- a/libs/beets/__init__.py +++ b/libs/beets/__init__.py @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2013, Adrian Sampson. +# Copyright 2016, Adrian Sampson. 
# # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -12,15 +13,30 @@ # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. -# This particular version has been slightly modified to work with headphones -# https://github.com/rembo10/headphones +from __future__ import division, absolute_import, print_function -__version__ = '1.3.4' -__author__ = 'Adrian Sampson ' +import os -import beets.library from beets.util import confit -Library = beets.library.Library +__version__ = u'1.3.18' +__author__ = u'Adrian Sampson ' -config = confit.LazyConfig('beets', __name__) + +class IncludeLazyConfig(confit.LazyConfig): + """A version of Confit's LazyConfig that also merges in data from + YAML files specified in an `include` setting. + """ + def read(self, user=True, defaults=True): + super(IncludeLazyConfig, self).read(user, defaults) + + try: + for view in self['include']: + filename = view.as_filename() + if os.path.isfile(filename): + self.set_file(filename) + except confit.NotFoundError: + pass + + +config = IncludeLazyConfig('beets', __name__) diff --git a/libs/beets/art.py b/libs/beets/art.py new file mode 100644 index 00000000..7a65a2b8 --- /dev/null +++ b/libs/beets/art.py @@ -0,0 +1,200 @@ +# -*- coding: utf-8 -*- +# This file is part of beets. +# Copyright 2016, Adrian Sampson. 
+# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. + +"""High-level utilities for manipulating image files associated with +music and items' embedded album art. +""" + +from __future__ import division, absolute_import, print_function + +import subprocess +import platform +from tempfile import NamedTemporaryFile +import imghdr +import os + +from beets.util import displayable_path, syspath +from beets.util.artresizer import ArtResizer +from beets import mediafile + + +def mediafile_image(image_path, maxwidth=None): + """Return a `mediafile.Image` object for the path. + """ + + with open(syspath(image_path), 'rb') as f: + data = f.read() + return mediafile.Image(data, type=mediafile.ImageType.front) + + +def get_art(log, item): + # Extract the art. + try: + mf = mediafile.MediaFile(syspath(item.path)) + except mediafile.UnreadableFileError as exc: + log.warning(u'Could not extract art from {0}: {1}', + displayable_path(item.path), exc) + return + + return mf.art + + +def embed_item(log, item, imagepath, maxwidth=None, itempath=None, + compare_threshold=0, ifempty=False, as_album=False): + """Embed an image into the item's media file. + """ + # Conditions and filters. 
+ if compare_threshold: + if not check_art_similarity(log, item, imagepath, compare_threshold): + log.info(u'Image not similar; skipping.') + return + if ifempty and get_art(log, item): + log.info(u'media file already contained art') + return + if maxwidth and not as_album: + imagepath = resize_image(log, imagepath, maxwidth) + + # Get the `Image` object from the file. + try: + log.debug(u'embedding {0}', displayable_path(imagepath)) + image = mediafile_image(imagepath, maxwidth) + except IOError as exc: + log.warning(u'could not read image file: {0}', exc) + return + + # Make sure the image kind is safe (some formats only support PNG + # and JPEG). + if image.mime_type not in ('image/jpeg', 'image/png'): + log.info('not embedding image of unsupported type: {}', + image.mime_type) + return + + item.try_write(path=itempath, tags={'images': [image]}) + + +def embed_album(log, album, maxwidth=None, quiet=False, + compare_threshold=0, ifempty=False): + """Embed album art into all of the album's items. + """ + imagepath = album.artpath + if not imagepath: + log.info(u'No album art present for {0}', album) + return + if not os.path.isfile(syspath(imagepath)): + log.info(u'Album art not found at {0} for {1}', + displayable_path(imagepath), album) + return + if maxwidth: + imagepath = resize_image(log, imagepath, maxwidth) + + log.info(u'Embedding album art into {0}', album) + + for item in album.items(): + embed_item(log, item, imagepath, maxwidth, None, + compare_threshold, ifempty, as_album=True) + + +def resize_image(log, imagepath, maxwidth): + """Returns path to an image resized to maxwidth. + """ + log.debug(u'Resizing album art to {0} pixels wide', maxwidth) + imagepath = ArtResizer.shared.resize(maxwidth, syspath(imagepath)) + return imagepath + + +def check_art_similarity(log, item, imagepath, compare_threshold): + """A boolean indicating if an image is similar to embedded item art. 
+ """ + with NamedTemporaryFile(delete=True) as f: + art = extract(log, f.name, item) + + if art: + is_windows = platform.system() == "Windows" + + # Converting images to grayscale tends to minimize the weight + # of colors in the diff score. + convert_proc = subprocess.Popen( + [b'convert', syspath(imagepath), syspath(art), + b'-colorspace', b'gray', b'MIFF:-'], + stdout=subprocess.PIPE, + close_fds=not is_windows, + ) + compare_proc = subprocess.Popen( + [b'compare', b'-metric', b'PHASH', b'-', b'null:'], + stdin=convert_proc.stdout, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + close_fds=not is_windows, + ) + convert_proc.stdout.close() + + stdout, stderr = compare_proc.communicate() + if compare_proc.returncode: + if compare_proc.returncode != 1: + log.debug(u'IM phashes compare failed for {0}, {1}', + displayable_path(imagepath), + displayable_path(art)) + return + out_str = stderr + else: + out_str = stdout + + try: + phash_diff = float(out_str) + except ValueError: + log.debug(u'IM output is not a number: {0!r}', out_str) + return + + log.debug(u'compare PHASH score is {0}', phash_diff) + return phash_diff <= compare_threshold + + return True + + +def extract(log, outpath, item): + art = get_art(log, item) + + if not art: + log.info(u'No album art present in {0}, skipping.', item) + return + + # Add an extension to the filename. + ext = imghdr.what(None, h=art) + if not ext: + log.warning(u'Unknown image type in {0}.', + displayable_path(item.path)) + return + outpath += b'.' 
+ ext + + log.info(u'Extracting album art from: {0} to: {1}', + item, displayable_path(outpath)) + with open(syspath(outpath), 'wb') as f: + f.write(art) + return outpath + + +def extract_first(log, outpath, items): + for item in items: + real_path = extract(log, outpath, item) + if real_path: + return real_path + + +def clear(log, lib, query): + items = lib.items(query) + log.info(u'Clearing album art from {0} items', len(items)) + for item in items: + log.debug(u'Clearing art for {0}', item) + item.try_write(tags={'images': None}) diff --git a/libs/beets/autotag/__init__.py b/libs/beets/autotag/__init__.py index a3696354..f8233be6 100644 --- a/libs/beets/autotag/__init__.py +++ b/libs/beets/autotag/__init__.py @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2013, Adrian Sampson. +# Copyright 2016, Adrian Sampson. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -14,135 +15,23 @@ """Facilities for automatically determining files' correct metadata. """ -import os -import logging -import re -from beets import library, mediafile, config -from beets.util import sorted_walk, ancestry, displayable_path +from __future__ import division, absolute_import, print_function + +from beets import logging +from beets import config # Parts of external interface. -from .hooks import AlbumInfo, TrackInfo, AlbumMatch, TrackMatch -from .match import tag_item, tag_album -from .match import recommendation +from .hooks import AlbumInfo, TrackInfo, AlbumMatch, TrackMatch # noqa +from .match import tag_item, tag_album # noqa +from .match import Recommendation # noqa # Global logger. log = logging.getLogger('beets') -# Constants for directory walker. -MULTIDISC_MARKERS = (r'dis[ck]', r'cd') -MULTIDISC_PAT_FMT = r'^(.*%s[\W_]*)\d' - # Additional utilities for the main interface. 
-def albums_in_dir(path): - """Recursively searches the given directory and returns an iterable - of (paths, items) where paths is a list of directories and items is - a list of Items that is probably an album. Specifically, any folder - containing any media files is an album. - """ - collapse_pat = collapse_paths = collapse_items = None - - for root, dirs, files in sorted_walk(path, - ignore=config['ignore'].as_str_seq(), - logger=log): - # Get a list of items in the directory. - items = [] - for filename in files: - try: - i = library.Item.from_path(os.path.join(root, filename)) - except mediafile.FileTypeError: - pass - except mediafile.UnreadableFileError: - log.warn(u'unreadable file: {0}'.format( - displayable_path(filename)) - ) - else: - items.append(i) - - # If we're currently collapsing the constituent directories in a - # multi-disc album, check whether we should continue collapsing - # and add the current directory. If so, just add the directory - # and move on to the next directory. If not, stop collapsing. - if collapse_paths: - if (not collapse_pat and collapse_paths[0] in ancestry(root)) or \ - (collapse_pat and - collapse_pat.match(os.path.basename(root))): - # Still collapsing. - collapse_paths.append(root) - collapse_items += items - continue - else: - # Collapse finished. Yield the collapsed directory and - # proceed to process the current one. - if collapse_items: - yield collapse_paths, collapse_items - collapse_pat = collapse_paths = collapse_items = None - - # Check whether this directory looks like the *first* directory - # in a multi-disc sequence. There are two indicators: the file - # is named like part of a multi-disc sequence (e.g., "Title Disc - # 1") or it contains no items but only directories that are - # named in this way. 
- start_collapsing = False - for marker in MULTIDISC_MARKERS: - marker_pat = re.compile(MULTIDISC_PAT_FMT % marker, re.I) - match = marker_pat.match(os.path.basename(root)) - - # Is this directory the root of a nested multi-disc album? - if dirs and not items: - # Check whether all subdirectories have the same prefix. - start_collapsing = True - subdir_pat = None - for subdir in dirs: - # The first directory dictates the pattern for - # the remaining directories. - if not subdir_pat: - match = marker_pat.match(subdir) - if match: - subdir_pat = re.compile(r'^%s\d' % - re.escape(match.group(1)), re.I) - else: - start_collapsing = False - break - - # Subsequent directories must match the pattern. - elif not subdir_pat.match(subdir): - start_collapsing = False - break - - # If all subdirectories match, don't check other - # markers. - if start_collapsing: - break - - # Is this directory the first in a flattened multi-disc album? - elif match: - start_collapsing = True - # Set the current pattern to match directories with the same - # prefix as this one, followed by a digit. - collapse_pat = re.compile(r'^%s\d' % - re.escape(match.group(1)), re.I) - break - - # If either of the above heuristics indicated that this is the - # beginning of a multi-disc album, initialize the collapsed - # directory and item lists and check the next directory. - if start_collapsing: - # Start collapsing; continue to the next iteration. - collapse_paths = [root] - collapse_items = items - continue - - # If it's nonempty, yield it. - if items: - yield [root], items - - # Clear out any unfinished collapse. - if collapse_paths and collapse_items: - yield collapse_paths, collapse_items - def apply_item_metadata(item, track_info): """Set an item's metadata from its matched TrackInfo object. 
""" @@ -153,9 +42,12 @@ def apply_item_metadata(item, track_info): item.mb_trackid = track_info.track_id if track_info.artist_id: item.mb_artistid = track_info.artist_id + if track_info.data_source: + item.data_source = track_info.data_source # At the moment, the other metadata is left intact (including album # and track number). Perhaps these should be emptied? + def apply_metadata(album_info, mapping): """Set the items' metadata to match an AlbumInfo object using a mapping from Items to TrackInfo objects. @@ -171,8 +63,8 @@ def apply_metadata(album_info, mapping): # Artist sort and credit names. item.artist_sort = track_info.artist_sort or album_info.artist_sort - item.artist_credit = track_info.artist_credit or \ - album_info.artist_credit + item.artist_credit = (track_info.artist_credit or + album_info.artist_credit) item.albumartist_sort = album_info.artist_sort item.albumartist_credit = album_info.artist_credit @@ -203,7 +95,11 @@ def apply_metadata(album_info, mapping): item.title = track_info.title if config['per_disc_numbering']: - item.track = track_info.medium_index or track_info.index + # We want to let the track number be zero, but if the medium index + # is not provided we need to fall back to the overall index. 
+ item.track = track_info.medium_index + if item.track is None: + item.track = track_info.index item.tracktotal = track_info.medium_total or len(album_info.tracks) else: item.track = track_info.index @@ -235,13 +131,13 @@ def apply_metadata(album_info, mapping): 'language', 'country', 'albumstatus', - 'media', - 'albumdisambig'): + 'albumdisambig', + 'data_source',): value = getattr(album_info, field) if value is not None: item[field] = value if track_info.disctitle is not None: item.disctitle = track_info.disctitle - - # Headphones seal of approval - item.comments = 'tagged by headphones/beets' + + if track_info.media is not None: + item.media = track_info.media diff --git a/libs/beets/autotag/hooks.py b/libs/beets/autotag/hooks.py index 74c8cf82..3de80389 100644 --- a/libs/beets/autotag/hooks.py +++ b/libs/beets/autotag/hooks.py @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2013, Adrian Sampson. +# Copyright 2016, Adrian Sampson. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -13,14 +14,16 @@ # included in all copies or substantial portions of the Software. 
"""Glue between metadata sources and the matching logic.""" -import logging +from __future__ import division, absolute_import, print_function + from collections import namedtuple import re +from beets import logging from beets import plugins from beets import config from beets.autotag import mb -from beets.util import levenshtein +from jellyfish import levenshtein_distance from unidecode import unidecode log = logging.getLogger('beets') @@ -109,13 +112,14 @@ class AlbumInfo(object): 'catalognum', 'script', 'language', 'country', 'albumstatus', 'albumdisambig', 'artist_credit', 'media']: value = getattr(self, fld) - if isinstance(value, str): + if isinstance(value, bytes): setattr(self, fld, value.decode(codec, 'ignore')) if self.tracks: for track in self.tracks: track.decode(codec) + class TrackInfo(object): """Describes a canonical track present on a release. Appears as part of an AlbumInfo's ``tracks`` list. Consists of these data members: @@ -126,12 +130,15 @@ class TrackInfo(object): - ``artist_id`` - ``length``: float: duration of the track in seconds - ``index``: position on the entire release + - ``media``: delivery mechanism (Vinyl, etc.) - ``medium``: the disc number this track appears on in the album - ``medium_index``: the track's position on the disc - ``medium_total``: the number of tracks on the item's disc - ``artist_sort``: name of the track artist for sorting - ``disctitle``: name of the individual medium (subtitle) - ``artist_credit``: Recording-specific artist name + - ``data_source``: The original data source (MusicBrainz, Discogs, etc.) + - ``data_url``: The data source release URL. Only ``title`` and ``track_id`` are required. The rest of the fields may be None. 
The indices ``index``, ``medium``, and ``medium_index`` @@ -140,13 +147,15 @@ class TrackInfo(object): def __init__(self, title, track_id, artist=None, artist_id=None, length=None, index=None, medium=None, medium_index=None, medium_total=None, artist_sort=None, disctitle=None, - artist_credit=None, data_source=None, data_url=None): + artist_credit=None, data_source=None, data_url=None, + media=None): self.title = title self.track_id = track_id self.artist = artist self.artist_id = artist_id self.length = length self.index = index + self.media = media self.medium = medium self.medium_index = medium_index self.medium_total = medium_total @@ -162,9 +171,9 @@ class TrackInfo(object): to Unicode. """ for fld in ['title', 'artist', 'medium', 'artist_sort', 'disctitle', - 'artist_credit']: + 'artist_credit', 'media']: value = getattr(self, fld) - if isinstance(value, str): + if isinstance(value, bytes): setattr(self, fld, value.decode(codec, 'ignore')) @@ -187,27 +196,33 @@ SD_REPLACE = [ (r'&', 'and'), ] + def _string_dist_basic(str1, str2): """Basic edit distance between two strings, ignoring non-alphanumeric characters and case. Comparisons are based on a transliteration/lowering to ASCII characters. Normalized by string length. """ - str1 = unidecode(str1) - str2 = unidecode(str2) + assert isinstance(str1, unicode) + assert isinstance(str2, unicode) + str1 = unidecode(str1).decode('ascii') + str2 = unidecode(str2).decode('ascii') str1 = re.sub(r'[^a-z0-9]', '', str1.lower()) str2 = re.sub(r'[^a-z0-9]', '', str2.lower()) if not str1 and not str2: return 0.0 - return levenshtein(str1, str2) / float(max(len(str1), len(str2))) + return levenshtein_distance(str1, str2) / float(max(len(str1), len(str2))) + def string_dist(str1, str2): """Gives an "intuitive" edit distance between two strings. This is an edit distance, normalized by the string length, with a number of tweaks that reflect intuition about text. 
""" - if str1 == None and str2 == None: return 0.0 - if str1 == None or str2 == None: return 1.0 + if str1 is None and str2 is None: + return 0.0 + if str1 is None or str2 is None: + return 1.0 str1 = str1.lower() str2 = str2.lower() @@ -217,9 +232,9 @@ def string_dist(str1, str2): # "something, the". for word in SD_END_WORDS: if str1.endswith(', %s' % word): - str1 = '%s %s' % (word, str1[:-len(word)-2]) + str1 = '%s %s' % (word, str1[:-len(word) - 2]) if str2.endswith(', %s' % word): - str2 = '%s %s' % (word, str2[:-len(word)-2]) + str2 = '%s %s' % (word, str2[:-len(word) - 2]) # Perform a couple of basic normalizing substitutions. for pat, repl in SD_REPLACE: @@ -256,6 +271,23 @@ def string_dist(str1, str2): return base_dist + penalty + +class LazyClassProperty(object): + """A decorator implementing a read-only property that is *lazy* in + the sense that the getter is only invoked once. Subsequent accesses + through *any* instance use the cached result. + """ + def __init__(self, getter): + self.getter = getter + self.computed = False + + def __get__(self, obj, owner): + if not self.computed: + self.value = self.getter(owner) + self.computed = True + return self.value + + class Distance(object): """Keeps track of multiple distance penalties. Provides a single weighted distance for all penalties as well as a weighted distance @@ -264,11 +296,15 @@ class Distance(object): def __init__(self): self._penalties = {} + @LazyClassProperty + def _weights(cls): # noqa + """A dictionary from keys to floating-point weights. + """ weights_view = config['match']['distance_weights'] - self._weights = {} + weights = {} for key in weights_view.keys(): - self._weights[key] = weights_view[key].as_number() - + weights[key] = weights_view[key].as_number() + return weights # Access the components and their aggregates. 
@@ -313,8 +349,10 @@ class Distance(object): # Convert distance into a negative float we can sort items in # ascending order (for keys, when the penalty is equal) and # still get the items with the biggest distance first. - return sorted(list_, key=lambda (key, dist): (0-dist, key)) - + return sorted( + list_, + key=lambda key_and_dist: (-key_and_dist[1], key_and_dist[0]) + ) # Behave like a float. @@ -323,12 +361,15 @@ class Distance(object): def __float__(self): return self.distance + def __sub__(self, other): return self.distance - other def __rsub__(self, other): return other - self.distance + def __unicode__(self): + return "{0:.2f}".format(self.distance) # Behave like a dict. @@ -355,11 +396,11 @@ class Distance(object): """ if not isinstance(dist, Distance): raise ValueError( - '`dist` must be a Distance object. It is: %r' % dist) + u'`dist` must be a Distance object, not {0}'.format(type(dist)) + ) for key, penalties in dist._penalties.iteritems(): self._penalties.setdefault(key, []).extend(penalties) - # Adding components. def _eq(self, value1, value2): @@ -379,7 +420,8 @@ class Distance(object): """ if not 0.0 <= dist <= 1.0: raise ValueError( - '`dist` must be between 0.0 and 1.0. It is: %r' % dist) + u'`dist` must be between 0.0 and 1.0, not {0}'.format(dist) + ) self._penalties.setdefault(key, []).append(dist) def add_equality(self, key, value, options): @@ -472,31 +514,47 @@ def album_for_mbid(release_id): if the ID is not found. """ try: - return mb.album_for_id(release_id) + album = mb.album_for_id(release_id) + if album: + plugins.send(u'albuminfo_received', info=album) + return album except mb.MusicBrainzAPIError as exc: exc.log(log) + def track_for_mbid(recording_id): """Get a TrackInfo object for a MusicBrainz recording ID. Return None if the ID is not found. 
""" try: - return mb.track_for_id(recording_id) + track = mb.track_for_id(recording_id) + if track: + plugins.send(u'trackinfo_received', info=track) + return track except mb.MusicBrainzAPIError as exc: exc.log(log) + def albums_for_id(album_id): """Get a list of albums for an ID.""" candidates = [album_for_mbid(album_id)] - candidates.extend(plugins.album_for_id(album_id)) + plugin_albums = plugins.album_for_id(album_id) + for a in plugin_albums: + plugins.send(u'albuminfo_received', info=a) + candidates.extend(plugin_albums) return filter(None, candidates) + def tracks_for_id(track_id): """Get a list of tracks for an ID.""" candidates = [track_for_mbid(track_id)] - candidates.extend(plugins.track_for_id(track_id)) + plugin_tracks = plugins.track_for_id(track_id) + for t in plugin_tracks: + plugins.send(u'trackinfo_received', info=t) + candidates.extend(plugin_tracks) return filter(None, candidates) + def album_candidates(items, artist, album, va_likely): """Search for album matches. ``items`` is a list of Item objects that make up the album. ``artist`` and ``album`` are the respective @@ -523,8 +581,13 @@ def album_candidates(items, artist, album, va_likely): # Candidates from plugins. out.extend(plugins.candidates(items, artist, album, va_likely)) + # Notify subscribed plugins about fetched album info + for a in out: + plugins.send(u'albuminfo_received', info=a) + return out + def item_candidates(item, artist, title): """Search for item matches. ``item`` is the Item to be matched. ``artist`` and ``title`` are strings and either reflect the item or @@ -542,4 +605,8 @@ def item_candidates(item, artist, title): # Plugin candidates. 
out.extend(plugins.item_candidates(item, artist, title)) + # Notify subscribed plugins about fetched track info + for i in out: + plugins.send(u'trackinfo_received', info=i) + return out diff --git a/libs/beets/autotag/match.py b/libs/beets/autotag/match.py index a4bc47fa..cfe184e7 100644 --- a/libs/beets/autotag/match.py +++ b/libs/beets/autotag/match.py @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2013, Adrian Sampson. +# Copyright 2016, Adrian Sampson. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -15,21 +16,20 @@ """Matches existing metadata with canonical information to identify releases and tracks. """ -from __future__ import division + +from __future__ import division, absolute_import, print_function import datetime -import logging import re from munkres import Munkres +from beets import logging from beets import plugins from beets import config from beets.util import plurality -from beets.util.enumeration import enum from beets.autotag import hooks - -# Recommendation enumeration. -recommendation = enum('none', 'low', 'medium', 'strong', name='recommendation') +from beets.util.enumeration import OrderedEnum +from functools import reduce # Artist signals that indicate "various artists". These are used at the # album level to determine whether a given release is likely a VA @@ -41,6 +41,18 @@ VA_ARTISTS = (u'', u'various artists', u'various', u'va', u'unknown') log = logging.getLogger('beets') +# Recommendation enumeration. + +class Recommendation(OrderedEnum): + """Indicates a qualitative suggestion to the user about what should + be done with a given match. + """ + none = 0 + low = 1 + medium = 2 + strong = 3 + + # Primary matching functionality. 
def current_metadata(items): @@ -56,10 +68,10 @@ def current_metadata(items): fields = ['artist', 'album', 'albumartist', 'year', 'disctotal', 'mb_albumid', 'label', 'catalognum', 'country', 'media', 'albumdisambig'] - for key in fields: - values = [getattr(item, key) for item in items if item] - likelies[key], freq = plurality(values) - consensus[key] = (freq == len(values)) + for field in fields: + values = [item[field] for item in items if item] + likelies[field], freq = plurality(values) + consensus[field] = (freq == len(values)) # If there's an album artist consensus, use this for the artist. if consensus['albumartist'] and likelies['albumartist']: @@ -67,6 +79,7 @@ def current_metadata(items): return likelies, consensus + def assign_items(items, tracks): """Given a list of Items and a list of TrackInfo objects, find the best mapping between them. Returns a mapping from Items to TrackInfo @@ -93,12 +106,14 @@ def assign_items(items, tracks): extra_tracks.sort(key=lambda t: (t.index, t.title)) return mapping, extra_items, extra_tracks + def track_index_changed(item, track_info): """Returns True if the item and track info index is different. Tolerates per disc and per release numbering. """ return item.track not in (track_info.medium_index, track_info.index) + def track_distance(item, track_info, incl_artist=False): """Determines the significance of a track metadata change. Returns a Distance object. `incl_artist` indicates that a distance component should @@ -109,7 +124,7 @@ def track_distance(item, track_info, incl_artist=False): # Length. 
if track_info.length: diff = abs(item.length - track_info.length) - \ - config['match']['track_length_grace'].as_number() + config['match']['track_length_grace'].as_number() dist.add_ratio('track_length', diff, config['match']['track_length_max'].as_number()) @@ -134,6 +149,7 @@ def track_distance(item, track_info, incl_artist=False): return dist + def distance(items, album_info, mapping): """Determines how "significant" an album metadata change would be. Returns a Distance object. `album_info` is an AlbumInfo object @@ -239,6 +255,7 @@ def distance(items, album_info, mapping): return dist + def match_by_id(items): """If the items are tagged with a MusicBrainz album ID, returns an AlbumInfo object for the corresponding album. Otherwise, returns @@ -247,16 +264,17 @@ def match_by_id(items): # Is there a consensus on the MB album ID? albumids = [item.mb_albumid for item in items if item.mb_albumid] if not albumids: - log.debug('No album IDs found.') + log.debug(u'No album IDs found.') return None # If all album IDs are equal, look up the album. - if bool(reduce(lambda x,y: x if x==y else (), albumids)): + if bool(reduce(lambda x, y: x if x == y else (), albumids)): albumid = albumids[0] - log.debug('Searching for discovered album ID: ' + albumid) + log.debug(u'Searching for discovered album ID: {0}', albumid) return hooks.album_for_mbid(albumid) else: - log.debug('No album ID consensus.') + log.debug(u'No album ID consensus.') + def _recommendation(results): """Given a sorted list of AlbumMatch or TrackMatch objects, return a @@ -268,26 +286,26 @@ def _recommendation(results): """ if not results: # No candidates: no recommendation. - return recommendation.none + return Recommendation.none # Basic distance thresholding. min_dist = results[0].distance if min_dist < config['match']['strong_rec_thresh'].as_number(): # Strong recommendation level. 
- rec = recommendation.strong + rec = Recommendation.strong elif min_dist <= config['match']['medium_rec_thresh'].as_number(): # Medium recommendation level. - rec = recommendation.medium + rec = Recommendation.medium elif len(results) == 1: # Only a single candidate. - rec = recommendation.low + rec = Recommendation.low elif results[1].distance - min_dist >= \ config['match']['rec_gap_thresh'].as_number(): # Gap between first two candidates is large. - rec = recommendation.low + rec = Recommendation.low else: # No conclusion. Return immediately. Can't be downgraded any further. - return recommendation.none + return Recommendation.none # Downgrade to the max rec if it is lower than the current rec for an # applied penalty. @@ -299,28 +317,40 @@ def _recommendation(results): for key in keys: if key in max_rec_view.keys(): max_rec = max_rec_view[key].as_choice({ - 'strong': recommendation.strong, - 'medium': recommendation.medium, - 'low': recommendation.low, - 'none': recommendation.none, + 'strong': Recommendation.strong, + 'medium': Recommendation.medium, + 'low': Recommendation.low, + 'none': Recommendation.none, }) rec = min(rec, max_rec) return rec + def _add_candidate(items, results, info): """Given a candidate AlbumInfo object, attempt to add the candidate to the output dictionary of AlbumMatch objects. This involves checking the track count, ordering the items, checking for duplicates, and calculating the distance. """ - log.debug('Candidate: %s - %s' % (info.artist, info.album)) + log.debug(u'Candidate: {0} - {1}', info.artist, info.album) + + # Discard albums with zero tracks. + if not info.tracks: + log.debug(u'No tracks.') + return # Don't duplicate. if info.album_id in results: - log.debug('Duplicate.') + log.debug(u'Duplicate.') return + # Discard matches without required tags. + for req_tag in config['match']['required'].as_str_seq(): + if getattr(info, req_tag) is None: + log.debug(u'Ignored. 
Missing required tag: {0}', req_tag) + return + # Find mapping between the items and the track info. mapping, extra_items, extra_tracks = assign_items(items, info.tracks) @@ -328,42 +358,53 @@ def _add_candidate(items, results, info): dist = distance(items, info, mapping) # Skip matches with ignored penalties. - penalties = [key for _, key in dist] + penalties = [key for key, _ in dist] for penalty in config['match']['ignored'].as_str_seq(): if penalty in penalties: - log.debug('Ignored. Penalty: %s' % penalty) + log.debug(u'Ignored. Penalty: {0}', penalty) return - log.debug('Success. Distance: %f' % dist) + log.debug(u'Success. Distance: {0}', dist) results[info.album_id] = hooks.AlbumMatch(dist, info, mapping, extra_items, extra_tracks) + def tag_album(items, search_artist=None, search_album=None, - search_id=None): - """Bundles together the functionality used to infer tags for a - set of items comprised by an album. Returns everything relevant: - - The current artist. - - The current album. - - A list of AlbumMatch objects. The candidates are sorted by - distance (i.e., best match first). - - A recommendation. - If search_artist and search_album or search_id are provided, then - they are used as search terms in place of the current metadata. + search_ids=[]): + """Return a tuple of a artist name, an album name, a list of + `AlbumMatch` candidates from the metadata backend, and a + `Recommendation`. + + The artist and album are the most common values of these fields + among `items`. + + The `AlbumMatch` objects are generated by searching the metadata + backends. By default, the metadata of the items is used for the + search. This can be customized by setting the parameters. + `search_ids` is a list of metadata backend IDs: if specified, + it will restrict the candidates to those IDs, ignoring + `search_artist` and `search album`. The `mapping` field of the + album has the matched `items` as keys. 
+ + The recommendation is calculated from the match quality of the + candidates. """ # Get current metadata. likelies, consensus = current_metadata(items) cur_artist = likelies['artist'] cur_album = likelies['album'] - log.debug('Tagging %s - %s' % (cur_artist, cur_album)) + log.debug(u'Tagging {0} - {1}', cur_artist, cur_album) # The output result (distance, AlbumInfo) tuples (keyed by MB album # ID). candidates = {} # Search by explicit ID. - if search_id is not None: - log.debug('Searching for album ID: ' + search_id) - search_cands = hooks.albums_for_id(search_id) + if search_ids: + search_cands = [] + for search_id in search_ids: + log.debug(u'Searching for album ID: {0}', search_id) + search_cands.extend(hooks.albums_for_id(search_id)) # Use existing metadata or text search. else: @@ -372,32 +413,32 @@ def tag_album(items, search_artist=None, search_album=None, if id_info: _add_candidate(items, candidates, id_info) rec = _recommendation(candidates.values()) - log.debug('Album ID match recommendation is ' + str(rec)) + log.debug(u'Album ID match recommendation is {0}', rec) if candidates and not config['import']['timid']: # If we have a very good MBID match, return immediately. # Otherwise, this match will compete against metadata-based # matches. - if rec == recommendation.strong: - log.debug('ID match.') + if rec == Recommendation.strong: + log.debug(u'ID match.') return cur_artist, cur_album, candidates.values(), rec # Search terms. if not (search_artist and search_album): # No explicit search terms -- use current metadata. search_artist, search_album = cur_artist, cur_album - log.debug(u'Search terms: %s - %s' % (search_artist, search_album)) + log.debug(u'Search terms: {0} - {1}', search_artist, search_album) # Is this album likely to be a "various artist" release? 
va_likely = ((not consensus['artist']) or - (search_artist.lower() in VA_ARTISTS) or - any(item.comp for item in items)) - log.debug(u'Album might be VA: %s' % str(va_likely)) + (search_artist.lower() in VA_ARTISTS) or + any(item.comp for item in items)) + log.debug(u'Album might be VA: {0}', va_likely) # Get the results from the data sources. search_cands = hooks.album_candidates(items, search_artist, search_album, va_likely) - log.debug(u'Evaluating %i candidates.' % len(search_cands)) + log.debug(u'Evaluating {0} candidates.', len(search_cands)) for info in search_cands: _add_candidate(items, candidates, info) @@ -406,43 +447,47 @@ def tag_album(items, search_artist=None, search_album=None, rec = _recommendation(candidates) return cur_artist, cur_album, candidates, rec + def tag_item(item, search_artist=None, search_title=None, - search_id=None): + search_ids=[]): """Attempts to find metadata for a single track. Returns a `(candidates, recommendation)` pair where `candidates` is a list of TrackMatch objects. `search_artist` and `search_title` may be used to override the current metadata for the purposes of the MusicBrainz - title; likewise `search_id`. + title. `search_ids` may be used for restricting the search to a list + of metadata backend IDs. """ # Holds candidates found so far: keys are MBIDs; values are # (distance, TrackInfo) pairs. candidates = {} # First, try matching by MusicBrainz ID. 
- trackid = search_id or item.mb_trackid - if trackid: - log.debug('Searching for track ID: ' + trackid) - for track_info in hooks.tracks_for_id(trackid): - dist = track_distance(item, track_info, incl_artist=True) - candidates[track_info.track_id] = \ + trackids = search_ids or filter(None, [item.mb_trackid]) + if trackids: + for trackid in trackids: + log.debug(u'Searching for track ID: {0}', trackid) + for track_info in hooks.tracks_for_id(trackid): + dist = track_distance(item, track_info, incl_artist=True) + candidates[track_info.track_id] = \ hooks.TrackMatch(dist, track_info) - # If this is a good match, then don't keep searching. - rec = _recommendation(candidates.values()) - if rec == recommendation.strong and not config['import']['timid']: - log.debug('Track ID match.') - return candidates.values(), rec + # If this is a good match, then don't keep searching. + rec = _recommendation(sorted(candidates.itervalues())) + if rec == Recommendation.strong and \ + not config['import']['timid']: + log.debug(u'Track ID match.') + return sorted(candidates.itervalues()), rec # If we're searching by ID, don't proceed. - if search_id is not None: + if search_ids: if candidates: - return candidates.values(), rec + return sorted(candidates.itervalues()), rec else: - return [], recommendation.none + return [], Recommendation.none # Search terms. if not (search_artist and search_title): search_artist, search_title = item.artist, item.title - log.debug(u'Item search terms: %s - %s' % (search_artist, search_title)) + log.debug(u'Item search terms: {0} - {1}', search_artist, search_title) # Get and evaluate candidate metadata. for track_info in hooks.item_candidates(item, search_artist, search_title): @@ -450,7 +495,7 @@ def tag_item(item, search_artist=None, search_title=None, candidates[track_info.track_id] = hooks.TrackMatch(dist, track_info) # Sort by distance and return with recommendation. - log.debug('Found %i candidates.' 
% len(candidates)) + log.debug(u'Found {0} candidates.', len(candidates)) candidates = sorted(candidates.itervalues()) rec = _recommendation(candidates) return candidates, rec diff --git a/libs/beets/autotag/mb.py b/libs/beets/autotag/mb.py index 779ec4b3..e64da8d5 100644 --- a/libs/beets/autotag/mb.py +++ b/libs/beets/autotag/mb.py @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2013, Adrian Sampson. +# Copyright 2016, Adrian Sampson. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -14,23 +15,25 @@ """Searches for albums in the MusicBrainz database. """ -import logging +from __future__ import division, absolute_import, print_function + import musicbrainzngs import re import traceback from urlparse import urljoin +from beets import logging import beets.autotag.hooks import beets from beets import util from beets import config -SEARCH_LIMIT = 5 VARIOUS_ARTISTS_ID = '89ad4ac3-39f7-470e-963a-56509c546377' BASE_URL = 'http://musicbrainz.org/' musicbrainzngs.set_useragent('beets', beets.__version__, - 'http://beets.radbox.org/') + 'http://beets.io/') + class MusicBrainzAPIError(util.HumanReadableException): """An error while talking to MusicBrainz. 
The `query` field is the @@ -38,10 +41,12 @@ class MusicBrainzAPIError(util.HumanReadableException): """ def __init__(self, reason, verb, query, tb=None): self.query = query + if isinstance(reason, musicbrainzngs.WebServiceError): + reason = u'MusicBrainz not reachable' super(MusicBrainzAPIError, self).__init__(reason, verb, tb) def get_message(self): - return u'"{0}" in {1} with query {2}'.format( + return u'{0} in {1} with query {2}'.format( self._reasonstr(), self.verb, repr(self.query) ) @@ -51,12 +56,15 @@ RELEASE_INCLUDES = ['artists', 'media', 'recordings', 'release-groups', 'labels', 'artist-credits', 'aliases'] TRACK_INCLUDES = ['artists', 'aliases'] + def track_url(trackid): return urljoin(BASE_URL, 'recording/' + trackid) + def album_url(albumid): return urljoin(BASE_URL, 'release/' + albumid) + def configure(): """Set up the python-musicbrainz-ngs module according to settings from the beets configuration. This should be called at startup. @@ -67,6 +75,7 @@ def configure(): config['musicbrainz']['ratelimit'].get(int), ) + def _preferred_alias(aliases): """Given an list of alias structures for an artist credit, select and return the user's preferred alias alias or None if no matching @@ -81,13 +90,15 @@ def _preferred_alias(aliases): # Search configured locales in order. for locale in config['import']['languages'].as_str_seq(): # Find matching primary aliases for this locale. 
- matches = [a for a in aliases if a['locale'] == locale and 'primary' in a] + matches = [a for a in aliases + if a['locale'] == locale and 'primary' in a] # Skip to the next locale if we have no matches if not matches: continue return matches[0] + def _flatten_artist_credit(credit): """Given a list representing an ``artist-credit`` block, flatten the data into a triple of joined artist name strings: canonical, sort, and @@ -133,6 +144,7 @@ def _flatten_artist_credit(credit): ''.join(artist_credit_parts), ) + def track_info(recording, index=None, medium=None, medium_index=None, medium_total=None): """Translates a MusicBrainz recording result dictionary into a beets @@ -149,6 +161,7 @@ def track_info(recording, index=None, medium=None, medium_index=None, medium=medium, medium_index=medium_index, medium_total=medium_total, + data_source=u'MusicBrainz', data_url=track_url(recording['id']), ) @@ -167,6 +180,7 @@ def track_info(recording, index=None, medium=None, medium_index=None, info.decode() return info + def _set_date_str(info, date_str, original=False): """Given a (possibly partial) YYYY-MM-DD string and an AlbumInfo object, set the object's release date fields appropriately. If @@ -186,6 +200,7 @@ def _set_date_str(info, date_str, original=False): key = 'original_' + key setattr(info, key, date_num) + def album_info(release): """Takes a MusicBrainz release result dictionary and returns a beets AlbumInfo object containing the interesting data about that release. @@ -199,7 +214,13 @@ def album_info(release): index = 0 for medium in release['medium-list']: disctitle = medium.get('title') - for track in medium['track-list']: + format = medium.get('format') + + all_tracks = medium['track-list'] + if 'pregap' in medium: + all_tracks.insert(0, medium['pregap']) + + for track in all_tracks: # Basic information from the recording. 
index += 1 ti = track_info( @@ -210,6 +231,7 @@ def album_info(release): len(medium['track-list']), ) ti.disctitle = disctitle + ti.media = format # Prefer track data, where present, over recording data. if track.get('title'): @@ -233,10 +255,12 @@ def album_info(release): mediums=len(release['medium-list']), artist_sort=artist_sort_name, artist_credit=artist_credit_name, - data_source='MusicBrainz', + data_source=u'MusicBrainz', data_url=album_url(release['id']), ) info.va = info.artist_id == VARIOUS_ARTISTS_ID + if info.va: + info.artist = config['va_name'].get(unicode) info.asin = release.get('asin') info.releasegroup_id = release['release-group']['id'] info.country = release.get('country') @@ -288,7 +312,8 @@ def album_info(release): info.decode() return info -def match_album(artist, album, tracks=None, limit=SEARCH_LIMIT): + +def match_album(artist, album, tracks=None): """Searches for a single album ("release" in MusicBrainz parlance) and returns an iterator over AlbumInfo objects. May raise a MusicBrainzAPIError. @@ -297,21 +322,22 @@ def match_album(artist, album, tracks=None, limit=SEARCH_LIMIT): optionally, a number of tracks on the album. """ # Build search criteria. - criteria = {'release': album.lower()} + criteria = {'release': album.lower().strip()} if artist is not None: - criteria['artist'] = artist.lower() + criteria['artist'] = artist.lower().strip() else: # Various Artists search. criteria['arid'] = VARIOUS_ARTISTS_ID if tracks is not None: - criteria['tracks'] = str(tracks) + criteria['tracks'] = unicode(tracks) # Abort if we have no search terms. 
if not any(criteria.itervalues()): return try: - res = musicbrainzngs.search_releases(limit=limit, **criteria) + res = musicbrainzngs.search_releases( + limit=config['musicbrainz']['searchlimit'].get(int), **criteria) except musicbrainzngs.MusicBrainzError as exc: raise MusicBrainzAPIError(exc, 'release search', criteria, traceback.format_exc()) @@ -322,69 +348,74 @@ def match_album(artist, album, tracks=None, limit=SEARCH_LIMIT): if albuminfo is not None: yield albuminfo -def match_track(artist, title, limit=SEARCH_LIMIT): + +def match_track(artist, title): """Searches for a single track and returns an iterable of TrackInfo objects. May raise a MusicBrainzAPIError. """ criteria = { - 'artist': artist.lower(), - 'recording': title.lower(), + 'artist': artist.lower().strip(), + 'recording': title.lower().strip(), } if not any(criteria.itervalues()): return try: - res = musicbrainzngs.search_recordings(limit=limit, **criteria) + res = musicbrainzngs.search_recordings( + limit=config['musicbrainz']['searchlimit'].get(int), **criteria) except musicbrainzngs.MusicBrainzError as exc: raise MusicBrainzAPIError(exc, 'recording search', criteria, traceback.format_exc()) for recording in res['recording-list']: yield track_info(recording) + def _parse_id(s): """Search for a MusicBrainz ID in the given string and return it. If no ID can be found, return None. """ # Find the first thing that looks like a UUID/MBID. - match = re.search('[a-f0-9]{8}(-[a-f0-9]{4}){3}-[a-f0-9]{12}', s) + match = re.search(ur'[a-f0-9]{8}(-[a-f0-9]{4}){3}-[a-f0-9]{12}', s) if match: return match.group() -def album_for_id(albumid): + +def album_for_id(releaseid): """Fetches an album by its MusicBrainz ID and returns an AlbumInfo object or None if the album is not found. May raise a MusicBrainzAPIError. 
""" - albumid = _parse_id(albumid) + albumid = _parse_id(releaseid) if not albumid: - log.error('Invalid MBID.') + log.debug(u'Invalid MBID ({0}).', releaseid) return try: res = musicbrainzngs.get_release_by_id(albumid, RELEASE_INCLUDES) except musicbrainzngs.ResponseError: - log.debug('Album ID match failed.') + log.debug(u'Album ID match failed.') return None except musicbrainzngs.MusicBrainzError as exc: - raise MusicBrainzAPIError(exc, 'get release by ID', albumid, + raise MusicBrainzAPIError(exc, u'get release by ID', albumid, traceback.format_exc()) return album_info(res['release']) -def track_for_id(trackid): + +def track_for_id(releaseid): """Fetches a track by its MusicBrainz ID. Returns a TrackInfo object or None if no track is found. May raise a MusicBrainzAPIError. """ - trackid = _parse_id(trackid) + trackid = _parse_id(releaseid) if not trackid: - log.error('Invalid MBID.') + log.debug(u'Invalid MBID ({0}).', releaseid) return try: res = musicbrainzngs.get_recording_by_id(trackid, TRACK_INCLUDES) except musicbrainzngs.ResponseError: - log.debug('Track ID match failed.') + log.debug(u'Track ID match failed.') return None except musicbrainzngs.MusicBrainzError as exc: - raise MusicBrainzAPIError(exc, 'get recording by ID', trackid, + raise MusicBrainzAPIError(exc, u'get recording by ID', trackid, traceback.format_exc()) return track_info(res['recording']) diff --git a/libs/beets/config_default.yaml b/libs/beets/config_default.yaml index d35368ea..4c12c3df 100644 --- a/libs/beets/config_default.yaml +++ b/libs/beets/config_default.yaml @@ -5,6 +5,7 @@ import: write: yes copy: yes move: no + link: no delete: no resume: ask incremental: no @@ -20,9 +21,13 @@ import: detail: no flat: no group_albums: no + pretend: false + search_ids: [] clutter: ["Thumbs.DB", ".DS_Store"] -ignore: [".*", "*~", "System Volume Information"] +ignore: [".*", "*~", "System Volume Information", "lost+found"] +ignore_hidden: yes + replace: '[\\/]': _ '^\.': _ @@ -32,27 +37,42 @@ 
replace: '\s+$': '' '^\s+': '' path_sep_replace: _ +asciify_paths: false art_filename: cover max_filename_length: 0 plugins: [] pluginpath: [] threaded: yes -color: yes timeout: 5.0 per_disc_numbering: no -verbose: no -terminal_encoding: utf8 +verbose: 0 +terminal_encoding: original_date: no id3v23: no +va_name: "Various Artists" ui: terminal_width: 80 length_diff_thresh: 10.0 + color: yes + colors: + text_success: green + text_warning: yellow + text_error: red + text_highlight: red + text_highlight_minor: lightgray + action_default: turquoise + action: blue -list_format_item: $artist - $album - $title -list_format_album: $albumartist - $album +format_item: $artist - $album - $title +format_album: $albumartist - $album time_format: '%Y-%m-%d %H:%M:%S' +format_raw_length: no + +sort_album: albumartist+ album+ +sort_item: artist+ album+ disc+ track+ +sort_case_insensitive: yes paths: default: $albumartist/$album%aunique{}/$track $title @@ -65,6 +85,7 @@ musicbrainz: host: musicbrainz.org ratelimit: 1 ratelimit_interval: 1.0 + searchlimit: 5 match: strong_rec_thresh: 0.04 @@ -98,5 +119,6 @@ match: media: [] original_year: no ignored: [] + required: [] track_length_grace: 10 track_length_max: 30 diff --git a/libs/beets/dbcore/__init__.py b/libs/beets/dbcore/__init__.py index b4f80fb9..689e7202 100644 --- a/libs/beets/dbcore/__init__.py +++ b/libs/beets/dbcore/__init__.py @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2014, Adrian Sampson. +# Copyright 2016, Adrian Sampson. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -15,6 +16,14 @@ """DBCore is an abstract database package that forms the basis for beets' Library. 
""" +from __future__ import division, absolute_import, print_function + from .db import Model, Database from .query import Query, FieldQuery, MatchQuery, AndQuery, OrQuery from .types import Type +from .queryparse import query_from_strings +from .queryparse import sort_from_strings +from .queryparse import parse_sorted_query +from .query import InvalidQueryError + +# flake8: noqa diff --git a/libs/beets/dbcore/db.py b/libs/beets/dbcore/db.py index cbdaf5a7..3f701be5 100644 --- a/libs/beets/dbcore/db.py +++ b/libs/beets/dbcore/db.py @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2014, Adrian Sampson. +# Copyright 2016, Adrian Sampson. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -14,22 +15,70 @@ """The central Model and Database constructs for DBCore. """ +from __future__ import division, absolute_import, print_function + import time import os from collections import defaultdict import threading import sqlite3 import contextlib +import collections import beets from beets.util.functemplate import Template -from .query import MatchQuery +from beets.dbcore import types +from .query import MatchQuery, NullSort, TrueQuery +class FormattedMapping(collections.Mapping): + """A `dict`-like formatted view of a model. + + The accessor `mapping[key]` returns the formatted version of + `model[key]` as a unicode string. + + If `for_path` is true, all path separators in the formatted values + are replaced. 
+ """ + + def __init__(self, model, for_path=False): + self.for_path = for_path + self.model = model + self.model_keys = model.keys(True) + + def __getitem__(self, key): + if key in self.model_keys: + return self._get_formatted(self.model, key) + else: + raise KeyError(key) + + def __iter__(self): + return iter(self.model_keys) + + def __len__(self): + return len(self.model_keys) + + def get(self, key, default=None): + if default is None: + default = self.model._type(key).format(None) + return super(FormattedMapping, self).get(key, default) + + def _get_formatted(self, model, key): + value = model._type(key).format(model.get(key)) + if isinstance(value, bytes): + value = value.decode('utf8', 'ignore') + + if self.for_path: + sep_repl = beets.config['path_sep_replace'].get(unicode) + for sep in (os.path.sep, os.path.altsep): + if sep: + value = value.replace(sep, sep_repl) + + return value + # Abstract base for model classes. - class Model(object): """An abstract object representing an object in the database. Model objects act like dictionaries (i.e., the allow subscript access like @@ -66,12 +115,7 @@ class Model(object): _fields = {} """A mapping indicating available "fixed" fields on this type. The - keys are field names and the values are Type objects. - """ - - _bytes_keys = () - """Keys whose values should be stored as raw bytes blobs rather than - strings. + keys are field names and the values are `Type` objects. """ _search_fields = () @@ -79,6 +123,21 @@ class Model(object): terms. """ + _types = {} + """Optional Types for non-fixed (i.e., flexible and computed) fields. + """ + + _sorts = {} + """Optional named sort criteria. The keys are strings and the values + are subclasses of `Sort`. + """ + + _always_dirty = False + """By default, fields only become "dirty" when their value actually + changes. Enabling this flag marks fields as dirty even when the new + value is the same as the old value (e.g., `o.f = o.f`). 
+ """ + @classmethod def _getters(cls): """Return a mapping from field names to getter functions. @@ -94,7 +153,6 @@ class Model(object): # As above: we could consider caching this result. raise NotImplementedError() - # Basic operation. def __init__(self, db=None, **values): @@ -110,6 +168,20 @@ class Model(object): self.update(values) self.clear_dirty() + @classmethod + def _awaken(cls, db=None, fixed_values={}, flex_values={}): + """Create an object with values drawn from the database. + + This is a performance optimization: the checks involved with + ordinary construction are bypassed. + """ + obj = cls(db) + for key, value in fixed_values.iteritems(): + obj._values_fixed[key] = cls._type(key).from_sql(value) + for key, value in flex_values.iteritems(): + obj._values_flex[key] = cls._type(key).from_sql(value) + return obj + def __repr__(self): return '{0}({1})'.format( type(self).__name__, @@ -128,13 +200,23 @@ class Model(object): exception is raised otherwise. """ if not self._db: - raise ValueError('{0} has no database'.format(type(self).__name__)) + raise ValueError( + u'{0} has no database'.format(type(self).__name__) + ) if need_id and not self.id: - raise ValueError('{0} has no id'.format(type(self).__name__)) - + raise ValueError(u'{0} has no id'.format(type(self).__name__)) # Essential field accessors. + @classmethod + def _type(cls, key): + """Get the type of a field, a `Type` instance. + + If the field has no explicit type, it is given the base `Type`, + which does no conversion. + """ + return cls._fields.get(key) or cls._types.get(key) or types.DEFAULT + def __getitem__(self, key): """Get the value for a field. Raise a KeyError if the field is not available. @@ -152,11 +234,19 @@ class Model(object): def __setitem__(self, key, value): """Assign the value for a field. """ - source = self._values_fixed if key in self._fields \ - else self._values_flex + # Choose where to place the value. 
+ if key in self._fields: + source = self._values_fixed + else: + source = self._values_flex + + # If the field has a type, filter the value. + value = self._type(key).normalize(value) + + # Assign value and possibly mark as dirty. old_value = source.get(key) source[key] = value - if old_value != value: + if self._always_dirty or old_value != value: self._dirty.add(key) def __delitem__(self, key): @@ -166,11 +256,11 @@ class Model(object): del self._values_flex[key] self._dirty.add(key) # Mark for dropping on store. elif key in self._getters(): # Computed. - raise KeyError('computed field {0} cannot be deleted'.format(key)) + raise KeyError(u'computed field {0} cannot be deleted'.format(key)) elif key in self._fields: # Fixed. - raise KeyError('fixed field {0} cannot be deleted'.format(key)) + raise KeyError(u'fixed field {0} cannot be deleted'.format(key)) else: - raise KeyError('no such field {0}'.format(key)) + raise KeyError(u'no such field {0}'.format(key)) def keys(self, computed=False): """Get a list of available field names for this object. The @@ -183,6 +273,12 @@ class Model(object): else: return base_keys + @classmethod + def all_keys(cls): + """Get a list of available keys for objects of this type. + Includes fixed and computed fields. + """ + return list(cls._fields) + cls._getters().keys() # Act like a dictionary. @@ -219,17 +315,16 @@ class Model(object): """ return iter(self.keys()) - # Convenient attribute access. def __getattr__(self, key): if key.startswith('_'): - raise AttributeError('model has no attribute {0!r}'.format(key)) + raise AttributeError(u'model has no attribute {0!r}'.format(key)) else: try: return self[key] except KeyError: - raise AttributeError('no such field {0!r}'.format(key)) + raise AttributeError(u'no such field {0!r}'.format(key)) def __setattr__(self, key, value): if key.startswith('_'): @@ -243,7 +338,6 @@ class Model(object): else: del self[key] - # Database interaction (CRUD methods). 
def store(self): @@ -252,19 +346,15 @@ class Model(object): self._check_db() # Build assignments for query. - assignments = '' + assignments = [] subvars = [] for key in self._fields: if key != 'id' and key in self._dirty: self._dirty.remove(key) - assignments += key + '=?,' - value = self[key] - # Wrap path strings in buffers so they get stored - # "in the raw". - if key in self._bytes_keys and isinstance(value, str): - value = buffer(value) + assignments.append(key + '=?') + value = self._type(key).to_sql(self[key]) subvars.append(value) - assignments = assignments[:-1] # Knock off last , + assignments = ','.join(assignments) with self._db.transaction() as tx: # Main table update. @@ -301,7 +391,9 @@ class Model(object): """ self._check_db() stored_obj = self._db._get(type(self), self.id) - assert stored_obj is not None, "object {0} not in DB".format(self.id) + assert stored_obj is not None, u"object {0} not in DB".format(self.id) + self._values_fixed = {} + self._values_flex = {} self.update(dict(stored_obj)) self.clear_dirty() @@ -344,76 +436,26 @@ class Model(object): self._dirty.add(key) self.store() - # Formatting and templating. - @classmethod - def _format(cls, key, value, for_path=False): - """Format a value as the given field for this model. - """ - # Format the value as a string according to its type, if any. - if key in cls._fields: - value = cls._fields[key].format(value) - # Formatting must result in a string. To deal with - # Python2isms, implicitly convert ASCII strings. - assert isinstance(value, basestring), \ - u'field formatter must produce strings' - if isinstance(value, bytes): - value = value.decode('utf8', 'ignore') + _formatter = FormattedMapping - elif not isinstance(value, unicode): - # Fallback formatter. Convert to unicode at all cost. 
- if value is None: - value = u'' - elif isinstance(value, basestring): - if isinstance(value, bytes): - value = value.decode('utf8', 'ignore') - else: - value = unicode(value) - - if for_path: - sep_repl = beets.config['path_sep_replace'].get(unicode) - for sep in (os.path.sep, os.path.altsep): - if sep: - value = value.replace(sep, sep_repl) - - return value - - def _get_formatted(self, key, for_path=False): - """Get a field value formatted as a string (`unicode` object) - for display to the user. If `for_path` is true, then the value - will be sanitized for inclusion in a pathname (i.e., path - separators will be removed from the value). - """ - return self._format(key, self.get(key), for_path) - - def _formatted_mapping(self, for_path=False): + def formatted(self, for_path=False): """Get a mapping containing all values on this object formatted - as human-readable strings. + as human-readable unicode strings. """ - # In the future, this could be made "lazy" to avoid computing - # fields unnecessarily. - out = {} - for key in self.keys(True): - out[key] = self._get_formatted(key, for_path) - return out + return self._formatter(self, for_path) def evaluate_template(self, template, for_path=False): """Evaluate a template (a string or a `Template` object) using the object's fields. If `for_path` is true, then no new path separators will be added to the template. """ - # Build value mapping. - mapping = self._formatted_mapping(for_path) - - # Get template functions. - funcs = self._template_funcs() - # Perform substitution. if isinstance(template, basestring): template = Template(template) - return template.substitute(mapping, funcs) - + return template.substitute(self.formatted(for_path), + self._template_funcs()) # Parsing. @@ -422,65 +464,124 @@ class Model(object): """Parse a string as a value for the given key. 
""" if not isinstance(string, basestring): - raise TypeError("_parse() argument must be a string") + raise TypeError(u"_parse() argument must be a string") - typ = cls._fields.get(key) - if typ: - return typ.parse(string) - else: - # Fall back to unparsed string. - return string + return cls._type(key).parse(string) + def set_parse(self, key, string): + """Set the object's key to a value represented by a string. + """ + self[key] = self._parse(key, string) # Database controller and supporting interfaces. - class Results(object): """An item query result set. Iterating over the collection lazily constructs LibModel objects that reflect database rows. """ - def __init__(self, model_class, rows, db, query=None): + def __init__(self, model_class, rows, db, query=None, sort=None): """Create a result set that will construct objects of type - `model_class`, which should be a subclass of `LibModel`, out of - the query result mapping in `rows`. The new objects are - associated with the database `db`. If `query` is provided, it is - used as a predicate to filter the results for a "slow query" that - cannot be evaluated by the database directly. + `model_class`. + + `model_class` is a subclass of `LibModel` that will be + constructed. `rows` is a query result: a list of mappings. The + new objects will be associated with the database `db`. + + If `query` is provided, it is used as a predicate to filter the + results for a "slow query" that cannot be evaluated by the + database directly. If `sort` is provided, it is used to sort the + full list of results before returning. This means it is a "slow + sort" and all objects must be built before returning the first + one. """ self.model_class = model_class self.rows = rows self.db = db self.query = query + self.sort = sort + + # We keep a queue of rows we haven't yet consumed for + # materialization. We preserve the original total number of + # rows. 
+        self._rows = rows
+        self._row_count = len(rows)
+
+        # The materialized objects corresponding to rows that have been
+        # consumed.
+        self._objects = []
+
+    def _get_objects(self):
+        """Construct and generate Model objects for the query. The
+        objects are returned in the order emitted from the database; no
+        slow sort is applied.
+
+        For performance, this generator caches materialized objects to
+        avoid constructing them more than once. This way, iterating over
+        a `Results` object a second time should be much faster than the
+        first.
+        """
+        index = 0  # Position in the materialized objects.
+        while index < len(self._objects) or self._rows:
+            # Are there previously-materialized objects to produce?
+            if index < len(self._objects):
+                yield self._objects[index]
+                index += 1
+
+            # Otherwise, we consume another row, materialize its object
+            # and produce it.
+            else:
+                while self._rows:
+                    row = self._rows.pop(0)
+                    obj = self._make_model(row)
+                    # If there is a slow-query predicate, ensure that the
+                    # object passes it.
+                    if not self.query or self.query.match(obj):
+                        self._objects.append(obj)
+                        index += 1
+                        yield obj
+                        break

     def __iter__(self):
-        """Construct Python objects for all rows that pass the query
-        predicate.
+        """Construct and generate Model objects for all matching
+        objects, in sorted order.
         """
-        for row in self.rows:
-            # Get the flexible attributes for the object.
-            with self.db.transaction() as tx:
-                flex_rows = tx.query(
-                    'SELECT * FROM {0} WHERE entity_id=?'.format(
-                        self.model_class._flex_table
-                    ),
-                    (row['id'],)
-                )
-            values = dict(row)
-            values.update(
-                dict((row['key'], row['value']) for row in flex_rows
+        if self.sort:
+            # Slow sort. Must build the full list first.
+            objects = self.sort.sort(list(self._get_objects()))
+            return iter(objects)
+
+        else:
+            # Objects are pre-sorted (i.e., by the database).
+            return self._get_objects()
+
+    def _make_model(self, row):
+        # Get the flexible attributes for the object.
+ with self.db.transaction() as tx: + flex_rows = tx.query( + 'SELECT * FROM {0} WHERE entity_id=?'.format( + self.model_class._flex_table + ), + (row['id'],) ) - # Construct the Python object and yield it if it passes the - # predicate. - obj = self.model_class(self.db, **values) - if not self.query or self.query.match(obj): - yield obj + cols = dict(row) + values = dict((k, v) for (k, v) in cols.items() + if not k[:4] == 'flex') + flex_values = dict((row['key'], row['value']) for row in flex_rows) + + # Construct the Python object + obj = self.model_class._awaken(self.db, values, flex_values) + return obj def __len__(self): """Get the number of matching objects. """ - if self.query: + if not self._rows: + # Fully materialized. Just count the objects. + return len(self._objects) + + elif self.query: # A slow query. Fall back to testing every object. count = 0 for obj in self: @@ -489,7 +590,7 @@ class Results(object): else: # A fast query. Just count the rows. - return len(self.rows) + return self._row_count def __nonzero__(self): """Does this result contain any objects? @@ -500,13 +601,18 @@ class Results(object): """Get the nth item in this result set. This is inefficient: all items up to n are materialized and thrown away. """ + if not self._rows and not self.sort: + # Fully materialized and already in order. Just look up the + # object. 
+            return self._objects[n]
+
         it = iter(self)
         try:
             for i in range(n):
-                it.next()
-            return it.next()
+                next(it)
+            return next(it)
         except StopIteration:
-            raise IndexError('result index {0} out of range'.format(n))
+            raise IndexError(u'result index {0} out of range'.format(n))

     def get(self):
         """Return the first matching object, or None if no objects
@@ -514,7 +620,7 @@
         """
         it = iter(self)
         try:
-            return it.next()
+            return next(it)
         except StopIteration:
             return None

@@ -604,7 +710,6 @@
             self._make_table(model_cls._table, model_cls._fields)
             self._make_attribute_table(model_cls._flex_table)

-
     # Primitive access control: connections and transactions.

     def _connection(self):
@@ -644,7 +749,6 @@
         """
         return Transaction(self)

-
     # Schema setup and migration.

     def _make_table(self, table, fields):
@@ -698,27 +802,33 @@
             ON {0} (entity_id);
             """.format(flex_table))

-
     # Querying.

-    def _fetch(self, model_cls, query, order_by=None):
+    def _fetch(self, model_cls, query=None, sort=None):
         """Fetch the objects of type `model_cls` matching the given
         query. The query may be given as a string, string sequence, a
-        Query object, or None (to fetch everything). If provided,
-        `order_by` is a SQLite ORDER BY clause for sorting.
+        Query object, or None (to fetch everything). `sort` is a
+        `Sort` object.
         """
+        query = query or TrueQuery()  # A null query.
+        sort = sort or NullSort()     # Unsorted.
where, subvals = query.clause() + order_by = sort.order_clause() - sql = "SELECT * FROM {0} WHERE {1}".format( + sql = ("SELECT * FROM {0} WHERE {1} {2}").format( model_cls._table, where or '1', + "ORDER BY {0}".format(order_by) if order_by else '', ) - if order_by: - sql += " ORDER BY {0}".format(order_by) + with self.transaction() as tx: rows = tx.query(sql, subvals) - return Results(model_cls, rows, self, None if where else query) + return Results( + model_cls, rows, self, + None if where else query, # Slow query component. + sort if sort.is_slow() else None, # Slow sort component. + ) def _get(self, model_cls, id): """Get a Model object by its id or None if the id does not diff --git a/libs/beets/dbcore/query.py b/libs/beets/dbcore/query.py index 4c888302..caf38026 100644 --- a/libs/beets/dbcore/query.py +++ b/libs/beets/dbcore/query.py @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2014, Adrian Sampson. +# Copyright 2016, Adrian Sampson. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -14,9 +15,45 @@ """The Query type hierarchy for DBCore. """ +from __future__ import division, absolute_import, print_function + import re +from operator import mul from beets import util from datetime import datetime, timedelta +import unicodedata +from functools import reduce + + +class ParsingError(ValueError): + """Abstract class for any unparseable user-requested album/query + specification. + """ + + +class InvalidQueryError(ParsingError): + """Represent any kind of invalid query. + + The query should be a unicode string or a list, which will be space-joined. 
+ """ + def __init__(self, query, explanation): + if isinstance(query, list): + query = " ".join(query) + message = u"'{0}': {1}".format(query, explanation) + super(InvalidQueryError, self).__init__(message) + + +class InvalidQueryArgumentTypeError(ParsingError): + """Represent a query argument that could not be converted as expected. + + It exists to be caught in upper stack levels so a meaningful (i.e. with the + query) InvalidQueryError can be raised. + """ + def __init__(self, what, expected, detail=None): + message = u"'{0}' is not {1}".format(what, expected) + if detail: + message = u"{0}: {1}".format(message, detail) + super(InvalidQueryArgumentTypeError, self).__init__(message) class Query(object): @@ -24,9 +61,8 @@ class Query(object): """ def clause(self): """Generate an SQLite expression implementing the query. - Return a clause string, a sequence of substitution values for - the clause, and a Query object representing the "remainder" - Returns (clause, subvals) where clause is a valid sqlite + + Return (clause, subvals) where clause is a valid sqlite WHERE clause implementing the query and subvals is a list of items to be substituted for ?s in the clause. 
""" @@ -38,6 +74,15 @@ class Query(object): """ raise NotImplementedError + def __repr__(self): + return "{0.__class__.__name__}()".format(self) + + def __eq__(self, other): + return type(self) == type(other) + + def __hash__(self): + return 0 + class FieldQuery(Query): """An abstract query that searches in a specific field for a @@ -71,6 +116,17 @@ class FieldQuery(Query): def match(self, item): return self.value_match(self.pattern, item.get(self.field)) + def __repr__(self): + return ("{0.__class__.__name__}({0.field!r}, {0.pattern!r}, " + "{0.fast})".format(self)) + + def __eq__(self, other): + return super(FieldQuery, self).__eq__(other) and \ + self.field == other.field and self.pattern == other.pattern + + def __hash__(self): + return hash((self.field, hash(self.pattern))) + class MatchQuery(FieldQuery): """A query that looks for exact matches in an item field.""" @@ -82,6 +138,25 @@ class MatchQuery(FieldQuery): return pattern == value +class NoneQuery(FieldQuery): + + def __init__(self, field, fast=True): + super(NoneQuery, self).__init__(field, None, fast) + + def col_clause(self): + return self.field + " IS NULL", () + + @classmethod + def match(cls, item): + try: + return item[cls.field] is None + except KeyError: + return True + + def __repr__(self): + return "{0.__class__.__name__}({0.field!r}, {0.fast})".format(self) + + class StringFieldQuery(FieldQuery): """A FieldQuery that converts values to strings before matching them. @@ -104,8 +179,11 @@ class StringFieldQuery(FieldQuery): class SubstringQuery(StringFieldQuery): """A query that matches a substring in a specific item field.""" def col_clause(self): - search = '%' + (self.pattern.replace('\\','\\\\').replace('%','\\%') - .replace('_','\\_')) + '%' + pattern = (self.pattern + .replace('\\', '\\\\') + .replace('%', '\\%') + .replace('_', '\\_')) + search = '%' + pattern + '%' clause = self.field + " like ? 
escape '\\'" subvals = [search] return clause, subvals @@ -118,15 +196,31 @@ class SubstringQuery(StringFieldQuery): class RegexpQuery(StringFieldQuery): """A query that matches a regular expression in a specific item field. + + Raises InvalidQueryError when the pattern is not a valid regular + expression. """ + def __init__(self, field, pattern, fast=True): + super(RegexpQuery, self).__init__(field, pattern, fast) + pattern = self._normalize(pattern) + try: + self.pattern = re.compile(self.pattern) + except re.error as exc: + # Invalid regular expression. + raise InvalidQueryArgumentTypeError(pattern, + u"a regular expression", + format(exc)) + + @staticmethod + def _normalize(s): + """Normalize a Unicode string's representation (used on both + patterns and matched values). + """ + return unicodedata.normalize('NFC', s) + @classmethod def string_match(cls, pattern, value): - try: - res = re.search(pattern, value) - except re.error: - # Invalid regular expression. - return False - return res is not None + return pattern.search(cls._normalize(value)) is not None class BooleanQuery(MatchQuery): @@ -142,7 +236,7 @@ class BooleanQuery(MatchQuery): class BytesQuery(MatchQuery): """Match a raw bytes field (i.e., a path). This is a necessary hack - to work around the `sqlite3` module's desire to treat `str` and + to work around the `sqlite3` module's desire to treat `bytes` and `unicode` equivalently in Python 2. Always use this query instead of `MatchQuery` when matching on BLOB values. """ @@ -170,19 +264,26 @@ class NumericQuery(FieldQuery): """Matches numeric fields. A syntax using Ruby-style range ellipses (``..``) lets users specify one- or two-sided ranges. For example, ``year:2001..`` finds music released since the turn of the century. + + Raises InvalidQueryError when the pattern does not represent an int or + a float. """ def _convert(self, s): - """Convert a string to a numeric type (float or int). If the - string cannot be converted, return None. 
+ """Convert a string to a numeric type (float or int). + + Return None if `s` is empty. + Raise an InvalidQueryError if the string cannot be converted. """ # This is really just a bit of fun premature optimization. + if not s: + return None try: return int(s) except ValueError: try: return float(s) except ValueError: - return None + raise InvalidQueryArgumentTypeError(s, u"an int or a float") def __init__(self, field, pattern, fast=True): super(NumericQuery, self).__init__(field, pattern, fast) @@ -200,7 +301,9 @@ class NumericQuery(FieldQuery): self.rangemax = self._convert(parts[1]) def match(self, item): - value = getattr(item, self.field) + if self.field not in item: + return False + value = item[self.field] if isinstance(value, basestring): value = self._convert(value) @@ -225,7 +328,7 @@ class NumericQuery(FieldQuery): elif self.rangemax is not None: return u'{0} <= ?'.format(self.field), (self.rangemax,) else: - return '1', () + return u'1', () class CollectionQuery(Query): @@ -236,17 +339,21 @@ class CollectionQuery(Query): self.subqueries = subqueries # Act like a sequence. + def __len__(self): return len(self.subqueries) + def __getitem__(self, key): return self.subqueries[key] + def __iter__(self): return iter(self.subqueries) + def __contains__(self, item): return item in self.subqueries def clause_with_joiner(self, joiner): - """Returns a clause created by joining together the clauses of + """Return a clause created by joining together the clauses of all subqueries with the string joiner (padded by spaces). """ clause_parts = [] @@ -261,6 +368,19 @@ class CollectionQuery(Query): clause = (' ' + joiner + ' ').join(clause_parts) return clause, subvals + def __repr__(self): + return "{0.__class__.__name__}({0.subqueries!r})".format(self) + + def __eq__(self, other): + return super(CollectionQuery, self).__eq__(other) and \ + self.subqueries == other.subqueries + + def __hash__(self): + """Since subqueries are mutable, this object should not be hashable. 
+ However and for conveniences purposes, it can be hashed. + """ + return reduce(mul, map(hash, self.subqueries), 1) + class AnyFieldQuery(CollectionQuery): """A query that matches if a given FieldQuery subclass matches in @@ -286,6 +406,17 @@ class AnyFieldQuery(CollectionQuery): return True return False + def __repr__(self): + return ("{0.__class__.__name__}({0.pattern!r}, {0.fields!r}, " + "{0.query_class.__name__})".format(self)) + + def __eq__(self, other): + return super(AnyFieldQuery, self).__eq__(other) and \ + self.query_class == other.query_class + + def __hash__(self): + return hash((self.pattern, tuple(self.fields), self.query_class)) + class MutableCollectionQuery(CollectionQuery): """A collection query whose subqueries may be modified after the @@ -316,6 +447,36 @@ class OrQuery(MutableCollectionQuery): return any([q.match(item) for q in self.subqueries]) +class NotQuery(Query): + """A query that matches the negation of its `subquery`, as a shorcut for + performing `not(subquery)` without using regular expressions. + """ + def __init__(self, subquery): + self.subquery = subquery + + def clause(self): + clause, subvals = self.subquery.clause() + if clause: + return 'not ({0})'.format(clause), subvals + else: + # If there is no clause, there is nothing to negate. All the logic + # is handled by match() for slow queries. + return clause, subvals + + def match(self, item): + return not self.subquery.match(item) + + def __repr__(self): + return "{0.__class__.__name__}({0.subquery!r})".format(self) + + def __eq__(self, other): + return super(NotQuery, self).__eq__(other) and \ + self.subquery == other.subquery + + def __hash__(self): + return hash(('not', hash(self.subquery))) + + class TrueQuery(Query): """A query that always matches.""" def clause(self): @@ -334,21 +495,15 @@ class FalseQuery(Query): return False - # Time/date queries. - def _to_epoch_time(date): """Convert a `datetime` object to an integer number of seconds since the (local) Unix epoch. 
""" epoch = datetime.fromtimestamp(0) delta = date - epoch - try: - return int(delta.total_seconds()) - except AttributeError: - # datetime.timedelta.total_seconds() is not available on Python 2.6 - return delta.seconds + delta.days * 24 * 3600 + return int(delta.total_seconds()) def _parse_periods(pattern): @@ -380,7 +535,7 @@ class Period(object): precision (a string, one of "year", "month", or "day"). """ if precision not in Period.precisions: - raise ValueError('Invalid precision ' + str(precision)) + raise ValueError(u'Invalid precision {0}'.format(precision)) self.date = date self.precision = precision @@ -393,10 +548,14 @@ class Period(object): return None ordinal = string.count('-') if ordinal >= len(cls.date_formats): - raise ValueError('date is not in one of the formats ' - + ', '.join(cls.date_formats)) + # Too many components. + return None date_format = cls.date_formats[ordinal] - date = datetime.strptime(string, date_format) + try: + date = datetime.strptime(string, date_format) + except ValueError: + # Parsing failed. + return None precision = cls.precisions[ordinal] return cls(date, precision) @@ -416,7 +575,7 @@ class Period(object): elif 'day' == precision: return date + timedelta(days=1) else: - raise ValueError('unhandled precision ' + str(precision)) + raise ValueError(u'unhandled precision {0}'.format(precision)) class DateInterval(object): @@ -428,7 +587,7 @@ class DateInterval(object): def __init__(self, start, end): if start is not None and end is not None and not start < end: - raise ValueError("start date {0} is not before end date {1}" + raise ValueError(u"start date {0} is not before end date {1}" .format(start, end)) self.start = start self.end = end @@ -449,7 +608,7 @@ class DateInterval(object): return True def __str__(self): - return'[{0}, {1})'.format(self.start, self.end) + return '[{0}, {1})'.format(self.start, self.end) class DateQuery(FieldQuery): @@ -492,3 +651,208 @@ class DateQuery(FieldQuery): # Match any date. 
clause = '1' return clause, subvals + + +class DurationQuery(NumericQuery): + """NumericQuery that allow human-friendly (M:SS) time interval formats. + + Converts the range(s) to a float value, and delegates on NumericQuery. + + Raises InvalidQueryError when the pattern does not represent an int, float + or M:SS time interval. + """ + def _convert(self, s): + """Convert a M:SS or numeric string to a float. + + Return None if `s` is empty. + Raise an InvalidQueryError if the string cannot be converted. + """ + if not s: + return None + try: + return util.raw_seconds_short(s) + except ValueError: + try: + return float(s) + except ValueError: + raise InvalidQueryArgumentTypeError( + s, + u"a M:SS string or a float") + + +# Sorting. + +class Sort(object): + """An abstract class representing a sort operation for a query into + the item database. + """ + + def order_clause(self): + """Generates a SQL fragment to be used in a ORDER BY clause, or + None if no fragment is used (i.e., this is a slow sort). + """ + return None + + def sort(self, items): + """Sort the list of objects and return a list. + """ + return sorted(items) + + def is_slow(self): + """Indicate whether this query is *slow*, meaning that it cannot + be executed in SQL and must be executed in Python. + """ + return False + + def __hash__(self): + return 0 + + def __eq__(self, other): + return type(self) == type(other) + + +class MultipleSort(Sort): + """Sort that encapsulates multiple sub-sorts. + """ + + def __init__(self, sorts=None): + self.sorts = sorts or [] + + def add_sort(self, sort): + self.sorts.append(sort) + + def _sql_sorts(self): + """Return the list of sub-sorts for which we can be (at least + partially) fast. + + A contiguous suffix of fast (SQL-capable) sub-sorts are + executable in SQL. The remaining, even if they are fast + independently, must be executed slowly. 
+ """ + sql_sorts = [] + for sort in reversed(self.sorts): + if not sort.order_clause() is None: + sql_sorts.append(sort) + else: + break + sql_sorts.reverse() + return sql_sorts + + def order_clause(self): + order_strings = [] + for sort in self._sql_sorts(): + order = sort.order_clause() + order_strings.append(order) + + return ", ".join(order_strings) + + def is_slow(self): + for sort in self.sorts: + if sort.is_slow(): + return True + return False + + def sort(self, items): + slow_sorts = [] + switch_slow = False + for sort in reversed(self.sorts): + if switch_slow: + slow_sorts.append(sort) + elif sort.order_clause() is None: + switch_slow = True + slow_sorts.append(sort) + else: + pass + + for sort in slow_sorts: + items = sort.sort(items) + return items + + def __repr__(self): + return 'MultipleSort({!r})'.format(self.sorts) + + def __hash__(self): + return hash(tuple(self.sorts)) + + def __eq__(self, other): + return super(MultipleSort, self).__eq__(other) and \ + self.sorts == other.sorts + + +class FieldSort(Sort): + """An abstract sort criterion that orders by a specific field (of + any kind). + """ + def __init__(self, field, ascending=True, case_insensitive=True): + self.field = field + self.ascending = ascending + self.case_insensitive = case_insensitive + + def sort(self, objs): + # TODO: Conversion and null-detection here. In Python 3, + # comparisons with None fail. We should also support flexible + # attributes with different types without falling over. 
+ + def key(item): + field_val = item.get(self.field, '') + if self.case_insensitive and isinstance(field_val, unicode): + field_val = field_val.lower() + return field_val + + return sorted(objs, key=key, reverse=not self.ascending) + + def __repr__(self): + return '<{0}: {1}{2}>'.format( + type(self).__name__, + self.field, + '+' if self.ascending else '-', + ) + + def __hash__(self): + return hash((self.field, self.ascending)) + + def __eq__(self, other): + return super(FieldSort, self).__eq__(other) and \ + self.field == other.field and \ + self.ascending == other.ascending + + +class FixedFieldSort(FieldSort): + """Sort object to sort on a fixed field. + """ + def order_clause(self): + order = "ASC" if self.ascending else "DESC" + if self.case_insensitive: + field = '(CASE ' \ + 'WHEN TYPEOF({0})="text" THEN LOWER({0}) ' \ + 'WHEN TYPEOF({0})="blob" THEN LOWER({0}) ' \ + 'ELSE {0} END)'.format(self.field) + else: + field = self.field + return "{0} {1}".format(field, order) + + +class SlowFieldSort(FieldSort): + """A sort criterion by some model field other than a fixed field: + i.e., a computed or flexible field. + """ + def is_slow(self): + return True + + +class NullSort(Sort): + """No sorting. Leave results unsorted.""" + def sort(self, items): + return items + + def __nonzero__(self): + return self.__bool__() + + def __bool__(self): + return False + + def __eq__(self, other): + return type(self) == type(other) or other is None + + def __hash__(self): + return 0 diff --git a/libs/beets/dbcore/queryparse.py b/libs/beets/dbcore/queryparse.py new file mode 100644 index 00000000..bc9cc77e --- /dev/null +++ b/libs/beets/dbcore/queryparse.py @@ -0,0 +1,250 @@ +# -*- coding: utf-8 -*- +# This file is part of beets. +# Copyright 2016, Adrian Sampson. 
+# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. + +"""Parsing of strings into DBCore queries. +""" +from __future__ import division, absolute_import, print_function + +import re +import itertools +from . import query +import beets + +PARSE_QUERY_PART_REGEX = re.compile( + # Non-capturing optional segment for the keyword. + r'(-|\^)?' # Negation prefixes. + + r'(?:' + r'(\S+?)' # The field key. + r'(? `(None, 'stapler', SubstringQuery, False)` + - `'color:red'` -> `('color', 'red', SubstringQuery, False)` + - `':^Quiet'` -> `(None, '^Quiet', RegexpQuery, False)`, because + the `^` follows the `:` + - `'color::b..e'` -> `('color', 'b..e', RegexpQuery, False)` + - `'-color:red'` -> `('color', 'red', SubstringQuery, True)` + """ + # Apply the regular expression and extract the components. + part = part.strip() + match = PARSE_QUERY_PART_REGEX.match(part) + + assert match # Regex should always match + negate = bool(match.group(1)) + key = match.group(2) + term = match.group(3).replace('\:', ':') + + # Check whether there's a prefix in the query and use the + # corresponding query type. + for pre, query_class in prefixes.items(): + if term.startswith(pre): + return key, term[len(pre):], query_class, negate + + # No matching prefix, so use either the query class determined by + # the field or the default as a fallback. 
+ query_class = query_classes.get(key, default_class) + return key, term, query_class, negate + + +def construct_query_part(model_cls, prefixes, query_part): + """Parse a *query part* string and return a :class:`Query` object. + + :param model_cls: The :class:`Model` class that this is a query for. + This is used to determine the appropriate query types for the + model's fields. + :param prefixes: A map from prefix strings to :class:`Query` types. + :param query_part: The string to parse. + + See the documentation for `parse_query_part` for more information on + query part syntax. + """ + # A shortcut for empty query parts. + if not query_part: + return query.TrueQuery() + + # Use `model_cls` to build up a map from field names to `Query` + # classes. + query_classes = {} + for k, t in itertools.chain(model_cls._fields.items(), + model_cls._types.items()): + query_classes[k] = t.query + + # Parse the string. + key, pattern, query_class, negate = \ + parse_query_part(query_part, query_classes, prefixes) + + # If there's no key (field name) specified, this is a "match + # anything" query. + if key is None: + if issubclass(query_class, query.FieldQuery): + # The query type matches a specific field, but none was + # specified. So we use a version of the query that matches + # any field. + q = query.AnyFieldQuery(pattern, model_cls._search_fields, + query_class) + if negate: + return query.NotQuery(q) + else: + return q + else: + # Non-field query type. + if negate: + return query.NotQuery(query_class(pattern)) + else: + return query_class(pattern) + + # Otherwise, this must be a `FieldQuery`. Use the field name to + # construct the query object. + key = key.lower() + q = query_class(key.lower(), pattern, key in model_cls._fields) + if negate: + return query.NotQuery(q) + return q + + +def query_from_strings(query_cls, model_cls, prefixes, query_parts): + """Creates a collection query of type `query_cls` from a list of + strings in the format used by parse_query_part. 
`model_cls` + determines how queries are constructed from strings. + """ + subqueries = [] + for part in query_parts: + subqueries.append(construct_query_part(model_cls, prefixes, part)) + if not subqueries: # No terms in query. + subqueries = [query.TrueQuery()] + return query_cls(subqueries) + + +def construct_sort_part(model_cls, part): + """Create a `Sort` from a single string criterion. + + `model_cls` is the `Model` being queried. `part` is a single string + ending in ``+`` or ``-`` indicating the sort. + """ + assert part, "part must be a field name and + or -" + field = part[:-1] + assert field, "field is missing" + direction = part[-1] + assert direction in ('+', '-'), "part must end with + or -" + is_ascending = direction == '+' + + case_insensitive = beets.config['sort_case_insensitive'].get(bool) + if field in model_cls._sorts: + sort = model_cls._sorts[field](model_cls, is_ascending, + case_insensitive) + elif field in model_cls._fields: + sort = query.FixedFieldSort(field, is_ascending, case_insensitive) + else: + # Flexible or computed. + sort = query.SlowFieldSort(field, is_ascending, case_insensitive) + return sort + + +def sort_from_strings(model_cls, sort_parts): + """Create a `Sort` from a list of sort criteria (strings). + """ + if not sort_parts: + sort = query.NullSort() + elif len(sort_parts) == 1: + sort = construct_sort_part(model_cls, sort_parts[0]) + else: + sort = query.MultipleSort() + for part in sort_parts: + sort.add_sort(construct_sort_part(model_cls, part)) + return sort + + +def parse_sorted_query(model_cls, parts, prefixes={}): + """Given a list of strings, create the `Query` and `Sort` that they + represent. + """ + # Separate query token and sort token. 
+ query_parts = [] + sort_parts = [] + + # Split up query in to comma-separated subqueries, each representing + # an AndQuery, which need to be joined together in one OrQuery + subquery_parts = [] + for part in parts + [u',']: + if part.endswith(u','): + # Ensure we can catch "foo, bar" as well as "foo , bar" + last_subquery_part = part[:-1] + if last_subquery_part: + subquery_parts.append(last_subquery_part) + # Parse the subquery in to a single AndQuery + # TODO: Avoid needlessly wrapping AndQueries containing 1 subquery? + query_parts.append(query_from_strings( + query.AndQuery, model_cls, prefixes, subquery_parts + )) + del subquery_parts[:] + else: + # Sort parts (1) end in + or -, (2) don't have a field, and + # (3) consist of more than just the + or -. + if part.endswith((u'+', u'-')) \ + and u':' not in part \ + and len(part) > 1: + sort_parts.append(part) + else: + subquery_parts.append(part) + + # Avoid needlessly wrapping single statements in an OR + q = query.OrQuery(query_parts) if len(query_parts) > 1 else query_parts[0] + s = sort_from_strings(model_cls, sort_parts) + return q, s diff --git a/libs/beets/dbcore/types.py b/libs/beets/dbcore/types.py index 165c0b60..2726969d 100644 --- a/libs/beets/dbcore/types.py +++ b/libs/beets/dbcore/types.py @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2014, Adrian Sampson. +# Copyright 2016, Adrian Sampson. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -14,59 +15,117 @@ """Representation of type information for DBCore model fields. """ +from __future__ import division, absolute_import, print_function + from . import query from beets.util import str2bool - # Abstract base. - class Type(object): """An object encapsulating the type of a model field. Includes - information about how to store the value in the database, query, - format, and parse a given field. 
+ information about how to store, query, format, and parse a given + field. """ - sql = None + sql = u'TEXT' """The SQLite column type for the value. """ - query = None + query = query.SubstringQuery """The `Query` subclass to be used when querying the field. """ + model_type = unicode + """The Python type that is used to represent the value in the model. + + The model is guaranteed to return a value of this type if the field + is accessed. To this end, the constructor is used by the `normalize` + and `from_sql` methods and the `default` property. + """ + + @property + def null(self): + """The value to be exposed when the underlying value is None. + """ + return self.model_type() + def format(self, value): """Given a value of this type, produce a Unicode string representing the value. This is used in template evaluation. """ - raise NotImplementedError() + if value is None: + value = self.null + # `self.null` might be `None` + if value is None: + value = u'' + if isinstance(value, bytes): + value = value.decode('utf8', 'ignore') + + return unicode(value) def parse(self, string): """Parse a (possibly human-written) string and return the indicated value of this type. """ - raise NotImplementedError() + try: + return self.model_type(string) + except ValueError: + return self.null + def normalize(self, value): + """Given a value that will be assigned into a field of this + type, normalize the value to have the appropriate type. This + base implementation only reinterprets `None`. + """ + if value is None: + return self.null + else: + # TODO This should eventually be replaced by + # `self.model_type(value)` + return value + + def from_sql(self, sql_value): + """Receives the value stored in the SQL backend and return the + value to be stored in the model. + + For fixed fields the type of `value` is determined by the column + type affinity given in the `sql` property and the SQL to Python + mapping of the database adapter. 
For more information see: + http://www.sqlite.org/datatype3.html + https://docs.python.org/2/library/sqlite3.html#sqlite-and-python-types + + Flexible fields have the type affinity `TEXT`. This means the + `sql_value` is either a `buffer` or a `unicode` object` and the + method must handle these in addition. + """ + if isinstance(sql_value, buffer): + sql_value = bytes(sql_value).decode('utf8', 'ignore') + if isinstance(sql_value, unicode): + return self.parse(sql_value) + else: + return self.normalize(sql_value) + + def to_sql(self, model_value): + """Convert a value as stored in the model object to a value used + by the database adapter. + """ + return model_value # Reusable types. +class Default(Type): + null = None + class Integer(Type): """A basic integer type. """ sql = u'INTEGER' query = query.NumericQuery - - def format(self, value): - return unicode(value or 0) - - def parse(self, string): - try: - return int(string) - except ValueError: - return 0 + model_type = int class PaddedInt(Integer): @@ -93,9 +152,14 @@ class ScaledInt(Integer): class Id(Integer): - """An integer used as the row key for a SQLite table. + """An integer used as the row id or a foreign key in a SQLite table. + This type is nullable: None values are not translated to zero. """ - sql = u'INTEGER PRIMARY KEY' + null = None + + def __init__(self, primary=True): + if primary: + self.sql = u'INTEGER PRIMARY KEY' class Float(Type): @@ -103,15 +167,16 @@ class Float(Type): """ sql = u'REAL' query = query.NumericQuery + model_type = float def format(self, value): return u'{0:.1f}'.format(value or 0.0) - def parse(self, string): - try: - return float(string) - except ValueError: - return 0.0 + +class NullFloat(Float): + """Same as `Float`, but does not normalize `None` to `0.0`. 
+ """ + null = None class String(Type): @@ -120,21 +185,27 @@ class String(Type): sql = u'TEXT' query = query.SubstringQuery - def format(self, value): - return unicode(value) if value else u'' - - def parse(self, string): - return string - class Boolean(Type): """A boolean type. """ sql = u'INTEGER' query = query.BooleanQuery + model_type = bool def format(self, value): return unicode(bool(value)) def parse(self, string): return str2bool(string) + + +# Shared instances of common types. +DEFAULT = Default() +INTEGER = Integer() +PRIMARY_ID = Id(True) +FOREIGN_ID = Id(False) +FLOAT = Float() +NULL_FLOAT = NullFloat() +STRING = String() +BOOLEAN = Boolean() diff --git a/libs/beets/importer.py b/libs/beets/importer.py index f997770c..bfaa21a0 100644 --- a/libs/beets/importer.py +++ b/libs/beets/importer.py @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2013, Adrian Sampson. +# Copyright 2016, Adrian Sampson. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -12,40 +13,51 @@ # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. +from __future__ import division, absolute_import, print_function + """Provides the basic, interface-agnostic workflow for importing and autotagging music files. 
""" -from __future__ import print_function import os -import logging +import re import pickle import itertools from collections import defaultdict +from tempfile import mkdtemp +from bisect import insort, bisect_left +from contextlib import contextmanager +import shutil +import time +from beets import logging from beets import autotag from beets import library from beets import dbcore from beets import plugins from beets import util from beets import config -from beets.util import pipeline +from beets.util import pipeline, sorted_walk, ancestry from beets.util import syspath, normpath, displayable_path -from beets.util.enumeration import enum +from enum import Enum from beets import mediafile -action = enum( - 'SKIP', 'ASIS', 'TRACKS', 'MANUAL', 'APPLY', 'MANUAL_ID', - 'ALBUMS', name='action' -) +action = Enum('action', + ['SKIP', 'ASIS', 'TRACKS', 'MANUAL', 'APPLY', 'MANUAL_ID', + 'ALBUMS', 'RETAG']) +# The RETAG action represents "don't apply any match, but do record +# new metadata". It's not reachable via the standard command prompt but +# can be used by plugins. QUEUE_SIZE = 128 SINGLE_ARTIST_THRESH = 0.25 -VARIOUS_ARTISTS = u'Various Artists' +PROGRESS_KEY = 'tagprogress' +HISTORY_KEY = 'taghistory' # Global logger. log = logging.getLogger('beets') + class ImportAbort(Exception): """Raised when the user aborts the tagging operation. """ @@ -54,151 +66,91 @@ class ImportAbort(Exception): # Utilities. -def _duplicate_check(lib, task): - """Check whether an album already exists in the library. Returns a - list of Album objects (empty if no duplicates are found). - """ - assert task.choice_flag in (action.ASIS, action.APPLY) - artist, album = task.chosen_ident() - - if artist is None: - # As-is import with no artist. Skip check. 
- return [] - - found_albums = [] - cur_paths = set(i.path for i in task.items if i) - for album_cand in lib.albums(dbcore.MatchQuery('albumartist', artist)): - if album_cand.album == album: - # Check whether the album is identical in contents, in which - # case it is not a duplicate (will be replaced). - other_paths = set(i.path for i in album_cand.items()) - if other_paths == cur_paths: - continue - found_albums.append(album_cand) - return found_albums - -def _item_duplicate_check(lib, task): - """Check whether an item already exists in the library. Returns a - list of Item objects. - """ - assert task.choice_flag in (action.ASIS, action.APPLY) - artist, title = task.chosen_ident() - - found_items = [] - query = dbcore.AndQuery(( - dbcore.MatchQuery('artist', artist), - dbcore.MatchQuery('title', title), - )) - for other_item in lib.items(query): - # Existing items not considered duplicates. - if other_item.path == task.item.path: - continue - found_items.append(other_item) - return found_items - -def _infer_album_fields(task): - """Given an album and an associated import task, massage the - album-level metadata. This ensures that the album artist is set - and that the "compilation" flag is set automatically. - """ - assert task.is_album - assert task.items - - changes = {} - - if task.choice_flag == action.ASIS: - # Taking metadata "as-is". Guess whether this album is VA. - plur_albumartist, freq = util.plurality( - [i.albumartist or i.artist for i in task.items]) - if freq == len(task.items) or (freq > 1 and - float(freq) / len(task.items) >= SINGLE_ARTIST_THRESH): - # Single-artist album. - changes['albumartist'] = plur_albumartist - changes['comp'] = False - else: - # VA. - changes['albumartist'] = VARIOUS_ARTISTS - changes['comp'] = True - - elif task.choice_flag == action.APPLY: - # Applying autotagged metadata. Just get AA from the first - # item. 
- for item in task.items: - if item is not None: - first_item = item - break - else: - assert False, "all items are None" - if not first_item.albumartist: - changes['albumartist'] = first_item.artist - if not first_item.mb_albumartistid: - changes['mb_albumartistid'] = first_item.mb_artistid - - else: - assert False - - # Apply new metadata. - for item in task.items: - if item is not None: - for k, v in changes.iteritems(): - setattr(item, k, v) - -def _resume(): - """Check whether an import should resume and return a boolean or the - string 'ask' indicating that the user should be queried. - """ - return config['import']['resume'].as_choice([True, False, 'ask']) - def _open_state(): """Reads the state file, returning a dictionary.""" try: with open(config['statefile'].as_filename()) as f: return pickle.load(f) - except (IOError, EOFError): + except Exception as exc: + # The `pickle` module can emit all sorts of exceptions during + # unpickling, including ImportError. We use a catch-all + # exception to avoid enumerating them all (the docs don't even have a + # full list!). + log.debug(u'state file could not be read: {0}', exc) return {} + + def _save_state(state): """Writes the state dictionary out to disk.""" try: with open(config['statefile'].as_filename(), 'w') as f: pickle.dump(state, f) except IOError as exc: - log.error(u'state file could not be written: %s' % unicode(exc)) + log.error(u'state file could not be written: {0}', exc) # Utilities for reading and writing the beets progress file, which # allows long tagging tasks to be resumed when they pause (or crash). -PROGRESS_KEY = 'tagprogress' -def progress_set(toppath, paths): - """Record that tagging for the given `toppath` was successful up to - `paths`. If paths is None, then clear the progress value (indicating - that the tagging completed). 
- """ + +def progress_read(): state = _open_state() - if PROGRESS_KEY not in state: - state[PROGRESS_KEY] = {} + return state.setdefault(PROGRESS_KEY, {}) - if paths is None: - # Remove progress from file. - if toppath in state[PROGRESS_KEY]: - del state[PROGRESS_KEY][toppath] - else: - state[PROGRESS_KEY][toppath] = paths +@contextmanager +def progress_write(): + state = _open_state() + progress = state.setdefault(PROGRESS_KEY, {}) + yield progress _save_state(state) -def progress_get(toppath): - """Get the last successfully tagged subpath of toppath. If toppath - has no progress information, returns None. + + +def progress_add(toppath, *paths): + """Record that the files under all of the `paths` have been imported + under `toppath`. """ - state = _open_state() - if PROGRESS_KEY not in state: - return None - return state[PROGRESS_KEY].get(toppath) + with progress_write() as state: + imported = state.setdefault(toppath, []) + for path in paths: + # Normally `progress_add` will be called with the path + # argument increasing. This is because of the ordering in + # `albums_in_dir`. We take advantage of that to make the + # code faster + if imported and imported[len(imported) - 1] <= path: + imported.append(path) + else: + insort(imported, path) + + +def progress_element(toppath, path): + """Return whether `path` has been imported in `toppath`. + """ + state = progress_read() + if toppath not in state: + return False + imported = state[toppath] + i = bisect_left(imported, path) + return i != len(imported) and imported[i] == path + + +def has_progress(toppath): + """Return `True` if there exist paths that have already been + imported under `toppath`. + """ + state = progress_read() + return toppath in state + + +def progress_reset(toppath): + with progress_write() as state: + if toppath in state: + del state[toppath] # Similarly, utilities for manipulating the "incremental" import log. 
# This keeps track of all directories that were ever imported, which # allows the importer to only import new stuff. -HISTORY_KEY = 'taghistory' + def history_add(paths): """Indicate that the import of the album in `paths` is completed and should not be repeated in incremental imports. @@ -210,6 +162,8 @@ def history_add(paths): state[HISTORY_KEY].add(tuple(paths)) _save_state(state) + + def history_get(): """Get the set of completed path tuples in incremental imports. """ @@ -225,27 +179,37 @@ class ImportSession(object): """Controls an import action. Subclasses should implement methods to communicate with the user or otherwise make decisions. """ - def __init__(self, lib, logfile, paths, query): - """Create a session. `lib` is a Library object. `logfile` is a - file-like object open for writing or None if no logging is to be - performed. Either `paths` or `query` is non-null and indicates + def __init__(self, lib, loghandler, paths, query): + """Create a session. `lib` is a Library object. `loghandler` is a + logging.Handler. Either `paths` or `query` is non-null and indicates the source of files to be imported. """ self.lib = lib - self.logfile = logfile + self.logger = self._setup_logging(loghandler) self.paths = paths self.query = query + self._is_resuming = dict() # Normalize the paths. if self.paths: - self.paths = map(normpath, self.paths) + self.paths = list(map(normpath, self.paths)) - def _amend_config(self): - """Make implied changes the importer configuration. + def _setup_logging(self, loghandler): + logger = logging.getLogger(__name__) + logger.propagate = False + if not loghandler: + loghandler = logging.NullHandler() + logger.handlers = [loghandler] + return logger + + def set_config(self, config): + """Set `config` property from global import config and make + implied changes. """ # FIXME: Maybe this function should not exist and should instead # provide "decision wrappers" like "should_resume()", etc. 
- iconfig = config['import'] + iconfig = dict(config) + self.config = iconfig # Incremental and progress are mutually exclusive. if iconfig['incremental']: @@ -257,43 +221,46 @@ class ImportSession(object): iconfig['resume'] = False iconfig['incremental'] = False - # Copy and move are mutually exclusive. + # Copy, move, and link are mutually exclusive. if iconfig['move']: iconfig['copy'] = False + iconfig['link'] = False + elif iconfig['link']: + iconfig['copy'] = False + iconfig['move'] = False # Only delete when copying. if not iconfig['copy']: iconfig['delete'] = False + self.want_resume = config['resume'].as_choice([True, False, 'ask']) + def tag_log(self, status, paths): - """Log a message about a given album to logfile. The status should - reflect the reason the album couldn't be tagged. + """Log a message about a given album to the importer log. The status + should reflect the reason the album couldn't be tagged. """ - if self.logfile: - print(u'{0} {1}'.format(status, displayable_path(paths)), - file=self.logfile) - self.logfile.flush() + self.logger.info(u'{0} {1}', status, displayable_path(paths)) def log_choice(self, task, duplicate=False): """Logs the task's current choice if it should be logged. If ``duplicate``, then this is a secondary choice after a duplicate was detected and a decision was made. """ - paths = task.paths if task.is_album else [task.item.path] + paths = task.paths if duplicate: # Duplicate: log all three choices (skip, keep both, and trump). - if task.remove_duplicates: - self.tag_log('duplicate-replace', paths) + if task.should_remove_duplicates: + self.tag_log(u'duplicate-replace', paths) elif task.choice_flag in (action.ASIS, action.APPLY): - self.tag_log('duplicate-keep', paths) + self.tag_log(u'duplicate-keep', paths) elif task.choice_flag is (action.SKIP): - self.tag_log('duplicate-skip', paths) + self.tag_log(u'duplicate-skip', paths) else: # Non-duplicate: log "skip" and "asis" choices. 
if task.choice_flag is action.ASIS: - self.tag_log('asis', paths) + self.tag_log(u'asis', paths) elif task.choice_flag is action.SKIP: - self.tag_log('skip', paths) + self.tag_log(u'skip', paths) def should_resume(self, path): raise NotImplementedError @@ -301,7 +268,7 @@ class ImportSession(object): def choose_match(self, task): raise NotImplementedError - def resolve_duplicate(self, task): + def resolve_duplicate(self, task, found_duplicates): raise NotImplementedError def choose_item(self, task): @@ -310,38 +277,44 @@ class ImportSession(object): def run(self): """Run the import task. """ - self._amend_config() + self.logger.info(u'import started {0}', time.asctime()) + self.set_config(config['import']) # Set up the pipeline. if self.query is None: stages = [read_tasks(self)] else: stages = [query_tasks(self)] - if config['import']['singletons']: - # Singleton importer. - if config['import']['autotag']: - stages += [item_lookup(self), item_query(self)] - else: - stages += [item_progress(self)] + + # In pretend mode, just log what would otherwise be imported. + if self.config['pretend']: + stages += [log_files(self)] else: - # Whole-album importer. - if config['import']['group_albums']: - # Split directory tasks into one task for each album + if self.config['group_albums'] and \ + not self.config['singletons']: + # Split directory tasks into one task for each album. stages += [group_albums(self)] - if config['import']['autotag']: - # Only look up and query the user when autotagging. - stages += [initial_lookup(self), user_query(self)] + + # These stages either talk to the user to get a decision or, + # in the case of a non-autotagged import, just choose to + # import everything as-is. In *both* cases, these stages + # also add the music to the library database, so later + # stages need to read and write data from there. + if self.config['autotag']: + stages += [lookup_candidates(self), user_query(self)] else: - # When not autotagging, just display progress. 
- stages += [show_progress(self)] - stages += [apply_choices(self)] - for stage_func in plugins.import_stages(): - stages.append(plugin_stage(self, stage_func)) - stages += [manipulate_files(self)] - stages += [finalize(self)] + stages += [import_asis(self)] + + # Plugin stages. + for stage_func in plugins.import_stages(): + stages.append(plugin_stage(self, stage_func)) + + stages += [manipulate_files(self)] + pl = pipeline.Pipeline(stages) # Run the pipeline. + plugins.send('import_begin', session=self) try: if config['threaded']: pl.run_parallel(QUEUE_SIZE) @@ -351,91 +324,132 @@ class ImportSession(object): # User aborted operation. Silently stop. pass + # Incremental and resumed imports + + def already_imported(self, toppath, paths): + """Returns true if the files belonging to this task have already + been imported in a previous session. + """ + if self.is_resuming(toppath) \ + and all(map(lambda p: progress_element(toppath, p), paths)): + return True + if self.config['incremental'] \ + and tuple(paths) in self.history_dirs: + return True + + return False + + @property + def history_dirs(self): + if not hasattr(self, '_history_dirs'): + self._history_dirs = history_get() + return self._history_dirs + + def is_resuming(self, toppath): + """Return `True` if user wants to resume import of this path. + + You have to call `ask_resume` first to determine the return value. + """ + return self._is_resuming.get(toppath, False) + + def ask_resume(self, toppath): + """If import of `toppath` was aborted in an earlier session, ask + user if she wants to resume the import. + + Determines the return value of `is_resuming(toppath)`. + """ + if self.want_resume and has_progress(toppath): + # Either accept immediately or prompt for input to decide. 
+ if self.want_resume is True or \ + self.should_resume(toppath): + log.warn(u'Resuming interrupted import of {0}', + util.displayable_path(toppath)) + self._is_resuming[toppath] = True + else: + # Clear progress; we're starting from the top. + progress_reset(toppath) + # The importer task class. -class ImportTask(object): - """Represents a single set of items to be imported along with its - intermediate state. May represent an album or a single item. - """ - def __init__(self, toppath=None, paths=None, items=None): +class BaseImportTask(object): + """An abstract base class for importer tasks. + + Tasks flow through the importer pipeline. Each stage can update + them. """ + def __init__(self, toppath, paths, items): + """Create a task. The primary fields that define a task are: + + * `toppath`: The user-specified base directory that contains the + music for this task. If the task has *no* user-specified base + (for example, when importing based on an -L query), this can + be None. This is used for tracking progress and history. + * `paths`: A list of *specific* paths where the music for this task + came from. These paths can be directories, when their entire + contents are being imported, or files, when the task comprises + individual tracks. This is used for progress/history tracking and + for displaying the task to the user. + * `items`: A list of `Item` objects representing the music being + imported. + + These fields should not change after initialization. + """ self.toppath = toppath self.paths = paths self.items = items - self.sentinel = False - self.remove_duplicates = False - self.is_album = True + + +class ImportTask(BaseImportTask): + """Represents a single set of items to be imported along with its + intermediate state. May represent an album or a single item. + + The import session and stages call the following methods in the + given order. + + * `lookup_candidates()` Sets the `common_artist`, `common_album`, + `candidates`, and `rec` attributes. 
`candidates` is a list of + `AlbumMatch` objects. + + * `choose_match()` Uses the session to set the `match` attribute + from the `candidates` list. + + * `find_duplicates()` Returns a list of albums from `lib` with the + same artist and album name as the task. + + * `apply_metadata()` Sets the attributes of the items from the + task's `match` attribute. + + * `add()` Add the imported items and album to the database. + + * `manipulate_files()` Copy, move, and write files depending on the + session configuration. + + * `finalize()` Update the import progress and cleanup the file + system. + """ + def __init__(self, toppath, paths, items): + super(ImportTask, self).__init__(toppath, paths, items) self.choice_flag = None - - @classmethod - def done_sentinel(cls, toppath): - """Create an ImportTask that indicates the end of a top-level - directory import. - """ - obj = cls(toppath) - obj.sentinel = True - return obj - - @classmethod - def progress_sentinel(cls, toppath, paths): - """Create a task indicating that a single directory in a larger - import has finished. This is only required for singleton - imports; progress is implied for album imports. - """ - obj = cls(toppath, paths) - obj.sentinel = True - return obj - - @classmethod - def item_task(cls, item): - """Creates an ImportTask for a single item.""" - obj = cls() - obj.item = item - obj.is_album = False - return obj - - def set_candidates(self, cur_artist, cur_album, candidates, rec): - """Sets the candidates for this album matched by the - `autotag.tag_album` method. - """ - assert self.is_album - assert not self.sentinel - self.cur_artist = cur_artist - self.cur_album = cur_album - self.candidates = candidates - self.rec = rec - - def set_null_candidates(self): - """Set the candidates to indicate no album match was found. 
- """ - self.cur_artist = None self.cur_album = None - self.candidates = None + self.cur_artist = None + self.candidates = [] self.rec = None - - def set_item_candidates(self, candidates, rec): - """Set the match for a single-item task.""" - assert not self.is_album - assert self.item is not None - self.candidates = candidates - self.rec = rec + self.should_remove_duplicates = False + self.is_album = True + self.search_ids = [] # user-supplied candidate IDs. def set_choice(self, choice): """Given an AlbumMatch or TrackMatch object or an action constant, indicates that an action has been selected for this task. """ - assert not self.sentinel # Not part of the task structure: assert choice not in (action.MANUAL, action.MANUAL_ID) assert choice != action.APPLY # Only used internally. - if choice in (action.SKIP, action.ASIS, action.TRACKS, action.ALBUMS): + if choice in (action.SKIP, action.ASIS, action.TRACKS, action.ALBUMS, + action.RETAG): self.choice_flag = choice self.match = None else: - if self.is_album: - assert isinstance(choice, autotag.AlbumMatch) - else: - assert isinstance(choice, autotag.TrackMatch) self.choice_flag = action.APPLY # Implicit choice. self.match = choice @@ -443,38 +457,24 @@ class ImportTask(object): """Updates the progress state to indicate that this album has finished. """ - if self.sentinel and self.paths is None: - # "Done" sentinel. - progress_set(self.toppath, None) - elif self.sentinel or self.is_album: - # "Directory progress" sentinel for singletons or a real - # album task, which implies the same. - progress_set(self.toppath, self.paths) + if self.toppath: + progress_add(self.toppath, *self.paths) def save_history(self): """Save the directory in the history for incremental imports. """ - if self.is_album and self.paths and not self.sentinel: + if self.paths: history_add(self.paths) - # Logical decisions. 
- def should_write_tags(self): - """Should new info be written to the files' metadata?""" - if self.choice_flag == action.APPLY: - return True - elif self.choice_flag in (action.ASIS, action.TRACKS, action.SKIP): - return False - else: - assert False - - def should_skip(self): - """After a choice has been made, returns True if this is a - sentinel or it has been marked for skipping. - """ - return self.sentinel or self.choice_flag == action.SKIP + @property + def apply(self): + return self.choice_flag == action.APPLY + @property + def skip(self): + return self.choice_flag == action.SKIP # Convenient data. @@ -482,37 +482,310 @@ class ImportTask(object): """Returns identifying metadata about the current choice. For albums, this is an (artist, album) pair. For items, this is (artist, title). May only be called when the choice flag is ASIS - (in which case the data comes from the files' current metadata) - or APPLY (data comes from the choice). + or RETAG (in which case the data comes from the files' current + metadata) or APPLY (data comes from the choice). """ - assert self.choice_flag in (action.ASIS, action.APPLY) - if self.is_album: - if self.choice_flag is action.ASIS: - return (self.cur_artist, self.cur_album) - elif self.choice_flag is action.APPLY: - return (self.match.info.artist, self.match.info.album) - else: - if self.choice_flag is action.ASIS: - return (self.item.artist, self.item.title) - elif self.choice_flag is action.APPLY: - return (self.match.info.artist, self.match.info.title) + if self.choice_flag in (action.ASIS, action.RETAG): + return (self.cur_artist, self.cur_album) + elif self.choice_flag is action.APPLY: + return (self.match.info.artist, self.match.info.album) def imported_items(self): """Return a list of Items that should be added to the library. - If this is an album task, return the list of items in the - selected match or everything if the choice is ASIS. If this is a - singleton task, return a list containing the item. 
+ + If the tasks applies an album match the method only returns the + matched items. + """ + if self.choice_flag in (action.ASIS, action.RETAG): + return list(self.items) + elif self.choice_flag == action.APPLY: + return self.match.mapping.keys() + else: + assert False + + def apply_metadata(self): + """Copy metadata from match info to the items. + """ + autotag.apply_metadata(self.match.info, self.match.mapping) + + def duplicate_items(self, lib): + duplicate_items = [] + for album in self.find_duplicates(lib): + duplicate_items += album.items() + return duplicate_items + + def remove_duplicates(self, lib): + duplicate_items = self.duplicate_items(lib) + log.debug(u'removing {0} old duplicated items', len(duplicate_items)) + for item in duplicate_items: + item.remove() + if lib.directory in util.ancestry(item.path): + log.debug(u'deleting duplicate {0}', + util.displayable_path(item.path)) + util.remove(item.path) + util.prune_dirs(os.path.dirname(item.path), + lib.directory) + + def finalize(self, session): + """Save progress, clean up files, and emit plugin event. + """ + # Update progress. + if session.want_resume: + self.save_progress() + if session.config['incremental']: + self.save_history() + + self.cleanup(copy=session.config['copy'], + delete=session.config['delete'], + move=session.config['move']) + + if not self.skip: + self._emit_imported(session.lib) + + def cleanup(self, copy=False, delete=False, move=False): + """Remove and prune imported paths. + """ + # Do not delete any files or prune directories when skipping. + if self.skip: + return + + items = self.imported_items() + + # When copying and deleting originals, delete old files. + if copy and delete: + new_paths = [os.path.realpath(item.path) for item in items] + for old_path in self.old_paths: + # Only delete files that were actually copied. 
+ if old_path not in new_paths: + util.remove(syspath(old_path), False) + self.prune(old_path) + + # When moving, prune empty directories containing the original files. + elif move: + for old_path in self.old_paths: + self.prune(old_path) + + def _emit_imported(self, lib): + plugins.send('album_imported', lib=lib, album=self.album) + + def handle_created(self, session): + """Send the `import_task_created` event for this task. Return a list of + tasks that should continue through the pipeline. By default, this is a + list containing only the task itself, but plugins can replace the task + with new ones. + """ + tasks = plugins.send('import_task_created', session=session, task=self) + if not tasks: + tasks = [self] + else: + # The plugins gave us a list of lists of tasks. Flatten it. + tasks = [t for inner in tasks for t in inner] + return tasks + + def lookup_candidates(self): + """Retrieve and store candidates for this album. User-specified + candidate IDs are stored in self.search_ids: if present, the + initial lookup is restricted to only those IDs. + """ + artist, album, candidates, recommendation = \ + autotag.tag_album(self.items, search_ids=self.search_ids) + self.cur_artist = artist + self.cur_album = album + self.candidates = candidates + self.rec = recommendation + + def find_duplicates(self, lib): + """Return a list of albums from `lib` with the same artist and + album name as the task. + """ + artist, album = self.chosen_ident() + + if artist is None: + # As-is import with no artist. Skip check. + return [] + + duplicates = [] + task_paths = set(i.path for i in self.items if i) + duplicate_query = dbcore.AndQuery(( + dbcore.MatchQuery('albumartist', artist), + dbcore.MatchQuery('album', album), + )) + + for album in lib.albums(duplicate_query): + # Check whether the album is identical in contents, in which + # case it is not a duplicate (will be replaced). 
+ album_paths = set(i.path for i in album.items()) + if album_paths != task_paths: + duplicates.append(album) + return duplicates + + def align_album_level_fields(self): + """Make some album fields equal across `self.items`. For the + RETAG action, we assume that the responsible for returning it + (ie. a plugin) always ensures that the first item contains + valid data on the relevant fields. + """ + changes = {} + + if self.choice_flag == action.ASIS: + # Taking metadata "as-is". Guess whether this album is VA. + plur_albumartist, freq = util.plurality( + [i.albumartist or i.artist for i in self.items] + ) + if freq == len(self.items) or \ + (freq > 1 and + float(freq) / len(self.items) >= SINGLE_ARTIST_THRESH): + # Single-artist album. + changes['albumartist'] = plur_albumartist + changes['comp'] = False + else: + # VA. + changes['albumartist'] = config['va_name'].get(unicode) + changes['comp'] = True + + elif self.choice_flag in (action.APPLY, action.RETAG): + # Applying autotagged metadata. Just get AA from the first + # item. + if not self.items[0].albumartist: + changes['albumartist'] = self.items[0].artist + if not self.items[0].mb_albumartistid: + changes['mb_albumartistid'] = self.items[0].mb_artistid + + # Apply new metadata. + for item in self.items: + item.update(changes) + + def manipulate_files(self, move=False, copy=False, write=False, + link=False, session=None): + items = self.imported_items() + # Save the original paths of all items for deletion and pruning + # in the next step (finalization). + self.old_paths = [item.path for item in items] + for item in items: + if move or copy or link: + # In copy and link modes, treat re-imports specially: + # move in-library files. (Out-of-library files are + # copied/moved as usual). + old_path = item.path + if (copy or link) and self.replaced_items[item] and \ + session.lib.directory in util.ancestry(old_path): + item.move() + # We moved the item, so remove the + # now-nonexistent file from old_paths. 
+ self.old_paths.remove(old_path) + else: + # A normal import. Just copy files and keep track of + # old paths. + item.move(copy, link) + + if write and (self.apply or self.choice_flag == action.RETAG): + item.try_write() + + with session.lib.transaction(): + for item in self.imported_items(): + item.store() + + plugins.send('import_task_files', session=session, task=self) + + def add(self, lib): + """Add the items as an album to the library and remove replaced items. + """ + self.align_album_level_fields() + with lib.transaction(): + self.record_replaced(lib) + self.remove_replaced(lib) + self.album = lib.add_album(self.imported_items()) + self.reimport_metadata(lib) + + def record_replaced(self, lib): + """Records the replaced items and albums in the `replaced_items` + and `replaced_albums` dictionaries. + """ + self.replaced_items = defaultdict(list) + self.replaced_albums = defaultdict(list) + replaced_album_ids = set() + for item in self.imported_items(): + dup_items = list(lib.items( + dbcore.query.BytesQuery('path', item.path) + )) + self.replaced_items[item] = dup_items + for dup_item in dup_items: + if (not dup_item.album_id or + dup_item.album_id in replaced_album_ids): + continue + replaced_album = dup_item.get_album() + if replaced_album: + replaced_album_ids.add(dup_item.album_id) + self.replaced_albums[replaced_album.path] = replaced_album + + def reimport_metadata(self, lib): + """For reimports, preserves metadata for reimported items and + albums. 
""" if self.is_album: - if self.choice_flag == action.ASIS: - return list(self.items) - elif self.choice_flag == action.APPLY: - return self.match.mapping.keys() - else: - assert False - else: - return [self.item] + replaced_album = self.replaced_albums.get(self.album.path) + if replaced_album: + self.album.added = replaced_album.added + self.album.update(replaced_album._values_flex) + self.album.artpath = replaced_album.artpath + self.album.store() + log.debug( + u'Reimported album: added {0}, flexible ' + u'attributes {1} from album {2} for {3}', + self.album.added, + replaced_album._values_flex.keys(), + replaced_album.id, + displayable_path(self.album.path) + ) + for item in self.imported_items(): + dup_items = self.replaced_items[item] + for dup_item in dup_items: + if dup_item.added and dup_item.added != item.added: + item.added = dup_item.added + log.debug( + u'Reimported item added {0} ' + u'from item {1} for {2}', + item.added, + dup_item.id, + displayable_path(item.path) + ) + item.update(dup_item._values_flex) + log.debug( + u'Reimported item flexible attributes {0} ' + u'from item {1} for {2}', + dup_item._values_flex.keys(), + dup_item.id, + displayable_path(item.path) + ) + item.store() + + def remove_replaced(self, lib): + """Removes all the items from the library that have the same + path as an item from this task. + """ + for item in self.imported_items(): + for dup_item in self.replaced_items[item]: + log.debug(u'Replacing item {0}: {1}', + dup_item.id, displayable_path(item.path)) + dup_item.remove() + log.debug(u'{0} of {1} items replaced', + sum(bool(l) for l in self.replaced_items.values()), + len(self.imported_items())) + + def choose_match(self, session): + """Ask the session which match should apply and apply it. + """ + choice = session.choose_match(self) + self.set_choice(choice) + session.log_choice(self) + + def reload(self): + """Reload albums and items from the database. 
+ """ + for item in self.imported_items(): + item.load() + self.album.load() # Utilities. @@ -529,6 +802,389 @@ class ImportTask(object): clutter=config['clutter'].as_str_seq()) +class SingletonImportTask(ImportTask): + """ImportTask for a single track that is not associated to an album. + """ + + def __init__(self, toppath, item): + super(SingletonImportTask, self).__init__(toppath, [item.path], [item]) + self.item = item + self.is_album = False + self.paths = [item.path] + + def chosen_ident(self): + assert self.choice_flag in (action.ASIS, action.APPLY, action.RETAG) + if self.choice_flag in (action.ASIS, action.RETAG): + return (self.item.artist, self.item.title) + elif self.choice_flag is action.APPLY: + return (self.match.info.artist, self.match.info.title) + + def imported_items(self): + return [self.item] + + def apply_metadata(self): + autotag.apply_item_metadata(self.item, self.match.info) + + def _emit_imported(self, lib): + for item in self.imported_items(): + plugins.send('item_imported', lib=lib, item=item) + + def lookup_candidates(self): + candidates, recommendation = autotag.tag_item( + self.item, search_ids=self.search_ids) + self.candidates = candidates + self.rec = recommendation + + def find_duplicates(self, lib): + """Return a list of items from `lib` that have the same artist + and title as the task. + """ + artist, title = self.chosen_ident() + + found_items = [] + query = dbcore.AndQuery(( + dbcore.MatchQuery('artist', artist), + dbcore.MatchQuery('title', title), + )) + for other_item in lib.items(query): + # Existing items not considered duplicates. 
+ if other_item.path != self.item.path: + found_items.append(other_item) + return found_items + + duplicate_items = find_duplicates + + def add(self, lib): + with lib.transaction(): + self.record_replaced(lib) + self.remove_replaced(lib) + lib.add(self.item) + self.reimport_metadata(lib) + + def infer_album_fields(self): + raise NotImplementedError + + def choose_match(self, session): + """Ask the session which match should apply and apply it. + """ + choice = session.choose_item(self) + self.set_choice(choice) + session.log_choice(self) + + def reload(self): + self.item.load() + + +# FIXME The inheritance relationships are inverted. This is why there +# are so many methods which pass. More responsibility should be delegated to +# the BaseImportTask class. +class SentinelImportTask(ImportTask): + """A sentinel task marks the progress of an import and does not + import any items itself. + + If only `toppath` is set the task indicates the end of a top-level + directory import. If the `paths` argument is also given, the task + indicates the progress in the `toppath` import. + """ + + def __init__(self, toppath, paths): + super(SentinelImportTask, self).__init__(toppath, paths, ()) + # TODO Remove the remaining attributes eventually + self.should_remove_duplicates = False + self.is_album = True + self.choice_flag = None + + def save_history(self): + pass + + def save_progress(self): + if self.paths is None: + # "Done" sentinel. + progress_reset(self.toppath) + else: + # "Directory progress" sentinel for singletons + progress_add(self.toppath, *self.paths) + + def skip(self): + return True + + def set_choice(self, choice): + raise NotImplementedError + + def cleanup(self, **kwargs): + pass + + def _emit_imported(self, session): + pass + + +class ArchiveImportTask(SentinelImportTask): + """An import task that represents the processing of an archive. + + `toppath` must be a `zip`, `tar`, or `rar` archive. 
Archive tasks + serve two purposes: + - First, it will unarchive the files to a temporary directory and + return it. The client should read tasks from the resulting + directory and send them through the pipeline. + - Second, it will clean up the temporary directory when it proceeds + through the pipeline. The client should send the archive task + after sending the rest of the music tasks to make this work. + """ + + def __init__(self, toppath): + super(ArchiveImportTask, self).__init__(toppath, ()) + self.extracted = False + + @classmethod + def is_archive(cls, path): + """Returns true if the given path points to an archive that can + be handled. + """ + if not os.path.isfile(path): + return False + + for path_test, _ in cls.handlers(): + if path_test(path): + return True + return False + + @classmethod + def handlers(cls): + """Returns a list of archive handlers. + + Each handler is a `(path_test, ArchiveClass)` tuple. `path_test` + is a function that returns `True` if the given path can be + handled by `ArchiveClass`. `ArchiveClass` is a class that + implements the same interface as `tarfile.TarFile`. + """ + if not hasattr(cls, '_handlers'): + cls._handlers = [] + from zipfile import is_zipfile, ZipFile + cls._handlers.append((is_zipfile, ZipFile)) + from tarfile import is_tarfile, TarFile + cls._handlers.append((is_tarfile, TarFile)) + try: + from rarfile import is_rarfile, RarFile + except ImportError: + pass + else: + cls._handlers.append((is_rarfile, RarFile)) + + return cls._handlers + + def cleanup(self, **kwargs): + """Removes the temporary directory the archive was extracted to. + """ + if self.extracted: + log.debug(u'Removing extracted directory: {0}', + displayable_path(self.toppath)) + shutil.rmtree(self.toppath) + + def extract(self): + """Extracts the archive to a temporary directory and sets + `toppath` to that directory. 
+ """ + for path_test, handler_class in self.handlers(): + if path_test(self.toppath): + break + + try: + extract_to = mkdtemp() + archive = handler_class(self.toppath, mode='r') + archive.extractall(extract_to) + finally: + archive.close() + self.extracted = True + self.toppath = extract_to + + +class ImportTaskFactory(object): + """Generate album and singleton import tasks for all media files + indicated by a path. + """ + def __init__(self, toppath, session): + """Create a new task factory. + + `toppath` is the user-specified path to search for music to + import. `session` is the `ImportSession`, which controls how + tasks are read from the directory. + """ + self.toppath = toppath + self.session = session + self.skipped = 0 # Skipped due to incremental/resume. + self.imported = 0 # "Real" tasks created. + self.is_archive = ArchiveImportTask.is_archive(syspath(toppath)) + + def tasks(self): + """Yield all import tasks for music found in the user-specified + path `self.toppath`. Any necessary sentinel tasks are also + produced. + + During generation, update `self.skipped` and `self.imported` + with the number of tasks that were not produced (due to + incremental mode or resumed imports) and the number of concrete + tasks actually produced, respectively. + + If `self.toppath` is an archive, it is adjusted to point to the + extracted data. + """ + # Check whether this is an archive. + if self.is_archive: + archive_task = self.unarchive() + if not archive_task: + return + + # Search for music in the directory. + for dirs, paths in self.paths(): + if self.session.config['singletons']: + for path in paths: + tasks = self._create(self.singleton(path)) + for task in tasks: + yield task + yield self.sentinel(dirs) + + else: + tasks = self._create(self.album(paths, dirs)) + for task in tasks: + yield task + + # Produce the final sentinel for this toppath to indicate that + # it is finished. 
This is usually just a SentinelImportTask, but + # for archive imports, send the archive task instead (to remove + # the extracted directory). + if self.is_archive: + yield archive_task + else: + yield self.sentinel() + + def _create(self, task): + """Handle a new task to be emitted by the factory. + + Emit the `import_task_created` event and increment the + `imported` count if the task is not skipped. Return the same + task. If `task` is None, do nothing. + """ + if task: + tasks = task.handle_created(self.session) + self.imported += len(tasks) + return tasks + return [] + + def paths(self): + """Walk `self.toppath` and yield `(dirs, files)` pairs where + `files` are individual music files and `dirs` the set of + containing directories where the music was found. + + This can either be a recursive search in the ordinary case, a + single track when `toppath` is a file, a single directory in + `flat` mode. + """ + if not os.path.isdir(syspath(self.toppath)): + yield [self.toppath], [self.toppath] + elif self.session.config['flat']: + paths = [] + for dirs, paths_in_dir in albums_in_dir(self.toppath): + paths += paths_in_dir + yield [self.toppath], paths + else: + for dirs, paths in albums_in_dir(self.toppath): + yield dirs, paths + + def singleton(self, path): + """Return a `SingletonImportTask` for the music file. + """ + if self.session.already_imported(self.toppath, [path]): + log.debug(u'Skipping previously-imported path: {0}', + displayable_path(path)) + self.skipped += 1 + return None + + item = self.read_item(path) + if item: + return SingletonImportTask(self.toppath, item) + else: + return None + + def album(self, paths, dirs=None): + """Return a `ImportTask` with all media files from paths. + + `dirs` is a list of parent directories used to record already + imported albums. 
+ """ + if not paths: + return None + + if dirs is None: + dirs = list(set(os.path.dirname(p) for p in paths)) + + if self.session.already_imported(self.toppath, dirs): + log.debug(u'Skipping previously-imported path: {0}', + displayable_path(dirs)) + self.skipped += 1 + return None + + items = map(self.read_item, paths) + items = [item for item in items if item] + + if items: + return ImportTask(self.toppath, dirs, items) + else: + return None + + def sentinel(self, paths=None): + """Return a `SentinelImportTask` indicating the end of a + top-level directory import. + """ + return SentinelImportTask(self.toppath, paths) + + def unarchive(self): + """Extract the archive for this `toppath`. + + Extract the archive to a new directory, adjust `toppath` to + point to the extracted directory, and return an + `ArchiveImportTask`. If extraction fails, return None. + """ + assert self.is_archive + + if not (self.session.config['move'] or + self.session.config['copy']): + log.warn(u"Archive importing requires either " + u"'copy' or 'move' to be enabled.") + return + + log.debug(u'Extracting archive: {0}', + displayable_path(self.toppath)) + archive_task = ArchiveImportTask(self.toppath) + try: + archive_task.extract() + except Exception as exc: + log.error(u'extraction failed: {0}', exc) + return + + # Now read albums from the extracted directory. + self.toppath = archive_task.toppath + log.debug(u'Archive extracted to: {0}', self.toppath) + return archive_task + + def read_item(self, path): + """Return an `Item` read from the path. + + If an item cannot be read, return `None` instead and log an + error. + """ + try: + return library.Item.from_path(path) + except library.ReadError as exc: + if isinstance(exc.reason, mediafile.FileTypeError): + # Silently ignore non-music files. 
+ pass + elif isinstance(exc.reason, mediafile.UnreadableFileError): + log.warn(u'unreadable file: {0}', displayable_path(path)) + else: + log.error(u'error reading {0}: {1}', + displayable_path(path), exc) + + # Full-album pipeline stages. def read_tasks(session): @@ -536,483 +1192,232 @@ def read_tasks(session): in the user-specified list of paths. In the case of a singleton import, yields single-item tasks instead. """ - # Look for saved progress. - if _resume(): - resume_dirs = {} - for path in session.paths: - resume_dir = progress_get(path) - if resume_dir: - - # Either accept immediately or prompt for input to decide. - if _resume() is True: - do_resume = True - log.warn('Resuming interrupted import of %s' % path) - else: - do_resume = session.should_resume(path) - - if do_resume: - resume_dirs[path] = resume_dir - else: - # Clear progress; we're starting from the top. - progress_set(path, None) - - # Look for saved incremental directories. - if config['import']['incremental']: - incremental_skipped = 0 - history_dirs = history_get() - + skipped = 0 for toppath in session.paths: - # Check whether the path is to a file. - if config['import']['singletons'] and \ - not os.path.isdir(syspath(toppath)): - try: - item = library.Item.from_path(toppath) - except mediafile.UnreadableFileError: - log.warn(u'unreadable file: {0}'.format( - util.displayable_path(toppath) - )) - continue - yield ImportTask.item_task(item) - continue + # Check whether we need to resume the import. + session.ask_resume(toppath) - # A flat album import merges all items into one album. - if config['import']['flat'] and not config['import']['singletons']: - all_items = [] - for _, items in autotag.albums_in_dir(toppath): - all_items += items - yield ImportTask(toppath, toppath, all_items) - yield ImportTask.done_sentinel(toppath) - continue + # Generate tasks. 
+ task_factory = ImportTaskFactory(toppath, session) + for t in task_factory.tasks(): + yield t + skipped += task_factory.skipped - # Produce paths under this directory. - if _resume(): - resume_dir = resume_dirs.get(toppath) - for path, items in autotag.albums_in_dir(toppath): - # Skip according to progress. - if _resume() and resume_dir: - # We're fast-forwarding to resume a previous tagging. - if path == resume_dir: - # We've hit the last good path! Turn off the - # fast-forwarding. - resume_dir = None - continue + if not task_factory.imported: + log.warn(u'No files imported from {0}', + displayable_path(toppath)) - # When incremental, skip paths in the history. - if config['import']['incremental'] and tuple(path) in history_dirs: - log.debug(u'Skipping previously-imported path: %s' % - displayable_path(path)) - incremental_skipped += 1 - continue + # Show skipped directories (due to incremental/resume). + if skipped: + log.info(u'Skipped {0} paths.', skipped) - # Yield all the necessary tasks. - if config['import']['singletons']: - for item in items: - yield ImportTask.item_task(item) - yield ImportTask.progress_sentinel(toppath, path) - else: - yield ImportTask(toppath, path, items) - - # Indicate the directory is finished. - yield ImportTask.done_sentinel(toppath) - - # Show skipped directories. - if config['import']['incremental'] and incremental_skipped: - log.info(u'Incremental import: skipped %i directories.' % - incremental_skipped) def query_tasks(session): """A generator that works as a drop-in-replacement for read_tasks. Instead of finding files from the filesystem, a query is used to match items from the library. """ - if config['import']['singletons']: + if session.config['singletons']: # Search for items. for item in session.lib.items(session.query): - yield ImportTask.item_task(item) + task = SingletonImportTask(None, item) + for task in task.handle_created(session): + yield task else: # Search for albums. 
for album in session.lib.albums(session.query): - log.debug('yielding album %i: %s - %s' % - (album.id, album.albumartist, album.album)) + log.debug(u'yielding album {0}: {1} - {2}', + album.id, album.albumartist, album.album) items = list(album.items()) - yield ImportTask(None, [album.item_dir()], items) -def initial_lookup(session): + # Clear IDs from re-tagged items so they appear "fresh" when + # we add them back to the library. + for item in items: + item.id = None + item.album_id = None + + task = ImportTask(None, [album.item_dir()], items) + for task in task.handle_created(session): + yield task + + +@pipeline.mutator_stage +def lookup_candidates(session, task): """A coroutine for performing the initial MusicBrainz lookup for an album. It accepts lists of Items and yields (items, cur_artist, cur_album, candidates, rec) tuples. If no match is found, all of the yielded parameters (except items) are None. """ - task = None - while True: - task = yield task - if task.should_skip(): - continue + if task.skip: + # FIXME This gets duplicated a lot. We need a better + # abstraction. + return - plugins.send('import_task_start', session=session, task=task) + plugins.send('import_task_start', session=session, task=task) + log.debug(u'Looking up: {0}', displayable_path(task.paths)) - log.debug('Looking up: %s' % displayable_path(task.paths)) - task.set_candidates( - *autotag.tag_album(task.items) - ) + # Restrict the initial lookup to IDs specified by the user via the -m + # option. Currently all the IDs are passed onto the tasks directly. + task.search_ids = session.config['search_ids'].as_str_seq() -def user_query(session): + task.lookup_candidates() + + +@pipeline.stage +def user_query(session, task): """A coroutine for interfacing with the user about the tagging process. The coroutine accepts an ImportTask objects. It uses the - session's ``choose_match`` method to determine the ``action`` for - this task. 
Depending on the action additional stages are exectuted + session's `choose_match` method to determine the `action` for + this task. Depending on the action additional stages are executed and the processed task is yielded. It emits the ``import_task_choice`` event for plugins. Plugins have acces to the choice via the ``taks.choice_flag`` property and may choose to change it. """ - recent = set() - task = None - while True: - task = yield task - if task.should_skip(): - continue + if task.skip: + return task - # Ask the user for a choice. - choice = session.choose_match(task) - task.set_choice(choice) - session.log_choice(task) - plugins.send('import_task_choice', session=session, task=task) + # Ask the user for a choice. + task.choose_match(session) + plugins.send('import_task_choice', session=session, task=task) - # As-tracks: transition to singleton workflow. - if task.choice_flag is action.TRACKS: - # Set up a little pipeline for dealing with the singletons. - def emitter(task): - for item in task.items: - yield ImportTask.item_task(item) - yield ImportTask.progress_sentinel(task.toppath, task.paths) + # As-tracks: transition to singleton workflow. + if task.choice_flag is action.TRACKS: + # Set up a little pipeline for dealing with the singletons. 
+ def emitter(task): + for item in task.items: + task = SingletonImportTask(task.toppath, item) + for new_task in task.handle_created(session): + yield new_task + yield SentinelImportTask(task.toppath, task.paths) - ipl = pipeline.Pipeline([ - emitter(task), - item_lookup(session), - item_query(session), - ]) - task = pipeline.multiple(ipl.pull()) - continue + ipl = pipeline.Pipeline([ + emitter(task), + lookup_candidates(session), + user_query(session), + ]) + return pipeline.multiple(ipl.pull()) - # As albums: group items by albums and create task for each album - if task.choice_flag is action.ALBUMS: - def emitter(task): - yield task + # As albums: group items by albums and create task for each album + if task.choice_flag is action.ALBUMS: + ipl = pipeline.Pipeline([ + iter([task]), + group_albums(session), + lookup_candidates(session), + user_query(session) + ]) + return pipeline.multiple(ipl.pull()) - ipl = pipeline.Pipeline([ - emitter(task), - group_albums(session), - initial_lookup(session), - user_query(session) - ]) - task = pipeline.multiple(ipl.pull()) - continue + resolve_duplicates(session, task) + apply_choice(session, task) + return task - # Check for duplicates if we have a match (or ASIS). - if task.choice_flag in (action.ASIS, action.APPLY): - ident = task.chosen_ident() - # The "recent" set keeps track of identifiers for recently - # imported albums -- those that haven't reached the database - # yet. - if ident in recent or _duplicate_check(session.lib, task): - session.resolve_duplicate(task) - session.log_choice(task, True) - recent.add(ident) -def show_progress(session): - """This stage replaces the initial_lookup and user_query stages - when the importer is run without autotagging. It displays the album - name and artist as the files are added. +def resolve_duplicates(session, task): + """Check if a task conflicts with items or albums already imported + and ask the session to resolve this. 
""" - task = None - while True: - task = yield task - if task.should_skip(): - continue + if task.choice_flag in (action.ASIS, action.APPLY, action.RETAG): + found_duplicates = task.find_duplicates(session.lib) + if found_duplicates: + log.debug(u'found duplicates: {}'.format( + [o.id for o in found_duplicates] + )) + session.resolve_duplicate(task, found_duplicates) + session.log_choice(task, True) - log.info(displayable_path(task.paths)) - # Behave as if ASIS were selected. - task.set_null_candidates() - task.set_choice(action.ASIS) +@pipeline.mutator_stage +def import_asis(session, task): + """Select the `action.ASIS` choice for all tasks. -def apply_choices(session): - """A coroutine for applying changes to albums and singletons during - the autotag process. + This stage replaces the initial_lookup and user_query stages + when the importer is run without autotagging. """ - task = None - while True: - task = yield task - if task.should_skip(): - continue + if task.skip: + return - items = task.imported_items() - # Clear IDs in case the items are being re-tagged. - for item in items: - item.id = None - item.album_id = None + log.info(u'{}', displayable_path(task.paths)) + task.set_choice(action.ASIS) + apply_choice(session, task) - # Change metadata. - if task.should_write_tags(): - if task.is_album: - autotag.apply_metadata( - task.match.info, task.match.mapping - ) - else: - autotag.apply_item_metadata(task.item, task.match.info) - plugins.send('import_task_apply', session=session, task=task) - # Infer album-level fields. - if task.is_album: - _infer_album_fields(task) +def apply_choice(session, task): + """Apply the task's choice to the Album or Item it contains and add + it to the library. + """ + if task.skip: + return - # Find existing item entries that these are replacing (for - # re-imports). Old album structures are automatically cleaned up - # when the last item is removed. 
- task.replaced_items = defaultdict(list) - for item in items: - dup_items = session.lib.items( - dbcore.query.BytesQuery('path', item.path) - ) - for dup_item in dup_items: - task.replaced_items[item].append(dup_item) - log.debug('replacing item %i: %s' % - (dup_item.id, displayable_path(item.path))) - log.debug('%i of %i items replaced' % (len(task.replaced_items), - len(items))) + # Change metadata. + if task.apply: + task.apply_metadata() + plugins.send('import_task_apply', session=session, task=task) - # Find old items that should be replaced as part of a duplicate - # resolution. - duplicate_items = [] - if task.remove_duplicates: - if task.is_album: - for album in _duplicate_check(session.lib, task): - duplicate_items += album.items() - else: - duplicate_items = _item_duplicate_check(session.lib, task) - log.debug('removing %i old duplicated items' % - len(duplicate_items)) + task.add(session.lib) - # Delete duplicate files that are located inside the library - # directory. - task.duplicate_paths = [] - for duplicate_path in [i.path for i in duplicate_items]: - if session.lib.directory in util.ancestry(duplicate_path): - # Mark the path for deletion in the manipulate_files - # stage. - task.duplicate_paths.append(duplicate_path) - # Add items -- before path changes -- to the library. We add the - # items now (rather than at the end) so that album structures - # are in place before calls to destination(). - with session.lib.transaction(): - # Remove old items. - for replaced in task.replaced_items.itervalues(): - for item in replaced: - item.remove() - for item in duplicate_items: - item.remove() - - # Add new ones. - if task.is_album: - # Add an album. - album = session.lib.add_album(items) - task.album_id = album.id - else: - # Add tracks. 
- for item in items: - session.lib.add(item) - -def plugin_stage(session, func): +@pipeline.mutator_stage +def plugin_stage(session, func, task): """A coroutine (pipeline stage) that calls the given function with each non-skipped import task. These stages occur between applying metadata changes and moving/copying/writing files. """ - task = None - while True: - task = yield task - if task.should_skip(): - continue - func(session, task) + if task.skip: + return - # Stage may modify DB, so re-load cached item data. - for item in task.imported_items(): - item.load() + func(session, task) -def manipulate_files(session): + # Stage may modify DB, so re-load cached item data. + # FIXME Importer plugins should not modify the database but instead + # the albums and items attached to tasks. + task.reload() + + +@pipeline.stage +def manipulate_files(session, task): """A coroutine (pipeline stage) that performs necessary file - manipulations *after* items have been added to the library. + manipulations *after* items have been added to the library and + finalizes each task. """ - task = None - while True: - task = yield task - if task.should_skip(): - continue + if not task.skip: + if task.should_remove_duplicates: + task.remove_duplicates(session.lib) - # Remove duplicate files marked for deletion. - if task.remove_duplicates: - for duplicate_path in task.duplicate_paths: - log.debug(u'deleting replaced duplicate %s' % - util.displayable_path(duplicate_path)) - util.remove(duplicate_path) - util.prune_dirs(os.path.dirname(duplicate_path), - session.lib.directory) + task.manipulate_files( + move=session.config['move'], + copy=session.config['copy'], + write=session.config['write'], + link=session.config['link'], + session=session, + ) - # Move/copy/write files. - items = task.imported_items() - # Save the original paths of all items for deletion and pruning - # in the next step (finalization). 
- task.old_paths = [item.path for item in items] - for item in items: - if config['import']['move']: - # Just move the file. - item.move(False) - elif config['import']['copy']: - # If it's a reimport, move in-library files and copy - # out-of-library files. Otherwise, copy and keep track - # of the old path. - old_path = item.path - if task.replaced_items[item]: - # This is a reimport. Move in-library files and copy - # out-of-library files. - if session.lib.directory in util.ancestry(old_path): - item.move(False) - # We moved the item, so remove the - # now-nonexistent file from old_paths. - task.old_paths.remove(old_path) - else: - item.move(True) - else: - # A normal import. Just copy files and keep track of - # old paths. - item.move(True) + # Progress, cleanup, and event. + task.finalize(session) - if config['import']['write'] and task.should_write_tags(): - try: - item.write() - except library.FileOperationError as exc: - log.error(exc) - # Save new paths. - with session.lib.transaction(): - for item in items: - item.store() - - # Plugin event. - plugins.send('import_task_files', session=session, task=task) - -def finalize(session): - """A coroutine that finishes up importer tasks. In particular, the - coroutine sends plugin events, deletes old files, and saves - progress. This is a "terminal" coroutine (it yields None). +@pipeline.stage +def log_files(session, task): + """A coroutine (pipeline stage) to log each file to be imported. """ - while True: - task = yield - if task.should_skip(): - if _resume(): - task.save_progress() - if config['import']['incremental']: - task.save_history() - continue - - items = task.imported_items() - - # Announce that we've added an album. - if task.is_album: - album = session.lib.get_album(task.album_id) - plugins.send('album_imported', - lib=session.lib, album=album) - else: - for item in items: - plugins.send('item_imported', - lib=session.lib, item=item) - - # When copying and deleting originals, delete old files. 
- if config['import']['copy'] and config['import']['delete']: - new_paths = [os.path.realpath(item.path) for item in items] - for old_path in task.old_paths: - # Only delete files that were actually copied. - if old_path not in new_paths: - util.remove(syspath(old_path), False) - task.prune(old_path) - - # When moving, prune empty directories containing the original - # files. - elif config['import']['move']: - for old_path in task.old_paths: - task.prune(old_path) - - # Update progress. - if _resume(): - task.save_progress() - if config['import']['incremental']: - task.save_history() - - -# Singleton pipeline stages. - -def item_lookup(session): - """A coroutine used to perform the initial MusicBrainz lookup for - an item task. - """ - task = None - while True: - task = yield task - if task.should_skip(): - continue - - plugins.send('import_task_start', session=session, task=task) - - task.set_item_candidates(*autotag.tag_item(task.item)) - -def item_query(session): - """A coroutine that queries the user for input on single-item - lookups. - """ - task = None - recent = set() - while True: - task = yield task - if task.should_skip(): - continue - - choice = session.choose_item(task) - task.set_choice(choice) - session.log_choice(task) - plugins.send('import_task_choice', session=session, task=task) - - # Duplicate check. - if task.choice_flag in (action.ASIS, action.APPLY): - ident = task.chosen_ident() - if ident in recent or _item_duplicate_check(session.lib, task): - session.resolve_duplicate(task) - session.log_choice(task, True) - recent.add(ident) - -def item_progress(session): - """Skips the lookup and query stages in a non-autotagged singleton - import. Just shows progress. 
- """ - task = None - log.info('Importing items:') - while True: - task = yield task - if task.should_skip(): - continue - - log.info(displayable_path(task.item.path)) - task.set_null_candidates() - task.set_choice(action.ASIS) + if isinstance(task, SingletonImportTask): + log.info(u'Singleton: {0}', displayable_path(task.item['path'])) + elif task.items: + log.info(u'Album: {0}', displayable_path(task.paths[0])) + for item in task.items: + log.info(u' {0}', displayable_path(item['path'])) def group_albums(session): - """Group the items of a task by albumartist and album name and create a new - task for each album. Yield the tasks as a multi message. + """A pipeline stage that groups the items of each task into albums + using their metadata. + + Groups are identified using their artist and album fields. The + pipeline stage emits new album tasks for each discovered group. """ def group(item): return (item.albumartist or item.artist, item.album) @@ -1020,11 +1425,118 @@ def group_albums(session): task = None while True: task = yield task - if task.should_skip(): + if task.skip: continue tasks = [] - for _, items in itertools.groupby(task.items, group): - tasks.append(ImportTask(items=list(items))) - tasks.append(ImportTask.progress_sentinel(task.toppath, task.paths)) + sorted_items = sorted(task.items, key=group) + for _, items in itertools.groupby(sorted_items, group): + items = list(items) + task = ImportTask(task.toppath, [i.path for i in items], + items) + tasks += task.handle_created(session) + tasks.append(SentinelImportTask(task.toppath, task.paths)) task = pipeline.multiple(tasks) + + +MULTIDISC_MARKERS = (r'dis[ck]', r'cd') +MULTIDISC_PAT_FMT = r'^(.*%s[\W_]*)\d' + + +def albums_in_dir(path): + """Recursively searches the given directory and returns an iterable + of (paths, items) where paths is a list of directories and items is + a list of Items that is probably an album. Specifically, any folder + containing any media files is an album. 
+ """ + collapse_pat = collapse_paths = collapse_items = None + ignore = config['ignore'].as_str_seq() + ignore_hidden = config['ignore_hidden'].get(bool) + + for root, dirs, files in sorted_walk(path, ignore=ignore, + ignore_hidden=ignore_hidden, + logger=log): + items = [os.path.join(root, f) for f in files] + # If we're currently collapsing the constituent directories in a + # multi-disc album, check whether we should continue collapsing + # and add the current directory. If so, just add the directory + # and move on to the next directory. If not, stop collapsing. + if collapse_paths: + if (not collapse_pat and collapse_paths[0] in ancestry(root)) or \ + (collapse_pat and + collapse_pat.match(os.path.basename(root))): + # Still collapsing. + collapse_paths.append(root) + collapse_items += items + continue + else: + # Collapse finished. Yield the collapsed directory and + # proceed to process the current one. + if collapse_items: + yield collapse_paths, collapse_items + collapse_pat = collapse_paths = collapse_items = None + + # Check whether this directory looks like the *first* directory + # in a multi-disc sequence. There are two indicators: the file + # is named like part of a multi-disc sequence (e.g., "Title Disc + # 1") or it contains no items but only directories that are + # named in this way. + start_collapsing = False + for marker in MULTIDISC_MARKERS: + marker_pat = re.compile(MULTIDISC_PAT_FMT % marker, re.I) + match = marker_pat.match(os.path.basename(root)) + + # Is this directory the root of a nested multi-disc album? + if dirs and not items: + # Check whether all subdirectories have the same prefix. + start_collapsing = True + subdir_pat = None + for subdir in dirs: + # The first directory dictates the pattern for + # the remaining directories. 
+ if not subdir_pat: + match = marker_pat.match(subdir) + if match: + subdir_pat = re.compile( + br'^%s\d' % re.escape(match.group(1)), re.I + ) + else: + start_collapsing = False + break + + # Subsequent directories must match the pattern. + elif not subdir_pat.match(subdir): + start_collapsing = False + break + + # If all subdirectories match, don't check other + # markers. + if start_collapsing: + break + + # Is this directory the first in a flattened multi-disc album? + elif match: + start_collapsing = True + # Set the current pattern to match directories with the same + # prefix as this one, followed by a digit. + collapse_pat = re.compile( + br'^%s\d' % re.escape(match.group(1)), re.I + ) + break + + # If either of the above heuristics indicated that this is the + # beginning of a multi-disc album, initialize the collapsed + # directory and item lists and check the next directory. + if start_collapsing: + # Start collapsing; continue to the next iteration. + collapse_paths = [root] + collapse_items = items + continue + + # If it's nonempty, yield it. + if items: + yield [root], items + + # Clear out any unfinished collapse. + if collapse_paths and collapse_items: + yield collapse_paths, collapse_items diff --git a/libs/beets/library.py b/libs/beets/library.py index 94559430..99397013 100644 --- a/libs/beets/library.py +++ b/libs/beets/library.py @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2013, Adrian Sampson. +# Copyright 2016, Adrian Sampson. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -14,15 +15,17 @@ """The core data store and collection logic for beets. 
""" +from __future__ import division, absolute_import, print_function + import os -import re import sys -import logging -import shlex import unicodedata import time +import re from unidecode import unidecode -from beets.mediafile import MediaFile, MutagenError + +from beets import logging +from beets.mediafile import MediaFile, MutagenError, UnreadableFileError from beets import plugins from beets import util from beets.util import bytestring_path, syspath, normpath, samefile @@ -32,52 +35,85 @@ from beets.dbcore import types import beets +log = logging.getLogger('beets') + # Library-specific query types. - class PathQuery(dbcore.FieldQuery): - """A query that matches all items under a given path.""" - def __init__(self, field, pattern, fast=True): + """A query that matches all items under a given path. + + Matching can either be case-insensitive or case-sensitive. By + default, the behavior depends on the OS: case-insensitive on Windows + and case-sensitive otherwise. + """ + + escape_re = re.compile(r'[\\_%]') + escape_char = b'\\' + + def __init__(self, field, pattern, fast=True, case_sensitive=None): + """Create a path query. `pattern` must be a path, either to a + file or a directory. + + `case_sensitive` can be a bool or `None`, indicating that the + behavior should depend on the filesystem. + """ super(PathQuery, self).__init__(field, pattern, fast) + # By default, the case sensitivity depends on the filesystem + # that the query path is located on. + if case_sensitive is None: + path = util.bytestring_path(util.normpath(pattern)) + case_sensitive = beets.util.case_sensitive(path) + self.case_sensitive = case_sensitive + + # Use a normalized-case pattern for case-insensitive matches. + if not case_sensitive: + pattern = pattern.lower() + # Match the path as a single file. self.file_path = util.bytestring_path(util.normpath(pattern)) # As a directory (prefix). 
- self.dir_path = util.bytestring_path(os.path.join(self.file_path, '')) + self.dir_path = util.bytestring_path(os.path.join(self.file_path, b'')) + + @classmethod + def is_path_query(cls, query_part): + """Try to guess whether a unicode query part is a path query. + + Condition: separator precedes colon and the file exists. + """ + colon = query_part.find(':') + if colon != -1: + query_part = query_part[:colon] + return (os.sep in query_part and + os.path.exists(syspath(normpath(query_part)))) def match(self, item): - return (item.path == self.file_path) or \ - item.path.startswith(self.dir_path) + path = item.path if self.case_sensitive else item.path.lower() + return (path == self.file_path) or path.startswith(self.dir_path) - def clause(self): - dir_pat = buffer(self.dir_path + '%') - file_blob = buffer(self.file_path) - return '({0} = ?) || ({0} LIKE ?)'.format(self.field), \ - (file_blob, dir_pat) - - -class SingletonQuery(dbcore.Query): - """Matches either singleton or non-singleton items.""" - def __init__(self, sense): - self.sense = sense - - def clause(self): - if self.sense: - return "album_id ISNULL", () - else: - return "NOT album_id ISNULL", () - - def match(self, item): - return (not item.album_id) == self.sense + def col_clause(self): + if self.case_sensitive: + file_blob = buffer(self.file_path) + dir_blob = buffer(self.dir_path) + return '({0} = ?) || (substr({0}, 1, ?) = ?)'.format(self.field), \ + (file_blob, len(dir_blob), dir_blob) + escape = lambda m: self.escape_char + m.group(0) + dir_pattern = self.escape_re.sub(escape, self.dir_path) + dir_blob = buffer(dir_pattern + b'%') + file_pattern = self.escape_re.sub(escape, self.file_path) + file_blob = buffer(file_pattern) + return '({0} LIKE ? ESCAPE ?) || ({0} LIKE ? ESCAPE ?)'.format( + self.field), (file_blob, self.escape_char, dir_blob, + self.escape_char) # Library-specific field types. 
- -class DateType(types.Type): - sql = u'REAL' +class DateType(types.Float): + # TODO representation should be `datetime` object + # TODO distinguish between date and time types query = dbcore.query.DateQuery def format(self, value): @@ -95,12 +131,13 @@ class DateType(types.Type): try: return float(string) except ValueError: - return 0.0 + return self.null class PathType(types.Type): sql = u'BLOB' query = PathQuery + model_type = bytes def format(self, value): return util.displayable_path(value) @@ -108,158 +145,116 @@ class PathType(types.Type): def parse(self, string): return normpath(bytestring_path(string)) + def normalize(self, value): + if isinstance(value, unicode): + # Paths stored internally as encoded bytes. + return bytestring_path(value) + + elif isinstance(value, buffer): + # SQLite must store bytestings as buffers to avoid decoding. + # We unwrap buffers to bytes. + return bytes(value) + + else: + return value + + def from_sql(self, sql_value): + return self.normalize(sql_value) + + def to_sql(self, value): + if isinstance(value, bytes): + value = buffer(value) + return value -# Model field lists. +class MusicalKey(types.String): + """String representing the musical key of a song. + + The standard format is C, Cm, C#, C#m, etc. + """ + ENHARMONIC = { + r'db': 'c#', + r'eb': 'd#', + r'gb': 'f#', + r'ab': 'g#', + r'bb': 'a#', + } + + def parse(self, key): + key = key.lower() + for flat, sharp in self.ENHARMONIC.items(): + key = re.sub(flat, sharp, key) + key = re.sub(r'[\W\s]+minor', 'm', key) + key = re.sub(r'[\W\s]+major', '', key) + return key.capitalize() + + def normalize(self, key): + if key is None: + return None + else: + return self.parse(key) -# Fields in the "items" database table; all the metadata available for -# items in the library. These are used directly in SQL; they are -# vulnerable to injection if accessible to the user. -# Each tuple has the following values: -# - The name of the field. -# - The (Python) type of the field. 
-# - Is the field writable? -# - Does the field reflect an attribute of a MediaFile? -ITEM_FIELDS = [ - ('id', types.Id(), False, False), - ('path', PathType(), False, False), - ('album_id', types.Integer(), False, False), +class DurationType(types.Float): + """Human-friendly (M:SS) representation of a time interval.""" + query = dbcore.query.DurationQuery - ('title', types.String(), True, True), - ('artist', types.String(), True, True), - ('artist_sort', types.String(), True, True), - ('artist_credit', types.String(), True, True), - ('album', types.String(), True, True), - ('albumartist', types.String(), True, True), - ('albumartist_sort', types.String(), True, True), - ('albumartist_credit', types.String(), True, True), - ('genre', types.String(), True, True), - ('composer', types.String(), True, True), - ('grouping', types.String(), True, True), - ('year', types.PaddedInt(4), True, True), - ('month', types.PaddedInt(2), True, True), - ('day', types.PaddedInt(2), True, True), - ('track', types.PaddedInt(2), True, True), - ('tracktotal', types.PaddedInt(2), True, True), - ('disc', types.PaddedInt(2), True, True), - ('disctotal', types.PaddedInt(2), True, True), - ('lyrics', types.String(), True, True), - ('comments', types.String(), True, True), - ('bpm', types.Integer(), True, True), - ('comp', types.Boolean(), True, True), - ('mb_trackid', types.String(), True, True), - ('mb_albumid', types.String(), True, True), - ('mb_artistid', types.String(), True, True), - ('mb_albumartistid', types.String(), True, True), - ('albumtype', types.String(), True, True), - ('label', types.String(), True, True), - ('acoustid_fingerprint', types.String(), True, True), - ('acoustid_id', types.String(), True, True), - ('mb_releasegroupid', types.String(), True, True), - ('asin', types.String(), True, True), - ('catalognum', types.String(), True, True), - ('script', types.String(), True, True), - ('language', types.String(), True, True), - ('country', types.String(), True, True), - 
('albumstatus', types.String(), True, True), - ('media', types.String(), True, True), - ('albumdisambig', types.String(), True, True), - ('disctitle', types.String(), True, True), - ('encoder', types.String(), True, True), - ('rg_track_gain', types.Float(), True, True), - ('rg_track_peak', types.Float(), True, True), - ('rg_album_gain', types.Float(), True, True), - ('rg_album_peak', types.Float(), True, True), - ('original_year', types.PaddedInt(4), True, True), - ('original_month', types.PaddedInt(2), True, True), - ('original_day', types.PaddedInt(2), True, True), + def format(self, value): + if not beets.config['format_raw_length'].get(bool): + return beets.ui.human_seconds_short(value or 0.0) + else: + return value - ('length', types.Float(), False, True), - ('bitrate', types.ScaledInt(1000, u'kbps'), False, True), - ('format', types.String(), False, True), - ('samplerate', types.ScaledInt(1000, u'kHz'), False, True), - ('bitdepth', types.Integer(), False, True), - ('channels', types.Integer(), False, True), - ('mtime', DateType(), False, False), - ('added', DateType(), False, False), -] -ITEM_KEYS_WRITABLE = [f[0] for f in ITEM_FIELDS if f[3] and f[2]] -ITEM_KEYS_META = [f[0] for f in ITEM_FIELDS if f[3]] -ITEM_KEYS = [f[0] for f in ITEM_FIELDS] - -# Database fields for the "albums" table. -# The third entry in each tuple indicates whether the field reflects an -# identically-named field in the items table. 
-ALBUM_FIELDS = [ - ('id', types.Id(), False), - ('artpath', PathType(), False), - ('added', DateType(), True), - - ('albumartist', types.String(), True), - ('albumartist_sort', types.String(), True), - ('albumartist_credit', types.String(), True), - ('album', types.String(), True), - ('genre', types.String(), True), - ('year', types.PaddedInt(4), True), - ('month', types.PaddedInt(2), True), - ('day', types.PaddedInt(2), True), - ('tracktotal', types.PaddedInt(2), True), - ('disctotal', types.PaddedInt(2), True), - ('comp', types.Boolean(), True), - ('mb_albumid', types.String(), True), - ('mb_albumartistid', types.String(), True), - ('albumtype', types.String(), True), - ('label', types.String(), True), - ('mb_releasegroupid', types.String(), True), - ('asin', types.String(), True), - ('catalognum', types.String(), True), - ('script', types.String(), True), - ('language', types.String(), True), - ('country', types.String(), True), - ('albumstatus', types.String(), True), - ('media', types.String(), True), - ('albumdisambig', types.String(), True), - ('rg_album_gain', types.Float(), True), - ('rg_album_peak', types.Float(), True), - ('original_year', types.PaddedInt(4), True), - ('original_month', types.PaddedInt(2), True), - ('original_day', types.PaddedInt(2), True), -] -ALBUM_KEYS = [f[0] for f in ALBUM_FIELDS] -ALBUM_KEYS_ITEM = [f[0] for f in ALBUM_FIELDS if f[2]] + def parse(self, string): + try: + # Try to format back hh:ss to seconds. + return util.raw_seconds_short(string) + except ValueError: + # Fall back to a plain float. + try: + return float(string) + except ValueError: + return self.null -# Default search fields for each model. -ALBUM_DEFAULT_FIELDS = ('album', 'albumartist', 'genre') -ITEM_DEFAULT_FIELDS = ALBUM_DEFAULT_FIELDS + ('artist', 'title', 'comments') +# Library-specific sort types. + +class SmartArtistSort(dbcore.query.Sort): + """Sort by artist (either album artist or track artist), + prioritizing the sort field over the raw field. 
+ """ + def __init__(self, model_cls, ascending=True, case_insensitive=True): + self.album = model_cls is Album + self.ascending = ascending + self.case_insensitive = case_insensitive + + def order_clause(self): + order = "ASC" if self.ascending else "DESC" + field = 'albumartist' if self.album else 'artist' + collate = 'COLLATE NOCASE' if self.case_insensitive else '' + return ('(CASE {0}_sort WHEN NULL THEN {0} ' + 'WHEN "" THEN {0} ' + 'ELSE {0}_sort END) {1} {2}').format(field, collate, order) + + def sort(self, objs): + if self.album: + field = lambda a: a.albumartist_sort or a.albumartist + else: + field = lambda i: i.artist_sort or i.artist + + if self.case_insensitive: + key = lambda x: field(x).lower() + else: + key = field + return sorted(objs, key=key, reverse=not self.ascending) # Special path format key. PF_KEY_DEFAULT = 'default' -# Logger. -log = logging.getLogger('beets') -if not log.handlers: - log.addHandler(logging.StreamHandler()) -log.propagate = False # Don't propagate to root handler. - - -# A little SQL utility. -def _orelse(exp1, exp2): - """Generates an SQLite expression that evaluates to exp1 if exp1 is - non-null and non-empty or exp2 otherwise. - """ - return ('(CASE {0} WHEN NULL THEN {1} ' - 'WHEN "" THEN {1} ' - 'ELSE {0} END)').format(exp1, exp2) - - - # Exceptions. - class FileOperationError(Exception): """Indicates an error when interacting with a file on disk. Possibilities include an unsupported media type, a permissions @@ -300,14 +295,15 @@ class WriteError(FileOperationError): return u'error writing ' + super(WriteError, self).__unicode__() - # Item and Album model classes. - class LibModel(dbcore.Model): """Shared concrete functionality for Items and Albums. """ - _bytes_keys = ('path', 'artpath') + + _format_config_key = None + """Config key that specifies how an instance should be formatted. 
+ """ def _template_funcs(self): funcs = DefaultTemplateFunctions(self, self._db).functions() @@ -316,26 +312,188 @@ class LibModel(dbcore.Model): def store(self): super(LibModel, self).store() - plugins.send('database_change', lib=self._db) + plugins.send('database_change', lib=self._db, model=self) def remove(self): super(LibModel, self).remove() - plugins.send('database_change', lib=self._db) + plugins.send('database_change', lib=self._db, model=self) def add(self, lib=None): super(LibModel, self).add(lib) - plugins.send('database_change', lib=self._db) + plugins.send('database_change', lib=self._db, model=self) + + def __format__(self, spec): + if not spec: + spec = beets.config[self._format_config_key].get(unicode) + result = self.evaluate_template(spec) + if isinstance(spec, bytes): + # if spec is a byte string then we must return a one as well + return result.encode('utf8') + else: + return result + + def __str__(self): + return format(self).encode('utf8') + + def __unicode__(self): + return format(self) + + +class FormattedItemMapping(dbcore.db.FormattedMapping): + """Add lookup for album-level fields. + + Album-level fields take precedence if `for_path` is true. + """ + + def __init__(self, item, for_path=False): + super(FormattedItemMapping, self).__init__(item, for_path) + self.album = item.get_album() + self.album_keys = [] + if self.album: + for key in self.album.keys(True): + if key in Album.item_keys or key not in item._fields.keys(): + self.album_keys.append(key) + self.all_keys = set(self.model_keys).union(self.album_keys) + + def _get(self, key): + """Get the value for a key, either from the album or the item. + Raise a KeyError for invalid keys. 
+ """ + if self.for_path and key in self.album_keys: + return self._get_formatted(self.album, key) + elif key in self.model_keys: + return self._get_formatted(self.model, key) + elif key in self.album_keys: + return self._get_formatted(self.album, key) + else: + raise KeyError(key) + + def __getitem__(self, key): + """Get the value for a key. Certain unset values are remapped. + """ + value = self._get(key) + + # `artist` and `albumartist` fields fall back to one another. + # This is helpful in path formats when the album artist is unset + # on as-is imports. + if key == 'artist' and not value: + return self._get('albumartist') + elif key == 'albumartist' and not value: + return self._get('artist') + else: + return value + + def __iter__(self): + return iter(self.all_keys) + + def __len__(self): + return len(self.all_keys) class Item(LibModel): - _fields = dict((name, typ) for (name, typ, _, _) in ITEM_FIELDS) _table = 'items' _flex_table = 'item_attributes' - _search_fields = ITEM_DEFAULT_FIELDS + _fields = { + 'id': types.PRIMARY_ID, + 'path': PathType(), + 'album_id': types.FOREIGN_ID, + + 'title': types.STRING, + 'artist': types.STRING, + 'artist_sort': types.STRING, + 'artist_credit': types.STRING, + 'album': types.STRING, + 'albumartist': types.STRING, + 'albumartist_sort': types.STRING, + 'albumartist_credit': types.STRING, + 'genre': types.STRING, + 'composer': types.STRING, + 'grouping': types.STRING, + 'year': types.PaddedInt(4), + 'month': types.PaddedInt(2), + 'day': types.PaddedInt(2), + 'track': types.PaddedInt(2), + 'tracktotal': types.PaddedInt(2), + 'disc': types.PaddedInt(2), + 'disctotal': types.PaddedInt(2), + 'lyrics': types.STRING, + 'comments': types.STRING, + 'bpm': types.INTEGER, + 'comp': types.BOOLEAN, + 'mb_trackid': types.STRING, + 'mb_albumid': types.STRING, + 'mb_artistid': types.STRING, + 'mb_albumartistid': types.STRING, + 'albumtype': types.STRING, + 'label': types.STRING, + 'acoustid_fingerprint': types.STRING, + 'acoustid_id': 
types.STRING, + 'mb_releasegroupid': types.STRING, + 'asin': types.STRING, + 'catalognum': types.STRING, + 'script': types.STRING, + 'language': types.STRING, + 'country': types.STRING, + 'albumstatus': types.STRING, + 'media': types.STRING, + 'albumdisambig': types.STRING, + 'disctitle': types.STRING, + 'encoder': types.STRING, + 'rg_track_gain': types.NULL_FLOAT, + 'rg_track_peak': types.NULL_FLOAT, + 'rg_album_gain': types.NULL_FLOAT, + 'rg_album_peak': types.NULL_FLOAT, + 'original_year': types.PaddedInt(4), + 'original_month': types.PaddedInt(2), + 'original_day': types.PaddedInt(2), + 'initial_key': MusicalKey(), + + 'length': DurationType(), + 'bitrate': types.ScaledInt(1000, u'kbps'), + 'format': types.STRING, + 'samplerate': types.ScaledInt(1000, u'kHz'), + 'bitdepth': types.INTEGER, + 'channels': types.INTEGER, + 'mtime': DateType(), + 'added': DateType(), + } + + _search_fields = ('artist', 'title', 'comments', + 'album', 'albumartist', 'genre') + + _types = { + 'data_source': types.STRING, + } + + _media_fields = set(MediaFile.readable_fields()) \ + .intersection(_fields.keys()) + """Set of item fields that are backed by `MediaFile` fields. + + Any kind of field (fixed, flexible, and computed) may be a media + field. Only these fields are read from disk in `read` and written in + `write`. + """ + + _media_tag_fields = set(MediaFile.fields()).intersection(_fields.keys()) + """Set of item fields that are backed by *writable* `MediaFile` tag + fields. + + This excludes fields that represent audio data, such as `bitrate` or + `length`. + """ + + _formatter = FormattedItemMapping + + _sorts = {'artist': SmartArtistSort} + + _format_config_key = 'format_item' @classmethod def _getters(cls): - return plugins.item_field_getters() + getters = plugins.item_field_getters() + getters['singleton'] = lambda i: i.album_id is None + getters['filesize'] = Item.try_filesize # In bytes. 
+ return getters @classmethod def from_path(cls, path): @@ -355,15 +513,15 @@ class Item(LibModel): if isinstance(value, unicode): value = bytestring_path(value) elif isinstance(value, buffer): - value = str(value) + value = bytes(value) - if key in ITEM_KEYS_WRITABLE: + if key in MediaFile.fields(): self.mtime = 0 # Reset mtime on dirty. super(Item, self).__setitem__(key, value) def update(self, values): - """Sett all key/value pairs in the mapping. If mtime is + """Set all key/value pairs in the mapping. If mtime is specified, it is not reset (as it might otherwise be). """ super(Item, self).update(values) @@ -379,12 +537,14 @@ class Item(LibModel): return None return self._db.get_album(self) - # Interaction with file metadata. def read(self, read_path=None): - """Read the metadata from the associated file. If read_path is - specified, read metadata from that file instead. + """Read the metadata from the associated file. + + If `read_path` is specified, read metadata from that file + instead. Updates all the properties in `_media_fields` + from the media file. Raises a `ReadError` if the file could not be read. """ @@ -393,20 +553,16 @@ class Item(LibModel): else: read_path = normpath(read_path) try: - f = MediaFile(syspath(read_path)) - except (OSError, IOError) as exc: + mediafile = MediaFile(syspath(read_path)) + except (OSError, IOError, UnreadableFileError) as exc: raise ReadError(read_path, exc) - for key in ITEM_KEYS_META: - value = getattr(f, key) + for key in self._media_fields: + value = getattr(mediafile, key) if isinstance(value, (int, long)): - # Filter values wider than 64 bits (in signed - # representation). SQLite cannot store them. - # py26: Post transition, we can use: - # value.bit_length() > 63 - if abs(value) >= 2 ** 63: + if value.bit_length() > 63: value = 0 - setattr(self, key, value) + self[key] = value # Database's mtime should now reflect the on-disk value. 
if read_path == self.path: @@ -414,33 +570,90 @@ class Item(LibModel): self.path = read_path - def write(self): - """Write the item's metadata to the associated file. + def write(self, path=None, tags=None): + """Write the item's metadata to a media file. + + All fields in `_media_fields` are written to disk according to + the values on this object. + + `path` is the path of the mediafile to write the data to. It + defaults to the item's path. + + `tags` is a dictionary of additional metadata the should be + written to the file. (These tags need not be in `_media_fields`.) Can raise either a `ReadError` or a `WriteError`. """ + if path is None: + path = self.path + else: + path = normpath(path) + + # Get the data to write to the file. + item_tags = dict(self) + item_tags = {k: v for k, v in item_tags.items() + if k in self._media_fields} # Only write media fields. + if tags is not None: + item_tags.update(tags) + plugins.send('write', item=self, path=path, tags=item_tags) + + # Open the file. try: - f = MediaFile(syspath(self.path)) - except (OSError, IOError) as exc: + mediafile = MediaFile(syspath(path), + id3v23=beets.config['id3v23'].get(bool)) + except (OSError, IOError, UnreadableFileError) as exc: raise ReadError(self.path, exc) - plugins.send('write', item=self) - - for key in ITEM_KEYS_WRITABLE: - setattr(f, key, self[key]) + # Write the tags to the file. + mediafile.update(item_tags) try: - f.save(id3v23=beets.config['id3v23'].get(bool)) + mediafile.save() except (OSError, IOError, MutagenError) as exc: raise WriteError(self.path, exc) # The file has a new mtime. - self.mtime = self.current_mtime() - plugins.send('after_write', item=self) + if path == self.path: + self.mtime = self.current_mtime() + plugins.send('after_write', item=self, path=path) + def try_write(self, path=None, tags=None): + """Calls `write()` but catches and logs `FileOperationError` + exceptions. + + Returns `False` an exception was caught and `True` otherwise. 
+ """ + try: + self.write(path, tags) + return True + except FileOperationError as exc: + log.error(u"{0}", exc) + return False + + def try_sync(self, write, move, with_album=True): + """Synchronize the item with the database and, possibly, updates its + tags on disk and its path (by moving the file). + + `write` indicates whether to write new tags into the file. Similarly, + `move` controls whether the path should be updated. In the + latter case, files are *only* moved when they are inside their + library's directory (if any). + + Similar to calling :meth:`write`, :meth:`move`, and :meth:`store` + (conditionally). + """ + if write: + self.try_write() + if move: + # Check whether this file is inside the library directory. + if self._db and self._db.directory in util.ancestry(self.path): + log.debug(u'moving {0} to synchronize path', + util.displayable_path(self.path)) + self.move(with_album=with_album) + self.store() # Files themselves. - def move_file(self, dest, copy=False): + def move_file(self, dest, copy=False, link=False): """Moves or copies the item's file, updating the path value if the move succeeds. If a file exists at ``dest``, then it is slightly modified to be unique. @@ -451,7 +664,13 @@ class Item(LibModel): util.copy(self.path, dest) plugins.send("item_copied", item=self, source=self.path, destination=dest) + elif link: + util.link(self.path, dest) + plugins.send("item_linked", item=self, source=self.path, + destination=dest) else: + plugins.send("before_item_moved", item=self, source=self.path, + destination=dest) util.move(self.path, dest) plugins.send("item_moved", item=self, source=self.path, destination=dest) @@ -465,6 +684,16 @@ class Item(LibModel): """ return int(os.path.getmtime(syspath(self.path))) + def try_filesize(self): + """Get the size of the underlying file in bytes. + + If the file is missing, return 0 (and log a warning). 
+ """ + try: + return os.path.getsize(syspath(self.path)) + except (OSError, Exception) as exc: + log.warning(u'could not get filesize: {0}', exc) + return 0 # Model methods. @@ -491,13 +720,14 @@ class Item(LibModel): self._db._memotable = {} - def move(self, copy=False, basedir=None, with_album=True): + def move(self, copy=False, link=False, basedir=None, with_album=True): """Move the item to its designated location within the library directory (provided by destination()). Subdirectories are created as needed. If the operation succeeds, the item's path field is updated to reflect the new location. - If copy is True, moving the file is copied rather than moved. + If `copy` is true, moving the file is copied rather than moved. + Similarly, `link` creates a symlink instead. basedir overrides the library base directory for the destination. @@ -519,7 +749,7 @@ class Item(LibModel): # Perform the move and store the change. old_path = self.path - self.move_file(dest, copy) + self.move_file(dest, copy, link) self.store() # If this item is in an album, move its art. @@ -533,31 +763,8 @@ class Item(LibModel): if not copy: util.prune_dirs(os.path.dirname(old_path), self._db.directory) - # Templating. - def _formatted_mapping(self, for_path=False): - """Get a mapping containing string-formatted values from either - this item or the associated album, if any. - """ - mapping = super(Item, self)._formatted_mapping(for_path) - - # Merge in album-level fields. - album = self.get_album() - if album: - for key in album.keys(True): - if key in ALBUM_KEYS_ITEM or key not in ITEM_KEYS: - mapping[key] = album._get_formatted(key, for_path) - - # Use the album artist if the track artist is not set and - # vice-versa. 
- if not mapping['artist']: - mapping['artist'] = mapping['albumartist'] - if not mapping['albumartist']: - mapping['albumartist'] = mapping['artist'] - - return mapping - def destination(self, fragment=False, basedir=None, platform=None, path_formats=None): """Returns the path in the library directory designated for the @@ -577,7 +784,7 @@ class Item(LibModel): for query, path_format in path_formats: if query == PF_KEY_DEFAULT: continue - query = get_query(query, type(self)) + query, _ = parse_query_string(query, type(self)) if query.match(self): # The query matches the item! Use the corresponding path # format. @@ -588,7 +795,7 @@ class Item(LibModel): if query == PF_KEY_DEFAULT: break else: - assert False, "no default path format" + assert False, u"no default path format" if isinstance(path_format, Template): subpath_tmpl = path_format else: @@ -602,25 +809,28 @@ class Item(LibModel): subpath = unicodedata.normalize('NFD', subpath) else: subpath = unicodedata.normalize('NFC', subpath) - # Truncate components and remove forbidden characters. - subpath = util.sanitize_path(subpath, self._db.replacements) - # Encode for the filesystem. - if not fragment: - subpath = bytestring_path(subpath) - # Preserve extension. - _, extension = os.path.splitext(self.path) - if fragment: - # Outputting Unicode. - extension = extension.decode('utf8', 'ignore') - subpath += extension.lower() + if beets.config['asciify_paths']: + subpath = unidecode(subpath) - # Truncate too-long components. maxlen = beets.config['max_filename_length'].get(int) if not maxlen: # When zero, try to determine from filesystem. maxlen = util.max_filename_length(self._db.directory) - subpath = util.truncate_path(subpath, maxlen) + + subpath, fellback = util.legalize_path( + subpath, self._db.replacements, maxlen, + os.path.splitext(self.path)[1], fragment + ) + if fellback: + # Print an error message if legalization fell back to + # default replacements because of the maximum length. 
+ log.warning( + u'Fell back to default replacements when naming ' + u'file {}. Configure replacements to avoid lengthening ' + u'the filename.', + subpath + ) if fragment: return subpath @@ -633,10 +843,89 @@ class Album(LibModel): library. Reflects the library's "albums" table, including album art. """ - _fields = dict((name, typ) for (name, typ, _) in ALBUM_FIELDS) _table = 'albums' _flex_table = 'album_attributes' - _search_fields = ALBUM_DEFAULT_FIELDS + _always_dirty = True + _fields = { + 'id': types.PRIMARY_ID, + 'artpath': PathType(), + 'added': DateType(), + + 'albumartist': types.STRING, + 'albumartist_sort': types.STRING, + 'albumartist_credit': types.STRING, + 'album': types.STRING, + 'genre': types.STRING, + 'year': types.PaddedInt(4), + 'month': types.PaddedInt(2), + 'day': types.PaddedInt(2), + 'disctotal': types.PaddedInt(2), + 'comp': types.BOOLEAN, + 'mb_albumid': types.STRING, + 'mb_albumartistid': types.STRING, + 'albumtype': types.STRING, + 'label': types.STRING, + 'mb_releasegroupid': types.STRING, + 'asin': types.STRING, + 'catalognum': types.STRING, + 'script': types.STRING, + 'language': types.STRING, + 'country': types.STRING, + 'albumstatus': types.STRING, + 'albumdisambig': types.STRING, + 'rg_album_gain': types.NULL_FLOAT, + 'rg_album_peak': types.NULL_FLOAT, + 'original_year': types.PaddedInt(4), + 'original_month': types.PaddedInt(2), + 'original_day': types.PaddedInt(2), + } + + _search_fields = ('album', 'albumartist', 'genre') + + _types = { + 'path': PathType(), + 'data_source': types.STRING, + } + + _sorts = { + 'albumartist': SmartArtistSort, + 'artist': SmartArtistSort, + } + + item_keys = [ + 'added', + 'albumartist', + 'albumartist_sort', + 'albumartist_credit', + 'album', + 'genre', + 'year', + 'month', + 'day', + 'disctotal', + 'comp', + 'mb_albumid', + 'mb_albumartistid', + 'albumtype', + 'label', + 'mb_releasegroupid', + 'asin', + 'catalognum', + 'script', + 'language', + 'country', + 'albumstatus', + 'albumdisambig', + 
'rg_album_gain', + 'rg_album_peak', + 'original_year', + 'original_month', + 'original_day', + ] + """List of keys that are set on an album's items. + """ + + _format_config_key = 'format_album' @classmethod def _getters(cls): @@ -644,17 +933,9 @@ class Album(LibModel): # the album's directory as `path`. getters = plugins.album_field_getters() getters['path'] = Album.item_dir + getters['albumtotal'] = Album._albumtotal return getters - def __setitem__(self, key, value): - """Set the value of an album attribute.""" - if key == 'artpath': - if isinstance(value, unicode): - value = bytestring_path(value) - elif isinstance(value, buffer): - value = bytes(value) - super(Album, self).__setitem__(key, value) - def items(self): """Returns an iterable over the items associated with this album. @@ -681,7 +962,7 @@ class Album(LibModel): for item in self.items(): item.remove(delete, False) - def move_art(self, copy=False): + def move_art(self, copy=False, link=False): """Move or copy any existing album art so that it remains in the same directory as the items. """ @@ -694,9 +975,13 @@ class Album(LibModel): return new_art = util.unique_path(new_art) - log.debug('moving album art %s to %s' % (old_art, new_art)) + log.debug(u'moving album art {0} to {1}', + util.displayable_path(old_art), + util.displayable_path(new_art)) if copy: util.copy(old_art, new_art) + elif link: + util.link(old_art, new_art) else: util.move(old_art, new_art) self.artpath = new_art @@ -706,7 +991,7 @@ class Album(LibModel): util.prune_dirs(os.path.dirname(old_art), self._db.directory) - def move(self, copy=False, basedir=None): + def move(self, copy=False, link=False, basedir=None): """Moves (or copies) all items to their destination. Any album art moves along with them. basedir overrides the library base directory for the destination. The album is stored to the @@ -721,10 +1006,10 @@ class Album(LibModel): # Move items. 
items = list(self.items()) for item in items: - item.move(copy, basedir=basedir, with_album=False) + item.move(copy, link, basedir=basedir, with_album=False) # Move art. - self.move_art(copy) + self.move_art(copy, link) self.store() def item_dir(self): @@ -733,9 +1018,30 @@ class Album(LibModel): """ item = self.items().get() if not item: - raise ValueError('empty album') + raise ValueError(u'empty album') return os.path.dirname(item.path) + def _albumtotal(self): + """Return the total number of tracks on all discs on the album + """ + if self.disctotal == 1 or not beets.config['per_disc_numbering']: + return self.items()[0].tracktotal + + counted = [] + total = 0 + + for item in self.items(): + if item.disc in counted: + continue + + total += item.tracktotal + counted.append(item.disc) + + if len(counted) == self.disctotal: + break + + return total + def art_destination(self, image, item_dir=None): """Returns a path to the destination for the album art image for the album. `image` is the path of the image that will be @@ -750,6 +1056,8 @@ class Album(LibModel): filename_tmpl = Template(beets.config['art_filename'].get(unicode)) subpath = self.evaluate_template(filename_tmpl, True) + if beets.config['asciify_paths']: + subpath = unidecode(subpath) subpath = util.sanitize_path(subpath, replacements=self._db.replacements) subpath = bytestring_path(subpath) @@ -763,6 +1071,8 @@ class Album(LibModel): """Sets the album's cover art to the image at the given path. The image is copied (or moved) into place, replacing any existing art. + + Sends an 'art_set' event with `self` as the sole argument. """ path = bytestring_path(path) oldart = self.artpath @@ -786,13 +1096,15 @@ class Album(LibModel): util.move(path, artdest) self.artpath = artdest + plugins.send('art_set', album=self) + def store(self): """Update the database with the album information. The album's tracks are also updated. """ # Get modified track fields. 
track_updates = {} - for key in ALBUM_KEYS_ITEM: + for key in self.item_keys: if key in self._dirty: track_updates[key] = self[key] @@ -804,163 +1116,82 @@ class Album(LibModel): item[key] = value item.store() + def try_sync(self, write, move): + """Synchronize the album and its items with the database. + Optionally, also write any new tags into the files and update + their paths. + + `write` indicates whether to write tags to the item files, and + `move` controls whether files (both audio and album art) are + moved. + """ + self.store() + for item in self.items(): + item.try_sync(write, move) -# Query construction and parsing helpers. +# Query construction helpers. +def parse_query_parts(parts, model_cls): + """Given a beets query string as a list of components, return the + `Query` and `Sort` they represent. -PARSE_QUERY_PART_REGEX = re.compile( - # Non-capturing optional segment for the keyword. - r'(?:' - r'(\S+?)' # The field key. - r'(? (None, 'stapler', SubstringQuery) - 'color:red' -> ('color', 'red', SubstringQuery) - ':^Quiet' -> (None, '^Quiet', RegexpQuery) - 'color::b..e' -> ('color', 'b..e', RegexpQuery) - - Prefixes may be "escaped" with a backslash to disable the keying - behavior. + Like `dbcore.parse_sorted_query`, with beets query prefixes and + special path query detection. """ - part = part.strip() - match = PARSE_QUERY_PART_REGEX.match(part) - - assert match # Regex should always match. - key = match.group(1) - term = match.group(2).replace('\:', ':') - - # Match the search term against the list of prefixes. - for pre, query_class in prefixes.items(): - if term.startswith(pre): - return key, term[len(pre):], query_class - - # No matching prefix: use type-based or fallback/default query. - query_class = query_classes.get(key, default_class) - return key, term, query_class - - -def construct_query_part(query_part, model_cls): - """Create a query from a single query component, `query_part`, for - querying instances of `model_cls`. 
Return a `Query` instance. - """ - # Shortcut for empty query parts. - if not query_part: - return dbcore.query.TrueQuery() - - # Set up and parse the string. - query_classes = dict((k, t.query) for (k, t) in model_cls._fields.items()) + # Get query types and their prefix characters. prefixes = {':': dbcore.query.RegexpQuery} prefixes.update(plugins.queries()) - key, pattern, query_class = \ - parse_query_part(query_part, query_classes, prefixes) - # No key specified. - if key is None: - if os.sep in pattern and 'path' in model_cls._fields: - # This looks like a path. - return PathQuery('path', pattern) - elif issubclass(query_class, dbcore.FieldQuery): - # The query type matches a specific field, but none was - # specified. So we use a version of the query that matches - # any field. - return dbcore.query.AnyFieldQuery(pattern, - model_cls._search_fields, - query_class) + # Special-case path-like queries, which are non-field queries + # containing path separators (/). + path_parts = [] + non_path_parts = [] + for s in parts: + if PathQuery.is_path_query(s): + path_parts.append(s) else: - # Other query type. - return query_class(pattern) + non_path_parts.append(s) - key = key.lower() + query, sort = dbcore.parse_sorted_query( + model_cls, non_path_parts, prefixes + ) - # Singleton query (not a real field). - if key == 'singleton': - return SingletonQuery(util.str2bool(pattern)) + # Add path queries to aggregate query. + # Match field / flexattr depending on whether the model has the path field + fast_path_query = 'path' in model_cls._fields + query.subqueries += [PathQuery('path', s, fast_path_query) + for s in path_parts] - # Other field. - else: - return query_class(key.lower(), pattern, key in model_cls._fields) + return query, sort -def query_from_strings(query_cls, model_cls, query_parts): - """Creates a collection query of type `query_cls` from a list of - strings in the format used by parse_query_part. 
`model_cls` - determines how queries are constructed from strings. +def parse_query_string(s, model_cls): + """Given a beets query string, return the `Query` and `Sort` they + represent. + + The string is split into components using shell-like syntax. """ - subqueries = [] - for part in query_parts: - subqueries.append(construct_query_part(part, model_cls)) - if not subqueries: # No terms in query. - subqueries = [dbcore.query.TrueQuery()] - return query_cls(subqueries) - - -def get_query(val, model_cls): - """Takes a value which may be None, a query string, a query string - list, or a Query object, and returns a suitable Query object. - `model_cls` is the subclass of Model indicating which entity this - is a query for (i.e., Album or Item) and is used to determine which - fields are searched. - """ - # Convert a single string into a list of space-separated - # criteria. - if isinstance(val, basestring): - # A bug in Python < 2.7.3 prevents correct shlex splitting of - # Unicode strings. - # http://bugs.python.org/issue6988 - if isinstance(val, unicode): - val = val.encode('utf8') - val = [s.decode('utf8') for s in shlex.split(val)] - - if val is None: - return dbcore.query.TrueQuery() - elif isinstance(val, list) or isinstance(val, tuple): - return query_from_strings(dbcore.AndQuery, model_cls, val) - elif isinstance(val, dbcore.Query): - return val - else: - raise ValueError('query must be None or have type Query or str') - + assert isinstance(s, unicode), u"Query is not unicode: {0!r}".format(s) + try: + parts = util.shlex_split(s) + except ValueError as exc: + raise dbcore.InvalidQueryError(s, exc) + return parse_query_parts(parts, model_cls) # The Library: interface to the database. - class Library(dbcore.Database): """A database of music containing songs and albums. 
""" _models = (Item, Album) def __init__(self, path='library.blb', - directory='~/Music', - path_formats=((PF_KEY_DEFAULT, - '$artist/$album/$track $title'),), - replacements=None): + directory='~/Music', + path_formats=((PF_KEY_DEFAULT, + '$artist/$album/$track $title'),), + replacements=None): if path != ':memory:': self.path = bytestring_path(normpath(path)) super(Library, self).__init__(path) @@ -971,7 +1202,6 @@ class Library(dbcore.Database): self._memotable = {} # Used for template substitution performance. - # Adding objects to the database. def add(self, obj): @@ -983,12 +1213,17 @@ class Library(dbcore.Database): return obj.id def add_album(self, items): - """Create a new album consisting of a list of items. The items - are added to the database if they don't yet have an ID. Return a - new :class:`Album` object. + """Create a new album consisting of a list of items. + + The items are added to the database if they don't yet have an + ID. Return a new :class:`Album` object. The list items must not + be empty. """ + if not items: + raise ValueError(u'need at least one item') + # Create the album structure using metadata from the first item. - values = dict((key, items[0][key]) for key in ALBUM_KEYS_ITEM) + values = dict((key, items[0][key]) for key in Album.item_keys) album = Album(self, **values) # Add the album structure and set the items' album_id fields. @@ -1004,34 +1239,54 @@ class Library(dbcore.Database): return album - # Querying. - def _fetch(self, model_cls, query, order_by=None): - """Parse a query and fetch. + def _fetch(self, model_cls, query, sort=None): + """Parse a query and fetch. If a order specification is present + in the query string the `sort` argument is ignored. """ + # Parse the query, if necessary. 
+ try: + parsed_sort = None + if isinstance(query, basestring): + query, parsed_sort = parse_query_string(query, model_cls) + elif isinstance(query, (list, tuple)): + query, parsed_sort = parse_query_parts(query, model_cls) + except dbcore.query.InvalidQueryArgumentTypeError as exc: + raise dbcore.InvalidQueryError(query, exc) + + # Any non-null sort specified by the parsed query overrides the + # provided sort. + if parsed_sort and not isinstance(parsed_sort, dbcore.query.NullSort): + sort = parsed_sort + return super(Library, self)._fetch( - model_cls, get_query(query, model_cls), order_by + model_cls, query, sort ) - def albums(self, query=None): - """Get a sorted list of :class:`Album` objects matching the - given query. + @staticmethod + def get_default_album_sort(): + """Get a :class:`Sort` object for albums from the config option. """ - order = '{0}, album'.format( - _orelse("albumartist_sort", "albumartist") - ) - return self._fetch(Album, query, order) + return dbcore.sort_from_strings( + Album, beets.config['sort_album'].as_str_seq()) - def items(self, query=None): - """Get a sorted list of :class:`Item` objects matching the given - query. + @staticmethod + def get_default_item_sort(): + """Get a :class:`Sort` object for items from the config option. """ - order = '{0}, album, disc, track'.format( - _orelse("artist_sort", "artist") - ) - return self._fetch(Item, query, order) + return dbcore.sort_from_strings( + Item, beets.config['sort_item'].as_str_seq()) + def albums(self, query=None, sort=None): + """Get :class:`Album` objects matching the query. + """ + return self._fetch(Album, query, sort or self.get_default_album_sort()) + + def items(self, query=None, sort=None): + """Get :class:`Item` objects matching the query. + """ + return self._fetch(Item, query, sort or self.get_default_item_sort()) # Convenience accessors. @@ -1055,10 +1310,8 @@ class Library(dbcore.Database): return self._get(Album, album_id) - # Default path template resources. 
- def _int_arg(s): """Convert a string argument to an integer for use in a template function. May raise a ValueError. @@ -1075,7 +1328,7 @@ class DefaultTemplateFunctions(object): _prefix = 'tmpl_' def __init__(self, item=None, lib=None): - """Paramaterize the functions. If `item` or `lib` is None, then + """Parametrize the functions. If `item` or `lib` is None, then some functions (namely, ``aunique``) will always evaluate to the empty string. """ @@ -1123,9 +1376,13 @@ class DefaultTemplateFunctions(object): otherwise, emit ``falseval`` (if provided). """ try: - condition = _int_arg(condition) + int_condition = _int_arg(condition) except ValueError: - condition = condition.strip() + if condition.lower() == "false": + return falseval + else: + condition = int_condition + if condition: return trueval else: @@ -1138,11 +1395,11 @@ class DefaultTemplateFunctions(object): return unidecode(s) @staticmethod - def tmpl_time(s, format): + def tmpl_time(s, fmt): """Format a time value using `strftime`. """ cur_fmt = beets.config['time_format'].get(unicode) - return time.strftime(format, time.strptime(s, cur_fmt)) + return time.strftime(fmt, time.strptime(s, cur_fmt)) def tmpl_aunique(self, keys=None, disam=None): """Generate a string that is guaranteed to be unique among all @@ -1176,7 +1433,7 @@ class DefaultTemplateFunctions(object): # Find matching albums to disambiguate with. subqueries = [] for key in keys: - value = getattr(album, key) + value = album.get(key, '') subqueries.append(dbcore.MatchQuery(key, value)) albums = self.lib.albums(dbcore.AndQuery(subqueries)) @@ -1189,7 +1446,7 @@ class DefaultTemplateFunctions(object): # Find the first disambiguator that distinguishes the albums. for disambiguator in disam: # Get the value for each album for the current field. 
- disam_values = set([getattr(a, disambiguator) for a in albums]) + disam_values = set([a.get(disambiguator, '') for a in albums]) # If the set of unique values is equal to the number of # albums in the disambiguation set, we're done -- this is @@ -1204,11 +1461,40 @@ class DefaultTemplateFunctions(object): return res # Flatten disambiguation value into a string. - disam_value = album._get_formatted(disambiguator, True) + disam_value = album.formatted(True).get(disambiguator) res = u' [{0}]'.format(disam_value) self.lib._memotable[memokey] = res return res + @staticmethod + def tmpl_first(s, count=1, skip=0, sep=u'; ', join_str=u'; '): + """ Gets the item(s) from x to y in a string separated by something + and join then with something + + :param s: the string + :param count: The number of items included + :param skip: The number of items skipped + :param sep: the separator. Usually is '; ' (default) or '/ ' + :param join_str: the string which will join the items, default '; '. + """ + skip = int(skip) + count = skip + int(count) + return join_str.join(s.split(sep)[skip:count]) + + def tmpl_ifdef(self, field, trueval=u'', falseval=u''): + """ If field exists return trueval or the field (default) + otherwise, emit return falseval (if provided). + + :param field: The name of the field + :param trueval: The string if the condition is true + :param falseval: The string if the condition is false + :return: The string, based on condition + """ + if self.item.formatted().get(field): + return trueval if trueval else self.item.formatted().get(field) + else: + return falseval + # Get the name of tmpl_* functions in the above class. DefaultTemplateFunctions._func_names = \ diff --git a/libs/beets/logging.py b/libs/beets/logging.py new file mode 100644 index 00000000..a94da1c6 --- /dev/null +++ b/libs/beets/logging.py @@ -0,0 +1,133 @@ +# -*- coding: utf-8 -*- +# This file is part of beets. +# Copyright 2016, Adrian Sampson. 
+# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. + +"""A drop-in replacement for the standard-library `logging` module that +allows {}-style log formatting on Python 2 and 3. + +Provides everything the "logging" module does. The only difference is +that when getLogger(name) instantiates a logger that logger uses +{}-style formatting. +""" + +from __future__ import division, absolute_import, print_function + +from copy import copy +from logging import * # noqa +import subprocess +import threading + + +def logsafe(val): + """Coerce a potentially "problematic" value so it can be formatted + in a Unicode log string. + + This works around a number of pitfalls when logging objects in + Python 2: + - Logging path names, which must be byte strings, requires + conversion for output. + - Some objects, including some exceptions, will crash when you call + `unicode(v)` while `str(v)` works fine. CalledProcessError is an + example. + """ + # Already Unicode. + if isinstance(val, unicode): + return val + + # Bytestring: needs decoding. + elif isinstance(val, bytes): + # Blindly convert with UTF-8. Eventually, it would be nice to + # (a) only do this for paths, if they can be given a distinct + # type, and (b) warn the developer if they do this for other + # bytestrings. + return val.decode('utf8', 'replace') + + # A "problem" object: needs a workaround. 
+ elif isinstance(val, subprocess.CalledProcessError): + try: + return unicode(val) + except UnicodeDecodeError: + # An object with a broken __unicode__ formatter. Use __str__ + # instead. + return str(val).decode('utf8', 'replace') + + # Other objects are used as-is so field access, etc., still works in + # the format string. + else: + return val + + +class StrFormatLogger(Logger): + """A version of `Logger` that uses `str.format`-style formatting + instead of %-style formatting. + """ + + class _LogMessage(object): + def __init__(self, msg, args, kwargs): + self.msg = msg + self.args = args + self.kwargs = kwargs + + def __str__(self): + args = [logsafe(a) for a in self.args] + kwargs = dict((k, logsafe(v)) for (k, v) in self.kwargs.items()) + return self.msg.format(*args, **kwargs) + + def _log(self, level, msg, args, exc_info=None, extra=None, **kwargs): + """Log msg.format(*args, **kwargs)""" + m = self._LogMessage(msg, args, kwargs) + return super(StrFormatLogger, self)._log(level, m, (), exc_info, extra) + + +class ThreadLocalLevelLogger(Logger): + """A version of `Logger` whose level is thread-local instead of shared. + """ + def __init__(self, name, level=NOTSET): + self._thread_level = threading.local() + self.default_level = NOTSET + super(ThreadLocalLevelLogger, self).__init__(name, level) + + @property + def level(self): + try: + return self._thread_level.level + except AttributeError: + self._thread_level.level = self.default_level + return self.level + + @level.setter + def level(self, value): + self._thread_level.level = value + + def set_global_level(self, level): + """Set the level on the current thread + the default value for all + threads. 
+ """ + self.default_level = level + self.setLevel(level) + + +class BeetsLogger(ThreadLocalLevelLogger, StrFormatLogger): + pass + + +my_manager = copy(Logger.manager) +my_manager.loggerClass = BeetsLogger + + +def getLogger(name=None): # noqa + if name: + return my_manager.getLogger(name) + else: + return Logger.root diff --git a/libs/beets/mediafile.py b/libs/beets/mediafile.py index 301e0f37..a359a5b4 100644 --- a/libs/beets/mediafile.py +++ b/libs/beets/mediafile.py @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2014, Adrian Sampson. +# Copyright 2016, Adrian Sampson. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -32,14 +33,18 @@ Internally ``MediaFile`` uses ``MediaField`` descriptors to access the data from the tags. In turn ``MediaField`` uses a number of ``StorageStyle`` strategies to handle format specific logic. """ +from __future__ import division, absolute_import, print_function + import mutagen import mutagen.mp3 +import mutagen.id3 import mutagen.oggopus import mutagen.oggvorbis import mutagen.mp4 import mutagen.flac import mutagen.monkeysaudio import mutagen.asf +import mutagen.aiff import datetime import re import base64 @@ -47,40 +52,17 @@ import math import struct import imghdr import os -import logging import traceback -from beets.util.enumeration import enum +import enum + +from beets import logging +from beets.util import displayable_path, syspath + __all__ = ['UnreadableFileError', 'FileTypeError', 'MediaFile'] - - -# Logger. log = logging.getLogger('beets') - - -# Exceptions. - -class UnreadableFileError(Exception): - """Indicates a file that MediaFile can't read. - """ - pass - -class FileTypeError(UnreadableFileError): - """Raised for files that don't seem to have a type MediaFile - supports. 
- """ - pass - -class MutagenError(UnreadableFileError): - """Raised when Mutagen fails unexpectedly---probably due to a bug. - """ - - - -# Constants. - # Human-readable type names. TYPES = { 'mp3': 'MP3', @@ -93,9 +75,41 @@ TYPES = { 'wv': 'WavPack', 'mpc': 'Musepack', 'asf': 'Windows Media', + 'aiff': 'AIFF', } +# Exceptions. + +class UnreadableFileError(Exception): + """Mutagen is not able to extract information from the file. + """ + def __init__(self, path): + Exception.__init__(self, displayable_path(path)) + + +class FileTypeError(UnreadableFileError): + """Reading this type of file is not supported. + + If passed the `mutagen_type` argument this indicates that the + mutagen type is not supported by `Mediafile`. + """ + def __init__(self, path, mutagen_type=None): + path = displayable_path(path) + if mutagen_type is None: + msg = path + else: + msg = u'{0}: of mutagen type {1}'.format(path, mutagen_type) + Exception.__init__(self, msg) + + +class MutagenError(UnreadableFileError): + """Raised when Mutagen fails unexpectedly---probably due to a bug. + """ + def __init__(self, path, mutagen_exc): + msg = u'{0}: {1}'.format(displayable_path(path), mutagen_exc) + Exception.__init__(self, msg) + # Utility. @@ -105,10 +119,11 @@ def _safe_cast(out_type, val): returned. out_type should be bool, int, or unicode; otherwise, the value is just passed through. """ + if val is None: + return None + if out_type == int: - if val is None: - return 0 - elif isinstance(val, int) or isinstance(val, float): + if isinstance(val, int) or isinstance(val, float): # Just a number. 
return int(val) else: @@ -123,45 +138,38 @@ def _safe_cast(out_type, val): return int(val) elif out_type == bool: - if val is None: + try: + # Should work for strings, bools, ints: + return bool(int(val)) + except ValueError: return False - else: - try: - # Should work for strings, bools, ints: - return bool(int(val)) - except ValueError: - return False elif out_type == unicode: - if val is None: - return u'' + if isinstance(val, bytes): + return val.decode('utf8', 'ignore') + elif isinstance(val, unicode): + return val else: - if isinstance(val, str): - return val.decode('utf8', 'ignore') - elif isinstance(val, unicode): - return val - else: - return unicode(val) + return unicode(val) elif out_type == float: - if val is None: - return 0.0 - elif isinstance(val, int) or isinstance(val, float): + if isinstance(val, int) or isinstance(val, float): return float(val) else: if not isinstance(val, basestring): val = unicode(val) - val = re.match(r'[\+-]?[0-9\.]*', val.strip()).group(0) - if not val: - return 0.0 - else: - return float(val) + match = re.match(r'[\+-]?([0-9]+\.?[0-9]*|[0-9]*\.[0-9]+)', + val.strip()) + if match: + val = match.group(0) + if val: + return float(val) + return 0.0 else: return val - # Image coding for ASF/WMA. def _unpack_asf_image(data): @@ -173,15 +181,15 @@ def _unpack_asf_image(data): of exceptions (out-of-bounds, etc.). We should clean this up sometime so that the failure modes are well-defined. 
""" - type, size = struct.unpack_from(" 0 else None + image_data = frame.value[text_delimiter_index + 1:] + images.append(Image(data=image_data, type=cover_type, + desc=comment)) + except KeyError: + pass + + return images + + def set_list(self, mutagen_file, values): + self.delete(mutagen_file) + + for image in values: + image_type = image.type or ImageType.other + comment = image.desc or '' + image_data = comment.encode('utf8') + b'\x00' + image.data + cover_tag = self.TAG_NAMES[image_type] + mutagen_file[cover_tag] = image_data + + def delete(self, mutagen_file): + """Remove all images from the file. + """ + for cover_tag in self.TAG_NAMES.values(): + try: + del mutagen_file[cover_tag] + except KeyError: + pass # MediaField is a descriptor that represents a single logical field. It @@ -913,12 +1061,14 @@ class MediaField(object): def __init__(self, *styles, **kwargs): """Creates a new MediaField. - - `styles`: `StorageStyle` instances that describe the strategy - for reading and writing the field in particular formats. - There must be at least one style for each possible file - format. - - `out_type`: the type of the value that should be returned when - getting this property. + :param styles: `StorageStyle` instances that describe the strategy + for reading and writing the field in particular + formats. There must be at least one style for + each possible file format. + + :param out_type: the type of the value that should be returned when + getting this property. + """ self.out_type = kwargs.get('out_type', unicode) self._styles = styles @@ -945,6 +1095,10 @@ class MediaField(object): for style in self.styles(mediafile.mgfile): style.set(mediafile.mgfile, value) + def __delete__(self, mediafile): + for style in self.styles(mediafile.mgfile): + style.delete(mediafile.mgfile) + def _none_value(self): """Get an appropriate "null" value for this field's type. This is used internally when setting the field to None. 
@@ -1007,27 +1161,40 @@ class DateField(MediaField): def __get__(self, mediafile, owner=None): year, month, day = self._get_date_tuple(mediafile) + if not year: + return None try: return datetime.date( - year or datetime.MINYEAR, + year, month or 1, day or 1 ) except ValueError: # Out of range values. - return datetime.date.min + return None def __set__(self, mediafile, date): - self._set_date_tuple(mediafile, date.year, date.month, date.day) + if date is None: + self._set_date_tuple(mediafile, None, None, None) + else: + self._set_date_tuple(mediafile, date.year, date.month, date.day) + + def __delete__(self, mediafile): + super(DateField, self).__delete__(mediafile) + if hasattr(self, '_year_field'): + self._year_field.__delete__(mediafile) def _get_date_tuple(self, mediafile): """Get a 3-item sequence representing the date consisting of a year, month, and day number. Each number is either an integer or None. """ - # Get the underlying data and split on hyphens. + # Get the underlying data and split on hyphens and slashes. datestring = super(DateField, self).__get__(mediafile, None) - datestring = re.sub(r'[Tt ].*$', '', unicode(datestring)) - items = unicode(datestring).split('-') + if isinstance(datestring, basestring): + datestring = re.sub(r'[Tt ].*$', '', unicode(datestring)) + items = re.split('[-/]', unicode(datestring)) + else: + items = [] # Ensure that we have exactly 3 components, possibly by # truncating or padding. @@ -1040,20 +1207,30 @@ class DateField(MediaField): items[0] = self._year_field.__get__(mediafile) # Convert each component to an integer if possible. - return [_safe_cast(int, item) for item in items] + items_ = [] + for item in items: + try: + items_.append(int(item)) + except: + items_.append(None) + return items_ def _set_date_tuple(self, mediafile, year, month=None, day=None): """Set the value of the field given a year, month, and day number. Each number can be an integer or None to indicate an unset component. 
""" - date = [year or 0] + if year is None: + self.__delete__(mediafile) + return + + date = [u'{0:04d}'.format(int(year))] if month: - date.append(month) + date.append(u'{0:02d}'.format(int(month))) if month and day: - date.append(day) + date.append(u'{0:02d}'.format(int(day))) date = map(unicode, date) - super(DateField, self).__set__(mediafile, '-'.join(date)) + super(DateField, self).__set__(mediafile, u'-'.join(date)) if hasattr(self, '_year_field'): self._year_field.__set__(mediafile, year) @@ -1084,29 +1261,48 @@ class DateItemField(MediaField): items[self.item_pos] = value self.date_field._set_date_tuple(mediafile, *items) + def __delete__(self, mediafile): + self.__set__(mediafile, None) + class CoverArtField(MediaField): """A descriptor that provides access to the *raw image data* for the - first image on a file. This is used for backwards compatibility: the + cover image on a file. This is used for backwards compatibility: the full `ImageListField` provides richer `Image` objects. + + When there are multiple images we try to pick the most likely to be a front + cover. """ def __init__(self): pass def __get__(self, mediafile, _): - try: - return mediafile.images[0].data - except IndexError: + candidates = mediafile.images + if candidates: + return self.guess_cover_image(candidates).data + else: return None + @staticmethod + def guess_cover_image(candidates): + if len(candidates) == 1: + return candidates[0] + try: + return next(c for c in candidates if c.type == ImageType.front) + except StopIteration: + return candidates[0] + def __set__(self, mediafile, data): if data: mediafile.images = [Image(data=data)] else: mediafile.images = [] + def __delete__(self, mediafile): + delattr(mediafile, 'images') -class ImageListField(MediaField): + +class ImageListField(ListMediaField): """Descriptor to access the list of images embedded in tags. 
The getter returns a list of `Image` instances obtained from @@ -1123,31 +1319,25 @@ class ImageListField(MediaField): ASFImageStorageStyle(), VorbisImageStorageStyle(), FlacImageStorageStyle(), + APEv2ImageStorageStyle(), + out_type=Image, ) - def __get__(self, mediafile, _): - images = [] - for style in self.styles(mediafile.mgfile): - images.extend(style.get_list(mediafile.mgfile)) - return images - - def __set__(self, mediafile, images): - for style in self.styles(mediafile.mgfile): - style.set_list(mediafile.mgfile, images) - - # MediaFile is a collection of fields. - class MediaFile(object): """Represents a multimedia file on disk and provides access to its metadata. """ - def __init__(self, path): - """Constructs a new MediaFile reflecting the file at path. May - throw UnreadableFileError. + def __init__(self, path, id3v23=False): + """Constructs a new `MediaFile` reflecting the file at path. May + throw `UnreadableFileError`. + + By default, MP3 files are saved with ID3v2.4 tags. You can use + the older ID3v2.3 standard by specifying the `id3v23` option. """ + path = syspath(path) self.path = path unreadable_exc = ( @@ -1161,41 +1351,49 @@ class MediaFile(object): mutagen.ogg.error, mutagen.asf.error, mutagen.apev2.error, + mutagen.aiff.error, ) try: self.mgfile = mutagen.File(path) except unreadable_exc as exc: - log.debug(u'header parsing failed: {0}'.format(unicode(exc))) - raise UnreadableFileError('Mutagen could not read file') + log.debug(u'header parsing failed: {0}', unicode(exc)) + raise UnreadableFileError(path) except IOError as exc: if type(exc) == IOError: # This is a base IOError, not a subclass from Mutagen or # anywhere else. raise else: - log.debug(traceback.format_exc()) - raise MutagenError('Mutagen raised an exception') + log.debug(u'{}', traceback.format_exc()) + raise MutagenError(path, exc) except Exception as exc: # Isolate bugs in Mutagen. 
- log.debug(traceback.format_exc()) - log.error('uncaught Mutagen exception in open: {0}'.format(exc)) - raise MutagenError('Mutagen raised an exception') + log.debug(u'{}', traceback.format_exc()) + log.error(u'uncaught Mutagen exception in open: {0}', exc) + raise MutagenError(path, exc) - if self.mgfile is None: # Mutagen couldn't guess the type - raise FileTypeError('file type unsupported by Mutagen') - elif type(self.mgfile).__name__ == 'M4A' or \ - type(self.mgfile).__name__ == 'MP4': - # This hack differentiates AAC and ALAC until we find a more - # deterministic approach. Mutagen only sets the sample rate - # for AAC files. See: - # https://github.com/sampsyo/beets/pull/295 - if hasattr(self.mgfile.info, 'sample_rate') and \ - self.mgfile.info.sample_rate > 0: - self.type = 'aac' + if self.mgfile is None: + # Mutagen couldn't guess the type + raise FileTypeError(path) + elif (type(self.mgfile).__name__ == 'M4A' or + type(self.mgfile).__name__ == 'MP4'): + info = self.mgfile.info + if hasattr(info, 'codec'): + if info.codec and info.codec.startswith('alac'): + self.type = 'alac' + else: + self.type = 'aac' else: - self.type = 'alac' - elif type(self.mgfile).__name__ == 'ID3' or \ - type(self.mgfile).__name__ == 'MP3': + # This hack differentiates AAC and ALAC on versions of + # Mutagen < 1.26. Once Mutagen > 1.26 is out and + # required by beets, we can remove this. 
+ if hasattr(self.mgfile.info, 'bitrate') and \ + self.mgfile.info.bitrate > 0: + self.type = 'aac' + else: + self.type = 'alac' + elif (type(self.mgfile).__name__ == 'ID3' or + type(self.mgfile).__name__ == 'MP3'): self.type = 'mp3' elif type(self.mgfile).__name__ == 'FLAC': self.type = 'flac' @@ -1211,22 +1409,24 @@ class MediaFile(object): self.type = 'mpc' elif type(self.mgfile).__name__ == 'ASF': self.type = 'asf' + elif type(self.mgfile).__name__ == 'AIFF': + self.type = 'aiff' else: - raise FileTypeError('file type %s unsupported by MediaFile' % - type(self.mgfile).__name__) + raise FileTypeError(path, type(self.mgfile).__name__) - # add a set of tags if it's missing + # Add a set of tags if it's missing. if self.mgfile.tags is None: self.mgfile.add_tags() - def save(self, id3v23=False): - """Write the object's tags back to the file. + # Set the ID3v2.3 flag only for MP3s. + self.id3v23 = id3v23 and self.type == 'mp3' - By default, MP3 files are saved with ID3v2.4 tags. You can use - the older ID3v2.3 standard by specifying the `id3v23` option. + def save(self): + """Write the object's tags back to the file. """ + # Possibly save the tags to ID3v2.3. kwargs = {} - if id3v23 and self.type == 'mp3': + if self.id3v23: id3 = self.mgfile if hasattr(id3, 'tags'): # In case this is an MP3 object, not an ID3 object. @@ -1241,9 +1441,9 @@ class MediaFile(object): # Propagate these through: they don't represent Mutagen bugs. raise except Exception as exc: - log.debug(traceback.format_exc()) - log.error('uncaught Mutagen exception in save: {0}'.format(exc)) - raise MutagenError('Mutagen raised an exception') + log.debug(u'{}', traceback.format_exc()) + log.error(u'uncaught Mutagen exception in save: {0}', exc) + raise MutagenError(self.path, exc) def delete(self): """Remove the current metadata tag from the file. @@ -1256,30 +1456,112 @@ class MediaFile(object): for tag in self.mgfile.keys(): del self.mgfile[tag] + # Convenient access to the set of available fields. 
+ + @classmethod + def fields(cls): + """Get the names of all writable properties that reflect + metadata tags (i.e., those that are instances of + :class:`MediaField`). + """ + for property, descriptor in cls.__dict__.items(): + if isinstance(descriptor, MediaField): + yield property.decode('utf8') + + @classmethod + def _field_sort_name(cls, name): + """Get a sort key for a field name that determines the order + fields should be written in. + + Fields names are kept unchanged, unless they are instances of + :class:`DateItemField`, in which case `year`, `month`, and `day` + are replaced by `date0`, `date1`, and `date2`, respectively, to + make them appear in that order. + """ + if isinstance(cls.__dict__[name], DateItemField): + name = re.sub('year', 'date0', name) + name = re.sub('month', 'date1', name) + name = re.sub('day', 'date2', name) + return name + + @classmethod + def sorted_fields(cls): + """Get the names of all writable metadata fields, sorted in the + order that they should be written. + + This is a lexicographic order, except for instances of + :class:`DateItemField`, which are sorted in year-month-day + order. + """ + for property in sorted(cls.fields(), key=cls._field_sort_name): + yield property + + @classmethod + def readable_fields(cls): + """Get all metadata fields: the writable ones from + :meth:`fields` and also other audio properties. + """ + for property in cls.fields(): + yield property + for property in ('length', 'samplerate', 'bitdepth', 'bitrate', + 'channels', 'format'): + yield property + + @classmethod + def add_field(cls, name, descriptor): + """Add a field to store custom tags. + + :param name: the name of the property the field is accessed + through. It must not already exist on this class. + + :param descriptor: an instance of :class:`MediaField`. 
+ """ + if not isinstance(descriptor, MediaField): + raise ValueError( + u'{0} must be an instance of MediaField'.format(descriptor)) + if name in cls.__dict__: + raise ValueError( + u'property "{0}" already exists on MediaField'.format(name)) + setattr(cls, name, descriptor) + + def update(self, dict): + """Set all field values from a dictionary. + + For any key in `dict` that is also a field to store tags the + method retrieves the corresponding value from `dict` and updates + the `MediaFile`. If a key has the value `None`, the + corresponding property is deleted from the `MediaFile`. + """ + for field in self.sorted_fields(): + if field in dict: + if dict[field] is None: + delattr(self, field) + else: + setattr(self, field, dict[field]) # Field definitions. title = MediaField( MP3StorageStyle('TIT2'), - MP4StorageStyle("\xa9nam"), + MP4StorageStyle(b"\xa9nam"), StorageStyle('TITLE'), ASFStorageStyle('Title'), ) artist = MediaField( MP3StorageStyle('TPE1'), - MP4StorageStyle("\xa9ART"), + MP4StorageStyle(b"\xa9ART"), StorageStyle('ARTIST'), ASFStorageStyle('Author'), ) album = MediaField( MP3StorageStyle('TALB'), - MP4StorageStyle("\xa9alb"), + MP4StorageStyle(b"\xa9alb"), StorageStyle('ALBUM'), ASFStorageStyle('WM/AlbumTitle'), ) genres = ListMediaField( MP3ListStorageStyle('TCON'), - MP4ListStorageStyle("\xa9gen"), + MP4ListStorageStyle(b"\xa9gen"), ListStorageStyle('GENRE'), ASFStorageStyle('WM/Genre'), ) @@ -1287,19 +1569,19 @@ class MediaFile(object): composer = MediaField( MP3StorageStyle('TCOM'), - MP4StorageStyle("\xa9wrt"), + MP4StorageStyle(b"\xa9wrt"), StorageStyle('COMPOSER'), ASFStorageStyle('WM/Composer'), ) grouping = MediaField( MP3StorageStyle('TIT1'), - MP4StorageStyle("\xa9grp"), + MP4StorageStyle(b"\xa9grp"), StorageStyle('GROUPING'), ASFStorageStyle('WM/ContentGroupDescription'), ) track = MediaField( MP3SlashPackStorageStyle('TRCK', pack_pos=0), - MP4TupleStorageStyle('trkn', index=0), + MP4TupleStorageStyle(b'trkn', index=0), 
StorageStyle('TRACK'), StorageStyle('TRACKNUMBER'), ASFStorageStyle('WM/TrackNumber'), @@ -1307,7 +1589,7 @@ class MediaFile(object): ) tracktotal = MediaField( MP3SlashPackStorageStyle('TRCK', pack_pos=1), - MP4TupleStorageStyle('trkn', index=1), + MP4TupleStorageStyle(b'trkn', index=1), StorageStyle('TRACKTOTAL'), StorageStyle('TRACKC'), StorageStyle('TOTALTRACKS'), @@ -1316,7 +1598,7 @@ class MediaFile(object): ) disc = MediaField( MP3SlashPackStorageStyle('TPOS', pack_pos=0), - MP4TupleStorageStyle('disk', index=0), + MP4TupleStorageStyle(b'disk', index=0), StorageStyle('DISC'), StorageStyle('DISCNUMBER'), ASFStorageStyle('WM/PartOfSet'), @@ -1324,7 +1606,7 @@ class MediaFile(object): ) disctotal = MediaField( MP3SlashPackStorageStyle('TPOS', pack_pos=1), - MP4TupleStorageStyle('disk', index=1), + MP4TupleStorageStyle(b'disk', index=1), StorageStyle('DISCTOTAL'), StorageStyle('DISCC'), StorageStyle('TOTALDISCS'), @@ -1333,124 +1615,125 @@ class MediaFile(object): ) lyrics = MediaField( MP3DescStorageStyle(key='USLT'), - MP4StorageStyle("\xa9lyr"), + MP4StorageStyle(b"\xa9lyr"), StorageStyle('LYRICS'), ASFStorageStyle('WM/Lyrics'), ) comments = MediaField( MP3DescStorageStyle(key='COMM'), - MP4StorageStyle("\xa9cmt"), + MP4StorageStyle(b"\xa9cmt"), StorageStyle('DESCRIPTION'), StorageStyle('COMMENT'), ASFStorageStyle('WM/Comments'), + ASFStorageStyle('Description') ) bpm = MediaField( MP3StorageStyle('TBPM'), - MP4StorageStyle('tmpo', as_type=int), + MP4StorageStyle(b'tmpo', as_type=int), StorageStyle('BPM'), ASFStorageStyle('WM/BeatsPerMinute'), out_type=int, ) comp = MediaField( MP3StorageStyle('TCMP'), - MP4BoolStorageStyle('cpil'), + MP4BoolStorageStyle(b'cpil'), StorageStyle('COMPILATION'), ASFStorageStyle('WM/IsCompilation', as_type=bool), out_type=bool, ) albumartist = MediaField( MP3StorageStyle('TPE2'), - MP4StorageStyle('aART'), + MP4StorageStyle(b'aART'), StorageStyle('ALBUM ARTIST'), StorageStyle('ALBUMARTIST'), ASFStorageStyle('WM/AlbumArtist'), ) 
albumtype = MediaField( MP3DescStorageStyle(u'MusicBrainz Album Type'), - MP4StorageStyle('----:com.apple.iTunes:MusicBrainz Album Type'), + MP4StorageStyle(b'----:com.apple.iTunes:MusicBrainz Album Type'), StorageStyle('MUSICBRAINZ_ALBUMTYPE'), ASFStorageStyle('MusicBrainz/Album Type'), ) label = MediaField( MP3StorageStyle('TPUB'), - MP4StorageStyle('----:com.apple.iTunes:Label'), - MP4StorageStyle('----:com.apple.iTunes:publisher'), + MP4StorageStyle(b'----:com.apple.iTunes:Label'), + MP4StorageStyle(b'----:com.apple.iTunes:publisher'), StorageStyle('LABEL'), StorageStyle('PUBLISHER'), # Traktor ASFStorageStyle('WM/Publisher'), ) artist_sort = MediaField( MP3StorageStyle('TSOP'), - MP4StorageStyle("soar"), + MP4StorageStyle(b"soar"), StorageStyle('ARTISTSORT'), ASFStorageStyle('WM/ArtistSortOrder'), ) albumartist_sort = MediaField( MP3DescStorageStyle(u'ALBUMARTISTSORT'), - MP4StorageStyle("soaa"), + MP4StorageStyle(b"soaa"), StorageStyle('ALBUMARTISTSORT'), ASFStorageStyle('WM/AlbumArtistSortOrder'), ) asin = MediaField( MP3DescStorageStyle(u'ASIN'), - MP4StorageStyle("----:com.apple.iTunes:ASIN"), + MP4StorageStyle(b"----:com.apple.iTunes:ASIN"), StorageStyle('ASIN'), ASFStorageStyle('MusicBrainz/ASIN'), ) catalognum = MediaField( MP3DescStorageStyle(u'CATALOGNUMBER'), - MP4StorageStyle("----:com.apple.iTunes:CATALOGNUMBER"), + MP4StorageStyle(b"----:com.apple.iTunes:CATALOGNUMBER"), StorageStyle('CATALOGNUMBER'), ASFStorageStyle('WM/CatalogNo'), ) disctitle = MediaField( MP3StorageStyle('TSST'), - MP4StorageStyle("----:com.apple.iTunes:DISCSUBTITLE"), + MP4StorageStyle(b"----:com.apple.iTunes:DISCSUBTITLE"), StorageStyle('DISCSUBTITLE'), ASFStorageStyle('WM/SetSubTitle'), ) encoder = MediaField( MP3StorageStyle('TENC'), - MP4StorageStyle("\xa9too"), + MP4StorageStyle(b"\xa9too"), StorageStyle('ENCODEDBY'), StorageStyle('ENCODER'), ASFStorageStyle('WM/EncodedBy'), ) script = MediaField( MP3DescStorageStyle(u'Script'), - 
MP4StorageStyle("----:com.apple.iTunes:SCRIPT"), + MP4StorageStyle(b"----:com.apple.iTunes:SCRIPT"), StorageStyle('SCRIPT'), ASFStorageStyle('WM/Script'), ) language = MediaField( MP3StorageStyle('TLAN'), - MP4StorageStyle("----:com.apple.iTunes:LANGUAGE"), + MP4StorageStyle(b"----:com.apple.iTunes:LANGUAGE"), StorageStyle('LANGUAGE'), ASFStorageStyle('WM/Language'), ) country = MediaField( MP3DescStorageStyle('MusicBrainz Album Release Country'), - MP4StorageStyle("----:com.apple.iTunes:MusicBrainz Album " - "Release Country"), + MP4StorageStyle(b"----:com.apple.iTunes:MusicBrainz " + b"Album Release Country"), StorageStyle('RELEASECOUNTRY'), ASFStorageStyle('MusicBrainz/Album Release Country'), ) albumstatus = MediaField( MP3DescStorageStyle(u'MusicBrainz Album Status'), - MP4StorageStyle("----:com.apple.iTunes:MusicBrainz Album Status"), + MP4StorageStyle(b"----:com.apple.iTunes:MusicBrainz Album Status"), StorageStyle('MUSICBRAINZ_ALBUMSTATUS'), ASFStorageStyle('MusicBrainz/Album Status'), ) media = MediaField( MP3StorageStyle('TMED'), - MP4StorageStyle("----:com.apple.iTunes:MEDIA"), + MP4StorageStyle(b"----:com.apple.iTunes:MEDIA"), StorageStyle('MEDIA'), ASFStorageStyle('WM/Media'), ) albumdisambig = MediaField( # This tag mapping was invented for beets (not used by Picard, etc). MP3DescStorageStyle(u'MusicBrainz Album Comment'), - MP4StorageStyle("----:com.apple.iTunes:MusicBrainz Album Comment"), + MP4StorageStyle(b"----:com.apple.iTunes:MusicBrainz Album Comment"), StorageStyle('MUSICBRAINZ_ALBUMCOMMENT'), ASFStorageStyle('MusicBrainz/Album Comment'), ) @@ -1458,7 +1741,7 @@ class MediaFile(object): # Release date. date = DateField( MP3StorageStyle('TDRC'), - MP4StorageStyle("\xa9day"), + MP4StorageStyle(b"\xa9day"), StorageStyle('DATE'), ASFStorageStyle('WM/Year'), year=(StorageStyle('YEAR'),)) @@ -1470,7 +1753,7 @@ class MediaFile(object): # *Original* release date. 
original_date = DateField( MP3StorageStyle('TDOR'), - MP4StorageStyle('----:com.apple.iTunes:ORIGINAL YEAR'), + MP4StorageStyle(b'----:com.apple.iTunes:ORIGINAL YEAR'), StorageStyle('ORIGINALDATE'), ASFStorageStyle('WM/OriginalReleaseYear')) @@ -1481,13 +1764,13 @@ class MediaFile(object): # Nonstandard metadata. artist_credit = MediaField( MP3DescStorageStyle(u'Artist Credit'), - MP4StorageStyle("----:com.apple.iTunes:Artist Credit"), + MP4StorageStyle(b"----:com.apple.iTunes:Artist Credit"), StorageStyle('ARTIST_CREDIT'), ASFStorageStyle('beets/Artist Credit'), ) albumartist_credit = MediaField( MP3DescStorageStyle(u'Album Artist Credit'), - MP4StorageStyle("----:com.apple.iTunes:Album Artist Credit"), + MP4StorageStyle(b"----:com.apple.iTunes:Album Artist Credit"), StorageStyle('ALBUMARTIST_CREDIT'), ASFStorageStyle('beets/Album Artist Credit'), ) @@ -1501,31 +1784,31 @@ class MediaFile(object): # MusicBrainz IDs. mb_trackid = MediaField( MP3UFIDStorageStyle(owner='http://musicbrainz.org'), - MP4StorageStyle('----:com.apple.iTunes:MusicBrainz Track Id'), + MP4StorageStyle(b'----:com.apple.iTunes:MusicBrainz Track Id'), StorageStyle('MUSICBRAINZ_TRACKID'), ASFStorageStyle('MusicBrainz/Track Id'), ) mb_albumid = MediaField( MP3DescStorageStyle(u'MusicBrainz Album Id'), - MP4StorageStyle('----:com.apple.iTunes:MusicBrainz Album Id'), + MP4StorageStyle(b'----:com.apple.iTunes:MusicBrainz Album Id'), StorageStyle('MUSICBRAINZ_ALBUMID'), ASFStorageStyle('MusicBrainz/Album Id'), ) mb_artistid = MediaField( MP3DescStorageStyle(u'MusicBrainz Artist Id'), - MP4StorageStyle('----:com.apple.iTunes:MusicBrainz Artist Id'), + MP4StorageStyle(b'----:com.apple.iTunes:MusicBrainz Artist Id'), StorageStyle('MUSICBRAINZ_ARTISTID'), ASFStorageStyle('MusicBrainz/Artist Id'), ) mb_albumartistid = MediaField( MP3DescStorageStyle(u'MusicBrainz Album Artist Id'), - MP4StorageStyle('----:com.apple.iTunes:MusicBrainz Album Artist Id'), + MP4StorageStyle(b'----:com.apple.iTunes:MusicBrainz 
Album Artist Id'), StorageStyle('MUSICBRAINZ_ALBUMARTISTID'), ASFStorageStyle('MusicBrainz/Album Artist Id'), ) mb_releasegroupid = MediaField( MP3DescStorageStyle(u'MusicBrainz Release Group Id'), - MP4StorageStyle('----:com.apple.iTunes:MusicBrainz Release Group Id'), + MP4StorageStyle(b'----:com.apple.iTunes:MusicBrainz Release Group Id'), StorageStyle('MUSICBRAINZ_RELEASEGROUPID'), ASFStorageStyle('MusicBrainz/Release Group Id'), ) @@ -1533,79 +1816,124 @@ class MediaFile(object): # Acoustid fields. acoustid_fingerprint = MediaField( MP3DescStorageStyle(u'Acoustid Fingerprint'), - MP4StorageStyle('----:com.apple.iTunes:Acoustid Fingerprint'), + MP4StorageStyle(b'----:com.apple.iTunes:Acoustid Fingerprint'), StorageStyle('ACOUSTID_FINGERPRINT'), ASFStorageStyle('Acoustid/Fingerprint'), ) acoustid_id = MediaField( MP3DescStorageStyle(u'Acoustid Id'), - MP4StorageStyle('----:com.apple.iTunes:Acoustid Id'), + MP4StorageStyle(b'----:com.apple.iTunes:Acoustid Id'), StorageStyle('ACOUSTID_ID'), ASFStorageStyle('Acoustid/Id'), ) # ReplayGain fields. 
rg_track_gain = MediaField( - MP3DescStorageStyle(u'REPLAYGAIN_TRACK_GAIN', - float_places=2, suffix=u' dB'), - MP3DescStorageStyle(u'replaygain_track_gain', - float_places=2, suffix=u' dB'), - MP3SoundCheckStorageStyle(key='COMM', index=0, desc=u'iTunNORM', - id3_lang='eng'), - MP4StorageStyle(key='----:com.apple.iTunes:replaygain_track_gain', - float_places=2, suffix=b' dB'), - MP4SoundCheckStorageStyle(key='----:com.apple.iTunes:iTunNORM', - index=0), - StorageStyle(u'REPLAYGAIN_TRACK_GAIN', - float_places=2, suffix=u' dB'), - ASFStorageStyle(u'replaygain_track_gain', - float_places=2, suffix=u' dB'), + MP3DescStorageStyle( + u'REPLAYGAIN_TRACK_GAIN', + float_places=2, suffix=u' dB' + ), + MP3DescStorageStyle( + u'replaygain_track_gain', + float_places=2, suffix=u' dB' + ), + MP3SoundCheckStorageStyle( + key='COMM', + index=0, desc=u'iTunNORM', + id3_lang='eng' + ), + MP4StorageStyle( + b'----:com.apple.iTunes:replaygain_track_gain', + float_places=2, suffix=b' dB' + ), + MP4SoundCheckStorageStyle( + b'----:com.apple.iTunes:iTunNORM', + index=0 + ), + StorageStyle( + u'REPLAYGAIN_TRACK_GAIN', + float_places=2, suffix=u' dB' + ), + ASFStorageStyle( + u'replaygain_track_gain', + float_places=2, suffix=u' dB' + ), out_type=float ) rg_album_gain = MediaField( - MP3DescStorageStyle(u'REPLAYGAIN_ALBUM_GAIN', - float_places=2, suffix=u' dB'), - MP3DescStorageStyle(u'replaygain_album_gain', - float_places=2, suffix=u' dB'), - MP4SoundCheckStorageStyle(key='----:com.apple.iTunes:iTunNORM', - index=1), - StorageStyle(u'REPLAYGAIN_ALBUM_GAIN', - float_places=2, suffix=u' dB'), - ASFStorageStyle(u'replaygain_album_gain', - float_places=2, suffix=u' dB'), + MP3DescStorageStyle( + u'REPLAYGAIN_ALBUM_GAIN', + float_places=2, suffix=u' dB' + ), + MP3DescStorageStyle( + u'replaygain_album_gain', + float_places=2, suffix=u' dB' + ), + MP4SoundCheckStorageStyle( + b'----:com.apple.iTunes:iTunNORM', + index=1 + ), + StorageStyle( + u'REPLAYGAIN_ALBUM_GAIN', + float_places=2, 
suffix=u' dB' + ), + ASFStorageStyle( + u'replaygain_album_gain', + float_places=2, suffix=u' dB' + ), out_type=float ) rg_track_peak = MediaField( - MP3DescStorageStyle(u'REPLAYGAIN_TRACK_PEAK', - float_places=6), - MP3DescStorageStyle(u'replaygain_track_peak', - float_places=6), - MP3SoundCheckStorageStyle(key='COMM', index=1, desc=u'iTunNORM', - id3_lang='eng'), - MP4StorageStyle('----:com.apple.iTunes:replaygain_track_peak', - float_places=6), - MP4SoundCheckStorageStyle(key='----:com.apple.iTunes:iTunNORM', - index=1), - StorageStyle(u'REPLAYGAIN_TRACK_PEAK', - float_places=6), - ASFStorageStyle(u'replaygain_track_peak', - float_places=6), + MP3DescStorageStyle( + u'REPLAYGAIN_TRACK_PEAK', + float_places=6 + ), + MP3DescStorageStyle( + u'replaygain_track_peak', + float_places=6 + ), + MP3SoundCheckStorageStyle( + key=u'COMM', + index=1, desc=u'iTunNORM', + id3_lang='eng' + ), + MP4StorageStyle( + b'----:com.apple.iTunes:replaygain_track_peak', + float_places=6 + ), + MP4SoundCheckStorageStyle( + b'----:com.apple.iTunes:iTunNORM', + index=1 + ), + StorageStyle(u'REPLAYGAIN_TRACK_PEAK', float_places=6), + ASFStorageStyle(u'replaygain_track_peak', float_places=6), out_type=float, ) rg_album_peak = MediaField( - MP3DescStorageStyle(u'REPLAYGAIN_ALBUM_PEAK', - float_places=6), - MP3DescStorageStyle(u'replaygain_album_peak', - float_places=6), - MP4StorageStyle('----:com.apple.iTunes:replaygain_album_peak', - float_places=6), - StorageStyle(u'REPLAYGAIN_ALBUM_PEAK', - float_places=6), - ASFStorageStyle(u'replaygain_album_peak', - float_places=6), + MP3DescStorageStyle( + u'REPLAYGAIN_ALBUM_PEAK', + float_places=6 + ), + MP3DescStorageStyle( + u'replaygain_album_peak', + float_places=6 + ), + MP4StorageStyle( + b'----:com.apple.iTunes:replaygain_album_peak', + float_places=6 + ), + StorageStyle(u'REPLAYGAIN_ALBUM_PEAK', float_places=6), + ASFStorageStyle(u'replaygain_album_peak', float_places=6), out_type=float, ) + initial_key = MediaField( + 
MP3StorageStyle('TKEY'), + MP4StorageStyle(b'----:com.apple.iTunes:initialkey'), + StorageStyle('INITIALKEY'), + ASFStorageStyle('INITIALKEY'), + ) + @property def length(self): """The duration of the audio in seconds (a float).""" diff --git a/libs/beets/plugins.py b/libs/beets/plugins.py index 6a58777c..239f64fb 100644 --- a/libs/beets/plugins.py +++ b/libs/beets/plugins.py @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2013, Adrian Sampson. +# Copyright 2016, Adrian Sampson. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -14,11 +15,17 @@ """Support for beets plugins.""" -import logging +from __future__ import division, absolute_import, print_function + +import inspect import traceback +import re from collections import defaultdict +from functools import wraps + import beets +from beets import logging from beets import mediafile PLUGIN_NAMESPACE = 'beetsplug' @@ -30,6 +37,31 @@ LASTFM_KEY = '2dc3914abf35f0d9c92d97d8f8e42b43' log = logging.getLogger('beets') +class PluginConflictException(Exception): + """Indicates that the services provided by one plugin conflict with + those of another. + + For example two plugins may define different types for flexible fields. + """ + + +class PluginLogFilter(logging.Filter): + """A logging filter that identifies the plugin that emitted a log + message. + """ + def __init__(self, plugin): + self.prefix = u'{0}: '.format(plugin.name) + + def filter(self, record): + if hasattr(record.msg, 'msg') and isinstance(record.msg.msg, + basestring): + # A _LogMessage from our hacked-up Logging replacement. + record.msg.msg = self.prefix + record.msg.msg + elif isinstance(record.msg, basestring): + record.msg = self.prefix + record.msg + return True + + # Managing the plugins themselves. 
class BeetsPlugin(object): @@ -40,8 +72,6 @@ class BeetsPlugin(object): def __init__(self, name=None): """Perform one-time plugin setup. """ - _add_media_fields(self.item_fields()) - self.import_stages = [] self.name = name or self.__module__.split('.')[-1] self.config = beets.config[self.name] if not self.template_funcs: @@ -50,6 +80,12 @@ class BeetsPlugin(object): self.template_fields = {} if not self.album_template_fields: self.album_template_fields = {} + self.import_stages = [] + + self._log = log.getChild(self.name) + self._log.setLevel(logging.NOTSET) # Use `beets` logger level. + if not any(isinstance(f, PluginLogFilter) for f in self._log.filters): + self._log.addFilter(PluginLogFilter(self)) def commands(self): """Should return a list of beets.ui.Subcommand objects for @@ -57,6 +93,46 @@ class BeetsPlugin(object): """ return () + def get_import_stages(self): + """Return a list of functions that should be called as importer + pipelines stages. + + The callables are wrapped versions of the functions in + `self.import_stages`. Wrapping provides some bookkeeping for the + plugin: specifically, the logging level is adjusted to WARNING. + """ + return [self._set_log_level_and_params(logging.WARNING, import_stage) + for import_stage in self.import_stages] + + def _set_log_level_and_params(self, base_log_level, func): + """Wrap `func` to temporarily set this plugin's logger level to + `base_log_level` + config options (and restore it to its previous + value after the function returns). Also determines which params may not + be sent for backwards-compatibility. 
+ """ + argspec = inspect.getargspec(func) + + @wraps(func) + def wrapper(*args, **kwargs): + assert self._log.level == logging.NOTSET + verbosity = beets.config['verbose'].get(int) + log_level = max(logging.DEBUG, base_log_level - 10 * verbosity) + self._log.setLevel(log_level) + try: + try: + return func(*args, **kwargs) + except TypeError as exc: + if exc.args[0].startswith(func.__name__): + # caused by 'func' and not stuff internal to 'func' + kwargs = dict((arg, val) for arg, val in kwargs.items() + if arg in argspec.args) + return func(*args, **kwargs) + else: + raise + finally: + self._log.setLevel(logging.NOTSET) + return wrapper + def queries(self): """Should return a dict mapping prefixes to Query subclasses. """ @@ -86,14 +162,6 @@ class BeetsPlugin(object): """ return () - def item_fields(self): - """Returns field descriptors to be added to the MediaFile class, - in the form of a dictionary whose keys are field names and whose - values are descriptor (e.g., MediaField) instances. The Library - database schema is not (currently) extended. - """ - return {} - def album_for_id(self, album_id): """Return an AlbumInfo object or None if no matching release was found. @@ -106,38 +174,36 @@ class BeetsPlugin(object): """ return None + def add_media_field(self, name, descriptor): + """Add a field that is synchronized between media files and items. + When a media field is added ``item.write()`` will set the name + property of the item's MediaFile to ``item[name]`` and save the + changes. Similarly ``item.read()`` will set ``item[name]`` to + the value of the name property of the media file. + + ``descriptor`` must be an instance of ``mediafile.MediaField``. 
+ """ + # Defer impor to prevent circular dependency + from beets import library + mediafile.MediaFile.add_field(name, descriptor) + library.Item._media_fields.add(name) + + _raw_listeners = None listeners = None - @classmethod - def register_listener(cls, event, func): - """Add a function as a listener for the specified event. (An - imperative alternative to the @listen decorator.) + def register_listener(self, event, func): + """Add a function as a listener for the specified event. """ - if cls.listeners is None: + wrapped_func = self._set_log_level_and_params(logging.WARNING, func) + + cls = self.__class__ + if cls.listeners is None or cls._raw_listeners is None: + cls._raw_listeners = defaultdict(list) cls.listeners = defaultdict(list) - cls.listeners[event].append(func) - - @classmethod - def listen(cls, event): - """Decorator that adds a function as an event handler for the - specified event (as a string). The parameters passed to function - will vary depending on what event occurred. - - The function should respond to named parameters. - function(**kwargs) will trap all arguments in a dictionary. - Example: - - >>> @MyPlugin.listen("imported") - >>> def importListener(**kwargs): - >>> pass - """ - def helper(func): - if cls.listeners is None: - cls.listeners = defaultdict(list) - cls.listeners[event].append(func) - return func - return helper + if func not in cls._raw_listeners[event]: + cls._raw_listeners[event].append(func) + cls.listeners[event].append(wrapped_func) template_funcs = None template_fields = None @@ -170,7 +236,10 @@ class BeetsPlugin(object): return func return helper + _classes = set() + + def load_plugins(names=()): """Imports the modules for a sequence of plugin names. Each name must be the name of a Python module under the "beetsplug" namespace @@ -178,14 +247,14 @@ def load_plugins(names=()): BeetsPlugin subclasses desired. 
""" for name in names: - modname = '%s.%s' % (PLUGIN_NAMESPACE, name) + modname = '{0}.{1}'.format(PLUGIN_NAMESPACE, name) try: try: namespace = __import__(modname, None, None) except ImportError as exc: # Again, this is hacky: if exc.args[0].endswith(' ' + name): - log.warn('** plugin %s not found' % name) + log.warn(u'** plugin {0} not found', name) else: raise else: @@ -195,10 +264,16 @@ def load_plugins(names=()): _classes.add(obj) except: - log.warn('** error loading plugin %s' % name) - log.warn(traceback.format_exc()) + log.warn( + u'** error loading plugin {}:\n{}', + name, + traceback.format_exc(), + ) + _instances = {} + + def find_plugins(): """Returns a list of BeetsPlugin subclass instances from all currently loaded beets plugins. Loads the default plugin set @@ -224,6 +299,7 @@ def commands(): out += plugin.commands() return out + def queries(): """Returns a dict mapping prefix strings to Query subclasses all loaded plugins. @@ -233,6 +309,24 @@ def queries(): out.update(plugin.queries()) return out + +def types(model_cls): + # Gives us `item_types` and `album_types` + attr_name = '{0}_types'.format(model_cls.__name__.lower()) + types = {} + for plugin in find_plugins(): + plugin_types = getattr(plugin, attr_name, {}) + for field in plugin_types: + if field in types and plugin_types[field] != types[field]: + raise PluginConflictException( + u'Plugin {0} defines flexible field {1} ' + u'which has already been defined with ' + u'another type.'.format(plugin.name, field) + ) + types.update(plugin_types) + return types + + def track_distance(item, info): """Gets the track distance calculated by all loaded plugins. Returns a Distance object. 
@@ -243,6 +337,7 @@ def track_distance(item, info): dist.update(plugin.track_distance(item, info)) return dist + def album_distance(items, album_info, mapping): """Returns the album distance calculated by plugins.""" from beets.autotag.hooks import Distance @@ -251,6 +346,7 @@ def album_distance(items, album_info, mapping): dist.update(plugin.album_distance(items, album_info, mapping)) return dist + def candidates(items, artist, album, va_likely): """Gets MusicBrainz candidates for an album from each plugin. """ @@ -259,6 +355,7 @@ def candidates(items, artist, album, va_likely): out.extend(plugin.candidates(items, artist, album, va_likely)) return out + def item_candidates(item, artist, title): """Gets MusicBrainz candidates for an item from the plugins. """ @@ -267,6 +364,7 @@ def item_candidates(item, artist, title): out.extend(plugin.item_candidates(item, artist, title)) return out + def album_for_id(album_id): """Get AlbumInfo objects for a given ID string. """ @@ -277,6 +375,7 @@ def album_for_id(album_id): out.append(res) return out + def track_for_id(track_id): """Get TrackInfo objects for a given ID string. """ @@ -287,6 +386,7 @@ def track_for_id(track_id): out.append(res) return out + def template_funcs(): """Get all the template functions declared by plugins as a dictionary. @@ -297,19 +397,12 @@ def template_funcs(): funcs.update(plugin.template_funcs) return funcs -def _add_media_fields(fields): - """Adds a {name: descriptor} dictionary of fields to the MediaFile - class. Called during the plugin initialization. 
- """ - for key, value in fields.iteritems(): - setattr(mediafile.MediaFile, key, value) def import_stages(): """Get a list of import stage functions defined by plugins.""" stages = [] for plugin in find_plugins(): - if hasattr(plugin, 'import_stages'): - stages += plugin.import_stages + stages += plugin.get_import_stages() return stages @@ -325,6 +418,7 @@ def item_field_getters(): funcs.update(plugin.template_fields) return funcs + def album_field_getters(): """As above, for album fields. """ @@ -348,12 +442,48 @@ def event_handlers(): all_handlers[event] += handlers return all_handlers -def send(event, **arguments): - """Sends an event to all assigned event listeners. Event is the - name of the event to send, all other named arguments go to the - event handler(s). - Returns a list of return values from the handlers. +def send(event, **arguments): + """Send an event to all assigned event listeners. + + `event` is the name of the event to send, all other named arguments + are passed along to the handlers. + + Return a list of non-None values returned from the handlers. """ - log.debug('Sending event: %s' % event) - return [handler(**arguments) for handler in event_handlers()[event]] + log.debug(u'Sending event: {0}', event) + results = [] + for handler in event_handlers()[event]: + result = handler(**arguments) + if result is not None: + results.append(result) + return results + + +def feat_tokens(for_artist=True): + """Return a regular expression that matches phrases like "featuring" + that separate a main artist or a song title from secondary artists. + The `for_artist` option determines whether the regex should be + suitable for matching artist fields (the default) or title fields. 
+ """ + feat_words = ['ft', 'featuring', 'feat', 'feat.', 'ft.'] + if for_artist: + feat_words += ['with', 'vs', 'and', 'con', '&'] + return '(?<=\s)(?:{0})(?=\s)'.format( + '|'.join(re.escape(x) for x in feat_words) + ) + + +def sanitize_choices(choices, choices_all): + """Clean up a stringlist configuration attribute: keep only choices + elements present in choices_all, remove duplicate elements, expand '*' + wildcard while keeping original stringlist order. + """ + seen = set() + others = [x for x in choices_all if x not in choices] + res = [] + for s in choices: + if s in list(choices_all) + ['*']: + if not (s in seen or seen.add(s)): + res.extend(list(others) if s == '*' else [s]) + return res diff --git a/libs/beets/ui/__init__.py b/libs/beets/ui/__init__.py index 2df74ea7..797df44d 100644 --- a/libs/beets/ui/__init__.py +++ b/libs/beets/ui/__init__.py @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2014, Adrian Sampson. +# Copyright 2016, Adrian Sampson. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -16,20 +17,22 @@ interface. To invoke the CLI, just call beets.ui.main(). The actual CLI commands are implemented in the ui.commands module. """ -from __future__ import print_function + +from __future__ import division, absolute_import, print_function import locale import optparse import textwrap import sys from difflib import SequenceMatcher -import logging import sqlite3 import errno import re import struct import traceback +import os.path +from beets import logging from beets import library from beets import plugins from beets import util @@ -37,10 +40,9 @@ from beets.util.functemplate import Template from beets import config from beets.util import confit from beets.autotag import mb - +from beets.dbcore import query as db_query # On Windows platforms, use colorama to support "ANSI" terminal colors. 
- if sys.platform == 'win32': try: import colorama @@ -50,36 +52,59 @@ if sys.platform == 'win32': colorama.init() - -# Constants. +log = logging.getLogger('beets') +if not log.handlers: + log.addHandler(logging.StreamHandler()) +log.propagate = False # Don't propagate to root handler. PF_KEY_QUERIES = { - 'comp': 'comp:true', - 'singleton': 'singleton:true', + 'comp': u'comp:true', + 'singleton': u'singleton:true', } -# UI exception. Commands should throw this in order to display -# nonrecoverable errors to the user. + class UserError(Exception): - pass - -# Main logger. -log = logging.getLogger('beets') + """UI exception. Commands should throw this in order to display + nonrecoverable errors to the user. + """ - -# Utilities. +# Encoding utilities. -def _encoding(): - """Tries to guess the encoding used by the terminal.""" +def _in_encoding(default=u'utf-8'): + """Get the encoding to use for *inputting* strings from the console. + + :param default: the fallback sys.stdin encoding + """ + + return config['terminal_encoding'].get() or getattr(sys.stdin, 'encoding', + default) + + +def _out_encoding(): + """Get the encoding to use for *outputting* strings to the console. + """ # Configured override? encoding = config['terminal_encoding'].get() if encoding: return encoding - # Determine from locale settings. + # For testing: When sys.stdout is a StringIO under the test harness, + # it doesn't have an `encoding` attribute. Just use UTF-8. + if not hasattr(sys.stdout, 'encoding'): + return 'utf8' + + # Python's guessed output stream encoding, or UTF-8 as a fallback + # (e.g., when piped to a file). + return sys.stdout.encoding or 'utf8' + + +def _arg_encoding(): + """Get the encoding for command-line arguments (and other OS + locale-sensitive strings). + """ try: return locale.getdefaultlocale()[1] or 'utf8' except ValueError: @@ -92,25 +117,75 @@ def decargs(arglist): """Given a list of command-line argument bytestrings, attempts to decode them to Unicode strings. 
""" - return [s.decode(_encoding()) for s in arglist] + return [s.decode(_arg_encoding()) for s in arglist] -def print_(*strings): +def print_(*strings, **kwargs): """Like print, but rather than raising an error when a character is not in the terminal's encoding's character set, just silently replaces it. - """ - if strings: - if isinstance(strings[0], unicode): - txt = u' '.join(strings) - else: - txt = ' '.join(strings) - else: - txt = u'' - if isinstance(txt, unicode): - txt = txt.encode(_encoding(), 'replace') - print(txt) + If the arguments are strings then they're expected to share the same + type: either bytes or unicode. + + The `end` keyword argument behaves similarly to the built-in `print` + (it defaults to a newline). The value should have the same string + type as the arguments. + """ + end = kwargs.get('end') + + if not strings or isinstance(strings[0], unicode): + txt = u' '.join(strings) + txt += u'\n' if end is None else end + else: + txt = b' '.join(strings) + txt += b'\n' if end is None else end + + # Always send bytes to the stdout stream. + if isinstance(txt, unicode): + txt = txt.encode(_out_encoding(), 'replace') + + sys.stdout.write(txt) + + +# Configuration wrappers. + +def _bool_fallback(a, b): + """Given a boolean or None, return the original value or a fallback. + """ + if a is None: + assert isinstance(b, bool) + return b + else: + assert isinstance(a, bool) + return a + + +def should_write(write_opt=None): + """Decide whether a command that updates metadata should also write + tags, using the importer configuration as the default. + """ + return _bool_fallback(write_opt, config['import']['write'].get(bool)) + + +def should_move(move_opt=None): + """Decide whether a command that updates metadata should also move + files when they're inside the library, using the importer + configuration as the default. 
+ + Specifically, commands should move files after metadata updates only + when the importer is configured *either* to move *or* to copy files. + They should avoid moving files when the importer is configured not + to touch any filenames. + """ + return _bool_fallback( + move_opt, + config['import']['move'].get(bool) or + config['import']['copy'].get(bool) + ) + + +# Input prompts. def input_(prompt=None): """Like `raw_input`, but decodes the result to a Unicode string. @@ -122,16 +197,14 @@ def input_(prompt=None): # use print() explicitly to display prompts. # http://bugs.python.org/issue1927 if prompt: - if isinstance(prompt, unicode): - prompt = prompt.encode(_encoding(), 'replace') - print(prompt, end=' ') + print_(prompt, end=' ') try: resp = raw_input() except EOFError: - raise UserError('stdin stream ended while input required') + raise UserError(u'stdin stream ended while input required') - return resp.decode(sys.stdin.encoding or 'utf8', 'ignore') + return resp.decode(_in_encoding(), 'ignore') def input_options(options, require=False, prompt=None, fallback_prompt=None, @@ -170,20 +243,21 @@ def input_options(options, require=False, prompt=None, fallback_prompt=None, # Infer a letter. for letter in option: if not letter.isalpha(): - continue # Don't use punctuation. + continue # Don't use punctuation. if letter not in letters: found_letter = letter break else: - raise ValueError('no unambiguous lettering found') + raise ValueError(u'no unambiguous lettering found') letters[found_letter.lower()] = option index = option.index(found_letter) # Mark the option's shortcut letter for display. - if not require and ((default is None and not numrange and first) or - (isinstance(default, basestring) and - found_letter.lower() == default.lower())): + if not require and ( + (default is None and not numrange and first) or + (isinstance(default, basestring) and + found_letter.lower() == default.lower())): # The first option is the default; mark it. 
show_letter = '[%s]' % found_letter.upper() is_default = True @@ -192,7 +266,7 @@ def input_options(options, require=False, prompt=None, fallback_prompt=None, is_default = False # Colorize the letter shortcut. - show_letter = colorize('turquoise' if is_default else 'blue', + show_letter = colorize('action_default' if is_default else 'action', show_letter) # Insert the highlighted letter back into the word. @@ -218,11 +292,11 @@ def input_options(options, require=False, prompt=None, fallback_prompt=None, prompt_part_lengths = [] if numrange: if isinstance(default, int): - default_name = str(default) - default_name = colorize('turquoise', default_name) + default_name = unicode(default) + default_name = colorize('action_default', default_name) tmpl = '# selection (default %s)' prompt_parts.append(tmpl % default_name) - prompt_part_lengths.append(len(tmpl % str(default))) + prompt_part_lengths.append(len(tmpl % unicode(default))) else: prompt_parts.append('# selection') prompt_part_lengths.append(len(prompt_parts[-1])) @@ -257,9 +331,9 @@ def input_options(options, require=False, prompt=None, fallback_prompt=None, # Make a fallback prompt too. This is displayed if the user enters # something that is not recognized. if not fallback_prompt: - fallback_prompt = 'Enter one of ' + fallback_prompt = u'Enter one of ' if numrange: - fallback_prompt += '%i-%i, ' % numrange + fallback_prompt += u'%i-%i, ' % numrange fallback_prompt += ', '.join(display_letters) + ':' resp = input_(prompt) @@ -298,19 +372,52 @@ def input_yn(prompt, require=False): "yes" unless `require` is `True`, in which case there is no default. """ sel = input_options( - ('y', 'n'), require, prompt, 'Enter Y or N:' + ('y', 'n'), require, prompt, u'Enter Y or N:' ) - return sel == 'y' + return sel == u'y' +def input_select_objects(prompt, objs, rep): + """Prompt to user to choose all, none, or some of the given objects. + Return the list of selected objects. 
+ + `prompt` is the prompt string to use for each question (it should be + phrased as an imperative verb). `rep` is a function to call on each + object to print it out when confirming objects individually. + """ + choice = input_options( + (u'y', u'n', u's'), False, + u'%s? (Yes/no/select)' % prompt) + print() # Blank line. + + if choice == u'y': # Yes. + return objs + + elif choice == u's': # Select. + out = [] + for obj in objs: + rep(obj) + if input_yn(u'%s? (yes/no)' % prompt, True): + out.append(obj) + print() # go to a new line + return out + + else: # No. + return [] + + +# Human output formatting. + def human_bytes(size): """Formats size, a number of bytes, in a human-readable way.""" - suffices = ['B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB', 'HB'] - for suffix in suffices: + powers = [u'', u'K', u'M', u'G', u'T', u'P', u'E', u'Z', u'Y', u'H'] + unit = 'B' + for power in powers: if size < 1024: - return "%3.1f %s" % (size, suffix) + return u"%3.1f %s%s" % (size, power, unit) size /= 1024.0 - return "big" + unit = u'iB' + return u"big" def human_seconds(interval): @@ -318,13 +425,13 @@ def human_seconds(interval): interval using English words. """ units = [ - (1, 'second'), - (60, 'minute'), - (60, 'hour'), - (24, 'day'), - (7, 'week'), - (52, 'year'), - (10, 'decade'), + (1, u'second'), + (60, u'minute'), + (60, u'hour'), + (24, u'day'), + (7, u'week'), + (52, u'year'), + (10, u'decade'), ] for i in range(len(units) - 1): increment, suffix = units[i] @@ -337,7 +444,7 @@ def human_seconds(interval): increment, suffix = units[-1] interval /= float(increment) - return "%3.1f %ss" % (interval, suffix) + return u"%3.1f %ss" % (interval, suffix) def human_seconds_short(interval): @@ -348,40 +455,82 @@ def human_seconds_short(interval): return u'%i:%02i' % (interval // 60, interval % 60) +# Colorization. 
+ # ANSI terminal colorization code heavily inspired by pygments: # http://dev.pocoo.org/hg/pygments-main/file/b2deea5b5030/pygments/console.py # (pygments is by Tim Hatch, Armin Ronacher, et al.) COLOR_ESCAPE = "\x1b[" -DARK_COLORS = ["black", "darkred", "darkgreen", "brown", "darkblue", - "purple", "teal", "lightgray"] -LIGHT_COLORS = ["darkgray", "red", "green", "yellow", "blue", - "fuchsia", "turquoise", "white"] +DARK_COLORS = { + "black": 0, + "darkred": 1, + "darkgreen": 2, + "brown": 3, + "darkyellow": 3, + "darkblue": 4, + "purple": 5, + "darkmagenta": 5, + "teal": 6, + "darkcyan": 6, + "lightgray": 7 +} +LIGHT_COLORS = { + "darkgray": 0, + "red": 1, + "green": 2, + "yellow": 3, + "blue": 4, + "fuchsia": 5, + "magenta": 5, + "turquoise": 6, + "cyan": 6, + "white": 7 +} RESET_COLOR = COLOR_ESCAPE + "39;49;00m" + +# These abstract COLOR_NAMES are lazily mapped on to the actual color in COLORS +# as they are defined in the configuration files, see function: colorize +COLOR_NAMES = ['text_success', 'text_warning', 'text_error', 'text_highlight', + 'text_highlight_minor', 'action_default', 'action'] +COLORS = None + + def _colorize(color, text): """Returns a string that prints the given text in the given color in a terminal that is ANSI color-aware. The color must be something in DARK_COLORS or LIGHT_COLORS. """ if color in DARK_COLORS: - escape = COLOR_ESCAPE + "%im" % (DARK_COLORS.index(color) + 30) + escape = COLOR_ESCAPE + "%im" % (DARK_COLORS[color] + 30) elif color in LIGHT_COLORS: - escape = COLOR_ESCAPE + "%i;01m" % (LIGHT_COLORS.index(color) + 30) + escape = COLOR_ESCAPE + "%i;01m" % (LIGHT_COLORS[color] + 30) else: - raise ValueError('no such color %s', color) + raise ValueError(u'no such color %s', color) return escape + text + RESET_COLOR -def colorize(color, text): +def colorize(color_name, text): """Colorize text if colored output is enabled. (Like _colorize but conditional.) 
""" - if config['color']: + if config['ui']['color']: + global COLORS + if not COLORS: + COLORS = dict((name, config['ui']['colors'][name].get(unicode)) + for name in COLOR_NAMES) + # In case a 3rd party plugin is still passing the actual color ('red') + # instead of the abstract color name ('text_error') + color = COLORS.get(color_name) + if not color: + log.debug(u'Invalid color_name: {0}', color_name) + color = color_name return _colorize(color, text) else: return text -def _colordiff(a, b, highlight='red', minor_highlight='lightgray'): +def _colordiff(a, b, highlight='text_highlight', + minor_highlight='text_highlight_minor'): """Given two values, return the same pair of strings except with their differences highlighted in the specified color. Strings are highlighted intelligently to show differences; other values are @@ -431,40 +580,16 @@ def _colordiff(a, b, highlight='red', minor_highlight='lightgray'): return u''.join(a_out), u''.join(b_out) -def colordiff(a, b, highlight='red'): +def colordiff(a, b, highlight='text_highlight'): """Colorize differences between two values if color is enabled. (Like _colordiff but conditional.) """ - if config['color']: + if config['ui']['color']: return _colordiff(a, b, highlight) else: return unicode(a), unicode(b) -def color_diff_suffix(a, b, highlight='red'): - """Colorize the differing suffix between two strings.""" - a, b = unicode(a), unicode(b) - if not config['color']: - return a, b - - # Fast path. - if a == b: - return a, b - - # Find the longest common prefix. - first_diff = None - for i in range(min(len(a), len(b))): - if a[i] != b[i]: - first_diff = i - break - else: - first_diff = min(len(a), len(b)) - - # Colorize from the first difference on. - return a[:first_diff] + colorize(highlight, a[first_diff:]), \ - b[:first_diff] + colorize(highlight, b[first_diff:]) - - def get_path_formats(subview=None): """Get the configuration's path formats as a list of query/template pairs. 
@@ -494,46 +619,6 @@ def get_replacements(): return replacements -def get_plugin_paths(): - """Get the list of search paths for plugins from the config file. - The value for "pluginpath" may be a single string or a list of - strings. - """ - pluginpaths = config['pluginpath'].get() - if isinstance(pluginpaths, basestring): - pluginpaths = [pluginpaths] - if not isinstance(pluginpaths, list): - raise confit.ConfigTypeError( - u'pluginpath must be string or a list of strings' - ) - return map(util.normpath, pluginpaths) - - -def _pick_format(album, fmt=None): - """Pick a format string for printing Album or Item objects, - falling back to config options and defaults. - """ - if fmt: - return fmt - if album: - return config['list_format_album'].get(unicode) - else: - return config['list_format_item'].get(unicode) - - -def print_obj(obj, lib, fmt=None): - """Print an Album or Item object. If `fmt` is specified, use that - format string. Otherwise, use the configured template. - """ - album = isinstance(obj, library.Album) - fmt = _pick_format(album, fmt) - if isinstance(fmt, Template): - template = fmt - else: - template = Template(fmt) - print_(obj.evaluate_template(template)) - - def term_width(): """Get the width (columns) of the terminal.""" fallback = config['ui']['terminal_width'].get(int) @@ -558,6 +643,8 @@ def term_width(): FLOAT_EPSILON = 0.01 + + def _field_diff(field, old, new): """Given two Model objects, format their values for `field` and highlight changes among them. Return a human-readable string. If the @@ -574,15 +661,16 @@ def _field_diff(field, old, new): return None # Get formatted values for output. - oldstr = old._get_formatted(field) - newstr = new._get_formatted(field) + oldstr = old.formatted().get(field, u'') + newstr = new.formatted().get(field, u'') # For strings, highlight changes. For others, colorize the whole # thing. 
if isinstance(oldval, basestring): - oldstr, newstr = colordiff(oldval, newval) + oldstr, newstr = colordiff(oldval, newstr) else: - oldstr, newstr = colorize('red', oldstr), colorize('red', newstr) + oldstr = colorize('text_error', oldstr) + newstr = colorize('text_error', newstr) return u'{0} -> {1}'.format(oldstr, newstr) @@ -613,24 +701,178 @@ def show_model_changes(new, old=None, fields=None, always=False): # New fields. for field in set(new) - set(old): + if fields and field not in fields: + continue + changes.append(u' {0}: {1}'.format( field, - colorize('red', new._get_formatted(field)) + colorize('text_highlight', new.formatted()[field]) )) # Print changes. if changes or always: - print_obj(old, old._db) + print_(format(old)) if changes: print_(u'\n'.join(changes)) return bool(changes) +def show_path_changes(path_changes): + """Given a list of tuples (source, destination) that indicate the + path changes, log the changes as INFO-level output to the beets log. + The output is guaranteed to be unicode. + + Every pair is shown on a single line if the terminal width permits it, + else it is split over two lines. E.g., + + Source -> Destination + + vs. 
+ + Source + -> Destination + """ + sources, destinations = zip(*path_changes) + + # Ensure unicode output + sources = list(map(util.displayable_path, sources)) + destinations = list(map(util.displayable_path, destinations)) + + # Calculate widths for terminal split + col_width = (term_width() - len(' -> ')) // 2 + max_width = len(max(sources + destinations, key=len)) + + if max_width > col_width: + # Print every change over two lines + for source, dest in zip(sources, destinations): + log.info(u'{0} \n -> {1}', source, dest) + else: + # Print every change on a single line, and add a header + title_pad = max_width - len('Source ') + len(' -> ') + + log.info(u'Source {0} Destination', ' ' * title_pad) + for source, dest in zip(sources, destinations): + pad = max_width - len(source) + log.info(u'{0} {1} -> {2}', source, ' ' * pad, dest) + + +class CommonOptionsParser(optparse.OptionParser, object): + """Offers a simple way to add common formatting options. + + Options available include: + - matching albums instead of tracks: add_album_option() + - showing paths instead of items/albums: add_path_option() + - changing the format of displayed items/albums: add_format_option() + + The last one can have several behaviors: + - against a special target + - with a certain format + - autodetected target with the album option + + Each method is fully documented in the related method. + """ + def __init__(self, *args, **kwargs): + super(CommonOptionsParser, self).__init__(*args, **kwargs) + self._album_flags = False + # this serves both as an indicator that we offer the feature AND allows + # us to check whether it has been specified on the CLI - bypassing the + # fact that arguments may be in any order + + def add_album_option(self, flags=('-a', '--album')): + """Add a -a/--album option to match albums instead of tracks. + + If used then the format option can auto-detect whether we're setting + the format for items or albums. 
+ Sets the album property on the options extracted from the CLI. + """ + album = optparse.Option(*flags, action='store_true', + help=u'match albums instead of tracks') + self.add_option(album) + self._album_flags = set(flags) + + def _set_format(self, option, opt_str, value, parser, target=None, + fmt=None, store_true=False): + """Internal callback that sets the correct format while parsing CLI + arguments. + """ + if store_true: + setattr(parser.values, option.dest, True) + + value = fmt or value and unicode(value) or '' + parser.values.format = value + if target: + config[target._format_config_key].set(value) + else: + if self._album_flags: + if parser.values.album: + target = library.Album + else: + # the option is either missing either not parsed yet + if self._album_flags & set(parser.rargs): + target = library.Album + else: + target = library.Item + config[target._format_config_key].set(value) + else: + config[library.Item._format_config_key].set(value) + config[library.Album._format_config_key].set(value) + + def add_path_option(self, flags=('-p', '--path')): + """Add a -p/--path option to display the path instead of the default + format. + + By default this affects both items and albums. If add_album_option() + is used then the target will be autodetected. + + Sets the format property to u'$path' on the options extracted from the + CLI. + """ + path = optparse.Option(*flags, nargs=0, action='callback', + callback=self._set_format, + callback_kwargs={'fmt': '$path', + 'store_true': True}, + help=u'print paths for matched items or albums') + self.add_option(path) + + def add_format_option(self, flags=('-f', '--format'), target=None): + """Add -f/--format option to print some LibModel instances with a + custom format. + + `target` is optional and can be one of ``library.Item``, 'item', + ``library.Album`` and 'album'. 
+ + Several behaviors are available: + - if `target` is given then the format is only applied to that + LibModel + - if the album option is used then the target will be autodetected + - otherwise the format is applied to both items and albums. + + Sets the format property on the options extracted from the CLI. + """ + kwargs = {} + if target: + if isinstance(target, basestring): + target = {'item': library.Item, + 'album': library.Album}[target] + kwargs['target'] = target + + opt = optparse.Option(*flags, action='callback', + callback=self._set_format, + callback_kwargs=kwargs, + help=u'print with custom format') + self.add_option(opt) + + def add_all_common_options(self): + """Add album, path and format options. + """ + self.add_album_option() + self.add_path_option() + self.add_format_option() + # Subcommand parsing infrastructure. - - +# # This is a fairly generic subcommand parser for optparse. It is # maintained externally here: # http://gist.github.com/462717 @@ -646,58 +888,68 @@ class Subcommand(object): the subcommand; aliases are alternate names. parser is an OptionParser responsible for parsing the subcommand's options. help is a short description of the command. If no parser is - given, it defaults to a new, empty OptionParser. + given, it defaults to a new, empty CommonOptionsParser. 
""" self.name = name - self.parser = parser or optparse.OptionParser() + self.parser = parser or CommonOptionsParser() self.aliases = aliases self.help = help self.hide = hide + self._root_parser = None -class SubcommandsOptionParser(optparse.OptionParser): + def print_help(self): + self.parser.print_help() + + def parse_args(self, args): + return self.parser.parse_args(args) + + @property + def root_parser(self): + return self._root_parser + + @root_parser.setter + def root_parser(self, root_parser): + self._root_parser = root_parser + self.parser.prog = '{0} {1}'.format( + root_parser.get_prog_name().decode('utf8'), self.name) + + +class SubcommandsOptionParser(CommonOptionsParser): """A variant of OptionParser that parses subcommands and their arguments. """ - # A singleton command used to give help on other subcommands. - _HelpSubcommand = Subcommand('help', optparse.OptionParser(), - help='give detailed help on a specific sub-command', - aliases=('?',)) def __init__(self, *args, **kwargs): """Create a new subcommand-aware option parser. All of the options to OptionParser.__init__ are supported in addition to subcommands, a sequence of Subcommand objects. """ - # The subcommand array, with the help command included. - self.subcommands = list(kwargs.pop('subcommands', [])) - self.subcommands.append(self._HelpSubcommand) - # A more helpful default usage. if 'usage' not in kwargs: - kwargs['usage'] = """ + kwargs['usage'] = u""" %prog COMMAND [ARGS...] %prog help COMMAND""" + kwargs['add_help_option'] = False # Super constructor. - optparse.OptionParser.__init__(self, *args, **kwargs) - - # Adjust the help-visible name of each subcommand. - for subcommand in self.subcommands: - subcommand.parser.prog = '%s %s' % \ - (self.get_prog_name(), subcommand.name) + super(SubcommandsOptionParser, self).__init__(*args, **kwargs) # Our root parser needs to stop on the first unrecognized argument. 
self.disable_interspersed_args() - def add_subcommand(self, cmd): + self.subcommands = [] + + def add_subcommand(self, *cmds): """Adds a Subcommand object to the parser's list of commands. """ - self.subcommands.append(cmd) + for cmd in cmds: + cmd.root_parser = self + self.subcommands.append(cmd) # Add the list of subcommands to the help message. def format_help(self, formatter=None): # Get the original help message, to which we will append. - out = optparse.OptionParser.format_help(self, formatter) + out = super(SubcommandsOptionParser, self).format_help(formatter) if formatter is None: formatter = self.formatter @@ -711,6 +963,7 @@ class SubcommandsOptionParser(optparse.OptionParser): disp_names = [] help_position = 0 subcommands = [c for c in self.subcommands if not c.hide] + subcommands.sort(key=lambda c: c.name) for subcommand in subcommands: name = subcommand.name if subcommand.aliases: @@ -736,7 +989,8 @@ class SubcommandsOptionParser(optparse.OptionParser): result.append(name) help_width = formatter.width - help_position help_lines = textwrap.wrap(subcommand.help, help_width) - result.append("%*s%s\n" % (indent_first, "", help_lines[0])) + help_line = help_lines[0] if help_lines else '' + result.append("%*s%s\n" % (indent_first, "", help_line)) result.extend(["%*s%s\n" % (help_position, "", line) for line in help_lines[1:]]) formatter.dedent() @@ -756,52 +1010,40 @@ class SubcommandsOptionParser(optparse.OptionParser): return subcommand return None - def parse_args(self, a=None, v=None): - """Like OptionParser.parse_args, but returns these four items: - - options: the options passed to the root parser - - subcommand: the Subcommand object that was invoked - - suboptions: the options passed to the subcommand parser - - subargs: the positional arguments passed to the subcommand + def parse_global_options(self, args): + """Parse options up to the subcommand argument. Returns a tuple + of the options object and the remaining arguments. 
""" - options, args = optparse.OptionParser.parse_args(self, a, v) - subcommand, suboptions, subargs = self._parse_sub(args) - return options, subcommand, suboptions, subargs + options, subargs = self.parse_args(args) - def _parse_sub(self, args): - """Given the `args` left unused by a typical OptionParser - `parse_args`, return the invoked subcommand, the subcommand - options, and the subcommand arguments. + # Force the help command + if options.help: + subargs = ['help'] + elif options.version: + subargs = ['version'] + return options, subargs + + def parse_subcommand(self, args): + """Given the `args` left unused by a `parse_global_options`, + return the invoked subcommand, the subcommand options, and the + subcommand arguments. """ + # Help is default command if not args: - # No command given. - self.print_help() - self.exit() - else: - cmdname = args.pop(0) - subcommand = self._subcommand_for_name(cmdname) - if not subcommand: - self.error('unknown command ' + cmdname) + args = ['help'] - suboptions, subargs = subcommand.parser.parse_args(args) - - if subcommand is self._HelpSubcommand: - if subargs: - # particular - cmdname = subargs[0] - helpcommand = self._subcommand_for_name(cmdname) - if not helpcommand: - self.error('no command named {0}'.format(cmdname)) - helpcommand.parser.print_help() - self.exit() - else: - # general - self.print_help() - self.exit() + cmdname = args.pop(0) + subcommand = self._subcommand_for_name(cmdname) + if not subcommand: + raise UserError(u"unknown command '{0}'".format(cmdname)) + suboptions, subargs = subcommand.parse_args(args) return subcommand, suboptions, subargs optparse.Option.ALWAYS_TYPED_ACTIONS += ('callback',) + + def vararg_callback(option, opt_str, value, parser): """Callback for an option with variable arguments. 
Manually collect arguments right of a callback-action @@ -838,53 +1080,55 @@ def vararg_callback(option, opt_str, value, parser): setattr(parser.values, option.dest, value) - # The main entry point and bootstrapping. - -def _load_plugins(): +def _load_plugins(config): """Load the plugins specified in the configuration. """ - # Add plugin paths. + paths = config['pluginpath'].get(confit.StrSeq(split=False)) + paths = map(util.normpath, paths) + log.debug(u'plugin paths: {0}', util.displayable_path(paths)) + import beetsplug - beetsplug.__path__ = get_plugin_paths() + beetsplug.__path__ - + beetsplug.__path__ = paths + beetsplug.__path__ # For backwards compatibility. - sys.path += get_plugin_paths() + sys.path += paths - # Load requested plugins. plugins.load_plugins(config['plugins'].as_str_seq()) plugins.send("pluginload") + return plugins -def _configure(args): - """Parse the command line, load configuration files (including - loading any indicated plugins), and return the invoked subcomand, - the subcommand options, and the subcommand arguments. +def _setup(options, lib=None): + """Prepare and global state and updates it with command line options. + + Returns a list of subcommands, a list of plugins, and a library instance. """ - # Temporary: Migrate from 1.0-style configuration. - from beets.ui import migrate - migrate.automigrate() + # Configure the MusicBrainz API. + mb.configure() + + config = _configure(options) + + plugins = _load_plugins(config) # Get the default subcommands. from beets.ui.commands import default_commands - # Construct the root parser. - commands = list(default_commands) - commands.append(migrate.migrate_cmd) # Temporary. 
- parser = SubcommandsOptionParser(subcommands=commands) - parser.add_option('-l', '--library', dest='library', - help='library database file to use') - parser.add_option('-d', '--directory', dest='directory', - help="destination music directory") - parser.add_option('-v', '--verbose', dest='verbose', action='store_true', - help='print debugging information') - parser.add_option('-c', '--config', dest='config', - help='path to configuration file') + subcommands = list(default_commands) + subcommands.extend(plugins.commands()) - # Parse the command-line! - options, args = optparse.OptionParser.parse_args(parser, args) + if lib is None: + lib = _open_library(config) + plugins.send("library_opened", lib=lib) + library.Item._types.update(plugins.types(library.Item)) + library.Album._types.update(plugins.types(library.Album)) + return subcommands, plugins, lib + + +def _configure(options): + """Amend the global configuration object with command line options. + """ # Add any additional config files specified with --config. This # special handling lets specified plugins get loaded before we # finish parsing the command line. @@ -894,22 +1138,50 @@ def _configure(args): config.set_file(config_path) config.set_args(options) - # Now add the plugin commands to the parser. - _load_plugins() - for cmd in plugins.commands(): - parser.add_subcommand(cmd) + # Configure the logger. + if config['verbose'].get(int): + log.set_global_level(logging.DEBUG) + else: + log.set_global_level(logging.INFO) - # Parse the remainder of the command line with loaded plugins. - return parser._parse_sub(args) + # Ensure compatibility with old (top-level) color configuration. + # Deprecation msg to motivate user to switch to config['ui']['color]. + if config['color'].exists(): + log.warning(u'Warning: top-level configuration of `color` ' + u'is deprecated. Configure color use under `ui`. 
' + u'See documentation for more info.') + config['ui']['color'].set(config['color'].get(bool)) + + # Compatibility from list_format_{item,album} to format_{item,album} + for elem in ('item', 'album'): + old_key = 'list_format_{0}'.format(elem) + if config[old_key].exists(): + new_key = 'format_{0}'.format(elem) + log.warning( + u'Warning: configuration uses "{0}" which is deprecated' + u' in favor of "{1}" now that it affects all commands. ' + u'See changelog & documentation.', + old_key, + new_key, + ) + config[new_key].set(config[old_key]) + + config_path = config.user_config_path() + if os.path.isfile(config_path): + log.debug(u'user configuration: {0}', + util.displayable_path(config_path)) + else: + log.debug(u'no user configuration found at {0}', + util.displayable_path(config_path)) + + log.debug(u'data directory: {0}', + util.displayable_path(config.config_dir())) + return config -def _raw_main(args): - """A helper function for `main` without top-level exception - handling. +def _open_library(config): + """Create a new library instance from the configuration. """ - subcommand, suboptions, subargs = _configure(args) - - # Open library file. dbpath = config['library'].as_filename() try: lib = library.Library( @@ -918,32 +1190,55 @@ def _raw_main(args): get_path_formats(), get_replacements(), ) - except sqlite3.OperationalError: + lib.get_item(0) # Test database connection. + except (sqlite3.OperationalError, sqlite3.DatabaseError): + log.debug(u'{}', traceback.format_exc()) raise UserError(u"database file {0} could not be opened".format( util.displayable_path(dbpath) )) - plugins.send("library_opened", lib=lib) + log.debug(u'library database: {0}\n' + u'library directory: {1}', + util.displayable_path(lib.path), + util.displayable_path(lib.directory)) + return lib - # Configure the logger. 
- if config['verbose'].get(bool): - log.setLevel(logging.DEBUG) - else: - log.setLevel(logging.INFO) - log.debug(u'data directory: {0}\n' - u'library database: {1}\n' - u'library directory: {2}' - .format( - util.displayable_path(config.config_dir()), - util.displayable_path(lib.path), - util.displayable_path(lib.directory), - ) - ) - # Configure the MusicBrainz API. - mb.configure() +def _raw_main(args, lib=None): + """A helper function for `main` without top-level exception + handling. + """ + parser = SubcommandsOptionParser() + parser.add_format_option(flags=('--format-item',), target=library.Item) + parser.add_format_option(flags=('--format-album',), target=library.Album) + parser.add_option('-l', '--library', dest='library', + help=u'library database file to use') + parser.add_option('-d', '--directory', dest='directory', + help=u"destination music directory") + parser.add_option('-v', '--verbose', dest='verbose', action='count', + help=u'log more details (use twice for even more)') + parser.add_option('-c', '--config', dest='config', + help=u'path to configuration file') + parser.add_option('-h', '--help', dest='help', action='store_true', + help=u'show this help message and exit') + parser.add_option('--version', dest='version', action='store_true', + help=optparse.SUPPRESS_HELP) - # Invoke the subcommand. + options, subargs = parser.parse_global_options(args) + + # Special case for the `config --edit` command: bypass _setup so + # that an invalid configuration does not prevent the editor from + # starting. 
+ if subargs and subargs[0] == 'config' \ + and ('-e' in subargs or '--edit' in subargs): + from beets.ui.commands import config_edit + return config_edit() + + subcommands, plugins, lib = _setup(options, lib) + parser.add_subcommand(*subcommands) + + subcommand, suboptions, subargs = parser.parse_subcommand(subargs) subcommand.func(lib, suboptions, subargs) + plugins.send('cli_exit', lib=lib) @@ -955,7 +1250,7 @@ def main(args=None): _raw_main(args) except UserError as exc: message = exc.args[0] if exc.args else None - log.error(u'error: {0}'.format(message)) + log.error(u'error: {0}', message) sys.exit(1) except util.HumanReadableException as exc: exc.log(log) @@ -963,11 +1258,14 @@ def main(args=None): except library.FileOperationError as exc: # These errors have reasonable human-readable descriptions, but # we still want to log their tracebacks for debugging. - log.debug(traceback.format_exc()) - log.error(exc) + log.debug('{}', traceback.format_exc()) + log.error('{}', exc) sys.exit(1) except confit.ConfigError as exc: - log.error(u'configuration error: {0}'.format(exc)) + log.error(u'configuration error: {0}', exc) + sys.exit(1) + except db_query.InvalidQueryError as exc: + log.error(u'invalid query: {0}', exc) sys.exit(1) except IOError as exc: if exc.errno == errno.EPIPE: @@ -977,4 +1275,4 @@ def main(args=None): raise except KeyboardInterrupt: # Silently ignore ^C except in verbose mode. - log.debug(traceback.format_exc()) + log.debug(u'{}', traceback.format_exc()) diff --git a/libs/beets/ui/commands.py b/libs/beets/ui/commands.py index e7e631a4..867a4737 100644 --- a/libs/beets/ui/commands.py +++ b/libs/beets/ui/commands.py @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2014, Adrian Sampson. +# Copyright 2016, Adrian Sampson. 
# # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -15,30 +16,32 @@ """This module provides the default commands for beets' command-line interface. """ -from __future__ import print_function -import logging +from __future__ import division, absolute_import, print_function + import os -import time -import itertools -import codecs -import platform +import re +from collections import namedtuple, Counter +from itertools import chain import beets from beets import ui -from beets.ui import print_, input_, decargs +from beets.ui import print_, input_, decargs, show_path_changes from beets import autotag -from beets.autotag import recommendation +from beets.autotag import Recommendation from beets.autotag import hooks from beets import plugins from beets import importer from beets import util from beets.util import syspath, normpath, ancestry, displayable_path -from beets.util.functemplate import Template from beets import library from beets import config +from beets import logging from beets.util.confit import _package_path +VARIOUS_ARTISTS = u'Various Artists' +PromptChoice = namedtuple('ExtraChoice', ['short', 'long', 'callback']) + # Global logger. log = logging.getLogger('beets') @@ -47,10 +50,8 @@ log = logging.getLogger('beets') default_commands = [] - # Utilities. 
- def _do_query(lib, query, album, also_items=True): """For commands that operate on matched items, performs a query and returns a list of matching items and a list of matching @@ -70,49 +71,77 @@ def _do_query(lib, query, album, also_items=True): items = list(lib.items(query)) if album and not albums: - raise ui.UserError('No matching albums found.') + raise ui.UserError(u'No matching albums found.') elif not album and not items: - raise ui.UserError('No matching items found.') + raise ui.UserError(u'No matching items found.') return items, albums # fields: Shows a list of available fields for queries and format strings. -fields_cmd = ui.Subcommand('fields', - help='show fields available for queries and format strings') +def _print_keys(query): + """Given a SQLite query result, print the `key` field of each + returned row, with identation of 2 spaces. + """ + for row in query: + print_(' ' * 2 + row['key']) + def fields_func(lib, opts, args): def _print_rows(names): - print(" " + "\n ".join(names)) + names.sort() + print_(" " + "\n ".join(names)) - def _show_plugin_fields(album): - plugin_fields = [] - for plugin in plugins.find_plugins(): - if album: - fdict = plugin.album_template_fields - else: - fdict = plugin.template_fields - plugin_fields += fdict.keys() - if plugin_fields: - print("Template fields from plugins:") - _print_rows(plugin_fields) + print_(u"Item fields:") + _print_rows(library.Item.all_keys()) - print("Item fields:") - _print_rows(library.ITEM_KEYS) - _show_plugin_fields(False) + print_(u"Album fields:") + _print_rows(library.Album.all_keys()) - print("\nAlbum fields:") - _print_rows(library.ALBUM_KEYS) - _show_plugin_fields(True) + with lib.transaction() as tx: + # The SQL uses the DISTINCT to get unique values from the query + unique_fields = 'SELECT DISTINCT key FROM (%s)' + print_(u"Item flexible attributes:") + _print_keys(tx.query(unique_fields % library.Item._flex_table)) + + print_(u"Album flexible attributes:") + 
_print_keys(tx.query(unique_fields % library.Album._flex_table)) + +fields_cmd = ui.Subcommand( + 'fields', + help=u'show fields available for queries and format strings' +) fields_cmd.func = fields_func default_commands.append(fields_cmd) -# import: Autotagger and importer. +# help: Print help text for commands -VARIOUS_ARTISTS = u'Various Artists' +class HelpCommand(ui.Subcommand): + + def __init__(self): + super(HelpCommand, self).__init__( + 'help', aliases=('?',), + help=u'give detailed help on a specific sub-command', + ) + + def func(self, lib, opts, args): + if args: + cmdname = args[0] + helpcommand = self.root_parser._subcommand_for_name(cmdname) + if not helpcommand: + raise ui.UserError(u"unknown command '{0}'".format(cmdname)) + helpcommand.print_help() + else: + self.root_parser.print_help() + + +default_commands.append(HelpCommand()) + + +# import: Autotagger and importer. # Importer utilities and support. @@ -145,19 +174,21 @@ def disambig_string(info): if disambig: return u', '.join(disambig) + def dist_string(dist): """Formats a distance (a float) as a colorized similarity percentage string. """ - out = '%.1f%%' % ((1 - dist) * 100) + out = u'%.1f%%' % ((1 - dist) * 100) if dist <= config['match']['strong_rec_thresh'].as_number(): - out = ui.colorize('green', out) + out = ui.colorize('text_success', out) elif dist <= config['match']['medium_rec_thresh'].as_number(): - out = ui.colorize('yellow', out) + out = ui.colorize('text_warning', out) else: - out = ui.colorize('red', out) + out = ui.colorize('text_error', out) return out + def penalty_string(distance, limit=None): """Returns a colorized string that indicates all the penalties applied to a distance object. 
@@ -171,7 +202,8 @@ def penalty_string(distance, limit=None): if penalties: if limit and len(penalties) > limit: penalties = penalties[:limit] + ['...'] - return ui.colorize('yellow', '(%s)' % ', '.join(penalties)) + return ui.colorize('text_warning', u'(%s)' % ', '.join(penalties)) + def show_change(cur_artist, cur_album, match): """Print out a representation of the changes that will be made if an @@ -213,29 +245,29 @@ def show_change(cur_artist, cur_album, match): (cur_album != match.info.album and match.info.album != VARIOUS_ARTISTS): artist_l, artist_r = cur_artist or '', match.info.artist - album_l, album_r = cur_album or '', match.info.album + album_l, album_r = cur_album or '', match.info.album if artist_r == VARIOUS_ARTISTS: # Hide artists for VA releases. artist_l, artist_r = u'', u'' artist_l, artist_r = ui.colordiff(artist_l, artist_r) - album_l, album_r = ui.colordiff(album_l, album_r) + album_l, album_r = ui.colordiff(album_l, album_r) - print_("Correcting tags from:") + print_(u"Correcting tags from:") show_album(artist_l, album_l) - print_("To:") + print_(u"To:") show_album(artist_r, album_r) else: print_(u"Tagging:\n {0.artist} - {0.album}".format(match.info)) # Data URL. if match.info.data_url: - print_('URL:\n %s' % match.info.data_url) + print_(u'URL:\n %s' % match.info.data_url) # Info line. info = [] # Similarity. - info.append('(Similarity: %s)' % dist_string(match.distance)) + info.append(u'(Similarity: %s)' % dist_string(match.distance)) # Penalties. penalties = penalty_string(match.distance) if penalties: @@ -243,12 +275,12 @@ def show_change(cur_artist, cur_album, match): # Disambiguation. disambig = disambig_string(match.info) if disambig: - info.append(ui.colorize('lightgray', '(%s)' % disambig)) + info.append(ui.colorize('text_highlight_minor', u'(%s)' % disambig)) print_(' '.join(info)) # Tracks. 
pairs = match.mapping.items() - pairs.sort(key=lambda (_, track_info): track_info.index) + pairs.sort(key=lambda item_and_track_info: item_and_track_info[1].index) # Build up LHS and RHS for track difference display. The `lines` list # contains ``(lhs, rhs, width)`` tuples where `width` is the length (in @@ -261,16 +293,16 @@ def show_change(cur_artist, cur_album, match): if medium != track_info.medium or disctitle != track_info.disctitle: media = match.info.media or 'Media' if match.info.mediums > 1 and track_info.disctitle: - lhs = '%s %s: %s' % (media, track_info.medium, - track_info.disctitle) + lhs = u'%s %s: %s' % (media, track_info.medium, + track_info.disctitle) elif match.info.mediums > 1: - lhs = '%s %s' % (media, track_info.medium) + lhs = u'%s %s' % (media, track_info.medium) elif track_info.disctitle: - lhs = '%s: %s' % (media, track_info.disctitle) + lhs = u'%s: %s' % (media, track_info.disctitle) else: lhs = None if lhs: - lines.append((lhs, '', 0)) + lines.append((lhs, u'', 0)) medium, disctitle = track_info.medium, track_info.disctitle # Titles. @@ -288,20 +320,12 @@ def show_change(cur_artist, cur_album, match): cur_track, new_track = format_index(item), format_index(track_info) if cur_track != new_track: if item.track in (track_info.index, track_info.medium_index): - color = 'lightgray' + color = 'text_highlight_minor' else: - color = 'red' - if (cur_track + new_track).count('-') == 1: - lhs_track, rhs_track = ui.colorize(color, cur_track), \ - ui.colorize(color, new_track) - else: - color = 'red' - lhs_track, rhs_track = ui.color_diff_suffix(cur_track, - new_track) - templ = ui.colorize(color, u' (#') + u'{0}' + \ - ui.colorize(color, u')') - lhs += templ.format(lhs_track) - rhs += templ.format(rhs_track) + color = 'text_highlight' + templ = ui.colorize(color, u' (#{0})') + lhs += templ.format(cur_track) + rhs += templ.format(new_track) lhs_width += len(cur_track) + 4 # Length change. 
@@ -310,12 +334,9 @@ def show_change(cur_artist, cur_album, match): config['ui']['length_diff_thresh'].as_number(): cur_length = ui.human_seconds_short(item.length) new_length = ui.human_seconds_short(track_info.length) - lhs_length, rhs_length = ui.color_diff_suffix(cur_length, - new_length) - templ = ui.colorize('red', u' (') + u'{0}' + \ - ui.colorize('red', u')') - lhs += templ.format(lhs_length) - rhs += templ.format(rhs_length) + templ = ui.colorize('text_highlight', u' ({0})') + lhs += templ.format(cur_length) + rhs += templ.format(new_length) lhs_width += len(cur_length) + 3 # Penalties. @@ -324,9 +345,9 @@ def show_change(cur_artist, cur_album, match): rhs += ' %s' % penalties if lhs != rhs: - lines.append((' * %s' % lhs, rhs, lhs_width)) + lines.append((u' * %s' % lhs, rhs, lhs_width)) elif config['import']['detail']: - lines.append((' * %s' % lhs, '', lhs_width)) + lines.append((u' * %s' % lhs, '', lhs_width)) # Print each track in two columns, or across two lines. col_width = (ui.term_width() - len(''.join([' * ', ' -> ']))) // 2 @@ -343,19 +364,24 @@ def show_change(cur_artist, cur_album, match): # Missing and unmatched tracks. if match.extra_tracks: - print_('Missing tracks:') + print_(u'Missing tracks ({0}/{1} - {2:.1%}):'.format( + len(match.extra_tracks), + len(match.info.tracks), + len(match.extra_tracks) / len(match.info.tracks) + )) for track_info in match.extra_tracks: - line = ' ! %s (#%s)' % (track_info.title, format_index(track_info)) + line = u' ! %s (#%s)' % (track_info.title, format_index(track_info)) if track_info.length: - line += ' (%s)' % ui.human_seconds_short(track_info.length) - print_(ui.colorize('yellow', line)) + line += u' (%s)' % ui.human_seconds_short(track_info.length) + print_(ui.colorize('text_warning', line)) if match.extra_items: - print_('Unmatched tracks:') + print_(u'Unmatched tracks ({0}):'.format(len(match.extra_items))) for item in match.extra_items: - line = ' ! 
%s (#%s)' % (item.title, format_index(item)) + line = u' ! %s (#%s)' % (item.title, format_index(item)) if item.length: - line += ' (%s)' % ui.human_seconds_short(item.length) - print_(ui.colorize('yellow', line)) + line += u' (%s)' % ui.human_seconds_short(item.length) + print_(ui.colorize('text_warning', line)) + def show_item_change(item, match): """Print out the change that would occur by tagging `item` with the @@ -368,22 +394,22 @@ def show_item_change(item, match): cur_artist, new_artist = ui.colordiff(cur_artist, new_artist) cur_title, new_title = ui.colordiff(cur_title, new_title) - print_("Correcting track tags from:") - print_(" %s - %s" % (cur_artist, cur_title)) - print_("To:") - print_(" %s - %s" % (new_artist, new_title)) + print_(u"Correcting track tags from:") + print_(u" %s - %s" % (cur_artist, cur_title)) + print_(u"To:") + print_(u" %s - %s" % (new_artist, new_title)) else: - print_("Tagging track: %s - %s" % (cur_artist, cur_title)) + print_(u"Tagging track: %s - %s" % (cur_artist, cur_title)) # Data URL. if match.info.data_url: - print_('URL:\n %s' % match.info.data_url) + print_(u'URL:\n %s' % match.info.data_url) # Info line. info = [] # Similarity. - info.append('(Similarity: %s)' % dist_string(match.distance)) + info.append(u'(Similarity: %s)' % dist_string(match.distance)) # Penalties. penalties = penalty_string(match.distance) if penalties: @@ -391,10 +417,48 @@ def show_item_change(item, match): # Disambiguation. disambig = disambig_string(match.info) if disambig: - info.append(ui.colorize('lightgray', '(%s)' % disambig)) + info.append(ui.colorize('text_highlight_minor', u'(%s)' % disambig)) print_(' '.join(info)) -def _summary_judment(rec): + +def summarize_items(items, singleton): + """Produces a brief summary line describing a set of items. Used for + manually resolving duplicates during import. + + `items` is a list of `Item` objects. 
`singleton` indicates whether + this is an album or single-item import (if the latter, them `items` + should only have one element). + """ + summary_parts = [] + if not singleton: + summary_parts.append(u"{0} items".format(len(items))) + + format_counts = {} + for item in items: + format_counts[item.format] = format_counts.get(item.format, 0) + 1 + if len(format_counts) == 1: + # A single format. + summary_parts.append(items[0].format) + else: + # Enumerate all the formats by decreasing frequencies: + for fmt, count in sorted( + format_counts.items(), + key=lambda fmt_and_count: (-fmt_and_count[1], fmt_and_count[0]) + ): + summary_parts.append('{0} {1}'.format(fmt, count)) + + if items: + average_bitrate = sum([item.bitrate for item in items]) / len(items) + total_duration = sum([item.length for item in items]) + total_filesize = sum([item.filesize for item in items]) + summary_parts.append(u'{0}kbps'.format(int(average_bitrate / 1000))) + summary_parts.append(ui.human_seconds_short(total_duration)) + summary_parts.append(ui.human_bytes(total_filesize)) + + return u', '.join(summary_parts) + + +def _summary_judgment(rec): """Determines whether a decision should be made without even asking the user. This occurs in quiet mode and when an action is chosen for NONE recommendations. Return an action or None if the user should be @@ -402,7 +466,7 @@ def _summary_judment(rec): made. 
""" if config['import']['quiet']: - if rec == recommendation.strong: + if rec == Recommendation.strong: return importer.action.APPLY else: action = config['import']['quiet_fallback'].as_choice({ @@ -410,7 +474,7 @@ def _summary_judment(rec): 'asis': importer.action.ASIS, }) - elif rec == recommendation.none: + elif rec == Recommendation.none: action = config['import']['none_rec_action'].as_choice({ 'skip': importer.action.SKIP, 'asis': importer.action.ASIS, @@ -421,13 +485,15 @@ def _summary_judment(rec): return None if action == importer.action.SKIP: - print_('Skipping.') + print_(u'Skipping.') elif action == importer.action.ASIS: - print_('Importing as-is.') + print_(u'Importing as-is.') return action + def choose_candidate(candidates, singleton, rec, cur_artist=None, - cur_album=None, item=None, itemcount=None): + cur_album=None, item=None, itemcount=None, + extra_choices=[]): """Given a sorted list of candidates, ask the user for a selection of which candidate to use. Applies to both full albums and singletons (tracks). Candidates are either AlbumMatch or TrackMatch @@ -435,8 +501,16 @@ def choose_candidate(candidates, singleton, rec, cur_artist=None, `cur_album`, and `itemcount` must be provided. For singletons, `item` must be provided. - Returns the result of the choice, which may SKIP, ASIS, TRACKS, or - MANUAL or a candidate (an AlbumMatch/TrackMatch object). + `extra_choices` is a list of `PromptChoice`s, containg the choices + appended by the plugins after receiving the `before_choose_candidate` + event. If not empty, the choices are appended to the prompt presented + to the user. + + Returns one of the following: + * the result of the choice, which may be SKIP, ASIS, TRACKS, or MANUAL + * a candidate (an AlbumMatch/TrackMatch object) + * the short letter of a `PromptChoice` (if the user selected one of + the `extra_choices`). """ # Sanity check. 
if singleton: @@ -445,47 +519,53 @@ def choose_candidate(candidates, singleton, rec, cur_artist=None, assert cur_artist is not None assert cur_album is not None + # Build helper variables for extra choices. + extra_opts = tuple(c.long for c in extra_choices) + extra_actions = tuple(c.short for c in extra_choices) + # Zero candidates. if not candidates: if singleton: - print_("No matching recordings found.") - opts = ('Use as-is', 'Skip', 'Enter search', 'enter Id', - 'aBort') + print_(u"No matching recordings found.") + opts = (u'Use as-is', u'Skip', u'Enter search', u'enter Id', + u'aBort') else: - print_("No matching release found for {0} tracks." + print_(u"No matching release found for {0} tracks." .format(itemcount)) - print_('For help, see: ' - 'http://beets.readthedocs.org/en/latest/faq.html#nomatch') - opts = ('Use as-is', 'as Tracks', 'Group albums', 'Skip', - 'Enter search', 'enter Id', 'aBort') - sel = ui.input_options(opts) - if sel == 'u': + print_(u'For help, see: ' + u'http://beets.readthedocs.org/en/latest/faq.html#nomatch') + opts = (u'Use as-is', u'as Tracks', u'Group albums', u'Skip', + u'Enter search', u'enter Id', u'aBort') + sel = ui.input_options(opts + extra_opts) + if sel == u'u': return importer.action.ASIS - elif sel == 't': + elif sel == u't': assert not singleton return importer.action.TRACKS - elif sel == 'e': + elif sel == u'e': return importer.action.MANUAL - elif sel == 's': + elif sel == u's': return importer.action.SKIP - elif sel == 'b': + elif sel == u'b': raise importer.ImportAbort() - elif sel == 'i': + elif sel == u'i': return importer.action.MANUAL_ID - elif sel == 'g': + elif sel == u'g': return importer.action.ALBUMS + elif sel in extra_actions: + return sel else: assert False # Is the change good enough? bypass_candidates = False - if rec != recommendation.none: + if rec != Recommendation.none: match = candidates[0] bypass_candidates = True while True: # Display and choose from candidates. 
- require = rec <= recommendation.low + require = rec <= Recommendation.low if not bypass_candidates: # Display list of candidates. @@ -515,35 +595,39 @@ def choose_candidate(candidates, singleton, rec, cur_artist=None, # Disambiguation disambig = disambig_string(match.info) if disambig: - line.append(ui.colorize('lightgray', '(%s)' % disambig)) + line.append(ui.colorize('text_highlight_minor', + u'(%s)' % disambig)) - print_(' '.join(line)) + print_(u' '.join(line)) # Ask the user for a choice. if singleton: - opts = ('Skip', 'Use as-is', 'Enter search', 'enter Id', - 'aBort') + opts = (u'Skip', u'Use as-is', u'Enter search', u'enter Id', + u'aBort') else: - opts = ('Skip', 'Use as-is', 'as Tracks', 'Group albums', - 'Enter search', 'enter Id', 'aBort') - sel = ui.input_options(opts, numrange=(1, len(candidates))) - if sel == 's': + opts = (u'Skip', u'Use as-is', u'as Tracks', u'Group albums', + u'Enter search', u'enter Id', u'aBort') + sel = ui.input_options(opts + extra_opts, + numrange=(1, len(candidates))) + if sel == u's': return importer.action.SKIP - elif sel == 'u': + elif sel == u'u': return importer.action.ASIS - elif sel == 'm': + elif sel == u'm': pass - elif sel == 'e': + elif sel == u'e': return importer.action.MANUAL - elif sel == 't': + elif sel == u't': assert not singleton return importer.action.TRACKS - elif sel == 'b': + elif sel == u'b': raise importer.ImportAbort() - elif sel == 'i': + elif sel == u'i': return importer.action.MANUAL_ID - elif sel == 'g': + elif sel == u'g': return importer.action.ALBUMS + elif sel in extra_actions: + return sel else: # Numerical selection. match = candidates[sel - 1] if sel != 1: @@ -559,58 +643,64 @@ def choose_candidate(candidates, singleton, rec, cur_artist=None, show_change(cur_artist, cur_album, match) # Exact match => tag automatically if we're not in timid mode. 
- if rec == recommendation.strong and not config['import']['timid']: + if rec == Recommendation.strong and not config['import']['timid']: return match # Ask for confirmation. if singleton: - opts = ('Apply', 'More candidates', 'Skip', 'Use as-is', - 'Enter search', 'enter Id', 'aBort') + opts = (u'Apply', u'More candidates', u'Skip', u'Use as-is', + u'Enter search', u'enter Id', u'aBort') else: - opts = ('Apply', 'More candidates', 'Skip', 'Use as-is', - 'as Tracks', 'Group albums', 'Enter search', 'enter Id', - 'aBort') + opts = (u'Apply', u'More candidates', u'Skip', u'Use as-is', + u'as Tracks', u'Group albums', u'Enter search', + u'enter Id', u'aBort') default = config['import']['default_action'].as_choice({ - 'apply': 'a', - 'skip': 's', - 'asis': 'u', - 'none': None, + u'apply': u'a', + u'skip': u's', + u'asis': u'u', + u'none': None, }) if default is None: require = True - sel = ui.input_options(opts, require=require, default=default) - if sel == 'a': + sel = ui.input_options(opts + extra_opts, require=require, + default=default) + if sel == u'a': return match - elif sel == 'g': + elif sel == u'g': return importer.action.ALBUMS - elif sel == 's': + elif sel == u's': return importer.action.SKIP - elif sel == 'u': + elif sel == u'u': return importer.action.ASIS - elif sel == 't': + elif sel == u't': assert not singleton return importer.action.TRACKS - elif sel == 'e': + elif sel == u'e': return importer.action.MANUAL - elif sel == 'b': + elif sel == u'b': raise importer.ImportAbort() - elif sel == 'i': + elif sel == u'i': return importer.action.MANUAL_ID + elif sel in extra_actions: + return sel + def manual_search(singleton): """Input either an artist and album (for full albums) or artist and track name (for singletons) for manual search. 
""" - artist = input_('Artist:') - name = input_('Track:' if singleton else 'Album:') + artist = input_(u'Artist:') + name = input_(u'Track:' if singleton else u'Album:') return artist.strip(), name.strip() + def manual_id(singleton): """Input an ID, either for an album ("release") or a track ("recording"). """ - prompt = u'Enter {0} ID:'.format('recording' if singleton else 'release') + prompt = u'Enter {0} ID:'.format(u'recording' if singleton else u'release') return input_(prompt).strip() + class TerminalImportSession(importer.ImportSession): """An import session that runs in a terminal. """ @@ -625,7 +715,7 @@ class TerminalImportSession(importer.ImportSession): u' ({0} items)'.format(len(task.items))) # Take immediate action if appropriate. - action = _summary_judment(task.rec) + action = _summary_judgment(task.rec) if action == importer.action.APPLY: match = task.candidates[0] show_change(task.cur_artist, task.cur_album, match) @@ -636,13 +726,19 @@ class TerminalImportSession(importer.ImportSession): # Loop until we have a choice. candidates, rec = task.candidates, task.rec while True: + # Gather extra choices from plugins. + extra_choices = self._get_plugin_choices(task) + extra_ops = {c.short: c.callback for c in extra_choices} + # Ask for a choice from the user. - choice = choose_candidate(candidates, False, rec, task.cur_artist, - task.cur_album, itemcount=len(task.items)) + choice = choose_candidate( + candidates, False, rec, task.cur_artist, task.cur_album, + itemcount=len(task.items), extra_choices=extra_choices + ) # Choose which tags to use. if choice in (importer.action.SKIP, importer.action.ASIS, - importer.action.TRACKS, importer.action.ALBUMS): + importer.action.TRACKS, importer.action.ALBUMS): # Pass selection to main control flow. 
return choice elif choice is importer.action.MANUAL: @@ -656,8 +752,14 @@ class TerminalImportSession(importer.ImportSession): search_id = manual_id(False) if search_id: _, _, candidates, rec = autotag.tag_album( - task.items, search_id=search_id + task.items, search_ids=search_id.split() ) + elif choice in extra_ops.keys(): + # Allow extra ops to automatically set the post-choice. + post_choice = extra_ops[choice](self, task) + if isinstance(post_choice, importer.action): + # MANUAL and MANUAL_ID have no effect, even if returned. + return post_choice else: # We have a candidate! Finish tagging. Here, choice is an # AlbumMatch object. @@ -673,7 +775,7 @@ class TerminalImportSession(importer.ImportSession): candidates, rec = task.candidates, task.rec # Take immediate action if appropriate. - action = _summary_judment(task.rec) + action = _summary_judgment(task.rec) if action == importer.action.APPLY: match = candidates[0] show_item_change(task.item, match) @@ -682,148 +784,169 @@ class TerminalImportSession(importer.ImportSession): return action while True: + extra_choices = self._get_plugin_choices(task) + extra_ops = {c.short: c.callback for c in extra_choices} + # Ask for a choice. - choice = choose_candidate(candidates, True, rec, item=task.item) + choice = choose_candidate(candidates, True, rec, item=task.item, + extra_choices=extra_choices) if choice in (importer.action.SKIP, importer.action.ASIS): return choice elif choice == importer.action.TRACKS: - assert False # TRACKS is only legal for albums. + assert False # TRACKS is only legal for albums. elif choice == importer.action.MANUAL: # Continue in the loop with a new set of candidates. search_artist, search_title = manual_search(True) candidates, rec = autotag.tag_item(task.item, search_artist, - search_title) + search_title) elif choice == importer.action.MANUAL_ID: # Ask for a track ID. 
search_id = manual_id(True) if search_id: - candidates, rec = autotag.tag_item(task.item, - search_id=search_id) + candidates, rec = autotag.tag_item( + task.item, search_ids=search_id.split()) + elif choice in extra_ops.keys(): + # Allow extra ops to automatically set the post-choice. + post_choice = extra_ops[choice](self, task) + if isinstance(post_choice, importer.action): + # MANUAL and MANUAL_ID have no effect, even if returned. + return post_choice else: # Chose a candidate. assert isinstance(choice, autotag.TrackMatch) return choice - def resolve_duplicate(self, task): + def resolve_duplicate(self, task, found_duplicates): """Decide what to do when a new album or item seems similar to one that's already in the library. """ - log.warn("This %s is already in the library!" % - ("album" if task.is_album else "item")) + log.warn(u"This {0} is already in the library!", + (u"album" if task.is_album else u"item")) if config['import']['quiet']: # In quiet mode, don't prompt -- just skip. - log.info('Skipping.') - sel = 's' + log.info(u'Skipping.') + sel = u's' else: + # Print some detail about the existing and new items so the + # user can make an informed decision. + for duplicate in found_duplicates: + print_(u"Old: " + summarize_items( + list(duplicate.items()) if task.is_album else [duplicate], + not task.is_album, + )) + + print_(u"New: " + summarize_items( + task.imported_items(), + not task.is_album, + )) + sel = ui.input_options( - ('Skip new', 'Keep both', 'Remove old') + (u'Skip new', u'Keep both', u'Remove old') ) - if sel == 's': + if sel == u's': # Skip new. task.set_choice(importer.action.SKIP) - elif sel == 'k': + elif sel == u'k': # Keep both. Do nothing; leave the choice intact. pass - elif sel == 'r': + elif sel == u'r': # Remove old. - task.remove_duplicates = True + task.should_remove_duplicates = True else: assert False def should_resume(self, path): return ui.input_yn(u"Import of the directory:\n{0}\n" - "was interrupted. Resume (Y/n)?" 
+ u"was interrupted. Resume (Y/n)?" .format(displayable_path(path))) + def _get_plugin_choices(self, task): + """Get the extra choices appended to the plugins to the ui prompt. + + The `before_choose_candidate` event is sent to the plugins, with + session and task as its parameters. Plugins are responsible for + checking the right conditions and returning a list of `PromptChoice`s, + which is flattened and checked for conflicts. + + If two or more choices have the same short letter, a warning is + emitted and all but one choices are discarded, giving preference + to the default importer choices. + + Returns a list of `PromptChoice`s. + """ + # Send the before_choose_candidate event and flatten list. + extra_choices = list(chain(*plugins.send('before_choose_candidate', + session=self, task=task))) + # Take into account default options, for duplicate checking. + all_choices = [PromptChoice(u'a', u'Apply', None), + PromptChoice(u's', u'Skip', None), + PromptChoice(u'u', u'Use as-is', None), + PromptChoice(u't', u'as Tracks', None), + PromptChoice(u'g', u'Group albums', None), + PromptChoice(u'e', u'Enter search', None), + PromptChoice(u'i', u'enter Id', None), + PromptChoice(u'b', u'aBort', None)] +\ + extra_choices + + short_letters = [c.short for c in all_choices] + if len(short_letters) != len(set(short_letters)): + # Duplicate short letter has been found. + duplicates = [i for i, count in Counter(short_letters).items() + if count > 1] + for short in duplicates: + # Keep the first of the choices, removing the rest. + dup_choices = [c for c in all_choices if c.short == short] + for c in dup_choices[1:]: + log.warn(u"Prompt choice '{0}' removed due to conflict " + u"with '{1}' (short letter: '{2}')", + c.long, dup_choices[0].long, c.short) + extra_choices.remove(c) + return extra_choices + + # The import command. + def import_files(lib, paths, query): """Import the files in the given list of paths or matching the query. """ # Check the user-specified directories. 
for path in paths: - fullpath = syspath(normpath(path)) - if not config['import']['singletons'] and not os.path.isdir(fullpath): - raise ui.UserError(u'not a directory: {0}'.format( - displayable_path(path))) - elif config['import']['singletons'] and not os.path.exists(fullpath): - raise ui.UserError(u'no such file: {0}'.format( + if not os.path.exists(syspath(normpath(path))): + raise ui.UserError(u'no such file or directory: {0}'.format( displayable_path(path))) # Check parameter consistency. if config['import']['quiet'] and config['import']['timid']: - raise ui.UserError("can't be both quiet and timid") + raise ui.UserError(u"can't be both quiet and timid") # Open the log. if config['import']['log'].get() is not None: - logpath = config['import']['log'].as_filename() + logpath = syspath(config['import']['log'].as_filename()) try: - logfile = codecs.open(syspath(logpath), 'a', 'utf8') + loghandler = logging.FileHandler(logpath) except IOError: - raise ui.UserError(u"could not open log file for writing: %s" % - displayable_path(logpath)) - print(u'import started', time.asctime(), file=logfile) + raise ui.UserError(u"could not open log file for writing: " + u"{0}".format(displayable_path(logpath))) else: - logfile = None + loghandler = None # Never ask for input in quiet mode. if config['import']['resume'].get() == 'ask' and \ config['import']['quiet']: config['import']['resume'] = False - session = TerminalImportSession(lib, logfile, paths, query) - try: - session.run() - finally: - # If we were logging, close the file. - if logfile: - print(u'', file=logfile) - logfile.close() + session = TerminalImportSession(lib, loghandler, paths, query) + session.run() # Emit event. 
plugins.send('import', lib=lib, paths=paths) -import_cmd = ui.Subcommand('import', help='import new music', - aliases=('imp', 'im')) -import_cmd.parser.add_option('-c', '--copy', action='store_true', - default=None, help="copy tracks into library directory (default)") -import_cmd.parser.add_option('-C', '--nocopy', action='store_false', - dest='copy', help="don't copy tracks (opposite of -c)") -import_cmd.parser.add_option('-w', '--write', action='store_true', - default=None, help="write new metadata to files' tags (default)") -import_cmd.parser.add_option('-W', '--nowrite', action='store_false', - dest='write', help="don't write metadata (opposite of -w)") -import_cmd.parser.add_option('-a', '--autotag', action='store_true', - dest='autotag', help="infer tags for imported files (default)") -import_cmd.parser.add_option('-A', '--noautotag', action='store_false', - dest='autotag', - help="don't infer tags for imported files (opposite of -a)") -import_cmd.parser.add_option('-p', '--resume', action='store_true', - default=None, help="resume importing if interrupted") -import_cmd.parser.add_option('-P', '--noresume', action='store_false', - dest='resume', help="do not try to resume importing") -import_cmd.parser.add_option('-q', '--quiet', action='store_true', - dest='quiet', help="never prompt for input: skip albums instead") -import_cmd.parser.add_option('-l', '--log', dest='log', - help='file to log untaggable albums for later review') -import_cmd.parser.add_option('-s', '--singletons', action='store_true', - help='import individual tracks instead of full albums') -import_cmd.parser.add_option('-t', '--timid', dest='timid', - action='store_true', help='always confirm all actions') -import_cmd.parser.add_option('-L', '--library', dest='library', - action='store_true', help='retag items matching a query') -import_cmd.parser.add_option('-i', '--incremental', dest='incremental', - action='store_true', help='skip already-imported directories') 
-import_cmd.parser.add_option('-I', '--noincremental', dest='incremental', - action='store_false', help='do not skip already-imported directories') -import_cmd.parser.add_option('--flat', dest='flat', - action='store_true', help='import an entire tree as a single album') -import_cmd.parser.add_option('-g', '--group-albums', dest='group_albums', - action='store_true', help='group tracks in a folder into seperate albums') + def import_func(lib, opts, args): config['import'].set_args(opts) @@ -839,40 +962,117 @@ def import_func(lib, opts, args): query = None paths = args if not paths: - raise ui.UserError('no path specified') + raise ui.UserError(u'no path specified') import_files(lib, paths, query) + + +import_cmd = ui.Subcommand( + u'import', help=u'import new music', aliases=(u'imp', u'im') +) +import_cmd.parser.add_option( + u'-c', u'--copy', action='store_true', default=None, + help=u"copy tracks into library directory (default)" +) +import_cmd.parser.add_option( + u'-C', u'--nocopy', action='store_false', dest='copy', + help=u"don't copy tracks (opposite of -c)" +) +import_cmd.parser.add_option( + u'-w', u'--write', action='store_true', default=None, + help=u"write new metadata to files' tags (default)" +) +import_cmd.parser.add_option( + u'-W', u'--nowrite', action='store_false', dest='write', + help=u"don't write metadata (opposite of -w)" +) +import_cmd.parser.add_option( + u'-a', u'--autotag', action='store_true', dest='autotag', + help=u"infer tags for imported files (default)" +) +import_cmd.parser.add_option( + u'-A', u'--noautotag', action='store_false', dest='autotag', + help=u"don't infer tags for imported files (opposite of -a)" +) +import_cmd.parser.add_option( + u'-p', u'--resume', action='store_true', default=None, + help=u"resume importing if interrupted" +) +import_cmd.parser.add_option( + u'-P', u'--noresume', action='store_false', dest='resume', + help=u"do not try to resume importing" +) +import_cmd.parser.add_option( + u'-q', u'--quiet', 
action='store_true', dest='quiet', + help=u"never prompt for input: skip albums instead" +) +import_cmd.parser.add_option( + u'-l', u'--log', dest='log', + help=u'file to log untaggable albums for later review' +) +import_cmd.parser.add_option( + u'-s', u'--singletons', action='store_true', + help=u'import individual tracks instead of full albums' +) +import_cmd.parser.add_option( + u'-t', u'--timid', dest='timid', action='store_true', + help=u'always confirm all actions' +) +import_cmd.parser.add_option( + u'-L', u'--library', dest='library', action='store_true', + help=u'retag items matching a query' +) +import_cmd.parser.add_option( + u'-i', u'--incremental', dest='incremental', action='store_true', + help=u'skip already-imported directories' +) +import_cmd.parser.add_option( + u'-I', u'--noincremental', dest='incremental', action='store_false', + help=u'do not skip already-imported directories' +) +import_cmd.parser.add_option( + u'--flat', dest='flat', action='store_true', + help=u'import an entire tree as a single album' +) +import_cmd.parser.add_option( + u'-g', u'--group-albums', dest='group_albums', action='store_true', + help=u'group tracks in a folder into separate albums' +) +import_cmd.parser.add_option( + u'--pretend', dest='pretend', action='store_true', + help=u'just print the files to import' +) +import_cmd.parser.add_option( + u'-S', u'--search-id', dest='search_ids', action='append', + metavar='BACKEND_ID', + help=u'restrict matching to a specific metadata backend ID' +) import_cmd.func = import_func default_commands.append(import_cmd) # list: Query and show library contents. -def list_items(lib, query, album, fmt): +def list_items(lib, query, album, fmt=''): """Print out items in lib matching query. If album, then search for albums instead of single items. 
""" - tmpl = Template(ui._pick_format(album, fmt)) if album: for album in lib.albums(query): - ui.print_obj(album, lib, tmpl) + ui.print_(format(album, fmt)) else: for item in lib.items(query): - ui.print_obj(item, lib, tmpl) + ui.print_(format(item, fmt)) + -list_cmd = ui.Subcommand('list', help='query the library', aliases=('ls',)) -list_cmd.parser.add_option('-a', '--album', action='store_true', - help='show matching albums instead of tracks') -list_cmd.parser.add_option('-p', '--path', action='store_true', - help='print paths for matched items or albums') -list_cmd.parser.add_option('-f', '--format', action='store', - help='print with custom format', default=None) def list_func(lib, opts, args): - if opts.path: - fmt = '$path' - else: - fmt = opts.format - list_items(lib, decargs(args), opts.album, fmt) + list_items(lib, decargs(args), opts.album) + + +list_cmd = ui.Subcommand(u'list', help=u'query the library', aliases=(u'ls',)) +list_cmd.parser.usage += u"\n" \ + u'Example: %prog -f \'$album: $title\' artist:beatles' +list_cmd.parser.add_all_common_options() list_cmd.func = list_func default_commands.append(list_cmd) @@ -891,8 +1091,8 @@ def update_items(lib, query, album, move, pretend): for item in items: # Item deleted? if not os.path.exists(syspath(item.path)): - ui.print_obj(item, lib) - ui.print_(ui.colorize('red', u' deleted')) + ui.print_(format(item)) + ui.print_(ui.colorize('text_error', u' deleted')) if not pretend: item.remove(True) affected_albums.add(item.album_id) @@ -900,16 +1100,16 @@ def update_items(lib, query, album, move, pretend): # Did the item change since last checked? if item.current_mtime() <= item.mtime: - log.debug(u'skipping %s because mtime is up to date (%i)' % - (displayable_path(item.path), item.mtime)) + log.debug(u'skipping {0} because mtime is up to date ({1})', + displayable_path(item.path), item.mtime) continue # Read new data. 
try: item.read() - except Exception as exc: - log.error(u'error reading {0}: {1}'.format( - displayable_path(item.path), exc)) + except library.ReadError as exc: + log.error(u'error reading {0}: {1}', + displayable_path(item.path), exc) continue # Special-case album artist when it matches track artist. (Hacky @@ -919,11 +1119,11 @@ def update_items(lib, query, album, move, pretend): old_item = lib.get_item(item.id) if old_item.albumartist == old_item.artist == item.artist: item.albumartist = old_item.albumartist - item._dirty.discard('albumartist') + item._dirty.discard(u'albumartist') # Check for and display changes. changed = ui.show_model_changes(item, - fields=library.ITEM_KEYS_META) + fields=library.Item._media_fields) # Save changes. if not pretend: @@ -951,32 +1151,43 @@ def update_items(lib, query, album, move, pretend): continue album = lib.get_album(album_id) if not album: # Empty albums have already been removed. - log.debug('emptied album %i' % album_id) + log.debug(u'emptied album {0}', album_id) continue first_item = album.items().get() # Update album structure to reflect an item in it. - for key in library.ALBUM_KEYS_ITEM: + for key in library.Album.item_keys: album[key] = first_item[key] album.store() # Move album art (and any inconsistent items). 
if move and lib.directory in ancestry(first_item.path): - log.debug('moving album %i' % album_id) + log.debug(u'moving album {0}', album_id) album.move() -update_cmd = ui.Subcommand('update', - help='update the library', aliases=('upd','up',)) -update_cmd.parser.add_option('-a', '--album', action='store_true', - help='match albums instead of tracks') -update_cmd.parser.add_option('-M', '--nomove', action='store_false', - default=True, dest='move', help="don't move files in library") -update_cmd.parser.add_option('-p', '--pretend', action='store_true', - help="show all changes but do nothing") -update_cmd.parser.add_option('-f', '--format', action='store', - help='print with custom format', default=None) + def update_func(lib, opts, args): - update_items(lib, decargs(args), opts.album, opts.move, opts.pretend) + update_items(lib, decargs(args), opts.album, ui.should_move(opts.move), + opts.pretend) + + +update_cmd = ui.Subcommand( + u'update', help=u'update the library', aliases=(u'upd', u'up',) +) +update_cmd.parser.add_album_option() +update_cmd.parser.add_format_option() +update_cmd.parser.add_option( + u'-m', u'--move', action='store_true', dest='move', + help=u"move files in the library directory" +) +update_cmd.parser.add_option( + u'-M', u'--nomove', action='store_false', dest='move', + help=u"don't move files in library" +) +update_cmd.parser.add_option( + u'-p', u'--pretend', action='store_true', + help=u"show all changes but do nothing" +) update_cmd.func = update_func default_commands.append(update_cmd) @@ -990,17 +1201,22 @@ def remove_items(lib, query, album, delete): # Get the matching items. items, albums = _do_query(lib, query, album) - # Show all the items. - for item in items: - ui.print_obj(item, lib) - - # Confirm with user. + # Prepare confirmation with user. print_() if delete: - prompt = 'Really DELETE %i files (y/n)?' % len(items) + fmt = u'$path - $title' + prompt = u'Really DELETE %i file%s (y/n)?' 
% \ + (len(items), 's' if len(items) > 1 else '') else: - prompt = 'Really remove %i items from the library (y/n)?' % \ - len(items) + fmt = '' + prompt = u'Really remove %i item%s from the library (y/n)?' % \ + (len(items), 's' if len(items) > 1 else '') + + # Show all the items. + for item in items: + ui.print_(format(item, fmt)) + + # Confirm with user. if not ui.input_yn(prompt, True): return @@ -1009,14 +1225,19 @@ def remove_items(lib, query, album, delete): for obj in (albums if album else items): obj.remove(delete) -remove_cmd = ui.Subcommand('remove', - help='remove matching items from the library', aliases=('rm',)) -remove_cmd.parser.add_option("-d", "--delete", action="store_true", - help="also remove files from disk") -remove_cmd.parser.add_option('-a', '--album', action='store_true', - help='match albums instead of tracks') + def remove_func(lib, opts, args): remove_items(lib, decargs(args), opts.album, opts.delete) + + +remove_cmd = ui.Subcommand( + u'remove', help=u'remove matching items from the library', aliases=(u'rm',) +) +remove_cmd.parser.add_option( + u"-d", u"--delete", action="store_true", + help=u"also remove files from disk" +) +remove_cmd.parser.add_album_option() remove_cmd.func = remove_func default_commands.append(remove_cmd) @@ -1032,34 +1253,55 @@ def show_stats(lib, query, exact): total_items = 0 artists = set() albums = set() + album_artists = set() for item in items: if exact: - total_size += os.path.getsize(item.path) + try: + total_size += os.path.getsize(syspath(item.path)) + except OSError as exc: + log.info(u'could not get size of {}: {}', item.path, exc) else: total_size += int(item.length * item.bitrate / 8) total_time += item.length total_items += 1 artists.add(item.artist) - albums.add(item.album) + album_artists.add(item.albumartist) + if item.album_id: + albums.add(item.album_id) - size_str = '' + ui.human_bytes(total_size) + size_str = u'' + ui.human_bytes(total_size) if exact: - size_str += ' ({0} 
bytes)'.format(total_size) + size_str += u' ({0} bytes)'.format(total_size) + + print_(u"""Tracks: {0} +Total time: {1}{2} +{3}: {4} +Artists: {5} +Albums: {6} +Album artists: {7}""".format( + total_items, + ui.human_seconds(total_time), + u' ({0:.2f} seconds)'.format(total_time) if exact else '', + u'Total size' if exact else u'Approximate total size', + size_str, + len(artists), + len(albums), + len(album_artists)), + ) - print_("""Tracks: {0} -Total time: {1} ({2:.2f} seconds) -Total size: {3} -Artists: {4} -Albums: {5}""".format(total_items, ui.human_seconds(total_time), total_time, - size_str, len(artists), len(albums))) -stats_cmd = ui.Subcommand('stats', - help='show statistics about the library or a query') -stats_cmd.parser.add_option('-e', '--exact', action='store_true', - help='get exact file sizes') def stats_func(lib, opts, args): show_stats(lib, decargs(args), opts.exact) + + +stats_cmd = ui.Subcommand( + u'stats', help=u'show statistics about the library or a query' +) +stats_cmd.parser.add_option( + u'-e', u'--exact', action='store_true', + help=u'exact size and time' +) stats_cmd.func = stats_func default_commands.append(stats_cmd) @@ -1067,15 +1309,18 @@ default_commands.append(stats_cmd) # version: Show current beets version. def show_version(lib, opts, args): - print_('beets version %s' % beets.__version__) + print_(u'beets version %s' % beets.__version__) # Show plugins. 
- names = [p.name for p in plugins.find_plugins()] + names = sorted(p.name for p in plugins.find_plugins()) if names: - print_('plugins:', ', '.join(names)) + print_(u'plugins:', ', '.join(names)) else: - print_('no plugins loaded') -version_cmd = ui.Subcommand('version', - help='output version information') + print_(u'no plugins loaded') + + +version_cmd = ui.Subcommand( + u'version', help=u'output version information' +) version_cmd.func = show_version default_commands.append(version_cmd) @@ -1083,13 +1328,17 @@ default_commands.append(version_cmd) # modify: Declaratively change metadata. def modify_items(lib, mods, dels, query, write, move, album, confirm): - """Modifies matching items according to key=value assignments.""" + """Modifies matching items according to user-specified assignments and + deletions. + + `mods` is a dictionary of field and value pairse indicating + assignments. `dels` is a list of fields to be deleted. + """ # Parse key=value specifications into a dictionary. model_cls = library.Album if album else library.Item - fsets = {} - for mod in mods: - key, value = mod.split('=', 1) - fsets[key] = model_cls._parse(key, value) + + for key, value in mods.items(): + mods[key] = model_cls._parse(key, value) # Get the items to modify. items, albums = _do_query(lib, query, album, False) @@ -1097,89 +1346,115 @@ def modify_items(lib, mods, dels, query, write, move, album, confirm): # Apply changes *temporarily*, preview them, and collect modified # objects. - print_('Modifying %i %ss.' % (len(objs), 'album' if album else 'item')) + print_(u'Modifying {0} {1}s.' + .format(len(objs), u'album' if album else u'item')) changed = set() for obj in objs: - for field, value in fsets.iteritems(): - obj[field] = value - for field in dels: - del obj[field] - if ui.show_model_changes(obj): + if print_and_modify(obj, mods, dels): changed.add(obj) # Still something to do? 
if not changed: - print_('No changes to make.') + print_(u'No changes to make.') return # Confirm action. if confirm: - extra = ' and write tags' if write else '' - if not ui.input_yn('Really modify%s (Y/n)?' % extra): - return + if write and move: + extra = u', move and write tags' + elif write: + extra = u' and write tags' + elif move: + extra = u' and move' + else: + extra = u'' - # Apply changes to database. + changed = ui.input_select_objects( + u'Really modify%s' % extra, changed, + lambda o: print_and_modify(o, mods, dels) + ) + + # Apply changes to database and files with lib.transaction(): for obj in changed: - if move: - cur_path = obj.path - if lib.directory in ancestry(cur_path): # In library? - log.debug('moving object %s' % cur_path) - obj.move() + obj.try_sync(write, move) - obj.store() - # Apply tags if requested. - if write: - if album: - changed_items = itertools.chain(*(a.items() for a in changed)) - else: - changed_items = changed - for item in changed_items: - try: - item.write() - except library.FileOperationError as exc: - log.error(exc) +def print_and_modify(obj, mods, dels): + """Print the modifications to an item and return a bool indicating + whether any changes were made. 
-modify_cmd = ui.Subcommand('modify', - help='change metadata fields', aliases=('mod',)) -modify_cmd.parser.add_option('-M', '--nomove', action='store_false', - default=True, dest='move', help="don't move files in library") -modify_cmd.parser.add_option('-w', '--write', action='store_true', - default=None, help="write new metadata to files' tags (default)") -modify_cmd.parser.add_option('-W', '--nowrite', action='store_false', - dest='write', help="don't write metadata (opposite of -w)") -modify_cmd.parser.add_option('-a', '--album', action='store_true', - help='modify whole albums instead of tracks') -modify_cmd.parser.add_option('-y', '--yes', action='store_true', - help='skip confirmation') -modify_cmd.parser.add_option('-f', '--format', action='store', - help='print with custom format', default=None) -def modify_func(lib, opts, args): - args = decargs(args) - mods = [] + `mods` is a dictionary of fields and values to update on the object; + `dels` is a sequence of fields to delete. + """ + obj.update(mods) + for field in dels: + try: + del obj[field] + except KeyError: + pass + return ui.show_model_changes(obj) + + +def modify_parse_args(args): + """Split the arguments for the modify subcommand into query parts, + assignments (field=value), and deletions (field!). Returns the result as + a three-tuple in that order. + """ + mods = {} dels = [] query = [] for arg in args: if arg.endswith('!') and '=' not in arg and ':' not in arg: - dels.append(arg[:-1]) - elif '=' in arg: - mods.append(arg) + dels.append(arg[:-1]) # Strip trailing !. 
+ elif '=' in arg and ':' not in arg.split('=', 1)[0]: + key, val = arg.split('=', 1) + mods[key] = val else: query.append(arg) + return query, mods, dels + + +def modify_func(lib, opts, args): + query, mods, dels = modify_parse_args(decargs(args)) if not mods and not dels: - raise ui.UserError('no modifications specified') - write = opts.write if opts.write is not None else \ - config['import']['write'].get(bool) - modify_items(lib, mods, dels, query, write, opts.move, opts.album, - not opts.yes) + raise ui.UserError(u'no modifications specified') + modify_items(lib, mods, dels, query, ui.should_write(opts.write), + ui.should_move(opts.move), opts.album, not opts.yes) + + +modify_cmd = ui.Subcommand( + u'modify', help=u'change metadata fields', aliases=(u'mod',) +) +modify_cmd.parser.add_option( + u'-m', u'--move', action='store_true', dest='move', + help=u"move files in the library directory" +) +modify_cmd.parser.add_option( + u'-M', u'--nomove', action='store_false', dest='move', + help=u"don't move files in library" +) +modify_cmd.parser.add_option( + u'-w', u'--write', action='store_true', default=None, + help=u"write new metadata to files' tags (default)" +) +modify_cmd.parser.add_option( + u'-W', u'--nowrite', action='store_false', dest='write', + help=u"don't write metadata (opposite of -w)" +) +modify_cmd.parser.add_album_option() +modify_cmd.parser.add_format_option(target='item') +modify_cmd.parser.add_option( + u'-y', u'--yes', action='store_true', + help=u'skip confirmation' +) modify_cmd.func = modify_func default_commands.append(modify_cmd) # move: Move/copy files to the library or a new base directory. -def move_items(lib, dest, query, copy, album): +def move_items(lib, dest, query, copy, album, pretend, confirm=False): """Moves or copies items to a new base directory, given by dest. If dest is None, then the library's base directory is used, making the command "consolidate" files. 
@@ -1187,38 +1462,78 @@ def move_items(lib, dest, query, copy, album): items, albums = _do_query(lib, query, album, False) objs = albums if album else items - action = 'Copying' if copy else 'Moving' - entity = 'album' if album else 'item' - log.info('%s %i %ss.' % (action, len(objs), entity)) - for obj in objs: - log.debug('moving: %s' % obj.path) + # Filter out files that don't need to be moved. + isitemmoved = lambda item: item.path != item.destination(basedir=dest) + isalbummoved = lambda album: any(isitemmoved(i) for i in album.items()) + objs = [o for o in objs if (isalbummoved if album else isitemmoved)(o)] + + action = u'Copying' if copy else u'Moving' + act = u'copy' if copy else u'move' + entity = u'album' if album else u'item' + log.info(u'{0} {1} {2}{3}.', action, len(objs), entity, + u's' if len(objs) != 1 else u'') + if not objs: + return + + if pretend: + if album: + show_path_changes([(item.path, item.destination(basedir=dest)) + for obj in objs for item in obj.items()]) + else: + show_path_changes([(obj.path, obj.destination(basedir=dest)) + for obj in objs]) + else: + if confirm: + objs = ui.input_select_objects( + u'Really %s' % act, objs, + lambda o: show_path_changes( + [(o.path, o.destination(basedir=dest))])) + + for obj in objs: + log.debug(u'moving: {0}', util.displayable_path(obj.path)) + + obj.move(copy, basedir=dest) + obj.store() - obj.move(copy, basedir=dest) - obj.store() -move_cmd = ui.Subcommand('move', - help='move or copy items', aliases=('mv',)) -move_cmd.parser.add_option('-d', '--dest', metavar='DIR', dest='dest', - help='destination directory') -move_cmd.parser.add_option('-c', '--copy', default=False, action='store_true', - help='copy instead of moving') -move_cmd.parser.add_option('-a', '--album', default=False, action='store_true', - help='match whole albums instead of tracks') def move_func(lib, opts, args): dest = opts.dest if dest is not None: dest = normpath(dest) if not os.path.isdir(dest): - raise ui.UserError('no 
such directory: %s' % dest) + raise ui.UserError(u'no such directory: %s' % dest) - move_items(lib, dest, decargs(args), opts.copy, opts.album) + move_items(lib, dest, decargs(args), opts.copy, opts.album, opts.pretend, + opts.timid) + + +move_cmd = ui.Subcommand( + u'move', help=u'move or copy items', aliases=(u'mv',) +) +move_cmd.parser.add_option( + u'-d', u'--dest', metavar='DIR', dest='dest', + help=u'destination directory' +) +move_cmd.parser.add_option( + u'-c', u'--copy', default=False, action='store_true', + help=u'copy instead of moving' +) +move_cmd.parser.add_option( + u'-p', u'--pretend', default=False, action='store_true', + help=u'show how files would be moved, but don\'t touch anything' +) +move_cmd.parser.add_option( + u'-t', u'--timid', dest='timid', action='store_true', + help=u'always confirm all actions' +) +move_cmd.parser.add_album_option() move_cmd.func = move_func default_commands.append(move_cmd) # write: Write tags into files. -def write_items(lib, query, pretend): +def write_items(lib, query, pretend, force): """Write tag information from the database to the respective files in the filesystem. """ @@ -1227,48 +1542,45 @@ def write_items(lib, query, pretend): for item in items: # Item deleted? if not os.path.exists(syspath(item.path)): - log.info(u'missing file: {0}'.format( - util.displayable_path(item.path) - )) + log.info(u'missing file: {0}', util.displayable_path(item.path)) continue # Get an Item object reflecting the "clean" (on-disk) state. try: clean_item = library.Item.from_path(item.path) - except Exception as exc: - log.error(u'error reading {0}: {1}'.format( - displayable_path(item.path), exc - )) + except library.ReadError as exc: + log.error(u'error reading {0}: {1}', + displayable_path(item.path), exc) continue # Check for and display changes. 
changed = ui.show_model_changes(item, clean_item, - library.ITEM_KEYS_WRITABLE, always=True) - if changed and not pretend: - try: - item.write() - except library.FileOperationError as exc: - log.error(exc) + library.Item._media_tag_fields, force) + if (changed or force) and not pretend: + # We use `try_sync` here to keep the mtime up to date in the + # database. + item.try_sync(True, False) + -write_cmd = ui.Subcommand('write', help='write tag information to files') -write_cmd.parser.add_option('-p', '--pretend', action='store_true', - help="show all changes but do nothing") def write_func(lib, opts, args): - write_items(lib, decargs(args), opts.pretend) + write_items(lib, decargs(args), opts.pretend, opts.force) + + +write_cmd = ui.Subcommand(u'write', help=u'write tag information to files') +write_cmd.parser.add_option( + u'-p', u'--pretend', action='store_true', + help=u"show all changes but do nothing" +) +write_cmd.parser.add_option( + u'-f', u'--force', action='store_true', + help=u"write tags even if the existing tags match the database" +) write_cmd.func = write_func default_commands.append(write_cmd) # config: Show and edit user configuration. -config_cmd = ui.Subcommand('config', - help='show or edit the user configuration') -config_cmd.parser.add_option('-p', '--paths', action='store_true', - help='show files that configuration was loaded from') -config_cmd.parser.add_option('-e', '--edit', action='store_true', - help='edit user configuration with $EDITOR') -config_cmd.parser.add_option('-d', '--defaults', action='store_true', - help='include the default configuration') def config_func(lib, opts, args): # Make sure lazy configuration is loaded config.resolve() @@ -1289,52 +1601,74 @@ def config_func(lib, opts, args): filenames.insert(0, user_path) for filename in filenames: - print(filename) + print_(filename) # Open in editor. 
elif opts.edit: - path = config.user_config_path() - - if 'EDITOR' in os.environ: - editor = os.environ['EDITOR'] - args = [editor, editor, path] - elif platform.system() == 'Darwin': - args = ['open', 'open', '-n', path] - elif platform.system() == 'Windows': - # On windows we can execute arbitrary files. The os will - # take care of starting an appropriate application - args = [path, path] - else: - # Assume Unix - args = ['xdg-open', 'xdg-open', path] - - try: - os.execlp(*args) - except OSError: - raise ui.UserError("Could not edit configuration. Please" - "set the EDITOR environment variable.") + config_edit() # Dump configuration. else: - print(config.dump(full=opts.defaults)) + print_(config.dump(full=opts.defaults, redact=opts.redact)) + +def config_edit(): + """Open a program to edit the user configuration. + An empty config file is created if no existing config file exists. + """ + path = config.user_config_path() + editor = util.editor_command() + try: + if not os.path.isfile(path): + open(path, 'w+').close() + util.interactive_open([path], editor) + except OSError as exc: + message = u"Could not edit configuration: {0}".format(exc) + if not editor: + message += u". 
Please set the EDITOR environment variable" + raise ui.UserError(message) + +config_cmd = ui.Subcommand(u'config', + help=u'show or edit the user configuration') +config_cmd.parser.add_option( + u'-p', u'--paths', action='store_true', + help=u'show files that configuration was loaded from' +) +config_cmd.parser.add_option( + u'-e', u'--edit', action='store_true', + help=u'edit user configuration with $EDITOR' +) +config_cmd.parser.add_option( + u'-d', u'--defaults', action='store_true', + help=u'include the default configuration' +) +config_cmd.parser.add_option( + u'-c', u'--clear', action='store_false', + dest='redact', default=True, + help=u'do not redact sensitive fields' +) config_cmd.func = config_func default_commands.append(config_cmd) # completion: print completion script -completion_cmd = ui.Subcommand('completion', - help='print shell script that provides command line completion') def print_completion(*args): for line in completion_script(default_commands + plugins.commands()): - print(line, end='') - if not (os.path.isfile(u'/etc/bash_completion') or - os.path.isfile(u'/usr/share/bash-completion/bash_completion') or - os.path.isfile(u'/usr/share/local/bash-completion/bash_completion')): + print_(line, end='') + if not any(map(os.path.isfile, BASH_COMPLETION_PATHS)): log.warn(u'Warning: Unable to find the bash-completion package. ' u'Command line completion might not work.') +BASH_COMPLETION_PATHS = map(syspath, [ + u'/etc/bash_completion', + u'/usr/share/bash-completion/bash_completion', + u'/usr/share/local/bash-completion/bash_completion', + u'/opt/local/share/bash-completion/bash_completion', # SmartOS + u'/usr/local/etc/bash_completion', # Homebrew +]) + + def completion_script(commands): """Yield the full completion shell script as strings. 
@@ -1355,7 +1689,8 @@ def completion_script(commands): command_names.append(name) for alias in cmd.aliases: - aliases[alias] = name + if re.match(r'^\w+$', alias): + aliases[alias] = name options[name] = {'flags': [], 'opts': []} for opts in cmd.parser._get_all_options()[1:]: @@ -1370,46 +1705,50 @@ def completion_script(commands): # Add global options options['_global'] = { - 'flags': ['-v', '--verbose'], - 'opts': '-l --library -c --config -d --directory -h --help'.split(' ') + 'flags': [u'-v', u'--verbose'], + 'opts': u'-l --library -c --config -d --directory -h --help'.split( + u' ') } - # Help subcommand - command_names.append('help') - # Add flags common to all commands options['_common'] = { - 'flags': ['-h', '--help'] + 'flags': [u'-h', u'--help'] } # Start generating the script - yield "_beet() {\n" + yield u"_beet() {\n" # Command names - yield " local commands='%s'\n" % ' '.join(command_names) - yield "\n" + yield u" local commands='%s'\n" % ' '.join(command_names) + yield u"\n" # Command aliases - yield " local aliases='%s'\n" % ' '.join(aliases.keys()) + yield u" local aliases='%s'\n" % ' '.join(aliases.keys()) for alias, cmd in aliases.items(): - yield " local alias__%s=%s\n" % (alias, cmd) - yield '\n' + yield u" local alias__%s=%s\n" % (alias, cmd) + yield u'\n' # Fields - yield " fields='%s'\n" % ' '.join( - set(library.ITEM_KEYS + library.ALBUM_KEYS)) + yield u" fields='%s'\n" % ' '.join( + set(library.Item._fields.keys() + library.Album._fields.keys()) + ) # Command options for cmd, opts in options.items(): for option_type, option_list in opts.items(): if option_list: option_list = ' '.join(option_list) - yield " local %s__%s='%s'\n" % (option_type, cmd, option_list) + yield u" local %s__%s='%s'\n" % ( + option_type, cmd, option_list) - yield ' _beet_dispatch\n' - yield '}\n' + yield u' _beet_dispatch\n' + yield u'}\n' +completion_cmd = ui.Subcommand( + 'completion', + help=u'print shell script that provides command line completion' +) 
completion_cmd.func = print_completion completion_cmd.hide = True default_commands.append(completion_cmd) diff --git a/libs/beets/ui/migrate.py b/libs/beets/ui/migrate.py deleted file mode 100644 index 784d7c82..00000000 --- a/libs/beets/ui/migrate.py +++ /dev/null @@ -1,401 +0,0 @@ -# This file is part of beets. -# Copyright 2013, Adrian Sampson. -# -# Permission is hereby granted, free of charge, to any person obtaining -# a copy of this software and associated documentation files (the -# "Software"), to deal in the Software without restriction, including -# without limitation the rights to use, copy, modify, merge, publish, -# distribute, sublicense, and/or sell copies of the Software, and to -# permit persons to whom the Software is furnished to do so, subject to -# the following conditions: -# -# The above copyright notice and this permission notice shall be -# included in all copies or substantial portions of the Software. - -"""Conversion from legacy (pre-1.1) configuration to Confit/YAML -configuration. -""" -import os -import ConfigParser -import codecs -import yaml -import logging -import time -import itertools -import re - -import beets -from beets import util -from beets import ui -from beets.util import confit - -CONFIG_PATH_VAR = 'BEETSCONFIG' -DEFAULT_CONFIG_FILENAME_UNIX = '.beetsconfig' -DEFAULT_CONFIG_FILENAME_WINDOWS = 'beetsconfig.ini' -DEFAULT_LIBRARY_FILENAME_UNIX = '.beetsmusic.blb' -DEFAULT_LIBRARY_FILENAME_WINDOWS = 'beetsmusic.blb' -WINDOWS_BASEDIR = os.environ.get('APPDATA') or '~' - -OLD_CONFIG_SUFFIX = '.old' -PLUGIN_NAMES = { - 'rdm': 'random', - 'fuzzy_search': 'fuzzy', -} -AUTO_KEYS = ('automatic', 'autofetch', 'autoembed', 'autoscrub') -IMPORTFEEDS_PREFIX = 'feeds_' -CONFIG_MIGRATED_MESSAGE = u""" -You appear to be upgrading from beets 1.0 (or earlier) to 1.1. Your -configuration file has been migrated automatically to: -{newconfig} -Edit this file to configure beets. You might want to remove your -old-style ".beetsconfig" file now. 
See the documentation for more -details on the new configuration system: -http://beets.readthedocs.org/page/reference/config.html -""".strip() -DB_MIGRATED_MESSAGE = u'Your database file has also been copied to:\n{newdb}' -YAML_COMMENT = '# Automatically migrated from legacy .beetsconfig.\n\n' - -log = logging.getLogger('beets') - -# An itertools recipe. -def grouper(n, iterable): - args = [iter(iterable)] * n - return itertools.izip_longest(*args) - -def _displace(fn): - """Move a file aside using a timestamp suffix so a new file can be - put in its place. - """ - util.move( - fn, - u'{0}.old.{1}'.format(fn, int(time.time())), - True - ) - -def default_paths(): - """Produces the appropriate default config and library database - paths for the current system. On Unix, this is always in ~. On - Windows, tries ~ first and then $APPDATA for the config and library - files (for backwards compatibility). - """ - windows = os.path.__name__ == 'ntpath' - if windows: - windata = os.environ.get('APPDATA') or '~' - - # Shorthand for joining paths. - def exp(*vals): - return os.path.expanduser(os.path.join(*vals)) - - config = exp('~', DEFAULT_CONFIG_FILENAME_UNIX) - if windows and not os.path.exists(config): - config = exp(windata, DEFAULT_CONFIG_FILENAME_WINDOWS) - - libpath = exp('~', DEFAULT_LIBRARY_FILENAME_UNIX) - if windows and not os.path.exists(libpath): - libpath = exp(windata, DEFAULT_LIBRARY_FILENAME_WINDOWS) - - return config, libpath - -def get_config(): - """Using the same logic as beets 1.0, locate and read the - .beetsconfig file. Return a ConfigParser instance or None if no - config is found. 
- """ - default_config, default_libpath = default_paths() - if CONFIG_PATH_VAR in os.environ: - configpath = os.path.expanduser(os.environ[CONFIG_PATH_VAR]) - else: - configpath = default_config - - config = ConfigParser.SafeConfigParser() - if os.path.exists(util.syspath(configpath)): - with codecs.open(configpath, 'r', encoding='utf-8') as f: - config.readfp(f) - return config, configpath - else: - return None, configpath - -def flatten_config(config): - """Given a ConfigParser, flatten the values into a dict-of-dicts - representation where each section gets its own dictionary of values. - """ - out = confit.OrderedDict() - for section in config.sections(): - sec_dict = out[section] = confit.OrderedDict() - for option in config.options(section): - sec_dict[option] = config.get(section, option, True) - return out - -def transform_value(value): - """Given a string read as the value of a config option, return a - massaged version of that value (possibly with a different type). - """ - # Booleans. - if value.lower() in ('false', 'no', 'off'): - return False - elif value.lower() in ('true', 'yes', 'on'): - return True - - # Integers. - try: - return int(value) - except ValueError: - pass - - # Floats. - try: - return float(value) - except ValueError: - pass - - return value - -def transform_data(data): - """Given a dict-of-dicts representation of legacy config data, tweak - the data into a new form. This new form is suitable for dumping as - YAML. - """ - out = confit.OrderedDict() - - for section, pairs in data.items(): - if section == 'beets': - # The "main" section. In the new config system, these values - # are in the "root": no section at all. - for key, value in pairs.items(): - value = transform_value(value) - - if key.startswith('import_'): - # Importer config is now under an "import:" key. - if 'import' not in out: - out['import'] = confit.OrderedDict() - out['import'][key[7:]] = value - - elif key == 'plugins': - # Renamed plugins. 
- plugins = value.split() - new_plugins = [PLUGIN_NAMES.get(p, p) for p in plugins] - out['plugins'] = ' '.join(new_plugins) - - elif key == 'replace': - # YAMLy representation for character replacements. - replacements = confit.OrderedDict() - for pat, repl in grouper(2, value.split()): - if repl == '': - repl = '' - replacements[pat] = repl - out['replace'] = replacements - - elif key == 'pluginpath': - # Used to be a colon-separated string. Now a list. - out['pluginpath'] = value.split(':') - - else: - out[key] = value - - elif pairs: - # Other sections (plugins, etc). - sec_out = out[section] = confit.OrderedDict() - for key, value in pairs.items(): - - # Standardized "auto" option. - if key in AUTO_KEYS: - key = 'auto' - - # Unnecessary : hack in queries. - if section == 'paths': - key = key.replace('_', ':') - - # Changed option names for importfeeds plugin. - if section == 'importfeeds': - if key.startswith(IMPORTFEEDS_PREFIX): - key = key[len(IMPORTFEEDS_PREFIX):] - - sec_out[key] = transform_value(value) - - return out - -class Dumper(yaml.SafeDumper): - """A PyYAML Dumper that represents OrderedDicts as ordinary mappings - (in order, of course). 
- """ - # From http://pyyaml.org/attachment/ticket/161/use_ordered_dict.py - def represent_mapping(self, tag, mapping, flow_style=None): - value = [] - node = yaml.MappingNode(tag, value, flow_style=flow_style) - if self.alias_key is not None: - self.represented_objects[self.alias_key] = node - best_style = True - if hasattr(mapping, 'items'): - mapping = list(mapping.items()) - for item_key, item_value in mapping: - node_key = self.represent_data(item_key) - node_value = self.represent_data(item_value) - if not (isinstance(node_key, yaml.ScalarNode) and \ - not node_key.style): - best_style = False - if not (isinstance(node_value, yaml.ScalarNode) and \ - not node_value.style): - best_style = False - value.append((node_key, node_value)) - if flow_style is None: - if self.default_flow_style is not None: - node.flow_style = self.default_flow_style - else: - node.flow_style = best_style - return node -Dumper.add_representer(confit.OrderedDict, Dumper.represent_dict) - -def migrate_config(replace=False): - """Migrate a legacy beetsconfig file to a new-style config.yaml file - in an appropriate place. If `replace` is enabled, then any existing - config.yaml will be moved aside. Otherwise, the process is aborted - when the file exists. - """ - - # Load legacy configuration data, if any. - config, configpath = get_config() - if not config: - log.debug(u'no config file found at {0}'.format( - util.displayable_path(configpath) - )) - return - - # Get the new configuration file path and possibly move it out of - # the way. - destfn = os.path.join(beets.config.config_dir(), confit.CONFIG_FILENAME) - if os.path.exists(destfn): - if replace: - log.debug(u'moving old config aside: {0}'.format( - util.displayable_path(destfn) - )) - _displace(destfn) - else: - # File exists and we won't replace it. We're done. 
- return - - log.debug(u'migrating config file {0}'.format( - util.displayable_path(configpath) - )) - - # Convert the configuration to a data structure ready to be dumped - # as the new Confit file. - data = transform_data(flatten_config(config)) - - # Encode result as YAML. - yaml_out = yaml.dump( - data, - Dumper=Dumper, - default_flow_style=False, - indent=4, - width=1000, - ) - # A ridiculous little hack to add some whitespace between "sections" - # in the YAML output. I hope this doesn't break any YAML syntax. - yaml_out = re.sub(r'(\n\w+:\n [^-\s])', '\n\\1', yaml_out) - yaml_out = YAML_COMMENT + yaml_out - - # Write the data to the new config destination. - log.debug(u'writing migrated config to {0}'.format( - util.displayable_path(destfn) - )) - with open(destfn, 'w') as f: - f.write(yaml_out) - return destfn - -def migrate_db(replace=False): - """Copy the beets library database file to the new location (e.g., - from ~/.beetsmusic.blb to ~/.config/beets/library.db). - """ - _, srcfn = default_paths() - destfn = beets.config['library'].as_filename() - - if not os.path.exists(srcfn) or srcfn == destfn: - # Old DB does not exist or we're configured to point to the same - # database. Do nothing. - return - - if os.path.exists(destfn): - if replace: - log.debug(u'moving old database aside: {0}'.format( - util.displayable_path(destfn) - )) - _displace(destfn) - else: - return - - log.debug(u'copying database from {0} to {1}'.format( - util.displayable_path(srcfn), util.displayable_path(destfn) - )) - util.copy(srcfn, destfn) - return destfn - -def migrate_state(replace=False): - """Copy the beets runtime state file from the old path (i.e., - ~/.beetsstate) to the new path (i.e., ~/.config/beets/state.pickle). 
- """ - srcfn = os.path.expanduser(os.path.join('~', '.beetsstate')) - if not os.path.exists(srcfn): - return - - destfn = beets.config['statefile'].as_filename() - if os.path.exists(destfn): - if replace: - _displace(destfn) - else: - return - - log.debug(u'copying state file from {0} to {1}'.format( - util.displayable_path(srcfn), util.displayable_path(destfn) - )) - util.copy(srcfn, destfn) - return destfn - - -# Automatic migration when beets starts. - -def automigrate(): - """Migrate the configuration, database, and state files. If any - migration occurs, print out a notice with some helpful next steps. - """ - config_fn = migrate_config() - db_fn = migrate_db() - migrate_state() - - if config_fn: - ui.print_(ui.colorize('fuchsia', u'MIGRATED CONFIGURATION')) - - ui.print_(CONFIG_MIGRATED_MESSAGE.format( - newconfig=util.displayable_path(config_fn)) - ) - if db_fn: - ui.print_(DB_MIGRATED_MESSAGE.format( - newdb=util.displayable_path(db_fn) - )) - - ui.input_(ui.colorize('fuchsia', u'Press ENTER to continue:')) - ui.print_() - - -# CLI command for explicit migration. - -migrate_cmd = ui.Subcommand('migrate', help='convert legacy config') -def migrate_func(lib, opts, args): - """Explicit command for migrating files. Existing files in each - destination are moved aside. 
- """ - config_fn = migrate_config(replace=True) - if config_fn: - log.info(u'Migrated configuration to: {0}'.format( - util.displayable_path(config_fn) - )) - db_fn = migrate_db(replace=True) - if db_fn: - log.info(u'Migrated library database to: {0}'.format( - util.displayable_path(db_fn) - )) - state_fn = migrate_state(replace=True) - if state_fn: - log.info(u'Migrated state file to: {0}'.format( - util.displayable_path(state_fn) - )) -migrate_cmd.func = migrate_func diff --git a/libs/beets/util/__init__.py b/libs/beets/util/__init__.py index f5810ff3..3cc270ae 100644 --- a/libs/beets/util/__init__.py +++ b/libs/beets/util/__init__.py @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2013, Adrian Sampson. +# Copyright 2016, Adrian Sampson. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -13,20 +14,25 @@ # included in all copies or substantial portions of the Software. """Miscellaneous utility functions.""" -from __future__ import division +from __future__ import division, absolute_import, print_function import os import sys import re import shutil import fnmatch -from collections import defaultdict +from collections import Counter import traceback import subprocess +import platform +import shlex +from beets.util import hidden + MAX_FILENAME_LENGTH = 200 WINDOWS_MAGIC_PREFIX = u'\\\\?\\' + class HumanReadableException(Exception): """An Exception that can include a human-readable error message to be logged without a traceback. Can preserve a traceback for @@ -51,12 +57,12 @@ class HumanReadableException(Exception): def _gerund(self): """Generate a (likely) gerund form of the English verb. 
""" - if ' ' in self.verb: + if u' ' in self.verb: return self.verb - gerund = self.verb[:-1] if self.verb.endswith('e') else self.verb - gerund += 'ing' + gerund = self.verb[:-1] if self.verb.endswith(u'e') else self.verb + gerund += u'ing' return gerund - + def _reasonstr(self): """Get the reason as a string.""" if isinstance(self.reason, unicode): @@ -80,7 +86,8 @@ class HumanReadableException(Exception): """ if self.tb: logger.debug(self.tb) - logger.error(u'{0}: {1}'.format(self.error_kind, self.args[0])) + logger.error(u'{0}: {1}', self.error_kind, self.args[0]) + class FilesystemError(HumanReadableException): """An error that occurred while performing a filesystem manipulation @@ -111,6 +118,7 @@ class FilesystemError(HumanReadableException): return u'{0} {1}'.format(self._reasonstr(), clause) + def normpath(path): """Provide the canonical form of the path suitable for storing in the database. @@ -119,6 +127,7 @@ def normpath(path): path = os.path.normpath(os.path.abspath(os.path.expanduser(path))) return bytestring_path(path) + def ancestry(path): """Return a list consisting of path's parent directory, its grandparent, and so on. For instance: @@ -137,11 +146,13 @@ def ancestry(path): break last_path = path - if path: # don't yield '' + if path: + # don't yield '' out.insert(0, path) return out -def sorted_walk(path, ignore=(), logger=None): + +def sorted_walk(path, ignore=(), ignore_hidden=False, logger=None): """Like `os.walk`, but yields things in case-insensitive sorted, breadth-first order. Directory and file names matching any glob pattern in `ignore` are skipped. If `logger` is provided, then @@ -175,10 +186,11 @@ def sorted_walk(path, ignore=(), logger=None): # Add to output as either a file or a directory. 
cur = os.path.join(path, base) - if os.path.isdir(syspath(cur)): - dirs.append(base) - else: - files.append(base) + if (ignore_hidden and not hidden.is_hidden(cur)) or not ignore_hidden: + if os.path.isdir(syspath(cur)): + dirs.append(base) + else: + files.append(base) # Sort lists (case-insensitive) and yield the current level. dirs.sort(key=bytes.lower) @@ -189,9 +201,10 @@ def sorted_walk(path, ignore=(), logger=None): for base in dirs: cur = os.path.join(path, base) # yield from sorted_walk(...) - for res in sorted_walk(cur, ignore, logger): + for res in sorted_walk(cur, ignore, ignore_hidden, logger): yield res + def mkdirall(path): """Make all the enclosing directories of path (like mkdir -p on the parent). @@ -204,6 +217,7 @@ def mkdirall(path): raise FilesystemError(exc, 'create', (ancestor,), traceback.format_exc()) + def fnmatch_all(names, patterns): """Determine whether all strings in `names` match at least one of the `patterns`, which should be shell glob expressions. @@ -218,6 +232,7 @@ def fnmatch_all(names, patterns): return False return True + def prune_dirs(path, root=None, clutter=('.DS_Store', 'Thumbs.db')): """If path is an empty directory, then remove it. Recursively remove path's ancestry up to root (which is never removed) where there are @@ -236,7 +251,7 @@ def prune_dirs(path, root=None, clutter=('.DS_Store', 'Thumbs.db')): ancestors = [] elif root in ancestors: # Only remove directories below the root. - ancestors = ancestors[ancestors.index(root)+1:] + ancestors = ancestors[ancestors.index(root) + 1:] else: # Remove nothing. return @@ -258,6 +273,7 @@ def prune_dirs(path, root=None, clutter=('.DS_Store', 'Thumbs.db')): else: break + def components(path): """Return a list of the path components in path. For instance: @@ -281,6 +297,7 @@ def components(path): return comps + def _fsencoding(): """Get the system's filesystem encoding. On Windows, this is always UTF-8 (not MBCS). 
@@ -295,12 +312,13 @@ def _fsencoding(): encoding = 'utf8' return encoding + def bytestring_path(path): - """Given a path, which is either a str or a unicode, returns a str + """Given a path, which is either a bytes or a unicode, returns a str path (ensuring that we never deal with Unicode pathnames). """ # Pass through bytestrings. - if isinstance(path, str): + if isinstance(path, bytes): return path # On Windows, remove the magic prefix added by `syspath`. This makes @@ -315,6 +333,7 @@ def bytestring_path(path): except (UnicodeError, LookupError): return path.encode('utf8') + def displayable_path(path, separator=u'; '): """Attempts to decode a bytestring path to a unicode object for the purpose of displaying it to the user. If the `path` argument is a @@ -324,7 +343,7 @@ def displayable_path(path, separator=u'; '): return separator.join(displayable_path(p) for p in path) elif isinstance(path, unicode): return path - elif not isinstance(path, str): + elif not isinstance(path, bytes): # A non-string object: just get its unicode representation. return unicode(path) @@ -333,6 +352,7 @@ def displayable_path(path, separator=u'; '): except (UnicodeError, LookupError): return path.decode('utf8', 'ignore') + def syspath(path, prefix=True): """Convert a path for use by the operating system. In particular, paths on Windows must receive a magic prefix and must be converted @@ -356,16 +376,22 @@ def syspath(path, prefix=True): encoding = sys.getfilesystemencoding() or sys.getdefaultencoding() path = path.decode(encoding, 'replace') - # Add the magic prefix if it isn't already there + # Add the magic prefix if it isn't already there. + # http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247.aspx if prefix and not path.startswith(WINDOWS_MAGIC_PREFIX): + if path.startswith(u'\\\\'): + # UNC path. Final path should look like \\?\UNC\... 
+ path = u'UNC' + path[1:] path = WINDOWS_MAGIC_PREFIX + path return path + def samefile(p1, p2): """Safer equality for paths.""" return shutil._samefile(syspath(p1), syspath(p2)) + def remove(path, soft=True): """Remove the file. If `soft`, then no error will be raised if the file does not exist. @@ -378,6 +404,7 @@ def remove(path, soft=True): except (OSError, IOError) as exc: raise FilesystemError(exc, 'delete', (path,), traceback.format_exc()) + def copy(path, dest, replace=False): """Copy a plain file. Permissions are not copied. If `dest` already exists, raises a FilesystemError unless `replace` is True. Has no @@ -389,13 +416,14 @@ def copy(path, dest, replace=False): path = syspath(path) dest = syspath(dest) if not replace and os.path.exists(dest): - raise FilesystemError('file exists', 'copy', (path, dest)) + raise FilesystemError(u'file exists', 'copy', (path, dest)) try: shutil.copyfile(path, dest) except (OSError, IOError) as exc: raise FilesystemError(exc, 'copy', (path, dest), traceback.format_exc()) + def move(path, dest, replace=False): """Rename a file. `dest` may not be a directory. If `dest` already exists, raises an OSError unless `replace` is True. Has no effect if @@ -409,7 +437,7 @@ def move(path, dest, replace=False): path = syspath(path) dest = syspath(dest) if os.path.exists(dest) and not replace: - raise FilesystemError('file exists', 'rename', (path, dest), + raise FilesystemError(u'file exists', 'rename', (path, dest), traceback.format_exc()) # First, try renaming the file. @@ -424,6 +452,27 @@ def move(path, dest, replace=False): raise FilesystemError(exc, 'move', (path, dest), traceback.format_exc()) + +def link(path, dest, replace=False): + """Create a symbolic link from path to `dest`. Raises an OSError if + `dest` already exists, unless `replace` is True. 
Does nothing if + `path` == `dest`.""" + if (samefile(path, dest)): + return + + path = syspath(path) + dest = syspath(dest) + if os.path.exists(dest) and not replace: + raise FilesystemError(u'file exists', 'rename', (path, dest), + traceback.format_exc()) + try: + os.symlink(path, dest) + except OSError: + raise FilesystemError(u'Operating system does not support symbolic ' + u'links.', 'link', (path, dest), + traceback.format_exc()) + + def unique_path(path): """Returns a version of ``path`` that does not exist on the filesystem. Specifically, if ``path` itself already exists, then @@ -433,7 +482,7 @@ def unique_path(path): return path base, ext = os.path.splitext(path) - match = re.search(r'\.(\d)+$', base) + match = re.search(br'\.(\d)+$', base) if match: num = int(match.group(1)) base = base[:match.start()] @@ -441,7 +490,7 @@ def unique_path(path): num = 0 while True: num += 1 - new_path = '%s.%i%s' % (base, num, ext) + new_path = b'%s.%i%s' % (base, num, ext) if not os.path.exists(new_path): return new_path @@ -450,13 +499,15 @@ def unique_path(path): # shares, which are sufficiently common as to cause frequent problems. # http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247.aspx CHAR_REPLACE = [ - (re.compile(ur'[\\/]'), u'_'), # / and \ -- forbidden everywhere. - (re.compile(ur'^\.'), u'_'), # Leading dot (hidden files on Unix). - (re.compile(ur'[\x00-\x1f]'), u''), # Control characters. - (re.compile(ur'[<>:"\?\*\|]'), u'_'), # Windows "reserved characters". - (re.compile(ur'\.$'), u'_'), # Trailing dots. - (re.compile(ur'\s+$'), u''), # Trailing whitespace. + (re.compile(r'[\\/]'), u'_'), # / and \ -- forbidden everywhere. + (re.compile(r'^\.'), u'_'), # Leading dot (hidden files on Unix). + (re.compile(r'[\x00-\x1f]'), u''), # Control characters. + (re.compile(r'[<>:"\?\*\|]'), u'_'), # Windows "reserved characters". + (re.compile(r'\.$'), u'_'), # Trailing dots. + (re.compile(r'\s+$'), u''), # Trailing whitespace. 
] + + def sanitize_path(path, replacements=None): """Takes a path (as a Unicode string) and makes sure that it is legal. Returns a new path. Only works with fragments; won't work @@ -477,6 +528,7 @@ def sanitize_path(path, replacements=None): comps[i] = comp return os.path.join(*comps) + def truncate_path(path, length=MAX_FILENAME_LENGTH): """Given a bytestring path or a Unicode path fragment, truncate the components to a legal length. In the last component, the extension @@ -493,12 +545,83 @@ def truncate_path(path, length=MAX_FILENAME_LENGTH): return os.path.join(*out) + +def _legalize_stage(path, replacements, length, extension, fragment): + """Perform a single round of path legalization steps + (sanitation/replacement, encoding from Unicode to bytes, + extension-appending, and truncation). Return the path (Unicode if + `fragment` is set, `bytes` otherwise) and whether truncation was + required. + """ + # Perform an initial sanitization including user replacements. + path = sanitize_path(path, replacements) + + # Encode for the filesystem. + if not fragment: + path = bytestring_path(path) + + # Preserve extension. + path += extension.lower() + + # Truncate too-long components. + pre_truncate_path = path + path = truncate_path(path, length) + + return path, path != pre_truncate_path + + +def legalize_path(path, replacements, length, extension, fragment): + """Given a path-like Unicode string, produce a legal path. Return + the path and a flag indicating whether some replacements had to be + ignored (see below). + + The legalization process (see `_legalize_stage`) consists of + applying the sanitation rules in `replacements`, encoding the string + to bytes (unless `fragment` is set), truncating components to + `length`, appending the `extension`. + + This function performs up to three calls to `_legalize_stage` in + case truncation conflicts with replacements (as can happen when + truncation creates whitespace at the end of the string, for + example). 
The limited number of iterations iterations avoids the + possibility of an infinite loop of sanitation and truncation + operations, which could be caused by replacement rules that make the + string longer. The flag returned from this function indicates that + the path has to be truncated twice (indicating that replacements + made the string longer again after it was truncated); the + application should probably log some sort of warning. + """ + + if fragment: + # Outputting Unicode. + extension = extension.decode('utf8', 'ignore') + + first_stage_path, _ = _legalize_stage( + path, replacements, length, extension, fragment + ) + + # Convert back to Unicode with extension removed. + first_stage_path, _ = os.path.splitext(displayable_path(first_stage_path)) + + # Re-sanitize following truncation (including user replacements). + second_stage_path, retruncated = _legalize_stage( + first_stage_path, replacements, length, extension, fragment + ) + + # If the path was once again truncated, discard user replacements + # and run through one last legalization stage. + if retruncated: + second_stage_path, _ = _legalize_stage( + first_stage_path, None, length, extension, fragment + ) + + return second_stage_path, retruncated + + def str2bool(value): """Returns a boolean reflecting a human-entered string.""" - if value.lower() in ('yes', '1', 'true', 't', 'y'): - return True - else: - return False + return value.lower() in (u'yes', u'1', u'true', u't', u'y') + def as_string(value): """Convert a value to a Unicode object for matching with a query. 
@@ -507,56 +630,23 @@ def as_string(value): if value is None: return u'' elif isinstance(value, buffer): - return str(value).decode('utf8', 'ignore') - elif isinstance(value, str): + return bytes(value).decode('utf8', 'ignore') + elif isinstance(value, bytes): return value.decode('utf8', 'ignore') else: return unicode(value) -def levenshtein(s1, s2): - """A nice DP edit distance implementation from Wikibooks: - http://en.wikibooks.org/wiki/Algorithm_implementation/Strings/ - Levenshtein_distance#Python - """ - if len(s1) < len(s2): - return levenshtein(s2, s1) - if not s1: - return len(s2) - - previous_row = xrange(len(s2) + 1) - for i, c1 in enumerate(s1): - current_row = [i + 1] - for j, c2 in enumerate(s2): - insertions = previous_row[j + 1] + 1 - deletions = current_row[j] + 1 - substitutions = previous_row[j] + (c1 != c2) - current_row.append(min(insertions, deletions, substitutions)) - previous_row = current_row - - return previous_row[-1] def plurality(objs): - """Given a sequence of comparable objects, returns the object that - is most common in the set and the frequency of that object. The + """Given a sequence of hashble objects, returns the object that + is most common in the set and the its number of appearance. The sequence must contain at least one object. """ - # Calculate frequencies. - freqs = defaultdict(int) - for obj in objs: - freqs[obj] += 1 + c = Counter(objs) + if not c: + raise ValueError(u'sequence must be non-empty') + return c.most_common(1)[0] - if not freqs: - raise ValueError('sequence must be non-empty') - - # Find object with maximum frequency. 
- max_freq = 0 - res = None - for obj, freq in freqs.items(): - if freq > max_freq: - max_freq = freq - res = obj - - return res, max_freq def cpu_count(): """Return the number of hardware thread contexts (cores or SMT @@ -571,8 +661,8 @@ def cpu_count(): num = 0 elif sys.platform == 'darwin': try: - num = int(os.popen('sysctl -n hw.ncpu').read()) - except ValueError: + num = int(command_output([b'/usr/sbin/sysctl', b'-n', b'hw.ncpu'])) + except (ValueError, OSError, subprocess.CalledProcessError): num = 0 else: try: @@ -584,23 +674,38 @@ def cpu_count(): else: return 1 -def command_output(cmd): - """Wraps the `subprocess` module to invoke a command (given as a - list of arguments starting with the command name) and collect - stdout. The stderr stream is ignored. May raise - `subprocess.CalledProcessError` or an `OSError`. - This replaces `subprocess.check_output`, which isn't available in - Python 2.6 and which can have problems if lots of output is sent to - stderr. +def command_output(cmd, shell=False): + """Runs the command and returns its output after it has exited. + + ``cmd`` is a list of byte string arguments starting with the command names. + If ``shell`` is true, ``cmd`` is assumed to be a string and passed to a + shell to execute. + + If the process exits with a non-zero return code + ``subprocess.CalledProcessError`` is raised. May also raise + ``OSError``. + + This replaces `subprocess.check_output` which can have problems if lots of + output is sent to stderr. 
""" - with open(os.devnull, 'w') as devnull: - proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=devnull) - stdout, _ = proc.communicate() + proc = subprocess.Popen( + cmd, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + close_fds=platform.system() != 'Windows', + shell=shell + ) + stdout, stderr = proc.communicate() if proc.returncode: - raise subprocess.CalledProcessError(proc.returncode, cmd) + raise subprocess.CalledProcessError( + returncode=proc.returncode, + cmd=b' '.join(cmd), + output=stdout + stderr, + ) return stdout + def max_filename_length(path, limit=MAX_FILENAME_LENGTH): """Attempt to determine the maximum filename length for the filesystem containing `path`. If the value is greater than `limit`, @@ -616,3 +721,142 @@ def max_filename_length(path, limit=MAX_FILENAME_LENGTH): return min(res[9], limit) else: return limit + + +def open_anything(): + """Return the system command that dispatches execution to the correct + program. + """ + sys_name = platform.system() + if sys_name == 'Darwin': + base_cmd = 'open' + elif sys_name == 'Windows': + base_cmd = 'start' + else: # Assume Unix + base_cmd = 'xdg-open' + return base_cmd + + +def editor_command(): + """Get a command for opening a text file. + + Use the `EDITOR` environment variable by default. If it is not + present, fall back to `open_anything()`, the platform-specific tool + for opening files in general. + """ + editor = os.environ.get('EDITOR') + if editor: + return editor + return open_anything() + + +def shlex_split(s): + """Split a Unicode or bytes string according to shell lexing rules. + + Raise `ValueError` if the string is not a well-formed shell string. + This is a workaround for a bug in some versions of Python. + """ + if isinstance(s, bytes): + # Shlex works fine. + return shlex.split(s) + + elif isinstance(s, unicode): + # Work around a Python bug. 
+ # http://bugs.python.org/issue6988 + bs = s.encode('utf8') + return [c.decode('utf8') for c in shlex.split(bs)] + + else: + raise TypeError(u'shlex_split called with non-string') + + +def interactive_open(targets, command): + """Open the files in `targets` by `exec`ing a new `command`, given + as a Unicode string. (The new program takes over, and Python + execution ends: this does not fork a subprocess.) + + Can raise `OSError`. + """ + assert command + + # Split the command string into its arguments. + try: + args = shlex_split(command) + except ValueError: # Malformed shell tokens. + args = [command] + + args.insert(0, args[0]) # for argv[0] + + args += targets + + return os.execlp(*args) + + +def _windows_long_path_name(short_path): + """Use Windows' `GetLongPathNameW` via ctypes to get the canonical, + long path given a short filename. + """ + if not isinstance(short_path, unicode): + short_path = unicode(short_path) + + import ctypes + buf = ctypes.create_unicode_buffer(260) + get_long_path_name_w = ctypes.windll.kernel32.GetLongPathNameW + return_value = get_long_path_name_w(short_path, buf, 260) + + if return_value == 0 or return_value > 260: + # An error occurred + return short_path + else: + long_path = buf.value + # GetLongPathNameW does not change the case of the drive + # letter. + if len(long_path) > 1 and long_path[1] == ':': + long_path = long_path[0].upper() + long_path[1:] + return long_path + + +def case_sensitive(path): + """Check whether the filesystem at the given path is case sensitive. + + To work best, the path should point to a file or a directory. If the path + does not exist, assume a case sensitive file system on every platform + except Windows. + """ + # A fallback in case the path does not exist. + if not os.path.exists(syspath(path)): + # By default, the case sensitivity depends on the platform. 
+ return platform.system() != 'Windows' + + # If an upper-case version of the path exists but a lower-case + # version does not, then the filesystem must be case-sensitive. + # (Otherwise, we have more work to do.) + if not (os.path.exists(syspath(path.lower())) and + os.path.exists(syspath(path.upper()))): + return True + + # Both versions of the path exist on the file system. Check whether + # they refer to different files by their inodes. Alas, + # `os.path.samefile` is only available on Unix systems on Python 2. + if platform.system() != 'Windows': + return not os.path.samefile(syspath(path.lower()), + syspath(path.upper())) + + # On Windows, we check whether the canonical, long filenames for the + # files are the same. + lower = _windows_long_path_name(path.lower()) + upper = _windows_long_path_name(path.upper()) + return lower != upper + + +def raw_seconds_short(string): + """Formats a human-readable M:SS string as a float (number of seconds). + + Raises ValueError if the conversion cannot take place due to `string` not + being in the right format. + """ + match = re.match(r'^(\d+):([0-5]\d)$', string) + if not match: + raise ValueError(u'String not in M:SS format') + minutes, seconds = map(int, match.groups()) + return float(minutes * 60 + seconds) diff --git a/libs/beets/util/artresizer.py b/libs/beets/util/artresizer.py index 6e367a0a..6970a7da 100644 --- a/libs/beets/util/artresizer.py +++ b/libs/beets/util/artresizer.py @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2013, Fabrice Laporte +# Copyright 2016, Fabrice Laporte # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -15,11 +16,15 @@ """Abstraction layer to resize images using PIL, ImageMagick, or a public resizing proxy if neither is available. 
""" +from __future__ import division, absolute_import, print_function + import urllib import subprocess import os +import re from tempfile import NamedTemporaryFile -import logging + +from beets import logging from beets import util # Resizing methods @@ -37,8 +42,8 @@ def resize_url(url, maxwidth): maxwidth (preserving aspect ratio). """ return '{0}?{1}'.format(PROXY_URL, urllib.urlencode({ - 'url': url.replace('http://',''), - 'w': str(maxwidth), + 'url': url.replace('http://', ''), + 'w': bytes(maxwidth), })) @@ -57,9 +62,8 @@ def pil_resize(maxwidth, path_in, path_out=None): """ path_out = path_out or temp_file_for(path_in) from PIL import Image - log.debug(u'artresizer: PIL resizing {0} to {1}'.format( - util.displayable_path(path_in), util.displayable_path(path_out) - )) + log.debug(u'artresizer: PIL resizing {0} to {1}', + util.displayable_path(path_in), util.displayable_path(path_out)) try: im = Image.open(util.syspath(path_in)) @@ -68,20 +72,18 @@ def pil_resize(maxwidth, path_in, path_out=None): im.save(path_out) return path_out except IOError: - log.error(u"PIL cannot create thumbnail for '{0}'".format( - util.displayable_path(path_in) - )) + log.error(u"PIL cannot create thumbnail for '{0}'", + util.displayable_path(path_in)) return path_in def im_resize(maxwidth, path_in, path_out=None): """Resize using ImageMagick's ``convert`` tool. - tool. Return the output path of resized image. + Return the output path of resized image. """ path_out = path_out or temp_file_for(path_in) - log.debug(u'artresizer: ImageMagick resizing {0} to {1}'.format( - util.displayable_path(path_in), util.displayable_path(path_out) - )) + log.debug(u'artresizer: ImageMagick resizing {0} to {1}', + util.displayable_path(path_in), util.displayable_path(path_out)) # "-resize widthxheight>" shrinks images with dimension(s) larger # than the corresponding width and/or height dimension(s). The > @@ -89,13 +91,13 @@ def im_resize(maxwidth, path_in, path_out=None): # compatibility. 
try: util.command_output([ - 'convert', util.syspath(path_in), - '-resize', '{0}x^>'.format(maxwidth), path_out + b'convert', util.syspath(path_in, prefix=False), + b'-resize', b'{0}x^>'.format(maxwidth), + util.syspath(path_out, prefix=False), ]) except subprocess.CalledProcessError: - log.warn(u'artresizer: IM convert failed for {0}'.format( - util.displayable_path(path_in) - )) + log.warn(u'artresizer: IM convert failed for {0}', + util.displayable_path(path_in)) return path_in return path_out @@ -106,21 +108,56 @@ BACKEND_FUNCS = { } +def pil_getsize(path_in): + from PIL import Image + try: + im = Image.open(util.syspath(path_in)) + return im.size + except IOError as exc: + log.error(u"PIL could not read file {}: {}", + util.displayable_path(path_in), exc) + + +def im_getsize(path_in): + cmd = [b'identify', b'-format', b'%w %h', + util.syspath(path_in, prefix=False)] + try: + out = util.command_output(cmd) + except subprocess.CalledProcessError as exc: + log.warn(u'ImageMagick size query failed') + log.debug( + u'`convert` exited with (status {}) when ' + u'getting size with command {}:\n{}', + exc.returncode, cmd, exc.output.strip() + ) + return + try: + return tuple(map(int, out.split(b' '))) + except IndexError: + log.warn(u'Could not understand IM output: {0!r}', out) + + +BACKEND_GET_SIZE = { + PIL: pil_getsize, + IMAGEMAGICK: im_getsize, +} + + class Shareable(type): """A pseudo-singleton metaclass that allows both shared and non-shared instances. The ``MyClass.shared`` property holds a lazily-created shared instance of ``MyClass`` while calling ``MyClass()`` to construct a new object works as usual. 
""" - def __init__(cls, name, bases, dict): - super(Shareable, cls).__init__(name, bases, dict) - cls._instance = None + def __init__(self, name, bases, dict): + super(Shareable, self).__init__(name, bases, dict) + self._instance = None @property - def shared(cls): - if cls._instance is None: - cls._instance = cls() - return cls._instance + def shared(self): + if self._instance is None: + self._instance = self() + return self._instance class ArtResizer(object): @@ -128,12 +165,12 @@ class ArtResizer(object): """ __metaclass__ = Shareable - def __init__(self, method=None): - """Create a resizer object for the given method or, if none is - specified, with an inferred method. + def __init__(self): + """Create a resizer object with an inferred method. """ - self.method = method or self._guess_method() - log.debug(u"artresizer: method is {0}".format(self.method)) + self.method = self._check_method() + log.debug(u"artresizer: method is {0}", self.method) + self.can_compare = self._can_compare() def resize(self, maxwidth, path_in, path_out=None): """Manipulate an image file according to the method, returning a @@ -141,7 +178,7 @@ class ArtResizer(object): temporary file. For WEBPROXY, returns `path_in` unmodified. """ if self.local: - func = BACKEND_FUNCS[self.method] + func = BACKEND_FUNCS[self.method[0]] return func(maxwidth, path_in, path_out) else: return path_in @@ -159,30 +196,63 @@ class ArtResizer(object): @property def local(self): """A boolean indicating whether the resizing method is performed - locally (i.e., PIL or IMAGEMAGICK). + locally (i.e., PIL or ImageMagick). """ - return self.method in BACKEND_FUNCS + return self.method[0] in BACKEND_FUNCS + + def get_size(self, path_in): + """Return the size of an image file as an int couple (width, height) + in pixels. 
+ + Only available locally + """ + if self.local: + func = BACKEND_GET_SIZE[self.method[0]] + return func(path_in) + + def _can_compare(self): + """A boolean indicating whether image comparison is available""" + + return self.method[0] == IMAGEMAGICK and self.method[1] > (6, 8, 7) @staticmethod - def _guess_method(): - """Determine which resizing method to use. Returns PIL, - IMAGEMAGICK, or WEBPROXY depending on available dependencies. - """ - # Try importing PIL. - try: - __import__('PIL', fromlist=['Image']) - return PIL - except ImportError: - pass + def _check_method(): + """Return a tuple indicating an available method and its version.""" + version = get_im_version() + if version: + return IMAGEMAGICK, version - # Try invoking ImageMagick's "convert". - try: - out = util.command_output(['convert', '--version']) - if 'imagemagick' in out.lower(): - # system32/convert.exe may be interfering - return IMAGEMAGICK - except (subprocess.CalledProcessError, OSError): - pass + version = get_pil_version() + if version: + return PIL, version - # Fall back to Web proxy method. 
- return WEBPROXY + return WEBPROXY, (0) + + +def get_im_version(): + """Return ImageMagick version or None if it is unavailable + Try invoking ImageMagick's "convert".""" + try: + out = util.command_output([b'identify', b'--version']) + + if 'imagemagick' in out.lower(): + pattern = r".+ (\d+)\.(\d+)\.(\d+).*" + match = re.search(pattern, out) + if match: + return (int(match.group(1)), + int(match.group(2)), + int(match.group(3))) + return (0,) + + except (subprocess.CalledProcessError, OSError): + return None + + +def get_pil_version(): + """Return PIL version or None if it is unavailable + Try importing PIL.""" + try: + __import__('PIL', fromlist=[str('Image')]) + return (0,) + except ImportError: + return None diff --git a/libs/beets/util/bluelet.py b/libs/beets/util/bluelet.py index 9d9432f2..d81c2919 100644 --- a/libs/beets/util/bluelet.py +++ b/libs/beets/util/bluelet.py @@ -1,3 +1,5 @@ +# -*- coding: utf-8 -*- + """Extremely simple pure-Python implementation of coroutine-style asynchronous socket I/O. Inspired by, but inferior to, Eventlet. Bluelet can also be thought of as a less-terrible replacement for @@ -5,6 +7,8 @@ asyncore. Bluelet: easy concurrency without all the messy parallelism. """ +from __future__ import division, absolute_import, print_function + import socket import select import sys @@ -38,6 +42,7 @@ class Event(object): """ pass + class WaitableEvent(Event): """A waitable event is one encapsulating an action that can be waited for using a select() call. That is, it's an event with an @@ -57,21 +62,25 @@ class WaitableEvent(Event): """ pass + class ValueEvent(Event): """An event that does nothing but return a fixed value.""" def __init__(self, value): self.value = value + class ExceptionEvent(Event): """Raise an exception at the yield point. 
Used internally.""" def __init__(self, exc_info): self.exc_info = exc_info + class SpawnEvent(Event): """Add a new coroutine thread to the scheduler.""" def __init__(self, coro): self.spawned = coro + class JoinEvent(Event): """Suspend the thread until the specified child thread has completed. @@ -79,11 +88,13 @@ class JoinEvent(Event): def __init__(self, child): self.child = child + class KillEvent(Event): """Unschedule a child thread.""" def __init__(self, child): self.child = child + class DelegationEvent(Event): """Suspend execution of the current thread, start a new thread and, once the child thread finished, return control to the parent @@ -92,6 +103,7 @@ class DelegationEvent(Event): def __init__(self, coro): self.spawned = coro + class ReturnEvent(Event): """Return a value the current thread's delegator at the point of delegation. Ends the current (delegate) thread. @@ -99,6 +111,7 @@ class ReturnEvent(Event): def __init__(self, value): self.value = value + class SleepEvent(WaitableEvent): """Suspend the thread for a given duration. """ @@ -108,6 +121,7 @@ class SleepEvent(WaitableEvent): def time_left(self): return max(self.wakeup_time - time.time(), 0.0) + class ReadEvent(WaitableEvent): """Reads from a file-like object.""" def __init__(self, fd, bufsize): @@ -120,6 +134,7 @@ class ReadEvent(WaitableEvent): def fire(self): return self.fd.read(self.bufsize) + class WriteEvent(WaitableEvent): """Writes to a file-like object.""" def __init__(self, fd, data): @@ -192,15 +207,19 @@ def _event_select(events): return ready_events + class ThreadException(Exception): def __init__(self, coro, exc_info): self.coro = coro self.exc_info = exc_info + def reraise(self): _reraise(self.exc_info[0], self.exc_info[1], self.exc_info[2]) + SUSPENDED = Event() # Special sentinel placeholder for suspended threads. + class Delegated(Event): """Placeholder indicating that a thread has delegated execution to a different thread. 
@@ -208,6 +227,7 @@ class Delegated(Event): def __init__(self, child): self.child = child + def run(root_coro): """Schedules a coroutine, running it to completion. This encapsulates the Bluelet scheduler, which the root coroutine can @@ -329,7 +349,7 @@ def run(root_coro): break # Wait and fire. - event2coro = dict((v,k) for k,v in threads.items()) + event2coro = dict((v, k) for k, v in threads.items()) for event in _event_select(threads.values()): # Run the IO operation, but catch socket errors. try: @@ -378,6 +398,7 @@ def run(root_coro): class SocketClosedError(Exception): pass + class Listener(object): """A socket wrapper object for listening sockets. """ @@ -407,6 +428,7 @@ class Listener(object): self._closed = True self.sock.close() + class Connection(object): """A socket wrapper object for connected sockets. """ @@ -468,6 +490,7 @@ class Connection(object): yield ReturnEvent(line) break + class AcceptEvent(WaitableEvent): """An event for Listener objects (listening sockets) that suspends execution until the socket gets a connection. @@ -482,6 +505,7 @@ class AcceptEvent(WaitableEvent): sock, addr = self.listener.sock.accept() return Connection(sock, addr) + class ReceiveEvent(WaitableEvent): """An event for Connection objects (connected sockets) for asynchronously reading data. @@ -496,6 +520,7 @@ class ReceiveEvent(WaitableEvent): def fire(self): return self.conn.sock.recv(self.bufsize) + class SendEvent(WaitableEvent): """An event for Connection objects (connected sockets) for asynchronously writing data. @@ -523,29 +548,33 @@ def null(): """ return ValueEvent(None) + def spawn(coro): """Event: add another coroutine to the scheduler. Both the parent and child coroutines run concurrently. """ if not isinstance(coro, types.GeneratorType): - raise ValueError('%s is not a coroutine' % str(coro)) + raise ValueError(u'%s is not a coroutine' % coro) return SpawnEvent(coro) + def call(coro): """Event: delegate to another coroutine. 
The current coroutine is resumed once the sub-coroutine finishes. If the sub-coroutine returns a value using end(), then this event returns that value. """ if not isinstance(coro, types.GeneratorType): - raise ValueError('%s is not a coroutine' % str(coro)) + raise ValueError(u'%s is not a coroutine' % coro) return DelegationEvent(coro) + def end(value=None): """Event: ends the coroutine and returns a value to its delegator. """ return ReturnEvent(value) + def read(fd, bufsize=None): """Event: read from a file descriptor asynchronously.""" if bufsize is None: @@ -563,10 +592,12 @@ def read(fd, bufsize=None): else: return ReadEvent(fd, bufsize) + def write(fd, data): """Event: write to a file descriptor asynchronously.""" return WriteEvent(fd, data) + def connect(host, port): """Event: connect to a network address and return a Connection object for communicating on the socket. @@ -575,17 +606,20 @@ def connect(host, port): sock = socket.create_connection(addr) return ValueEvent(Connection(sock, addr)) + def sleep(duration): """Event: suspend the thread for ``duration`` seconds. """ return SleepEvent(duration) + def join(coro): """Suspend the thread until another, previously `spawn`ed thread completes. """ return JoinEvent(coro) + def kill(coro): """Halt the execution of a different `spawn`ed thread. """ diff --git a/libs/beets/util/confit.py b/libs/beets/util/confit.py index cf8b3629..aa49f6f1 100644 --- a/libs/beets/util/confit.py +++ b/libs/beets/util/confit.py @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of Confit. -# Copyright 2014, Adrian Sampson. +# Copyright 2016, Adrian Sampson. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -14,13 +15,15 @@ """Worry-free YAML configuration files. 
""" -from __future__ import unicode_literals +from __future__ import division, absolute_import, print_function + import platform import os import pkgutil import sys import yaml -import types +import collections +import re try: from collections import OrderedDict except ImportError: @@ -38,6 +41,8 @@ ROOT_NAME = 'root' YAML_TAB_PROBLEM = "found character '\\t' that cannot start any token" +REDACTED_TOMBSTONE = 'REDACTED' + # Utilities. @@ -45,7 +50,7 @@ PY3 = sys.version_info[0] == 3 STRING = str if PY3 else unicode BASESTRING = str if PY3 else basestring NUMERIC_TYPES = (int, float) if PY3 else (int, float, long) -TYPE_TYPES = (type,) if PY3 else (type, types.ClassType) + def iter_first(sequence): """Get the first element from an iterable or raise a ValueError if @@ -53,10 +58,7 @@ def iter_first(sequence): """ it = iter(sequence) try: - if PY3: - return next(it) - else: - return it.next() + return next(it) except StopIteration: raise ValueError() @@ -67,16 +69,25 @@ class ConfigError(Exception): """Base class for exceptions raised when querying a configuration. """ + class NotFoundError(ConfigError): """A requested value could not be found in the configuration trees. """ -class ConfigTypeError(ConfigError, TypeError): + +class ConfigValueError(ConfigError): + """The value in the configuration is illegal.""" + + +class ConfigTypeError(ConfigValueError): """The value in the configuration did not match the expected type. """ -class ConfigValueError(ConfigError, ValueError): - """The value in the configuration is illegal.""" + +class ConfigTemplateError(ConfigError): + """Base class for exceptions raised because of an invalid template. 
+ """ + class ConfigReadError(ConfigError): """A configuration file could not be read.""" @@ -84,17 +95,17 @@ class ConfigReadError(ConfigError): self.filename = filename self.reason = reason - message = 'file {0} could not be read'.format(filename) + message = u'file {0} could not be read'.format(filename) if isinstance(reason, yaml.scanner.ScannerError) and \ reason.problem == YAML_TAB_PROBLEM: # Special-case error message for tab indentation in YAML markup. - message += ': found tab character at line {0}, column {1}'.format( + message += u': found tab character at line {0}, column {1}'.format( reason.problem_mark.line + 1, reason.problem_mark.column + 1, ) elif reason: # Generic error message uses exception's message. - message += ': {0}'.format(reason) + message += u': {0}'.format(reason) super(ConfigReadError, self).__init__(message) @@ -108,19 +119,19 @@ class ConfigSource(dict): def __init__(self, value, filename=None, default=False): super(ConfigSource, self).__init__(value) if filename is not None and not isinstance(filename, BASESTRING): - raise TypeError('filename must be a string or None') + raise TypeError(u'filename must be a string or None') self.filename = filename self.default = default def __repr__(self): - return 'ConfigSource({0}, {1}, {2})'.format( - super(ConfigSource, self).__repr__(), - repr(self.filename), - repr(self.default) + return 'ConfigSource({0!r}, {1!r}, {2!r})'.format( + super(ConfigSource, self), + self.filename, + self.default, ) @classmethod - def of(self, value): + def of(cls, value): """Given either a dictionary or a `ConfigSource` object, return a `ConfigSource` object. This lets a function accept either type of object as an argument. 
@@ -130,7 +141,8 @@ class ConfigSource(dict): elif isinstance(value, dict): return ConfigSource(value) else: - raise TypeError('source value must be a dict') + raise TypeError(u'source value must be a dict') + class ConfigView(object): """A configuration "view" is a query into a program's configuration @@ -164,7 +176,7 @@ class ConfigView(object): try: return iter_first(pairs) except ValueError: - raise NotFoundError("{0} not found".format(self.name)) + raise NotFoundError(u"{0} not found".format(self.name)) def exists(self): """Determine whether the view has a setting in any source. @@ -195,7 +207,31 @@ class ConfigView(object): raise NotImplementedError def __repr__(self): - return '' % self.name + return '<{}: {}>'.format(self.__class__.__name__, self.name) + + def __iter__(self): + """Iterate over the keys of a dictionary view or the *subviews* + of a list view. + """ + # Try getting the keys, if this is a dictionary view. + try: + keys = self.keys() + for key in keys: + yield key + + except ConfigTypeError: + # Otherwise, try iterating over a list. + collection = self.get() + if not isinstance(collection, (list, tuple)): + raise ConfigTypeError( + u'{0} must be a dictionary or a list, not {1}'.format( + self.name, type(collection).__name__ + ) + ) + + # Yield all the indices in the list. + for index in range(len(collection)): + yield self[index] def __getitem__(self, key): """Get a subview of this view.""" @@ -207,6 +243,9 @@ class ConfigView(object): """ self.set({key: value}) + def __contains__(self, key): + return self[key].exists() + def set_args(self, namespace): """Overlay parsed command-line arguments, generated by a library like argparse or optparse, onto this view's value. @@ -223,14 +262,17 @@ class ConfigView(object): # just say ``bool(view)`` or use ``view`` in a conditional. def __str__(self): - """Gets the value for this view as a byte string.""" - return str(self.get()) + """Get the value for this view as a bytestring. 
+ """ + if PY3: + return self.__unicode__() + else: + return bytes(self.get()) def __unicode__(self): - """Gets the value for this view as a unicode string. (Python 2 - only.) + """Get the value for this view as a Unicode string. """ - return unicode(self.get()) + return STRING(self.get()) def __nonzero__(self): """Gets the value for this view as a boolean. (Python 2 only.) @@ -260,7 +302,7 @@ class ConfigView(object): cur_keys = dic.keys() except AttributeError: raise ConfigTypeError( - '{0} must be a dict, not {1}'.format( + u'{0} must be a dict, not {1}'.format( self.name, type(dic).__name__ ) ) @@ -301,7 +343,7 @@ class ConfigView(object): it = iter(collection) except TypeError: raise ConfigTypeError( - '{0} must be an iterable, not {1}'.format( + u'{0} must be an iterable, not {1}'.format( self.name, type(collection).__name__ ) ) @@ -310,111 +352,78 @@ class ConfigView(object): # Validation and conversion. - def get(self, typ=None): - """Returns the canonical value for the view, checked against the - passed-in type. If the value is not an instance of the given - type, a ConfigTypeError is raised. May also raise a - NotFoundError. - """ - value, _ = self.first() - - if typ is not None: - if not isinstance(typ, TYPE_TYPES): - raise TypeError('argument to get() must be a type') - - if not isinstance(value, typ): - raise ConfigTypeError( - "{0} must be of type {1}, not {2}".format( - self.name, typ.__name__, type(value).__name__ - ) - ) - - return value - - def as_filename(self): - """Get a string as a normalized as an absolute, tilde-free path. - - Relative paths are relative to the configuration directory (see - the `config_dir` method) if they come from a file. Otherwise, - they are relative to the current working directory. This helps - attain the expected behavior when using command-line options. 
- """ - path, source = self.first() - if not isinstance(path, BASESTRING): - raise ConfigTypeError('{0} must be a filename, not {1}'.format( - self.name, type(path).__name__ - )) - path = os.path.expanduser(STRING(path)) - - if not os.path.isabs(path) and source.filename: - # From defaults: relative to the app's directory. - path = os.path.join(self.root().config_dir(), path) - - return os.path.abspath(path) - - def as_choice(self, choices): - """Ensure that the value is among a collection of choices and - return it. If `choices` is a dictionary, then return the - corresponding value rather than the value itself (the key). - """ - value = self.get() - - if value not in choices: - raise ConfigValueError( - '{0} must be one of {1}, not {2}'.format( - self.name, repr(list(choices)), repr(value) - ) - ) - - if isinstance(choices, dict): - return choices[value] - else: - return value - - def as_number(self): - """Ensure that a value is of numeric type.""" - value = self.get() - if isinstance(value, NUMERIC_TYPES): - return value - raise ConfigTypeError( - '{0} must be numeric, not {1}'.format( - self.name, type(value).__name__ - ) - ) - - def as_str_seq(self): - """Get the value as a list of strings. The underlying configured - value can be a sequence or a single string. In the latter case, - the string is treated as a white-space separated list of words. - """ - value = self.get() - if isinstance(value, bytes): - value = value.decode('utf8', 'ignore') - - if isinstance(value, STRING): - return value.split() - else: - try: - return list(value) - except TypeError: - raise ConfigTypeError( - '{0} must be a whitespace-separated string or ' - 'a list'.format(self.name) - ) - - def flatten(self): + def flatten(self, redact=False): """Create a hierarchy of OrderedDicts containing the data from this view, recursively reifying all views to get their represented values. + + If `redact` is set, then sensitive values are replaced with + the string "REDACTED". 
""" od = OrderedDict() for key, view in self.items(): - try: - od[key] = view.flatten() - except ConfigTypeError: - od[key] = view.get() + if redact and view.redact: + od[key] = REDACTED_TOMBSTONE + else: + try: + od[key] = view.flatten(redact=redact) + except ConfigTypeError: + od[key] = view.get() return od + def get(self, template=None): + """Retrieve the value for this view according to the template. + + The `template` against which the values are checked can be + anything convertible to a `Template` using `as_template`. This + means you can pass in a default integer or string value, for + example, or a type to just check that something matches the type + you expect. + + May raise a `ConfigValueError` (or its subclass, + `ConfigTypeError`) or a `NotFoundError` when the configuration + doesn't satisfy the template. + """ + return as_template(template).value(self, template) + + # Old validation methods (deprecated). + + def as_filename(self): + return self.get(Filename()) + + def as_choice(self, choices): + return self.get(Choice(choices)) + + def as_number(self): + return self.get(Number()) + + def as_str_seq(self): + return self.get(StrSeq()) + + # Redaction. + + @property + def redact(self): + """Whether the view contains sensitive information and should be + redacted from output. + """ + return () in self.get_redactions() + + @redact.setter + def redact(self, flag): + self.set_redaction((), flag) + + def set_redaction(self, path, flag): + """Add or remove a redaction for a key path, which should be an + iterable of keys. + """ + raise NotImplementedError() + + def get_redactions(self): + """Get the set of currently-redacted sub-key-paths at this view. + """ + raise NotImplementedError() + class RootView(ConfigView): """The base of a view hierarchy. 
This view keeps track of the @@ -427,6 +436,7 @@ class RootView(ConfigView): """ self.sources = list(sources) self.name = ROOT_NAME + self.redactions = set() def add(self, obj): self.sources.append(ConfigSource.of(obj)) @@ -438,12 +448,24 @@ class RootView(ConfigView): return ((dict(s), s) for s in self.sources) def clear(self): - """Remove all sources from this configuration.""" + """Remove all sources (and redactions) from this + configuration. + """ del self.sources[:] + self.redactions.clear() def root(self): return self + def set_redaction(self, path, flag): + if flag: + self.redactions.add(path) + elif path in self.redactions: + self.redactions.remove(path) + + def get_redactions(self): + return self.redactions + class Subview(ConfigView): """A subview accessed via a subscript of a parent view.""" @@ -461,11 +483,14 @@ class Subview(ConfigView): if not isinstance(self.key, int): self.name += '.' if isinstance(self.key, int): - self.name += '#{0}'.format(self.key) + self.name += u'#{0}'.format(self.key) elif isinstance(self.key, BASESTRING): - self.name += '{0}'.format(self.key) + if isinstance(self.key, bytes): + self.name += self.key.decode('utf8') + else: + self.name += self.key else: - self.name += '{0}'.format(repr(self.key)) + self.name += repr(self.key) def resolve(self): for collection, source in self.parent.resolve(): @@ -480,7 +505,7 @@ class Subview(ConfigView): except TypeError: # Not subscriptable. raise ConfigTypeError( - "{0} must be a collection, not {1}".format( + u"{0} must be a collection, not {1}".format( self.parent.name, type(collection).__name__ ) ) @@ -495,6 +520,13 @@ class Subview(ConfigView): def root(self): return self.parent.root() + def set_redaction(self, path, flag): + self.parent.set_redaction((self.key,) + path, flag) + + def get_redactions(self): + return (kp[1:] for kp in self.parent.get_redactions() + if kp and kp[0] == self.key) + # Config file paths, including platform-specific paths and in-package # defaults. 
@@ -518,6 +550,7 @@ def _package_path(name): return os.path.dirname(os.path.abspath(filepath)) + def config_dirs(): """Return a platform-specific list of candidates for user configuration directories on the system. @@ -582,7 +615,7 @@ class Loader(yaml.SafeLoader): else: raise yaml.constructor.ConstructorError( None, None, - 'expected a mapping node, but found %s' % node.id, + u'expected a mapping node, but found %s' % node.id, node.start_mark ) @@ -593,7 +626,7 @@ class Loader(yaml.SafeLoader): hash(key) except TypeError as exc: raise yaml.constructor.ConstructorError( - 'while constructing a mapping', + u'while constructing a mapping', node.start_mark, 'found unacceptable key (%s)' % exc, key_node.start_mark ) @@ -606,10 +639,12 @@ class Loader(yaml.SafeLoader): plain = super(Loader, self).check_plain() return plain or self.peek() == '%' + Loader.add_constructor('tag:yaml.org,2002:str', Loader._construct_unicode) Loader.add_constructor('tag:yaml.org,2002:map', Loader.construct_yaml_map) Loader.add_constructor('tag:yaml.org,2002:omap', Loader.construct_yaml_map) + def load_yaml(filename): """Read a YAML document from a file. If the file cannot be read or parsed, a ConfigReadError is raised. @@ -639,11 +674,11 @@ class Dumper(yaml.SafeDumper): for item_key, item_value in mapping: node_key = self.represent_data(item_key) node_value = self.represent_data(item_value) - if not (isinstance(node_key, yaml.ScalarNode) - and not node_key.style): + if not (isinstance(node_key, yaml.ScalarNode) and + not node_key.style): best_style = False - if not (isinstance(node_value, yaml.ScalarNode) - and not node_value.style): + if not (isinstance(node_value, yaml.ScalarNode) and + not node_value.style): best_style = False value.append((node_key, node_value)) if flow_style is None: @@ -669,9 +704,9 @@ class Dumper(yaml.SafeDumper): """Represent bool as 'yes' or 'no' instead of 'true' or 'false'. 
""" if data: - value = 'yes' + value = u'yes' else: - value = 'no' + value = u'no' return self.represent_scalar('tag:yaml.org,2002:bool', value) def represent_none(self, data): @@ -679,11 +714,13 @@ class Dumper(yaml.SafeDumper): """ return self.represent_scalar('tag:yaml.org,2002:null', '') + Dumper.add_representer(OrderedDict, Dumper.represent_dict) Dumper.add_representer(bool, Dumper.represent_bool) Dumper.add_representer(type(None), Dumper.represent_none) Dumper.add_representer(list, Dumper.represent_list) + def restore_yaml_comments(data, default_data): """Scan default_data for comments (we include empty lines in our definition of comments) and place them before the same keys in data. @@ -794,7 +831,7 @@ class Configuration(RootView): appdir = os.environ[self._env_var] appdir = os.path.abspath(os.path.expanduser(appdir)) if os.path.isfile(appdir): - raise ConfigError('{0} must be a directory'.format( + raise ConfigError(u'{0} must be a directory'.format( self._env_var )) @@ -818,7 +855,7 @@ class Configuration(RootView): filename = os.path.abspath(filename) self.set(ConfigSource(load_yaml(filename), filename)) - def dump(self, full=True): + def dump(self, full=True, redact=False): """Dump the Configuration object to a YAML file. The order of the keys is determined from the default @@ -830,13 +867,17 @@ class Configuration(RootView): :type filename: unicode :param full: Dump settings that don't differ from the defaults as well + :param redact: Remove sensitive information (views with the `redact` + flag set) from the output """ if full: - out_dict = self.flatten() + out_dict = self.flatten(redact=redact) else: # Exclude defaults when flattening. 
sources = [s for s in self.sources if not s.default] - out_dict = RootView(sources).flatten() + temp_root = RootView(sources) + temp_root.redactions = self.redactions + out_dict = temp_root.flatten(redact=redact) yaml_out = yaml.dump(out_dict, Dumper=Dumper, default_flow_style=None, indent=4, @@ -848,7 +889,7 @@ class Configuration(RootView): if source.default: default_source = source break - if default_source: + if default_source and default_source.filename: with open(default_source.filename, 'r') as fp: default_data = fp.read() yaml_out = restore_yaml_comments(yaml_out, default_data) @@ -895,6 +936,506 @@ class LazyConfig(Configuration): def clear(self): """Remove all sources from this configuration.""" - del self.sources[:] + super(LazyConfig, self).clear() self._lazy_suffix = [] self._lazy_prefix = [] + + +# "Validated" configuration views: experimental! + + +REQUIRED = object() +"""A sentinel indicating that there is no default value and an exception +should be raised when the value is missing. +""" + + +class Template(object): + """A value template for configuration fields. + + The template works like a type and instructs Confit about how to + interpret a deserialized YAML value. This includes type conversions, + providing a default value, and validating for errors. For example, a + filepath type might expand tildes and check that the file exists. + """ + def __init__(self, default=REQUIRED): + """Create a template with a given default value. + + If `default` is the sentinel `REQUIRED` (as it is by default), + then an error will be raised when a value is missing. Otherwise, + missing values will instead return `default`. + """ + self.default = default + + def __call__(self, view): + """Invoking a template on a view gets the view's value according + to the template. + """ + return self.value(view, self) + + def value(self, view, template=None): + """Get the value for a `ConfigView`. 
+ + May raise a `NotFoundError` if the value is missing (and the + template requires it) or a `ConfigValueError` for invalid values. + """ + if view.exists(): + value, _ = view.first() + return self.convert(value, view) + elif self.default is REQUIRED: + # Missing required value. This is an error. + raise NotFoundError(u"{0} not found".format(view.name)) + else: + # Missing value, but not required. + return self.default + + def convert(self, value, view): + """Convert the YAML-deserialized value to a value of the desired + type. + + Subclasses should override this to provide useful conversions. + May raise a `ConfigValueError` when the configuration is wrong. + """ + # Default implementation does no conversion. + return value + + def fail(self, message, view, type_error=False): + """Raise an exception indicating that a value cannot be + accepted. + + `type_error` indicates whether the error is due to a type + mismatch rather than a malformed value. In this case, a more + specific exception is raised. + """ + exc_class = ConfigTypeError if type_error else ConfigValueError + raise exc_class( + u'{0}: {1}'.format(view.name, message) + ) + + def __repr__(self): + return '{0}({1})'.format( + type(self).__name__, + '' if self.default is REQUIRED else repr(self.default), + ) + + +class Integer(Template): + """An integer configuration value template. + """ + def convert(self, value, view): + """Check that the value is an integer. Floats are rounded. + """ + if isinstance(value, int): + return value + elif isinstance(value, float): + return int(value) + else: + self.fail(u'must be a number', view, True) + + +class Number(Template): + """A numeric type: either an integer or a floating-point number. + """ + def convert(self, value, view): + """Check that the value is an int or a float. 
+ """ + if isinstance(value, NUMERIC_TYPES): + return value + else: + self.fail( + u'must be numeric, not {0}'.format(type(value).__name__), + view, + True + ) + + +class MappingTemplate(Template): + """A template that uses a dictionary to specify other types for the + values for a set of keys and produce a validated `AttrDict`. + """ + def __init__(self, mapping): + """Create a template according to a dict (mapping). The + mapping's values should themselves either be Types or + convertible to Types. + """ + subtemplates = {} + for key, typ in mapping.items(): + subtemplates[key] = as_template(typ) + self.subtemplates = subtemplates + + def value(self, view, template=None): + """Get a dict with the same keys as the template and values + validated according to the value types. + """ + out = AttrDict() + for key, typ in self.subtemplates.items(): + out[key] = typ.value(view[key], self) + return out + + def __repr__(self): + return 'MappingTemplate({0})'.format(repr(self.subtemplates)) + + +class String(Template): + """A string configuration value template. + """ + def __init__(self, default=REQUIRED, pattern=None): + """Create a template with the added optional `pattern` argument, + a regular expression string that the value should match. + """ + super(String, self).__init__(default) + self.pattern = pattern + if pattern: + self.regex = re.compile(pattern) + + def __repr__(self): + args = [] + + if self.default is not REQUIRED: + args.append(repr(self.default)) + + if self.pattern is not None: + args.append('pattern=' + repr(self.pattern)) + + return 'String({0})'.format(', '.join(args)) + + def convert(self, value, view): + """Check that the value is a string and matches the pattern. 
+ """ + if isinstance(value, BASESTRING): + if self.pattern and not self.regex.match(value): + self.fail( + u"must match the pattern {0}".format(self.pattern), + view + ) + return value + else: + self.fail(u'must be a string', view, True) + + +class Choice(Template): + """A template that permits values from a sequence of choices. + """ + def __init__(self, choices): + """Create a template that validates any of the values from the + iterable `choices`. + + If `choices` is a map, then the corresponding value is emitted. + Otherwise, the value itself is emitted. + """ + self.choices = choices + + def convert(self, value, view): + """Ensure that the value is among the choices (and remap if the + choices are a mapping). + """ + if value not in self.choices: + self.fail( + u'must be one of {0}, not {1}'.format( + repr(list(self.choices)), repr(value) + ), + view + ) + + if isinstance(self.choices, collections.Mapping): + return self.choices[value] + else: + return value + + def __repr__(self): + return 'Choice({0!r})'.format(self.choices) + + +class OneOf(Template): + """A template that permits values complying to one of the given templates. + """ + def __init__(self, allowed, default=REQUIRED): + super(OneOf, self).__init__(default) + self.allowed = list(allowed) + + def __repr__(self): + args = [] + + if self.allowed is not None: + args.append('allowed=' + repr(self.allowed)) + + if self.default is not REQUIRED: + args.append(repr(self.default)) + + return 'OneOf({0})'.format(', '.join(args)) + + def value(self, view, template): + self.template = template + return super(OneOf, self).value(view, template) + + def convert(self, value, view): + """Ensure that the value follows at least one template. 
+ """ + is_mapping = isinstance(self.template, MappingTemplate) + + for candidate in self.allowed: + try: + if is_mapping: + if isinstance(candidate, Filename) and \ + candidate.relative_to: + next_template = candidate.template_with_relatives( + view, + self.template + ) + + next_template.subtemplates[view.key] = as_template( + candidate + ) + else: + next_template = MappingTemplate({view.key: candidate}) + + return view.parent.get(next_template)[view.key] + else: + return view.get(candidate) + except ConfigTemplateError: + raise + except ConfigError: + pass + except ValueError as exc: + raise ConfigTemplateError(exc) + + self.fail( + u'must be one of {0}, not {1}'.format( + repr(self.allowed), repr(value) + ), + view + ) + + +class StrSeq(Template): + """A template for values that are lists of strings. + + Validates both actual YAML string lists and single strings. Strings + can optionally be split on whitespace. + """ + def __init__(self, split=True): + """Create a new template. + + `split` indicates whether, when the underlying value is a single + string, it should be split on whitespace. Otherwise, the + resulting value is a list containing a single string. + """ + super(StrSeq, self).__init__() + self.split = split + + def convert(self, value, view): + if isinstance(value, bytes): + value = value.decode('utf8', 'ignore') + + if isinstance(value, STRING): + if self.split: + return value.split() + else: + return [value] + + try: + value = list(value) + except TypeError: + self.fail(u'must be a whitespace-separated string or a list', + view, True) + + def convert(x): + if isinstance(x, STRING): + return x + elif isinstance(x, bytes): + return x.decode('utf8', 'ignore') + else: + self.fail(u'must be a list of strings', view, True) + return list(map(convert, value)) + + +class Filename(Template): + """A template that validates strings as filenames. + + Filenames are returned as absolute, tilde-free paths. 
+ + Relative paths are relative to the template's `cwd` argument + when it is specified, then the configuration directory (see + the `config_dir` method) if they come from a file. Otherwise, + they are relative to the current working directory. This helps + attain the expected behavior when using command-line options. + """ + def __init__(self, default=REQUIRED, cwd=None, relative_to=None, + in_app_dir=False): + """`relative_to` is the name of a sibling value that is + being validated at the same time. + + `in_app_dir` indicates whether the path should be resolved + inside the application's config directory (even when the setting + does not come from a file). + """ + super(Filename, self).__init__(default) + self.cwd = cwd + self.relative_to = relative_to + self.in_app_dir = in_app_dir + + def __repr__(self): + args = [] + + if self.default is not REQUIRED: + args.append(repr(self.default)) + + if self.cwd is not None: + args.append('cwd=' + repr(self.cwd)) + + if self.relative_to is not None: + args.append('relative_to=' + repr(self.relative_to)) + + if self.in_app_dir: + args.append('in_app_dir=True') + + return 'Filename({0})'.format(', '.join(args)) + + def resolve_relative_to(self, view, template): + if not isinstance(template, (collections.Mapping, MappingTemplate)): + # disallow config.get(Filename(relative_to='foo')) + raise ConfigTemplateError( + u'relative_to may only be used when getting multiple values.' 
+ ) + + elif self.relative_to == view.key: + raise ConfigTemplateError( + u'{0} is relative to itself'.format(view.name) + ) + + elif self.relative_to not in view.parent.keys(): + # self.relative_to is not in the config + self.fail( + ( + u'needs sibling value "{0}" to expand relative path' + ).format(self.relative_to), + view + ) + + old_template = {} + old_template.update(template.subtemplates) + + # save time by skipping MappingTemplate's init loop + next_template = MappingTemplate({}) + next_relative = self.relative_to + + # gather all the needed templates and nothing else + while next_relative is not None: + try: + # pop to avoid infinite loop because of recursive + # relative paths + rel_to_template = old_template.pop(next_relative) + except KeyError: + if next_relative in template.subtemplates: + # we encountered this config key previously + raise ConfigTemplateError(( + u'{0} and {1} are recursively relative' + ).format(view.name, self.relative_to)) + else: + raise ConfigTemplateError(( + u'missing template for {0}, needed to expand {1}\'s' + + u'relative path' + ).format(self.relative_to, view.name)) + + next_template.subtemplates[next_relative] = rel_to_template + next_relative = rel_to_template.relative_to + + return view.parent.get(next_template)[self.relative_to] + + def value(self, view, template=None): + path, source = view.first() + if not isinstance(path, BASESTRING): + self.fail( + u'must be a filename, not {0}'.format(type(path).__name__), + view, + True + ) + path = os.path.expanduser(STRING(path)) + + if not os.path.isabs(path): + if self.cwd is not None: + # relative to the template's argument + path = os.path.join(self.cwd, path) + + elif self.relative_to is not None: + path = os.path.join( + self.resolve_relative_to(view, template), + path, + ) + + elif source.filename or self.in_app_dir: + # From defaults: relative to the app's directory. 
+ path = os.path.join(view.root().config_dir(), path) + + return os.path.abspath(path) + + +class TypeTemplate(Template): + """A simple template that checks that a value is an instance of a + desired Python type. + """ + def __init__(self, typ, default=REQUIRED): + """Create a template that checks that the value is an instance + of `typ`. + """ + super(TypeTemplate, self).__init__(default) + self.typ = typ + + def convert(self, value, view): + if not isinstance(value, self.typ): + self.fail( + u'must be a {0}, not {1}'.format( + self.typ.__name__, + type(value).__name__, + ), + view, + True + ) + return value + + +class AttrDict(dict): + """A `dict` subclass that can be accessed via attributes (dot + notation) for convenience. + """ + def __getattr__(self, key): + if key in self: + return self[key] + else: + raise AttributeError(key) + + +def as_template(value): + """Convert a simple "shorthand" Python value to a `Template`. + """ + if isinstance(value, Template): + # If it's already a Template, pass it through. + return value + elif isinstance(value, collections.Mapping): + # Dictionaries work as templates. 
+ return MappingTemplate(value) + elif value is int: + return Integer() + elif isinstance(value, int): + return Integer(value) + elif isinstance(value, type) and issubclass(value, BASESTRING): + return String() + elif isinstance(value, BASESTRING): + return String(value) + elif isinstance(value, set): + # convert to list to avoid hash related problems + return Choice(list(value)) + elif isinstance(value, list): + return OneOf(value) + elif value is float: + return Number() + elif value is None: + return Template() + elif value is dict: + return TypeTemplate(collections.Mapping) + elif value is list: + return TypeTemplate(collections.Sequence) + elif isinstance(value, type): + return TypeTemplate(value) + else: + raise ValueError(u'cannot convert to template: {0!r}'.format(value)) diff --git a/libs/beets/util/enumeration.py b/libs/beets/util/enumeration.py index e6ec0766..3e946718 100644 --- a/libs/beets/util/enumeration.py +++ b/libs/beets/util/enumeration.py @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2013, Adrian Sampson. +# Copyright 2016, Adrian Sampson. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -12,167 +13,31 @@ # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. -"""A metaclass for enumerated types that really are types. +from __future__ import division, absolute_import, print_function -You can create enumerations with `enum(values, [name])` and they work -how you would expect them to. 
+from enum import Enum - >>> from enumeration import enum - >>> Direction = enum('north east south west', name='Direction') - >>> Direction.west - Direction.west - >>> Direction.west == Direction.west - True - >>> Direction.west == Direction.east - False - >>> isinstance(Direction.west, Direction) - True - >>> Direction[3] - Direction.west - >>> Direction['west'] - Direction.west - >>> Direction.west.name - 'west' - >>> Direction.north < Direction.west - True -Enumerations are classes; their instances represent the possible values -of the enumeration. Because Python classes must have names, you may -provide a `name` parameter to `enum`; if you don't, a meaningless one -will be chosen for you. -""" -import random - -class Enumeration(type): - """A metaclass whose classes are enumerations. - - The `values` attribute of the class is used to populate the - enumeration. Values may either be a list of enumerated names or a - string containing a space-separated list of names. When the class - is created, it is instantiated for each name value in `values`. - Each such instance is the name of the enumerated item as the sole - argument. - - The `Enumerated` class is a good choice for a superclass. +class OrderedEnum(Enum): """ - - def __init__(cls, name, bases, dic): - super(Enumeration, cls).__init__(name, bases, dic) - - if 'values' not in dic: - # Do nothing if no values are provided (i.e., with - # Enumerated itself). - return - - # May be called with a single string, in which case we split on - # whitespace for convenience. - values = dic['values'] - if isinstance(values, basestring): - values = values.split() - - # Create the Enumerated instances for each value. We have to use - # super's __setattr__ here because we disallow setattr below. 
- super(Enumeration, cls).__setattr__('_items_dict', {}) - super(Enumeration, cls).__setattr__('_items_list', []) - for value in values: - item = cls(value, len(cls._items_list)) - cls._items_dict[value] = item - cls._items_list.append(item) - - def __getattr__(cls, key): - try: - return cls._items_dict[key] - except KeyError: - raise AttributeError("enumeration '" + cls.__name__ + - "' has no item '" + key + "'") - - def __setattr__(cls, key, val): - raise TypeError("enumerations do not support attribute assignment") - - def __getitem__(cls, key): - if isinstance(key, int): - return cls._items_list[key] - else: - return getattr(cls, key) - - def __len__(cls): - return len(cls._items_list) - - def __iter__(cls): - return iter(cls._items_list) - - def __nonzero__(cls): - # Ensures that __len__ doesn't get called before __init__ by - # pydoc. - return True - -class Enumerated(object): - """An item in an enumeration. - - Contains instance methods inherited by enumerated objects. The - metaclass is preset to `Enumeration` for your convenience. - - Instance attributes: - name -- The name of the item. - index -- The index of the item in its enumeration. - - >>> from enumeration import Enumerated - >>> class Garment(Enumerated): - ... values = 'hat glove belt poncho lederhosen suspenders' - ... def wear(self): - ... print('now wearing a ' + self.name) - ... - >>> Garment.poncho.wear() - now wearing a poncho + An Enum subclass that allows comparison of members. 
""" + def __ge__(self, other): + if self.__class__ is other.__class__: + return self.value >= other.value + return NotImplemented - __metaclass__ = Enumeration + def __gt__(self, other): + if self.__class__ is other.__class__: + return self.value > other.value + return NotImplemented - def __init__(self, name, index): - self.name = name - self.index = index + def __le__(self, other): + if self.__class__ is other.__class__: + return self.value <= other.value + return NotImplemented - def __str__(self): - return type(self).__name__ + '.' + self.name - - def __repr__(self): - return str(self) - - def __cmp__(self, other): - if type(self) is type(other): - # Note that we're assuming that the items are direct - # instances of the same Enumeration (i.e., no fancy - # subclassing), which is probably okay. - return cmp(self.index, other.index) - else: - return NotImplemented - -def enum(*values, **kwargs): - """Shorthand for creating a new Enumeration class. - - Call with enumeration values as a list, a space-delimited string, or - just an argument list. To give the class a name, pass it as the - `name` keyword argument. Otherwise, a name will be chosen for you. - - The following are all equivalent: - - enum('pinkie ring middle index thumb') - enum('pinkie', 'ring', 'middle', 'index', 'thumb') - enum(['pinkie', 'ring', 'middle', 'index', 'thumb']) - """ - - if ('name' not in kwargs) or kwargs['name'] is None: - # Create a probably-unique name. It doesn't really have to be - # unique, but getting distinct names each time helps with - # identification in debugging. - name = 'Enumeration' + hex(random.randint(0,0xfffffff))[2:].upper() - else: - name = kwargs['name'] - - if len(values) == 1: - # If there's only one value, we have a couple of alternate calling - # styles. 
- if isinstance(values[0], basestring) or hasattr(values[0], '__iter__'): - values = values[0] - - return type(name, (Enumerated,), {'values': values}) + def __lt__(self, other): + if self.__class__ is other.__class__: + return self.value < other.value + return NotImplemented diff --git a/libs/beets/util/functemplate.py b/libs/beets/util/functemplate.py index 0fce41e5..05f0892c 100644 --- a/libs/beets/util/functemplate.py +++ b/libs/beets/util/functemplate.py @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2013, Adrian Sampson. +# Copyright 2016, Adrian Sampson. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -25,13 +26,16 @@ library: unknown symbols are left intact. This is sort of like a tiny, horrible degeneration of a real templating engine like Jinja2 or Mustache. """ -from __future__ import print_function + +from __future__ import division, absolute_import, print_function import re import ast import dis import types +from .confit import NUMERIC_TYPES + SYMBOL_DELIM = u'$' FUNC_DELIM = u'%' GROUP_OPEN = u'{' @@ -42,6 +46,7 @@ ESCAPE_CHAR = u'$' VARIABLE_PREFIX = '__var_' FUNCTION_PREFIX = '__func_' + class Environment(object): """Contains the values and functions to be substituted into a template. @@ -57,23 +62,26 @@ def ex_lvalue(name): """A variable load expression.""" return ast.Name(name, ast.Store()) + def ex_rvalue(name): """A variable store expression.""" return ast.Name(name, ast.Load()) + def ex_literal(val): """An int, float, long, bool, string, or None literal with the given value. 
""" if val is None: return ast.Name('None', ast.Load()) - elif isinstance(val, (int, float, long)): + elif isinstance(val, NUMERIC_TYPES): return ast.Num(val) elif isinstance(val, bool): - return ast.Name(str(val), ast.Load()) + return ast.Name(bytes(val), ast.Load()) elif isinstance(val, basestring): return ast.Str(val) - raise TypeError('no literal for {0}'.format(type(val))) + raise TypeError(u'no literal for {0}'.format(type(val))) + def ex_varassign(name, expr): """Assign an expression into a single variable. The expression may @@ -83,6 +91,7 @@ def ex_varassign(name, expr): expr = ex_literal(expr) return ast.Assign([ex_lvalue(name)], expr) + def ex_call(func, args): """A function-call expression with only positional parameters. The function may be an expression or the name of a function. Each @@ -98,13 +107,14 @@ def ex_call(func, args): return ast.Call(func, args, [], None, None) + def compile_func(arg_names, statements, name='_the_func', debug=False): """Compile a list of statements as the body of a function and return the resulting Python function. If `debug`, then print out the bytecode of the compiled function. """ func_def = ast.FunctionDef( - name, + name.encode('utf8'), ast.arguments( [ast.Name(n, ast.Param()) for n in arg_names], None, None, @@ -126,7 +136,7 @@ def compile_func(arg_names, statements, name='_the_func', debug=False): dis.dis(const) the_locals = {} - exec prog in {}, the_locals + exec(prog, {}, the_locals) return the_locals[name] @@ -157,6 +167,7 @@ class Symbol(object): expr = ex_rvalue(VARIABLE_PREFIX + self.ident.encode('utf8')) return [expr], set([self.ident.encode('utf8')]), set() + class Call(object): """A function call in a template.""" def __init__(self, ident, args, original): @@ -214,6 +225,7 @@ class Call(object): ) return [subexpr_call], varnames, funcnames + class Expression(object): """Top-level template construct: contains a list of text blobs, Symbols, and Calls. 
@@ -259,6 +271,7 @@ class Expression(object): class ParseError(Exception): pass + class Parser(object): """Parses a template expression string. Instantiate the class with the template source and call ``parse_expression``. The ``pos`` field @@ -280,7 +293,7 @@ class Parser(object): # Common parsing resources. special_chars = (SYMBOL_DELIM, FUNC_DELIM, GROUP_OPEN, GROUP_CLOSE, ARG_SEP, ESCAPE_CHAR) - special_char_re = re.compile(ur'[%s]|$' % + special_char_re = re.compile(r'[%s]|$' % u''.join(re.escape(c) for c in special_chars)) def parse_expression(self): @@ -298,8 +311,8 @@ class Parser(object): # A non-special character. Skip to the next special # character, treating the interstice as literal text. next_pos = ( - self.special_char_re.search(self.string[self.pos:]).start() - + self.pos + self.special_char_re.search( + self.string[self.pos:]).start() + self.pos ) text_parts.append(self.string[self.pos:next_pos]) self.pos = next_pos @@ -316,13 +329,13 @@ class Parser(object): next_char = self.string[self.pos + 1] if char == ESCAPE_CHAR and next_char in \ - (SYMBOL_DELIM, FUNC_DELIM, GROUP_CLOSE, ARG_SEP): + (SYMBOL_DELIM, FUNC_DELIM, GROUP_CLOSE, ARG_SEP): # An escaped special character ($$, $}, etc.). Note that # ${ is not an escape sequence: this is ambiguous with # the start of a symbol and it's not necessary (just # using { suffices in all cases). text_parts.append(next_char) - self.pos += 2 # Skip the next character. + self.pos += 2 # Skip the next character. continue # Shift all characters collected so far into a single string. @@ -372,7 +385,7 @@ class Parser(object): if next_char == GROUP_OPEN: # A symbol like ${this}. - self.pos += 1 # Skip opening. + self.pos += 1 # Skip opening. closer = self.string.find(GROUP_CLOSE, self.pos) if closer == -1 or closer == self.pos: # No closing brace found or identifier is empty. @@ -431,7 +444,7 @@ class Parser(object): self.parts.append(self.string[start_pos:self.pos]) return - self.pos += 1 # Move past closing brace. 
+ self.pos += 1 # Move past closing brace. self.parts.append(Call(ident, args, self.string[start_pos:self.pos])) def parse_argument_list(self): @@ -468,10 +481,11 @@ class Parser(object): Updates ``pos``. """ remainder = self.string[self.pos:] - ident = re.match(ur'\w*', remainder).group(0) + ident = re.match(r'\w*', remainder).group(0) self.pos += len(ident) return ident + def _parse(template): """Parse a top-level template string Expression. Any extraneous text is considered literal text. @@ -522,9 +536,9 @@ class Template(object): argnames = [] for varname in varnames: - argnames.append(VARIABLE_PREFIX.encode('utf8') + varname) + argnames.append(VARIABLE_PREFIX + varname) for funcname in funcnames: - argnames.append(FUNCTION_PREFIX.encode('utf8') + funcname) + argnames.append(FUNCTION_PREFIX + funcname) func = compile_func( argnames, @@ -558,4 +572,4 @@ if __name__ == '__main__': 'from __main__ import _tmpl, _vars, _funcs', number=10000) print(comp_time) - print('Speedup:', interp_time / comp_time) + print(u'Speedup:', interp_time / comp_time) diff --git a/libs/beets/util/hidden.py b/libs/beets/util/hidden.py new file mode 100644 index 00000000..262d371e --- /dev/null +++ b/libs/beets/util/hidden.py @@ -0,0 +1,88 @@ +# -*- coding: utf-8 -*- +# This file is part of beets. +# Copyright 2016, Adrian Sampson. +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. 
+ +"""Simple library to work out if a file is hidden on different platforms.""" +from __future__ import division, absolute_import, print_function + +import os +import stat +import ctypes +import sys + + +def _is_hidden_osx(path): + """Return whether or not a file is hidden on OS X. + + This uses os.lstat to work out if a file has the "hidden" flag. + """ + file_stat = os.lstat(path) + + if hasattr(file_stat, 'st_flags') and hasattr(stat, 'UF_HIDDEN'): + return bool(file_stat.st_flags & stat.UF_HIDDEN) + else: + return False + + +def _is_hidden_win(path): + """Return whether or not a file is hidden on Windows. + + This uses GetFileAttributes to work out if a file has the "hidden" flag + (FILE_ATTRIBUTE_HIDDEN). + """ + # FILE_ATTRIBUTE_HIDDEN = 2 (0x2) from GetFileAttributes documentation. + hidden_mask = 2 + + # Retrieve the attributes for the file. + attrs = ctypes.windll.kernel32.GetFileAttributesW(path) + + # Ensure we have valid attribues and compare them against the mask. + return attrs >= 0 and attrs & hidden_mask + + +def _is_hidden_dot(path): + """Return whether or not a file starts with a dot. + + Files starting with a dot are seen as "hidden" files on Unix-based OSes. + """ + return os.path.basename(path).startswith('.') + + +def is_hidden(path): + """Return whether or not a file is hidden. + + This method works differently depending on the platform it is called on. + + On OS X, it uses both the result of `is_hidden_osx` and `is_hidden_dot` to + work out if a file is hidden. + + On Windows, it uses the result of `is_hidden_win` to work out if a file is + hidden. + + On any other operating systems (i.e. Linux), it uses `is_hidden_dot` to + work out if a file is hidden. + """ + # Convert the path to unicode if it is not already. 
+ if not isinstance(path, unicode): + path = path.decode('utf-8') + + # Run platform specific functions depending on the platform + if sys.platform == 'darwin': + return _is_hidden_osx(path) or _is_hidden_dot(path) + elif sys.platform == 'win32': + return _is_hidden_win(path) + else: + return _is_hidden_dot(path) + +__all__ = ['is_hidden'] diff --git a/libs/beets/util/pipeline.py b/libs/beets/util/pipeline.py index c64454ff..b5f77733 100644 --- a/libs/beets/util/pipeline.py +++ b/libs/beets/util/pipeline.py @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2013, Adrian Sampson. +# Copyright 2016, Adrian Sampson. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -30,18 +31,19 @@ up a bottleneck stage by dividing its work among multiple threads. To do so, pass an iterable of coroutines to the Pipeline constructor in place of any single coroutine. """ -from __future__ import print_function + +from __future__ import division, absolute_import, print_function import Queue from threading import Thread, Lock import sys -import types BUBBLE = '__PIPELINE_BUBBLE__' POISON = '__PIPELINE_POISON__' DEFAULT_QUEUE_SIZE = 16 + def _invalidate_queue(q, val=None, sync=True): """Breaks a Queue such that it never blocks, always has size 1, and has no maximum size. get()ing from the queue returns `val`, @@ -50,8 +52,10 @@ def _invalidate_queue(q, val=None, sync=True): """ def _qsize(len=len): return 1 + def _put(item): pass + def _get(): return val @@ -70,6 +74,7 @@ def _invalidate_queue(q, val=None, sync=True): if sync: q.mutex.release() + class CountedQueue(Queue.Queue): """A queue that keeps track of the number of threads that are still feeding into it. The queue is poisoned when all threads are @@ -104,6 +109,7 @@ class CountedQueue(Queue.Queue): # Replacement _get invalidates when no items remain. 
_old_get = self._get + def _get(): out = _old_get() if not self.queue: @@ -117,18 +123,67 @@ class CountedQueue(Queue.Queue): # No items. Invalidate immediately. _invalidate_queue(self, POISON, False) + class MultiMessage(object): """A message yielded by a pipeline stage encapsulating multiple values to be sent to the next stage. """ def __init__(self, messages): self.messages = messages + + def multiple(messages): """Yield multiple([message, ..]) from a pipeline stage to send multiple values to the next pipeline stage. """ return MultiMessage(messages) + +def stage(func): + """Decorate a function to become a simple stage. + + >>> @stage + ... def add(n, i): + ... return i + n + >>> pipe = Pipeline([ + ... iter([1, 2, 3]), + ... add(2), + ... ]) + >>> list(pipe.pull()) + [3, 4, 5] + """ + + def coro(*args): + task = None + while True: + task = yield task + task = func(*(args + (task,))) + return coro + + +def mutator_stage(func): + """Decorate a function that manipulates items in a coroutine to + become a simple stage. + + >>> @mutator_stage + ... def setkey(key, item): + ... item[key] = True + >>> pipe = Pipeline([ + ... iter([{'x': False}, {'a': False}]), + ... setkey('x'), + ... ]) + >>> list(pipe.pull()) + [{'x': True}, {'a': False, 'x': True}] + """ + + def coro(*args): + task = None + while True: + task = yield task + func(*(args + (task,))) + return coro + + def _allmsgs(obj): """Returns a list of all the messages encapsulated in obj. If obj is a MultiMessage, returns its enclosed messages. If obj is BUBBLE, @@ -141,6 +196,7 @@ def _allmsgs(obj): else: return [obj] + class PipelineThread(Thread): """Abstract base class for pipeline-stage threads.""" def __init__(self, all_threads): @@ -169,6 +225,7 @@ class PipelineThread(Thread): for thread in self.all_threads: thread.abort() + class FirstPipelineThread(PipelineThread): """The thread running the first stage in a parallel pipeline setup. The coroutine should just be a generator. 
@@ -191,7 +248,7 @@ class FirstPipelineThread(PipelineThread): # Get the value from the generator. try: - msg = self.coro.next() + msg = next(self.coro) except StopIteration: break @@ -209,6 +266,7 @@ class FirstPipelineThread(PipelineThread): # Generator finished; shut down the pipeline. self.out_queue.release() + class MiddlePipelineThread(PipelineThread): """A thread running any stage in the pipeline except the first or last. @@ -223,7 +281,7 @@ class MiddlePipelineThread(PipelineThread): def run(self): try: # Prime the coroutine. - self.coro.next() + next(self.coro) while True: with self.abort_lock: @@ -256,6 +314,7 @@ class MiddlePipelineThread(PipelineThread): # Pipeline is shutting down normally. self.out_queue.release() + class LastPipelineThread(PipelineThread): """A thread running the last stage in a pipeline. The coroutine should yield nothing. @@ -267,7 +326,7 @@ class LastPipelineThread(PipelineThread): def run(self): # Prime the coroutine. - self.coro.next() + next(self.coro) try: while True: @@ -291,6 +350,7 @@ class LastPipelineThread(PipelineThread): self.abort_all(sys.exc_info()) return + class Pipeline(object): """Represents a staged pattern of work. Each stage in the pipeline is a coroutine that receives messages from the previous stage and @@ -301,7 +361,7 @@ class Pipeline(object): be at least two stages. """ if len(stages) < 2: - raise ValueError('pipeline must have at least two stages') + raise ValueError(u'pipeline must have at least two stages') self.stages = [] for stage in stages: if isinstance(stage, (list, tuple)): @@ -322,7 +382,8 @@ class Pipeline(object): messages between the stages are stored in queues of the given size. """ - queues = [CountedQueue(queue_size) for i in range(len(self.stages)-1)] + queue_count = len(self.stages) - 1 + queues = [CountedQueue(queue_size) for i in range(queue_count)] threads = [] # Set up first stage. 
@@ -330,10 +391,10 @@ class Pipeline(object): threads.append(FirstPipelineThread(coro, queues[0], threads)) # Middle stages. - for i in range(1, len(self.stages)-1): + for i in range(1, queue_count): for coro in self.stages[i]: threads.append(MiddlePipelineThread( - coro, queues[i-1], queues[i], threads + coro, queues[i - 1], queues[i], threads )) # Last stage. @@ -383,7 +444,7 @@ class Pipeline(object): # "Prime" the coroutines. for coro in coros[1:]: - coro.next() + next(coro) # Begin the pipeline. for out in coros[0]: @@ -405,20 +466,23 @@ if __name__ == '__main__': # in parallel. def produce(): for i in range(5): - print('generating %i' % i) + print(u'generating %i' % i) time.sleep(1) yield i + def work(): num = yield while True: - print('processing %i' % num) + print(u'processing %i' % num) time.sleep(2) - num = yield num*2 + num = yield num * 2 + def consume(): while True: num = yield time.sleep(1) - print('received %i' % num) + print(u'received %i' % num) + ts_start = time.time() Pipeline([produce(), work(), consume()]).run_sequential() ts_seq = time.time() @@ -426,29 +490,30 @@ if __name__ == '__main__': ts_par = time.time() Pipeline([produce(), (work(), work()), consume()]).run_parallel() ts_end = time.time() - print('Sequential time:', ts_seq - ts_start) - print('Parallel time:', ts_par - ts_seq) - print('Multiply-parallel time:', ts_end - ts_par) + print(u'Sequential time:', ts_seq - ts_start) + print(u'Parallel time:', ts_par - ts_seq) + print(u'Multiply-parallel time:', ts_end - ts_par) print() # Test a pipeline that raises an exception. 
def exc_produce(): for i in range(10): - print('generating %i' % i) + print(u'generating %i' % i) time.sleep(1) yield i + def exc_work(): num = yield while True: - print('processing %i' % num) + print(u'processing %i' % num) time.sleep(3) if num == 3: raise Exception() num = yield num * 2 + def exc_consume(): while True: num = yield - #if num == 4: - # raise Exception() - print('received %i' % num) + print(u'received %i' % num) + Pipeline([exc_produce(), exc_work(), exc_consume()]).run_parallel(1) diff --git a/libs/beets/vfs.py b/libs/beets/vfs.py index 235f3604..7f9a049e 100644 --- a/libs/beets/vfs.py +++ b/libs/beets/vfs.py @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # This file is part of beets. -# Copyright 2013, Adrian Sampson. +# Copyright 2016, Adrian Sampson. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -15,11 +16,14 @@ """A simple utility for constructing filesystem-like trees from beets libraries. """ +from __future__ import division, absolute_import, print_function + from collections import namedtuple from beets import util Node = namedtuple('Node', ['files', 'dirs']) + def _insert(node, path, itemid): """Insert an item into a virtual filesystem node.""" if len(path) == 1: @@ -33,6 +37,7 @@ def _insert(node, path, itemid): node.dirs[dirname] = Node({}, {}) _insert(node.dirs[dirname], rest, itemid) + def libtree(lib): """Generates a filesystem-like directory tree for the files contained in `lib`. Filesystem nodes are (files, dirs) named diff --git a/libs/beetsplug/__init__.py b/libs/beetsplug/__init__.py new file mode 100644 index 00000000..febeb66f --- /dev/null +++ b/libs/beetsplug/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# This file is part of beets. +# Copyright 2016, Adrian Sampson. 
+# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. + +"""A namespace package for beets plugins.""" + +from __future__ import division, absolute_import, print_function + +# Make this a namespace package. +from pkgutil import extend_path +__path__ = extend_path(__path__, __name__) diff --git a/libs/beetsplug/acousticbrainz.py b/libs/beetsplug/acousticbrainz.py new file mode 100644 index 00000000..df790b26 --- /dev/null +++ b/libs/beetsplug/acousticbrainz.py @@ -0,0 +1,165 @@ +# -*- coding: utf-8 -*- +# This file is part of beets. +# Copyright 2015-2016, Ohm Patel. +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. + +"""Fetch various AcousticBrainz metadata using MBID. 
+""" +from __future__ import division, absolute_import, print_function + +import requests +import operator + +from beets import plugins, ui +from functools import reduce + +ACOUSTIC_BASE = "https://acousticbrainz.org/" +LEVELS = ["/low-level", "/high-level"] + + +class AcousticPlugin(plugins.BeetsPlugin): + def __init__(self): + super(AcousticPlugin, self).__init__() + + self.config.add({'auto': True}) + if self.config['auto']: + self.register_listener('import_task_files', + self.import_task_files) + + def commands(self): + cmd = ui.Subcommand('acousticbrainz', + help=u"fetch metadata from AcousticBrainz") + + def func(lib, opts, args): + items = lib.items(ui.decargs(args)) + fetch_info(self._log, items, ui.should_write()) + + cmd.func = func + return [cmd] + + def import_task_files(self, session, task): + """Function is called upon beet import. + """ + + items = task.imported_items() + fetch_info(self._log, items, False) + + +def fetch_info(log, items, write): + """Get data from AcousticBrainz for the items. + """ + + def get_value(*map_path): + try: + return reduce(operator.getitem, map_path, data) + except KeyError: + log.debug(u'Invalid Path: {}', map_path) + + for item in items: + if item.mb_trackid: + log.info(u'getting data for: {}', item) + + # Fetch the data from the AB API. + urls = [generate_url(item.mb_trackid, path) for path in LEVELS] + log.debug(u'fetching URLs: {}', ' '.join(urls)) + try: + res = [requests.get(url) for url in urls] + except requests.RequestException as exc: + log.info(u'request error: {}', exc) + continue + + # Check for missing tracks. + if any(r.status_code == 404 for r in res): + log.info(u'recording ID {} not found', item.mb_trackid) + continue + + # Parse the JSON response. + try: + data = res[0].json() + data.update(res[1].json()) + except ValueError: + log.debug(u'Invalid Response: {} & {}', [r.text for r in res]) + + # Get each field and assign it on the item. 
+ item.danceable = get_value( + "highlevel", "danceability", "all", "danceable", + ) + item.gender = get_value( + "highlevel", "gender", "value", + ) + item.genre_rosamerica = get_value( + "highlevel", "genre_rosamerica", "value" + ) + item.mood_acoustic = get_value( + "highlevel", "mood_acoustic", "all", "acoustic" + ) + item.mood_aggressive = get_value( + "highlevel", "mood_aggressive", "all", "aggressive" + ) + item.mood_electronic = get_value( + "highlevel", "mood_electronic", "all", "electronic" + ) + item.mood_happy = get_value( + "highlevel", "mood_happy", "all", "happy" + ) + item.mood_party = get_value( + "highlevel", "mood_party", "all", "party" + ) + item.mood_relaxed = get_value( + "highlevel", "mood_relaxed", "all", "relaxed" + ) + item.mood_sad = get_value( + "highlevel", "mood_sad", "all", "sad" + ) + item.rhythm = get_value( + "highlevel", "ismir04_rhythm", "value" + ) + item.tonal = get_value( + "highlevel", "tonal_atonal", "all", "tonal" + ) + item.voice_instrumental = get_value( + "highlevel", "voice_instrumental", "value" + ) + item.average_loudness = get_value( + "lowlevel", "average_loudness" + ) + item.chords_changes_rate = get_value( + "tonal", "chords_changes_rate" + ) + item.chords_key = get_value( + "tonal", "chords_key" + ) + item.chords_number_rate = get_value( + "tonal", "chords_number_rate" + ) + item.chords_scale = get_value( + "tonal", "chords_scale" + ) + item.initial_key = '{} {}'.format( + get_value("tonal", "key_key"), + get_value("tonal", "key_scale") + ) + item.key_strength = get_value( + "tonal", "key_strength" + ) + + # Store the data. + item.store() + if write: + item.try_write() + + +def generate_url(mbid, level): + """Generates AcousticBrainz end point url for given MBID. 
+ """ + return ACOUSTIC_BASE + mbid + level diff --git a/libs/beetsplug/badfiles.py b/libs/beetsplug/badfiles.py new file mode 100644 index 00000000..f9704d48 --- /dev/null +++ b/libs/beetsplug/badfiles.py @@ -0,0 +1,120 @@ +# -*- coding: utf-8 -*- +# This file is part of beets. +# Copyright 2016, François-Xavier Thomas. +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. + +"""Use command-line tools to check for audio file corruption. +""" + +from __future__ import division, absolute_import, print_function + +from beets.plugins import BeetsPlugin +from beets.ui import Subcommand +from beets.util import displayable_path, confit +from beets import ui +from subprocess import check_output, CalledProcessError, list2cmdline, STDOUT +import shlex +import os +import errno +import sys + + +class BadFiles(BeetsPlugin): + def run_command(self, cmd): + self._log.debug(u"running command: {}", + displayable_path(list2cmdline(cmd))) + try: + output = check_output(cmd, stderr=STDOUT) + errors = 0 + status = 0 + except CalledProcessError as e: + output = e.output + errors = 1 + status = e.returncode + except OSError as e: + if e.errno == errno.ENOENT: + ui.print_(u"command not found: {}".format(cmd[0])) + sys.exit(1) + else: + raise + output = output.decode(sys.getfilesystemencoding()) + return status, errors, [line for line in output.split("\n") if line] + + def check_mp3val(self, path): + status, errors, output = self.run_command(["mp3val", path]) 
+        if status == 0:
+            output = [line for line in output if line.startswith("WARNING:")]
+            errors = len(output)
+        return status, errors, output
+
+    def check_flac(self, path):
+        return self.run_command(["flac", "-wst", path])
+
+    def check_custom(self, command):
+        def checker(path):
+            cmd = shlex.split(command)
+            cmd.append(path)
+            return self.run_command(cmd)
+        return checker
+
+    def get_checker(self, ext):
+        ext = ext.lower()
+        try:
+            command = self.config['commands'].get(dict).get(ext)
+        except confit.NotFoundError:
+            command = None
+        if command:
+            return self.check_custom(command)
+        elif ext == "mp3":
+            return self.check_mp3val
+        elif ext == "flac":
+            return self.check_flac
+
+    def check_bad(self, lib, opts, args):
+        for item in lib.items(ui.decargs(args)):
+
+            # First, check whether the path exists. If not, the user
+            # should probably run `beet update` to clean up your library.
+            dpath = displayable_path(item.path)
+            self._log.debug(u"checking path: {}", dpath)
+            if not os.path.exists(item.path):
+                ui.print_(u"{}: file does not exist".format(
+                    ui.colorize('text_error', dpath)))
+
+            # Run the checker against the file if one is found
+            ext = os.path.splitext(item.path)[1][1:]
+            checker = self.get_checker(ext)
+            if not checker:
+                continue
+            path = item.path
+            if not isinstance(path, unicode):
+                path = item.path.decode(sys.getfilesystemencoding())
+            status, errors, output = checker(path)
+            if status > 0:
+                ui.print_(u"{}: checker exited with status {}"
+                          .format(ui.colorize('text_error', dpath), status))
+                for line in output:
+                    ui.print_(" {}".format(displayable_path(line)))
+            elif errors > 0:
+                ui.print_(u"{}: checker found {} errors or warnings"
+                          .format(ui.colorize('text_warning', dpath), errors))
+                for line in output:
+                    ui.print_(u" {}".format(displayable_path(line)))
+            else:
+                ui.print_(u"{}: ok".format(ui.colorize('text_success', dpath)))
+
+    def commands(self):
+        bad_command = Subcommand('bad',
+                                 help=u'check for corrupt or missing files')
+
bad_command.func = self.check_bad
+        return [bad_command]
diff --git a/libs/beetsplug/bench.py b/libs/beetsplug/bench.py
new file mode 100644
index 00000000..41f575cd
--- /dev/null
+++ b/libs/beetsplug/bench.py
@@ -0,0 +1,109 @@
+# -*- coding: utf-8 -*-
+# This file is part of beets.
+# Copyright 2016, Adrian Sampson.
+#
+# Permission is hereby granted, free of charge, to any person obtaining
+# a copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish,
+# distribute, sublicense, and/or sell copies of the Software, and to
+# permit persons to whom the Software is furnished to do so, subject to
+# the following conditions:
+#
+# The above copyright notice and this permission notice shall be
+# included in all copies or substantial portions of the Software.
+
+"""Some simple performance benchmarks for beets.
+"""
+
+from __future__ import division, absolute_import, print_function
+
+from beets.plugins import BeetsPlugin
+from beets import ui
+from beets import vfs
+from beets import library
+from beets.util.functemplate import Template
+from beets.autotag import match
+from beets import plugins
+from beets import importer
+import cProfile
+import timeit
+
+
+def aunique_benchmark(lib, prof):
+    def _build_tree():
+        vfs.libtree(lib)
+
+    # Measure path generation performance with %aunique{} included.
+    lib.path_formats = [
+        (library.PF_KEY_DEFAULT,
+         Template('$albumartist/$album%aunique{}/$track $title')),
+    ]
+    if prof:
+        cProfile.runctx('_build_tree()', {}, {'_build_tree': _build_tree},
+                        'paths.withaunique.prof')
+    else:
+        interval = timeit.timeit(_build_tree, number=1)
+        print('With %aunique:', interval)
+
+    # And with %aunique replaced with a "cheap" no-op function.
+ lib.path_formats = [ + (library.PF_KEY_DEFAULT, + Template('$albumartist/$album%lower{}/$track $title')), + ] + if prof: + cProfile.runctx('_build_tree()', {}, {'_build_tree': _build_tree}, + 'paths.withoutaunique.prof') + else: + interval = timeit.timeit(_build_tree, number=1) + print('Without %aunique:', interval) + + +def match_benchmark(lib, prof, query=None, album_id=None): + # If no album ID is provided, we'll match against a suitably huge + # album. + if not album_id: + album_id = '9c5c043e-bc69-4edb-81a4-1aaf9c81e6dc' + + # Get an album from the library to use as the source for the match. + items = lib.albums(query).get().items() + + # Ensure fingerprinting is invoked (if enabled). + plugins.send('import_task_start', + task=importer.ImportTask(None, None, items), + session=importer.ImportSession(lib, None, None, None)) + + # Run the match. + def _run_match(): + match.tag_album(items, search_ids=[album_id]) + if prof: + cProfile.runctx('_run_match()', {}, {'_run_match': _run_match}, + 'match.prof') + else: + interval = timeit.timeit(_run_match, number=1) + print('match duration:', interval) + + +class BenchmarkPlugin(BeetsPlugin): + """A plugin for performing some simple performance benchmarks. 
+ """ + def commands(self): + aunique_bench_cmd = ui.Subcommand('bench_aunique', + help='benchmark for %aunique{}') + aunique_bench_cmd.parser.add_option('-p', '--profile', + action='store_true', default=False, + help='performance profiling') + aunique_bench_cmd.func = lambda lib, opts, args: \ + aunique_benchmark(lib, opts.profile) + + match_bench_cmd = ui.Subcommand('bench_match', + help='benchmark for track matching') + match_bench_cmd.parser.add_option('-p', '--profile', + action='store_true', default=False, + help='performance profiling') + match_bench_cmd.parser.add_option('-i', '--id', default=None, + help='album ID to match against') + match_bench_cmd.func = lambda lib, opts, args: \ + match_benchmark(lib, opts.profile, ui.decargs(args), opts.id) + + return [aunique_bench_cmd, match_bench_cmd] diff --git a/libs/beetsplug/bpd/__init__.py b/libs/beetsplug/bpd/__init__.py new file mode 100644 index 00000000..33deda02 --- /dev/null +++ b/libs/beetsplug/bpd/__init__.py @@ -0,0 +1,1193 @@ +# -*- coding: utf-8 -*- +# This file is part of beets. +# Copyright 2016, Adrian Sampson. +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. + +"""A clone of the Music Player Daemon (MPD) that plays music from a +Beets library. Attempts to implement a compatible protocol to allow +use of the wide range of MPD clients. 
+""" + +from __future__ import division, absolute_import, print_function + +import re +from string import Template +import traceback +import random +import time + +import beets +from beets.plugins import BeetsPlugin +import beets.ui +from beets import logging +from beets import vfs +from beets.util import bluelet +from beets.library import Item +from beets import dbcore +from beets.mediafile import MediaFile + +PROTOCOL_VERSION = '0.13.0' +BUFSIZE = 1024 + +HELLO = 'OK MPD %s' % PROTOCOL_VERSION +CLIST_BEGIN = 'command_list_begin' +CLIST_VERBOSE_BEGIN = 'command_list_ok_begin' +CLIST_END = 'command_list_end' +RESP_OK = 'OK' +RESP_CLIST_VERBOSE = 'list_OK' +RESP_ERR = 'ACK' + +NEWLINE = u"\n" + +ERROR_NOT_LIST = 1 +ERROR_ARG = 2 +ERROR_PASSWORD = 3 +ERROR_PERMISSION = 4 +ERROR_UNKNOWN = 5 +ERROR_NO_EXIST = 50 +ERROR_PLAYLIST_MAX = 51 +ERROR_SYSTEM = 52 +ERROR_PLAYLIST_LOAD = 53 +ERROR_UPDATE_ALREADY = 54 +ERROR_PLAYER_SYNC = 55 +ERROR_EXIST = 56 + +VOLUME_MIN = 0 +VOLUME_MAX = 100 + +SAFE_COMMANDS = ( + # Commands that are available when unauthenticated. + u'close', u'commands', u'notcommands', u'password', u'ping', +) + +ITEM_KEYS_WRITABLE = set(MediaFile.fields()).intersection(Item._fields.keys()) + +# Loggers. +log = logging.getLogger('beets.bpd') +global_log = logging.getLogger('beets') + + +# Gstreamer import error. +class NoGstreamerError(Exception): + pass + + +# Error-handling, exceptions, parameter parsing. + +class BPDError(Exception): + """An error that should be exposed to the client to the BPD + server. + """ + def __init__(self, code, message, cmd_name='', index=0): + self.code = code + self.message = message + self.cmd_name = cmd_name + self.index = index + + template = Template(u'$resp [$code@$index] {$cmd_name} $message') + + def response(self): + """Returns a string to be used as the response code for the + erring command. 
+ """ + return self.template.substitute({ + 'resp': RESP_ERR, + 'code': self.code, + 'index': self.index, + 'cmd_name': self.cmd_name, + 'message': self.message, + }) + + +def make_bpd_error(s_code, s_message): + """Create a BPDError subclass for a static code and message. + """ + + class NewBPDError(BPDError): + code = s_code + message = s_message + cmd_name = '' + index = 0 + + def __init__(self): + pass + return NewBPDError + +ArgumentTypeError = make_bpd_error(ERROR_ARG, u'invalid type for argument') +ArgumentIndexError = make_bpd_error(ERROR_ARG, u'argument out of range') +ArgumentNotFoundError = make_bpd_error(ERROR_NO_EXIST, u'argument not found') + + +def cast_arg(t, val): + """Attempts to call t on val, raising a ArgumentTypeError + on ValueError. + + If 't' is the special string 'intbool', attempts to cast first + to an int and then to a bool (i.e., 1=True, 0=False). + """ + if t == 'intbool': + return cast_arg(bool, cast_arg(int, val)) + else: + try: + return t(val) + except ValueError: + raise ArgumentTypeError() + + +class BPDClose(Exception): + """Raised by a command invocation to indicate that the connection + should be closed. + """ + +# Generic server infrastructure, implementing the basic protocol. + + +class BaseServer(object): + """A MPD-compatible music player server. + + The functions with the `cmd_` prefix are invoked in response to + client commands. For instance, if the client says `status`, + `cmd_status` will be invoked. The arguments to the client's commands + are used as function arguments following the connection issuing the + command. The functions may send data on the connection. They may + also raise BPDError exceptions to report errors. + + This is a generic superclass and doesn't support many commands. + """ + + def __init__(self, host, port, password): + """Create a new server bound to address `host` and listening + on port `port`. If `password` is given, it is required to do + anything significant on the server. 
+        """
+        self.host, self.port, self.password = host, port, password
+
+        # Default server values.
+        self.random = False
+        self.repeat = False
+        self.volume = VOLUME_MAX
+        self.crossfade = 0
+        self.playlist = []
+        self.playlist_version = 0
+        self.current_index = -1
+        self.paused = False
+        self.error = None
+
+        # Object for random numbers generation
+        self.random_obj = random.Random()
+
+    def run(self):
+        """Block and start listening for connections from clients. An
+        interrupt (^C) closes the server.
+        """
+        self.startup_time = time.time()
+        bluelet.run(bluelet.server(self.host, self.port,
+                                   Connection.handler(self)))
+
+    def _item_info(self, item):
+        """An abstract method that should return response lines
+        containing a single song's metadata.
+        """
+        raise NotImplementedError
+
+    def _item_id(self, item):
+        """An abstract method returning the integer id for an item.
+        """
+        raise NotImplementedError
+
+    def _id_to_index(self, track_id):
+        """Searches the playlist for a song with the given id and
+        returns its index in the playlist.
+        """
+        track_id = cast_arg(int, track_id)
+        for index, track in enumerate(self.playlist):
+            if self._item_id(track) == track_id:
+                return index
+        # Loop finished with no track found.
+        raise ArgumentNotFoundError()
+
+    def _random_idx(self):
+        """Returns a random index different from the current one.
+        If there are no songs in the playlist it returns -1.
+        If there is only one song in the playlist it returns 0.
+        """
+        if len(self.playlist) < 2:
+            return len(self.playlist) - 1
+        new_index = self.random_obj.randint(0, len(self.playlist) - 1)
+        while new_index == self.current_index:
+            new_index = self.random_obj.randint(0, len(self.playlist) - 1)
+        return new_index
+
+    def _succ_idx(self):
+        """Returns the index for the next song to play.
+        It also considers random and repeat flags.
+        No boundaries are checked.
+ """ + if self.repeat: + return self.current_index + if self.random: + return self._random_idx() + return self.current_index + 1 + + def _prev_idx(self): + """Returns the index for the previous song to play. + It also considers random and repeat flags. + No boundaries are checked. + """ + if self.repeat: + return self.current_index + if self.random: + return self._random_idx() + return self.current_index - 1 + + def cmd_ping(self, conn): + """Succeeds.""" + pass + + def cmd_kill(self, conn): + """Exits the server process.""" + exit(0) + + def cmd_close(self, conn): + """Closes the connection.""" + raise BPDClose() + + def cmd_password(self, conn, password): + """Attempts password authentication.""" + if password == self.password: + conn.authenticated = True + else: + conn.authenticated = False + raise BPDError(ERROR_PASSWORD, u'incorrect password') + + def cmd_commands(self, conn): + """Lists the commands available to the user.""" + if self.password and not conn.authenticated: + # Not authenticated. Show limited list of commands. + for cmd in SAFE_COMMANDS: + yield u'command: ' + cmd + + else: + # Authenticated. Show all commands. + for func in dir(self): + if func.startswith('cmd_'): + yield u'command: ' + func[4:] + + def cmd_notcommands(self, conn): + """Lists all unavailable commands.""" + if self.password and not conn.authenticated: + # Not authenticated. Show privileged commands. + for func in dir(self): + if func.startswith('cmd_'): + cmd = func[4:] + if cmd not in SAFE_COMMANDS: + yield u'command: ' + cmd + + else: + # Authenticated. No commands are unavailable. + pass + + def cmd_status(self, conn): + """Returns some status information for use with an + implementation of cmd_status. + + Gives a list of response-lines for: volume, repeat, random, + playlist, playlistlength, and xfade. 
+ """ + yield ( + u'volume: ' + unicode(self.volume), + u'repeat: ' + unicode(int(self.repeat)), + u'random: ' + unicode(int(self.random)), + u'playlist: ' + unicode(self.playlist_version), + u'playlistlength: ' + unicode(len(self.playlist)), + u'xfade: ' + unicode(self.crossfade), + ) + + if self.current_index == -1: + state = u'stop' + elif self.paused: + state = u'pause' + else: + state = u'play' + yield u'state: ' + state + + if self.current_index != -1: # i.e., paused or playing + current_id = self._item_id(self.playlist[self.current_index]) + yield u'song: ' + unicode(self.current_index) + yield u'songid: ' + unicode(current_id) + + if self.error: + yield u'error: ' + self.error + + def cmd_clearerror(self, conn): + """Removes the persistent error state of the server. This + error is set when a problem arises not in response to a + command (for instance, when playing a file). + """ + self.error = None + + def cmd_random(self, conn, state): + """Set or unset random (shuffle) mode.""" + self.random = cast_arg('intbool', state) + + def cmd_repeat(self, conn, state): + """Set or unset repeat mode.""" + self.repeat = cast_arg('intbool', state) + + def cmd_setvol(self, conn, vol): + """Set the player's volume level (0-100).""" + vol = cast_arg(int, vol) + if vol < VOLUME_MIN or vol > VOLUME_MAX: + raise BPDError(ERROR_ARG, u'volume out of range') + self.volume = vol + + def cmd_crossfade(self, conn, crossfade): + """Set the number of seconds of crossfading.""" + crossfade = cast_arg(int, crossfade) + if crossfade < 0: + raise BPDError(ERROR_ARG, u'crossfade time must be nonnegative') + + def cmd_clear(self, conn): + """Clear the playlist.""" + self.playlist = [] + self.playlist_version += 1 + self.cmd_stop(conn) + + def cmd_delete(self, conn, index): + """Remove the song at index from the playlist.""" + index = cast_arg(int, index) + try: + del(self.playlist[index]) + except IndexError: + raise ArgumentIndexError() + self.playlist_version += 1 + + if 
self.current_index == index: # Deleted playing song. + self.cmd_stop(conn) + elif index < self.current_index: # Deleted before playing. + # Shift playing index down. + self.current_index -= 1 + + def cmd_deleteid(self, conn, track_id): + self.cmd_delete(conn, self._id_to_index(track_id)) + + def cmd_move(self, conn, idx_from, idx_to): + """Move a track in the playlist.""" + idx_from = cast_arg(int, idx_from) + idx_to = cast_arg(int, idx_to) + try: + track = self.playlist.pop(idx_from) + self.playlist.insert(idx_to, track) + except IndexError: + raise ArgumentIndexError() + + # Update currently-playing song. + if idx_from == self.current_index: + self.current_index = idx_to + elif idx_from < self.current_index <= idx_to: + self.current_index -= 1 + elif idx_from > self.current_index >= idx_to: + self.current_index += 1 + + self.playlist_version += 1 + + def cmd_moveid(self, conn, idx_from, idx_to): + idx_from = self._id_to_index(idx_from) + return self.cmd_move(conn, idx_from, idx_to) + + def cmd_swap(self, conn, i, j): + """Swaps two tracks in the playlist.""" + i = cast_arg(int, i) + j = cast_arg(int, j) + try: + track_i = self.playlist[i] + track_j = self.playlist[j] + except IndexError: + raise ArgumentIndexError() + + self.playlist[j] = track_i + self.playlist[i] = track_j + + # Update currently-playing song. + if self.current_index == i: + self.current_index = j + elif self.current_index == j: + self.current_index = i + + self.playlist_version += 1 + + def cmd_swapid(self, conn, i_id, j_id): + i = self._id_to_index(i_id) + j = self._id_to_index(j_id) + return self.cmd_swap(conn, i, j) + + def cmd_urlhandlers(self, conn): + """Indicates supported URL schemes. None by default.""" + pass + + def cmd_playlistinfo(self, conn, index=-1): + """Gives metadata information about the entire playlist or a + single track, given by its index. 
+ """ + index = cast_arg(int, index) + if index == -1: + for track in self.playlist: + yield self._item_info(track) + else: + try: + track = self.playlist[index] + except IndexError: + raise ArgumentIndexError() + yield self._item_info(track) + + def cmd_playlistid(self, conn, track_id=-1): + return self.cmd_playlistinfo(conn, self._id_to_index(track_id)) + + def cmd_plchanges(self, conn, version): + """Sends playlist changes since the given version. + + This is a "fake" implementation that ignores the version and + just returns the entire playlist (rather like version=0). This + seems to satisfy many clients. + """ + return self.cmd_playlistinfo(conn) + + def cmd_plchangesposid(self, conn, version): + """Like plchanges, but only sends position and id. + + Also a dummy implementation. + """ + for idx, track in enumerate(self.playlist): + yield u'cpos: ' + unicode(idx) + yield u'Id: ' + unicode(track.id) + + def cmd_currentsong(self, conn): + """Sends information about the currently-playing song. + """ + if self.current_index != -1: # -1 means stopped. + track = self.playlist[self.current_index] + yield self._item_info(track) + + def cmd_next(self, conn): + """Advance to the next song in the playlist.""" + self.current_index = self._succ_idx() + if self.current_index >= len(self.playlist): + # Fallen off the end. Just move to stopped state. + return self.cmd_stop(conn) + else: + return self.cmd_play(conn) + + def cmd_previous(self, conn): + """Step back to the last song.""" + self.current_index = self._prev_idx() + if self.current_index < 0: + return self.cmd_stop(conn) + else: + return self.cmd_play(conn) + + def cmd_pause(self, conn, state=None): + """Set the pause state playback.""" + if state is None: + self.paused = not self.paused # Toggle. 
+        else:
+            self.paused = cast_arg('intbool', state)
+
+    def cmd_play(self, conn, index=-1):
+        """Begin playback, possibly at a specified playlist index."""
+        index = cast_arg(int, index)
+
+        if index < -1 or index > len(self.playlist):
+            raise ArgumentIndexError()
+
+        if index == -1:  # No index specified: start where we are.
+            if not self.playlist:  # Empty playlist: stop immediately.
+                return self.cmd_stop(conn)
+            if self.current_index == -1:  # No current song.
+                self.current_index = 0  # Start at the beginning.
+            # If we have a current song, just stay there.
+
+        else:  # Start with the specified index.
+            self.current_index = index
+
+        self.paused = False
+
+    def cmd_playid(self, conn, track_id=0):
+        track_id = cast_arg(int, track_id)
+        if track_id == -1:
+            index = -1
+        else:
+            index = self._id_to_index(track_id)
+        return self.cmd_play(conn, index)
+
+    def cmd_stop(self, conn):
+        """Stop playback."""
+        self.current_index = -1
+        self.paused = False
+
+    def cmd_seek(self, conn, index, pos):
+        """Seek to a specified point in a specified song."""
+        index = cast_arg(int, index)
+        if index < 0 or index >= len(self.playlist):
+            raise ArgumentIndexError()
+        self.current_index = index
+
+    def cmd_seekid(self, conn, track_id, pos):
+        index = self._id_to_index(track_id)
+        return self.cmd_seek(conn, index, pos)
+
+    def cmd_profile(self, conn):
+        """Memory profiling for debugging."""
+        from guppy import hpy
+        heap = hpy().heap()
+        print(heap)
+
+
+class Connection(object):
+    """A connection between a client and the server. Handles input and
+    output from and to the client.
+    """
+    def __init__(self, server, sock):
+        """Create a new connection for the accepted socket `client`.
+        """
+        self.server = server
+        self.sock = sock
+        self.authenticated = False
+
+    def send(self, lines):
+        """Send lines, which is either a single string or an
+        iterable consisting of strings, to the client. A newline is
+        added after every string. Returns a Bluelet event that sends
+        the data.
+ """ + if isinstance(lines, basestring): + lines = [lines] + out = NEWLINE.join(lines) + NEWLINE + log.debug('{}', out[:-1]) # Don't log trailing newline. + if isinstance(out, unicode): + out = out.encode('utf8') + return self.sock.sendall(out) + + def do_command(self, command): + """A coroutine that runs the given command and sends an + appropriate response.""" + try: + yield bluelet.call(command.run(self)) + except BPDError as e: + # Send the error. + yield self.send(e.response()) + else: + # Send success code. + yield self.send(RESP_OK) + + def run(self): + """Send a greeting to the client and begin processing commands + as they arrive. + """ + yield self.send(HELLO) + + clist = None # Initially, no command list is being constructed. + while True: + line = yield self.sock.readline() + if not line: + break + line = line.strip() + if not line: + break + log.debug('{}', line) + + if clist is not None: + # Command list already opened. + if line == CLIST_END: + yield bluelet.call(self.do_command(clist)) + clist = None # Clear the command list. + else: + clist.append(Command(line)) + + elif line == CLIST_BEGIN or line == CLIST_VERBOSE_BEGIN: + # Begin a command list. + clist = CommandList([], line == CLIST_VERBOSE_BEGIN) + + else: + # Ordinary command. + try: + yield bluelet.call(self.do_command(Command(line))) + except BPDClose: + # Command indicates that the conn should close. + self.sock.close() + return + + @classmethod + def handler(cls, server): + def _handle(sock): + """Creates a new `Connection` and runs it. + """ + return cls(server, sock).run() + return _handle + + +class Command(object): + """A command issued by the client for processing by the server. + """ + + command_re = re.compile(br'^([^ \t]+)[ \t]*') + arg_re = re.compile(br'"((?:\\"|[^"])+)"|([^ \t"]+)') + + def __init__(self, s): + """Creates a new `Command` from the given string, `s`, parsing + the string for command name and arguments. 
+ """ + command_match = self.command_re.match(s) + self.name = command_match.group(1) + + self.args = [] + arg_matches = self.arg_re.findall(s[command_match.end():]) + for match in arg_matches: + if match[0]: + # Quoted argument. + arg = match[0] + arg = arg.replace(b'\\"', b'"').replace(b'\\\\', b'\\') + else: + # Unquoted argument. + arg = match[1] + arg = arg.decode('utf8') + self.args.append(arg) + + def run(self, conn): + """A coroutine that executes the command on the given + connection. + """ + # Attempt to get correct command function. + func_name = 'cmd_' + self.name + if not hasattr(conn.server, func_name): + raise BPDError(ERROR_UNKNOWN, u'unknown command', self.name) + func = getattr(conn.server, func_name) + + # Ensure we have permission for this command. + if conn.server.password and \ + not conn.authenticated and \ + self.name not in SAFE_COMMANDS: + raise BPDError(ERROR_PERMISSION, u'insufficient privileges') + + try: + args = [conn] + self.args + results = func(*args) + if results: + for data in results: + yield conn.send(data) + + except BPDError as e: + # An exposed error. Set the command name and then let + # the Connection handle it. + e.cmd_name = self.name + raise e + + except BPDClose: + # An indication that the connection should close. Send + # it on the Connection. + raise + + except Exception as e: + # An "unintentional" error. Hide it from the client. + log.error('{}', traceback.format_exc(e)) + raise BPDError(ERROR_SYSTEM, u'server error', self.name) + + +class CommandList(list): + """A list of commands issued by the client for processing by the + server. May be verbose, in which case the response is delimited, or + not. Should be a list of `Command` objects. + """ + + def __init__(self, sequence=None, verbose=False): + """Create a new `CommandList` from the given sequence of + `Command`s. If `verbose`, this is a verbose command list. 
+ """ + if sequence: + for item in sequence: + self.append(item) + self.verbose = verbose + + def run(self, conn): + """Coroutine executing all the commands in this list. + """ + for i, command in enumerate(self): + try: + yield bluelet.call(command.run(conn)) + except BPDError as e: + # If the command failed, stop executing. + e.index = i # Give the error the correct index. + raise e + + # Otherwise, possibly send the output delimeter if we're in a + # verbose ("OK") command list. + if self.verbose: + yield conn.send(RESP_CLIST_VERBOSE) + + +# A subclass of the basic, protocol-handling server that actually plays +# music. + +class Server(BaseServer): + """An MPD-compatible server using GStreamer to play audio and beets + to store its library. + """ + + def __init__(self, library, host, port, password): + try: + from beetsplug.bpd import gstplayer + except ImportError as e: + # This is a little hacky, but it's the best I know for now. + if e.args[0].endswith(' gst'): + raise NoGstreamerError() + else: + raise + super(Server, self).__init__(host, port, password) + self.lib = library + self.player = gstplayer.GstPlayer(self.play_finished) + self.cmd_update(None) + + def run(self): + self.player.run() + super(Server, self).run() + + def play_finished(self): + """A callback invoked every time our player finishes a + track. + """ + self.cmd_next(None) + + # Metadata helper functions. 
+ + def _item_info(self, item): + info_lines = [ + u'file: ' + item.destination(fragment=True), + u'Time: ' + unicode(int(item.length)), + u'Title: ' + item.title, + u'Artist: ' + item.artist, + u'Album: ' + item.album, + u'Genre: ' + item.genre, + ] + + track = unicode(item.track) + if item.tracktotal: + track += u'/' + unicode(item.tracktotal) + info_lines.append(u'Track: ' + track) + + info_lines.append(u'Date: ' + unicode(item.year)) + + try: + pos = self._id_to_index(item.id) + info_lines.append(u'Pos: ' + unicode(pos)) + except ArgumentNotFoundError: + # Don't include position if not in playlist. + pass + + info_lines.append(u'Id: ' + unicode(item.id)) + + return info_lines + + def _item_id(self, item): + return item.id + + # Database updating. + + def cmd_update(self, conn, path=u'/'): + """Updates the catalog to reflect the current database state. + """ + # Path is ignored. Also, the real MPD does this asynchronously; + # this is done inline. + print(u'Building directory tree...') + self.tree = vfs.libtree(self.lib) + print(u'... done.') + self.updated_time = time.time() + + # Path (directory tree) browsing. + + def _resolve_path(self, path): + """Returns a VFS node or an item ID located at the path given. + If the path does not exist, raises an ArgumentNotFoundError. + """ + components = path.split(u'/') + node = self.tree + + for component in components: + if not component: + continue + + if isinstance(node, int): + # We're trying to descend into a file node. 
+ raise ArgumentNotFoundError() + + if component in node.files: + node = node.files[component] + elif component in node.dirs: + node = node.dirs[component] + else: + raise ArgumentNotFoundError() + + return node + + def _path_join(self, p1, p2): + """Smashes together two BPD paths.""" + out = p1 + u'/' + p2 + return out.replace(u'//', u'/').replace(u'//', u'/') + + def cmd_lsinfo(self, conn, path=u"/"): + """Sends info on all the items in the path.""" + node = self._resolve_path(path) + if isinstance(node, int): + # Trying to list a track. + raise BPDError(ERROR_ARG, u'this is not a directory') + else: + for name, itemid in iter(sorted(node.files.items())): + item = self.lib.get_item(itemid) + yield self._item_info(item) + for name, _ in iter(sorted(node.dirs.iteritems())): + dirpath = self._path_join(path, name) + if dirpath.startswith(u"/"): + # Strip leading slash (libmpc rejects this). + dirpath = dirpath[1:] + yield u'directory: %s' % dirpath + + def _listall(self, basepath, node, info=False): + """Helper function for recursive listing. If info, show + tracks' complete info; otherwise, just show items' paths. + """ + if isinstance(node, int): + # List a single file. + if info: + item = self.lib.get_item(node) + yield self._item_info(item) + else: + yield u'file: ' + basepath + else: + # List a directory. Recurse into both directories and files. 
+ for name, itemid in sorted(node.files.iteritems()): + newpath = self._path_join(basepath, name) + # "yield from" + for v in self._listall(newpath, itemid, info): + yield v + for name, subdir in sorted(node.dirs.iteritems()): + newpath = self._path_join(basepath, name) + yield u'directory: ' + newpath + for v in self._listall(newpath, subdir, info): + yield v + + def cmd_listall(self, conn, path=u"/"): + """Send the paths all items in the directory, recursively.""" + return self._listall(path, self._resolve_path(path), False) + + def cmd_listallinfo(self, conn, path=u"/"): + """Send info on all the items in the directory, recursively.""" + return self._listall(path, self._resolve_path(path), True) + + # Playlist manipulation. + + def _all_items(self, node): + """Generator yielding all items under a VFS node. + """ + if isinstance(node, int): + # Could be more efficient if we built up all the IDs and + # then issued a single SELECT. + yield self.lib.get_item(node) + else: + # Recurse into a directory. + for name, itemid in sorted(node.files.iteritems()): + # "yield from" + for v in self._all_items(itemid): + yield v + for name, subdir in sorted(node.dirs.iteritems()): + for v in self._all_items(subdir): + yield v + + def _add(self, path, send_id=False): + """Adds a track or directory to the playlist, specified by the + path. If `send_id`, write each item's id to the client. + """ + for item in self._all_items(self._resolve_path(path)): + self.playlist.append(item) + if send_id: + yield u'Id: ' + unicode(item.id) + self.playlist_version += 1 + + def cmd_add(self, conn, path): + """Adds a track or directory to the playlist, specified by a + path. + """ + return self._add(path, False) + + def cmd_addid(self, conn, path): + """Same as `cmd_add` but sends an id back to the client.""" + return self._add(path, True) + + # Server info. 
+ + def cmd_status(self, conn): + for line in super(Server, self).cmd_status(conn): + yield line + if self.current_index > -1: + item = self.playlist[self.current_index] + + yield u'bitrate: ' + unicode(item.bitrate / 1000) + # Missing 'audio'. + + (pos, total) = self.player.time() + yield u'time: ' + unicode(pos) + u':' + unicode(total) + + # Also missing 'updating_db'. + + def cmd_stats(self, conn): + """Sends some statistics about the library.""" + with self.lib.transaction() as tx: + statement = 'SELECT COUNT(DISTINCT artist), ' \ + 'COUNT(DISTINCT album), ' \ + 'COUNT(id), ' \ + 'SUM(length) ' \ + 'FROM items' + artists, albums, songs, totaltime = tx.query(statement)[0] + + yield ( + u'artists: ' + unicode(artists), + u'albums: ' + unicode(albums), + u'songs: ' + unicode(songs), + u'uptime: ' + unicode(int(time.time() - self.startup_time)), + u'playtime: ' + u'0', # Missing. + u'db_playtime: ' + unicode(int(totaltime)), + u'db_update: ' + unicode(int(self.updated_time)), + ) + + # Searching. + + tagtype_map = { + u'Artist': u'artist', + u'Album': u'album', + u'Title': u'title', + u'Track': u'track', + u'AlbumArtist': u'albumartist', + u'AlbumArtistSort': u'albumartist_sort', + # Name? + u'Genre': u'genre', + u'Date': u'year', + u'Composer': u'composer', + # Performer? + u'Disc': u'disc', + u'filename': u'path', # Suspect. + } + + def cmd_tagtypes(self, conn): + """Returns a list of the metadata (tag) fields available for + searching. + """ + for tag in self.tagtype_map: + yield u'tagtype: ' + tag + + def _tagtype_lookup(self, tag): + """Uses `tagtype_map` to look up the beets column name for an + MPD tagtype (or throw an appropriate exception). Returns both + the canonical name of the MPD tagtype and the beets column + name. + """ + for test_tag, key in self.tagtype_map.items(): + # Match case-insensitively. 
+ if test_tag.lower() == tag.lower(): + return test_tag, key + raise BPDError(ERROR_UNKNOWN, u'no such tagtype') + + def _metadata_query(self, query_type, any_query_type, kv): + """Helper function returns a query object that will find items + according to the library query type provided and the key-value + pairs specified. The any_query_type is used for queries of + type "any"; if None, then an error is thrown. + """ + if kv: # At least one key-value pair. + queries = [] + # Iterate pairwise over the arguments. + it = iter(kv) + for tag, value in zip(it, it): + if tag.lower() == u'any': + if any_query_type: + queries.append(any_query_type(value, + ITEM_KEYS_WRITABLE, + query_type)) + else: + raise BPDError(ERROR_UNKNOWN, u'no such tagtype') + else: + _, key = self._tagtype_lookup(tag) + queries.append(query_type(key, value)) + return dbcore.query.AndQuery(queries) + else: # No key-value pairs. + return dbcore.query.TrueQuery() + + def cmd_search(self, conn, *kv): + """Perform a substring match for items.""" + query = self._metadata_query(dbcore.query.SubstringQuery, + dbcore.query.AnyFieldQuery, + kv) + for item in self.lib.items(query): + yield self._item_info(item) + + def cmd_find(self, conn, *kv): + """Perform an exact match for items.""" + query = self._metadata_query(dbcore.query.MatchQuery, + None, + kv) + for item in self.lib.items(query): + yield self._item_info(item) + + def cmd_list(self, conn, show_tag, *kv): + """List distinct metadata values for show_tag, possibly + filtered by matching match_tag to match_term. 
+ """ + show_tag_canon, show_key = self._tagtype_lookup(show_tag) + query = self._metadata_query(dbcore.query.MatchQuery, None, kv) + + clause, subvals = query.clause() + statement = 'SELECT DISTINCT ' + show_key + \ + ' FROM items WHERE ' + clause + \ + ' ORDER BY ' + show_key + with self.lib.transaction() as tx: + rows = tx.query(statement, subvals) + + for row in rows: + yield show_tag_canon + u': ' + unicode(row[0]) + + def cmd_count(self, conn, tag, value): + """Returns the number and total time of songs matching the + tag/value query. + """ + _, key = self._tagtype_lookup(tag) + songs = 0 + playtime = 0.0 + for item in self.lib.items(dbcore.query.MatchQuery(key, value)): + songs += 1 + playtime += item.length + yield u'songs: ' + unicode(songs) + yield u'playtime: ' + unicode(int(playtime)) + + # "Outputs." Just a dummy implementation because we don't control + # any outputs. + + def cmd_outputs(self, conn): + """List the available outputs.""" + yield ( + u'outputid: 0', + u'outputname: gstreamer', + u'outputenabled: 1', + ) + + def cmd_enableoutput(self, conn, output_id): + output_id = cast_arg(int, output_id) + if output_id != 0: + raise ArgumentIndexError() + + def cmd_disableoutput(self, conn, output_id): + output_id = cast_arg(int, output_id) + if output_id == 0: + raise BPDError(ERROR_ARG, u'cannot disable this output') + else: + raise ArgumentIndexError() + + # Playback control. The functions below hook into the + # half-implementations provided by the base class. Together, they're + # enough to implement all normal playback functionality. + + def cmd_play(self, conn, index=-1): + new_index = index != -1 and index != self.current_index + was_paused = self.paused + super(Server, self).cmd_play(conn, index) + + if self.current_index > -1: # Not stopped. + if was_paused and not new_index: + # Just unpause. 
+ self.player.play() + else: + self.player.play_file(self.playlist[self.current_index].path) + + def cmd_pause(self, conn, state=None): + super(Server, self).cmd_pause(conn, state) + if self.paused: + self.player.pause() + elif self.player.playing: + self.player.play() + + def cmd_stop(self, conn): + super(Server, self).cmd_stop(conn) + self.player.stop() + + def cmd_seek(self, conn, index, pos): + """Seeks to the specified position in the specified song.""" + index = cast_arg(int, index) + pos = cast_arg(int, pos) + super(Server, self).cmd_seek(conn, index, pos) + self.player.seek(pos) + + # Volume control. + + def cmd_setvol(self, conn, vol): + vol = cast_arg(int, vol) + super(Server, self).cmd_setvol(conn, vol) + self.player.volume = float(vol) / 100 + + +# Beets plugin hooks. + +class BPDPlugin(BeetsPlugin): + """Provides the "beet bpd" command for running a music player + server. + """ + def __init__(self): + super(BPDPlugin, self).__init__() + self.config.add({ + 'host': u'', + 'port': 6600, + 'password': u'', + 'volume': VOLUME_MAX, + }) + self.config['password'].redact = True + + def start_bpd(self, lib, host, port, password, volume, debug): + """Starts a BPD server.""" + if debug: # FIXME this should be managed by BeetsPlugin + self._log.setLevel(logging.DEBUG) + else: + self._log.setLevel(logging.WARNING) + try: + server = Server(lib, host, port, password) + server.cmd_setvol(None, volume) + server.run() + except NoGstreamerError: + global_log.error(u'Gstreamer Python bindings not found.') + global_log.error(u'Install "python-gst0.10", "py27-gst-python", ' + u'or similar package to use BPD.') + + def commands(self): + cmd = beets.ui.Subcommand( + 'bpd', help=u'run an MPD-compatible music player server' + ) + cmd.parser.add_option( + '-d', '--debug', action='store_true', + help=u'dump all MPD traffic to stdout' + ) + + def func(lib, opts, args): + host = args.pop(0) if args else self.config['host'].get(unicode) + port = args.pop(0) if args else 
self.config['port'].get(int) + if args: + raise beets.ui.UserError(u'too many arguments') + password = self.config['password'].get(unicode) + volume = self.config['volume'].get(int) + debug = opts.debug or False + self.start_bpd(lib, host, int(port), password, volume, debug) + + cmd.func = func + return [cmd] diff --git a/libs/beetsplug/bpd/gstplayer.py b/libs/beetsplug/bpd/gstplayer.py new file mode 100644 index 00000000..b64cd009 --- /dev/null +++ b/libs/beetsplug/bpd/gstplayer.py @@ -0,0 +1,223 @@ +# -*- coding: utf-8 -*- +# This file is part of beets. +# Copyright 2016, Adrian Sampson. +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. + +"""A wrapper for the GStreamer Python bindings that exposes a simple +music player. +""" + +from __future__ import division, absolute_import, print_function + +import sys +import time +import gobject +import thread +import os +import copy +import urllib + +import pygst +pygst.require('0.10') +import gst # noqa + + +class GstPlayer(object): + """A music player abstracting GStreamer's Playbin element. + + Create a player object, then call run() to start a thread with a + runloop. Then call play_file to play music. Use player.playing + to check whether music is currently playing. + + A basic play queue is also implemented (just a Python list, + player.queue, whose last element is next to play). To use it, + just call enqueue() and then play(). 
When a track finishes and + another is available on the queue, it is played automatically. + """ + + def __init__(self, finished_callback=None): + """Initialize a player. + + If a finished_callback is provided, it is called every time a + track started with play_file finishes. + + Once the player has been created, call run() to begin the main + runloop in a separate thread. + """ + + # Set up the Gstreamer player. From the pygst tutorial: + # http://pygstdocs.berlios.de/pygst-tutorial/playbin.html + self.player = gst.element_factory_make("playbin2", "player") + fakesink = gst.element_factory_make("fakesink", "fakesink") + self.player.set_property("video-sink", fakesink) + bus = self.player.get_bus() + bus.add_signal_watch() + bus.connect("message", self._handle_message) + + # Set up our own stuff. + self.playing = False + self.finished_callback = finished_callback + self.cached_time = None + self._volume = 1.0 + + def _get_state(self): + """Returns the current state flag of the playbin.""" + # gst's get_state function returns a 3-tuple; we just want the + # status flag in position 1. + return self.player.get_state()[1] + + def _handle_message(self, bus, message): + """Callback for status updates from GStreamer.""" + if message.type == gst.MESSAGE_EOS: + # file finished playing + self.player.set_state(gst.STATE_NULL) + self.playing = False + self.cached_time = None + if self.finished_callback: + self.finished_callback() + + elif message.type == gst.MESSAGE_ERROR: + # error + self.player.set_state(gst.STATE_NULL) + err, debug = message.parse_error() + print(u"Error: {0}".format(err)) + self.playing = False + + def _set_volume(self, volume): + """Set the volume level to a value in the range [0, 1.5].""" + # And the volume for the playbin. 
+ self._volume = volume + self.player.set_property("volume", volume) + + def _get_volume(self): + """Get the volume as a float in the range [0, 1.5].""" + return self._volume + + volume = property(_get_volume, _set_volume) + + def play_file(self, path): + """Immediately begin playing the audio file at the given + path. + """ + self.player.set_state(gst.STATE_NULL) + if isinstance(path, unicode): + path = path.encode('utf8') + uri = 'file://' + urllib.quote(path) + self.player.set_property("uri", uri) + self.player.set_state(gst.STATE_PLAYING) + self.playing = True + + def play(self): + """If paused, resume playback.""" + if self._get_state() == gst.STATE_PAUSED: + self.player.set_state(gst.STATE_PLAYING) + self.playing = True + + def pause(self): + """Pause playback.""" + self.player.set_state(gst.STATE_PAUSED) + + def stop(self): + """Halt playback.""" + self.player.set_state(gst.STATE_NULL) + self.playing = False + self.cached_time = None + + def run(self): + """Start a new thread for the player. + + Call this function before trying to play any music with + play_file() or play(). + """ + # If we don't use the MainLoop, messages are never sent. + gobject.threads_init() + + def start(): + loop = gobject.MainLoop() + loop.run() + thread.start_new_thread(start, ()) + + def time(self): + """Returns a tuple containing (position, length) where both + values are integers in seconds. If no stream is available, + returns (0, 0). + """ + fmt = gst.Format(gst.FORMAT_TIME) + try: + pos = self.player.query_position(fmt, None)[0] / (10 ** 9) + length = self.player.query_duration(fmt, None)[0] / (10 ** 9) + self.cached_time = (pos, length) + return (pos, length) + + except gst.QueryError: + # Stream not ready. For small gaps of time, for instance + # after seeking, the time values are unavailable. For this + # reason, we cache recent. 
+ if self.playing and self.cached_time: + return self.cached_time + else: + return (0, 0) + + def seek(self, position): + """Seeks to position (in seconds).""" + cur_pos, cur_len = self.time() + if position > cur_len: + self.stop() + return + + fmt = gst.Format(gst.FORMAT_TIME) + ns = position * 10 ** 9 # convert to nanoseconds + self.player.seek_simple(fmt, gst.SEEK_FLAG_FLUSH, ns) + + # save new cached time + self.cached_time = (position, cur_len) + + def block(self): + """Block until playing finishes.""" + while self.playing: + time.sleep(1) + + +def play_simple(paths): + """Play the files in paths in a straightforward way, without + using the player's callback function. + """ + p = GstPlayer() + p.run() + for path in paths: + p.play_file(path) + p.block() + + +def play_complicated(paths): + """Play the files in the path one after the other by using the + callback function to advance to the next song. + """ + my_paths = copy.copy(paths) + + def next_song(): + my_paths.pop(0) + p.play_file(my_paths[0]) + p = GstPlayer(next_song) + p.run() + p.play_file(my_paths[0]) + while my_paths: + time.sleep(1) + +if __name__ == '__main__': + # A very simple command-line player. Just give it names of audio + # files on the command line; these are all played in sequence. + paths = [os.path.abspath(os.path.expanduser(p)) + for p in sys.argv[1:]] + # play_simple(paths) + play_complicated(paths) diff --git a/libs/beetsplug/bpm.py b/libs/beetsplug/bpm.py new file mode 100644 index 00000000..ba284c04 --- /dev/null +++ b/libs/beetsplug/bpm.py @@ -0,0 +1,87 @@ +# -*- coding: utf-8 -*- +# This file is part of beets. 
+# Copyright 2016, aroquen +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. + +"""Determine BPM by pressing a key to the rhythm.""" + +from __future__ import division, absolute_import, print_function + +import time + +from beets import ui +from beets.plugins import BeetsPlugin + + +def bpm(max_strokes): + """Returns average BPM (possibly of a playing song) + listening to Enter keystrokes. + """ + t0 = None + dt = [] + for i in range(max_strokes): + # Press enter to the rhythm... 
+ s = raw_input() + if s == '': + t1 = time.time() + # Only start measuring at the second stroke + if t0: + dt.append(t1 - t0) + t0 = t1 + else: + break + + # Return average BPM + # bpm = (max_strokes-1) / sum(dt) * 60 + ave = sum([1.0 / dti * 60 for dti in dt]) / len(dt) + return ave + + +class BPMPlugin(BeetsPlugin): + + def __init__(self): + super(BPMPlugin, self).__init__() + self.config.add({ + u'max_strokes': 3, + u'overwrite': True, + }) + + def commands(self): + cmd = ui.Subcommand('bpm', + help=u'determine bpm of a song by pressing ' + u'a key to the rhythm') + cmd.func = self.command + return [cmd] + + def command(self, lib, opts, args): + self.get_bpm(lib.items(ui.decargs(args))) + + def get_bpm(self, items, write=False): + overwrite = self.config['overwrite'].get(bool) + if len(items) > 1: + raise ValueError(u'Can only get bpm of one song at time') + + item = items[0] + if item['bpm']: + self._log.info(u'Found bpm {0}', item['bpm']) + if not overwrite: + return + + self._log.info(u'Press Enter {0} times to the rhythm or Ctrl-D ' + u'to exit', self.config['max_strokes'].get(int)) + new_bpm = bpm(self.config['max_strokes'].get(int)) + item['bpm'] = int(new_bpm) + if write: + item.try_write() + item.store() + self._log.info(u'Added new bpm {0}', item['bpm']) diff --git a/libs/beetsplug/bucket.py b/libs/beetsplug/bucket.py new file mode 100644 index 00000000..21acb1f1 --- /dev/null +++ b/libs/beetsplug/bucket.py @@ -0,0 +1,243 @@ +# -*- coding: utf-8 -*- +# This file is part of beets. +# Copyright 2016, Fabrice Laporte. 
+# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. + +"""Provides the %bucket{} function for path formatting. +""" + +from __future__ import division, absolute_import, print_function + +from datetime import datetime +import re +import string +from itertools import tee, izip + +from beets import plugins, ui + + +ASCII_DIGITS = string.digits + string.ascii_lowercase + + +class BucketError(Exception): + pass + + +def pairwise(iterable): + "s -> (s0,s1), (s1,s2), (s2, s3), ..." + a, b = tee(iterable) + next(b, None) + return izip(a, b) + + +def span_from_str(span_str): + """Build a span dict from the span string representation. 
+ """ + + def normalize_year(d, yearfrom): + """Convert string to a 4 digits year + """ + if yearfrom < 100: + raise BucketError(u"%d must be expressed on 4 digits" % yearfrom) + + # if two digits only, pick closest year that ends by these two + # digits starting from yearfrom + if d < 100: + if (d % 100) < (yearfrom % 100): + d = (yearfrom - yearfrom % 100) + 100 + d + else: + d = (yearfrom - yearfrom % 100) + d + return d + + years = [int(x) for x in re.findall('\d+', span_str)] + if not years: + raise ui.UserError(u"invalid range defined for year bucket '%s': no " + u"year found" % span_str) + try: + years = [normalize_year(x, years[0]) for x in years] + except BucketError as exc: + raise ui.UserError(u"invalid range defined for year bucket '%s': %s" % + (span_str, exc)) + + res = {'from': years[0], 'str': span_str} + if len(years) > 1: + res['to'] = years[-1] + return res + + +def complete_year_spans(spans): + """Set the `to` value of spans if empty and sort them chronologically. + """ + spans.sort(key=lambda x: x['from']) + for (x, y) in pairwise(spans): + if 'to' not in x: + x['to'] = y['from'] - 1 + if spans and 'to' not in spans[-1]: + spans[-1]['to'] = datetime.now().year + + +def extend_year_spans(spans, spanlen, start=1900, end=2014): + """Add new spans to given spans list so that every year of [start,end] + belongs to a span. 
+ """ + extended_spans = spans[:] + for (x, y) in pairwise(spans): + # if a gap between two spans, fill the gap with as much spans of + # spanlen length as necessary + for span_from in range(x['to'] + 1, y['from'], spanlen): + extended_spans.append({'from': span_from}) + # Create spans prior to declared ones + for span_from in range(spans[0]['from'] - spanlen, start, -spanlen): + extended_spans.append({'from': span_from}) + # Create spans after the declared ones + for span_from in range(spans[-1]['to'] + 1, end, spanlen): + extended_spans.append({'from': span_from}) + + complete_year_spans(extended_spans) + return extended_spans + + +def build_year_spans(year_spans_str): + """Build a chronologically ordered list of spans dict from unordered spans + stringlist. + """ + spans = [] + for elem in year_spans_str: + spans.append(span_from_str(elem)) + complete_year_spans(spans) + return spans + + +def str2fmt(s): + """Deduces formatting syntax from a span string. + """ + regex = re.compile(r"(?P\D*)(?P\d+)(?P\D*)" + r"(?P\d*)(?P\D*)") + m = re.match(regex, s) + + res = {'fromnchars': len(m.group('fromyear')), + 'tonchars': len(m.group('toyear'))} + res['fmt'] = "%s%%s%s%s%s" % (m.group('bef'), + m.group('sep'), + '%s' if res['tonchars'] else '', + m.group('after')) + return res + + +def format_span(fmt, yearfrom, yearto, fromnchars, tonchars): + """Return a span string representation. 
+ """ + args = (bytes(yearfrom)[-fromnchars:]) + if tonchars: + args = (bytes(yearfrom)[-fromnchars:], bytes(yearto)[-tonchars:]) + return fmt % args + + +def extract_modes(spans): + """Extract the most common spans lengths and representation formats + """ + rangelen = sorted([x['to'] - x['from'] + 1 for x in spans]) + deflen = sorted(rangelen, key=rangelen.count)[-1] + reprs = [str2fmt(x['str']) for x in spans] + deffmt = sorted(reprs, key=reprs.count)[-1] + return deflen, deffmt + + +def build_alpha_spans(alpha_spans_str, alpha_regexs): + """Extract alphanumerics from string and return sorted list of chars + [from...to] + """ + spans = [] + + for elem in alpha_spans_str: + if elem in alpha_regexs: + spans.append(re.compile(alpha_regexs[elem])) + else: + bucket = sorted([x for x in elem.lower() if x.isalnum()]) + if bucket: + begin_index = ASCII_DIGITS.index(bucket[0]) + end_index = ASCII_DIGITS.index(bucket[-1]) + else: + raise ui.UserError(u"invalid range defined for alpha bucket " + u"'%s': no alphanumeric character found" % + elem) + spans.append( + re.compile( + "^[" + ASCII_DIGITS[begin_index:end_index + 1] + + ASCII_DIGITS[begin_index:end_index + 1].upper() + "]" + ) + ) + return spans + + +class BucketPlugin(plugins.BeetsPlugin): + def __init__(self): + super(BucketPlugin, self).__init__() + self.template_funcs['bucket'] = self._tmpl_bucket + + self.config.add({ + 'bucket_year': [], + 'bucket_alpha': [], + 'bucket_alpha_regex': {}, + 'extrapolate': False + }) + self.setup() + + def setup(self): + """Setup plugin from config options + """ + self.year_spans = build_year_spans(self.config['bucket_year'].get()) + if self.year_spans and self.config['extrapolate']: + [self.ys_len_mode, + self.ys_repr_mode] = extract_modes(self.year_spans) + self.year_spans = extend_year_spans(self.year_spans, + self.ys_len_mode) + + self.alpha_spans = build_alpha_spans( + self.config['bucket_alpha'].get(), + self.config['bucket_alpha_regex'].get() + ) + + def 
find_bucket_year(self, year): + """Return bucket that matches given year or return the year + if no matching bucket. + """ + for ys in self.year_spans: + if ys['from'] <= int(year) <= ys['to']: + if 'str' in ys: + return ys['str'] + else: + return format_span(self.ys_repr_mode['fmt'], + ys['from'], ys['to'], + self.ys_repr_mode['fromnchars'], + self.ys_repr_mode['tonchars']) + return year + + def find_bucket_alpha(self, s): + """Return alpha-range bucket that matches given string or return the + string initial if no matching bucket. + """ + for (i, span) in enumerate(self.alpha_spans): + if span.match(s): + return self.config['bucket_alpha'].get()[i] + return s[0].upper() + + def _tmpl_bucket(self, text, field=None): + if not field and len(text) == 4 and text.isdigit(): + field = 'year' + + if field == 'year': + func = self.find_bucket_year + else: + func = self.find_bucket_alpha + return func(text) diff --git a/libs/beetsplug/chroma.py b/libs/beetsplug/chroma.py new file mode 100644 index 00000000..148e9c20 --- /dev/null +++ b/libs/beetsplug/chroma.py @@ -0,0 +1,308 @@ +# -*- coding: utf-8 -*- +# This file is part of beets. +# Copyright 2016, Adrian Sampson. +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. + +"""Adds Chromaprint/Acoustid acoustic fingerprinting support to the +autotagger. Requires the pyacoustid library. 
+""" +from __future__ import division, absolute_import, print_function + +from beets import plugins +from beets import ui +from beets import util +from beets import config +from beets.util import confit +from beets.autotag import hooks +import acoustid +from collections import defaultdict + +API_KEY = '1vOwZtEn' +SCORE_THRESH = 0.5 +TRACK_ID_WEIGHT = 10.0 +COMMON_REL_THRESH = 0.6 # How many tracks must have an album in common? +MAX_RECORDINGS = 5 +MAX_RELEASES = 5 + +# Stores the Acoustid match information for each track. This is +# populated when an import task begins and then used when searching for +# candidates. It maps audio file paths to (recording_ids, release_ids) +# pairs. If a given path is not present in the mapping, then no match +# was found. +_matches = {} + +# Stores the fingerprint and Acoustid ID for each track. This is stored +# as metadata for each track for later use but is not relevant for +# autotagging. +_fingerprints = {} +_acoustids = {} + + +def prefix(it, count): + """Truncate an iterable to at most `count` items. + """ + for i, v in enumerate(it): + if i >= count: + break + yield v + + +def acoustid_match(log, path): + """Gets metadata for a file from Acoustid and populates the + _matches, _fingerprints, and _acoustids dictionaries accordingly. + """ + try: + duration, fp = acoustid.fingerprint_file(util.syspath(path)) + except acoustid.FingerprintGenerationError as exc: + log.error(u'fingerprinting of {0} failed: {1}', + util.displayable_path(repr(path)), exc) + return None + _fingerprints[path] = fp + try: + res = acoustid.lookup(API_KEY, fp, duration, + meta='recordings releases') + except acoustid.AcoustidError as exc: + log.debug(u'fingerprint matching {0} failed: {1}', + util.displayable_path(repr(path)), exc) + return None + log.debug(u'chroma: fingerprinted {0}', + util.displayable_path(repr(path))) + + # Ensure the response is usable and parse it. 
+ if res['status'] != 'ok' or not res.get('results'): + log.debug(u'no match found') + return None + result = res['results'][0] # Best match. + if result['score'] < SCORE_THRESH: + log.debug(u'no results above threshold') + return None + _acoustids[path] = result['id'] + + # Get recording and releases from the result. + if not result.get('recordings'): + log.debug(u'no recordings found') + return None + recording_ids = [] + release_ids = [] + for recording in result['recordings']: + recording_ids.append(recording['id']) + if 'releases' in recording: + release_ids += [rel['id'] for rel in recording['releases']] + + log.debug(u'matched recordings {0} on releases {1}', + recording_ids, release_ids) + _matches[path] = recording_ids, release_ids + + +# Plugin structure and autotagging logic. + + +def _all_releases(items): + """Given an iterable of Items, determines (according to Acoustid) + which releases the items have in common. Generates release IDs. + """ + # Count the number of "hits" for each release. + relcounts = defaultdict(int) + for item in items: + if item.path not in _matches: + continue + + _, release_ids = _matches[item.path] + for release_id in release_ids: + relcounts[release_id] += 1 + + for release_id, count in relcounts.iteritems(): + if float(count) / len(items) > COMMON_REL_THRESH: + yield release_id + + +class AcoustidPlugin(plugins.BeetsPlugin): + def __init__(self): + super(AcoustidPlugin, self).__init__() + + self.config.add({ + 'auto': True, + }) + config['acoustid']['apikey'].redact = True + + if self.config['auto']: + self.register_listener('import_task_start', self.fingerprint_task) + self.register_listener('import_task_apply', apply_acoustid_metadata) + + def fingerprint_task(self, task, session): + return fingerprint_task(self._log, task, session) + + def track_distance(self, item, info): + dist = hooks.Distance() + if item.path not in _matches or not info.track_id: + # Match failed or no track ID. 
+ return dist + + recording_ids, _ = _matches[item.path] + dist.add_expr('track_id', info.track_id not in recording_ids) + return dist + + def candidates(self, items, artist, album, va_likely): + albums = [] + for relid in prefix(_all_releases(items), MAX_RELEASES): + album = hooks.album_for_mbid(relid) + if album: + albums.append(album) + + self._log.debug(u'acoustid album candidates: {0}', len(albums)) + return albums + + def item_candidates(self, item, artist, title): + if item.path not in _matches: + return [] + + recording_ids, _ = _matches[item.path] + tracks = [] + for recording_id in prefix(recording_ids, MAX_RECORDINGS): + track = hooks.track_for_mbid(recording_id) + if track: + tracks.append(track) + self._log.debug(u'acoustid item candidates: {0}', len(tracks)) + return tracks + + def commands(self): + submit_cmd = ui.Subcommand('submit', + help=u'submit Acoustid fingerprints') + + def submit_cmd_func(lib, opts, args): + try: + apikey = config['acoustid']['apikey'].get(unicode) + except confit.NotFoundError: + raise ui.UserError(u'no Acoustid user API key provided') + submit_items(self._log, apikey, lib.items(ui.decargs(args))) + submit_cmd.func = submit_cmd_func + + fingerprint_cmd = ui.Subcommand( + 'fingerprint', + help=u'generate fingerprints for items without them' + ) + + def fingerprint_cmd_func(lib, opts, args): + for item in lib.items(ui.decargs(args)): + fingerprint_item(self._log, item, write=ui.should_write()) + fingerprint_cmd.func = fingerprint_cmd_func + + return [submit_cmd, fingerprint_cmd] + + +# Hooks into import process. + + +def fingerprint_task(log, task, session): + """Fingerprint each item in the task for later use during the + autotagging candidate search. + """ + items = task.items if task.is_album else [task.item] + for item in items: + acoustid_match(log, item.path) + + +def apply_acoustid_metadata(task, session): + """Apply Acoustid metadata (fingerprint and ID) to the task's items. 
+ """ + for item in task.imported_items(): + if item.path in _fingerprints: + item.acoustid_fingerprint = _fingerprints[item.path] + if item.path in _acoustids: + item.acoustid_id = _acoustids[item.path] + + +# UI commands. + + +def submit_items(log, userkey, items, chunksize=64): + """Submit fingerprints for the items to the Acoustid server. + """ + data = [] # The running list of dictionaries to submit. + + def submit_chunk(): + """Submit the current accumulated fingerprint data.""" + log.info(u'submitting {0} fingerprints', len(data)) + try: + acoustid.submit(API_KEY, userkey, data) + except acoustid.AcoustidError as exc: + log.warn(u'acoustid submission error: {0}', exc) + del data[:] + + for item in items: + fp = fingerprint_item(log, item) + + # Construct a submission dictionary for this item. + item_data = { + 'duration': int(item.length), + 'fingerprint': fp, + } + if item.mb_trackid: + item_data['mbid'] = item.mb_trackid + log.debug(u'submitting MBID') + else: + item_data.update({ + 'track': item.title, + 'artist': item.artist, + 'album': item.album, + 'albumartist': item.albumartist, + 'year': item.year, + 'trackno': item.track, + 'discno': item.disc, + }) + log.debug(u'submitting textual metadata') + data.append(item_data) + + # If we have enough data, submit a chunk. + if len(data) >= chunksize: + submit_chunk() + + # Submit remaining data in a final chunk. + if data: + submit_chunk() + + +def fingerprint_item(log, item, write=False): + """Get the fingerprint for an Item. If the item already has a + fingerprint, it is not regenerated. If fingerprint generation fails, + return None. If the items are associated with a library, they are + saved to the database. If `write` is set, then the new fingerprints + are also written to files' metadata. + """ + # Get a fingerprint and length for this track. 
+ if not item.length: + log.info(u'{0}: no duration available', + util.displayable_path(item.path)) + elif item.acoustid_fingerprint: + if write: + log.info(u'{0}: fingerprint exists, skipping', + util.displayable_path(item.path)) + else: + log.info(u'{0}: using existing fingerprint', + util.displayable_path(item.path)) + return item.acoustid_fingerprint + else: + log.info(u'{0}: fingerprinting', + util.displayable_path(item.path)) + try: + _, fp = acoustid.fingerprint_file(item.path) + item.acoustid_fingerprint = fp + if write: + log.info(u'{0}: writing fingerprint', + util.displayable_path(item.path)) + item.try_write() + if item._db: + item.store() + return item.acoustid_fingerprint + except acoustid.FingerprintGenerationError as exc: + log.info(u'fingerprint generation failed: {0}', exc) diff --git a/libs/beetsplug/convert.py b/libs/beetsplug/convert.py new file mode 100644 index 00000000..de91604f --- /dev/null +++ b/libs/beetsplug/convert.py @@ -0,0 +1,449 @@ +# -*- coding: utf-8 -*- +# This file is part of beets. +# Copyright 2016, Jakob Schnitzer. +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. 
+ +"""Converts tracks or albums to external directory +""" +from __future__ import division, absolute_import, print_function + +import os +import threading +import subprocess +import tempfile +import shlex +from string import Template + +from beets import ui, util, plugins, config +from beets.plugins import BeetsPlugin +from beets.util.confit import ConfigTypeError +from beets import art +from beets.util.artresizer import ArtResizer + +_fs_lock = threading.Lock() +_temp_files = [] # Keep track of temporary transcoded files for deletion. + +# Some convenient alternate names for formats. +ALIASES = { + u'wma': u'windows media', + u'vorbis': u'ogg', +} + +LOSSLESS_FORMATS = ['ape', 'flac', 'alac', 'wav', 'aiff'] + + +def replace_ext(path, ext): + """Return the path with its extension replaced by `ext`. + + The new extension must not contain a leading dot. + """ + return os.path.splitext(path)[0] + b'.' + ext + + +def get_format(fmt=None): + """Return the command template and the extension from the config. + """ + if not fmt: + fmt = config['convert']['format'].get(unicode).lower() + fmt = ALIASES.get(fmt, fmt) + + try: + format_info = config['convert']['formats'][fmt].get(dict) + command = format_info['command'] + extension = format_info.get('extension', fmt) + except KeyError: + raise ui.UserError( + u'convert: format {0} needs the "command" field' + .format(fmt) + ) + except ConfigTypeError: + command = config['convert']['formats'][fmt].get(bytes) + extension = fmt + + # Convenience and backwards-compatibility shortcuts. + keys = config['convert'].keys() + if 'command' in keys: + command = config['convert']['command'].get(unicode) + elif 'opts' in keys: + # Undocumented option for backwards compatibility with < 1.3.1. 
+ command = u'ffmpeg -i $source -y {0} $dest'.format( + config['convert']['opts'].get(unicode) + ) + if 'extension' in keys: + extension = config['convert']['extension'].get(unicode) + + return (command.encode('utf8'), extension.encode('utf8')) + + +def should_transcode(item, fmt): + """Determine whether the item should be transcoded as part of + conversion (i.e., its bitrate is high or it has the wrong format). + """ + if config['convert']['never_convert_lossy_files'] and \ + not (item.format.lower() in LOSSLESS_FORMATS): + return False + maxbr = config['convert']['max_bitrate'].get(int) + return fmt.lower() != item.format.lower() or \ + item.bitrate >= 1000 * maxbr + + +class ConvertPlugin(BeetsPlugin): + def __init__(self): + super(ConvertPlugin, self).__init__() + self.config.add({ + u'dest': None, + u'pretend': False, + u'threads': util.cpu_count(), + u'format': u'mp3', + u'formats': { + u'aac': { + u'command': u'ffmpeg -i $source -y -vn -acodec libfaac ' + u'-aq 100 $dest', + u'extension': u'm4a', + }, + u'alac': { + u'command': u'ffmpeg -i $source -y -vn -acodec alac $dest', + u'extension': u'm4a', + }, + u'flac': u'ffmpeg -i $source -y -vn -acodec flac $dest', + u'mp3': u'ffmpeg -i $source -y -vn -aq 2 $dest', + u'opus': + u'ffmpeg -i $source -y -vn -acodec libopus -ab 96k $dest', + u'ogg': + u'ffmpeg -i $source -y -vn -acodec libvorbis -aq 3 $dest', + u'wma': + u'ffmpeg -i $source -y -vn -acodec wmav2 -vn $dest', + }, + u'max_bitrate': 500, + u'auto': False, + u'tmpdir': None, + u'quiet': False, + u'embed': True, + u'paths': {}, + u'never_convert_lossy_files': False, + u'copy_album_art': False, + u'album_art_maxwidth': 0, + }) + self.import_stages = [self.auto_convert] + + self.register_listener('import_task_files', self._cleanup) + + def commands(self): + cmd = ui.Subcommand('convert', help=u'convert to external location') + cmd.parser.add_option('-p', '--pretend', action='store_true', + help=u'show actions but do nothing') + cmd.parser.add_option('-t', 
'--threads', action='store', type='int', + help=u'change the number of threads, \ + defaults to maximum available processors') + cmd.parser.add_option('-k', '--keep-new', action='store_true', + dest='keep_new', help=u'keep only the converted \ + and move the old files') + cmd.parser.add_option('-d', '--dest', action='store', + help=u'set the destination directory') + cmd.parser.add_option('-f', '--format', action='store', dest='format', + help=u'set the target format of the tracks') + cmd.parser.add_option('-y', '--yes', action='store_true', dest='yes', + help=u'do not ask for confirmation') + cmd.parser.add_album_option() + cmd.func = self.convert_func + return [cmd] + + def auto_convert(self, config, task): + if self.config['auto']: + for item in task.imported_items(): + self.convert_on_import(config.lib, item) + + # Utilities converted from functions to methods on logging overhaul + + def encode(self, command, source, dest, pretend=False): + """Encode `source` to `dest` using command template `command`. + + Raises `subprocess.CalledProcessError` if the command exited with a + non-zero status code. + """ + # The paths and arguments must be bytes. + assert isinstance(command, bytes) + assert isinstance(source, bytes) + assert isinstance(dest, bytes) + + quiet = self.config['quiet'].get(bool) + + if not quiet and not pretend: + self._log.info(u'Encoding {0}', util.displayable_path(source)) + + # Substitute $source and $dest in the argument list. + args = shlex.split(command) + for i, arg in enumerate(args): + args[i] = Template(arg).safe_substitute({ + 'source': source, + 'dest': dest, + }) + + if pretend: + self._log.info(u' '.join(ui.decargs(args))) + return + + try: + util.command_output(args) + except subprocess.CalledProcessError as exc: + # Something went wrong (probably Ctrl+C), remove temporary files + self._log.info(u'Encoding {0} failed. 
Cleaning up...', + util.displayable_path(source)) + self._log.debug(u'Command {0} exited with status {1}', + exc.cmd.decode('utf8', 'ignore'), + exc.returncode) + util.remove(dest) + util.prune_dirs(os.path.dirname(dest)) + raise + except OSError as exc: + raise ui.UserError( + u"convert: couldn't invoke '{0}': {1}".format( + u' '.join(ui.decargs(args)), exc + ) + ) + + if not quiet and not pretend: + self._log.info(u'Finished encoding {0}', + util.displayable_path(source)) + + def convert_item(self, dest_dir, keep_new, path_formats, fmt, + pretend=False): + command, ext = get_format(fmt) + item, original, converted = None, None, None + while True: + item = yield (item, original, converted) + dest = item.destination(basedir=dest_dir, + path_formats=path_formats) + + # When keeping the new file in the library, we first move the + # current (pristine) file to the destination. We'll then copy it + # back to its old path or transcode it to a new path. + if keep_new: + original = dest + converted = item.path + if should_transcode(item, fmt): + converted = replace_ext(converted, ext) + else: + original = item.path + if should_transcode(item, fmt): + dest = replace_ext(dest, ext) + converted = dest + + # Ensure that only one thread tries to create directories at a + # time. (The existence check is not atomic with the directory + # creation inside this function.) 
+ if not pretend: + with _fs_lock: + util.mkdirall(dest) + + if os.path.exists(util.syspath(dest)): + self._log.info(u'Skipping {0} (target file exists)', + util.displayable_path(item.path)) + continue + + if keep_new: + if pretend: + self._log.info(u'mv {0} {1}', + util.displayable_path(item.path), + util.displayable_path(original)) + else: + self._log.info(u'Moving to {0}', + util.displayable_path(original)) + util.move(item.path, original) + + if should_transcode(item, fmt): + try: + self.encode(command, original, converted, pretend) + except subprocess.CalledProcessError: + continue + else: + if pretend: + self._log.info(u'cp {0} {1}', + util.displayable_path(original), + util.displayable_path(converted)) + else: + # No transcoding necessary. + self._log.info(u'Copying {0}', + util.displayable_path(item.path)) + util.copy(original, converted) + + if pretend: + continue + + # Write tags from the database to the converted file. + item.try_write(path=converted) + + if keep_new: + # If we're keeping the transcoded file, read it again (after + # writing) to get new bitrate, duration, etc. + item.path = converted + item.read() + item.store() # Store new path and audio data. + + if self.config['embed']: + album = item.get_album() + if album and album.artpath: + self._log.debug(u'embedding album art from {}', + util.displayable_path(album.artpath)) + art.embed_item(self._log, item, album.artpath, + itempath=converted) + + if keep_new: + plugins.send('after_convert', item=item, + dest=dest, keepnew=True) + else: + plugins.send('after_convert', item=item, + dest=converted, keepnew=False) + + def copy_album_art(self, album, dest_dir, path_formats, pretend=False): + """Copies or converts the associated cover art of the album. Album must + have at least one track. + """ + if not album or not album.artpath: + return + + album_item = album.items().get() + # Album shouldn't be empty. 
+ if not album_item: + return + + # Get the destination of the first item (track) of the album, we use + # this function to format the path accordingly to path_formats. + dest = album_item.destination(basedir=dest_dir, + path_formats=path_formats) + + # Remove item from the path. + dest = os.path.join(*util.components(dest)[:-1]) + + dest = album.art_destination(album.artpath, item_dir=dest) + if album.artpath == dest: + return + + if not pretend: + util.mkdirall(dest) + + if os.path.exists(util.syspath(dest)): + self._log.info(u'Skipping {0} (target file exists)', + util.displayable_path(album.artpath)) + return + + # Decide whether we need to resize the cover-art image. + resize = False + maxwidth = None + if self.config['album_art_maxwidth']: + maxwidth = self.config['album_art_maxwidth'].get(int) + size = ArtResizer.shared.get_size(album.artpath) + self._log.debug('image size: {}', size) + if size: + resize = size[0] > maxwidth + else: + self._log.warning(u'Could not get size of image (please see ' + u'documentation for dependencies).') + + # Either copy or resize (while copying) the image. 
+ if resize: + self._log.info(u'Resizing cover art from {0} to {1}', + util.displayable_path(album.artpath), + util.displayable_path(dest)) + if not pretend: + ArtResizer.shared.resize(maxwidth, album.artpath, dest) + else: + if pretend: + self._log.info(u'cp {0} {1}', + util.displayable_path(album.artpath), + util.displayable_path(dest)) + else: + self._log.info(u'Copying cover art to {0}', + util.displayable_path(album.artpath), + util.displayable_path(dest)) + util.copy(album.artpath, dest) + + def convert_func(self, lib, opts, args): + if not opts.dest: + opts.dest = self.config['dest'].get() + if not opts.dest: + raise ui.UserError(u'no convert destination set') + opts.dest = util.bytestring_path(opts.dest) + + if not opts.threads: + opts.threads = self.config['threads'].get(int) + + if self.config['paths']: + path_formats = ui.get_path_formats(self.config['paths']) + else: + path_formats = ui.get_path_formats() + + if not opts.format: + opts.format = self.config['format'].get(unicode).lower() + + pretend = opts.pretend if opts.pretend is not None else \ + self.config['pretend'].get(bool) + + if not pretend: + ui.commands.list_items(lib, ui.decargs(args), opts.album) + + if not (opts.yes or ui.input_yn(u"Convert? (Y/n)")): + return + + if opts.album: + albums = lib.albums(ui.decargs(args)) + items = (i for a in albums for i in a.items()) + if self.config['copy_album_art']: + for album in albums: + self.copy_album_art(album, opts.dest, path_formats, + pretend) + else: + items = iter(lib.items(ui.decargs(args))) + convert = [self.convert_item(opts.dest, + opts.keep_new, + path_formats, + opts.format, + pretend) + for _ in range(opts.threads)] + pipe = util.pipeline.Pipeline([items, convert]) + pipe.run_parallel() + + def convert_on_import(self, lib, item): + """Transcode a file automatically after it is imported into the + library. 
+ """ + fmt = self.config['format'].get(unicode).lower() + if should_transcode(item, fmt): + command, ext = get_format() + + # Create a temporary file for the conversion. + tmpdir = self.config['tmpdir'].get() + fd, dest = tempfile.mkstemp('.' + ext, dir=tmpdir) + os.close(fd) + dest = util.bytestring_path(dest) + _temp_files.append(dest) # Delete the transcode later. + + # Convert. + try: + self.encode(command, item.path, dest) + except subprocess.CalledProcessError: + return + + # Change the newly-imported database entry to point to the + # converted file. + item.path = dest + item.write() + item.read() # Load new audio information data. + item.store() + + def _cleanup(self, task, session): + for path in task.old_paths: + if path in _temp_files: + if os.path.isfile(path): + util.remove(path) + _temp_files.remove(path) diff --git a/libs/beetsplug/cue.py b/libs/beetsplug/cue.py new file mode 100644 index 00000000..63051bfc --- /dev/null +++ b/libs/beetsplug/cue.py @@ -0,0 +1,57 @@ +# -*- coding: utf-8 -*- +# Copyright 2016 Bruno Cauet +# Split an album-file in tracks thanks a cue file + +from __future__ import division, absolute_import, print_function + +import subprocess +from os import path +from glob import glob + +from beets.util import command_output, displayable_path +from beets.plugins import BeetsPlugin +from beets.autotag import TrackInfo + + +class CuePlugin(BeetsPlugin): + def __init__(self): + super(CuePlugin, self).__init__() + # this does not seem supported by shnsplit + self.config.add({ + 'keep_before': .1, + 'keep_after': .9, + }) + + # self.register_listener('import_task_start', self.look_for_cues) + + def candidates(self, items, artist, album, va_likely): + import pdb + pdb.set_trace() + + def item_candidates(self, item, artist, album): + dir = path.dirname(item.path) + cues = glob.glob(path.join(dir, "*.cue")) + if not cues: + return + if len(cues) > 1: + self._log.info(u"Found multiple cue files doing nothing: {0}", + map(displayable_path, 
cues)) + + cue_file = cues[0] + self._log.info("Found {} for {}", displayable_path(cue_file), item) + + try: + # careful: will ask for input in case of conflicts + command_output(['shnsplit', '-f', cue_file, item.path]) + except (subprocess.CalledProcessError, OSError): + self._log.exception(u'shnsplit execution failed') + return + + tracks = glob(path.join(dir, "*.wav")) + self._log.info("Generated {0} tracks", len(tracks)) + for t in tracks: + title = "dunno lol" + track_id = "wtf" + index = int(path.basename(t)[len("split-track"):-len(".wav")]) + yield TrackInfo(title, track_id, index=index, artist=artist) + # generate TrackInfo instances diff --git a/libs/beetsplug/discogs.py b/libs/beetsplug/discogs.py new file mode 100644 index 00000000..62a78a5f --- /dev/null +++ b/libs/beetsplug/discogs.py @@ -0,0 +1,350 @@ +# -*- coding: utf-8 -*- +# This file is part of beets. +# Copyright 2016, Adrian Sampson. +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. + +"""Adds Discogs album search support to the autotagger. Requires the +discogs-client library. 
+""" +from __future__ import division, absolute_import, print_function + +import beets.ui +from beets import logging +from beets import config +from beets.autotag.hooks import AlbumInfo, TrackInfo, Distance +from beets.plugins import BeetsPlugin +from beets.util import confit +from discogs_client import Release, Client +from discogs_client.exceptions import DiscogsAPIError +from requests.exceptions import ConnectionError +import beets +import re +import time +import json +import socket +import httplib +import os + + +# Silence spurious INFO log lines generated by urllib3. +urllib3_logger = logging.getLogger('requests.packages.urllib3') +urllib3_logger.setLevel(logging.CRITICAL) + +USER_AGENT = u'beets/{0} +http://beets.io/'.format(beets.__version__) + +# Exceptions that discogs_client should really handle but does not. +CONNECTION_ERRORS = (ConnectionError, socket.error, httplib.HTTPException, + ValueError, # JSON decoding raises a ValueError. + DiscogsAPIError) + + +class DiscogsPlugin(BeetsPlugin): + + def __init__(self): + super(DiscogsPlugin, self).__init__() + self.config.add({ + 'apikey': 'rAzVUQYRaoFjeBjyWuWZ', + 'apisecret': 'plxtUTqoCzwxZpqdPysCwGuBSmZNdZVy', + 'tokenfile': 'discogs_token.json', + 'source_weight': 0.5, + }) + self.config['apikey'].redact = True + self.config['apisecret'].redact = True + self.discogs_client = None + self.register_listener('import_begin', self.setup) + + def setup(self, session=None): + """Create the `discogs_client` field. Authenticate if necessary. + """ + c_key = self.config['apikey'].get(unicode) + c_secret = self.config['apisecret'].get(unicode) + + # Get the OAuth token from a file or log in. + try: + with open(self._tokenfile()) as f: + tokendata = json.load(f) + except IOError: + # No token yet. Generate one. 
+ token, secret = self.authenticate(c_key, c_secret) + else: + token = tokendata['token'] + secret = tokendata['secret'] + + self.discogs_client = Client(USER_AGENT, c_key, c_secret, + token, secret) + + def reset_auth(self): + """Delete toke file & redo the auth steps. + """ + os.remove(self._tokenfile()) + self.setup() + + def _tokenfile(self): + """Get the path to the JSON file for storing the OAuth token. + """ + return self.config['tokenfile'].get(confit.Filename(in_app_dir=True)) + + def authenticate(self, c_key, c_secret): + # Get the link for the OAuth page. + auth_client = Client(USER_AGENT, c_key, c_secret) + try: + _, _, url = auth_client.get_authorize_url() + except CONNECTION_ERRORS as e: + self._log.debug(u'connection error: {0}', e) + raise beets.ui.UserError(u'communication with Discogs failed') + + beets.ui.print_(u"To authenticate with Discogs, visit:") + beets.ui.print_(url) + + # Ask for the code and validate it. + code = beets.ui.input_(u"Enter the code:") + try: + token, secret = auth_client.get_access_token(code) + except DiscogsAPIError: + raise beets.ui.UserError(u'Discogs authorization failed') + except CONNECTION_ERRORS as e: + self._log.debug(u'connection error: {0}', e) + raise beets.ui.UserError(u'Discogs token request failed') + + # Save the token for later use. + self._log.debug(u'Discogs token {0}, secret {1}', token, secret) + with open(self._tokenfile(), 'w') as f: + json.dump({'token': token, 'secret': secret}, f) + + return token, secret + + def album_distance(self, items, album_info, mapping): + """Returns the album distance. + """ + dist = Distance() + if album_info.data_source == 'Discogs': + dist.add('source', self.config['source_weight'].as_number()) + return dist + + def candidates(self, items, artist, album, va_likely): + """Returns a list of AlbumInfo objects for discogs search results + matching an album and artist (if not various). 
+ """ + if not self.discogs_client: + return + + if va_likely: + query = album + else: + query = '%s %s' % (artist, album) + try: + return self.get_albums(query) + except DiscogsAPIError as e: + self._log.debug(u'API Error: {0} (query: {1})', e, query) + if e.status_code == 401: + self.reset_auth() + return self.candidates(items, artist, album, va_likely) + else: + return [] + except CONNECTION_ERRORS: + self._log.debug(u'Connection error in album search', exc_info=True) + return [] + + def album_for_id(self, album_id): + """Fetches an album by its Discogs ID and returns an AlbumInfo object + or None if the album is not found. + """ + if not self.discogs_client: + return + + self._log.debug(u'Searching for release {0}', album_id) + # Discogs-IDs are simple integers. We only look for those at the end + # of an input string as to avoid confusion with other metadata plugins. + # An optional bracket can follow the integer, as this is how discogs + # displays the release ID on its webpage. + match = re.search(r'(^|\[*r|discogs\.com/.+/release/)(\d+)($|\])', + album_id) + if not match: + return None + result = Release(self.discogs_client, {'id': int(match.group(2))}) + # Try to obtain title to verify that we indeed have a valid Release + try: + getattr(result, 'title') + except DiscogsAPIError as e: + if e.status_code != 404: + self._log.debug(u'API Error: {0} (query: {1})', e, result._uri) + if e.status_code == 401: + self.reset_auth() + return self.album_for_id(album_id) + return None + except CONNECTION_ERRORS: + self._log.debug(u'Connection error in album lookup', exc_info=True) + return None + return self.get_album_info(result) + + def get_albums(self, query): + """Returns a list of AlbumInfo objects for a discogs search query. + """ + # Strip non-word characters from query. Things like "!" and "-" can + # cause a query to return no results, even if they match the artist or + # album title. Use `re.UNICODE` flag to avoid stripping non-english + # word characters. 
+ # TEMPORARY: Encode as ASCII to work around a bug: + # https://github.com/beetbox/beets/issues/1051 + # When the library is fixed, we should encode as UTF-8. + query = re.sub(r'(?u)\W+', ' ', query).encode('ascii', "replace") + # Strip medium information from query, Things like "CD1" and "disk 1" + # can also negate an otherwise positive result. + query = re.sub(r'(?i)\b(CD|disc)\s*\d+', '', query) + try: + releases = self.discogs_client.search(query, + type='release').page(1) + except CONNECTION_ERRORS: + self._log.debug(u"Communication error while searching for {0!r}", + query, exc_info=True) + return [] + return [self.get_album_info(release) for release in releases[:5]] + + def get_album_info(self, result): + """Returns an AlbumInfo object for a discogs Release object. + """ + artist, artist_id = self.get_artist([a.data for a in result.artists]) + album = re.sub(r' +', ' ', result.title) + album_id = result.data['id'] + # Use `.data` to access the tracklist directly instead of the + # convenient `.tracklist` property, which will strip out useful artist + # information and leave us with skeleton `Artist` objects that will + # each make an API call just to get the same data back. 
+ tracks = self.get_tracks(result.data['tracklist']) + albumtype = ', '.join( + result.data['formats'][0].get('descriptions', [])) or None + va = result.data['artists'][0]['name'].lower() == 'various' + if va: + artist = config['va_name'].get(unicode) + year = result.data['year'] + label = result.data['labels'][0]['name'] + mediums = len(set(t.medium for t in tracks)) + catalogno = result.data['labels'][0]['catno'] + if catalogno == 'none': + catalogno = None + country = result.data.get('country') + media = result.data['formats'][0]['name'] + data_url = result.data['uri'] + return AlbumInfo(album, album_id, artist, artist_id, tracks, asin=None, + albumtype=albumtype, va=va, year=year, month=None, + day=None, label=label, mediums=mediums, + artist_sort=None, releasegroup_id=None, + catalognum=catalogno, script=None, language=None, + country=country, albumstatus=None, media=media, + albumdisambig=None, artist_credit=None, + original_year=None, original_month=None, + original_day=None, data_source='Discogs', + data_url=data_url) + + def get_artist(self, artists): + """Returns an artist string (all artists) and an artist_id (the main + artist) for a list of discogs album or track artists. + """ + artist_id = None + bits = [] + for i, artist in enumerate(artists): + if not artist_id: + artist_id = artist['id'] + name = artist['name'] + # Strip disambiguation number. + name = re.sub(r' \(\d+\)$', '', name) + # Move articles to the front. + name = re.sub(r'(?i)^(.*?), (a|an|the)$', r'\2 \1', name) + bits.append(name) + if artist['join'] and i < len(artists) - 1: + bits.append(artist['join']) + artist = ' '.join(bits).replace(' ,', ',') or None + return artist, artist_id + + def get_tracks(self, tracklist): + """Returns a list of TrackInfo objects for a discogs tracklist. + """ + tracks = [] + index_tracks = {} + index = 0 + for track in tracklist: + # Only real tracks have `position`. Otherwise, it's an index track. 
+ if track['position']: + index += 1 + tracks.append(self.get_track_info(track, index)) + else: + index_tracks[index + 1] = track['title'] + + # Fix up medium and medium_index for each track. Discogs position is + # unreliable, but tracks are in order. + medium = None + medium_count, index_count = 0, 0 + for track in tracks: + # Handle special case where a different medium does not indicate a + # new disc, when there is no medium_index and the ordinal of medium + # is not sequential. For example, I, II, III, IV, V. Assume these + # are the track index, not the medium. + medium_is_index = track.medium and not track.medium_index and ( + len(track.medium) != 1 or + ord(track.medium) - 64 != medium_count + 1 + ) + + if not medium_is_index and medium != track.medium: + # Increment medium_count and reset index_count when medium + # changes. + medium = track.medium + medium_count += 1 + index_count = 0 + index_count += 1 + track.medium, track.medium_index = medium_count, index_count + + # Get `disctitle` from Discogs index tracks. Assume that an index track + # before the first track of each medium is a disc title. + for track in tracks: + if track.medium_index == 1: + if track.index in index_tracks: + disctitle = index_tracks[track.index] + else: + disctitle = None + track.disctitle = disctitle + + return tracks + + def get_track_info(self, track, index): + """Returns a TrackInfo object for a discogs track. + """ + title = track['title'] + track_id = None + medium, medium_index = self.get_track_index(track['position']) + artist, artist_id = self.get_artist(track.get('artists', [])) + length = self.get_track_length(track['duration']) + return TrackInfo(title, track_id, artist, artist_id, length, index, + medium, medium_index, artist_sort=None, + disctitle=None, artist_credit=None) + + def get_track_index(self, position): + """Returns the medium and medium index for a discogs track position. + """ + # medium_index is a number at the end of position. 
medium is everything + # else. E.g. (A)(1), (Side A, Track )(1), (A)(), ()(1), etc. + match = re.match(r'^(.*?)(\d*)$', position.upper()) + if match: + medium, index = match.groups() + else: + self._log.debug(u'Invalid position: {0}', position) + medium = index = None + return medium or None, index or None + + def get_track_length(self, duration): + """Returns the track length in seconds for a discogs duration. + """ + try: + length = time.strptime(duration, '%M:%S') + except ValueError: + return None + return length.tm_min * 60 + length.tm_sec diff --git a/libs/beetsplug/duplicates.py b/libs/beetsplug/duplicates.py new file mode 100644 index 00000000..4f039717 --- /dev/null +++ b/libs/beetsplug/duplicates.py @@ -0,0 +1,337 @@ +# -*- coding: utf-8 -*- +# This file is part of beets. +# Copyright 2016, Pedro Silva. +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. + +"""List duplicate tracks or albums. 
+""" +from __future__ import division, absolute_import, print_function + +import shlex + +from beets.plugins import BeetsPlugin +from beets.ui import decargs, print_, vararg_callback, Subcommand, UserError +from beets.util import command_output, displayable_path, subprocess +from beets.library import Item, Album + +PLUGIN = 'duplicates' + + +class DuplicatesPlugin(BeetsPlugin): + """List duplicate tracks or albums + """ + def __init__(self): + super(DuplicatesPlugin, self).__init__() + + self.config.add({ + 'album': False, + 'checksum': '', + 'copy': '', + 'count': False, + 'delete': False, + 'format': '', + 'full': False, + 'keys': [], + 'merge': False, + 'move': '', + 'path': False, + 'tiebreak': {}, + 'strict': False, + 'tag': '', + }) + + self._command = Subcommand('duplicates', + help=__doc__, + aliases=['dup']) + self._command.parser.add_option( + u'-c', u'--count', dest='count', + action='store_true', + help=u'show duplicate counts', + ) + self._command.parser.add_option( + u'-C', u'--checksum', dest='checksum', + action='store', metavar='PROG', + help=u'report duplicates based on arbitrary command', + ) + self._command.parser.add_option( + u'-d', u'--delete', dest='delete', + action='store_true', + help=u'delete items from library and disk', + ) + self._command.parser.add_option( + u'-F', u'--full', dest='full', + action='store_true', + help=u'show all versions of duplicate tracks or albums', + ) + self._command.parser.add_option( + u'-s', u'--strict', dest='strict', + action='store_true', + help=u'report duplicates only if all attributes are set', + ) + self._command.parser.add_option( + u'-k', u'--keys', dest='keys', + action='callback', metavar='KEY1 KEY2', + callback=vararg_callback, + help=u'report duplicates based on keys', + ) + self._command.parser.add_option( + u'-M', u'--merge', dest='merge', + action='store_true', + help=u'merge duplicate items', + ) + self._command.parser.add_option( + u'-m', u'--move', dest='move', + action='store', 
metavar='DEST', + help=u'move items to dest', + ) + self._command.parser.add_option( + u'-o', u'--copy', dest='copy', + action='store', metavar='DEST', + help=u'copy items to dest', + ) + self._command.parser.add_option( + u'-t', u'--tag', dest='tag', + action='store', + help=u'tag matched items with \'k=v\' attribute', + ) + self._command.parser.add_all_common_options() + + def commands(self): + + def _dup(lib, opts, args): + self.config.set_args(opts) + album = self.config['album'].get(bool) + checksum = self.config['checksum'].get(str) + copy = self.config['copy'].get(str) + count = self.config['count'].get(bool) + delete = self.config['delete'].get(bool) + fmt = self.config['format'].get(str) + full = self.config['full'].get(bool) + keys = self.config['keys'].get(list) + merge = self.config['merge'].get(bool) + move = self.config['move'].get(str) + path = self.config['path'].get(bool) + tiebreak = self.config['tiebreak'].get(dict) + strict = self.config['strict'].get(bool) + tag = self.config['tag'].get(str) + + if album: + if not keys: + keys = ['mb_albumid'] + items = lib.albums(decargs(args)) + else: + if not keys: + keys = ['mb_trackid', 'mb_albumid'] + items = lib.items(decargs(args)) + + if path: + fmt = '$path' + + # Default format string for count mode. + if count and not fmt: + if album: + fmt = '$albumartist - $album' + else: + fmt = '$albumartist - $album - $title' + fmt += ': {0}' + + if checksum: + for i in items: + k, _ = self._checksum(i, checksum) + keys = [k] + + for obj_id, obj_count, objs in self._duplicates(items, + keys=keys, + full=full, + strict=strict, + tiebreak=tiebreak, + merge=merge): + if obj_id: # Skip empty IDs. + for o in objs: + self._process_item(o, + copy=copy, + move=move, + delete=delete, + tag=tag, + fmt=fmt.format(obj_count)) + + self._command.func = _dup + return [self._command] + + def _process_item(self, item, copy=False, move=False, delete=False, + tag=False, fmt=''): + """Process Item `item`. 
+ """ + print_(format(item, fmt)) + if copy: + item.move(basedir=copy, copy=True) + item.store() + if move: + item.move(basedir=move, copy=False) + item.store() + if delete: + item.remove(delete=True) + if tag: + try: + k, v = tag.split('=') + except: + raise UserError( + u"{}: can't parse k=v tag: {}".format(PLUGIN, tag) + ) + setattr(item, k, v) + item.store() + + def _checksum(self, item, prog): + """Run external `prog` on file path associated with `item`, cache + output as flexattr on a key that is the name of the program, and + return the key, checksum tuple. + """ + args = [p.format(file=item.path) for p in shlex.split(prog)] + key = args[0] + checksum = getattr(item, key, False) + if not checksum: + self._log.debug(u'key {0} on item {1} not cached:' + u'computing checksum', + key, displayable_path(item.path)) + try: + checksum = command_output(args) + setattr(item, key, checksum) + item.store() + self._log.debug(u'computed checksum for {0} using {1}', + item.title, key) + except subprocess.CalledProcessError as e: + self._log.debug(u'failed to checksum {0}: {1}', + displayable_path(item.path), e) + else: + self._log.debug(u'key {0} on item {1} cached:' + u'not computing checksum', + key, displayable_path(item.path)) + return key, checksum + + def _group_by(self, objs, keys, strict): + """Return a dictionary with keys arbitrary concatenations of attributes + and values lists of objects (Albums or Items) with those keys. + + If strict, all attributes must be defined for a duplicate match. 
+ """ + import collections + counts = collections.defaultdict(list) + for obj in objs: + values = [getattr(obj, k, None) for k in keys] + values = [v for v in values if v not in (None, '')] + if strict and len(values) < len(keys): + self._log.debug(u'some keys {0} on item {1} are null or empty:' + u' skipping', + keys, displayable_path(obj.path)) + elif (not strict and not len(values)): + self._log.debug(u'all keys {0} on item {1} are null or empty:' + u' skipping', + keys, displayable_path(obj.path)) + else: + key = tuple(values) + counts[key].append(obj) + + return counts + + def _order(self, objs, tiebreak=None): + """Return the objects (Items or Albums) sorted by descending + order of priority. + + If provided, the `tiebreak` dict indicates the field to use to + prioritize the objects. Otherwise, Items are placed in order of + "completeness" (objects with more non-null fields come first) + and Albums are ordered by their track count. + """ + if tiebreak: + kind = 'items' if all(isinstance(o, Item) + for o in objs) else 'albums' + key = lambda x: tuple(getattr(x, k) for k in tiebreak[kind]) + else: + kind = Item if all(isinstance(o, Item) for o in objs) else Album + if kind is Item: + def truthy(v): + # Avoid a Unicode warning by avoiding comparison + # between a bytes object and the empty Unicode + # string ''. + return v is not None and \ + (v != '' if isinstance(v, unicode) else True) + fields = kind.all_keys() + key = lambda x: sum(1 for f in fields if truthy(getattr(x, f))) + else: + key = lambda x: len(x.items()) + + return sorted(objs, key=key, reverse=True) + + def _merge_items(self, objs): + """Merge Item objs by copying missing fields from items in the tail to + the head item. + + Return same number of items, with the head item modified. 
+ """ + fields = Item.all_keys() + for f in fields: + for o in objs[1:]: + if getattr(objs[0], f, None) in (None, ''): + value = getattr(o, f, None) + if value: + self._log.debug(u'key {0} on item {1} is null ' + u'or empty: setting from item {2}', + f, displayable_path(objs[0].path), + displayable_path(o.path)) + setattr(objs[0], f, value) + objs[0].store() + break + return objs + + def _merge_albums(self, objs): + """Merge Album objs by copying missing items from albums in the tail + to the head album. + + Return same number of albums, with the head album modified.""" + ids = [i.mb_trackid for i in objs[0].items()] + for o in objs[1:]: + for i in o.items(): + if i.mb_trackid not in ids: + missing = Item.from_path(i.path) + missing.album_id = objs[0].id + missing.add(i._db) + self._log.debug(u'item {0} missing from album {1}:' + u' merging from {2} into {3}', + missing, + objs[0], + displayable_path(o.path), + displayable_path(missing.destination())) + missing.move(copy=True) + return objs + + def _merge(self, objs): + """Merge duplicate items. See ``_merge_items`` and ``_merge_albums`` + for the relevant strategies. + """ + kind = Item if all(isinstance(o, Item) for o in objs) else Album + if kind is Item: + objs = self._merge_items(objs) + else: + objs = self._merge_albums(objs) + return objs + + def _duplicates(self, objs, keys, full, strict, tiebreak, merge): + """Generate triples of keys, duplicate counts, and constituent objects. + """ + offset = 0 if full else 1 + for k, objs in self._group_by(objs, keys, strict).iteritems(): + if len(objs) > 1: + objs = self._order(objs, tiebreak) + if merge: + objs = self._merge(objs) + yield (k, len(objs) - offset, objs[offset:]) diff --git a/libs/beetsplug/edit.py b/libs/beetsplug/edit.py new file mode 100644 index 00000000..5c7796ee --- /dev/null +++ b/libs/beetsplug/edit.py @@ -0,0 +1,392 @@ +# This file is part of beets. 
+# Copyright 2016 +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. + +"""Open metadata information in a text editor to let the user edit it. +""" +from __future__ import division, absolute_import, print_function + +from beets import plugins +from beets import util +from beets import ui +from beets.dbcore import types +from beets.importer import action +from beets.ui.commands import _do_query, PromptChoice +from copy import deepcopy +import subprocess +import yaml +from tempfile import NamedTemporaryFile +import os + + +# These "safe" types can avoid the format/parse cycle that most fields go +# through: they are safe to edit with native YAML types. +SAFE_TYPES = (types.Float, types.Integer, types.Boolean) + + +class ParseError(Exception): + """The modified file is unreadable. The user should be offered a chance to + fix the error. + """ + + +def edit(filename, log): + """Open `filename` in a text editor. + """ + cmd = util.shlex_split(util.editor_command()) + cmd.append(filename) + log.debug(u'invoking editor command: {!r}', cmd) + try: + subprocess.call(cmd) + except OSError as exc: + raise ui.UserError(u'could not run editor command {!r}: {}'.format( + cmd[0], exc + )) + + +def dump(arg): + """Dump a sequence of dictionaries as YAML for editing. 
+ """ + return yaml.safe_dump_all( + arg, + allow_unicode=True, + default_flow_style=False, + ) + + +def load(s): + """Read a sequence of YAML documents back to a list of dictionaries + with string keys. + + Can raise a `ParseError`. + """ + try: + out = [] + for d in yaml.load_all(s): + if not isinstance(d, dict): + raise ParseError( + u'each entry must be a dictionary; found {}'.format( + type(d).__name__ + ) + ) + + # Convert all keys to strings. They started out as strings, + # but the user may have inadvertently messed this up. + out.append({unicode(k): v for k, v in d.items()}) + + except yaml.YAMLError as e: + raise ParseError(u'invalid YAML: {}'.format(e)) + return out + + +def _safe_value(obj, key, value): + """Check whether the `value` is safe to represent in YAML and trust as + returned from parsed YAML. + + This ensures that values do not change their type when the user edits their + YAML representation. + """ + typ = obj._type(key) + return isinstance(typ, SAFE_TYPES) and isinstance(value, typ.model_type) + + +def flatten(obj, fields): + """Represent `obj`, a `dbcore.Model` object, as a dictionary for + serialization. Only include the given `fields` if provided; + otherwise, include everything. + + The resulting dictionary's keys are strings and the values are + safely YAML-serializable types. + """ + # Format each value. + d = {} + for key in obj.keys(): + value = obj[key] + if _safe_value(obj, key, value): + # A safe value that is faithfully representable in YAML. + d[key] = value + else: + # A value that should be edited as a string. + d[key] = obj.formatted()[key] + + # Possibly filter field names. + if fields: + return {k: v for k, v in d.items() if k in fields} + else: + return d + + +def apply_(obj, data): + """Set the fields of a `dbcore.Model` object according to a + dictionary. + + This is the opposite of `flatten`. The `data` dictionary should have + strings as values. 
+ """ + for key, value in data.items(): + if _safe_value(obj, key, value): + # A safe value *stayed* represented as a safe type. Assign it + # directly. + obj[key] = value + else: + # Either the field was stringified originally or the user changed + # it from a safe type to an unsafe one. Parse it as a string. + obj.set_parse(key, unicode(value)) + + +class EditPlugin(plugins.BeetsPlugin): + + def __init__(self): + super(EditPlugin, self).__init__() + + self.config.add({ + # The default fields to edit. + 'albumfields': 'album albumartist', + 'itemfields': 'track title artist album', + + # Silently ignore any changes to these fields. + 'ignore_fields': 'id path', + }) + + self.register_listener('before_choose_candidate', + self.before_choose_candidate_listener) + + def commands(self): + edit_command = ui.Subcommand( + 'edit', + help=u'interactively edit metadata' + ) + edit_command.parser.add_option( + u'-f', u'--field', + metavar='FIELD', + action='append', + help=u'edit this field also', + ) + edit_command.parser.add_option( + u'--all', + action='store_true', dest='all', + help=u'edit all fields', + ) + edit_command.parser.add_album_option() + edit_command.func = self._edit_command + return [edit_command] + + def _edit_command(self, lib, opts, args): + """The CLI command function for the `beet edit` command. + """ + # Get the objects to edit. + query = ui.decargs(args) + items, albums = _do_query(lib, query, opts.album, False) + objs = albums if opts.album else items + if not objs: + ui.print_(u'Nothing to edit.') + return + + # Get the fields to edit. + if opts.all: + fields = None + else: + fields = self._get_fields(opts.album, opts.field) + self.edit(opts.album, objs, fields) + + def _get_fields(self, album, extra): + """Get the set of fields to edit. + """ + # Start with the configured base fields. + if album: + fields = self.config['albumfields'].as_str_seq() + else: + fields = self.config['itemfields'].as_str_seq() + + # Add the requested extra fields. 
+ if extra: + fields += extra + + # Ensure we always have the `id` field for identification. + fields.append('id') + + return set(fields) + + def edit(self, album, objs, fields): + """The core editor function. + + - `album`: A flag indicating whether we're editing Items or Albums. + - `objs`: The `Item`s or `Album`s to edit. + - `fields`: The set of field names to edit (or None to edit + everything). + """ + # Present the YAML to the user and let her change it. + success = self.edit_objects(objs, fields) + + # Save the new data. + if success: + self.save_changes(objs) + + def edit_objects(self, objs, fields): + """Dump a set of Model objects to a file as text, ask the user + to edit it, and apply any changes to the objects. + + Return a boolean indicating whether the edit succeeded. + """ + # Get the content to edit as raw data structures. + old_data = [flatten(o, fields) for o in objs] + + # Set up a temporary file with the initial data for editing. + new = NamedTemporaryFile(suffix='.yaml', delete=False) + old_str = dump(old_data) + new.write(old_str) + new.close() + + # Loop until we have parseable data and the user confirms. + try: + while True: + # Ask the user to edit the data. + edit(new.name, self._log) + + # Read the data back after editing and check whether anything + # changed. + with open(new.name) as f: + new_str = f.read() + if new_str == old_str: + ui.print_(u"No changes; aborting.") + return False + + # Parse the updated data. + try: + new_data = load(new_str) + except ParseError as e: + ui.print_(u"Could not read data: {}".format(e)) + if ui.input_yn(u"Edit again to fix? (Y/n)", True): + continue + else: + return False + + # Show the changes. + # If the objects are not on the DB yet, we need a copy of their + # original state for show_model_changes. 
+ objs_old = [deepcopy(obj) if not obj._db else None + for obj in objs] + self.apply_data(objs, old_data, new_data) + changed = False + for obj, obj_old in zip(objs, objs_old): + changed |= ui.show_model_changes(obj, obj_old) + if not changed: + ui.print_(u'No changes to apply.') + return False + + # Confirm the changes. + choice = ui.input_options( + (u'continue Editing', u'apply', u'cancel') + ) + if choice == u'a': # Apply. + return True + elif choice == u'c': # Cancel. + return False + elif choice == u'e': # Keep editing. + # Reset the temporary changes to the objects. + for obj in objs: + obj.read() + continue + + # Remove the temporary file before returning. + finally: + os.remove(new.name) + + def apply_data(self, objs, old_data, new_data): + """Take potentially-updated data and apply it to a set of Model + objects. + + The objects are not written back to the database, so the changes + are temporary. + """ + if len(old_data) != len(new_data): + self._log.warn(u'number of objects changed from {} to {}', + len(old_data), len(new_data)) + + obj_by_id = {o.id: o for o in objs} + ignore_fields = self.config['ignore_fields'].as_str_seq() + for old_dict, new_dict in zip(old_data, new_data): + # Prohibit any changes to forbidden fields to avoid + # clobbering `id` and such by mistake. + forbidden = False + for key in ignore_fields: + if old_dict.get(key) != new_dict.get(key): + self._log.warn(u'ignoring object whose {} changed', key) + forbidden = True + break + if forbidden: + continue + + id_ = int(old_dict['id']) + apply_(obj_by_id[id_], new_dict) + + def save_changes(self, objs): + """Save a list of updated Model objects to the database. + """ + # Save to the database and possibly write tags. + for ob in objs: + if ob._dirty: + self._log.debug(u'saving changes to {}', ob) + ob.try_sync(ui.should_write(), ui.should_move()) + + # Methods for interactive importer execution. 
+ + def before_choose_candidate_listener(self, session, task): + """Append an "Edit" choice and an "edit Candidates" choice (if + there are candidates) to the interactive importer prompt. + """ + choices = [PromptChoice('d', 'eDit', self.importer_edit)] + if task.candidates: + choices.append(PromptChoice('c', 'edit Candidates', + self.importer_edit_candidate)) + + return choices + + def importer_edit(self, session, task): + """Callback for invoking the functionality during an interactive + import session on the *original* item tags. + """ + # Assign temporary ids to the Items. + for i, obj in enumerate(task.items): + obj.id = i + 1 + + # Present the YAML to the user and let her change it. + fields = self._get_fields(album=False, extra=[]) + success = self.edit_objects(task.items, fields) + + # Remove temporary ids. + for obj in task.items: + obj.id = None + + # Save the new data. + if success: + # Return action.RETAG, which makes the importer write the tags + # to the files if needed without re-applying metadata. + return action.RETAG + else: + # Edit cancelled / no edits made. Revert changes. + for obj in task.items: + obj.read() + + def importer_edit_candidate(self, session, task): + """Callback for invoking the functionality during an interactive + import session on a *candidate*. The candidate's metadata is + applied to the original items. + """ + # Prompt the user for a candidate. + sel = ui.input_options([], numrange=(1, len(task.candidates))) + # Force applying the candidate on the items. + task.match = task.candidates[sel - 1] + task.apply_metadata() + + return self.importer_edit(session, task) diff --git a/libs/beetsplug/embedart.py b/libs/beetsplug/embedart.py new file mode 100644 index 00000000..71b05f37 --- /dev/null +++ b/libs/beetsplug/embedart.py @@ -0,0 +1,154 @@ +# -*- coding: utf-8 -*- +# This file is part of beets. +# Copyright 2016, Adrian Sampson. 
+# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. + +"""Allows beets to embed album art into file metadata.""" +from __future__ import division, absolute_import, print_function + +import os.path + +from beets.plugins import BeetsPlugin +from beets import ui +from beets.ui import decargs +from beets.util import syspath, normpath, displayable_path, bytestring_path +from beets.util.artresizer import ArtResizer +from beets import config +from beets import art + + +class EmbedCoverArtPlugin(BeetsPlugin): + """Allows albumart to be embedded into the actual files. + """ + def __init__(self): + super(EmbedCoverArtPlugin, self).__init__() + self.config.add({ + 'maxwidth': 0, + 'auto': True, + 'compare_threshold': 0, + 'ifempty': False, + 'remove_art_file': False + }) + + if self.config['maxwidth'].get(int) and not ArtResizer.shared.local: + self.config['maxwidth'] = 0 + self._log.warning(u"ImageMagick or PIL not found; " + u"'maxwidth' option ignored") + if self.config['compare_threshold'].get(int) and not \ + ArtResizer.shared.can_compare: + self.config['compare_threshold'] = 0 + self._log.warning(u"ImageMagick 6.8.7 or higher not installed; " + u"'compare_threshold' option ignored") + + self.register_listener('art_set', self.process_album) + + def commands(self): + # Embed command. 
+ embed_cmd = ui.Subcommand( + 'embedart', help=u'embed image files into file metadata' + ) + embed_cmd.parser.add_option( + u'-f', u'--file', metavar='PATH', help=u'the image file to embed' + ) + maxwidth = self.config['maxwidth'].get(int) + compare_threshold = self.config['compare_threshold'].get(int) + ifempty = self.config['ifempty'].get(bool) + + def embed_func(lib, opts, args): + if opts.file: + imagepath = normpath(opts.file) + if not os.path.isfile(syspath(imagepath)): + raise ui.UserError(u'image file {0} not found'.format( + displayable_path(imagepath) + )) + for item in lib.items(decargs(args)): + art.embed_item(self._log, item, imagepath, maxwidth, None, + compare_threshold, ifempty) + else: + for album in lib.albums(decargs(args)): + art.embed_album(self._log, album, maxwidth, False, + compare_threshold, ifempty) + self.remove_artfile(album) + + embed_cmd.func = embed_func + + # Extract command. + extract_cmd = ui.Subcommand( + 'extractart', + help=u'extract an image from file metadata', + ) + extract_cmd.parser.add_option( + u'-o', dest='outpath', + help=u'image output file', + ) + extract_cmd.parser.add_option( + u'-n', dest='filename', + help=u'image filename to create for all matched albums', + ) + extract_cmd.parser.add_option( + '-a', dest='associate', action='store_true', + help='associate the extracted images with the album', + ) + + def extract_func(lib, opts, args): + if opts.outpath: + art.extract_first(self._log, normpath(opts.outpath), + lib.items(decargs(args))) + else: + filename = bytestring_path(opts.filename or + config['art_filename'].get()) + if os.path.dirname(filename) != '': + self._log.error( + u"Only specify a name rather than a path for -n") + return + for album in lib.albums(decargs(args)): + artpath = normpath(os.path.join(album.path, filename)) + artpath = art.extract_first(self._log, artpath, + album.items()) + if artpath and opts.associate: + album.set_art(artpath) + album.store() + extract_cmd.func = extract_func + + # 
Clear command. + clear_cmd = ui.Subcommand( + 'clearart', + help=u'remove images from file metadata', + ) + + def clear_func(lib, opts, args): + art.clear(self._log, lib, decargs(args)) + clear_cmd.func = clear_func + + return [embed_cmd, extract_cmd, clear_cmd] + + def process_album(self, album): + """Automatically embed art after art has been set + """ + if self.config['auto'] and ui.should_write(): + max_width = self.config['maxwidth'].get(int) + art.embed_album(self._log, album, max_width, True, + self.config['compare_threshold'].get(int), + self.config['ifempty'].get(bool)) + self.remove_artfile(album) + + def remove_artfile(self, album): + """Possibly delete the album art file for an album (if the + appropriate configuration option is enabled. + """ + if self.config['remove_art_file'] and album.artpath: + if os.path.isfile(album.artpath): + self._log.debug(u'Removing album art file for {0}', album) + os.remove(album.artpath) + album.artpath = None + album.store() diff --git a/libs/beetsplug/embyupdate.py b/libs/beetsplug/embyupdate.py new file mode 100644 index 00000000..38f8929e --- /dev/null +++ b/libs/beetsplug/embyupdate.py @@ -0,0 +1,135 @@ +# -*- coding: utf-8 -*- + +"""Updates the Emby Library whenever the beets library is changed. + + emby: + host: localhost + port: 8096 + username: user + password: password +""" +from __future__ import division, absolute_import, print_function + +from beets import config +from beets.plugins import BeetsPlugin +from urllib import urlencode +from urlparse import urljoin, parse_qs, urlsplit, urlunsplit +import hashlib +import requests + + +def api_url(host, port, endpoint): + """Returns a joined url. 
+ """ + joined = urljoin('http://{0}:{1}'.format(host, port), endpoint) + scheme, netloc, path, query_string, fragment = urlsplit(joined) + query_params = parse_qs(query_string) + + query_params['format'] = ['json'] + new_query_string = urlencode(query_params, doseq=True) + + return urlunsplit((scheme, netloc, path, new_query_string, fragment)) + + +def password_data(username, password): + """Returns a dict with username and its encoded password. + """ + return { + 'username': username, + 'password': hashlib.sha1(password).hexdigest(), + 'passwordMd5': hashlib.md5(password).hexdigest() + } + + +def create_headers(user_id, token=None): + """Return header dict that is needed to talk to the Emby API. + """ + headers = { + 'Authorization': 'MediaBrowser', + 'UserId': user_id, + 'Client': 'other', + 'Device': 'empy', + 'DeviceId': 'beets', + 'Version': '0.0.0' + } + + if token: + headers['X-MediaBrowser-Token'] = token + + return headers + + +def get_token(host, port, headers, auth_data): + """Return token for a user. + """ + url = api_url(host, port, '/Users/AuthenticateByName') + r = requests.post(url, headers=headers, data=auth_data) + + return r.json().get('AccessToken') + + +def get_user(host, port, username): + """Return user dict from server or None if there is no user. + """ + url = api_url(host, port, '/Users/Public') + r = requests.get(url) + user = [i for i in r.json() if i['Name'] == username] + + return user + + +class EmbyUpdate(BeetsPlugin): + def __init__(self): + super(EmbyUpdate, self).__init__() + + # Adding defaults. + config['emby'].add({ + u'host': u'localhost', + u'port': 8096 + }) + + self.register_listener('database_change', self.listen_for_db_change) + + def listen_for_db_change(self, lib, model): + """Listens for beets db change and register the update for the end. + """ + self.register_listener('cli_exit', self.update) + + def update(self, lib): + """When the client exists try to send refresh request to Emby. 
+ """ + self._log.info(u'Updating Emby library...') + + host = config['emby']['host'].get() + port = config['emby']['port'].get() + username = config['emby']['username'].get() + password = config['emby']['password'].get() + + # Get user information from the Emby API. + user = get_user(host, port, username) + if not user: + self._log.warning(u'User {0} could not be found.'.format(username)) + return + + # Create Authentication data and headers. + auth_data = password_data(username, password) + headers = create_headers(user[0]['Id']) + + # Get authentication token. + token = get_token(host, port, headers, auth_data) + if not token: + self._log.warning( + u'Could not get token for user {0}', username + ) + return + + # Recreate headers with a token. + headers = create_headers(user[0]['Id'], token=token) + + # Trigger the Update. + url = api_url(host, port, '/Library/Refresh') + r = requests.post(url, headers=headers) + if r.status_code != 204: + self._log.warning(u'Update could not be triggered') + else: + self._log.info(u'Update triggered.') diff --git a/libs/beetsplug/export.py b/libs/beetsplug/export.py new file mode 100644 index 00000000..641b9fef --- /dev/null +++ b/libs/beetsplug/export.py @@ -0,0 +1,151 @@ +# -*- coding: utf-8 -*- +# This file is part of beets. +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. 
+ +"""Exports data from beets +""" + +from __future__ import division, absolute_import, print_function + +import sys +import json +import codecs + +from datetime import datetime, date +from beets.plugins import BeetsPlugin +from beets import ui +from beets import mediafile +from beetsplug.info import make_key_filter, library_data, tag_data + + +class ExportEncoder(json.JSONEncoder): + """Deals with dates because JSON doesn't have a standard""" + def default(self, o): + if isinstance(o, datetime) or isinstance(o, date): + return o.isoformat() + return json.JSONEncoder.default(self, o) + + +class ExportPlugin(BeetsPlugin): + + def __init__(self): + super(ExportPlugin, self).__init__() + + self.config.add({ + 'default_format': 'json', + 'json': { + # json module formatting options + 'formatting': { + 'ensure_ascii': False, + 'indent': 4, + 'separators': (',', ': '), + 'sort_keys': True + } + }, + # TODO: Use something like the edit plugin + # 'item_fields': [] + }) + + def commands(self): + # TODO: Add option to use albums + + cmd = ui.Subcommand('export', help=u'export data from beets') + cmd.func = self.run + cmd.parser.add_option( + u'-l', u'--library', action='store_true', + help=u'show library fields instead of tags', + ) + cmd.parser.add_option( + u'--append', action='store_true', default=False, + help=u'if should append data to the file', + ) + cmd.parser.add_option( + u'-i', u'--include-keys', default=[], + action='append', dest='included_keys', + help=u'comma separated list of keys to show', + ) + cmd.parser.add_option( + u'-o', u'--output', + help=u'path for the output file. 
If not given, will print the data' + ) + return [cmd] + + def run(self, lib, opts, args): + + file_path = opts.output + file_format = self.config['default_format'].get(str) + file_mode = 'a' if opts.append else 'w' + format_options = self.config[file_format]['formatting'].get(dict) + + export_format = ExportFormat.factory( + file_format, **{ + 'file_path': file_path, + 'file_mode': file_mode + } + ) + + items = [] + data_collector = library_data if opts.library else tag_data + + included_keys = [] + for keys in opts.included_keys: + included_keys.extend(keys.split(',')) + key_filter = make_key_filter(included_keys) + + for data_emitter in data_collector(lib, ui.decargs(args)): + try: + data, item = data_emitter() + except (mediafile.UnreadableFileError, IOError) as ex: + self._log.error(u'cannot read file: {0}', ex) + continue + + data = key_filter(data) + items += [data] + + export_format.export(items, **format_options) + + +class ExportFormat(object): + """The output format type""" + + @classmethod + def factory(cls, type, **kwargs): + if type == "json": + if kwargs['file_path']: + return JsonFileFormat(**kwargs) + else: + return JsonPrintFormat() + raise NotImplementedError() + + def export(self, data, **kwargs): + raise NotImplementedError() + + +class JsonPrintFormat(ExportFormat): + """Outputs to the console""" + + def export(self, data, **kwargs): + json.dump(data, sys.stdout, cls=ExportEncoder, **kwargs) + + +class JsonFileFormat(ExportFormat): + """Saves in a json file""" + + def __init__(self, file_path, file_mode=u'w', encoding=u'utf-8'): + self.path = file_path + self.mode = file_mode + self.encoding = encoding + + def export(self, data, **kwargs): + with codecs.open(self.path, self.mode, self.encoding) as f: + json.dump(data, f, cls=ExportEncoder, **kwargs) diff --git a/libs/beetsplug/fetchart.py b/libs/beetsplug/fetchart.py new file mode 100644 index 00000000..2cc362e4 --- /dev/null +++ b/libs/beetsplug/fetchart.py @@ -0,0 +1,861 @@ +# -*- coding: 
utf-8 -*- +# This file is part of beets. +# Copyright 2016, Adrian Sampson. +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. + +"""Fetches album art. +""" +from __future__ import division, absolute_import, print_function + +from contextlib import closing +import os +import re +from tempfile import NamedTemporaryFile + +import requests + +from beets import plugins +from beets import importer +from beets import ui +from beets import util +from beets import config +from beets.util.artresizer import ArtResizer +from beets.util import confit + +try: + import itunes + HAVE_ITUNES = True +except ImportError: + HAVE_ITUNES = False + +IMAGE_EXTENSIONS = ['png', 'jpg', 'jpeg'] +CONTENT_TYPES = ('image/jpeg', 'image/png') +DOWNLOAD_EXTENSION = '.jpg' + + +class Candidate(object): + """Holds information about a matching artwork, deals with validation of + dimension restrictions and resizing. + """ + CANDIDATE_BAD = 0 + CANDIDATE_EXACT = 1 + CANDIDATE_DOWNSCALE = 2 + + MATCH_EXACT = 0 + MATCH_FALLBACK = 1 + + def __init__(self, log, path=None, url=None, source=u'', + match=None, size=None): + self._log = log + self.path = path + self.url = url + self.source = source + self.check = None + self.match = match + self.size = size + + def _validate(self, extra): + """Determine whether the candidate artwork is valid based on + its dimensions (width and ratio). + + Return `CANDIDATE_BAD` if the file is unusable. 
+ Return `CANDIDATE_EXACT` if the file is usable as-is. + Return `CANDIDATE_DOWNSCALE` if the file must be resized. + """ + if not self.path: + return self.CANDIDATE_BAD + + if not (extra['enforce_ratio'] or + extra['minwidth'] or + extra['maxwidth']): + return self.CANDIDATE_EXACT + + # get_size returns None if no local imaging backend is available + if not self.size: + self.size = ArtResizer.shared.get_size(self.path) + self._log.debug(u'image size: {}', self.size) + + if not self.size: + self._log.warning(u'Could not get size of image (please see ' + u'documentation for dependencies). ' + u'The configuration options `minwidth` and ' + u'`enforce_ratio` may be violated.') + return self.CANDIDATE_EXACT + + short_edge = min(self.size) + long_edge = max(self.size) + + # Check minimum size. + if extra['minwidth'] and self.size[0] < extra['minwidth']: + self._log.debug(u'image too small ({} < {})', + self.size[0], extra['minwidth']) + return self.CANDIDATE_BAD + + # Check aspect ratio. + edge_diff = long_edge - short_edge + if extra['enforce_ratio']: + if extra['margin_px']: + if edge_diff > extra['margin_px']: + self._log.debug(u'image is not close enough to being ' + u'square, ({} - {} > {})', + long_edge, short_edge, extra['margin_px']) + return self.CANDIDATE_BAD + elif extra['margin_percent']: + margin_px = extra['margin_percent'] * long_edge + if edge_diff > margin_px: + self._log.debug(u'image is not close enough to being ' + u'square, ({} - {} > {})', + long_edge, short_edge, margin_px) + return self.CANDIDATE_BAD + elif edge_diff: + # also reached for margin_px == 0 and margin_percent == 0.0 + self._log.debug(u'image is not square ({} != {})', + self.size[0], self.size[1]) + return self.CANDIDATE_BAD + + # Check maximum size. 
+ if extra['maxwidth'] and self.size[0] > extra['maxwidth']: + self._log.debug(u'image needs resizing ({} > {})', + self.size[0], extra['maxwidth']) + return self.CANDIDATE_DOWNSCALE + + return self.CANDIDATE_EXACT + + def validate(self, extra): + self.check = self._validate(extra) + return self.check + + def resize(self, extra): + if extra['maxwidth'] and self.check == self.CANDIDATE_DOWNSCALE: + self.path = ArtResizer.shared.resize(extra['maxwidth'], self.path) + + +def _logged_get(log, *args, **kwargs): + """Like `requests.get`, but logs the effective URL to the specified + `log` at the `DEBUG` level. + + Use the optional `message` parameter to specify what to log before + the URL. By default, the string is "getting URL". + + Also sets the User-Agent header to indicate beets. + """ + # Use some arguments with the `send` call but most with the + # `Request` construction. This is a cheap, magic-filled way to + # emulate `requests.get` or, more pertinently, + # `requests.Session.request`. + req_kwargs = kwargs + send_kwargs = {} + for arg in ('stream', 'verify', 'proxies', 'cert', 'timeout'): + if arg in kwargs: + send_kwargs[arg] = req_kwargs.pop(arg) + + # Our special logging message parameter. + if 'message' in kwargs: + message = kwargs.pop('message') + else: + message = 'getting URL' + + req = requests.Request('GET', *args, **req_kwargs) + with requests.Session() as s: + s.headers = {'User-Agent': 'beets'} + prepped = s.prepare_request(req) + log.debug('{}: {}', message, prepped.url) + return s.send(prepped, **send_kwargs) + + +class RequestMixin(object): + """Adds a Requests wrapper to the class that uses the logger, which + must be named `self._log`. + """ + + def request(self, *args, **kwargs): + """Like `requests.get`, but uses the logger `self._log`. + + See also `_logged_get`. 
+ """ + return _logged_get(self._log, *args, **kwargs) + + +# ART SOURCES ################################################################ + +class ArtSource(RequestMixin): + def __init__(self, log, config): + self._log = log + self._config = config + + def get(self, album, extra): + raise NotImplementedError() + + def _candidate(self, **kwargs): + return Candidate(source=self, log=self._log, **kwargs) + + def fetch_image(self, candidate, extra): + raise NotImplementedError() + + +class LocalArtSource(ArtSource): + IS_LOCAL = True + LOC_STR = u'local' + + def fetch_image(self, candidate, extra): + pass + + +class RemoteArtSource(ArtSource): + IS_LOCAL = False + LOC_STR = u'remote' + + def fetch_image(self, candidate, extra): + """Downloads an image from a URL and checks whether it seems to + actually be an image. If so, returns a path to the downloaded image. + Otherwise, returns None. + """ + if extra['maxwidth']: + candidate.url = ArtResizer.shared.proxy_url(extra['maxwidth'], + candidate.url) + try: + with closing(self.request(candidate.url, stream=True, + message=u'downloading image')) as resp: + if 'Content-Type' not in resp.headers \ + or resp.headers['Content-Type'] not in CONTENT_TYPES: + self._log.debug( + u'not a supported image: {}', + resp.headers.get('Content-Type') or u'no content type', + ) + candidate.path = None + return + + # Generate a temporary file with the correct extension. 
+ with NamedTemporaryFile(suffix=DOWNLOAD_EXTENSION, + delete=False) as fh: + for chunk in resp.iter_content(chunk_size=1024): + fh.write(chunk) + self._log.debug(u'downloaded art to: {0}', + util.displayable_path(fh.name)) + candidate.path = fh.name + return + + except (IOError, requests.RequestException, TypeError) as exc: + # Handling TypeError works around a urllib3 bug: + # https://github.com/shazow/urllib3/issues/556 + self._log.debug(u'error fetching art: {}', exc) + candidate.path = None + return + + +class CoverArtArchive(RemoteArtSource): + NAME = u"Cover Art Archive" + + URL = 'http://coverartarchive.org/release/{mbid}/front' + GROUP_URL = 'http://coverartarchive.org/release-group/{mbid}/front' + + def get(self, album, extra): + """Return the Cover Art Archive and Cover Art Archive release group URLs + using album MusicBrainz release ID and release group ID. + """ + if album.mb_albumid: + yield self._candidate(url=self.URL.format(mbid=album.mb_albumid), + match=Candidate.MATCH_EXACT) + if album.mb_releasegroupid: + yield self._candidate( + url=self.GROUP_URL.format(mbid=album.mb_releasegroupid), + match=Candidate.MATCH_FALLBACK) + + +class Amazon(RemoteArtSource): + NAME = u"Amazon" + URL = 'http://images.amazon.com/images/P/%s.%02i.LZZZZZZZ.jpg' + INDICES = (1, 2) + + def get(self, album, extra): + """Generate URLs using Amazon ID (ASIN) string. + """ + if album.asin: + for index in self.INDICES: + yield self._candidate(url=self.URL % (album.asin, index), + match=Candidate.MATCH_EXACT) + + +class AlbumArtOrg(RemoteArtSource): + NAME = u"AlbumArt.org scraper" + URL = 'http://www.albumart.org/index_detail.php' + PAT = r'href\s*=\s*"([^>"]*)"[^>]*title\s*=\s*"View larger image"' + + def get(self, album, extra): + """Return art URL from AlbumArt.org using album ASIN. + """ + if not album.asin: + return + # Get the page from albumart.org. 
+ try: + resp = self.request(self.URL, params={'asin': album.asin}) + self._log.debug(u'scraped art URL: {0}', resp.url) + except requests.RequestException: + self._log.debug(u'error scraping art page') + return + + # Search the page for the image URL. + m = re.search(self.PAT, resp.text) + if m: + image_url = m.group(1) + yield self._candidate(url=image_url, match=Candidate.MATCH_EXACT) + else: + self._log.debug(u'no image found on page') + + +class GoogleImages(RemoteArtSource): + NAME = u"Google Images" + URL = u'https://www.googleapis.com/customsearch/v1' + + def __init__(self, *args, **kwargs): + super(GoogleImages, self).__init__(*args, **kwargs) + self.key = self._config['google_key'].get(), + self.cx = self._config['google_engine'].get(), + + def get(self, album, extra): + """Return art URL from google custom search engine + given an album title and interpreter. + """ + if not (album.albumartist and album.album): + return + search_string = (album.albumartist + ',' + album.album).encode('utf-8') + response = self.request(self.URL, params={ + 'key': self.key, + 'cx': self.cx, + 'q': search_string, + 'searchType': 'image' + }) + + # Get results using JSON. 
+ try: + data = response.json() + except ValueError: + self._log.debug(u'google: error loading response: {}' + .format(response.text)) + return + + if 'error' in data: + reason = data['error']['errors'][0]['reason'] + self._log.debug(u'google fetchart error: {0}', reason) + return + + if 'items' in data.keys(): + for item in data['items']: + yield self._candidate(url=item['link'], + match=Candidate.MATCH_EXACT) + + +class FanartTV(RemoteArtSource): + """Art from fanart.tv requested using their API""" + NAME = u"fanart.tv" + + API_URL = 'http://webservice.fanart.tv/v3/' + API_ALBUMS = API_URL + 'music/albums/' + PROJECT_KEY = '61a7d0ab4e67162b7a0c7c35915cd48e' + + def __init__(self, *args, **kwargs): + super(FanartTV, self).__init__(*args, **kwargs) + self.client_key = self._config['fanarttv_key'].get() + + def get(self, album, extra): + if not album.mb_releasegroupid: + return + + response = self.request( + self.API_ALBUMS + album.mb_releasegroupid, + headers={'api-key': self.PROJECT_KEY, + 'client-key': self.client_key}) + + try: + data = response.json() + except ValueError: + self._log.debug(u'fanart.tv: error loading response: {}', + response.text) + return + + if u'status' in data and data[u'status'] == u'error': + if u'not found' in data[u'error message'].lower(): + self._log.debug(u'fanart.tv: no image found') + elif u'api key' in data[u'error message'].lower(): + self._log.warning(u'fanart.tv: Invalid API key given, please ' + u'enter a valid one in your config file.') + else: + self._log.debug(u'fanart.tv: error on request: {}', + data[u'error message']) + return + + matches = [] + # can there be more than one releasegroupid per response? + for mbid, art in data.get(u'albums', dict()).items(): + # there might be more art referenced, e.g. cdart, and an albumcover + # might not be present, even if the request was succesful + if album.mb_releasegroupid == mbid and u'albumcover' in art: + matches.extend(art[u'albumcover']) + # can this actually occur? 
+ else: + self._log.debug(u'fanart.tv: unexpected mb_releasegroupid in ' + u'response!') + + matches.sort(key=lambda x: x[u'likes'], reverse=True) + for item in matches: + # fanart.tv has a strict size requirement for album art to be + # uploaded + yield self._candidate(url=item[u'url'], + match=Candidate.MATCH_EXACT, + size=(1000, 1000)) + + +class ITunesStore(RemoteArtSource): + NAME = u"iTunes Store" + + def get(self, album, extra): + """Return art URL from iTunes Store given an album title. + """ + if not (album.albumartist and album.album): + return + search_string = (album.albumartist + ' ' + album.album).encode('utf-8') + try: + # Isolate bugs in the iTunes library while searching. + try: + results = itunes.search_album(search_string) + except Exception as exc: + self._log.debug(u'iTunes search failed: {0}', exc) + return + + # Get the first match. + if results: + itunes_album = results[0] + else: + self._log.debug(u'iTunes search for {:r} got no results', + search_string) + return + + if itunes_album.get_artwork()['100']: + small_url = itunes_album.get_artwork()['100'] + big_url = small_url.replace('100x100', '1200x1200') + yield self._candidate(url=big_url, match=Candidate.MATCH_EXACT) + else: + self._log.debug(u'album has no artwork in iTunes Store') + except IndexError: + self._log.debug(u'album not found in iTunes Store') + + +class Wikipedia(RemoteArtSource): + NAME = u"Wikipedia (queried through DBpedia)" + DBPEDIA_URL = 'http://dbpedia.org/sparql' + WIKIPEDIA_URL = 'http://en.wikipedia.org/w/api.php' + SPARQL_QUERY = u'''PREFIX rdf: + PREFIX dbpprop: + PREFIX owl: + PREFIX rdfs: + PREFIX foaf: + + SELECT DISTINCT ?pageId ?coverFilename WHERE {{ + ?subject owl:wikiPageID ?pageId . + ?subject dbpprop:name ?name . + ?subject rdfs:label ?label . + {{ ?subject dbpprop:artist ?artist }} + UNION + {{ ?subject owl:artist ?artist }} + {{ ?artist foaf:name "{artist}"@en }} + UNION + {{ ?artist dbpprop:name "{artist}"@en }} + ?subject rdf:type . 
+ ?subject dbpprop:cover ?coverFilename . + FILTER ( regex(?name, "{album}", "i") ) + }} + Limit 1''' + + def get(self, album, extra): + if not (album.albumartist and album.album): + return + + # Find the name of the cover art filename on DBpedia + cover_filename, page_id = None, None + dbpedia_response = self.request( + self.DBPEDIA_URL, + params={ + 'format': 'application/sparql-results+json', + 'timeout': 2500, + 'query': self.SPARQL_QUERY.format( + artist=album.albumartist.title(), album=album.album) + }, + headers={'content-type': 'application/json'}, + ) + try: + data = dbpedia_response.json() + results = data['results']['bindings'] + if results: + cover_filename = 'File:' + results[0]['coverFilename']['value'] + page_id = results[0]['pageId']['value'] + else: + self._log.debug(u'wikipedia: album not found on dbpedia') + except (ValueError, KeyError, IndexError): + self._log.debug(u'wikipedia: error scraping dbpedia response: {}', + dbpedia_response.text) + + # Ensure we have a filename before attempting to query wikipedia + if not (cover_filename and page_id): + return + + # DBPedia sometimes provides an incomplete cover_filename, indicated + # by the filename having a space before the extension, e.g., 'foo .bar' + # An additional Wikipedia call can help to find the real filename. + # This may be removed once the DBPedia issue is resolved, see: + # https://github.com/dbpedia/extraction-framework/issues/396 + if ' .' in cover_filename and \ + '.' 
not in cover_filename.split(' .')[-1]: + self._log.debug( + u'wikipedia: dbpedia provided incomplete cover_filename' + ) + lpart, rpart = cover_filename.rsplit(' .', 1) + + # Query all the images in the page + wikipedia_response = self.request( + self.WIKIPEDIA_URL, + params={ + 'format': 'json', + 'action': 'query', + 'continue': '', + 'prop': 'images', + 'pageids': page_id, + }, + headers={'content-type': 'application/json'}, + ) + + # Try to see if one of the images on the pages matches our + # imcomplete cover_filename + try: + data = wikipedia_response.json() + results = data['query']['pages'][page_id]['images'] + for result in results: + if re.match(re.escape(lpart) + r'.*?\.' + re.escape(rpart), + result['title']): + cover_filename = result['title'] + break + except (ValueError, KeyError): + self._log.debug( + u'wikipedia: failed to retrieve a cover_filename' + ) + return + + # Find the absolute url of the cover art on Wikipedia + wikipedia_response = self.request( + self.WIKIPEDIA_URL, + params={ + 'format': 'json', + 'action': 'query', + 'continue': '', + 'prop': 'imageinfo', + 'iiprop': 'url', + 'titles': cover_filename.encode('utf-8'), + }, + headers={'content-type': 'application/json'}, + ) + + try: + data = wikipedia_response.json() + results = data['query']['pages'] + for _, result in results.iteritems(): + image_url = result['imageinfo'][0]['url'] + yield self._candidate(url=image_url, + match=Candidate.MATCH_EXACT) + except (ValueError, KeyError, IndexError): + self._log.debug(u'wikipedia: error scraping imageinfo') + return + + +class FileSystem(LocalArtSource): + NAME = u"Filesystem" + + @staticmethod + def filename_priority(filename, cover_names): + """Sort order for image names. + + Return indexes of cover names found in the image filename. This + means that images with lower-numbered and more keywords will have + higher priority. 
+ """ + return [idx for (idx, x) in enumerate(cover_names) if x in filename] + + def get(self, album, extra): + """Look for album art files in the specified directories. + """ + paths = extra['paths'] + if not paths: + return + cover_names = extra['cover_names'] + cover_pat = br"(\b|_)({0})(\b|_)".format(b'|'.join(cover_names)) + cautious = extra['cautious'] + + for path in paths: + if not os.path.isdir(path): + continue + + # Find all files that look like images in the directory. + images = [] + for fn in os.listdir(path): + for ext in IMAGE_EXTENSIONS: + if fn.lower().endswith(b'.' + ext.encode('utf8')) and \ + os.path.isfile(os.path.join(path, fn)): + images.append(fn) + + # Look for "preferred" filenames. + images = sorted(images, + key=lambda x: + self.filename_priority(x, cover_names)) + remaining = [] + for fn in images: + if re.search(cover_pat, os.path.splitext(fn)[0], re.I): + self._log.debug(u'using well-named art file {0}', + util.displayable_path(fn)) + yield self._candidate(path=os.path.join(path, fn), + match=Candidate.MATCH_EXACT) + else: + remaining.append(fn) + + # Fall back to any image in the folder. + if remaining and not cautious: + self._log.debug(u'using fallback art file {0}', + util.displayable_path(remaining[0])) + yield self._candidate(path=os.path.join(path, remaining[0]), + match=Candidate.MATCH_FALLBACK) + + +# Try each source in turn. 
+ +SOURCES_ALL = [u'filesystem', + u'coverart', u'itunes', u'amazon', u'albumart', + u'wikipedia', u'google', u'fanarttv'] + +ART_SOURCES = { + u'filesystem': FileSystem, + u'coverart': CoverArtArchive, + u'itunes': ITunesStore, + u'albumart': AlbumArtOrg, + u'amazon': Amazon, + u'wikipedia': Wikipedia, + u'google': GoogleImages, + u'fanarttv': FanartTV, +} +SOURCE_NAMES = {v: k for k, v in ART_SOURCES.items()} + +# PLUGIN LOGIC ############################################################### + + +class FetchArtPlugin(plugins.BeetsPlugin, RequestMixin): + PAT_PX = r"(0|[1-9][0-9]*)px" + PAT_PERCENT = r"(100(\.00?)?|[1-9]?[0-9](\.[0-9]{1,2})?)%" + + def __init__(self): + super(FetchArtPlugin, self).__init__() + + # Holds candidates corresponding to downloaded images between + # fetching them and placing them in the filesystem. + self.art_candidates = {} + + self.config.add({ + 'auto': True, + 'minwidth': 0, + 'maxwidth': 0, + 'enforce_ratio': False, + 'cautious': False, + 'cover_names': ['cover', 'front', 'art', 'album', 'folder'], + 'sources': ['filesystem', + 'coverart', 'itunes', 'amazon', 'albumart'], + 'google_key': None, + 'google_engine': u'001442825323518660753:hrh5ch1gjzm', + 'fanarttv_key': None, + 'store_source': False, + }) + self.config['google_key'].redact = True + self.config['fanarttv_key'].redact = True + + self.minwidth = self.config['minwidth'].get(int) + self.maxwidth = self.config['maxwidth'].get(int) + + # allow both pixel and percentage-based margin specifications + self.enforce_ratio = self.config['enforce_ratio'].get( + confit.OneOf([bool, + confit.String(pattern=self.PAT_PX), + confit.String(pattern=self.PAT_PERCENT)])) + self.margin_px = None + self.margin_percent = None + if type(self.enforce_ratio) is unicode: + if self.enforce_ratio[-1] == u'%': + self.margin_percent = float(self.enforce_ratio[:-1]) / 100 + elif self.enforce_ratio[-2:] == u'px': + self.margin_px = int(self.enforce_ratio[:-2]) + else: + # shouldn't happen + raise 
confit.ConfigValueError() + self.enforce_ratio = True + + cover_names = self.config['cover_names'].as_str_seq() + self.cover_names = map(util.bytestring_path, cover_names) + self.cautious = self.config['cautious'].get(bool) + self.store_source = self.config['store_source'].get(bool) + + self.src_removed = (config['import']['delete'].get(bool) or + config['import']['move'].get(bool)) + + if self.config['auto']: + # Enable two import hooks when fetching is enabled. + self.import_stages = [self.fetch_art] + self.register_listener('import_task_files', self.assign_art) + + available_sources = list(SOURCES_ALL) + if not HAVE_ITUNES and u'itunes' in available_sources: + available_sources.remove(u'itunes') + if not self.config['google_key'].get() and \ + u'google' in available_sources: + available_sources.remove(u'google') + sources_name = plugins.sanitize_choices( + self.config['sources'].as_str_seq(), available_sources) + if 'remote_priority' in self.config: + self._log.warning( + u'The `fetch_art.remote_priority` configuration option has ' + u'been deprecated, see the documentation.') + if self.config['remote_priority'].get(bool): + try: + sources_name.remove(u'filesystem') + sources_name.append(u'filesystem') + except ValueError: + pass + self.sources = [ART_SOURCES[s](self._log, self.config) + for s in sources_name] + + # Asynchronous; after music is added to the library. + def fetch_art(self, session, task): + """Find art for the album being imported.""" + if task.is_album: # Only fetch art for full albums. + if task.album.artpath and os.path.isfile(task.album.artpath): + # Album already has art (probably a re-import); skip it. + return + if task.choice_flag == importer.action.ASIS: + # For as-is imports, don't search Web sources for art. + local = True + elif task.choice_flag == importer.action.APPLY: + # Search everywhere for art. + local = False + else: + # For any other choices (e.g., TRACKS), do nothing. 
+ return + + candidate = self.art_for_album(task.album, task.paths, local) + + if candidate: + self.art_candidates[task] = candidate + + def _set_art(self, album, candidate, delete=False): + album.set_art(candidate.path, delete) + if self.store_source: + # store the source of the chosen artwork in a flexible field + self._log.debug( + u"Storing art_source for {0.albumartist} - {0.album}", + album) + album.art_source = SOURCE_NAMES[type(candidate.source)] + album.store() + + # Synchronous; after music files are put in place. + def assign_art(self, session, task): + """Place the discovered art in the filesystem.""" + if task in self.art_candidates: + candidate = self.art_candidates.pop(task) + + self._set_art(task.album, candidate, not self.src_removed) + + if self.src_removed: + task.prune(candidate.path) + + # Manual album art fetching. + def commands(self): + cmd = ui.Subcommand('fetchart', help='download album art') + cmd.parser.add_option( + u'-f', u'--force', dest='force', + action='store_true', default=False, + help=u're-download art when already present' + ) + + def func(lib, opts, args): + self.batch_fetch_art(lib, lib.albums(ui.decargs(args)), opts.force) + cmd.func = func + return [cmd] + + # Utilities converted from functions to methods on logging overhaul + + def art_for_album(self, album, paths, local_only=False): + """Given an Album object, returns a path to downloaded art for the + album (or None if no art is found). If `maxwidth`, then images are + resized to this maximum pixel size. If `local_only`, then only local + image files from the filesystem are returned; no network requests + are made. 
+ """ + out = None + + # all the information any of the sources might need + extra = {'paths': paths, + 'cover_names': self.cover_names, + 'cautious': self.cautious, + 'enforce_ratio': self.enforce_ratio, + 'margin_px': self.margin_px, + 'margin_percent': self.margin_percent, + 'minwidth': self.minwidth, + 'maxwidth': self.maxwidth} + + for source in self.sources: + if source.IS_LOCAL or not local_only: + self._log.debug( + u'trying source {0} for album {1.albumartist} - {1.album}', + SOURCE_NAMES[type(source)], + album, + ) + # URLs might be invalid at this point, or the image may not + # fulfill the requirements + for candidate in source.get(album, extra): + source.fetch_image(candidate, extra) + if candidate.validate(extra): + out = candidate + self._log.debug( + u'using {0.LOC_STR} image {1}'.format( + source, util.displayable_path(out.path))) + break + if out: + break + + if out: + out.resize(extra) + + return out + + def batch_fetch_art(self, lib, albums, force): + """Fetch album art for each of the albums. This implements the manual + fetchart CLI command. + """ + for album in albums: + if album.artpath and not force and os.path.isfile(album.artpath): + message = ui.colorize('text_highlight_minor', u'has album art') + else: + # In ordinary invocations, look for images on the + # filesystem. When forcing, however, always go to the Web + # sources. + local_paths = None if force else [album.path] + + candidate = self.art_for_album(album, local_paths) + if candidate: + self._set_art(album, candidate) + message = ui.colorize('text_success', u'found album art') + else: + message = ui.colorize('text_error', u'no art found') + + self._log.info(u'{0}: {1}', album, message) diff --git a/libs/beetsplug/filefilter.py b/libs/beetsplug/filefilter.py new file mode 100644 index 00000000..72b5ea9e --- /dev/null +++ b/libs/beetsplug/filefilter.py @@ -0,0 +1,78 @@ +# -*- coding: utf-8 -*- +# This file is part of beets. +# Copyright 2016, Malte Ried. 
+# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. + +"""Filter imported files using a regular expression. +""" + +from __future__ import division, absolute_import, print_function + +import re +from beets import config +from beets.plugins import BeetsPlugin +from beets.importer import SingletonImportTask + + +class FileFilterPlugin(BeetsPlugin): + def __init__(self): + super(FileFilterPlugin, self).__init__() + self.register_listener('import_task_created', + self.import_task_created_event) + self.config.add({ + 'path': '.*' + }) + + self.path_album_regex = \ + self.path_singleton_regex = \ + re.compile(self.config['path'].get()) + + if 'album_path' in self.config: + self.path_album_regex = re.compile(self.config['album_path'].get()) + + if 'singleton_path' in self.config: + self.path_singleton_regex = re.compile( + self.config['singleton_path'].get()) + + def import_task_created_event(self, session, task): + if task.items and len(task.items) > 0: + items_to_import = [] + for item in task.items: + if self.file_filter(item['path']): + items_to_import.append(item) + if len(items_to_import) > 0: + task.items = items_to_import + else: + # Returning an empty list of tasks from the handler + # drops the task from the rest of the importer pipeline. + return [] + + elif isinstance(task, SingletonImportTask): + if not self.file_filter(task.item['path']): + return [] + + # If not filtered, return the original task unchanged. 
+ return [task] + + def file_filter(self, full_path): + """Checks if the configured regular expressions allow the import + of the file given in full_path. + """ + import_config = dict(config['import']) + if 'singletons' not in import_config or not import_config[ + 'singletons']: + # Album + return self.path_album_regex.match(full_path) is not None + else: + # Singleton + return self.path_singleton_regex.match(full_path) is not None diff --git a/libs/beetsplug/freedesktop.py b/libs/beetsplug/freedesktop.py new file mode 100644 index 00000000..a768be2d --- /dev/null +++ b/libs/beetsplug/freedesktop.py @@ -0,0 +1,37 @@ +# -*- coding: utf-8 -*- +# This file is part of beets. +# Copyright 2016, Matt Lichtenberg. +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. + +"""Creates freedesktop.org-compliant .directory files on an album level. +""" + +from __future__ import division, absolute_import, print_function + +from beets.plugins import BeetsPlugin +from beets import ui + + +class FreedesktopPlugin(BeetsPlugin): + def commands(self): + deprecated = ui.Subcommand( + "freedesktop", + help=u"Print a message to redirect to thumbnails --dolphin") + deprecated.func = self.deprecation_message + return [deprecated] + + def deprecation_message(self, lib, opts, args): + ui.print_(u"This plugin is deprecated. Its functionality is " + u"superseded by the 'thumbnails' plugin") + ui.print_(u"'thumbnails --dolphin' replaces freedesktop. 
See doc & " + u"changelog for more information") diff --git a/libs/beetsplug/fromfilename.py b/libs/beetsplug/fromfilename.py new file mode 100644 index 00000000..e9c49bee --- /dev/null +++ b/libs/beetsplug/fromfilename.py @@ -0,0 +1,173 @@ +# -*- coding: utf-8 -*- +# This file is part of beets. +# Copyright 2016, Jan-Erik Dahlin +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. + +"""If the title is empty, try to extract track and title from the +filename. +""" +from __future__ import division, absolute_import, print_function + +from beets import plugins +from beets.util import displayable_path +import os +import re + + +# Filename field extraction patterns. +PATTERNS = [ + # "01 - Track 01" and "01": do nothing + r'^(\d+)\s*-\s*track\s*\d$', + r'^\d+$', + + # Useful patterns. 
+    r'^(?P<artist>.+)-(?P<title>.+)-(?P<tag>.*)$',
+    r'^(?P<track>\d+)\s*-(?P<artist>.+)-(?P<title>.+)-(?P<tag>.*)$',
+    r'^(?P<track>\d+)\s(?P<artist>.+)-(?P<title>.+)-(?P<tag>.*)$',
+    r'^(?P<artist>.+)-(?P<title>.+)$',
+    r'^(?P<track>\d+)\.\s*(?P<artist>.+)-(?P<title>.+)$',
+    r'^(?P<track>\d+)\s*-\s*(?P<artist>.+)-(?P<title>.+)$',
+    r'^(?P<track>\d+)\s*-(?P<artist>.+)-(?P<title>.+)$',
+    r'^(?P<track>\d+)\s(?P<artist>.+)-(?P<title>.+)$',
+    r'^(?P<title>.+)$',
+    r'^(?P<track>\d+)\.\s*(?P<title>.+)$',
+    r'^(?P<track>\d+)\s*-\s*(?P<title>.+)$',
+    r'^(?P<track>\d+)\s(?P<title>.+)$',
+    r'^(?P<title>.+) by (?P<artist>.+)$',
+]
+
+# Titles considered "empty" and in need of replacement.
+BAD_TITLE_PATTERNS = [
+    r'^$',
+    r'\d+?\s?-?\s*track\s*\d+',
+]
+
+
+def equal(seq):
+    """Determine whether a sequence holds identical elements.
+    """
+    return len(set(seq)) <= 1
+
+
+def equal_fields(matchdict, field):
+    """Do all items in `matchdict`, whose values are dictionaries, have
+    the same value for `field`? (If they do, the field is probably not
+    the title.)
+    """
+    return equal(m[field] for m in matchdict.values())
+
+
+def all_matches(names, pattern):
+    """If all the filenames in the item/filename mapping match the
+    pattern, return a dictionary mapping the items to dictionaries
+    giving the value for each named subpattern in the match. Otherwise,
+    return None.
+    """
+    matches = {}
+    for item, name in names.items():
+        m = re.match(pattern, name, re.IGNORECASE)
+        if m and m.groupdict():
+            # Only yield a match when the regex applies *and* has
+            # capture groups. Otherwise, no information can be extracted
+            # from the filename.
+            matches[item] = m.groupdict()
+        else:
+            return None
+    return matches
+
+
+def bad_title(title):
+    """Determine whether a given title is "bad" (empty or otherwise
+    meaningless) and in need of replacement.
+ """ + for pat in BAD_TITLE_PATTERNS: + if re.match(pat, title, re.IGNORECASE): + return True + return False + + +def apply_matches(d): + """Given a mapping from items to field dicts, apply the fields to + the objects. + """ + some_map = d.values()[0] + keys = some_map.keys() + + # Only proceed if the "tag" field is equal across all filenames. + if 'tag' in keys and not equal_fields(d, 'tag'): + return + + # Given both an "artist" and "title" field, assume that one is + # *actually* the artist, which must be uniform, and use the other + # for the title. This, of course, won't work for VA albums. + if 'artist' in keys: + if equal_fields(d, 'artist'): + artist = some_map['artist'] + title_field = 'title' + elif equal_fields(d, 'title'): + artist = some_map['title'] + title_field = 'artist' + else: + # Both vary. Abort. + return + + for item in d: + if not item.artist: + item.artist = artist + + # No artist field: remaining field is the title. + else: + title_field = 'title' + + # Apply the title and track. + for item in d: + if bad_title(item.title): + item.title = unicode(d[item][title_field]) + if 'track' in d[item] and item.track == 0: + item.track = int(d[item]['track']) + + +# Plugin structure and hook into import process. + +class FromFilenamePlugin(plugins.BeetsPlugin): + def __init__(self): + super(FromFilenamePlugin, self).__init__() + self.register_listener('import_task_start', filename_task) + + +def filename_task(task, session): + """Examine each item in the task to see if we can extract a title + from the filename. Try to match all filenames to a number of + regexps, starting with the most complex patterns and successively + trying less complex patterns. As soon as all filenames match the + same regex we can make an educated guess of which part of the + regex that contains the title. + """ + items = task.items if task.is_album else [task.item] + + # Look for suspicious (empty or meaningless) titles. 
+ missing_titles = sum(bad_title(i.title) for i in items) + + if missing_titles: + # Get the base filenames (no path or extension). + names = {} + for item in items: + path = displayable_path(item.path) + name, _ = os.path.splitext(os.path.basename(path)) + names[item] = name + + # Look for useful information in the filenames. + for pattern in PATTERNS: + d = all_matches(names, pattern) + if d: + apply_matches(d) diff --git a/libs/beetsplug/ftintitle.py b/libs/beetsplug/ftintitle.py new file mode 100644 index 00000000..eefdfcf1 --- /dev/null +++ b/libs/beetsplug/ftintitle.py @@ -0,0 +1,169 @@ +# -*- coding: utf-8 -*- +# This file is part of beets. +# Copyright 2016, Verrus, <github.com/Verrus/beets-plugin-featInTitle> +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. + +"""Moves "featured" artists to the title from the artist field. +""" +from __future__ import division, absolute_import, print_function + +import re + +from beets import plugins +from beets import ui +from beets.util import displayable_path + + +def split_on_feat(artist): + """Given an artist string, split the "main" artist from any artist + on the right-hand side of a string like "feat". Return the main + artist, which is always a string, and the featuring artist, which + may be a string or None if none is present. + """ + # split on the first "feat". 
+ regex = re.compile(plugins.feat_tokens(), re.IGNORECASE) + parts = [s.strip() for s in regex.split(artist, 1)] + if len(parts) == 1: + return parts[0], None + else: + return tuple(parts) + + +def contains_feat(title): + """Determine whether the title contains a "featured" marker. + """ + return bool(re.search(plugins.feat_tokens(), title, flags=re.IGNORECASE)) + + +def find_feat_part(artist, albumartist): + """Attempt to find featured artists in the item's artist fields and + return the results. Returns None if no featured artist found. + """ + feat_part = None + + # Look for the album artist in the artist field. If it's not + # present, give up. + albumartist_split = artist.split(albumartist, 1) + if len(albumartist_split) <= 1: + return feat_part + + # If the last element of the split (the right-hand side of the + # album artist) is nonempty, then it probably contains the + # featured artist. + elif albumartist_split[-1] != '': + # Extract the featured artist from the right-hand side. + _, feat_part = split_on_feat(albumartist_split[-1]) + + # Otherwise, if there's nothing on the right-hand side, look for a + # featuring artist on the left-hand side. + else: + lhs, rhs = split_on_feat(albumartist_split[0]) + if lhs: + feat_part = lhs + + return feat_part + + +class FtInTitlePlugin(plugins.BeetsPlugin): + def __init__(self): + super(FtInTitlePlugin, self).__init__() + + self.config.add({ + 'auto': True, + 'drop': False, + 'format': u'feat. 
{0}', + }) + + self._command = ui.Subcommand( + 'ftintitle', + help=u'move featured artists to the title field') + + self._command.parser.add_option( + u'-d', u'--drop', dest='drop', + action='store_true', default=False, + help=u'drop featuring from artists and ignore title update') + + if self.config['auto']: + self.import_stages = [self.imported] + + def commands(self): + + def func(lib, opts, args): + self.config.set_args(opts) + drop_feat = self.config['drop'].get(bool) + write = ui.should_write() + + for item in lib.items(ui.decargs(args)): + self.ft_in_title(item, drop_feat) + item.store() + if write: + item.try_write() + + self._command.func = func + return [self._command] + + def imported(self, session, task): + """Import hook for moving featuring artist automatically. + """ + drop_feat = self.config['drop'].get(bool) + + for item in task.imported_items(): + self.ft_in_title(item, drop_feat) + item.store() + + def update_metadata(self, item, feat_part, drop_feat): + """Choose how to add new artists to the title and set the new + metadata. Also, print out messages about any changes that are made. + If `drop_feat` is set, then do not add the artist to the title; just + remove it from the artist field. + """ + # In all cases, update the artist fields. + self._log.info(u'artist: {0} -> {1}', item.artist, item.albumartist) + item.artist = item.albumartist + if item.artist_sort: + # Just strip the featured artist from the sort name. + item.artist_sort, _ = split_on_feat(item.artist_sort) + + # Only update the title if it does not already contain a featured + # artist and if we do not drop featuring information. 
+ if not drop_feat and not contains_feat(item.title): + feat_format = self.config['format'].get(unicode) + new_format = feat_format.format(feat_part) + new_title = u"{0} {1}".format(item.title, new_format) + self._log.info(u'title: {0} -> {1}', item.title, new_title) + item.title = new_title + + def ft_in_title(self, item, drop_feat): + """Look for featured artists in the item's artist fields and move + them to the title. + """ + artist = item.artist.strip() + albumartist = item.albumartist.strip() + + # Check whether there is a featured artist on this track and the + # artist field does not exactly match the album artist field. In + # that case, we attempt to move the featured artist to the title. + _, featured = split_on_feat(artist) + if featured and albumartist != artist and albumartist: + self._log.info('{}', displayable_path(item.path)) + + feat_part = None + + # Attempt to find the featured artist. + feat_part = find_feat_part(artist, albumartist) + + # If we have a featuring artist, move it to the title. + if feat_part: + self.update_metadata(item, feat_part, drop_feat) + else: + self._log.info(u'no featuring artists found') diff --git a/libs/beetsplug/fuzzy.py b/libs/beetsplug/fuzzy.py new file mode 100644 index 00000000..3decdc60 --- /dev/null +++ b/libs/beetsplug/fuzzy.py @@ -0,0 +1,48 @@ +# -*- coding: utf-8 -*- +# This file is part of beets. +# Copyright 2016, Philippe Mongeau. 
+# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. + +"""Provides a fuzzy matching query. +""" + +from __future__ import division, absolute_import, print_function + +from beets.plugins import BeetsPlugin +from beets.dbcore.query import StringFieldQuery +from beets import config +import difflib + + +class FuzzyQuery(StringFieldQuery): + @classmethod + def string_match(cls, pattern, val): + # smartcase + if pattern.islower(): + val = val.lower() + query_matcher = difflib.SequenceMatcher(None, pattern, val) + threshold = config['fuzzy']['threshold'].as_number() + return query_matcher.quick_ratio() >= threshold + + +class FuzzyPlugin(BeetsPlugin): + def __init__(self): + super(FuzzyPlugin, self).__init__() + self.config.add({ + 'prefix': '~', + 'threshold': 0.7, + }) + + def queries(self): + prefix = self.config['prefix'].get(basestring) + return {prefix: FuzzyQuery} diff --git a/libs/beetsplug/hook.py b/libs/beetsplug/hook.py new file mode 100644 index 00000000..4f2b8f0e --- /dev/null +++ b/libs/beetsplug/hook.py @@ -0,0 +1,108 @@ +# This file is part of beets. +# Copyright 2015, Adrian Sampson. 
+# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. + +"""Allows custom commands to be run when an event is emitted by beets""" +from __future__ import division, absolute_import, print_function + +import string +import subprocess + +from beets.plugins import BeetsPlugin +from beets.ui import _arg_encoding +from beets.util import shlex_split + + +class CodingFormatter(string.Formatter): + """A custom string formatter that decodes the format string and it's + fields. + """ + + def __init__(self, coding): + """Creates a new coding formatter with the provided coding.""" + self._coding = coding + + def format(self, format_string, *args, **kwargs): + """Formats the provided string using the provided arguments and keyword + arguments. + + This method decodes the format string using the formatter's coding. + + See str.format and string.Formatter.format. + """ + try: + format_string = format_string.decode(self._coding) + except UnicodeEncodeError: + pass + + return super(CodingFormatter, self).format(format_string, *args, + **kwargs) + + def convert_field(self, value, conversion): + """Converts the provided value given a conversion type. + + This method decodes the converted value using the formatter's coding. + + See string.Formatter.convert_field. 
+ """ + converted = super(CodingFormatter, self).convert_field(value, + conversion) + try: + converted = converted.decode(self._coding) + except UnicodeEncodeError: + pass + + return converted + + +class HookPlugin(BeetsPlugin): + """Allows custom commands to be run when an event is emitted by beets""" + def __init__(self): + super(HookPlugin, self).__init__() + + self.config.add({ + 'hooks': [] + }) + + hooks = self.config['hooks'].get(list) + + for hook_index in range(len(hooks)): + hook = self.config['hooks'][hook_index] + + hook_event = hook['event'].get(unicode) + hook_command = hook['command'].get(unicode) + + self.create_and_register_hook(hook_event, hook_command) + + def create_and_register_hook(self, event, command): + def hook_function(**kwargs): + if command is None or len(command) == 0: + self._log.error('invalid command "{0}"', command) + return + + formatter = CodingFormatter(_arg_encoding()) + command_pieces = shlex_split(command) + + for i, piece in enumerate(command_pieces): + command_pieces[i] = formatter.format(piece, event=event, + **kwargs) + + self._log.debug(u'running command "{0}" for event {1}', + u' '.join(command_pieces), event) + + try: + subprocess.Popen(command_pieces).wait() + except OSError as exc: + self._log.error(u'hook for {0} failed: {1}', event, exc) + + self.register_listener(event, hook_function) diff --git a/libs/beetsplug/ihate.py b/libs/beetsplug/ihate.py new file mode 100644 index 00000000..6ed250fe --- /dev/null +++ b/libs/beetsplug/ihate.py @@ -0,0 +1,82 @@ +# -*- coding: utf-8 -*- +# This file is part of beets. +# Copyright 2016, Blemjhoo Tezoulbr <baobab@heresiarch.info>. 
+# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. + +from __future__ import division, absolute_import, print_function + +"""Warns you about things you hate (or even blocks import).""" + +from beets.plugins import BeetsPlugin +from beets.importer import action +from beets.library import parse_query_string +from beets.library import Item +from beets.library import Album + + +__author__ = 'baobab@heresiarch.info' +__version__ = '2.0' + + +def summary(task): + """Given an ImportTask, produce a short string identifying the + object. + """ + if task.is_album: + return u'{0} - {1}'.format(task.cur_artist, task.cur_album) + else: + return u'{0} - {1}'.format(task.item.artist, task.item.title) + + +class IHatePlugin(BeetsPlugin): + def __init__(self): + super(IHatePlugin, self).__init__() + self.register_listener('import_task_choice', + self.import_task_choice_event) + self.config.add({ + 'warn': [], + 'skip': [], + }) + + @classmethod + def do_i_hate_this(cls, task, action_patterns): + """Process group of patterns (warn or skip) and returns True if + task is hated and not whitelisted. 
+ """ + if action_patterns: + for query_string in action_patterns: + query, _ = parse_query_string( + query_string, + Album if task.is_album else Item, + ) + if any(query.match(item) for item in task.imported_items()): + return True + return False + + def import_task_choice_event(self, session, task): + skip_queries = self.config['skip'].as_str_seq() + warn_queries = self.config['warn'].as_str_seq() + + if task.choice_flag == action.APPLY: + if skip_queries or warn_queries: + self._log.debug(u'processing your hate') + if self.do_i_hate_this(task, skip_queries): + task.choice_flag = action.SKIP + self._log.info(u'skipped: {0}', summary(task)) + return + if self.do_i_hate_this(task, warn_queries): + self._log.info(u'you may hate this: {0}', summary(task)) + else: + self._log.debug(u'nothing to do') + else: + self._log.debug(u'user made a decision, nothing to do') diff --git a/libs/beetsplug/importadded.py b/libs/beetsplug/importadded.py new file mode 100644 index 00000000..77c7e7ab --- /dev/null +++ b/libs/beetsplug/importadded.py @@ -0,0 +1,134 @@ +# -*- coding: utf-8 -*- + +"""Populate an item's `added` and `mtime` fields by using the file +modification time (mtime) of the item's source file before import. + +Reimported albums and items are skipped. 
+""" +from __future__ import division, absolute_import, print_function + +import os + +from beets import util +from beets import importer +from beets.plugins import BeetsPlugin + + +class ImportAddedPlugin(BeetsPlugin): + def __init__(self): + super(ImportAddedPlugin, self).__init__() + self.config.add({ + 'preserve_mtimes': False, + 'preserve_write_mtimes': False, + }) + + # item.id for new items that were reimported + self.reimported_item_ids = None + # album.path for old albums that were replaced by a reimported album + self.replaced_album_paths = None + # item path in the library to the mtime of the source file + self.item_mtime = dict() + + register = self.register_listener + register('import_task_start', self.check_config) + register('import_task_start', self.record_if_inplace) + register('import_task_files', self.record_reimported) + register('before_item_moved', self.record_import_mtime) + register('item_copied', self.record_import_mtime) + register('item_linked', self.record_import_mtime) + register('album_imported', self.update_album_times) + register('item_imported', self.update_item_times) + register('after_write', self.update_after_write_time) + + def check_config(self, task, session): + self.config['preserve_mtimes'].get(bool) + + def reimported_item(self, item): + return item.id in self.reimported_item_ids + + def reimported_album(self, album): + return album.path in self.replaced_album_paths + + def record_if_inplace(self, task, session): + if not (session.config['copy'] or session.config['move'] or + session.config['link']): + self._log.debug(u"In place import detected, recording mtimes from " + u"source paths") + items = [task.item] \ + if isinstance(task, importer.SingletonImportTask) \ + else task.items + for item in items: + self.record_import_mtime(item, item.path, item.path) + + def record_reimported(self, task, session): + self.reimported_item_ids = set(item.id for item, replaced_items + in task.replaced_items.iteritems() + if 
replaced_items) + self.replaced_album_paths = set(task.replaced_albums.keys()) + + def write_file_mtime(self, path, mtime): + """Write the given mtime to the destination path. + """ + stat = os.stat(util.syspath(path)) + os.utime(util.syspath(path), (stat.st_atime, mtime)) + + def write_item_mtime(self, item, mtime): + """Write the given mtime to an item's `mtime` field and to the mtime + of the item's file. + """ + # The file's mtime on disk must be in sync with the item's mtime + self.write_file_mtime(util.syspath(item.path), mtime) + item.mtime = mtime + + def record_import_mtime(self, item, source, destination): + """Record the file mtime of an item's path before its import. + """ + mtime = os.stat(util.syspath(source)).st_mtime + self.item_mtime[destination] = mtime + self._log.debug(u"Recorded mtime {0} for item '{1}' imported from " + u"'{2}'", mtime, util.displayable_path(destination), + util.displayable_path(source)) + + def update_album_times(self, lib, album): + if self.reimported_album(album): + self._log.debug(u"Album '{0}' is reimported, skipping import of " + u"added dates for the album and its items.", + util.displayable_path(album.path)) + return + + album_mtimes = [] + for item in album.items(): + mtime = self.item_mtime.pop(item.path, None) + if mtime: + album_mtimes.append(mtime) + if self.config['preserve_mtimes'].get(bool): + self.write_item_mtime(item, mtime) + item.store() + album.added = min(album_mtimes) + self._log.debug(u"Import of album '{0}', selected album.added={1} " + u"from item file mtimes.", album.album, album.added) + album.store() + + def update_item_times(self, lib, item): + if self.reimported_item(item): + self._log.debug(u"Item '{0}' is reimported, skipping import of " + u"added date.", util.displayable_path(item.path)) + return + mtime = self.item_mtime.pop(item.path, None) + if mtime: + item.added = mtime + if self.config['preserve_mtimes'].get(bool): + self.write_item_mtime(item, mtime) + self._log.debug(u"Import of item 
'{0}', selected item.added={1}", + util.displayable_path(item.path), item.added) + item.store() + + def update_after_write_time(self, item): + """Update the mtime of the item's file with the item.added value + after each write of the item if `preserve_write_mtimes` is enabled. + """ + if item.added: + if self.config['preserve_write_mtimes'].get(bool): + self.write_item_mtime(item, item.added) + self._log.debug(u"Write of item '{0}', selected item.added={1}", + util.displayable_path(item.path), item.added) diff --git a/libs/beetsplug/importfeeds.py b/libs/beetsplug/importfeeds.py new file mode 100644 index 00000000..d046ddc4 --- /dev/null +++ b/libs/beetsplug/importfeeds.py @@ -0,0 +1,149 @@ +# -*- coding: utf-8 -*- +# This file is part of beets. +# Copyright 2016, Fabrice Laporte. +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. + +from __future__ import division, absolute_import, print_function + +"""Write paths of imported files in various formats to ease later import in a +music player. Also allow printing the new file locations to stdout in case +one wants to manually add music to a player by its path. 
+""" +import datetime +import os +import re + +from beets.plugins import BeetsPlugin +from beets.util import mkdirall, normpath, syspath, bytestring_path +from beets import config + +M3U_DEFAULT_NAME = 'imported.m3u' + + +def _get_feeds_dir(lib): + """Given a Library object, return the path to the feeds directory to be + used (either in the library directory or an explicitly configured + path). Ensures that the directory exists. + """ + # Inside library directory. + dirpath = lib.directory + + # Ensure directory exists. + if not os.path.exists(syspath(dirpath)): + os.makedirs(syspath(dirpath)) + return dirpath + + +def _build_m3u_filename(basename): + """Builds unique m3u filename by appending given basename to current + date.""" + + basename = re.sub(r"[\s,/\\'\"]", '_', basename) + date = datetime.datetime.now().strftime("%Y%m%d_%Hh%M") + path = normpath(os.path.join( + config['importfeeds']['dir'].as_filename(), + date + '_' + basename + '.m3u' + )) + return path + + +def _write_m3u(m3u_path, items_paths): + """Append relative paths to items into m3u file. 
+ """ + mkdirall(m3u_path) + with open(syspath(m3u_path), 'a') as f: + for path in items_paths: + f.write(path + b'\n') + + +class ImportFeedsPlugin(BeetsPlugin): + def __init__(self): + super(ImportFeedsPlugin, self).__init__() + + self.config.add({ + 'formats': [], + 'm3u_name': u'imported.m3u', + 'dir': None, + 'relative_to': None, + 'absolute_path': False, + }) + + feeds_dir = self.config['dir'].get() + if feeds_dir: + feeds_dir = os.path.expanduser(bytestring_path(feeds_dir)) + self.config['dir'] = feeds_dir + if not os.path.exists(syspath(feeds_dir)): + os.makedirs(syspath(feeds_dir)) + + relative_to = self.config['relative_to'].get() + if relative_to: + self.config['relative_to'] = normpath(relative_to) + else: + self.config['relative_to'] = feeds_dir + + self.register_listener('library_opened', self.library_opened) + self.register_listener('album_imported', self.album_imported) + self.register_listener('item_imported', self.item_imported) + + def _record_items(self, lib, basename, items): + """Records relative paths to the given items for each feed format + """ + feedsdir = bytestring_path(self.config['dir'].as_filename()) + formats = self.config['formats'].as_str_seq() + relative_to = self.config['relative_to'].get() \ + or self.config['dir'].as_filename() + relative_to = bytestring_path(relative_to) + + paths = [] + for item in items: + if self.config['absolute_path']: + paths.append(item.path) + else: + try: + relpath = os.path.relpath(item.path, relative_to) + except ValueError: + # On Windows, it is sometimes not possible to construct a + # relative path (if the files are on different disks). 
+ relpath = item.path + paths.append(relpath) + + if 'm3u' in formats: + m3u_basename = bytestring_path( + self.config['m3u_name'].get(unicode)) + m3u_path = os.path.join(feedsdir, m3u_basename) + _write_m3u(m3u_path, paths) + + if 'm3u_multi' in formats: + m3u_path = _build_m3u_filename(basename) + _write_m3u(m3u_path, paths) + + if 'link' in formats: + for path in paths: + dest = os.path.join(feedsdir, os.path.basename(path)) + if not os.path.exists(syspath(dest)): + os.symlink(syspath(path), syspath(dest)) + + if 'echo' in formats: + self._log.info(u"Location of imported music:") + for path in paths: + self._log.info(u" {0}", path) + + def library_opened(self, lib): + if self.config['dir'].get() is None: + self.config['dir'] = _get_feeds_dir(lib) + + def album_imported(self, lib, album): + self._record_items(lib, album.album, album.items()) + + def item_imported(self, lib, item): + self._record_items(lib, item.title, [item]) diff --git a/libs/beetsplug/info.py b/libs/beetsplug/info.py new file mode 100644 index 00000000..29bff7a2 --- /dev/null +++ b/libs/beetsplug/info.py @@ -0,0 +1,241 @@ +# -*- coding: utf-8 -*- +# This file is part of beets. +# Copyright 2016, Adrian Sampson. +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. + +"""Shows file metadata. 
+""" + +from __future__ import division, absolute_import, print_function + +import os +import re + +from beets.plugins import BeetsPlugin +from beets import ui +from beets import mediafile +from beets.library import Item +from beets.util import displayable_path, normpath, syspath + + +def tag_data(lib, args): + query = [] + for arg in args: + path = normpath(arg) + if os.path.isfile(syspath(path)): + yield tag_data_emitter(path) + else: + query.append(arg) + + if query: + for item in lib.items(query): + yield tag_data_emitter(item.path) + + +def tag_data_emitter(path): + def emitter(): + fields = list(mediafile.MediaFile.readable_fields()) + fields.remove('images') + mf = mediafile.MediaFile(syspath(path)) + tags = {} + for field in fields: + tags[field] = getattr(mf, field) + tags['art'] = mf.art is not None + # create a temporary Item to take advantage of __format__ + item = Item.from_path(syspath(path)) + + return tags, item + return emitter + + +def library_data(lib, args): + for item in lib.items(args): + yield library_data_emitter(item) + + +def library_data_emitter(item): + def emitter(): + data = dict(item.formatted()) + data.pop('path', None) # path is fetched from item + + return data, item + return emitter + + +def update_summary(summary, tags): + for key, value in tags.iteritems(): + if key not in summary: + summary[key] = value + elif summary[key] != value: + summary[key] = '[various]' + return summary + + +def print_data(data, item=None, fmt=None): + """Print, with optional formatting, the fields of a single element. + + If no format string `fmt` is passed, the entries on `data` are printed one + in each line, with the format 'field: value'. If `fmt` is not `None`, the + `item` is printed according to `fmt`, using the `Item.__format__` + machinery. 
+ """ + if fmt: + # use fmt specified by the user + ui.print_(format(item, fmt)) + return + + path = displayable_path(item.path) if item else None + formatted = {} + for key, value in data.iteritems(): + if isinstance(value, list): + formatted[key] = u'; '.join(value) + if value is not None: + formatted[key] = value + + if len(formatted) == 0: + return + + maxwidth = max(len(key) for key in formatted) + lineformat = u'{{0:>{0}}}: {{1}}'.format(maxwidth) + + if path: + ui.print_(displayable_path(path)) + + for field in sorted(formatted): + value = formatted[field] + if isinstance(value, list): + value = u'; '.join(value) + ui.print_(lineformat.format(field, value)) + + +def print_data_keys(data, item=None): + """Print only the keys (field names) for an item. + """ + path = displayable_path(item.path) if item else None + formatted = [] + for key, value in data.iteritems(): + formatted.append(key) + + if len(formatted) == 0: + return + + line_format = u'{0}{{0}}'.format(u' ' * 4) + if path: + ui.print_(displayable_path(path)) + + for field in sorted(formatted): + ui.print_(line_format.format(field)) + + +class InfoPlugin(BeetsPlugin): + + def commands(self): + cmd = ui.Subcommand('info', help=u'show file metadata') + cmd.func = self.run + cmd.parser.add_option( + u'-l', u'--library', action='store_true', + help=u'show library fields instead of tags', + ) + cmd.parser.add_option( + u'-s', u'--summarize', action='store_true', + help=u'summarize the tags of all files', + ) + cmd.parser.add_option( + u'-i', u'--include-keys', default=[], + action='append', dest='included_keys', + help=u'comma separated list of keys to show', + ) + cmd.parser.add_option( + u'-k', u'--keys-only', action='store_true', + help=u'show only the keys', + ) + cmd.parser.add_format_option(target='item') + return [cmd] + + def run(self, lib, opts, args): + """Print tag info or library data for each file referenced by args. + + Main entry point for the `beet info ARGS...` command. 
+ + If an argument is a path pointing to an existing file, then the tags + of that file are printed. All other arguments are considered + queries, and for each item matching all those queries the tags from + the file are printed. + + If `opts.summarize` is true, the function merges all tags into one + dictionary and only prints that. If two files have different values + for the same tag, the value is set to '[various]' + """ + if opts.library: + data_collector = library_data + else: + data_collector = tag_data + + included_keys = [] + for keys in opts.included_keys: + included_keys.extend(keys.split(',')) + key_filter = make_key_filter(included_keys) + + first = True + summary = {} + for data_emitter in data_collector(lib, ui.decargs(args)): + try: + data, item = data_emitter() + except (mediafile.UnreadableFileError, IOError) as ex: + self._log.error(u'cannot read file: {0}', ex) + continue + + data = key_filter(data) + if opts.summarize: + update_summary(summary, data) + else: + if not first: + ui.print_() + if opts.keys_only: + print_data_keys(data, item) + else: + print_data(data, item, opts.format) + first = False + + if opts.summarize: + print_data(summary) + + +def make_key_filter(include): + """Return a function that filters a dictionary. + + The returned filter takes a dictionary and returns another + dictionary that only includes the key-value pairs where the key + glob-matches one of the keys in `include`. 
+ """ + if not include: + return identity + + matchers = [] + for key in include: + key = re.escape(key) + key = key.replace(r'\*', '.*') + matchers.append(re.compile(key + '$')) + + def filter_(data): + filtered = dict() + for key, value in data.items(): + if any(map(lambda m: m.match(key), matchers)): + filtered[key] = value + return filtered + + return filter_ + + +def identity(val): + return val diff --git a/libs/beetsplug/inline.py b/libs/beetsplug/inline.py new file mode 100644 index 00000000..6e3771f2 --- /dev/null +++ b/libs/beetsplug/inline.py @@ -0,0 +1,124 @@ +# -*- coding: utf-8 -*- +# This file is part of beets. +# Copyright 2016, Adrian Sampson. +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. + +"""Allows inline path template customization code in the config file. +""" +from __future__ import division, absolute_import, print_function + +import traceback +import itertools + +from beets.plugins import BeetsPlugin +from beets import config + +FUNC_NAME = u'__INLINE_FUNC__' + + +class InlineError(Exception): + """Raised when a runtime error occurs in an inline expression. + """ + def __init__(self, code, exc): + super(InlineError, self).__init__( + (u"error in inline path field code:\n" + u"%s\n%s: %s") % (code, type(exc).__name__, unicode(exc)) + ) + + +def _compile_func(body): + """Given Python code for a function body, return a compiled + callable that invokes that code. 
+ """ + body = u'def {0}():\n {1}'.format( + FUNC_NAME, + body.replace('\n', '\n ') + ) + code = compile(body, 'inline', 'exec') + env = {} + eval(code, env) + return env[FUNC_NAME] + + +class InlinePlugin(BeetsPlugin): + def __init__(self): + super(InlinePlugin, self).__init__() + + config.add({ + 'pathfields': {}, # Legacy name. + 'item_fields': {}, + 'album_fields': {}, + }) + + # Item fields. + for key, view in itertools.chain(config['item_fields'].items(), + config['pathfields'].items()): + self._log.debug(u'adding item field {0}', key) + func = self.compile_inline(view.get(unicode), False) + if func is not None: + self.template_fields[key] = func + + # Album fields. + for key, view in config['album_fields'].items(): + self._log.debug(u'adding album field {0}', key) + func = self.compile_inline(view.get(unicode), True) + if func is not None: + self.album_template_fields[key] = func + + def compile_inline(self, python_code, album): + """Given a Python expression or function body, compile it as a path + field function. The returned function takes a single argument, an + Item, and returns a Unicode string. If the expression cannot be + compiled, then an error is logged and this function returns None. + """ + # First, try compiling as a single function. + try: + code = compile(u'({0})'.format(python_code), 'inline', 'eval') + except SyntaxError: + # Fall back to a function body. + try: + func = _compile_func(python_code) + except SyntaxError: + self._log.error(u'syntax error in inline field definition:\n' + u'{0}', traceback.format_exc()) + return + else: + is_expr = False + else: + is_expr = True + + def _dict_for(obj): + out = dict(obj) + if album: + out['items'] = list(obj.items()) + return out + + if is_expr: + # For expressions, just evaluate and return the result. 
+ def _expr_func(obj): + values = _dict_for(obj) + try: + return eval(code, values) + except Exception as exc: + raise InlineError(python_code, exc) + return _expr_func + else: + # For function bodies, invoke the function with values as global + # variables. + def _func_func(obj): + func.__globals__.update(_dict_for(obj)) + try: + return func() + except Exception as exc: + raise InlineError(python_code, exc) + return _func_func diff --git a/libs/beetsplug/ipfs.py b/libs/beetsplug/ipfs.py new file mode 100644 index 00000000..87a100b1 --- /dev/null +++ b/libs/beetsplug/ipfs.py @@ -0,0 +1,286 @@ +# -*- coding: utf-8 -*- +# This file is part of beets. +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. + +"""Adds support for ipfs. 
Requires go-ipfs and a running ipfs daemon +""" + +from __future__ import division, absolute_import, print_function + +from beets import ui, util, library, config +from beets.plugins import BeetsPlugin + +import subprocess +import shutil +import os +import tempfile + + +class IPFSPlugin(BeetsPlugin): + + def __init__(self): + super(IPFSPlugin, self).__init__() + self.config.add({ + 'auto': True, + }) + + if self.config['auto']: + self.import_stages = [self.auto_add] + + def commands(self): + cmd = ui.Subcommand('ipfs', + help='interact with ipfs') + cmd.parser.add_option('-a', '--add', dest='add', + action='store_true', + help='Add to ipfs') + cmd.parser.add_option('-g', '--get', dest='get', + action='store_true', + help='Get from ipfs') + cmd.parser.add_option('-p', '--publish', dest='publish', + action='store_true', + help='Publish local library to ipfs') + cmd.parser.add_option('-i', '--import', dest='_import', + action='store_true', + help='Import remote library from ipfs') + cmd.parser.add_option('-l', '--list', dest='_list', + action='store_true', + help='Query imported libraries') + cmd.parser.add_option('-m', '--play', dest='play', + action='store_true', + help='Play music from remote libraries') + + def func(lib, opts, args): + if opts.add: + for album in lib.albums(ui.decargs(args)): + if len(album.items()) == 0: + self._log.info('{0} does not contain items, aborting', + album) + + self.ipfs_add(album) + album.store() + + if opts.get: + self.ipfs_get(lib, ui.decargs(args)) + + if opts.publish: + self.ipfs_publish(lib) + + if opts._import: + self.ipfs_import(lib, ui.decargs(args)) + + if opts._list: + self.ipfs_list(lib, ui.decargs(args)) + + if opts.play: + self.ipfs_play(lib, opts, ui.decargs(args)) + + cmd.func = func + return [cmd] + + def auto_add(self, session, task): + if task.is_album: + if self.ipfs_add(task.album): + task.album.store() + + def ipfs_play(self, lib, opts, args): + from beetsplug.play import PlayPlugin + + jlib = 
self.get_remote_lib(lib) + player = PlayPlugin() + config['play']['relative_to'] = None + player.album = True + player.play_music(jlib, player, args) + + def ipfs_add(self, album): + try: + album_dir = album.item_dir() + except AttributeError: + return False + try: + if album.ipfs: + self._log.debug('{0} already added', album_dir) + # Already added to ipfs + return False + except AttributeError: + pass + + self._log.info('Adding {0} to ipfs', album_dir) + + cmd = "ipfs add -q -r".split() + cmd.append(album_dir) + try: + output = util.command_output(cmd).split() + except (OSError, subprocess.CalledProcessError) as exc: + self._log.error(u'Failed to add {0}, error: {1}', album_dir, exc) + return False + length = len(output) + + for linenr, line in enumerate(output): + line = line.strip() + if linenr == length - 1: + # last printed line is the album hash + self._log.info("album: {0}", line) + album.ipfs = line + else: + try: + item = album.items()[linenr] + self._log.info("item: {0}", line) + item.ipfs = line + item.store() + except IndexError: + # if there's non music files in the to-add folder they'll + # get ignored here + pass + + return True + + def ipfs_get(self, lib, query): + query = query[0] + # Check if query is a hash + if query.startswith("Qm") and len(query) == 46: + self.ipfs_get_from_hash(lib, query) + else: + albums = self.query(lib, query) + for album in albums: + self.ipfs_get_from_hash(lib, album.ipfs) + + def ipfs_get_from_hash(self, lib, _hash): + try: + cmd = "ipfs get".split() + cmd.append(_hash) + util.command_output(cmd) + except (OSError, subprocess.CalledProcessError) as err: + self._log.error('Failed to get {0} from ipfs.\n{1}', + _hash, err.output) + return False + + self._log.info('Getting {0} from ipfs', _hash) + imp = ui.commands.TerminalImportSession(lib, loghandler=None, + query=None, paths=[_hash]) + imp.run() + shutil.rmtree(_hash) + + def ipfs_publish(self, lib): + with tempfile.NamedTemporaryFile() as tmp: + 
self.ipfs_added_albums(lib, tmp.name) + try: + cmd = "ipfs add -q ".split() + cmd.append(tmp.name) + output = util.command_output(cmd) + except (OSError, subprocess.CalledProcessError) as err: + msg = "Failed to publish library. Error: {0}".format(err) + self._log.error(msg) + return False + self._log.info("hash of library: {0}", output) + + def ipfs_import(self, lib, args): + _hash = args[0] + if len(args) > 1: + lib_name = args[1] + else: + lib_name = _hash + lib_root = os.path.dirname(lib.path) + remote_libs = lib_root + "/remotes" + if not os.path.exists(remote_libs): + try: + os.makedirs(remote_libs) + except OSError as e: + msg = "Could not create {0}. Error: {1}".format(remote_libs, e) + self._log.error(msg) + return False + path = remote_libs + "/" + lib_name + ".db" + if not os.path.exists(path): + cmd = "ipfs get {0} -o".format(_hash).split() + cmd.append(path) + try: + util.command_output(cmd) + except (OSError, subprocess.CalledProcessError): + self._log.error("Could not import {0}".format(_hash)) + return False + + # add all albums from remotes into a combined library + jpath = remote_libs + "/joined.db" + jlib = library.Library(jpath) + nlib = library.Library(path) + for album in nlib.albums(): + if not self.already_added(album, jlib): + new_album = [] + for item in album.items(): + item.id = None + new_album.append(item) + added_album = jlib.add_album(new_album) + added_album.ipfs = album.ipfs + added_album.store() + + def already_added(self, check, jlib): + for jalbum in jlib.albums(): + if jalbum.mb_albumid == check.mb_albumid: + return True + return False + + def ipfs_list(self, lib, args): + fmt = config['format_album'].get() + try: + albums = self.query(lib, args) + except IOError: + ui.print_("No imported libraries yet.") + return + + for album in albums: + ui.print_(format(album, fmt), " : ", album.ipfs) + + def query(self, lib, args): + rlib = self.get_remote_lib(lib) + albums = rlib.albums(args) + return albums + + def get_remote_lib(self, 
lib): + lib_root = os.path.dirname(lib.path) + remote_libs = lib_root + "/remotes" + path = remote_libs + "/joined.db" + if not os.path.isfile(path): + raise IOError + return library.Library(path) + + def ipfs_added_albums(self, rlib, tmpname): + """ Returns a new library with only albums/items added to ipfs + """ + tmplib = library.Library(tmpname) + for album in rlib.albums(): + try: + if album.ipfs: + self.create_new_album(album, tmplib) + except AttributeError: + pass + return tmplib + + def create_new_album(self, album, tmplib): + items = [] + for item in album.items(): + try: + if not item.ipfs: + break + except AttributeError: + pass + # Clear current path from item + item.path = '/ipfs/{0}/{1}'.format(album.ipfs, + os.path.basename(item.path)) + + item.id = None + items.append(item) + if len(items) < 1: + return False + self._log.info("Adding '{0}' to temporary library", album) + new_album = tmplib.add_album(items) + new_album.ipfs = album.ipfs + new_album.store() diff --git a/libs/beetsplug/keyfinder.py b/libs/beetsplug/keyfinder.py new file mode 100644 index 00000000..b6131a4b --- /dev/null +++ b/libs/beetsplug/keyfinder.py @@ -0,0 +1,87 @@ +# -*- coding: utf-8 -*- +# This file is part of beets. +# Copyright 2016, Thomas Scholtes. +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. + +"""Uses the `KeyFinder` program to add the `initial_key` field. 
+""" + +from __future__ import division, absolute_import, print_function + +import subprocess + +from beets import ui +from beets import util +from beets.plugins import BeetsPlugin + + +class KeyFinderPlugin(BeetsPlugin): + + def __init__(self): + super(KeyFinderPlugin, self).__init__() + self.config.add({ + u'bin': u'KeyFinder', + u'auto': True, + u'overwrite': False, + }) + + if self.config['auto'].get(bool): + self.import_stages = [self.imported] + + def commands(self): + cmd = ui.Subcommand('keyfinder', + help=u'detect and add initial key from audio') + cmd.func = self.command + return [cmd] + + def command(self, lib, opts, args): + self.find_key(lib.items(ui.decargs(args)), write=ui.should_write()) + + def imported(self, session, task): + self.find_key(task.items) + + def find_key(self, items, write=False): + overwrite = self.config['overwrite'].get(bool) + bin = util.bytestring_path(self.config['bin'].get(unicode)) + + for item in items: + if item['initial_key'] and not overwrite: + continue + + try: + output = util.command_output([bin, b'-f', + util.syspath(item.path)]) + except (subprocess.CalledProcessError, OSError) as exc: + self._log.error(u'execution failed: {0}', exc) + continue + except UnicodeEncodeError: + # Workaround for Python 2 Windows bug. 
+ # http://bugs.python.org/issue1759845 + self._log.error(u'execution failed for Unicode path: {0!r}', + item.path) + continue + + key_raw = output.rsplit(None, 1)[-1] + try: + key = key_raw.decode('utf8') + except UnicodeDecodeError: + self._log.error(u'output is invalid UTF-8') + continue + + item['initial_key'] = key + self._log.info(u'added computed initial key {0} for {1}', + key, util.displayable_path(item.path)) + + if write: + item.try_write() + item.store() diff --git a/libs/beetsplug/lastgenre/__init__.py b/libs/beetsplug/lastgenre/__init__.py new file mode 100644 index 00000000..a4b8f062 --- /dev/null +++ b/libs/beetsplug/lastgenre/__init__.py @@ -0,0 +1,425 @@ +# -*- coding: utf-8 -*- +# This file is part of beets. +# Copyright 2016, Adrian Sampson. +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. + +from __future__ import division, absolute_import, print_function + +"""Gets genres for imported music based on Last.fm tags. + +Uses a provided whitelist file to determine which tags are valid genres. +The included (default) genre list was originally produced by scraping Wikipedia +and has been edited to remove some questionable entries. 
+The scraper script used is available here: +https://gist.github.com/1241307 +""" +import pylast +import os +import yaml +import traceback + +from beets import plugins +from beets import ui +from beets import config +from beets.util import normpath, plurality +from beets import library + + +LASTFM = pylast.LastFMNetwork(api_key=plugins.LASTFM_KEY) + +PYLAST_EXCEPTIONS = ( + pylast.WSError, + pylast.MalformedResponseError, + pylast.NetworkError, +) + +REPLACE = { + u'\u2010': '-', +} + + +def deduplicate(seq): + """Remove duplicates from sequence wile preserving order. + """ + seen = set() + return [x for x in seq if x not in seen and not seen.add(x)] + + +# Canonicalization tree processing. + +def flatten_tree(elem, path, branches): + """Flatten nested lists/dictionaries into lists of strings + (branches). + """ + if not path: + path = [] + + if isinstance(elem, dict): + for (k, v) in elem.items(): + flatten_tree(v, path + [k], branches) + elif isinstance(elem, list): + for sub in elem: + flatten_tree(sub, path, branches) + else: + branches.append(path + [unicode(elem)]) + + +def find_parents(candidate, branches): + """Find parents genre of a given genre, ordered from the closest to + the further parent. + """ + for branch in branches: + try: + idx = branch.index(candidate.lower()) + return list(reversed(branch[:idx + 1])) + except ValueError: + continue + return [candidate] + + +# Main plugin logic. 
+ +WHITELIST = os.path.join(os.path.dirname(__file__), 'genres.txt') +C14N_TREE = os.path.join(os.path.dirname(__file__), 'genres-tree.yaml') + + +class LastGenrePlugin(plugins.BeetsPlugin): + def __init__(self): + super(LastGenrePlugin, self).__init__() + + self.config.add({ + 'whitelist': True, + 'min_weight': 10, + 'count': 1, + 'fallback': None, + 'canonical': False, + 'source': 'album', + 'force': True, + 'auto': True, + 'separator': u', ', + }) + + self.setup() + + def setup(self): + """Setup plugin from config options + """ + if self.config['auto']: + self.import_stages = [self.imported] + + self._genre_cache = {} + + # Read the whitelist file if enabled. + self.whitelist = set() + wl_filename = self.config['whitelist'].get() + if wl_filename in (True, ''): # Indicates the default whitelist. + wl_filename = WHITELIST + if wl_filename: + wl_filename = normpath(wl_filename) + with open(wl_filename, 'r') as f: + for line in f: + line = line.decode('utf8').strip().lower() + if line and not line.startswith(u'#'): + self.whitelist.add(line) + + # Read the genres tree for canonicalization if enabled. + self.c14n_branches = [] + c14n_filename = self.config['canonical'].get() + if c14n_filename in (True, ''): # Default tree. + c14n_filename = C14N_TREE + if c14n_filename: + c14n_filename = normpath(c14n_filename) + genres_tree = yaml.load(open(c14n_filename, 'r')) + flatten_tree(genres_tree, [], self.c14n_branches) + + @property + def sources(self): + """A tuple of allowed genre sources. May contain 'track', + 'album', or 'artist.' + """ + source = self.config['source'].as_choice(('track', 'album', 'artist')) + if source == 'track': + return 'track', 'album', 'artist' + elif source == 'album': + return 'album', 'artist' + elif source == 'artist': + return 'artist', + + def _resolve_genres(self, tags): + """Given a list of strings, return a genre by joining them into a + single string and (optionally) canonicalizing each. 
+ """ + if not tags: + return None + + count = self.config['count'].get(int) + if self.c14n_branches: + # Extend the list to consider tags parents in the c14n tree + tags_all = [] + for tag in tags: + # Add parents that are in the whitelist, or add the oldest + # ancestor if no whitelist + if self.whitelist: + parents = [x for x in find_parents(tag, self.c14n_branches) + if self._is_allowed(x)] + else: + parents = [find_parents(tag, self.c14n_branches)[-1]] + + tags_all += parents + if len(tags_all) >= count: + break + tags = tags_all + + tags = deduplicate(tags) + + # c14n only adds allowed genres but we may have had forbidden genres in + # the original tags list + tags = [x.title() for x in tags if self._is_allowed(x)] + + return self.config['separator'].get(unicode).join( + tags[:self.config['count'].get(int)] + ) + + def fetch_genre(self, lastfm_obj): + """Return the genre for a pylast entity or None if no suitable genre + can be found. Ex. 'Electronic, House, Dance' + """ + min_weight = self.config['min_weight'].get(int) + return self._resolve_genres(self._tags_for(lastfm_obj, min_weight)) + + def _is_allowed(self, genre): + """Determine whether the genre is present in the whitelist, + returning a boolean. + """ + if genre is None: + return False + if not self.whitelist or genre in self.whitelist: + return True + return False + + # Cached entity lookups. + + def _last_lookup(self, entity, method, *args): + """Get a genre based on the named entity using the callable `method` + whose arguments are given in the sequence `args`. The genre lookup + is cached based on the entity name and the arguments. Before the + lookup, each argument is has some Unicode characters replaced with + rough ASCII equivalents in order to return better results from the + Last.fm database. + """ + # Shortcut if we're missing metadata. 
+ if any(not s for s in args): + return None + + key = u'{0}.{1}'.format(entity, u'-'.join(unicode(a) for a in args)) + if key in self._genre_cache: + return self._genre_cache[key] + else: + args_replaced = [] + for arg in args: + for k, v in REPLACE.items(): + arg = arg.replace(k, v) + args_replaced.append(arg) + + genre = self.fetch_genre(method(*args_replaced)) + self._genre_cache[key] = genre + return genre + + def fetch_album_genre(self, obj): + """Return the album genre for this Item or Album. + """ + return self._last_lookup( + u'album', LASTFM.get_album, obj.albumartist, obj.album + ) + + def fetch_album_artist_genre(self, obj): + """Return the album artist genre for this Item or Album. + """ + return self._last_lookup( + u'artist', LASTFM.get_artist, obj.albumartist + ) + + def fetch_artist_genre(self, item): + """Returns the track artist genre for this Item. + """ + return self._last_lookup( + u'artist', LASTFM.get_artist, item.artist + ) + + def fetch_track_genre(self, obj): + """Returns the track genre for this Item. + """ + return self._last_lookup( + u'track', LASTFM.get_track, obj.artist, obj.title + ) + + def _get_genre(self, obj): + """Get the genre string for an Album or Item object based on + self.sources. Return a `(genre, source)` pair. The + prioritization order is: + - track (for Items only) + - album + - artist + - original + - fallback + - None + """ + + # Shortcut to existing genre if not forcing. + if not self.config['force'] and self._is_allowed(obj.genre): + return obj.genre, 'keep' + + # Track genre (for Items only). + if isinstance(obj, library.Item): + if 'track' in self.sources: + result = self.fetch_track_genre(obj) + if result: + return result, 'track' + + # Album genre. + if 'album' in self.sources: + result = self.fetch_album_genre(obj) + if result: + return result, 'album' + + # Artist (or album artist) genre. 
+ if 'artist' in self.sources: + result = None + if isinstance(obj, library.Item): + result = self.fetch_artist_genre(obj) + elif obj.albumartist != config['va_name'].get(unicode): + result = self.fetch_album_artist_genre(obj) + else: + # For "Various Artists", pick the most popular track genre. + item_genres = [] + for item in obj.items(): + item_genre = None + if 'track' in self.sources: + item_genre = self.fetch_track_genre(item) + if not item_genre: + item_genre = self.fetch_artist_genre(item) + if item_genre: + item_genres.append(item_genre) + if item_genres: + result, _ = plurality(item_genres) + + if result: + return result, 'artist' + + # Filter the existing genre. + if obj.genre: + result = self._resolve_genres([obj.genre]) + if result: + return result, 'original' + + # Fallback string. + fallback = self.config['fallback'].get() + if fallback: + return fallback, 'fallback' + + return None, None + + def commands(self): + lastgenre_cmd = ui.Subcommand('lastgenre', help=u'fetch genres') + lastgenre_cmd.parser.add_option( + u'-f', u'--force', dest='force', + action='store_true', default=False, + help=u're-download genre when already present' + ) + lastgenre_cmd.parser.add_option( + u'-s', u'--source', dest='source', type='string', + help=u'genre source: artist, album, or track' + ) + + def lastgenre_func(lib, opts, args): + write = ui.should_write() + self.config.set_args(opts) + + for album in lib.albums(ui.decargs(args)): + album.genre, src = self._get_genre(album) + self._log.info(u'genre for album {0} ({1}): {0.genre}', + album, src) + album.store() + + for item in album.items(): + # If we're using track-level sources, also look up each + # track on the album. 
+ if 'track' in self.sources: + item.genre, src = self._get_genre(item) + item.store() + self._log.info(u'genre for track {0} ({1}): {0.genre}', + item, src) + + if write: + item.try_write() + + lastgenre_cmd.func = lastgenre_func + return [lastgenre_cmd] + + def imported(self, session, task): + """Event hook called when an import task finishes.""" + if task.is_album: + album = task.album + album.genre, src = self._get_genre(album) + self._log.debug(u'added last.fm album genre ({0}): {1}', + src, album.genre) + album.store() + + if 'track' in self.sources: + for item in album.items(): + item.genre, src = self._get_genre(item) + self._log.debug(u'added last.fm item genre ({0}): {1}', + src, item.genre) + item.store() + + else: + item = task.item + item.genre, src = self._get_genre(item) + self._log.debug(u'added last.fm item genre ({0}): {1}', + src, item.genre) + item.store() + + def _tags_for(self, obj, min_weight=None): + """Core genre identification routine. + + Given a pylast entity (album or track), return a list of + tag names for that entity. Return an empty list if the entity is + not found or another error occurs. + + If `min_weight` is specified, tags are filtered by weight. + """ + # Work around an inconsistency in pylast where + # Album.get_top_tags() does not return TopItem instances. + # https://code.google.com/p/pylast/issues/detail?id=85 + if isinstance(obj, pylast.Album): + obj = super(pylast.Album, obj) + + try: + res = obj.get_top_tags() + except PYLAST_EXCEPTIONS as exc: + self._log.debug(u'last.fm error: {0}', exc) + return [] + except Exception as exc: + # Isolate bugs in pylast. + self._log.debug(u'{}', traceback.format_exc()) + self._log.error(u'error in pylast library: {0}', exc) + return [] + + # Filter by weight (optionally). + if min_weight: + res = [el for el in res if (int(el.weight or 0)) >= min_weight] + + # Get strings from tags. 
+ res = [el.item.get_name().lower() for el in res] + + return res diff --git a/libs/beetsplug/lastgenre/genres-tree.yaml b/libs/beetsplug/lastgenre/genres-tree.yaml new file mode 100644 index 00000000..a09f7e6b --- /dev/null +++ b/libs/beetsplug/lastgenre/genres-tree.yaml @@ -0,0 +1,749 @@ +- african: + - african heavy metal + - african hip hop + - afrobeat + - apala + - benga + - bikutsi + - bongo flava + - cape jazz + - chimurenga + - coupé-décalé + - fuji music + - genge + - highlife + - hiplife + - isicathamiya + - jit + - jùjú + - kapuka + - kizomba + - kuduro + - kwaito + - kwela + - makossa + - maloya + - marrabenta + - mbalax + - mbaqanga + - mbube + - morna + - museve + - palm-wine + - raï + - sakara + - sega + - seggae + - semba + - soukous + - taarab + - zouglou +- asian: + - east asian: + - anison + - c-pop + - cantopop + - enka + - hong kong english pop + - j-pop + - k-pop + - kayÅkyoku + - korean pop + - mandopop + - onkyokei + - taiwanese pop + - fann at-tanbura + - fijiri + - khaliji + - liwa + - sawt + - south and southeast asian: + - baila + - bhangra + - bhojpuri + - dangdut + - filmi + - indian pop + - lavani + - luk thung: + - luk krung + - manila sound + - morlam + - pinoy pop + - pop sunda + - ragini + - thai pop +- avant-garde: + - experimental music + - lo-fi + - musique concrète +- blues: + - african blues + - blues rock + - blues shouter + - british blues + - canadian blues + - chicago blues + - classic female blues + - contemporary r&b + - country blues + - delta blues + - detroit blues + - electric blues + - gospel blues + - hill country blues + - hokum blues + - jazz blues + - jump blues + - kansas city blues + - louisiana blues + - memphis blues + - piano blues + - piedmont blues + - punk blues + - soul blues + - st. 
louis blues + - swamp blues + - texas blues + - west coast blues +- caribbean and latin american: + - bachata + - baithak gana + - bolero + - brazilian: + - axé + - bossa nova + - brazilian rock + - brega + - choro + - forró + - frevo + - funk carioca + - lambada + - maracatu + - música popular brasileira + - música sertaneja + - pagode + - samba + - samba rock + - tecnobrega + - tropicalia + - zouk-lambada + - calypso + - chutney + - chutney soca + - compas + - mambo + - merengue + - méringue + - other latin: + - chicha + - criolla + - cumbia + - huayno + - mariachi + - ranchera + - tejano + - punta + - punta rock + - rasin + - reggaeton + - salsa + - soca + - son + - timba + - twoubadou + - zouk +- classical: + - ballet + - baroque: + - baroque music + - cantata + - chamber music: + - string quartet + - classical music + - concerto: + - concerto grosso + - contemporary classical + - modern classical + - opera + - oratorio + - orchestra: + - orchestral + - symphonic + - symphony + - organum + - mass: + - requiem + - sacred music: + - cantique + - gregorian chant + - sonata +- comedy: + - comedy music + - comedy rock + - humor + - parody music + - stand-up +- country: + - alternative country: + - cowpunk + - americana + - australian country music + - bakersfield sound + - bluegrass: + - progressive bluegrass + - reactionary bluegrass + - blues country + - cajun: + - cajun fiddle tunes + - christian country music + - classic country + - close harmony + - country pop + - country rap + - country rock + - country soul + - cowboy/western music + - dansband music + - franco-country + - gulf and western + - hellbilly music + - hokum + - honky tonk + - instrumental country + - lubbock sound + - nashville sound + - neotraditional country + - outlaw country + - progressive country + - psychobilly/punkabilly + - red dirt + - rockabilly + - sertanejo + - texas country + - traditional country music + - truck-driving country + - western swing + - zydeco +- easy listening: + - 
background music + - beautiful music + - elevator music + - furniture music + - lounge music + - middle of the road + - new-age music +- electronic: + - ambient: + - ambient dub + - ambient house + - ambient techno + - dark ambient + - drone music + - illbient + - isolationism + - lowercase + - asian underground + - breakbeat: + - 4-beat + - acid breaks + - baltimore club + - big beat + - breakbeat hardcore + - broken beat + - florida breaks + - nu skool breaks + - chiptune: + - bitpop + - game boy music + - nintendocore + - video game music + - yorkshire bleeps and bass + - disco: + - cosmic disco + - disco polo + - euro disco + - italo disco + - nu-disco + - space disco + - downtempo: + - acid jazz + - balearic beat + - chill out + - dub music + - dubtronica + - ethnic electronica + - moombahton + - nu jazz + - trip hop + - drum and bass: + - darkcore + - darkstep + - drumfunk + - drumstep + - hardstep + - intelligent drum and bass + - jump-up + - liquid funk + - neurofunk + - oldschool jungle: + - darkside jungle + - ragga jungle + - raggacore + - sambass + - techstep + - electro: + - crunk + - electro backbeat + - electro-grime + - electropop + - electroacoustic: + - acousmatic music + - computer music + - electroacoustic improvisation + - field recording + - live coding + - live electronics + - soundscape composition + - tape music + - electronic rock: + - alternative dance: + - baggy + - madchester + - dance-punk + - dance-rock + - dark wave + - electroclash + - electronicore + - electropunk + - ethereal wave + - indietronica + - new rave + - space rock + - synthpop + - synthpunk + - electronica: + - berlin school + - chillwave + - electronic art music + - electronic dance music + - folktronica + - freestyle music + - glitch + - idm + - laptronica + - skweee + - sound art + - synthcore + - eurodance: + - bubblegum dance + - italo dance + - turbofolk + - hardcore: + - bouncy house + - bouncy techno + - breakcore + - digital hardcore + - doomcore + - dubstyle + 
- gabber + - happy hardcore + - hardstyle + - jumpstyle + - makina + - speedcore + - terrorcore + - uk hardcore + - hi-nrg: + - eurobeat + - hard nrg + - new beat + - house: + - acid house + - chicago house + - deep house + - diva house + - dutch house + - electro house + - freestyle house + - french house + - funky house + - ghetto house + - hardbag + - hip house + - italo house + - latin house + - minimal house + - progressive house + - rave music + - swing house + - tech house + - tribal house + - uk hard house + - us garage + - vocal house + - industrial: + - aggrotech + - coldwave + - cybergrind + - dark electro + - death industrial + - electro-industrial + - electronic body music: + - futurepop + - industrial metal: + - neue deutsche härte + - industrial rock + - noise: + - japanoise + - power electronics + - power noise + - witch house + - post-disco: + - boogie + - dance-pop + - progressive: + - progressive house/trance: + - disco house + - dream house + - space house + - progressive breaks + - progressive drum & bass + - progressive techno + - techno: + - acid techno + - detroit techno + - free tekno + - ghettotech + - minimal + - nortec + - schranz + - techno-dnb + - technopop + - tecno brega + - toytown techno + - trance: + - acid trance + - classic trance + - dream trance + - goa trance: + - dark psytrance + - full on + - psybreaks + - psyprog + - suomisaundi + - hard trance + - tech trance + - uplifting trance: + - orchestral uplifting + - vocal trance + - uk garage: + - 2-step + - 4x4 + - bassline + - breakstep + - dubstep + - funky + - grime + - speed garage + - trap +- folk: + - american folk revival + - anti-folk + - british folk revival + - celtic music + - contemporary folk + - filk music + - freak folk + - indie folk + - industrial folk + - neofolk + - progressive folk + - psychedelic folk + - sung poetry + - techno-folk +- hip hop: + - alternative hip hop + - avant-garde hip hop + - chap hop + - christian hip hop + - conscious hip hop + - 
country-rap + - crunkcore + - cumbia rap + - east coast hip hop: + - brick city club + - hardcore hip hop + - mafioso rap + - new jersey hip hop + - electro music + - freestyle rap + - g-funk + - gangsta rap + - golden age hip hop + - hip hop soul + - hip pop + - hyphy + - industrial hip hop + - instrumental hip hop + - jazz rap + - low bap + - lyrical hip hop + - merenrap + - midwest hip hop: + - chicago hip hop + - detroit hip hop + - horrorcore + - st. louis hip hop + - twin cities hip hop + - motswako + - nerdcore + - new jack swing + - new school hip hop + - old school hip hop + - political hip hop + - rap opera + - rap rock: + - rap metal + - rapcore + - songo-salsa + - southern hip hop: + - atlanta hip hop: + - snap music + - bounce music + - houston hip hop: + - chopped and screwed + - miami bass + - turntablism + - underground hip hop + - urban pasifika + - west coast hip hop: + - chicano rap + - jerkin' +- jazz: + - asian american jazz + - avant-garde jazz + - bebop + - boogie-woogie + - british dance band + - chamber jazz + - continental jazz + - cool jazz + - crossover jazz + - cubop + - dixieland + - ethno jazz + - european free jazz + - free funk + - free improvisation + - free jazz + - gypsy jazz + - hard bop + - jazz fusion + - jazz rock + - jazz-funk + - kansas city jazz + - latin jazz + - livetronica + - m-base + - mainstream jazz + - modal jazz + - neo-bop jazz + - neo-swing + - novelty ragtime + - orchestral jazz + - post-bop + - punk jazz + - ragtime + - shibuya-kei + - ska jazz + - smooth jazz + - soul jazz + - straight-ahead jazz + - stride jazz + - swing + - third stream + - trad jazz + - vocal jazz + - west coast gypsy jazz + - west coast jazz +- other: + - worldbeat +- pop: + - adult contemporary + - arab pop + - baroque pop + - bubblegum pop + - chanson + - christian pop + - classical crossover + - europop: + - austropop + - balkan pop + - french pop + - latin pop + - laïkó + - nederpop + - russian pop + - iranian pop + - jangle pop + - 
latin ballad + - levenslied + - louisiana swamp pop + - mexican pop + - motorpop + - new romanticism + - pop rap + - popera + - psychedelic pop + - schlager + - soft rock + - sophisti-pop + - space age pop + - sunshine pop + - surf pop + - teen pop + - traditional pop music + - turkish pop + - vispop + - wonky pop +- rhythm and blues: + - funk: + - deep funk + - go-go + - p-funk + - soul: + - blue-eyed soul + - neo soul + - northern soul +- rock: + - alternative rock: + - britpop: + - post-britpop + - dream pop + - grunge: + - post-grunge + - indie pop: + - dunedin sound + - twee pop + - indie rock + - noise pop + - nu metal + - post-punk revival + - post-rock: + - post-metal + - sadcore + - shoegaze + - slowcore + - art rock + - beat music + - chinese rock + - christian rock + - dark cabaret + - desert rock + - experimental rock + - folk rock + - garage rock + - glam rock + - hard rock + - heavy metal: + - alternative metal + - black metal: + - viking metal + - christian metal + - death metal: + - goregrind + - melodic death metal + - technical death metal + - doom metal + - drone metal + - folk metal: + - celtic metal + - medieval metal + - funk metal + - glam metal + - gothic metal + - metalcore: + - deathcore + - mathcore: + - djent + - power metal + - progressive metal + - sludge metal + - speed metal + - stoner rock + - symphonic metal + - thrash metal: + - crossover thrash + - groove metal + - math rock + - new wave: + - world fusion + - paisley underground + - pop rock + - post-punk: + - gothic rock + - no wave + - noise rock + - power pop + - progressive rock: + - canterbury scene + - krautrock + - new prog + - rock in opposition + - psychedelic rock: + - acid rock + - freakbeat + - neo-psychedelia + - raga rock + - punk rock: + - anarcho punk: + - crust punk: + - d-beat + - art punk + - christian punk + - deathrock + - folk punk: + - celtic punk + - gypsy punk + - garage punk + - grindcore: + - crustgrind + - noisegrind + - hardcore punk: + - 
post-hardcore: + - emo: + - screamo + - powerviolence + - street punk + - thrashcore + - horror punk + - pop punk + - psychobilly + - riot grrrl + - ska punk: + - ska-core + - skate punk + - rock and roll + - southern rock + - sufi rock + - surf rock + - visual kei: + - nagoya kei +- reggae: + - roots reggae + - reggae fusion + - reggae en español: + - spanish reggae + - reggae 110 + - reggae bultrón + - romantic flow + - lovers rock + - raggamuffin: + - ragga + - dancehall + - ska: + - 2 tone + - dub + - rocksteady diff --git a/libs/beetsplug/lastgenre/genres.txt b/libs/beetsplug/lastgenre/genres.txt new file mode 100644 index 00000000..914ee129 --- /dev/null +++ b/libs/beetsplug/lastgenre/genres.txt @@ -0,0 +1,1534 @@ +2 tone +2-step garage +4-beat +4x4 garage +8-bit +acapella +acid +acid breaks +acid house +acid jazz +acid rock +acoustic music +acousticana +adult contemporary music +african popular music +african rumba +afrobeat +aleatoric music +alternative country +alternative dance +alternative hip hop +alternative metal +alternative rock +ambient +ambient house +ambient music +americana +anarcho punk +anti-folk +apala +ape haters +arab pop +arabesque +arabic pop +argentine rock +ars antiqua +ars nova +art punk +art rock +ashiq +asian american jazz +australian country music +australian hip hop +australian pub rock +austropop +avant-garde +avant-garde jazz +avant-garde metal +avant-garde music +axé +bac-bal +bachata +baggy +baila +baile funk +baisha xiyue +baithak gana +baião +bajourou +bakersfield sound +bakou +bakshy +bal-musette +balakadri +balinese gamelan +balkan pop +ballad +ballata +ballet +bamboo band +bambuco +banda +bangsawan +bantowbol +barbershop music +barndance +baroque +baroque music +baroque pop +bass music +batcave +batucada +batuco +batá-rumba +beach music +beat +beatboxing +beautiful music +bebop +beiguan +bel canto +bend-skin +benga +berlin school of electronic music +bhajan +bhangra +bhangra-wine +bhangragga +bhangramuffin +big band +big 
band music +big beat +biguine +bihu +bikutsi +biomusic +bitcore +bitpop +black metal +blackened death metal +blue-eyed soul +bluegrass +blues +blues ballad +blues-rock +boogie +boogie woogie +boogie-woogie +bossa nova +brass band +brazilian funk +brazilian jazz +breakbeat +breakbeat hardcore +breakcore +breton music +brill building pop +britfunk +british blues +british invasion +britpop +broken beat +brown-eyed soul +brukdown +brutal death metal +bubblegum dance +bubblegum pop +bulerias +bumba-meu-boi +bunraku +burger-highlife +burgundian school +byzantine chant +ca din tulnic +ca pe lunca +ca trù +cabaret +cadence +cadence rampa +cadence-lypso +café-aman +cai luong +cajun music +cakewalk +calenda +calentanos +calgia +calypso +calypso jazz +calypso-style baila +campursari +canatronic +candombe +canon +canrock +cantata +cante chico +cante jondo +canterbury scene +cantiga +cantique +cantiñas +canto livre +canto nuevo +canto popular +cantopop +canzone napoletana +cape jazz +capoeira music +caracoles +carceleras +cardas +cardiowave +carimbó +cariso +carnatic music +carol +cartageneras +cassette culture +casséy-co +cavacha +caveman +caña +celempungan +cello rock +celtic +celtic fusion +celtic metal +celtic punk +celtic reggae +celtic rock +cha-cha-cha +chakacha +chalga +chamamé +chamber jazz +chamber music +chamber pop +champeta +changuí +chanson +chant +charanga +charanga-vallenata +charikawi +chastushki +chau van +chemical breaks +chicago blues +chicago house +chicago soul +chicano rap +chicha +chicken scratch +children's music +chillout +chillwave +chimurenga +chinese music +chinese pop +chinese rock +chip music +cho-kantrum +chongak +chopera +chorinho +choro +chouval bwa +chowtal +christian alternative +christian black metal +christian electronic music +christian hardcore +christian hip hop +christian industrial +christian metal +christian music +christian punk +christian r&b +christian rock +christian ska +christmas carol +christmas music +chumba +chut-kai-pang 
+chutney +chutney soca +chutney-bhangra +chutney-hip hop +chutney-soca +chylandyk +chzalni +chèo +cigányzene +classic +classic country +classic female blues +classic rock +classical +classical music +classical music era +clicks n cuts +close harmony +club music +cocobale +coimbra fado +coladeira +colombianas +combined rhythm +comedy +comedy rap +comedy rock +comic opera +comparsa +compas direct +compas meringue +concert overture +concerto +concerto grosso +congo +conjunto +contemporary christian +contemporary christian music +contemporary classical +contemporary r&b +contonbley +contradanza +cool jazz +corrido +corsican polyphonic song +cothoza mfana +country +country blues +country gospel +country music +country pop +country r&b +country rock +country-rap +countrypolitan +couple de sonneurs +coupé-décalé +cowpunk +cretan music +crossover jazz +crossover music +crossover thrash +crossover thrash metal +crunk +crunk&b +crunkcore +crust punk +csárdás +cuarteto +cuban rumba +cuddlecore +cueca +cumbia +cumbia villera +cybergrind +dabka +dadra +daina +dalauna +dance +dance music +dance-pop +dance-punk +dance-rock +dancehall +dangdut +danger music +dansband +danza +danzón +dark ambient +dark cabaret +dark pop +darkcore +darkstep +darkwave +de ascultat la servici +de codru +de dragoste +de jale +de pahar +death industrial +death metal +death rock +death/doom +deathcore +deathgrind +deathrock +deep funk +deep house +deep soul +degung +delta blues +dementia +desert rock +desi +detroit blues +detroit techno +dhamar +dhimotiká +dhrupad +dhun +digital hardcore +dirge +dirty dutch +dirty rap +dirty rap/pornocore +dirty south +disco +disco house +disco polo +disney +disney hardcore +disney pop +diva house +divine rock +dixieland +dixieland jazz +djambadon +djent +dodompa +doina +dombola +dondang sayang +donegal fiddle tradition +dongjing +doo wop +doom metal +doomcore +downtempo +drag +dream pop +drone doom +drone metal +drone music +dronology +drum and bass +dub +dub house 
+dubanguthu +dubstep +dubtronica +dunedin sound +dunun +dutch jazz +décima +early music +east coast blues +east coast hip hop +easy listening +electric blues +electric folk +electro +electro backbeat +electro hop +electro house +electro punk +electro-industrial +electro-swing +electroclash +electrofunk +electronic +electronic art music +electronic body music +electronic dance +electronic luk thung +electronic music +electronic rock +electronica +electropop +elevator music +emo +emo pop +emo rap +emocore +emotronic +enka +eremwu eu +ethereal pop +ethereal wave +euro +euro disco +eurobeat +eurodance +europop +eurotrance +eurourban +exotica +experimental music +experimental noise +experimental pop +experimental rock +extreme metal +ezengileer +fado +falak +fandango +farruca +fife and drum blues +filk +film score +filmi +filmi-ghazal +finger-style +fjatpangarri +flamenco +flamenco rumba +flower power +foaie verde +fofa +folk hop +folk metal +folk music +folk pop +folk punk +folk rock +folktronica +forró +franco-country +freak-folk +freakbeat +free improvisation +free jazz +free music +freestyle +freestyle house +freetekno +french pop +frenchcore +frevo +fricote +fuji +fuji music +fulia +full on +funaná +funeral doom +funk +funk metal +funk rock +funkcore +funky house +furniture music +fusion jazz +g-funk +gaana +gabba +gabber +gagaku +gaikyoku +gaita +galant +gamad +gambang kromong +gamelan +gamelan angklung +gamelan bang +gamelan bebonangan +gamelan buh +gamelan degung +gamelan gede +gamelan kebyar +gamelan salendro +gamelan selunding +gamelan semar pegulingan +gamewave +gammeldans +gandrung +gangsta rap +gar +garage rock +garrotin +gavotte +gelugpa chanting +gender wayang +gending +german folk music +gharbi +gharnati +ghazal +ghazal-song +ghetto house +ghettotech +girl group +glam metal +glam punk +glam rock +glitch +gnawa +go-go +goa +goa trance +gong-chime music +goombay +goregrind +goshu ondo +gospel music +gothic metal +gothic rock +granadinas +grebo +gregorian 
chant +grime +grindcore +groove metal +group sounds +grunge +grupera +guaguanbo +guajira +guasca +guitarra baiana +guitarradas +gumbe +gunchei +gunka +guoyue +gwo ka +gwo ka moderne +gypsy jazz +gypsy punk +gypsybilly +gyu ke +habanera +hajnali +hakka +halling +hambo +hands up +hapa haole +happy hardcore +haqibah +hard +hard bop +hard house +hard rock +hard trance +hardcore hip hop +hardcore metal +hardcore punk +hardcore techno +hardstyle +harepa +harmonica blues +hasaposérviko +heart attack +heartland rock +heavy beat +heavy metal +hesher +hi-nrg +highlands +highlife +highlife fusion +hillybilly music +hindustani classical music +hip hop +hip hop & rap +hip hop soul +hip house +hiplife +hiragasy +hiva usu +hong kong and cantonese pop +hong kong english pop +honky tonk +honkyoku +hora lunga +hornpipe +horror punk +horrorcore +horrorcore rap +house +house music +hua'er +huasteco +huayno +hula +humor +humppa +hunguhungu +hyangak +hymn +hyphy +hát chau van +hát chèo +hát cãi luong +hát tuồng +ibiza music +icaro +idm +igbo music +ijexá +ilahije +illbient +impressionist music +improvisational +incidental music +indian pop +indie folk +indie music +indie pop +indie rock +indietronica +indo jazz +indo rock +indonesian pop +indoyíftika +industrial death metal +industrial hip-hop +industrial metal +industrial music +industrial musical +industrial rock +instrumental rock +intelligent dance music +international latin +inuit music +iranian pop +irish folk +irish rebel music +iscathamiya +isicathamiya +isikhwela jo +island +isolationist +italo dance +italo disco +italo house +itsmeños +izvorna bosanska muzika +j'ouvert +j-fusion +j-pop +j-rock +jaipongan +jaliscienses +jam band +jam rock +jamana kura +jamrieng samai +jangle pop +japanese pop +jarana +jariang +jarochos +jawaiian +jazz +jazz blues +jazz fusion +jazz metal +jazz rap +jazz-funk +jazz-rock +jegog +jenkka +jesus music +jibaro +jig +jig punk +jing ping +jingle +jit +jitterbug +jive +joged +joged bumbung +joik 
+jonnycore +joropo +jota +jtek +jug band +jujitsu +juju +juke joint blues +jump blues +jumpstyle +jungle +junkanoo +juré +jùjú +k-pop +kaba +kabuki +kachÄshÄ« +kadans +kagok +kagyupa chanting +kaiso +kalamatianó +kalattuut +kalinda +kamba pop +kan ha diskan +kansas city blues +kantrum +kantádhes +kargyraa +karma +kaseko +katajjaq +kawachi ondo +kayÅkyoku +ke-kwe +kebyar +kecak +kecapi suling +kertok +khaleeji +khap +khelimaski djili +khene +khoomei +khorovodi +khplam wai +khrung sai +khyal +kilapanda +kinko +kirtan +kiwi rock +kizomba +klape +klasik +klezmer +kliningan +kléftiko +kochare +kolomyjka +komagaku +kompa +konpa +korean pop +koumpaneia +kpanlogo +krakowiak +krautrock +kriti +kroncong +krump +krzesany +kuduro +kulintang +kulning +kumina +kun-borrk +kundere +kundiman +kussundé +kutumba wake +kveding +kvæði +kwaito +kwassa kwassa +kwela +käng +kélé +kÄ©kÅ©yÅ© pop +la la +latin american +latin jazz +latin pop +latin rap +lavway +laïko +laïkó +le leagan +legényes +lelio +letkajenkka +levenslied +lhamo +lieder +light music +light rock +likanos +liquid drum&bass +liquid funk +liquindi +llanera +llanto +lo-fi +lo-fi music +loki djili +long-song +louisiana blues +louisiana swamp pop +lounge music +lovers rock +lowercase +lubbock sound +lucknavi thumri +luhya omutibo +luk grung +lullaby +lundu +lundum +m-base +madchester +madrigal +mafioso rap +maglaal +magnificat +mahori +mainstream jazz +makossa +makossa-soukous +malagueñas +malawian jazz +malhun +maloya +maluf +maluka +mambo +manaschi +mandarin pop +manding swing +mango +mangue bit +mangulina +manikay +manila sound +manouche +manzuma +mapouka +mapouka-serré +marabi +maracatu +marga +mariachi +marimba +marinera +marrabenta +martial industrial +martinetes +maskanda +mass +matamuerte +math rock +mathcore +matt bello +maxixe +mazurka +mbalax +mbaqanga +mbube +mbumba +medh +medieval folk rock +medieval metal +medieval music +meditation +mejorana +melhoun +melhûn +melodic black metal +melodic death metal +melodic 
hardcore +melodic metalcore +melodic music +melodic trance +memphis blues +memphis rap +memphis soul +mento +merengue +merengue típico moderno +merengue-bomba +meringue +merseybeat +metal +metalcore +metallic hardcore +mexican pop +mexican rock +mexican son +meykhana +mezwed +miami bass +microhouse +middle of the road +midwest hip hop +milonga +min'yo +mineras +mini compas +mini-jazz +minimal techno +minimalist music +minimalist trance +minneapolis sound +minstrel show +minuet +mirolóyia +modal jazz +modern classical +modern classical music +modern laika +modern rock +modinha +mohabelo +montuno +monumental dance +mor lam +mor lam sing +morna +motorpop +motown +mozambique +mpb +mugam +multicultural +murga +musette +museve +mushroom jazz +music drama +music hall +musiqi-e assil +musique concrète +mutuashi +muwashshah +muzak +méringue +música campesina +música criolla +música de la interior +música llanera +música nordestina +música popular brasileira +música tropical +nagauta +nakasi +nangma +nanguan +narcocorrido +nardcore +narodna muzika +nasheed +nashville sound +nashville sound/countrypolitan +national socialist black metal +naturalismo +nederpop +neo soul +neo-classical metal +neo-medieval +neo-prog +neo-psychedelia +neoclassical +neoclassical music +neofolk +neotraditional country +nerdcore +neue deutsche härte +neue deutsche welle +new age music +new beat +new instrumental +new jack swing +new orleans blues +new orleans jazz +new pop +new prog +new rave +new romantic +new school hip hop +new taiwanese song +new wave +new wave of british heavy metal +new wave of new wave +new weird america +new york blues +new york house +newgrass +nganja +nightcore +nintendocore +nisiótika +no wave +noh +noise music +noise pop +noise rock +nongak +norae undong +nordic folk dance music +nordic folk music +nortec +norteño +northern soul +nota +nu breaks +nu jazz +nu metal +nu soul +nueva canción +nyatiti +néo kýma +obscuro +oi! 
+old school hip hop +old-time +oldies +olonkho +oltului +ondo +opera +operatic pop +oratorio +orchestra +orchestral +organ trio +organic ambient +organum +orgel +oriental metal +ottava rima +outlaw country +outsider music +p-funk +pagan metal +pagan rock +pagode +paisley underground +palm wine +palm-wine +pambiche +panambih +panchai baja +panchavadyam +pansori +paranda +parang +parody +parranda +partido alto +pasillo +patriotic +peace punk +pelimanni music +petenera +peyote song +philadelphia soul +piano blues +piano rock +piedmont blues +pimba +pinoy pop +pinoy rock +pinpeat orchestra +piphat +piyyutim +plainchant +plena +pleng phua cheewit +pleng thai sakorn +political hip hop +polka +polo +polonaise +pols +polska +pong lang +pop +pop folk +pop music +pop punk +pop rap +pop rock +pop sunda +pornocore +porro +post disco +post-britpop +post-disco +post-grunge +post-hardcore +post-industrial +post-metal +post-minimalism +post-punk +post-rock +post-romanticism +pow-wow +power electronics +power metal +power noise +power pop +powerviolence +ppongtchak +praise song +program symphony +progressive bluegrass +progressive country +progressive death metal +progressive electronic +progressive electronic music +progressive folk +progressive folk music +progressive house +progressive metal +progressive rock +progressive trance +protopunk +psych folk +psychedelic music +psychedelic pop +psychedelic rock +psychedelic trance +psychobilly +punk blues +punk cabaret +punk jazz +punk rock +punta +punta rock +qasidah +qasidah modern +qawwali +quadrille +quan ho +queercore +quiet storm +rada +raga +raga rock +ragga +ragga jungle +raggamuffin +ragtime +rai +rake-and-scrape +ramkbach +ramvong +ranchera +rap +rap metal +rap rock +rapcore +rara +rare groove +rasiya +rave +raw rock +raï +rebetiko +red dirt +reel +reggae +reggae 110 +reggae bultrón +reggae en español +reggae fusion +reggae highlife +reggaefusion +reggaeton +rekilaulu +relax music +religious +rembetiko +renaissance music 
+requiem +rhapsody +rhyming spiritual +rhythm & blues +rhythm and blues +ricercar +riot grrrl +rock +rock and roll +rock en español +rock opera +rockabilly +rocksteady +rococo +romantic flow +romantic period in music +rondeaux +ronggeng +roots reggae +roots rock +roots rock reggae +rumba +russian pop +rímur +sabar +sacred harp +sacred music +sadcore +saibara +sakara +salegy +salsa +salsa erotica +salsa romantica +saltarello +samba +samba-canção +samba-reggae +samba-rock +sambai +sanjo +sato kagura +sawt +saya +scat +schlager +schottisch +schranz +scottish baroque music +screamo +scrumpy and western +sea shanty +sean nós +second viennese school +sega music +seggae +seis +semba +sephardic music +serialism +set dance +sevdalinka +sevillana +shabab +shabad +shalako +shan'ge +shango +shape note +shibuya-kei +shidaiqu +shima uta +shock rock +shoegaze +shoegazer +shoka +shomyo +show tune +sica +siguiriyas +silat +sinawi +situational +ska +ska punk +skacore +skald +skate punk +skiffle +slack-key guitar +slide +slowcore +sludge metal +slängpolska +smooth jazz +soca +soft rock +son +son montuno +son-batá +sonata +songo +songo-salsa +sophisti-pop +soukous +soul +soul blues +soul jazz +soul music +southern gospel +southern harmony +southern hip hop +southern metal +southern rock +southern soul +space age pop +space music +space rock +spectralism +speed garage +speed metal +speedcore +spirituals +spouge +sprechgesang +square dance +squee +st. 
louis blues +stand-up +steelband +stoner metal +stoner rock +straight edge +strathspeys +stride +string +string quartet +sufi music +suite +sunshine pop +suomirock +super eurobeat +surf ballad +surf instrumental +surf music +surf pop +surf rock +swamp blues +swamp pop +swamp rock +swing +swing music +swingbeat +sygyt +symphonic +symphonic black metal +symphonic metal +symphonic poem +symphonic rock +symphony +synthpop +synthpunk +t'ong guitar +taarab +tai tu +taiwanese pop +tala +talempong +tambu +tamburitza +tamil christian keerthanai +tango +tanguk +tappa +tarana +tarantella +taranto +tech +tech house +tech trance +technical death metal +technical metal +techno +technoid +technopop +techstep +techtonik +teen pop +tejano +tejano music +tekno +tembang sunda +texas blues +thai pop +thillana +thrash metal +thrashcore +thumri +tibetan pop +tiento +timbila +tin pan alley +tinga +tinku +toeshey +togaku +trad jazz +traditional bluegrass +traditional pop music +trallalero +trance +tribal house +trikitixa +trip hop +trip rock +trip-hop +tropicalia +tropicalismo +tropipop +truck-driving country +tumba +turbo-folk +turkish music +turkish pop +turntablism +tuvan throat-singing +twee pop +twist +two tone +táncház +uk garage +uk pub rock +unblack metal +underground music +uplifting +uplifting trance +urban cowboy +urban folk +urban jazz +vallenato +vaudeville +venezuela +verbunkos +verismo +viking metal +villanella +virelai +vispop +visual kei +visual music +vocal +vocal house +vocal jazz +vocal music +volksmusik +waila +waltz +wangga +warabe uta +wassoulou +weld +were music +west coast hip hop +west coast jazz +western +western blues +western swing +witch house +wizard rock +women's music +wong shadow +wonky pop +wood +work song +world fusion +world fusion music +world music +worldbeat +xhosa music +xoomii +yo-pop +yodeling +yukar +yé-yé +zajal +zapin +zarzuela +zeibekiko +zeuhl +ziglibithy +zouglou +zouk +zouk chouv +zouklove +zulu music +zydeco diff --git 
a/libs/beetsplug/lastimport.py b/libs/beetsplug/lastimport.py new file mode 100644 index 00000000..2d8cc700 --- /dev/null +++ b/libs/beetsplug/lastimport.py @@ -0,0 +1,249 @@ +# -*- coding: utf-8 -*- +# This file is part of beets. +# Copyright 2016, Rafael Bodill http://github.com/rafi +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. + +from __future__ import division, absolute_import, print_function + +import pylast +from pylast import TopItem, _extract, _number +from beets import ui +from beets import dbcore +from beets import config +from beets import plugins +from beets.dbcore import types + +API_URL = 'http://ws.audioscrobbler.com/2.0/' + + +class LastImportPlugin(plugins.BeetsPlugin): + def __init__(self): + super(LastImportPlugin, self).__init__() + config['lastfm'].add({ + 'user': '', + 'api_key': plugins.LASTFM_KEY, + }) + config['lastfm']['api_key'].redact = True + self.config.add({ + 'per_page': 500, + 'retry_limit': 3, + }) + self.item_types = { + 'play_count': types.INTEGER, + } + + def commands(self): + cmd = ui.Subcommand('lastimport', help=u'import last.fm play-count') + + def func(lib, opts, args): + import_lastfm(lib, self._log) + + cmd.func = func + return [cmd] + + +class CustomUser(pylast.User): + """ Custom user class derived from pylast.User, and overriding the + _get_things method to return MBID and album. 
Also introduces new + get_top_tracks_by_page method to allow access to more than one page of top + tracks. + """ + def __init__(self, *args, **kwargs): + super(CustomUser, self).__init__(*args, **kwargs) + + def _get_things(self, method, thing, thing_type, params=None, + cacheable=True): + """Returns a list of the most played thing_types by this thing, in a + tuple with the total number of pages of results. Includes an MBID, if + found. + """ + doc = self._request( + self.ws_prefix + "." + method, cacheable, params) + + toptracks_node = doc.getElementsByTagName('toptracks')[0] + total_pages = int(toptracks_node.getAttribute('totalPages')) + + seq = [] + for node in doc.getElementsByTagName(thing): + title = _extract(node, "name") + artist = _extract(node, "name", 1) + mbid = _extract(node, "mbid") + playcount = _number(_extract(node, "playcount")) + + thing = thing_type(artist, title, self.network) + thing.mbid = mbid + seq.append(TopItem(thing, playcount)) + + return seq, total_pages + + def get_top_tracks_by_page(self, period=pylast.PERIOD_OVERALL, limit=None, + page=1, cacheable=True): + """Returns the top tracks played by a user, in a tuple with the total + number of pages of results. + * period: The period of time. 
Possible values: + o PERIOD_OVERALL + o PERIOD_7DAYS + o PERIOD_1MONTH + o PERIOD_3MONTHS + o PERIOD_6MONTHS + o PERIOD_12MONTHS + """ + + params = self._get_params() + params['period'] = period + params['page'] = page + if limit: + params['limit'] = limit + + return self._get_things( + "getTopTracks", "track", pylast.Track, params, cacheable) + + +def import_lastfm(lib, log): + user = config['lastfm']['user'].get(unicode) + per_page = config['lastimport']['per_page'].get(int) + + if not user: + raise ui.UserError(u'You must specify a user name for lastimport') + + log.info(u'Fetching last.fm library for @{0}', user) + + page_total = 1 + page_current = 0 + found_total = 0 + unknown_total = 0 + retry_limit = config['lastimport']['retry_limit'].get(int) + # Iterate through a yet to be known page total count + while page_current < page_total: + log.info(u'Querying page #{0}{1}...', + page_current + 1, + '/{}'.format(page_total) if page_total > 1 else '') + + for retry in range(0, retry_limit): + tracks, page_total = fetch_tracks(user, page_current + 1, per_page) + if page_total < 1: + # It means nothing to us! + raise ui.UserError(u'Last.fm reported no data.') + + if tracks: + found, unknown = process_tracks(lib, tracks, log) + found_total += found + unknown_total += unknown + break + else: + log.error(u'ERROR: unable to read page #{0}', + page_current + 1) + if retry < retry_limit: + log.info( + u'Retrying page #{0}... ({1}/{2} retry)', + page_current + 1, retry + 1, retry_limit + ) + else: + log.error(u'FAIL: unable to fetch page #{0}, ', + u'tried {1} times', page_current, retry + 1) + page_current += 1 + + log.info(u'... done!') + log.info(u'finished processing {0} song pages', page_total) + log.info(u'{0} unknown play-counts', unknown_total) + log.info(u'{0} play-counts imported', found_total) + + +def fetch_tracks(user, page, limit): + """ JSON format: + [ + { + "mbid": "...", + "artist": "...", + "title": "...", + "playcount": "..." 
+ } + ] + """ + network = pylast.LastFMNetwork(api_key=config['lastfm']['api_key']) + user_obj = CustomUser(user, network) + results, total_pages =\ + user_obj.get_top_tracks_by_page(limit=limit, page=page) + return [ + { + "mbid": track.item.mbid if track.item.mbid else '', + "artist": { + "name": track.item.artist.name + }, + "name": track.item.title, + "playcount": track.weight + } for track in results + ], total_pages + + +def process_tracks(lib, tracks, log): + total = len(tracks) + total_found = 0 + total_fails = 0 + log.info(u'Received {0} tracks in this page, processing...', total) + + for num in xrange(0, total): + song = None + trackid = tracks[num]['mbid'].strip() + artist = tracks[num]['artist'].get('name', '').strip() + title = tracks[num]['name'].strip() + album = '' + if 'album' in tracks[num]: + album = tracks[num]['album'].get('name', '').strip() + + log.debug(u'query: {0} - {1} ({2})', artist, title, album) + + # First try to query by musicbrainz's trackid + if trackid: + song = lib.items( + dbcore.query.MatchQuery('mb_trackid', trackid) + ).get() + + # If not, try just artist/title + if song is None: + log.debug(u'no album match, trying by artist/title') + query = dbcore.AndQuery([ + dbcore.query.SubstringQuery('artist', artist), + dbcore.query.SubstringQuery('title', title) + ]) + song = lib.items(query).get() + + # Last resort, try just replacing to utf-8 quote + if song is None: + title = title.replace("'", u'\u2019') + log.debug(u'no title match, trying utf-8 single quote') + query = dbcore.AndQuery([ + dbcore.query.SubstringQuery('artist', artist), + dbcore.query.SubstringQuery('title', title) + ]) + song = lib.items(query).get() + + if song is not None: + count = int(song.get('play_count', 0)) + new_count = int(tracks[num]['playcount']) + log.debug(u'match: {0} - {1} ({2}) ' + u'updating: play_count {3} => {4}', + song.artist, song.title, song.album, count, new_count) + song['play_count'] = new_count + song.store() + total_found += 1 + 
else: + total_fails += 1 + log.info(u' - No match: {0} - {1} ({2})', + artist, title, album) + + if total_fails > 0: + log.info(u'Acquired {0}/{1} play-counts ({2} unknown)', + total_found, total, total_fails) + + return total_found, total_fails diff --git a/libs/beetsplug/lyrics.py b/libs/beetsplug/lyrics.py new file mode 100644 index 00000000..b6936e1b --- /dev/null +++ b/libs/beetsplug/lyrics.py @@ -0,0 +1,760 @@ +# -*- coding: utf-8 -*- +# This file is part of beets. +# Copyright 2016, Adrian Sampson. +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. + +"""Fetches, embeds, and displays lyrics. 
+""" + +from __future__ import absolute_import, division, print_function + +import difflib +import itertools +import json +import re +import requests +import unicodedata +import urllib +import warnings +from HTMLParser import HTMLParseError + +try: + from bs4 import SoupStrainer, BeautifulSoup + HAS_BEAUTIFUL_SOUP = True +except ImportError: + HAS_BEAUTIFUL_SOUP = False + +try: + import langdetect + HAS_LANGDETECT = True +except ImportError: + HAS_LANGDETECT = False + +from beets import plugins +from beets import ui + + +DIV_RE = re.compile(r'<(/?)div>?', re.I) +COMMENT_RE = re.compile(r'<!--.*-->', re.S) +TAG_RE = re.compile(r'<[^>]*>') +BREAK_RE = re.compile(r'\n?\s*<br([\s|/][^>]*)*>\s*\n?', re.I) +URL_CHARACTERS = { + u'\u2018': u"'", + u'\u2019': u"'", + u'\u201c': u'"', + u'\u201d': u'"', + u'\u2010': u'-', + u'\u2011': u'-', + u'\u2012': u'-', + u'\u2013': u'-', + u'\u2014': u'-', + u'\u2015': u'-', + u'\u2016': u'-', + u'\u2026': u'...', +} + + +# Utilities. + + +def unescape(text): + """Resolve &#xxx; HTML entities (and some others).""" + if isinstance(text, bytes): + text = text.decode('utf8', 'ignore') + out = text.replace(u' ', u' ') + + def replchar(m): + num = m.group(1) + return unichr(int(num)) + out = re.sub(u"&#(\d+);", replchar, out) + return out + + +def extract_text_between(html, start_marker, end_marker): + try: + _, html = html.split(start_marker, 1) + html, _ = html.split(end_marker, 1) + except ValueError: + return u'' + return html + + +def extract_text_in(html, starttag): + """Extract the text from a <DIV> tag in the HTML starting with + ``starttag``. Returns None if parsing fails. + """ + + # Strip off the leading text before opening tag. + try: + _, html = html.split(starttag, 1) + except ValueError: + return + + # Walk through balanced DIV tags. + level = 0 + parts = [] + pos = 0 + for match in DIV_RE.finditer(html): + if match.group(1): # Closing tag. + level -= 1 + if level == 0: + pos = match.end() + else: # Opening tag. 
+ if level == 0: + parts.append(html[pos:match.start()]) + level += 1 + + if level == -1: + parts.append(html[pos:match.start()]) + break + else: + print(u'no closing tag found!') + return + return u''.join(parts) + + +def search_pairs(item): + """Yield a pairs of artists and titles to search for. + + The first item in the pair is the name of the artist, the second + item is a list of song names. + + In addition to the artist and title obtained from the `item` the + method tries to strip extra information like paranthesized suffixes + and featured artists from the strings and add them as candidates. + The method also tries to split multiple titles separated with `/`. + """ + + title, artist = item.title, item.artist + titles = [title] + artists = [artist] + + # Remove any featuring artists from the artists name + pattern = r"(.*?) {0}".format(plugins.feat_tokens()) + match = re.search(pattern, artist, re.IGNORECASE) + if match: + artists.append(match.group(1)) + + # Remove a parenthesized suffix from a title string. Common + # examples include (live), (remix), and (acoustic). + pattern = r"(.+?)\s+[(].*[)]$" + match = re.search(pattern, title, re.IGNORECASE) + if match: + titles.append(match.group(1)) + + # Remove any featuring artists from the title + pattern = r"(.*?) {0}".format(plugins.feat_tokens(for_artist=False)) + for title in titles[:]: + match = re.search(pattern, title, re.IGNORECASE) + if match: + titles.append(match.group(1)) + + # Check for a dual song (e.g. Pink Floyd - Speak to Me / Breathe) + # and each of them. 
+    multi_titles = []
+    for title in titles:
+        multi_titles.append([title])
+        if '/' in title:
+            multi_titles.append([x.strip() for x in title.split('/')])
+
+    return itertools.product(artists, multi_titles)
+
+
+class Backend(object):
+    """Base class for a lyrics source backend."""
+
+    def __init__(self, config, log):
+        self._log = log
+
+    @staticmethod
+    def _encode(s):
+        """Encode the string for inclusion in a URL"""
+        if isinstance(s, unicode):
+            for char, repl in URL_CHARACTERS.items():
+                s = s.replace(char, repl)
+            s = s.encode('utf8', 'ignore')
+        return urllib.quote(s)
+
+    def build_url(self, artist, title):
+        return self.URL_PATTERN % (self._encode(artist.title()),
+                                   self._encode(title.title()))
+
+    def fetch_url(self, url):
+        """Retrieve the content at a given URL, or return None if the source
+        is unreachable.
+        """
+        try:
+            # Disable the InsecureRequestWarning that comes from using
+            # `verify=false`.
+            # https://github.com/kennethreitz/requests/issues/2214
+            # We're not overly worried about the NSA MITMing our lyrics scraper
+            with warnings.catch_warnings():
+                warnings.simplefilter('ignore')
+                r = requests.get(url, verify=False)
+        except requests.RequestException as exc:
+            self._log.debug(u'lyrics request failed: {0}', exc)
+            return
+        if r.status_code == requests.codes.ok:
+            return r.text
+        else:
+            self._log.debug(u'failed to fetch: {0} ({1})', url, r.status_code)
+
+    def fetch(self, artist, title):
+        raise NotImplementedError()
+
+
+class SymbolsReplaced(Backend):
+    # Regex -> replacement applied to artist/title before URL building.
+    # NOTE: the closing-bracket pattern must be r'[\]\}]'; a duplicated
+    # r'[\[\{]' key would silently drop the '(' mapping.
+    REPLACEMENTS = {
+        r'\s+': '_',
+        '<': 'Less_Than',
+        '>': 'Greater_Than',
+        '#': 'Number_',
+        r'[\[\{]': '(',
+        r'[\]\}]': ')'
+    }
+
+    @classmethod
+    def _encode(cls, s):
+        for old, new in cls.REPLACEMENTS.iteritems():
+            s = re.sub(old, new, s)
+
+        return super(SymbolsReplaced, cls)._encode(s)
+
+
+class MusiXmatch(SymbolsReplaced):
+    REPLACEMENTS = dict(SymbolsReplaced.REPLACEMENTS, **{
+        r'\s+': '-'
+    })
+
+    URL_PATTERN = 'https://www.musixmatch.com/lyrics/%s/%s'
+
+    def fetch(self, artist, title):
+        url = self.build_url(artist, title)
+
html = self.fetch_url(url) + if not html: + return + lyrics = extract_text_between(html, + '"body":', '"language":') + return lyrics.strip(',"').replace('\\n', '\n') + + +class Genius(Backend): + """Fetch lyrics from Genius via genius-api.""" + def __init__(self, config, log): + super(Genius, self).__init__(config, log) + self.api_key = config['genius_api_key'].get(unicode) + self.headers = {'Authorization': "Bearer %s" % self.api_key} + + def search_genius(self, artist, title): + query = u"%s %s" % (artist, title) + url = u'https://api.genius.com/search?q=%s' \ + % (urllib.quote(query.encode('utf8'))) + + self._log.debug(u'genius: requesting search {}', url) + try: + req = requests.get( + url, + headers=self.headers, + allow_redirects=True + ) + req.raise_for_status() + except requests.RequestException as exc: + self._log.debug(u'genius: request error: {}', exc) + return None + + try: + return req.json() + except ValueError: + self._log.debug(u'genius: invalid response: {}', req.text) + return None + + def get_lyrics(self, link): + url = u'http://genius-api.com/api/lyricsInfo' + + self._log.debug(u'genius: requesting lyrics for link {}', link) + try: + req = requests.post( + url, + data={'link': link}, + headers=self.headers, + allow_redirects=True + ) + req.raise_for_status() + except requests.RequestException as exc: + self._log.debug(u'genius: request error: {}', exc) + return None + + try: + return req.json() + except ValueError: + self._log.debug(u'genius: invalid response: {}', req.text) + return None + + def build_lyric_string(self, lyrics): + if 'lyrics' not in lyrics: + return + sections = lyrics['lyrics']['sections'] + + lyrics_list = [] + for section in sections: + lyrics_list.append(section['name']) + lyrics_list.append('\n') + for verse in section['verses']: + if 'content' in verse: + lyrics_list.append(verse['content']) + + return ''.join(lyrics_list) + + def fetch(self, artist, title): + search_data = self.search_genius(artist, title) + if not 
search_data: + return + + if not search_data['meta']['status'] == 200: + return + else: + records = search_data['response']['hits'] + if not records: + return + + record_url = records[0]['result']['url'] + lyric_data = self.get_lyrics(record_url) + if not lyric_data: + return + lyrics = self.build_lyric_string(lyric_data) + + return lyrics + + +class LyricsWiki(SymbolsReplaced): + """Fetch lyrics from LyricsWiki.""" + URL_PATTERN = 'http://lyrics.wikia.com/%s:%s' + + def fetch(self, artist, title): + url = self.build_url(artist, title) + html = self.fetch_url(url) + if not html: + return + + # Get the HTML fragment inside the appropriate HTML element and then + # extract the text from it. + html_frag = extract_text_in(html, u"<div class='lyricbox'>") + if html_frag: + lyrics = _scrape_strip_cruft(html_frag, True) + + if lyrics and 'Unfortunately, we are not licensed' not in lyrics: + return lyrics + + +class LyricsCom(Backend): + """Fetch lyrics from Lyrics.com.""" + URL_PATTERN = 'http://www.lyrics.com/%s-lyrics-%s.html' + NOT_FOUND = ( + 'Sorry, we do not have the lyric', + 'Submit Lyrics', + ) + + @classmethod + def _encode(cls, s): + s = re.sub(r'[^\w\s-]', '', s) + s = re.sub(r'\s+', '-', s) + return super(LyricsCom, cls)._encode(s).lower() + + def fetch(self, artist, title): + url = self.build_url(artist, title) + html = self.fetch_url(url) + if not html: + return + lyrics = extract_text_between(html, '<div id="lyrics" class="SCREENO' + 'NLY" itemprop="description">', '</div>') + if not lyrics: + return + for not_found_str in self.NOT_FOUND: + if not_found_str in lyrics: + return + + parts = lyrics.split('\n---\nLyrics powered by', 1) + if parts: + return parts[0] + + +def remove_credits(text): + """Remove first/last line of text if it contains the word 'lyrics' + eg 'Lyrics by songsdatabase.com' + """ + textlines = text.split('\n') + credits = None + for i in (0, -1): + if textlines and 'lyrics' in textlines[i].lower(): + credits = textlines.pop(i) + if 
credits: + text = '\n'.join(textlines) + return text + + +def _scrape_strip_cruft(html, plain_text_out=False): + """Clean up HTML + """ + html = unescape(html) + + html = html.replace('\r', '\n') # Normalize EOL. + html = re.sub(r' +', ' ', html) # Whitespaces collapse. + html = BREAK_RE.sub('\n', html) # <br> eats up surrounding '\n'. + html = re.sub(r'<(script).*?</\1>(?s)', '', html) # Strip script tags. + + if plain_text_out: # Strip remaining HTML tags + html = COMMENT_RE.sub('', html) + html = TAG_RE.sub('', html) + + html = '\n'.join([x.strip() for x in html.strip().split('\n')]) + html = re.sub(r'\n{3,}', r'\n\n', html) + return html + + +def _scrape_merge_paragraphs(html): + html = re.sub(r'</p>\s*<p(\s*[^>]*)>', '\n', html) + return re.sub(r'<div .*>\s*</div>', '\n', html) + + +def scrape_lyrics_from_html(html): + """Scrape lyrics from a URL. If no lyrics can be found, return None + instead. + """ + if not HAS_BEAUTIFUL_SOUP: + return None + + if not html: + return None + + def is_text_notcode(text): + length = len(text) + return (length > 20 and + text.count(' ') > length / 25 and + (text.find('{') == -1 or text.find(';') == -1)) + html = _scrape_strip_cruft(html) + html = _scrape_merge_paragraphs(html) + + # extract all long text blocks that are not code + try: + soup = BeautifulSoup(html, "html.parser", + parse_only=SoupStrainer(text=is_text_notcode)) + except HTMLParseError: + return None + + # Get the longest text element (if any). + strings = sorted(soup.stripped_strings, key=len, reverse=True) + if strings: + return strings[0] + else: + return None + + +class Google(Backend): + """Fetch lyrics from Google search results.""" + def __init__(self, config, log): + super(Google, self).__init__(config, log) + self.api_key = config['google_API_key'].get(unicode) + self.engine_id = config['google_engine_ID'].get(unicode) + + def is_lyrics(self, text, artist=None): + """Determine whether the text seems to be valid lyrics. 
+ """ + if not text: + return False + bad_triggers_occ = [] + nb_lines = text.count('\n') + if nb_lines <= 1: + self._log.debug(u"Ignoring too short lyrics '{0}'", text) + return False + elif nb_lines < 5: + bad_triggers_occ.append('too_short') + else: + # Lyrics look legit, remove credits to avoid being penalized + # further down + text = remove_credits(text) + + bad_triggers = ['lyrics', 'copyright', 'property', 'links'] + if artist: + bad_triggers_occ += [artist] + + for item in bad_triggers: + bad_triggers_occ += [item] * len(re.findall(r'\W%s\W' % item, + text, re.I)) + + if bad_triggers_occ: + self._log.debug(u'Bad triggers detected: {0}', bad_triggers_occ) + return len(bad_triggers_occ) < 2 + + def slugify(self, text): + """Normalize a string and remove non-alphanumeric characters. + """ + text = re.sub(r"[-'_\s]", '_', text) + text = re.sub(r"_+", '_', text).strip('_') + pat = "([^,\(]*)\((.*?)\)" # Remove content within parentheses + text = re.sub(pat, '\g<1>', text).strip() + try: + text = unicodedata.normalize('NFKD', text).encode('ascii', + 'ignore') + text = unicode(re.sub('[-\s]+', ' ', text)) + except UnicodeDecodeError: + self._log.exception(u"Failing to normalize '{0}'", text) + return text + + BY_TRANS = ['by', 'par', 'de', 'von'] + LYRICS_TRANS = ['lyrics', 'paroles', 'letras', 'liedtexte'] + + def is_page_candidate(self, url_link, url_title, title, artist): + """Return True if the URL title makes it a good candidate to be a + page that contains lyrics of title by artist. 
+ """ + title = self.slugify(title.lower()) + artist = self.slugify(artist.lower()) + sitename = re.search(u"//([^/]+)/.*", + self.slugify(url_link.lower())).group(1) + url_title = self.slugify(url_title.lower()) + + # Check if URL title contains song title (exact match) + if url_title.find(title) != -1: + return True + + # or try extracting song title from URL title and check if + # they are close enough + tokens = [by + '_' + artist for by in self.BY_TRANS] + \ + [artist, sitename, sitename.replace('www.', '')] + \ + self.LYRICS_TRANS + tokens = [re.escape(t) for t in tokens] + song_title = re.sub(u'(%s)' % u'|'.join(tokens), u'', url_title) + + song_title = song_title.strip('_|') + typo_ratio = .9 + ratio = difflib.SequenceMatcher(None, song_title, title).ratio() + return ratio >= typo_ratio + + def fetch(self, artist, title): + query = u"%s %s" % (artist, title) + url = u'https://www.googleapis.com/customsearch/v1?key=%s&cx=%s&q=%s' \ + % (self.api_key, self.engine_id, + urllib.quote(query.encode('utf8'))) + + data = urllib.urlopen(url) + data = json.load(data) + if 'error' in data: + reason = data['error']['errors'][0]['reason'] + self._log.debug(u'google lyrics backend error: {0}', reason) + return + + if 'items' in data.keys(): + for item in data['items']: + url_link = item['link'] + url_title = item.get('title', u'') + if not self.is_page_candidate(url_link, url_title, + title, artist): + continue + html = self.fetch_url(url_link) + lyrics = scrape_lyrics_from_html(html) + if not lyrics: + continue + + if self.is_lyrics(lyrics, artist): + self._log.debug(u'got lyrics from {0}', + item['displayLink']) + return lyrics + + +class LyricsPlugin(plugins.BeetsPlugin): + SOURCES = ['google', 'lyricwiki', 'lyrics.com', 'musixmatch'] + SOURCE_BACKENDS = { + 'google': Google, + 'lyricwiki': LyricsWiki, + 'lyrics.com': LyricsCom, + 'musixmatch': MusiXmatch, + 'genius': Genius, + } + + def __init__(self): + super(LyricsPlugin, self).__init__() + self.import_stages = 
[self.imported] + self.config.add({ + 'auto': True, + 'bing_client_secret': None, + 'bing_lang_from': [], + 'bing_lang_to': None, + 'google_API_key': None, + 'google_engine_ID': u'009217259823014548361:lndtuqkycfu', + 'genius_api_key': + "Ryq93pUGm8bM6eUWwD_M3NOFFDAtp2yEE7W" + "76V-uFL5jks5dNvcGCdarqFjDhP9c", + 'fallback': None, + 'force': False, + 'sources': self.SOURCES, + }) + self.config['bing_client_secret'].redact = True + self.config['google_API_key'].redact = True + self.config['google_engine_ID'].redact = True + self.config['genius_api_key'].redact = True + + available_sources = list(self.SOURCES) + sources = plugins.sanitize_choices( + self.config['sources'].as_str_seq(), available_sources) + + if 'google' in sources: + if not self.config['google_API_key'].get(): + self._log.warn(u'To use the google lyrics source, you must ' + u'provide an API key in the configuration. ' + u'See the documentation for further details.') + sources.remove('google') + if not HAS_BEAUTIFUL_SOUP: + self._log.warn(u'To use the google lyrics source, you must ' + u'install the beautifulsoup4 module. See the ' + u'documentation for further details.') + sources.remove('google') + + self.config['bing_lang_from'] = [ + x.lower() for x in self.config['bing_lang_from'].as_str_seq()] + self.bing_auth_token = None + + if not HAS_LANGDETECT and self.config['bing_client_secret'].get(): + self._log.warn(u'To use bing translations, you need to ' + u'install the langdetect module. 
See the ' + u'documentation for further details.') + + self.backends = [self.SOURCE_BACKENDS[source](self.config, self._log) + for source in sources] + + def get_bing_access_token(self): + params = { + 'client_id': 'beets', + 'client_secret': self.config['bing_client_secret'], + 'scope': 'http://api.microsofttranslator.com', + 'grant_type': 'client_credentials', + } + + oauth_url = 'https://datamarket.accesscontrol.windows.net/v2/OAuth2-13' + oauth_token = json.loads(requests.post( + oauth_url, + data=urllib.urlencode(params)).content) + if 'access_token' in oauth_token: + return "Bearer " + oauth_token['access_token'] + else: + self._log.warning(u'Could not get Bing Translate API access token.' + u' Check your "bing_client_secret" password') + + def commands(self): + cmd = ui.Subcommand('lyrics', help='fetch song lyrics') + cmd.parser.add_option( + u'-p', u'--print', dest='printlyr', + action='store_true', default=False, + help=u'print lyrics to console', + ) + cmd.parser.add_option( + u'-f', u'--force', dest='force_refetch', + action='store_true', default=False, + help=u'always re-download lyrics', + ) + + def func(lib, opts, args): + # The "write to files" option corresponds to the + # import_write config value. + write = ui.should_write() + for item in lib.items(ui.decargs(args)): + self.fetch_item_lyrics( + lib, item, write, + opts.force_refetch or self.config['force'], + ) + if opts.printlyr and item.lyrics: + ui.print_(item.lyrics) + + cmd.func = func + return [cmd] + + def imported(self, session, task): + """Import hook for fetching lyrics automatically. + """ + if self.config['auto']: + for item in task.imported_items(): + self.fetch_item_lyrics(session.lib, item, + False, self.config['force']) + + def fetch_item_lyrics(self, lib, item, write, force): + """Fetch and store lyrics for a single item. If ``write``, then the + lyrics will also be written to the file itself.""" + # Skip if the item already has lyrics. 
+ if not force and item.lyrics: + self._log.info(u'lyrics already present: {0}', item) + return + + lyrics = None + for artist, titles in search_pairs(item): + lyrics = [self.get_lyrics(artist, title) for title in titles] + if any(lyrics): + break + + lyrics = u"\n\n---\n\n".join([l for l in lyrics if l]) + + if lyrics: + self._log.info(u'fetched lyrics: {0}', item) + if HAS_LANGDETECT and self.config['bing_client_secret'].get(): + lang_from = langdetect.detect(lyrics) + if self.config['bing_lang_to'].get() != lang_from and ( + not self.config['bing_lang_from'] or ( + lang_from in self.config[ + 'bing_lang_from'].as_str_seq())): + lyrics = self.append_translation( + lyrics, self.config['bing_lang_to']) + else: + self._log.info(u'lyrics not found: {0}', item) + fallback = self.config['fallback'].get() + if fallback: + lyrics = fallback + else: + return + item.lyrics = lyrics + if write: + item.try_write() + item.store() + + def get_lyrics(self, artist, title): + """Fetch lyrics, trying each source in turn. Return a string or + None if no lyrics were found. 
+ """ + for backend in self.backends: + lyrics = backend.fetch(artist, title) + if lyrics: + self._log.debug(u'got lyrics from backend: {0}', + backend.__class__.__name__) + return _scrape_strip_cruft(lyrics, True) + + def append_translation(self, text, to_lang): + import xml.etree.ElementTree as ET + + if not self.bing_auth_token: + self.bing_auth_token = self.get_bing_access_token() + if self.bing_auth_token: + # Extract unique lines to limit API request size per song + text_lines = set(text.split('\n')) + url = ('http://api.microsofttranslator.com/v2/Http.svc/' + 'Translate?text=%s&to=%s' % ('|'.join(text_lines), to_lang)) + r = requests.get(url, + headers={"Authorization ": self.bing_auth_token}) + if r.status_code != 200: + self._log.debug('translation API error {}: {}', r.status_code, + r.text) + if 'token has expired' in r.text: + self.bing_auth_token = None + return self.append_translation(text, to_lang) + return text + lines_translated = ET.fromstring(r.text.encode('utf8')).text + # Use a translation mapping dict to build resulting lyrics + translations = dict(zip(text_lines, lines_translated.split('|'))) + result = '' + for line in text.split('\n'): + result += '%s / %s\n' % (line, translations[line]) + return result diff --git a/libs/beetsplug/mbcollection.py b/libs/beetsplug/mbcollection.py new file mode 100644 index 00000000..b95ba6fe --- /dev/null +++ b/libs/beetsplug/mbcollection.py @@ -0,0 +1,114 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2011, Jeffrey Aylesworth <jeffrey@aylesworth.ca> +# +# Permission to use, copy, modify, and/or distribute this software for any +# purpose with or without fee is hereby granted, provided that the above +# copyright notice and this permission notice appear in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +from __future__ import division, absolute_import, print_function + +from beets.plugins import BeetsPlugin +from beets.ui import Subcommand +from beets import ui +from beets import config +import musicbrainzngs + +import re + +SUBMISSION_CHUNK_SIZE = 200 +UUID_REGEX = r'^[a-f0-9]{8}(-[a-f0-9]{4}){3}-[a-f0-9]{12}$' + + +def mb_call(func, *args, **kwargs): + """Call a MusicBrainz API function and catch exceptions. + """ + try: + return func(*args, **kwargs) + except musicbrainzngs.AuthenticationError: + raise ui.UserError(u'authentication with MusicBrainz failed') + except (musicbrainzngs.ResponseError, musicbrainzngs.NetworkError) as exc: + raise ui.UserError(u'MusicBrainz API error: {0}'.format(exc)) + except musicbrainzngs.UsageError: + raise ui.UserError(u'MusicBrainz credentials missing') + + +def submit_albums(collection_id, release_ids): + """Add all of the release IDs to the indicated collection. Multiple + requests are made if there are many release IDs to submit. 
+ """ + for i in range(0, len(release_ids), SUBMISSION_CHUNK_SIZE): + chunk = release_ids[i:i + SUBMISSION_CHUNK_SIZE] + mb_call( + musicbrainzngs.add_releases_to_collection, + collection_id, chunk + ) + + +class MusicBrainzCollectionPlugin(BeetsPlugin): + def __init__(self): + super(MusicBrainzCollectionPlugin, self).__init__() + config['musicbrainz']['pass'].redact = True + musicbrainzngs.auth( + config['musicbrainz']['user'].get(unicode), + config['musicbrainz']['pass'].get(unicode), + ) + self.config.add({'auto': False}) + if self.config['auto']: + self.import_stages = [self.imported] + + def commands(self): + mbupdate = Subcommand('mbupdate', + help=u'Update MusicBrainz collection') + mbupdate.func = self.update_collection + return [mbupdate] + + def update_collection(self, lib, opts, args): + self.update_album_list(lib.albums()) + + def imported(self, session, task): + """Add each imported album to the collection. + """ + if task.is_album: + self.update_album_list([task.album]) + + def update_album_list(self, album_list): + """Update the MusicBrainz colleciton from a list of Beets albums + """ + # Get the available collections. + collections = mb_call(musicbrainzngs.get_collections) + if not collections['collection-list']: + raise ui.UserError(u'no collections exist for user') + + # Get the first release collection. MusicBrainz also has event + # collections, so we need to avoid adding to those. + for collection in collections['collection-list']: + if 'release-count' in collection: + collection_id = collection['id'] + break + else: + raise ui.UserError(u'No collection found.') + + # Get a list of all the album IDs. + album_ids = [] + for album in album_list: + aid = album.mb_albumid + if aid: + if re.match(UUID_REGEX, aid): + album_ids.append(aid) + else: + self._log.info(u'skipping invalid MBID: {0}', aid) + + # Submit to MusicBrainz. 
+ self._log.info( + u'Updating MusicBrainz collection {0}...', collection_id + ) + submit_albums(collection_id, album_ids) + self._log.info(u'...MusicBrainz collection updated.') diff --git a/libs/beetsplug/mbsubmit.py b/libs/beetsplug/mbsubmit.py new file mode 100644 index 00000000..91de6128 --- /dev/null +++ b/libs/beetsplug/mbsubmit.py @@ -0,0 +1,60 @@ +# -*- coding: utf-8 -*- +# This file is part of beets. +# Copyright 2016, Adrian Sampson and Diego Moreda. +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. + +"""Aid in submitting information to MusicBrainz. + +This plugin allows the user to print track information in a format that is +parseable by the MusicBrainz track parser [1]. Programmatic submitting is not +implemented by MusicBrainz yet. + +[1] http://wiki.musicbrainz.org/History:How_To_Parse_Track_Listings +""" + +from __future__ import division, absolute_import, print_function + + +from beets.autotag import Recommendation +from beets.plugins import BeetsPlugin +from beets.ui.commands import PromptChoice +from beetsplug.info import print_data + + +class MBSubmitPlugin(BeetsPlugin): + def __init__(self): + super(MBSubmitPlugin, self).__init__() + + self.config.add({ + 'format': '$track. $title - $artist ($length)', + 'threshold': 'medium', + }) + + # Validate and store threshold. 
+ self.threshold = self.config['threshold'].as_choice({ + 'none': Recommendation.none, + 'low': Recommendation.low, + 'medium': Recommendation.medium, + 'strong': Recommendation.strong + }) + + self.register_listener('before_choose_candidate', + self.before_choose_candidate_event) + + def before_choose_candidate_event(self, session, task): + if task.rec <= self.threshold: + return [PromptChoice(u'p', u'Print tracks', self.print_tracks)] + + def print_tracks(self, session, task): + for i in task.items: + print_data(None, i, self.config['format'].get()) diff --git a/libs/beetsplug/mbsync.py b/libs/beetsplug/mbsync.py new file mode 100644 index 00000000..cf58c82d --- /dev/null +++ b/libs/beetsplug/mbsync.py @@ -0,0 +1,167 @@ +# -*- coding: utf-8 -*- +# This file is part of beets. +# Copyright 2016, Jakob Schnitzer. +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. + +"""Update library's tags using MusicBrainz. +""" +from __future__ import division, absolute_import, print_function + +from beets.plugins import BeetsPlugin +from beets import autotag, library, ui, util +from beets.autotag import hooks +from collections import defaultdict + + +def apply_item_changes(lib, item, move, pretend, write): + """Store, move and write the item according to the arguments. + """ + if not pretend: + # Move the item if it's in the library. 
+        if move and lib.directory in util.ancestry(item.path):
+            item.move(with_album=False)
+
+        if write:
+            item.try_write()
+        item.store()
+
+
+class MBSyncPlugin(BeetsPlugin):
+    def __init__(self):
+        super(MBSyncPlugin, self).__init__()
+
+    def commands(self):
+        cmd = ui.Subcommand('mbsync',
+                            help=u'update metadata from musicbrainz')
+        cmd.parser.add_option(
+            u'-p', u'--pretend', action='store_true',
+            help=u'show all changes but do nothing')
+        cmd.parser.add_option(
+            u'-m', u'--move', action='store_true', dest='move',
+            help=u"move files in the library directory")
+        cmd.parser.add_option(
+            u'-M', u'--nomove', action='store_false', dest='move',
+            help=u"don't move files in library")
+        cmd.parser.add_option(
+            u'-W', u'--nowrite', action='store_false',
+            default=None, dest='write',
+            help=u"don't write updated metadata to files")
+        cmd.parser.add_format_option()
+        cmd.func = self.func
+        return [cmd]
+
+    def func(self, lib, opts, args):
+        """Command handler for the mbsync function.
+        """
+        move = ui.should_move(opts.move)
+        pretend = opts.pretend
+        write = ui.should_write(opts.write)
+        query = ui.decargs(args)
+
+        self.singletons(lib, query, move, pretend, write)
+        self.albums(lib, query, move, pretend, write)
+
+    def singletons(self, lib, query, move, pretend, write):
+        """Retrieve and apply info from the autotagger for items matched by
+        query.
+        """
+        for item in lib.items(query + [u'singleton:true']):
+            item_formatted = format(item)
+            if not item.mb_trackid:
+                self._log.info(u'Skipping singleton with no mb_trackid: {0}',
+                               item_formatted)
+                continue
+
+            # Get the MusicBrainz recording info.
+            track_info = hooks.track_for_mbid(item.mb_trackid)
+            if not track_info:
+                # Placeholders must be {0} and {1}: the old message used
+                # {0} twice, so the track description was never printed.
+                self._log.info(u'Recording ID not found: {0} for track {1}',
+                               item.mb_trackid,
+                               item_formatted)
+                continue
+
+            # Apply.
+ with lib.transaction(): + autotag.apply_item_metadata(item, track_info) + apply_item_changes(lib, item, move, pretend, write) + + def albums(self, lib, query, move, pretend, write): + """Retrieve and apply info from the autotagger for albums matched by + query and their items. + """ + # Process matching albums. + for a in lib.albums(query): + album_formatted = format(a) + if not a.mb_albumid: + self._log.info(u'Skipping album with no mb_albumid: {0}', + album_formatted) + continue + + items = list(a.items()) + + # Get the MusicBrainz album information. + album_info = hooks.album_for_mbid(a.mb_albumid) + if not album_info: + self._log.info(u'Release ID {0} not found for album {1}', + a.mb_albumid, + album_formatted) + continue + + # Map recording MBIDs to their information. Recordings can appear + # multiple times on a release, so each MBID maps to a list of + # TrackInfo objects. + track_index = defaultdict(list) + for track_info in album_info.tracks: + track_index[track_info.track_id].append(track_info) + + # Construct a track mapping according to MBIDs. This should work + # for albums that have missing or extra tracks. If there are + # multiple copies of a recording, they are disambiguated using + # their disc and track number. + mapping = {} + for item in items: + candidates = track_index[item.mb_trackid] + if len(candidates) == 1: + mapping[item] = candidates[0] + else: + for c in candidates: + if (c.medium_index == item.track and + c.medium == item.disc): + mapping[item] = c + break + + # Apply. + self._log.debug(u'applying changes to {}', album_formatted) + with lib.transaction(): + autotag.apply_metadata(album_info, mapping) + changed = False + for item in items: + item_changed = ui.show_model_changes(item) + changed |= item_changed + if item_changed: + apply_item_changes(lib, item, move, pretend, write) + + if not changed: + # No change to any item. + continue + + if not pretend: + # Update album structure to reflect an item in it. 
+ for key in library.Album.item_keys: + a[key] = items[0][key] + a.store() + + # Move album art (and any inconsistent items). + if move and lib.directory in util.ancestry(items[0].path): + self._log.debug(u'moving album {0}', album_formatted) + a.move() diff --git a/libs/beetsplug/metasync/__init__.py b/libs/beetsplug/metasync/__init__.py new file mode 100644 index 00000000..3fc0be4c --- /dev/null +++ b/libs/beetsplug/metasync/__init__.py @@ -0,0 +1,142 @@ +# -*- coding: utf-8 -*- +# This file is part of beets. +# Copyright 2016, Heinz Wiesinger. +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. 
+ +"""Synchronize information from music player libraries +""" + +from __future__ import division, absolute_import, print_function + +from abc import abstractmethod, ABCMeta +from importlib import import_module + +from beets.util.confit import ConfigValueError +from beets import ui +from beets.plugins import BeetsPlugin + + +METASYNC_MODULE = 'beetsplug.metasync' + +# Dictionary to map the MODULE and the CLASS NAME of meta sources +SOURCES = { + 'amarok': 'Amarok', + 'itunes': 'Itunes', +} + + +class MetaSource(object): + __metaclass__ = ABCMeta + + def __init__(self, config, log): + self.item_types = {} + self.config = config + self._log = log + + @abstractmethod + def sync_from_source(self, item): + pass + + +def load_meta_sources(): + """ Returns a dictionary of all the MetaSources + E.g., {'itunes': Itunes} with isinstance(Itunes, MetaSource) true + """ + meta_sources = {} + + for module_path, class_name in SOURCES.items(): + module = import_module(METASYNC_MODULE + '.' + module_path) + meta_sources[class_name.lower()] = getattr(module, class_name) + + return meta_sources + + +META_SOURCES = load_meta_sources() + + +def load_item_types(): + """ Returns a dictionary containing the item_types of all the MetaSources + """ + item_types = {} + for meta_source in META_SOURCES.values(): + item_types.update(meta_source.item_types) + return item_types + + +class MetaSyncPlugin(BeetsPlugin): + + item_types = load_item_types() + + def __init__(self): + super(MetaSyncPlugin, self).__init__() + + def commands(self): + cmd = ui.Subcommand('metasync', + help='update metadata from music player libraries') + cmd.parser.add_option('-p', '--pretend', action='store_true', + help='show all changes but do nothing') + cmd.parser.add_option('-s', '--source', default=[], + action='append', dest='sources', + help='comma-separated list of sources to sync') + cmd.parser.add_format_option() + cmd.func = self.func + return [cmd] + + def func(self, lib, opts, args): + """Command handler for 
the metasync function. + """ + pretend = opts.pretend + query = ui.decargs(args) + + sources = [] + for source in opts.sources: + sources.extend(source.split(',')) + + sources = sources or self.config['source'].as_str_seq() + + meta_source_instances = {} + items = lib.items(query) + + # Avoid needlessly instantiating meta sources (can be expensive) + if not items: + self._log.info(u'No items found matching query') + return + + # Instantiate the meta sources + for player in sources: + try: + cls = META_SOURCES[player] + except KeyError: + self._log.error(u'Unknown metadata source \'{0}\''.format( + player)) + + try: + meta_source_instances[player] = cls(self.config, self._log) + except (ImportError, ConfigValueError) as e: + self._log.error(u'Failed to instantiate metadata source ' + u'\'{0}\': {1}'.format(player, e)) + + # Avoid needlessly iterating over items + if not meta_source_instances: + self._log.error(u'No valid metadata sources found') + return + + # Sync the items with all of the meta sources + for item in items: + for meta_source in meta_source_instances.values(): + meta_source.sync_from_source(item) + + changed = ui.show_model_changes(item) + + if changed and not pretend: + item.store() diff --git a/libs/beetsplug/metasync/amarok.py b/libs/beetsplug/metasync/amarok.py new file mode 100644 index 00000000..aaa1ee91 --- /dev/null +++ b/libs/beetsplug/metasync/amarok.py @@ -0,0 +1,110 @@ +# -*- coding: utf-8 -*- +# This file is part of beets. +# Copyright 2016, Heinz Wiesinger. 
+# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. + +"""Synchronize information from amarok's library via dbus +""" + +from __future__ import division, absolute_import, print_function + +from os.path import basename +from datetime import datetime +from time import mktime +from xml.sax.saxutils import escape + +from beets.util import displayable_path +from beets.dbcore import types +from beets.library import DateType +from beetsplug.metasync import MetaSource + + +def import_dbus(): + try: + return __import__('dbus') + except ImportError: + return None + +dbus = import_dbus() + + +class Amarok(MetaSource): + + item_types = { + 'amarok_rating': types.INTEGER, + 'amarok_score': types.FLOAT, + 'amarok_uid': types.STRING, + 'amarok_playcount': types.INTEGER, + 'amarok_firstplayed': DateType(), + 'amarok_lastplayed': DateType(), + } + + queryXML = u'<query version="1.0"> \ + <filters> \ + <and><include field="filename" value="%s" /></and> \ + </filters> \ + </query>' + + def __init__(self, config, log): + super(Amarok, self).__init__(config, log) + + if not dbus: + raise ImportError('failed to import dbus') + + self.collection = \ + dbus.SessionBus().get_object('org.kde.amarok', '/Collection') + + def sync_from_source(self, item): + path = displayable_path(item.path) + + # amarok unfortunately doesn't allow searching for the full path, only + # for the patch relative to the mount point. But the full path is part + # of the result set. 
So query for the filename and then try to match + # the correct item from the results we get back + results = self.collection.Query(self.queryXML % escape(basename(path))) + for result in results: + if result['xesam:url'] != path: + continue + + item.amarok_rating = result['xesam:userRating'] + item.amarok_score = result['xesam:autoRating'] + item.amarok_playcount = result['xesam:useCount'] + item.amarok_uid = \ + result['xesam:id'].replace('amarok-sqltrackuid://', '') + + if result['xesam:firstUsed'][0][0] != 0: + # These dates are stored as timestamps in amarok's db, but + # exposed over dbus as fixed integers in the current timezone. + first_played = datetime( + result['xesam:firstUsed'][0][0], + result['xesam:firstUsed'][0][1], + result['xesam:firstUsed'][0][2], + result['xesam:firstUsed'][1][0], + result['xesam:firstUsed'][1][1], + result['xesam:firstUsed'][1][2] + ) + + if result['xesam:lastUsed'][0][0] != 0: + last_played = datetime( + result['xesam:lastUsed'][0][0], + result['xesam:lastUsed'][0][1], + result['xesam:lastUsed'][0][2], + result['xesam:lastUsed'][1][0], + result['xesam:lastUsed'][1][1], + result['xesam:lastUsed'][1][2] + ) + else: + last_played = first_played + + item.amarok_firstplayed = mktime(first_played.timetuple()) + item.amarok_lastplayed = mktime(last_played.timetuple()) diff --git a/libs/beetsplug/metasync/itunes.py b/libs/beetsplug/metasync/itunes.py new file mode 100644 index 00000000..a6274684 --- /dev/null +++ b/libs/beetsplug/metasync/itunes.py @@ -0,0 +1,121 @@ +# -*- coding: utf-8 -*- +# This file is part of beets. +# Copyright 2016, Tom Jaspers. 
+# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. + +"""Synchronize information from iTunes's library +""" + +from __future__ import division, absolute_import, print_function + +from contextlib import contextmanager +import os +import shutil +import tempfile +import plistlib +import urllib +from urlparse import urlparse +from time import mktime + +from beets import util +from beets.dbcore import types +from beets.library import DateType +from beets.util.confit import ConfigValueError +from beetsplug.metasync import MetaSource + + +@contextmanager +def create_temporary_copy(path): + temp_dir = tempfile.mkdtemp() + temp_path = os.path.join(temp_dir, 'temp_itunes_lib') + shutil.copyfile(path, temp_path) + try: + yield temp_path + finally: + shutil.rmtree(temp_dir) + + +def _norm_itunes_path(path): + # Itunes prepends the location with 'file://' on posix systems, + # and with 'file://localhost/' on Windows systems. + # The actual path to the file is always saved as posix form + # E.g., 'file://Users/Music/bar' or 'file://localhost/G:/Music/bar' + + # The entire path will also be capitalized (e.g., '/Music/Alt-J') + # Note that this means the path will always have a leading separator, + # which is unwanted in the case of Windows systems. 
+ # E.g., '\\G:\\Music\\bar' needs to be stripped to 'G:\\Music\\bar' + + return util.bytestring_path(os.path.normpath( + urllib.unquote(urlparse(path).path)).lstrip('\\')).lower() + + +class Itunes(MetaSource): + + item_types = { + 'itunes_rating': types.INTEGER, # 0..100 scale + 'itunes_playcount': types.INTEGER, + 'itunes_skipcount': types.INTEGER, + 'itunes_lastplayed': DateType(), + 'itunes_lastskipped': DateType(), + } + + def __init__(self, config, log): + super(Itunes, self).__init__(config, log) + + config.add({'itunes': { + 'library': '~/Music/iTunes/iTunes Library.xml' + }}) + + # Load the iTunes library, which has to be the .xml one (not the .itl) + library_path = config['itunes']['library'].as_filename() + + try: + self._log.debug( + u'loading iTunes library from {0}'.format(library_path)) + with create_temporary_copy(library_path) as library_copy: + raw_library = plistlib.readPlist(library_copy) + except IOError as e: + raise ConfigValueError(u'invalid iTunes library: ' + e.strerror) + except Exception: + # It's likely the user configured their '.itl' library (<> xml) + if os.path.splitext(library_path)[1].lower() != '.xml': + hint = u': please ensure that the configured path' \ + u' points to the .XML library' + else: + hint = '' + raise ConfigValueError(u'invalid iTunes library' + hint) + + # Make the iTunes library queryable using the path + self.collection = {_norm_itunes_path(track['Location']): track + for track in raw_library['Tracks'].values() + if 'Location' in track} + + def sync_from_source(self, item): + result = self.collection.get(util.bytestring_path(item.path).lower()) + + if not result: + self._log.warning(u'no iTunes match found for {0}'.format(item)) + return + + item.itunes_rating = result.get('Rating') + item.itunes_playcount = result.get('Play Count') + item.itunes_skipcount = result.get('Skip Count') + + if result.get('Play Date UTC'): + item.itunes_lastplayed = mktime( + result.get('Play Date UTC').timetuple()) + + if 
result.get('Skip Date'): + item.itunes_lastskipped = mktime( + result.get('Skip Date').timetuple()) diff --git a/libs/beetsplug/missing.py b/libs/beetsplug/missing.py new file mode 100644 index 00000000..8fff659f --- /dev/null +++ b/libs/beetsplug/missing.py @@ -0,0 +1,146 @@ +# -*- coding: utf-8 -*- +# This file is part of beets. +# Copyright 2016, Pedro Silva. +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. + +"""List missing tracks. +""" +from __future__ import division, absolute_import, print_function + +from beets.autotag import hooks +from beets.library import Item +from beets.plugins import BeetsPlugin +from beets.ui import decargs, print_, Subcommand +from beets import config + + +def _missing_count(album): + """Return number of missing items in `album`. + """ + return (album.albumtotal or 0) - len(album.items()) + + +def _item(track_info, album_info, album_id): + """Build and return `item` from `track_info` and `album info` + objects. `item` is missing what fields cannot be obtained from + MusicBrainz alone (encoder, rg_track_gain, rg_track_peak, + rg_album_gain, rg_album_peak, original_year, original_month, + original_day, length, bitrate, format, samplerate, bitdepth, + channels, mtime.) 
+ """ + t = track_info + a = album_info + + return Item(**{ + 'album_id': album_id, + 'album': a.album, + 'albumartist': a.artist, + 'albumartist_credit': a.artist_credit, + 'albumartist_sort': a.artist_sort, + 'albumdisambig': a.albumdisambig, + 'albumstatus': a.albumstatus, + 'albumtype': a.albumtype, + 'artist': t.artist, + 'artist_credit': t.artist_credit, + 'artist_sort': t.artist_sort, + 'asin': a.asin, + 'catalognum': a.catalognum, + 'comp': a.va, + 'country': a.country, + 'day': a.day, + 'disc': t.medium, + 'disctitle': t.disctitle, + 'disctotal': a.mediums, + 'label': a.label, + 'language': a.language, + 'length': t.length, + 'mb_albumid': a.album_id, + 'mb_artistid': t.artist_id, + 'mb_releasegroupid': a.releasegroup_id, + 'mb_trackid': t.track_id, + 'media': t.media, + 'month': a.month, + 'script': a.script, + 'title': t.title, + 'track': t.index, + 'tracktotal': len(a.tracks), + 'year': a.year, + }) + + +class MissingPlugin(BeetsPlugin): + """List missing tracks + """ + def __init__(self): + super(MissingPlugin, self).__init__() + + self.config.add({ + 'count': False, + 'total': False, + }) + + self.album_template_fields['missing'] = _missing_count + + self._command = Subcommand('missing', + help=__doc__, + aliases=['miss']) + self._command.parser.add_option( + u'-c', u'--count', dest='count', action='store_true', + help=u'count missing tracks per album') + self._command.parser.add_option( + u'-t', u'--total', dest='total', action='store_true', + help=u'count total of missing tracks') + self._command.parser.add_format_option() + + def commands(self): + def _miss(lib, opts, args): + self.config.set_args(opts) + count = self.config['count'].get() + total = self.config['total'].get() + fmt = config['format_album' if count else 'format_item'].get() + + albums = lib.albums(decargs(args)) + if total: + print(sum([_missing_count(a) for a in albums])) + return + + # Default format string for count mode. 
+ if count: + fmt += ': $missing' + + for album in albums: + if count: + if _missing_count(album): + print_(format(album, fmt)) + + else: + for item in self._missing(album): + print_(format(item, fmt)) + + self._command.func = _miss + return [self._command] + + def _missing(self, album): + """Query MusicBrainz to determine items missing from `album`. + """ + item_mbids = map(lambda x: x.mb_trackid, album.items()) + if len([i for i in album.items()]) < album.albumtotal: + # fetch missing items + # TODO: Implement caching that without breaking other stuff + album_info = hooks.album_for_mbid(album.mb_albumid) + for track_info in getattr(album_info, 'tracks', []): + if track_info.track_id not in item_mbids: + item = _item(track_info, album_info, album.id) + self._log.debug(u'track {0} in album {1}', + track_info.track_id, album_info.album_id) + yield item diff --git a/libs/beetsplug/mpdstats.py b/libs/beetsplug/mpdstats.py new file mode 100644 index 00000000..2b642294 --- /dev/null +++ b/libs/beetsplug/mpdstats.py @@ -0,0 +1,368 @@ +# -*- coding: utf-8 -*- +# This file is part of beets. +# Copyright 2016, Peter Schnebel and Johann Klähn. +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. 
+ +from __future__ import division, absolute_import, print_function + +import mpd +import socket +import select +import time +import os + +from beets import ui +from beets import config +from beets import plugins +from beets import library +from beets.util import displayable_path +from beets.dbcore import types + +# If we lose the connection, how many times do we want to retry and how +# much time should we wait between retries? +RETRIES = 10 +RETRY_INTERVAL = 5 + + +mpd_config = config['mpd'] + + +def is_url(path): + """Try to determine if the path is an URL. + """ + return path.split('://', 1)[0] in ['http', 'https'] + + +# Use the MPDClient internals to get unicode. +# see http://www.tarmack.eu/code/mpdunicode.py for the general idea +class MPDClient(mpd.MPDClient): + def _write_command(self, command, args=[]): + args = [unicode(arg).encode('utf-8') for arg in args] + super(MPDClient, self)._write_command(command, args) + + def _read_line(self): + line = super(MPDClient, self)._read_line() + if line is not None: + return line.decode('utf-8') + return None + + +class MPDClientWrapper(object): + def __init__(self, log): + self._log = log + + self.music_directory = ( + mpd_config['music_directory'].get(unicode)) + + self.client = MPDClient() + + def connect(self): + """Connect to the MPD. + """ + host = mpd_config['host'].get(unicode) + port = mpd_config['port'].get(int) + + if host[0] in ['/', '~']: + host = os.path.expanduser(host) + + self._log.info(u'connecting to {0}:{1}', host, port) + try: + self.client.connect(host, port) + except socket.error as e: + raise ui.UserError(u'could not connect to MPD: {0}'.format(e)) + + password = mpd_config['password'].get(unicode) + if password: + try: + self.client.password(password) + except mpd.CommandError as e: + raise ui.UserError( + u'could not authenticate to MPD: {0}'.format(e) + ) + + def disconnect(self): + """Disconnect from the MPD. 
+ """ + self.client.close() + self.client.disconnect() + + def get(self, command, retries=RETRIES): + """Wrapper for requests to the MPD server. Tries to re-connect if the + connection was lost (f.ex. during MPD's library refresh). + """ + try: + return getattr(self.client, command)() + except (select.error, mpd.ConnectionError) as err: + self._log.error(u'{0}', err) + + if retries <= 0: + # if we exited without breaking, we couldn't reconnect in time :( + raise ui.UserError(u'communication with MPD server failed') + + time.sleep(RETRY_INTERVAL) + + try: + self.disconnect() + except mpd.ConnectionError: + pass + + self.connect() + return self.get(command, retries=retries - 1) + + def playlist(self): + """Return the currently active playlist. Prefixes paths with the + music_directory, to get the absolute path. + """ + result = {} + for entry in self.get('playlistinfo'): + if not is_url(entry['file']): + result[entry['id']] = os.path.join( + self.music_directory, entry['file']) + else: + result[entry['id']] = entry['file'] + return result + + def status(self): + """Return the current status of the MPD. + """ + return self.get('status') + + def events(self): + """Return list of events. This may block a long time while waiting for + an answer from MPD. + """ + return self.get('idle') + + +class MPDStats(object): + def __init__(self, lib, log): + self.lib = lib + self._log = log + + self.do_rating = mpd_config['rating'].get(bool) + self.rating_mix = mpd_config['rating_mix'].get(float) + self.time_threshold = 10.0 # TODO: maybe add config option? + + self.now_playing = None + self.mpd = MPDClientWrapper(log) + + def rating(self, play_count, skip_count, rating, skipped): + """Calculate a new rating for a song based on play count, skip count, + old rating and the fact if it was skipped or not. 
+ """ + if skipped: + rolling = (rating - rating / 2.0) + else: + rolling = (rating + (1.0 - rating) / 2.0) + stable = (play_count + 1.0) / (play_count + skip_count + 2.0) + return (self.rating_mix * stable + + (1.0 - self.rating_mix) * rolling) + + def get_item(self, path): + """Return the beets item related to path. + """ + query = library.PathQuery('path', path) + item = self.lib.items(query).get() + if item: + return item + else: + self._log.info(u'item not found: {0}', displayable_path(path)) + + def update_item(self, item, attribute, value=None, increment=None): + """Update the beets item. Set attribute to value or increment the value + of attribute. If the increment argument is used the value is cast to + the corresponding type. + """ + if item is None: + return + + if increment is not None: + item.load() + value = type(increment)(item.get(attribute, 0)) + increment + + if value is not None: + item[attribute] = value + item.store() + + self._log.debug(u'updated: {0} = {1} [{2}]', + attribute, + item[attribute], + displayable_path(item.path)) + + def update_rating(self, item, skipped): + """Update the rating for a beets item. The `item` can either be a + beets `Item` or None. If the item is None, nothing changes. + """ + if item is None: + return + + item.load() + rating = self.rating( + int(item.get('play_count', 0)), + int(item.get('skip_count', 0)), + float(item.get('rating', 0.5)), + skipped) + + self.update_item(item, 'rating', rating) + + def handle_song_change(self, song): + """Determine if a song was skipped or not and update its attributes. + To this end the difference between the song's supposed end time + and the current time is calculated. If it's greater than a threshold, + the song is considered skipped. 
+ + Returns whether the change was manual (skipped previous song or not) + """ + diff = abs(song['remaining'] - (time.time() - song['started'])) + + skipped = diff >= self.time_threshold + + if skipped: + self.handle_skipped(song) + else: + self.handle_played(song) + + if self.do_rating: + self.update_rating(song['beets_item'], skipped) + + return skipped + + def handle_played(self, song): + """Updates the play count of a song. + """ + self.update_item(song['beets_item'], 'play_count', increment=1) + self._log.info(u'played {0}', displayable_path(song['path'])) + + def handle_skipped(self, song): + """Updates the skip count of a song. + """ + self.update_item(song['beets_item'], 'skip_count', increment=1) + self._log.info(u'skipped {0}', displayable_path(song['path'])) + + def on_stop(self, status): + self._log.info(u'stop') + + if self.now_playing: + self.handle_song_change(self.now_playing) + + self.now_playing = None + + def on_pause(self, status): + self._log.info(u'pause') + self.now_playing = None + + def on_play(self, status): + playlist = self.mpd.playlist() + path = playlist.get(status['songid']) + + if not path: + return + + if is_url(path): + self._log.info(u'playing stream {0}', displayable_path(path)) + return + + played, duration = map(int, status['time'].split(':', 1)) + remaining = duration - played + + if self.now_playing and self.now_playing['path'] != path: + skipped = self.handle_song_change(self.now_playing) + # mpd responds twice on a natural new song start + going_to_happen_twice = not skipped + else: + going_to_happen_twice = False + + if not going_to_happen_twice: + self._log.info(u'playing {0}', displayable_path(path)) + + self.now_playing = { + 'started': time.time(), + 'remaining': remaining, + 'path': path, + 'beets_item': self.get_item(path), + } + + self.update_item(self.now_playing['beets_item'], + 'last_played', value=int(time.time())) + + def run(self): + self.mpd.connect() + events = ['player'] + + while True: + if 'player' in 
events: + status = self.mpd.status() + + handler = getattr(self, 'on_' + status['state'], None) + + if handler: + handler(status) + else: + self._log.debug(u'unhandled status "{0}"', status) + + events = self.mpd.events() + + +class MPDStatsPlugin(plugins.BeetsPlugin): + + item_types = { + 'play_count': types.INTEGER, + 'skip_count': types.INTEGER, + 'last_played': library.DateType(), + 'rating': types.FLOAT, + } + + def __init__(self): + super(MPDStatsPlugin, self).__init__() + mpd_config.add({ + 'music_directory': config['directory'].as_filename(), + 'rating': True, + 'rating_mix': 0.75, + 'host': u'localhost', + 'port': 6600, + 'password': u'', + }) + mpd_config['password'].redact = True + + def commands(self): + cmd = ui.Subcommand( + 'mpdstats', + help=u'run a MPD client to gather play statistics') + cmd.parser.add_option( + u'--host', dest='host', type='string', + help=u'set the hostname of the server to connect to') + cmd.parser.add_option( + u'--port', dest='port', type='int', + help=u'set the port of the MPD server to connect to') + cmd.parser.add_option( + u'--password', dest='password', type='string', + help=u'set the password of the MPD server to connect to') + + def func(lib, opts, args): + mpd_config.set_args(opts) + + # Overrides for MPD settings. + if opts.host: + mpd_config['host'] = opts.host.decode('utf8') + if opts.port: + mpd_config['host'] = int(opts.port) + if opts.password: + mpd_config['password'] = opts.password.decode('utf8') + + try: + MPDStats(lib, self._log).run() + except KeyboardInterrupt: + pass + + cmd.func = func + return [cmd] diff --git a/libs/beetsplug/mpdupdate.py b/libs/beetsplug/mpdupdate.py new file mode 100644 index 00000000..f828ba5d --- /dev/null +++ b/libs/beetsplug/mpdupdate.py @@ -0,0 +1,128 @@ +# -*- coding: utf-8 -*- +# This file is part of beets. +# Copyright 2016, Adrian Sampson. 
+# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. + +"""Updates an MPD index whenever the library is changed. + +Put something like the following in your config.yaml to configure: + mpd: + host: localhost + port: 6600 + password: seekrit +""" +from __future__ import division, absolute_import, print_function + +from beets.plugins import BeetsPlugin +import os +import socket +from beets import config + + +# No need to introduce a dependency on an MPD library for such a +# simple use case. Here's a simple socket abstraction to make things +# easier. 
+class BufferedSocket(object): + """Socket abstraction that allows reading by line.""" + def __init__(self, host, port, sep='\n'): + if host[0] in ['/', '~']: + self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + self.sock.connect(os.path.expanduser(host)) + else: + self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + self.sock.connect((host, port)) + self.buf = '' + self.sep = sep + + def readline(self): + while self.sep not in self.buf: + data = self.sock.recv(1024) + if not data: + break + self.buf += data + if '\n' in self.buf: + res, self.buf = self.buf.split(self.sep, 1) + return res + self.sep + else: + return '' + + def send(self, data): + self.sock.send(data) + + def close(self): + self.sock.close() + + +class MPDUpdatePlugin(BeetsPlugin): + def __init__(self): + super(MPDUpdatePlugin, self).__init__() + config['mpd'].add({ + 'host': u'localhost', + 'port': 6600, + 'password': u'', + }) + config['mpd']['password'].redact = True + + # For backwards compatibility, use any values from the + # plugin-specific "mpdupdate" section. + for key in config['mpd'].keys(): + if self.config[key].exists(): + config['mpd'][key] = self.config[key].get() + + self.register_listener('database_change', self.db_change) + + def db_change(self, lib, model): + self.register_listener('cli_exit', self.update) + + def update(self, lib): + self.update_mpd( + config['mpd']['host'].get(unicode), + config['mpd']['port'].get(int), + config['mpd']['password'].get(unicode), + ) + + def update_mpd(self, host='localhost', port=6600, password=None): + """Sends the "update" command to the MPD server indicated, + possibly authenticating with a password first. 
+ """ + self._log.info('Updating MPD database...') + + try: + s = BufferedSocket(host, port) + except socket.error as e: + self._log.warning(u'MPD connection failed: {0}', + unicode(e.strerror)) + return + + resp = s.readline() + if 'OK MPD' not in resp: + self._log.warning(u'MPD connection failed: {0!r}', resp) + return + + if password: + s.send('password "%s"\n' % password) + resp = s.readline() + if 'OK' not in resp: + self._log.warning(u'Authentication failed: {0!r}', resp) + s.send('close\n') + s.close() + return + + s.send('update\n') + resp = s.readline() + if 'updating_db' not in resp: + self._log.warning(u'Update failed: {0!r}', resp) + + s.send('close\n') + s.close() + self._log.info(u'Database updated.') diff --git a/libs/beetsplug/permissions.py b/libs/beetsplug/permissions.py new file mode 100644 index 00000000..0de8978c --- /dev/null +++ b/libs/beetsplug/permissions.py @@ -0,0 +1,102 @@ +# -*- coding: utf-8 -*- + +from __future__ import division, absolute_import, print_function + +"""Fixes file permissions after the file gets written on import. Put something +like the following in your config.yaml to configure: + + permissions: + file: 644 + dir: 755 +""" +import os +from beets import config, util +from beets.plugins import BeetsPlugin +from beets.util import ancestry + + +def convert_perm(perm): + """If the perm is a int it will first convert it to a string and back + to an oct int. Else it just converts it to oct. + """ + if isinstance(perm, int): + return int(bytes(perm), 8) + else: + return int(perm, 8) + + +def check_permissions(path, permission): + """Checks the permissions of a path. + """ + return oct(os.stat(path).st_mode & 0o777) == oct(permission) + + +def dirs_in_library(library, item): + """Creates a list of ancestor directories in the beets library path. 
+ """ + return [ancestor + for ancestor in ancestry(item) + if ancestor.startswith(library)][1:] + + +class Permissions(BeetsPlugin): + def __init__(self): + super(Permissions, self).__init__() + + # Adding defaults. + self.config.add({ + u'file': 644, + u'dir': 755 + }) + + self.register_listener('item_imported', permissions) + self.register_listener('album_imported', permissions) + + +def permissions(lib, item=None, album=None): + """Running the permission fixer. + """ + # Getting the config. + file_perm = config['permissions']['file'].get() + dir_perm = config['permissions']['dir'].get() + + # Converts permissions to oct. + file_perm = convert_perm(file_perm) + dir_perm = convert_perm(dir_perm) + + # Create chmod_queue. + file_chmod_queue = [] + if item: + file_chmod_queue.append(item.path) + elif album: + for album_item in album.items(): + file_chmod_queue.append(album_item.path) + + # A set of directories to change permissions for. + dir_chmod_queue = set() + + for path in file_chmod_queue: + # Changing permissions on the destination file. + os.chmod(util.bytestring_path(path), file_perm) + + # Checks if the destination path has the permissions configured. + if not check_permissions(util.bytestring_path(path), file_perm): + message = u'There was a problem setting permission on {}'.format( + path) + print(message) + + # Adding directories to the directory chmod queue. + dir_chmod_queue.update( + dirs_in_library(lib.directory, + path)) + + # Change permissions for the directories. + for path in dir_chmod_queue: + # Chaning permissions on the destination directory. + os.chmod(util.bytestring_path(path), dir_perm) + + # Checks if the destination path has the permissions configured. 
+ if not check_permissions(util.bytestring_path(path), dir_perm): + message = u'There was a problem setting permission on {}'.format( + path) + print(message) diff --git a/libs/beetsplug/play.py b/libs/beetsplug/play.py new file mode 100644 index 00000000..fa70f2bc --- /dev/null +++ b/libs/beetsplug/play.py @@ -0,0 +1,155 @@ +# -*- coding: utf-8 -*- +# This file is part of beets. +# Copyright 2016, David Hamp-Gonsalves +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. + +"""Send the results of a query to the configured music player as a playlist. +""" +from __future__ import division, absolute_import, print_function + +from beets.plugins import BeetsPlugin +from beets.ui import Subcommand +from beets import config +from beets import ui +from beets import util +from os.path import relpath +from tempfile import NamedTemporaryFile + +# Indicate where arguments should be inserted into the command string. +# If this is missing, they're placed at the end. +ARGS_MARKER = '$args' + + +class PlayPlugin(BeetsPlugin): + + def __init__(self): + super(PlayPlugin, self).__init__() + + config['play'].add({ + 'command': None, + 'use_folders': False, + 'relative_to': None, + 'raw': False, + # Backwards compatibility. 
See #1803 and line 74 + 'warning_threshold': -2, + 'warning_treshold': 100, + }) + + def commands(self): + play_command = Subcommand( + 'play', + help=u'send music to a player as a playlist' + ) + play_command.parser.add_album_option() + play_command.parser.add_option( + u'-A', u'--args', + action='store', + help=u'add additional arguments to the command', + ) + play_command.func = self.play_music + return [play_command] + + def play_music(self, lib, opts, args): + """Execute query, create temporary playlist and execute player + command passing that playlist, at request insert optional arguments. + """ + command_str = config['play']['command'].get() + if not command_str: + command_str = util.open_anything() + use_folders = config['play']['use_folders'].get(bool) + relative_to = config['play']['relative_to'].get() + raw = config['play']['raw'].get(bool) + warning_threshold = config['play']['warning_threshold'].get(int) + # We use -2 as a default value for warning_threshold to detect if it is + # set or not. We can't use a falsey value because it would have an + # actual meaning in the configuration of this plugin, and we do not use + # -1 because some people might use it as a value to obtain no warning, + # which wouldn't be that bad of a practice. + if warning_threshold == -2: + # if warning_threshold has not been set by user, look for + # warning_treshold, to preserve backwards compatibility. See #1803. + # warning_treshold has the correct default value of 100. + warning_threshold = config['play']['warning_treshold'].get(int) + + if relative_to: + relative_to = util.normpath(relative_to) + + # Add optional arguments to the player command. + if opts.args: + if ARGS_MARKER in command_str: + command_str = command_str.replace(ARGS_MARKER, opts.args) + else: + command_str = u"{} {}".format(command_str, opts.args) + + # Perform search by album and add folders rather than tracks to + # playlist. 
+ if opts.album: + selection = lib.albums(ui.decargs(args)) + paths = [] + + sort = lib.get_default_album_sort() + for album in selection: + if use_folders: + paths.append(album.item_dir()) + else: + paths.extend(item.path + for item in sort.sort(album.items())) + item_type = 'album' + + # Perform item query and add tracks to playlist. + else: + selection = lib.items(ui.decargs(args)) + paths = [item.path for item in selection] + if relative_to: + paths = [relpath(path, relative_to) for path in paths] + item_type = 'track' + + item_type += 's' if len(selection) > 1 else '' + + if not selection: + ui.print_(ui.colorize('text_warning', + u'No {0} to play.'.format(item_type))) + return + + # Warn user before playing any huge playlists. + if warning_threshold and len(selection) > warning_threshold: + ui.print_(ui.colorize( + 'text_warning', + u'You are about to queue {0} {1}.'.format( + len(selection), item_type))) + + if ui.input_options(('Continue', 'Abort')) == 'a': + return + + ui.print_(u'Playing {0} {1}.'.format(len(selection), item_type)) + if raw: + open_args = paths + else: + open_args = [self._create_tmp_playlist(paths)] + + self._log.debug(u'executing command: {} {}', command_str, + b' '.join(open_args)) + try: + util.interactive_open(open_args, command_str) + except OSError as exc: + raise ui.UserError( + "Could not play the query: {0}".format(exc)) + + def _create_tmp_playlist(self, paths_list): + """Create a temporary .m3u file. Return the filename. + """ + m3u = NamedTemporaryFile('w', suffix='.m3u', delete=False) + for item in paths_list: + m3u.write(item + b'\n') + m3u.close() + return m3u.name diff --git a/libs/beetsplug/plexupdate.py b/libs/beetsplug/plexupdate.py new file mode 100644 index 00000000..ef50fde7 --- /dev/null +++ b/libs/beetsplug/plexupdate.py @@ -0,0 +1,92 @@ +# -*- coding: utf-8 -*- + +"""Updates an Plex library whenever the beets library is changed. + +Plex Home users enter the Plex Token to enable updating. 
+Put something like the following in your config.yaml to configure: + plex: + host: localhost + port: 32400 + token: token +""" +from __future__ import division, absolute_import, print_function + +import requests +from urlparse import urljoin +from urllib import urlencode +import xml.etree.ElementTree as ET +from beets import config +from beets.plugins import BeetsPlugin + + +def get_music_section(host, port, token, library_name): + """Getting the section key for the music library in Plex. + """ + api_endpoint = append_token('library/sections', token) + url = urljoin('http://{0}:{1}'.format(host, port), api_endpoint) + + # Sends request. + r = requests.get(url) + + # Parse xml tree and extract music section key. + tree = ET.fromstring(r.content) + for child in tree.findall('Directory'): + if child.get('title') == library_name: + return child.get('key') + + +def update_plex(host, port, token, library_name): + """Sends request to the Plex api to start a library refresh. + """ + # Getting section key and build url. + section_key = get_music_section(host, port, token, library_name) + api_endpoint = 'library/sections/{0}/refresh'.format(section_key) + api_endpoint = append_token(api_endpoint, token) + url = urljoin('http://{0}:{1}'.format(host, port), api_endpoint) + + # Sends request and returns requests object. + r = requests.get(url) + return r + + +def append_token(url, token): + """Appends the Plex Home token to the api call if required. + """ + if token: + url += '?' + urlencode({'X-Plex-Token': token}) + return url + + +class PlexUpdate(BeetsPlugin): + def __init__(self): + super(PlexUpdate, self).__init__() + + # Adding defaults. 
+ config['plex'].add({ + u'host': u'localhost', + u'port': 32400, + u'token': u'', + u'library_name': u'Music'}) + + self.register_listener('database_change', self.listen_for_db_change) + + def listen_for_db_change(self, lib, model): + """Listens for beets db change and register the update for the end""" + self.register_listener('cli_exit', self.update) + + def update(self, lib): + """When the client exists try to send refresh request to Plex server. + """ + self._log.info(u'Updating Plex library...') + + # Try to send update request. + try: + update_plex( + config['plex']['host'].get(), + config['plex']['port'].get(), + config['plex']['token'].get(), + config['plex']['library_name'].get()) + self._log.info(u'... started.') + + except requests.exceptions.RequestException: + self._log.warning(u'Update failed.') diff --git a/libs/beetsplug/random.py b/libs/beetsplug/random.py new file mode 100644 index 00000000..e1c6fea4 --- /dev/null +++ b/libs/beetsplug/random.py @@ -0,0 +1,81 @@ +# -*- coding: utf-8 -*- +# This file is part of beets. +# Copyright 2016, Philippe Mongeau. +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. + +"""Get a random song or album from the library. 
+""" +from __future__ import division, absolute_import, print_function + +from beets.plugins import BeetsPlugin +from beets.ui import Subcommand, decargs, print_ +import random +from operator import attrgetter +from itertools import groupby + + +def random_item(lib, opts, args): + query = decargs(args) + + if opts.album: + objs = list(lib.albums(query)) + else: + objs = list(lib.items(query)) + + if opts.equal_chance: + # Group the objects by artist so we can sample from them. + key = attrgetter('albumartist') + objs.sort(key=key) + objs_by_artists = {} + for artist, v in groupby(objs, key): + objs_by_artists[artist] = list(v) + + objs = [] + for _ in range(opts.number): + # Terminate early if we're out of objects to select. + if not objs_by_artists: + break + + # Choose an artist and an object for that artist, removing + # this choice from the pool. + artist = random.choice(objs_by_artists.keys()) + objs_from_artist = objs_by_artists[artist] + i = random.randint(0, len(objs_from_artist) - 1) + objs.append(objs_from_artist.pop(i)) + + # Remove the artist if we've used up all of its objects. 
+ if not objs_from_artist: + del objs_by_artists[artist] + + else: + number = min(len(objs), opts.number) + objs = random.sample(objs, number) + + for item in objs: + print_(format(item)) + +random_cmd = Subcommand('random', + help=u'chose a random track or album') +random_cmd.parser.add_option( + u'-n', u'--number', action='store', type="int", + help=u'number of objects to choose', default=1) +random_cmd.parser.add_option( + u'-e', u'--equal-chance', action='store_true', + help=u'each artist has the same chance') +random_cmd.parser.add_all_common_options() +random_cmd.func = random_item + + +class Random(BeetsPlugin): + def commands(self): + return [random_cmd] diff --git a/libs/beetsplug/replaygain.py b/libs/beetsplug/replaygain.py new file mode 100644 index 00000000..7bb2aa39 --- /dev/null +++ b/libs/beetsplug/replaygain.py @@ -0,0 +1,953 @@ +# -*- coding: utf-8 -*- +# This file is part of beets. +# Copyright 2016, Fabrice Laporte, Yevgeny Bezman, and Adrian Sampson. +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. + +from __future__ import division, absolute_import, print_function + +import subprocess +import os +import collections +import itertools +import sys +import warnings +import re + +from beets import logging +from beets import ui +from beets.plugins import BeetsPlugin +from beets.util import syspath, command_output, displayable_path + + +# Utilities. 
class ReplayGainError(Exception):
    """Raised when a local (to a track or an album) error occurs in one
    of the backends.
    """


class FatalReplayGainError(Exception):
    """Raised when a fatal error occurs in one of the backends.
    """


class FatalGstreamerPluginReplayGainError(FatalReplayGainError):
    """Raised when a fatal error occurs in the GStreamerBackend when
    loading the required plugins."""


def call(args):
    """Execute the command and return its output or raise a
    ReplayGainError on failure.
    """
    try:
        return command_output(args)
    except subprocess.CalledProcessError as e:
        raise ReplayGainError(
            u"{0} exited with status {1}".format(args[0], e.returncode)
        )
    except UnicodeEncodeError:
        # Due to a bug in Python 2's subprocess on Windows, Unicode
        # filenames can fail to encode on that platform. See:
        # http://code.google.com/p/beets/issues/detail?id=499
        raise ReplayGainError(u"argument encoding failed")


# Backend base and plumbing classes.

# A single gain/peak result pair, and an album result bundling the
# album-level gain with the per-track gains.
Gain = collections.namedtuple("Gain", "gain peak")
AlbumGain = collections.namedtuple("AlbumGain", "album_gain track_gains")


class Backend(object):
    """An abstract class representing engine for calculating RG values.
    """

    def __init__(self, config, log):
        """Initialize the backend with the configuration view for the
        plugin.
        """
        self._log = log

    def compute_track_gain(self, items):
        raise NotImplementedError()

    def compute_album_gain(self, album):
        # TODO: implement album gain in terms of track gain of the
        # individual tracks which can be used for any backend.
        raise NotImplementedError()


# bs1770gain backend
class Bs1770gainBackend(Backend):
    """bs1770gain is a loudness scanner compliant with ITU-R BS.1770 and
    its flavors EBU R128, ATSC A/85 and Replaygain 2.0.
    """

    def __init__(self, config, log):
        super(Bs1770gainBackend, self).__init__(config, log)
        config.add({
            'chunk_at': 5000,
            'method': 'replaygain',
        })
        self.chunk_at = config['chunk_at'].as_number()
        # NOTE: bytes()/unicode here are Python 2 semantics (bytes is str).
        self.method = b'--' + bytes(config['method'].get(unicode))

        cmd = b'bs1770gain'
        try:
            # Probe that the tool exists and accepts the configured method.
            call([cmd, self.method])
            self.command = cmd
        except OSError:
            raise FatalReplayGainError(
                u'Is bs1770gain installed? Is your method in config correct?'
            )
        if not self.command:
            raise FatalReplayGainError(
                u'no replaygain command found: install bs1770gain'
            )

    def compute_track_gain(self, items):
        """Computes the track gain of the given tracks, returns a list
        of TrackGain objects.
        """
        output = self.compute_gain(items, False)
        return output

    def compute_album_gain(self, album):
        """Computes the album gain of the given album, returns an
        AlbumGain object.
        """
        # TODO: What should be done when not all tracks in the album are
        # supported?

        supported_items = album.items()
        output = self.compute_gain(supported_items, True)

        if not output:
            raise ReplayGainError(u'no output from bs1770gain')
        return AlbumGain(output[-1], output[:-1])

    def isplitter(self, items, chunk_at):
        """Break an iterable into chunks of at most size `chunk_at`,
        generating lists for each chunk.
        """
        iterable = iter(items)
        while True:
            result = []
            for i in range(chunk_at):
                try:
                    a = next(iterable)
                except StopIteration:
                    break
                else:
                    result.append(a)
            if result:
                yield result
            else:
                break

    def compute_gain(self, items, is_album):
        """Computes the track or album gain of a list of items, returns
        a list of TrackGain objects.

        When computing album gain, the last TrackGain object returned is
        the album gain
        """
        if len(items) == 0:
            return []

        albumgaintot = 0.0
        albumpeaktot = 0.0
        returnchunks = []

        # In the case of very large sets of music, we break the tracks
        # into smaller chunks and process them one at a time. This
        # avoids running out of memory.
        if len(items) > self.chunk_at:
            i = 0
            for chunk in self.isplitter(items, self.chunk_at):
                i += 1
                returnchunk = self.compute_chunk_gain(chunk, is_album)
                albumgaintot += returnchunk[-1].gain
                albumpeaktot += returnchunk[-1].peak
                returnchunks = returnchunks + returnchunk[0:-1]
            # Average the per-chunk album results into one album entry.
            returnchunks.append(Gain(albumgaintot / i, albumpeaktot / i))
            return returnchunks
        else:
            return self.compute_chunk_gain(items, is_album)

    def compute_chunk_gain(self, items, is_album):
        """Compute ReplayGain values and return a list of results
        dictionaries as given by `parse_tool_output`.
        """
        # Construct shell command.
        cmd = [self.command]
        cmd = cmd + [self.method]
        cmd = cmd + [b'-it']

        # Workaround for Windows: the underlying tool fails on paths
        # with the \\?\ prefix, so we don't use it here. This
        # prevents the backend from working with long paths.
        args = cmd + [syspath(i.path, prefix=False) for i in items]

        # Invoke the command.
        self._log.debug(
            u'executing {0}', u' '.join(map(displayable_path, args))
        )
        output = call(args)

        self._log.debug(u'analysis finished: {0}', output)
        results = self.parse_tool_output(output,
                                         len(items) + is_album)
        self._log.debug(u'{0} items, {1} results', len(items), len(results))
        return results

    def parse_tool_output(self, text, num_lines):
        """Given the output from bs1770gain, parse the text and
        return a list of dictionaries
        containing information about each analyzed file.
        """
        out = []
        data = text.decode('utf8', errors='ignore')
        # BUG FIX: the original used a `ur'...'` literal (a SyntaxError on
        # Python 3); plain raw strings match identically here.
        regex = re.compile(
            r'(\s{2,2}\[\d+\/\d+\].*?|\[ALBUM\].*?)'
            r'(?=\s{2,2}\[\d+\/\d+\]|\s{2,2}\[ALBUM\]'
            r':|done\.\s)', re.DOTALL | re.UNICODE)
        results = re.findall(regex, data)
        for parts in results[0:num_lines]:
            # BUG FIX: `data` is unicode, so split on a text newline (the
            # original split on b'\n', which only worked under Python 2's
            # implicit coercion). Malformed entries are skipped via the
            # IndexError handler below.
            part = parts.split('\n')
            try:
                song = {
                    'file': part[0],
                    'gain': float(part[1].split('/')[1].split('LU')[0]),
                    'peak': float(part[2].split('/')[1]),
                }
            except IndexError:
                self._log.info(u'bs1770gain reports (faulty file?): {}', parts)
                continue

            out.append(Gain(song['gain'], song['peak']))
        return out


# mpgain/aacgain CLI tool backend.
class CommandBackend(Backend):
    """Compute gain by shelling out to the mp3gain/aacgain CLI tools."""

    def __init__(self, config, log):
        super(CommandBackend, self).__init__(config, log)
        config.add({
            'command': u"",
            'noclip': True,
        })

        self.command = config["command"].get(unicode)

        if self.command:
            # Explicit executable path.
            if not os.path.isfile(self.command):
                raise FatalReplayGainError(
                    u'replaygain command does not exist: {0}'.format(
                        self.command)
                )
        else:
            # Check whether the program is in $PATH.
            for cmd in (b'mp3gain', b'aacgain'):
                try:
                    call([cmd, b'-v'])
                    self.command = cmd
                except OSError:
                    pass
        if not self.command:
            raise FatalReplayGainError(
                u'no replaygain command found: install mp3gain or aacgain'
            )

        self.noclip = config['noclip'].get(bool)
        target_level = config['targetlevel'].as_number()
        self.gain_offset = int(target_level - 89)

    def compute_track_gain(self, items):
        """Computes the track gain of the given tracks, returns a list
        of TrackGain objects.
        """
        # BUG FIX: filter() is lazy on Python 3; materialize so that
        # len() works in compute_gain. Identical behavior on Python 2.
        supported_items = list(filter(self.format_supported, items))
        output = self.compute_gain(supported_items, False)
        return output

    def compute_album_gain(self, album):
        """Computes the album gain of the given album, returns an
        AlbumGain object.
        """
        # TODO: What should be done when not all tracks in the album are
        # supported?

        supported_items = list(filter(self.format_supported, album.items()))
        if len(supported_items) != len(album.items()):
            self._log.debug(u'tracks are of unsupported format')
            return AlbumGain(None, [])

        output = self.compute_gain(supported_items, True)
        return AlbumGain(output[-1], output[:-1])

    def format_supported(self, item):
        """Checks whether the given item is supported by the selected tool.
        """
        if 'mp3gain' in self.command and item.format != 'MP3':
            return False
        elif 'aacgain' in self.command and item.format not in ('MP3', 'AAC'):
            return False
        return True

    def compute_gain(self, items, is_album):
        """Computes the track or album gain of a list of items, returns
        a list of TrackGain objects.

        When computing album gain, the last TrackGain object returned is
        the album gain
        """
        if len(items) == 0:
            self._log.debug(u'no supported tracks to analyze')
            return []

        # Construct shell command. The "-o" option makes the output
        # easily parseable (tab-delimited). "-s s" forces gain
        # recalculation even if tags are already present and disables
        # tag-writing; this turns the mp3gain/aacgain tool into a gain
        # calculator rather than a tag manipulator because we take care
        # of changing tags ourselves.
        cmd = [self.command, b'-o', b'-s', b's']
        if self.noclip:
            # Adjust to avoid clipping.
            cmd = cmd + [b'-k']
        else:
            # Disable clipping warning.
            cmd = cmd + [b'-c']
        cmd = cmd + [b'-d', bytes(self.gain_offset)]
        cmd = cmd + [syspath(i.path) for i in items]

        self._log.debug(u'analyzing {0} files', len(items))
        self._log.debug(u"executing {0}", " ".join(map(displayable_path, cmd)))
        output = call(cmd)
        self._log.debug(u'analysis finished')
        return self.parse_tool_output(output,
                                      len(items) + (1 if is_album else 0))

    def parse_tool_output(self, text, num_lines):
        """Given the tab-delimited output from an invocation of mp3gain
        or aacgain, parse the text and return a list of dictionaries
        containing information about each analyzed file.
        """
        out = []
        # Skip the header line, then read at most num_lines data lines.
        for line in text.split(b'\n')[1:num_lines + 1]:
            parts = line.split(b'\t')
            if len(parts) != 6 or parts[0] == b'File':
                self._log.debug(u'bad tool output: {0}', text)
                raise ReplayGainError(u'mp3gain failed')
            d = {
                'file': parts[0],
                'mp3gain': int(parts[1]),
                'gain': float(parts[2]),
                # The tool reports peak as a 16-bit sample amplitude;
                # normalize to [0, 1].
                'peak': float(parts[3]) / (1 << 15),
                'maxgain': int(parts[4]),
                'mingain': int(parts[5]),

            }
            out.append(Gain(d['gain'], d['peak']))
        return out
        self._src = self.Gst.ElementFactory.make("filesrc", "src")
        self._decbin = self.Gst.ElementFactory.make("decodebin", "decbin")
        self._conv = self.Gst.ElementFactory.make("audioconvert", "conv")
        self._res = self.Gst.ElementFactory.make("audioresample", "res")
        self._rg = self.Gst.ElementFactory.make("rganalysis", "rg")

        if self._src is None or self._decbin is None or self._conv is None \
                or self._res is None or self._rg is None:
            raise FatalGstreamerPluginReplayGainError(
                u"Failed to load required GStreamer plugins"
            )

        # We check which files need gain ourselves, so all files given
        # to rganalysis should have their gain computed, even if it
        # already exists.
        self._rg.set_property("forced", True)
        self._rg.set_property("reference-level",
                              config["targetlevel"].as_number())
        self._sink = self.Gst.ElementFactory.make("fakesink", "sink")

        self._pipe = self.Gst.Pipeline()
        self._pipe.add(self._src)
        self._pipe.add(self._decbin)
        self._pipe.add(self._conv)
        self._pipe.add(self._res)
        self._pipe.add(self._rg)
        self._pipe.add(self._sink)

        # Static links; decodebin -> audioconvert is linked dynamically
        # in _on_pad_added once decodebin knows the stream type.
        self._src.link(self._decbin)
        self._conv.link(self._res)
        self._res.link(self._rg)
        self._rg.link(self._sink)

        self._bus = self._pipe.get_bus()
        self._bus.add_signal_watch()
        self._bus.connect("message::eos", self._on_eos)
        self._bus.connect("message::error", self._on_error)
        self._bus.connect("message::tag", self._on_tag)
        # Needed for handling the dynamic connection between decodebin
        # and audioconvert
        self._decbin.connect("pad-added", self._on_pad_added)
        self._decbin.connect("pad-removed", self._on_pad_removed)

        self._main_loop = self.GLib.MainLoop()

        # Queue of files remaining to be analyzed.
        self._files = []

    def _import_gst(self):
        """Import the necessary GObject-related modules and assign `Gst`
        and `GObject` fields on this object.
        """
        try:
            import gi
        except ImportError:
            raise FatalReplayGainError(
                u"Failed to load GStreamer: python-gi not found"
            )

        try:
            gi.require_version('Gst', '1.0')
        except ValueError as e:
            raise FatalReplayGainError(
                u"Failed to load GStreamer 1.0: {0}".format(e)
            )

        from gi.repository import GObject, Gst, GLib
        # Calling GObject.threads_init() is not needed for
        # PyGObject 3.10.2+
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            GObject.threads_init()
        Gst.init([sys.argv[0]])

        self.GObject = GObject
        self.GLib = GLib
        self.Gst = Gst

    def compute(self, files, album):
        """Run the pipeline over `files`, collecting per-file RG tags into
        self._file_tags; in album mode, rganalysis also emits album tags.
        """
        self._error = None
        self._files = list(files)

        if len(self._files) == 0:
            return

        self._file_tags = collections.defaultdict(dict)

        if album:
            self._rg.set_property("num-tracks", len(self._files))

        # The GLib main loop runs until _on_eos/_on_error stops it.
        if self._set_first_file():
            self._main_loop.run()
            if self._error is not None:
                raise self._error

    def compute_track_gain(self, items):
        """Compute per-track gain/peak for `items`; raises ReplayGainError
        when any track produced no tags.
        """
        self.compute(items, False)
        if len(self._file_tags) != len(items):
            raise ReplayGainError(u"Some tracks did not receive tags")

        ret = []
        for item in items:
            ret.append(Gain(self._file_tags[item]["TRACK_GAIN"],
                            self._file_tags[item]["TRACK_PEAK"]))

        return ret

    def compute_album_gain(self, album):
        """Compute an AlbumGain for `album` from the collected tags."""
        items = list(album.items())
        self.compute(items, True)
        if len(self._file_tags) != len(items):
            raise ReplayGainError(u"Some items in album did not receive tags")

        # Collect track gains.
        track_gains = []
        for item in items:
            try:
                gain = self._file_tags[item]["TRACK_GAIN"]
                peak = self._file_tags[item]["TRACK_PEAK"]
            except KeyError:
                raise ReplayGainError(u"results missing for track")
            track_gains.append(Gain(gain, peak))

        # Get album gain information from the last track.
        last_tags = self._file_tags[items[-1]]
        try:
            gain = last_tags["ALBUM_GAIN"]
            peak = last_tags["ALBUM_PEAK"]
        except KeyError:
            raise ReplayGainError(u"results missing for album")

        return AlbumGain(Gain(gain, peak), track_gains)

    def close(self):
        """Detach the bus signal watch when the backend is finished."""
        self._bus.remove_signal_watch()

    def _on_eos(self, bus, message):
        # A file finished playing in all elements of the pipeline. The
        # RG tags have already been propagated. If we don't have a next
        # file, we stop processing.
        if not self._set_next_file():
            self._pipe.set_state(self.Gst.State.NULL)
            self._main_loop.quit()

    def _on_error(self, bus, message):
        """Record the GStreamer error and stop the main loop; the error is
        re-raised from compute().
        """
        self._pipe.set_state(self.Gst.State.NULL)
        self._main_loop.quit()
        err, debug = message.parse_error()
        f = self._src.get_property("location")
        # A GStreamer error, either an unsupported format or a bug.
        self._error = ReplayGainError(
            u"Error {0!r} - {1!r} on file {2!r}".format(err, debug, f)
        )

    def _on_tag(self, bus, message):
        """Harvest RG tag values emitted by rganalysis into _file_tags."""
        tags = message.parse_tag()

        def handle_tag(taglist, tag, userdata):
            # The rganalysis element provides both the existing tags for
            # files and the newly computed tags. In order to ensure we
            # store the computed tags, we overwrite the RG values
            # received a second time.
            if tag == self.Gst.TAG_TRACK_GAIN:
                self._file_tags[self._file]["TRACK_GAIN"] = \
                    taglist.get_double(tag)[1]
            elif tag == self.Gst.TAG_TRACK_PEAK:
                self._file_tags[self._file]["TRACK_PEAK"] = \
                    taglist.get_double(tag)[1]
            elif tag == self.Gst.TAG_ALBUM_GAIN:
                self._file_tags[self._file]["ALBUM_GAIN"] = \
                    taglist.get_double(tag)[1]
            elif tag == self.Gst.TAG_ALBUM_PEAK:
                self._file_tags[self._file]["ALBUM_PEAK"] = \
                    taglist.get_double(tag)[1]
            elif tag == self.Gst.TAG_REFERENCE_LEVEL:
                self._file_tags[self._file]["REFERENCE_LEVEL"] = \
                    taglist.get_double(tag)[1]

        tags.foreach(handle_tag, None)

    def _set_first_file(self):
        """Load the first queued file into the pipeline and start playing.
        Return False when the queue is empty.
        """
        if len(self._files) == 0:
            return False

        self._file = self._files.pop(0)
        self._pipe.set_state(self.Gst.State.NULL)
        self._src.set_property("location", syspath(self._file.path))
        self._pipe.set_state(self.Gst.State.PLAYING)
        return True

    def _set_file(self):
        """Initialize the filesrc element with the next file to be analyzed.
        """
        # No more files, we're done
        if len(self._files) == 0:
            return False

        self._file = self._files.pop(0)

        # Disconnect the decodebin element from the pipeline, set its
        # state to READY to clear it.
        self._decbin.unlink(self._conv)
        self._decbin.set_state(self.Gst.State.READY)

        # Set a new file on the filesrc element, can only be done in the
        # READY state
        self._src.set_state(self.Gst.State.READY)
        self._src.set_property("location", syspath(self._file.path))

        # Ensure the filesrc element received the paused state of the
        # pipeline in a blocking manner
        self._src.sync_state_with_parent()
        self._src.get_state(self.Gst.CLOCK_TIME_NONE)

        # Ensure the decodebin element receives the paused state of the
        # pipeline in a blocking manner
        self._decbin.sync_state_with_parent()
        self._decbin.get_state(self.Gst.CLOCK_TIME_NONE)

        return True

    def _set_next_file(self):
        """Set the next file to be analyzed while keeping the pipeline
        in the PAUSED state so that the rganalysis element can correctly
        handle album gain.
        """
        # A blocking pause
        self._pipe.set_state(self.Gst.State.PAUSED)
        self._pipe.get_state(self.Gst.CLOCK_TIME_NONE)

        # Try setting the next file
        ret = self._set_file()
        if ret:
            # Seek to the beginning in order to clear the EOS state of the
            # various elements of the pipeline
            self._pipe.seek_simple(self.Gst.Format.TIME,
                                   self.Gst.SeekFlags.FLUSH,
                                   0)
            self._pipe.set_state(self.Gst.State.PLAYING)

        return ret

    def _on_pad_added(self, decbin, pad):
        # decodebin has produced a source pad: link it to audioconvert.
        sink_pad = self._conv.get_compatible_pad(pad, None)
        assert(sink_pad is not None)
        pad.link(sink_pad)

    def _on_pad_removed(self, decbin, pad):
        # Called when the decodebin element is disconnected from the
        # rest of the pipeline while switching input files
        peer = pad.get_peer()
        assert(peer is None)


class AudioToolsBackend(Backend):
    """ReplayGain backend that uses `Python Audio Tools
    <http://audiotools.sourceforge.net/>`_ and its capabilities to read more
    file formats and compute ReplayGain values using its replaygain module.
    """

    def __init__(self, config, log):
        super(AudioToolsBackend, self).__init__(config, log)
        self._import_audiotools()

    def _import_audiotools(self):
        """Check whether it's possible to import the necessary modules.
        There is no check on the file formats at runtime.

        :raises :exc:`ReplayGainError`: if the modules cannot be imported
        """
        try:
            import audiotools
            import audiotools.replaygain
        except ImportError:
            raise FatalReplayGainError(
                u"Failed to load audiotools: audiotools not found"
            )
        self._mod_audiotools = audiotools
        self._mod_replaygain = audiotools.replaygain

    def open_audio_file(self, item):
        """Open the file to read the PCM stream from the using
        ``item.path``.

        :return: the audiofile instance
        :rtype: :class:`audiotools.AudioFile`
        :raises :exc:`ReplayGainError`: if the file is not found or the
            file format is not supported
        """
        try:
            audiofile = self._mod_audiotools.open(item.path)
        except IOError:
            raise ReplayGainError(
                u"File {} was not found".format(item.path)
            )
        except self._mod_audiotools.UnsupportedFile:
            raise ReplayGainError(
                u"Unsupported file type {}".format(item.format)
            )

        return audiofile

    def init_replaygain(self, audiofile, item):
        """Return an initialized :class:`audiotools.replaygain.ReplayGain`
        instance, which requires the sample rate of the song(s) on which
        the ReplayGain values will be computed. The item is passed in case
        the sample rate is invalid to log the stored item sample rate.

        :return: initialized replagain object
        :rtype: :class:`audiotools.replaygain.ReplayGain`
        :raises: :exc:`ReplayGainError` if the sample rate is invalid
        """
        try:
            rg = self._mod_replaygain.ReplayGain(audiofile.sample_rate())
        except ValueError:
            raise ReplayGainError(
                u"Unsupported sample rate {}".format(item.samplerate))
            # NOTE(review): this `return` is unreachable (follows a raise);
            # kept to preserve the original code exactly.
            return
        return rg

    def compute_track_gain(self, items):
        """Compute ReplayGain values for the requested items.

        :return list: list of :class:`Gain` objects
        """
        return [self._compute_track_gain(item) for item in items]

    def _title_gain(self, rg, audiofile):
        """Get the gain result pair from PyAudioTools using the `ReplayGain`
        instance `rg` for the given `audiofile`.

        Wraps `rg.title_gain(audiofile.to_pcm())` and throws a
        `ReplayGainError` when the library fails.
        """
        try:
            # The method needs an audiotools.PCMReader instance that can
            # be obtained from an audiofile instance.
            return rg.title_gain(audiofile.to_pcm())
        except ValueError as exc:
            # `audiotools.replaygain` can raise a `ValueError` if the sample
            # rate is incorrect.
            self._log.debug(u'error in rg.title_gain() call: {}', exc)
            raise ReplayGainError(u'audiotools audio data error')

    def _compute_track_gain(self, item):
        """Compute ReplayGain value for the requested item.

        :rtype: :class:`Gain`
        """
        audiofile = self.open_audio_file(item)
        rg = self.init_replaygain(audiofile, item)

        # Each call to title_gain on a ReplayGain object returns peak and gain
        # of the track.
        rg_track_gain, rg_track_peak = self._title_gain(rg, audiofile)

        self._log.debug(u'ReplayGain for track {0} - {1}: {2:.2f}, {3:.2f}',
                        item.artist, item.title, rg_track_gain, rg_track_peak)
        return Gain(gain=rg_track_gain, peak=rg_track_peak)

    def compute_album_gain(self, album):
        """Compute ReplayGain values for the requested album and its items.

        :rtype: :class:`AlbumGain`
        """
        self._log.debug(u'Analysing album {0}', album)

        # The first item is taken and opened to get the sample rate to
        # initialize the replaygain object. The object is used for all the
        # tracks in the album to get the album values.
        item = list(album.items())[0]
        audiofile = self.open_audio_file(item)
        rg = self.init_replaygain(audiofile, item)

        track_gains = []
        for item in album.items():
            audiofile = self.open_audio_file(item)
            rg_track_gain, rg_track_peak = self._title_gain(rg, audiofile)
            track_gains.append(
                Gain(gain=rg_track_gain, peak=rg_track_peak)
            )
            self._log.debug(u'ReplayGain for track {0}: {1:.2f}, {2:.2f}',
                            item, rg_track_gain, rg_track_peak)

        # After getting the values for all tracks, it's possible to get the
        # album values.
        rg_album_gain, rg_album_peak = rg.album_gain()
        self._log.debug(u'ReplayGain for album {0}: {1:.2f}, {2:.2f}',
                        album, rg_album_gain, rg_album_peak)

        return AlbumGain(
            Gain(gain=rg_album_gain, peak=rg_album_peak),
            track_gains=track_gains
        )


# Main plugin logic.

class ReplayGainPlugin(BeetsPlugin):
    """Provides ReplayGain analysis.
    """

    # Registry of selectable backend implementations.
    backends = {
        "command": CommandBackend,
        "gstreamer": GStreamerBackend,
        "audiotools": AudioToolsBackend,
        "bs1770gain": Bs1770gainBackend
    }

    def __init__(self):
        super(ReplayGainPlugin, self).__init__()

        # default backend is 'command' for backward-compatibility.
        self.config.add({
            'overwrite': False,
            'auto': True,
            'backend': u'command',
            'targetlevel': 89,
        })

        self.overwrite = self.config['overwrite'].get(bool)
        backend_name = self.config['backend'].get(unicode)
        if backend_name not in self.backends:
            raise ui.UserError(
                u"Selected ReplayGain backend {0} is not supported. "
                u"Please select one of: {1}".format(
                    backend_name,
                    u', '.join(self.backends.keys())
                )
            )

        # On-import analysis.
+ if self.config['auto']: + self.import_stages = [self.imported] + + try: + self.backend_instance = self.backends[backend_name]( + self.config, self._log + ) + except (ReplayGainError, FatalReplayGainError) as e: + raise ui.UserError( + u'replaygain initialization failed: {0}'.format(e)) + + def track_requires_gain(self, item): + return self.overwrite or \ + (not item.rg_track_gain or not item.rg_track_peak) + + def album_requires_gain(self, album): + # Skip calculating gain only when *all* files don't need + # recalculation. This way, if any file among an album's tracks + # needs recalculation, we still get an accurate album gain + # value. + return self.overwrite or \ + any([not item.rg_album_gain or not item.rg_album_peak + for item in album.items()]) + + def store_track_gain(self, item, track_gain): + item.rg_track_gain = track_gain.gain + item.rg_track_peak = track_gain.peak + item.store() + + self._log.debug(u'applied track gain {0}, peak {1}', + item.rg_track_gain, item.rg_track_peak) + + def store_album_gain(self, album, album_gain): + album.rg_album_gain = album_gain.gain + album.rg_album_peak = album_gain.peak + album.store() + + self._log.debug(u'applied album gain {0}, peak {1}', + album.rg_album_gain, album.rg_album_peak) + + def handle_album(self, album, write): + """Compute album and track replay gain store it in all of the + album's items. + + If ``write`` is truthy then ``item.write()`` is called for each + item. If replay gain information is already present in all + items, nothing is done. 
+ """ + if not self.album_requires_gain(album): + self._log.info(u'Skipping album {0}', album) + return + + self._log.info(u'analyzing {0}', album) + + try: + album_gain = self.backend_instance.compute_album_gain(album) + if len(album_gain.track_gains) != len(album.items()): + raise ReplayGainError( + u"ReplayGain backend failed " + u"for some tracks in album {0}".format(album) + ) + + self.store_album_gain(album, album_gain.album_gain) + for item, track_gain in itertools.izip(album.items(), + album_gain.track_gains): + self.store_track_gain(item, track_gain) + if write: + item.try_write() + except ReplayGainError as e: + self._log.info(u"ReplayGain error: {0}", e) + except FatalReplayGainError as e: + raise ui.UserError( + u"Fatal replay gain error: {0}".format(e)) + + def handle_track(self, item, write): + """Compute track replay gain and store it in the item. + + If ``write`` is truthy then ``item.write()`` is called to write + the data to disk. If replay gain information is already present + in the item, nothing is done. + """ + if not self.track_requires_gain(item): + self._log.info(u'Skipping track {0}', item) + return + + self._log.info(u'analyzing {0}', item) + + try: + track_gains = self.backend_instance.compute_track_gain([item]) + if len(track_gains) != 1: + raise ReplayGainError( + u"ReplayGain backend failed for track {0}".format(item) + ) + + self.store_track_gain(item, track_gains[0]) + if write: + item.try_write() + except ReplayGainError as e: + self._log.info(u"ReplayGain error: {0}", e) + except FatalReplayGainError as e: + raise ui.UserError( + u"Fatal replay gain error: {0}".format(e)) + + def imported(self, session, task): + """Add replay gain info to items or albums of ``task``. + """ + if task.is_album: + self.handle_album(task.album, False) + else: + self.handle_track(task.item, False) + + def commands(self): + """Return the "replaygain" ui subcommand. 
+ """ + def func(lib, opts, args): + self._log.setLevel(logging.INFO) + + write = ui.should_write() + + if opts.album: + for album in lib.albums(ui.decargs(args)): + self.handle_album(album, write) + + else: + for item in lib.items(ui.decargs(args)): + self.handle_track(item, write) + + cmd = ui.Subcommand('replaygain', help=u'analyze for ReplayGain') + cmd.parser.add_album_option() + cmd.func = func + return [cmd] diff --git a/libs/beetsplug/rewrite.py b/libs/beetsplug/rewrite.py new file mode 100644 index 00000000..b0104a11 --- /dev/null +++ b/libs/beetsplug/rewrite.py @@ -0,0 +1,75 @@ +# -*- coding: utf-8 -*- +# This file is part of beets. +# Copyright 2016, Adrian Sampson. +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. + +"""Uses user-specified rewriting rules to canonicalize names for path +formats. +""" +from __future__ import division, absolute_import, print_function + +import re +from collections import defaultdict + +from beets.plugins import BeetsPlugin +from beets import ui +from beets import library + + +def rewriter(field, rules): + """Create a template field function that rewrites the given field + with the given rewriting rules. ``rules`` must be a list of + (pattern, replacement) pairs. + """ + def fieldfunc(item): + value = item._values_fixed[field] + for pattern, replacement in rules: + if pattern.match(value.lower()): + # Rewrite activated. + return replacement + # Not activated; return original value. 
+ return value + return fieldfunc + + +class RewritePlugin(BeetsPlugin): + def __init__(self): + super(RewritePlugin, self).__init__() + + self.config.add({}) + + # Gather all the rewrite rules for each field. + rules = defaultdict(list) + for key, view in self.config.items(): + value = view.get(unicode) + try: + fieldname, pattern = key.split(None, 1) + except ValueError: + raise ui.UserError(u"invalid rewrite specification") + if fieldname not in library.Item._fields: + raise ui.UserError(u"invalid field name (%s) in rewriter" % + fieldname) + self._log.debug(u'adding template field {0}', key) + pattern = re.compile(pattern.lower()) + rules[fieldname].append((pattern, value)) + if fieldname == 'artist': + # Special case for the artist field: apply the same + # rewrite for "albumartist" as well. + rules['albumartist'].append((pattern, value)) + + # Replace each template field with the new rewriter function. + for fieldname, fieldrules in rules.iteritems(): + getter = rewriter(fieldname, fieldrules) + self.template_fields[fieldname] = getter + if fieldname in library.Album._fields: + self.album_template_fields[fieldname] = getter diff --git a/libs/beetsplug/scrub.py b/libs/beetsplug/scrub.py new file mode 100644 index 00000000..ed4040d5 --- /dev/null +++ b/libs/beetsplug/scrub.py @@ -0,0 +1,146 @@ +# -*- coding: utf-8 -*- +# This file is part of beets. +# Copyright 2016, Adrian Sampson. 
+# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. + +"""Cleans extraneous metadata from files' tags via a command or +automatically whenever tags are written. +""" + +from __future__ import division, absolute_import, print_function + +from beets.plugins import BeetsPlugin +from beets import ui +from beets import util +from beets import config +from beets import mediafile + +_MUTAGEN_FORMATS = { + 'asf': 'ASF', + 'apev2': 'APEv2File', + 'flac': 'FLAC', + 'id3': 'ID3FileType', + 'mp3': 'MP3', + 'mp4': 'MP4', + 'oggflac': 'OggFLAC', + 'oggspeex': 'OggSpeex', + 'oggtheora': 'OggTheora', + 'oggvorbis': 'OggVorbis', + 'oggopus': 'OggOpus', + 'trueaudio': 'TrueAudio', + 'wavpack': 'WavPack', + 'monkeysaudio': 'MonkeysAudio', + 'optimfrog': 'OptimFROG', +} + + +class ScrubPlugin(BeetsPlugin): + """Removes extraneous metadata from files' tags.""" + def __init__(self): + super(ScrubPlugin, self).__init__() + self.config.add({ + 'auto': True, + }) + + if self.config['auto']: + self.register_listener("import_task_files", self.import_task_files) + + def commands(self): + def scrub_func(lib, opts, args): + # Walk through matching files and remove tags. 
+ for item in lib.items(ui.decargs(args)): + self._log.info(u'scrubbing: {0}', + util.displayable_path(item.path)) + self._scrub_item(item, opts.write) + + scrub_cmd = ui.Subcommand('scrub', help=u'clean audio tags') + scrub_cmd.parser.add_option( + u'-W', u'--nowrite', dest='write', + action='store_false', default=True, + help=u'leave tags empty') + scrub_cmd.func = scrub_func + + return [scrub_cmd] + + @staticmethod + def _mutagen_classes(): + """Get a list of file type classes from the Mutagen module. + """ + classes = [] + for modname, clsname in _MUTAGEN_FORMATS.items(): + mod = __import__('mutagen.{0}'.format(modname), + fromlist=[clsname]) + classes.append(getattr(mod, clsname)) + return classes + + def _scrub(self, path): + """Remove all tags from a file. + """ + for cls in self._mutagen_classes(): + # Try opening the file with this type, but just skip in the + # event of any error. + try: + f = cls(util.syspath(path)) + except Exception: + continue + if f.tags is None: + continue + + # Remove the tag for this type. + try: + f.delete() + except NotImplementedError: + # Some Mutagen metadata subclasses (namely, ASFTag) do not + # support .delete(), presumably because it is impossible to + # remove them. In this case, we just remove all the tags. + for tag in f.keys(): + del f[tag] + f.save() + except IOError as exc: + self._log.error(u'could not scrub {0}: {1}', + util.displayable_path(path), exc) + + def _scrub_item(self, item, restore=True): + """Remove tags from an Item's associated file and, if `restore` + is enabled, write the database's tags back to the file. + """ + # Get album art if we need to restore it. + if restore: + try: + mf = mediafile.MediaFile(util.syspath(item.path), + config['id3v23'].get(bool)) + except IOError as exc: + self._log.error(u'could not open file to scrub: {0}', + exc) + art = mf.art + + # Remove all tags. + self._scrub(item.path) + + # Restore tags, if enabled. 
+ if restore: + self._log.debug(u'writing new tags after scrub') + item.try_write() + if art: + self._log.debug(u'restoring art') + mf = mediafile.MediaFile(util.syspath(item.path), + config['id3v23'].get(bool)) + mf.art = art + mf.save() + + def import_task_files(self, session, task): + """Automatically scrub imported files.""" + for item in task.imported_items(): + self._log.debug(u'auto-scrubbing {0}', + util.displayable_path(item.path)) + self._scrub_item(item) diff --git a/libs/beetsplug/smartplaylist.py b/libs/beetsplug/smartplaylist.py new file mode 100644 index 00000000..f6d7f715 --- /dev/null +++ b/libs/beetsplug/smartplaylist.py @@ -0,0 +1,202 @@ +# -*- coding: utf-8 -*- +# This file is part of beets. +# Copyright 2016, Dang Mai <contact@dangmai.net>. +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. + +"""Generates smart playlists based on beets queries. 
+""" + +from __future__ import division, absolute_import, print_function + +from beets.plugins import BeetsPlugin +from beets import ui +from beets.util import mkdirall, normpath, syspath +from beets.library import Item, Album, parse_query_string +from beets.dbcore import OrQuery +from beets.dbcore.query import MultipleSort, ParsingError +import os + + +class SmartPlaylistPlugin(BeetsPlugin): + + def __init__(self): + super(SmartPlaylistPlugin, self).__init__() + self.config.add({ + 'relative_to': None, + 'playlist_dir': u'.', + 'auto': True, + 'playlists': [] + }) + + self._matched_playlists = None + self._unmatched_playlists = None + + if self.config['auto']: + self.register_listener('database_change', self.db_change) + + def commands(self): + spl_update = ui.Subcommand( + 'splupdate', + help=u'update the smart playlists. Playlist names may be ' + u'passed as arguments.' + ) + spl_update.func = self.update_cmd + return [spl_update] + + def update_cmd(self, lib, opts, args): + self.build_queries() + if args: + args = set(ui.decargs(args)) + for a in list(args): + if not a.endswith(".m3u"): + args.add("{0}.m3u".format(a)) + + playlists = set((name, q, a_q) + for name, q, a_q in self._unmatched_playlists + if name in args) + if not playlists: + raise ui.UserError( + u'No playlist matching any of {0} found'.format( + [name for name, _, _ in self._unmatched_playlists]) + ) + + self._matched_playlists = playlists + self._unmatched_playlists -= playlists + else: + self._matched_playlists = self._unmatched_playlists + + self.update_playlists(lib) + + def build_queries(self): + """ + Instanciate queries for the playlists. + + Each playlist has 2 queries: one or items one for albums, each with a + sort. We must also remember its name. _unmatched_playlists is a set of + tuples (name, (q, q_sort), (album_q, album_q_sort)). + + sort may be any sort, or NullSort, or None. None and NullSort are + equivalent and both eval to False. 
+ More precisely + - it will be NullSort when a playlist query ('query' or 'album_query') + is a single item or a list with 1 element + - it will be None when there are multiple items i a query + """ + self._unmatched_playlists = set() + self._matched_playlists = set() + + for playlist in self.config['playlists'].get(list): + if 'name' not in playlist: + self._log.warn(u"playlist configuration is missing name") + continue + + playlist_data = (playlist['name'],) + try: + for key, Model in (('query', Item), ('album_query', Album)): + qs = playlist.get(key) + if qs is None: + query_and_sort = None, None + elif isinstance(qs, basestring): + query_and_sort = parse_query_string(qs, Model) + elif len(qs) == 1: + query_and_sort = parse_query_string(qs[0], Model) + else: + # multiple queries and sorts + queries, sorts = zip(*(parse_query_string(q, Model) + for q in qs)) + query = OrQuery(queries) + final_sorts = [] + for s in sorts: + if s: + if isinstance(s, MultipleSort): + final_sorts += s.sorts + else: + final_sorts.append(s) + if not final_sorts: + sort = None + elif len(final_sorts) == 1: + sort, = final_sorts + else: + sort = MultipleSort(final_sorts) + query_and_sort = query, sort + + playlist_data += (query_and_sort,) + + except ParsingError as exc: + self._log.warn(u"invalid query in playlist {}: {}", + playlist['name'], exc) + continue + + self._unmatched_playlists.add(playlist_data) + + def matches(self, model, query, album_query): + if album_query and isinstance(model, Album): + return album_query.match(model) + if query and isinstance(model, Item): + return query.match(model) + return False + + def db_change(self, lib, model): + if self._unmatched_playlists is None: + self.build_queries() + + for playlist in self._unmatched_playlists: + n, (q, _), (a_q, _) = playlist + if self.matches(model, q, a_q): + self._log.debug( + u"{0} will be updated because of {1}", n, model) + self._matched_playlists.add(playlist) + self.register_listener('cli_exit', 
self.update_playlists) + + self._unmatched_playlists -= self._matched_playlists + + def update_playlists(self, lib): + self._log.info(u"Updating {0} smart playlists...", + len(self._matched_playlists)) + + playlist_dir = self.config['playlist_dir'].as_filename() + relative_to = self.config['relative_to'].get() + if relative_to: + relative_to = normpath(relative_to) + + for playlist in self._matched_playlists: + name, (query, q_sort), (album_query, a_q_sort) = playlist + self._log.debug(u"Creating playlist {0}", name) + items = [] + + if query: + items.extend(lib.items(query, q_sort)) + if album_query: + for album in lib.albums(album_query, a_q_sort): + items.extend(album.items()) + + m3us = {} + # As we allow tags in the m3u names, we'll need to iterate through + # the items and generate the correct m3u file names. + for item in items: + m3u_name = item.evaluate_template(name, True) + if m3u_name not in m3us: + m3us[m3u_name] = [] + item_path = item.path + if relative_to: + item_path = os.path.relpath(item.path, relative_to) + if item_path not in m3us[m3u_name]: + m3us[m3u_name].append(item_path) + # Now iterate through the m3us that we need to generate + for m3u in m3us: + m3u_path = normpath(os.path.join(playlist_dir, m3u)) + mkdirall(m3u_path) + with open(syspath(m3u_path), 'w') as f: + for path in m3us[m3u]: + f.write(path + b'\n') + self._log.info(u"{0} playlists updated", len(self._matched_playlists)) diff --git a/libs/beetsplug/spotify.py b/libs/beetsplug/spotify.py new file mode 100644 index 00000000..081a027f --- /dev/null +++ b/libs/beetsplug/spotify.py @@ -0,0 +1,176 @@ +# -*- coding: utf-8 -*- + +from __future__ import division, absolute_import, print_function + +import re +import webbrowser +import requests +from beets.plugins import BeetsPlugin +from beets.ui import decargs +from beets import ui +from requests.exceptions import HTTPError + + +class SpotifyPlugin(BeetsPlugin): + + # URL for the Web API of Spotify + # Documentation here: 
https://developer.spotify.com/web-api/search-item/ + base_url = "https://api.spotify.com/v1/search" + open_url = "http://open.spotify.com/track/" + playlist_partial = "spotify:trackset:Playlist:" + + def __init__(self): + super(SpotifyPlugin, self).__init__() + self.config.add({ + 'mode': 'list', + 'tiebreak': 'popularity', + 'show_failures': False, + 'artist_field': 'albumartist', + 'album_field': 'album', + 'track_field': 'title', + 'region_filter': None, + 'regex': [] + }) + + def commands(self): + def queries(lib, opts, args): + success = self.parse_opts(opts) + if success: + results = self.query_spotify(lib, decargs(args)) + self.output_results(results) + spotify_cmd = ui.Subcommand( + 'spotify', + help=u'build a Spotify playlist' + ) + spotify_cmd.parser.add_option( + u'-m', u'--mode', action='store', + help=u'"open" to open Spotify with playlist, ' + u'"list" to print (default)' + ) + spotify_cmd.parser.add_option( + u'-f', u'--show-failures', + action='store_true', dest='show_failures', + help=u'list tracks that did not match a Spotify ID' + ) + spotify_cmd.func = queries + return [spotify_cmd] + + def parse_opts(self, opts): + if opts.mode: + self.config['mode'].set(opts.mode) + + if opts.show_failures: + self.config['show_failures'].set(True) + + if self.config['mode'].get() not in ['list', 'open']: + self._log.warn(u'{0} is not a valid mode', + self.config['mode'].get()) + return False + + self.opts = opts + return True + + def query_spotify(self, lib, query): + + results = [] + failures = [] + + items = lib.items(query) + + if not items: + self._log.debug(u'Your beets query returned no items, ' + u'skipping spotify') + return + + self._log.info(u'Processing {0} tracks...', len(items)) + + for item in items: + + # Apply regex transformations if provided + for regex in self.config['regex'].get(): + if ( + not regex['field'] or + not regex['search'] or + not regex['replace'] + ): + continue + + value = item[regex['field']] + item[regex['field']] = re.sub( 
+ regex['search'], regex['replace'], value + ) + + # Custom values can be passed in the config (just in case) + artist = item[self.config['artist_field'].get()] + album = item[self.config['album_field'].get()] + query = item[self.config['track_field'].get()] + search_url = query + " album:" + album + " artist:" + artist + + # Query the Web API for each track, look for the items' JSON data + r = requests.get(self.base_url, params={ + "q": search_url, "type": "track" + }) + self._log.debug('{}', r.url) + try: + r.raise_for_status() + except HTTPError as e: + self._log.debug(u'URL returned a {0} error', + e.response.status_code) + failures.append(search_url) + continue + + r_data = r.json()['tracks']['items'] + + # Apply market filter if requested + region_filter = self.config['region_filter'].get() + if region_filter: + r_data = filter( + lambda x: region_filter in x['available_markets'], r_data + ) + + # Simplest, take the first result + chosen_result = None + if len(r_data) == 1 or self.config['tiebreak'].get() == "first": + self._log.debug(u'Spotify track(s) found, count: {0}', + len(r_data)) + chosen_result = r_data[0] + elif len(r_data) > 1: + # Use the popularity filter + self._log.debug(u'Most popular track chosen, count: {0}', + len(r_data)) + chosen_result = max(r_data, key=lambda x: x['popularity']) + + if chosen_result: + results.append(chosen_result) + else: + self._log.debug(u'No spotify track found: {0}', search_url) + failures.append(search_url) + + failure_count = len(failures) + if failure_count > 0: + if self.config['show_failures'].get(): + self._log.info(u'{0} track(s) did not match a Spotify ID:', + failure_count) + for track in failures: + self._log.info(u'track: {0}', track) + self._log.info(u'') + else: + self._log.warn(u'{0} track(s) did not match a Spotify ID;\n' + u'use --show-failures to display', + failure_count) + + return results + + def output_results(self, results): + if results: + ids = map(lambda x: x['id'], results) + if 
self.config['mode'].get() == "open": + self._log.info(u'Attempting to open Spotify with playlist') + spotify_url = self.playlist_partial + ",".join(ids) + webbrowser.open(spotify_url) + + else: + for item in ids: + print(unicode.encode(self.open_url + item)) + else: + self._log.warn(u'No Spotify tracks found from beets query') diff --git a/libs/beetsplug/the.py b/libs/beetsplug/the.py new file mode 100644 index 00000000..6bed4c6e --- /dev/null +++ b/libs/beetsplug/the.py @@ -0,0 +1,100 @@ +# -*- coding: utf-8 -*- +# This file is part of beets. +# Copyright 2016, Blemjhoo Tezoulbr <baobab@heresiarch.info>. +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. 
+ +"""Moves patterns in path formats (suitable for moving articles).""" + +from __future__ import division, absolute_import, print_function + +import re +from beets.plugins import BeetsPlugin + +__author__ = 'baobab@heresiarch.info' +__version__ = '1.1' + +PATTERN_THE = u'^[the]{3}\s' +PATTERN_A = u'^[a][n]?\s' +FORMAT = u'{0}, {1}' + + +class ThePlugin(BeetsPlugin): + + patterns = [] + + def __init__(self): + super(ThePlugin, self).__init__() + + self.template_funcs['the'] = self.the_template_func + + self.config.add({ + 'the': True, + 'a': True, + 'format': u'{0}, {1}', + 'strip': False, + 'patterns': [], + }) + + self.patterns = self.config['patterns'].as_str_seq() + for p in self.patterns: + if p: + try: + re.compile(p) + except re.error: + self._log.error(u'invalid pattern: {0}', p) + else: + if not (p.startswith('^') or p.endswith('$')): + self._log.warn(u'warning: \"{0}\" will not ' + u'match string start/end', p) + if self.config['a']: + self.patterns = [PATTERN_A] + self.patterns + if self.config['the']: + self.patterns = [PATTERN_THE] + self.patterns + if not self.patterns: + self._log.warn(u'no patterns defined!') + + def unthe(self, text, pattern): + """Moves pattern in the path format string or strips it + + text -- text to handle + pattern -- regexp pattern (case ignore is already on) + strip -- if True, pattern will be removed + """ + if text: + r = re.compile(pattern, flags=re.IGNORECASE) + try: + t = r.findall(text)[0] + except IndexError: + return text + else: + r = re.sub(r, '', text).strip() + if self.config['strip']: + return r + else: + fmt = self.config['format'].get(unicode) + return fmt.format(r, t.strip()).strip() + else: + return u'' + + def the_template_func(self, text): + if not self.patterns: + return text + if text: + for p in self.patterns: + r = self.unthe(text, p) + if r != text: + break + self._log.debug(u'\"{0}\" -> \"{1}\"', text, r) + return r + else: + return u'' diff --git a/libs/beetsplug/thumbnails.py 
b/libs/beetsplug/thumbnails.py new file mode 100644 index 00000000..0e7fbc6e --- /dev/null +++ b/libs/beetsplug/thumbnails.py @@ -0,0 +1,289 @@ +# -*- coding: utf-8 -*- +# This file is part of beets. +# Copyright 2016, Bruno Cauet +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. + +"""Create freedesktop.org-compliant thumbnails for album folders + +This plugin is POSIX-only. +Spec: standards.freedesktop.org/thumbnail-spec/latest/index.html +""" + +from __future__ import division, absolute_import, print_function + +from hashlib import md5 +import os +import shutil +from itertools import chain +from pathlib import PurePosixPath +import ctypes +import ctypes.util + +from xdg import BaseDirectory + +from beets.plugins import BeetsPlugin +from beets.ui import Subcommand, decargs +from beets import util +from beets.util.artresizer import ArtResizer, get_im_version, get_pil_version + + +BASE_DIR = os.path.join(BaseDirectory.xdg_cache_home, "thumbnails") +NORMAL_DIR = util.bytestring_path(os.path.join(BASE_DIR, "normal")) +LARGE_DIR = util.bytestring_path(os.path.join(BASE_DIR, "large")) + + +class ThumbnailsPlugin(BeetsPlugin): + def __init__(self): + super(ThumbnailsPlugin, self).__init__() + self.config.add({ + 'auto': True, + 'force': False, + 'dolphin': False, + }) + + self.write_metadata = None + if self.config['auto'] and self._check_local_ok(): + self.register_listener('art_set', self.process_album) + + def commands(self): + 
thumbnails_command = Subcommand("thumbnails", + help=u"Create album thumbnails") + thumbnails_command.parser.add_option( + u'-f', u'--force', + dest='force', action='store_true', default=False, + help=u'force regeneration of thumbnails deemed fine (existing & ' + u'recent enough)') + thumbnails_command.parser.add_option( + u'--dolphin', dest='dolphin', action='store_true', default=False, + help=u"create Dolphin-compatible thumbnail information (for KDE)") + thumbnails_command.func = self.process_query + + return [thumbnails_command] + + def process_query(self, lib, opts, args): + self.config.set_args(opts) + if self._check_local_ok(): + for album in lib.albums(decargs(args)): + self.process_album(album) + + def _check_local_ok(self): + """Check that's everythings ready: + - local capability to resize images + - thumbnail dirs exist (create them if needed) + - detect whether we'll use PIL or IM + - detect whether we'll use GIO or Python to get URIs + """ + if not ArtResizer.shared.local: + self._log.warning(u"No local image resizing capabilities, " + u"cannot generate thumbnails") + return False + + for dir in (NORMAL_DIR, LARGE_DIR): + if not os.path.exists(dir): + os.makedirs(dir) + + if get_im_version(): + self.write_metadata = write_metadata_im + tool = "IM" + else: + assert get_pil_version() # since we're local + self.write_metadata = write_metadata_pil + tool = "PIL" + self._log.debug(u"using {0} to write metadata", tool) + + uri_getter = GioURI() + if not uri_getter.available: + uri_getter = PathlibURI() + self._log.debug(u"using {0.name} to compute URIs", uri_getter) + self.get_uri = uri_getter.uri + + return True + + def process_album(self, album): + """Produce thumbnails for the album folder. 
+ """ + self._log.debug(u'generating thumbnail for {0}', album) + if not album.artpath: + self._log.info(u'album {0} has no art', album) + return + + if self.config['dolphin']: + self.make_dolphin_cover_thumbnail(album) + + size = ArtResizer.shared.get_size(album.artpath) + if not size: + self._log.warning(u'problem getting the picture size for {0}', + album.artpath) + return + + wrote = True + if max(size) >= 256: + wrote &= self.make_cover_thumbnail(album, 256, LARGE_DIR) + wrote &= self.make_cover_thumbnail(album, 128, NORMAL_DIR) + + if wrote: + self._log.info(u'wrote thumbnail for {0}', album) + else: + self._log.info(u'nothing to do for {0}', album) + + def make_cover_thumbnail(self, album, size, target_dir): + """Make a thumbnail of given size for `album` and put it in + `target_dir`. + """ + target = os.path.join(target_dir, self.thumbnail_file_name(album.path)) + + if os.path.exists(target) and \ + os.stat(target).st_mtime > os.stat(album.artpath).st_mtime: + if self.config['force']: + self._log.debug(u"found a suitable {1}x{1} thumbnail for {0}, " + u"forcing regeneration", album, size) + else: + self._log.debug(u"{1}x{1} thumbnail for {0} exists and is " + u"recent enough", album, size) + return False + resized = ArtResizer.shared.resize(size, album.artpath, + util.syspath(target)) + self.add_tags(album, util.syspath(resized)) + shutil.move(resized, target) + return True + + def thumbnail_file_name(self, path): + """Compute the thumbnail file name + See http://standards.freedesktop.org/thumbnail-spec/latest/x227.html + """ + uri = self.get_uri(path) + hash = md5(uri).hexdigest() + return b"{0}.png".format(hash) + + def add_tags(self, album, image_path): + """Write required metadata to the thumbnail + See http://standards.freedesktop.org/thumbnail-spec/latest/x142.html + """ + metadata = {"Thumb::URI": self.get_uri(album.artpath), + "Thumb::MTime": unicode(os.stat(album.artpath).st_mtime)} + try: + self.write_metadata(image_path, metadata) + except 
Exception: + self._log.exception(u"could not write metadata to {0}", + util.displayable_path(image_path)) + + def make_dolphin_cover_thumbnail(self, album): + outfilename = os.path.join(album.path, b".directory") + if os.path.exists(outfilename): + return + artfile = os.path.split(album.artpath)[1] + with open(outfilename, 'w') as f: + f.write(b"[Desktop Entry]\nIcon=./{0}".format(artfile)) + f.close() + self._log.debug(u"Wrote file {0}", util.displayable_path(outfilename)) + + +def write_metadata_im(file, metadata): + """Enrich the file metadata with `metadata` dict thanks to IM.""" + command = ['convert', file] + \ + list(chain.from_iterable(('-set', k, v) + for k, v in metadata.items())) + [file] + util.command_output(command) + return True + + +def write_metadata_pil(file, metadata): + """Enrich the file metadata with `metadata` dict thanks to PIL.""" + from PIL import Image, PngImagePlugin + im = Image.open(file) + meta = PngImagePlugin.PngInfo() + for k, v in metadata.items(): + meta.add_text(k, v, 0) + im.save(file, "PNG", pnginfo=meta) + return True + + +class URIGetter(object): + available = False + name = "Abstract base" + + def uri(self, path): + raise NotImplementedError() + + +class PathlibURI(URIGetter): + available = True + name = "Python Pathlib" + + def uri(self, path): + return PurePosixPath(path).as_uri() + + +def copy_c_string(c_string): + """Copy a `ctypes.POINTER(ctypes.c_char)` value into a new Python + string and return it. The old memory is then safe to free. + """ + # This is a pretty dumb way to get a string copy, but it seems to + # work. A more surefire way would be to allocate a ctypes buffer and copy + # the data with `memcpy` or somesuch. + s = ctypes.cast(c_string, ctypes.c_char_p).value + return '' + s + + +class GioURI(URIGetter): + """Use gio URI function g_file_get_uri. Paths must be utf-8 encoded. 
+ """ + name = "GIO" + + def __init__(self): + self.libgio = self.get_library() + self.available = bool(self.libgio) + if self.available: + self.libgio.g_type_init() # for glib < 2.36 + + self.libgio.g_file_get_uri.argtypes = [ctypes.c_char_p] + self.libgio.g_file_new_for_path.restype = ctypes.c_void_p + + self.libgio.g_file_get_uri.argtypes = [ctypes.c_void_p] + self.libgio.g_file_get_uri.restype = ctypes.POINTER(ctypes.c_char) + + self.libgio.g_object_unref.argtypes = [ctypes.c_void_p] + + def get_library(self): + lib_name = ctypes.util.find_library("gio-2") + try: + if not lib_name: + return False + return ctypes.cdll.LoadLibrary(lib_name) + except OSError: + return False + + def uri(self, path): + g_file_ptr = self.libgio.g_file_new_for_path(path) + if not g_file_ptr: + raise RuntimeError(u"No gfile pointer received for {0}".format( + util.displayable_path(path))) + + try: + uri_ptr = self.libgio.g_file_get_uri(g_file_ptr) + except: + raise + finally: + self.libgio.g_object_unref(g_file_ptr) + if not uri_ptr: + self.libgio.g_free(uri_ptr) + raise RuntimeError(u"No URI received from the gfile pointer for " + u"{0}".format(util.displayable_path(path))) + + try: + uri = copy_c_string(uri_ptr) + except: + raise + finally: + self.libgio.g_free(uri_ptr) + return uri diff --git a/libs/beetsplug/types.py b/libs/beetsplug/types.py new file mode 100644 index 00000000..0c078881 --- /dev/null +++ b/libs/beetsplug/types.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# This file is part of beets. +# Copyright 2016, Thomas Scholtes. 
+# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. + +from __future__ import division, absolute_import, print_function + +from beets.plugins import BeetsPlugin +from beets.dbcore import types +from beets.util.confit import ConfigValueError +from beets import library + + +class TypesPlugin(BeetsPlugin): + + @property + def item_types(self): + return self._types() + + @property + def album_types(self): + return self._types() + + def _types(self): + if not self.config.exists(): + return {} + + mytypes = {} + for key, value in self.config.items(): + if value.get() == 'int': + mytypes[key] = types.INTEGER + elif value.get() == 'float': + mytypes[key] = types.FLOAT + elif value.get() == 'bool': + mytypes[key] = types.BOOLEAN + elif value.get() == 'date': + mytypes[key] = library.DateType() + else: + raise ConfigValueError( + u"unknown type '{0}' for the '{1}' field" + .format(value, key)) + return mytypes diff --git a/libs/beetsplug/web/__init__.py b/libs/beetsplug/web/__init__.py new file mode 100644 index 00000000..67d99db6 --- /dev/null +++ b/libs/beetsplug/web/__init__.py @@ -0,0 +1,328 @@ +# -*- coding: utf-8 -*- +# This file is part of beets. +# Copyright 2016, Adrian Sampson. 
+# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. + +"""A Web interface to beets.""" +from __future__ import division, absolute_import, print_function + +from beets.plugins import BeetsPlugin +from beets import ui +from beets import util +import beets.library +import flask +from flask import g +from werkzeug.routing import BaseConverter, PathConverter +import os +import json + + +# Utilities. + +def _rep(obj, expand=False): + """Get a flat -- i.e., JSON-ish -- representation of a beets Item or + Album object. For Albums, `expand` dictates whether tracks are + included. + """ + out = dict(obj) + + if isinstance(obj, beets.library.Item): + del out['path'] + + # Get the size (in bytes) of the backing file. This is useful + # for the Tomahawk resolver API. 
+ try: + out['size'] = os.path.getsize(util.syspath(obj.path)) + except OSError: + out['size'] = 0 + + return out + + elif isinstance(obj, beets.library.Album): + del out['artpath'] + if expand: + out['items'] = [_rep(item) for item in obj.items()] + return out + + +def json_generator(items, root): + """Generator that dumps list of beets Items or Albums as JSON + + :param root: root key for JSON + :param items: list of :class:`Item` or :class:`Album` to dump + :returns: generator that yields strings + """ + yield '{"%s":[' % root + first = True + for item in items: + if first: + first = False + else: + yield ',' + yield json.dumps(_rep(item)) + yield ']}' + + +def resource(name): + """Decorates a function to handle RESTful HTTP requests for a resource. + """ + def make_responder(retriever): + def responder(ids): + entities = [retriever(id) for id in ids] + entities = [entity for entity in entities if entity] + + if len(entities) == 1: + return flask.jsonify(_rep(entities[0])) + elif entities: + return app.response_class( + json_generator(entities, root=name), + mimetype='application/json' + ) + else: + return flask.abort(404) + responder.__name__ = 'get_{0}'.format(name) + return responder + return make_responder + + +def resource_query(name): + """Decorates a function to handle RESTful HTTP queries for resources. + """ + def make_responder(query_func): + def responder(queries): + return app.response_class( + json_generator(query_func(queries), root='results'), + mimetype='application/json' + ) + responder.__name__ = 'query_{0}'.format(name) + return responder + return make_responder + + +def resource_list(name): + """Decorates a function to handle RESTful HTTP request for a list of + resources. 
+ """ + def make_responder(list_all): + def responder(): + return app.response_class( + json_generator(list_all(), root=name), + mimetype='application/json' + ) + responder.__name__ = 'all_{0}'.format(name) + return responder + return make_responder + + +def _get_unique_table_field_values(model, field, sort_field): + """ retrieve all unique values belonging to a key from a model """ + if field not in model.all_keys() or sort_field not in model.all_keys(): + raise KeyError + with g.lib.transaction() as tx: + rows = tx.query('SELECT DISTINCT "{0}" FROM "{1}" ORDER BY "{2}"' + .format(field, model._table, sort_field)) + return [row[0] for row in rows] + + +class IdListConverter(BaseConverter): + """Converts comma separated lists of ids in urls to integer lists. + """ + + def to_python(self, value): + ids = [] + for id in value.split(','): + try: + ids.append(int(id)) + except ValueError: + pass + return ids + + def to_url(self, value): + return ','.join(value) + + +class QueryConverter(PathConverter): + """Converts slash separated lists of queries in the url to string list. + """ + + def to_python(self, value): + return value.split('/') + + def to_url(self, value): + return ','.join(value) + + +# Flask setup. + +app = flask.Flask(__name__) +app.url_map.converters['idlist'] = IdListConverter +app.url_map.converters['query'] = QueryConverter + + +@app.before_request +def before_request(): + g.lib = app.config['lib'] + + +# Items. 
+ +@app.route('/item/<idlist:ids>') +@resource('items') +def get_item(id): + return g.lib.get_item(id) + + +@app.route('/item/') +@app.route('/item/query/') +@resource_list('items') +def all_items(): + return g.lib.items() + + +@app.route('/item/<int:item_id>/file') +def item_file(item_id): + item = g.lib.get_item(item_id) + response = flask.send_file(item.path, as_attachment=True, + attachment_filename=os.path.basename(item.path)) + response.headers['Content-Length'] = os.path.getsize(item.path) + return response + + +@app.route('/item/query/<query:queries>') +@resource_query('items') +def item_query(queries): + return g.lib.items(queries) + + +@app.route('/item/values/<string:key>') +def item_unique_field_values(key): + sort_key = flask.request.args.get('sort_key', key) + try: + values = _get_unique_table_field_values(beets.library.Item, key, + sort_key) + except KeyError: + return flask.abort(404) + return flask.jsonify(values=values) + + +# Albums. + +@app.route('/album/<idlist:ids>') +@resource('albums') +def get_album(id): + return g.lib.get_album(id) + + +@app.route('/album/') +@app.route('/album/query/') +@resource_list('albums') +def all_albums(): + return g.lib.albums() + + +@app.route('/album/query/<query:queries>') +@resource_query('albums') +def album_query(queries): + return g.lib.albums(queries) + + +@app.route('/album/<int:album_id>/art') +def album_art(album_id): + album = g.lib.get_album(album_id) + if album.artpath: + return flask.send_file(album.artpath) + else: + return flask.abort(404) + + +@app.route('/album/values/<string:key>') +def album_unique_field_values(key): + sort_key = flask.request.args.get('sort_key', key) + try: + values = _get_unique_table_field_values(beets.library.Album, key, + sort_key) + except KeyError: + return flask.abort(404) + return flask.jsonify(values=values) + + +# Artists. 
+ +@app.route('/artist/') +def all_artists(): + with g.lib.transaction() as tx: + rows = tx.query("SELECT DISTINCT albumartist FROM albums") + all_artists = [row[0] for row in rows] + return flask.jsonify(artist_names=all_artists) + + +# Library information. + +@app.route('/stats') +def stats(): + with g.lib.transaction() as tx: + item_rows = tx.query("SELECT COUNT(*) FROM items") + album_rows = tx.query("SELECT COUNT(*) FROM albums") + return flask.jsonify({ + 'items': item_rows[0][0], + 'albums': album_rows[0][0], + }) + + +# UI. + +@app.route('/') +def home(): + return flask.render_template('index.html') + + +# Plugin hook. + +class WebPlugin(BeetsPlugin): + def __init__(self): + super(WebPlugin, self).__init__() + self.config.add({ + 'host': u'127.0.0.1', + 'port': 8337, + 'cors': '', + }) + + def commands(self): + cmd = ui.Subcommand('web', help=u'start a Web interface') + cmd.parser.add_option(u'-d', u'--debug', action='store_true', + default=False, help=u'debug mode') + + def func(lib, opts, args): + args = ui.decargs(args) + if args: + self.config['host'] = args.pop(0) + if args: + self.config['port'] = int(args.pop(0)) + + app.config['lib'] = lib + # Enable CORS if required. + if self.config['cors']: + self._log.info(u'Enabling CORS with origin: {0}', + self.config['cors']) + from flask.ext.cors import CORS + app.config['CORS_ALLOW_HEADERS'] = "Content-Type" + app.config['CORS_RESOURCES'] = { + r"/*": {"origins": self.config['cors'].get(str)} + } + CORS(app) + # Start the web application. + app.run(host=self.config['host'].get(unicode), + port=self.config['port'].get(int), + debug=opts.debug, threaded=True) + cmd.func = func + return [cmd] diff --git a/libs/beetsplug/web/static/backbone.js b/libs/beetsplug/web/static/backbone.js new file mode 100644 index 00000000..b2e49322 --- /dev/null +++ b/libs/beetsplug/web/static/backbone.js @@ -0,0 +1,1158 @@ +// Backbone.js 0.5.3 +// (c) 2010 Jeremy Ashkenas, DocumentCloud Inc. 
+// Backbone may be freely distributed under the MIT license. +// For all details and documentation: +// http://documentcloud.github.com/backbone + +(function(){ + + // Initial Setup + // ------------- + + // Save a reference to the global object. + var root = this; + + // Save the previous value of the `Backbone` variable. + var previousBackbone = root.Backbone; + + // The top-level namespace. All public Backbone classes and modules will + // be attached to this. Exported for both CommonJS and the browser. + var Backbone; + if (typeof exports !== 'undefined') { + Backbone = exports; + } else { + Backbone = root.Backbone = {}; + } + + // Current version of the library. Keep in sync with `package.json`. + Backbone.VERSION = '0.5.3'; + + // Require Underscore, if we're on the server, and it's not already present. + var _ = root._; + if (!_ && (typeof require !== 'undefined')) _ = require('underscore')._; + + // For Backbone's purposes, jQuery or Zepto owns the `$` variable. + var $ = root.jQuery || root.Zepto; + + // Runs Backbone.js in *noConflict* mode, returning the `Backbone` variable + // to its previous owner. Returns a reference to this Backbone object. + Backbone.noConflict = function() { + root.Backbone = previousBackbone; + return this; + }; + + // Turn on `emulateHTTP` to support legacy HTTP servers. Setting this option will + // fake `"PUT"` and `"DELETE"` requests via the `_method` parameter and set a + // `X-Http-Method-Override` header. + Backbone.emulateHTTP = false; + + // Turn on `emulateJSON` to support legacy servers that can't deal with direct + // `application/json` requests ... will encode the body as + // `application/x-www-form-urlencoded` instead and will send the model in a + // form param named `model`. + Backbone.emulateJSON = false; + + // Backbone.Events + // ----------------- + + // A module that can be mixed in to *any object* in order to provide it with + // custom events. 
You may `bind` or `unbind` a callback function to an event; + // `trigger`-ing an event fires all callbacks in succession. + // + // var object = {}; + // _.extend(object, Backbone.Events); + // object.bind('expand', function(){ alert('expanded'); }); + // object.trigger('expand'); + // + Backbone.Events = { + + // Bind an event, specified by a string name, `ev`, to a `callback` function. + // Passing `"all"` will bind the callback to all events fired. + bind : function(ev, callback, context) { + var calls = this._callbacks || (this._callbacks = {}); + var list = calls[ev] || (calls[ev] = []); + list.push([callback, context]); + return this; + }, + + // Remove one or many callbacks. If `callback` is null, removes all + // callbacks for the event. If `ev` is null, removes all bound callbacks + // for all events. + unbind : function(ev, callback) { + var calls; + if (!ev) { + this._callbacks = {}; + } else if (calls = this._callbacks) { + if (!callback) { + calls[ev] = []; + } else { + var list = calls[ev]; + if (!list) return this; + for (var i = 0, l = list.length; i < l; i++) { + if (list[i] && callback === list[i][0]) { + list[i] = null; + break; + } + } + } + } + return this; + }, + + // Trigger an event, firing all bound callbacks. Callbacks are passed the + // same arguments as `trigger` is, apart from the event name. + // Listening for `"all"` passes the true event name as the first argument. + trigger : function(eventName) { + var list, calls, ev, callback, args; + var both = 2; + if (!(calls = this._callbacks)) return this; + while (both--) { + ev = both ? eventName : 'all'; + if (list = calls[ev]) { + for (var i = 0, l = list.length; i < l; i++) { + if (!(callback = list[i])) { + list.splice(i, 1); i--; l--; + } else { + args = both ? 
Array.prototype.slice.call(arguments, 1) : arguments; + callback[0].apply(callback[1] || this, args); + } + } + } + } + return this; + } + + }; + + // Backbone.Model + // -------------- + + // Create a new model, with defined attributes. A client id (`cid`) + // is automatically generated and assigned for you. + Backbone.Model = function(attributes, options) { + var defaults; + attributes || (attributes = {}); + if (defaults = this.defaults) { + if (_.isFunction(defaults)) defaults = defaults.call(this); + attributes = _.extend({}, defaults, attributes); + } + this.attributes = {}; + this._escapedAttributes = {}; + this.cid = _.uniqueId('c'); + this.set(attributes, {silent : true}); + this._changed = false; + this._previousAttributes = _.clone(this.attributes); + if (options && options.collection) this.collection = options.collection; + this.initialize(attributes, options); + }; + + // Attach all inheritable methods to the Model prototype. + _.extend(Backbone.Model.prototype, Backbone.Events, { + + // A snapshot of the model's previous attributes, taken immediately + // after the last `"change"` event was fired. + _previousAttributes : null, + + // Has the item been changed since the last `"change"` event? + _changed : false, + + // The default name for the JSON `id` attribute is `"id"`. MongoDB and + // CouchDB users may want to set this to `"_id"`. + idAttribute : 'id', + + // Initialize is an empty function by default. Override it with your own + // initialization logic. + initialize : function(){}, + + // Return a copy of the model's `attributes` object. + toJSON : function() { + return _.clone(this.attributes); + }, + + // Get the value of an attribute. + get : function(attr) { + return this.attributes[attr]; + }, + + // Get the HTML-escaped value of an attribute. + escape : function(attr) { + var html; + if (html = this._escapedAttributes[attr]) return html; + var val = this.attributes[attr]; + return this._escapedAttributes[attr] = escapeHTML(val == null ? 
'' : '' + val); + }, + + // Returns `true` if the attribute contains a value that is not null + // or undefined. + has : function(attr) { + return this.attributes[attr] != null; + }, + + // Set a hash of model attributes on the object, firing `"change"` unless you + // choose to silence it. + set : function(attrs, options) { + + // Extract attributes and options. + options || (options = {}); + if (!attrs) return this; + if (attrs.attributes) attrs = attrs.attributes; + var now = this.attributes, escaped = this._escapedAttributes; + + // Run validation. + if (!options.silent && this.validate && !this._performValidation(attrs, options)) return false; + + // Check for changes of `id`. + if (this.idAttribute in attrs) this.id = attrs[this.idAttribute]; + + // We're about to start triggering change events. + var alreadyChanging = this._changing; + this._changing = true; + + // Update attributes. + for (var attr in attrs) { + var val = attrs[attr]; + if (!_.isEqual(now[attr], val)) { + now[attr] = val; + delete escaped[attr]; + this._changed = true; + if (!options.silent) this.trigger('change:' + attr, this, val, options); + } + } + + // Fire the `"change"` event, if the model has been changed. + if (!alreadyChanging && !options.silent && this._changed) this.change(options); + this._changing = false; + return this; + }, + + // Remove an attribute from the model, firing `"change"` unless you choose + // to silence it. `unset` is a noop if the attribute doesn't exist. + unset : function(attr, options) { + if (!(attr in this.attributes)) return this; + options || (options = {}); + var value = this.attributes[attr]; + + // Run validation. + var validObj = {}; + validObj[attr] = void 0; + if (!options.silent && this.validate && !this._performValidation(validObj, options)) return false; + + // Remove the attribute. 
+      delete this.attributes[attr];
+      delete this._escapedAttributes[attr];
+      if (attr == this.idAttribute) delete this.id;
+      this._changed = true;
+      if (!options.silent) {
+        this.trigger('change:' + attr, this, void 0, options);
+        this.change(options);
+      }
+      return this;
+    },
+
+    // Clear all attributes on the model, firing `"change"` unless you choose
+    // to silence it.
+    clear : function(options) {
+      options || (options = {});
+      var attr;
+      var old = this.attributes;
+
+      // Run validation.
+      var validObj = {};
+      for (attr in old) validObj[attr] = void 0;
+      if (!options.silent && this.validate && !this._performValidation(validObj, options)) return false;
+
+      this.attributes = {};
+      this._escapedAttributes = {};
+      this._changed = true;
+      if (!options.silent) {
+        for (attr in old) {
+          this.trigger('change:' + attr, this, void 0, options);
+        }
+        this.change(options);
+      }
+      return this;
+    },
+
+    // Fetch the model from the server. If the server's representation of the
+    // model differs from its current attributes, they will be overridden,
+    // triggering a `"change"` event.
+    fetch : function(options) {
+      options || (options = {});
+      var model = this;
+      var success = options.success;
+      options.success = function(resp, status, xhr) {
+        if (!model.set(model.parse(resp, xhr), options)) return false;
+        if (success) success(model, resp);
+      };
+      options.error = wrapError(options.error, model, options);
+      return (this.sync || Backbone.sync).call(this, 'read', this, options);
+    },
+
+    // Set a hash of model attributes, and sync the model to the server.
+    // If the server returns an attributes hash that differs, the model's
+    // state will be `set` again.
+ save : function(attrs, options) { + options || (options = {}); + if (attrs && !this.set(attrs, options)) return false; + var model = this; + var success = options.success; + options.success = function(resp, status, xhr) { + if (!model.set(model.parse(resp, xhr), options)) return false; + if (success) success(model, resp, xhr); + }; + options.error = wrapError(options.error, model, options); + var method = this.isNew() ? 'create' : 'update'; + return (this.sync || Backbone.sync).call(this, method, this, options); + }, + + // Destroy this model on the server if it was already persisted. Upon success, the model is removed + // from its collection, if it has one. + destroy : function(options) { + options || (options = {}); + if (this.isNew()) return this.trigger('destroy', this, this.collection, options); + var model = this; + var success = options.success; + options.success = function(resp) { + model.trigger('destroy', model, model.collection, options); + if (success) success(model, resp); + }; + options.error = wrapError(options.error, model, options); + return (this.sync || Backbone.sync).call(this, 'delete', this, options); + }, + + // Default URL for the model's representation on the server -- if you're + // using Backbone's restful methods, override this to change the endpoint + // that will be called. + url : function() { + var base = getUrl(this.collection) || this.urlRoot || urlError(); + if (this.isNew()) return base; + return base + (base.charAt(base.length - 1) == '/' ? '' : '/') + encodeURIComponent(this.id); + }, + + // **parse** converts a response into the hash of attributes to be `set` on + // the model. The default implementation is just to pass the response along. + parse : function(resp, xhr) { + return resp; + }, + + // Create a new model with identical attributes to this one. + clone : function() { + return new this.constructor(this); + }, + + // A model is new if it has never been saved to the server, and lacks an id. 
+ isNew : function() { + return this.id == null; + }, + + // Call this method to manually fire a `change` event for this model. + // Calling this will cause all objects observing the model to update. + change : function(options) { + this.trigger('change', this, options); + this._previousAttributes = _.clone(this.attributes); + this._changed = false; + }, + + // Determine if the model has changed since the last `"change"` event. + // If you specify an attribute name, determine if that attribute has changed. + hasChanged : function(attr) { + if (attr) return this._previousAttributes[attr] != this.attributes[attr]; + return this._changed; + }, + + // Return an object containing all the attributes that have changed, or false + // if there are no changed attributes. Useful for determining what parts of a + // view need to be updated and/or what attributes need to be persisted to + // the server. + changedAttributes : function(now) { + now || (now = this.attributes); + var old = this._previousAttributes; + var changed = false; + for (var attr in now) { + if (!_.isEqual(old[attr], now[attr])) { + changed = changed || {}; + changed[attr] = now[attr]; + } + } + return changed; + }, + + // Get the previous value of an attribute, recorded at the time the last + // `"change"` event was fired. + previous : function(attr) { + if (!attr || !this._previousAttributes) return null; + return this._previousAttributes[attr]; + }, + + // Get all of the attributes of the model at the time of the previous + // `"change"` event. + previousAttributes : function() { + return _.clone(this._previousAttributes); + }, + + // Run validation against a set of incoming attributes, returning `true` + // if all is well. If a specific `error` callback has been passed, + // call that instead of firing the general `"error"` event. 
+ _performValidation : function(attrs, options) { + var error = this.validate(attrs); + if (error) { + if (options.error) { + options.error(this, error, options); + } else { + this.trigger('error', this, error, options); + } + return false; + } + return true; + } + + }); + + // Backbone.Collection + // ------------------- + + // Provides a standard collection class for our sets of models, ordered + // or unordered. If a `comparator` is specified, the Collection will maintain + // its models in sort order, as they're added and removed. + Backbone.Collection = function(models, options) { + options || (options = {}); + if (options.comparator) this.comparator = options.comparator; + _.bindAll(this, '_onModelEvent', '_removeReference'); + this._reset(); + if (models) this.reset(models, {silent: true}); + this.initialize.apply(this, arguments); + }; + + // Define the Collection's inheritable methods. + _.extend(Backbone.Collection.prototype, Backbone.Events, { + + // The default model for a collection is just a **Backbone.Model**. + // This should be overridden in most cases. + model : Backbone.Model, + + // Initialize is an empty function by default. Override it with your own + // initialization logic. + initialize : function(){}, + + // The JSON representation of a Collection is an array of the + // models' attributes. + toJSON : function() { + return this.map(function(model){ return model.toJSON(); }); + }, + + // Add a model, or list of models to the set. Pass **silent** to avoid + // firing the `added` event for every new model. + add : function(models, options) { + if (_.isArray(models)) { + for (var i = 0, l = models.length; i < l; i++) { + this._add(models[i], options); + } + } else { + this._add(models, options); + } + return this; + }, + + // Remove a model, or a list of models from the set. Pass silent to avoid + // firing the `removed` event for every model removed. 
+ remove : function(models, options) { + if (_.isArray(models)) { + for (var i = 0, l = models.length; i < l; i++) { + this._remove(models[i], options); + } + } else { + this._remove(models, options); + } + return this; + }, + + // Get a model from the set by id. + get : function(id) { + if (id == null) return null; + return this._byId[id.id != null ? id.id : id]; + }, + + // Get a model from the set by client id. + getByCid : function(cid) { + return cid && this._byCid[cid.cid || cid]; + }, + + // Get the model at the given index. + at: function(index) { + return this.models[index]; + }, + + // Force the collection to re-sort itself. You don't need to call this under normal + // circumstances, as the set will maintain sort order as each item is added. + sort : function(options) { + options || (options = {}); + if (!this.comparator) throw new Error('Cannot sort a set without a comparator'); + this.models = this.sortBy(this.comparator); + if (!options.silent) this.trigger('reset', this, options); + return this; + }, + + // Pluck an attribute from each model in the collection. + pluck : function(attr) { + return _.map(this.models, function(model){ return model.get(attr); }); + }, + + // When you have more items than you want to add or remove individually, + // you can reset the entire set with a new list of models, without firing + // any `added` or `removed` events. Fires `reset` when finished. + reset : function(models, options) { + models || (models = []); + options || (options = {}); + this.each(this._removeReference); + this._reset(); + this.add(models, {silent: true}); + if (!options.silent) this.trigger('reset', this, options); + return this; + }, + + // Fetch the default set of models for this collection, resetting the + // collection when they arrive. If `add: true` is passed, appends the + // models to the collection instead of resetting. 
+ fetch : function(options) { + options || (options = {}); + var collection = this; + var success = options.success; + options.success = function(resp, status, xhr) { + collection[options.add ? 'add' : 'reset'](collection.parse(resp, xhr), options); + if (success) success(collection, resp); + }; + options.error = wrapError(options.error, collection, options); + return (this.sync || Backbone.sync).call(this, 'read', this, options); + }, + + // Create a new instance of a model in this collection. After the model + // has been created on the server, it will be added to the collection. + // Returns the model, or 'false' if validation on a new model fails. + create : function(model, options) { + var coll = this; + options || (options = {}); + model = this._prepareModel(model, options); + if (!model) return false; + var success = options.success; + options.success = function(nextModel, resp, xhr) { + coll.add(nextModel, options); + if (success) success(nextModel, resp, xhr); + }; + model.save(null, options); + return model; + }, + + // **parse** converts a response into a list of models to be added to the + // collection. The default implementation is just to pass it through. + parse : function(resp, xhr) { + return resp; + }, + + // Proxy to _'s chain. Can't be proxied the same way the rest of the + // underscore methods are proxied because it relies on the underscore + // constructor. + chain: function () { + return _(this.models).chain(); + }, + + // Reset all internal state. Called when the collection is reset. 
+ _reset : function(options) { + this.length = 0; + this.models = []; + this._byId = {}; + this._byCid = {}; + }, + + // Prepare a model to be added to this collection + _prepareModel: function(model, options) { + if (!(model instanceof Backbone.Model)) { + var attrs = model; + model = new this.model(attrs, {collection: this}); + if (model.validate && !model._performValidation(attrs, options)) model = false; + } else if (!model.collection) { + model.collection = this; + } + return model; + }, + + // Internal implementation of adding a single model to the set, updating + // hash indexes for `id` and `cid` lookups. + // Returns the model, or 'false' if validation on a new model fails. + _add : function(model, options) { + options || (options = {}); + model = this._prepareModel(model, options); + if (!model) return false; + var already = this.getByCid(model); + if (already) throw new Error(["Can't add the same model to a set twice", already.id]); + this._byId[model.id] = model; + this._byCid[model.cid] = model; + var index = options.at != null ? options.at : + this.comparator ? this.sortedIndex(model, this.comparator) : + this.length; + this.models.splice(index, 0, model); + model.bind('all', this._onModelEvent); + this.length++; + if (!options.silent) model.trigger('add', model, this, options); + return model; + }, + + // Internal implementation of removing a single model from the set, updating + // hash indexes for `id` and `cid` lookups. + _remove : function(model, options) { + options || (options = {}); + model = this.getByCid(model) || this.get(model); + if (!model) return null; + delete this._byId[model.id]; + delete this._byCid[model.cid]; + this.models.splice(this.indexOf(model), 1); + this.length--; + if (!options.silent) model.trigger('remove', model, this, options); + this._removeReference(model); + return model; + }, + + // Internal method to remove a model's ties to a collection. 
+ _removeReference : function(model) { + if (this == model.collection) { + delete model.collection; + } + model.unbind('all', this._onModelEvent); + }, + + // Internal method called every time a model in the set fires an event. + // Sets need to update their indexes when models change ids. All other + // events simply proxy through. "add" and "remove" events that originate + // in other collections are ignored. + _onModelEvent : function(ev, model, collection, options) { + if ((ev == 'add' || ev == 'remove') && collection != this) return; + if (ev == 'destroy') { + this._remove(model, options); + } + if (model && ev === 'change:' + model.idAttribute) { + delete this._byId[model.previous(model.idAttribute)]; + this._byId[model.id] = model; + } + this.trigger.apply(this, arguments); + } + + }); + + // Underscore methods that we want to implement on the Collection. + var methods = ['forEach', 'each', 'map', 'reduce', 'reduceRight', 'find', 'detect', + 'filter', 'select', 'reject', 'every', 'all', 'some', 'any', 'include', + 'contains', 'invoke', 'max', 'min', 'sortBy', 'sortedIndex', 'toArray', 'size', + 'first', 'rest', 'last', 'without', 'indexOf', 'lastIndexOf', 'isEmpty', 'groupBy']; + + // Mix in each Underscore method as a proxy to `Collection#models`. + _.each(methods, function(method) { + Backbone.Collection.prototype[method] = function() { + return _[method].apply(_, [this.models].concat(_.toArray(arguments))); + }; + }); + + // Backbone.Router + // ------------------- + + // Routers map faux-URLs to actions, and fire events when routes are + // matched. Creating a new one sets its `routes` hash, if not set statically. + Backbone.Router = function(options) { + options || (options = {}); + if (options.routes) this.routes = options.routes; + this._bindRoutes(); + this.initialize.apply(this, arguments); + }; + + // Cached regular expressions for matching named param parts and splatted + // parts of route strings. 
+ var namedParam = /:([\w\d]+)/g; + var splatParam = /\*([\w\d]+)/g; + var escapeRegExp = /[-[\]{}()+?.,\\^$|#\s]/g; + + // Set up all inheritable **Backbone.Router** properties and methods. + _.extend(Backbone.Router.prototype, Backbone.Events, { + + // Initialize is an empty function by default. Override it with your own + // initialization logic. + initialize : function(){}, + + // Manually bind a single named route to a callback. For example: + // + // this.route('search/:query/p:num', 'search', function(query, num) { + // ... + // }); + // + route : function(route, name, callback) { + Backbone.history || (Backbone.history = new Backbone.History); + if (!_.isRegExp(route)) route = this._routeToRegExp(route); + Backbone.history.route(route, _.bind(function(fragment) { + var args = this._extractParameters(route, fragment); + callback.apply(this, args); + this.trigger.apply(this, ['route:' + name].concat(args)); + }, this)); + }, + + // Simple proxy to `Backbone.history` to save a fragment into the history. + navigate : function(fragment, triggerRoute) { + Backbone.history.navigate(fragment, triggerRoute); + }, + + // Bind all defined routes to `Backbone.history`. We have to reverse the + // order of the routes here to support behavior where the most general + // routes can be defined at the bottom of the route map. + _bindRoutes : function() { + if (!this.routes) return; + var routes = []; + for (var route in this.routes) { + routes.unshift([route, this.routes[route]]); + } + for (var i = 0, l = routes.length; i < l; i++) { + this.route(routes[i][0], routes[i][1], this[routes[i][1]]); + } + }, + + // Convert a route string into a regular expression, suitable for matching + // against the current location hash. 
+ _routeToRegExp : function(route) { + route = route.replace(escapeRegExp, "\\$&") + .replace(namedParam, "([^\/]*)") + .replace(splatParam, "(.*?)"); + return new RegExp('^' + route + '$'); + }, + + // Given a route, and a URL fragment that it matches, return the array of + // extracted parameters. + _extractParameters : function(route, fragment) { + return route.exec(fragment).slice(1); + } + + }); + + // Backbone.History + // ---------------- + + // Handles cross-browser history management, based on URL fragments. If the + // browser does not support `onhashchange`, falls back to polling. + Backbone.History = function() { + this.handlers = []; + _.bindAll(this, 'checkUrl'); + }; + + // Cached regex for cleaning hashes. + var hashStrip = /^#*/; + + // Cached regex for detecting MSIE. + var isExplorer = /msie [\w.]+/; + + // Has the history handling already been started? + var historyStarted = false; + + // Set up all inheritable **Backbone.History** properties and methods. + _.extend(Backbone.History.prototype, { + + // The default interval to poll for hash changes, if necessary, is + // twenty times a second. + interval: 50, + + // Get the cross-browser normalized URL fragment, either from the URL, + // the hash, or the override. + getFragment : function(fragment, forcePushState) { + if (fragment == null) { + if (this._hasPushState || forcePushState) { + fragment = window.location.pathname; + var search = window.location.search; + if (search) fragment += search; + if (fragment.indexOf(this.options.root) == 0) fragment = fragment.substr(this.options.root.length); + } else { + fragment = window.location.hash; + } + } + return decodeURIComponent(fragment.replace(hashStrip, '')); + }, + + // Start the hash change handling, returning `true` if the current URL matches + // an existing route, and `false` otherwise. + start : function(options) { + + // Figure out the initial configuration. Do we need an iframe? + // Is pushState desired ... is it available? 
+ if (historyStarted) throw new Error("Backbone.history has already been started"); + this.options = _.extend({}, {root: '/'}, this.options, options); + this._wantsPushState = !!this.options.pushState; + this._hasPushState = !!(this.options.pushState && window.history && window.history.pushState); + var fragment = this.getFragment(); + var docMode = document.documentMode; + var oldIE = (isExplorer.exec(navigator.userAgent.toLowerCase()) && (!docMode || docMode <= 7)); + if (oldIE) { + this.iframe = $('<iframe src="javascript:0" tabindex="-1" />').hide().appendTo('body')[0].contentWindow; + this.navigate(fragment); + } + + // Depending on whether we're using pushState or hashes, and whether + // 'onhashchange' is supported, determine how we check the URL state. + if (this._hasPushState) { + $(window).bind('popstate', this.checkUrl); + } else if ('onhashchange' in window && !oldIE) { + $(window).bind('hashchange', this.checkUrl); + } else { + setInterval(this.checkUrl, this.interval); + } + + // Determine if we need to change the base url, for a pushState link + // opened by a non-pushState browser. + this.fragment = fragment; + historyStarted = true; + var loc = window.location; + var atRoot = loc.pathname == this.options.root; + if (this._wantsPushState && !this._hasPushState && !atRoot) { + this.fragment = this.getFragment(null, true); + window.location.replace(this.options.root + '#' + this.fragment); + // Return immediately as browser will do redirect to new url + return true; + } else if (this._wantsPushState && this._hasPushState && atRoot && loc.hash) { + this.fragment = loc.hash.replace(hashStrip, ''); + window.history.replaceState({}, document.title, loc.protocol + '//' + loc.host + this.options.root + this.fragment); + } + + if (!this.options.silent) { + return this.loadUrl(); + } + }, + + // Add a route to be tested when the fragment changes. Routes added later may + // override previous routes. 
+ route : function(route, callback) { + this.handlers.unshift({route : route, callback : callback}); + }, + + // Checks the current URL to see if it has changed, and if it has, + // calls `loadUrl`, normalizing across the hidden iframe. + checkUrl : function(e) { + var current = this.getFragment(); + if (current == this.fragment && this.iframe) current = this.getFragment(this.iframe.location.hash); + if (current == this.fragment || current == decodeURIComponent(this.fragment)) return false; + if (this.iframe) this.navigate(current); + this.loadUrl() || this.loadUrl(window.location.hash); + }, + + // Attempt to load the current URL fragment. If a route succeeds with a + // match, returns `true`. If no defined routes matches the fragment, + // returns `false`. + loadUrl : function(fragmentOverride) { + var fragment = this.fragment = this.getFragment(fragmentOverride); + var matched = _.any(this.handlers, function(handler) { + if (handler.route.test(fragment)) { + handler.callback(fragment); + return true; + } + }); + return matched; + }, + + // Save a fragment into the hash history. You are responsible for properly + // URL-encoding the fragment in advance. This does not trigger + // a `hashchange` event. 
+ navigate : function(fragment, triggerRoute) { + var frag = (fragment || '').replace(hashStrip, ''); + if (this.fragment == frag || this.fragment == decodeURIComponent(frag)) return; + if (this._hasPushState) { + var loc = window.location; + if (frag.indexOf(this.options.root) != 0) frag = this.options.root + frag; + this.fragment = frag; + window.history.pushState({}, document.title, loc.protocol + '//' + loc.host + frag); + } else { + window.location.hash = this.fragment = frag; + if (this.iframe && (frag != this.getFragment(this.iframe.location.hash))) { + this.iframe.document.open().close(); + this.iframe.location.hash = frag; + } + } + if (triggerRoute) this.loadUrl(fragment); + } + + }); + + // Backbone.View + // ------------- + + // Creating a Backbone.View creates its initial element outside of the DOM, + // if an existing element is not provided... + Backbone.View = function(options) { + this.cid = _.uniqueId('view'); + this._configure(options || {}); + this._ensureElement(); + this.delegateEvents(); + this.initialize.apply(this, arguments); + }; + + // Element lookup, scoped to DOM elements within the current view. + // This should be prefered to global lookups, if you're dealing with + // a specific view. + var selectorDelegate = function(selector) { + return $(selector, this.el); + }; + + // Cached regex to split keys for `delegate`. + var eventSplitter = /^(\S+)\s*(.*)$/; + + // List of view options to be merged as properties. + var viewOptions = ['model', 'collection', 'el', 'id', 'attributes', 'className', 'tagName']; + + // Set up all inheritable **Backbone.View** properties and methods. + _.extend(Backbone.View.prototype, Backbone.Events, { + + // The default `tagName` of a View's element is `"div"`. + tagName : 'div', + + // Attach the `selectorDelegate` function as the `$` property. + $ : selectorDelegate, + + // Initialize is an empty function by default. Override it with your own + // initialization logic. 
+ initialize : function(){}, + + // **render** is the core function that your view should override, in order + // to populate its element (`this.el`), with the appropriate HTML. The + // convention is for **render** to always return `this`. + render : function() { + return this; + }, + + // Remove this view from the DOM. Note that the view isn't present in the + // DOM by default, so calling this method may be a no-op. + remove : function() { + $(this.el).remove(); + return this; + }, + + // For small amounts of DOM Elements, where a full-blown template isn't + // needed, use **make** to manufacture elements, one at a time. + // + // var el = this.make('li', {'class': 'row'}, this.model.escape('title')); + // + make : function(tagName, attributes, content) { + var el = document.createElement(tagName); + if (attributes) $(el).attr(attributes); + if (content) $(el).html(content); + return el; + }, + + // Set callbacks, where `this.callbacks` is a hash of + // + // *{"event selector": "callback"}* + // + // { + // 'mousedown .title': 'edit', + // 'click .button': 'save' + // } + // + // pairs. Callbacks will be bound to the view, with `this` set properly. + // Uses event delegation for efficiency. + // Omitting the selector binds the event to `this.el`. + // This only works for delegate-able events: not `focus`, `blur`, and + // not `change`, `submit`, and `reset` in Internet Explorer. 
+ delegateEvents : function(events) { + if (!(events || (events = this.events))) return; + if (_.isFunction(events)) events = events.call(this); + $(this.el).unbind('.delegateEvents' + this.cid); + for (var key in events) { + var method = this[events[key]]; + if (!method) throw new Error('Event "' + events[key] + '" does not exist'); + var match = key.match(eventSplitter); + var eventName = match[1], selector = match[2]; + method = _.bind(method, this); + eventName += '.delegateEvents' + this.cid; + if (selector === '') { + $(this.el).bind(eventName, method); + } else { + $(this.el).delegate(selector, eventName, method); + } + } + }, + + // Performs the initial configuration of a View with a set of options. + // Keys with special meaning *(model, collection, id, className)*, are + // attached directly to the view. + _configure : function(options) { + if (this.options) options = _.extend({}, this.options, options); + for (var i = 0, l = viewOptions.length; i < l; i++) { + var attr = viewOptions[i]; + if (options[attr]) this[attr] = options[attr]; + } + this.options = options; + }, + + // Ensure that the View has a DOM element to render into. + // If `this.el` is a string, pass it through `$()`, take the first + // matching element, and re-assign it to `el`. Otherwise, create + // an element from the `id`, `className` and `tagName` proeprties. + _ensureElement : function() { + if (!this.el) { + var attrs = this.attributes || {}; + if (this.id) attrs.id = this.id; + if (this.className) attrs['class'] = this.className; + this.el = this.make(this.tagName, attrs); + } else if (_.isString(this.el)) { + this.el = $(this.el).get(0); + } + } + + }); + + // The self-propagating extend function that Backbone classes use. + var extend = function (protoProps, classProps) { + var child = inherits(this, protoProps, classProps); + child.extend = this.extend; + return child; + }; + + // Set up inheritance for the model, collection, and view. 
+ Backbone.Model.extend = Backbone.Collection.extend = + Backbone.Router.extend = Backbone.View.extend = extend; + + // Map from CRUD to HTTP for our default `Backbone.sync` implementation. + var methodMap = { + 'create': 'POST', + 'update': 'PUT', + 'delete': 'DELETE', + 'read' : 'GET' + }; + + // Backbone.sync + // ------------- + + // Override this function to change the manner in which Backbone persists + // models to the server. You will be passed the type of request, and the + // model in question. By default, uses makes a RESTful Ajax request + // to the model's `url()`. Some possible customizations could be: + // + // * Use `setTimeout` to batch rapid-fire updates into a single request. + // * Send up the models as XML instead of JSON. + // * Persist models via WebSockets instead of Ajax. + // + // Turn on `Backbone.emulateHTTP` in order to send `PUT` and `DELETE` requests + // as `POST`, with a `_method` parameter containing the true HTTP method, + // as well as all requests with the body as `application/x-www-form-urlencoded` instead of + // `application/json` with the model in a param named `model`. + // Useful when interfacing with server-side languages like **PHP** that make + // it difficult to read the body of `PUT` requests. + Backbone.sync = function(method, model, options) { + var type = methodMap[method]; + + // Default JSON-request options. + var params = _.extend({ + type: type, + dataType: 'json' + }, options); + + // Ensure that we have a URL. + if (!params.url) { + params.url = getUrl(model) || urlError(); + } + + // Ensure that we have the appropriate request data. + if (!params.data && model && (method == 'create' || method == 'update')) { + params.contentType = 'application/json'; + params.data = JSON.stringify(model.toJSON()); + } + + // For older servers, emulate JSON by encoding the request into an HTML-form. + if (Backbone.emulateJSON) { + params.contentType = 'application/x-www-form-urlencoded'; + params.data = params.data ? 
{model : params.data} : {}; + } + + // For older servers, emulate HTTP by mimicking the HTTP method with `_method` + // And an `X-HTTP-Method-Override` header. + if (Backbone.emulateHTTP) { + if (type === 'PUT' || type === 'DELETE') { + if (Backbone.emulateJSON) params.data._method = type; + params.type = 'POST'; + params.beforeSend = function(xhr) { + xhr.setRequestHeader('X-HTTP-Method-Override', type); + }; + } + } + + // Don't process data on a non-GET request. + if (params.type !== 'GET' && !Backbone.emulateJSON) { + params.processData = false; + } + + // Make the request. + return $.ajax(params); + }; + + // Helpers + // ------- + + // Shared empty constructor function to aid in prototype-chain creation. + var ctor = function(){}; + + // Helper function to correctly set up the prototype chain, for subclasses. + // Similar to `goog.inherits`, but uses a hash of prototype properties and + // class properties to be extended. + var inherits = function(parent, protoProps, staticProps) { + var child; + + // The constructor function for the new subclass is either defined by you + // (the "constructor" property in your `extend` definition), or defaulted + // by us to simply call `super()`. + if (protoProps && protoProps.hasOwnProperty('constructor')) { + child = protoProps.constructor; + } else { + child = function(){ return parent.apply(this, arguments); }; + } + + // Inherit class (static) properties from parent. + _.extend(child, parent); + + // Set the prototype chain to inherit from `parent`, without calling + // `parent`'s constructor function. + ctor.prototype = parent.prototype; + child.prototype = new ctor(); + + // Add prototype properties (instance properties) to the subclass, + // if supplied. + if (protoProps) _.extend(child.prototype, protoProps); + + // Add static properties to the constructor function, if supplied. + if (staticProps) _.extend(child, staticProps); + + // Correctly set child's `prototype.constructor`. 
+ child.prototype.constructor = child; + + // Set a convenience property in case the parent's prototype is needed later. + child.__super__ = parent.prototype; + + return child; + }; + + // Helper function to get a URL from a Model or Collection as a property + // or as a function. + var getUrl = function(object) { + if (!(object && object.url)) return null; + return _.isFunction(object.url) ? object.url() : object.url; + }; + + // Throw an error when a URL is needed, and none is supplied. + var urlError = function() { + throw new Error('A "url" property or function must be specified'); + }; + + // Wrap an optional error callback with a fallback error event. + var wrapError = function(onError, model, options) { + return function(resp) { + if (onError) { + onError(model, resp, options); + } else { + model.trigger('error', model, resp, options); + } + }; + }; + + // Helper function to escape a string for HTML rendering. + var escapeHTML = function(string) { + return string.replace(/&(?!\w+;|#\d+;|#x[\da-f]+;)/gi, '&').replace(/</g, '<').replace(/>/g, '>').replace(/"/g, '"').replace(/'/g, ''').replace(/\//g,'/'); + }; + +}).call(this); diff --git a/libs/beetsplug/web/static/beets.css b/libs/beetsplug/web/static/beets.css new file mode 100644 index 00000000..2ca7fc83 --- /dev/null +++ b/libs/beetsplug/web/static/beets.css @@ -0,0 +1,160 @@ +body { + font-family: Helvetica, Arial, sans-serif; +} + +#header { + position: fixed; + left: 0; + right: 0; + top: 0; + height: 36px; + + color: white; + + cursor: default; + + /* shadowy border */ + box-shadow: 0 0 20px #999; + -webkit-box-shadow: 0 0 20px #999; + -moz-box-shadow: 0 0 20px #999; + + /* background gradient */ + background: #0e0e0e; + background: -moz-linear-gradient(top, #6b6b6b 0%, #0e0e0e 100%); + background: -webkit-linear-gradient(top, #6b6b6b 0%,#0e0e0e 100%); +} +#header h1 { + font-size: 1.1em; + font-weight: bold; + color: white; + margin: 0.35em; + float: left; +} + +#entities { + width: 17em; + + 
position: fixed; + top: 36px; + left: 0; + bottom: 0; + margin: 0; + + z-index: 1; + background: #dde4eb; + + /* shadowy border */ + box-shadow: 0 0 20px #666; + -webkit-box-shadow: 0 0 20px #666; + -moz-box-shadow: 0 0 20px #666; +} +#queryForm { + display: block; + text-align: center; + margin: 0.25em 0; +} +#query { + width: 95%; + font-size: 1em; +} +#entities ul { + width: 17em; + + position: fixed; + top: 36px; + left: 0; + bottom: 0; + margin: 2.2em 0 0 0; + padding: 0; + + overflow-y: auto; + overflow-x: hidden; +} +#entities ul li { + list-style: none; + padding: 4px 8px; + margin: 0; + cursor: default; +} +#entities ul li.selected { + background: #7abcff; + background: -moz-linear-gradient(top, #7abcff 0%, #60abf8 44%, #4096ee 100%); + background: -webkit-gradient(linear, left top, left bottom, color-stop(0%,#7abcff), color-stop(44%,#60abf8), color-stop(100%,#4096ee)); + color: white; +} +#entities ul li .playing { + margin-left: 5px; + font-size: 0.9em; +} + + +#main-detail, #extra-detail { + position: fixed; + left: 17em; + margin: 1.0em 0 0 1.5em; +} +#main-detail { + top: 36px; + height: 98px; +} +#main-detail .artist, #main-detail .album, #main-detail .title { + display: block; +} +#main-detail .title { + font-size: 1.3em; + font-weight: bold; +} +#main-detail .albumtitle { + font-style: italic; +} + +#extra-detail { + overflow-x: hidden; + overflow-y: auto; + top: 134px; + bottom: 0; + right: 0; +} +/*Fix for correctly displaying line breaks in lyrics*/ +#extra-detail .lyrics { + white-space: pre-wrap; +} +#extra-detail dl dt, #extra-detail dl dd { + list-style: none; + margin: 0; + padding: 0; +} +#extra-detail dl dt { + width: 10em; + float: left; + text-align: right; + font-weight: bold; + clear: both; +} +#extra-detail dl dd { + margin-left: 10.5em; +} + + +#player { + float: left; + width: 150px; + height: 36px; +} +#player .play, #player .pause, #player .disabled { + -webkit-appearance: none; + font-size: 1em; + font-family: Helvetica, Arial, 
sans-serif; + background: none; + border: none; + color: white; + padding: 5px; + margin: 0; + text-align: center; + + width: 36px; + height: 36px; +} +#player .disabled { + color: #666; +} diff --git a/libs/beetsplug/web/static/beets.js b/libs/beetsplug/web/static/beets.js new file mode 100644 index 00000000..757f2cda --- /dev/null +++ b/libs/beetsplug/web/static/beets.js @@ -0,0 +1,314 @@ +// Format times as minutes and seconds. +var timeFormat = function(secs) { + if (secs == undefined || isNaN(secs)) { + return '0:00'; + } + secs = Math.round(secs); + var mins = '' + Math.round(secs / 60); + secs = '' + (secs % 60); + if (secs.length < 2) { + secs = '0' + secs; + } + return mins + ':' + secs; +} + +// jQuery extension encapsulating event hookups for audio element controls. +$.fn.player = function(debug) { + // Selected element should contain an HTML5 Audio element. + var audio = $('audio', this).get(0); + + // Control elements that may be present, identified by class. + var playBtn = $('.play', this); + var pauseBtn = $('.pause', this); + var disabledInd = $('.disabled', this); + var timesEl = $('.times', this); + var curTimeEl = $('.currentTime', this); + var totalTimeEl = $('.totalTime', this); + var sliderPlayedEl = $('.slider .played', this); + var sliderLoadedEl = $('.slider .loaded', this); + + // Button events. + playBtn.click(function() { + audio.play(); + }); + pauseBtn.click(function(ev) { + audio.pause(); + }); + + // Utilities. + var timePercent = function(cur, total) { + if (cur == undefined || isNaN(cur) || + total == undefined || isNaN(total) || total == 0) { + return 0; + } + var ratio = cur / total; + if (ratio > 1.0) { + ratio = 1.0; + } + return (Math.round(ratio * 10000) / 100) + '%'; + } + + // Event helpers. 
+ var dbg = function(msg) { + if (debug) + console.log(msg); + } + var showState = function() { + if (audio.duration == undefined || isNaN(audio.duration)) { + playBtn.hide(); + pauseBtn.hide(); + disabledInd.show(); + timesEl.hide(); + } else if (audio.paused) { + playBtn.show(); + pauseBtn.hide(); + disabledInd.hide(); + timesEl.show(); + } else { + playBtn.hide(); + pauseBtn.show(); + disabledInd.hide(); + timesEl.show(); + } + } + var showTimes = function() { + curTimeEl.text(timeFormat(audio.currentTime)); + totalTimeEl.text(timeFormat(audio.duration)); + + sliderPlayedEl.css('width', + timePercent(audio.currentTime, audio.duration)); + + // last time buffered + var bufferEnd = 0; + for (var i = 0; i < audio.buffered.length; ++i) { + if (audio.buffered.end(i) > bufferEnd) + bufferEnd = audio.buffered.end(i); + } + sliderLoadedEl.css('width', + timePercent(bufferEnd, audio.duration)); + } + + // Initialize controls. + showState(); + showTimes(); + + // Bind events. + $('audio', this).bind({ + playing: function() { + dbg('playing'); + showState(); + }, + pause: function() { + dbg('pause'); + showState(); + }, + ended: function() { + dbg('ended'); + showState(); + }, + progress: function() { + dbg('progress ' + audio.buffered); + }, + timeupdate: function() { + dbg('timeupdate ' + audio.currentTime); + showTimes(); + }, + durationchange: function() { + dbg('durationchange ' + audio.duration); + showState(); + showTimes(); + }, + loadeddata: function() { + dbg('loadeddata'); + }, + loadedmetadata: function() { + dbg('loadedmetadata'); + } + }); +} + +// Simple selection disable for jQuery. +// Cut-and-paste from: +// http://stackoverflow.com/questions/2700000 +$.fn.disableSelection = function() { + $(this).attr('unselectable', 'on') + .css('-moz-user-select', 'none') + .each(function() { + this.onselectstart = function() { return false; }; + }); +}; + +$(function() { + +// Routes. 
+var BeetsRouter = Backbone.Router.extend({ + routes: { + "item/query/:query": "itemQuery", + }, + itemQuery: function(query) { + var queryURL = query.split(/\s+/).map(encodeURIComponent).join('/'); + $.getJSON('/item/query/' + queryURL, function(data) { + var models = _.map( + data['results'], + function(d) { return new Item(d); } + ); + var results = new Items(models); + app.showItems(results); + }); + } +}); +var router = new BeetsRouter(); + +// Model. +var Item = Backbone.Model.extend({ + urlRoot: '/item' +}); +var Items = Backbone.Collection.extend({ + model: Item +}); + +// Item views. +var ItemEntryView = Backbone.View.extend({ + tagName: "li", + template: _.template($('#item-entry-template').html()), + events: { + 'click': 'select', + 'dblclick': 'play' + }, + initialize: function() { + this.playing = false; + }, + render: function() { + $(this.el).html(this.template(this.model.toJSON())); + this.setPlaying(this.playing); + return this; + }, + select: function() { + app.selectItem(this); + }, + play: function() { + app.playItem(this.model); + }, + setPlaying: function(val) { + this.playing = val; + if (val) + this.$('.playing').show(); + else + this.$('.playing').hide(); + } +}); +//Holds Title, Artist, Album etc. +var ItemMainDetailView = Backbone.View.extend({ + tagName: "div", + template: _.template($('#item-main-detail-template').html()), + events: { + 'click .play': 'play', + }, + render: function() { + $(this.el).html(this.template(this.model.toJSON())); + return this; + }, + play: function() { + app.playItem(this.model); + } +}); +// Holds Track no., Format, MusicBrainz link, Lyrics, Comments etc. +var ItemExtraDetailView = Backbone.View.extend({ + tagName: "div", + template: _.template($('#item-extra-detail-template').html()), + render: function() { + $(this.el).html(this.template(this.model.toJSON())); + return this; + } +}); +// Main app view. 
+var AppView = Backbone.View.extend({ + el: $('body'), + events: { + 'submit #queryForm': 'querySubmit', + }, + querySubmit: function(ev) { + ev.preventDefault(); + router.navigate('item/query/' + encodeURIComponent($('#query').val()), true); + }, + initialize: function() { + this.playingItem = null; + this.shownItems = null; + + // Not sure why these events won't bind automatically. + this.$('audio').bind({ + 'play': _.bind(this.audioPlay, this), + 'pause': _.bind(this.audioPause, this), + 'ended': _.bind(this.audioEnded, this) + }); + }, + showItems: function(items) { + this.shownItems = items; + $('#results').empty(); + items.each(function(item) { + var view = new ItemEntryView({model: item}); + item.entryView = view; + $('#results').append(view.render().el); + }); + }, + selectItem: function(view) { + // Mark row as selected. + $('#results li').removeClass("selected"); + $(view.el).addClass("selected"); + + // Show main and extra detail. + var mainDetailView = new ItemMainDetailView({model: view.model}); + $('#main-detail').empty().append(mainDetailView.render().el); + + var extraDetailView = new ItemExtraDetailView({model: view.model}); + $('#extra-detail').empty().append(extraDetailView.render().el); + }, + playItem: function(item) { + var url = '/item/' + item.get('id') + '/file'; + $('#player audio').attr('src', url); + $('#player audio').get(0).play(); + + if (this.playingItem != null) { + this.playingItem.entryView.setPlaying(false); + } + item.entryView.setPlaying(true); + this.playingItem = item; + }, + + audioPause: function() { + this.playingItem.entryView.setPlaying(false); + }, + audioPlay: function() { + if (this.playingItem != null) + this.playingItem.entryView.setPlaying(true); + }, + audioEnded: function() { + this.playingItem.entryView.setPlaying(false); + + // Try to play the next track. + var idx = this.shownItems.indexOf(this.playingItem); + if (idx == -1) { + // Not in current list. 
+ return; + } + var nextIdx = idx + 1; + if (nextIdx >= this.shownItems.size()) { + // End of list. + return; + } + this.playItem(this.shownItems.at(nextIdx)); + } +}); +var app = new AppView(); + +// App setup. +Backbone.history.start({pushState: false}); + +// Disable selection on UI elements. +$('#entities ul').disableSelection(); +$('#header').disableSelection(); + +// Audio player setup. +$('#player').player(); + +}); diff --git a/libs/beetsplug/web/static/jquery.js b/libs/beetsplug/web/static/jquery.js new file mode 100644 index 00000000..e1414212 --- /dev/null +++ b/libs/beetsplug/web/static/jquery.js @@ -0,0 +1,9266 @@ +/*! + * jQuery JavaScript Library v1.7.1 + * http://jquery.com/ + * + * Copyright 2016, John Resig + * Dual licensed under the MIT or GPL Version 2 licenses. + * http://jquery.org/license + * + * Includes Sizzle.js + * http://sizzlejs.com/ + * Copyright 2016, The Dojo Foundation + * Released under the MIT, BSD, and GPL Licenses. + * + * Date: Mon Nov 21 21:11:03 2011 -0500 + */ +(function( window, undefined ) { + +// Use the correct document accordingly with window argument (sandbox) +var document = window.document, + navigator = window.navigator, + location = window.location; +var jQuery = (function() { + +// Define a local copy of jQuery +var jQuery = function( selector, context ) { + // The jQuery object is actually just the init constructor 'enhanced' + return new jQuery.fn.init( selector, context, rootjQuery ); + }, + + // Map over jQuery in case of overwrite + _jQuery = window.jQuery, + + // Map over the $ in case of overwrite + _$ = window.$, + + // A central reference to the root jQuery(document) + rootjQuery, + + // A simple way to check for HTML strings or ID strings + // Prioritize #id over <tag> to avoid XSS via location.hash (#9521) + quickExpr = /^(?:[^#<]*(<[\w\W]+>)[^>]*$|#([\w\-]*)$)/, + + // Check if a string has a non-whitespace character in it + rnotwhite = /\S/, + + // Used for trimming whitespace + trimLeft = /^\s+/, + 
trimRight = /\s+$/, + + // Match a standalone tag + rsingleTag = /^<(\w+)\s*\/?>(?:<\/\1>)?$/, + + // JSON RegExp + rvalidchars = /^[\],:{}\s]*$/, + rvalidescape = /\\(?:["\\\/bfnrt]|u[0-9a-fA-F]{4})/g, + rvalidtokens = /"[^"\\\n\r]*"|true|false|null|-?\d+(?:\.\d*)?(?:[eE][+\-]?\d+)?/g, + rvalidbraces = /(?:^|:|,)(?:\s*\[)+/g, + + // Useragent RegExp + rwebkit = /(webkit)[ \/]([\w.]+)/, + ropera = /(opera)(?:.*version)?[ \/]([\w.]+)/, + rmsie = /(msie) ([\w.]+)/, + rmozilla = /(mozilla)(?:.*? rv:([\w.]+))?/, + + // Matches dashed string for camelizing + rdashAlpha = /-([a-z]|[0-9])/ig, + rmsPrefix = /^-ms-/, + + // Used by jQuery.camelCase as callback to replace() + fcamelCase = function( all, letter ) { + return ( letter + "" ).toUpperCase(); + }, + + // Keep a UserAgent string for use with jQuery.browser + userAgent = navigator.userAgent, + + // For matching the engine and version of the browser + browserMatch, + + // The deferred used on DOM ready + readyList, + + // The ready event handler + DOMContentLoaded, + + // Save a reference to some core methods + toString = Object.prototype.toString, + hasOwn = Object.prototype.hasOwnProperty, + push = Array.prototype.push, + slice = Array.prototype.slice, + trim = String.prototype.trim, + indexOf = Array.prototype.indexOf, + + // [[Class]] -> type pairs + class2type = {}; + +jQuery.fn = jQuery.prototype = { + constructor: jQuery, + init: function( selector, context, rootjQuery ) { + var match, elem, ret, doc; + + // Handle $(""), $(null), or $(undefined) + if ( !selector ) { + return this; + } + + // Handle $(DOMElement) + if ( selector.nodeType ) { + this.context = this[0] = selector; + this.length = 1; + return this; + } + + // The body element only exists once, optimize finding it + if ( selector === "body" && !context && document.body ) { + this.context = document; + this[0] = document.body; + this.selector = selector; + this.length = 1; + return this; + } + + // Handle HTML strings + if ( typeof selector === 
"string" ) { + // Are we dealing with HTML string or an ID? + if ( selector.charAt(0) === "<" && selector.charAt( selector.length - 1 ) === ">" && selector.length >= 3 ) { + // Assume that strings that start and end with <> are HTML and skip the regex check + match = [ null, selector, null ]; + + } else { + match = quickExpr.exec( selector ); + } + + // Verify a match, and that no context was specified for #id + if ( match && (match[1] || !context) ) { + + // HANDLE: $(html) -> $(array) + if ( match[1] ) { + context = context instanceof jQuery ? context[0] : context; + doc = ( context ? context.ownerDocument || context : document ); + + // If a single string is passed in and it's a single tag + // just do a createElement and skip the rest + ret = rsingleTag.exec( selector ); + + if ( ret ) { + if ( jQuery.isPlainObject( context ) ) { + selector = [ document.createElement( ret[1] ) ]; + jQuery.fn.attr.call( selector, context, true ); + + } else { + selector = [ doc.createElement( ret[1] ) ]; + } + + } else { + ret = jQuery.buildFragment( [ match[1] ], [ doc ] ); + selector = ( ret.cacheable ? 
jQuery.clone(ret.fragment) : ret.fragment ).childNodes; + } + + return jQuery.merge( this, selector ); + + // HANDLE: $("#id") + } else { + elem = document.getElementById( match[2] ); + + // Check parentNode to catch when Blackberry 4.6 returns + // nodes that are no longer in the document #6963 + if ( elem && elem.parentNode ) { + // Handle the case where IE and Opera return items + // by name instead of ID + if ( elem.id !== match[2] ) { + return rootjQuery.find( selector ); + } + + // Otherwise, we inject the element directly into the jQuery object + this.length = 1; + this[0] = elem; + } + + this.context = document; + this.selector = selector; + return this; + } + + // HANDLE: $(expr, $(...)) + } else if ( !context || context.jquery ) { + return ( context || rootjQuery ).find( selector ); + + // HANDLE: $(expr, context) + // (which is just equivalent to: $(context).find(expr) + } else { + return this.constructor( context ).find( selector ); + } + + // HANDLE: $(function) + // Shortcut for document ready + } else if ( jQuery.isFunction( selector ) ) { + return rootjQuery.ready( selector ); + } + + if ( selector.selector !== undefined ) { + this.selector = selector.selector; + this.context = selector.context; + } + + return jQuery.makeArray( selector, this ); + }, + + // Start with an empty selector + selector: "", + + // The current version of jQuery being used + jquery: "1.7.1", + + // The default length of a jQuery object is 0 + length: 0, + + // The number of elements contained in the matched element set + size: function() { + return this.length; + }, + + toArray: function() { + return slice.call( this, 0 ); + }, + + // Get the Nth element in the matched element set OR + // Get the whole matched element set as a clean array + get: function( num ) { + return num == null ? + + // Return a 'clean' array + this.toArray() : + + // Return just the object + ( num < 0 ? 
this[ this.length + num ] : this[ num ] ); + }, + + // Take an array of elements and push it onto the stack + // (returning the new matched element set) + pushStack: function( elems, name, selector ) { + // Build a new jQuery matched element set + var ret = this.constructor(); + + if ( jQuery.isArray( elems ) ) { + push.apply( ret, elems ); + + } else { + jQuery.merge( ret, elems ); + } + + // Add the old object onto the stack (as a reference) + ret.prevObject = this; + + ret.context = this.context; + + if ( name === "find" ) { + ret.selector = this.selector + ( this.selector ? " " : "" ) + selector; + } else if ( name ) { + ret.selector = this.selector + "." + name + "(" + selector + ")"; + } + + // Return the newly-formed element set + return ret; + }, + + // Execute a callback for every element in the matched set. + // (You can seed the arguments with an array of args, but this is + // only used internally.) + each: function( callback, args ) { + return jQuery.each( this, callback, args ); + }, + + ready: function( fn ) { + // Attach the listeners + jQuery.bindReady(); + + // Add the callback + readyList.add( fn ); + + return this; + }, + + eq: function( i ) { + i = +i; + return i === -1 ? + this.slice( i ) : + this.slice( i, i + 1 ); + }, + + first: function() { + return this.eq( 0 ); + }, + + last: function() { + return this.eq( -1 ); + }, + + slice: function() { + return this.pushStack( slice.apply( this, arguments ), + "slice", slice.call(arguments).join(",") ); + }, + + map: function( callback ) { + return this.pushStack( jQuery.map(this, function( elem, i ) { + return callback.call( elem, i, elem ); + })); + }, + + end: function() { + return this.prevObject || this.constructor(null); + }, + + // For internal use only. + // Behaves like an Array's method, not like a jQuery method. 
+ push: push, + sort: [].sort, + splice: [].splice +}; + +// Give the init function the jQuery prototype for later instantiation +jQuery.fn.init.prototype = jQuery.fn; + +jQuery.extend = jQuery.fn.extend = function() { + var options, name, src, copy, copyIsArray, clone, + target = arguments[0] || {}, + i = 1, + length = arguments.length, + deep = false; + + // Handle a deep copy situation + if ( typeof target === "boolean" ) { + deep = target; + target = arguments[1] || {}; + // skip the boolean and the target + i = 2; + } + + // Handle case when target is a string or something (possible in deep copy) + if ( typeof target !== "object" && !jQuery.isFunction(target) ) { + target = {}; + } + + // extend jQuery itself if only one argument is passed + if ( length === i ) { + target = this; + --i; + } + + for ( ; i < length; i++ ) { + // Only deal with non-null/undefined values + if ( (options = arguments[ i ]) != null ) { + // Extend the base object + for ( name in options ) { + src = target[ name ]; + copy = options[ name ]; + + // Prevent never-ending loop + if ( target === copy ) { + continue; + } + + // Recurse if we're merging plain objects or arrays + if ( deep && copy && ( jQuery.isPlainObject(copy) || (copyIsArray = jQuery.isArray(copy)) ) ) { + if ( copyIsArray ) { + copyIsArray = false; + clone = src && jQuery.isArray(src) ? src : []; + + } else { + clone = src && jQuery.isPlainObject(src) ? src : {}; + } + + // Never move original objects, clone them + target[ name ] = jQuery.extend( deep, clone, copy ); + + // Don't bring in undefined values + } else if ( copy !== undefined ) { + target[ name ] = copy; + } + } + } + } + + // Return the modified object + return target; +}; + +jQuery.extend({ + noConflict: function( deep ) { + if ( window.$ === jQuery ) { + window.$ = _$; + } + + if ( deep && window.jQuery === jQuery ) { + window.jQuery = _jQuery; + } + + return jQuery; + }, + + // Is the DOM ready to be used? Set to true once it occurs. 
+ isReady: false, + + // A counter to track how many items to wait for before + // the ready event fires. See #6781 + readyWait: 1, + + // Hold (or release) the ready event + holdReady: function( hold ) { + if ( hold ) { + jQuery.readyWait++; + } else { + jQuery.ready( true ); + } + }, + + // Handle when the DOM is ready + ready: function( wait ) { + // Either a released hold or an DOMready/load event and not yet ready + if ( (wait === true && !--jQuery.readyWait) || (wait !== true && !jQuery.isReady) ) { + // Make sure body exists, at least, in case IE gets a little overzealous (ticket #5443). + if ( !document.body ) { + return setTimeout( jQuery.ready, 1 ); + } + + // Remember that the DOM is ready + jQuery.isReady = true; + + // If a normal DOM Ready event fired, decrement, and wait if need be + if ( wait !== true && --jQuery.readyWait > 0 ) { + return; + } + + // If there are functions bound, to execute + readyList.fireWith( document, [ jQuery ] ); + + // Trigger any bound ready events + if ( jQuery.fn.trigger ) { + jQuery( document ).trigger( "ready" ).off( "ready" ); + } + } + }, + + bindReady: function() { + if ( readyList ) { + return; + } + + readyList = jQuery.Callbacks( "once memory" ); + + // Catch cases where $(document).ready() is called after the + // browser event has already occurred. 
+ if ( document.readyState === "complete" ) { + // Handle it asynchronously to allow scripts the opportunity to delay ready + return setTimeout( jQuery.ready, 1 ); + } + + // Mozilla, Opera and webkit nightlies currently support this event + if ( document.addEventListener ) { + // Use the handy event callback + document.addEventListener( "DOMContentLoaded", DOMContentLoaded, false ); + + // A fallback to window.onload, that will always work + window.addEventListener( "load", jQuery.ready, false ); + + // If IE event model is used + } else if ( document.attachEvent ) { + // ensure firing before onload, + // maybe late but safe also for iframes + document.attachEvent( "onreadystatechange", DOMContentLoaded ); + + // A fallback to window.onload, that will always work + window.attachEvent( "onload", jQuery.ready ); + + // If IE and not a frame + // continually check to see if the document is ready + var toplevel = false; + + try { + toplevel = window.frameElement == null; + } catch(e) {} + + if ( document.documentElement.doScroll && toplevel ) { + doScrollCheck(); + } + } + }, + + // See test/unit/core.js for details concerning isFunction. + // Since version 1.3, DOM methods and functions like alert + // aren't supported. They return false on IE (#2968). + isFunction: function( obj ) { + return jQuery.type(obj) === "function"; + }, + + isArray: Array.isArray || function( obj ) { + return jQuery.type(obj) === "array"; + }, + + // A crude way of determining if an object is a window + isWindow: function( obj ) { + return obj && typeof obj === "object" && "setInterval" in obj; + }, + + isNumeric: function( obj ) { + return !isNaN( parseFloat(obj) ) && isFinite( obj ); + }, + + type: function( obj ) { + return obj == null ? + String( obj ) : + class2type[ toString.call(obj) ] || "object"; + }, + + isPlainObject: function( obj ) { + // Must be an Object. + // Because of IE, we also have to check the presence of the constructor property. 
+ // Make sure that DOM nodes and window objects don't pass through, as well + if ( !obj || jQuery.type(obj) !== "object" || obj.nodeType || jQuery.isWindow( obj ) ) { + return false; + } + + try { + // Not own constructor property must be Object + if ( obj.constructor && + !hasOwn.call(obj, "constructor") && + !hasOwn.call(obj.constructor.prototype, "isPrototypeOf") ) { + return false; + } + } catch ( e ) { + // IE8,9 Will throw exceptions on certain host objects #9897 + return false; + } + + // Own properties are enumerated firstly, so to speed up, + // if last one is own, then all properties are own. + + var key; + for ( key in obj ) {} + + return key === undefined || hasOwn.call( obj, key ); + }, + + isEmptyObject: function( obj ) { + for ( var name in obj ) { + return false; + } + return true; + }, + + error: function( msg ) { + throw new Error( msg ); + }, + + parseJSON: function( data ) { + if ( typeof data !== "string" || !data ) { + return null; + } + + // Make sure leading/trailing whitespace is removed (IE can't handle it) + data = jQuery.trim( data ); + + // Attempt to parse using the native JSON parser first + if ( window.JSON && window.JSON.parse ) { + return window.JSON.parse( data ); + } + + // Make sure the incoming data is actual JSON + // Logic borrowed from http://json.org/json2.js + if ( rvalidchars.test( data.replace( rvalidescape, "@" ) + .replace( rvalidtokens, "]" ) + .replace( rvalidbraces, "")) ) { + + return ( new Function( "return " + data ) )(); + + } + jQuery.error( "Invalid JSON: " + data ); + }, + + // Cross-browser xml parsing + parseXML: function( data ) { + var xml, tmp; + try { + if ( window.DOMParser ) { // Standard + tmp = new DOMParser(); + xml = tmp.parseFromString( data , "text/xml" ); + } else { // IE + xml = new ActiveXObject( "Microsoft.XMLDOM" ); + xml.async = "false"; + xml.loadXML( data ); + } + } catch( e ) { + xml = undefined; + } + if ( !xml || !xml.documentElement || xml.getElementsByTagName( "parsererror" 
).length ) { + jQuery.error( "Invalid XML: " + data ); + } + return xml; + }, + + noop: function() {}, + + // Evaluates a script in a global context + // Workarounds based on findings by Jim Driscoll + // http://weblogs.java.net/blog/driscoll/archive/2009/09/08/eval-javascript-global-context + globalEval: function( data ) { + if ( data && rnotwhite.test( data ) ) { + // We use execScript on Internet Explorer + // We use an anonymous function so that context is window + // rather than jQuery in Firefox + ( window.execScript || function( data ) { + window[ "eval" ].call( window, data ); + } )( data ); + } + }, + + // Convert dashed to camelCase; used by the css and data modules + // Microsoft forgot to hump their vendor prefix (#9572) + camelCase: function( string ) { + return string.replace( rmsPrefix, "ms-" ).replace( rdashAlpha, fcamelCase ); + }, + + nodeName: function( elem, name ) { + return elem.nodeName && elem.nodeName.toUpperCase() === name.toUpperCase(); + }, + + // args is for internal usage only + each: function( object, callback, args ) { + var name, i = 0, + length = object.length, + isObj = length === undefined || jQuery.isFunction( object ); + + if ( args ) { + if ( isObj ) { + for ( name in object ) { + if ( callback.apply( object[ name ], args ) === false ) { + break; + } + } + } else { + for ( ; i < length; ) { + if ( callback.apply( object[ i++ ], args ) === false ) { + break; + } + } + } + + // A special, fast, case for the most common use of each + } else { + if ( isObj ) { + for ( name in object ) { + if ( callback.call( object[ name ], name, object[ name ] ) === false ) { + break; + } + } + } else { + for ( ; i < length; ) { + if ( callback.call( object[ i ], i, object[ i++ ] ) === false ) { + break; + } + } + } + } + + return object; + }, + + // Use native String.trim function wherever possible + trim: trim ? + function( text ) { + return text == null ? 
+ "" : + trim.call( text ); + } : + + // Otherwise use our own trimming functionality + function( text ) { + return text == null ? + "" : + text.toString().replace( trimLeft, "" ).replace( trimRight, "" ); + }, + + // results is for internal usage only + makeArray: function( array, results ) { + var ret = results || []; + + if ( array != null ) { + // The window, strings (and functions) also have 'length' + // Tweaked logic slightly to handle Blackberry 4.7 RegExp issues #6930 + var type = jQuery.type( array ); + + if ( array.length == null || type === "string" || type === "function" || type === "regexp" || jQuery.isWindow( array ) ) { + push.call( ret, array ); + } else { + jQuery.merge( ret, array ); + } + } + + return ret; + }, + + inArray: function( elem, array, i ) { + var len; + + if ( array ) { + if ( indexOf ) { + return indexOf.call( array, elem, i ); + } + + len = array.length; + i = i ? i < 0 ? Math.max( 0, len + i ) : i : 0; + + for ( ; i < len; i++ ) { + // Skip accessing in sparse arrays + if ( i in array && array[ i ] === elem ) { + return i; + } + } + } + + return -1; + }, + + merge: function( first, second ) { + var i = first.length, + j = 0; + + if ( typeof second.length === "number" ) { + for ( var l = second.length; j < l; j++ ) { + first[ i++ ] = second[ j ]; + } + + } else { + while ( second[j] !== undefined ) { + first[ i++ ] = second[ j++ ]; + } + } + + first.length = i; + + return first; + }, + + grep: function( elems, callback, inv ) { + var ret = [], retVal; + inv = !!inv; + + // Go through the array, only saving the items + // that pass the validator function + for ( var i = 0, length = elems.length; i < length; i++ ) { + retVal = !!callback( elems[ i ], i ); + if ( inv !== retVal ) { + ret.push( elems[ i ] ); + } + } + + return ret; + }, + + // arg is for internal usage only + map: function( elems, callback, arg ) { + var value, key, ret = [], + i = 0, + length = elems.length, + // jquery objects are treated as arrays + isArray = elems 
instanceof jQuery || length !== undefined && typeof length === "number" && ( ( length > 0 && elems[ 0 ] && elems[ length -1 ] ) || length === 0 || jQuery.isArray( elems ) ) ; + + // Go through the array, translating each of the items to their + if ( isArray ) { + for ( ; i < length; i++ ) { + value = callback( elems[ i ], i, arg ); + + if ( value != null ) { + ret[ ret.length ] = value; + } + } + + // Go through every key on the object, + } else { + for ( key in elems ) { + value = callback( elems[ key ], key, arg ); + + if ( value != null ) { + ret[ ret.length ] = value; + } + } + } + + // Flatten any nested arrays + return ret.concat.apply( [], ret ); + }, + + // A global GUID counter for objects + guid: 1, + + // Bind a function to a context, optionally partially applying any + // arguments. + proxy: function( fn, context ) { + if ( typeof context === "string" ) { + var tmp = fn[ context ]; + context = fn; + fn = tmp; + } + + // Quick check to determine if target is callable, in the spec + // this throws a TypeError, but we will just return undefined. 
+ if ( !jQuery.isFunction( fn ) ) { + return undefined; + } + + // Simulated bind + var args = slice.call( arguments, 2 ), + proxy = function() { + return fn.apply( context, args.concat( slice.call( arguments ) ) ); + }; + + // Set the guid of unique handler to the same of original handler, so it can be removed + proxy.guid = fn.guid = fn.guid || proxy.guid || jQuery.guid++; + + return proxy; + }, + + // Mutifunctional method to get and set values to a collection + // The value/s can optionally be executed if it's a function + access: function( elems, key, value, exec, fn, pass ) { + var length = elems.length; + + // Setting many attributes + if ( typeof key === "object" ) { + for ( var k in key ) { + jQuery.access( elems, k, key[k], exec, fn, value ); + } + return elems; + } + + // Setting one attribute + if ( value !== undefined ) { + // Optionally, function values get executed if exec is true + exec = !pass && exec && jQuery.isFunction(value); + + for ( var i = 0; i < length; i++ ) { + fn( elems[i], key, exec ? value.call( elems[i], i, fn( elems[i], key ) ) : value, pass ); + } + + return elems; + } + + // Getting an attribute + return length ? fn( elems[0], key ) : undefined; + }, + + now: function() { + return ( new Date() ).getTime(); + }, + + // Use of jQuery.browser is frowned upon. 
+ // More details: http://docs.jquery.com/Utilities/jQuery.browser + uaMatch: function( ua ) { + ua = ua.toLowerCase(); + + var match = rwebkit.exec( ua ) || + ropera.exec( ua ) || + rmsie.exec( ua ) || + ua.indexOf("compatible") < 0 && rmozilla.exec( ua ) || + []; + + return { browser: match[1] || "", version: match[2] || "0" }; + }, + + sub: function() { + function jQuerySub( selector, context ) { + return new jQuerySub.fn.init( selector, context ); + } + jQuery.extend( true, jQuerySub, this ); + jQuerySub.superclass = this; + jQuerySub.fn = jQuerySub.prototype = this(); + jQuerySub.fn.constructor = jQuerySub; + jQuerySub.sub = this.sub; + jQuerySub.fn.init = function init( selector, context ) { + if ( context && context instanceof jQuery && !(context instanceof jQuerySub) ) { + context = jQuerySub( context ); + } + + return jQuery.fn.init.call( this, selector, context, rootjQuerySub ); + }; + jQuerySub.fn.init.prototype = jQuerySub.fn; + var rootjQuerySub = jQuerySub(document); + return jQuerySub; + }, + + browser: {} +}); + +// Populate the class2type map +jQuery.each("Boolean Number String Function Array Date RegExp Object".split(" "), function(i, name) { + class2type[ "[object " + name + "]" ] = name.toLowerCase(); +}); + +browserMatch = jQuery.uaMatch( userAgent ); +if ( browserMatch.browser ) { + jQuery.browser[ browserMatch.browser ] = true; + jQuery.browser.version = browserMatch.version; +} + +// Deprecated, use jQuery.browser.webkit instead +if ( jQuery.browser.webkit ) { + jQuery.browser.safari = true; +} + +// IE doesn't match non-breaking spaces with \s +if ( rnotwhite.test( "\xA0" ) ) { + trimLeft = /^[\s\xA0]+/; + trimRight = /[\s\xA0]+$/; +} + +// All jQuery objects should point back to these +rootjQuery = jQuery(document); + +// Cleanup functions for the document ready method +if ( document.addEventListener ) { + DOMContentLoaded = function() { + document.removeEventListener( "DOMContentLoaded", DOMContentLoaded, false ); + jQuery.ready(); + }; + 
+} else if ( document.attachEvent ) { + DOMContentLoaded = function() { + // Make sure body exists, at least, in case IE gets a little overzealous (ticket #5443). + if ( document.readyState === "complete" ) { + document.detachEvent( "onreadystatechange", DOMContentLoaded ); + jQuery.ready(); + } + }; +} + +// The DOM ready check for Internet Explorer +function doScrollCheck() { + if ( jQuery.isReady ) { + return; + } + + try { + // If IE is used, use the trick by Diego Perini + // http://javascript.nwbox.com/IEContentLoaded/ + document.documentElement.doScroll("left"); + } catch(e) { + setTimeout( doScrollCheck, 1 ); + return; + } + + // and execute any waiting functions + jQuery.ready(); +} + +return jQuery; + +})(); + + +// String to Object flags format cache +var flagsCache = {}; + +// Convert String-formatted flags into Object-formatted ones and store in cache +function createFlags( flags ) { + var object = flagsCache[ flags ] = {}, + i, length; + flags = flags.split( /\s+/ ); + for ( i = 0, length = flags.length; i < length; i++ ) { + object[ flags[i] ] = true; + } + return object; +} + +/* + * Create a callback list using the following parameters: + * + * flags: an optional list of space-separated flags that will change how + * the callback list behaves + * + * By default a callback list will act like an event callback list and can be + * "fired" multiple times. 
+ * + * Possible flags: + * + * once: will ensure the callback list can only be fired once (like a Deferred) + * + * memory: will keep track of previous values and will call any callback added + * after the list has been fired right away with the latest "memorized" + * values (like a Deferred) + * + * unique: will ensure a callback can only be added once (no duplicate in the list) + * + * stopOnFalse: interrupt callings when a callback returns false + * + */ +jQuery.Callbacks = function( flags ) { + + // Convert flags from String-formatted to Object-formatted + // (we check in cache first) + flags = flags ? ( flagsCache[ flags ] || createFlags( flags ) ) : {}; + + var // Actual callback list + list = [], + // Stack of fire calls for repeatable lists + stack = [], + // Last fire value (for non-forgettable lists) + memory, + // Flag to know if list is currently firing + firing, + // First callback to fire (used internally by add and fireWith) + firingStart, + // End of the loop when firing + firingLength, + // Index of currently firing callback (modified by remove if needed) + firingIndex, + // Add one or several callbacks to the list + add = function( args ) { + var i, + length, + elem, + type, + actual; + for ( i = 0, length = args.length; i < length; i++ ) { + elem = args[ i ]; + type = jQuery.type( elem ); + if ( type === "array" ) { + // Inspect recursively + add( elem ); + } else if ( type === "function" ) { + // Add if not in unique mode and callback is not in + if ( !flags.unique || !self.has( elem ) ) { + list.push( elem ); + } + } + } + }, + // Fire callbacks + fire = function( context, args ) { + args = args || []; + memory = !flags.memory || [ context, args ]; + firing = true; + firingIndex = firingStart || 0; + firingStart = 0; + firingLength = list.length; + for ( ; list && firingIndex < firingLength; firingIndex++ ) { + if ( list[ firingIndex ].apply( context, args ) === false && flags.stopOnFalse ) { + memory = true; // Mark as halted + break; + } + } 
+ firing = false; + if ( list ) { + if ( !flags.once ) { + if ( stack && stack.length ) { + memory = stack.shift(); + self.fireWith( memory[ 0 ], memory[ 1 ] ); + } + } else if ( memory === true ) { + self.disable(); + } else { + list = []; + } + } + }, + // Actual Callbacks object + self = { + // Add a callback or a collection of callbacks to the list + add: function() { + if ( list ) { + var length = list.length; + add( arguments ); + // Do we need to add the callbacks to the + // current firing batch? + if ( firing ) { + firingLength = list.length; + // With memory, if we're not firing then + // we should call right away, unless previous + // firing was halted (stopOnFalse) + } else if ( memory && memory !== true ) { + firingStart = length; + fire( memory[ 0 ], memory[ 1 ] ); + } + } + return this; + }, + // Remove a callback from the list + remove: function() { + if ( list ) { + var args = arguments, + argIndex = 0, + argLength = args.length; + for ( ; argIndex < argLength ; argIndex++ ) { + for ( var i = 0; i < list.length; i++ ) { + if ( args[ argIndex ] === list[ i ] ) { + // Handle firingIndex and firingLength + if ( firing ) { + if ( i <= firingLength ) { + firingLength--; + if ( i <= firingIndex ) { + firingIndex--; + } + } + } + // Remove the element + list.splice( i--, 1 ); + // If we have some unicity property then + // we only need to do this once + if ( flags.unique ) { + break; + } + } + } + } + } + return this; + }, + // Control if a given callback is in the list + has: function( fn ) { + if ( list ) { + var i = 0, + length = list.length; + for ( ; i < length; i++ ) { + if ( fn === list[ i ] ) { + return true; + } + } + } + return false; + }, + // Remove all callbacks from the list + empty: function() { + list = []; + return this; + }, + // Have the list do nothing anymore + disable: function() { + list = stack = memory = undefined; + return this; + }, + // Is it disabled? 
+ disabled: function() { + return !list; + }, + // Lock the list in its current state + lock: function() { + stack = undefined; + if ( !memory || memory === true ) { + self.disable(); + } + return this; + }, + // Is it locked? + locked: function() { + return !stack; + }, + // Call all callbacks with the given context and arguments + fireWith: function( context, args ) { + if ( stack ) { + if ( firing ) { + if ( !flags.once ) { + stack.push( [ context, args ] ); + } + } else if ( !( flags.once && memory ) ) { + fire( context, args ); + } + } + return this; + }, + // Call all the callbacks with the given arguments + fire: function() { + self.fireWith( this, arguments ); + return this; + }, + // To know if the callbacks have already been called at least once + fired: function() { + return !!memory; + } + }; + + return self; +}; + + + + +var // Static reference to slice + sliceDeferred = [].slice; + +jQuery.extend({ + + Deferred: function( func ) { + var doneList = jQuery.Callbacks( "once memory" ), + failList = jQuery.Callbacks( "once memory" ), + progressList = jQuery.Callbacks( "memory" ), + state = "pending", + lists = { + resolve: doneList, + reject: failList, + notify: progressList + }, + promise = { + done: doneList.add, + fail: failList.add, + progress: progressList.add, + + state: function() { + return state; + }, + + // Deprecated + isResolved: doneList.fired, + isRejected: failList.fired, + + then: function( doneCallbacks, failCallbacks, progressCallbacks ) { + deferred.done( doneCallbacks ).fail( failCallbacks ).progress( progressCallbacks ); + return this; + }, + always: function() { + deferred.done.apply( deferred, arguments ).fail.apply( deferred, arguments ); + return this; + }, + pipe: function( fnDone, fnFail, fnProgress ) { + return jQuery.Deferred(function( newDefer ) { + jQuery.each( { + done: [ fnDone, "resolve" ], + fail: [ fnFail, "reject" ], + progress: [ fnProgress, "notify" ] + }, function( handler, data ) { + var fn = data[ 0 ], + action = 
data[ 1 ], + returned; + if ( jQuery.isFunction( fn ) ) { + deferred[ handler ](function() { + returned = fn.apply( this, arguments ); + if ( returned && jQuery.isFunction( returned.promise ) ) { + returned.promise().then( newDefer.resolve, newDefer.reject, newDefer.notify ); + } else { + newDefer[ action + "With" ]( this === deferred ? newDefer : this, [ returned ] ); + } + }); + } else { + deferred[ handler ]( newDefer[ action ] ); + } + }); + }).promise(); + }, + // Get a promise for this deferred + // If obj is provided, the promise aspect is added to the object + promise: function( obj ) { + if ( obj == null ) { + obj = promise; + } else { + for ( var key in promise ) { + obj[ key ] = promise[ key ]; + } + } + return obj; + } + }, + deferred = promise.promise({}), + key; + + for ( key in lists ) { + deferred[ key ] = lists[ key ].fire; + deferred[ key + "With" ] = lists[ key ].fireWith; + } + + // Handle state + deferred.done( function() { + state = "resolved"; + }, failList.disable, progressList.lock ).fail( function() { + state = "rejected"; + }, doneList.disable, progressList.lock ); + + // Call given func if any + if ( func ) { + func.call( deferred, deferred ); + } + + // All done! + return deferred; + }, + + // Deferred helper + when: function( firstParam ) { + var args = sliceDeferred.call( arguments, 0 ), + i = 0, + length = args.length, + pValues = new Array( length ), + count = length, + pCount = length, + deferred = length <= 1 && firstParam && jQuery.isFunction( firstParam.promise ) ? + firstParam : + jQuery.Deferred(), + promise = deferred.promise(); + function resolveFunc( i ) { + return function( value ) { + args[ i ] = arguments.length > 1 ? sliceDeferred.call( arguments, 0 ) : value; + if ( !( --count ) ) { + deferred.resolveWith( deferred, args ); + } + }; + } + function progressFunc( i ) { + return function( value ) { + pValues[ i ] = arguments.length > 1 ? 
sliceDeferred.call( arguments, 0 ) : value; + deferred.notifyWith( promise, pValues ); + }; + } + if ( length > 1 ) { + for ( ; i < length; i++ ) { + if ( args[ i ] && args[ i ].promise && jQuery.isFunction( args[ i ].promise ) ) { + args[ i ].promise().then( resolveFunc(i), deferred.reject, progressFunc(i) ); + } else { + --count; + } + } + if ( !count ) { + deferred.resolveWith( deferred, args ); + } + } else if ( deferred !== firstParam ) { + deferred.resolveWith( deferred, length ? [ firstParam ] : [] ); + } + return promise; + } +}); + + + + +jQuery.support = (function() { + + var support, + all, + a, + select, + opt, + input, + marginDiv, + fragment, + tds, + events, + eventName, + i, + isSupported, + div = document.createElement( "div" ), + documentElement = document.documentElement; + + // Preliminary tests + div.setAttribute("className", "t"); + div.innerHTML = " <link/><table></table><a href='/a' style='top:1px;float:left;opacity:.55;'>a</a><input type='checkbox'/>"; + + all = div.getElementsByTagName( "*" ); + a = div.getElementsByTagName( "a" )[ 0 ]; + + // Can't get basic test support + if ( !all || !all.length || !a ) { + return {}; + } + + // First batch of supports tests + select = document.createElement( "select" ); + opt = select.appendChild( document.createElement("option") ); + input = div.getElementsByTagName( "input" )[ 0 ]; + + support = { + // IE strips leading whitespace when .innerHTML is used + leadingWhitespace: ( div.firstChild.nodeType === 3 ), + + // Make sure that tbody elements aren't automatically inserted + // IE will insert them into empty tables + tbody: !div.getElementsByTagName("tbody").length, + + // Make sure that link elements get serialized correctly by innerHTML + // This requires a wrapper element in IE + htmlSerialize: !!div.getElementsByTagName("link").length, + + // Get the style information from getAttribute + // (IE uses .cssText instead) + style: /top/.test( a.getAttribute("style") ), + + // Make sure that URLs 
aren't manipulated + // (IE normalizes it by default) + hrefNormalized: ( a.getAttribute("href") === "/a" ), + + // Make sure that element opacity exists + // (IE uses filter instead) + // Use a regex to work around a WebKit issue. See #5145 + opacity: /^0.55/.test( a.style.opacity ), + + // Verify style float existence + // (IE uses styleFloat instead of cssFloat) + cssFloat: !!a.style.cssFloat, + + // Make sure that if no value is specified for a checkbox + // that it defaults to "on". + // (WebKit defaults to "" instead) + checkOn: ( input.value === "on" ), + + // Make sure that a selected-by-default option has a working selected property. + // (WebKit defaults to false instead of true, IE too, if it's in an optgroup) + optSelected: opt.selected, + + // Test setAttribute on camelCase class. If it works, we need attrFixes when doing get/setAttribute (ie6/7) + getSetAttribute: div.className !== "t", + + // Tests for enctype support on a form(#6743) + enctype: !!document.createElement("form").enctype, + + // Makes sure cloning an html5 element does not cause problems + // Where outerHTML is undefined, this still works + html5Clone: document.createElement("nav").cloneNode( true ).outerHTML !== "<:nav></:nav>", + + // Will be defined later + submitBubbles: true, + changeBubbles: true, + focusinBubbles: false, + deleteExpando: true, + noCloneEvent: true, + inlineBlockNeedsLayout: false, + shrinkWrapBlocks: false, + reliableMarginRight: true + }; + + // Make sure checked status is properly cloned + input.checked = true; + support.noCloneChecked = input.cloneNode( true ).checked; + + // Make sure that the options inside disabled selects aren't marked as disabled + // (WebKit marks them as disabled) + select.disabled = true; + support.optDisabled = !opt.disabled; + + // Test to see if it's possible to delete an expando from an element + // Fails in Internet Explorer + try { + delete div.test; + } catch( e ) { + support.deleteExpando = false; + } + + if ( 
!div.addEventListener && div.attachEvent && div.fireEvent ) { + div.attachEvent( "onclick", function() { + // Cloning a node shouldn't copy over any + // bound event handlers (IE does this) + support.noCloneEvent = false; + }); + div.cloneNode( true ).fireEvent( "onclick" ); + } + + // Check if a radio maintains its value + // after being appended to the DOM + input = document.createElement("input"); + input.value = "t"; + input.setAttribute("type", "radio"); + support.radioValue = input.value === "t"; + + input.setAttribute("checked", "checked"); + div.appendChild( input ); + fragment = document.createDocumentFragment(); + fragment.appendChild( div.lastChild ); + + // WebKit doesn't clone checked state correctly in fragments + support.checkClone = fragment.cloneNode( true ).cloneNode( true ).lastChild.checked; + + // Check if a disconnected checkbox will retain its checked + // value of true after appended to the DOM (IE6/7) + support.appendChecked = input.checked; + + fragment.removeChild( input ); + fragment.appendChild( div ); + + div.innerHTML = ""; + + // Check if div with explicit width and no margin-right incorrectly + // gets computed margin-right based on width of container. For more + // info see bug #3333 + // Fails in WebKit before Feb 2011 nightlies + // WebKit Bug 13343 - getComputedStyle returns wrong value for margin-right + if ( window.getComputedStyle ) { + marginDiv = document.createElement( "div" ); + marginDiv.style.width = "0"; + marginDiv.style.marginRight = "0"; + div.style.width = "2px"; + div.appendChild( marginDiv ); + support.reliableMarginRight = + ( parseInt( ( window.getComputedStyle( marginDiv, null ) || { marginRight: 0 } ).marginRight, 10 ) || 0 ) === 0; + } + + // Technique from Juriy Zaytsev + // http://perfectionkills.com/detecting-event-support-without-browser-sniffing/ + // We only care about the case where non-standard event systems + // are used, namely in IE. 
Short-circuiting here helps us to + // avoid an eval call (in setAttribute) which can cause CSP + // to go haywire. See: https://developer.mozilla.org/en/Security/CSP + if ( div.attachEvent ) { + for( i in { + submit: 1, + change: 1, + focusin: 1 + }) { + eventName = "on" + i; + isSupported = ( eventName in div ); + if ( !isSupported ) { + div.setAttribute( eventName, "return;" ); + isSupported = ( typeof div[ eventName ] === "function" ); + } + support[ i + "Bubbles" ] = isSupported; + } + } + + fragment.removeChild( div ); + + // Null elements to avoid leaks in IE + fragment = select = opt = marginDiv = div = input = null; + + // Run tests that need a body at doc ready + jQuery(function() { + var container, outer, inner, table, td, offsetSupport, + conMarginTop, ptlm, vb, style, html, + body = document.getElementsByTagName("body")[0]; + + if ( !body ) { + // Return for frameset docs that don't have a body + return; + } + + conMarginTop = 1; + ptlm = "position:absolute;top:0;left:0;width:1px;height:1px;margin:0;"; + vb = "visibility:hidden;border:0;"; + style = "style='" + ptlm + "border:5px solid #000;padding:0;'"; + html = "<div " + style + "><div></div></div>" + + "<table " + style + " cellpadding='0' cellspacing='0'>" + + "<tr><td></td></tr></table>"; + + container = document.createElement("div"); + container.style.cssText = vb + "width:0;height:0;position:static;top:0;margin-top:" + conMarginTop + "px"; + body.insertBefore( container, body.firstChild ); + + // Construct the test element + div = document.createElement("div"); + container.appendChild( div ); + + // Check if table cells still have offsetWidth/Height when they are set + // to display:none and there are still other visible table cells in a + // table row; if so, offsetWidth/Height are not reliable for use when + // determining if an element has been hidden directly using + // display:none (it is still safe to use offsets if a parent element is + // hidden; don safety goggles and see bug #4512 for 
more information). + // (only IE 8 fails this test) + div.innerHTML = "<table><tr><td style='padding:0;border:0;display:none'></td><td>t</td></tr></table>"; + tds = div.getElementsByTagName( "td" ); + isSupported = ( tds[ 0 ].offsetHeight === 0 ); + + tds[ 0 ].style.display = ""; + tds[ 1 ].style.display = "none"; + + // Check if empty table cells still have offsetWidth/Height + // (IE <= 8 fail this test) + support.reliableHiddenOffsets = isSupported && ( tds[ 0 ].offsetHeight === 0 ); + + // Figure out if the W3C box model works as expected + div.innerHTML = ""; + div.style.width = div.style.paddingLeft = "1px"; + jQuery.boxModel = support.boxModel = div.offsetWidth === 2; + + if ( typeof div.style.zoom !== "undefined" ) { + // Check if natively block-level elements act like inline-block + // elements when setting their display to 'inline' and giving + // them layout + // (IE < 8 does this) + div.style.display = "inline"; + div.style.zoom = 1; + support.inlineBlockNeedsLayout = ( div.offsetWidth === 2 ); + + // Check if elements with layout shrink-wrap their children + // (IE 6 does this) + div.style.display = ""; + div.innerHTML = "<div style='width:4px;'></div>"; + support.shrinkWrapBlocks = ( div.offsetWidth !== 2 ); + } + + div.style.cssText = ptlm + vb; + div.innerHTML = html; + + outer = div.firstChild; + inner = outer.firstChild; + td = outer.nextSibling.firstChild.firstChild; + + offsetSupport = { + doesNotAddBorder: ( inner.offsetTop !== 5 ), + doesAddBorderForTableAndCells: ( td.offsetTop === 5 ) + }; + + inner.style.position = "fixed"; + inner.style.top = "20px"; + + // safari subtracts parent border width here which is 5px + offsetSupport.fixedPosition = ( inner.offsetTop === 20 || inner.offsetTop === 15 ); + inner.style.position = inner.style.top = ""; + + outer.style.overflow = "hidden"; + outer.style.position = "relative"; + + offsetSupport.subtractsBorderForOverflowNotVisible = ( inner.offsetTop === -5 ); + 
offsetSupport.doesNotIncludeMarginInBodyOffset = ( body.offsetTop !== conMarginTop ); + + body.removeChild( container ); + div = container = null; + + jQuery.extend( support, offsetSupport ); + }); + + return support; +})(); + + + + +var rbrace = /^(?:\{.*\}|\[.*\])$/, + rmultiDash = /([A-Z])/g; + +jQuery.extend({ + cache: {}, + + // Please use with caution + uuid: 0, + + // Unique for each copy of jQuery on the page + // Non-digits removed to match rinlinejQuery + expando: "jQuery" + ( jQuery.fn.jquery + Math.random() ).replace( /\D/g, "" ), + + // The following elements throw uncatchable exceptions if you + // attempt to add expando properties to them. + noData: { + "embed": true, + // Ban all objects except for Flash (which handle expandos) + "object": "clsid:D27CDB6E-AE6D-11cf-96B8-444553540000", + "applet": true + }, + + hasData: function( elem ) { + elem = elem.nodeType ? jQuery.cache[ elem[jQuery.expando] ] : elem[ jQuery.expando ]; + return !!elem && !isEmptyDataObject( elem ); + }, + + data: function( elem, name, data, pvt /* Internal Use Only */ ) { + if ( !jQuery.acceptData( elem ) ) { + return; + } + + var privateCache, thisCache, ret, + internalKey = jQuery.expando, + getByName = typeof name === "string", + + // We have to handle DOM nodes and JS objects differently because IE6-7 + // can't GC object references properly across the DOM-JS boundary + isNode = elem.nodeType, + + // Only DOM nodes need the global jQuery cache; JS object data is + // attached directly to the object so GC can occur automatically + cache = isNode ? jQuery.cache : elem, + + // Only defining an ID for JS objects if its cache already exists allows + // the code to shortcut on the same path as a DOM node with no cache + id = isNode ? 
elem[ internalKey ] : elem[ internalKey ] && internalKey, + isEvents = name === "events"; + + // Avoid doing any more work than we need to when trying to get data on an + // object that has no data at all + if ( (!id || !cache[id] || (!isEvents && !pvt && !cache[id].data)) && getByName && data === undefined ) { + return; + } + + if ( !id ) { + // Only DOM nodes need a new unique ID for each element since their data + // ends up in the global cache + if ( isNode ) { + elem[ internalKey ] = id = ++jQuery.uuid; + } else { + id = internalKey; + } + } + + if ( !cache[ id ] ) { + cache[ id ] = {}; + + // Avoids exposing jQuery metadata on plain JS objects when the object + // is serialized using JSON.stringify + if ( !isNode ) { + cache[ id ].toJSON = jQuery.noop; + } + } + + // An object can be passed to jQuery.data instead of a key/value pair; this gets + // shallow copied over onto the existing cache + if ( typeof name === "object" || typeof name === "function" ) { + if ( pvt ) { + cache[ id ] = jQuery.extend( cache[ id ], name ); + } else { + cache[ id ].data = jQuery.extend( cache[ id ].data, name ); + } + } + + privateCache = thisCache = cache[ id ]; + + // jQuery data() is stored in a separate object inside the object's internal data + // cache in order to avoid key collisions between internal data and user-defined + // data. + if ( !pvt ) { + if ( !thisCache.data ) { + thisCache.data = {}; + } + + thisCache = thisCache.data; + } + + if ( data !== undefined ) { + thisCache[ jQuery.camelCase( name ) ] = data; + } + + // Users should not attempt to inspect the internal events object using jQuery.data, + // it is undocumented and subject to change. But does anyone listen? No. 
+ if ( isEvents && !thisCache[ name ] ) { + return privateCache.events; + } + + // Check for both converted-to-camel and non-converted data property names + // If a data property was specified + if ( getByName ) { + + // First Try to find as-is property data + ret = thisCache[ name ]; + + // Test for null|undefined property data + if ( ret == null ) { + + // Try to find the camelCased property + ret = thisCache[ jQuery.camelCase( name ) ]; + } + } else { + ret = thisCache; + } + + return ret; + }, + + removeData: function( elem, name, pvt /* Internal Use Only */ ) { + if ( !jQuery.acceptData( elem ) ) { + return; + } + + var thisCache, i, l, + + // Reference to internal data cache key + internalKey = jQuery.expando, + + isNode = elem.nodeType, + + // See jQuery.data for more information + cache = isNode ? jQuery.cache : elem, + + // See jQuery.data for more information + id = isNode ? elem[ internalKey ] : internalKey; + + // If there is already no cache entry for this object, there is no + // purpose in continuing + if ( !cache[ id ] ) { + return; + } + + if ( name ) { + + thisCache = pvt ? cache[ id ] : cache[ id ].data; + + if ( thisCache ) { + + // Support array or space separated string names for data keys + if ( !jQuery.isArray( name ) ) { + + // try the string as a key before any manipulation + if ( name in thisCache ) { + name = [ name ]; + } else { + + // split the camel cased version by spaces unless a key with the spaces exists + name = jQuery.camelCase( name ); + if ( name in thisCache ) { + name = [ name ]; + } else { + name = name.split( " " ); + } + } + } + + for ( i = 0, l = name.length; i < l; i++ ) { + delete thisCache[ name[i] ]; + } + + // If there is no data left in the cache, we want to continue + // and let the cache object itself get destroyed + if ( !( pvt ? 
isEmptyDataObject : jQuery.isEmptyObject )( thisCache ) ) { + return; + } + } + } + + // See jQuery.data for more information + if ( !pvt ) { + delete cache[ id ].data; + + // Don't destroy the parent cache unless the internal data object + // had been the only thing left in it + if ( !isEmptyDataObject(cache[ id ]) ) { + return; + } + } + + // Browsers that fail expando deletion also refuse to delete expandos on + // the window, but it will allow it on all other JS objects; other browsers + // don't care + // Ensure that `cache` is not a window object #10080 + if ( jQuery.support.deleteExpando || !cache.setInterval ) { + delete cache[ id ]; + } else { + cache[ id ] = null; + } + + // We destroyed the cache and need to eliminate the expando on the node to avoid + // false lookups in the cache for entries that no longer exist + if ( isNode ) { + // IE does not allow us to delete expando properties from nodes, + // nor does it have a removeAttribute function on Document nodes; + // we must handle all of these cases + if ( jQuery.support.deleteExpando ) { + delete elem[ internalKey ]; + } else if ( elem.removeAttribute ) { + elem.removeAttribute( internalKey ); + } else { + elem[ internalKey ] = null; + } + } + }, + + // For internal use only. 
+ _data: function( elem, name, data ) { + return jQuery.data( elem, name, data, true ); + }, + + // A method for determining if a DOM node can handle the data expando + acceptData: function( elem ) { + if ( elem.nodeName ) { + var match = jQuery.noData[ elem.nodeName.toLowerCase() ]; + + if ( match ) { + return !(match === true || elem.getAttribute("classid") !== match); + } + } + + return true; + } +}); + +jQuery.fn.extend({ + data: function( key, value ) { + var parts, attr, name, + data = null; + + if ( typeof key === "undefined" ) { + if ( this.length ) { + data = jQuery.data( this[0] ); + + if ( this[0].nodeType === 1 && !jQuery._data( this[0], "parsedAttrs" ) ) { + attr = this[0].attributes; + for ( var i = 0, l = attr.length; i < l; i++ ) { + name = attr[i].name; + + if ( name.indexOf( "data-" ) === 0 ) { + name = jQuery.camelCase( name.substring(5) ); + + dataAttr( this[0], name, data[ name ] ); + } + } + jQuery._data( this[0], "parsedAttrs", true ); + } + } + + return data; + + } else if ( typeof key === "object" ) { + return this.each(function() { + jQuery.data( this, key ); + }); + } + + parts = key.split("."); + parts[1] = parts[1] ? "." + parts[1] : ""; + + if ( value === undefined ) { + data = this.triggerHandler("getData" + parts[1] + "!", [parts[0]]); + + // Try to fetch any internally stored data first + if ( data === undefined && this.length ) { + data = jQuery.data( this[0], key ); + data = dataAttr( this[0], key, data ); + } + + return data === undefined && parts[1] ? 
+ this.data( parts[0] ) : + data; + + } else { + return this.each(function() { + var self = jQuery( this ), + args = [ parts[0], value ]; + + self.triggerHandler( "setData" + parts[1] + "!", args ); + jQuery.data( this, key, value ); + self.triggerHandler( "changeData" + parts[1] + "!", args ); + }); + } + }, + + removeData: function( key ) { + return this.each(function() { + jQuery.removeData( this, key ); + }); + } +}); + +function dataAttr( elem, key, data ) { + // If nothing was found internally, try to fetch any + // data from the HTML5 data-* attribute + if ( data === undefined && elem.nodeType === 1 ) { + + var name = "data-" + key.replace( rmultiDash, "-$1" ).toLowerCase(); + + data = elem.getAttribute( name ); + + if ( typeof data === "string" ) { + try { + data = data === "true" ? true : + data === "false" ? false : + data === "null" ? null : + jQuery.isNumeric( data ) ? parseFloat( data ) : + rbrace.test( data ) ? jQuery.parseJSON( data ) : + data; + } catch( e ) {} + + // Make sure we set the data so it isn't changed later + jQuery.data( elem, key, data ); + + } else { + data = undefined; + } + } + + return data; +} + +// checks a cache object for emptiness +function isEmptyDataObject( obj ) { + for ( var name in obj ) { + + // if the public data object is empty, the private is still empty + if ( name === "data" && jQuery.isEmptyObject( obj[name] ) ) { + continue; + } + if ( name !== "toJSON" ) { + return false; + } + } + + return true; +} + + + + +function handleQueueMarkDefer( elem, type, src ) { + var deferDataKey = type + "defer", + queueDataKey = type + "queue", + markDataKey = type + "mark", + defer = jQuery._data( elem, deferDataKey ); + if ( defer && + ( src === "queue" || !jQuery._data(elem, queueDataKey) ) && + ( src === "mark" || !jQuery._data(elem, markDataKey) ) ) { + // Give room for hard-coded callbacks to fire first + // and eventually mark/queue something else on the element + setTimeout( function() { + if ( !jQuery._data( elem, 
queueDataKey ) && + !jQuery._data( elem, markDataKey ) ) { + jQuery.removeData( elem, deferDataKey, true ); + defer.fire(); + } + }, 0 ); + } +} + +jQuery.extend({ + + _mark: function( elem, type ) { + if ( elem ) { + type = ( type || "fx" ) + "mark"; + jQuery._data( elem, type, (jQuery._data( elem, type ) || 0) + 1 ); + } + }, + + _unmark: function( force, elem, type ) { + if ( force !== true ) { + type = elem; + elem = force; + force = false; + } + if ( elem ) { + type = type || "fx"; + var key = type + "mark", + count = force ? 0 : ( (jQuery._data( elem, key ) || 1) - 1 ); + if ( count ) { + jQuery._data( elem, key, count ); + } else { + jQuery.removeData( elem, key, true ); + handleQueueMarkDefer( elem, type, "mark" ); + } + } + }, + + queue: function( elem, type, data ) { + var q; + if ( elem ) { + type = ( type || "fx" ) + "queue"; + q = jQuery._data( elem, type ); + + // Speed up dequeue by getting out quickly if this is just a lookup + if ( data ) { + if ( !q || jQuery.isArray(data) ) { + q = jQuery._data( elem, type, jQuery.makeArray(data) ); + } else { + q.push( data ); + } + } + return q || []; + } + }, + + dequeue: function( elem, type ) { + type = type || "fx"; + + var queue = jQuery.queue( elem, type ), + fn = queue.shift(), + hooks = {}; + + // If the fx queue is dequeued, always remove the progress sentinel + if ( fn === "inprogress" ) { + fn = queue.shift(); + } + + if ( fn ) { + // Add a progress sentinel to prevent the fx queue from being + // automatically dequeued + if ( type === "fx" ) { + queue.unshift( "inprogress" ); + } + + jQuery._data( elem, type + ".run", hooks ); + fn.call( elem, function() { + jQuery.dequeue( elem, type ); + }, hooks ); + } + + if ( !queue.length ) { + jQuery.removeData( elem, type + "queue " + type + ".run", true ); + handleQueueMarkDefer( elem, type, "queue" ); + } + } +}); + +jQuery.fn.extend({ + queue: function( type, data ) { + if ( typeof type !== "string" ) { + data = type; + type = "fx"; + } + + if ( data === 
undefined ) { + return jQuery.queue( this[0], type ); + } + return this.each(function() { + var queue = jQuery.queue( this, type, data ); + + if ( type === "fx" && queue[0] !== "inprogress" ) { + jQuery.dequeue( this, type ); + } + }); + }, + dequeue: function( type ) { + return this.each(function() { + jQuery.dequeue( this, type ); + }); + }, + // Based off of the plugin by Clint Helfers, with permission. + // http://blindsignals.com/index.php/2009/07/jquery-delay/ + delay: function( time, type ) { + time = jQuery.fx ? jQuery.fx.speeds[ time ] || time : time; + type = type || "fx"; + + return this.queue( type, function( next, hooks ) { + var timeout = setTimeout( next, time ); + hooks.stop = function() { + clearTimeout( timeout ); + }; + }); + }, + clearQueue: function( type ) { + return this.queue( type || "fx", [] ); + }, + // Get a promise resolved when queues of a certain type + // are emptied (fx is the type by default) + promise: function( type, object ) { + if ( typeof type !== "string" ) { + object = type; + type = undefined; + } + type = type || "fx"; + var defer = jQuery.Deferred(), + elements = this, + i = elements.length, + count = 1, + deferDataKey = type + "defer", + queueDataKey = type + "queue", + markDataKey = type + "mark", + tmp; + function resolve() { + if ( !( --count ) ) { + defer.resolveWith( elements, [ elements ] ); + } + } + while( i-- ) { + if (( tmp = jQuery.data( elements[ i ], deferDataKey, undefined, true ) || + ( jQuery.data( elements[ i ], queueDataKey, undefined, true ) || + jQuery.data( elements[ i ], markDataKey, undefined, true ) ) && + jQuery.data( elements[ i ], deferDataKey, jQuery.Callbacks( "once memory" ), true ) )) { + count++; + tmp.add( resolve ); + } + } + resolve(); + return defer.promise(); + } +}); + + + + +var rclass = /[\n\t\r]/g, + rspace = /\s+/, + rreturn = /\r/g, + rtype = /^(?:button|input)$/i, + rfocusable = /^(?:button|input|object|select|textarea)$/i, + rclickable = /^a(?:rea)?$/i, + rboolean = 
/^(?:autofocus|autoplay|async|checked|controls|defer|disabled|hidden|loop|multiple|open|readonly|required|scoped|selected)$/i, + getSetAttribute = jQuery.support.getSetAttribute, + nodeHook, boolHook, fixSpecified; + +jQuery.fn.extend({ + attr: function( name, value ) { + return jQuery.access( this, name, value, true, jQuery.attr ); + }, + + removeAttr: function( name ) { + return this.each(function() { + jQuery.removeAttr( this, name ); + }); + }, + + prop: function( name, value ) { + return jQuery.access( this, name, value, true, jQuery.prop ); + }, + + removeProp: function( name ) { + name = jQuery.propFix[ name ] || name; + return this.each(function() { + // try/catch handles cases where IE balks (such as removing a property on window) + try { + this[ name ] = undefined; + delete this[ name ]; + } catch( e ) {} + }); + }, + + addClass: function( value ) { + var classNames, i, l, elem, + setClass, c, cl; + + if ( jQuery.isFunction( value ) ) { + return this.each(function( j ) { + jQuery( this ).addClass( value.call(this, j, this.className) ); + }); + } + + if ( value && typeof value === "string" ) { + classNames = value.split( rspace ); + + for ( i = 0, l = this.length; i < l; i++ ) { + elem = this[ i ]; + + if ( elem.nodeType === 1 ) { + if ( !elem.className && classNames.length === 1 ) { + elem.className = value; + + } else { + setClass = " " + elem.className + " "; + + for ( c = 0, cl = classNames.length; c < cl; c++ ) { + if ( !~setClass.indexOf( " " + classNames[ c ] + " " ) ) { + setClass += classNames[ c ] + " "; + } + } + elem.className = jQuery.trim( setClass ); + } + } + } + } + + return this; + }, + + removeClass: function( value ) { + var classNames, i, l, elem, className, c, cl; + + if ( jQuery.isFunction( value ) ) { + return this.each(function( j ) { + jQuery( this ).removeClass( value.call(this, j, this.className) ); + }); + } + + if ( (value && typeof value === "string") || value === undefined ) { + classNames = ( value || "" ).split( rspace ); 
+ + for ( i = 0, l = this.length; i < l; i++ ) { + elem = this[ i ]; + + if ( elem.nodeType === 1 && elem.className ) { + if ( value ) { + className = (" " + elem.className + " ").replace( rclass, " " ); + for ( c = 0, cl = classNames.length; c < cl; c++ ) { + className = className.replace(" " + classNames[ c ] + " ", " "); + } + elem.className = jQuery.trim( className ); + + } else { + elem.className = ""; + } + } + } + } + + return this; + }, + + toggleClass: function( value, stateVal ) { + var type = typeof value, + isBool = typeof stateVal === "boolean"; + + if ( jQuery.isFunction( value ) ) { + return this.each(function( i ) { + jQuery( this ).toggleClass( value.call(this, i, this.className, stateVal), stateVal ); + }); + } + + return this.each(function() { + if ( type === "string" ) { + // toggle individual class names + var className, + i = 0, + self = jQuery( this ), + state = stateVal, + classNames = value.split( rspace ); + + while ( (className = classNames[ i++ ]) ) { + // check each className given, space seperated list + state = isBool ? state : !self.hasClass( className ); + self[ state ? "addClass" : "removeClass" ]( className ); + } + + } else if ( type === "undefined" || type === "boolean" ) { + if ( this.className ) { + // store className if set + jQuery._data( this, "__className__", this.className ); + } + + // toggle whole className + this.className = this.className || value === false ? 
"" : jQuery._data( this, "__className__" ) || ""; + } + }); + }, + + hasClass: function( selector ) { + var className = " " + selector + " ", + i = 0, + l = this.length; + for ( ; i < l; i++ ) { + if ( this[i].nodeType === 1 && (" " + this[i].className + " ").replace(rclass, " ").indexOf( className ) > -1 ) { + return true; + } + } + + return false; + }, + + val: function( value ) { + var hooks, ret, isFunction, + elem = this[0]; + + if ( !arguments.length ) { + if ( elem ) { + hooks = jQuery.valHooks[ elem.nodeName.toLowerCase() ] || jQuery.valHooks[ elem.type ]; + + if ( hooks && "get" in hooks && (ret = hooks.get( elem, "value" )) !== undefined ) { + return ret; + } + + ret = elem.value; + + return typeof ret === "string" ? + // handle most common string cases + ret.replace(rreturn, "") : + // handle cases where value is null/undef or number + ret == null ? "" : ret; + } + + return; + } + + isFunction = jQuery.isFunction( value ); + + return this.each(function( i ) { + var self = jQuery(this), val; + + if ( this.nodeType !== 1 ) { + return; + } + + if ( isFunction ) { + val = value.call( this, i, self.val() ); + } else { + val = value; + } + + // Treat null/undefined as ""; convert numbers to string + if ( val == null ) { + val = ""; + } else if ( typeof val === "number" ) { + val += ""; + } else if ( jQuery.isArray( val ) ) { + val = jQuery.map(val, function ( value ) { + return value == null ? "" : value + ""; + }); + } + + hooks = jQuery.valHooks[ this.nodeName.toLowerCase() ] || jQuery.valHooks[ this.type ]; + + // If set returns undefined, fall back to normal setting + if ( !hooks || !("set" in hooks) || hooks.set( this, val, "value" ) === undefined ) { + this.value = val; + } + }); + } +}); + +jQuery.extend({ + valHooks: { + option: { + get: function( elem ) { + // attributes.value is undefined in Blackberry 4.7 but + // uses .value. See #6932 + var val = elem.attributes.value; + return !val || val.specified ? 
elem.value : elem.text; + } + }, + select: { + get: function( elem ) { + var value, i, max, option, + index = elem.selectedIndex, + values = [], + options = elem.options, + one = elem.type === "select-one"; + + // Nothing was selected + if ( index < 0 ) { + return null; + } + + // Loop through all the selected options + i = one ? index : 0; + max = one ? index + 1 : options.length; + for ( ; i < max; i++ ) { + option = options[ i ]; + + // Don't return options that are disabled or in a disabled optgroup + if ( option.selected && (jQuery.support.optDisabled ? !option.disabled : option.getAttribute("disabled") === null) && + (!option.parentNode.disabled || !jQuery.nodeName( option.parentNode, "optgroup" )) ) { + + // Get the specific value for the option + value = jQuery( option ).val(); + + // We don't need an array for one selects + if ( one ) { + return value; + } + + // Multi-Selects return an array + values.push( value ); + } + } + + // Fixes Bug #2551 -- select.val() broken in IE after form.reset() + if ( one && !values.length && options.length ) { + return jQuery( options[ index ] ).val(); + } + + return values; + }, + + set: function( elem, value ) { + var values = jQuery.makeArray( value ); + + jQuery(elem).find("option").each(function() { + this.selected = jQuery.inArray( jQuery(this).val(), values ) >= 0; + }); + + if ( !values.length ) { + elem.selectedIndex = -1; + } + return values; + } + } + }, + + attrFn: { + val: true, + css: true, + html: true, + text: true, + data: true, + width: true, + height: true, + offset: true + }, + + attr: function( elem, name, value, pass ) { + var ret, hooks, notxml, + nType = elem.nodeType; + + // don't get/set attributes on text, comment and attribute nodes + if ( !elem || nType === 3 || nType === 8 || nType === 2 ) { + return; + } + + if ( pass && name in jQuery.attrFn ) { + return jQuery( elem )[ name ]( value ); + } + + // Fallback to prop when attributes are not supported + if ( typeof elem.getAttribute === 
"undefined" ) { + return jQuery.prop( elem, name, value ); + } + + notxml = nType !== 1 || !jQuery.isXMLDoc( elem ); + + // All attributes are lowercase + // Grab necessary hook if one is defined + if ( notxml ) { + name = name.toLowerCase(); + hooks = jQuery.attrHooks[ name ] || ( rboolean.test( name ) ? boolHook : nodeHook ); + } + + if ( value !== undefined ) { + + if ( value === null ) { + jQuery.removeAttr( elem, name ); + return; + + } else if ( hooks && "set" in hooks && notxml && (ret = hooks.set( elem, value, name )) !== undefined ) { + return ret; + + } else { + elem.setAttribute( name, "" + value ); + return value; + } + + } else if ( hooks && "get" in hooks && notxml && (ret = hooks.get( elem, name )) !== null ) { + return ret; + + } else { + + ret = elem.getAttribute( name ); + + // Non-existent attributes return null, we normalize to undefined + return ret === null ? + undefined : + ret; + } + }, + + removeAttr: function( elem, value ) { + var propName, attrNames, name, l, + i = 0; + + if ( value && elem.nodeType === 1 ) { + attrNames = value.toLowerCase().split( rspace ); + l = attrNames.length; + + for ( ; i < l; i++ ) { + name = attrNames[ i ]; + + if ( name ) { + propName = jQuery.propFix[ name ] || name; + + // See #9699 for explanation of this approach (setting first, then removal) + jQuery.attr( elem, name, "" ); + elem.removeAttribute( getSetAttribute ? 
name : propName ); + + // Set corresponding property to false for boolean attributes + if ( rboolean.test( name ) && propName in elem ) { + elem[ propName ] = false; + } + } + } + } + }, + + attrHooks: { + type: { + set: function( elem, value ) { + // We can't allow the type property to be changed (since it causes problems in IE) + if ( rtype.test( elem.nodeName ) && elem.parentNode ) { + jQuery.error( "type property can't be changed" ); + } else if ( !jQuery.support.radioValue && value === "radio" && jQuery.nodeName(elem, "input") ) { + // Setting the type on a radio button after the value resets the value in IE6-9 + // Reset value to it's default in case type is set after value + // This is for element creation + var val = elem.value; + elem.setAttribute( "type", value ); + if ( val ) { + elem.value = val; + } + return value; + } + } + }, + // Use the value property for back compat + // Use the nodeHook for button elements in IE6/7 (#1954) + value: { + get: function( elem, name ) { + if ( nodeHook && jQuery.nodeName( elem, "button" ) ) { + return nodeHook.get( elem, name ); + } + return name in elem ? 
+ elem.value : + null; + }, + set: function( elem, value, name ) { + if ( nodeHook && jQuery.nodeName( elem, "button" ) ) { + return nodeHook.set( elem, value, name ); + } + // Does not return so that setAttribute is also used + elem.value = value; + } + } + }, + + propFix: { + tabindex: "tabIndex", + readonly: "readOnly", + "for": "htmlFor", + "class": "className", + maxlength: "maxLength", + cellspacing: "cellSpacing", + cellpadding: "cellPadding", + rowspan: "rowSpan", + colspan: "colSpan", + usemap: "useMap", + frameborder: "frameBorder", + contenteditable: "contentEditable" + }, + + prop: function( elem, name, value ) { + var ret, hooks, notxml, + nType = elem.nodeType; + + // don't get/set properties on text, comment and attribute nodes + if ( !elem || nType === 3 || nType === 8 || nType === 2 ) { + return; + } + + notxml = nType !== 1 || !jQuery.isXMLDoc( elem ); + + if ( notxml ) { + // Fix name and attach hooks + name = jQuery.propFix[ name ] || name; + hooks = jQuery.propHooks[ name ]; + } + + if ( value !== undefined ) { + if ( hooks && "set" in hooks && (ret = hooks.set( elem, value, name )) !== undefined ) { + return ret; + + } else { + return ( elem[ name ] = value ); + } + + } else { + if ( hooks && "get" in hooks && (ret = hooks.get( elem, name )) !== null ) { + return ret; + + } else { + return elem[ name ]; + } + } + }, + + propHooks: { + tabIndex: { + get: function( elem ) { + // elem.tabIndex doesn't always return the correct value when it hasn't been explicitly set + // http://fluidproject.org/blog/2008/01/09/getting-setting-and-removing-tabindex-values-with-javascript/ + var attributeNode = elem.getAttributeNode("tabindex"); + + return attributeNode && attributeNode.specified ? + parseInt( attributeNode.value, 10 ) : + rfocusable.test( elem.nodeName ) || rclickable.test( elem.nodeName ) && elem.href ? 
+ 0 : + undefined; + } + } + } +}); + +// Add the tabIndex propHook to attrHooks for back-compat (different case is intentional) +jQuery.attrHooks.tabindex = jQuery.propHooks.tabIndex; + +// Hook for boolean attributes +boolHook = { + get: function( elem, name ) { + // Align boolean attributes with corresponding properties + // Fall back to attribute presence where some booleans are not supported + var attrNode, + property = jQuery.prop( elem, name ); + return property === true || typeof property !== "boolean" && ( attrNode = elem.getAttributeNode(name) ) && attrNode.nodeValue !== false ? + name.toLowerCase() : + undefined; + }, + set: function( elem, value, name ) { + var propName; + if ( value === false ) { + // Remove boolean attributes when set to false + jQuery.removeAttr( elem, name ); + } else { + // value is true since we know at this point it's type boolean and not false + // Set boolean attributes to the same name and set the DOM property + propName = jQuery.propFix[ name ] || name; + if ( propName in elem ) { + // Only set the IDL specifically if it already exists on the element + elem[ propName ] = true; + } + + elem.setAttribute( name, name.toLowerCase() ); + } + return name; + } +}; + +// IE6/7 do not support getting/setting some attributes with get/setAttribute +if ( !getSetAttribute ) { + + fixSpecified = { + name: true, + id: true + }; + + // Use this for any attribute in IE6/7 + // This fixes almost every IE6/7 issue + nodeHook = jQuery.valHooks.button = { + get: function( elem, name ) { + var ret; + ret = elem.getAttributeNode( name ); + return ret && ( fixSpecified[ name ] ? ret.nodeValue !== "" : ret.specified ) ? 
+ ret.nodeValue : + undefined; + }, + set: function( elem, value, name ) { + // Set the existing or create a new attribute node + var ret = elem.getAttributeNode( name ); + if ( !ret ) { + ret = document.createAttribute( name ); + elem.setAttributeNode( ret ); + } + return ( ret.nodeValue = value + "" ); + } + }; + + // Apply the nodeHook to tabindex + jQuery.attrHooks.tabindex.set = nodeHook.set; + + // Set width and height to auto instead of 0 on empty string( Bug #8150 ) + // This is for removals + jQuery.each([ "width", "height" ], function( i, name ) { + jQuery.attrHooks[ name ] = jQuery.extend( jQuery.attrHooks[ name ], { + set: function( elem, value ) { + if ( value === "" ) { + elem.setAttribute( name, "auto" ); + return value; + } + } + }); + }); + + // Set contenteditable to false on removals(#10429) + // Setting to empty string throws an error as an invalid value + jQuery.attrHooks.contenteditable = { + get: nodeHook.get, + set: function( elem, value, name ) { + if ( value === "" ) { + value = "false"; + } + nodeHook.set( elem, value, name ); + } + }; +} + + +// Some attributes require a special call on IE +if ( !jQuery.support.hrefNormalized ) { + jQuery.each([ "href", "src", "width", "height" ], function( i, name ) { + jQuery.attrHooks[ name ] = jQuery.extend( jQuery.attrHooks[ name ], { + get: function( elem ) { + var ret = elem.getAttribute( name, 2 ); + return ret === null ? 
undefined : ret; + } + }); + }); +} + +if ( !jQuery.support.style ) { + jQuery.attrHooks.style = { + get: function( elem ) { + // Return undefined in the case of empty string + // Normalize to lowercase since IE uppercases css property names + return elem.style.cssText.toLowerCase() || undefined; + }, + set: function( elem, value ) { + return ( elem.style.cssText = "" + value ); + } + }; +} + +// Safari mis-reports the default selected property of an option +// Accessing the parent's selectedIndex property fixes it +if ( !jQuery.support.optSelected ) { + jQuery.propHooks.selected = jQuery.extend( jQuery.propHooks.selected, { + get: function( elem ) { + var parent = elem.parentNode; + + if ( parent ) { + parent.selectedIndex; + + // Make sure that it also works with optgroups, see #5701 + if ( parent.parentNode ) { + parent.parentNode.selectedIndex; + } + } + return null; + } + }); +} + +// IE6/7 call enctype encoding +if ( !jQuery.support.enctype ) { + jQuery.propFix.enctype = "encoding"; +} + +// Radios and checkboxes getter/setter +if ( !jQuery.support.checkOn ) { + jQuery.each([ "radio", "checkbox" ], function() { + jQuery.valHooks[ this ] = { + get: function( elem ) { + // Handle the case where in Webkit "" is returned instead of "on" if a value isn't specified + return elem.getAttribute("value") === null ? 
"on" : elem.value; + } + }; + }); +} +jQuery.each([ "radio", "checkbox" ], function() { + jQuery.valHooks[ this ] = jQuery.extend( jQuery.valHooks[ this ], { + set: function( elem, value ) { + if ( jQuery.isArray( value ) ) { + return ( elem.checked = jQuery.inArray( jQuery(elem).val(), value ) >= 0 ); + } + } + }); +}); + + + + +var rformElems = /^(?:textarea|input|select)$/i, + rtypenamespace = /^([^\.]*)?(?:\.(.+))?$/, + rhoverHack = /\bhover(\.\S+)?\b/, + rkeyEvent = /^key/, + rmouseEvent = /^(?:mouse|contextmenu)|click/, + rfocusMorph = /^(?:focusinfocus|focusoutblur)$/, + rquickIs = /^(\w*)(?:#([\w\-]+))?(?:\.([\w\-]+))?$/, + quickParse = function( selector ) { + var quick = rquickIs.exec( selector ); + if ( quick ) { + // 0 1 2 3 + // [ _, tag, id, class ] + quick[1] = ( quick[1] || "" ).toLowerCase(); + quick[3] = quick[3] && new RegExp( "(?:^|\\s)" + quick[3] + "(?:\\s|$)" ); + } + return quick; + }, + quickIs = function( elem, m ) { + var attrs = elem.attributes || {}; + return ( + (!m[1] || elem.nodeName.toLowerCase() === m[1]) && + (!m[2] || (attrs.id || {}).value === m[2]) && + (!m[3] || m[3].test( (attrs[ "class" ] || {}).value )) + ); + }, + hoverHack = function( events ) { + return jQuery.event.special.hover ? events : events.replace( rhoverHack, "mouseenter$1 mouseleave$1" ); + }; + +/* + * Helper functions for managing events -- not part of the public interface. + * Props to Dean Edwards' addEvent library for many of the ideas. 
+ */ +jQuery.event = { + + add: function( elem, types, handler, data, selector ) { + + var elemData, eventHandle, events, + t, tns, type, namespaces, handleObj, + handleObjIn, quick, handlers, special; + + // Don't attach events to noData or text/comment nodes (allow plain objects tho) + if ( elem.nodeType === 3 || elem.nodeType === 8 || !types || !handler || !(elemData = jQuery._data( elem )) ) { + return; + } + + // Caller can pass in an object of custom data in lieu of the handler + if ( handler.handler ) { + handleObjIn = handler; + handler = handleObjIn.handler; + } + + // Make sure that the handler has a unique ID, used to find/remove it later + if ( !handler.guid ) { + handler.guid = jQuery.guid++; + } + + // Init the element's event structure and main handler, if this is the first + events = elemData.events; + if ( !events ) { + elemData.events = events = {}; + } + eventHandle = elemData.handle; + if ( !eventHandle ) { + elemData.handle = eventHandle = function( e ) { + // Discard the second event of a jQuery.event.trigger() and + // when an event is called after a page has unloaded + return typeof jQuery !== "undefined" && (!e || jQuery.event.triggered !== e.type) ? + jQuery.event.dispatch.apply( eventHandle.elem, arguments ) : + undefined; + }; + // Add elem as a property of the handle fn to prevent a memory leak with IE non-native events + eventHandle.elem = elem; + } + + // Handle multiple events separated by a space + // jQuery(...).bind("mouseover mouseout", fn); + types = jQuery.trim( hoverHack(types) ).split( " " ); + for ( t = 0; t < types.length; t++ ) { + + tns = rtypenamespace.exec( types[t] ) || []; + type = tns[1]; + namespaces = ( tns[2] || "" ).split( "." ).sort(); + + // If event changes its type, use the special event handlers for the changed type + special = jQuery.event.special[ type ] || {}; + + // If selector defined, determine special event api type, otherwise given type + type = ( selector ? 
special.delegateType : special.bindType ) || type; + + // Update special based on newly reset type + special = jQuery.event.special[ type ] || {}; + + // handleObj is passed to all event handlers + handleObj = jQuery.extend({ + type: type, + origType: tns[1], + data: data, + handler: handler, + guid: handler.guid, + selector: selector, + quick: quickParse( selector ), + namespace: namespaces.join(".") + }, handleObjIn ); + + // Init the event handler queue if we're the first + handlers = events[ type ]; + if ( !handlers ) { + handlers = events[ type ] = []; + handlers.delegateCount = 0; + + // Only use addEventListener/attachEvent if the special events handler returns false + if ( !special.setup || special.setup.call( elem, data, namespaces, eventHandle ) === false ) { + // Bind the global event handler to the element + if ( elem.addEventListener ) { + elem.addEventListener( type, eventHandle, false ); + + } else if ( elem.attachEvent ) { + elem.attachEvent( "on" + type, eventHandle ); + } + } + } + + if ( special.add ) { + special.add.call( elem, handleObj ); + + if ( !handleObj.handler.guid ) { + handleObj.handler.guid = handler.guid; + } + } + + // Add to the element's handler list, delegates in front + if ( selector ) { + handlers.splice( handlers.delegateCount++, 0, handleObj ); + } else { + handlers.push( handleObj ); + } + + // Keep track of which events have ever been used, for event optimization + jQuery.event.global[ type ] = true; + } + + // Nullify elem to prevent memory leaks in IE + elem = null; + }, + + global: {}, + + // Detach an event or set of events from an element + remove: function( elem, types, handler, selector, mappedTypes ) { + + var elemData = jQuery.hasData( elem ) && jQuery._data( elem ), + t, tns, type, origType, namespaces, origCount, + j, events, special, handle, eventType, handleObj; + + if ( !elemData || !(events = elemData.events) ) { + return; + } + + // Once for each type.namespace in types; type may be omitted + types = 
jQuery.trim( hoverHack( types || "" ) ).split(" "); + for ( t = 0; t < types.length; t++ ) { + tns = rtypenamespace.exec( types[t] ) || []; + type = origType = tns[1]; + namespaces = tns[2]; + + // Unbind all events (on this namespace, if provided) for the element + if ( !type ) { + for ( type in events ) { + jQuery.event.remove( elem, type + types[ t ], handler, selector, true ); + } + continue; + } + + special = jQuery.event.special[ type ] || {}; + type = ( selector? special.delegateType : special.bindType ) || type; + eventType = events[ type ] || []; + origCount = eventType.length; + namespaces = namespaces ? new RegExp("(^|\\.)" + namespaces.split(".").sort().join("\\.(?:.*\\.)?") + "(\\.|$)") : null; + + // Remove matching events + for ( j = 0; j < eventType.length; j++ ) { + handleObj = eventType[ j ]; + + if ( ( mappedTypes || origType === handleObj.origType ) && + ( !handler || handler.guid === handleObj.guid ) && + ( !namespaces || namespaces.test( handleObj.namespace ) ) && + ( !selector || selector === handleObj.selector || selector === "**" && handleObj.selector ) ) { + eventType.splice( j--, 1 ); + + if ( handleObj.selector ) { + eventType.delegateCount--; + } + if ( special.remove ) { + special.remove.call( elem, handleObj ); + } + } + } + + // Remove generic event handler if we removed something and no more handlers exist + // (avoids potential for endless recursion during removal of special event handlers) + if ( eventType.length === 0 && origCount !== eventType.length ) { + if ( !special.teardown || special.teardown.call( elem, namespaces ) === false ) { + jQuery.removeEvent( elem, type, elemData.handle ); + } + + delete events[ type ]; + } + } + + // Remove the expando if it's no longer used + if ( jQuery.isEmptyObject( events ) ) { + handle = elemData.handle; + if ( handle ) { + handle.elem = null; + } + + // removeData also checks for emptiness and clears the expando if empty + // so use it instead of delete + jQuery.removeData( elem, [ 
"events", "handle" ], true ); + } + }, + + // Events that are safe to short-circuit if no handlers are attached. + // Native DOM events should not be added, they may have inline handlers. + customEvent: { + "getData": true, + "setData": true, + "changeData": true + }, + + trigger: function( event, data, elem, onlyHandlers ) { + // Don't do events on text and comment nodes + if ( elem && (elem.nodeType === 3 || elem.nodeType === 8) ) { + return; + } + + // Event object or event type + var type = event.type || event, + namespaces = [], + cache, exclusive, i, cur, old, ontype, special, handle, eventPath, bubbleType; + + // focus/blur morphs to focusin/out; ensure we're not firing them right now + if ( rfocusMorph.test( type + jQuery.event.triggered ) ) { + return; + } + + if ( type.indexOf( "!" ) >= 0 ) { + // Exclusive events trigger only for the exact event (no namespaces) + type = type.slice(0, -1); + exclusive = true; + } + + if ( type.indexOf( "." ) >= 0 ) { + // Namespaced trigger; create a regexp to match event type in handle() + namespaces = type.split("."); + type = namespaces.shift(); + namespaces.sort(); + } + + if ( (!elem || jQuery.event.customEvent[ type ]) && !jQuery.event.global[ type ] ) { + // No jQuery handlers for this event type, and it can't have inline handlers + return; + } + + // Caller can pass in an Event, Object, or just an event type string + event = typeof event === "object" ? + // jQuery.Event object + event[ jQuery.expando ] ? event : + // Object literal + new jQuery.Event( type, event ) : + // Just the event type (string) + new jQuery.Event( type ); + + event.type = type; + event.isTrigger = true; + event.exclusive = exclusive; + event.namespace = namespaces.join( "." ); + event.namespace_re = event.namespace? new RegExp("(^|\\.)" + namespaces.join("\\.(?:.*\\.)?") + "(\\.|$)") : null; + ontype = type.indexOf( ":" ) < 0 ? 
"on" + type : ""; + + // Handle a global trigger + if ( !elem ) { + + // TODO: Stop taunting the data cache; remove global events and always attach to document + cache = jQuery.cache; + for ( i in cache ) { + if ( cache[ i ].events && cache[ i ].events[ type ] ) { + jQuery.event.trigger( event, data, cache[ i ].handle.elem, true ); + } + } + return; + } + + // Clean up the event in case it is being reused + event.result = undefined; + if ( !event.target ) { + event.target = elem; + } + + // Clone any incoming data and prepend the event, creating the handler arg list + data = data != null ? jQuery.makeArray( data ) : []; + data.unshift( event ); + + // Allow special events to draw outside the lines + special = jQuery.event.special[ type ] || {}; + if ( special.trigger && special.trigger.apply( elem, data ) === false ) { + return; + } + + // Determine event propagation path in advance, per W3C events spec (#9951) + // Bubble up to document, then to window; watch for a global ownerDocument var (#9724) + eventPath = [[ elem, special.bindType || type ]]; + if ( !onlyHandlers && !special.noBubble && !jQuery.isWindow( elem ) ) { + + bubbleType = special.delegateType || type; + cur = rfocusMorph.test( bubbleType + type ) ? 
elem : elem.parentNode; + old = null; + for ( ; cur; cur = cur.parentNode ) { + eventPath.push([ cur, bubbleType ]); + old = cur; + } + + // Only add window if we got to document (e.g., not plain obj or detached DOM) + if ( old && old === elem.ownerDocument ) { + eventPath.push([ old.defaultView || old.parentWindow || window, bubbleType ]); + } + } + + // Fire handlers on the event path + for ( i = 0; i < eventPath.length && !event.isPropagationStopped(); i++ ) { + + cur = eventPath[i][0]; + event.type = eventPath[i][1]; + + handle = ( jQuery._data( cur, "events" ) || {} )[ event.type ] && jQuery._data( cur, "handle" ); + if ( handle ) { + handle.apply( cur, data ); + } + // Note that this is a bare JS function and not a jQuery handler + handle = ontype && cur[ ontype ]; + if ( handle && jQuery.acceptData( cur ) && handle.apply( cur, data ) === false ) { + event.preventDefault(); + } + } + event.type = type; + + // If nobody prevented the default action, do it now + if ( !onlyHandlers && !event.isDefaultPrevented() ) { + + if ( (!special._default || special._default.apply( elem.ownerDocument, data ) === false) && + !(type === "click" && jQuery.nodeName( elem, "a" )) && jQuery.acceptData( elem ) ) { + + // Call a native DOM method on the target with the same name name as the event. + // Can't use an .isFunction() check here because IE6/7 fails that test. 
+ // Don't do default actions on window, that's where global variables be (#6170) + // IE<9 dies on focus/blur to hidden element (#1486) + if ( ontype && elem[ type ] && ((type !== "focus" && type !== "blur") || event.target.offsetWidth !== 0) && !jQuery.isWindow( elem ) ) { + + // Don't re-trigger an onFOO event when we call its FOO() method + old = elem[ ontype ]; + + if ( old ) { + elem[ ontype ] = null; + } + + // Prevent re-triggering of the same event, since we already bubbled it above + jQuery.event.triggered = type; + elem[ type ](); + jQuery.event.triggered = undefined; + + if ( old ) { + elem[ ontype ] = old; + } + } + } + } + + return event.result; + }, + + dispatch: function( event ) { + + // Make a writable jQuery.Event from the native event object + event = jQuery.event.fix( event || window.event ); + + var handlers = ( (jQuery._data( this, "events" ) || {} )[ event.type ] || []), + delegateCount = handlers.delegateCount, + args = [].slice.call( arguments, 0 ), + run_all = !event.exclusive && !event.namespace, + handlerQueue = [], + i, j, cur, jqcur, ret, selMatch, matched, matches, handleObj, sel, related; + + // Use the fix-ed jQuery.Event rather than the (read-only) native event + args[0] = event; + event.delegateTarget = this; + + // Determine handlers that should run if there are delegated events + // Avoid disabled elements in IE (#6911) and non-left-click bubbling in Firefox (#3861) + if ( delegateCount && !event.target.disabled && !(event.button && event.type === "click") ) { + + // Pregenerate a single jQuery object for reuse with .is() + jqcur = jQuery(this); + jqcur.context = this.ownerDocument || this; + + for ( cur = event.target; cur != this; cur = cur.parentNode || this ) { + selMatch = {}; + matches = []; + jqcur[0] = cur; + for ( i = 0; i < delegateCount; i++ ) { + handleObj = handlers[ i ]; + sel = handleObj.selector; + + if ( selMatch[ sel ] === undefined ) { + selMatch[ sel ] = ( + handleObj.quick ? 
quickIs( cur, handleObj.quick ) : jqcur.is( sel ) + ); + } + if ( selMatch[ sel ] ) { + matches.push( handleObj ); + } + } + if ( matches.length ) { + handlerQueue.push({ elem: cur, matches: matches }); + } + } + } + + // Add the remaining (directly-bound) handlers + if ( handlers.length > delegateCount ) { + handlerQueue.push({ elem: this, matches: handlers.slice( delegateCount ) }); + } + + // Run delegates first; they may want to stop propagation beneath us + for ( i = 0; i < handlerQueue.length && !event.isPropagationStopped(); i++ ) { + matched = handlerQueue[ i ]; + event.currentTarget = matched.elem; + + for ( j = 0; j < matched.matches.length && !event.isImmediatePropagationStopped(); j++ ) { + handleObj = matched.matches[ j ]; + + // Triggered event must either 1) be non-exclusive and have no namespace, or + // 2) have namespace(s) a subset or equal to those in the bound event (both can have no namespace). + if ( run_all || (!event.namespace && !handleObj.namespace) || event.namespace_re && event.namespace_re.test( handleObj.namespace ) ) { + + event.data = handleObj.data; + event.handleObj = handleObj; + + ret = ( (jQuery.event.special[ handleObj.origType ] || {}).handle || handleObj.handler ) + .apply( matched.elem, args ); + + if ( ret !== undefined ) { + event.result = ret; + if ( ret === false ) { + event.preventDefault(); + event.stopPropagation(); + } + } + } + } + } + + return event.result; + }, + + // Includes some event props shared by KeyEvent and MouseEvent + // *** attrChange attrName relatedNode srcElement are not normalized, non-W3C, deprecated, will be removed in 1.8 *** + props: "attrChange attrName relatedNode srcElement altKey bubbles cancelable ctrlKey currentTarget eventPhase metaKey relatedTarget shiftKey target timeStamp view which".split(" "), + + fixHooks: {}, + + keyHooks: { + props: "char charCode key keyCode".split(" "), + filter: function( event, original ) { + + // Add which for key events + if ( event.which == null ) { + 
event.which = original.charCode != null ? original.charCode : original.keyCode; + } + + return event; + } + }, + + mouseHooks: { + props: "button buttons clientX clientY fromElement offsetX offsetY pageX pageY screenX screenY toElement".split(" "), + filter: function( event, original ) { + var eventDoc, doc, body, + button = original.button, + fromElement = original.fromElement; + + // Calculate pageX/Y if missing and clientX/Y available + if ( event.pageX == null && original.clientX != null ) { + eventDoc = event.target.ownerDocument || document; + doc = eventDoc.documentElement; + body = eventDoc.body; + + event.pageX = original.clientX + ( doc && doc.scrollLeft || body && body.scrollLeft || 0 ) - ( doc && doc.clientLeft || body && body.clientLeft || 0 ); + event.pageY = original.clientY + ( doc && doc.scrollTop || body && body.scrollTop || 0 ) - ( doc && doc.clientTop || body && body.clientTop || 0 ); + } + + // Add relatedTarget, if necessary + if ( !event.relatedTarget && fromElement ) { + event.relatedTarget = fromElement === event.target ? original.toElement : fromElement; + } + + // Add which for click: 1 === left; 2 === middle; 3 === right + // Note: button is not normalized, so don't use it + if ( !event.which && button !== undefined ) { + event.which = ( button & 1 ? 1 : ( button & 2 ? 3 : ( button & 4 ? 2 : 0 ) ) ); + } + + return event; + } + }, + + fix: function( event ) { + if ( event[ jQuery.expando ] ) { + return event; + } + + // Create a writable copy of the event object and normalize some properties + var i, prop, + originalEvent = event, + fixHook = jQuery.event.fixHooks[ event.type ] || {}, + copy = fixHook.props ? 
this.props.concat( fixHook.props ) : this.props; + + event = jQuery.Event( originalEvent ); + + for ( i = copy.length; i; ) { + prop = copy[ --i ]; + event[ prop ] = originalEvent[ prop ]; + } + + // Fix target property, if necessary (#1925, IE 6/7/8 & Safari2) + if ( !event.target ) { + event.target = originalEvent.srcElement || document; + } + + // Target should not be a text node (#504, Safari) + if ( event.target.nodeType === 3 ) { + event.target = event.target.parentNode; + } + + // For mouse/key events; add metaKey if it's not there (#3368, IE6/7/8) + if ( event.metaKey === undefined ) { + event.metaKey = event.ctrlKey; + } + + return fixHook.filter? fixHook.filter( event, originalEvent ) : event; + }, + + special: { + ready: { + // Make sure the ready event is setup + setup: jQuery.bindReady + }, + + load: { + // Prevent triggered image.load events from bubbling to window.load + noBubble: true + }, + + focus: { + delegateType: "focusin" + }, + blur: { + delegateType: "focusout" + }, + + beforeunload: { + setup: function( data, namespaces, eventHandle ) { + // We only want to do this special case on windows + if ( jQuery.isWindow( this ) ) { + this.onbeforeunload = eventHandle; + } + }, + + teardown: function( namespaces, eventHandle ) { + if ( this.onbeforeunload === eventHandle ) { + this.onbeforeunload = null; + } + } + } + }, + + simulate: function( type, elem, event, bubble ) { + // Piggyback on a donor event to simulate a different one. + // Fake originalEvent to avoid donor's stopPropagation, but if the + // simulated event prevents default then we do the same on the donor. 
+ var e = jQuery.extend( + new jQuery.Event(), + event, + { type: type, + isSimulated: true, + originalEvent: {} + } + ); + if ( bubble ) { + jQuery.event.trigger( e, null, elem ); + } else { + jQuery.event.dispatch.call( elem, e ); + } + if ( e.isDefaultPrevented() ) { + event.preventDefault(); + } + } +}; + +// Some plugins are using, but it's undocumented/deprecated and will be removed. +// The 1.7 special event interface should provide all the hooks needed now. +jQuery.event.handle = jQuery.event.dispatch; + +jQuery.removeEvent = document.removeEventListener ? + function( elem, type, handle ) { + if ( elem.removeEventListener ) { + elem.removeEventListener( type, handle, false ); + } + } : + function( elem, type, handle ) { + if ( elem.detachEvent ) { + elem.detachEvent( "on" + type, handle ); + } + }; + +jQuery.Event = function( src, props ) { + // Allow instantiation without the 'new' keyword + if ( !(this instanceof jQuery.Event) ) { + return new jQuery.Event( src, props ); + } + + // Event object + if ( src && src.type ) { + this.originalEvent = src; + this.type = src.type; + + // Events bubbling up the document may have been marked as prevented + // by a handler lower down the tree; reflect the correct value. + this.isDefaultPrevented = ( src.defaultPrevented || src.returnValue === false || + src.getPreventDefault && src.getPreventDefault() ) ? 
returnTrue : returnFalse; + + // Event type + } else { + this.type = src; + } + + // Put explicitly provided properties onto the event object + if ( props ) { + jQuery.extend( this, props ); + } + + // Create a timestamp if incoming event doesn't have one + this.timeStamp = src && src.timeStamp || jQuery.now(); + + // Mark it as fixed + this[ jQuery.expando ] = true; +}; + +function returnFalse() { + return false; +} +function returnTrue() { + return true; +} + +// jQuery.Event is based on DOM3 Events as specified by the ECMAScript Language Binding +// http://www.w3.org/TR/2003/WD-DOM-Level-3-Events-20030331/ecma-script-binding.html +jQuery.Event.prototype = { + preventDefault: function() { + this.isDefaultPrevented = returnTrue; + + var e = this.originalEvent; + if ( !e ) { + return; + } + + // if preventDefault exists run it on the original event + if ( e.preventDefault ) { + e.preventDefault(); + + // otherwise set the returnValue property of the original event to false (IE) + } else { + e.returnValue = false; + } + }, + stopPropagation: function() { + this.isPropagationStopped = returnTrue; + + var e = this.originalEvent; + if ( !e ) { + return; + } + // if stopPropagation exists run it on the original event + if ( e.stopPropagation ) { + e.stopPropagation(); + } + // otherwise set the cancelBubble property of the original event to true (IE) + e.cancelBubble = true; + }, + stopImmediatePropagation: function() { + this.isImmediatePropagationStopped = returnTrue; + this.stopPropagation(); + }, + isDefaultPrevented: returnFalse, + isPropagationStopped: returnFalse, + isImmediatePropagationStopped: returnFalse +}; + +// Create mouseenter/leave events using mouseover/out and event-time checks +jQuery.each({ + mouseenter: "mouseover", + mouseleave: "mouseout" +}, function( orig, fix ) { + jQuery.event.special[ orig ] = { + delegateType: fix, + bindType: fix, + + handle: function( event ) { + var target = this, + related = event.relatedTarget, + handleObj = 
event.handleObj, + selector = handleObj.selector, + ret; + + // For mousenter/leave call the handler if related is outside the target. + // NB: No relatedTarget if the mouse left/entered the browser window + if ( !related || (related !== target && !jQuery.contains( target, related )) ) { + event.type = handleObj.origType; + ret = handleObj.handler.apply( this, arguments ); + event.type = fix; + } + return ret; + } + }; +}); + +// IE submit delegation +if ( !jQuery.support.submitBubbles ) { + + jQuery.event.special.submit = { + setup: function() { + // Only need this for delegated form submit events + if ( jQuery.nodeName( this, "form" ) ) { + return false; + } + + // Lazy-add a submit handler when a descendant form may potentially be submitted + jQuery.event.add( this, "click._submit keypress._submit", function( e ) { + // Node name check avoids a VML-related crash in IE (#9807) + var elem = e.target, + form = jQuery.nodeName( elem, "input" ) || jQuery.nodeName( elem, "button" ) ? elem.form : undefined; + if ( form && !form._submit_attached ) { + jQuery.event.add( form, "submit._submit", function( event ) { + // If form was submitted by the user, bubble the event up the tree + if ( this.parentNode && !event.isTrigger ) { + jQuery.event.simulate( "submit", this.parentNode, event, true ); + } + }); + form._submit_attached = true; + } + }); + // return undefined since we don't need an event listener + }, + + teardown: function() { + // Only need this for delegated form submit events + if ( jQuery.nodeName( this, "form" ) ) { + return false; + } + + // Remove delegated handlers; cleanData eventually reaps submit handlers attached above + jQuery.event.remove( this, "._submit" ); + } + }; +} + +// IE change delegation and checkbox/radio fix +if ( !jQuery.support.changeBubbles ) { + + jQuery.event.special.change = { + + setup: function() { + + if ( rformElems.test( this.nodeName ) ) { + // IE doesn't fire change on a check/radio until blur; trigger it on click + // after 
a propertychange. Eat the blur-change in special.change.handle. + // This still fires onchange a second time for check/radio after blur. + if ( this.type === "checkbox" || this.type === "radio" ) { + jQuery.event.add( this, "propertychange._change", function( event ) { + if ( event.originalEvent.propertyName === "checked" ) { + this._just_changed = true; + } + }); + jQuery.event.add( this, "click._change", function( event ) { + if ( this._just_changed && !event.isTrigger ) { + this._just_changed = false; + jQuery.event.simulate( "change", this, event, true ); + } + }); + } + return false; + } + // Delegated event; lazy-add a change handler on descendant inputs + jQuery.event.add( this, "beforeactivate._change", function( e ) { + var elem = e.target; + + if ( rformElems.test( elem.nodeName ) && !elem._change_attached ) { + jQuery.event.add( elem, "change._change", function( event ) { + if ( this.parentNode && !event.isSimulated && !event.isTrigger ) { + jQuery.event.simulate( "change", this.parentNode, event, true ); + } + }); + elem._change_attached = true; + } + }); + }, + + handle: function( event ) { + var elem = event.target; + + // Swallow native change events from checkbox/radio, we already triggered them above + if ( this !== elem || event.isSimulated || event.isTrigger || (elem.type !== "radio" && elem.type !== "checkbox") ) { + return event.handleObj.handler.apply( this, arguments ); + } + }, + + teardown: function() { + jQuery.event.remove( this, "._change" ); + + return rformElems.test( this.nodeName ); + } + }; +} + +// Create "bubbling" focus and blur events +if ( !jQuery.support.focusinBubbles ) { + jQuery.each({ focus: "focusin", blur: "focusout" }, function( orig, fix ) { + + // Attach a single capturing handler while someone wants focusin/focusout + var attaches = 0, + handler = function( event ) { + jQuery.event.simulate( fix, event.target, jQuery.event.fix( event ), true ); + }; + + jQuery.event.special[ fix ] = { + setup: function() { + if ( 
attaches++ === 0 ) { + document.addEventListener( orig, handler, true ); + } + }, + teardown: function() { + if ( --attaches === 0 ) { + document.removeEventListener( orig, handler, true ); + } + } + }; + }); +} + +jQuery.fn.extend({ + + on: function( types, selector, data, fn, /*INTERNAL*/ one ) { + var origFn, type; + + // Types can be a map of types/handlers + if ( typeof types === "object" ) { + // ( types-Object, selector, data ) + if ( typeof selector !== "string" ) { + // ( types-Object, data ) + data = selector; + selector = undefined; + } + for ( type in types ) { + this.on( type, selector, data, types[ type ], one ); + } + return this; + } + + if ( data == null && fn == null ) { + // ( types, fn ) + fn = selector; + data = selector = undefined; + } else if ( fn == null ) { + if ( typeof selector === "string" ) { + // ( types, selector, fn ) + fn = data; + data = undefined; + } else { + // ( types, data, fn ) + fn = data; + data = selector; + selector = undefined; + } + } + if ( fn === false ) { + fn = returnFalse; + } else if ( !fn ) { + return this; + } + + if ( one === 1 ) { + origFn = fn; + fn = function( event ) { + // Can use an empty set, since event contains the info + jQuery().off( event ); + return origFn.apply( this, arguments ); + }; + // Use same guid so caller can remove using origFn + fn.guid = origFn.guid || ( origFn.guid = jQuery.guid++ ); + } + return this.each( function() { + jQuery.event.add( this, types, fn, data, selector ); + }); + }, + one: function( types, selector, data, fn ) { + return this.on.call( this, types, selector, data, fn, 1 ); + }, + off: function( types, selector, fn ) { + if ( types && types.preventDefault && types.handleObj ) { + // ( event ) dispatched jQuery.Event + var handleObj = types.handleObj; + jQuery( types.delegateTarget ).off( + handleObj.namespace? handleObj.type + "." 
+ handleObj.namespace : handleObj.type, + handleObj.selector, + handleObj.handler + ); + return this; + } + if ( typeof types === "object" ) { + // ( types-object [, selector] ) + for ( var type in types ) { + this.off( type, selector, types[ type ] ); + } + return this; + } + if ( selector === false || typeof selector === "function" ) { + // ( types [, fn] ) + fn = selector; + selector = undefined; + } + if ( fn === false ) { + fn = returnFalse; + } + return this.each(function() { + jQuery.event.remove( this, types, fn, selector ); + }); + }, + + bind: function( types, data, fn ) { + return this.on( types, null, data, fn ); + }, + unbind: function( types, fn ) { + return this.off( types, null, fn ); + }, + + live: function( types, data, fn ) { + jQuery( this.context ).on( types, this.selector, data, fn ); + return this; + }, + die: function( types, fn ) { + jQuery( this.context ).off( types, this.selector || "**", fn ); + return this; + }, + + delegate: function( selector, types, data, fn ) { + return this.on( types, selector, data, fn ); + }, + undelegate: function( selector, types, fn ) { + // ( namespace ) or ( selector, types [, fn] ) + return arguments.length == 1? 
this.off( selector, "**" ) : this.off( types, selector, fn ); + }, + + trigger: function( type, data ) { + return this.each(function() { + jQuery.event.trigger( type, data, this ); + }); + }, + triggerHandler: function( type, data ) { + if ( this[0] ) { + return jQuery.event.trigger( type, data, this[0], true ); + } + }, + + toggle: function( fn ) { + // Save reference to arguments for access in closure + var args = arguments, + guid = fn.guid || jQuery.guid++, + i = 0, + toggler = function( event ) { + // Figure out which function to execute + var lastToggle = ( jQuery._data( this, "lastToggle" + fn.guid ) || 0 ) % i; + jQuery._data( this, "lastToggle" + fn.guid, lastToggle + 1 ); + + // Make sure that clicks stop + event.preventDefault(); + + // and execute the function + return args[ lastToggle ].apply( this, arguments ) || false; + }; + + // link all the functions, so any of them can unbind this click handler + toggler.guid = guid; + while ( i < args.length ) { + args[ i++ ].guid = guid; + } + + return this.click( toggler ); + }, + + hover: function( fnOver, fnOut ) { + return this.mouseenter( fnOver ).mouseleave( fnOut || fnOver ); + } +}); + +jQuery.each( ("blur focus focusin focusout load resize scroll unload click dblclick " + + "mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave " + + "change select submit keydown keypress keyup error contextmenu").split(" "), function( i, name ) { + + // Handle event binding + jQuery.fn[ name ] = function( data, fn ) { + if ( fn == null ) { + fn = data; + data = null; + } + + return arguments.length > 0 ? + this.on( name, null, data, fn ) : + this.trigger( name ); + }; + + if ( jQuery.attrFn ) { + jQuery.attrFn[ name ] = true; + } + + if ( rkeyEvent.test( name ) ) { + jQuery.event.fixHooks[ name ] = jQuery.event.keyHooks; + } + + if ( rmouseEvent.test( name ) ) { + jQuery.event.fixHooks[ name ] = jQuery.event.mouseHooks; + } +}); + + + +/*! 
+ * Sizzle CSS Selector Engine + * Copyright 2016, The Dojo Foundation + * Released under the MIT, BSD, and GPL Licenses. + * More information: http://sizzlejs.com/ + */ +(function(){ + +var chunker = /((?:\((?:\([^()]+\)|[^()]+)+\)|\[(?:\[[^\[\]]*\]|['"][^'"]*['"]|[^\[\]'"]+)+\]|\\.|[^ >+~,(\[\\]+)+|[>+~])(\s*,\s*)?((?:.|\r|\n)*)/g, + expando = "sizcache" + (Math.random() + '').replace('.', ''), + done = 0, + toString = Object.prototype.toString, + hasDuplicate = false, + baseHasDuplicate = true, + rBackslash = /\\/g, + rReturn = /\r\n/g, + rNonWord = /\W/; + +// Here we check if the JavaScript engine is using some sort of +// optimization where it does not always call our comparision +// function. If that is the case, discard the hasDuplicate value. +// Thus far that includes Google Chrome. +[0, 0].sort(function() { + baseHasDuplicate = false; + return 0; +}); + +var Sizzle = function( selector, context, results, seed ) { + results = results || []; + context = context || document; + + var origContext = context; + + if ( context.nodeType !== 1 && context.nodeType !== 9 ) { + return []; + } + + if ( !selector || typeof selector !== "string" ) { + return results; + } + + var m, set, checkSet, extra, ret, cur, pop, i, + prune = true, + contextXML = Sizzle.isXML( context ), + parts = [], + soFar = selector; + + // Reset the position of the chunker regexp (start from head) + do { + chunker.exec( "" ); + m = chunker.exec( soFar ); + + if ( m ) { + soFar = m[3]; + + parts.push( m[1] ); + + if ( m[2] ) { + extra = m[3]; + break; + } + } + } while ( m ); + + if ( parts.length > 1 && origPOS.exec( selector ) ) { + + if ( parts.length === 2 && Expr.relative[ parts[0] ] ) { + set = posProcess( parts[0] + parts[1], context, seed ); + + } else { + set = Expr.relative[ parts[0] ] ? 
+ [ context ] : + Sizzle( parts.shift(), context ); + + while ( parts.length ) { + selector = parts.shift(); + + if ( Expr.relative[ selector ] ) { + selector += parts.shift(); + } + + set = posProcess( selector, set, seed ); + } + } + + } else { + // Take a shortcut and set the context if the root selector is an ID + // (but not if it'll be faster if the inner selector is an ID) + if ( !seed && parts.length > 1 && context.nodeType === 9 && !contextXML && + Expr.match.ID.test(parts[0]) && !Expr.match.ID.test(parts[parts.length - 1]) ) { + + ret = Sizzle.find( parts.shift(), context, contextXML ); + context = ret.expr ? + Sizzle.filter( ret.expr, ret.set )[0] : + ret.set[0]; + } + + if ( context ) { + ret = seed ? + { expr: parts.pop(), set: makeArray(seed) } : + Sizzle.find( parts.pop(), parts.length === 1 && (parts[0] === "~" || parts[0] === "+") && context.parentNode ? context.parentNode : context, contextXML ); + + set = ret.expr ? + Sizzle.filter( ret.expr, ret.set ) : + ret.set; + + if ( parts.length > 0 ) { + checkSet = makeArray( set ); + + } else { + prune = false; + } + + while ( parts.length ) { + cur = parts.pop(); + pop = cur; + + if ( !Expr.relative[ cur ] ) { + cur = ""; + } else { + pop = parts.pop(); + } + + if ( pop == null ) { + pop = context; + } + + Expr.relative[ cur ]( checkSet, pop, contextXML ); + } + + } else { + checkSet = parts = []; + } + } + + if ( !checkSet ) { + checkSet = set; + } + + if ( !checkSet ) { + Sizzle.error( cur || selector ); + } + + if ( toString.call(checkSet) === "[object Array]" ) { + if ( !prune ) { + results.push.apply( results, checkSet ); + + } else if ( context && context.nodeType === 1 ) { + for ( i = 0; checkSet[i] != null; i++ ) { + if ( checkSet[i] && (checkSet[i] === true || checkSet[i].nodeType === 1 && Sizzle.contains(context, checkSet[i])) ) { + results.push( set[i] ); + } + } + + } else { + for ( i = 0; checkSet[i] != null; i++ ) { + if ( checkSet[i] && checkSet[i].nodeType === 1 ) { + results.push( 
set[i] ); + } + } + } + + } else { + makeArray( checkSet, results ); + } + + if ( extra ) { + Sizzle( extra, origContext, results, seed ); + Sizzle.uniqueSort( results ); + } + + return results; +}; + +Sizzle.uniqueSort = function( results ) { + if ( sortOrder ) { + hasDuplicate = baseHasDuplicate; + results.sort( sortOrder ); + + if ( hasDuplicate ) { + for ( var i = 1; i < results.length; i++ ) { + if ( results[i] === results[ i - 1 ] ) { + results.splice( i--, 1 ); + } + } + } + } + + return results; +}; + +Sizzle.matches = function( expr, set ) { + return Sizzle( expr, null, null, set ); +}; + +Sizzle.matchesSelector = function( node, expr ) { + return Sizzle( expr, null, null, [node] ).length > 0; +}; + +Sizzle.find = function( expr, context, isXML ) { + var set, i, len, match, type, left; + + if ( !expr ) { + return []; + } + + for ( i = 0, len = Expr.order.length; i < len; i++ ) { + type = Expr.order[i]; + + if ( (match = Expr.leftMatch[ type ].exec( expr )) ) { + left = match[1]; + match.splice( 1, 1 ); + + if ( left.substr( left.length - 1 ) !== "\\" ) { + match[1] = (match[1] || "").replace( rBackslash, "" ); + set = Expr.find[ type ]( match, context, isXML ); + + if ( set != null ) { + expr = expr.replace( Expr.match[ type ], "" ); + break; + } + } + } + } + + if ( !set ) { + set = typeof context.getElementsByTagName !== "undefined" ? 
+ context.getElementsByTagName( "*" ) : + []; + } + + return { set: set, expr: expr }; +}; + +Sizzle.filter = function( expr, set, inplace, not ) { + var match, anyFound, + type, found, item, filter, left, + i, pass, + old = expr, + result = [], + curLoop = set, + isXMLFilter = set && set[0] && Sizzle.isXML( set[0] ); + + while ( expr && set.length ) { + for ( type in Expr.filter ) { + if ( (match = Expr.leftMatch[ type ].exec( expr )) != null && match[2] ) { + filter = Expr.filter[ type ]; + left = match[1]; + + anyFound = false; + + match.splice(1,1); + + if ( left.substr( left.length - 1 ) === "\\" ) { + continue; + } + + if ( curLoop === result ) { + result = []; + } + + if ( Expr.preFilter[ type ] ) { + match = Expr.preFilter[ type ]( match, curLoop, inplace, result, not, isXMLFilter ); + + if ( !match ) { + anyFound = found = true; + + } else if ( match === true ) { + continue; + } + } + + if ( match ) { + for ( i = 0; (item = curLoop[i]) != null; i++ ) { + if ( item ) { + found = filter( item, match, i, curLoop ); + pass = not ^ found; + + if ( inplace && found != null ) { + if ( pass ) { + anyFound = true; + + } else { + curLoop[i] = false; + } + + } else if ( pass ) { + result.push( item ); + anyFound = true; + } + } + } + } + + if ( found !== undefined ) { + if ( !inplace ) { + curLoop = result; + } + + expr = expr.replace( Expr.match[ type ], "" ); + + if ( !anyFound ) { + return []; + } + + break; + } + } + } + + // Improper expression + if ( expr === old ) { + if ( anyFound == null ) { + Sizzle.error( expr ); + + } else { + break; + } + } + + old = expr; + } + + return curLoop; +}; + +Sizzle.error = function( msg ) { + throw new Error( "Syntax error, unrecognized expression: " + msg ); +}; + +/** + * Utility function for retreiving the text value of an array of DOM nodes + * @param {Array|Element} elem + */ +var getText = Sizzle.getText = function( elem ) { + var i, node, + nodeType = elem.nodeType, + ret = ""; + + if ( nodeType ) { + if ( nodeType === 
1 || nodeType === 9 ) { + // Use textContent || innerText for elements + if ( typeof elem.textContent === 'string' ) { + return elem.textContent; + } else if ( typeof elem.innerText === 'string' ) { + // Replace IE's carriage returns + return elem.innerText.replace( rReturn, '' ); + } else { + // Traverse it's children + for ( elem = elem.firstChild; elem; elem = elem.nextSibling) { + ret += getText( elem ); + } + } + } else if ( nodeType === 3 || nodeType === 4 ) { + return elem.nodeValue; + } + } else { + + // If no nodeType, this is expected to be an array + for ( i = 0; (node = elem[i]); i++ ) { + // Do not traverse comment nodes + if ( node.nodeType !== 8 ) { + ret += getText( node ); + } + } + } + return ret; +}; + +var Expr = Sizzle.selectors = { + order: [ "ID", "NAME", "TAG" ], + + match: { + ID: /#((?:[\w\u00c0-\uFFFF\-]|\\.)+)/, + CLASS: /\.((?:[\w\u00c0-\uFFFF\-]|\\.)+)/, + NAME: /\[name=['"]*((?:[\w\u00c0-\uFFFF\-]|\\.)+)['"]*\]/, + ATTR: /\[\s*((?:[\w\u00c0-\uFFFF\-]|\\.)+)\s*(?:(\S?=)\s*(?:(['"])(.*?)\3|(#?(?:[\w\u00c0-\uFFFF\-]|\\.)*)|)|)\s*\]/, + TAG: /^((?:[\w\u00c0-\uFFFF\*\-]|\\.)+)/, + CHILD: /:(only|nth|last|first)-child(?:\(\s*(even|odd|(?:[+\-]?\d+|(?:[+\-]?\d*)?n\s*(?:[+\-]\s*\d+)?))\s*\))?/, + POS: /:(nth|eq|gt|lt|first|last|even|odd)(?:\((\d*)\))?(?=[^\-]|$)/, + PSEUDO: /:((?:[\w\u00c0-\uFFFF\-]|\\.)+)(?:\((['"]?)((?:\([^\)]+\)|[^\(\)]*)+)\2\))?/ + }, + + leftMatch: {}, + + attrMap: { + "class": "className", + "for": "htmlFor" + }, + + attrHandle: { + href: function( elem ) { + return elem.getAttribute( "href" ); + }, + type: function( elem ) { + return elem.getAttribute( "type" ); + } + }, + + relative: { + "+": function(checkSet, part){ + var isPartStr = typeof part === "string", + isTag = isPartStr && !rNonWord.test( part ), + isPartStrNotTag = isPartStr && !isTag; + + if ( isTag ) { + part = part.toLowerCase(); + } + + for ( var i = 0, l = checkSet.length, elem; i < l; i++ ) { + if ( (elem = checkSet[i]) ) { + while ( (elem = 
elem.previousSibling) && elem.nodeType !== 1 ) {} + + checkSet[i] = isPartStrNotTag || elem && elem.nodeName.toLowerCase() === part ? + elem || false : + elem === part; + } + } + + if ( isPartStrNotTag ) { + Sizzle.filter( part, checkSet, true ); + } + }, + + ">": function( checkSet, part ) { + var elem, + isPartStr = typeof part === "string", + i = 0, + l = checkSet.length; + + if ( isPartStr && !rNonWord.test( part ) ) { + part = part.toLowerCase(); + + for ( ; i < l; i++ ) { + elem = checkSet[i]; + + if ( elem ) { + var parent = elem.parentNode; + checkSet[i] = parent.nodeName.toLowerCase() === part ? parent : false; + } + } + + } else { + for ( ; i < l; i++ ) { + elem = checkSet[i]; + + if ( elem ) { + checkSet[i] = isPartStr ? + elem.parentNode : + elem.parentNode === part; + } + } + + if ( isPartStr ) { + Sizzle.filter( part, checkSet, true ); + } + } + }, + + "": function(checkSet, part, isXML){ + var nodeCheck, + doneName = done++, + checkFn = dirCheck; + + if ( typeof part === "string" && !rNonWord.test( part ) ) { + part = part.toLowerCase(); + nodeCheck = part; + checkFn = dirNodeCheck; + } + + checkFn( "parentNode", part, doneName, checkSet, nodeCheck, isXML ); + }, + + "~": function( checkSet, part, isXML ) { + var nodeCheck, + doneName = done++, + checkFn = dirCheck; + + if ( typeof part === "string" && !rNonWord.test( part ) ) { + part = part.toLowerCase(); + nodeCheck = part; + checkFn = dirNodeCheck; + } + + checkFn( "previousSibling", part, doneName, checkSet, nodeCheck, isXML ); + } + }, + + find: { + ID: function( match, context, isXML ) { + if ( typeof context.getElementById !== "undefined" && !isXML ) { + var m = context.getElementById(match[1]); + // Check parentNode to catch when Blackberry 4.6 returns + // nodes that are no longer in the document #6963 + return m && m.parentNode ? 
[m] : []; + } + }, + + NAME: function( match, context ) { + if ( typeof context.getElementsByName !== "undefined" ) { + var ret = [], + results = context.getElementsByName( match[1] ); + + for ( var i = 0, l = results.length; i < l; i++ ) { + if ( results[i].getAttribute("name") === match[1] ) { + ret.push( results[i] ); + } + } + + return ret.length === 0 ? null : ret; + } + }, + + TAG: function( match, context ) { + if ( typeof context.getElementsByTagName !== "undefined" ) { + return context.getElementsByTagName( match[1] ); + } + } + }, + preFilter: { + CLASS: function( match, curLoop, inplace, result, not, isXML ) { + match = " " + match[1].replace( rBackslash, "" ) + " "; + + if ( isXML ) { + return match; + } + + for ( var i = 0, elem; (elem = curLoop[i]) != null; i++ ) { + if ( elem ) { + if ( not ^ (elem.className && (" " + elem.className + " ").replace(/[\t\n\r]/g, " ").indexOf(match) >= 0) ) { + if ( !inplace ) { + result.push( elem ); + } + + } else if ( inplace ) { + curLoop[i] = false; + } + } + } + + return false; + }, + + ID: function( match ) { + return match[1].replace( rBackslash, "" ); + }, + + TAG: function( match, curLoop ) { + return match[1].replace( rBackslash, "" ).toLowerCase(); + }, + + CHILD: function( match ) { + if ( match[1] === "nth" ) { + if ( !match[2] ) { + Sizzle.error( match[0] ); + } + + match[2] = match[2].replace(/^\+|\s*/g, ''); + + // parse equations like 'even', 'odd', '5', '2n', '3n+2', '4n-1', '-n+6' + var test = /(-?)(\d*)(?:n([+\-]?\d*))?/.exec( + match[2] === "even" && "2n" || match[2] === "odd" && "2n+1" || + !/\D/.test( match[2] ) && "0n+" + match[2] || match[2]); + + // calculate the numbers (first)n+(last) including if they are negative + match[2] = (test[1] + (test[2] || 1)) - 0; + match[3] = test[3] - 0; + } + else if ( match[2] ) { + Sizzle.error( match[0] ); + } + + // TODO: Move to normal caching system + match[0] = done++; + + return match; + }, + + ATTR: function( match, curLoop, inplace, result, not, 
isXML ) { + var name = match[1] = match[1].replace( rBackslash, "" ); + + if ( !isXML && Expr.attrMap[name] ) { + match[1] = Expr.attrMap[name]; + } + + // Handle if an un-quoted value was used + match[4] = ( match[4] || match[5] || "" ).replace( rBackslash, "" ); + + if ( match[2] === "~=" ) { + match[4] = " " + match[4] + " "; + } + + return match; + }, + + PSEUDO: function( match, curLoop, inplace, result, not ) { + if ( match[1] === "not" ) { + // If we're dealing with a complex expression, or a simple one + if ( ( chunker.exec(match[3]) || "" ).length > 1 || /^\w/.test(match[3]) ) { + match[3] = Sizzle(match[3], null, null, curLoop); + + } else { + var ret = Sizzle.filter(match[3], curLoop, inplace, true ^ not); + + if ( !inplace ) { + result.push.apply( result, ret ); + } + + return false; + } + + } else if ( Expr.match.POS.test( match[0] ) || Expr.match.CHILD.test( match[0] ) ) { + return true; + } + + return match; + }, + + POS: function( match ) { + match.unshift( true ); + + return match; + } + }, + + filters: { + enabled: function( elem ) { + return elem.disabled === false && elem.type !== "hidden"; + }, + + disabled: function( elem ) { + return elem.disabled === true; + }, + + checked: function( elem ) { + return elem.checked === true; + }, + + selected: function( elem ) { + // Accessing this property makes selected-by-default + // options in Safari work properly + if ( elem.parentNode ) { + elem.parentNode.selectedIndex; + } + + return elem.selected === true; + }, + + parent: function( elem ) { + return !!elem.firstChild; + }, + + empty: function( elem ) { + return !elem.firstChild; + }, + + has: function( elem, i, match ) { + return !!Sizzle( match[3], elem ).length; + }, + + header: function( elem ) { + return (/h\d/i).test( elem.nodeName ); + }, + + text: function( elem ) { + var attr = elem.getAttribute( "type" ), type = elem.type; + // IE6 and 7 will map elem.type to 'text' for new HTML5 types (search, etc) + // use getAttribute instead to test 
this case + return elem.nodeName.toLowerCase() === "input" && "text" === type && ( attr === type || attr === null ); + }, + + radio: function( elem ) { + return elem.nodeName.toLowerCase() === "input" && "radio" === elem.type; + }, + + checkbox: function( elem ) { + return elem.nodeName.toLowerCase() === "input" && "checkbox" === elem.type; + }, + + file: function( elem ) { + return elem.nodeName.toLowerCase() === "input" && "file" === elem.type; + }, + + password: function( elem ) { + return elem.nodeName.toLowerCase() === "input" && "password" === elem.type; + }, + + submit: function( elem ) { + var name = elem.nodeName.toLowerCase(); + return (name === "input" || name === "button") && "submit" === elem.type; + }, + + image: function( elem ) { + return elem.nodeName.toLowerCase() === "input" && "image" === elem.type; + }, + + reset: function( elem ) { + var name = elem.nodeName.toLowerCase(); + return (name === "input" || name === "button") && "reset" === elem.type; + }, + + button: function( elem ) { + var name = elem.nodeName.toLowerCase(); + return name === "input" && "button" === elem.type || name === "button"; + }, + + input: function( elem ) { + return (/input|select|textarea|button/i).test( elem.nodeName ); + }, + + focus: function( elem ) { + return elem === elem.ownerDocument.activeElement; + } + }, + setFilters: { + first: function( elem, i ) { + return i === 0; + }, + + last: function( elem, i, match, array ) { + return i === array.length - 1; + }, + + even: function( elem, i ) { + return i % 2 === 0; + }, + + odd: function( elem, i ) { + return i % 2 === 1; + }, + + lt: function( elem, i, match ) { + return i < match[3] - 0; + }, + + gt: function( elem, i, match ) { + return i > match[3] - 0; + }, + + nth: function( elem, i, match ) { + return match[3] - 0 === i; + }, + + eq: function( elem, i, match ) { + return match[3] - 0 === i; + } + }, + filter: { + PSEUDO: function( elem, match, i, array ) { + var name = match[1], + filter = Expr.filters[ name 
]; + + if ( filter ) { + return filter( elem, i, match, array ); + + } else if ( name === "contains" ) { + return (elem.textContent || elem.innerText || getText([ elem ]) || "").indexOf(match[3]) >= 0; + + } else if ( name === "not" ) { + var not = match[3]; + + for ( var j = 0, l = not.length; j < l; j++ ) { + if ( not[j] === elem ) { + return false; + } + } + + return true; + + } else { + Sizzle.error( name ); + } + }, + + CHILD: function( elem, match ) { + var first, last, + doneName, parent, cache, + count, diff, + type = match[1], + node = elem; + + switch ( type ) { + case "only": + case "first": + while ( (node = node.previousSibling) ) { + if ( node.nodeType === 1 ) { + return false; + } + } + + if ( type === "first" ) { + return true; + } + + node = elem; + + case "last": + while ( (node = node.nextSibling) ) { + if ( node.nodeType === 1 ) { + return false; + } + } + + return true; + + case "nth": + first = match[2]; + last = match[3]; + + if ( first === 1 && last === 0 ) { + return true; + } + + doneName = match[0]; + parent = elem.parentNode; + + if ( parent && (parent[ expando ] !== doneName || !elem.nodeIndex) ) { + count = 0; + + for ( node = parent.firstChild; node; node = node.nextSibling ) { + if ( node.nodeType === 1 ) { + node.nodeIndex = ++count; + } + } + + parent[ expando ] = doneName; + } + + diff = elem.nodeIndex - last; + + if ( first === 0 ) { + return diff === 0; + + } else { + return ( diff % first === 0 && diff / first >= 0 ); + } + } + }, + + ID: function( elem, match ) { + return elem.nodeType === 1 && elem.getAttribute("id") === match; + }, + + TAG: function( elem, match ) { + return (match === "*" && elem.nodeType === 1) || !!elem.nodeName && elem.nodeName.toLowerCase() === match; + }, + + CLASS: function( elem, match ) { + return (" " + (elem.className || elem.getAttribute("class")) + " ") + .indexOf( match ) > -1; + }, + + ATTR: function( elem, match ) { + var name = match[1], + result = Sizzle.attr ? 
+ Sizzle.attr( elem, name ) : + Expr.attrHandle[ name ] ? + Expr.attrHandle[ name ]( elem ) : + elem[ name ] != null ? + elem[ name ] : + elem.getAttribute( name ), + value = result + "", + type = match[2], + check = match[4]; + + return result == null ? + type === "!=" : + !type && Sizzle.attr ? + result != null : + type === "=" ? + value === check : + type === "*=" ? + value.indexOf(check) >= 0 : + type === "~=" ? + (" " + value + " ").indexOf(check) >= 0 : + !check ? + value && result !== false : + type === "!=" ? + value !== check : + type === "^=" ? + value.indexOf(check) === 0 : + type === "$=" ? + value.substr(value.length - check.length) === check : + type === "|=" ? + value === check || value.substr(0, check.length + 1) === check + "-" : + false; + }, + + POS: function( elem, match, i, array ) { + var name = match[2], + filter = Expr.setFilters[ name ]; + + if ( filter ) { + return filter( elem, i, match, array ); + } + } + } +}; + +var origPOS = Expr.match.POS, + fescape = function(all, num){ + return "\\" + (num - 0 + 1); + }; + +for ( var type in Expr.match ) { + Expr.match[ type ] = new RegExp( Expr.match[ type ].source + (/(?![^\[]*\])(?![^\(]*\))/.source) ); + Expr.leftMatch[ type ] = new RegExp( /(^(?:.|\r|\n)*?)/.source + Expr.match[ type ].source.replace(/\\(\d+)/g, fescape) ); +} + +var makeArray = function( array, results ) { + array = Array.prototype.slice.call( array, 0 ); + + if ( results ) { + results.push.apply( results, array ); + return results; + } + + return array; +}; + +// Perform a simple check to determine if the browser is capable of +// converting a NodeList to an array using builtin methods. 
+// Also verifies that the returned array holds DOM nodes +// (which is not the case in the Blackberry browser) +try { + Array.prototype.slice.call( document.documentElement.childNodes, 0 )[0].nodeType; + +// Provide a fallback method if it does not work +} catch( e ) { + makeArray = function( array, results ) { + var i = 0, + ret = results || []; + + if ( toString.call(array) === "[object Array]" ) { + Array.prototype.push.apply( ret, array ); + + } else { + if ( typeof array.length === "number" ) { + for ( var l = array.length; i < l; i++ ) { + ret.push( array[i] ); + } + + } else { + for ( ; array[i]; i++ ) { + ret.push( array[i] ); + } + } + } + + return ret; + }; +} + +var sortOrder, siblingCheck; + +if ( document.documentElement.compareDocumentPosition ) { + sortOrder = function( a, b ) { + if ( a === b ) { + hasDuplicate = true; + return 0; + } + + if ( !a.compareDocumentPosition || !b.compareDocumentPosition ) { + return a.compareDocumentPosition ? -1 : 1; + } + + return a.compareDocumentPosition(b) & 4 ? 
-1 : 1; + }; + +} else { + sortOrder = function( a, b ) { + // The nodes are identical, we can exit early + if ( a === b ) { + hasDuplicate = true; + return 0; + + // Fallback to using sourceIndex (in IE) if it's available on both nodes + } else if ( a.sourceIndex && b.sourceIndex ) { + return a.sourceIndex - b.sourceIndex; + } + + var al, bl, + ap = [], + bp = [], + aup = a.parentNode, + bup = b.parentNode, + cur = aup; + + // If the nodes are siblings (or identical) we can do a quick check + if ( aup === bup ) { + return siblingCheck( a, b ); + + // If no parents were found then the nodes are disconnected + } else if ( !aup ) { + return -1; + + } else if ( !bup ) { + return 1; + } + + // Otherwise they're somewhere else in the tree so we need + // to build up a full list of the parentNodes for comparison + while ( cur ) { + ap.unshift( cur ); + cur = cur.parentNode; + } + + cur = bup; + + while ( cur ) { + bp.unshift( cur ); + cur = cur.parentNode; + } + + al = ap.length; + bl = bp.length; + + // Start walking down the tree looking for a discrepancy + for ( var i = 0; i < al && i < bl; i++ ) { + if ( ap[i] !== bp[i] ) { + return siblingCheck( ap[i], bp[i] ); + } + } + + // We ended someplace up the tree so do a sibling check + return i === al ? 
+ siblingCheck( a, bp[i], -1 ) : + siblingCheck( ap[i], b, 1 ); + }; + + siblingCheck = function( a, b, ret ) { + if ( a === b ) { + return ret; + } + + var cur = a.nextSibling; + + while ( cur ) { + if ( cur === b ) { + return -1; + } + + cur = cur.nextSibling; + } + + return 1; + }; +} + +// Check to see if the browser returns elements by name when +// querying by getElementById (and provide a workaround) +(function(){ + // We're going to inject a fake input element with a specified name + var form = document.createElement("div"), + id = "script" + (new Date()).getTime(), + root = document.documentElement; + + form.innerHTML = "<a name='" + id + "'/>"; + + // Inject it into the root element, check its status, and remove it quickly + root.insertBefore( form, root.firstChild ); + + // The workaround has to do additional checks after a getElementById + // Which slows things down for other browsers (hence the branching) + if ( document.getElementById( id ) ) { + Expr.find.ID = function( match, context, isXML ) { + if ( typeof context.getElementById !== "undefined" && !isXML ) { + var m = context.getElementById(match[1]); + + return m ? + m.id === match[1] || typeof m.getAttributeNode !== "undefined" && m.getAttributeNode("id").nodeValue === match[1] ? 
+ [m] : + undefined : + []; + } + }; + + Expr.filter.ID = function( elem, match ) { + var node = typeof elem.getAttributeNode !== "undefined" && elem.getAttributeNode("id"); + + return elem.nodeType === 1 && node && node.nodeValue === match; + }; + } + + root.removeChild( form ); + + // release memory in IE + root = form = null; +})(); + +(function(){ + // Check to see if the browser returns only elements + // when doing getElementsByTagName("*") + + // Create a fake element + var div = document.createElement("div"); + div.appendChild( document.createComment("") ); + + // Make sure no comments are found + if ( div.getElementsByTagName("*").length > 0 ) { + Expr.find.TAG = function( match, context ) { + var results = context.getElementsByTagName( match[1] ); + + // Filter out possible comments + if ( match[1] === "*" ) { + var tmp = []; + + for ( var i = 0; results[i]; i++ ) { + if ( results[i].nodeType === 1 ) { + tmp.push( results[i] ); + } + } + + results = tmp; + } + + return results; + }; + } + + // Check to see if an attribute returns normalized href attributes + div.innerHTML = "<a href='#'></a>"; + + if ( div.firstChild && typeof div.firstChild.getAttribute !== "undefined" && + div.firstChild.getAttribute("href") !== "#" ) { + + Expr.attrHandle.href = function( elem ) { + return elem.getAttribute( "href", 2 ); + }; + } + + // release memory in IE + div = null; +})(); + +if ( document.querySelectorAll ) { + (function(){ + var oldSizzle = Sizzle, + div = document.createElement("div"), + id = "__sizzle__"; + + div.innerHTML = "<p class='TEST'></p>"; + + // Safari can't handle uppercase or unicode characters when + // in quirks mode. 
+ if ( div.querySelectorAll && div.querySelectorAll(".TEST").length === 0 ) { + return; + } + + Sizzle = function( query, context, extra, seed ) { + context = context || document; + + // Only use querySelectorAll on non-XML documents + // (ID selectors don't work in non-HTML documents) + if ( !seed && !Sizzle.isXML(context) ) { + // See if we find a selector to speed up + var match = /^(\w+$)|^\.([\w\-]+$)|^#([\w\-]+$)/.exec( query ); + + if ( match && (context.nodeType === 1 || context.nodeType === 9) ) { + // Speed-up: Sizzle("TAG") + if ( match[1] ) { + return makeArray( context.getElementsByTagName( query ), extra ); + + // Speed-up: Sizzle(".CLASS") + } else if ( match[2] && Expr.find.CLASS && context.getElementsByClassName ) { + return makeArray( context.getElementsByClassName( match[2] ), extra ); + } + } + + if ( context.nodeType === 9 ) { + // Speed-up: Sizzle("body") + // The body element only exists once, optimize finding it + if ( query === "body" && context.body ) { + return makeArray( [ context.body ], extra ); + + // Speed-up: Sizzle("#ID") + } else if ( match && match[3] ) { + var elem = context.getElementById( match[3] ); + + // Check parentNode to catch when Blackberry 4.6 returns + // nodes that are no longer in the document #6963 + if ( elem && elem.parentNode ) { + // Handle the case where IE and Opera return items + // by name instead of ID + if ( elem.id === match[3] ) { + return makeArray( [ elem ], extra ); + } + + } else { + return makeArray( [], extra ); + } + } + + try { + return makeArray( context.querySelectorAll(query), extra ); + } catch(qsaError) {} + + // qSA works strangely on Element-rooted queries + // We can work around this by specifying an extra ID on the root + // and working up from there (Thanks to Andrew Dupont for the technique) + // IE 8 doesn't work on object elements + } else if ( context.nodeType === 1 && context.nodeName.toLowerCase() !== "object" ) { + var oldContext = context, + old = context.getAttribute( "id" ), 
+ nid = old || id, + hasParent = context.parentNode, + relativeHierarchySelector = /^\s*[+~]/.test( query ); + + if ( !old ) { + context.setAttribute( "id", nid ); + } else { + nid = nid.replace( /'/g, "\\$&" ); + } + if ( relativeHierarchySelector && hasParent ) { + context = context.parentNode; + } + + try { + if ( !relativeHierarchySelector || hasParent ) { + return makeArray( context.querySelectorAll( "[id='" + nid + "'] " + query ), extra ); + } + + } catch(pseudoError) { + } finally { + if ( !old ) { + oldContext.removeAttribute( "id" ); + } + } + } + } + + return oldSizzle(query, context, extra, seed); + }; + + for ( var prop in oldSizzle ) { + Sizzle[ prop ] = oldSizzle[ prop ]; + } + + // release memory in IE + div = null; + })(); +} + +(function(){ + var html = document.documentElement, + matches = html.matchesSelector || html.mozMatchesSelector || html.webkitMatchesSelector || html.msMatchesSelector; + + if ( matches ) { + // Check to see if it's possible to do matchesSelector + // on a disconnected node (IE 9 fails this) + var disconnectedMatch = !matches.call( document.createElement( "div" ), "div" ), + pseudoWorks = false; + + try { + // This should fail with an exception + // Gecko does not error, returns false instead + matches.call( document.documentElement, "[test!='']:sizzle" ); + + } catch( pseudoError ) { + pseudoWorks = true; + } + + Sizzle.matchesSelector = function( node, expr ) { + // Make sure that attribute selectors are quoted + expr = expr.replace(/\=\s*([^'"\]]*)\s*\]/g, "='$1']"); + + if ( !Sizzle.isXML( node ) ) { + try { + if ( pseudoWorks || !Expr.match.PSEUDO.test( expr ) && !/!=/.test( expr ) ) { + var ret = matches.call( node, expr ); + + // IE 9's matchesSelector returns false on disconnected nodes + if ( ret || !disconnectedMatch || + // As well, disconnected nodes are said to be in a document + // fragment in IE 9, so check for that + node.document && node.document.nodeType !== 11 ) { + return ret; + } + } + } catch(e) {} + } 
+ + return Sizzle(expr, null, null, [node]).length > 0; + }; + } +})(); + +(function(){ + var div = document.createElement("div"); + + div.innerHTML = "<div class='test e'></div><div class='test'></div>"; + + // Opera can't find a second classname (in 9.6) + // Also, make sure that getElementsByClassName actually exists + if ( !div.getElementsByClassName || div.getElementsByClassName("e").length === 0 ) { + return; + } + + // Safari caches class attributes, doesn't catch changes (in 3.2) + div.lastChild.className = "e"; + + if ( div.getElementsByClassName("e").length === 1 ) { + return; + } + + Expr.order.splice(1, 0, "CLASS"); + Expr.find.CLASS = function( match, context, isXML ) { + if ( typeof context.getElementsByClassName !== "undefined" && !isXML ) { + return context.getElementsByClassName(match[1]); + } + }; + + // release memory in IE + div = null; +})(); + +function dirNodeCheck( dir, cur, doneName, checkSet, nodeCheck, isXML ) { + for ( var i = 0, l = checkSet.length; i < l; i++ ) { + var elem = checkSet[i]; + + if ( elem ) { + var match = false; + + elem = elem[dir]; + + while ( elem ) { + if ( elem[ expando ] === doneName ) { + match = checkSet[elem.sizset]; + break; + } + + if ( elem.nodeType === 1 && !isXML ){ + elem[ expando ] = doneName; + elem.sizset = i; + } + + if ( elem.nodeName.toLowerCase() === cur ) { + match = elem; + break; + } + + elem = elem[dir]; + } + + checkSet[i] = match; + } + } +} + +function dirCheck( dir, cur, doneName, checkSet, nodeCheck, isXML ) { + for ( var i = 0, l = checkSet.length; i < l; i++ ) { + var elem = checkSet[i]; + + if ( elem ) { + var match = false; + + elem = elem[dir]; + + while ( elem ) { + if ( elem[ expando ] === doneName ) { + match = checkSet[elem.sizset]; + break; + } + + if ( elem.nodeType === 1 ) { + if ( !isXML ) { + elem[ expando ] = doneName; + elem.sizset = i; + } + + if ( typeof cur !== "string" ) { + if ( elem === cur ) { + match = true; + break; + } + + } else if ( Sizzle.filter( cur, [elem] 
).length > 0 ) { + match = elem; + break; + } + } + + elem = elem[dir]; + } + + checkSet[i] = match; + } + } +} + +if ( document.documentElement.contains ) { + Sizzle.contains = function( a, b ) { + return a !== b && (a.contains ? a.contains(b) : true); + }; + +} else if ( document.documentElement.compareDocumentPosition ) { + Sizzle.contains = function( a, b ) { + return !!(a.compareDocumentPosition(b) & 16); + }; + +} else { + Sizzle.contains = function() { + return false; + }; +} + +Sizzle.isXML = function( elem ) { + // documentElement is verified for cases where it doesn't yet exist + // (such as loading iframes in IE - #4833) + var documentElement = (elem ? elem.ownerDocument || elem : 0).documentElement; + + return documentElement ? documentElement.nodeName !== "HTML" : false; +}; + +var posProcess = function( selector, context, seed ) { + var match, + tmpSet = [], + later = "", + root = context.nodeType ? [context] : context; + + // Position selectors must be done after the filter + // And so must :not(positional) so we move all PSEUDOs to the end + while ( (match = Expr.match.PSEUDO.exec( selector )) ) { + later += match[0]; + selector = selector.replace( Expr.match.PSEUDO, "" ); + } + + selector = Expr.relative[selector] ? 
selector + "*" : selector; + + for ( var i = 0, l = root.length; i < l; i++ ) { + Sizzle( selector, root[i], tmpSet, seed ); + } + + return Sizzle.filter( later, tmpSet ); +}; + +// EXPOSE +// Override sizzle attribute retrieval +Sizzle.attr = jQuery.attr; +Sizzle.selectors.attrMap = {}; +jQuery.find = Sizzle; +jQuery.expr = Sizzle.selectors; +jQuery.expr[":"] = jQuery.expr.filters; +jQuery.unique = Sizzle.uniqueSort; +jQuery.text = Sizzle.getText; +jQuery.isXMLDoc = Sizzle.isXML; +jQuery.contains = Sizzle.contains; + + +})(); + + +var runtil = /Until$/, + rparentsprev = /^(?:parents|prevUntil|prevAll)/, + // Note: This RegExp should be improved, or likely pulled from Sizzle + rmultiselector = /,/, + isSimple = /^.[^:#\[\.,]*$/, + slice = Array.prototype.slice, + POS = jQuery.expr.match.POS, + // methods guaranteed to produce a unique set when starting from a unique set + guaranteedUnique = { + children: true, + contents: true, + next: true, + prev: true + }; + +jQuery.fn.extend({ + find: function( selector ) { + var self = this, + i, l; + + if ( typeof selector !== "string" ) { + return jQuery( selector ).filter(function() { + for ( i = 0, l = self.length; i < l; i++ ) { + if ( jQuery.contains( self[ i ], this ) ) { + return true; + } + } + }); + } + + var ret = this.pushStack( "", "find", selector ), + length, n, r; + + for ( i = 0, l = this.length; i < l; i++ ) { + length = ret.length; + jQuery.find( selector, this[i], ret ); + + if ( i > 0 ) { + // Make sure that the results are unique + for ( n = length; n < ret.length; n++ ) { + for ( r = 0; r < length; r++ ) { + if ( ret[r] === ret[n] ) { + ret.splice(n--, 1); + break; + } + } + } + } + } + + return ret; + }, + + has: function( target ) { + var targets = jQuery( target ); + return this.filter(function() { + for ( var i = 0, l = targets.length; i < l; i++ ) { + if ( jQuery.contains( this, targets[i] ) ) { + return true; + } + } + }); + }, + + not: function( selector ) { + return this.pushStack( winnow(this, 
selector, false), "not", selector); + }, + + filter: function( selector ) { + return this.pushStack( winnow(this, selector, true), "filter", selector ); + }, + + is: function( selector ) { + return !!selector && ( + typeof selector === "string" ? + // If this is a positional selector, check membership in the returned set + // so $("p:first").is("p:last") won't return true for a doc with two "p". + POS.test( selector ) ? + jQuery( selector, this.context ).index( this[0] ) >= 0 : + jQuery.filter( selector, this ).length > 0 : + this.filter( selector ).length > 0 ); + }, + + closest: function( selectors, context ) { + var ret = [], i, l, cur = this[0]; + + // Array (deprecated as of jQuery 1.7) + if ( jQuery.isArray( selectors ) ) { + var level = 1; + + while ( cur && cur.ownerDocument && cur !== context ) { + for ( i = 0; i < selectors.length; i++ ) { + + if ( jQuery( cur ).is( selectors[ i ] ) ) { + ret.push({ selector: selectors[ i ], elem: cur, level: level }); + } + } + + cur = cur.parentNode; + level++; + } + + return ret; + } + + // String + var pos = POS.test( selectors ) || typeof selectors !== "string" ? + jQuery( selectors, context || this.context ) : + 0; + + for ( i = 0, l = this.length; i < l; i++ ) { + cur = this[i]; + + while ( cur ) { + if ( pos ? pos.index(cur) > -1 : jQuery.find.matchesSelector(cur, selectors) ) { + ret.push( cur ); + break; + + } else { + cur = cur.parentNode; + if ( !cur || !cur.ownerDocument || cur === context || cur.nodeType === 11 ) { + break; + } + } + } + } + + ret = ret.length > 1 ? jQuery.unique( ret ) : ret; + + return this.pushStack( ret, "closest", selectors ); + }, + + // Determine the position of an element within + // the matched set of elements + index: function( elem ) { + + // No argument, return index in parent + if ( !elem ) { + return ( this[0] && this[0].parentNode ) ? 
this.prevAll().length : -1; + } + + // index in selector + if ( typeof elem === "string" ) { + return jQuery.inArray( this[0], jQuery( elem ) ); + } + + // Locate the position of the desired element + return jQuery.inArray( + // If it receives a jQuery object, the first element is used + elem.jquery ? elem[0] : elem, this ); + }, + + add: function( selector, context ) { + var set = typeof selector === "string" ? + jQuery( selector, context ) : + jQuery.makeArray( selector && selector.nodeType ? [ selector ] : selector ), + all = jQuery.merge( this.get(), set ); + + return this.pushStack( isDisconnected( set[0] ) || isDisconnected( all[0] ) ? + all : + jQuery.unique( all ) ); + }, + + andSelf: function() { + return this.add( this.prevObject ); + } +}); + +// A painfully simple check to see if an element is disconnected +// from a document (should be improved, where feasible). +function isDisconnected( node ) { + return !node || !node.parentNode || node.parentNode.nodeType === 11; +} + +jQuery.each({ + parent: function( elem ) { + var parent = elem.parentNode; + return parent && parent.nodeType !== 11 ? 
parent : null; + }, + parents: function( elem ) { + return jQuery.dir( elem, "parentNode" ); + }, + parentsUntil: function( elem, i, until ) { + return jQuery.dir( elem, "parentNode", until ); + }, + next: function( elem ) { + return jQuery.nth( elem, 2, "nextSibling" ); + }, + prev: function( elem ) { + return jQuery.nth( elem, 2, "previousSibling" ); + }, + nextAll: function( elem ) { + return jQuery.dir( elem, "nextSibling" ); + }, + prevAll: function( elem ) { + return jQuery.dir( elem, "previousSibling" ); + }, + nextUntil: function( elem, i, until ) { + return jQuery.dir( elem, "nextSibling", until ); + }, + prevUntil: function( elem, i, until ) { + return jQuery.dir( elem, "previousSibling", until ); + }, + siblings: function( elem ) { + return jQuery.sibling( elem.parentNode.firstChild, elem ); + }, + children: function( elem ) { + return jQuery.sibling( elem.firstChild ); + }, + contents: function( elem ) { + return jQuery.nodeName( elem, "iframe" ) ? + elem.contentDocument || elem.contentWindow.document : + jQuery.makeArray( elem.childNodes ); + } +}, function( name, fn ) { + jQuery.fn[ name ] = function( until, selector ) { + var ret = jQuery.map( this, fn, until ); + + if ( !runtil.test( name ) ) { + selector = until; + } + + if ( selector && typeof selector === "string" ) { + ret = jQuery.filter( selector, ret ); + } + + ret = this.length > 1 && !guaranteedUnique[ name ] ? jQuery.unique( ret ) : ret; + + if ( (this.length > 1 || rmultiselector.test( selector )) && rparentsprev.test( name ) ) { + ret = ret.reverse(); + } + + return this.pushStack( ret, name, slice.call( arguments ).join(",") ); + }; +}); + +jQuery.extend({ + filter: function( expr, elems, not ) { + if ( not ) { + expr = ":not(" + expr + ")"; + } + + return elems.length === 1 ? + jQuery.find.matchesSelector(elems[0], expr) ? 
[ elems[0] ] : [] : + jQuery.find.matches(expr, elems); + }, + + dir: function( elem, dir, until ) { + var matched = [], + cur = elem[ dir ]; + + while ( cur && cur.nodeType !== 9 && (until === undefined || cur.nodeType !== 1 || !jQuery( cur ).is( until )) ) { + if ( cur.nodeType === 1 ) { + matched.push( cur ); + } + cur = cur[dir]; + } + return matched; + }, + + nth: function( cur, result, dir, elem ) { + result = result || 1; + var num = 0; + + for ( ; cur; cur = cur[dir] ) { + if ( cur.nodeType === 1 && ++num === result ) { + break; + } + } + + return cur; + }, + + sibling: function( n, elem ) { + var r = []; + + for ( ; n; n = n.nextSibling ) { + if ( n.nodeType === 1 && n !== elem ) { + r.push( n ); + } + } + + return r; + } +}); + +// Implement the identical functionality for filter and not +function winnow( elements, qualifier, keep ) { + + // Can't pass null or undefined to indexOf in Firefox 4 + // Set to 0 to skip string check + qualifier = qualifier || 0; + + if ( jQuery.isFunction( qualifier ) ) { + return jQuery.grep(elements, function( elem, i ) { + var retVal = !!qualifier.call( elem, i, elem ); + return retVal === keep; + }); + + } else if ( qualifier.nodeType ) { + return jQuery.grep(elements, function( elem, i ) { + return ( elem === qualifier ) === keep; + }); + + } else if ( typeof qualifier === "string" ) { + var filtered = jQuery.grep(elements, function( elem ) { + return elem.nodeType === 1; + }); + + if ( isSimple.test( qualifier ) ) { + return jQuery.filter(qualifier, filtered, !keep); + } else { + qualifier = jQuery.filter( qualifier, filtered ); + } + } + + return jQuery.grep(elements, function( elem, i ) { + return ( jQuery.inArray( elem, qualifier ) >= 0 ) === keep; + }); +} + + + + +function createSafeFragment( document ) { + var list = nodeNames.split( "|" ), + safeFrag = document.createDocumentFragment(); + + if ( safeFrag.createElement ) { + while ( list.length ) { + safeFrag.createElement( + list.pop() + ); + } + } + return 
safeFrag; +} + +var nodeNames = "abbr|article|aside|audio|canvas|datalist|details|figcaption|figure|footer|" + + "header|hgroup|mark|meter|nav|output|progress|section|summary|time|video", + rinlinejQuery = / jQuery\d+="(?:\d+|null)"/g, + rleadingWhitespace = /^\s+/, + rxhtmlTag = /<(?!area|br|col|embed|hr|img|input|link|meta|param)(([\w:]+)[^>]*)\/>/ig, + rtagName = /<([\w:]+)/, + rtbody = /<tbody/i, + rhtml = /<|&#?\w+;/, + rnoInnerhtml = /<(?:script|style)/i, + rnocache = /<(?:script|object|embed|option|style)/i, + rnoshimcache = new RegExp("<(?:" + nodeNames + ")", "i"), + // checked="checked" or checked + rchecked = /checked\s*(?:[^=]|=\s*.checked.)/i, + rscriptType = /\/(java|ecma)script/i, + rcleanScript = /^\s*<!(?:\[CDATA\[|\-\-)/, + wrapMap = { + option: [ 1, "<select multiple='multiple'>", "</select>" ], + legend: [ 1, "<fieldset>", "</fieldset>" ], + thead: [ 1, "<table>", "</table>" ], + tr: [ 2, "<table><tbody>", "</tbody></table>" ], + td: [ 3, "<table><tbody><tr>", "</tr></tbody></table>" ], + col: [ 2, "<table><tbody></tbody><colgroup>", "</colgroup></table>" ], + area: [ 1, "<map>", "</map>" ], + _default: [ 0, "", "" ] + }, + safeFragment = createSafeFragment( document ); + +wrapMap.optgroup = wrapMap.option; +wrapMap.tbody = wrapMap.tfoot = wrapMap.colgroup = wrapMap.caption = wrapMap.thead; +wrapMap.th = wrapMap.td; + +// IE can't serialize <link> and <script> tags normally +if ( !jQuery.support.htmlSerialize ) { + wrapMap._default = [ 1, "div<div>", "</div>" ]; +} + +jQuery.fn.extend({ + text: function( text ) { + if ( jQuery.isFunction(text) ) { + return this.each(function(i) { + var self = jQuery( this ); + + self.text( text.call(this, i, self.text()) ); + }); + } + + if ( typeof text !== "object" && text !== undefined ) { + return this.empty().append( (this[0] && this[0].ownerDocument || document).createTextNode( text ) ); + } + + return jQuery.text( this ); + }, + + wrapAll: function( html ) { + if ( jQuery.isFunction( html ) ) { + return 
this.each(function(i) { + jQuery(this).wrapAll( html.call(this, i) ); + }); + } + + if ( this[0] ) { + // The elements to wrap the target around + var wrap = jQuery( html, this[0].ownerDocument ).eq(0).clone(true); + + if ( this[0].parentNode ) { + wrap.insertBefore( this[0] ); + } + + wrap.map(function() { + var elem = this; + + while ( elem.firstChild && elem.firstChild.nodeType === 1 ) { + elem = elem.firstChild; + } + + return elem; + }).append( this ); + } + + return this; + }, + + wrapInner: function( html ) { + if ( jQuery.isFunction( html ) ) { + return this.each(function(i) { + jQuery(this).wrapInner( html.call(this, i) ); + }); + } + + return this.each(function() { + var self = jQuery( this ), + contents = self.contents(); + + if ( contents.length ) { + contents.wrapAll( html ); + + } else { + self.append( html ); + } + }); + }, + + wrap: function( html ) { + var isFunction = jQuery.isFunction( html ); + + return this.each(function(i) { + jQuery( this ).wrapAll( isFunction ? html.call(this, i) : html ); + }); + }, + + unwrap: function() { + return this.parent().each(function() { + if ( !jQuery.nodeName( this, "body" ) ) { + jQuery( this ).replaceWith( this.childNodes ); + } + }).end(); + }, + + append: function() { + return this.domManip(arguments, true, function( elem ) { + if ( this.nodeType === 1 ) { + this.appendChild( elem ); + } + }); + }, + + prepend: function() { + return this.domManip(arguments, true, function( elem ) { + if ( this.nodeType === 1 ) { + this.insertBefore( elem, this.firstChild ); + } + }); + }, + + before: function() { + if ( this[0] && this[0].parentNode ) { + return this.domManip(arguments, false, function( elem ) { + this.parentNode.insertBefore( elem, this ); + }); + } else if ( arguments.length ) { + var set = jQuery.clean( arguments ); + set.push.apply( set, this.toArray() ); + return this.pushStack( set, "before", arguments ); + } + }, + + after: function() { + if ( this[0] && this[0].parentNode ) { + return 
this.domManip(arguments, false, function( elem ) { + this.parentNode.insertBefore( elem, this.nextSibling ); + }); + } else if ( arguments.length ) { + var set = this.pushStack( this, "after", arguments ); + set.push.apply( set, jQuery.clean(arguments) ); + return set; + } + }, + + // keepData is for internal use only--do not document + remove: function( selector, keepData ) { + for ( var i = 0, elem; (elem = this[i]) != null; i++ ) { + if ( !selector || jQuery.filter( selector, [ elem ] ).length ) { + if ( !keepData && elem.nodeType === 1 ) { + jQuery.cleanData( elem.getElementsByTagName("*") ); + jQuery.cleanData( [ elem ] ); + } + + if ( elem.parentNode ) { + elem.parentNode.removeChild( elem ); + } + } + } + + return this; + }, + + empty: function() { + for ( var i = 0, elem; (elem = this[i]) != null; i++ ) { + // Remove element nodes and prevent memory leaks + if ( elem.nodeType === 1 ) { + jQuery.cleanData( elem.getElementsByTagName("*") ); + } + + // Remove any remaining nodes + while ( elem.firstChild ) { + elem.removeChild( elem.firstChild ); + } + } + + return this; + }, + + clone: function( dataAndEvents, deepDataAndEvents ) { + dataAndEvents = dataAndEvents == null ? false : dataAndEvents; + deepDataAndEvents = deepDataAndEvents == null ? dataAndEvents : deepDataAndEvents; + + return this.map( function () { + return jQuery.clone( this, dataAndEvents, deepDataAndEvents ); + }); + }, + + html: function( value ) { + if ( value === undefined ) { + return this[0] && this[0].nodeType === 1 ? 
+ this[0].innerHTML.replace(rinlinejQuery, "") : + null; + + // See if we can take a shortcut and just use innerHTML + } else if ( typeof value === "string" && !rnoInnerhtml.test( value ) && + (jQuery.support.leadingWhitespace || !rleadingWhitespace.test( value )) && + !wrapMap[ (rtagName.exec( value ) || ["", ""])[1].toLowerCase() ] ) { + + value = value.replace(rxhtmlTag, "<$1></$2>"); + + try { + for ( var i = 0, l = this.length; i < l; i++ ) { + // Remove element nodes and prevent memory leaks + if ( this[i].nodeType === 1 ) { + jQuery.cleanData( this[i].getElementsByTagName("*") ); + this[i].innerHTML = value; + } + } + + // If using innerHTML throws an exception, use the fallback method + } catch(e) { + this.empty().append( value ); + } + + } else if ( jQuery.isFunction( value ) ) { + this.each(function(i){ + var self = jQuery( this ); + + self.html( value.call(this, i, self.html()) ); + }); + + } else { + this.empty().append( value ); + } + + return this; + }, + + replaceWith: function( value ) { + if ( this[0] && this[0].parentNode ) { + // Make sure that the elements are removed from the DOM before they are inserted + // this can help fix replacing a parent with child elements + if ( jQuery.isFunction( value ) ) { + return this.each(function(i) { + var self = jQuery(this), old = self.html(); + self.replaceWith( value.call( this, i, old ) ); + }); + } + + if ( typeof value !== "string" ) { + value = jQuery( value ).detach(); + } + + return this.each(function() { + var next = this.nextSibling, + parent = this.parentNode; + + jQuery( this ).remove(); + + if ( next ) { + jQuery(next).before( value ); + } else { + jQuery(parent).append( value ); + } + }); + } else { + return this.length ? + this.pushStack( jQuery(jQuery.isFunction(value) ? 
value() : value), "replaceWith", value ) : + this; + } + }, + + detach: function( selector ) { + return this.remove( selector, true ); + }, + + domManip: function( args, table, callback ) { + var results, first, fragment, parent, + value = args[0], + scripts = []; + + // We can't cloneNode fragments that contain checked, in WebKit + if ( !jQuery.support.checkClone && arguments.length === 3 && typeof value === "string" && rchecked.test( value ) ) { + return this.each(function() { + jQuery(this).domManip( args, table, callback, true ); + }); + } + + if ( jQuery.isFunction(value) ) { + return this.each(function(i) { + var self = jQuery(this); + args[0] = value.call(this, i, table ? self.html() : undefined); + self.domManip( args, table, callback ); + }); + } + + if ( this[0] ) { + parent = value && value.parentNode; + + // If we're in a fragment, just use that instead of building a new one + if ( jQuery.support.parentNode && parent && parent.nodeType === 11 && parent.childNodes.length === this.length ) { + results = { fragment: parent }; + + } else { + results = jQuery.buildFragment( args, this, scripts ); + } + + fragment = results.fragment; + + if ( fragment.childNodes.length === 1 ) { + first = fragment = fragment.firstChild; + } else { + first = fragment.firstChild; + } + + if ( first ) { + table = table && jQuery.nodeName( first, "tr" ); + + for ( var i = 0, l = this.length, lastIndex = l - 1; i < l; i++ ) { + callback.call( + table ? + root(this[i], first) : + this[i], + // Make sure that we do not leak memory by inadvertently discarding + // the original fragment (which might have attached data) instead of + // using it; in addition, use the original fragment object for the last + // item instead of first because it can end up being emptied incorrectly + // in certain situations (Bug #8070). + // Fragments from the fragment cache must always be cloned and never used + // in place. + results.cacheable || ( l > 1 && i < lastIndex ) ? 
+ jQuery.clone( fragment, true, true ) : + fragment + ); + } + } + + if ( scripts.length ) { + jQuery.each( scripts, evalScript ); + } + } + + return this; + } +}); + +function root( elem, cur ) { + return jQuery.nodeName(elem, "table") ? + (elem.getElementsByTagName("tbody")[0] || + elem.appendChild(elem.ownerDocument.createElement("tbody"))) : + elem; +} + +function cloneCopyEvent( src, dest ) { + + if ( dest.nodeType !== 1 || !jQuery.hasData( src ) ) { + return; + } + + var type, i, l, + oldData = jQuery._data( src ), + curData = jQuery._data( dest, oldData ), + events = oldData.events; + + if ( events ) { + delete curData.handle; + curData.events = {}; + + for ( type in events ) { + for ( i = 0, l = events[ type ].length; i < l; i++ ) { + jQuery.event.add( dest, type + ( events[ type ][ i ].namespace ? "." : "" ) + events[ type ][ i ].namespace, events[ type ][ i ], events[ type ][ i ].data ); + } + } + } + + // make the cloned public data object a copy from the original + if ( curData.data ) { + curData.data = jQuery.extend( {}, curData.data ); + } +} + +function cloneFixAttributes( src, dest ) { + var nodeName; + + // We do not need to do anything for non-Elements + if ( dest.nodeType !== 1 ) { + return; + } + + // clearAttributes removes the attributes, which we don't want, + // but also removes the attachEvent events, which we *do* want + if ( dest.clearAttributes ) { + dest.clearAttributes(); + } + + // mergeAttributes, in contrast, only merges back on the + // original attributes, not the events + if ( dest.mergeAttributes ) { + dest.mergeAttributes( src ); + } + + nodeName = dest.nodeName.toLowerCase(); + + // IE6-8 fail to clone children inside object elements that use + // the proprietary classid attribute value (rather than the type + // attribute) to identify the type of content to display + if ( nodeName === "object" ) { + dest.outerHTML = src.outerHTML; + + } else if ( nodeName === "input" && (src.type === "checkbox" || src.type === "radio") ) { + 
// IE6-8 fails to persist the checked state of a cloned checkbox + // or radio button. Worse, IE6-7 fail to give the cloned element + // a checked appearance if the defaultChecked value isn't also set + if ( src.checked ) { + dest.defaultChecked = dest.checked = src.checked; + } + + // IE6-7 get confused and end up setting the value of a cloned + // checkbox/radio button to an empty string instead of "on" + if ( dest.value !== src.value ) { + dest.value = src.value; + } + + // IE6-8 fails to return the selected option to the default selected + // state when cloning options + } else if ( nodeName === "option" ) { + dest.selected = src.defaultSelected; + + // IE6-8 fails to set the defaultValue to the correct value when + // cloning other types of input fields + } else if ( nodeName === "input" || nodeName === "textarea" ) { + dest.defaultValue = src.defaultValue; + } + + // Event data gets referenced instead of copied if the expando + // gets copied too + dest.removeAttribute( jQuery.expando ); +} + +jQuery.buildFragment = function( args, nodes, scripts ) { + var fragment, cacheable, cacheresults, doc, + first = args[ 0 ]; + + // nodes may contain either an explicit document object, + // a jQuery collection or context object. 
+ // If nodes[0] contains a valid object to assign to doc + if ( nodes && nodes[0] ) { + doc = nodes[0].ownerDocument || nodes[0]; + } + + // Ensure that an attr object doesn't incorrectly stand in as a document object + // Chrome and Firefox seem to allow this to occur and will throw exception + // Fixes #8950 + if ( !doc.createDocumentFragment ) { + doc = document; + } + + // Only cache "small" (1/2 KB) HTML strings that are associated with the main document + // Cloning options loses the selected state, so don't cache them + // IE 6 doesn't like it when you put <object> or <embed> elements in a fragment + // Also, WebKit does not clone 'checked' attributes on cloneNode, so don't cache + // Lastly, IE6,7,8 will not correctly reuse cached fragments that were created from unknown elems #10501 + if ( args.length === 1 && typeof first === "string" && first.length < 512 && doc === document && + first.charAt(0) === "<" && !rnocache.test( first ) && + (jQuery.support.checkClone || !rchecked.test( first )) && + (jQuery.support.html5Clone || !rnoshimcache.test( first )) ) { + + cacheable = true; + + cacheresults = jQuery.fragments[ first ]; + if ( cacheresults && cacheresults !== 1 ) { + fragment = cacheresults; + } + } + + if ( !fragment ) { + fragment = doc.createDocumentFragment(); + jQuery.clean( args, doc, fragment, scripts ); + } + + if ( cacheable ) { + jQuery.fragments[ first ] = cacheresults ? 
fragment : 1; + } + + return { fragment: fragment, cacheable: cacheable }; +}; + +jQuery.fragments = {}; + +jQuery.each({ + appendTo: "append", + prependTo: "prepend", + insertBefore: "before", + insertAfter: "after", + replaceAll: "replaceWith" +}, function( name, original ) { + jQuery.fn[ name ] = function( selector ) { + var ret = [], + insert = jQuery( selector ), + parent = this.length === 1 && this[0].parentNode; + + if ( parent && parent.nodeType === 11 && parent.childNodes.length === 1 && insert.length === 1 ) { + insert[ original ]( this[0] ); + return this; + + } else { + for ( var i = 0, l = insert.length; i < l; i++ ) { + var elems = ( i > 0 ? this.clone(true) : this ).get(); + jQuery( insert[i] )[ original ]( elems ); + ret = ret.concat( elems ); + } + + return this.pushStack( ret, name, insert.selector ); + } + }; +}); + +function getAll( elem ) { + if ( typeof elem.getElementsByTagName !== "undefined" ) { + return elem.getElementsByTagName( "*" ); + + } else if ( typeof elem.querySelectorAll !== "undefined" ) { + return elem.querySelectorAll( "*" ); + + } else { + return []; + } +} + +// Used in clean, fixes the defaultChecked property +function fixDefaultChecked( elem ) { + if ( elem.type === "checkbox" || elem.type === "radio" ) { + elem.defaultChecked = elem.checked; + } +} +// Finds all inputs and passes them to fixDefaultChecked +function findInputs( elem ) { + var nodeName = ( elem.nodeName || "" ).toLowerCase(); + if ( nodeName === "input" ) { + fixDefaultChecked( elem ); + // Skip scripts, get other children + } else if ( nodeName !== "script" && typeof elem.getElementsByTagName !== "undefined" ) { + jQuery.grep( elem.getElementsByTagName("input"), fixDefaultChecked ); + } +} + +// Derived From: http://www.iecss.com/shimprove/javascript/shimprove.1-0-1.js +function shimCloneNode( elem ) { + var div = document.createElement( "div" ); + safeFragment.appendChild( div ); + + div.innerHTML = elem.outerHTML; + return div.firstChild; +} + 
+jQuery.extend({ + clone: function( elem, dataAndEvents, deepDataAndEvents ) { + var srcElements, + destElements, + i, + // IE<=8 does not properly clone detached, unknown element nodes + clone = jQuery.support.html5Clone || !rnoshimcache.test( "<" + elem.nodeName ) ? + elem.cloneNode( true ) : + shimCloneNode( elem ); + + if ( (!jQuery.support.noCloneEvent || !jQuery.support.noCloneChecked) && + (elem.nodeType === 1 || elem.nodeType === 11) && !jQuery.isXMLDoc(elem) ) { + // IE copies events bound via attachEvent when using cloneNode. + // Calling detachEvent on the clone will also remove the events + // from the original. In order to get around this, we use some + // proprietary methods to clear the events. Thanks to MooTools + // guys for this hotness. + + cloneFixAttributes( elem, clone ); + + // Using Sizzle here is crazy slow, so we use getElementsByTagName instead + srcElements = getAll( elem ); + destElements = getAll( clone ); + + // Weird iteration because IE will replace the length property + // with an element if you are cloning the body and one of the + // elements on the page has a name or id of "length" + for ( i = 0; srcElements[i]; ++i ) { + // Ensure that the destination node is not null; Fixes #9587 + if ( destElements[i] ) { + cloneFixAttributes( srcElements[i], destElements[i] ); + } + } + } + + // Copy the events from the original to the clone + if ( dataAndEvents ) { + cloneCopyEvent( elem, clone ); + + if ( deepDataAndEvents ) { + srcElements = getAll( elem ); + destElements = getAll( clone ); + + for ( i = 0; srcElements[i]; ++i ) { + cloneCopyEvent( srcElements[i], destElements[i] ); + } + } + } + + srcElements = destElements = null; + + // Return the cloned set + return clone; + }, + + clean: function( elems, context, fragment, scripts ) { + var checkScriptType; + + context = context || document; + + // !context.createElement fails in IE with an error but returns typeof 'object' + if ( typeof context.createElement === "undefined" ) { + 
context = context.ownerDocument || context[0] && context[0].ownerDocument || document; + } + + var ret = [], j; + + for ( var i = 0, elem; (elem = elems[i]) != null; i++ ) { + if ( typeof elem === "number" ) { + elem += ""; + } + + if ( !elem ) { + continue; + } + + // Convert html string into DOM nodes + if ( typeof elem === "string" ) { + if ( !rhtml.test( elem ) ) { + elem = context.createTextNode( elem ); + } else { + // Fix "XHTML"-style tags in all browsers + elem = elem.replace(rxhtmlTag, "<$1></$2>"); + + // Trim whitespace, otherwise indexOf won't work as expected + var tag = ( rtagName.exec( elem ) || ["", ""] )[1].toLowerCase(), + wrap = wrapMap[ tag ] || wrapMap._default, + depth = wrap[0], + div = context.createElement("div"); + + // Append wrapper element to unknown element safe doc fragment + if ( context === document ) { + // Use the fragment we've already created for this document + safeFragment.appendChild( div ); + } else { + // Use a fragment created with the owner document + createSafeFragment( context ).appendChild( div ); + } + + // Go to html and back, then peel off extra wrappers + div.innerHTML = wrap[1] + elem + wrap[2]; + + // Move to the right depth + while ( depth-- ) { + div = div.lastChild; + } + + // Remove IE's autoinserted <tbody> from table fragments + if ( !jQuery.support.tbody ) { + + // String was a <table>, *may* have spurious <tbody> + var hasBody = rtbody.test(elem), + tbody = tag === "table" && !hasBody ? + div.firstChild && div.firstChild.childNodes : + + // String was a bare <thead> or <tfoot> + wrap[1] === "<table>" && !hasBody ? 
+ div.childNodes : + []; + + for ( j = tbody.length - 1; j >= 0 ; --j ) { + if ( jQuery.nodeName( tbody[ j ], "tbody" ) && !tbody[ j ].childNodes.length ) { + tbody[ j ].parentNode.removeChild( tbody[ j ] ); + } + } + } + + // IE completely kills leading whitespace when innerHTML is used + if ( !jQuery.support.leadingWhitespace && rleadingWhitespace.test( elem ) ) { + div.insertBefore( context.createTextNode( rleadingWhitespace.exec(elem)[0] ), div.firstChild ); + } + + elem = div.childNodes; + } + } + + // Resets defaultChecked for any radios and checkboxes + // about to be appended to the DOM in IE 6/7 (#8060) + var len; + if ( !jQuery.support.appendChecked ) { + if ( elem[0] && typeof (len = elem.length) === "number" ) { + for ( j = 0; j < len; j++ ) { + findInputs( elem[j] ); + } + } else { + findInputs( elem ); + } + } + + if ( elem.nodeType ) { + ret.push( elem ); + } else { + ret = jQuery.merge( ret, elem ); + } + } + + if ( fragment ) { + checkScriptType = function( elem ) { + return !elem.type || rscriptType.test( elem.type ); + }; + for ( i = 0; ret[i]; i++ ) { + if ( scripts && jQuery.nodeName( ret[i], "script" ) && (!ret[i].type || ret[i].type.toLowerCase() === "text/javascript") ) { + scripts.push( ret[i].parentNode ? 
ret[i].parentNode.removeChild( ret[i] ) : ret[i] ); + + } else { + if ( ret[i].nodeType === 1 ) { + var jsTags = jQuery.grep( ret[i].getElementsByTagName( "script" ), checkScriptType ); + + ret.splice.apply( ret, [i + 1, 0].concat( jsTags ) ); + } + fragment.appendChild( ret[i] ); + } + } + } + + return ret; + }, + + cleanData: function( elems ) { + var data, id, + cache = jQuery.cache, + special = jQuery.event.special, + deleteExpando = jQuery.support.deleteExpando; + + for ( var i = 0, elem; (elem = elems[i]) != null; i++ ) { + if ( elem.nodeName && jQuery.noData[elem.nodeName.toLowerCase()] ) { + continue; + } + + id = elem[ jQuery.expando ]; + + if ( id ) { + data = cache[ id ]; + + if ( data && data.events ) { + for ( var type in data.events ) { + if ( special[ type ] ) { + jQuery.event.remove( elem, type ); + + // This is a shortcut to avoid jQuery.event.remove's overhead + } else { + jQuery.removeEvent( elem, type, data.handle ); + } + } + + // Null the DOM reference to avoid IE6/7/8 leak (#7054) + if ( data.handle ) { + data.handle.elem = null; + } + } + + if ( deleteExpando ) { + delete elem[ jQuery.expando ]; + + } else if ( elem.removeAttribute ) { + elem.removeAttribute( jQuery.expando ); + } + + delete cache[ id ]; + } + } + } +}); + +function evalScript( i, elem ) { + if ( elem.src ) { + jQuery.ajax({ + url: elem.src, + async: false, + dataType: "script" + }); + } else { + jQuery.globalEval( ( elem.text || elem.textContent || elem.innerHTML || "" ).replace( rcleanScript, "/*$0*/" ) ); + } + + if ( elem.parentNode ) { + elem.parentNode.removeChild( elem ); + } +} + + + + +var ralpha = /alpha\([^)]*\)/i, + ropacity = /opacity=([^)]*)/, + // fixed for IE9, see #8346 + rupper = /([A-Z]|^ms)/g, + rnumpx = /^-?\d+(?:px)?$/i, + rnum = /^-?\d/, + rrelNum = /^([\-+])=([\-+.\de]+)/, + + cssShow = { position: "absolute", visibility: "hidden", display: "block" }, + cssWidth = [ "Left", "Right" ], + cssHeight = [ "Top", "Bottom" ], + curCSS, + + getComputedStyle, 
+ currentStyle; + +jQuery.fn.css = function( name, value ) { + // Setting 'undefined' is a no-op + if ( arguments.length === 2 && value === undefined ) { + return this; + } + + return jQuery.access( this, name, value, true, function( elem, name, value ) { + return value !== undefined ? + jQuery.style( elem, name, value ) : + jQuery.css( elem, name ); + }); +}; + +jQuery.extend({ + // Add in style property hooks for overriding the default + // behavior of getting and setting a style property + cssHooks: { + opacity: { + get: function( elem, computed ) { + if ( computed ) { + // We should always get a number back from opacity + var ret = curCSS( elem, "opacity", "opacity" ); + return ret === "" ? "1" : ret; + + } else { + return elem.style.opacity; + } + } + } + }, + + // Exclude the following css properties to add px + cssNumber: { + "fillOpacity": true, + "fontWeight": true, + "lineHeight": true, + "opacity": true, + "orphans": true, + "widows": true, + "zIndex": true, + "zoom": true + }, + + // Add in properties whose names you wish to fix before + // setting or getting the value + cssProps: { + // normalize float css property + "float": jQuery.support.cssFloat ? "cssFloat" : "styleFloat" + }, + + // Get and set the style property on a DOM Node + style: function( elem, name, value, extra ) { + // Don't set styles on text and comment nodes + if ( !elem || elem.nodeType === 3 || elem.nodeType === 8 || !elem.style ) { + return; + } + + // Make sure that we're working with the right name + var ret, type, origName = jQuery.camelCase( name ), + style = elem.style, hooks = jQuery.cssHooks[ origName ]; + + name = jQuery.cssProps[ origName ] || origName; + + // Check if we're setting a value + if ( value !== undefined ) { + type = typeof value; + + // convert relative number strings (+= or -=) to relative numbers. 
#7345 + if ( type === "string" && (ret = rrelNum.exec( value )) ) { + value = ( +( ret[1] + 1) * +ret[2] ) + parseFloat( jQuery.css( elem, name ) ); + // Fixes bug #9237 + type = "number"; + } + + // Make sure that NaN and null values aren't set. See: #7116 + if ( value == null || type === "number" && isNaN( value ) ) { + return; + } + + // If a number was passed in, add 'px' to the (except for certain CSS properties) + if ( type === "number" && !jQuery.cssNumber[ origName ] ) { + value += "px"; + } + + // If a hook was provided, use that value, otherwise just set the specified value + if ( !hooks || !("set" in hooks) || (value = hooks.set( elem, value )) !== undefined ) { + // Wrapped to prevent IE from throwing errors when 'invalid' values are provided + // Fixes bug #5509 + try { + style[ name ] = value; + } catch(e) {} + } + + } else { + // If a hook was provided get the non-computed value from there + if ( hooks && "get" in hooks && (ret = hooks.get( elem, false, extra )) !== undefined ) { + return ret; + } + + // Otherwise just get the value from the style object + return style[ name ]; + } + }, + + css: function( elem, name, extra ) { + var ret, hooks; + + // Make sure that we're working with the right name + name = jQuery.camelCase( name ); + hooks = jQuery.cssHooks[ name ]; + name = jQuery.cssProps[ name ] || name; + + // cssFloat needs a special treatment + if ( name === "cssFloat" ) { + name = "float"; + } + + // If a hook was provided get the computed value from there + if ( hooks && "get" in hooks && (ret = hooks.get( elem, true, extra )) !== undefined ) { + return ret; + + // Otherwise, if a way to get the computed value exists, use that + } else if ( curCSS ) { + return curCSS( elem, name ); + } + }, + + // A method for quickly swapping in/out CSS properties to get correct calculations + swap: function( elem, options, callback ) { + var old = {}; + + // Remember the old values, and insert the new ones + for ( var name in options ) { + old[ name ] = 
elem.style[ name ]; + elem.style[ name ] = options[ name ]; + } + + callback.call( elem ); + + // Revert the old values + for ( name in options ) { + elem.style[ name ] = old[ name ]; + } + } +}); + +// DEPRECATED, Use jQuery.css() instead +jQuery.curCSS = jQuery.css; + +jQuery.each(["height", "width"], function( i, name ) { + jQuery.cssHooks[ name ] = { + get: function( elem, computed, extra ) { + var val; + + if ( computed ) { + if ( elem.offsetWidth !== 0 ) { + return getWH( elem, name, extra ); + } else { + jQuery.swap( elem, cssShow, function() { + val = getWH( elem, name, extra ); + }); + } + + return val; + } + }, + + set: function( elem, value ) { + if ( rnumpx.test( value ) ) { + // ignore negative width and height values #1599 + value = parseFloat( value ); + + if ( value >= 0 ) { + return value + "px"; + } + + } else { + return value; + } + } + }; +}); + +if ( !jQuery.support.opacity ) { + jQuery.cssHooks.opacity = { + get: function( elem, computed ) { + // IE uses filters for opacity + return ropacity.test( (computed && elem.currentStyle ? elem.currentStyle.filter : elem.style.filter) || "" ) ? + ( parseFloat( RegExp.$1 ) / 100 ) + "" : + computed ? "1" : ""; + }, + + set: function( elem, value ) { + var style = elem.style, + currentStyle = elem.currentStyle, + opacity = jQuery.isNumeric( value ) ? "alpha(opacity=" + value * 100 + ")" : "", + filter = currentStyle && currentStyle.filter || style.filter || ""; + + // IE has trouble with opacity if it does not have layout + // Force it by setting the zoom level + style.zoom = 1; + + // if setting opacity to 1, and no other filters exist - attempt to remove filter attribute #6652 + if ( value >= 1 && jQuery.trim( filter.replace( ralpha, "" ) ) === "" ) { + + // Setting style.filter to null, "" & " " still leave "filter:" in the cssText + // if "filter:" is present at all, clearType is disabled, we want to avoid this + // style.removeAttribute is IE Only, but so apparently is this code path... 
+ style.removeAttribute( "filter" ); + + // if there there is no filter style applied in a css rule, we are done + if ( currentStyle && !currentStyle.filter ) { + return; + } + } + + // otherwise, set new filter values + style.filter = ralpha.test( filter ) ? + filter.replace( ralpha, opacity ) : + filter + " " + opacity; + } + }; +} + +jQuery(function() { + // This hook cannot be added until DOM ready because the support test + // for it is not run until after DOM ready + if ( !jQuery.support.reliableMarginRight ) { + jQuery.cssHooks.marginRight = { + get: function( elem, computed ) { + // WebKit Bug 13343 - getComputedStyle returns wrong value for margin-right + // Work around by temporarily setting element display to inline-block + var ret; + jQuery.swap( elem, { "display": "inline-block" }, function() { + if ( computed ) { + ret = curCSS( elem, "margin-right", "marginRight" ); + } else { + ret = elem.style.marginRight; + } + }); + return ret; + } + }; + } +}); + +if ( document.defaultView && document.defaultView.getComputedStyle ) { + getComputedStyle = function( elem, name ) { + var ret, defaultView, computedStyle; + + name = name.replace( rupper, "-$1" ).toLowerCase(); + + if ( (defaultView = elem.ownerDocument.defaultView) && + (computedStyle = defaultView.getComputedStyle( elem, null )) ) { + ret = computedStyle.getPropertyValue( name ); + if ( ret === "" && !jQuery.contains( elem.ownerDocument.documentElement, elem ) ) { + ret = jQuery.style( elem, name ); + } + } + + return ret; + }; +} + +if ( document.documentElement.currentStyle ) { + currentStyle = function( elem, name ) { + var left, rsLeft, uncomputed, + ret = elem.currentStyle && elem.currentStyle[ name ], + style = elem.style; + + // Avoid setting ret to empty string here + // so we don't default to auto + if ( ret === null && style && (uncomputed = style[ name ]) ) { + ret = uncomputed; + } + + // From the awesome hack by Dean Edwards + // 
http://erik.eae.net/archives/2007/07/27/18.54.15/#comment-102291 + + // If we're not dealing with a regular pixel number + // but a number that has a weird ending, we need to convert it to pixels + if ( !rnumpx.test( ret ) && rnum.test( ret ) ) { + + // Remember the original values + left = style.left; + rsLeft = elem.runtimeStyle && elem.runtimeStyle.left; + + // Put in the new values to get a computed value out + if ( rsLeft ) { + elem.runtimeStyle.left = elem.currentStyle.left; + } + style.left = name === "fontSize" ? "1em" : ( ret || 0 ); + ret = style.pixelLeft + "px"; + + // Revert the changed values + style.left = left; + if ( rsLeft ) { + elem.runtimeStyle.left = rsLeft; + } + } + + return ret === "" ? "auto" : ret; + }; +} + +curCSS = getComputedStyle || currentStyle; + +function getWH( elem, name, extra ) { + + // Start with offset property + var val = name === "width" ? elem.offsetWidth : elem.offsetHeight, + which = name === "width" ? cssWidth : cssHeight, + i = 0, + len = which.length; + + if ( val > 0 ) { + if ( extra !== "border" ) { + for ( ; i < len; i++ ) { + if ( !extra ) { + val -= parseFloat( jQuery.css( elem, "padding" + which[ i ] ) ) || 0; + } + if ( extra === "margin" ) { + val += parseFloat( jQuery.css( elem, extra + which[ i ] ) ) || 0; + } else { + val -= parseFloat( jQuery.css( elem, "border" + which[ i ] + "Width" ) ) || 0; + } + } + } + + return val + "px"; + } + + // Fall back to computed then uncomputed css if necessary + val = curCSS( elem, name, name ); + if ( val < 0 || val == null ) { + val = elem.style[ name ] || 0; + } + // Normalize "", auto, and prepare for extra + val = parseFloat( val ) || 0; + + // Add padding, border, margin + if ( extra ) { + for ( ; i < len; i++ ) { + val += parseFloat( jQuery.css( elem, "padding" + which[ i ] ) ) || 0; + if ( extra !== "padding" ) { + val += parseFloat( jQuery.css( elem, "border" + which[ i ] + "Width" ) ) || 0; + } + if ( extra === "margin" ) { + val += parseFloat( jQuery.css( elem, 
extra + which[ i ] ) ) || 0; + } + } + } + + return val + "px"; +} + +if ( jQuery.expr && jQuery.expr.filters ) { + jQuery.expr.filters.hidden = function( elem ) { + var width = elem.offsetWidth, + height = elem.offsetHeight; + + return ( width === 0 && height === 0 ) || (!jQuery.support.reliableHiddenOffsets && ((elem.style && elem.style.display) || jQuery.css( elem, "display" )) === "none"); + }; + + jQuery.expr.filters.visible = function( elem ) { + return !jQuery.expr.filters.hidden( elem ); + }; +} + + + + +var r20 = /%20/g, + rbracket = /\[\]$/, + rCRLF = /\r?\n/g, + rhash = /#.*$/, + rheaders = /^(.*?):[ \t]*([^\r\n]*)\r?$/mg, // IE leaves an \r character at EOL + rinput = /^(?:color|date|datetime|datetime-local|email|hidden|month|number|password|range|search|tel|text|time|url|week)$/i, + // #7653, #8125, #8152: local protocol detection + rlocalProtocol = /^(?:about|app|app\-storage|.+\-extension|file|res|widget):$/, + rnoContent = /^(?:GET|HEAD)$/, + rprotocol = /^\/\//, + rquery = /\?/, + rscript = /<script\b[^<]*(?:(?!<\/script>)<[^<]*)*<\/script>/gi, + rselectTextarea = /^(?:select|textarea)/i, + rspacesAjax = /\s+/, + rts = /([?&])_=[^&]*/, + rurl = /^([\w\+\.\-]+:)(?:\/\/([^\/?#:]*)(?::(\d+))?)?/, + + // Keep a copy of the old load method + _load = jQuery.fn.load, + + /* Prefilters + * 1) They are useful to introduce custom dataTypes (see ajax/jsonp.js for an example) + * 2) These are called: + * - BEFORE asking for a transport + * - AFTER param serialization (s.data is a string if s.processData is true) + * 3) key is the dataType + * 4) the catchall symbol "*" can be used + * 5) execution will start with transport dataType and THEN continue down to "*" if needed + */ + prefilters = {}, + + /* Transports bindings + * 1) key is the dataType + * 2) the catchall symbol "*" can be used + * 3) selection will start with transport dataType and THEN go to "*" if needed + */ + transports = {}, + + // Document location + ajaxLocation, + + // Document location 
segments + ajaxLocParts, + + // Avoid comment-prolog char sequence (#10098); must appease lint and evade compression + allTypes = ["*/"] + ["*"]; + +// #8138, IE may throw an exception when accessing +// a field from window.location if document.domain has been set +try { + ajaxLocation = location.href; +} catch( e ) { + // Use the href attribute of an A element + // since IE will modify it given document.location + ajaxLocation = document.createElement( "a" ); + ajaxLocation.href = ""; + ajaxLocation = ajaxLocation.href; +} + +// Segment location into parts +ajaxLocParts = rurl.exec( ajaxLocation.toLowerCase() ) || []; + +// Base "constructor" for jQuery.ajaxPrefilter and jQuery.ajaxTransport +function addToPrefiltersOrTransports( structure ) { + + // dataTypeExpression is optional and defaults to "*" + return function( dataTypeExpression, func ) { + + if ( typeof dataTypeExpression !== "string" ) { + func = dataTypeExpression; + dataTypeExpression = "*"; + } + + if ( jQuery.isFunction( func ) ) { + var dataTypes = dataTypeExpression.toLowerCase().split( rspacesAjax ), + i = 0, + length = dataTypes.length, + dataType, + list, + placeBefore; + + // For each dataType in the dataTypeExpression + for ( ; i < length; i++ ) { + dataType = dataTypes[ i ]; + // We control if we're asked to add before + // any existing element + placeBefore = /^\+/.test( dataType ); + if ( placeBefore ) { + dataType = dataType.substr( 1 ) || "*"; + } + list = structure[ dataType ] = structure[ dataType ] || []; + // then we add to the structure accordingly + list[ placeBefore ? 
"unshift" : "push" ]( func ); + } + } + }; +} + +// Base inspection function for prefilters and transports +function inspectPrefiltersOrTransports( structure, options, originalOptions, jqXHR, + dataType /* internal */, inspected /* internal */ ) { + + dataType = dataType || options.dataTypes[ 0 ]; + inspected = inspected || {}; + + inspected[ dataType ] = true; + + var list = structure[ dataType ], + i = 0, + length = list ? list.length : 0, + executeOnly = ( structure === prefilters ), + selection; + + for ( ; i < length && ( executeOnly || !selection ); i++ ) { + selection = list[ i ]( options, originalOptions, jqXHR ); + // If we got redirected to another dataType + // we try there if executing only and not done already + if ( typeof selection === "string" ) { + if ( !executeOnly || inspected[ selection ] ) { + selection = undefined; + } else { + options.dataTypes.unshift( selection ); + selection = inspectPrefiltersOrTransports( + structure, options, originalOptions, jqXHR, selection, inspected ); + } + } + } + // If we're only executing or nothing was selected + // we try the catchall dataType if not done already + if ( ( executeOnly || !selection ) && !inspected[ "*" ] ) { + selection = inspectPrefiltersOrTransports( + structure, options, originalOptions, jqXHR, "*", inspected ); + } + // unnecessary when only executing (prefilters) + // but it'll be ignored by the caller in that case + return selection; +} + +// A special extend for ajax options +// that takes "flat" options (not to be deep extended) +// Fixes #9887 +function ajaxExtend( target, src ) { + var key, deep, + flatOptions = jQuery.ajaxSettings.flatOptions || {}; + for ( key in src ) { + if ( src[ key ] !== undefined ) { + ( flatOptions[ key ] ? 
target : ( deep || ( deep = {} ) ) )[ key ] = src[ key ]; + } + } + if ( deep ) { + jQuery.extend( true, target, deep ); + } +} + +jQuery.fn.extend({ + load: function( url, params, callback ) { + if ( typeof url !== "string" && _load ) { + return _load.apply( this, arguments ); + + // Don't do a request if no elements are being requested + } else if ( !this.length ) { + return this; + } + + var off = url.indexOf( " " ); + if ( off >= 0 ) { + var selector = url.slice( off, url.length ); + url = url.slice( 0, off ); + } + + // Default to a GET request + var type = "GET"; + + // If the second parameter was provided + if ( params ) { + // If it's a function + if ( jQuery.isFunction( params ) ) { + // We assume that it's the callback + callback = params; + params = undefined; + + // Otherwise, build a param string + } else if ( typeof params === "object" ) { + params = jQuery.param( params, jQuery.ajaxSettings.traditional ); + type = "POST"; + } + } + + var self = this; + + // Request the remote document + jQuery.ajax({ + url: url, + type: type, + dataType: "html", + data: params, + // Complete callback (responseText is used internally) + complete: function( jqXHR, status, responseText ) { + // Store the response as specified by the jqXHR object + responseText = jqXHR.responseText; + // If successful, inject the HTML into all the matched elements + if ( jqXHR.isResolved() ) { + // #4825: Get the actual response in case + // a dataFilter is present in ajaxSettings + jqXHR.done(function( r ) { + responseText = r; + }); + // See if a selector was specified + self.html( selector ? 
+ // Create a dummy div to hold the results + jQuery("<div>") + // inject the contents of the document in, removing the scripts + // to avoid any 'Permission Denied' errors in IE + .append(responseText.replace(rscript, "")) + + // Locate the specified elements + .find(selector) : + + // If not, just inject the full result + responseText ); + } + + if ( callback ) { + self.each( callback, [ responseText, status, jqXHR ] ); + } + } + }); + + return this; + }, + + serialize: function() { + return jQuery.param( this.serializeArray() ); + }, + + serializeArray: function() { + return this.map(function(){ + return this.elements ? jQuery.makeArray( this.elements ) : this; + }) + .filter(function(){ + return this.name && !this.disabled && + ( this.checked || rselectTextarea.test( this.nodeName ) || + rinput.test( this.type ) ); + }) + .map(function( i, elem ){ + var val = jQuery( this ).val(); + + return val == null ? + null : + jQuery.isArray( val ) ? + jQuery.map( val, function( val, i ){ + return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) }; + }) : + { name: elem.name, value: val.replace( rCRLF, "\r\n" ) }; + }).get(); + } +}); + +// Attach a bunch of functions for handling common AJAX events +jQuery.each( "ajaxStart ajaxStop ajaxComplete ajaxError ajaxSuccess ajaxSend".split( " " ), function( i, o ){ + jQuery.fn[ o ] = function( f ){ + return this.on( o, f ); + }; +}); + +jQuery.each( [ "get", "post" ], function( i, method ) { + jQuery[ method ] = function( url, data, callback, type ) { + // shift arguments if data argument was omitted + if ( jQuery.isFunction( data ) ) { + type = type || callback; + callback = data; + data = undefined; + } + + return jQuery.ajax({ + type: method, + url: url, + data: data, + success: callback, + dataType: type + }); + }; +}); + +jQuery.extend({ + + getScript: function( url, callback ) { + return jQuery.get( url, undefined, callback, "script" ); + }, + + getJSON: function( url, data, callback ) { + return jQuery.get( url, 
data, callback, "json" ); + }, + + // Creates a full fledged settings object into target + // with both ajaxSettings and settings fields. + // If target is omitted, writes into ajaxSettings. + ajaxSetup: function( target, settings ) { + if ( settings ) { + // Building a settings object + ajaxExtend( target, jQuery.ajaxSettings ); + } else { + // Extending ajaxSettings + settings = target; + target = jQuery.ajaxSettings; + } + ajaxExtend( target, settings ); + return target; + }, + + ajaxSettings: { + url: ajaxLocation, + isLocal: rlocalProtocol.test( ajaxLocParts[ 1 ] ), + global: true, + type: "GET", + contentType: "application/x-www-form-urlencoded", + processData: true, + async: true, + /* + timeout: 0, + data: null, + dataType: null, + username: null, + password: null, + cache: null, + traditional: false, + headers: {}, + */ + + accepts: { + xml: "application/xml, text/xml", + html: "text/html", + text: "text/plain", + json: "application/json, text/javascript", + "*": allTypes + }, + + contents: { + xml: /xml/, + html: /html/, + json: /json/ + }, + + responseFields: { + xml: "responseXML", + text: "responseText" + }, + + // List of data converters + // 1) key format is "source_type destination_type" (a single space in-between) + // 2) the catchall symbol "*" can be used for source_type + converters: { + + // Convert anything to text + "* text": window.String, + + // Text to html (true = no transformation) + "text html": true, + + // Evaluate text as a json expression + "text json": jQuery.parseJSON, + + // Parse text as xml + "text xml": jQuery.parseXML + }, + + // For options that shouldn't be deep extended: + // you can add your own custom options here if + // and when you create one that shouldn't be + // deep extended (see ajaxExtend) + flatOptions: { + context: true, + url: true + } + }, + + ajaxPrefilter: addToPrefiltersOrTransports( prefilters ), + ajaxTransport: addToPrefiltersOrTransports( transports ), + + // Main method + ajax: function( url, options 
) { + + // If url is an object, simulate pre-1.5 signature + if ( typeof url === "object" ) { + options = url; + url = undefined; + } + + // Force options to be an object + options = options || {}; + + var // Create the final options object + s = jQuery.ajaxSetup( {}, options ), + // Callbacks context + callbackContext = s.context || s, + // Context for global events + // It's the callbackContext if one was provided in the options + // and if it's a DOM node or a jQuery collection + globalEventContext = callbackContext !== s && + ( callbackContext.nodeType || callbackContext instanceof jQuery ) ? + jQuery( callbackContext ) : jQuery.event, + // Deferreds + deferred = jQuery.Deferred(), + completeDeferred = jQuery.Callbacks( "once memory" ), + // Status-dependent callbacks + statusCode = s.statusCode || {}, + // ifModified key + ifModifiedKey, + // Headers (they are sent all at once) + requestHeaders = {}, + requestHeadersNames = {}, + // Response headers + responseHeadersString, + responseHeaders, + // transport + transport, + // timeout handle + timeoutTimer, + // Cross-domain detection vars + parts, + // The jqXHR state + state = 0, + // To know if global events are to be dispatched + fireGlobals, + // Loop variable + i, + // Fake xhr + jqXHR = { + + readyState: 0, + + // Caches the header + setRequestHeader: function( name, value ) { + if ( !state ) { + var lname = name.toLowerCase(); + name = requestHeadersNames[ lname ] = requestHeadersNames[ lname ] || name; + requestHeaders[ name ] = value; + } + return this; + }, + + // Raw string + getAllResponseHeaders: function() { + return state === 2 ? 
responseHeadersString : null; + }, + + // Builds headers hashtable if needed + getResponseHeader: function( key ) { + var match; + if ( state === 2 ) { + if ( !responseHeaders ) { + responseHeaders = {}; + while( ( match = rheaders.exec( responseHeadersString ) ) ) { + responseHeaders[ match[1].toLowerCase() ] = match[ 2 ]; + } + } + match = responseHeaders[ key.toLowerCase() ]; + } + return match === undefined ? null : match; + }, + + // Overrides response content-type header + overrideMimeType: function( type ) { + if ( !state ) { + s.mimeType = type; + } + return this; + }, + + // Cancel the request + abort: function( statusText ) { + statusText = statusText || "abort"; + if ( transport ) { + transport.abort( statusText ); + } + done( 0, statusText ); + return this; + } + }; + + // Callback for when everything is done + // It is defined here because jslint complains if it is declared + // at the end of the function (which would be more logical and readable) + function done( status, nativeStatusText, responses, headers ) { + + // Called once + if ( state === 2 ) { + return; + } + + // State is "done" now + state = 2; + + // Clear timeout if it exists + if ( timeoutTimer ) { + clearTimeout( timeoutTimer ); + } + + // Dereference transport for early garbage collection + // (no matter how long the jqXHR object will be used) + transport = undefined; + + // Cache response headers + responseHeadersString = headers || ""; + + // Set readyState + jqXHR.readyState = status > 0 ? 4 : 0; + + var isSuccess, + success, + error, + statusText = nativeStatusText, + response = responses ? ajaxHandleResponses( s, jqXHR, responses ) : undefined, + lastModified, + etag; + + // If successful, handle type chaining + if ( status >= 200 && status < 300 || status === 304 ) { + + // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. 
+ if ( s.ifModified ) { + + if ( ( lastModified = jqXHR.getResponseHeader( "Last-Modified" ) ) ) { + jQuery.lastModified[ ifModifiedKey ] = lastModified; + } + if ( ( etag = jqXHR.getResponseHeader( "Etag" ) ) ) { + jQuery.etag[ ifModifiedKey ] = etag; + } + } + + // If not modified + if ( status === 304 ) { + + statusText = "notmodified"; + isSuccess = true; + + // If we have data + } else { + + try { + success = ajaxConvert( s, response ); + statusText = "success"; + isSuccess = true; + } catch(e) { + // We have a parsererror + statusText = "parsererror"; + error = e; + } + } + } else { + // We extract error from statusText + // then normalize statusText and status for non-aborts + error = statusText; + if ( !statusText || status ) { + statusText = "error"; + if ( status < 0 ) { + status = 0; + } + } + } + + // Set data for the fake xhr object + jqXHR.status = status; + jqXHR.statusText = "" + ( nativeStatusText || statusText ); + + // Success/Error + if ( isSuccess ) { + deferred.resolveWith( callbackContext, [ success, statusText, jqXHR ] ); + } else { + deferred.rejectWith( callbackContext, [ jqXHR, statusText, error ] ); + } + + // Status-dependent callbacks + jqXHR.statusCode( statusCode ); + statusCode = undefined; + + if ( fireGlobals ) { + globalEventContext.trigger( "ajax" + ( isSuccess ? "Success" : "Error" ), + [ jqXHR, s, isSuccess ? 
success : error ] ); + } + + // Complete + completeDeferred.fireWith( callbackContext, [ jqXHR, statusText ] ); + + if ( fireGlobals ) { + globalEventContext.trigger( "ajaxComplete", [ jqXHR, s ] ); + // Handle the global AJAX counter + if ( !( --jQuery.active ) ) { + jQuery.event.trigger( "ajaxStop" ); + } + } + } + + // Attach deferreds + deferred.promise( jqXHR ); + jqXHR.success = jqXHR.done; + jqXHR.error = jqXHR.fail; + jqXHR.complete = completeDeferred.add; + + // Status-dependent callbacks + jqXHR.statusCode = function( map ) { + if ( map ) { + var tmp; + if ( state < 2 ) { + for ( tmp in map ) { + statusCode[ tmp ] = [ statusCode[tmp], map[tmp] ]; + } + } else { + tmp = map[ jqXHR.status ]; + jqXHR.then( tmp, tmp ); + } + } + return this; + }; + + // Remove hash character (#7531: and string promotion) + // Add protocol if not provided (#5866: IE7 issue with protocol-less urls) + // We also use the url parameter if available + s.url = ( ( url || s.url ) + "" ).replace( rhash, "" ).replace( rprotocol, ajaxLocParts[ 1 ] + "//" ); + + // Extract dataTypes list + s.dataTypes = jQuery.trim( s.dataType || "*" ).toLowerCase().split( rspacesAjax ); + + // Determine if a cross-domain request is in order + if ( s.crossDomain == null ) { + parts = rurl.exec( s.url.toLowerCase() ); + s.crossDomain = !!( parts && + ( parts[ 1 ] != ajaxLocParts[ 1 ] || parts[ 2 ] != ajaxLocParts[ 2 ] || + ( parts[ 3 ] || ( parts[ 1 ] === "http:" ? 80 : 443 ) ) != + ( ajaxLocParts[ 3 ] || ( ajaxLocParts[ 1 ] === "http:" ? 
80 : 443 ) ) ) + ); + } + + // Convert data if not already a string + if ( s.data && s.processData && typeof s.data !== "string" ) { + s.data = jQuery.param( s.data, s.traditional ); + } + + // Apply prefilters + inspectPrefiltersOrTransports( prefilters, s, options, jqXHR ); + + // If request was aborted inside a prefiler, stop there + if ( state === 2 ) { + return false; + } + + // We can fire global events as of now if asked to + fireGlobals = s.global; + + // Uppercase the type + s.type = s.type.toUpperCase(); + + // Determine if request has content + s.hasContent = !rnoContent.test( s.type ); + + // Watch for a new set of requests + if ( fireGlobals && jQuery.active++ === 0 ) { + jQuery.event.trigger( "ajaxStart" ); + } + + // More options handling for requests with no content + if ( !s.hasContent ) { + + // If data is available, append data to url + if ( s.data ) { + s.url += ( rquery.test( s.url ) ? "&" : "?" ) + s.data; + // #9682: remove data so that it's not used in an eventual retry + delete s.data; + } + + // Get ifModifiedKey before adding the anti-cache parameter + ifModifiedKey = s.url; + + // Add anti-cache in url if needed + if ( s.cache === false ) { + + var ts = jQuery.now(), + // try replacing _= if it is there + ret = s.url.replace( rts, "$1_=" + ts ); + + // if nothing was replaced, add timestamp to the end + s.url = ret + ( ( ret === s.url ) ? ( rquery.test( s.url ) ? "&" : "?" ) + "_=" + ts : "" ); + } + } + + // Set the correct header, if data is being sent + if ( s.data && s.hasContent && s.contentType !== false || options.contentType ) { + jqXHR.setRequestHeader( "Content-Type", s.contentType ); + } + + // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. 
+ if ( s.ifModified ) { + ifModifiedKey = ifModifiedKey || s.url; + if ( jQuery.lastModified[ ifModifiedKey ] ) { + jqXHR.setRequestHeader( "If-Modified-Since", jQuery.lastModified[ ifModifiedKey ] ); + } + if ( jQuery.etag[ ifModifiedKey ] ) { + jqXHR.setRequestHeader( "If-None-Match", jQuery.etag[ ifModifiedKey ] ); + } + } + + // Set the Accepts header for the server, depending on the dataType + jqXHR.setRequestHeader( + "Accept", + s.dataTypes[ 0 ] && s.accepts[ s.dataTypes[0] ] ? + s.accepts[ s.dataTypes[0] ] + ( s.dataTypes[ 0 ] !== "*" ? ", " + allTypes + "; q=0.01" : "" ) : + s.accepts[ "*" ] + ); + + // Check for headers option + for ( i in s.headers ) { + jqXHR.setRequestHeader( i, s.headers[ i ] ); + } + + // Allow custom headers/mimetypes and early abort + if ( s.beforeSend && ( s.beforeSend.call( callbackContext, jqXHR, s ) === false || state === 2 ) ) { + // Abort if not done already + jqXHR.abort(); + return false; + + } + + // Install callbacks on deferreds + for ( i in { success: 1, error: 1, complete: 1 } ) { + jqXHR[ i ]( s[ i ] ); + } + + // Get transport + transport = inspectPrefiltersOrTransports( transports, s, options, jqXHR ); + + // If no transport, we auto-abort + if ( !transport ) { + done( -1, "No Transport" ); + } else { + jqXHR.readyState = 1; + // Send global event + if ( fireGlobals ) { + globalEventContext.trigger( "ajaxSend", [ jqXHR, s ] ); + } + // Timeout + if ( s.async && s.timeout > 0 ) { + timeoutTimer = setTimeout( function(){ + jqXHR.abort( "timeout" ); + }, s.timeout ); + } + + try { + state = 1; + transport.send( requestHeaders, done ); + } catch (e) { + // Propagate exception as error if not done + if ( state < 2 ) { + done( -1, e ); + // Simply rethrow otherwise + } else { + throw e; + } + } + } + + return jqXHR; + }, + + // Serialize an array of form elements or a set of + // key/values into a query string + param: function( a, traditional ) { + var s = [], + add = function( key, value ) { + // If value is a function, 
invoke it and return its value + value = jQuery.isFunction( value ) ? value() : value; + s[ s.length ] = encodeURIComponent( key ) + "=" + encodeURIComponent( value ); + }; + + // Set traditional to true for jQuery <= 1.3.2 behavior. + if ( traditional === undefined ) { + traditional = jQuery.ajaxSettings.traditional; + } + + // If an array was passed in, assume that it is an array of form elements. + if ( jQuery.isArray( a ) || ( a.jquery && !jQuery.isPlainObject( a ) ) ) { + // Serialize the form elements + jQuery.each( a, function() { + add( this.name, this.value ); + }); + + } else { + // If traditional, encode the "old" way (the way 1.3.2 or older + // did it), otherwise encode params recursively. + for ( var prefix in a ) { + buildParams( prefix, a[ prefix ], traditional, add ); + } + } + + // Return the resulting serialization + return s.join( "&" ).replace( r20, "+" ); + } +}); + +function buildParams( prefix, obj, traditional, add ) { + if ( jQuery.isArray( obj ) ) { + // Serialize array item. + jQuery.each( obj, function( i, v ) { + if ( traditional || rbracket.test( prefix ) ) { + // Treat each array item as a scalar. + add( prefix, v ); + + } else { + // If array item is non-scalar (array or object), encode its + // numeric index to resolve deserialization ambiguity issues. + // Note that rack (as of 1.0.0) can't currently deserialize + // nested arrays properly, and attempting to do so may cause + // a server error. Possible fixes are to modify rack's + // deserialization algorithm or to provide an option or flag + // to force array serialization to be shallow. + buildParams( prefix + "[" + ( typeof v === "object" || jQuery.isArray(v) ? i : "" ) + "]", v, traditional, add ); + } + }); + + } else if ( !traditional && obj != null && typeof obj === "object" ) { + // Serialize object item. + for ( var name in obj ) { + buildParams( prefix + "[" + name + "]", obj[ name ], traditional, add ); + } + + } else { + // Serialize scalar item. 
+ add( prefix, obj ); + } +} + +// This is still on the jQuery object... for now +// Want to move this to jQuery.ajax some day +jQuery.extend({ + + // Counter for holding the number of active queries + active: 0, + + // Last-Modified header cache for next request + lastModified: {}, + etag: {} + +}); + +/* Handles responses to an ajax request: + * - sets all responseXXX fields accordingly + * - finds the right dataType (mediates between content-type and expected dataType) + * - returns the corresponding response + */ +function ajaxHandleResponses( s, jqXHR, responses ) { + + var contents = s.contents, + dataTypes = s.dataTypes, + responseFields = s.responseFields, + ct, + type, + finalDataType, + firstDataType; + + // Fill responseXXX fields + for ( type in responseFields ) { + if ( type in responses ) { + jqXHR[ responseFields[type] ] = responses[ type ]; + } + } + + // Remove auto dataType and get content-type in the process + while( dataTypes[ 0 ] === "*" ) { + dataTypes.shift(); + if ( ct === undefined ) { + ct = s.mimeType || jqXHR.getResponseHeader( "content-type" ); + } + } + + // Check if we're dealing with a known content-type + if ( ct ) { + for ( type in contents ) { + if ( contents[ type ] && contents[ type ].test( ct ) ) { + dataTypes.unshift( type ); + break; + } + } + } + + // Check to see if we have a response for the expected dataType + if ( dataTypes[ 0 ] in responses ) { + finalDataType = dataTypes[ 0 ]; + } else { + // Try convertible dataTypes + for ( type in responses ) { + if ( !dataTypes[ 0 ] || s.converters[ type + " " + dataTypes[0] ] ) { + finalDataType = type; + break; + } + if ( !firstDataType ) { + firstDataType = type; + } + } + // Or just use first one + finalDataType = finalDataType || firstDataType; + } + + // If we found a dataType + // We add the dataType to the list if needed + // and return the corresponding response + if ( finalDataType ) { + if ( finalDataType !== dataTypes[ 0 ] ) { + dataTypes.unshift( finalDataType ); + } + 
return responses[ finalDataType ]; + } +} + +// Chain conversions given the request and the original response +function ajaxConvert( s, response ) { + + // Apply the dataFilter if provided + if ( s.dataFilter ) { + response = s.dataFilter( response, s.dataType ); + } + + var dataTypes = s.dataTypes, + converters = {}, + i, + key, + length = dataTypes.length, + tmp, + // Current and previous dataTypes + current = dataTypes[ 0 ], + prev, + // Conversion expression + conversion, + // Conversion function + conv, + // Conversion functions (transitive conversion) + conv1, + conv2; + + // For each dataType in the chain + for ( i = 1; i < length; i++ ) { + + // Create converters map + // with lowercased keys + if ( i === 1 ) { + for ( key in s.converters ) { + if ( typeof key === "string" ) { + converters[ key.toLowerCase() ] = s.converters[ key ]; + } + } + } + + // Get the dataTypes + prev = current; + current = dataTypes[ i ]; + + // If current is auto dataType, update it to prev + if ( current === "*" ) { + current = prev; + // If no auto and dataTypes are actually different + } else if ( prev !== "*" && prev !== current ) { + + // Get the converter + conversion = prev + " " + current; + conv = converters[ conversion ] || converters[ "* " + current ]; + + // If there is no direct converter, search transitively + if ( !conv ) { + conv2 = undefined; + for ( conv1 in converters ) { + tmp = conv1.split( " " ); + if ( tmp[ 0 ] === prev || tmp[ 0 ] === "*" ) { + conv2 = converters[ tmp[1] + " " + current ]; + if ( conv2 ) { + conv1 = converters[ conv1 ]; + if ( conv1 === true ) { + conv = conv2; + } else if ( conv2 === true ) { + conv = conv1; + } + break; + } + } + } + } + // If we found no converter, dispatch an error + if ( !( conv || conv2 ) ) { + jQuery.error( "No conversion from " + conversion.replace(" "," to ") ); + } + // If found converter is not an equivalence + if ( conv !== true ) { + // Convert with 1 or 2 converters accordingly + response = conv ? 
conv( response ) : conv2( conv1(response) ); + } + } + } + return response; +} + + + + +var jsc = jQuery.now(), + jsre = /(\=)\?(&|$)|\?\?/i; + +// Default jsonp settings +jQuery.ajaxSetup({ + jsonp: "callback", + jsonpCallback: function() { + return jQuery.expando + "_" + ( jsc++ ); + } +}); + +// Detect, normalize options and install callbacks for jsonp requests +jQuery.ajaxPrefilter( "json jsonp", function( s, originalSettings, jqXHR ) { + + var inspectData = s.contentType === "application/x-www-form-urlencoded" && + ( typeof s.data === "string" ); + + if ( s.dataTypes[ 0 ] === "jsonp" || + s.jsonp !== false && ( jsre.test( s.url ) || + inspectData && jsre.test( s.data ) ) ) { + + var responseContainer, + jsonpCallback = s.jsonpCallback = + jQuery.isFunction( s.jsonpCallback ) ? s.jsonpCallback() : s.jsonpCallback, + previous = window[ jsonpCallback ], + url = s.url, + data = s.data, + replace = "$1" + jsonpCallback + "$2"; + + if ( s.jsonp !== false ) { + url = url.replace( jsre, replace ); + if ( s.url === url ) { + if ( inspectData ) { + data = data.replace( jsre, replace ); + } + if ( s.data === data ) { + // Add callback manually + url += (/\?/.test( url ) ? 
"&" : "?") + s.jsonp + "=" + jsonpCallback; + } + } + } + + s.url = url; + s.data = data; + + // Install callback + window[ jsonpCallback ] = function( response ) { + responseContainer = [ response ]; + }; + + // Clean-up function + jqXHR.always(function() { + // Set callback back to previous value + window[ jsonpCallback ] = previous; + // Call if it was a function and we have a response + if ( responseContainer && jQuery.isFunction( previous ) ) { + window[ jsonpCallback ]( responseContainer[ 0 ] ); + } + }); + + // Use data converter to retrieve json after script execution + s.converters["script json"] = function() { + if ( !responseContainer ) { + jQuery.error( jsonpCallback + " was not called" ); + } + return responseContainer[ 0 ]; + }; + + // force json dataType + s.dataTypes[ 0 ] = "json"; + + // Delegate to script + return "script"; + } +}); + + + + +// Install script dataType +jQuery.ajaxSetup({ + accepts: { + script: "text/javascript, application/javascript, application/ecmascript, application/x-ecmascript" + }, + contents: { + script: /javascript|ecmascript/ + }, + converters: { + "text script": function( text ) { + jQuery.globalEval( text ); + return text; + } + } +}); + +// Handle cache's special case and global +jQuery.ajaxPrefilter( "script", function( s ) { + if ( s.cache === undefined ) { + s.cache = false; + } + if ( s.crossDomain ) { + s.type = "GET"; + s.global = false; + } +}); + +// Bind script tag hack transport +jQuery.ajaxTransport( "script", function(s) { + + // This transport only deals with cross domain requests + if ( s.crossDomain ) { + + var script, + head = document.head || document.getElementsByTagName( "head" )[0] || document.documentElement; + + return { + + send: function( _, callback ) { + + script = document.createElement( "script" ); + + script.async = "async"; + + if ( s.scriptCharset ) { + script.charset = s.scriptCharset; + } + + script.src = s.url; + + // Attach handlers for all browsers + script.onload = 
script.onreadystatechange = function( _, isAbort ) { + + if ( isAbort || !script.readyState || /loaded|complete/.test( script.readyState ) ) { + + // Handle memory leak in IE + script.onload = script.onreadystatechange = null; + + // Remove the script + if ( head && script.parentNode ) { + head.removeChild( script ); + } + + // Dereference the script + script = undefined; + + // Callback if not abort + if ( !isAbort ) { + callback( 200, "success" ); + } + } + }; + // Use insertBefore instead of appendChild to circumvent an IE6 bug. + // This arises when a base node is used (#2709 and #4378). + head.insertBefore( script, head.firstChild ); + }, + + abort: function() { + if ( script ) { + script.onload( 0, 1 ); + } + } + }; + } +}); + + + + +var // #5280: Internet Explorer will keep connections alive if we don't abort on unload + xhrOnUnloadAbort = window.ActiveXObject ? function() { + // Abort all pending requests + for ( var key in xhrCallbacks ) { + xhrCallbacks[ key ]( 0, 1 ); + } + } : false, + xhrId = 0, + xhrCallbacks; + +// Functions to create xhrs +function createStandardXHR() { + try { + return new window.XMLHttpRequest(); + } catch( e ) {} +} + +function createActiveXHR() { + try { + return new window.ActiveXObject( "Microsoft.XMLHTTP" ); + } catch( e ) {} +} + +// Create the request object +// (This is still attached to ajaxSettings for backward compatibility) +jQuery.ajaxSettings.xhr = window.ActiveXObject ? + /* Microsoft failed to properly + * implement the XMLHttpRequest in IE7 (can't request local files), + * so we use the ActiveXObject when it is available + * Additionally XMLHttpRequest can be disabled in IE7/IE8 so + * we need a fallback. 
+ */ + function() { + return !this.isLocal && createStandardXHR() || createActiveXHR(); + } : + // For all other browsers, use the standard XMLHttpRequest object + createStandardXHR; + +// Determine support properties +(function( xhr ) { + jQuery.extend( jQuery.support, { + ajax: !!xhr, + cors: !!xhr && ( "withCredentials" in xhr ) + }); +})( jQuery.ajaxSettings.xhr() ); + +// Create transport if the browser can provide an xhr +if ( jQuery.support.ajax ) { + + jQuery.ajaxTransport(function( s ) { + // Cross domain only allowed if supported through XMLHttpRequest + if ( !s.crossDomain || jQuery.support.cors ) { + + var callback; + + return { + send: function( headers, complete ) { + + // Get a new xhr + var xhr = s.xhr(), + handle, + i; + + // Open the socket + // Passing null username, generates a login popup on Opera (#2865) + if ( s.username ) { + xhr.open( s.type, s.url, s.async, s.username, s.password ); + } else { + xhr.open( s.type, s.url, s.async ); + } + + // Apply custom fields if provided + if ( s.xhrFields ) { + for ( i in s.xhrFields ) { + xhr[ i ] = s.xhrFields[ i ]; + } + } + + // Override mime type if needed + if ( s.mimeType && xhr.overrideMimeType ) { + xhr.overrideMimeType( s.mimeType ); + } + + // X-Requested-With header + // For cross-domain requests, seeing as conditions for a preflight are + // akin to a jigsaw puzzle, we simply never set it to be sure. + // (it can always be set on a per-request basis or even using ajaxSetup) + // For same-domain requests, won't change header if already provided. 
+ if ( !s.crossDomain && !headers["X-Requested-With"] ) { + headers[ "X-Requested-With" ] = "XMLHttpRequest"; + } + + // Need an extra try/catch for cross domain requests in Firefox 3 + try { + for ( i in headers ) { + xhr.setRequestHeader( i, headers[ i ] ); + } + } catch( _ ) {} + + // Do send the request + // This may raise an exception which is actually + // handled in jQuery.ajax (so no try/catch here) + xhr.send( ( s.hasContent && s.data ) || null ); + + // Listener + callback = function( _, isAbort ) { + + var status, + statusText, + responseHeaders, + responses, + xml; + + // Firefox throws exceptions when accessing properties + // of an xhr when a network error occured + // http://helpful.knobs-dials.com/index.php/Component_returned_failure_code:_0x80040111_(NS_ERROR_NOT_AVAILABLE) + try { + + // Was never called and is aborted or complete + if ( callback && ( isAbort || xhr.readyState === 4 ) ) { + + // Only called once + callback = undefined; + + // Do not keep as active anymore + if ( handle ) { + xhr.onreadystatechange = jQuery.noop; + if ( xhrOnUnloadAbort ) { + delete xhrCallbacks[ handle ]; + } + } + + // If it's an abort + if ( isAbort ) { + // Abort it manually if needed + if ( xhr.readyState !== 4 ) { + xhr.abort(); + } + } else { + status = xhr.status; + responseHeaders = xhr.getAllResponseHeaders(); + responses = {}; + xml = xhr.responseXML; + + // Construct response list + if ( xml && xml.documentElement /* #4958 */ ) { + responses.xml = xml; + } + responses.text = xhr.responseText; + + // Firefox throws an exception when accessing + // statusText for faulty cross-domain requests + try { + statusText = xhr.statusText; + } catch( e ) { + // We normalize with Webkit giving an empty statusText + statusText = ""; + } + + // Filter status for non standard behaviors + + // If the request is local and we have data: assume a success + // (success with no data won't get notified, that's the best we + // can do given current implementations) + if ( 
!status && s.isLocal && !s.crossDomain ) { + status = responses.text ? 200 : 404; + // IE - #1450: sometimes returns 1223 when it should be 204 + } else if ( status === 1223 ) { + status = 204; + } + } + } + } catch( firefoxAccessException ) { + if ( !isAbort ) { + complete( -1, firefoxAccessException ); + } + } + + // Call complete if needed + if ( responses ) { + complete( status, statusText, responses, responseHeaders ); + } + }; + + // if we're in sync mode or it's in cache + // and has been retrieved directly (IE6 & IE7) + // we need to manually fire the callback + if ( !s.async || xhr.readyState === 4 ) { + callback(); + } else { + handle = ++xhrId; + if ( xhrOnUnloadAbort ) { + // Create the active xhrs callbacks list if needed + // and attach the unload handler + if ( !xhrCallbacks ) { + xhrCallbacks = {}; + jQuery( window ).unload( xhrOnUnloadAbort ); + } + // Add to list of active xhrs callbacks + xhrCallbacks[ handle ] = callback; + } + xhr.onreadystatechange = callback; + } + }, + + abort: function() { + if ( callback ) { + callback(0,1); + } + } + }; + } + }); +} + + + + +var elemdisplay = {}, + iframe, iframeDoc, + rfxtypes = /^(?:toggle|show|hide)$/, + rfxnum = /^([+\-]=)?([\d+.\-]+)([a-z%]*)$/i, + timerId, + fxAttrs = [ + // height animations + [ "height", "marginTop", "marginBottom", "paddingTop", "paddingBottom" ], + // width animations + [ "width", "marginLeft", "marginRight", "paddingLeft", "paddingRight" ], + // opacity animations + [ "opacity" ] + ], + fxNow; + +jQuery.fn.extend({ + show: function( speed, easing, callback ) { + var elem, display; + + if ( speed || speed === 0 ) { + return this.animate( genFx("show", 3), speed, easing, callback ); + + } else { + for ( var i = 0, j = this.length; i < j; i++ ) { + elem = this[ i ]; + + if ( elem.style ) { + display = elem.style.display; + + // Reset the inline display of this element to learn if it is + // being hidden by cascaded rules or not + if ( !jQuery._data(elem, "olddisplay") && display 
=== "none" ) { + display = elem.style.display = ""; + } + + // Set elements which have been overridden with display: none + // in a stylesheet to whatever the default browser style is + // for such an element + if ( display === "" && jQuery.css(elem, "display") === "none" ) { + jQuery._data( elem, "olddisplay", defaultDisplay(elem.nodeName) ); + } + } + } + + // Set the display of most of the elements in a second loop + // to avoid the constant reflow + for ( i = 0; i < j; i++ ) { + elem = this[ i ]; + + if ( elem.style ) { + display = elem.style.display; + + if ( display === "" || display === "none" ) { + elem.style.display = jQuery._data( elem, "olddisplay" ) || ""; + } + } + } + + return this; + } + }, + + hide: function( speed, easing, callback ) { + if ( speed || speed === 0 ) { + return this.animate( genFx("hide", 3), speed, easing, callback); + + } else { + var elem, display, + i = 0, + j = this.length; + + for ( ; i < j; i++ ) { + elem = this[i]; + if ( elem.style ) { + display = jQuery.css( elem, "display" ); + + if ( display !== "none" && !jQuery._data( elem, "olddisplay" ) ) { + jQuery._data( elem, "olddisplay", display ); + } + } + } + + // Set the display of the elements in a second loop + // to avoid the constant reflow + for ( i = 0; i < j; i++ ) { + if ( this[i].style ) { + this[i].style.display = "none"; + } + } + + return this; + } + }, + + // Save the old toggle function + _toggle: jQuery.fn.toggle, + + toggle: function( fn, fn2, callback ) { + var bool = typeof fn === "boolean"; + + if ( jQuery.isFunction(fn) && jQuery.isFunction(fn2) ) { + this._toggle.apply( this, arguments ); + + } else if ( fn == null || bool ) { + this.each(function() { + var state = bool ? fn : jQuery(this).is(":hidden"); + jQuery(this)[ state ? 
"show" : "hide" ](); + }); + + } else { + this.animate(genFx("toggle", 3), fn, fn2, callback); + } + + return this; + }, + + fadeTo: function( speed, to, easing, callback ) { + return this.filter(":hidden").css("opacity", 0).show().end() + .animate({opacity: to}, speed, easing, callback); + }, + + animate: function( prop, speed, easing, callback ) { + var optall = jQuery.speed( speed, easing, callback ); + + if ( jQuery.isEmptyObject( prop ) ) { + return this.each( optall.complete, [ false ] ); + } + + // Do not change referenced properties as per-property easing will be lost + prop = jQuery.extend( {}, prop ); + + function doAnimation() { + // XXX 'this' does not always have a nodeName when running the + // test suite + + if ( optall.queue === false ) { + jQuery._mark( this ); + } + + var opt = jQuery.extend( {}, optall ), + isElement = this.nodeType === 1, + hidden = isElement && jQuery(this).is(":hidden"), + name, val, p, e, + parts, start, end, unit, + method; + + // will store per property easing and be used to determine when an animation is complete + opt.animatedProperties = {}; + + for ( p in prop ) { + + // property name normalization + name = jQuery.camelCase( p ); + if ( p !== name ) { + prop[ name ] = prop[ p ]; + delete prop[ p ]; + } + + val = prop[ name ]; + + // easing resolution: per property > opt.specialEasing > opt.easing > 'swing' (default) + if ( jQuery.isArray( val ) ) { + opt.animatedProperties[ name ] = val[ 1 ]; + val = prop[ name ] = val[ 0 ]; + } else { + opt.animatedProperties[ name ] = opt.specialEasing && opt.specialEasing[ name ] || opt.easing || 'swing'; + } + + if ( val === "hide" && hidden || val === "show" && !hidden ) { + return opt.complete.call( this ); + } + + if ( isElement && ( name === "height" || name === "width" ) ) { + // Make sure that nothing sneaks out + // Record all 3 overflow attributes because IE does not + // change the overflow attribute when overflowX and + // overflowY are set to the same value + opt.overflow 
= [ this.style.overflow, this.style.overflowX, this.style.overflowY ]; + + // Set display property to inline-block for height/width + // animations on inline elements that are having width/height animated + if ( jQuery.css( this, "display" ) === "inline" && + jQuery.css( this, "float" ) === "none" ) { + + // inline-level elements accept inline-block; + // block-level elements need to be inline with layout + if ( !jQuery.support.inlineBlockNeedsLayout || defaultDisplay( this.nodeName ) === "inline" ) { + this.style.display = "inline-block"; + + } else { + this.style.zoom = 1; + } + } + } + } + + if ( opt.overflow != null ) { + this.style.overflow = "hidden"; + } + + for ( p in prop ) { + e = new jQuery.fx( this, opt, p ); + val = prop[ p ]; + + if ( rfxtypes.test( val ) ) { + + // Tracks whether to show or hide based on private + // data attached to the element + method = jQuery._data( this, "toggle" + p ) || ( val === "toggle" ? hidden ? "show" : "hide" : 0 ); + if ( method ) { + jQuery._data( this, "toggle" + p, method === "show" ? "hide" : "show" ); + e[ method ](); + } else { + e[ val ](); + } + + } else { + parts = rfxnum.exec( val ); + start = e.cur(); + + if ( parts ) { + end = parseFloat( parts[2] ); + unit = parts[3] || ( jQuery.cssNumber[ p ] ? "" : "px" ); + + // We need to compute starting value + if ( unit !== "px" ) { + jQuery.style( this, p, (end || 1) + unit); + start = ( (end || 1) / e.cur() ) * start; + jQuery.style( this, p, start + unit); + } + + // If a +=/-= token was provided, we're doing a relative animation + if ( parts[1] ) { + end = ( (parts[ 1 ] === "-=" ? -1 : 1) * end ) + start; + } + + e.custom( start, end, unit ); + + } else { + e.custom( start, val, "" ); + } + } + } + + // For JS strict compliance + return true; + } + + return optall.queue === false ? 
+ this.each( doAnimation ) : + this.queue( optall.queue, doAnimation ); + }, + + stop: function( type, clearQueue, gotoEnd ) { + if ( typeof type !== "string" ) { + gotoEnd = clearQueue; + clearQueue = type; + type = undefined; + } + if ( clearQueue && type !== false ) { + this.queue( type || "fx", [] ); + } + + return this.each(function() { + var index, + hadTimers = false, + timers = jQuery.timers, + data = jQuery._data( this ); + + // clear marker counters if we know they won't be + if ( !gotoEnd ) { + jQuery._unmark( true, this ); + } + + function stopQueue( elem, data, index ) { + var hooks = data[ index ]; + jQuery.removeData( elem, index, true ); + hooks.stop( gotoEnd ); + } + + if ( type == null ) { + for ( index in data ) { + if ( data[ index ] && data[ index ].stop && index.indexOf(".run") === index.length - 4 ) { + stopQueue( this, data, index ); + } + } + } else if ( data[ index = type + ".run" ] && data[ index ].stop ){ + stopQueue( this, data, index ); + } + + for ( index = timers.length; index--; ) { + if ( timers[ index ].elem === this && (type == null || timers[ index ].queue === type) ) { + if ( gotoEnd ) { + + // force the next step to be the last + timers[ index ]( true ); + } else { + timers[ index ].saveState(); + } + hadTimers = true; + timers.splice( index, 1 ); + } + } + + // start the next in the queue if the last step wasn't forced + // timers currently will call their complete callbacks, which will dequeue + // but only if they were gotoEnd + if ( !( gotoEnd && hadTimers ) ) { + jQuery.dequeue( this, type ); + } + }); + } + +}); + +// Animations created synchronously will run synchronously +function createFxNow() { + setTimeout( clearFxNow, 0 ); + return ( fxNow = jQuery.now() ); +} + +function clearFxNow() { + fxNow = undefined; +} + +// Generate parameters to create a standard animation +function genFx( type, num ) { + var obj = {}; + + jQuery.each( fxAttrs.concat.apply([], fxAttrs.slice( 0, num )), function() { + obj[ this ] = type; + 
}); + + return obj; +} + +// Generate shortcuts for custom animations +jQuery.each({ + slideDown: genFx( "show", 1 ), + slideUp: genFx( "hide", 1 ), + slideToggle: genFx( "toggle", 1 ), + fadeIn: { opacity: "show" }, + fadeOut: { opacity: "hide" }, + fadeToggle: { opacity: "toggle" } +}, function( name, props ) { + jQuery.fn[ name ] = function( speed, easing, callback ) { + return this.animate( props, speed, easing, callback ); + }; +}); + +jQuery.extend({ + speed: function( speed, easing, fn ) { + var opt = speed && typeof speed === "object" ? jQuery.extend( {}, speed ) : { + complete: fn || !fn && easing || + jQuery.isFunction( speed ) && speed, + duration: speed, + easing: fn && easing || easing && !jQuery.isFunction( easing ) && easing + }; + + opt.duration = jQuery.fx.off ? 0 : typeof opt.duration === "number" ? opt.duration : + opt.duration in jQuery.fx.speeds ? jQuery.fx.speeds[ opt.duration ] : jQuery.fx.speeds._default; + + // normalize opt.queue - true/undefined/null -> "fx" + if ( opt.queue == null || opt.queue === true ) { + opt.queue = "fx"; + } + + // Queueing + opt.old = opt.complete; + + opt.complete = function( noUnmark ) { + if ( jQuery.isFunction( opt.old ) ) { + opt.old.call( this ); + } + + if ( opt.queue ) { + jQuery.dequeue( this, opt.queue ); + } else if ( noUnmark !== false ) { + jQuery._unmark( this ); + } + }; + + return opt; + }, + + easing: { + linear: function( p, n, firstNum, diff ) { + return firstNum + diff * p; + }, + swing: function( p, n, firstNum, diff ) { + return ( ( -Math.cos( p*Math.PI ) / 2 ) + 0.5 ) * diff + firstNum; + } + }, + + timers: [], + + fx: function( elem, options, prop ) { + this.options = options; + this.elem = elem; + this.prop = prop; + + options.orig = options.orig || {}; + } + +}); + +jQuery.fx.prototype = { + // Simple function for setting a style value + update: function() { + if ( this.options.step ) { + this.options.step.call( this.elem, this.now, this ); + } + + ( jQuery.fx.step[ this.prop ] || 
jQuery.fx.step._default )( this ); + }, + + // Get the current size + cur: function() { + if ( this.elem[ this.prop ] != null && (!this.elem.style || this.elem.style[ this.prop ] == null) ) { + return this.elem[ this.prop ]; + } + + var parsed, + r = jQuery.css( this.elem, this.prop ); + // Empty strings, null, undefined and "auto" are converted to 0, + // complex values such as "rotate(1rad)" are returned as is, + // simple values such as "10px" are parsed to Float. + return isNaN( parsed = parseFloat( r ) ) ? !r || r === "auto" ? 0 : r : parsed; + }, + + // Start an animation from one number to another + custom: function( from, to, unit ) { + var self = this, + fx = jQuery.fx; + + this.startTime = fxNow || createFxNow(); + this.end = to; + this.now = this.start = from; + this.pos = this.state = 0; + this.unit = unit || this.unit || ( jQuery.cssNumber[ this.prop ] ? "" : "px" ); + + function t( gotoEnd ) { + return self.step( gotoEnd ); + } + + t.queue = this.options.queue; + t.elem = this.elem; + t.saveState = function() { + if ( self.options.hide && jQuery._data( self.elem, "fxshow" + self.prop ) === undefined ) { + jQuery._data( self.elem, "fxshow" + self.prop, self.start ); + } + }; + + if ( t() && jQuery.timers.push(t) && !timerId ) { + timerId = setInterval( fx.tick, fx.interval ); + } + }, + + // Simple 'show' function + show: function() { + var dataShow = jQuery._data( this.elem, "fxshow" + this.prop ); + + // Remember where we started, so that we can go back to it later + this.options.orig[ this.prop ] = dataShow || jQuery.style( this.elem, this.prop ); + this.options.show = true; + + // Begin the animation + // Make sure that we start at a small width/height to avoid any flash of content + if ( dataShow !== undefined ) { + // This show is picking up where a previous hide or show left off + this.custom( this.cur(), dataShow ); + } else { + this.custom( this.prop === "width" || this.prop === "height" ? 
1 : 0, this.cur() ); + } + + // Start by showing the element + jQuery( this.elem ).show(); + }, + + // Simple 'hide' function + hide: function() { + // Remember where we started, so that we can go back to it later + this.options.orig[ this.prop ] = jQuery._data( this.elem, "fxshow" + this.prop ) || jQuery.style( this.elem, this.prop ); + this.options.hide = true; + + // Begin the animation + this.custom( this.cur(), 0 ); + }, + + // Each step of an animation + step: function( gotoEnd ) { + var p, n, complete, + t = fxNow || createFxNow(), + done = true, + elem = this.elem, + options = this.options; + + if ( gotoEnd || t >= options.duration + this.startTime ) { + this.now = this.end; + this.pos = this.state = 1; + this.update(); + + options.animatedProperties[ this.prop ] = true; + + for ( p in options.animatedProperties ) { + if ( options.animatedProperties[ p ] !== true ) { + done = false; + } + } + + if ( done ) { + // Reset the overflow + if ( options.overflow != null && !jQuery.support.shrinkWrapBlocks ) { + + jQuery.each( [ "", "X", "Y" ], function( index, value ) { + elem.style[ "overflow" + value ] = options.overflow[ index ]; + }); + } + + // Hide the element if the "hide" operation was done + if ( options.hide ) { + jQuery( elem ).hide(); + } + + // Reset the properties, if the item has been hidden or shown + if ( options.hide || options.show ) { + for ( p in options.animatedProperties ) { + jQuery.style( elem, p, options.orig[ p ] ); + jQuery.removeData( elem, "fxshow" + p, true ); + // Toggle data is no longer needed + jQuery.removeData( elem, "toggle" + p, true ); + } + } + + // Execute the complete function + // in the event that the complete function throws an exception + // we must ensure it won't be called twice. 
#5684 + + complete = options.complete; + if ( complete ) { + + options.complete = false; + complete.call( elem ); + } + } + + return false; + + } else { + // classical easing cannot be used with an Infinity duration + if ( options.duration == Infinity ) { + this.now = t; + } else { + n = t - this.startTime; + this.state = n / options.duration; + + // Perform the easing function, defaults to swing + this.pos = jQuery.easing[ options.animatedProperties[this.prop] ]( this.state, n, 0, 1, options.duration ); + this.now = this.start + ( (this.end - this.start) * this.pos ); + } + // Perform the next step of the animation + this.update(); + } + + return true; + } +}; + +jQuery.extend( jQuery.fx, { + tick: function() { + var timer, + timers = jQuery.timers, + i = 0; + + for ( ; i < timers.length; i++ ) { + timer = timers[ i ]; + // Checks the timer has not already been removed + if ( !timer() && timers[ i ] === timer ) { + timers.splice( i--, 1 ); + } + } + + if ( !timers.length ) { + jQuery.fx.stop(); + } + }, + + interval: 13, + + stop: function() { + clearInterval( timerId ); + timerId = null; + }, + + speeds: { + slow: 600, + fast: 200, + // Default speed + _default: 400 + }, + + step: { + opacity: function( fx ) { + jQuery.style( fx.elem, "opacity", fx.now ); + }, + + _default: function( fx ) { + if ( fx.elem.style && fx.elem.style[ fx.prop ] != null ) { + fx.elem.style[ fx.prop ] = fx.now + fx.unit; + } else { + fx.elem[ fx.prop ] = fx.now; + } + } + } +}); + +// Adds width/height step functions +// Do not set anything below 0 +jQuery.each([ "width", "height" ], function( i, prop ) { + jQuery.fx.step[ prop ] = function( fx ) { + jQuery.style( fx.elem, prop, Math.max(0, fx.now) + fx.unit ); + }; +}); + +if ( jQuery.expr && jQuery.expr.filters ) { + jQuery.expr.filters.animated = function( elem ) { + return jQuery.grep(jQuery.timers, function( fn ) { + return elem === fn.elem; + }).length; + }; +} + +// Try to restore the default display value of an element +function 
defaultDisplay( nodeName ) { + + if ( !elemdisplay[ nodeName ] ) { + + var body = document.body, + elem = jQuery( "<" + nodeName + ">" ).appendTo( body ), + display = elem.css( "display" ); + elem.remove(); + + // If the simple way fails, + // get element's real default display by attaching it to a temp iframe + if ( display === "none" || display === "" ) { + // No iframe to use yet, so create it + if ( !iframe ) { + iframe = document.createElement( "iframe" ); + iframe.frameBorder = iframe.width = iframe.height = 0; + } + + body.appendChild( iframe ); + + // Create a cacheable copy of the iframe document on first call. + // IE and Opera will allow us to reuse the iframeDoc without re-writing the fake HTML + // document to it; WebKit & Firefox won't allow reusing the iframe document. + if ( !iframeDoc || !iframe.createElement ) { + iframeDoc = ( iframe.contentWindow || iframe.contentDocument ).document; + iframeDoc.write( ( document.compatMode === "CSS1Compat" ? "<!doctype html>" : "" ) + "<html><body>" ); + iframeDoc.close(); + } + + elem = iframeDoc.createElement( nodeName ); + + iframeDoc.body.appendChild( elem ); + + display = jQuery.css( elem, "display" ); + body.removeChild( iframe ); + } + + // Store the correct default display + elemdisplay[ nodeName ] = display; + } + + return elemdisplay[ nodeName ]; +} + + + + +var rtable = /^t(?:able|d|h)$/i, + rroot = /^(?:body|html)$/i; + +if ( "getBoundingClientRect" in document.documentElement ) { + jQuery.fn.offset = function( options ) { + var elem = this[0], box; + + if ( options ) { + return this.each(function( i ) { + jQuery.offset.setOffset( this, options, i ); + }); + } + + if ( !elem || !elem.ownerDocument ) { + return null; + } + + if ( elem === elem.ownerDocument.body ) { + return jQuery.offset.bodyOffset( elem ); + } + + try { + box = elem.getBoundingClientRect(); + } catch(e) {} + + var doc = elem.ownerDocument, + docElem = doc.documentElement; + + // Make sure we're not dealing with a disconnected DOM 
node + if ( !box || !jQuery.contains( docElem, elem ) ) { + return box ? { top: box.top, left: box.left } : { top: 0, left: 0 }; + } + + var body = doc.body, + win = getWindow(doc), + clientTop = docElem.clientTop || body.clientTop || 0, + clientLeft = docElem.clientLeft || body.clientLeft || 0, + scrollTop = win.pageYOffset || jQuery.support.boxModel && docElem.scrollTop || body.scrollTop, + scrollLeft = win.pageXOffset || jQuery.support.boxModel && docElem.scrollLeft || body.scrollLeft, + top = box.top + scrollTop - clientTop, + left = box.left + scrollLeft - clientLeft; + + return { top: top, left: left }; + }; + +} else { + jQuery.fn.offset = function( options ) { + var elem = this[0]; + + if ( options ) { + return this.each(function( i ) { + jQuery.offset.setOffset( this, options, i ); + }); + } + + if ( !elem || !elem.ownerDocument ) { + return null; + } + + if ( elem === elem.ownerDocument.body ) { + return jQuery.offset.bodyOffset( elem ); + } + + var computedStyle, + offsetParent = elem.offsetParent, + prevOffsetParent = elem, + doc = elem.ownerDocument, + docElem = doc.documentElement, + body = doc.body, + defaultView = doc.defaultView, + prevComputedStyle = defaultView ? defaultView.getComputedStyle( elem, null ) : elem.currentStyle, + top = elem.offsetTop, + left = elem.offsetLeft; + + while ( (elem = elem.parentNode) && elem !== body && elem !== docElem ) { + if ( jQuery.support.fixedPosition && prevComputedStyle.position === "fixed" ) { + break; + } + + computedStyle = defaultView ? 
defaultView.getComputedStyle(elem, null) : elem.currentStyle; + top -= elem.scrollTop; + left -= elem.scrollLeft; + + if ( elem === offsetParent ) { + top += elem.offsetTop; + left += elem.offsetLeft; + + if ( jQuery.support.doesNotAddBorder && !(jQuery.support.doesAddBorderForTableAndCells && rtable.test(elem.nodeName)) ) { + top += parseFloat( computedStyle.borderTopWidth ) || 0; + left += parseFloat( computedStyle.borderLeftWidth ) || 0; + } + + prevOffsetParent = offsetParent; + offsetParent = elem.offsetParent; + } + + if ( jQuery.support.subtractsBorderForOverflowNotVisible && computedStyle.overflow !== "visible" ) { + top += parseFloat( computedStyle.borderTopWidth ) || 0; + left += parseFloat( computedStyle.borderLeftWidth ) || 0; + } + + prevComputedStyle = computedStyle; + } + + if ( prevComputedStyle.position === "relative" || prevComputedStyle.position === "static" ) { + top += body.offsetTop; + left += body.offsetLeft; + } + + if ( jQuery.support.fixedPosition && prevComputedStyle.position === "fixed" ) { + top += Math.max( docElem.scrollTop, body.scrollTop ); + left += Math.max( docElem.scrollLeft, body.scrollLeft ); + } + + return { top: top, left: left }; + }; +} + +jQuery.offset = { + + bodyOffset: function( body ) { + var top = body.offsetTop, + left = body.offsetLeft; + + if ( jQuery.support.doesNotIncludeMarginInBodyOffset ) { + top += parseFloat( jQuery.css(body, "marginTop") ) || 0; + left += parseFloat( jQuery.css(body, "marginLeft") ) || 0; + } + + return { top: top, left: left }; + }, + + setOffset: function( elem, options, i ) { + var position = jQuery.css( elem, "position" ); + + // set position first, in-case top/left are set even on static elem + if ( position === "static" ) { + elem.style.position = "relative"; + } + + var curElem = jQuery( elem ), + curOffset = curElem.offset(), + curCSSTop = jQuery.css( elem, "top" ), + curCSSLeft = jQuery.css( elem, "left" ), + calculatePosition = ( position === "absolute" || position === "fixed" ) 
&& jQuery.inArray("auto", [curCSSTop, curCSSLeft]) > -1, + props = {}, curPosition = {}, curTop, curLeft; + + // need to be able to calculate position if either top or left is auto and position is either absolute or fixed + if ( calculatePosition ) { + curPosition = curElem.position(); + curTop = curPosition.top; + curLeft = curPosition.left; + } else { + curTop = parseFloat( curCSSTop ) || 0; + curLeft = parseFloat( curCSSLeft ) || 0; + } + + if ( jQuery.isFunction( options ) ) { + options = options.call( elem, i, curOffset ); + } + + if ( options.top != null ) { + props.top = ( options.top - curOffset.top ) + curTop; + } + if ( options.left != null ) { + props.left = ( options.left - curOffset.left ) + curLeft; + } + + if ( "using" in options ) { + options.using.call( elem, props ); + } else { + curElem.css( props ); + } + } +}; + + +jQuery.fn.extend({ + + position: function() { + if ( !this[0] ) { + return null; + } + + var elem = this[0], + + // Get *real* offsetParent + offsetParent = this.offsetParent(), + + // Get correct offsets + offset = this.offset(), + parentOffset = rroot.test(offsetParent[0].nodeName) ? 
{ top: 0, left: 0 } : offsetParent.offset(); + + // Subtract element margins + // note: when an element has margin: auto the offsetLeft and marginLeft + // are the same in Safari causing offset.left to incorrectly be 0 + offset.top -= parseFloat( jQuery.css(elem, "marginTop") ) || 0; + offset.left -= parseFloat( jQuery.css(elem, "marginLeft") ) || 0; + + // Add offsetParent borders + parentOffset.top += parseFloat( jQuery.css(offsetParent[0], "borderTopWidth") ) || 0; + parentOffset.left += parseFloat( jQuery.css(offsetParent[0], "borderLeftWidth") ) || 0; + + // Subtract the two offsets + return { + top: offset.top - parentOffset.top, + left: offset.left - parentOffset.left + }; + }, + + offsetParent: function() { + return this.map(function() { + var offsetParent = this.offsetParent || document.body; + while ( offsetParent && (!rroot.test(offsetParent.nodeName) && jQuery.css(offsetParent, "position") === "static") ) { + offsetParent = offsetParent.offsetParent; + } + return offsetParent; + }); + } +}); + + +// Create scrollLeft and scrollTop methods +jQuery.each( ["Left", "Top"], function( i, name ) { + var method = "scroll" + name; + + jQuery.fn[ method ] = function( val ) { + var elem, win; + + if ( val === undefined ) { + elem = this[ 0 ]; + + if ( !elem ) { + return null; + } + + win = getWindow( elem ); + + // Return the scroll offset + return win ? ("pageXOffset" in win) ? win[ i ? "pageYOffset" : "pageXOffset" ] : + jQuery.support.boxModel && win.document.documentElement[ method ] || + win.document.body[ method ] : + elem[ method ]; + } + + // Set the scroll offset + return this.each(function() { + win = getWindow( this ); + + if ( win ) { + win.scrollTo( + !i ? val : jQuery( win ).scrollLeft(), + i ? val : jQuery( win ).scrollTop() + ); + + } else { + this[ method ] = val; + } + }); + }; +}); + +function getWindow( elem ) { + return jQuery.isWindow( elem ) ? + elem : + elem.nodeType === 9 ? 
+ elem.defaultView || elem.parentWindow : + false; +} + + + + +// Create width, height, innerHeight, innerWidth, outerHeight and outerWidth methods +jQuery.each([ "Height", "Width" ], function( i, name ) { + + var type = name.toLowerCase(); + + // innerHeight and innerWidth + jQuery.fn[ "inner" + name ] = function() { + var elem = this[0]; + return elem ? + elem.style ? + parseFloat( jQuery.css( elem, type, "padding" ) ) : + this[ type ]() : + null; + }; + + // outerHeight and outerWidth + jQuery.fn[ "outer" + name ] = function( margin ) { + var elem = this[0]; + return elem ? + elem.style ? + parseFloat( jQuery.css( elem, type, margin ? "margin" : "border" ) ) : + this[ type ]() : + null; + }; + + jQuery.fn[ type ] = function( size ) { + // Get window width or height + var elem = this[0]; + if ( !elem ) { + return size == null ? null : this; + } + + if ( jQuery.isFunction( size ) ) { + return this.each(function( i ) { + var self = jQuery( this ); + self[ type ]( size.call( this, i, self[ type ]() ) ); + }); + } + + if ( jQuery.isWindow( elem ) ) { + // Everyone else use document.documentElement or document.body depending on Quirks vs Standards mode + // 3rd condition allows Nokia support, as it supports the docElem prop but not CSS1Compat + var docElemProp = elem.document.documentElement[ "client" + name ], + body = elem.document.body; + return elem.document.compatMode === "CSS1Compat" && docElemProp || + body && body[ "client" + name ] || docElemProp; + + // Get document width or height + } else if ( elem.nodeType === 9 ) { + // Either scroll[Width/Height] or offset[Width/Height], whichever is greater + return Math.max( + elem.documentElement["client" + name], + elem.body["scroll" + name], elem.documentElement["scroll" + name], + elem.body["offset" + name], elem.documentElement["offset" + name] + ); + + // Get or set width or height on the element + } else if ( size === undefined ) { + var orig = jQuery.css( elem, type ), + ret = parseFloat( orig ); + + return 
jQuery.isNumeric( ret ) ? ret : orig; + + // Set the width or height on the element (default to pixels if value is unitless) + } else { + return this.css( type, typeof size === "string" ? size : size + "px" ); + } + }; + +}); + + + + +// Expose jQuery to the global object +window.jQuery = window.$ = jQuery; + +// Expose jQuery as an AMD module, but only for AMD loaders that +// understand the issues with loading multiple versions of jQuery +// in a page that all might call define(). The loader will indicate +// they have special allowances for multiple jQuery versions by +// specifying define.amd.jQuery = true. Register as a named module, +// since jQuery can be concatenated with other files that may use define, +// but not use a proper concatenation script that understands anonymous +// AMD modules. A named AMD is safest and most robust way to register. +// Lowercase jquery is used because AMD module names are derived from +// file names, and jQuery is normally delivered in a lowercase file name. +// Do this after creating the global so that if an AMD module wants to call +// noConflict to hide this version of jQuery, it will work. +if ( typeof define === "function" && define.amd && define.amd.jQuery ) { + define( "jquery", [], function () { return jQuery; } ); +} + + + +})( window ); diff --git a/libs/beetsplug/web/static/underscore.js b/libs/beetsplug/web/static/underscore.js new file mode 100644 index 00000000..5579c07d --- /dev/null +++ b/libs/beetsplug/web/static/underscore.js @@ -0,0 +1,977 @@ +// Underscore.js 1.2.2 +// (c) 2011 Jeremy Ashkenas, DocumentCloud Inc. +// Underscore is freely distributable under the MIT license. +// Portions of Underscore are inspired or borrowed from Prototype, +// Oliver Steele's Functional, and John Resig's Micro-Templating. 
+// For all details and documentation: +// http://documentcloud.github.com/underscore + +(function() { + + // Baseline setup + // -------------- + + // Establish the root object, `window` in the browser, or `global` on the server. + var root = this; + + // Save the previous value of the `_` variable. + var previousUnderscore = root._; + + // Establish the object that gets returned to break out of a loop iteration. + var breaker = {}; + + // Save bytes in the minified (but not gzipped) version: + var ArrayProto = Array.prototype, ObjProto = Object.prototype, FuncProto = Function.prototype; + + // Create quick reference variables for speed access to core prototypes. + var slice = ArrayProto.slice, + unshift = ArrayProto.unshift, + toString = ObjProto.toString, + hasOwnProperty = ObjProto.hasOwnProperty; + + // All **ECMAScript 5** native function implementations that we hope to use + // are declared here. + var + nativeForEach = ArrayProto.forEach, + nativeMap = ArrayProto.map, + nativeReduce = ArrayProto.reduce, + nativeReduceRight = ArrayProto.reduceRight, + nativeFilter = ArrayProto.filter, + nativeEvery = ArrayProto.every, + nativeSome = ArrayProto.some, + nativeIndexOf = ArrayProto.indexOf, + nativeLastIndexOf = ArrayProto.lastIndexOf, + nativeIsArray = Array.isArray, + nativeKeys = Object.keys, + nativeBind = FuncProto.bind; + + // Create a safe reference to the Underscore object for use below. + var _ = function(obj) { return new wrapper(obj); }; + + // Export the Underscore object for **Node.js** and **"CommonJS"**, with + // backwards-compatibility for the old `require()` API. If we're not in + // CommonJS, add `_` to the global object. + if (typeof exports !== 'undefined') { + if (typeof module !== 'undefined' && module.exports) { + exports = module.exports = _; + } + exports._ = _; + } else if (typeof define === 'function' && define.amd) { + // Register as a named module with AMD. 
+ define('underscore', function() { + return _; + }); + } else { + // Exported as a string, for Closure Compiler "advanced" mode. + root['_'] = _; + } + + // Current version. + _.VERSION = '1.2.2'; + + // Collection Functions + // -------------------- + + // The cornerstone, an `each` implementation, aka `forEach`. + // Handles objects with the built-in `forEach`, arrays, and raw objects. + // Delegates to **ECMAScript 5**'s native `forEach` if available. + var each = _.each = _.forEach = function(obj, iterator, context) { + if (obj == null) return; + if (nativeForEach && obj.forEach === nativeForEach) { + obj.forEach(iterator, context); + } else if (obj.length === +obj.length) { + for (var i = 0, l = obj.length; i < l; i++) { + if (i in obj && iterator.call(context, obj[i], i, obj) === breaker) return; + } + } else { + for (var key in obj) { + if (hasOwnProperty.call(obj, key)) { + if (iterator.call(context, obj[key], key, obj) === breaker) return; + } + } + } + }; + + // Return the results of applying the iterator to each element. + // Delegates to **ECMAScript 5**'s native `map` if available. + _.map = function(obj, iterator, context) { + var results = []; + if (obj == null) return results; + if (nativeMap && obj.map === nativeMap) return obj.map(iterator, context); + each(obj, function(value, index, list) { + results[results.length] = iterator.call(context, value, index, list); + }); + return results; + }; + + // **Reduce** builds up a single result from a list of values, aka `inject`, + // or `foldl`. Delegates to **ECMAScript 5**'s native `reduce` if available. + _.reduce = _.foldl = _.inject = function(obj, iterator, memo, context) { + var initial = memo !== void 0; + if (obj == null) obj = []; + if (nativeReduce && obj.reduce === nativeReduce) { + if (context) iterator = _.bind(iterator, context); + return initial ? 
obj.reduce(iterator, memo) : obj.reduce(iterator); + } + each(obj, function(value, index, list) { + if (!initial) { + memo = value; + initial = true; + } else { + memo = iterator.call(context, memo, value, index, list); + } + }); + if (!initial) throw new TypeError("Reduce of empty array with no initial value"); + return memo; + }; + + // The right-associative version of reduce, also known as `foldr`. + // Delegates to **ECMAScript 5**'s native `reduceRight` if available. + _.reduceRight = _.foldr = function(obj, iterator, memo, context) { + if (obj == null) obj = []; + if (nativeReduceRight && obj.reduceRight === nativeReduceRight) { + if (context) iterator = _.bind(iterator, context); + return memo !== void 0 ? obj.reduceRight(iterator, memo) : obj.reduceRight(iterator); + } + var reversed = (_.isArray(obj) ? obj.slice() : _.toArray(obj)).reverse(); + return _.reduce(reversed, iterator, memo, context); + }; + + // Return the first value which passes a truth test. Aliased as `detect`. + _.find = _.detect = function(obj, iterator, context) { + var result; + any(obj, function(value, index, list) { + if (iterator.call(context, value, index, list)) { + result = value; + return true; + } + }); + return result; + }; + + // Return all the elements that pass a truth test. + // Delegates to **ECMAScript 5**'s native `filter` if available. + // Aliased as `select`. + _.filter = _.select = function(obj, iterator, context) { + var results = []; + if (obj == null) return results; + if (nativeFilter && obj.filter === nativeFilter) return obj.filter(iterator, context); + each(obj, function(value, index, list) { + if (iterator.call(context, value, index, list)) results[results.length] = value; + }); + return results; + }; + + // Return all the elements for which a truth test fails. 
+ _.reject = function(obj, iterator, context) { + var results = []; + if (obj == null) return results; + each(obj, function(value, index, list) { + if (!iterator.call(context, value, index, list)) results[results.length] = value; + }); + return results; + }; + + // Determine whether all of the elements match a truth test. + // Delegates to **ECMAScript 5**'s native `every` if available. + // Aliased as `all`. + _.every = _.all = function(obj, iterator, context) { + var result = true; + if (obj == null) return result; + if (nativeEvery && obj.every === nativeEvery) return obj.every(iterator, context); + each(obj, function(value, index, list) { + if (!(result = result && iterator.call(context, value, index, list))) return breaker; + }); + return result; + }; + + // Determine if at least one element in the object matches a truth test. + // Delegates to **ECMAScript 5**'s native `some` if available. + // Aliased as `any`. + var any = _.some = _.any = function(obj, iterator, context) { + iterator = iterator || _.identity; + var result = false; + if (obj == null) return result; + if (nativeSome && obj.some === nativeSome) return obj.some(iterator, context); + each(obj, function(value, index, list) { + if (result || (result = iterator.call(context, value, index, list))) return breaker; + }); + return !!result; + }; + + // Determine if a given value is included in the array or object using `===`. + // Aliased as `contains`. + _.include = _.contains = function(obj, target) { + var found = false; + if (obj == null) return found; + if (nativeIndexOf && obj.indexOf === nativeIndexOf) return obj.indexOf(target) != -1; + found = any(obj, function(value) { + return value === target; + }); + return found; + }; + + // Invoke a method (with arguments) on every item in a collection. + _.invoke = function(obj, method) { + var args = slice.call(arguments, 2); + return _.map(obj, function(value) { + return (method.call ? 
method || value : value[method]).apply(value, args); + }); + }; + + // Convenience version of a common use case of `map`: fetching a property. + _.pluck = function(obj, key) { + return _.map(obj, function(value){ return value[key]; }); + }; + + // Return the maximum element or (element-based computation). + _.max = function(obj, iterator, context) { + if (!iterator && _.isArray(obj)) return Math.max.apply(Math, obj); + if (!iterator && _.isEmpty(obj)) return -Infinity; + var result = {computed : -Infinity}; + each(obj, function(value, index, list) { + var computed = iterator ? iterator.call(context, value, index, list) : value; + computed >= result.computed && (result = {value : value, computed : computed}); + }); + return result.value; + }; + + // Return the minimum element (or element-based computation). + _.min = function(obj, iterator, context) { + if (!iterator && _.isArray(obj)) return Math.min.apply(Math, obj); + if (!iterator && _.isEmpty(obj)) return Infinity; + var result = {computed : Infinity}; + each(obj, function(value, index, list) { + var computed = iterator ? iterator.call(context, value, index, list) : value; + computed < result.computed && (result = {value : value, computed : computed}); + }); + return result.value; + }; + + // Shuffle an array. + _.shuffle = function(obj) { + var shuffled = [], rand; + each(obj, function(value, index, list) { + if (index == 0) { + shuffled[0] = value; + } else { + rand = Math.floor(Math.random() * (index + 1)); + shuffled[index] = shuffled[rand]; + shuffled[rand] = value; + } + }); + return shuffled; + }; + + // Sort the object's values by a criterion produced by an iterator. + _.sortBy = function(obj, iterator, context) { + return _.pluck(_.map(obj, function(value, index, list) { + return { + value : value, + criteria : iterator.call(context, value, index, list) + }; + }).sort(function(left, right) { + var a = left.criteria, b = right.criteria; + return a < b ? -1 : a > b ? 
1 : 0; + }), 'value'); + }; + + // Groups the object's values by a criterion. Pass either a string attribute + // to group by, or a function that returns the criterion. + _.groupBy = function(obj, val) { + var result = {}; + var iterator = _.isFunction(val) ? val : function(obj) { return obj[val]; }; + each(obj, function(value, index) { + var key = iterator(value, index); + (result[key] || (result[key] = [])).push(value); + }); + return result; + }; + + // Use a comparator function to figure out at what index an object should + // be inserted so as to maintain order. Uses binary search. + _.sortedIndex = function(array, obj, iterator) { + iterator || (iterator = _.identity); + var low = 0, high = array.length; + while (low < high) { + var mid = (low + high) >> 1; + iterator(array[mid]) < iterator(obj) ? low = mid + 1 : high = mid; + } + return low; + }; + + // Safely convert anything iterable into a real, live array. + _.toArray = function(iterable) { + if (!iterable) return []; + if (iterable.toArray) return iterable.toArray(); + if (_.isArray(iterable)) return slice.call(iterable); + if (_.isArguments(iterable)) return slice.call(iterable); + return _.values(iterable); + }; + + // Return the number of elements in an object. + _.size = function(obj) { + return _.toArray(obj).length; + }; + + // Array Functions + // --------------- + + // Get the first element of an array. Passing **n** will return the first N + // values in the array. Aliased as `head`. The **guard** check allows it to work + // with `_.map`. + _.first = _.head = function(array, n, guard) { + return (n != null) && !guard ? slice.call(array, 0, n) : array[0]; + }; + + // Returns everything but the last entry of the array. Especcialy useful on + // the arguments object. Passing **n** will return all the values in + // the array, excluding the last N. The **guard** check allows it to work with + // `_.map`. 
+ _.initial = function(array, n, guard) { + return slice.call(array, 0, array.length - ((n == null) || guard ? 1 : n)); + }; + + // Get the last element of an array. Passing **n** will return the last N + // values in the array. The **guard** check allows it to work with `_.map`. + _.last = function(array, n, guard) { + if ((n != null) && !guard) { + return slice.call(array, Math.max(array.length - n, 0)); + } else { + return array[array.length - 1]; + } + }; + + // Returns everything but the first entry of the array. Aliased as `tail`. + // Especially useful on the arguments object. Passing an **index** will return + // the rest of the values in the array from that index onward. The **guard** + // check allows it to work with `_.map`. + _.rest = _.tail = function(array, index, guard) { + return slice.call(array, (index == null) || guard ? 1 : index); + }; + + // Trim out all falsy values from an array. + _.compact = function(array) { + return _.filter(array, function(value){ return !!value; }); + }; + + // Return a completely flattened version of an array. + _.flatten = function(array, shallow) { + return _.reduce(array, function(memo, value) { + if (_.isArray(value)) return memo.concat(shallow ? value : _.flatten(value)); + memo[memo.length] = value; + return memo; + }, []); + }; + + // Return a version of the array that does not contain the specified value(s). + _.without = function(array) { + return _.difference(array, slice.call(arguments, 1)); + }; + + // Produce a duplicate-free version of the array. If the array has already + // been sorted, you have the option of using a faster algorithm. + // Aliased as `unique`. + _.uniq = _.unique = function(array, isSorted, iterator) { + var initial = iterator ? _.map(array, iterator) : array; + var result = []; + _.reduce(initial, function(memo, el, i) { + if (0 == i || (isSorted === true ? 
_.last(memo) != el : !_.include(memo, el))) { + memo[memo.length] = el; + result[result.length] = array[i]; + } + return memo; + }, []); + return result; + }; + + // Produce an array that contains the union: each distinct element from all of + // the passed-in arrays. + _.union = function() { + return _.uniq(_.flatten(arguments, true)); + }; + + // Produce an array that contains every item shared between all the + // passed-in arrays. (Aliased as "intersect" for back-compat.) + _.intersection = _.intersect = function(array) { + var rest = slice.call(arguments, 1); + return _.filter(_.uniq(array), function(item) { + return _.every(rest, function(other) { + return _.indexOf(other, item) >= 0; + }); + }); + }; + + // Take the difference between one array and another. + // Only the elements present in just the first array will remain. + _.difference = function(array, other) { + return _.filter(array, function(value){ return !_.include(other, value); }); + }; + + // Zip together multiple lists into a single array -- elements that share + // an index go together. + _.zip = function() { + var args = slice.call(arguments); + var length = _.max(_.pluck(args, 'length')); + var results = new Array(length); + for (var i = 0; i < length; i++) results[i] = _.pluck(args, "" + i); + return results; + }; + + // If the browser doesn't supply us with indexOf (I'm looking at you, **MSIE**), + // we need this function. Return the position of the first occurrence of an + // item in an array, or -1 if the item is not included in the array. + // Delegates to **ECMAScript 5**'s native `indexOf` if available. + // If the array is large and already in sort order, pass `true` + // for **isSorted** to use binary search. + _.indexOf = function(array, item, isSorted) { + if (array == null) return -1; + var i, l; + if (isSorted) { + i = _.sortedIndex(array, item); + return array[i] === item ? 
i : -1; + } + if (nativeIndexOf && array.indexOf === nativeIndexOf) return array.indexOf(item); + for (i = 0, l = array.length; i < l; i++) if (array[i] === item) return i; + return -1; + }; + + // Delegates to **ECMAScript 5**'s native `lastIndexOf` if available. + _.lastIndexOf = function(array, item) { + if (array == null) return -1; + if (nativeLastIndexOf && array.lastIndexOf === nativeLastIndexOf) return array.lastIndexOf(item); + var i = array.length; + while (i--) if (array[i] === item) return i; + return -1; + }; + + // Generate an integer Array containing an arithmetic progression. A port of + // the native Python `range()` function. See + // [the Python documentation](http://docs.python.org/library/functions.html#range). + _.range = function(start, stop, step) { + if (arguments.length <= 1) { + stop = start || 0; + start = 0; + } + step = arguments[2] || 1; + + var len = Math.max(Math.ceil((stop - start) / step), 0); + var idx = 0; + var range = new Array(len); + + while(idx < len) { + range[idx++] = start; + start += step; + } + + return range; + }; + + // Function (ahem) Functions + // ------------------ + + // Reusable constructor function for prototype setting. + var ctor = function(){}; + + // Create a function bound to a given object (assigning `this`, and arguments, + // optionally). Binding with arguments is also known as `curry`. + // Delegates to **ECMAScript 5**'s native `Function.bind` if available. + // We check for `func.bind` first, to fail fast when `func` is undefined. 
+ _.bind = function bind(func, context) { + var bound, args; + if (func.bind === nativeBind && nativeBind) return nativeBind.apply(func, slice.call(arguments, 1)); + if (!_.isFunction(func)) throw new TypeError; + args = slice.call(arguments, 2); + return bound = function() { + if (!(this instanceof bound)) return func.apply(context, args.concat(slice.call(arguments))); + ctor.prototype = func.prototype; + var self = new ctor; + var result = func.apply(self, args.concat(slice.call(arguments))); + if (Object(result) === result) return result; + return self; + }; + }; + + // Bind all of an object's methods to that object. Useful for ensuring that + // all callbacks defined on an object belong to it. + _.bindAll = function(obj) { + var funcs = slice.call(arguments, 1); + if (funcs.length == 0) funcs = _.functions(obj); + each(funcs, function(f) { obj[f] = _.bind(obj[f], obj); }); + return obj; + }; + + // Memoize an expensive function by storing its results. + _.memoize = function(func, hasher) { + var memo = {}; + hasher || (hasher = _.identity); + return function() { + var key = hasher.apply(this, arguments); + return hasOwnProperty.call(memo, key) ? memo[key] : (memo[key] = func.apply(this, arguments)); + }; + }; + + // Delays a function for the given number of milliseconds, and then calls + // it with the arguments supplied. + _.delay = function(func, wait) { + var args = slice.call(arguments, 2); + return setTimeout(function(){ return func.apply(func, args); }, wait); + }; + + // Defers a function, scheduling it to run after the current call stack has + // cleared. + _.defer = function(func) { + return _.delay.apply(_, [func, 1].concat(slice.call(arguments, 1))); + }; + + // Returns a function, that, when invoked, will only be triggered at most once + // during a given window of time. 
+ _.throttle = function(func, wait) { + var context, args, timeout, throttling, more; + var whenDone = _.debounce(function(){ more = throttling = false; }, wait); + return function() { + context = this; args = arguments; + var later = function() { + timeout = null; + if (more) func.apply(context, args); + whenDone(); + }; + if (!timeout) timeout = setTimeout(later, wait); + if (throttling) { + more = true; + } else { + func.apply(context, args); + } + whenDone(); + throttling = true; + }; + }; + + // Returns a function, that, as long as it continues to be invoked, will not + // be triggered. The function will be called after it stops being called for + // N milliseconds. + _.debounce = function(func, wait) { + var timeout; + return function() { + var context = this, args = arguments; + var later = function() { + timeout = null; + func.apply(context, args); + }; + clearTimeout(timeout); + timeout = setTimeout(later, wait); + }; + }; + + // Returns a function that will be executed at most one time, no matter how + // often you call it. Useful for lazy initialization. + _.once = function(func) { + var ran = false, memo; + return function() { + if (ran) return memo; + ran = true; + return memo = func.apply(this, arguments); + }; + }; + + // Returns the first function passed as an argument to the second, + // allowing you to adjust arguments, run code before and after, and + // conditionally execute the original function. + _.wrap = function(func, wrapper) { + return function() { + var args = [func].concat(slice.call(arguments)); + return wrapper.apply(this, args); + }; + }; + + // Returns a function that is the composition of a list of functions, each + // consuming the return value of the function that follows. 
+ _.compose = function() { + var funcs = slice.call(arguments); + return function() { + var args = slice.call(arguments); + for (var i = funcs.length - 1; i >= 0; i--) { + args = [funcs[i].apply(this, args)]; + } + return args[0]; + }; + }; + + // Returns a function that will only be executed after being called N times. + _.after = function(times, func) { + if (times <= 0) return func(); + return function() { + if (--times < 1) { return func.apply(this, arguments); } + }; + }; + + // Object Functions + // ---------------- + + // Retrieve the names of an object's properties. + // Delegates to **ECMAScript 5**'s native `Object.keys` + _.keys = nativeKeys || function(obj) { + if (obj !== Object(obj)) throw new TypeError('Invalid object'); + var keys = []; + for (var key in obj) if (hasOwnProperty.call(obj, key)) keys[keys.length] = key; + return keys; + }; + + // Retrieve the values of an object's properties. + _.values = function(obj) { + return _.map(obj, _.identity); + }; + + // Return a sorted list of the function names available on the object. + // Aliased as `methods` + _.functions = _.methods = function(obj) { + var names = []; + for (var key in obj) { + if (_.isFunction(obj[key])) names.push(key); + } + return names.sort(); + }; + + // Extend a given object with all the properties in passed-in object(s). + _.extend = function(obj) { + each(slice.call(arguments, 1), function(source) { + for (var prop in source) { + if (source[prop] !== void 0) obj[prop] = source[prop]; + } + }); + return obj; + }; + + // Fill in a given object with default properties. + _.defaults = function(obj) { + each(slice.call(arguments, 1), function(source) { + for (var prop in source) { + if (obj[prop] == null) obj[prop] = source[prop]; + } + }); + return obj; + }; + + // Create a (shallow-cloned) duplicate of an object. + _.clone = function(obj) { + if (!_.isObject(obj)) return obj; + return _.isArray(obj) ? 
obj.slice() : _.extend({}, obj); + }; + + // Invokes interceptor with the obj, and then returns obj. + // The primary purpose of this method is to "tap into" a method chain, in + // order to perform operations on intermediate results within the chain. + _.tap = function(obj, interceptor) { + interceptor(obj); + return obj; + }; + + // Internal recursive comparison function. + function eq(a, b, stack) { + // Identical objects are equal. `0 === -0`, but they aren't identical. + // See the Harmony `egal` proposal: http://wiki.ecmascript.org/doku.php?id=harmony:egal. + if (a === b) return a !== 0 || 1 / a == 1 / b; + // A strict comparison is necessary because `null == undefined`. + if (a == null || b == null) return a === b; + // Unwrap any wrapped objects. + if (a._chain) a = a._wrapped; + if (b._chain) b = b._wrapped; + // Invoke a custom `isEqual` method if one is provided. + if (_.isFunction(a.isEqual)) return a.isEqual(b); + if (_.isFunction(b.isEqual)) return b.isEqual(a); + // Compare `[[Class]]` names. + var className = toString.call(a); + if (className != toString.call(b)) return false; + switch (className) { + // Strings, numbers, dates, and booleans are compared by value. + case '[object String]': + // Primitives and their corresponding object wrappers are equivalent; thus, `"5"` is + // equivalent to `new String("5")`. + return String(a) == String(b); + case '[object Number]': + a = +a; + b = +b; + // `NaN`s are equivalent, but non-reflexive. An `egal` comparison is performed for + // other numeric values. + return a != a ? b != b : (a == 0 ? 1 / a == 1 / b : a == b); + case '[object Date]': + case '[object Boolean]': + // Coerce dates and booleans to numeric primitive values. Dates are compared by their + // millisecond representations. Note that invalid dates with millisecond representations + // of `NaN` are not equivalent. + return +a == +b; + // RegExps are compared by their source patterns and flags. 
+ case '[object RegExp]': + return a.source == b.source && + a.global == b.global && + a.multiline == b.multiline && + a.ignoreCase == b.ignoreCase; + } + if (typeof a != 'object' || typeof b != 'object') return false; + // Assume equality for cyclic structures. The algorithm for detecting cyclic + // structures is adapted from ES 5.1 section 15.12.3, abstract operation `JO`. + var length = stack.length; + while (length--) { + // Linear search. Performance is inversely proportional to the number of + // unique nested structures. + if (stack[length] == a) return true; + } + // Add the first object to the stack of traversed objects. + stack.push(a); + var size = 0, result = true; + // Recursively compare objects and arrays. + if (className == '[object Array]') { + // Compare array lengths to determine if a deep comparison is necessary. + size = a.length; + result = size == b.length; + if (result) { + // Deep compare the contents, ignoring non-numeric properties. + while (size--) { + // Ensure commutative equality for sparse arrays. + if (!(result = size in a == size in b && eq(a[size], b[size], stack))) break; + } + } + } else { + // Objects with different constructors are not equivalent. + if ("constructor" in a != "constructor" in b || a.constructor != b.constructor) return false; + // Deep compare objects. + for (var key in a) { + if (hasOwnProperty.call(a, key)) { + // Count the expected number of properties. + size++; + // Deep compare each member. + if (!(result = hasOwnProperty.call(b, key) && eq(a[key], b[key], stack))) break; + } + } + // Ensure that both objects contain the same number of properties. + if (result) { + for (key in b) { + if (hasOwnProperty.call(b, key) && !(size--)) break; + } + result = !size; + } + } + // Remove the first object from the stack of traversed objects. + stack.pop(); + return result; + } + + // Perform a deep comparison to check if two objects are equal. 
+ _.isEqual = function(a, b) { + return eq(a, b, []); + }; + + // Is a given array, string, or object empty? + // An "empty" object has no enumerable own-properties. + _.isEmpty = function(obj) { + if (_.isArray(obj) || _.isString(obj)) return obj.length === 0; + for (var key in obj) if (hasOwnProperty.call(obj, key)) return false; + return true; + }; + + // Is a given value a DOM element? + _.isElement = function(obj) { + return !!(obj && obj.nodeType == 1); + }; + + // Is a given value an array? + // Delegates to ECMA5's native Array.isArray + _.isArray = nativeIsArray || function(obj) { + return toString.call(obj) == '[object Array]'; + }; + + // Is a given variable an object? + _.isObject = function(obj) { + return obj === Object(obj); + }; + + // Is a given variable an arguments object? + if (toString.call(arguments) == '[object Arguments]') { + _.isArguments = function(obj) { + return toString.call(obj) == '[object Arguments]'; + }; + } else { + _.isArguments = function(obj) { + return !!(obj && hasOwnProperty.call(obj, 'callee')); + }; + } + + // Is a given value a function? + _.isFunction = function(obj) { + return toString.call(obj) == '[object Function]'; + }; + + // Is a given value a string? + _.isString = function(obj) { + return toString.call(obj) == '[object String]'; + }; + + // Is a given value a number? + _.isNumber = function(obj) { + return toString.call(obj) == '[object Number]'; + }; + + // Is the given value `NaN`? + _.isNaN = function(obj) { + // `NaN` is the only value for which `===` is not reflexive. + return obj !== obj; + }; + + // Is a given value a boolean? + _.isBoolean = function(obj) { + return obj === true || obj === false || toString.call(obj) == '[object Boolean]'; + }; + + // Is a given value a date? + _.isDate = function(obj) { + return toString.call(obj) == '[object Date]'; + }; + + // Is the given value a regular expression? 
+ _.isRegExp = function(obj) { + return toString.call(obj) == '[object RegExp]'; + }; + + // Is a given value equal to null? + _.isNull = function(obj) { + return obj === null; + }; + + // Is a given variable undefined? + _.isUndefined = function(obj) { + return obj === void 0; + }; + + // Utility Functions + // ----------------- + + // Run Underscore.js in *noConflict* mode, returning the `_` variable to its + // previous owner. Returns a reference to the Underscore object. + _.noConflict = function() { + root._ = previousUnderscore; + return this; + }; + + // Keep the identity function around for default iterators. + _.identity = function(value) { + return value; + }; + + // Run a function **n** times. + _.times = function (n, iterator, context) { + for (var i = 0; i < n; i++) iterator.call(context, i); + }; + + // Escape a string for HTML interpolation. + _.escape = function(string) { + return (''+string).replace(/&/g, '&').replace(/</g, '<').replace(/>/g, '>').replace(/"/g, '"').replace(/'/g, ''').replace(/\//g,'/'); + }; + + // Add your own custom functions to the Underscore object, ensuring that + // they're correctly added to the OOP wrapper as well. + _.mixin = function(obj) { + each(_.functions(obj), function(name){ + addToWrapper(name, _[name] = obj[name]); + }); + }; + + // Generate a unique integer id (unique within the entire client session). + // Useful for temporary DOM ids. + var idCounter = 0; + _.uniqueId = function(prefix) { + var id = idCounter++; + return prefix ? prefix + id : id; + }; + + // By default, Underscore uses ERB-style template delimiters, change the + // following template settings to use alternative delimiters. + _.templateSettings = { + evaluate : /<%([\s\S]+?)%>/g, + interpolate : /<%=([\s\S]+?)%>/g, + escape : /<%-([\s\S]+?)%>/g + }; + + // JavaScript micro-templating, similar to John Resig's implementation. 
+ // Underscore templating handles arbitrary delimiters, preserves whitespace, + // and correctly escapes quotes within interpolated code. + _.template = function(str, data) { + var c = _.templateSettings; + var tmpl = 'var __p=[],print=function(){__p.push.apply(__p,arguments);};' + + 'with(obj||{}){__p.push(\'' + + str.replace(/\\/g, '\\\\') + .replace(/'/g, "\\'") + .replace(c.escape, function(match, code) { + return "',_.escape(" + code.replace(/\\'/g, "'") + "),'"; + }) + .replace(c.interpolate, function(match, code) { + return "'," + code.replace(/\\'/g, "'") + ",'"; + }) + .replace(c.evaluate || null, function(match, code) { + return "');" + code.replace(/\\'/g, "'") + .replace(/[\r\n\t]/g, ' ') + ";__p.push('"; + }) + .replace(/\r/g, '\\r') + .replace(/\n/g, '\\n') + .replace(/\t/g, '\\t') + + "');}return __p.join('');"; + var func = new Function('obj', '_', tmpl); + return data ? func(data, _) : function(data) { return func(data, _) }; + }; + + // The OOP Wrapper + // --------------- + + // If Underscore is called as a function, it returns a wrapped object that + // can be used OO-style. This wrapper holds altered versions of all the + // underscore functions. Wrapped objects may be chained. + var wrapper = function(obj) { this._wrapped = obj; }; + + // Expose `wrapper.prototype` as `_.prototype` + _.prototype = wrapper.prototype; + + // Helper function to continue chaining intermediate results. + var result = function(obj, chain) { + return chain ? _(obj).chain() : obj; + }; + + // A method to easily add functions to the OOP wrapper. + var addToWrapper = function(name, func) { + wrapper.prototype[name] = function() { + var args = slice.call(arguments); + unshift.call(args, this._wrapped); + return result(func.apply(_, args), this._chain); + }; + }; + + // Add all of the Underscore functions to the wrapper object. + _.mixin(_); + + // Add all mutator Array functions to the wrapper. 
+ each(['pop', 'push', 'reverse', 'shift', 'sort', 'splice', 'unshift'], function(name) { + var method = ArrayProto[name]; + wrapper.prototype[name] = function() { + method.apply(this._wrapped, arguments); + return result(this._wrapped, this._chain); + }; + }); + + // Add all accessor Array functions to the wrapper. + each(['concat', 'join', 'slice'], function(name) { + var method = ArrayProto[name]; + wrapper.prototype[name] = function() { + return result(method.apply(this._wrapped, arguments), this._chain); + }; + }); + + // Start chaining a wrapped Underscore object. + wrapper.prototype.chain = function() { + this._chain = true; + return this; + }; + + // Extracts the result from a wrapped and chained object. + wrapper.prototype.value = function() { + return this._wrapped; + }; + +}).call(this); diff --git a/libs/beetsplug/web/templates/index.html b/libs/beetsplug/web/templates/index.html new file mode 100644 index 00000000..7c37c82d --- /dev/null +++ b/libs/beetsplug/web/templates/index.html @@ -0,0 +1,98 @@ +<!DOCTYPE html> +<html> + <head> + <title>beets + + + + + + + + + + + +
+
+ +
+
    +
+
+ +
+
+ +
+
+ + + + + + + diff --git a/libs/beetsplug/zero.py b/libs/beetsplug/zero.py new file mode 100644 index 00000000..d20f7616 --- /dev/null +++ b/libs/beetsplug/zero.py @@ -0,0 +1,140 @@ +# -*- coding: utf-8 -*- +# This file is part of beets. +# Copyright 2016, Blemjhoo Tezoulbr . +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. + +""" Clears tag fields in media files.""" + +from __future__ import division, absolute_import, print_function + +import re +from beets.plugins import BeetsPlugin +from beets.mediafile import MediaFile +from beets.importer import action +from beets.util import confit + +__author__ = 'baobab@heresiarch.info' +__version__ = '0.10' + + +class ZeroPlugin(BeetsPlugin): + + _instance = None + + def __init__(self): + super(ZeroPlugin, self).__init__() + + # Listeners. + self.register_listener('write', self.write_event) + self.register_listener('import_task_choice', + self.import_task_choice_event) + + self.config.add({ + 'fields': [], + 'keep_fields': [], + 'update_database': False, + }) + + self.patterns = {} + self.warned = False + + # We'll only handle `fields` or `keep_fields`, but not both. + if self.config['fields'] and self.config['keep_fields']: + self._log.warn(u'cannot blacklist and whitelist at the same time') + + # Blacklist mode. + if self.config['fields']: + self.validate_config('fields') + for field in self.config['fields'].as_str_seq(): + self.set_pattern(field) + + # Whitelist mode. 
+ elif self.config['keep_fields']: + self.validate_config('keep_fields') + + for field in MediaFile.fields(): + if field in self.config['keep_fields'].as_str_seq(): + continue + self.set_pattern(field) + + # These fields should always be preserved. + for key in ('id', 'path', 'album_id'): + if key in self.patterns: + del self.patterns[key] + + def validate_config(self, mode): + """Check whether fields in the configuration are valid. + + `mode` should either be "fields" or "keep_fields", indicating + the section of the configuration to validate. + """ + for field in self.config[mode].as_str_seq(): + if field not in MediaFile.fields(): + self._log.error(u'invalid field: {0}', field) + continue + if mode == 'fields' and field in ('id', 'path', 'album_id'): + self._log.warn(u'field \'{0}\' ignored, zeroing ' + u'it would be dangerous', field) + continue + + def set_pattern(self, field): + """Set a field in `self.patterns` to a string list corresponding to + the configuration, or `True` if the field has no specific + configuration. + """ + try: + self.patterns[field] = self.config[field].as_str_seq() + except confit.NotFoundError: + # Matches everything + self.patterns[field] = True + + def import_task_choice_event(self, session, task): + """Listen for import_task_choice event.""" + if task.choice_flag == action.ASIS and not self.warned: + self._log.warn(u'cannot zero in \"as-is\" mode') + self.warned = True + # TODO request write in as-is mode + + @classmethod + def match_patterns(cls, field, patterns): + """Check if field (as string) is matching any of the patterns in + the list. + """ + if patterns is True: + return True + for p in patterns: + if re.search(p, unicode(field), flags=re.IGNORECASE): + return True + return False + + def write_event(self, item, path, tags): + """Set values in tags to `None` if the key and value are matched + by `self.patterns`. 
+ """ + if not self.patterns: + self._log.warn(u'no fields, nothing to do') + return + + for field, patterns in self.patterns.items(): + if field in tags: + value = tags[field] + match = self.match_patterns(tags[field], patterns) + else: + value = '' + match = patterns is True + + if match: + self._log.debug(u'{0}: {1} -> None', field, value) + tags[field] = None + if self.config['update_database']: + item[field] = None diff --git a/libs/colorama/__init__.py b/libs/colorama/__init__.py new file mode 100644 index 00000000..670e6b39 --- /dev/null +++ b/libs/colorama/__init__.py @@ -0,0 +1,7 @@ +# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. +from .initialise import init, deinit, reinit, colorama_text +from .ansi import Fore, Back, Style, Cursor +from .ansitowin32 import AnsiToWin32 + +__version__ = '0.3.7' + diff --git a/libs/colorama/ansi.py b/libs/colorama/ansi.py new file mode 100644 index 00000000..78776588 --- /dev/null +++ b/libs/colorama/ansi.py @@ -0,0 +1,102 @@ +# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. +''' +This module generates ANSI character codes to printing colors to terminals. +See: http://en.wikipedia.org/wiki/ANSI_escape_code +''' + +CSI = '\033[' +OSC = '\033]' +BEL = '\007' + + +def code_to_chars(code): + return CSI + str(code) + 'm' + +def set_title(title): + return OSC + '2;' + title + BEL + +def clear_screen(mode=2): + return CSI + str(mode) + 'J' + +def clear_line(mode=2): + return CSI + str(mode) + 'K' + + +class AnsiCodes(object): + def __init__(self): + # the subclasses declare class attributes which are numbers. 
+ # Upon instantiation we define instance attributes, which are the same + # as the class attributes but wrapped with the ANSI escape sequence + for name in dir(self): + if not name.startswith('_'): + value = getattr(self, name) + setattr(self, name, code_to_chars(value)) + + +class AnsiCursor(object): + def UP(self, n=1): + return CSI + str(n) + 'A' + def DOWN(self, n=1): + return CSI + str(n) + 'B' + def FORWARD(self, n=1): + return CSI + str(n) + 'C' + def BACK(self, n=1): + return CSI + str(n) + 'D' + def POS(self, x=1, y=1): + return CSI + str(y) + ';' + str(x) + 'H' + + +class AnsiFore(AnsiCodes): + BLACK = 30 + RED = 31 + GREEN = 32 + YELLOW = 33 + BLUE = 34 + MAGENTA = 35 + CYAN = 36 + WHITE = 37 + RESET = 39 + + # These are fairly well supported, but not part of the standard. + LIGHTBLACK_EX = 90 + LIGHTRED_EX = 91 + LIGHTGREEN_EX = 92 + LIGHTYELLOW_EX = 93 + LIGHTBLUE_EX = 94 + LIGHTMAGENTA_EX = 95 + LIGHTCYAN_EX = 96 + LIGHTWHITE_EX = 97 + + +class AnsiBack(AnsiCodes): + BLACK = 40 + RED = 41 + GREEN = 42 + YELLOW = 43 + BLUE = 44 + MAGENTA = 45 + CYAN = 46 + WHITE = 47 + RESET = 49 + + # These are fairly well supported, but not part of the standard. + LIGHTBLACK_EX = 100 + LIGHTRED_EX = 101 + LIGHTGREEN_EX = 102 + LIGHTYELLOW_EX = 103 + LIGHTBLUE_EX = 104 + LIGHTMAGENTA_EX = 105 + LIGHTCYAN_EX = 106 + LIGHTWHITE_EX = 107 + + +class AnsiStyle(AnsiCodes): + BRIGHT = 1 + DIM = 2 + NORMAL = 22 + RESET_ALL = 0 + +Fore = AnsiFore() +Back = AnsiBack() +Style = AnsiStyle() +Cursor = AnsiCursor() diff --git a/libs/colorama/ansitowin32.py b/libs/colorama/ansitowin32.py new file mode 100644 index 00000000..b7ff6f21 --- /dev/null +++ b/libs/colorama/ansitowin32.py @@ -0,0 +1,236 @@ +# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. 
+import re +import sys +import os + +from .ansi import AnsiFore, AnsiBack, AnsiStyle, Style +from .winterm import WinTerm, WinColor, WinStyle +from .win32 import windll, winapi_test + + +winterm = None +if windll is not None: + winterm = WinTerm() + + +def is_stream_closed(stream): + return not hasattr(stream, 'closed') or stream.closed + + +def is_a_tty(stream): + return hasattr(stream, 'isatty') and stream.isatty() + + +class StreamWrapper(object): + ''' + Wraps a stream (such as stdout), acting as a transparent proxy for all + attribute access apart from method 'write()', which is delegated to our + Converter instance. + ''' + def __init__(self, wrapped, converter): + # double-underscore everything to prevent clashes with names of + # attributes on the wrapped stream object. + self.__wrapped = wrapped + self.__convertor = converter + + def __getattr__(self, name): + return getattr(self.__wrapped, name) + + def write(self, text): + self.__convertor.write(text) + + +class AnsiToWin32(object): + ''' + Implements a 'write()' method which, on Windows, will strip ANSI character + sequences from the text, and if outputting to a tty, will convert them into + win32 function calls. + ''' + ANSI_CSI_RE = re.compile('\001?\033\[((?:\d|;)*)([a-zA-Z])\002?') # Control Sequence Introducer + ANSI_OSC_RE = re.compile('\001?\033\]((?:.|;)*?)(\x07)\002?') # Operating System Command + + def __init__(self, wrapped, convert=None, strip=None, autoreset=False): + # The wrapped stream (normally sys.stdout or sys.stderr) + self.wrapped = wrapped + + # should we reset colors to defaults after every .write() + self.autoreset = autoreset + + # create the proxy wrapping our output stream + self.stream = StreamWrapper(wrapped, self) + + on_windows = os.name == 'nt' + # We test if the WinAPI works, because even if we are on Windows + # we may be using a terminal that doesn't support the WinAPI + # (e.g. Cygwin Terminal). In this case it's up to the terminal + # to support the ANSI codes. 
+ conversion_supported = on_windows and winapi_test() + + # should we strip ANSI sequences from our output? + if strip is None: + strip = conversion_supported or (not is_stream_closed(wrapped) and not is_a_tty(wrapped)) + self.strip = strip + + # should we should convert ANSI sequences into win32 calls? + if convert is None: + convert = conversion_supported and not is_stream_closed(wrapped) and is_a_tty(wrapped) + self.convert = convert + + # dict of ansi codes to win32 functions and parameters + self.win32_calls = self.get_win32_calls() + + # are we wrapping stderr? + self.on_stderr = self.wrapped is sys.stderr + + def should_wrap(self): + ''' + True if this class is actually needed. If false, then the output + stream will not be affected, nor will win32 calls be issued, so + wrapping stdout is not actually required. This will generally be + False on non-Windows platforms, unless optional functionality like + autoreset has been requested using kwargs to init() + ''' + return self.convert or self.strip or self.autoreset + + def get_win32_calls(self): + if self.convert and winterm: + return { + AnsiStyle.RESET_ALL: (winterm.reset_all, ), + AnsiStyle.BRIGHT: (winterm.style, WinStyle.BRIGHT), + AnsiStyle.DIM: (winterm.style, WinStyle.NORMAL), + AnsiStyle.NORMAL: (winterm.style, WinStyle.NORMAL), + AnsiFore.BLACK: (winterm.fore, WinColor.BLACK), + AnsiFore.RED: (winterm.fore, WinColor.RED), + AnsiFore.GREEN: (winterm.fore, WinColor.GREEN), + AnsiFore.YELLOW: (winterm.fore, WinColor.YELLOW), + AnsiFore.BLUE: (winterm.fore, WinColor.BLUE), + AnsiFore.MAGENTA: (winterm.fore, WinColor.MAGENTA), + AnsiFore.CYAN: (winterm.fore, WinColor.CYAN), + AnsiFore.WHITE: (winterm.fore, WinColor.GREY), + AnsiFore.RESET: (winterm.fore, ), + AnsiFore.LIGHTBLACK_EX: (winterm.fore, WinColor.BLACK, True), + AnsiFore.LIGHTRED_EX: (winterm.fore, WinColor.RED, True), + AnsiFore.LIGHTGREEN_EX: (winterm.fore, WinColor.GREEN, True), + AnsiFore.LIGHTYELLOW_EX: (winterm.fore, WinColor.YELLOW, 
True), + AnsiFore.LIGHTBLUE_EX: (winterm.fore, WinColor.BLUE, True), + AnsiFore.LIGHTMAGENTA_EX: (winterm.fore, WinColor.MAGENTA, True), + AnsiFore.LIGHTCYAN_EX: (winterm.fore, WinColor.CYAN, True), + AnsiFore.LIGHTWHITE_EX: (winterm.fore, WinColor.GREY, True), + AnsiBack.BLACK: (winterm.back, WinColor.BLACK), + AnsiBack.RED: (winterm.back, WinColor.RED), + AnsiBack.GREEN: (winterm.back, WinColor.GREEN), + AnsiBack.YELLOW: (winterm.back, WinColor.YELLOW), + AnsiBack.BLUE: (winterm.back, WinColor.BLUE), + AnsiBack.MAGENTA: (winterm.back, WinColor.MAGENTA), + AnsiBack.CYAN: (winterm.back, WinColor.CYAN), + AnsiBack.WHITE: (winterm.back, WinColor.GREY), + AnsiBack.RESET: (winterm.back, ), + AnsiBack.LIGHTBLACK_EX: (winterm.back, WinColor.BLACK, True), + AnsiBack.LIGHTRED_EX: (winterm.back, WinColor.RED, True), + AnsiBack.LIGHTGREEN_EX: (winterm.back, WinColor.GREEN, True), + AnsiBack.LIGHTYELLOW_EX: (winterm.back, WinColor.YELLOW, True), + AnsiBack.LIGHTBLUE_EX: (winterm.back, WinColor.BLUE, True), + AnsiBack.LIGHTMAGENTA_EX: (winterm.back, WinColor.MAGENTA, True), + AnsiBack.LIGHTCYAN_EX: (winterm.back, WinColor.CYAN, True), + AnsiBack.LIGHTWHITE_EX: (winterm.back, WinColor.GREY, True), + } + return dict() + + def write(self, text): + if self.strip or self.convert: + self.write_and_convert(text) + else: + self.wrapped.write(text) + self.wrapped.flush() + if self.autoreset: + self.reset_all() + + + def reset_all(self): + if self.convert: + self.call_win32('m', (0,)) + elif not self.strip and not is_stream_closed(self.wrapped): + self.wrapped.write(Style.RESET_ALL) + + + def write_and_convert(self, text): + ''' + Write the given text to our wrapped stream, stripping any ANSI + sequences from the text, and optionally converting them into win32 + calls. 
+ ''' + cursor = 0 + text = self.convert_osc(text) + for match in self.ANSI_CSI_RE.finditer(text): + start, end = match.span() + self.write_plain_text(text, cursor, start) + self.convert_ansi(*match.groups()) + cursor = end + self.write_plain_text(text, cursor, len(text)) + + + def write_plain_text(self, text, start, end): + if start < end: + self.wrapped.write(text[start:end]) + self.wrapped.flush() + + + def convert_ansi(self, paramstring, command): + if self.convert: + params = self.extract_params(command, paramstring) + self.call_win32(command, params) + + + def extract_params(self, command, paramstring): + if command in 'Hf': + params = tuple(int(p) if len(p) != 0 else 1 for p in paramstring.split(';')) + while len(params) < 2: + # defaults: + params = params + (1,) + else: + params = tuple(int(p) for p in paramstring.split(';') if len(p) != 0) + if len(params) == 0: + # defaults: + if command in 'JKm': + params = (0,) + elif command in 'ABCD': + params = (1,) + + return params + + + def call_win32(self, command, params): + if command == 'm': + for param in params: + if param in self.win32_calls: + func_args = self.win32_calls[param] + func = func_args[0] + args = func_args[1:] + kwargs = dict(on_stderr=self.on_stderr) + func(*args, **kwargs) + elif command in 'J': + winterm.erase_screen(params[0], on_stderr=self.on_stderr) + elif command in 'K': + winterm.erase_line(params[0], on_stderr=self.on_stderr) + elif command in 'Hf': # cursor position - absolute + winterm.set_cursor_position(params, on_stderr=self.on_stderr) + elif command in 'ABCD': # cursor position - relative + n = params[0] + # A - up, B - down, C - forward, D - back + x, y = {'A': (0, -n), 'B': (0, n), 'C': (n, 0), 'D': (-n, 0)}[command] + winterm.cursor_adjust(x, y, on_stderr=self.on_stderr) + + + def convert_osc(self, text): + for match in self.ANSI_OSC_RE.finditer(text): + start, end = match.span() + text = text[:start] + text[end:] + paramstring, command = match.groups() + if command in 
'\x07': # \x07 = BEL + params = paramstring.split(";") + # 0 - change title and icon (we will only change title) + # 1 - change icon (we don't support this) + # 2 - change title + if params[0] in '02': + winterm.set_title(params[1]) + return text diff --git a/libs/colorama/initialise.py b/libs/colorama/initialise.py new file mode 100644 index 00000000..834962a3 --- /dev/null +++ b/libs/colorama/initialise.py @@ -0,0 +1,82 @@ +# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. +import atexit +import contextlib +import sys + +from .ansitowin32 import AnsiToWin32 + + +orig_stdout = None +orig_stderr = None + +wrapped_stdout = None +wrapped_stderr = None + +atexit_done = False + + +def reset_all(): + if AnsiToWin32 is not None: # Issue #74: objects might become None at exit + AnsiToWin32(orig_stdout).reset_all() + + +def init(autoreset=False, convert=None, strip=None, wrap=True): + + if not wrap and any([autoreset, convert, strip]): + raise ValueError('wrap=False conflicts with any other arg=True') + + global wrapped_stdout, wrapped_stderr + global orig_stdout, orig_stderr + + orig_stdout = sys.stdout + orig_stderr = sys.stderr + + if sys.stdout is None: + wrapped_stdout = None + else: + sys.stdout = wrapped_stdout = \ + wrap_stream(orig_stdout, convert, strip, autoreset, wrap) + if sys.stderr is None: + wrapped_stderr = None + else: + sys.stderr = wrapped_stderr = \ + wrap_stream(orig_stderr, convert, strip, autoreset, wrap) + + global atexit_done + if not atexit_done: + atexit.register(reset_all) + atexit_done = True + + +def deinit(): + if orig_stdout is not None: + sys.stdout = orig_stdout + if orig_stderr is not None: + sys.stderr = orig_stderr + + +@contextlib.contextmanager +def colorama_text(*args, **kwargs): + init(*args, **kwargs) + try: + yield + finally: + deinit() + + +def reinit(): + if wrapped_stdout is not None: + sys.stdout = wrapped_stdout + if wrapped_stderr is not None: + sys.stderr = wrapped_stderr + + +def 
wrap_stream(stream, convert, strip, autoreset, wrap): + if wrap: + wrapper = AnsiToWin32(stream, + convert=convert, strip=strip, autoreset=autoreset) + if wrapper.should_wrap(): + stream = wrapper.stream + return stream + + diff --git a/libs/colorama/win32.py b/libs/colorama/win32.py new file mode 100644 index 00000000..3d1d2f2d --- /dev/null +++ b/libs/colorama/win32.py @@ -0,0 +1,154 @@ +# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. + +# from winbase.h +STDOUT = -11 +STDERR = -12 + +try: + import ctypes + from ctypes import LibraryLoader + windll = LibraryLoader(ctypes.WinDLL) + from ctypes import wintypes +except (AttributeError, ImportError): + windll = None + SetConsoleTextAttribute = lambda *_: None + winapi_test = lambda *_: None +else: + from ctypes import byref, Structure, c_char, POINTER + + COORD = wintypes._COORD + + class CONSOLE_SCREEN_BUFFER_INFO(Structure): + """struct in wincon.h.""" + _fields_ = [ + ("dwSize", COORD), + ("dwCursorPosition", COORD), + ("wAttributes", wintypes.WORD), + ("srWindow", wintypes.SMALL_RECT), + ("dwMaximumWindowSize", COORD), + ] + def __str__(self): + return '(%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d)' % ( + self.dwSize.Y, self.dwSize.X + , self.dwCursorPosition.Y, self.dwCursorPosition.X + , self.wAttributes + , self.srWindow.Top, self.srWindow.Left, self.srWindow.Bottom, self.srWindow.Right + , self.dwMaximumWindowSize.Y, self.dwMaximumWindowSize.X + ) + + _GetStdHandle = windll.kernel32.GetStdHandle + _GetStdHandle.argtypes = [ + wintypes.DWORD, + ] + _GetStdHandle.restype = wintypes.HANDLE + + _GetConsoleScreenBufferInfo = windll.kernel32.GetConsoleScreenBufferInfo + _GetConsoleScreenBufferInfo.argtypes = [ + wintypes.HANDLE, + POINTER(CONSOLE_SCREEN_BUFFER_INFO), + ] + _GetConsoleScreenBufferInfo.restype = wintypes.BOOL + + _SetConsoleTextAttribute = windll.kernel32.SetConsoleTextAttribute + _SetConsoleTextAttribute.argtypes = [ + wintypes.HANDLE, + wintypes.WORD, + ] + 
_SetConsoleTextAttribute.restype = wintypes.BOOL + + _SetConsoleCursorPosition = windll.kernel32.SetConsoleCursorPosition + _SetConsoleCursorPosition.argtypes = [ + wintypes.HANDLE, + COORD, + ] + _SetConsoleCursorPosition.restype = wintypes.BOOL + + _FillConsoleOutputCharacterA = windll.kernel32.FillConsoleOutputCharacterA + _FillConsoleOutputCharacterA.argtypes = [ + wintypes.HANDLE, + c_char, + wintypes.DWORD, + COORD, + POINTER(wintypes.DWORD), + ] + _FillConsoleOutputCharacterA.restype = wintypes.BOOL + + _FillConsoleOutputAttribute = windll.kernel32.FillConsoleOutputAttribute + _FillConsoleOutputAttribute.argtypes = [ + wintypes.HANDLE, + wintypes.WORD, + wintypes.DWORD, + COORD, + POINTER(wintypes.DWORD), + ] + _FillConsoleOutputAttribute.restype = wintypes.BOOL + + _SetConsoleTitleW = windll.kernel32.SetConsoleTitleA + _SetConsoleTitleW.argtypes = [ + wintypes.LPCSTR + ] + _SetConsoleTitleW.restype = wintypes.BOOL + + handles = { + STDOUT: _GetStdHandle(STDOUT), + STDERR: _GetStdHandle(STDERR), + } + + def winapi_test(): + handle = handles[STDOUT] + csbi = CONSOLE_SCREEN_BUFFER_INFO() + success = _GetConsoleScreenBufferInfo( + handle, byref(csbi)) + return bool(success) + + def GetConsoleScreenBufferInfo(stream_id=STDOUT): + handle = handles[stream_id] + csbi = CONSOLE_SCREEN_BUFFER_INFO() + success = _GetConsoleScreenBufferInfo( + handle, byref(csbi)) + return csbi + + def SetConsoleTextAttribute(stream_id, attrs): + handle = handles[stream_id] + return _SetConsoleTextAttribute(handle, attrs) + + def SetConsoleCursorPosition(stream_id, position, adjust=True): + position = COORD(*position) + # If the position is out of range, do nothing. + if position.Y <= 0 or position.X <= 0: + return + # Adjust for Windows' SetConsoleCursorPosition: + # 1. being 0-based, while ANSI is 1-based. + # 2. expecting (x,y), while ANSI uses (y,x). 
+ adjusted_position = COORD(position.Y - 1, position.X - 1) + if adjust: + # Adjust for viewport's scroll position + sr = GetConsoleScreenBufferInfo(STDOUT).srWindow + adjusted_position.Y += sr.Top + adjusted_position.X += sr.Left + # Resume normal processing + handle = handles[stream_id] + return _SetConsoleCursorPosition(handle, adjusted_position) + + def FillConsoleOutputCharacter(stream_id, char, length, start): + handle = handles[stream_id] + char = c_char(char.encode()) + length = wintypes.DWORD(length) + num_written = wintypes.DWORD(0) + # Note that this is hard-coded for ANSI (vs wide) bytes. + success = _FillConsoleOutputCharacterA( + handle, char, length, start, byref(num_written)) + return num_written.value + + def FillConsoleOutputAttribute(stream_id, attr, length, start): + ''' FillConsoleOutputAttribute( hConsole, csbi.wAttributes, dwConSize, coordScreen, &cCharsWritten )''' + handle = handles[stream_id] + attribute = wintypes.WORD(attr) + length = wintypes.DWORD(length) + num_written = wintypes.DWORD(0) + # Note that this is hard-coded for ANSI (vs wide) bytes. + return _FillConsoleOutputAttribute( + handle, attribute, length, start, byref(num_written)) + + def SetConsoleTitle(title): + return _SetConsoleTitleW(title) diff --git a/libs/colorama/winterm.py b/libs/colorama/winterm.py new file mode 100644 index 00000000..60309d3c --- /dev/null +++ b/libs/colorama/winterm.py @@ -0,0 +1,162 @@ +# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. +from . 
import win32 + + +# from wincon.h +class WinColor(object): + BLACK = 0 + BLUE = 1 + GREEN = 2 + CYAN = 3 + RED = 4 + MAGENTA = 5 + YELLOW = 6 + GREY = 7 + +# from wincon.h +class WinStyle(object): + NORMAL = 0x00 # dim text, dim background + BRIGHT = 0x08 # bright text, dim background + BRIGHT_BACKGROUND = 0x80 # dim text, bright background + +class WinTerm(object): + + def __init__(self): + self._default = win32.GetConsoleScreenBufferInfo(win32.STDOUT).wAttributes + self.set_attrs(self._default) + self._default_fore = self._fore + self._default_back = self._back + self._default_style = self._style + # In order to emulate LIGHT_EX in windows, we borrow the BRIGHT style. + # So that LIGHT_EX colors and BRIGHT style do not clobber each other, + # we track them separately, since LIGHT_EX is overwritten by Fore/Back + # and BRIGHT is overwritten by Style codes. + self._light = 0 + + def get_attrs(self): + return self._fore + self._back * 16 + (self._style | self._light) + + def set_attrs(self, value): + self._fore = value & 7 + self._back = (value >> 4) & 7 + self._style = value & (WinStyle.BRIGHT | WinStyle.BRIGHT_BACKGROUND) + + def reset_all(self, on_stderr=None): + self.set_attrs(self._default) + self.set_console(attrs=self._default) + + def fore(self, fore=None, light=False, on_stderr=False): + if fore is None: + fore = self._default_fore + self._fore = fore + # Emulate LIGHT_EX with BRIGHT Style + if light: + self._light |= WinStyle.BRIGHT + else: + self._light &= ~WinStyle.BRIGHT + self.set_console(on_stderr=on_stderr) + + def back(self, back=None, light=False, on_stderr=False): + if back is None: + back = self._default_back + self._back = back + # Emulate LIGHT_EX with BRIGHT_BACKGROUND Style + if light: + self._light |= WinStyle.BRIGHT_BACKGROUND + else: + self._light &= ~WinStyle.BRIGHT_BACKGROUND + self.set_console(on_stderr=on_stderr) + + def style(self, style=None, on_stderr=False): + if style is None: + style = self._default_style + self._style = style + 
self.set_console(on_stderr=on_stderr) + + def set_console(self, attrs=None, on_stderr=False): + if attrs is None: + attrs = self.get_attrs() + handle = win32.STDOUT + if on_stderr: + handle = win32.STDERR + win32.SetConsoleTextAttribute(handle, attrs) + + def get_position(self, handle): + position = win32.GetConsoleScreenBufferInfo(handle).dwCursorPosition + # Because Windows coordinates are 0-based, + # and win32.SetConsoleCursorPosition expects 1-based. + position.X += 1 + position.Y += 1 + return position + + def set_cursor_position(self, position=None, on_stderr=False): + if position is None: + # I'm not currently tracking the position, so there is no default. + # position = self.get_position() + return + handle = win32.STDOUT + if on_stderr: + handle = win32.STDERR + win32.SetConsoleCursorPosition(handle, position) + + def cursor_adjust(self, x, y, on_stderr=False): + handle = win32.STDOUT + if on_stderr: + handle = win32.STDERR + position = self.get_position(handle) + adjusted_position = (position.Y + y, position.X + x) + win32.SetConsoleCursorPosition(handle, adjusted_position, adjust=False) + + def erase_screen(self, mode=0, on_stderr=False): + # 0 should clear from the cursor to the end of the screen. + # 1 should clear from the cursor to the beginning of the screen. 
+ # 2 should clear the entire screen, and move cursor to (1,1) + handle = win32.STDOUT + if on_stderr: + handle = win32.STDERR + csbi = win32.GetConsoleScreenBufferInfo(handle) + # get the number of character cells in the current buffer + cells_in_screen = csbi.dwSize.X * csbi.dwSize.Y + # get number of character cells before current cursor position + cells_before_cursor = csbi.dwSize.X * csbi.dwCursorPosition.Y + csbi.dwCursorPosition.X + if mode == 0: + from_coord = csbi.dwCursorPosition + cells_to_erase = cells_in_screen - cells_before_cursor + if mode == 1: + from_coord = win32.COORD(0, 0) + cells_to_erase = cells_before_cursor + elif mode == 2: + from_coord = win32.COORD(0, 0) + cells_to_erase = cells_in_screen + # fill the entire screen with blanks + win32.FillConsoleOutputCharacter(handle, ' ', cells_to_erase, from_coord) + # now set the buffer's attributes accordingly + win32.FillConsoleOutputAttribute(handle, self.get_attrs(), cells_to_erase, from_coord) + if mode == 2: + # put the cursor where needed + win32.SetConsoleCursorPosition(handle, (1, 1)) + + def erase_line(self, mode=0, on_stderr=False): + # 0 should clear from the cursor to the end of the line. + # 1 should clear from the cursor to the beginning of the line. + # 2 should clear the entire line. 
+ handle = win32.STDOUT + if on_stderr: + handle = win32.STDERR + csbi = win32.GetConsoleScreenBufferInfo(handle) + if mode == 0: + from_coord = csbi.dwCursorPosition + cells_to_erase = csbi.dwSize.X - csbi.dwCursorPosition.X + if mode == 1: + from_coord = win32.COORD(0, csbi.dwCursorPosition.Y) + cells_to_erase = csbi.dwCursorPosition.X + elif mode == 2: + from_coord = win32.COORD(0, csbi.dwCursorPosition.Y) + cells_to_erase = csbi.dwSize.X + # fill the entire screen with blanks + win32.FillConsoleOutputCharacter(handle, ' ', cells_to_erase, from_coord) + # now set the buffer's attributes accordingly + win32.FillConsoleOutputAttribute(handle, self.get_attrs(), cells_to_erase, from_coord) + + def set_title(self, title): + win32.SetConsoleTitle(title) diff --git a/libs/enum/LICENSE b/libs/enum/LICENSE new file mode 100644 index 00000000..9003b885 --- /dev/null +++ b/libs/enum/LICENSE @@ -0,0 +1,32 @@ +Copyright (c) 2013, Ethan Furman. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + + Redistributions of source code must retain the above + copyright notice, this list of conditions and the + following disclaimer. + + Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials + provided with the distribution. + + Neither the name Ethan Furman nor the names of any + contributors may be used to endorse or promote products + derived from this software without specific prior written + permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. diff --git a/libs/enum/README b/libs/enum/README new file mode 100644 index 00000000..aa2333d8 --- /dev/null +++ b/libs/enum/README @@ -0,0 +1,3 @@ +enum34 is the new Python stdlib enum module available in Python 3.4 +backported for previous versions of Python from 2.4 to 3.3. +tested on 2.6, 2.7, and 3.3+ diff --git a/libs/enum/__init__.py b/libs/enum/__init__.py new file mode 100644 index 00000000..d6ffb3a4 --- /dev/null +++ b/libs/enum/__init__.py @@ -0,0 +1,837 @@ +"""Python Enumerations""" + +import sys as _sys + +__all__ = ['Enum', 'IntEnum', 'unique'] + +version = 1, 1, 6 + +pyver = float('%s.%s' % _sys.version_info[:2]) + +try: + any +except NameError: + def any(iterable): + for element in iterable: + if element: + return True + return False + +try: + from collections import OrderedDict +except ImportError: + OrderedDict = None + +try: + basestring +except NameError: + # In Python 2 basestring is the ancestor of both str and unicode + # in Python 3 it's just str, but was missing in 3.1 + basestring = str + +try: + unicode +except NameError: + # In Python 3 unicode no longer exists (it's just str) + unicode = str + +class _RouteClassAttributeToGetattr(object): + """Route attribute access on a class to __getattr__. + + This is a descriptor, used to define attributes that act differently when + accessed through an instance and through a class. 
Instance access remains + normal, but access to an attribute through a class will be routed to the + class's __getattr__ method; this is done by raising AttributeError. + + """ + def __init__(self, fget=None): + self.fget = fget + + def __get__(self, instance, ownerclass=None): + if instance is None: + raise AttributeError() + return self.fget(instance) + + def __set__(self, instance, value): + raise AttributeError("can't set attribute") + + def __delete__(self, instance): + raise AttributeError("can't delete attribute") + + +def _is_descriptor(obj): + """Returns True if obj is a descriptor, False otherwise.""" + return ( + hasattr(obj, '__get__') or + hasattr(obj, '__set__') or + hasattr(obj, '__delete__')) + + +def _is_dunder(name): + """Returns True if a __dunder__ name, False otherwise.""" + return (name[:2] == name[-2:] == '__' and + name[2:3] != '_' and + name[-3:-2] != '_' and + len(name) > 4) + + +def _is_sunder(name): + """Returns True if a _sunder_ name, False otherwise.""" + return (name[0] == name[-1] == '_' and + name[1:2] != '_' and + name[-2:-1] != '_' and + len(name) > 2) + + +def _make_class_unpicklable(cls): + """Make the given class un-picklable.""" + def _break_on_call_reduce(self, protocol=None): + raise TypeError('%r cannot be pickled' % self) + cls.__reduce_ex__ = _break_on_call_reduce + cls.__module__ = '' + + +class _EnumDict(dict): + """Track enum member order and ensure member names are not reused. + + EnumMeta will use the names found in self._member_names as the + enumeration member names. + + """ + def __init__(self): + super(_EnumDict, self).__init__() + self._member_names = [] + + def __setitem__(self, key, value): + """Changes anything not dundered or not a descriptor. + + If a descriptor is added with the same name as an enum member, the name + is removed from _member_names (this may leave a hole in the numerical + sequence of values). 
+ + If an enum member name is used twice, an error is raised; duplicate + values are not checked for. + + Single underscore (sunder) names are reserved. + + Note: in 3.x __order__ is simply discarded as a not necessary piece + leftover from 2.x + + """ + if pyver >= 3.0 and key in ('_order_', '__order__'): + return + elif key == '__order__': + key = '_order_' + if _is_sunder(key): + if key != '_order_': + raise ValueError('_names_ are reserved for future Enum use') + elif _is_dunder(key): + pass + elif key in self._member_names: + # descriptor overwriting an enum? + raise TypeError('Attempted to reuse key: %r' % key) + elif not _is_descriptor(value): + if key in self: + # enum overwriting a descriptor? + raise TypeError('Key already defined as: %r' % self[key]) + self._member_names.append(key) + super(_EnumDict, self).__setitem__(key, value) + + +# Dummy value for Enum as EnumMeta explicity checks for it, but of course until +# EnumMeta finishes running the first time the Enum class doesn't exist. This +# is also why there are checks in EnumMeta like `if Enum is not None` +Enum = None + + +class EnumMeta(type): + """Metaclass for Enum""" + @classmethod + def __prepare__(metacls, cls, bases): + return _EnumDict() + + def __new__(metacls, cls, bases, classdict): + # an Enum class is final once enumeration items have been defined; it + # cannot be mixed with other types (int, float, etc.) if it has an + # inherited __new__ unless a new __new__ is defined (or the resulting + # class will fail). 
+ if type(classdict) is dict: + original_dict = classdict + classdict = _EnumDict() + for k, v in original_dict.items(): + classdict[k] = v + + member_type, first_enum = metacls._get_mixins_(bases) + __new__, save_new, use_args = metacls._find_new_(classdict, member_type, + first_enum) + # save enum items into separate mapping so they don't get baked into + # the new class + members = dict((k, classdict[k]) for k in classdict._member_names) + for name in classdict._member_names: + del classdict[name] + + # py2 support for definition order + _order_ = classdict.get('_order_') + if _order_ is None: + if pyver < 3.0: + try: + _order_ = [name for (name, value) in sorted(members.items(), key=lambda item: item[1])] + except TypeError: + _order_ = [name for name in sorted(members.keys())] + else: + _order_ = classdict._member_names + else: + del classdict['_order_'] + if pyver < 3.0: + _order_ = _order_.replace(',', ' ').split() + aliases = [name for name in members if name not in _order_] + _order_ += aliases + + # check for illegal enum names (any others?) + invalid_names = set(members) & set(['mro']) + if invalid_names: + raise ValueError('Invalid enum member name(s): %s' % ( + ', '.join(invalid_names), )) + + # save attributes from super classes so we know if we can take + # the shortcut of storing members in the class dict + base_attributes = set([a for b in bases for a in b.__dict__]) + # create our new Enum type + enum_class = super(EnumMeta, metacls).__new__(metacls, cls, bases, classdict) + enum_class._member_names_ = [] # names in random order + if OrderedDict is not None: + enum_class._member_map_ = OrderedDict() + else: + enum_class._member_map_ = {} # name->value map + enum_class._member_type_ = member_type + + # Reverse value->name map for hashable values. 
+ enum_class._value2member_map_ = {} + + # instantiate them, checking for duplicates as we go + # we instantiate first instead of checking for duplicates first in case + # a custom __new__ is doing something funky with the values -- such as + # auto-numbering ;) + if __new__ is None: + __new__ = enum_class.__new__ + for member_name in _order_: + value = members[member_name] + if not isinstance(value, tuple): + args = (value, ) + else: + args = value + if member_type is tuple: # special case for tuple enums + args = (args, ) # wrap it one more time + if not use_args or not args: + enum_member = __new__(enum_class) + if not hasattr(enum_member, '_value_'): + enum_member._value_ = value + else: + enum_member = __new__(enum_class, *args) + if not hasattr(enum_member, '_value_'): + enum_member._value_ = member_type(*args) + value = enum_member._value_ + enum_member._name_ = member_name + enum_member.__objclass__ = enum_class + enum_member.__init__(*args) + # If another member with the same value was already defined, the + # new member becomes an alias to the existing one. + for name, canonical_member in enum_class._member_map_.items(): + if canonical_member.value == enum_member._value_: + enum_member = canonical_member + break + else: + # Aliases don't appear in member names (only in __members__). + enum_class._member_names_.append(member_name) + # performance boost for any member that would not shadow + # a DynamicClassAttribute (aka _RouteClassAttributeToGetattr) + if member_name not in base_attributes: + setattr(enum_class, member_name, enum_member) + # now add to _member_map_ + enum_class._member_map_[member_name] = enum_member + try: + # This may fail if value is not hashable. We can't add the value + # to the map, and by-value lookups for this value will be + # linear. 
+ enum_class._value2member_map_[value] = enum_member + except TypeError: + pass + + + # If a custom type is mixed into the Enum, and it does not know how + # to pickle itself, pickle.dumps will succeed but pickle.loads will + # fail. Rather than have the error show up later and possibly far + # from the source, sabotage the pickle protocol for this class so + # that pickle.dumps also fails. + # + # However, if the new class implements its own __reduce_ex__, do not + # sabotage -- it's on them to make sure it works correctly. We use + # __reduce_ex__ instead of any of the others as it is preferred by + # pickle over __reduce__, and it handles all pickle protocols. + unpicklable = False + if '__reduce_ex__' not in classdict: + if member_type is not object: + methods = ('__getnewargs_ex__', '__getnewargs__', + '__reduce_ex__', '__reduce__') + if not any(m in member_type.__dict__ for m in methods): + _make_class_unpicklable(enum_class) + unpicklable = True + + + # double check that repr and friends are not the mixin's or various + # things break (such as pickle) + for name in ('__repr__', '__str__', '__format__', '__reduce_ex__'): + class_method = getattr(enum_class, name) + obj_method = getattr(member_type, name, None) + enum_method = getattr(first_enum, name, None) + if name not in classdict and class_method is not enum_method: + if name == '__reduce_ex__' and unpicklable: + continue + setattr(enum_class, name, enum_method) + + # method resolution and int's are not playing nice + # Python's less than 2.6 use __cmp__ + + if pyver < 2.6: + + if issubclass(enum_class, int): + setattr(enum_class, '__cmp__', getattr(int, '__cmp__')) + + elif pyver < 3.0: + + if issubclass(enum_class, int): + for method in ( + '__le__', + '__lt__', + '__gt__', + '__ge__', + '__eq__', + '__ne__', + '__hash__', + ): + setattr(enum_class, method, getattr(int, method)) + + # replace any other __new__ with our own (as long as Enum is not None, + # anyway) -- again, this is to support pickle + 
if Enum is not None: + # if the user defined their own __new__, save it before it gets + # clobbered in case they subclass later + if save_new: + setattr(enum_class, '__member_new__', enum_class.__dict__['__new__']) + setattr(enum_class, '__new__', Enum.__dict__['__new__']) + return enum_class + + def __bool__(cls): + """ + classes/types should always be True. + """ + return True + + def __call__(cls, value, names=None, module=None, type=None, start=1): + """Either returns an existing member, or creates a new enum class. + + This method is used both when an enum class is given a value to match + to an enumeration member (i.e. Color(3)) and for the functional API + (i.e. Color = Enum('Color', names='red green blue')). + + When used for the functional API: `module`, if set, will be stored in + the new class' __module__ attribute; `type`, if set, will be mixed in + as the first base class. + + Note: if `module` is not set this routine will attempt to discover the + calling module by walking the frame stack; if this is unsuccessful + the resulting class will not be pickleable. + + """ + if names is None: # simple value lookup + return cls.__new__(cls, value) + # otherwise, functional API: we're creating a new Enum type + return cls._create_(value, names, module=module, type=type, start=start) + + def __contains__(cls, member): + return isinstance(member, cls) and member.name in cls._member_map_ + + def __delattr__(cls, attr): + # nicer error message when someone tries to delete an attribute + # (see issue19025). + if attr in cls._member_map_: + raise AttributeError( + "%s: cannot delete Enum member." % cls.__name__) + super(EnumMeta, cls).__delattr__(attr) + + def __dir__(self): + return (['__class__', '__doc__', '__members__', '__module__'] + + self._member_names_) + + @property + def __members__(cls): + """Returns a mapping of member name->value. + + This mapping lists all enum members, including aliases. Note that this + is a copy of the internal mapping. 
+ + """ + return cls._member_map_.copy() + + def __getattr__(cls, name): + """Return the enum member matching `name` + + We use __getattr__ instead of descriptors or inserting into the enum + class' __dict__ in order to support `name` and `value` being both + properties for enum members (which live in the class' __dict__) and + enum members themselves. + + """ + if _is_dunder(name): + raise AttributeError(name) + try: + return cls._member_map_[name] + except KeyError: + raise AttributeError(name) + + def __getitem__(cls, name): + return cls._member_map_[name] + + def __iter__(cls): + return (cls._member_map_[name] for name in cls._member_names_) + + def __reversed__(cls): + return (cls._member_map_[name] for name in reversed(cls._member_names_)) + + def __len__(cls): + return len(cls._member_names_) + + __nonzero__ = __bool__ + + def __repr__(cls): + return "" % cls.__name__ + + def __setattr__(cls, name, value): + """Block attempts to reassign Enum members. + + A simple assignment to the class namespace only changes one of the + several possible ways to get an Enum member from the Enum class, + resulting in an inconsistent Enumeration. + + """ + member_map = cls.__dict__.get('_member_map_', {}) + if name in member_map: + raise AttributeError('Cannot reassign members.') + super(EnumMeta, cls).__setattr__(name, value) + + def _create_(cls, class_name, names=None, module=None, type=None, start=1): + """Convenience method to create a new Enum class. + + `names` can be: + + * A string containing member names, separated either with spaces or + commas. Values are auto-numbered from 1. + * An iterable of member names. Values are auto-numbered from 1. + * An iterable of (member name, value) pairs. + * A mapping of member name -> value. 
+ + """ + if pyver < 3.0: + # if class_name is unicode, attempt a conversion to ASCII + if isinstance(class_name, unicode): + try: + class_name = class_name.encode('ascii') + except UnicodeEncodeError: + raise TypeError('%r is not representable in ASCII' % class_name) + metacls = cls.__class__ + if type is None: + bases = (cls, ) + else: + bases = (type, cls) + classdict = metacls.__prepare__(class_name, bases) + _order_ = [] + + # special processing needed for names? + if isinstance(names, basestring): + names = names.replace(',', ' ').split() + if isinstance(names, (tuple, list)) and isinstance(names[0], basestring): + names = [(e, i+start) for (i, e) in enumerate(names)] + + # Here, names is either an iterable of (name, value) or a mapping. + item = None # in case names is empty + for item in names: + if isinstance(item, basestring): + member_name, member_value = item, names[item] + else: + member_name, member_value = item + classdict[member_name] = member_value + _order_.append(member_name) + # only set _order_ in classdict if name/value was not from a mapping + if not isinstance(item, basestring): + classdict['_order_'] = ' '.join(_order_) + enum_class = metacls.__new__(metacls, class_name, bases, classdict) + + # TODO: replace the frame hack if a blessed way to know the calling + # module is ever developed + if module is None: + try: + module = _sys._getframe(2).f_globals['__name__'] + except (AttributeError, ValueError): + pass + if module is None: + _make_class_unpicklable(enum_class) + else: + enum_class.__module__ = module + + return enum_class + + @staticmethod + def _get_mixins_(bases): + """Returns the type for creating enum members, and the first inherited + enum class. 
+ + bases: the tuple of bases that was given to __new__ + + """ + if not bases or Enum is None: + return object, Enum + + + # double check that we are not subclassing a class with existing + # enumeration members; while we're at it, see if any other data + # type has been mixed in so we can use the correct __new__ + member_type = first_enum = None + for base in bases: + if (base is not Enum and + issubclass(base, Enum) and + base._member_names_): + raise TypeError("Cannot extend enumerations") + # base is now the last base in bases + if not issubclass(base, Enum): + raise TypeError("new enumerations must be created as " + "`ClassName([mixin_type,] enum_type)`") + + # get correct mix-in type (either mix-in type of Enum subclass, or + # first base if last base is Enum) + if not issubclass(bases[0], Enum): + member_type = bases[0] # first data type + first_enum = bases[-1] # enum type + else: + for base in bases[0].__mro__: + # most common: (IntEnum, int, Enum, object) + # possible: (, , + # , , + # ) + if issubclass(base, Enum): + if first_enum is None: + first_enum = base + else: + if member_type is None: + member_type = base + + return member_type, first_enum + + if pyver < 3.0: + @staticmethod + def _find_new_(classdict, member_type, first_enum): + """Returns the __new__ to be used for creating the enum members. 
+ + classdict: the class dictionary given to __new__ + member_type: the data type whose __new__ will be used by default + first_enum: enumeration to check for an overriding __new__ + + """ + # now find the correct __new__, checking to see of one was defined + # by the user; also check earlier enum classes in case a __new__ was + # saved as __member_new__ + __new__ = classdict.get('__new__', None) + if __new__: + return None, True, True # __new__, save_new, use_args + + N__new__ = getattr(None, '__new__') + O__new__ = getattr(object, '__new__') + if Enum is None: + E__new__ = N__new__ + else: + E__new__ = Enum.__dict__['__new__'] + # check all possibles for __member_new__ before falling back to + # __new__ + for method in ('__member_new__', '__new__'): + for possible in (member_type, first_enum): + try: + target = possible.__dict__[method] + except (AttributeError, KeyError): + target = getattr(possible, method, None) + if target not in [ + None, + N__new__, + O__new__, + E__new__, + ]: + if method == '__member_new__': + classdict['__new__'] = target + return None, False, True + if isinstance(target, staticmethod): + target = target.__get__(member_type) + __new__ = target + break + if __new__ is not None: + break + else: + __new__ = object.__new__ + + # if a non-object.__new__ is used then whatever value/tuple was + # assigned to the enum member name will be passed to __new__ and to the + # new enum member's __init__ + if __new__ is object.__new__: + use_args = False + else: + use_args = True + + return __new__, False, use_args + else: + @staticmethod + def _find_new_(classdict, member_type, first_enum): + """Returns the __new__ to be used for creating the enum members. 
+ + classdict: the class dictionary given to __new__ + member_type: the data type whose __new__ will be used by default + first_enum: enumeration to check for an overriding __new__ + + """ + # now find the correct __new__, checking to see of one was defined + # by the user; also check earlier enum classes in case a __new__ was + # saved as __member_new__ + __new__ = classdict.get('__new__', None) + + # should __new__ be saved as __member_new__ later? + save_new = __new__ is not None + + if __new__ is None: + # check all possibles for __member_new__ before falling back to + # __new__ + for method in ('__member_new__', '__new__'): + for possible in (member_type, first_enum): + target = getattr(possible, method, None) + if target not in ( + None, + None.__new__, + object.__new__, + Enum.__new__, + ): + __new__ = target + break + if __new__ is not None: + break + else: + __new__ = object.__new__ + + # if a non-object.__new__ is used then whatever value/tuple was + # assigned to the enum member name will be passed to __new__ and to the + # new enum member's __init__ + if __new__ is object.__new__: + use_args = False + else: + use_args = True + + return __new__, save_new, use_args + + +######################################################## +# In order to support Python 2 and 3 with a single +# codebase we have to create the Enum methods separately +# and then use the `type(name, bases, dict)` method to +# create the class. +######################################################## +temp_enum_dict = {} +temp_enum_dict['__doc__'] = "Generic enumeration.\n\n Derive from this class to define new enumerations.\n\n" + +def __new__(cls, value): + # all enum instances are actually created during class construction + # without calling this method; this method is called by the metaclass' + # __call__ (i.e. 
Color(3) ), and by pickle + if type(value) is cls: + # For lookups like Color(Color.red) + value = value.value + #return value + # by-value search for a matching enum member + # see if it's in the reverse mapping (for hashable values) + try: + if value in cls._value2member_map_: + return cls._value2member_map_[value] + except TypeError: + # not there, now do long search -- O(n) behavior + for member in cls._member_map_.values(): + if member.value == value: + return member + raise ValueError("%s is not a valid %s" % (value, cls.__name__)) +temp_enum_dict['__new__'] = __new__ +del __new__ + +def __repr__(self): + return "<%s.%s: %r>" % ( + self.__class__.__name__, self._name_, self._value_) +temp_enum_dict['__repr__'] = __repr__ +del __repr__ + +def __str__(self): + return "%s.%s" % (self.__class__.__name__, self._name_) +temp_enum_dict['__str__'] = __str__ +del __str__ + +if pyver >= 3.0: + def __dir__(self): + added_behavior = [ + m + for cls in self.__class__.mro() + for m in cls.__dict__ + if m[0] != '_' and m not in self._member_map_ + ] + return (['__class__', '__doc__', '__module__', ] + added_behavior) + temp_enum_dict['__dir__'] = __dir__ + del __dir__ + +def __format__(self, format_spec): + # mixed-in Enums should use the mixed-in type's __format__, otherwise + # we can get strange results with the Enum name showing up instead of + # the value + + # pure Enum branch + if self._member_type_ is object: + cls = str + val = str(self) + # mix-in branch + else: + cls = self._member_type_ + val = self.value + return cls.__format__(val, format_spec) +temp_enum_dict['__format__'] = __format__ +del __format__ + + +#################################### +# Python's less than 2.6 use __cmp__ + +if pyver < 2.6: + + def __cmp__(self, other): + if type(other) is self.__class__: + if self is other: + return 0 + return -1 + return NotImplemented + raise TypeError("unorderable types: %s() and %s()" % (self.__class__.__name__, other.__class__.__name__)) + 
temp_enum_dict['__cmp__'] = __cmp__ + del __cmp__ + +else: + + def __le__(self, other): + raise TypeError("unorderable types: %s() <= %s()" % (self.__class__.__name__, other.__class__.__name__)) + temp_enum_dict['__le__'] = __le__ + del __le__ + + def __lt__(self, other): + raise TypeError("unorderable types: %s() < %s()" % (self.__class__.__name__, other.__class__.__name__)) + temp_enum_dict['__lt__'] = __lt__ + del __lt__ + + def __ge__(self, other): + raise TypeError("unorderable types: %s() >= %s()" % (self.__class__.__name__, other.__class__.__name__)) + temp_enum_dict['__ge__'] = __ge__ + del __ge__ + + def __gt__(self, other): + raise TypeError("unorderable types: %s() > %s()" % (self.__class__.__name__, other.__class__.__name__)) + temp_enum_dict['__gt__'] = __gt__ + del __gt__ + + +def __eq__(self, other): + if type(other) is self.__class__: + return self is other + return NotImplemented +temp_enum_dict['__eq__'] = __eq__ +del __eq__ + +def __ne__(self, other): + if type(other) is self.__class__: + return self is not other + return NotImplemented +temp_enum_dict['__ne__'] = __ne__ +del __ne__ + +def __hash__(self): + return hash(self._name_) +temp_enum_dict['__hash__'] = __hash__ +del __hash__ + +def __reduce_ex__(self, proto): + return self.__class__, (self._value_, ) +temp_enum_dict['__reduce_ex__'] = __reduce_ex__ +del __reduce_ex__ + +# _RouteClassAttributeToGetattr is used to provide access to the `name` +# and `value` properties of enum members while keeping some measure of +# protection from modification, while still allowing for an enumeration +# to have members named `name` and `value`. This works because enumeration +# members are not set directly on the enum class -- __getattr__ is +# used to look them up. 
+ +@_RouteClassAttributeToGetattr +def name(self): + return self._name_ +temp_enum_dict['name'] = name +del name + +@_RouteClassAttributeToGetattr +def value(self): + return self._value_ +temp_enum_dict['value'] = value +del value + +@classmethod +def _convert(cls, name, module, filter, source=None): + """ + Create a new Enum subclass that replaces a collection of global constants + """ + # convert all constants from source (or module) that pass filter() to + # a new Enum called name, and export the enum and its members back to + # module; + # also, replace the __reduce_ex__ method so unpickling works in + # previous Python versions + module_globals = vars(_sys.modules[module]) + if source: + source = vars(source) + else: + source = module_globals + members = dict((name, value) for name, value in source.items() if filter(name)) + cls = cls(name, members, module=module) + cls.__reduce_ex__ = _reduce_ex_by_name + module_globals.update(cls.__members__) + module_globals[name] = cls + return cls +temp_enum_dict['_convert'] = _convert +del _convert + +Enum = EnumMeta('Enum', (object, ), temp_enum_dict) +del temp_enum_dict + +# Enum has now been created +########################### + +class IntEnum(int, Enum): + """Enum where members are also (and must be) ints""" + +def _reduce_ex_by_name(self, proto): + return self.name + +def unique(enumeration): + """Class decorator that ensures only unique members exist in an enumeration.""" + duplicates = [] + for name, member in enumeration.__members__.items(): + if name != member.name: + duplicates.append((name, member.name)) + if duplicates: + duplicate_names = ', '.join( + ["%s -> %s" % (alias, name) for (alias, name) in duplicates] + ) + raise ValueError('duplicate names found in %r: %s' % + (enumeration, duplicate_names) + ) + return enumeration diff --git a/libs/jellyfish/__init__.py b/libs/jellyfish/__init__.py new file mode 100644 index 00000000..78345699 --- /dev/null +++ b/libs/jellyfish/__init__.py @@ -0,0 +1,4 @@ +try: 
+ from .cjellyfish import * # noqa +except ImportError: + from ._jellyfish import * # noqa diff --git a/libs/jellyfish/_jellyfish.py b/libs/jellyfish/_jellyfish.py new file mode 100644 index 00000000..a596bb73 --- /dev/null +++ b/libs/jellyfish/_jellyfish.py @@ -0,0 +1,489 @@ +import unicodedata +from collections import defaultdict +from .compat import _range, _zip_longest, _no_bytes_err +from .porter import Stemmer + + +def _normalize(s): + return unicodedata.normalize('NFKD', s) + + +def levenshtein_distance(s1, s2): + if isinstance(s1, bytes) or isinstance(s2, bytes): + raise TypeError(_no_bytes_err) + + if s1 == s2: + return 0 + rows = len(s1)+1 + cols = len(s2)+1 + + if not s1: + return cols-1 + if not s2: + return rows-1 + + prev = None + cur = range(cols) + for r in _range(1, rows): + prev, cur = cur, [r] + [0]*(cols-1) + for c in _range(1, cols): + deletion = prev[c] + 1 + insertion = cur[c-1] + 1 + edit = prev[c-1] + (0 if s1[r-1] == s2[c-1] else 1) + cur[c] = min(edit, deletion, insertion) + + return cur[-1] + + +def _jaro_winkler(ying, yang, long_tolerance, winklerize): + if isinstance(ying, bytes) or isinstance(yang, bytes): + raise TypeError(_no_bytes_err) + + ying_len = len(ying) + yang_len = len(yang) + + if not ying_len or not yang_len: + return 0 + + min_len = max(ying_len, yang_len) + search_range = (min_len // 2) - 1 + if search_range < 0: + search_range = 0 + + ying_flags = [False]*ying_len + yang_flags = [False]*yang_len + + # looking only within search range, count & flag matched pairs + common_chars = 0 + for i, ying_ch in enumerate(ying): + low = i - search_range if i > search_range else 0 + hi = i + search_range if i + search_range < yang_len else yang_len - 1 + for j in _range(low, hi+1): + if not yang_flags[j] and yang[j] == ying_ch: + ying_flags[i] = yang_flags[j] = True + common_chars += 1 + break + + # short circuit if no characters match + if not common_chars: + return 0 + + # count transpositions + k = trans_count = 0 + for i, ying_f 
in enumerate(ying_flags): + if ying_f: + for j in _range(k, yang_len): + if yang_flags[j]: + k = j + 1 + break + if ying[i] != yang[j]: + trans_count += 1 + trans_count /= 2 + + # adjust for similarities in nonmatched characters + common_chars = float(common_chars) + weight = ((common_chars/ying_len + common_chars/yang_len + + (common_chars-trans_count) / common_chars)) / 3 + + # winkler modification: continue to boost if strings are similar + if winklerize and weight > 0.7 and ying_len > 3 and yang_len > 3: + # adjust for up to first 4 chars in common + j = min(min_len, 4) + i = 0 + while i < j and ying[i] == yang[i] and ying[i]: + i += 1 + if i: + weight += i * 0.1 * (1.0 - weight) + + # optionally adjust for long strings + # after agreeing beginning chars, at least two or more must agree and + # agreed characters must be > half of remaining characters + if (long_tolerance and min_len > 4 and common_chars > i+1 and + 2 * common_chars >= min_len + i): + weight += ((1.0 - weight) * (float(common_chars-i-1) / float(ying_len+yang_len-i*2+2))) + + return weight + + +def damerau_levenshtein_distance(s1, s2): + if isinstance(s1, bytes) or isinstance(s2, bytes): + raise TypeError(_no_bytes_err) + + len1 = len(s1) + len2 = len(s2) + infinite = len1 + len2 + + # character array + da = defaultdict(int) + + # distance matrix + score = [[0]*(len2+2) for x in _range(len1+2)] + + score[0][0] = infinite + for i in _range(0, len1+1): + score[i+1][0] = infinite + score[i+1][1] = i + for i in _range(0, len2+1): + score[0][i+1] = infinite + score[1][i+1] = i + + for i in _range(1, len1+1): + db = 0 + for j in _range(1, len2+1): + i1 = da[s2[j-1]] + j1 = db + cost = 1 + if s1[i-1] == s2[j-1]: + cost = 0 + db = j + + score[i+1][j+1] = min(score[i][j] + cost, + score[i+1][j] + 1, + score[i][j+1] + 1, + score[i1][j1] + (i-i1-1) + 1 + (j-j1-1)) + da[s1[i-1]] = i + + return score[len1+1][len2+1] + + +def jaro_distance(s1, s2): + return _jaro_winkler(s1, s2, False, False) + + +def 
jaro_winkler(s1, s2, long_tolerance=False): + return _jaro_winkler(s1, s2, long_tolerance, True) + + +def soundex(s): + if not s: + return s + if isinstance(s, bytes): + raise TypeError(_no_bytes_err) + + s = _normalize(s) + + replacements = (('bfpv', '1'), + ('cgjkqsxz', '2'), + ('dt', '3'), + ('l', '4'), + ('mn', '5'), + ('r', '6')) + result = [s[0]] + count = 1 + + # find would-be replacment for first character + for lset, sub in replacements: + if s[0].lower() in lset: + last = sub + break + else: + last = None + + for letter in s[1:]: + for lset, sub in replacements: + if letter.lower() in lset: + if sub != last: + result.append(sub) + count += 1 + last = sub + break + else: + last = None + if count == 4: + break + + result += '0'*(4-count) + return ''.join(result) + + +def hamming_distance(s1, s2): + if isinstance(s1, bytes) or isinstance(s2, bytes): + raise TypeError(_no_bytes_err) + + # ensure length of s1 >= s2 + if len(s2) > len(s1): + s1, s2 = s2, s1 + + # distance is difference in length + differing chars + distance = len(s1) - len(s2) + for i, c in enumerate(s2): + if c != s1[i]: + distance += 1 + + return distance + + +def nysiis(s): + if isinstance(s, bytes): + raise TypeError(_no_bytes_err) + if not s: + return '' + + s = s.upper() + key = [] + + # step 1 - prefixes + if s.startswith('MAC'): + s = 'MCC' + s[3:] + elif s.startswith('KN'): + s = s[1:] + elif s.startswith('K'): + s = 'C' + s[1:] + elif s.startswith(('PH', 'PF')): + s = 'FF' + s[2:] + elif s.startswith('SCH'): + s = 'SSS' + s[3:] + + # step 2 - suffixes + if s.endswith(('IE', 'EE')): + s = s[:-2] + 'Y' + elif s.endswith(('DT', 'RT', 'RD', 'NT', 'ND')): + s = s[:-2] + 'D' + + # step 3 - first character of key comes from name + key.append(s[0]) + + # step 4 - translate remaining chars + i = 1 + len_s = len(s) + while i < len_s: + ch = s[i] + if ch == 'E' and i+1 < len_s and s[i+1] == 'V': + ch = 'AF' + i += 1 + elif ch in 'AEIOU': + ch = 'A' + elif ch == 'Q': + ch = 'G' + elif ch == 'Z': 
+ ch = 'S' + elif ch == 'M': + ch = 'N' + elif ch == 'K': + if i+1 < len(s) and s[i+1] == 'N': + ch = 'N' + else: + ch = 'C' + elif ch == 'S' and s[i+1:i+3] == 'CH': + ch = 'SS' + i += 2 + elif ch == 'P' and i+1 < len(s) and s[i+1] == 'H': + ch = 'F' + i += 1 + elif ch == 'H' and (s[i-1] not in 'AEIOU' or (i+1 < len(s) and s[i+1] not in 'AEIOU')): + if s[i-1] in 'AEIOU': + ch = 'A' + else: + ch = s[i-1] + elif ch == 'W' and s[i-1] in 'AEIOU': + ch = s[i-1] + + if ch[-1] != key[-1][-1]: + key.append(ch) + + i += 1 + + key = ''.join(key) + + # step 5 - remove trailing S + if key.endswith('S') and key != 'S': + key = key[:-1] + + # step 6 - replace AY w/ Y + if key.endswith('AY'): + key = key[:-2] + 'Y' + + # step 7 - remove trailing A + if key.endswith('A') and key != 'A': + key = key[:-1] + + # step 8 was already done + + return key + + +def match_rating_codex(s): + if isinstance(s, bytes): + raise TypeError(_no_bytes_err) + s = s.upper() + codex = [] + + prev = None + for i, c in enumerate(s): + # not a space OR + # starting character & vowel + # or consonant not preceded by same consonant + if (c != ' ' and (i == 0 and c in 'AEIOU') or (c not in 'AEIOU' and c != prev)): + codex.append(c) + + prev = c + + # just use first/last 3 + if len(codex) > 6: + return ''.join(codex[:3]+codex[-3:]) + else: + return ''.join(codex) + + +def match_rating_comparison(s1, s2): + codex1 = match_rating_codex(s1) + codex2 = match_rating_codex(s2) + len1 = len(codex1) + len2 = len(codex2) + res1 = [] + res2 = [] + + # length differs by 3 or more, no result + if abs(len1-len2) >= 3: + return None + + # get minimum rating based on sums of codexes + lensum = len1 + len2 + if lensum <= 4: + min_rating = 5 + elif lensum <= 7: + min_rating = 4 + elif lensum <= 11: + min_rating = 3 + else: + min_rating = 2 + + # strip off common prefixes + for c1, c2 in _zip_longest(codex1, codex2): + if c1 != c2: + if c1: + res1.append(c1) + if c2: + res2.append(c2) + + unmatched_count1 = unmatched_count2 = 
0 + for c1, c2 in _zip_longest(reversed(res1), reversed(res2)): + if c1 != c2: + if c1: + unmatched_count1 += 1 + if c2: + unmatched_count2 += 1 + + return (6 - max(unmatched_count1, unmatched_count2)) >= min_rating + + +def metaphone(s): + if isinstance(s, bytes): + raise TypeError(_no_bytes_err) + + result = [] + + s = _normalize(s.lower()) + + # skip first character if s starts with these + if s.startswith(('kn', 'gn', 'pn', 'ac', 'wr', 'ae')): + s = s[1:] + + i = 0 + + while i < len(s): + c = s[i] + next = s[i+1] if i < len(s)-1 else '*****' + nextnext = s[i+2] if i < len(s)-2 else '*****' + + # skip doubles except for cc + if c == next and c != 'c': + i += 1 + continue + + if c in 'aeiou': + if i == 0 or s[i-1] == ' ': + result.append(c) + elif c == 'b': + if (not (i != 0 and s[i-1] == 'm')) or next: + result.append('b') + elif c == 'c': + if next == 'i' and nextnext == 'a' or next == 'h': + result.append('x') + i += 1 + elif next in 'iey': + result.append('s') + i += 1 + else: + result.append('k') + elif c == 'd': + if next == 'g' and nextnext in 'iey': + result.append('j') + i += 2 + else: + result.append('t') + elif c in 'fjlmnr': + result.append(c) + elif c == 'g': + if next in 'iey': + result.append('j') + elif next not in 'hn': + result.append('k') + elif next == 'h' and nextnext and nextnext not in 'aeiou': + i += 1 + elif c == 'h': + if i == 0 or next in 'aeiou' or s[i-1] not in 'aeiou': + result.append('h') + elif c == 'k': + if i == 0 or s[i-1] != 'c': + result.append('k') + elif c == 'p': + if next == 'h': + result.append('f') + i += 1 + else: + result.append('p') + elif c == 'q': + result.append('k') + elif c == 's': + if next == 'h': + result.append('x') + i += 1 + elif next == 'i' and nextnext in 'oa': + result.append('x') + i += 2 + else: + result.append('s') + elif c == 't': + if next == 'i' and nextnext in 'oa': + result.append('x') + elif next == 'h': + result.append('0') + i += 1 + elif next != 'c' or nextnext != 'h': + result.append('t') + 
elif c == 'v': + result.append('f') + elif c == 'w': + if i == 0 and next == 'h': + i += 1 + next = s[i+1] + if next in 'aeiou': + result.append('w') + elif c == 'x': + if i == 0: + if next == 'h' or (next == 'i' and nextnext in 'oa'): + result.append('x') + else: + result.append('s') + else: + result.append('k') + result.append('s') + elif c == 'y': + if next in 'aeiou': + result.append('y') + elif c == 'z': + result.append('s') + elif c == ' ': + if len(result) > 0 and result[-1] != ' ': + result.append(' ') + + i += 1 + + return ''.join(result).upper() + + +def porter_stem(s): + if isinstance(s, bytes): + raise TypeError(_no_bytes_err) + return Stemmer(s).stem() diff --git a/libs/jellyfish/cjellyfish.pyd b/libs/jellyfish/cjellyfish.pyd new file mode 100644 index 0000000000000000000000000000000000000000..fb20e5d7fa70a0bec447302c41e64260ec7b667f GIT binary patch literal 26624 zcmeHw3wTsjmhP@ck|GaIc$5SbDK)fF9>xTfP^c-W;3=rY2;rqb5-L?B8j_ft1BphA zU5Qd2%VBz)Y3%8qrXSO!d%Rb=#}@F~b{+_UpagtjV=JOYp=ET+H0?~_u8jI;q6U4)s>b2Ib%|qrZH0ekT3T|Z zPP(G(pVJ(s&(;{}s;0ZwT#oWpO@TF6^7>P2Qh0g8npqsKTXPwQ6&xNn_L(<{` z#ws36V4wZV(&a|o0K43gVjjiVJk(sS)n<7C(|NwdM1b;U#uDfY9|N*&D3EgS(OR@l zdSBFzENeB{>lrIXtGiNz)SrJ^7<)vg$IDw7Tcx%9$G*!LtK))SxZ6z0-zcGJOg_Fy z#Cb^Zd{J~TRyMP~qFgFxtR)Qfa2E^b8S)ww}COosP z9mFqbs1%j733oB(RStN=34bnSObpxKBoJDanXTH#-G;1kRBY>zlSKtpVsKAZx2W`p zq2^47YCl_y_Mk--l|e#SViT2Kb#bJ6t+KNLGqJPuhO*NVGPd_z?F(6R&Bh-=?`2dNF^ zc8_vEwda8gq`wpcU8V`J&Jdu;5TLPJY0cbf$$c2ZXF1F`RZa2nR%A6S%&FMvSbn+ZdS!F*s3Fy2LOD zd+}8%hP;_sRLu5<-I=qg=zxyig@=QZReJ;I=+bW`l41K{-URh+G5Cw!Bz75zoei-u zrQXchkq5AJN#vga%@Fyzp)R_}KLs^iWCzg??h)ff{;u+_GUV+pipd*67V>%wd5@#a zr~AV4ev(Y+Y1ac?=~L}Lpxz^-Ie759%5!QpdXxTghvZqI$@4aEg8quXS)NYF6C6}! 
z%=G!fnBeI`dI&W*c%IvOjz-OCP+L2Mf-#inW0+1@aazt_f)O~#LY3FaAoC&6bVJs^;vouI zZF~|Pcfbpj072=E{OYU<2S83jdPfY44)P7zkq&`qf7La#sq#40{u>Y@?~om%7EarX zN292`rrQ6JHy#2_)!w;CM--KAhzh=OOX@=MwsI<%`y0Ma-p|imNZzvaZ<6Gf*O+II(=BDc zt3A)pySY8nSSq>G((Jh}MYHFNL?vMr^;IEX!eYvi zg~GU~Wnru(-)KSY+*oanQ9EKJTJb{q|K;tZ|77&j1;2(!gzZ~U&-H&L%DP51i;t=Q z%W;HasQ)z&m7vif^;dElhWel52%7re7j@)sQa|bcEB2DX<-HxWbb0S5vSGW;kar`> zh6vnBF3N}(Gn-2>Bqkmk`C>@QoUL|Bk=qdnxr@wjLwBzvy4yM37x`*` z=@!B|6bXW(s2IGyR!7j!Yz+M$=@}DF8(qd&LGJ#7+>U*CXn%%-j%q(c1YtW1(4L5P zeI-OWAMUcQ1c7sWO91;lM)l;hLJ4dwV_)6V1ozC2b3kr-UVE`c$Ox5Wrqk2gwrn|s zYIG@|h`uvzM}*ejLyUs9{X**#$b@e(xtym-hB|MkBD1VOdHWeUXSju1{|e2xQJ1OO z-@}fa{3+;W(Hu;~b3DPswL~2ibi|TqEM3zbUSQ(=#ZWb#4L%IuH@t6zx?#EWs`dv^ zLH#>1vi?CZ^8$B;OWEUk7L3{BUQB(l7j_~}>mvTxc)f9W(5t9!<)|wdH5CZr8->1T zn<_lohu$J)6(;#?<`xn=yg~ISpLvxI@ryUbX@grl&OdvUmtD$UQ7C-1!1<<2D8gdz z^Ei*(Um#+SwGExT$_rvRwb(nYC$J?@ke4}1PM#<1v?c}eOwwpk?Gl5XW^~x}PIRn_ zY1~c6oj`!M>#gQ6EjS3*noC1dp5k(Om1+p-dn&UXeTA!} z%xsVH=K|&B0_BZD<)ByjB^LzN=$ZCrp|UqHFF4o2q`6+_0hiM2Q9AFtLYUHmt}GC2 z#tBnS71a*4Q*?GUe-yR3!z)eRuxN5YXm`Ms$0UnO?c(wTKQ_s~b1OgQd&z)M^Oj5L zdR#eFsQh(W*Wtl{OQ^wCvF62TJ)a8i9m5XnQ>2%W??BprQdl#9)=t2~RCiKXaT57% zzyZMP1|f+%@7YW`vkR?Os0G`)>;8mV;la;Q=L(*+2#XI9_2Nmu+!`7XzSRt(yKhe)!|Y07EXIn*l7#6oT(=H;z00p-o`|HXEz9# z7MHq5Cf(r*p0RFP z>cRuh}+;HiC{6v84YGPTsuE(&2&vW2a<29huqVSqED_FT6qnL zw01<-)L}4ri8SOpvCSR4LUJvOb8_BRcohpAv(rNqF$*z$SmiJz%34zJ&KD4}?|v zQBk-e*Ge;o^U?j_d)~C62%5N$6)LY5Dtq8Oyx}z}mfai~Lo~TUEPAO+8Rb%Lv$~Z0 zb8076YL~N9nAhVxEX+Fqm*Iihyp~?CrP~8{ChzeCpNAUvSjFJ;Ru8S%9}znT*$7Og zBD0YUrH=bKkMiO?WmWb$)$Vj*)6W%KNmH%s;vjeZBKiI$OP4KomAIC<$o*$~!cAtc za>A>8@{A3`-OXSug$f)$oRvq3EDB#@QtfI!SuH(frDi=m9%j)mdzuK=GFg{{dR1^!hG zO%y{ec+C#cazJ!Gzu}`?oS!{5Pq^47Ov%SQpxsQ(!A=|O%jR+J+wi{Ye~@j{bE3}c z{Il>-3W~Y=3IC7OfU|+esR|Fhio8epAb^iBr4>CRWkQ_m1tmo zo_o!CIYCt3iY9P}?iP0L7vXqlcpixFQMx_ClU;@3B_^-%En?y z(`i}cdz3*s^m_v5);)4GC#mgaF}?{I72 zCuObxv;5L-Z1SMYg79r{P@hXvJiCTh#@WsY(UBXW~T}r1mYD1}UaJYUQqVmb^(O@GgXAmEnOBveY%IQQA 
zil+TEW~&tD*@1&L>^1W6mt^~Vqa8a20zv1Izs#^e*R!+U|LV()kk&jjmz<-&6*@P|$~ zw~l`a4rXkeFK%@SPq!wPO~t-OdE`MipvUr}`u2`%qhojEL`Uz)jVAAy6;0f6UDUke z`usW1OJlM3l+rd^Z=j8ex-V+Z!D8gArZoF^+1_a#5%U$`Dm+(+gCPSag=~2v|5n0fs7~HSIAuC^oi%kVNpP&5i;unWuRHIbY zeqs)3n;$Jx!8?n)`_-l0TrN^`hui zY9gPeKCjvp^m_Pce*s6Tlqc;?S`pt|tu=p-H~Xcoma|RLAND3KBEn;MY?o zp|(VPSIHOeOOXQT8U4Q0JFA6_KeRDy1!FE2Z%* zXKQe1a6~PEU_A=QM7|%aL^~TzQ9rV@Va=`O9XcEY$0Wz;MpR| z{tQ_thUREN{2cY%zz6nyj#*5Y8+n#ft>)RqR9imOf7l#v*80!m%~kT1&@ms>*>ajl zpvFz)Dh>A{PIU=UrM(f&`2?C8KL53H9=cK6$8ZR`#M$u@U!+m#v)=Hxpn*-ve29ibEuSs*lfOZJq?;zqQNf-G;zr!cmgI)ZrYo)90Ek|rk>wJPiZ2_ zNH$t4jpmZtun-8PNRu_X09L*C78ByX`^-_8z=a7r_9m^QUKx7tbnKs`l$M(vQsUku z2UOguQ=`$pD6=Tbg{tfAAqyeNXavn(KNNmFY=Er2VSWO9uNhaH< z4EcoX)zlQ$hvadO4>9F7}Zv2E`NcKk6o zI8k_LCCb?ID31iUAx`SC^r~+Iv&p;+I&z1`YDa(UCf%Vq{nrAQD*Gt}5eVhDB!vUOlP*G!5RBx6&+Minv>qRh|$C%?X6=!9KOiimWHtY3+ZR zYiLeC?cWH?%mtV;OaO}NK!D5pw=L zV;4V*IQd!RI(`jd5k1H(xL(}&@Z2M5bqF5=XSJFzk=Nwjw7%nC}QqUZV zDLBCJd>}Z-C65xFuSi@NzThL-w3446TI=0`E&FFM_W8`A^h8{F3F?jZ_x1V{I;8rU zXg{KtO*#z&zbB67pMYZ=A$F%@2H|5WAd68K57Fd`Z-S%C5*3rz*R=rA&wN*~lOCc>sVPYO=c31Ce$d1atRe z9v$X5EH9{U7viiv!4+PD z^UA@fui;;}^AUJGJ5DR=3?c0!gz6I}6h4j^`(C%QG8MClI8*^^0Is_%IB%vqqSe=Q7o9L@x{S28>R8VGmVlEJkum48{%bAWR+}wh+8Zk zg!l=WR0huXxRLZIN4>r^Hry}K%&&9{&v~b)g-W;E*&~ew5zLv5wp=*%qffUhJRec_ z``^nw(xz_uo_E^5qOjYAnEDhv429=XCt=9U*(vWuV2z^7qVu)p2&dtm_74-HT<$!!m=S?@xuwOKnFsO)j(?DG;8s=D}ll)XuukQ|x?MoQk)uQDHMI(pekvPVtUMng% z(RSjopH5FYa6i=iSMYR(=0i=x<0A38r@YSJh{y5xt*Zo9fQ{8vUZ2O7tK#ygxsWyR z7bd3u_<*VkZJjV=OpUh?`b~HW+bXCxpF-}4HZ; zBx4eAVbXjeINx?2-Oh#Pr$*~y1n<-c7HR}7&2F9GIjlKOFMEhy)(|~Q^9fh@wl70V zQXy0pXu#TW4_j!)r9e~=eT!_k>?%CpASH%fDpyUe7nMLZ0U6v=iy+_~2G_+J*P?U7 z^v(It9^yZJ1b^3;Sg9CJWC>oufXjch9%oQ>k*)tuK+XgF9-GHCc7w*s+&oifbw03# z)1Ny;f7}p#GmX!6KDyW-vzU_++60i+r4}B6lfpxnCZH&$ulyK2p^dh5Lo_YTZ=&ay zUf)s3>{ecbyAd{14BUu!>#pZ7gWLtA8Lp@QpMm2J-C-kH)?i5cU3^G)(ON`+uX*jl zaLF2dJXgBGU)X7ecIn7UCW@e#x%r)f@ci>`FczLmdj$PGzCPISGDHV0sJRz4C>C3_ 
z;(EQ9=nCgs`X`!rXytZPYS+VXL%+q54DRtpuWvq9Gm*HN`pgS?^L3KhMp>tMnNik= z?txV!UXvD2bd^SQ&JfX)`NN66W0ZBGFB@e}G+HnE?!_w1#m!LjZ1~iPHPghGf1Wz|eIl(DCl&0!*?;NpNBM4pj!lpiA@Abz!LFYM&x{iu`j;fwdN?ieJO$vol*b;} zg{c)Sht(69<~fl686aS_^hV^ECg;=}DbzWSk8%Fu#;r`_S?X1Y(h2$Ee;IdErw{1&_t3ZXZS!D)=EP;#II2<^B>))J;T`R1hhN5uF)n z)QR3qNHJ%-47gl0GeqM)$gsJ~Wc(#t}p-iKtB@x`v2i+$>_#7DcPdAJx5)ES>Pjyi9gFXC&dPM-aZ42tNsY zvL_-;(vrRDURq=o#+Q9-V-#Ik0g4_2%@uzC`Heqg6m^MpU^C-^%tB_ zZ{*;Brh|Wo5gt2|uz3XGPxC-{iAJ~|`Iv5tLEF%+q|MpF?9~*=t zXE6JQ-Cn9w{R^K>vV`VgR8c_}Hu1V$jdK6L0v~EvLPYta=U+9GYW=Zp7SDgIso{|r z;cG?`o-%@PI?B;oL3I>SCBr5A@^Zq`EtIe0L^nA#J9-~i9z)}Yq<=z1yrjKxWxh9x zxi9SNuf!xg>0<1(4nKfVQ@I$4V0;qc5{&ZuaHd9{-~b*)-u$6h58vO=)x5*Y_Xs$u zy-s`9<-yaqUGkW4O>-KTOFzf(XxHV&u1jcZgQ&|0i zL&G+t>&MLv?Gh)~K?V}@ekfNLvw4G8=_(3)mk;Xk`uR8?W>jnPnQ%Z@M(?}O`;izE zop)lgS#;U@xgpZhXak?&vPr1JmeaWHiEaepQ^>>!|AHe)6L9Y5r@@`$#X#^(gLEIhC5h1Y z6FMm*dNWFYBq|7+COi{QWi_WFi>Ut0>2Qe-sg6=bTv^uh+W=3;)f3+JNH-&uAypw6{RuvV z^fmDx{rx2e9scMHsJtF(!ZpL(O@8^Z$mV`7@cm`^yLsK9~=2VhgSXz7(D+GMx*xeaG#2CRywFoU6p~b%+w9|K6p^$!Czs- z@6k|2uYS@mb4n>6O329NU=)G2V{!sQ98tVhb_I(ZpnYsO?JPj#S6coZSa#$`$Q#jF z#MtnDUuXs1daFc`voXUPDs#kMl5ylBuz4JB$~a#W+Gb!02;z>fu8Xj1LJKwqzN9Ivkmt^dTSr#X1T!)Pc}&e4mqxQz{dh&Lxc zsJ~szyN^kqsP+o9ig^W0`irR0YecK4OgdXc8C)W&c;_wcO+>v_dp>VV+m8ZA$wt}h zJS!)7odeRpdX=+Y<(=pcarL0ue}55mNa8dSwo->s!s7s;8}G%p+xfA;J%#wtjSVRG zzXpMEDM5|}AqSytA2KmLDIb4TQ)!^DYbpl>ps561Q5U1=+$W`KddkHFMsLFJCgzeT z;k2m$`w~8d`bn*k(P(z=lUspc(GX67aO4xr#=e9XfJJV_`7!;tVxRa>!amf9c{d&f zkS+tYL&!wF4`eVLgLi(p7vk@E{~j%BLIb|Fs5w?gzWzQJ-t3|m4(J6zhxV=%g>quH zRmW|Mhi>tf@qls$S3&$O*U*Yc$o?hnTyZlsHqSl*wB_X7W2o*wpy^FYK&W5N#jlJW z#3<+z-=_8{N9pzGwqw#bud-KpjV^{};ikZ-#sx1r$C~UIjMfAXAYTXHa4p($kMR2> zC?i%FzvAma(UCS$sR zX2=>%#HdzfI`$=`V6<58s`frJ6e!Uh54{@4UpwG>1146a;rH;~CcUBl39Lv?h|n$% zuQAYToA}YrumDuQf}17MdpPZNTsiii0;>K@R$hn2e}GEWUNjyuLVs{-_<>~#9FS`N zIetnN-L%_A_Cvoj*#hk6IJP5(t)%^u*NoO{AUb*;nG3Pca&-IgId*70fgL9fp>7q>9@;j|aGuENgz@pxNdMdHG+mt`Q( zC|#=Dmg;pLsZUm1HcOY+*;g;P@P2HtGpoSaTYrM?z?9Co7%0E*QaTaO?D{ReuDL&Q 
zCkzK;y}WQ*rw6~fM$Kl_(C`H%KeID3<%_=HaZ}W6TxO|L#j%+4sSEk{D*pZ67^*pn zZ=z2|l`aNvyb`m*8ew>nPwnEx35zJ#SvVXJ4qJ8js1ARr!>4t)M~8hnd{c)X>5%FC40&_( z`bkE+fq$ol@rDi$=S{*Lb;dC8N(xFX<2LG>vvqaWHTczQjfj|D} zar*4W{Rw>rZswP(f%o*XEuUAjl^q)V_HPq@CTN|<*kuW3mM|-k|81D7;fDswQD?I< zn}n^}$a*3R90SjkWMWAtu(cyv-~R@fXl98y=4{SON@Voh4j`>s3tr5O&E4=i(+K=N z;9i8?%Oi34@pZ$-0=Zj&djRQK!WXPepmB_-A13oWT@N&7Y)d^zgCqLJ*N^141NZ|- z(|w>b%F0Gn;Gzn&hSl2>nO&EIp*_Bh(<6TRT=e1iWS)rkTZtZ29$z+1w6KXM>~-VI z(3Wgv$$L^Uf1_D3@E=5Kr#@+kEUhGsF|@IfZ4(pO#8ac#_;-_8!pkYF4}GkYaGIlj z#J|$Q%coEm(fLz*e0f+O`U60q$3TyP9_LBCNAkru;$<|(9D{u9CT6-liQPUcjR|X0 zj*sd~s!ODCPDo@EP9{6u969jb5$|V)yu<5E8Obc80;yya%a{c^Iph1rcBI#tghVD(q`;1nnNXji zmy@_%2{|a!Jme(ySv$;SmyNeUx4@N>Su*G(fKGNMV>L)~{hGYP%h=cgp8=l%pN+sb zr6#b{dA5PXc zZ@^_E=$KLxS<2$1j)XdEnWb1?v!|@Acp84kf@BNmyxvWK{iO1`(9#bSCL}KF4u$Iw_e=l18yf6*e}hB#BMJ+MSehQHN#=ad^6~ z;}a=FjDr6_ ze({)&)*|hoeDNa2zC>!dRfBfqEnd7gk1~Bm$~s2pOZ`n~|2N={Da5;@20rOSAHyj9 z`C#DQfIfF34FH#5OsBk$jgNbC6=%!87Tg8gGRBTp4%KZ%T|Vlz#??()k9O33?T6a& z`P;rk)5HJieaU}g#Z3kE^`%Sv(h{k@s%9PgjA|Nvr6o;uezerqv-i!#O-pO4eE1W7 zOXn}iDRnh0Em<&IM`x!@#Z9-az0>cLO6QkXS1*ujd{R|y4Vz^tZYrH8S5;RmE3cOQ zY^%iqT0ja+r@M&wQrUV|U97cN;ge{o(mCwPOY)L%ndFRiPs zs*!+KWG>Z6m-^}@_M)k@3i6;~Jv%g%FZHw8M$d}U8X2O^FqKyLhfzAgO3T+btSj|5 zR)OnER$4m0w4u)Ls|r;4N-N82DyqSb-A?s&bt`;z(t@gLlHgv{_^SQoHKh{l1N;NE zr8VX2{SC;Xy`~&`_@$*(uEF^4Vr{^NYM^&wbECPG*S>2m)oVJeORD|;I(CA&s~XDJ zR{KjT>;2^w-m10r<@HTuP7UnWmfK{1eN(Z&K2Te~zP!fgpI<9;ZT!fLsVk}S-Nh>y z>ouV~U#_qB*GL*6PX(Tsaka(uwLX7C1FBo7?~e)yZJ|$hti#{%Y>;3Cj6%o_IUJ2)F4vX9Z2k2#6WCwX zY3G-09nRLFhK(@J74j9$cuss1eM}O^qdxx`c%JV_j~-tn-?#Cl_5v(KZgpHm4OqZ% z5XjS_a`5Fzf9VwZf(xAqe46^wU1NXbg^WErx7(U;|@6Pi2+R}|xHFxp# zuJxhs`1&kuSl3S1$CoqRG29HUN=|myI?5djL2SlyjhK!L1+A|uudiyTt-)V}l{WhQ zhL)~%z{?mENoD-Q`BqdlNSb*W)tWp;{Z|Bn09ekt1O{gV+p*q{Y4JJWKJ=ywu&yj$ zzaCRpYIKS*)-xm9FC@Zz3{{r=4yn@b5H)%ZgWk6dzS_URU(--2`KxL!D8q%?zfA`3 z5Y6u_M+JO&eYsqEA%5{X`6j$?lgq8~nDM_NUjtTQg};&MP-lht$HT%WSC{MR*PFHF zNfTf0SXTuNJLp+O#s-@xufww&E{uO-<*%a{LvYml>+1atc*c~|KF3iTh!G!J{%n1@ 
zZ_uB!V~4kA>-Af7*fFAghhCo{Y5mH^ z4ey_?*YDCHVs$OXzJ~aB==Dt2>Jf(<)}F4{m+26(xMA%rdVNHP*%u9OXB)J#q(j8r zhWFR&2Xu%y+OT%EQ7eDwLsuUx}MX^&Z5b&uf}8XElEVygh?neO*j zF#N-IZXGMf&KZWu)?&|#y(_D_2lePleX-@|A8xDI6cZk~3)&y^;}Ke6wM3F2XNN}~ zFB^|O(4y@Zr@6Meiu1shSi=DwEC*fyWPap`NUhw!>i~Jbs(ZKu;7?~VP2O?|hdLu) zUDZG3jC^{LvRZ|FP)9~pu&<id&Q#}R=*7?%X@}D!hD2Y7<)n}Gq!se>9FVRUX`mLH zEE+LML|nO^AQ4F;Z`Cf==yJ`tQhZ^x<4E9Ty88Qn>=PH)r}+LYNQ31Xya)MI#HR^v zMao1Tev8%Pj&v6C@KJ2)Sj0Dx&j$Q=BscQ#J#6VX#=OYG&#;N(5pqO6KK@Vl0RN0s zfqH_^BQ+u)AK$0g|01NvP)|@sYDYdk&QJ0FkC67Dp5SG8QQ|1_@GmTiG=O}344-2B z>ya4jir_Y+@yN%=6!PtG5G4=-P3H}q(0P@=a*IW)c zvD)eY|8NEVa6IyTfLCQgF6855-4p}AXexAwdV+4GO5_PPAvGc&AJg84IwNjPaKqKG zDc}&KKLm6Xd4h|k;jRMt_;_-B9Gc?Mv#xIu@1vv(j*a5qv1@*RLG(#y!_=OXrj^ak=f0Nt|?`bNGO@Fz&8 zs2lIH2PetS{uV0XvXNknaPW zDq<|i<35r70I3@J#{fTEi02LR1AtS#uutUa|G@Agy^OpBm{|xpktgUx%C5xP29%Jj z$P;`*&u14QzJZjE`YnL}jzsuR0j|3hG9yp%>0cmi0ZoccQ@ok-1W)LBiYt#P*6N{+ zzkU9zHBhGgS00;x2F7);Q2=pU@wOG)>b5s--@JXx_O097x9`}#YkSZ3zU@c1N45`a zAKY$hPjAm?&uY(ZpWQCD7q_oyuWYYtZ*1S(zNLL@dwcti_Fe5g?f=z7`5V9gxCZ_w DUj(-K literal 0 HcmV?d00001 diff --git a/libs/jellyfish/compat.py b/libs/jellyfish/compat.py new file mode 100644 index 00000000..b5e09792 --- /dev/null +++ b/libs/jellyfish/compat.py @@ -0,0 +1,13 @@ +import sys +import itertools + +IS_PY3 = sys.version_info[0] == 3 + +if IS_PY3: + _range = range + _zip_longest = itertools.zip_longest + _no_bytes_err = 'expected str, got bytes' +else: + _range = xrange + _zip_longest = itertools.izip_longest + _no_bytes_err = 'expected unicode, got str' diff --git a/libs/jellyfish/porter.py b/libs/jellyfish/porter.py new file mode 100644 index 00000000..2945b22d --- /dev/null +++ b/libs/jellyfish/porter.py @@ -0,0 +1,218 @@ +from .compat import _range + +_s2_options = { + 'a': ((['a', 't', 'i', 'o', 'n', 'a', 'l'], ['a', 't', 'e']), + (['t', 'i', 'o', 'n', 'a', 'l'], ['t', 'i', 'o', 'n'])), + 'c': ((['e', 'n', 'c', 'i'], ['e', 'n', 'c', 'e']), + (['a', 'n', 'c', 'i'], ['a', 'n', 'c', 'e']),), + 'e': ((['i', 
'z', 'e', 'r'], ['i', 'z', 'e']),), + 'l': ((['b', 'l', 'i'], ['b', 'l', 'e']), + (['a', 'l', 'l', 'i'], ['a', 'l']), + (['e', 'n', 't', 'l', 'i'], ['e', 'n', 't']), + (['e', 'l', 'i'], ['e']), + (['o', 'u', 's', 'l', 'i'], ['o', 'u', 's']),), + 'o': ((['i', 'z', 'a', 't', 'i', 'o', 'n'], ['i', 'z', 'e']), + (['a', 't', 'i', 'o', 'n'], ['a', 't', 'e']), + (['a', 't', 'o', 'r'], ['a', 't', 'e']),), + 's': ((['a', 'l', 'i', 's', 'm'], ['a', 'l']), + (['i', 'v', 'e', 'n', 'e', 's', 's'], ['i', 'v', 'e']), + (['f', 'u', 'l', 'n', 'e', 's', 's'], ['f', 'u', 'l']), + (['o', 'u', 's', 'n', 'e', 's', 's'], ['o', 'u', 's']),), + 't': ((['a', 'l', 'i', 't', 'i'], ['a', 'l']), + (['i', 'v', 'i', 't', 'i'], ['i', 'v', 'e']), + (['b', 'i', 'l', 'i', 't', 'i'], ['b', 'l', 'e']),), + 'g': ((['l', 'o', 'g', 'i'], ['l', 'o', 'g']),), +} + + +_s3_options = { + 'e': ((['i', 'c', 'a', 't', 'e'], ['i', 'c']), + (['a', 't', 'i', 'v', 'e'], []), + (['a', 'l', 'i', 'z', 'e'], ['a', 'l']),), + 'i': ((['i', 'c', 'i', 't', 'i'], ['i', 'c']),), + 'l': ((['i', 'c', 'a', 'l'], ['i', 'c']), + (['f', 'u', 'l'], []),), + 's': ((['n', 'e', 's', 's'], []),), +} + +_s4_endings = { + 'a': (['a', 'l'],), + 'c': (['a', 'n', 'c', 'e'], ['e', 'n', 'c', 'e']), + 'e': (['e', 'r'],), + 'i': (['i', 'c'],), + 'l': (['a', 'b', 'l', 'e'], ['i', 'b', 'l', 'e']), + 'n': (['a', 'n', 't'], ['e', 'm', 'e', 'n', 't'], ['m', 'e', 'n', 't'], + ['e', 'n', 't']), + # handle 'o' separately + 's': (['i', 's', 'm'],), + 't': (['a', 't', 'e'], ['i', 't', 'i']), + 'u': (['o', 'u', 's'],), + 'v': (['i', 'v', 'e'],), + 'z': (['i', 'z', 'e'],), +} + + +class Stemmer(object): + def __init__(self, b): + self.b = list(b) + self.k = len(b)-1 + self.j = 0 + + def cons(self, i): + """ True iff b[i] is a consonant """ + if self.b[i] in 'aeiou': + return False + elif self.b[i] == 'y': + return True if i == 0 else not self.cons(i-1) + return True + + def m(self): + n = i = 0 + while True: + if i > self.j: + return n + if not self.cons(i): 
+ break + i += 1 + i += 1 + while True: + while True: + if i > self.j: + return n + if self.cons(i): + break + i += 1 + + i += 1 + n += 1 + + while True: + if i > self.j: + return n + if not self.cons(i): + break + i += 1 + i += 1 + + def vowel_in_stem(self): + """ True iff 0...j contains vowel """ + for i in _range(0, self.j+1): + if not self.cons(i): + return True + return False + + def doublec(self, j): + """ True iff j, j-1 contains double consonant """ + if j < 1 or self.b[j] != self.b[j-1]: + return False + return self.cons(j) + + def cvc(self, i): + """ True iff i-2,i-1,i is consonent-vowel consonant + and if second c isn't w,x, or y. + used to restore e at end of short words like cave, love, hope, crime + """ + if (i < 2 or not self.cons(i) or self.cons(i-1) or not self.cons(i-2) or + self.b[i] in 'wxy'): + return False + return True + + def ends(self, s): + length = len(s) + """ True iff 0...k ends with string s """ + res = (self.b[self.k-length+1:self.k+1] == s) + if res: + self.j = self.k - length + return res + + def setto(self, s): + """ set j+1...k to string s, readjusting k """ + length = len(s) + self.b[self.j+1:self.j+1+length] = s + self.k = self.j + length + + def r(self, s): + if self.m() > 0: + self.setto(s) + + def step1ab(self): + if self.b[self.k] == 's': + if self.ends(['s', 's', 'e', 's']): + self.k -= 2 + elif self.ends(['i', 'e', 's']): + self.setto(['i']) + elif self.b[self.k-1] != 's': + self.k -= 1 + if self.ends(['e', 'e', 'd']): + if self.m() > 0: + self.k -= 1 + elif ((self.ends(['e', 'd']) or self.ends(['i', 'n', 'g'])) and + self.vowel_in_stem()): + self.k = self.j + if self.ends(['a', 't']): + self.setto(['a', 't', 'e']) + elif self.ends(['b', 'l']): + self.setto(['b', 'l', 'e']) + elif self.ends(['i', 'z']): + self.setto(['i', 'z', 'e']) + elif self.doublec(self.k): + self.k -= 1 + if self.b[self.k] in 'lsz': + self.k += 1 + elif self.m() == 1 and self.cvc(self.k): + self.setto(['e']) + + def step1c(self): + """ turn terminal y 
into i if there's a vowel in stem """ + if self.ends(['y']) and self.vowel_in_stem(): + self.b[self.k] = 'i' + + def step2and3(self): + for end, repl in _s2_options.get(self.b[self.k-1], []): + if self.ends(end): + self.r(repl) + break + + for end, repl in _s3_options.get(self.b[self.k], []): + if self.ends(end): + self.r(repl) + break + + def step4(self): + ch = self.b[self.k-1] + + if ch == 'o': + if not ((self.ends(['i', 'o', 'n']) and self.b[self.j] in 'st') or + self.ends(['o', 'u'])): + return + else: + endings = _s4_endings.get(ch, []) + for end in endings: + if self.ends(end): + break + else: + return + + if self.m() > 1: + self.k = self.j + + def step5(self): + self.j = self.k + if self.b[self.k] == 'e': + a = self.m() + if a > 1 or a == 1 and not self.cvc(self.k-1): + self.k -= 1 + if self.b[self.k] == 'l' and self.doublec(self.k) and self.m() > 1: + self.k -= 1 + + def result(self): + return ''.join(self.b[:self.k+1]) + + def stem(self): + if self.k > 1: + self.step1ab() + self.step1c() + self.step2and3() + self.step4() + self.step5() + return self.result() diff --git a/libs/jellyfish/test.py b/libs/jellyfish/test.py new file mode 100644 index 00000000..72ef9344 --- /dev/null +++ b/libs/jellyfish/test.py @@ -0,0 +1,213 @@ +# -*- coding: utf-8 -*- +import sys +if sys.version_info[0] < 3: + import unicodecsv as csv + open_kwargs = {} +else: + import csv + open_kwargs = {'encoding': 'utf8'} +import platform +import pytest + + +def assertAlmostEqual(a, b, places=3): + assert abs(a - b) < (0.1**places) + + +if platform.python_implementation() == 'CPython': + implementations = ['python', 'c'] +else: + implementations = ['python'] + + +@pytest.fixture(params=implementations) +def jf(request): + if request.param == 'python': + from jellyfish import _jellyfish as jf + else: + from jellyfish import cjellyfish as jf + return jf + + +def _load_data(name): + with open('testdata/{}.csv'.format(name), **open_kwargs) as f: + for data in csv.reader(f): + yield data + + 
+@pytest.mark.parametrize("s1,s2,value", _load_data('jaro_winkler'), ids=str) +def test_jaro_winkler(jf, s1, s2, value): + value = float(value) + assertAlmostEqual(jf.jaro_winkler(s1, s2), value, places=3) + + +@pytest.mark.parametrize("s1,s2,value", _load_data('jaro_distance'), ids=str) +def test_jaro_distance(jf, s1, s2, value): + value = float(value) + assertAlmostEqual(jf.jaro_distance(s1, s2), value, places=3) + + +@pytest.mark.parametrize("s1,s2,value", _load_data('hamming'), ids=str) +def test_hamming_distance(jf, s1, s2, value): + value = int(value) + assert jf.hamming_distance(s1, s2) == value + + +@pytest.mark.parametrize("s1,s2,value", _load_data('levenshtein'), ids=str) +def test_levenshtein_distance(jf, s1, s2, value): + value = int(value) + assert jf.levenshtein_distance(s1, s2) == value + + +@pytest.mark.parametrize("s1,s2,value", _load_data('damerau_levenshtein'), ids=str) +def test_damerau_levenshtein_distance(jf, s1, s2, value): + value = int(value) + assert jf.damerau_levenshtein_distance(s1, s2) == value + + +@pytest.mark.parametrize("s1,code", _load_data('soundex'), ids=str) +def test_soundex(jf, s1, code): + assert jf.soundex(s1) == code + + +@pytest.mark.parametrize("s1,code", _load_data('metaphone'), ids=str) +def test_metaphone(jf, s1, code): + assert jf.metaphone(s1) == code + + +@pytest.mark.parametrize("s1,s2", _load_data('nysiis'), ids=str) +def test_nysiis(jf, s1, s2): + assert jf.nysiis(s1) == s2 + + +@pytest.mark.parametrize("s1,s2", _load_data('match_rating_codex'), ids=str) +def test_match_rating_codex(jf, s1, s2): + assert jf.match_rating_codex(s1) == s2 + + +@pytest.mark.parametrize("s1,s2,value", _load_data('match_rating_comparison'), ids=str) +def test_match_rating_comparison(jf, s1, s2, value): + value = {'True': True, 'False': False, 'None': None}[value] + assert jf.match_rating_comparison(s1, s2) is value + + +# use non-parameterized version for speed +# @pytest.mark.parametrize("a,b", _load_data('porter'), ids=str) +# def 
test_porter_stem(jf, a, b): +# assert jf.porter_stem(a) == b + +def test_porter_stem(jf): + with open('testdata/porter.csv', **open_kwargs) as f: + reader = csv.reader(f) + for (a, b) in reader: + assert jf.porter_stem(a) == b + + +if platform.python_implementation() == 'CPython': + def test_match_rating_comparison_segfault(): + import hashlib + from jellyfish import cjellyfish as jf + sha1s = [u'{}'.format(hashlib.sha1(str(v).encode('ascii')).hexdigest()) + for v in range(100)] + # this segfaulted on 0.1.2 + assert [[jf.match_rating_comparison(h1, h2) for h1 in sha1s] for h2 in sha1s] + + + def test_damerau_levenshtein_unicode_segfault(): + # unfortunate difference in behavior between Py & C versions + from jellyfish.cjellyfish import damerau_levenshtein_distance as c_dl + from jellyfish._jellyfish import damerau_levenshtein_distance as py_dl + s1 = u'mylifeoutdoors' + s2 = u'нахлыÑÑ‚' + with pytest.raises(ValueError): + c_dl(s1, s2) + with pytest.raises(ValueError): + c_dl(s2, s1) + + assert py_dl(s1, s2) == 14 + assert py_dl(s2, s1) == 14 + + +def test_jaro_winkler_long_tolerance(jf): + no_lt = jf.jaro_winkler(u'two long strings', u'two long stringz', long_tolerance=False) + with_lt = jf.jaro_winkler(u'two long strings', u'two long stringz', long_tolerance=True) + # make sure long_tolerance does something + assertAlmostEqual(no_lt, 0.975) + assertAlmostEqual(with_lt, 0.984) + + +def test_damerau_levenshtein_distance_type(jf): + jf.damerau_levenshtein_distance(u'abc', u'abc') + with pytest.raises(TypeError) as exc: + jf.damerau_levenshtein_distance(b'abc', b'abc') + assert 'expected' in str(exc.value) + + +def test_levenshtein_distance_type(jf): + assert jf.levenshtein_distance(u'abc', u'abc') == 0 + with pytest.raises(TypeError) as exc: + jf.levenshtein_distance(b'abc', b'abc') + assert 'expected' in str(exc.value) + + +def test_jaro_distance_type(jf): + assert jf.jaro_distance(u'abc', u'abc') == 1 + with pytest.raises(TypeError) as exc: + 
jf.jaro_distance(b'abc', b'abc') + assert 'expected' in str(exc.value) + + +def test_jaro_winkler_type(jf): + assert jf.jaro_winkler(u'abc', u'abc') == 1 + with pytest.raises(TypeError) as exc: + jf.jaro_winkler(b'abc', b'abc') + assert 'expected' in str(exc.value) + + +def test_mra_comparison_type(jf): + assert jf.match_rating_comparison(u'abc', u'abc') is True + with pytest.raises(TypeError) as exc: + jf.match_rating_comparison(b'abc', b'abc') + assert 'expected' in str(exc.value) + + +def test_hamming_type(jf): + assert jf.hamming_distance(u'abc', u'abc') == 0 + with pytest.raises(TypeError) as exc: + jf.hamming_distance(b'abc', b'abc') + assert 'expected' in str(exc.value) + + +def test_soundex_type(jf): + assert jf.soundex(u'ABC') == 'A120' + with pytest.raises(TypeError) as exc: + jf.soundex(b'ABC') + assert 'expected' in str(exc.value) + + +def test_metaphone_type(jf): + assert jf.metaphone(u'abc') == 'ABK' + with pytest.raises(TypeError) as exc: + jf.metaphone(b'abc') + assert 'expected' in str(exc.value) + + +def test_nysiis_type(jf): + assert jf.nysiis(u'abc') == 'ABC' + with pytest.raises(TypeError) as exc: + jf.nysiis(b'abc') + assert 'expected' in str(exc.value) + + +def test_mr_codex_type(jf): + assert jf.match_rating_codex(u'abc') == 'ABC' + with pytest.raises(TypeError) as exc: + jf.match_rating_codex(b'abc') + assert 'expected' in str(exc.value) + + +def test_porter_type(jf): + assert jf.porter_stem(u'abc') == 'abc' + with pytest.raises(TypeError) as exc: + jf.porter_stem(b'abc') + assert 'expected' in str(exc.value) diff --git a/libs/munkres.py b/libs/munkres.py new file mode 100644 index 00000000..187333b3 --- /dev/null +++ b/libs/munkres.py @@ -0,0 +1,786 @@ +#!/usr/bin/env python +# -*- coding: iso-8859-1 -*- + +# Documentation is intended to be processed by Epydoc. 
+ +""" +Introduction +============ + +The Munkres module provides an implementation of the Munkres algorithm +(also called the Hungarian algorithm or the Kuhn-Munkres algorithm), +useful for solving the Assignment Problem. + +Assignment Problem +================== + +Let *C* be an *n*\ x\ *n* matrix representing the costs of each of *n* workers +to perform any of *n* jobs. The assignment problem is to assign jobs to +workers in a way that minimizes the total cost. Since each worker can perform +only one job and each job can be assigned to only one worker the assignments +represent an independent set of the matrix *C*. + +One way to generate the optimal set is to create all permutations of +the indexes necessary to traverse the matrix so that no row and column +are used more than once. For instance, given this matrix (expressed in +Python):: + + matrix = [[5, 9, 1], + [10, 3, 2], + [8, 7, 4]] + +You could use this code to generate the traversal indexes:: + + def permute(a, results): + if len(a) == 1: + results.insert(len(results), a) + + else: + for i in range(0, len(a)): + element = a[i] + a_copy = [a[j] for j in range(0, len(a)) if j != i] + subresults = [] + permute(a_copy, subresults) + for subresult in subresults: + result = [element] + subresult + results.insert(len(results), result) + + results = [] + permute(range(len(matrix)), results) # [0, 1, 2] for a 3x3 matrix + +After the call to permute(), the results matrix would look like this:: + + [[0, 1, 2], + [0, 2, 1], + [1, 0, 2], + [1, 2, 0], + [2, 0, 1], + [2, 1, 0]] + +You could then use that index matrix to loop over the original cost matrix +and calculate the smallest cost of the combinations:: + + n = len(matrix) + minval = sys.maxsize + for row in range(n): + cost = 0 + for col in range(n): + cost += matrix[row][col] + minval = min(cost, minval) + + print minval + +While this approach works fine for small matrices, it does not scale. It +executes in O(*n*!) 
time: Calculating the permutations for an *n*\ x\ *n* +matrix requires *n*! operations. For a 12x12 matrix, that's 479,001,600 +traversals. Even if you could manage to perform each traversal in just one +millisecond, it would still take more than 133 hours to perform the entire +traversal. A 20x20 matrix would take 2,432,902,008,176,640,000 operations. At +an optimistic millisecond per operation, that's more than 77 million years. + +The Munkres algorithm runs in O(*n*\ ^3) time, rather than O(*n*!). This +package provides an implementation of that algorithm. + +This version is based on +http://www.public.iastate.edu/~ddoty/HungarianAlgorithm.html. + +This version was written for Python by Brian Clapper from the (Ada) algorithm +at the above web site. (The ``Algorithm::Munkres`` Perl version, in CPAN, was +clearly adapted from the same web site.) + +Usage +===== + +Construct a Munkres object:: + + from munkres import Munkres + + m = Munkres() + +Then use it to compute the lowest cost assignment from a cost matrix. Here's +a sample program:: + + from munkres import Munkres, print_matrix + + matrix = [[5, 9, 1], + [10, 3, 2], + [8, 7, 4]] + m = Munkres() + indexes = m.compute(matrix) + print_matrix(matrix, msg='Lowest cost through this matrix:') + total = 0 + for row, column in indexes: + value = matrix[row][column] + total += value + print '(%d, %d) -> %d' % (row, column, value) + print 'total cost: %d' % total + +Running that program produces:: + + Lowest cost through this matrix: + [5, 9, 1] + [10, 3, 2] + [8, 7, 4] + (0, 0) -> 5 + (1, 1) -> 3 + (2, 2) -> 4 + total cost=12 + +The instantiated Munkres object can be used multiple times on different +matrices. + +Non-square Cost Matrices +======================== + +The Munkres algorithm assumes that the cost matrix is square. However, it's +possible to use a rectangular matrix if you first pad it with 0 values to make +it square. This module automatically pads rectangular cost matrices to make +them square. 
+ +Notes: + +- The module operates on a *copy* of the caller's matrix, so any padding will + not be seen by the caller. +- The cost matrix must be rectangular or square. An irregular matrix will + *not* work. + +Calculating Profit, Rather than Cost +==================================== + +The cost matrix is just that: A cost matrix. The Munkres algorithm finds +the combination of elements (one from each row and column) that results in +the smallest cost. It's also possible to use the algorithm to maximize +profit. To do that, however, you have to convert your profit matrix to a +cost matrix. The simplest way to do that is to subtract all elements from a +large value. For example:: + + from munkres import Munkres, print_matrix + + matrix = [[5, 9, 1], + [10, 3, 2], + [8, 7, 4]] + cost_matrix = [] + for row in matrix: + cost_row = [] + for col in row: + cost_row += [sys.maxsize - col] + cost_matrix += [cost_row] + + m = Munkres() + indexes = m.compute(cost_matrix) + print_matrix(matrix, msg='Highest profit through this matrix:') + total = 0 + for row, column in indexes: + value = matrix[row][column] + total += value + print '(%d, %d) -> %d' % (row, column, value) + + print 'total profit=%d' % total + +Running that program produces:: + + Highest profit through this matrix: + [5, 9, 1] + [10, 3, 2] + [8, 7, 4] + (0, 1) -> 9 + (1, 0) -> 10 + (2, 2) -> 4 + total profit=23 + +The ``munkres`` module provides a convenience method for creating a cost +matrix from a profit matrix. 
Since it doesn't know whether the matrix contains +floating point numbers, decimals, or integers, you have to provide the +conversion function; but the convenience method takes care of the actual +creation of the cost matrix:: + + import munkres + + cost_matrix = munkres.make_cost_matrix(matrix, + lambda cost: sys.maxsize - cost) + +So, the above profit-calculation program can be recast as:: + + from munkres import Munkres, print_matrix, make_cost_matrix + + matrix = [[5, 9, 1], + [10, 3, 2], + [8, 7, 4]] + cost_matrix = make_cost_matrix(matrix, lambda cost: sys.maxsize - cost) + m = Munkres() + indexes = m.compute(cost_matrix) + print_matrix(matrix, msg='Lowest cost through this matrix:') + total = 0 + for row, column in indexes: + value = matrix[row][column] + total += value + print '(%d, %d) -> %d' % (row, column, value) + print 'total profit=%d' % total + +References +========== + +1. http://www.public.iastate.edu/~ddoty/HungarianAlgorithm.html + +2. Harold W. Kuhn. The Hungarian Method for the assignment problem. + *Naval Research Logistics Quarterly*, 2:83-97, 1955. + +3. Harold W. Kuhn. Variants of the Hungarian method for assignment + problems. *Naval Research Logistics Quarterly*, 3: 253-258, 1956. + +4. Munkres, J. Algorithms for the Assignment and Transportation Problems. + *Journal of the Society of Industrial and Applied Mathematics*, + 5(1):32-38, March, 1957. + +5. http://en.wikipedia.org/wiki/Hungarian_algorithm + +Copyright and License +===================== + +This software is released under a BSD license, adapted from + + +Copyright (c) 2008 Brian M. Clapper +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. 
+ +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name "clapper.org" nor the names of its contributors may be + used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. +""" + +__docformat__ = 'restructuredtext' + +# --------------------------------------------------------------------------- +# Imports +# --------------------------------------------------------------------------- + +import sys +import copy + +# --------------------------------------------------------------------------- +# Exports +# --------------------------------------------------------------------------- + +__all__ = ['Munkres', 'make_cost_matrix'] + +# --------------------------------------------------------------------------- +# Globals +# --------------------------------------------------------------------------- + +# Info about the module +__version__ = "1.0.7" +__author__ = "Brian Clapper, bmc@clapper.org" +__url__ = "http://software.clapper.org/munkres/" +__copyright__ = "(c) 2008 Brian M. 
Clapper" +__license__ = "BSD-style license" + +# --------------------------------------------------------------------------- +# Classes +# --------------------------------------------------------------------------- + +class Munkres: + """ + Calculate the Munkres solution to the classical assignment problem. + See the module documentation for usage. + """ + + def __init__(self): + """Create a new instance""" + self.C = None + self.row_covered = [] + self.col_covered = [] + self.n = 0 + self.Z0_r = 0 + self.Z0_c = 0 + self.marked = None + self.path = None + + def make_cost_matrix(profit_matrix, inversion_function): + """ + **DEPRECATED** + + Please use the module function ``make_cost_matrix()``. + """ + import munkres + return munkres.make_cost_matrix(profit_matrix, inversion_function) + + make_cost_matrix = staticmethod(make_cost_matrix) + + def pad_matrix(self, matrix, pad_value=0): + """ + Pad a possibly non-square matrix to make it square. + + :Parameters: + matrix : list of lists + matrix to pad + + pad_value : int + value to use to pad the matrix + + :rtype: list of lists + :return: a new, possibly padded, matrix + """ + max_columns = 0 + total_rows = len(matrix) + + for row in matrix: + max_columns = max(max_columns, len(row)) + + total_rows = max(max_columns, total_rows) + + new_matrix = [] + for row in matrix: + row_len = len(row) + new_row = row[:] + if total_rows > row_len: + # Row too short. Pad it. + new_row += [pad_value] * (total_rows - row_len) + new_matrix += [new_row] + + while len(new_matrix) < total_rows: + new_matrix += [[pad_value] * total_rows] + + return new_matrix + + def compute(self, cost_matrix): + """ + Compute the indexes for the lowest-cost pairings between rows and + columns in the database. Returns a list of (row, column) tuples + that can be used to traverse the matrix. + + :Parameters: + cost_matrix : list of lists + The cost matrix. If this cost matrix is not square, it + will be padded with zeros, via a call to ``pad_matrix()``. 
+ (This method does *not* modify the caller's matrix. It + operates on a copy of the matrix.) + + **WARNING**: This code handles square and rectangular + matrices. It does *not* handle irregular matrices. + + :rtype: list + :return: A list of ``(row, column)`` tuples that describe the lowest + cost path through the matrix + + """ + self.C = self.pad_matrix(cost_matrix) + self.n = len(self.C) + self.original_length = len(cost_matrix) + self.original_width = len(cost_matrix[0]) + self.row_covered = [False for i in range(self.n)] + self.col_covered = [False for i in range(self.n)] + self.Z0_r = 0 + self.Z0_c = 0 + self.path = self.__make_matrix(self.n * 2, 0) + self.marked = self.__make_matrix(self.n, 0) + + done = False + step = 1 + + steps = { 1 : self.__step1, + 2 : self.__step2, + 3 : self.__step3, + 4 : self.__step4, + 5 : self.__step5, + 6 : self.__step6 } + + while not done: + try: + func = steps[step] + step = func() + except KeyError: + done = True + + # Look for the starred columns + results = [] + for i in range(self.original_length): + for j in range(self.original_width): + if self.marked[i][j] == 1: + results += [(i, j)] + + return results + + def __copy_matrix(self, matrix): + """Return an exact copy of the supplied matrix""" + return copy.deepcopy(matrix) + + def __make_matrix(self, n, val): + """Create an *n*x*n* matrix, populating it with the specific value.""" + matrix = [] + for i in range(n): + matrix += [[val for j in range(n)]] + return matrix + + def __step1(self): + """ + For each row of the matrix, find the smallest element and + subtract it from every element in its row. Go to Step 2. + """ + C = self.C + n = self.n + for i in range(n): + minval = min(self.C[i]) + # Find the minimum value for this row and subtract that minimum + # from every element in the row. + for j in range(n): + self.C[i][j] -= minval + + return 2 + + def __step2(self): + """ + Find a zero (Z) in the resulting matrix. 
    def __step3(self):
        """
        Cover each column containing a starred zero. If K columns are
        covered, the starred zeros describe a complete set of unique
        assignments. In this case, Go to DONE, otherwise, Go to Step 4.

        :rtype: int
        :return: 7 (the "done" pseudo-step, which has no handler and so
                 ends the step loop in ``compute()``) when every column is
                 covered, otherwise 4
        """
        n = self.n
        count = 0
        # Each starred zero (marked == 1) is a tentative assignment; cover
        # its column and count it.  Step 2 stars at most one zero per
        # column, so ``count`` equals the number of covered columns.
        for i in range(n):
            for j in range(n):
                if self.marked[i][j] == 1:
                    self.col_covered[j] = True
                    count += 1

        if count >= n:
            step = 7  # done: a complete set of assignments exists
        else:
            step = 4

        return step
Continue until the series terminates at a primed zero + that has no starred zero in its column. Unstar each starred zero + of the series, star each primed zero of the series, erase all + primes and uncover every line in the matrix. Return to Step 3 + """ + count = 0 + path = self.path + path[count][0] = self.Z0_r + path[count][1] = self.Z0_c + done = False + while not done: + row = self.__find_star_in_col(path[count][1]) + if row >= 0: + count += 1 + path[count][0] = row + path[count][1] = path[count-1][1] + else: + done = True + + if not done: + col = self.__find_prime_in_row(path[count][0]) + count += 1 + path[count][0] = path[count-1][0] + path[count][1] = col + + self.__convert_path(path, count) + self.__clear_covers() + self.__erase_primes() + return 3 + + def __step6(self): + """ + Add the value found in Step 4 to every element of each covered + row, and subtract it from every element of each uncovered column. + Return to Step 4 without altering any stars, primes, or covered + lines. 
    def __find_a_zero(self):
        """
        Find an uncovered element with value 0.

        Scans rows in order and returns a ``(row, col)`` pair, or
        ``(-1, -1)`` when no uncovered zero exists.

        NOTE(review): the inner loop keeps scanning to the end of the row
        even after a zero is found, so if the first qualifying row holds
        several uncovered zeros the *last* one in that row is returned.
        Any uncovered zero is acceptable to the algorithm, so this is
        harmless, but it is subtly different from "the first".
        """
        row = -1
        col = -1
        i = 0
        n = self.n
        done = False

        # NOTE(review): assumes n >= 1 — ``self.C[i][j]`` is read before
        # the bounds checks.  Step 3 returns "done" for an empty matrix
        # before Step 4 can call this, so the case appears unreachable
        # via compute().
        while not done:
            j = 0
            while True:
                if (self.C[i][j] == 0) and \
                   (not self.row_covered[i]) and \
                   (not self.col_covered[j]):
                    row = i
                    col = j
                    done = True
                j += 1
                if j >= n:
                    break
            i += 1
            if i >= n:
                done = True

        return (row, col)
def make_cost_matrix(profit_matrix, inversion_function):
    """
    Build a cost matrix from a profit matrix by applying
    'inversion_function' to every entry. The function must accept one
    numeric argument (of any type) and return the value to use as the
    cost inverse of that profit.

    This is also exposed as a static method, so it may be called as:

    .. python::

        cost_matrix = Munkres.make_cost_matrix(matrix, inversion_func)

    For example:

    .. python::

        cost_matrix = Munkres.make_cost_matrix(matrix, lambda x : sys.maxsize - x)

    :Parameters:
        profit_matrix : list of lists
            The matrix to convert from a profit to a cost matrix

        inversion_function : function
            The function to use to invert each entry in the profit matrix

    :rtype: list of lists
    :return: The converted matrix
    """
    # One new row per input row; the input matrix is left untouched.
    return [[inversion_function(entry) for entry in profit_row]
            for profit_row in profit_matrix]
+ + :Parameters: + matrix : list of lists + Matrix to print + + msg : str + Optional message to print before displaying the matrix + """ + import math + + if msg is not None: + print(msg) + + # Calculate the appropriate format width. + width = 0 + for row in matrix: + for val in row: + width = max(width, int(math.log10(val)) + 1) + + # Make the format string + format = '%%%dd' % width + + # Print the matrix + for row in matrix: + sep = '[' + for val in row: + sys.stdout.write(sep + format % val) + sep = ', ' + sys.stdout.write(']\n') + +# --------------------------------------------------------------------------- +# Main +# --------------------------------------------------------------------------- + +if __name__ == '__main__': + + matrices = [ + # Square + ([[400, 150, 400], + [400, 450, 600], + [300, 225, 300]], + 850), # expected cost + + # Rectangular variant + ([[400, 150, 400, 1], + [400, 450, 600, 2], + [300, 225, 300, 3]], + 452), # expected cost + + + # Square + ([[10, 10, 8], + [9, 8, 1], + [9, 7, 4]], + 18), + + # Rectangular variant + ([[10, 10, 8, 11], + [9, 8, 1, 1], + [9, 7, 4, 10]], + 15)] + + m = Munkres() + for cost_matrix, expected_total in matrices: + print_matrix(cost_matrix, msg='cost matrix') + indexes = m.compute(cost_matrix) + total_cost = 0 + for r, c in indexes: + x = cost_matrix[r][c] + total_cost += x + print('(%d, %d) -> %d' % (r, c, x)) + print('lowest cost=%d' % total_cost) + assert expected_total == total_cost diff --git a/libs/musicbrainzngs/__init__.py b/libs/musicbrainzngs/__init__.py new file mode 100644 index 00000000..22fed80d --- /dev/null +++ b/libs/musicbrainzngs/__init__.py @@ -0,0 +1,2 @@ +from musicbrainzngs.musicbrainz import * +from musicbrainzngs.caa import * diff --git a/libs/musicbrainzngs/caa.py b/libs/musicbrainzngs/caa.py new file mode 100644 index 00000000..12fa8d35 --- /dev/null +++ b/libs/musicbrainzngs/caa.py @@ -0,0 +1,177 @@ +# This file is part of the musicbrainzngs library +# Copyright (C) Alastair 
def set_caa_hostname(new_hostname):
    """Change the base hostname used for Cover Art Archive requests.

    The module-level default is 'coverartarchive.org'.
    """
    # Rebind the module-level ``hostname`` consulted by _caa_request().
    global hostname
    hostname = new_hostname
+ req = musicbrainz._MusicbrainzHttpRequest("GET", url, None) + # Useragent isn't needed for CAA, but we'll add it if it exists + if musicbrainz._useragent != "": + req.add_header('User-Agent', musicbrainz._useragent) + musicbrainz._log.debug("requesting with UA %s" % musicbrainz._useragent) + + resp = musicbrainz._safe_read(opener, req, None) + + # TODO: The content type declared by the CAA for JSON files is + # 'applicaiton/octet-stream'. This is not useful to detect whether the + # content is JSON, so default to decoding JSON if no imageid was supplied. + # http://tickets.musicbrainz.org/browse/CAA-75 + if imageid: + # If we asked for an image, return the image + return resp + else: + # Otherwise it's json + return json.loads(resp) + + +def get_image_list(releaseid): + """Get the list of cover art associated with a release. + + The return value is the deserialized response of the `JSON listing + `_ + returned by the Cover Art Archive API. + + If an error occurs then a :class:`~musicbrainzngs.ResponseError` will + be raised with one of the following HTTP codes: + + * 400: `Releaseid` is not a valid UUID + * 404: No release exists with an MBID of `releaseid` + * 503: Ratelimit exceeded + """ + return _caa_request(releaseid) + + +def get_release_group_image_list(releasegroupid): + """Get the list of cover art associated with a release group. + + The return value is the deserialized response of the `JSON listing + `_ + returned by the Cover Art Archive API. + + If an error occurs then a :class:`~musicbrainzngs.ResponseError` will + be raised with one of the following HTTP codes: + + * 400: `Releaseid` is not a valid UUID + * 404: No release exists with an MBID of `releaseid` + * 503: Ratelimit exceeded + """ + return _caa_request(releasegroupid, entitytype="release-group") + + +def get_release_group_image_front(releasegroupid, size=None): + """Download the front cover art for a release group. 
def get_image_back(releaseid, size=None):
    """Download the back cover art for a release.

    The `size` argument and the possible error conditions are the same as
    for :meth:`get_image`.
    """
    # Thin wrapper: the back image is coverid="back" on a release entity.
    return get_image(releaseid, coverid="back", size=size)
# -*- coding: utf-8 -*-
# Copyright (c) 2012 Kenneth Reitz.

# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.

# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

"""
pythoncompat

Version-dispatched imports and aliases so the rest of musicbrainzngs can
use a single set of names on both Python 2 and Python 3.
"""


import sys

# -------
# Pythons
# -------

# Syntax sugar.
_ver = sys.version_info

#: Python 2.x?
is_py2 = (_ver[0] == 2)

#: Python 3.x?
is_py3 = (_ver[0] == 3)

# ---------
# Specifics
# ---------

if is_py2:
    from StringIO import StringIO
    # Fix: ``build_opener`` was listed twice in the original import; the
    # duplicate has been removed.
    from urllib2 import HTTPPasswordMgr, HTTPDigestAuthHandler, Request,\
        HTTPHandler, build_opener, HTTPError, URLError
    from httplib import BadStatusLine, HTTPException
    from urlparse import urlunparse
    from urllib import urlencode

    bytes = str
    unicode = unicode
    basestring = basestring
elif is_py3:
    from io import StringIO
    from urllib.request import HTTPPasswordMgr, HTTPDigestAuthHandler, Request,\
        HTTPHandler, build_opener
    from urllib.error import HTTPError, URLError
    from http.client import HTTPException, BadStatusLine
    from urllib.parse import urlunparse, urlencode

    unicode = str
    bytes = bytes
    basestring = (str, bytes)
+ +import re +import xml.etree.ElementTree as ET +import logging + +from musicbrainzngs import util + +try: + from ET import fixtag +except: + # Python < 2.7 + def fixtag(tag, namespaces): + # given a decorated tag (of the form {uri}tag), return prefixed + # tag and namespace declaration, if any + if isinstance(tag, ET.QName): + tag = tag.text + namespace_uri, tag = tag[1:].split("}", 1) + prefix = namespaces.get(namespace_uri) + if prefix is None: + prefix = "ns%d" % len(namespaces) + namespaces[namespace_uri] = prefix + if prefix == "xml": + xmlns = None + else: + xmlns = ("xmlns:%s" % prefix, namespace_uri) + else: + xmlns = None + return "%s:%s" % (prefix, tag), xmlns + + +NS_MAP = {"http://musicbrainz.org/ns/mmd-2.0#": "ws2", + "http://musicbrainz.org/ns/ext#-2.0": "ext"} +_log = logging.getLogger("musicbrainzngs") + +def get_error_message(error): + """ Given an error XML message from the webservice containing + xy, return a list + of [x, y]""" + try: + tree = util.bytes_to_elementtree(error) + root = tree.getroot() + errors = [] + if root.tag == "error": + for ch in root: + if ch.tag == "text": + errors.append(ch.text) + return errors + except ET.ParseError: + return None + +def make_artist_credit(artists): + names = [] + for artist in artists: + if isinstance(artist, dict): + if "name" in artist: + names.append(artist.get("name", "")) + else: + names.append(artist.get("artist", {}).get("name", "")) + else: + names.append(artist) + return "".join(names) + +def parse_elements(valid_els, inner_els, element): + """ Extract single level subelements from an element. + For example, given the element: + + Text + + and a list valid_els that contains "subelement", + return a dict {'subelement': 'Text'} + + Delegate the parsing of multi-level subelements to another function. 
+ For example, given the element: + + + FooBar + + + and a dictionary {'subelement': parse_subelement}, + call parse_subelement() and + return a dict {'subelement': } + if parse_subelement returns a tuple of the form + (True, {'subelement-key': }) + then merge the second element of the tuple into the + result (which may have a key other than 'subelement' or + more than 1 key) + """ + result = {} + for sub in element: + t = fixtag(sub.tag, NS_MAP)[0] + if ":" in t: + t = t.split(":")[1] + if t in valid_els: + result[t] = sub.text or "" + elif t in inner_els.keys(): + inner_result = inner_els[t](sub) + if isinstance(inner_result, tuple) and inner_result[0]: + result.update(inner_result[1]) + else: + result[t] = inner_result + # add counts for lists when available + m = re.match(r'([a-z0-9-]+)-list', t) + if m and "count" in sub.attrib: + result["%s-count" % m.group(1)] = int(sub.attrib["count"]) + else: + _log.info("in <%s>, uncaught <%s>", + fixtag(element.tag, NS_MAP)[0], t) + return result + +def parse_attributes(attributes, element): + """ Extract attributes from an element. 
+ For example, given the element: + + and a list attributes that contains "type", + return a dict {'type': 'Group'} + """ + result = {} + for attr in element.attrib: + if "{" in attr: + a = fixtag(attr, NS_MAP)[0] + else: + a = attr + if a in attributes: + result[a] = element.attrib[attr] + else: + _log.info("in <%s>, uncaught attribute %s", fixtag(element.tag, NS_MAP)[0], attr) + + return result + +def parse_message(message): + tree = util.bytes_to_elementtree(message) + root = tree.getroot() + result = {} + valid_elements = {"area": parse_area, + "artist": parse_artist, + "instrument": parse_instrument, + "label": parse_label, + "place": parse_place, + "event": parse_event, + "release": parse_release, + "release-group": parse_release_group, + "series": parse_series, + "recording": parse_recording, + "work": parse_work, + "url": parse_url, + + "disc": parse_disc, + "cdstub": parse_cdstub, + "isrc": parse_isrc, + + "annotation-list": parse_annotation_list, + "area-list": parse_area_list, + "artist-list": parse_artist_list, + "label-list": parse_label_list, + "place-list": parse_place_list, + "event-list": parse_event_list, + "instrument-list": parse_instrument_list, + "release-list": parse_release_list, + "release-group-list": parse_release_group_list, + "series-list": parse_series_list, + "recording-list": parse_recording_list, + "work-list": parse_work_list, + "url-list": parse_url_list, + + "collection-list": parse_collection_list, + "collection": parse_collection, + + "message": parse_response_message + } + result.update(parse_elements([], valid_elements, root)) + return result + +def parse_response_message(message): + return parse_elements(["text"], {}, message) + +def parse_collection_list(cl): + return [parse_collection(c) for c in cl] + +def parse_collection(collection): + result = {} + attribs = ["id", "type", "entity-type"] + elements = ["name", "editor"] + inner_els = {"release-list": parse_release_list, + "artist-list": parse_artist_list, + 
"event-list": parse_event_list, + "place-list": parse_place_list, + "recording-list": parse_recording_list, + "work-list": parse_work_list} + result.update(parse_attributes(attribs, collection)) + result.update(parse_elements(elements, inner_els, collection)) + + return result + +def parse_annotation_list(al): + return [parse_annotation(a) for a in al] + +def parse_annotation(annotation): + result = {} + attribs = ["type", "ext:score"] + elements = ["entity", "name", "text"] + result.update(parse_attributes(attribs, annotation)) + result.update(parse_elements(elements, {}, annotation)) + return result + +def parse_lifespan(lifespan): + parts = parse_elements(["begin", "end", "ended"], {}, lifespan) + + return parts + +def parse_area_list(al): + return [parse_area(a) for a in al] + +def parse_area(area): + result = {} + attribs = ["id", "type", "ext:score"] + elements = ["name", "sort-name", "disambiguation"] + inner_els = {"life-span": parse_lifespan, + "alias-list": parse_alias_list, + "relation-list": parse_relation_list, + "annotation": parse_annotation, + "iso-3166-1-code-list": parse_element_list, + "iso-3166-2-code-list": parse_element_list, + "iso-3166-3-code-list": parse_element_list} + + result.update(parse_attributes(attribs, area)) + result.update(parse_elements(elements, inner_els, area)) + + return result + +def parse_artist_list(al): + return [parse_artist(a) for a in al] + +def parse_artist(artist): + result = {} + attribs = ["id", "type", "ext:score"] + elements = ["name", "sort-name", "country", "user-rating", + "disambiguation", "gender", "ipi"] + inner_els = {"area": parse_area, + "begin-area": parse_area, + "end-area": parse_area, + "life-span": parse_lifespan, + "recording-list": parse_recording_list, + "relation-list": parse_relation_list, + "release-list": parse_release_list, + "release-group-list": parse_release_group_list, + "work-list": parse_work_list, + "tag-list": parse_tag_list, + "user-tag-list": parse_tag_list, + "rating": 
parse_rating, + "ipi-list": parse_element_list, + "isni-list": parse_element_list, + "alias-list": parse_alias_list, + "annotation": parse_annotation} + + result.update(parse_attributes(attribs, artist)) + result.update(parse_elements(elements, inner_els, artist)) + + return result + +def parse_coordinates(c): + return parse_elements(['latitude', 'longitude'], {}, c) + +def parse_place_list(pl): + return [parse_place(p) for p in pl] + +def parse_place(place): + result = {} + attribs = ["id", "type", "ext:score"] + elements = ["name", "address", + "ipi", "disambiguation"] + inner_els = {"area": parse_area, + "coordinates": parse_coordinates, + "life-span": parse_lifespan, + "tag-list": parse_tag_list, + "user-tag-list": parse_tag_list, + "alias-list": parse_alias_list, + "relation-list": parse_relation_list, + "annotation": parse_annotation} + + result.update(parse_attributes(attribs, place)) + result.update(parse_elements(elements, inner_els, place)) + + return result + +def parse_event_list(el): + return [parse_event(e) for e in el] + +def parse_event(event): + result = {} + attribs = ["id", "type", "ext:score"] + elements = ["name", "time", "setlist", "cancelled", "disambiguation", "user-rating"] + inner_els = {"life-span": parse_lifespan, + "relation-list": parse_relation_list, + "alias-list": parse_alias_list, + "tag-list": parse_tag_list, + "user-tag-list": parse_tag_list, + "rating": parse_rating} + + result.update(parse_attributes(attribs, event)) + result.update(parse_elements(elements, inner_els, event)) + + return result + +def parse_instrument(instrument): + result = {} + attribs = ["id", "type", "ext:score"] + elements = ["name", "description", "disambiguation"] + inner_els = {"relation-list": parse_relation_list, + "tag-list": parse_tag_list, + "alias-list": parse_alias_list, + "annotation": parse_annotation} + result.update(parse_attributes(attribs, instrument)) + result.update(parse_elements(elements, inner_els, instrument)) + + return result + +def 
parse_label_list(ll): + return [parse_label(l) for l in ll] + +def parse_label(label): + result = {} + attribs = ["id", "type", "ext:score"] + elements = ["name", "sort-name", "country", "label-code", "user-rating", + "ipi", "disambiguation"] + inner_els = {"area": parse_area, + "life-span": parse_lifespan, + "release-list": parse_release_list, + "tag-list": parse_tag_list, + "user-tag-list": parse_tag_list, + "rating": parse_rating, + "ipi-list": parse_element_list, + "alias-list": parse_alias_list, + "relation-list": parse_relation_list, + "annotation": parse_annotation} + + result.update(parse_attributes(attribs, label)) + result.update(parse_elements(elements, inner_els, label)) + + return result + +def parse_relation_target(tgt): + attributes = parse_attributes(['id'], tgt) + if 'id' in attributes: + return (True, {'target-id': attributes['id']}) + else: + return (True, {'target-id': tgt.text}) + +def parse_relation_list(rl): + attribs = ["target-type"] + ttype = parse_attributes(attribs, rl) + key = "%s-relation-list" % ttype["target-type"] + return (True, {key: [parse_relation(r) for r in rl]}) + +def parse_relation(relation): + result = {} + attribs = ["type", "type-id"] + elements = ["target", "direction", "begin", "end", "ended", "ordering-key"] + inner_els = {"area": parse_area, + "artist": parse_artist, + "instrument": parse_instrument, + "label": parse_label, + "place": parse_place, + "event": parse_event, + "recording": parse_recording, + "release": parse_release, + "release-group": parse_release_group, + "series": parse_series, + "attribute-list": parse_element_list, + "work": parse_work, + "target": parse_relation_target + } + result.update(parse_attributes(attribs, relation)) + result.update(parse_elements(elements, inner_els, relation)) + # We parse attribute-list again to get attributes that have both + # text and attribute values + result.update(parse_elements([], {"attribute-list": parse_relation_attribute_list}, relation)) + + return result + 
+def parse_relation_attribute_list(attributelist): + ret = [] + for attribute in attributelist: + ret.append(parse_relation_attribute_element(attribute)) + return (True, {"attributes": ret}) + +def parse_relation_attribute_element(element): + # Parses an attribute into a dictionary containing an element + # {"attribute": } and also an additional element + # containing any xml attributes. + # e.g number + # -> {"attribute": "number", "value": "BuxWV 1"} + result = {} + for attr in element.attrib: + if "{" in attr: + a = fixtag(attr, NS_MAP)[0] + else: + a = attr + result[a] = element.attrib[attr] + result["attribute"] = element.text + return result + +def parse_release(release): + result = {} + attribs = ["id", "ext:score"] + elements = ["title", "status", "disambiguation", "quality", "country", + "barcode", "date", "packaging", "asin"] + inner_els = {"text-representation": parse_text_representation, + "artist-credit": parse_artist_credit, + "label-info-list": parse_label_info_list, + "medium-list": parse_medium_list, + "release-group": parse_release_group, + "tag-list": parse_tag_list, + "user-tag-list": parse_tag_list, + "relation-list": parse_relation_list, + "annotation": parse_annotation, + "cover-art-archive": parse_caa, + "release-event-list": parse_release_event_list} + + result.update(parse_attributes(attribs, release)) + result.update(parse_elements(elements, inner_els, release)) + if "artist-credit" in result: + result["artist-credit-phrase"] = make_artist_credit( + result["artist-credit"]) + + return result + +def parse_medium_list(ml): + """medium-list results from search have an additional + element containing the number of tracks + over all mediums. 
Optionally add this""" + medium_list = [] + track_count = None + for m in ml: + tag = fixtag(m.tag, NS_MAP)[0] + if tag == "ws2:medium": + medium_list.append(parse_medium(m)) + elif tag == "ws2:track-count": + track_count = int(m.text) + ret = {"medium-list": medium_list} + if track_count is not None: + ret["medium-track-count"] = track_count + + return (True, ret) + +def parse_release_event_list(rel): + return [parse_release_event(re) for re in rel] + +def parse_release_event(event): + result = {} + elements = ["date"] + inner_els = {"area": parse_area} + + result.update(parse_elements(elements, inner_els, event)) + return result + +def parse_medium(medium): + result = {} + elements = ["position", "format", "title"] + inner_els = {"disc-list": parse_disc_list, + "pregap": parse_track, + "track-list": parse_track_list, + "data-track-list": parse_track_list} + + result.update(parse_elements(elements, inner_els, medium)) + return result + +def parse_disc_list(dl): + return [parse_disc(d) for d in dl] + +def parse_text_representation(textr): + return parse_elements(["language", "script"], {}, textr) + +def parse_release_group(rg): + result = {} + attribs = ["id", "type", "ext:score"] + elements = ["title", "user-rating", "first-release-date", "primary-type", + "disambiguation"] + inner_els = {"artist-credit": parse_artist_credit, + "release-list": parse_release_list, + "tag-list": parse_tag_list, + "user-tag-list": parse_tag_list, + "secondary-type-list": parse_element_list, + "relation-list": parse_relation_list, + "rating": parse_rating, + "annotation": parse_annotation} + + result.update(parse_attributes(attribs, rg)) + result.update(parse_elements(elements, inner_els, rg)) + if "artist-credit" in result: + result["artist-credit-phrase"] = make_artist_credit(result["artist-credit"]) + + return result + +def parse_recording(recording): + result = {} + attribs = ["id", "ext:score"] + elements = ["title", "length", "user-rating", "disambiguation", "video"] + inner_els 
= {"artist-credit": parse_artist_credit, + "release-list": parse_release_list, + "tag-list": parse_tag_list, + "user-tag-list": parse_tag_list, + "rating": parse_rating, + "isrc-list": parse_external_id_list, + "echoprint-list": parse_external_id_list, + "relation-list": parse_relation_list, + "annotation": parse_annotation} + + result.update(parse_attributes(attribs, recording)) + result.update(parse_elements(elements, inner_els, recording)) + if "artist-credit" in result: + result["artist-credit-phrase"] = make_artist_credit(result["artist-credit"]) + + return result + +def parse_series_list(sl): + return [parse_series(s) for s in sl] + +def parse_series(series): + result = {} + attribs = ["id", "type", "ext:score"] + elements = ["name", "disambiguation"] + inner_els = {"alias-list": parse_alias_list, + "relation-list": parse_relation_list, + "annotation": parse_annotation} + + result.update(parse_attributes(attribs, series)) + result.update(parse_elements(elements, inner_els, series)) + + return result + +def parse_external_id_list(pl): + return [parse_attributes(["id"], p)["id"] for p in pl] + +def parse_element_list(el): + return [e.text for e in el] + +def parse_work_list(wl): + return [parse_work(w) for w in wl] + +def parse_work(work): + result = {} + attribs = ["id", "ext:score", "type"] + elements = ["title", "user-rating", "language", "iswc", "disambiguation"] + inner_els = {"tag-list": parse_tag_list, + "user-tag-list": parse_tag_list, + "rating": parse_rating, + "alias-list": parse_alias_list, + "iswc-list": parse_element_list, + "relation-list": parse_relation_list, + "annotation": parse_response_message, + "attribute-list": parse_work_attribute_list + } + + result.update(parse_attributes(attribs, work)) + result.update(parse_elements(elements, inner_els, work)) + + return result + +def parse_work_attribute_list(wal): + return [parse_work_attribute(wa) for wa in wal] + +def parse_work_attribute(wa): + attribs = ["type"] + typeinfo = 
parse_attributes(attribs, wa) + result = {} + if typeinfo: + result = {"attribute": typeinfo["type"], + "value": wa.text} + + return result + + +def parse_url_list(ul): + return [parse_url(u) for u in ul] + +def parse_url(url): + result = {} + attribs = ["id"] + elements = ["resource"] + inner_els = {"relation-list": parse_relation_list} + + result.update(parse_attributes(attribs, url)) + result.update(parse_elements(elements, inner_els, url)) + + return result + +def parse_disc(disc): + result = {} + attribs = ["id"] + elements = ["sectors"] + inner_els = {"release-list": parse_release_list, + "offset-list": parse_offset_list + } + + result.update(parse_attributes(attribs, disc)) + result.update(parse_elements(elements, inner_els, disc)) + + return result + +def parse_cdstub(cdstub): + result = {} + attribs = ["id"] + elements = ["title", "artist", "barcode"] + inner_els = {"track-list": parse_track_list} + + result.update(parse_attributes(attribs, cdstub)) + result.update(parse_elements(elements, inner_els, cdstub)) + + return result + +def parse_offset_list(ol): + return [int(o.text) for o in ol] + +def parse_instrument_list(rl): + result = [] + for r in rl: + result.append(parse_instrument(r)) + return result + +def parse_release_list(rl): + result = [] + for r in rl: + result.append(parse_release(r)) + return result + +def parse_release_group_list(rgl): + result = [] + for rg in rgl: + result.append(parse_release_group(rg)) + return result + +def parse_isrc(isrc): + result = {} + attribs = ["id"] + inner_els = {"recording-list": parse_recording_list} + + result.update(parse_attributes(attribs, isrc)) + result.update(parse_elements([], inner_els, isrc)) + + return result + +def parse_recording_list(recs): + result = [] + for r in recs: + result.append(parse_recording(r)) + return result + +def parse_artist_credit(ac): + result = [] + for namecredit in ac: + result.append(parse_name_credit(namecredit)) + join = parse_attributes(["joinphrase"], namecredit) + if 
"joinphrase" in join: + result.append(join["joinphrase"]) + return result + +def parse_name_credit(nc): + result = {} + elements = ["name"] + inner_els = {"artist": parse_artist} + + result.update(parse_elements(elements, inner_els, nc)) + + return result + +def parse_label_info_list(lil): + result = [] + + for li in lil: + result.append(parse_label_info(li)) + return result + +def parse_label_info(li): + result = {} + elements = ["catalog-number"] + inner_els = {"label": parse_label} + + result.update(parse_elements(elements, inner_els, li)) + return result + +def parse_track_list(tl): + result = [] + for t in tl: + result.append(parse_track(t)) + return result + +def parse_track(track): + result = {} + attribs = ["id"] + elements = ["number", "position", "title", "length"] + inner_els = {"recording": parse_recording, + "artist-credit": parse_artist_credit} + + result.update(parse_attributes(attribs, track)) + result.update(parse_elements(elements, inner_els, track)) + if "artist-credit" in result.get("recording", {}) and "artist-credit" not in result: + result["artist-credit"] = result["recording"]["artist-credit"] + if "artist-credit" in result: + result["artist-credit-phrase"] = make_artist_credit(result["artist-credit"]) + # Make a length field that contains track length or recording length + track_or_recording = None + if "length" in result: + track_or_recording = result["length"] + elif result.get("recording", {}).get("length"): + track_or_recording = result.get("recording", {}).get("length") + if track_or_recording: + result["track_or_recording_length"] = track_or_recording + return result + +def parse_tag_list(tl): + return [parse_tag(t) for t in tl] + +def parse_tag(tag): + result = {} + attribs = ["count"] + elements = ["name"] + + result.update(parse_attributes(attribs, tag)) + result.update(parse_elements(elements, {}, tag)) + + return result + +def parse_rating(rating): + result = {} + attribs = ["votes-count"] + + 
result.update(parse_attributes(attribs, rating)) + result["rating"] = rating.text + + return result + +def parse_alias_list(al): + return [parse_alias(a) for a in al] + +def parse_alias(alias): + result = {} + attribs = ["locale", "sort-name", "type", "primary", + "begin-date", "end-date"] + + result.update(parse_attributes(attribs, alias)) + result["alias"] = alias.text + + return result + +def parse_caa(caa_element): + result = {} + elements = ["artwork", "count", "front", "back", "darkened"] + + result.update(parse_elements(elements, {}, caa_element)) + return result + + +### + +def make_barcode_request(release2barcode): + NS = "http://musicbrainz.org/ns/mmd-2.0#" + root = ET.Element("{%s}metadata" % NS) + rel_list = ET.SubElement(root, "{%s}release-list" % NS) + for release, barcode in release2barcode.items(): + rel_xml = ET.SubElement(rel_list, "{%s}release" % NS) + bar_xml = ET.SubElement(rel_xml, "{%s}barcode" % NS) + rel_xml.set("{%s}id" % NS, release) + bar_xml.text = barcode + + return ET.tostring(root, "utf-8") + +def make_tag_request(**kwargs): + NS = "http://musicbrainz.org/ns/mmd-2.0#" + root = ET.Element("{%s}metadata" % NS) + for entity_type in ['artist', 'label', 'place', 'recording', 'release', 'release_group', 'work']: + entity_tags = kwargs.pop(entity_type + '_tags', None) + if entity_tags is not None: + e_list = ET.SubElement(root, "{%s}%s-list" % (NS, entity_type.replace('_', '-'))) + for e, tags in entity_tags.items(): + e_xml = ET.SubElement(e_list, "{%s}%s" % (NS, entity_type.replace('_', '-'))) + e_xml.set("{%s}id" % NS, e) + taglist = ET.SubElement(e_xml, "{%s}user-tag-list" % NS) + for tag in tags: + usertag_xml = ET.SubElement(taglist, "{%s}user-tag" % NS) + name_xml = ET.SubElement(usertag_xml, "{%s}name" % NS) + name_xml.text = tag + if kwargs.keys(): + raise TypeError("make_tag_request() got an unexpected keyword argument '%s'" % kwargs.popitem()[0]) + + return ET.tostring(root, "utf-8") + +def make_rating_request(**kwargs): + NS = 
"http://musicbrainz.org/ns/mmd-2.0#" + root = ET.Element("{%s}metadata" % NS) + for entity_type in ['artist', 'label', 'recording', 'release_group', 'work']: + entity_ratings = kwargs.pop(entity_type + '_ratings', None) + if entity_ratings is not None: + e_list = ET.SubElement(root, "{%s}%s-list" % (NS, entity_type.replace('_', '-'))) + for e, rating in entity_ratings.items(): + e_xml = ET.SubElement(e_list, "{%s}%s" % (NS, entity_type.replace('_', '-'))) + e_xml.set("{%s}id" % NS, e) + rating_xml = ET.SubElement(e_xml, "{%s}user-rating" % NS) + rating_xml.text = str(rating) + if kwargs.keys(): + raise TypeError("make_rating_request() got an unexpected keyword argument '%s'" % kwargs.popitem()[0]) + + return ET.tostring(root, "utf-8") + +def make_isrc_request(recording2isrcs): + NS = "http://musicbrainz.org/ns/mmd-2.0#" + root = ET.Element("{%s}metadata" % NS) + rec_list = ET.SubElement(root, "{%s}recording-list" % NS) + for rec, isrcs in recording2isrcs.items(): + if len(isrcs) > 0: + rec_xml = ET.SubElement(rec_list, "{%s}recording" % NS) + rec_xml.set("{%s}id" % NS, rec) + isrc_list_xml = ET.SubElement(rec_xml, "{%s}isrc-list" % NS) + isrc_list_xml.set("{%s}count" % NS, str(len(isrcs))) + for isrc in isrcs: + isrc_xml = ET.SubElement(isrc_list_xml, "{%s}isrc" % NS) + isrc_xml.set("{%s}id" % NS, isrc) + return ET.tostring(root, "utf-8") diff --git a/libs/musicbrainzngs/musicbrainz.py b/libs/musicbrainzngs/musicbrainz.py new file mode 100644 index 00000000..953c79b8 --- /dev/null +++ b/libs/musicbrainzngs/musicbrainz.py @@ -0,0 +1,1337 @@ +# This file is part of the musicbrainzngs library +# Copyright (C) Alastair Porter, Adrian Sampson, and others +# This file is distributed under a BSD-2-Clause type license. +# See the COPYING file for more information. 
+ +import re +import threading +import time +import logging +import socket +import hashlib +import locale +import sys +import json +import xml.etree.ElementTree as etree +from xml.parsers import expat +from warnings import warn + +from musicbrainzngs import mbxml +from musicbrainzngs import util +from musicbrainzngs import compat + +_version = "0.6" +_log = logging.getLogger("musicbrainzngs") + +LUCENE_SPECIAL = r'([+\-&|!(){}\[\]\^"~*?:\\\/])' + +# Constants for validation. + +RELATABLE_TYPES = ['area', 'artist', 'label', 'place', 'event', 'recording', 'release', 'release-group', 'series', 'url', 'work', 'instrument'] +RELATION_INCLUDES = [entity + '-rels' for entity in RELATABLE_TYPES] +TAG_INCLUDES = ["tags", "user-tags"] +RATING_INCLUDES = ["ratings", "user-ratings"] + +VALID_INCLUDES = { + 'area' : ["aliases", "annotation"] + RELATION_INCLUDES, + 'artist': [ + "recordings", "releases", "release-groups", "works", # Subqueries + "various-artists", "discids", "media", "isrcs", + "aliases", "annotation" + ] + RELATION_INCLUDES + TAG_INCLUDES + RATING_INCLUDES, + 'annotation': [ + + ], + 'instrument': ["aliases", "annotation" + ] + RELATION_INCLUDES + TAG_INCLUDES, + 'label': [ + "releases", # Subqueries + "discids", "media", + "aliases", "annotation" + ] + RELATION_INCLUDES + TAG_INCLUDES + RATING_INCLUDES, + 'place' : ["aliases", "annotation"] + RELATION_INCLUDES + TAG_INCLUDES, + 'event' : ["aliases"] + RELATION_INCLUDES + TAG_INCLUDES + RATING_INCLUDES, + 'recording': [ + "artists", "releases", # Subqueries + "discids", "media", "artist-credits", "isrcs", + "annotation", "aliases" + ] + TAG_INCLUDES + RATING_INCLUDES + RELATION_INCLUDES, + 'release': [ + "artists", "labels", "recordings", "release-groups", "media", + "artist-credits", "discids", "puids", "isrcs", + "recording-level-rels", "work-level-rels", "annotation", "aliases" + ] + TAG_INCLUDES + RELATION_INCLUDES, + 'release-group': [ + "artists", "releases", "discids", "media", + "artist-credits", 
"annotation", "aliases" + ] + TAG_INCLUDES + RATING_INCLUDES + RELATION_INCLUDES, + 'series': [ + "annotation", "aliases" + ] + RELATION_INCLUDES, + 'work': [ + "artists", # Subqueries + "aliases", "annotation" + ] + TAG_INCLUDES + RATING_INCLUDES + RELATION_INCLUDES, + 'url': RELATION_INCLUDES, + 'discid': [ # Discid should be the same as release + "artists", "labels", "recordings", "release-groups", "media", + "artist-credits", "discids", "puids", "isrcs", + "recording-level-rels", "work-level-rels", "annotation", "aliases" + ] + RELATION_INCLUDES, + 'isrc': ["artists", "releases", "puids", "isrcs"], + 'iswc': ["artists"], + 'collection': ['releases'], +} +VALID_BROWSE_INCLUDES = { + 'artist': ["aliases"] + TAG_INCLUDES + RATING_INCLUDES + RELATION_INCLUDES, + 'event': ["aliases"] + TAG_INCLUDES + RATING_INCLUDES + RELATION_INCLUDES, + 'label': ["aliases"] + TAG_INCLUDES + RATING_INCLUDES + RELATION_INCLUDES, + 'recording': ["artist-credits", "isrcs"] + TAG_INCLUDES + RATING_INCLUDES + RELATION_INCLUDES, + 'release': ["artist-credits", "labels", "recordings", "isrcs", + "release-groups", "media", "discids"] + RELATION_INCLUDES, + 'place': ["aliases"] + TAG_INCLUDES + RELATION_INCLUDES, + 'release-group': ["artist-credits"] + TAG_INCLUDES + RATING_INCLUDES + RELATION_INCLUDES, + 'url': RELATION_INCLUDES, + 'work': ["aliases", "annotation"] + TAG_INCLUDES + RATING_INCLUDES + RELATION_INCLUDES, +} + +#: These can be used to filter whenever releases are includes or browsed +VALID_RELEASE_TYPES = [ + "nat", + "album", "single", "ep", "broadcast", "other", # primary types + "compilation", "soundtrack", "spokenword", "interview", "audiobook", + "live", "remix", "dj-mix", "mixtape/street", # secondary types +] +#: These can be used to filter whenever releases or release-groups are involved +VALID_RELEASE_STATUSES = ["official", "promotion", "bootleg", "pseudo-release"] +VALID_SEARCH_FIELDS = { + 'annotation': [ + 'entity', 'name', 'text', 'type' + ], + 'area': [ + 'aid', 
'area', 'alias', 'begin', 'comment', 'end', 'ended', + 'iso', 'iso1', 'iso2', 'iso3', 'type' + ], + 'artist': [ + 'arid', 'artist', 'artistaccent', 'alias', 'begin', 'comment', + 'country', 'end', 'ended', 'gender', 'ipi', 'sortname', 'tag', 'type', + 'area', 'beginarea', 'endarea' + ], + 'label': [ + 'alias', 'begin', 'code', 'comment', 'country', 'end', 'ended', + 'ipi', 'label', 'labelaccent', 'laid', 'sortname', 'type', 'tag', + 'area' + ], + 'recording': [ + 'arid', 'artist', 'artistname', 'creditname', 'comment', + 'country', 'date', 'dur', 'format', 'isrc', 'number', + 'position', 'primarytype', 'puid', 'qdur', 'recording', + 'recordingaccent', 'reid', 'release', 'rgid', 'rid', + 'secondarytype', 'status', 'tnum', 'tracks', 'tracksrelease', + 'tag', 'type', 'video' + ], + 'release-group': [ + 'arid', 'artist', 'artistname', 'comment', 'creditname', + 'primarytype', 'rgid', 'releasegroup', 'releasegroupaccent', + 'releases', 'release', 'reid', 'secondarytype', 'status', + 'tag', 'type' + ], + 'release': [ + 'arid', 'artist', 'artistname', 'asin', 'barcode', 'creditname', + 'catno', 'comment', 'country', 'creditname', 'date', 'discids', + 'discidsmedium', 'format', 'laid', 'label', 'lang', 'mediums', + 'primarytype', 'puid', 'quality', 'reid', 'release', 'releaseaccent', + 'rgid', 'script', 'secondarytype', 'status', 'tag', 'tracks', + 'tracksmedium', 'type' + ], + 'series': [ + 'alias', 'comment', 'sid', 'series', 'type' + ], + 'work': [ + 'alias', 'arid', 'artist', 'comment', 'iswc', 'lang', 'tag', + 'type', 'wid', 'work', 'workaccent' + ], +} + +# Constants +class AUTH_YES: pass +class AUTH_NO: pass +class AUTH_IFSET: pass + + +# Exceptions. 
+ +class MusicBrainzError(Exception): + """Base class for all exceptions related to MusicBrainz.""" + pass + +class UsageError(MusicBrainzError): + """Error related to misuse of the module API.""" + pass + +class InvalidSearchFieldError(UsageError): + pass + +class InvalidIncludeError(UsageError): + def __init__(self, msg='Invalid Includes', reason=None): + super(InvalidIncludeError, self).__init__(self) + self.msg = msg + self.reason = reason + + def __str__(self): + return self.msg + +class InvalidFilterError(UsageError): + def __init__(self, msg='Invalid Includes', reason=None): + super(InvalidFilterError, self).__init__(self) + self.msg = msg + self.reason = reason + + def __str__(self): + return self.msg + +class WebServiceError(MusicBrainzError): + """Error related to MusicBrainz API requests.""" + def __init__(self, message=None, cause=None): + """Pass ``cause`` if this exception was caused by another + exception. + """ + self.message = message + self.cause = cause + + def __str__(self): + if self.message: + msg = "%s, " % self.message + else: + msg = "" + msg += "caused by: %s" % str(self.cause) + return msg + +class NetworkError(WebServiceError): + """Problem communicating with the MB server.""" + pass + +class ResponseError(WebServiceError): + """Bad response sent by the MB server.""" + pass + +class AuthenticationError(WebServiceError): + """Received a HTTP 401 response while accessing a protected resource.""" + pass + + +# Helpers for validating and formatting allowed sets. 
+ +def _check_includes_impl(includes, valid_includes): + for i in includes: + if i not in valid_includes: + raise InvalidIncludeError("Bad includes: " + "%s is not a valid include" % i) +def _check_includes(entity, inc): + _check_includes_impl(inc, VALID_INCLUDES[entity]) + +def _check_filter(values, valid): + for v in values: + if v not in valid: + raise InvalidFilterError(v) + +def _check_filter_and_make_params(entity, includes, release_status=[], release_type=[]): + """Check that the status or type values are valid. Then, check that + the filters can be used with the given includes. Return a params + dict that can be passed to _do_mb_query. + """ + if isinstance(release_status, compat.basestring): + release_status = [release_status] + if isinstance(release_type, compat.basestring): + release_type = [release_type] + _check_filter(release_status, VALID_RELEASE_STATUSES) + _check_filter(release_type, VALID_RELEASE_TYPES) + + if (release_status + and "releases" not in includes and entity != "release"): + raise InvalidFilterError("Can't have a status with no release include") + if (release_type + and "release-groups" not in includes and "releases" not in includes + and entity not in ["release-group", "release"]): + raise InvalidFilterError("Can't have a release type " + "with no releases or release-groups involved") + + # Build parameters. 
+ params = {} + if len(release_status): + params["status"] = "|".join(release_status) + if len(release_type): + params["type"] = "|".join(release_type) + return params + +def _docstring_get(entity): + includes = list(VALID_INCLUDES.get(entity, [])) + return _docstring_impl("includes", includes) + +def _docstring_browse(entity): + includes = list(VALID_BROWSE_INCLUDES.get(entity, [])) + return _docstring_impl("includes", includes) + +def _docstring_search(entity): + search_fields = list(VALID_SEARCH_FIELDS.get(entity, [])) + return _docstring_impl("fields", search_fields) + +def _docstring_impl(name, values): + def _decorator(func): + # puids are allowed so nothing breaks, but not documented + if "puids" in values: values.remove("puids") + vstr = ", ".join(values) + args = {name: vstr} + if func.__doc__: + func.__doc__ = func.__doc__.format(**args) + return func + + return _decorator + + +# Global authentication and endpoint details. + +user = password = "" +hostname = "musicbrainz.org" +_client = "" +_useragent = "" + +def auth(u, p): + """Set the username and password to be used in subsequent queries to + the MusicBrainz XML API that require authentication. + """ + global user, password + user = u + password = p + +def set_useragent(app, version, contact=None): + """Set the User-Agent to be used for requests to the MusicBrainz webservice. + This must be set before requests are made.""" + global _useragent, _client + if not app or not version: + raise ValueError("App and version can not be empty") + if contact is not None: + _useragent = "%s/%s python-musicbrainzngs/%s ( %s )" % (app, version, _version, contact) + else: + _useragent = "%s/%s python-musicbrainzngs/%s" % (app, version, _version) + _client = "%s-%s" % (app, version) + _log.debug("set user-agent to %s" % _useragent) + +def set_hostname(new_hostname): + """Set the hostname for MusicBrainz webservice requests. + Defaults to 'musicbrainz.org'. 
+ You can also include a port: 'localhost:8000'.""" + global hostname + hostname = new_hostname + +# Rate limiting. + +limit_interval = 1.0 +limit_requests = 1 +do_rate_limit = True + +def set_rate_limit(limit_or_interval=1.0, new_requests=1): + """Sets the rate limiting behavior of the module. Must be invoked + before the first Web service call. + If the `limit_or_interval` parameter is set to False then + rate limiting will be disabled. If it is a number then only + a set number of requests (`new_requests`) will be made per + given interval (`limit_or_interval`). + """ + global limit_interval + global limit_requests + global do_rate_limit + if isinstance(limit_or_interval, bool): + do_rate_limit = limit_or_interval + else: + if limit_or_interval <= 0.0: + raise ValueError("limit_or_interval can't be less than 0") + if new_requests <= 0: + raise ValueError("new_requests can't be less than 0") + do_rate_limit = True + limit_interval = limit_or_interval + limit_requests = new_requests + +class _rate_limit(object): + """A decorator that limits the rate at which the function may be + called. The rate is controlled by the `limit_interval` and + `limit_requests` global variables. The limiting is thread-safe; + only one thread may be in the function at a time (acts like a + monitor in this sense). The globals must be set before the first + call to the limited function. + """ + def __init__(self, fun): + self.fun = fun + self.last_call = 0.0 + self.lock = threading.Lock() + self.remaining_requests = None # Set on first invocation. + + def _update_remaining(self): + """Update remaining requests based on the elapsed time since + they were last calculated. + """ + # On first invocation, we have the maximum number of requests + # available. 
+ if self.remaining_requests is None: + self.remaining_requests = float(limit_requests) + + else: + since_last_call = time.time() - self.last_call + self.remaining_requests += since_last_call * \ + (limit_requests / limit_interval) + self.remaining_requests = min(self.remaining_requests, + float(limit_requests)) + + self.last_call = time.time() + + def __call__(self, *args, **kwargs): + with self.lock: + if do_rate_limit: + self._update_remaining() + + # Delay if necessary. + while self.remaining_requests < 0.999: + time.sleep((1.0 - self.remaining_requests) * + (limit_requests / limit_interval)) + self._update_remaining() + + # Call the original function, "paying" for this call. + self.remaining_requests -= 1.0 + return self.fun(*args, **kwargs) + +# From pymb2 +class _RedirectPasswordMgr(compat.HTTPPasswordMgr): + def __init__(self): + self._realms = { } + + def find_user_password(self, realm, uri): + # ignoring the uri parameter intentionally + try: + return self._realms[realm] + except KeyError: + return (None, None) + + def add_password(self, realm, uri, username, password): + # ignoring the uri parameter intentionally + self._realms[realm] = (username, password) + +class _DigestAuthHandler(compat.HTTPDigestAuthHandler): + def get_authorization (self, req, chal): + qop = chal.get ('qop', None) + if qop and ',' in qop and 'auth' in qop.split (','): + chal['qop'] = 'auth' + + return compat.HTTPDigestAuthHandler.get_authorization (self, req, chal) + + def _encode_utf8(self, msg): + """The MusicBrainz server also accepts UTF-8 encoded passwords.""" + encoding = sys.stdin.encoding or locale.getpreferredencoding() + try: + # This works on Python 2 (msg in bytes) + msg = msg.decode(encoding) + except AttributeError: + # on Python 3 (msg is already in unicode) + pass + return msg.encode("utf-8") + + def get_algorithm_impls(self, algorithm): + # algorithm should be case-insensitive according to RFC2617 + algorithm = algorithm.upper() + # lambdas assume digest modules 
are imported at the top level + if algorithm == 'MD5': + H = lambda x: hashlib.md5(self._encode_utf8(x)).hexdigest() + elif algorithm == 'SHA': + H = lambda x: hashlib.sha1(self._encode_utf8(x)).hexdigest() + # XXX MD5-sess + KD = lambda s, d: H("%s:%s" % (s, d)) + return H, KD + +class _MusicbrainzHttpRequest(compat.Request): + """ A custom request handler that allows DELETE and PUT""" + def __init__(self, method, url, data=None): + compat.Request.__init__(self, url, data) + allowed_m = ["GET", "POST", "DELETE", "PUT"] + if method not in allowed_m: + raise ValueError("invalid method: %s" % method) + self.method = method + + def get_method(self): + return self.method + + +# Core (internal) functions for calling the MB API. + +def _safe_read(opener, req, body=None, max_retries=8, retry_delay_delta=2.0): + """Open an HTTP request with a given URL opener and (optionally) a + request body. Transient errors lead to retries. Permanent errors + and repeated errors are translated into a small set of handleable + exceptions. Return a bytestring. + """ + last_exc = None + for retry_num in range(max_retries): + if retry_num: # Not the first try: delay an increasing amount. + _log.info("retrying after delay (#%i)" % retry_num) + time.sleep(retry_num * retry_delay_delta) + + try: + if body: + f = opener.open(req, body) + else: + f = opener.open(req) + return f.read() + + except compat.HTTPError as exc: + if exc.code in (400, 404, 411): + # Bad request, not found, etc. + raise ResponseError(cause=exc) + elif exc.code in (503, 502, 500): + # Rate limiting, internal overloading... + _log.info("HTTP error %i" % exc.code) + elif exc.code in (401, ): + raise AuthenticationError(cause=exc) + else: + # Other, unknown error. Should handle more cases, but + # retrying for now. 
+ _log.info("unknown HTTP error %i" % exc.code) + last_exc = exc + except compat.BadStatusLine as exc: + _log.info("bad status line") + last_exc = exc + except compat.HTTPException as exc: + _log.info("miscellaneous HTTP exception: %s" % str(exc)) + last_exc = exc + except compat.URLError as exc: + if isinstance(exc.reason, socket.error): + code = exc.reason.errno + if code == 104: # "Connection reset by peer." + continue + raise NetworkError(cause=exc) + except socket.timeout as exc: + _log.info("socket timeout") + last_exc = exc + except socket.error as exc: + if exc.errno == 104: + continue + raise NetworkError(cause=exc) + except IOError as exc: + raise NetworkError(cause=exc) + + # Out of retries! + raise NetworkError("retried %i times" % max_retries, last_exc) + +# Get the XML parsing exceptions to catch. The behavior chnaged with Python 2.7 +# and ElementTree 1.3. +if hasattr(etree, 'ParseError'): + ETREE_EXCEPTIONS = (etree.ParseError, expat.ExpatError) +else: + ETREE_EXCEPTIONS = (expat.ExpatError) + + +# Parsing setup + +def mb_parser_null(resp): + """Return the raw response (XML)""" + return resp + +def mb_parser_xml(resp): + """Return a Python dict representing the XML response""" + # Parse the response. + try: + return mbxml.parse_message(resp) + except UnicodeError as exc: + raise ResponseError(cause=exc) + except Exception as exc: + if isinstance(exc, ETREE_EXCEPTIONS): + raise ResponseError(cause=exc) + else: + raise + +# Defaults +parser_fun = mb_parser_xml +ws_format = "xml" + +def set_parser(new_parser_fun=None): + """Sets the function used to parse the response from the + MusicBrainz web service. + + If no parser is given, the parser is reset to the default parser + :func:`mb_parser_xml`. 
+ """ + global parser_fun + if new_parser_fun is None: + new_parser_fun = mb_parser_xml + if not callable(new_parser_fun): + raise ValueError("new_parser_fun must be callable") + parser_fun = new_parser_fun + +def set_format(fmt="xml"): + """Sets the format that should be returned by the Web Service. + The server currently supports `xml` and `json`. + + This method will set a default parser for the specified format, + but you can modify it with :func:`set_parser`. + + .. warning:: The json format used by the server is different from + the json format returned by the `musicbrainzngs` internal parser + when using the `xml` format! This format may change at any time. + """ + global ws_format + if fmt == "xml": + ws_format = fmt + set_parser() # set to default + elif fmt == "json": + ws_format = fmt + warn("The json format is non-official and may change at any time") + set_parser(json.loads) + else: + raise ValueError("invalid format: %s" % fmt) + + +@_rate_limit +def _mb_request(path, method='GET', auth_required=AUTH_NO, + client_required=False, args=None, data=None, body=None): + """Makes a request for the specified `path` (endpoint) on /ws/2 on + the globally-specified hostname. Parses the responses and returns + the resulting object. `auth_required` and `client_required` control + whether exceptions should be raised if the username/password and + client are left unspecified, respectively. 
+ """ + global parser_fun + + if args is None: + args = {} + else: + args = dict(args) or {} + + if _useragent == "": + raise UsageError("set a proper user-agent with " + "set_useragent(\"application name\", \"application version\", \"contact info (preferably URL or email for your application)\")") + + if client_required: + args["client"] = _client + + if ws_format != "xml": + args["fmt"] = ws_format + + # Convert args from a dictionary to a list of tuples + # so that the ordering of elements is stable for easy + # testing (in this case we order alphabetically) + # Encode Unicode arguments using UTF-8. + newargs = [] + for key, value in sorted(args.items()): + if isinstance(value, compat.unicode): + value = value.encode('utf8') + newargs.append((key, value)) + + # Construct the full URL for the request, including hostname and + # query string. + url = compat.urlunparse(( + 'http', + hostname, + '/ws/2/%s' % path, + '', + compat.urlencode(newargs), + '' + )) + _log.debug("%s request for %s" % (method, url)) + + # Set up HTTP request handler and URL opener. + httpHandler = compat.HTTPHandler(debuglevel=0) + handlers = [httpHandler] + + # Add credentials if required. + add_auth = False + if auth_required == AUTH_YES: + _log.debug("Auth required for %s" % url) + if not user: + raise UsageError("authorization required; " + "use auth(user, pass) first") + add_auth = True + + if auth_required == AUTH_IFSET and user: + _log.debug("Using auth for %s because user and pass is set" % url) + add_auth = True + + if add_auth: + passwordMgr = _RedirectPasswordMgr() + authHandler = _DigestAuthHandler(passwordMgr) + authHandler.add_password("musicbrainz.org", (), user, password) + handlers.append(authHandler) + + opener = compat.build_opener(*handlers) + + # Make request. 
+ req = _MusicbrainzHttpRequest(method, url, data) + req.add_header('User-Agent', _useragent) + _log.debug("requesting with UA %s" % _useragent) + if body: + req.add_header('Content-Type', 'application/xml; charset=UTF-8') + elif not data and not req.has_header('Content-Length'): + # Explicitly indicate zero content length if no request data + # will be sent (avoids HTTP 411 error). + req.add_header('Content-Length', '0') + resp = _safe_read(opener, req, body) + + return parser_fun(resp) + +def _get_auth_type(entity, id, includes): + """ Some calls require authentication. This returns + True if a call does, False otherwise + """ + if "user-tags" in includes or "user-ratings" in includes: + return AUTH_YES + elif entity.startswith("collection"): + if not id: + return AUTH_YES + else: + return AUTH_IFSET + else: + return AUTH_NO + +def _do_mb_query(entity, id, includes=[], params={}): + """Make a single GET call to the MusicBrainz XML API. `entity` is a + string indicated the type of object to be retrieved. The id may be + empty, in which case the query is a search. `includes` is a list + of strings that must be valid includes for the entity type. `params` + is a dictionary of additional parameters for the API call. The + response is parsed and returned. + """ + # Build arguments. + if not isinstance(includes, list): + includes = [includes] + _check_includes(entity, includes) + auth_required = _get_auth_type(entity, id, includes) + args = dict(params) + if len(includes) > 0: + inc = " ".join(includes) + args["inc"] = inc + + # Build the endpoint components. + path = '%s/%s' % (entity, id) + return _mb_request(path, 'GET', auth_required, args=args) + +def _do_mb_search(entity, query='', fields={}, + limit=None, offset=None, strict=False): + """Perform a full-text search on the MusicBrainz search server. + `query` is a lucene query string when no fields are set, + but is escaped when any fields are given. `fields` is a dictionary + of key/value query parameters. 
They keys in `fields` must be valid + for the given entity type. + """ + # Encode the query terms as a Lucene query string. + query_parts = [] + if query: + clean_query = util._unicode(query) + if fields: + clean_query = re.sub(LUCENE_SPECIAL, r'\\\1', + clean_query) + if strict: + query_parts.append('"%s"' % clean_query) + else: + query_parts.append(clean_query.lower()) + else: + query_parts.append(clean_query) + for key, value in fields.items(): + # Ensure this is a valid search field. + if key not in VALID_SEARCH_FIELDS[entity]: + raise InvalidSearchFieldError( + '%s is not a valid search field for %s' % (key, entity) + ) + elif key == "puid": + warn("PUID support was removed from server\n" + "the 'puid' field is ignored", + Warning, stacklevel=2) + + # Escape Lucene's special characters. + value = util._unicode(value) + value = re.sub(LUCENE_SPECIAL, r'\\\1', value) + if value: + if strict: + query_parts.append('%s:"%s"' % (key, value)) + else: + value = value.lower() # avoid AND / OR + query_parts.append('%s:(%s)' % (key, value)) + if strict: + full_query = ' AND '.join(query_parts).strip() + else: + full_query = ' '.join(query_parts).strip() + + if not full_query: + raise ValueError('at least one query term is required') + + # Additional parameters to the search. + params = {'query': full_query} + if limit: + params['limit'] = str(limit) + if offset: + params['offset'] = str(offset) + + return _do_mb_query(entity, '', [], params) + +def _do_mb_delete(path): + """Send a DELETE request for the specified object. + """ + return _mb_request(path, 'DELETE', AUTH_YES, True) + +def _do_mb_put(path): + """Send a PUT request for the specified object. + """ + return _mb_request(path, 'PUT', AUTH_YES, True) + +def _do_mb_post(path, body): + """Perform a single POST call for an endpoint with a specified + request body. + """ + return _mb_request(path, 'POST', AUTH_YES, True, body=body) + + +# The main interface! 
+ +# Single entity by ID + +@_docstring_get("area") +def get_area_by_id(id, includes=[], release_status=[], release_type=[]): + """Get the area with the MusicBrainz `id` as a dict with an 'area' key. + + *Available includes*: {includes}""" + params = _check_filter_and_make_params("area", includes, + release_status, release_type) + return _do_mb_query("area", id, includes, params) + +@_docstring_get("artist") +def get_artist_by_id(id, includes=[], release_status=[], release_type=[]): + """Get the artist with the MusicBrainz `id` as a dict with an 'artist' key. + + *Available includes*: {includes}""" + params = _check_filter_and_make_params("artist", includes, + release_status, release_type) + return _do_mb_query("artist", id, includes, params) + +@_docstring_get("instrument") +def get_instrument_by_id(id, includes=[], release_status=[], release_type=[]): + """Get the instrument with the MusicBrainz `id` as a dict with an 'artist' key. + + *Available includes*: {includes}""" + params = _check_filter_and_make_params("instrument", includes, + release_status, release_type) + return _do_mb_query("instrument", id, includes, params) + +@_docstring_get("label") +def get_label_by_id(id, includes=[], release_status=[], release_type=[]): + """Get the label with the MusicBrainz `id` as a dict with a 'label' key. + + *Available includes*: {includes}""" + params = _check_filter_and_make_params("label", includes, + release_status, release_type) + return _do_mb_query("label", id, includes, params) + +@_docstring_get("place") +def get_place_by_id(id, includes=[], release_status=[], release_type=[]): + """Get the place with the MusicBrainz `id` as a dict with an 'place' key. 
+ + *Available includes*: {includes}""" + params = _check_filter_and_make_params("place", includes, + release_status, release_type) + return _do_mb_query("place", id, includes, params) + +@_docstring_get("event") +def get_event_by_id(id, includes=[], release_status=[], release_type=[]): + """Get the event with the MusicBrainz `id` as a dict with an 'event' key. + + The event dict has the following keys: + `id`, `type`, `name`, `time`, `disambiguation` and `life-span`. + + *Available includes*: {includes}""" + params = _check_filter_and_make_params("event", includes, + release_status, release_type) + return _do_mb_query("event", id, includes, params) + +@_docstring_get("recording") +def get_recording_by_id(id, includes=[], release_status=[], release_type=[]): + """Get the recording with the MusicBrainz `id` as a dict + with a 'recording' key. + + *Available includes*: {includes}""" + params = _check_filter_and_make_params("recording", includes, + release_status, release_type) + return _do_mb_query("recording", id, includes, params) + +@_docstring_get("release") +def get_release_by_id(id, includes=[], release_status=[], release_type=[]): + """Get the release with the MusicBrainz `id` as a dict with a 'release' key. + + *Available includes*: {includes}""" + params = _check_filter_and_make_params("release", includes, + release_status, release_type) + return _do_mb_query("release", id, includes, params) + +@_docstring_get("release-group") +def get_release_group_by_id(id, includes=[], + release_status=[], release_type=[]): + """Get the release group with the MusicBrainz `id` as a dict + with a 'release-group' key. + + *Available includes*: {includes}""" + params = _check_filter_and_make_params("release-group", includes, + release_status, release_type) + return _do_mb_query("release-group", id, includes, params) + +@_docstring_get("series") +def get_series_by_id(id, includes=[]): + """Get the series with the MusicBrainz `id` as a dict with a 'series' key. 
+ + *Available includes*: {includes}""" + return _do_mb_query("series", id, includes) + +@_docstring_get("work") +def get_work_by_id(id, includes=[]): + """Get the work with the MusicBrainz `id` as a dict with a 'work' key. + + *Available includes*: {includes}""" + return _do_mb_query("work", id, includes) + +@_docstring_get("url") +def get_url_by_id(id, includes=[]): + """Get the url with the MusicBrainz `id` as a dict with a 'url' key. + + *Available includes*: {includes}""" + return _do_mb_query("url", id, includes) + + +# Searching + +@_docstring_search("annotation") +def search_annotations(query='', limit=None, offset=None, strict=False, **fields): + """Search for annotations and return a dict with an 'annotation-list' key. + + *Available search fields*: {fields}""" + return _do_mb_search('annotation', query, fields, limit, offset, strict) + +@_docstring_search("area") +def search_areas(query='', limit=None, offset=None, strict=False, **fields): + """Search for areas and return a dict with an 'area-list' key. + + *Available search fields*: {fields}""" + return _do_mb_search('area', query, fields, limit, offset, strict) + +@_docstring_search("artist") +def search_artists(query='', limit=None, offset=None, strict=False, **fields): + """Search for artists and return a dict with an 'artist-list' key. + + *Available search fields*: {fields}""" + return _do_mb_search('artist', query, fields, limit, offset, strict) + +@_docstring_search("event") +def search_events(query='', limit=None, offset=None, strict=False, **fields): + """Search for events and return a dict with an 'event-list' key. + + *Available search fields*: {fields}""" + return _do_mb_search('event', query, fields, limit, offset, strict) + +@_docstring_search("instrument") +def search_instruments(query='', limit=None, offset=None, strict=False, **fields): + """Search for instruments and return a dict with a 'instrument-list' key. 
+ + *Available search fields*: {fields}""" + return _do_mb_search('instrument', query, fields, limit, offset, strict) + +@_docstring_search("label") +def search_labels(query='', limit=None, offset=None, strict=False, **fields): + """Search for labels and return a dict with a 'label-list' key. + + *Available search fields*: {fields}""" + return _do_mb_search('label', query, fields, limit, offset, strict) + +@_docstring_search("place") +def search_places(query='', limit=None, offset=None, strict=False, **fields): + """Search for places and return a dict with a 'place-list' key. + + *Available search fields*: {fields}""" + return _do_mb_search('place', query, fields, limit, offset, strict) + +@_docstring_search("recording") +def search_recordings(query='', limit=None, offset=None, + strict=False, **fields): + """Search for recordings and return a dict with a 'recording-list' key. + + *Available search fields*: {fields}""" + return _do_mb_search('recording', query, fields, limit, offset, strict) + +@_docstring_search("release") +def search_releases(query='', limit=None, offset=None, strict=False, **fields): + """Search for recordings and return a dict with a 'recording-list' key. + + *Available search fields*: {fields}""" + return _do_mb_search('release', query, fields, limit, offset, strict) + +@_docstring_search("release-group") +def search_release_groups(query='', limit=None, offset=None, + strict=False, **fields): + """Search for release groups and return a dict + with a 'release-group-list' key. + + *Available search fields*: {fields}""" + return _do_mb_search('release-group', query, fields, limit, offset, strict) + +@_docstring_search("series") +def search_series(query='', limit=None, offset=None, strict=False, **fields): + """Search for series and return a dict with a 'series-list' key. 
+ + *Available search fields*: {fields}""" + return _do_mb_search('series', query, fields, limit, offset, strict) + +@_docstring_search("work") +def search_works(query='', limit=None, offset=None, strict=False, **fields): + """Search for works and return a dict with a 'work-list' key. + + *Available search fields*: {fields}""" + return _do_mb_search('work', query, fields, limit, offset, strict) + + +# Lists of entities +@_docstring_get("discid") +def get_releases_by_discid(id, includes=[], toc=None, cdstubs=True, media_format=None): + """Search for releases with a :musicbrainz:`Disc ID` or table of contents. + + When a `toc` is provided and no release with the disc ID is found, + a fuzzy search by the toc is done. + The `toc` should have to same format as :attr:`discid.Disc.toc_string`. + When a `toc` is provided, the format of the discid itself is not + checked server-side, so any value may be passed if searching by only + `toc` is desired. + + If no toc matches in musicbrainz but a :musicbrainz:`CD Stub` does, + the CD Stub will be returned. Prevent this from happening by + passing `cdstubs=False`. + + By default only results that match a format that allows discids + (e.g. CD) are included. To include all media formats, pass + `media_format='all'`. + + The result is a dict with either a 'disc' , a 'cdstub' key + or a 'release-list' (fuzzy match with TOC). + A 'disc' has an 'offset-count', an 'offset-list' and a 'release-list'. + A 'cdstub' key has direct 'artist' and 'title' keys. 
+ + *Available includes*: {includes}""" + params = _check_filter_and_make_params("discid", includes, release_status=[], + release_type=[]) + if toc: + params["toc"] = toc + if not cdstubs: + params["cdstubs"] = "no" + if media_format: + params["media-format"] = media_format + return _do_mb_query("discid", id, includes, params) + +@_docstring_get("recording") +def get_recordings_by_echoprint(echoprint, includes=[], release_status=[], + release_type=[]): + """Search for recordings with an `echoprint `_. + (not available on server)""" + warn("Echoprints were never introduced\n" + "and will not be found (404)", + Warning, stacklevel=2) + raise ResponseError(cause=compat.HTTPError( + None, 404, "Not Found", None, None)) + +@_docstring_get("recording") +def get_recordings_by_puid(puid, includes=[], release_status=[], + release_type=[]): + """Search for recordings with a :musicbrainz:`PUID`. + (not available on server)""" + warn("PUID support was removed from the server\n" + "and no PUIDs will be found (404)", + Warning, stacklevel=2) + raise ResponseError(cause=compat.HTTPError( + None, 404, "Not Found", None, None)) + +@_docstring_get("recording") +def get_recordings_by_isrc(isrc, includes=[], release_status=[], + release_type=[]): + """Search for recordings with an :musicbrainz:`ISRC`. + The result is a dict with an 'isrc' key, + which again includes a 'recording-list'. + + *Available includes*: {includes}""" + params = _check_filter_and_make_params("isrc", includes, + release_status, release_type) + return _do_mb_query("isrc", isrc, includes, params) + +@_docstring_get("work") +def get_works_by_iswc(iswc, includes=[]): + """Search for works with an :musicbrainz:`ISWC`. + The result is a dict with a`work-list`. 
+ + *Available includes*: {includes}""" + return _do_mb_query("iswc", iswc, includes) + + +def _browse_impl(entity, includes, limit, offset, params, release_status=[], release_type=[]): + includes = includes if isinstance(includes, list) else [includes] + valid_includes = VALID_BROWSE_INCLUDES[entity] + _check_includes_impl(includes, valid_includes) + p = {} + for k,v in params.items(): + if v: + p[k] = v + if len(p) > 1: + raise Exception("Can't have more than one of " + ", ".join(params.keys())) + if limit: p["limit"] = limit + if offset: p["offset"] = offset + filterp = _check_filter_and_make_params(entity, includes, release_status, release_type) + p.update(filterp) + return _do_mb_query(entity, "", includes, p) + +# Browse methods +# Browse include are a subset of regular get includes, so we check them here +# and the test in _do_mb_query will pass anyway. +@_docstring_browse("artist") +def browse_artists(recording=None, release=None, release_group=None, + work=None, includes=[], limit=None, offset=None): + """Get all artists linked to a recording, a release or a release group. + You need to give one MusicBrainz ID. + + *Available includes*: {includes}""" + params = {"recording": recording, + "release": release, + "release-group": release_group, + "work": work} + return _browse_impl("artist", includes, limit, offset, params) + +@_docstring_browse("event") +def browse_events(area=None, artist=None, place=None, + includes=[], limit=None, offset=None): + """Get all events linked to a area, a artist or a place. + You need to give one MusicBrainz ID. + + *Available includes*: {includes}""" + params = {"area": area, + "artist": artist, + "place": place} + return _browse_impl("event", includes, limit, offset, params) + +@_docstring_browse("label") +def browse_labels(release=None, includes=[], limit=None, offset=None): + """Get all labels linked to a relase. You need to give a MusicBrainz ID. 
+ + *Available includes*: {includes}""" + params = {"release": release} + return _browse_impl("label", includes, limit, offset, params) + +@_docstring_browse("place") +def browse_places(area=None, includes=[], limit=None, offset=None): + """Get all places linked to an area. You need to give a MusicBrainz ID. + + *Available includes*: {includes}""" + params = {"area": area} + return _browse_impl("place", includes, limit, offset, params) + +@_docstring_browse("recording") +def browse_recordings(artist=None, release=None, includes=[], + limit=None, offset=None): + """Get all recordings linked to an artist or a release. + You need to give one MusicBrainz ID. + + *Available includes*: {includes}""" + params = {"artist": artist, + "release": release} + return _browse_impl("recording", includes, limit, offset, params) + +@_docstring_browse("release") +def browse_releases(artist=None, track_artist=None, label=None, recording=None, + release_group=None, release_status=[], release_type=[], + includes=[], limit=None, offset=None): + """Get all releases linked to an artist, a label, a recording + or a release group. You need to give one MusicBrainz ID. + + You can also browse by `track_artist`, which gives all releases where some + tracks are attributed to that artist, but not the whole release. + + You can filter by :data:`musicbrainz.VALID_RELEASE_TYPES` or + :data:`musicbrainz.VALID_RELEASE_STATUSES`. + + *Available includes*: {includes}""" + # track_artist param doesn't work yet + params = {"artist": artist, + "track_artist": track_artist, + "label": label, + "recording": recording, + "release-group": release_group} + return _browse_impl("release", includes, limit, offset, + params, release_status, release_type) + +@_docstring_browse("release-group") +def browse_release_groups(artist=None, release=None, release_type=[], + includes=[], limit=None, offset=None): + """Get all release groups linked to an artist or a release. + You need to give one MusicBrainz ID. 
+ + You can filter by :data:`musicbrainz.VALID_RELEASE_TYPES`. + + *Available includes*: {includes}""" + params = {"artist": artist, + "release": release} + return _browse_impl("release-group", includes, limit, + offset, params, [], release_type) + +@_docstring_browse("url") +def browse_urls(resource=None, includes=[], limit=None, offset=None): + """Get urls by actual URL string. + You need to give a URL string as 'resource' + + *Available includes*: {includes}""" + params = {"resource": resource} + return _browse_impl("url", includes, limit, offset, params) + +@_docstring_browse("work") +def browse_works(artist=None, includes=[], limit=None, offset=None): + """Get all works linked to an artist + + *Available includes*: {includes}""" + params = {"artist": artist} + return _browse_impl("work", includes, limit, offset, params) + +# Collections +def get_collections(): + """List the collections for the currently :func:`authenticated ` user + as a dict with a 'collection-list' key.""" + # Missing the count in the reply + return _do_mb_query("collection", '') + +def _do_collection_query(collection, collection_type, limit, offset): + params = {} + if limit: params["limit"] = limit + if offset: params["offset"] = offset + return _do_mb_query("collection", "%s/%s" % (collection, collection_type), [], params) + +def get_artists_in_collection(collection, limit=None, offset=None): + """List the artists in a collection. + Returns a dict with a 'collection' key, which again has a 'artist-list'. + + See `Browsing`_ for how to use `limit` and `offset`. + """ + return _do_collection_query(collection, "artists", limit, offset) + +def get_releases_in_collection(collection, limit=None, offset=None): + """List the releases in a collection. + Returns a dict with a 'collection' key, which again has a 'release-list'. + + See `Browsing`_ for how to use `limit` and `offset`. 
+ """ + return _do_collection_query(collection, "releases", limit, offset) + +def get_events_in_collection(collection, limit=None, offset=None): + """List the events in a collection. + Returns a dict with a 'collection' key, which again has a 'event-list'. + + See `Browsing`_ for how to use `limit` and `offset`. + """ + return _do_collection_query(collection, "events", limit, offset) + +def get_places_in_collection(collection, limit=None, offset=None): + """List the places in a collection. + Returns a dict with a 'collection' key, which again has a 'place-list'. + + See `Browsing`_ for how to use `limit` and `offset`. + """ + return _do_collection_query(collection, "places", limit, offset) + +def get_recordings_in_collection(collection, limit=None, offset=None): + """List the recordings in a collection. + Returns a dict with a 'collection' key, which again has a 'recording-list'. + + See `Browsing`_ for how to use `limit` and `offset`. + """ + return _do_collection_query(collection, "recordings", limit, offset) + +def get_works_in_collection(collection, limit=None, offset=None): + """List the works in a collection. + Returns a dict with a 'collection' key, which again has a 'work-list'. + + See `Browsing`_ for how to use `limit` and `offset`. + """ + return _do_collection_query(collection, "works", limit, offset) + + +# Submission methods + +def submit_barcodes(release_barcode): + """Submits a set of {release_id1: barcode, ...}""" + query = mbxml.make_barcode_request(release_barcode) + return _do_mb_post("release", query) + +def submit_puids(recording_puids): + """Submit PUIDs. + (Functionality removed from server) + """ + warn("PUID support was dropped at the server\n" + "nothing will be submitted", + Warning, stacklevel=2) + return {'message': {'text': 'OK'}} + +def submit_echoprints(recording_echoprints): + """Submit echoprints. 
+ (Functionality removed from server) + """ + warn("Echoprints were never introduced\n" + "nothing will be submitted", + Warning, stacklevel=2) + return {'message': {'text': 'OK'}} + +def submit_isrcs(recording_isrcs): + """Submit ISRCs. + Submits a set of {recording-id1: [isrc1, ...], ...} + or {recording_id1: isrc, ...}. + """ + rec2isrcs = dict() + for (rec, isrcs) in recording_isrcs.items(): + rec2isrcs[rec] = isrcs if isinstance(isrcs, list) else [isrcs] + query = mbxml.make_isrc_request(rec2isrcs) + return _do_mb_post("recording", query) + +def submit_tags(**kwargs): + """Submit user tags. + Takes parameters named e.g. 'artist_tags', 'recording_tags', etc., + and of the form: + {entity_id1: [tag1, ...], ...} + If you only have one tag for an entity you can use a string instead + of a list. + + The user's tags for each entity will be set to that list, adding or + removing tags as necessary. Submitting an empty list for an entity + will remove all tags for that entity by the user. + """ + for k, v in kwargs.items(): + for id, tags in v.items(): + kwargs[k][id] = tags if isinstance(tags, list) else [tags] + + query = mbxml.make_tag_request(**kwargs) + return _do_mb_post("tag", query) + +def submit_ratings(**kwargs): + """Submit user ratings. + Takes parameters named e.g. 'artist_ratings', 'recording_ratings', etc., + and of the form: + {entity_id1: rating, ...} + + Ratings are numbers from 0-100, at intervals of 20 (20 per 'star'). + Submitting a rating of 0 will remove the user's rating. + """ + query = mbxml.make_rating_request(**kwargs) + return _do_mb_post("rating", query) + +def add_releases_to_collection(collection, releases=[]): + """Add releases to a collection. 
+ Collection and releases should be identified by their MBIDs + """ + # XXX: Maximum URI length of 16kb means we should only allow ~400 releases + releaselist = ";".join(releases) + return _do_mb_put("collection/%s/releases/%s" % (collection, releaselist)) + +def remove_releases_from_collection(collection, releases=[]): + """Remove releases from a collection. + Collection and releases should be identified by their MBIDs + """ + releaselist = ";".join(releases) + return _do_mb_delete("collection/%s/releases/%s" % (collection, releaselist)) diff --git a/libs/musicbrainzngs/util.py b/libs/musicbrainzngs/util.py new file mode 100644 index 00000000..37316f53 --- /dev/null +++ b/libs/musicbrainzngs/util.py @@ -0,0 +1,44 @@ +# This file is part of the musicbrainzngs library +# Copyright (C) Alastair Porter, Adrian Sampson, and others +# This file is distributed under a BSD-2-Clause type license. +# See the COPYING file for more information. + +import sys +import locale +import xml.etree.ElementTree as ET + +from . import compat + +def _unicode(string, encoding=None): + """Try to decode byte strings to unicode. + This can only be a guess, but this might be better than failing. + It is safe to use this on numbers or strings that are already unicode. + """ + if isinstance(string, compat.unicode): + unicode_string = string + elif isinstance(string, compat.bytes): + # use given encoding, stdin, preferred until something != None is found + if encoding is None: + encoding = sys.stdin.encoding + if encoding is None: + encoding = locale.getpreferredencoding() + unicode_string = string.decode(encoding, "ignore") + else: + unicode_string = compat.unicode(string) + return unicode_string.replace('\x00', '').strip() + +def bytes_to_elementtree(bytes_or_file): + """Given a bytestring or a file-like object that will produce them, + parse and return an ElementTree. 
+ """ + if isinstance(bytes_or_file, compat.basestring): + s = bytes_or_file + else: + s = bytes_or_file.read() + + if compat.is_py3: + s = _unicode(s, "utf-8") + + f = compat.StringIO(s) + tree = ET.ElementTree(file=f) + return tree diff --git a/libs/mutagen/__init__.py b/libs/mutagen/__init__.py index 28febab3..c1abc0b1 100644 --- a/libs/mutagen/__init__.py +++ b/libs/mutagen/__init__.py @@ -1,4 +1,5 @@ -# mutagen aims to be an all purpose media tagging library +# -*- coding: utf-8 -*- + # Copyright (C) 2005 Michael Urman # # This program is free software; you can redistribute it and/or modify @@ -6,7 +7,7 @@ # published by the Free Software Foundation. -"""Mutagen aims to be an all purpose tagging library. +"""Mutagen aims to be an all purpose multimedia tagging library. :: @@ -19,245 +20,26 @@ depending on tag or format. They may also be entirely different objects for certain keys, again depending on format. """ -version = (1, 22) +from mutagen._util import MutagenError +from mutagen._file import FileType, StreamInfo, File +from mutagen._tags import Tags, Metadata, PaddingInfo + +version = (1, 32) """Version tuple.""" version_string = ".".join(map(str, version)) """Version string.""" +MutagenError -import warnings +FileType -import mutagen._util +StreamInfo +File -class Metadata(object): - """An abstract dict-like object. +Tags - Metadata is the base class for many of the tag objects in Mutagen. - """ +Metadata - def __init__(self, *args, **kwargs): - if args or kwargs: - self.load(*args, **kwargs) - - def load(self, *args, **kwargs): - raise NotImplementedError - - def save(self, filename=None): - """Save changes to a file.""" - - raise NotImplementedError - - def delete(self, filename=None): - """Remove tags from a file.""" - - raise NotImplementedError - - -class FileType(mutagen._util.DictMixin): - """An abstract object wrapping tags and audio stream information. 
- - Attributes: - - * info -- stream information (length, bitrate, sample rate) - * tags -- metadata tags, if any - - Each file format has different potential tags and stream - information. - - FileTypes implement an interface very similar to Metadata; the - dict interface, save, load, and delete calls on a FileType call - the appropriate methods on its tag data. - """ - - info = None - tags = None - filename = None - _mimes = ["application/octet-stream"] - - def __init__(self, filename=None, *args, **kwargs): - if filename is None: - warnings.warn("FileType constructor requires a filename", - DeprecationWarning) - else: - self.load(filename, *args, **kwargs) - - def load(self, filename, *args, **kwargs): - raise NotImplementedError - - def __getitem__(self, key): - """Look up a metadata tag key. - - If the file has no tags at all, a KeyError is raised. - """ - - if self.tags is None: - raise KeyError(key) - else: - return self.tags[key] - - def __setitem__(self, key, value): - """Set a metadata tag. - - If the file has no tags, an appropriate format is added (but - not written until save is called). - """ - - if self.tags is None: - self.add_tags() - self.tags[key] = value - - def __delitem__(self, key): - """Delete a metadata tag key. - - If the file has no tags at all, a KeyError is raised. - """ - - if self.tags is None: - raise KeyError(key) - else: - del(self.tags[key]) - - def keys(self): - """Return a list of keys in the metadata tag. - - If the file has no tags at all, an empty list is returned. - """ - - if self.tags is None: - return [] - else: - return self.tags.keys() - - def delete(self, filename=None): - """Remove tags from a file.""" - - if self.tags is not None: - if filename is None: - filename = self.filename - else: - warnings.warn( - "delete(filename=...) 
is deprecated, reload the file", - DeprecationWarning) - return self.tags.delete(filename) - - def save(self, filename=None, **kwargs): - """Save metadata tags.""" - - if filename is None: - filename = self.filename - else: - warnings.warn( - "save(filename=...) is deprecated, reload the file", - DeprecationWarning) - if self.tags is not None: - return self.tags.save(filename, **kwargs) - else: - raise ValueError("no tags in file") - - def pprint(self): - """Print stream information and comment key=value pairs.""" - - stream = "%s (%s)" % (self.info.pprint(), self.mime[0]) - try: - tags = self.tags.pprint() - except AttributeError: - return stream - else: - return stream + ((tags and "\n" + tags) or "") - - def add_tags(self): - """Adds new tags to the file. - - Raises if tags already exist. - """ - - raise NotImplementedError - - @property - def mime(self): - """A list of mime types""" - - mimes = [] - for Kind in type(self).__mro__: - for mime in getattr(Kind, '_mimes', []): - if mime not in mimes: - mimes.append(mime) - return mimes - - @staticmethod - def score(filename, fileobj, header): - raise NotImplementedError - - -def File(filename, options=None, easy=False): - """Guess the type of the file and try to open it. - - The file type is decided by several things, such as the first 128 - bytes (which usually contains a file type identifier), the - filename extension, and the presence of existing tags. - - If no appropriate type could be found, None is returned. - - :param options: Sequence of :class:`FileType` implementations, defaults to - all included ones. - - :param easy: If the easy wrappers should be returnd if available. - For example :class:`EasyMP3 ` instead - of :class:`MP3 `. 
- """ - - if options is None: - from mutagen.asf import ASF - from mutagen.apev2 import APEv2File - from mutagen.flac import FLAC - if easy: - from mutagen.easyid3 import EasyID3FileType as ID3FileType - else: - from mutagen.id3 import ID3FileType - if easy: - from mutagen.mp3 import EasyMP3 as MP3 - else: - from mutagen.mp3 import MP3 - from mutagen.oggflac import OggFLAC - from mutagen.oggspeex import OggSpeex - from mutagen.oggtheora import OggTheora - from mutagen.oggvorbis import OggVorbis - from mutagen.oggopus import OggOpus - if easy: - from mutagen.trueaudio import EasyTrueAudio as TrueAudio - else: - from mutagen.trueaudio import TrueAudio - from mutagen.wavpack import WavPack - if easy: - from mutagen.easymp4 import EasyMP4 as MP4 - else: - from mutagen.mp4 import MP4 - from mutagen.musepack import Musepack - from mutagen.monkeysaudio import MonkeysAudio - from mutagen.optimfrog import OptimFROG - options = [MP3, TrueAudio, OggTheora, OggSpeex, OggVorbis, OggFLAC, - FLAC, APEv2File, MP4, ID3FileType, WavPack, Musepack, - MonkeysAudio, OptimFROG, ASF, OggOpus] - - if not options: - return None - - fileobj = open(filename, "rb") - try: - header = fileobj.read(128) - # Sort by name after score. Otherwise import order affects - # Kind sort order, which affects treatment of things with - # equals scores. 
- results = [(Kind.score(filename, fileobj, header), Kind.__name__) - for Kind in options] - finally: - fileobj.close() - results = zip(results, options) - results.sort() - (score, name), Kind = results[-1] - if score > 0: - return Kind(filename) - else: - return None +PaddingInfo diff --git a/libs/mutagen/_compat.py b/libs/mutagen/_compat.py new file mode 100644 index 00000000..77c465f1 --- /dev/null +++ b/libs/mutagen/_compat.py @@ -0,0 +1,86 @@ +# -*- coding: utf-8 -*- + +# Copyright (C) 2013 Christoph Reiter +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of version 2 of the GNU General Public License as +# published by the Free Software Foundation. + +import sys + + +PY2 = sys.version_info[0] == 2 +PY3 = not PY2 + +if PY2: + from StringIO import StringIO + BytesIO = StringIO + from cStringIO import StringIO as cBytesIO + from itertools import izip + + long_ = long + integer_types = (int, long) + string_types = (str, unicode) + text_type = unicode + + xrange = xrange + cmp = cmp + chr_ = chr + + def endswith(text, end): + return text.endswith(end) + + iteritems = lambda d: d.iteritems() + itervalues = lambda d: d.itervalues() + iterkeys = lambda d: d.iterkeys() + + iterbytes = lambda b: iter(b) + + exec("def reraise(tp, value, tb):\n raise tp, value, tb") + + def swap_to_string(cls): + if "__str__" in cls.__dict__: + cls.__unicode__ = cls.__str__ + + if "__bytes__" in cls.__dict__: + cls.__str__ = cls.__bytes__ + + return cls + +elif PY3: + from io import StringIO + StringIO = StringIO + from io import BytesIO + cBytesIO = BytesIO + + long_ = int + integer_types = (int,) + string_types = (str,) + text_type = str + + izip = zip + xrange = range + cmp = lambda a, b: (a > b) - (a < b) + chr_ = lambda x: bytes([x]) + + def endswith(text, end): + # usefull for paths which can be both, str and bytes + if isinstance(text, str): + if not isinstance(end, str): + end = end.decode("ascii") + else: + if not isinstance(end, 
bytes): + end = end.encode("ascii") + return text.endswith(end) + + iteritems = lambda d: iter(d.items()) + itervalues = lambda d: iter(d.values()) + iterkeys = lambda d: iter(d.keys()) + + iterbytes = lambda b: (bytes([v]) for v in b) + + def reraise(tp, value, tb): + raise tp(value).with_traceback(tb) + + def swap_to_string(cls): + return cls diff --git a/libs/mutagen/_constants.py b/libs/mutagen/_constants.py index f5ecd90c..62c1ce02 100644 --- a/libs/mutagen/_constants.py +++ b/libs/mutagen/_constants.py @@ -1,3 +1,5 @@ +# -*- coding: utf-8 -*- + """Constants used by Mutagen.""" GENRES = [ diff --git a/libs/mutagen/_file.py b/libs/mutagen/_file.py new file mode 100644 index 00000000..95f400cf --- /dev/null +++ b/libs/mutagen/_file.py @@ -0,0 +1,255 @@ +# Copyright (C) 2005 Michael Urman +# -*- coding: utf-8 -*- +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of version 2 of the GNU General Public License as +# published by the Free Software Foundation. + +import warnings + +from mutagen._util import DictMixin +from mutagen._compat import izip + + +class FileType(DictMixin): + """An abstract object wrapping tags and audio stream information. + + Attributes: + + * info -- :class:`StreamInfo` -- (length, bitrate, sample rate) + * tags -- :class:`Tags` -- metadata tags, if any + + Each file format has different potential tags and stream + information. + + FileTypes implement an interface very similar to Metadata; the + dict interface, save, load, and delete calls on a FileType call + the appropriate methods on its tag data. 
+ """ + + __module__ = "mutagen" + + info = None + tags = None + filename = None + _mimes = ["application/octet-stream"] + + def __init__(self, filename=None, *args, **kwargs): + if filename is None: + warnings.warn("FileType constructor requires a filename", + DeprecationWarning) + else: + self.load(filename, *args, **kwargs) + + def load(self, filename, *args, **kwargs): + raise NotImplementedError + + def __getitem__(self, key): + """Look up a metadata tag key. + + If the file has no tags at all, a KeyError is raised. + """ + + if self.tags is None: + raise KeyError(key) + else: + return self.tags[key] + + def __setitem__(self, key, value): + """Set a metadata tag. + + If the file has no tags, an appropriate format is added (but + not written until save is called). + """ + + if self.tags is None: + self.add_tags() + self.tags[key] = value + + def __delitem__(self, key): + """Delete a metadata tag key. + + If the file has no tags at all, a KeyError is raised. + """ + + if self.tags is None: + raise KeyError(key) + else: + del(self.tags[key]) + + def keys(self): + """Return a list of keys in the metadata tag. + + If the file has no tags at all, an empty list is returned. + """ + + if self.tags is None: + return [] + else: + return self.tags.keys() + + def delete(self, filename=None): + """Remove tags from a file. + + In cases where the tagging format is independent of the file type + (for example `mutagen.ID3`) all traces of the tagging format will + be removed. + In cases where the tag is part of the file type, all tags and + padding will be removed. + + The tags attribute will be cleared as well if there is one. + + Does nothing if the file has no tags. + + :raises mutagen.MutagenError: if deleting wasn't possible + """ + + if self.tags is not None: + if filename is None: + filename = self.filename + else: + warnings.warn( + "delete(filename=...) 
is deprecated, reload the file", + DeprecationWarning) + return self.tags.delete(filename) + + def save(self, filename=None, **kwargs): + """Save metadata tags. + + :raises mutagen.MutagenError: if saving wasn't possible + """ + + if filename is None: + filename = self.filename + else: + warnings.warn( + "save(filename=...) is deprecated, reload the file", + DeprecationWarning) + + if self.tags is not None: + return self.tags.save(filename, **kwargs) + + def pprint(self): + """Print stream information and comment key=value pairs.""" + + stream = "%s (%s)" % (self.info.pprint(), self.mime[0]) + try: + tags = self.tags.pprint() + except AttributeError: + return stream + else: + return stream + ((tags and "\n" + tags) or "") + + def add_tags(self): + """Adds new tags to the file. + + :raises mutagen.MutagenError: if tags already exist or adding is not + possible. + """ + + raise NotImplementedError + + @property + def mime(self): + """A list of mime types""" + + mimes = [] + for Kind in type(self).__mro__: + for mime in getattr(Kind, '_mimes', []): + if mime not in mimes: + mimes.append(mime) + return mimes + + @staticmethod + def score(filename, fileobj, header): + raise NotImplementedError + + +class StreamInfo(object): + """Abstract stream information object. + + Provides attributes for length, bitrate, sample rate etc. + + See the implementations for details. + """ + + __module__ = "mutagen" + + def pprint(self): + """Print stream information""" + + raise NotImplementedError + + +def File(filename, options=None, easy=False): + """Guess the type of the file and try to open it. + + The file type is decided by several things, such as the first 128 + bytes (which usually contains a file type identifier), the + filename extension, and the presence of existing tags. + + If no appropriate type could be found, None is returned. + + :param options: Sequence of :class:`FileType` implementations, defaults to + all included ones. 
+ + :param easy: If the easy wrappers should be returnd if available. + For example :class:`EasyMP3 ` instead + of :class:`MP3 `. + """ + + if options is None: + from mutagen.asf import ASF + from mutagen.apev2 import APEv2File + from mutagen.flac import FLAC + if easy: + from mutagen.easyid3 import EasyID3FileType as ID3FileType + else: + from mutagen.id3 import ID3FileType + if easy: + from mutagen.mp3 import EasyMP3 as MP3 + else: + from mutagen.mp3 import MP3 + from mutagen.oggflac import OggFLAC + from mutagen.oggspeex import OggSpeex + from mutagen.oggtheora import OggTheora + from mutagen.oggvorbis import OggVorbis + from mutagen.oggopus import OggOpus + if easy: + from mutagen.trueaudio import EasyTrueAudio as TrueAudio + else: + from mutagen.trueaudio import TrueAudio + from mutagen.wavpack import WavPack + if easy: + from mutagen.easymp4 import EasyMP4 as MP4 + else: + from mutagen.mp4 import MP4 + from mutagen.musepack import Musepack + from mutagen.monkeysaudio import MonkeysAudio + from mutagen.optimfrog import OptimFROG + from mutagen.aiff import AIFF + from mutagen.aac import AAC + from mutagen.smf import SMF + options = [MP3, TrueAudio, OggTheora, OggSpeex, OggVorbis, OggFLAC, + FLAC, AIFF, APEv2File, MP4, ID3FileType, WavPack, + Musepack, MonkeysAudio, OptimFROG, ASF, OggOpus, AAC, + SMF] + + if not options: + return None + + with open(filename, "rb") as fileobj: + header = fileobj.read(128) + # Sort by name after score. Otherwise import order affects + # Kind sort order, which affects treatment of things with + # equals scores. 
+ results = [(Kind.score(filename, fileobj, header), Kind.__name__) + for Kind in options] + + results = list(izip(results, options)) + results.sort() + (score, name), Kind = results[-1] + if score > 0: + return Kind(filename) + else: + return None diff --git a/libs/mutagen/_mp3util.py b/libs/mutagen/_mp3util.py new file mode 100644 index 00000000..409cadcb --- /dev/null +++ b/libs/mutagen/_mp3util.py @@ -0,0 +1,420 @@ +# -*- coding: utf-8 -*- +# Copyright 2015 Christoph Reiter +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of version 2 of the GNU General Public License as +# published by the Free Software Foundation. + +""" +http://www.codeproject.com/Articles/8295/MPEG-Audio-Frame-Header +http://wiki.hydrogenaud.io/index.php?title=MP3 +""" + +from functools import partial + +from ._util import cdata, BitReader +from ._compat import xrange, iterbytes, cBytesIO + + +class LAMEError(Exception): + pass + + +class LAMEHeader(object): + """http://gabriel.mp3-tech.org/mp3infotag.html""" + + vbr_method = 0 + """0: unknown, 1: CBR, 2: ABR, 3/4/5: VBR, others: see the docs""" + + lowpass_filter = 0 + """lowpass filter value in Hz. 0 means unknown""" + + quality = -1 + """Encoding quality: 0..9""" + + vbr_quality = -1 + """VBR quality: 0..9""" + + track_peak = None + """Peak signal amplitude as float. None if unknown.""" + + track_gain_origin = 0 + """see the docs""" + + track_gain_adjustment = None + """Track gain adjustment as float (for 89db replay gain) or None""" + + album_gain_origin = 0 + """see the docs""" + + album_gain_adjustment = None + """Album gain adjustment as float (for 89db replay gain) or None""" + + encoding_flags = 0 + """see docs""" + + ath_type = -1 + """see docs""" + + bitrate = -1 + """Bitrate in kbps. For VBR the minimum bitrate, for anything else + (CBR, ABR, ..) the target bitrate. 
+ """ + + encoder_delay_start = 0 + """Encoder delay in samples""" + + encoder_padding_end = 0 + """Padding in samples added at the end""" + + source_sample_frequency_enum = -1 + """see docs""" + + unwise_setting_used = False + """see docs""" + + stereo_mode = 0 + """see docs""" + + noise_shaping = 0 + """see docs""" + + mp3_gain = 0 + """Applied MP3 gain -127..127. Factor is 2 ** (mp3_gain / 4)""" + + surround_info = 0 + """see docs""" + + preset_used = 0 + """lame preset""" + + music_length = 0 + """Length in bytes excluding any ID3 tags""" + + music_crc = -1 + """CRC16 of the data specified by music_length""" + + header_crc = -1 + """CRC16 of this header and everything before (not checked)""" + + def __init__(self, xing, fileobj): + """Raises LAMEError if parsing fails""" + + payload = fileobj.read(27) + if len(payload) != 27: + raise LAMEError("Not enough data") + + # extended lame header + r = BitReader(cBytesIO(payload)) + revision = r.bits(4) + if revision != 0: + raise LAMEError("unsupported header revision %d" % revision) + + self.vbr_method = r.bits(4) + self.lowpass_filter = r.bits(8) * 100 + + # these have a different meaning for lame; expose them again here + self.quality = (100 - xing.vbr_scale) % 10 + self.vbr_quality = (100 - xing.vbr_scale) // 10 + + track_peak_data = r.bytes(4) + if track_peak_data == b"\x00\x00\x00\x00": + self.track_peak = None + else: + # see PutLameVBR() in LAME's VbrTag.c + self.track_peak = ( + cdata.uint32_be(track_peak_data) - 0.5) / 2 ** 23 + track_gain_type = r.bits(3) + self.track_gain_origin = r.bits(3) + sign = r.bits(1) + gain_adj = r.bits(9) / 10.0 + if sign: + gain_adj *= -1 + if track_gain_type == 1: + self.track_gain_adjustment = gain_adj + else: + self.track_gain_adjustment = None + assert r.is_aligned() + + album_gain_type = r.bits(3) + self.album_gain_origin = r.bits(3) + sign = r.bits(1) + album_gain_adj = r.bits(9) / 10.0 + if album_gain_type == 2: + self.album_gain_adjustment = album_gain_adj + else: + 
self.album_gain_adjustment = None + + self.encoding_flags = r.bits(4) + self.ath_type = r.bits(4) + + self.bitrate = r.bits(8) + + self.encoder_delay_start = r.bits(12) + self.encoder_padding_end = r.bits(12) + + self.source_sample_frequency_enum = r.bits(2) + self.unwise_setting_used = r.bits(1) + self.stereo_mode = r.bits(3) + self.noise_shaping = r.bits(2) + + sign = r.bits(1) + mp3_gain = r.bits(7) + if sign: + mp3_gain *= -1 + self.mp3_gain = mp3_gain + + r.skip(2) + self.surround_info = r.bits(3) + self.preset_used = r.bits(11) + self.music_length = r.bits(32) + self.music_crc = r.bits(16) + + self.header_crc = r.bits(16) + assert r.is_aligned() + + @classmethod + def parse_version(cls, fileobj): + """Returns a version string and True if a LAMEHeader follows. + The passed file object will be positioned right before the + lame header if True. + + Raises LAMEError if there is no lame version info. + """ + + # http://wiki.hydrogenaud.io/index.php?title=LAME_version_string + + data = fileobj.read(20) + if len(data) != 20: + raise LAMEError("Not a lame header") + if not data.startswith((b"LAME", b"L3.99")): + raise LAMEError("Not a lame header") + + data = data.lstrip(b"EMAL") + major, data = data[0:1], data[1:].lstrip(b".") + minor = b"" + for c in iterbytes(data): + if not c.isdigit(): + break + minor += c + data = data[len(minor):] + + try: + major = int(major.decode("ascii")) + minor = int(minor.decode("ascii")) + except ValueError: + raise LAMEError + + # the extended header was added sometimes in the 3.90 cycle + # e.g. "LAME3.90 (alpha)" should still stop here. 
+ # (I have seen such a file) + if (major, minor) < (3, 90) or ( + (major, minor) == (3, 90) and data[-11:-10] == b"("): + flag = data.strip(b"\x00").rstrip().decode("ascii") + return u"%d.%d%s" % (major, minor, flag), False + + if len(data) <= 11: + raise LAMEError("Invalid version: too long") + + flag = data[:-11].rstrip(b"\x00") + + flag_string = u"" + patch = u"" + if flag == b"a": + flag_string = u" (alpha)" + elif flag == b"b": + flag_string = u" (beta)" + elif flag == b"r": + patch = u".1+" + elif flag == b" ": + if (major, minor) > (3, 96): + patch = u".0" + else: + patch = u".0+" + elif flag == b"" or flag == b".": + patch = u".0+" + else: + flag_string = u" (?)" + + # extended header, seek back to 9 bytes for the caller + fileobj.seek(-11, 1) + + return u"%d.%d%s%s" % (major, minor, patch, flag_string), True + + +class XingHeaderError(Exception): + pass + + +class XingHeaderFlags(object): + FRAMES = 0x1 + BYTES = 0x2 + TOC = 0x4 + VBR_SCALE = 0x8 + + +class XingHeader(object): + + frames = -1 + """Number of frames, -1 if unknown""" + + bytes = -1 + """Number of bytes, -1 if unknown""" + + toc = [] + """List of 100 file offsets in percent encoded as 0-255. E.g. entry + 50 contains the file offset in percent at 50% play time. + Empty if unknown. + """ + + vbr_scale = -1 + """VBR quality indicator 0-100. -1 if unknown""" + + lame_header = None + """A LAMEHeader instance or None""" + + lame_version = u"" + """The version of the LAME encoder e.g. '3.99.0'. Empty if unknown""" + + is_info = False + """If the header started with 'Info' and not 'Xing'""" + + def __init__(self, fileobj): + """Parses the Xing header or raises XingHeaderError. + + The file position after this returns is undefined. 
+ """ + + data = fileobj.read(8) + if len(data) != 8 or data[:4] not in (b"Xing", b"Info"): + raise XingHeaderError("Not a Xing header") + + self.is_info = (data[:4] == b"Info") + + flags = cdata.uint32_be_from(data, 4)[0] + + if flags & XingHeaderFlags.FRAMES: + data = fileobj.read(4) + if len(data) != 4: + raise XingHeaderError("Xing header truncated") + self.frames = cdata.uint32_be(data) + + if flags & XingHeaderFlags.BYTES: + data = fileobj.read(4) + if len(data) != 4: + raise XingHeaderError("Xing header truncated") + self.bytes = cdata.uint32_be(data) + + if flags & XingHeaderFlags.TOC: + data = fileobj.read(100) + if len(data) != 100: + raise XingHeaderError("Xing header truncated") + self.toc = list(bytearray(data)) + + if flags & XingHeaderFlags.VBR_SCALE: + data = fileobj.read(4) + if len(data) != 4: + raise XingHeaderError("Xing header truncated") + self.vbr_scale = cdata.uint32_be(data) + + try: + self.lame_version, has_header = LAMEHeader.parse_version(fileobj) + if has_header: + self.lame_header = LAMEHeader(self, fileobj) + except LAMEError: + pass + + @classmethod + def get_offset(cls, info): + """Calculate the offset to the Xing header from the start of the + MPEG header including sync based on the MPEG header's content. + """ + + assert info.layer == 3 + + if info.version == 1: + if info.mode != 3: + return 36 + else: + return 21 + else: + if info.mode != 3: + return 21 + else: + return 13 + + +class VBRIHeaderError(Exception): + pass + + +class VBRIHeader(object): + + version = 0 + """VBRI header version""" + + quality = 0 + """Quality indicator""" + + bytes = 0 + """Number of bytes""" + + frames = 0 + """Number of frames""" + + toc_scale_factor = 0 + """Scale factor of TOC entries""" + + toc_frames = 0 + """Number of frames per table entry""" + + toc = [] + """TOC""" + + def __init__(self, fileobj): + """Reads the VBRI header or raises VBRIHeaderError. 
+ + The file position is undefined after this returns + """ + + data = fileobj.read(26) + if len(data) != 26 or not data.startswith(b"VBRI"): + raise VBRIHeaderError("Not a VBRI header") + + offset = 4 + self.version, offset = cdata.uint16_be_from(data, offset) + if self.version != 1: + raise VBRIHeaderError( + "Unsupported header version: %r" % self.version) + + offset += 2 # float16.. can't do + self.quality, offset = cdata.uint16_be_from(data, offset) + self.bytes, offset = cdata.uint32_be_from(data, offset) + self.frames, offset = cdata.uint32_be_from(data, offset) + + toc_num_entries, offset = cdata.uint16_be_from(data, offset) + self.toc_scale_factor, offset = cdata.uint16_be_from(data, offset) + toc_entry_size, offset = cdata.uint16_be_from(data, offset) + self.toc_frames, offset = cdata.uint16_be_from(data, offset) + toc_size = toc_entry_size * toc_num_entries + toc_data = fileobj.read(toc_size) + if len(toc_data) != toc_size: + raise VBRIHeaderError("VBRI header truncated") + + self.toc = [] + if toc_entry_size == 2: + unpack = partial(cdata.uint16_be_from, toc_data) + elif toc_entry_size == 4: + unpack = partial(cdata.uint32_be_from, toc_data) + else: + raise VBRIHeaderError("Invalid TOC entry size") + + self.toc = [unpack(i)[0] for i in xrange(0, toc_size, toc_entry_size)] + + @classmethod + def get_offset(cls, info): + """Offset in bytes from the start of the MPEG header including sync""" + + assert info.layer == 3 + + return 36 diff --git a/libs/mutagen/_tags.py b/libs/mutagen/_tags.py new file mode 100644 index 00000000..e6365f0a --- /dev/null +++ b/libs/mutagen/_tags.py @@ -0,0 +1,124 @@ +# -*- coding: utf-8 -*- +# Copyright (C) 2005 Michael Urman +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of version 2 of the GNU General Public License as +# published by the Free Software Foundation. + + +class PaddingInfo(object): + """Abstract padding information object. 
+ + This will be passed to the callback function that can be used + for saving tags. + + :: + + def my_callback(info: PaddingInfo): + return info.get_default_padding() + + The callback should return the amount of padding to use (>= 0) based on + the content size and the padding of the file after saving. The actual used + amount of padding might vary depending on the file format (due to + alignment etc.) + + The default implementation can be accessed using the + :meth:`get_default_padding` method in the callback. + """ + + padding = 0 + """The amount of padding left after saving in bytes (can be negative if + more data needs to be added as padding is available) + """ + + size = 0 + """The amount of data following the padding""" + + def __init__(self, padding, size): + self.padding = padding + self.size = size + + def get_default_padding(self): + """The default implementation which tries to select a reasonable + amount of padding and which might change in future versions. + + :return: Amount of padding after saving + :rtype: int + """ + + high = 1024 * 10 + self.size // 100 # 10 KiB + 1% of trailing data + low = 1024 + self.size // 1000 # 1 KiB + 0.1% of trailing data + + if self.padding >= 0: + # enough padding left + if self.padding > high: + # padding too large, reduce + return low + # just use existing padding as is + return self.padding + else: + # not enough padding, add some + return low + + def _get_padding(self, user_func): + if user_func is None: + return self.get_default_padding() + else: + return user_func(self) + + def __repr__(self): + return "<%s size=%d padding=%d>" % ( + type(self).__name__, self.size, self.padding) + + +class Tags(object): + """`Tags` is the base class for many of the tag objects in Mutagen. + + In many cases it has a dict like interface. 
+ """ + + __module__ = "mutagen" + + def pprint(self): + """ + :returns: tag information + :rtype: mutagen.text + """ + + raise NotImplementedError + + +class Metadata(Tags): + """Like :class:`Tags` but for standalone tagging formats that are not + solely managed by a container format. + + Provides methods to load, save and delete tags. + """ + + __module__ = "mutagen" + + def __init__(self, *args, **kwargs): + if args or kwargs: + self.load(*args, **kwargs) + + def load(self, filename, **kwargs): + raise NotImplementedError + + def save(self, filename=None): + """Save changes to a file. + + :raises mutagen.MutagenError: if saving wasn't possible + """ + + raise NotImplementedError + + def delete(self, filename=None): + """Remove tags from a file. + + In most cases this means any traces of the tag will be removed + from the file. + + :raises mutagen.MutagenError: if deleting wasn't possible + """ + + raise NotImplementedError diff --git a/libs/mutagen/_toolsutil.py b/libs/mutagen/_toolsutil.py new file mode 100644 index 00000000..e9074b71 --- /dev/null +++ b/libs/mutagen/_toolsutil.py @@ -0,0 +1,231 @@ +# -*- coding: utf-8 -*- + +# Copyright 2015 Christoph Reiter +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 as +# published by the Free Software Foundation. + +import os +import sys +import signal +import locale +import contextlib +import optparse +import ctypes + +from ._compat import text_type, PY2, PY3, iterbytes + + +def split_escape(string, sep, maxsplit=None, escape_char="\\"): + """Like unicode/str/bytes.split but allows for the separator to be escaped + + If passed unicode/str/bytes will only return list of unicode/str/bytes. 
+ """ + + assert len(sep) == 1 + assert len(escape_char) == 1 + + if isinstance(string, bytes): + if isinstance(escape_char, text_type): + escape_char = escape_char.encode("ascii") + iter_ = iterbytes + else: + iter_ = iter + + if maxsplit is None: + maxsplit = len(string) + + empty = string[:0] + result = [] + current = empty + escaped = False + for char in iter_(string): + if escaped: + if char != escape_char and char != sep: + current += escape_char + current += char + escaped = False + else: + if char == escape_char: + escaped = True + elif char == sep and len(result) < maxsplit: + result.append(current) + current = empty + else: + current += char + result.append(current) + return result + + +class SignalHandler(object): + + def __init__(self): + self._interrupted = False + self._nosig = False + self._init = False + + def init(self): + signal.signal(signal.SIGINT, self._handler) + signal.signal(signal.SIGTERM, self._handler) + if os.name != "nt": + signal.signal(signal.SIGHUP, self._handler) + + def _handler(self, signum, frame): + self._interrupted = True + if not self._nosig: + raise SystemExit("Aborted...") + + @contextlib.contextmanager + def block(self): + """While this context manager is active any signals for aborting + the process will be queued and exit the program once the context + is left. 
+ """ + + self._nosig = True + yield + self._nosig = False + if self._interrupted: + raise SystemExit("Aborted...") + + +def get_win32_unicode_argv(): + """Returns a unicode argv under Windows and standard sys.argv otherwise""" + + if os.name != "nt" or not PY2: + return sys.argv + + import ctypes + from ctypes import cdll, windll, wintypes + + GetCommandLineW = cdll.kernel32.GetCommandLineW + GetCommandLineW.argtypes = [] + GetCommandLineW.restype = wintypes.LPCWSTR + + CommandLineToArgvW = windll.shell32.CommandLineToArgvW + CommandLineToArgvW.argtypes = [ + wintypes.LPCWSTR, ctypes.POINTER(ctypes.c_int)] + CommandLineToArgvW.restype = ctypes.POINTER(wintypes.LPWSTR) + + LocalFree = windll.kernel32.LocalFree + LocalFree.argtypes = [wintypes.HLOCAL] + LocalFree.restype = wintypes.HLOCAL + + argc = ctypes.c_int() + argv = CommandLineToArgvW(GetCommandLineW(), ctypes.byref(argc)) + if not argv: + return + + res = argv[max(0, argc.value - len(sys.argv)):argc.value] + + LocalFree(argv) + + return res + + +def fsencoding(): + """The encoding used for paths, argv, environ, stdout and stdin""" + + if os.name == "nt": + return "" + + return locale.getpreferredencoding() or "utf-8" + + +def fsnative(text=u""): + """Returns the passed text converted to the preferred path type + for each platform. + """ + + assert isinstance(text, text_type) + + if os.name == "nt" or PY3: + return text + else: + return text.encode(fsencoding(), "replace") + return text + + +def is_fsnative(arg): + """If the passed value is of the preferred path type for each platform. + Note that on Python3+linux, paths can be bytes or str but this returns + False for bytes there. + """ + + if PY3 or os.name == "nt": + return isinstance(arg, text_type) + else: + return isinstance(arg, bytes) + + +def print_(*objects, **kwargs): + """A print which supports bytes and str+surrogates under python3. + + Needed so we can print anything passed to us through argv and environ. 
+ Under Windows only text_type is allowed. + + Arguments: + objects: one or more bytes/text + linesep (bool): whether a line separator should be appended + sep (bool): whether objects should be printed separated by spaces + """ + + linesep = kwargs.pop("linesep", True) + sep = kwargs.pop("sep", True) + file_ = kwargs.pop("file", None) + if file_ is None: + file_ = sys.stdout + + old_cp = None + if os.name == "nt": + # Try to force the output to cp65001 aka utf-8. + # If that fails use the current one (most likely cp850, so + # most of unicode will be replaced with '?') + encoding = "utf-8" + old_cp = ctypes.windll.kernel32.GetConsoleOutputCP() + if ctypes.windll.kernel32.SetConsoleOutputCP(65001) == 0: + encoding = getattr(sys.stdout, "encoding", None) or "utf-8" + old_cp = None + else: + encoding = fsencoding() + + try: + if linesep: + objects = list(objects) + [os.linesep] + + parts = [] + for text in objects: + if isinstance(text, text_type): + if PY3: + try: + text = text.encode(encoding, 'surrogateescape') + except UnicodeEncodeError: + text = text.encode(encoding, 'replace') + else: + text = text.encode(encoding, 'replace') + parts.append(text) + + data = (b" " if sep else b"").join(parts) + try: + fileno = file_.fileno() + except (AttributeError, OSError, ValueError): + # for tests when stdout is replaced + try: + file_.write(data) + except TypeError: + file_.write(data.decode(encoding, "replace")) + else: + file_.flush() + os.write(fileno, data) + finally: + # reset the code page to what we had before + if old_cp is not None: + ctypes.windll.kernel32.SetConsoleOutputCP(old_cp) + + +class OptionParser(optparse.OptionParser): + """OptionParser subclass which supports printing Unicode under Windows""" + + def print_help(self, file=None): + print_(self.format_help(), file=file) diff --git a/libs/mutagen/_util.py b/libs/mutagen/_util.py index 2c8e1a56..f05ff454 100644 --- a/libs/mutagen/_util.py +++ b/libs/mutagen/_util.py @@ -1,4 +1,6 @@ -# Copyright 2006 Joe 
Wreschnig +# -*- coding: utf-8 -*- + +# Copyright (C) 2006 Joe Wreschnig # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 as @@ -11,10 +13,83 @@ intended for internal use in Mutagen only. """ import struct +import codecs from fnmatch import fnmatchcase +from ._compat import chr_, PY2, iteritems, iterbytes, integer_types, xrange, \ + izip + +class MutagenError(Exception): + """Base class for all custom exceptions in mutagen + + .. versionadded:: 1.25 + """ + + __module__ = "mutagen" + + +def total_ordering(cls): + assert "__eq__" in cls.__dict__ + assert "__lt__" in cls.__dict__ + + cls.__le__ = lambda self, other: self == other or self < other + cls.__gt__ = lambda self, other: not (self == other or self < other) + cls.__ge__ = lambda self, other: not self < other + cls.__ne__ = lambda self, other: not self.__eq__(other) + + return cls + + +def hashable(cls): + """Makes sure the class is hashable. + + Needs a working __eq__ and __hash__ and will add a __ne__. 
+ """ + + # py2 + assert "__hash__" in cls.__dict__ + # py3 + assert cls.__dict__["__hash__"] is not None + assert "__eq__" in cls.__dict__ + + cls.__ne__ = lambda self, other: not self.__eq__(other) + + return cls + + +def enum(cls): + assert cls.__bases__ == (object,) + + d = dict(cls.__dict__) + new_type = type(cls.__name__, (int,), d) + new_type.__module__ = cls.__module__ + + map_ = {} + for key, value in iteritems(d): + if key.upper() == key and isinstance(value, integer_types): + value_instance = new_type(value) + setattr(new_type, key, value_instance) + map_[value] = key + + def str_(self): + if self in map_: + return "%s.%s" % (type(self).__name__, map_[self]) + return "%d" % int(self) + + def repr_(self): + if self in map_: + return "<%s.%s: %d>" % (type(self).__name__, map_[self], int(self)) + return "%d" % int(self) + + setattr(new_type, "__repr__", repr_) + setattr(new_type, "__str__", str_) + + return new_type + + +@total_ordering class DictMixin(object): """Implement the dict API using keys() and __*item__ methods. 
@@ -33,27 +108,37 @@ class DictMixin(object): def __iter__(self): return iter(self.keys()) - def has_key(self, key): + def __has_key(self, key): try: self[key] except KeyError: return False else: return True - __contains__ = has_key - iterkeys = lambda self: iter(self.keys()) + if PY2: + has_key = __has_key + + __contains__ = __has_key + + if PY2: + iterkeys = lambda self: iter(self.keys()) def values(self): - return map(self.__getitem__, self.keys()) - itervalues = lambda self: iter(self.values()) + return [self[k] for k in self.keys()] + + if PY2: + itervalues = lambda self: iter(self.values()) def items(self): - return zip(self.keys(), self.values()) - iteritems = lambda s: iter(s.items()) + return list(izip(self.keys(), self.values())) + + if PY2: + iteritems = lambda s: iter(s.items()) def clear(self): - map(self.__delitem__, self.keys()) + for key in list(self.keys()): + self.__delitem__(key) def pop(self, key, *args): if len(args) > 1: @@ -69,11 +154,11 @@ class DictMixin(object): return value def popitem(self): - try: - key = self.keys()[0] - return key, self.pop(key) - except IndexError: + for key in self.keys(): + break + else: raise KeyError("dictionary is empty") + return key, self.pop(key) def update(self, other=None, **kwargs): if other is None: @@ -81,7 +166,8 @@ class DictMixin(object): other = {} try: - map(self.__setitem__, other.keys(), other.values()) + for key, value in other.items(): + self.__setitem__(key, value) except AttributeError: for key, value in other: self[key] = value @@ -102,11 +188,11 @@ class DictMixin(object): def __repr__(self): return repr(dict(self.items())) - def __cmp__(self, other): - if other is None: - return 1 - else: - return cmp(dict(self.items()), other) + def __eq__(self, other): + return dict(self.items()) == other + + def __lt__(self, other): + return dict(self.items()) < other __hash__ = object.__hash__ @@ -132,98 +218,79 @@ class DictProxy(DictMixin): return self.__dict.keys() +def _fill_cdata(cls): + """Add 
struct pack/unpack functions""" + + funcs = {} + for key, name in [("b", "char"), ("h", "short"), + ("i", "int"), ("q", "longlong")]: + for echar, esuffix in [("<", "le"), (">", "be")]: + esuffix = "_" + esuffix + for unsigned in [True, False]: + s = struct.Struct(echar + (key.upper() if unsigned else key)) + get_wrapper = lambda f: lambda *a, **k: f(*a, **k)[0] + unpack = get_wrapper(s.unpack) + unpack_from = get_wrapper(s.unpack_from) + + def get_unpack_from(s): + def unpack_from(data, offset=0): + return s.unpack_from(data, offset)[0], offset + s.size + return unpack_from + + unpack_from = get_unpack_from(s) + pack = s.pack + + prefix = "u" if unsigned else "" + if s.size == 1: + esuffix = "" + bits = str(s.size * 8) + funcs["%s%s%s" % (prefix, name, esuffix)] = unpack + funcs["%sint%s%s" % (prefix, bits, esuffix)] = unpack + funcs["%s%s%s_from" % (prefix, name, esuffix)] = unpack_from + funcs["%sint%s%s_from" % (prefix, bits, esuffix)] = unpack_from + funcs["to_%s%s%s" % (prefix, name, esuffix)] = pack + funcs["to_%sint%s%s" % (prefix, bits, esuffix)] = pack + + for key, func in iteritems(funcs): + setattr(cls, key, staticmethod(func)) + + class cdata(object): - """C character buffer to Python numeric type conversions.""" + """C character buffer to Python numeric type conversions. 
+ + For each size/sign/endianness: + uint32_le(data)/to_uint32_le(num)/uint32_le_from(data, offset=0) + """ from struct import error error = error - short_le = staticmethod(lambda data: struct.unpack('h', data)[0]) - ushort_be = staticmethod(lambda data: struct.unpack('>H', data)[0]) - - int_le = staticmethod(lambda data: struct.unpack('i', data)[0]) - uint_be = staticmethod(lambda data: struct.unpack('>I', data)[0]) - - longlong_le = staticmethod(lambda data: struct.unpack('q', data)[0]) - ulonglong_be = staticmethod(lambda data: struct.unpack('>Q', data)[0]) - - to_short_le = staticmethod(lambda data: struct.pack('h', data)) - to_ushort_be = staticmethod(lambda data: struct.pack('>H', data)) - - to_int_le = staticmethod(lambda data: struct.pack('i', data)) - to_uint_be = staticmethod(lambda data: struct.pack('>I', data)) - - to_longlong_le = staticmethod(lambda data: struct.pack('q', data)) - to_ulonglong_be = staticmethod(lambda data: struct.pack('>Q', data)) - - bitswap = ''.join([chr(sum([((val >> i) & 1) << (7-i) for i in range(8)])) - for val in range(256)]) - del(i) - del(val) + bitswap = b''.join( + chr_(sum(((val >> i) & 1) << (7 - i) for i in xrange(8))) + for val in xrange(256)) test_bit = staticmethod(lambda value, n: bool((value >> n) & 1)) -def lock(fileobj): - """Lock a file object 'safely'. +_fill_cdata(cdata) - That means a failure to lock because the platform doesn't - support fcntl or filesystem locks is not considered a - failure. This call does block. - Returns whether or not the lock was successful, or - raises an exception in more extreme circumstances (full - lock table, invalid file). +def get_size(fileobj): + """Returns the size of the file object. The position when passed in will + be preserved if no error occurs. + + In case of an error raises IOError. 
""" + old_pos = fileobj.tell() try: - import fcntl - except ImportError: - return False - else: - try: - fcntl.lockf(fileobj, fcntl.LOCK_EX) - except IOError: - # FIXME: There's possibly a lot of complicated - # logic that needs to go here in case the IOError - # is EACCES or EAGAIN. - return False - else: - return True + fileobj.seek(0, 2) + return fileobj.tell() + finally: + fileobj.seek(old_pos, 0) -def unlock(fileobj): - """Unlock a file object. - - Don't call this on a file object unless a call to lock() - returned true. - """ - - # If this fails there's a mismatched lock/unlock pair, - # so we definitely don't want to ignore errors. - import fcntl - fcntl.lockf(fileobj, fcntl.LOCK_UN) - - -def insert_bytes(fobj, size, offset, BUFFER_SIZE=2**16): +def insert_bytes(fobj, size, offset, BUFFER_SIZE=2 ** 16): """Insert size bytes of empty space starting at offset. fobj must be an open file object, open rb+ or @@ -233,59 +300,55 @@ def insert_bytes(fobj, size, offset, BUFFER_SIZE=2**16): assert 0 < size assert 0 <= offset - locked = False + fobj.seek(0, 2) filesize = fobj.tell() movesize = filesize - offset - fobj.write('\x00' * size) + fobj.write(b'\x00' * size) fobj.flush() + try: + import mmap + file_map = mmap.mmap(fobj.fileno(), filesize + size) try: - import mmap - map = mmap.mmap(fobj.fileno(), filesize + size) - try: - map.move(offset + size, offset, movesize) - finally: - map.close() - except (ValueError, EnvironmentError, ImportError): - # handle broken mmap scenarios - locked = lock(fobj) - fobj.truncate(filesize) + file_map.move(offset + size, offset, movesize) + finally: + file_map.close() + except (ValueError, EnvironmentError, ImportError, AttributeError): + # handle broken mmap scenarios, BytesIO() + fobj.truncate(filesize) - fobj.seek(0, 2) - padsize = size - # Don't generate an enormous string if we need to pad - # the file out several megs. 
- while padsize: - addsize = min(BUFFER_SIZE, padsize) - fobj.write("\x00" * addsize) - padsize -= addsize + fobj.seek(0, 2) + padsize = size + # Don't generate an enormous string if we need to pad + # the file out several megs. + while padsize: + addsize = min(BUFFER_SIZE, padsize) + fobj.write(b"\x00" * addsize) + padsize -= addsize - fobj.seek(filesize, 0) - while movesize: - # At the start of this loop, fobj is pointing at the end - # of the data we need to move, which is of movesize length. - thismove = min(BUFFER_SIZE, movesize) - # Seek back however much we're going to read this frame. - fobj.seek(-thismove, 1) - nextpos = fobj.tell() - # Read it, so we're back at the end. - data = fobj.read(thismove) - # Seek back to where we need to write it. - fobj.seek(-thismove + size, 1) - # Write it. - fobj.write(data) - # And seek back to the end of the unmoved data. - fobj.seek(nextpos) - movesize -= thismove + fobj.seek(filesize, 0) + while movesize: + # At the start of this loop, fobj is pointing at the end + # of the data we need to move, which is of movesize length. + thismove = min(BUFFER_SIZE, movesize) + # Seek back however much we're going to read this frame. + fobj.seek(-thismove, 1) + nextpos = fobj.tell() + # Read it, so we're back at the end. + data = fobj.read(thismove) + # Seek back to where we need to write it. + fobj.seek(-thismove + size, 1) + # Write it. + fobj.write(data) + # And seek back to the end of the unmoved data. + fobj.seek(nextpos) + movesize -= thismove - fobj.flush() - finally: - if locked: - unlock(fobj) + fobj.flush() -def delete_bytes(fobj, size, offset, BUFFER_SIZE=2**16): +def delete_bytes(fobj, size, offset, BUFFER_SIZE=2 ** 16): """Delete size bytes of empty space starting at offset. fobj must be an open file object, open rb+ or @@ -293,57 +356,195 @@ def delete_bytes(fobj, size, offset, BUFFER_SIZE=2**16): falls back to a significantly slower method if mmap fails. 
""" - locked = False assert 0 < size assert 0 <= offset + fobj.seek(0, 2) filesize = fobj.tell() movesize = filesize - offset - size assert 0 <= movesize - try: - if movesize > 0: - fobj.flush() + + if movesize > 0: + fobj.flush() + try: + import mmap + file_map = mmap.mmap(fobj.fileno(), filesize) try: - import mmap - map = mmap.mmap(fobj.fileno(), filesize) - try: - map.move(offset, offset + size, movesize) - finally: - map.close() - except (ValueError, EnvironmentError, ImportError): - # handle broken mmap scenarios - locked = lock(fobj) + file_map.move(offset, offset + size, movesize) + finally: + file_map.close() + except (ValueError, EnvironmentError, ImportError, AttributeError): + # handle broken mmap scenarios, BytesIO() + fobj.seek(offset + size) + buf = fobj.read(BUFFER_SIZE) + while buf: + fobj.seek(offset) + fobj.write(buf) + offset += len(buf) fobj.seek(offset + size) buf = fobj.read(BUFFER_SIZE) - while buf: - fobj.seek(offset) - fobj.write(buf) - offset += len(buf) - fobj.seek(offset + size) - buf = fobj.read(BUFFER_SIZE) - fobj.truncate(filesize - size) - fobj.flush() - finally: - if locked: - unlock(fobj) + fobj.truncate(filesize - size) + fobj.flush() -def utf8(data): - """Convert a basestring to a valid UTF-8 str.""" +def resize_bytes(fobj, old_size, new_size, offset): + """Resize an area in a file adding and deleting at the end of it. + Does nothing if no resizing is needed. 
+ """ - if isinstance(data, str): - return data.decode("utf-8", "replace").encode("utf-8") - elif isinstance(data, unicode): - return data.encode("utf-8") - else: - raise TypeError("only unicode/str types can be converted to UTF-8") + if new_size < old_size: + delete_size = old_size - new_size + delete_at = offset + new_size + delete_bytes(fobj, delete_size, delete_at) + elif new_size > old_size: + insert_size = new_size - old_size + insert_at = offset + old_size + insert_bytes(fobj, insert_size, insert_at) def dict_match(d, key, default=None): - try: + """Like __getitem__ but works as if the keys() are all filename patterns. + Returns the value of any dict key that matches the passed key. + """ + + if key in d and "[" not in key: return d[key] - except KeyError: - for pattern, value in d.iteritems(): + else: + for pattern, value in iteritems(d): if fnmatchcase(key, pattern): return value return default + + +def decode_terminated(data, encoding, strict=True): + """Returns the decoded data until the first NULL terminator + and all data after it. + + In case the data can't be decoded raises UnicodeError. + In case the encoding is not found raises LookupError. + In case the data isn't null terminated (even if it is encoded correctly) + raises ValueError except if strict is False, then the decoded string + will be returned anyway. 
+ """ + + codec_info = codecs.lookup(encoding) + + # normalize encoding name so we can compare by name + encoding = codec_info.name + + # fast path + if encoding in ("utf-8", "iso8859-1"): + index = data.find(b"\x00") + if index == -1: + # make sure we raise UnicodeError first, like in the slow path + res = data.decode(encoding), b"" + if strict: + raise ValueError("not null terminated") + else: + return res + return data[:index].decode(encoding), data[index + 1:] + + # slow path + decoder = codec_info.incrementaldecoder() + r = [] + for i, b in enumerate(iterbytes(data)): + c = decoder.decode(b) + if c == u"\x00": + return u"".join(r), data[i + 1:] + r.append(c) + else: + # make sure the decoder is finished + r.append(decoder.decode(b"", True)) + if strict: + raise ValueError("not null terminated") + return u"".join(r), b"" + + +class BitReaderError(Exception): + pass + + +class BitReader(object): + + def __init__(self, fileobj): + self._fileobj = fileobj + self._buffer = 0 + self._bits = 0 + self._pos = fileobj.tell() + + def bits(self, count): + """Reads `count` bits and returns an uint, MSB read first. + + May raise BitReaderError if not enough data could be read or + IOError by the underlying file object. + """ + + if count < 0: + raise ValueError + + if count > self._bits: + n_bytes = (count - self._bits + 7) // 8 + data = self._fileobj.read(n_bytes) + if len(data) != n_bytes: + raise BitReaderError("not enough data") + for b in bytearray(data): + self._buffer = (self._buffer << 8) | b + self._bits += n_bytes * 8 + + self._bits -= count + value = self._buffer >> self._bits + self._buffer &= (1 << self._bits) - 1 + assert self._bits < 8 + return value + + def bytes(self, count): + """Returns a bytearray of length `count`. 
Works unaligned.""" + + if count < 0: + raise ValueError + + # fast path + if self._bits == 0: + data = self._fileobj.read(count) + if len(data) != count: + raise BitReaderError("not enough data") + return data + + return bytes(bytearray(self.bits(8) for _ in xrange(count))) + + def skip(self, count): + """Skip `count` bits. + + Might raise BitReaderError if there wasn't enough data to skip, + but might also fail on the next bits() instead. + """ + + if count < 0: + raise ValueError + + if count <= self._bits: + self.bits(count) + else: + count -= self.align() + n_bytes = count // 8 + self._fileobj.seek(n_bytes, 1) + count -= n_bytes * 8 + self.bits(count) + + def get_position(self): + """Returns the amount of bits read or skipped so far""" + + return (self._fileobj.tell() - self._pos) * 8 - self._bits + + def align(self): + """Align to the next byte, returns the amount of bits skipped""" + + bits = self._bits + self._buffer = 0 + self._bits = 0 + return bits + + def is_aligned(self): + """If we are currently aligned to bytes and nothing is buffered""" + + return self._bits == 0 diff --git a/libs/mutagen/_vorbis.py b/libs/mutagen/_vorbis.py index 4ee8da4a..17634e06 100644 --- a/libs/mutagen/_vorbis.py +++ b/libs/mutagen/_vorbis.py @@ -1,5 +1,7 @@ -# Vorbis comment support for Mutagen -# Copyright 2005-2006 Joe Wreschnig +# -*- coding: utf-8 -*- + +# Copyright (C) 2005-2006 Joe Wreschnig +# 2013 Christoph Reiter # # This program is free software; you can redistribute it and/or modify # it under the terms of version 2 of the GNU General Public License as @@ -16,9 +18,8 @@ The specification is at http://www.xiph.org/vorbis/doc/v-comment.html. import sys -from cStringIO import StringIO - import mutagen +from ._compat import reraise, BytesIO, text_type, xrange, PY3, PY2 from mutagen._util import DictMixin, cdata @@ -27,13 +28,20 @@ def is_valid_key(key): Valid Vorbis comment keys are printable ASCII between 0x20 (space) and 0x7D ('}'), excluding '='. 
+ + Takes str/unicode in Python 2, unicode in Python 3 """ + + if PY3 and isinstance(key, bytes): + raise TypeError("needs to be str not bytes") + for c in key: if c < " " or c > "}" or c == "=": return False else: return bool(key) + istag = is_valid_key @@ -49,7 +57,7 @@ class VorbisEncodingError(error): pass -class VComment(mutagen.Metadata, list): +class VComment(mutagen.Tags, list): """A Vorbis comment parser, accessor, and renderer. All comment ordering is preserved. A VComment is a list of @@ -60,35 +68,40 @@ class VComment(mutagen.Metadata, list): file-like object, not a filename. Attributes: - vendor -- the stream 'vendor' (i.e. writer); default 'Mutagen' + + * vendor -- the stream 'vendor' (i.e. writer); default 'Mutagen' """ vendor = u"Mutagen " + mutagen.version_string def __init__(self, data=None, *args, **kwargs): + self._size = 0 # Collect the args to pass to load, this lets child classes # override just load and get equivalent magic for the # constructor. if data is not None: - if isinstance(data, str): - data = StringIO(data) + if isinstance(data, bytes): + data = BytesIO(data) elif not hasattr(data, 'read'): - raise TypeError("VComment requires string data or a file-like") + raise TypeError("VComment requires bytes or a file-like") + start = data.tell() self.load(data, *args, **kwargs) + self._size = data.tell() - start def load(self, fileobj, errors='replace', framing=True): """Parse a Vorbis comment from a file-like object. Keyword arguments: - errors: - 'strict', 'replace', or 'ignore'. This affects Unicode decoding - and how other malformed content is interpreted. - framing -- if true, fail if a framing bit is not present + + * errors: + 'strict', 'replace', or 'ignore'. This affects Unicode decoding + and how other malformed content is interpreted. + * framing -- if true, fail if a framing bit is not present Framing bits are required by the Vorbis comment specification, but are not used in FLAC Vorbis comment blocks. 
- """ + try: vendor_length = cdata.uint_le(fileobj.read(4)) self.vendor = fileobj.read(vendor_length).decode('utf-8', errors) @@ -101,21 +114,25 @@ class VComment(mutagen.Metadata, list): raise error("cannot read %d bytes, too large" % length) try: tag, value = string.split('=', 1) - except ValueError, err: + except ValueError as err: if errors == "ignore": continue elif errors == "replace": tag, value = u"unknown%d" % i, string else: - raise VorbisEncodingError, err, sys.exc_info()[2] + reraise(VorbisEncodingError, err, sys.exc_info()[2]) try: tag = tag.encode('ascii', errors) except UnicodeEncodeError: raise VorbisEncodingError("invalid tag name %r" % tag) else: + # string keys in py3k + if PY3: + tag = tag.decode("ascii") if is_valid_key(tag): self.append((tag, value)) - if framing and not ord(fileobj.read(1)) & 0x01: + + if framing and not bytearray(fileobj.read(1))[0] & 0x01: raise VorbisUnsetFrameError("framing bit was unset") except (cdata.error, TypeError): raise error("file is not a valid Vorbis comment") @@ -126,9 +143,14 @@ class VComment(mutagen.Metadata, list): Check to make sure every key used is a valid Vorbis key, and that every value used is a valid Unicode or UTF-8 string. If any invalid keys or values are found, a ValueError is raised. + + In Python 3 all keys and values have to be a string. 
""" - if not isinstance(self.vendor, unicode): + if not isinstance(self.vendor, text_type): + if PY3: + raise ValueError("vendor needs to be str") + try: self.vendor.decode('utf-8') except UnicodeDecodeError: @@ -138,19 +160,25 @@ class VComment(mutagen.Metadata, list): try: if not is_valid_key(key): raise ValueError - except: + except TypeError: raise ValueError("%r is not a valid key" % key) - if not isinstance(value, unicode): + + if not isinstance(value, text_type): + if PY3: + raise ValueError("%r needs to be str" % key) + try: - value.encode("utf-8") + value.decode("utf-8") except: raise ValueError("%r is not a valid value" % value) - else: - return True + + return True def clear(self): """Clear all keys from the comment.""" - del(self[:]) + + for i in list(self): + self.remove(i) def write(self, framing=True): """Return a string representation of the data. @@ -159,25 +187,41 @@ class VComment(mutagen.Metadata, list): invalid data may raise a ValueError. Keyword arguments: - framing -- if true, append a framing bit (see load) + + * framing -- if true, append a framing bit (see load) """ self.validate() - f = StringIO() - f.write(cdata.to_uint_le(len(self.vendor.encode('utf-8')))) - f.write(self.vendor.encode('utf-8')) + def _encode(value): + if not isinstance(value, bytes): + return value.encode('utf-8') + return value + + f = BytesIO() + vendor = _encode(self.vendor) + f.write(cdata.to_uint_le(len(vendor))) + f.write(vendor) f.write(cdata.to_uint_le(len(self))) for tag, value in self: - comment = "%s=%s" % (tag, value.encode('utf-8')) + tag = _encode(tag) + value = _encode(value) + comment = tag + b"=" + value f.write(cdata.to_uint_le(len(comment))) f.write(comment) if framing: - f.write("\x01") + f.write(b"\x01") return f.getvalue() def pprint(self): - return "\n".join(["%s=%s" % (k.lower(), v) for k, v in self]) + + def _decode(value): + if not isinstance(value, text_type): + return value.decode('utf-8', 'replace') + return value + + tags = [u"%s=%s" % 
(_decode(k), _decode(v)) for k, v in self] + return u"\n".join(tags) class VCommentDict(VComment, DictMixin): @@ -199,9 +243,17 @@ class VCommentDict(VComment, DictMixin): This is a copy, so comment['title'].append('a title') will not work. - """ - key = key.lower().encode('ascii') + + # PY3 only + if isinstance(key, slice): + return VComment.__getitem__(self, key) + + if not is_valid_key(key): + raise ValueError + + key = key.lower() + values = [value for (k, value) in self if k.lower() == key] if not values: raise KeyError(key) @@ -210,16 +262,29 @@ class VCommentDict(VComment, DictMixin): def __delitem__(self, key): """Delete all values associated with the key.""" - key = key.lower().encode('ascii') - to_delete = filter(lambda x: x[0].lower() == key, self) + + # PY3 only + if isinstance(key, slice): + return VComment.__delitem__(self, key) + + if not is_valid_key(key): + raise ValueError + + key = key.lower() + to_delete = [x for x in self if x[0].lower() == key] if not to_delete: raise KeyError(key) else: - map(self.remove, to_delete) + for item in to_delete: + self.remove(item) def __contains__(self, key): """Return true if the key has any values.""" - key = key.lower().encode('ascii') + + if not is_valid_key(key): + raise ValueError + + key = key.lower() for k, value in self: if k.lower() == key: return True @@ -232,23 +297,34 @@ class VCommentDict(VComment, DictMixin): Setting a value overwrites all old ones. The value may be a list of Unicode or UTF-8 strings, or a single Unicode or UTF-8 string. 
- """ - key = key.encode('ascii') + # PY3 only + if isinstance(key, slice): + return VComment.__setitem__(self, key, values) + + if not is_valid_key(key): + raise ValueError + if not isinstance(values, list): values = [values] try: del(self[key]) except KeyError: pass + + if PY2: + key = key.encode('ascii') + for value in values: self.append((key, value)) def keys(self): """Return all keys in the comment.""" - return self and list(set([k.lower() for k, v in self])) + + return list(set([k.lower() for k, v in self])) def as_dict(self): """Return a copy of the comment data in a real dict.""" + return dict([(key, self[key]) for key in self.keys()]) diff --git a/libs/mutagen/aac.py b/libs/mutagen/aac.py new file mode 100644 index 00000000..83968a05 --- /dev/null +++ b/libs/mutagen/aac.py @@ -0,0 +1,410 @@ +# -*- coding: utf-8 -*- +# Copyright (C) 2014 Christoph Reiter +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of version 2 of the GNU General Public License as +# published by the Free Software Foundation. + +""" +* ADTS - Audio Data Transport Stream +* ADIF - Audio Data Interchange Format +* See ISO/IEC 13818-7 / 14496-03 +""" + +from mutagen import StreamInfo +from mutagen._file import FileType +from mutagen._util import BitReader, BitReaderError, MutagenError +from mutagen._compat import endswith, xrange + + +_FREQS = [ + 96000, 88200, 64000, 48000, + 44100, 32000, 24000, 22050, + 16000, 12000, 11025, 8000, + 7350, +] + + +class _ADTSStream(object): + """Represents a series of frames belonging to the same stream""" + + parsed_frames = 0 + """Number of successfully parsed frames""" + + offset = 0 + """offset in bytes at which the stream starts (the first sync word)""" + + @classmethod + def find_stream(cls, fileobj, max_bytes): + """Returns a possibly valid _ADTSStream or None. 
+ + Args: + max_bytes (int): maximum bytes to read + """ + + r = BitReader(fileobj) + stream = cls(r) + if stream.sync(max_bytes): + stream.offset = (r.get_position() - 12) // 8 + return stream + + def sync(self, max_bytes): + """Find the next sync. + Returns True if found.""" + + # at least 2 bytes for the sync + max_bytes = max(max_bytes, 2) + + r = self._r + r.align() + while max_bytes > 0: + try: + b = r.bytes(1) + if b == b"\xff": + if r.bits(4) == 0xf: + return True + r.align() + max_bytes -= 2 + else: + max_bytes -= 1 + except BitReaderError: + return False + return False + + def __init__(self, r): + """Use _ADTSStream.find_stream to create a stream""" + + self._fixed_header_key = None + self._r = r + self.offset = -1 + self.parsed_frames = 0 + + self._samples = 0 + self._payload = 0 + self._start = r.get_position() / 8 + self._last = self._start + + @property + def bitrate(self): + """Bitrate of the raw aac blocks, excluding framing/crc""" + + assert self.parsed_frames, "no frame parsed yet" + + if self._samples == 0: + return 0 + + return (8 * self._payload * self.frequency) // self._samples + + @property + def samples(self): + """samples so far""" + + assert self.parsed_frames, "no frame parsed yet" + + return self._samples + + @property + def size(self): + """bytes read in the stream so far (including framing)""" + + assert self.parsed_frames, "no frame parsed yet" + + return self._last - self._start + + @property + def channels(self): + """0 means unknown""" + + assert self.parsed_frames, "no frame parsed yet" + + b_index = self._fixed_header_key[6] + if b_index == 7: + return 8 + elif b_index > 7: + return 0 + else: + return b_index + + @property + def frequency(self): + """0 means unknown""" + + assert self.parsed_frames, "no frame parsed yet" + + f_index = self._fixed_header_key[4] + try: + return _FREQS[f_index] + except IndexError: + return 0 + + def parse_frame(self): + """True if parsing was successful. 
+ Fails either because the frame wasn't valid or the stream ended. + """ + + try: + return self._parse_frame() + except BitReaderError: + return False + + def _parse_frame(self): + r = self._r + # start == position of sync word + start = r.get_position() - 12 + + # adts_fixed_header + id_ = r.bits(1) + layer = r.bits(2) + protection_absent = r.bits(1) + + profile = r.bits(2) + sampling_frequency_index = r.bits(4) + private_bit = r.bits(1) + # TODO: if 0 we could parse program_config_element() + channel_configuration = r.bits(3) + original_copy = r.bits(1) + home = r.bits(1) + + # the fixed header has to be the same for every frame in the stream + fixed_header_key = ( + id_, layer, protection_absent, profile, sampling_frequency_index, + private_bit, channel_configuration, original_copy, home, + ) + + if self._fixed_header_key is None: + self._fixed_header_key = fixed_header_key + else: + if self._fixed_header_key != fixed_header_key: + return False + + # adts_variable_header + r.skip(2) # copyright_identification_bit/start + frame_length = r.bits(13) + r.skip(11) # adts_buffer_fullness + nordbif = r.bits(2) + # adts_variable_header end + + crc_overhead = 0 + if not protection_absent: + crc_overhead += (nordbif + 1) * 16 + if nordbif != 0: + crc_overhead *= 2 + + left = (frame_length * 8) - (r.get_position() - start) + if left < 0: + return False + r.skip(left) + assert r.is_aligned() + + self._payload += (left - crc_overhead) / 8 + self._samples += (nordbif + 1) * 1024 + self._last = r.get_position() / 8 + + self.parsed_frames += 1 + return True + + +class ProgramConfigElement(object): + + element_instance_tag = None + object_type = None + sampling_frequency_index = None + channels = None + + def __init__(self, r): + """Reads the program_config_element() + + Raises BitReaderError + """ + + self.element_instance_tag = r.bits(4) + self.object_type = r.bits(2) + self.sampling_frequency_index = r.bits(4) + num_front_channel_elements = r.bits(4) + 
num_side_channel_elements = r.bits(4) + num_back_channel_elements = r.bits(4) + num_lfe_channel_elements = r.bits(2) + num_assoc_data_elements = r.bits(3) + num_valid_cc_elements = r.bits(4) + + mono_mixdown_present = r.bits(1) + if mono_mixdown_present == 1: + r.skip(4) + stereo_mixdown_present = r.bits(1) + if stereo_mixdown_present == 1: + r.skip(4) + matrix_mixdown_idx_present = r.bits(1) + if matrix_mixdown_idx_present == 1: + r.skip(3) + + elms = num_front_channel_elements + num_side_channel_elements + \ + num_back_channel_elements + channels = 0 + for i in xrange(elms): + channels += 1 + element_is_cpe = r.bits(1) + if element_is_cpe: + channels += 1 + r.skip(4) + channels += num_lfe_channel_elements + self.channels = channels + + r.skip(4 * num_lfe_channel_elements) + r.skip(4 * num_assoc_data_elements) + r.skip(5 * num_valid_cc_elements) + r.align() + comment_field_bytes = r.bits(8) + r.skip(8 * comment_field_bytes) + + +class AACError(MutagenError): + pass + + +class AACInfo(StreamInfo): + """AAC stream information. + + Attributes: + + * channels -- number of audio channels + * length -- file length in seconds, as a float + * sample_rate -- audio sampling rate in Hz + * bitrate -- audio bitrate, in bits per second + + The length of the stream is just a guess and might not be correct. 
+ """ + + channels = 0 + length = 0 + sample_rate = 0 + bitrate = 0 + + def __init__(self, fileobj): + # skip id3v2 header + start_offset = 0 + header = fileobj.read(10) + from mutagen.id3 import BitPaddedInt + if header.startswith(b"ID3"): + size = BitPaddedInt(header[6:]) + start_offset = size + 10 + + fileobj.seek(start_offset) + adif = fileobj.read(4) + if adif == b"ADIF": + self._parse_adif(fileobj) + self._type = "ADIF" + else: + self._parse_adts(fileobj, start_offset) + self._type = "ADTS" + + def _parse_adif(self, fileobj): + r = BitReader(fileobj) + try: + copyright_id_present = r.bits(1) + if copyright_id_present: + r.skip(72) # copyright_id + r.skip(1 + 1) # original_copy, home + bitstream_type = r.bits(1) + self.bitrate = r.bits(23) + npce = r.bits(4) + if bitstream_type == 0: + r.skip(20) # adif_buffer_fullness + + pce = ProgramConfigElement(r) + try: + self.sample_rate = _FREQS[pce.sampling_frequency_index] + except IndexError: + pass + self.channels = pce.channels + + # other pces.. + for i in xrange(npce): + ProgramConfigElement(r) + r.align() + except BitReaderError as e: + raise AACError(e) + + # use bitrate + data size to guess length + start = fileobj.tell() + fileobj.seek(0, 2) + length = fileobj.tell() - start + if self.bitrate != 0: + self.length = (8.0 * length) / self.bitrate + + def _parse_adts(self, fileobj, start_offset): + max_initial_read = 512 + max_resync_read = 10 + max_sync_tries = 10 + + frames_max = 100 + frames_needed = 3 + + # Try up to X times to find a sync word and read up to Y frames. 
+ # If more than Z frames are valid we assume a valid stream + offset = start_offset + for i in xrange(max_sync_tries): + fileobj.seek(offset) + s = _ADTSStream.find_stream(fileobj, max_initial_read) + if s is None: + raise AACError("sync not found") + # start right after the last found offset + offset += s.offset + 1 + + for i in xrange(frames_max): + if not s.parse_frame(): + break + if not s.sync(max_resync_read): + break + + if s.parsed_frames >= frames_needed: + break + else: + raise AACError( + "no valid stream found (only %d frames)" % s.parsed_frames) + + self.sample_rate = s.frequency + self.channels = s.channels + self.bitrate = s.bitrate + + # size from stream start to end of file + fileobj.seek(0, 2) + stream_size = fileobj.tell() - (offset + s.offset) + # approx + self.length = float(s.samples * stream_size) / (s.size * s.frequency) + + def pprint(self): + return u"AAC (%s), %d Hz, %.2f seconds, %d channel(s), %d bps" % ( + self._type, self.sample_rate, self.length, self.channels, + self.bitrate) + + +class AAC(FileType): + """Load ADTS or ADIF streams containing AAC. + + Tagging is not supported. + Use the ID3/APEv2 classes directly instead. 
+ """ + + _mimes = ["audio/x-aac"] + + def load(self, filename): + self.filename = filename + with open(filename, "rb") as h: + self.info = AACInfo(h) + + def add_tags(self): + raise AACError("doesn't support tags") + + @staticmethod + def score(filename, fileobj, header): + filename = filename.lower() + s = endswith(filename, ".aac") or endswith(filename, ".adts") or \ + endswith(filename, ".adif") + s += b"ADIF" in header + return s + + +Open = AAC +error = AACError + +__all__ = ["AAC", "Open"] diff --git a/libs/mutagen/aiff.py b/libs/mutagen/aiff.py new file mode 100644 index 00000000..dc580063 --- /dev/null +++ b/libs/mutagen/aiff.py @@ -0,0 +1,357 @@ +# -*- coding: utf-8 -*- + +# Copyright (C) 2014 Evan Purkhiser +# 2014 Ben Ockmore +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of version 2 of the GNU General Public License as +# published by the Free Software Foundation. + +"""AIFF audio stream information and tags.""" + +import sys +import struct +from struct import pack + +from ._compat import endswith, text_type, reraise +from mutagen import StreamInfo, FileType + +from mutagen.id3 import ID3 +from mutagen.id3._util import ID3NoHeaderError, error as ID3Error +from mutagen._util import resize_bytes, delete_bytes, MutagenError + +__all__ = ["AIFF", "Open", "delete"] + + +class error(MutagenError, RuntimeError): + pass + + +class InvalidChunk(error, IOError): + pass + + +# based on stdlib's aifc +_HUGE_VAL = 1.79769313486231e+308 + + +def is_valid_chunk_id(id): + assert isinstance(id, text_type) + + return ((len(id) <= 4) and (min(id) >= u' ') and + (max(id) <= u'~')) + + +def read_float(data): # 10 bytes + expon, himant, lomant = struct.unpack('>hLL', data) + sign = 1 + if expon < 0: + sign = -1 + expon = expon + 0x8000 + if expon == himant == lomant == 0: + f = 0.0 + elif expon == 0x7FFF: + f = _HUGE_VAL + else: + expon = expon - 16383 + f = (himant * 0x100000000 + lomant) * pow(2.0, expon - 63) + return 
sign * f + + +class IFFChunk(object): + """Representation of a single IFF chunk""" + + # Chunk headers are 8 bytes long (4 for ID and 4 for the size) + HEADER_SIZE = 8 + + def __init__(self, fileobj, parent_chunk=None): + self.__fileobj = fileobj + self.parent_chunk = parent_chunk + self.offset = fileobj.tell() + + header = fileobj.read(self.HEADER_SIZE) + if len(header) < self.HEADER_SIZE: + raise InvalidChunk() + + self.id, self.data_size = struct.unpack('>4si', header) + + try: + self.id = self.id.decode('ascii') + except UnicodeDecodeError: + raise InvalidChunk() + + if not is_valid_chunk_id(self.id): + raise InvalidChunk() + + self.size = self.HEADER_SIZE + self.data_size + self.data_offset = fileobj.tell() + + def read(self): + """Read the chunks data""" + + self.__fileobj.seek(self.data_offset) + return self.__fileobj.read(self.data_size) + + def write(self, data): + """Write the chunk data""" + + if len(data) > self.data_size: + raise ValueError + + self.__fileobj.seek(self.data_offset) + self.__fileobj.write(data) + + def delete(self): + """Removes the chunk from the file""" + + delete_bytes(self.__fileobj, self.size, self.offset) + if self.parent_chunk is not None: + self.parent_chunk._update_size( + self.parent_chunk.data_size - self.size) + + def _update_size(self, data_size): + """Update the size of the chunk""" + + self.__fileobj.seek(self.offset + 4) + self.__fileobj.write(pack('>I', data_size)) + if self.parent_chunk is not None: + size_diff = self.data_size - data_size + self.parent_chunk._update_size( + self.parent_chunk.data_size - size_diff) + self.data_size = data_size + self.size = data_size + self.HEADER_SIZE + + def resize(self, new_data_size): + """Resize the file and update the chunk sizes""" + + resize_bytes( + self.__fileobj, self.data_size, new_data_size, self.data_offset) + self._update_size(new_data_size) + + +class IFFFile(object): + """Representation of a IFF file""" + + def __init__(self, fileobj): + self.__fileobj = fileobj + 
self.__chunks = {} + + # AIFF Files always start with the FORM chunk which contains a 4 byte + # ID before the start of other chunks + fileobj.seek(0) + self.__chunks[u'FORM'] = IFFChunk(fileobj) + + # Skip past the 4 byte FORM id + fileobj.seek(IFFChunk.HEADER_SIZE + 4) + + # Where the next chunk can be located. We need to keep track of this + # since the size indicated in the FORM header may not match up with the + # offset determined from the size of the last chunk in the file + self.__next_offset = fileobj.tell() + + # Load all of the chunks + while True: + try: + chunk = IFFChunk(fileobj, self[u'FORM']) + except InvalidChunk: + break + self.__chunks[chunk.id.strip()] = chunk + + # Calculate the location of the next chunk, + # considering the pad byte + self.__next_offset = chunk.offset + chunk.size + self.__next_offset += self.__next_offset % 2 + fileobj.seek(self.__next_offset) + + def __contains__(self, id_): + """Check if the IFF file contains a specific chunk""" + + assert isinstance(id_, text_type) + + if not is_valid_chunk_id(id_): + raise KeyError("AIFF key must be four ASCII characters.") + + return id_ in self.__chunks + + def __getitem__(self, id_): + """Get a chunk from the IFF file""" + + assert isinstance(id_, text_type) + + if not is_valid_chunk_id(id_): + raise KeyError("AIFF key must be four ASCII characters.") + + try: + return self.__chunks[id_] + except KeyError: + raise KeyError( + "%r has no %r chunk" % (self.__fileobj.name, id_)) + + def __delitem__(self, id_): + """Remove a chunk from the IFF file""" + + assert isinstance(id_, text_type) + + if not is_valid_chunk_id(id_): + raise KeyError("AIFF key must be four ASCII characters.") + + self.__chunks.pop(id_).delete() + + def insert_chunk(self, id_): + """Insert a new chunk at the end of the IFF file""" + + assert isinstance(id_, text_type) + + if not is_valid_chunk_id(id_): + raise KeyError("AIFF key must be four ASCII characters.") + + self.__fileobj.seek(self.__next_offset) + 
self.__fileobj.write(pack('>4si', id_.ljust(4).encode('ascii'), 0)) + self.__fileobj.seek(self.__next_offset) + chunk = IFFChunk(self.__fileobj, self[u'FORM']) + self[u'FORM']._update_size(self[u'FORM'].data_size + chunk.size) + + self.__chunks[id_] = chunk + self.__next_offset = chunk.offset + chunk.size + + +class AIFFInfo(StreamInfo): + """AIFF audio stream information. + + Information is parsed from the COMM chunk of the AIFF file + + Useful attributes: + + * length -- audio length, in seconds + * bitrate -- audio bitrate, in bits per second + * channels -- The number of audio channels + * sample_rate -- audio sample rate, in Hz + * sample_size -- The audio sample size + """ + + length = 0 + bitrate = 0 + channels = 0 + sample_rate = 0 + + def __init__(self, fileobj): + iff = IFFFile(fileobj) + try: + common_chunk = iff[u'COMM'] + except KeyError as e: + raise error(str(e)) + + data = common_chunk.read() + + info = struct.unpack('>hLh10s', data[:18]) + channels, frame_count, sample_size, sample_rate = info + + self.sample_rate = int(read_float(sample_rate)) + self.sample_size = sample_size + self.channels = channels + self.bitrate = channels * sample_size * self.sample_rate + self.length = frame_count / float(self.sample_rate) + + def pprint(self): + return u"%d channel AIFF @ %d bps, %s Hz, %.2f seconds" % ( + self.channels, self.bitrate, self.sample_rate, self.length) + + +class _IFFID3(ID3): + """A AIFF file with ID3v2 tags""" + + def _pre_load_header(self, fileobj): + try: + fileobj.seek(IFFFile(fileobj)[u'ID3'].data_offset) + except (InvalidChunk, KeyError): + raise ID3NoHeaderError("No ID3 chunk") + + def save(self, filename=None, v2_version=4, v23_sep='/', padding=None): + """Save ID3v2 data to the AIFF file""" + + if filename is None: + filename = self.filename + + # Unlike the parent ID3.save method, we won't save to a blank file + # since we would have to construct a empty AIFF file + with open(filename, 'rb+') as fileobj: + iff_file = 
IFFFile(fileobj) + + if u'ID3' not in iff_file: + iff_file.insert_chunk(u'ID3') + + chunk = iff_file[u'ID3'] + + try: + data = self._prepare_data( + fileobj, chunk.data_offset, chunk.data_size, v2_version, + v23_sep, padding) + except ID3Error as e: + reraise(error, e, sys.exc_info()[2]) + + new_size = len(data) + new_size += new_size % 2 # pad byte + assert new_size % 2 == 0 + chunk.resize(new_size) + data += (new_size - len(data)) * b'\x00' + assert new_size == len(data) + chunk.write(data) + + def delete(self, filename=None): + """Completely removes the ID3 chunk from the AIFF file""" + + if filename is None: + filename = self.filename + delete(filename) + self.clear() + + +def delete(filename): + """Completely removes the ID3 chunk from the AIFF file""" + + with open(filename, "rb+") as file_: + try: + del IFFFile(file_)[u'ID3'] + except KeyError: + pass + + +class AIFF(FileType): + """An AIFF audio file. + + :ivar info: :class:`AIFFInfo` + :ivar tags: :class:`ID3` + """ + + _mimes = ["audio/aiff", "audio/x-aiff"] + + @staticmethod + def score(filename, fileobj, header): + filename = filename.lower() + + return (header.startswith(b"FORM") * 2 + endswith(filename, b".aif") + + endswith(filename, b".aiff") + endswith(filename, b".aifc")) + + def add_tags(self): + """Add an empty ID3 tag to the file.""" + if self.tags is None: + self.tags = _IFFID3() + else: + raise error("an ID3 tag already exists") + + def load(self, filename, **kwargs): + """Load stream and tag information from a file.""" + self.filename = filename + + try: + self.tags = _IFFID3(filename, **kwargs) + except ID3NoHeaderError: + self.tags = None + except ID3Error as e: + raise error(e) + + with open(filename, "rb") as fileobj: + self.info = AIFFInfo(fileobj) + + +Open = AIFF diff --git a/libs/mutagen/apev2.py b/libs/mutagen/apev2.py index aa1e00e6..3b79aba9 100644 --- a/libs/mutagen/apev2.py +++ b/libs/mutagen/apev2.py @@ -1,6 +1,6 @@ -# An APEv2 tag reader -# -# Copyright 2005 Joe Wreschnig +# 
-*- coding: utf-8 -*- + +# Copyright (C) 2005 Joe Wreschnig # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 as @@ -30,30 +30,45 @@ http://wiki.hydrogenaudio.org/index.php?title=APEv2_specification. __all__ = ["APEv2", "APEv2File", "Open", "delete"] +import sys import struct -from cStringIO import StringIO +from collections import MutableSequence -from mutagen import Metadata, FileType -from mutagen._util import DictMixin, cdata, utf8, delete_bytes +from ._compat import (cBytesIO, PY3, text_type, PY2, reraise, swap_to_string, + xrange) +from mutagen import Metadata, FileType, StreamInfo +from mutagen._util import (DictMixin, cdata, delete_bytes, total_ordering, + MutagenError) def is_valid_apev2_key(key): - return (2 <= len(key) <= 255 and min(key) >= ' ' and max(key) <= '~' and - key not in ["OggS", "TAG", "ID3", "MP+"]) + if not isinstance(key, text_type): + if PY3: + raise TypeError("APEv2 key must be str") + + try: + key = key.decode('ascii') + except UnicodeDecodeError: + return False + + # PY26 - Change to set literal syntax (since set is faster than list here) + return ((2 <= len(key) <= 255) and (min(key) >= u' ') and + (max(key) <= u'~') and + (key not in [u"OggS", u"TAG", u"ID3", u"MP+"])) # There are three different kinds of APE tag values. # "0: Item contains text information coded in UTF-8 # 1: Item contains binary information # 2: Item is a locator of external stored information [e.g. 
URL] # 3: reserved" -TEXT, BINARY, EXTERNAL = range(3) +TEXT, BINARY, EXTERNAL = xrange(3) -HAS_HEADER = 1L << 31 -HAS_NO_FOOTER = 1L << 30 -IS_HEADER = 1L << 29 +HAS_HEADER = 1 << 31 +HAS_NO_FOOTER = 1 << 30 +IS_HEADER = 1 << 29 -class error(IOError): +class error(IOError, MutagenError): pass @@ -89,9 +104,17 @@ class _APEv2Data(object): def __init__(self, fileobj): self.__find_metadata(fileobj) - self.metadata = max(self.header, self.footer) + + if self.header is None: + self.metadata = self.footer + elif self.footer is None: + self.metadata = self.header + else: + self.metadata = max(self.header, self.footer) + if self.metadata is None: return + self.__fill_missing(fileobj) self.__fix_brokenness(fileobj) if self.data is not None: @@ -107,7 +130,7 @@ class _APEv2Data(object): except IOError: fileobj.seek(0, 2) return - if fileobj.read(8) == "APETAGEX": + if fileobj.read(8) == b"APETAGEX": fileobj.seek(-8, 1) self.footer = self.metadata = fileobj.tell() return @@ -115,10 +138,10 @@ class _APEv2Data(object): # Check for an APEv2 tag followed by an ID3v1 tag at the end. try: fileobj.seek(-128, 2) - if fileobj.read(3) == "TAG": + if fileobj.read(3) == b"TAG": fileobj.seek(-35, 1) # "TAG" + header length - if fileobj.read(8) == "APETAGEX": + if fileobj.read(8) == b"APETAGEX": fileobj.seek(-8, 1) self.footer = fileobj.tell() return @@ -127,7 +150,7 @@ class _APEv2Data(object): # (http://www.id3.org/lyrics3200.html) # (header length - "APETAGEX") - "LYRICS200" fileobj.seek(15, 1) - if fileobj.read(9) == 'LYRICS200': + if fileobj.read(9) == b'LYRICS200': fileobj.seek(-15, 1) # "LYRICS200" + size tag try: offset = int(fileobj.read(6)) @@ -135,7 +158,7 @@ class _APEv2Data(object): raise IOError fileobj.seek(-32 - offset - 6, 1) - if fileobj.read(8) == "APETAGEX": + if fileobj.read(8) == b"APETAGEX": fileobj.seek(-8, 1) self.footer = fileobj.tell() return @@ -145,7 +168,7 @@ class _APEv2Data(object): # Check for a tag at the start. 
fileobj.seek(0, 0) - if fileobj.read(8) == "APETAGEX": + if fileobj.read(8) == b"APETAGEX": self.is_at_start = True self.header = 0 @@ -162,7 +185,7 @@ class _APEv2Data(object): # offset + the size, which includes the footer. self.end = self.data + self.size fileobj.seek(self.end - 32, 0) - if fileobj.read(8) == "APETAGEX": + if fileobj.read(8) == b"APETAGEX": self.footer = self.end - 32 elif self.footer is not None: self.end = self.footer + 32 @@ -194,7 +217,7 @@ class _APEv2Data(object): except IOError: break else: - if fileobj.read(8) == "APETAGEX": + if fileobj.read(8) == b"APETAGEX": fileobj.seek(-8, 1) start = fileobj.tell() else: @@ -202,18 +225,12 @@ class _APEv2Data(object): self.start = start -class APEv2(DictMixin, Metadata): - """A file with an APEv2 tag. - - ID3v1 tags are silently ignored and overwritten. - """ - - filename = None +class _CIDictProxy(DictMixin): def __init__(self, *args, **kwargs): self.__casemap = {} self.__dict = {} - super(APEv2, self).__init__(*args, **kwargs) + super(_CIDictProxy, self).__init__(*args, **kwargs) # Internally all names are stored as lowercase, but the case # they were set with is remembered and used when saving. This # is roughly in line with the standard, which says that keys @@ -221,31 +238,54 @@ class APEv2(DictMixin, Metadata): # not allowed, and recommends case-insensitive # implementations. + def __getitem__(self, key): + return self.__dict[key.lower()] + + def __setitem__(self, key, value): + lower = key.lower() + self.__casemap[lower] = key + self.__dict[lower] = value + + def __delitem__(self, key): + lower = key.lower() + del(self.__casemap[lower]) + del(self.__dict[lower]) + + def keys(self): + return [self.__casemap.get(key, key) for key in self.__dict.keys()] + + +class APEv2(_CIDictProxy, Metadata): + """A file with an APEv2 tag. + + ID3v1 tags are silently ignored and overwritten. 
+ """ + + filename = None + def pprint(self): """Return tag key=value pairs in a human-readable format.""" - items = self.items() - items.sort() - return "\n".join(["%s=%s" % (k, v.pprint()) for k, v in items]) + + items = sorted(self.items()) + return u"\n".join(u"%s=%s" % (k, v.pprint()) for k, v in items) def load(self, filename): """Load tags from a filename.""" + self.filename = filename - fileobj = open(filename, "rb") - try: + with open(filename, "rb") as fileobj: data = _APEv2Data(fileobj) - finally: - fileobj.close() + if data.tag: self.clear() - self.__casemap.clear() self.__parse_tag(data.tag, data.items) else: raise APENoHeaderError("No APE tag found") def __parse_tag(self, tag, count): - fileobj = StringIO(tag) + fileobj = cBytesIO(tag) - for i in range(count): + for i in xrange(count): size_data = fileobj.read(4) # someone writes wrong item counts if not size_data: @@ -259,25 +299,37 @@ class APEv2(DictMixin, Metadata): if kind == 3: raise APEBadItemError("value type must be 0, 1, or 2") key = value = fileobj.read(1) - while key[-1:] != '\x00' and value: + while key[-1:] != b'\x00' and value: value = fileobj.read(1) key += value - if key[-1:] == "\x00": + if key[-1:] == b"\x00": key = key[:-1] + if PY3: + try: + key = key.decode("ascii") + except UnicodeError as err: + reraise(APEBadItemError, err, sys.exc_info()[2]) value = fileobj.read(size) - self[key] = APEValue(value, kind) + + value = _get_value_type(kind)._new(value) + + self[key] = value def __getitem__(self, key): if not is_valid_apev2_key(key): raise KeyError("%r is not a valid APEv2 key" % key) - key = key.encode('ascii') - return self.__dict[key.lower()] + if PY2: + key = key.encode('ascii') + + return super(APEv2, self).__getitem__(key) def __delitem__(self, key): if not is_valid_apev2_key(key): raise KeyError("%r is not a valid APEv2 key" % key) - key = key.encode('ascii') - del(self.__dict[key.lower()]) + if PY2: + key = key.encode('ascii') + + super(APEv2, self).__delitem__(key) def 
__setitem__(self, key, value): """'Magic' value setter. @@ -288,6 +340,9 @@ class APEv2(DictMixin, Metadata): as a list of string/Unicode values. If you pass in a string that is not valid UTF-8, it assumes it is a binary value. + Python 3: all bytes will be assumed to be a byte value, even + if they are valid utf-8. + If you need to force a specific type of value (e.g. binary data that also happens to be valid UTF-8, or an external reference), use the APEValue factory and set the value to the @@ -299,30 +354,40 @@ class APEv2(DictMixin, Metadata): if not is_valid_apev2_key(key): raise KeyError("%r is not a valid APEv2 key" % key) - key = key.encode('ascii') + + if PY2: + key = key.encode('ascii') if not isinstance(value, _APEValue): # let's guess at the content if we're not already a value... - if isinstance(value, unicode): + if isinstance(value, text_type): # unicode? we've got to be text. - value = APEValue(utf8(value), TEXT) + value = APEValue(value, TEXT) elif isinstance(value, list): + items = [] + for v in value: + if not isinstance(v, text_type): + if PY3: + raise TypeError("item in list not str") + v = v.decode("utf-8") + items.append(v) + # list? text. - value = APEValue("\0".join(map(utf8, value)), TEXT) + value = APEValue(u"\0".join(items), TEXT) else: - try: - value.decode("utf-8") - except UnicodeError: - # invalid UTF8 text, probably binary + if PY3: value = APEValue(value, BINARY) else: - # valid UTF8, probably text - value = APEValue(value, TEXT) - self.__casemap[key.lower()] = key - self.__dict[key.lower()] = value + try: + value.decode("utf-8") + except UnicodeError: + # invalid UTF8 text, probably binary + value = APEValue(value, BINARY) + else: + # valid UTF8, probably text + value = APEValue(value, TEXT) - def keys(self): - return [self.__casemap.get(key, key) for key in self.__dict.keys()] + super(APEv2, self).__setitem__(key, value) def save(self, filename=None): """Save changes to a file. 
@@ -348,41 +413,55 @@ class APEv2(DictMixin, Metadata): fileobj.truncate() fileobj.seek(0, 2) + tags = [] + for key, value in self.items(): + # Packed format for an item: + # 4B: Value length + # 4B: Value type + # Key name + # 1B: Null + # Key value + value_data = value._write() + if not isinstance(key, bytes): + key = key.encode("utf-8") + tag_data = bytearray() + tag_data += struct.pack("<2I", len(value_data), value.kind << 1) + tag_data += key + b"\0" + value_data + tags.append(bytes(tag_data)) + # "APE tags items should be sorted ascending by size... This is # not a MUST, but STRONGLY recommended. Actually the items should # be sorted by importance/byte, but this is not feasible." - tags = [v._internal(k) for k, v in self.items()] - tags.sort(lambda a, b: cmp(len(a), len(b))) + tags.sort(key=len) num_tags = len(tags) - tags = "".join(tags) + tags = b"".join(tags) - header = "APETAGEX%s%s" % ( - # version, tag size, item count, flags - struct.pack("<4I", 2000, len(tags) + 32, num_tags, - HAS_HEADER | IS_HEADER), - "\0" * 8) + header = bytearray(b"APETAGEX") + # version, tag size, item count, flags + header += struct.pack("<4I", 2000, len(tags) + 32, num_tags, + HAS_HEADER | IS_HEADER) + header += b"\0" * 8 fileobj.write(header) fileobj.write(tags) - footer = "APETAGEX%s%s" % ( - # version, tag size, item count, flags - struct.pack("<4I", 2000, len(tags) + 32, num_tags, - HAS_HEADER), - "\0" * 8) + footer = bytearray(b"APETAGEX") + footer += struct.pack("<4I", 2000, len(tags) + 32, num_tags, + HAS_HEADER) + footer += b"\0" * 8 + fileobj.write(footer) fileobj.close() def delete(self, filename=None): """Remove tags from a file.""" + filename = filename or self.filename - fileobj = open(filename, "r+b") - try: + with open(filename, "r+b") as fileobj: data = _APEv2Data(fileobj) if data.start is not None and data.size is not None: delete_bytes(fileobj, data.end - data.start, data.start) - finally: - fileobj.close() + self.clear() @@ -391,105 +470,212 @@ Open = APEv2 
def delete(filename): """Remove tags from a file.""" + try: APEv2(filename).delete() except APENoHeaderError: pass +def _get_value_type(kind): + """Returns a _APEValue subclass or raises ValueError""" + + if kind == TEXT: + return APETextValue + elif kind == BINARY: + return APEBinaryValue + elif kind == EXTERNAL: + return APEExtValue + raise ValueError("unknown kind %r" % kind) + + def APEValue(value, kind): """APEv2 tag value factory. Use this if you need to specify the value's type manually. Binary and text data are automatically detected by APEv2.__setitem__. """ - if kind == TEXT: - return APETextValue(value, kind) - elif kind == BINARY: - return APEBinaryValue(value, kind) - elif kind == EXTERNAL: - return APEExtValue(value, kind) - else: + + try: + type_ = _get_value_type(kind) + except ValueError: raise ValueError("kind must be TEXT, BINARY, or EXTERNAL") + else: + return type_(value) class _APEValue(object): - def __init__(self, value, kind): - self.kind = kind - self.value = value - def __len__(self): - return len(self.value) + kind = None + value = None - def __str__(self): - return self.value + def __init__(self, value, kind=None): + # kind kwarg is for backwards compat + if kind is not None and kind != self.kind: + raise ValueError + self.value = self._validate(value) - # Packed format for an item: - # 4B: Value length - # 4B: Value type - # Key name - # 1B: Null - # Key value - def _internal(self, key): - return "%s%s\0%s" % ( - struct.pack("<2I", len(self.value), self.kind << 1), - key, self.value) + @classmethod + def _new(cls, data): + instance = cls.__new__(cls) + instance._parse(data) + return instance + + def _parse(self, data): + """Sets value or raises APEBadItemError""" + + raise NotImplementedError + + def _write(self): + """Returns bytes""" + + raise NotImplementedError + + def _validate(self, value): + """Returns validated value or raises TypeError/ValueErrr""" + + raise NotImplementedError def __repr__(self): return "%s(%r, %d)" % 
(type(self).__name__, self.value, self.kind) -class APETextValue(_APEValue): +@swap_to_string +@total_ordering +class _APEUtf8Value(_APEValue): + + def _parse(self, data): + try: + self.value = data.decode("utf-8") + except UnicodeDecodeError as e: + reraise(APEBadItemError, e, sys.exc_info()[2]) + + def _validate(self, value): + if not isinstance(value, text_type): + if PY3: + raise TypeError("value not str") + else: + value = value.decode("utf-8") + return value + + def _write(self): + return self.value.encode("utf-8") + + def __len__(self): + return len(self.value) + + def __bytes__(self): + return self._write() + + def __eq__(self, other): + return self.value == other + + def __lt__(self, other): + return self.value < other + + def __str__(self): + return self.value + + +class APETextValue(_APEUtf8Value, MutableSequence): """An APEv2 text value. Text values are Unicode/UTF-8 strings. They can be accessed like - strings (with a null seperating the values), or arrays of strings.""" + strings (with a null separating the values), or arrays of strings. 
+ """ - def __unicode__(self): - return unicode(str(self), "utf-8") + kind = TEXT def __iter__(self): """Iterate over the strings of the value (not the characters)""" - return iter(unicode(self).split("\0")) + + return iter(self.value.split(u"\0")) def __getitem__(self, index): - return unicode(self).split("\0")[index] + return self.value.split(u"\0")[index] def __len__(self): - return self.value.count("\0") + 1 - - def __cmp__(self, other): - return cmp(unicode(self), other) - - __hash__ = _APEValue.__hash__ + return self.value.count(u"\0") + 1 def __setitem__(self, index, value): + if not isinstance(value, text_type): + if PY3: + raise TypeError("value not str") + else: + value = value.decode("utf-8") + values = list(self) - values[index] = value.encode("utf-8") - self.value = "\0".join(values).encode("utf-8") + values[index] = value + self.value = u"\0".join(values) + + def insert(self, index, value): + if not isinstance(value, text_type): + if PY3: + raise TypeError("value not str") + else: + value = value.decode("utf-8") + + values = list(self) + values.insert(index, value) + self.value = u"\0".join(values) + + def __delitem__(self, index): + values = list(self) + del values[index] + self.value = u"\0".join(values) def pprint(self): - return " / ".join(self) + return u" / ".join(self) +@swap_to_string +@total_ordering class APEBinaryValue(_APEValue): """An APEv2 binary value.""" + kind = BINARY + + def _parse(self, data): + self.value = data + + def _write(self): + return self.value + + def _validate(self, value): + if not isinstance(value, bytes): + raise TypeError("value not bytes") + return bytes(value) + + def __len__(self): + return len(self.value) + + def __bytes__(self): + return self._write() + + def __eq__(self, other): + return self.value == other + + def __lt__(self, other): + return self.value < other + def pprint(self): - return "[%d bytes]" % len(self) + return u"[%d bytes]" % len(self) -class APEExtValue(_APEValue): +class 
APEExtValue(_APEUtf8Value): """An APEv2 external value. External values are usually URI or IRI strings. """ + + kind = EXTERNAL + def pprint(self): - return "[External] %s" % unicode(self) + return u"[External] %s" % self.value class APEv2File(FileType): - class _Info(object): + class _Info(StreamInfo): length = 0 bitrate = 0 @@ -498,21 +684,21 @@ class APEv2File(FileType): @staticmethod def pprint(): - return "Unknown format with APEv2 tag." + return u"Unknown format with APEv2 tag." def load(self, filename): self.filename = filename self.info = self._Info(open(filename, "rb")) try: self.tags = APEv2(filename) - except error: + except APENoHeaderError: self.tags = None def add_tags(self): if self.tags is None: self.tags = APEv2() else: - raise ValueError("%r already has tags: %r" % (self, self.tags)) + raise error("%r already has tags: %r" % (self, self.tags)) @staticmethod def score(filename, fileobj, header): @@ -521,5 +707,4 @@ class APEv2File(FileType): except IOError: fileobj.seek(0) footer = fileobj.read() - filename = filename.lower() - return (("APETAGEX" in footer) - header.startswith("ID3")) + return ((b"APETAGEX" in footer) - header.startswith(b"ID3")) diff --git a/libs/mutagen/asf.py b/libs/mutagen/asf.py deleted file mode 100644 index fab5559b..00000000 --- a/libs/mutagen/asf.py +++ /dev/null @@ -1,704 +0,0 @@ -# Copyright 2006-2007 Lukas Lalinsky -# Copyright 2005-2006 Joe Wreschnig -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 2 as -# published by the Free Software Foundation. 
- -"""Read and write ASF (Window Media Audio) files.""" - -__all__ = ["ASF", "Open"] - -import struct -from mutagen import FileType, Metadata -from mutagen._util import insert_bytes, delete_bytes, DictMixin - - -class error(IOError): - pass - - -class ASFError(error): - pass - - -class ASFHeaderError(error): - pass - - -class ASFInfo(object): - """ASF stream information.""" - - def __init__(self): - self.length = 0.0 - self.sample_rate = 0 - self.bitrate = 0 - self.channels = 0 - - def pprint(self): - s = "Windows Media Audio %d bps, %s Hz, %d channels, %.2f seconds" % ( - self.bitrate, self.sample_rate, self.channels, self.length) - return s - - -class ASFTags(list, DictMixin, Metadata): - """Dictionary containing ASF attributes.""" - - def pprint(self): - return "\n".join(["%s=%s" % (k, v) for k, v in self]) - - def __getitem__(self, key): - """A list of values for the key. - - This is a copy, so comment['title'].append('a title') will not - work. - - """ - values = [value for (k, value) in self if k == key] - if not values: - raise KeyError(key) - else: - return values - - def __delitem__(self, key): - """Delete all values associated with the key.""" - to_delete = filter(lambda x: x[0] == key, self) - if not to_delete: - raise KeyError(key) - else: - map(self.remove, to_delete) - - def __contains__(self, key): - """Return true if the key has any values.""" - for k, value in self: - if k == key: - return True - else: - return False - - def __setitem__(self, key, values): - """Set a key's value or values. - - Setting a value overwrites all old ones. The value may be a - list of Unicode or UTF-8 strings, or a single Unicode or UTF-8 - string. 
- - """ - if not isinstance(values, list): - values = [values] - try: - del(self[key]) - except KeyError: - pass - for value in values: - if key in _standard_attribute_names: - value = unicode(value) - elif not isinstance(value, ASFBaseAttribute): - if isinstance(value, basestring): - value = ASFUnicodeAttribute(value) - elif isinstance(value, bool): - value = ASFBoolAttribute(value) - elif isinstance(value, int): - value = ASFDWordAttribute(value) - elif isinstance(value, long): - value = ASFQWordAttribute(value) - self.append((key, value)) - - def keys(self): - """Return all keys in the comment.""" - return self and set(zip(*self)[0]) - - def as_dict(self): - """Return a copy of the comment data in a real dict.""" - d = {} - for key, value in self: - d.setdefault(key, []).append(value) - return d - - -class ASFBaseAttribute(object): - """Generic attribute.""" - TYPE = None - - def __init__(self, value=None, data=None, language=None, - stream=None, **kwargs): - self.language = language - self.stream = stream - if data: - self.value = self.parse(data, **kwargs) - else: - self.value = value - - def data_size(self): - raise NotImplementedError - - def __repr__(self): - name = "%s(%r" % (type(self).__name__, self.value) - if self.language: - name += ", language=%d" % self.language - if self.stream: - name += ", stream=%d" % self.stream - name += ")" - return name - - def render(self, name): - name = name.encode("utf-16-le") + "\x00\x00" - data = self._render() - return (struct.pack(" 0: - texts.append(data[pos:end].decode("utf-16-le").strip("\x00")) - else: - texts.append(None) - pos = end - title, author, copyright, desc, rating = texts - for key, value in dict( - Title=title, - Author=author, - Copyright=copyright, - Description=desc, - Rating=rating - ).items(): - if value is not None: - asf.tags[key] = value - - def render(self, asf): - def render_text(name): - value = asf.tags.get(name, []) - if value: - return value[0].encode("utf-16-le") + "\x00\x00" - else: - 
return "" - texts = map(render_text, _standard_attribute_names) - data = struct.pack(" 0xFFFF or value.TYPE == GUID) - if (value.language is None and value.stream is None and - name not in self.to_extended_content_description and - not library_only): - self.to_extended_content_description[name] = value - elif (value.language is None and value.stream is not None and - name not in self.to_metadata and not library_only): - self.to_metadata[name] = value - else: - self.to_metadata_library.append((name, value)) - - # Add missing objects - if not self.content_description_obj: - self.content_description_obj = \ - ContentDescriptionObject() - self.objects.append(self.content_description_obj) - if not self.extended_content_description_obj: - self.extended_content_description_obj = \ - ExtendedContentDescriptionObject() - self.objects.append(self.extended_content_description_obj) - if not self.header_extension_obj: - self.header_extension_obj = \ - HeaderExtensionObject() - self.objects.append(self.header_extension_obj) - if not self.metadata_obj: - self.metadata_obj = \ - MetadataObject() - self.header_extension_obj.objects.append(self.metadata_obj) - if not self.metadata_library_obj: - self.metadata_library_obj = \ - MetadataLibraryObject() - self.header_extension_obj.objects.append(self.metadata_library_obj) - - # Render the header - data = "".join([obj.render(self) for obj in self.objects]) - data = (HeaderObject.GUID + - struct.pack(" self.size: - insert_bytes(fileobj, size - self.size, self.size) - if size < self.size: - delete_bytes(fileobj, self.size - size, 0) - fileobj.seek(0) - fileobj.write(data) - finally: - fileobj.close() - - self.size = size - self.num_objects = len(self.objects) - - def __read_file(self, fileobj): - header = fileobj.read(30) - if len(header) != 30 or header[:16] != HeaderObject.GUID: - raise ASFHeaderError("Not an ASF file.") - - self.extended_content_description_obj = None - self.content_description_obj = None - self.header_extension_obj = 
None - self.metadata_obj = None - self.metadata_library_obj = None - - self.size, self.num_objects = struct.unpack(" 0xFFFF or value.TYPE == GUID) + can_cont_desc = value.TYPE == UNICODE + + if library_only or value.language is not None: + self.to_metadata_library.append((name, value)) + elif value.stream is not None: + if name not in self.to_metadata: + self.to_metadata[name] = value + else: + self.to_metadata_library.append((name, value)) + elif name in ContentDescriptionObject.NAMES: + if name not in self.to_content_description and can_cont_desc: + self.to_content_description[name] = value + else: + self.to_metadata_library.append((name, value)) + else: + if name not in self.to_extended_content_description: + self.to_extended_content_description[name] = value + else: + self.to_metadata_library.append((name, value)) + + # Add missing objects + header = self._header + if header.get_child(ContentDescriptionObject.GUID) is None: + header.objects.append(ContentDescriptionObject()) + if header.get_child(ExtendedContentDescriptionObject.GUID) is None: + header.objects.append(ExtendedContentDescriptionObject()) + header_ext = header.get_child(HeaderExtensionObject.GUID) + if header_ext is None: + header_ext = HeaderExtensionObject() + header.objects.append(header_ext) + if header_ext.get_child(MetadataObject.GUID) is None: + header_ext.objects.append(MetadataObject()) + if header_ext.get_child(MetadataLibraryObject.GUID) is None: + header_ext.objects.append(MetadataLibraryObject()) + + # Render to file + with open(self.filename, "rb+") as fileobj: + old_size = header.parse_size(fileobj)[0] + data = header.render_full(self, fileobj, old_size, padding) + size = len(data) + resize_bytes(fileobj, old_size, size, 0) + fileobj.seek(0) + fileobj.write(data) + + def add_tags(self): + raise ASFError + + def delete(self, filename=None): + + if filename is not None and filename != self.filename: + raise ValueError("saving to another file not supported atm") + + self.tags.clear() + 
self.save(padding=lambda x: 0) + + @staticmethod + def score(filename, fileobj, header): + return header.startswith(HeaderObject.GUID) * 2 + +Open = ASF diff --git a/libs/mutagen/asf/_attrs.py b/libs/mutagen/asf/_attrs.py new file mode 100644 index 00000000..4621c9fa --- /dev/null +++ b/libs/mutagen/asf/_attrs.py @@ -0,0 +1,438 @@ +# -*- coding: utf-8 -*- +# Copyright (C) 2005-2006 Joe Wreschnig +# Copyright (C) 2006-2007 Lukas Lalinsky +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 as +# published by the Free Software Foundation. + +import sys +import struct + +from mutagen._compat import swap_to_string, text_type, PY2, reraise +from mutagen._util import total_ordering + +from ._util import ASFError + + +class ASFBaseAttribute(object): + """Generic attribute.""" + + TYPE = None + + _TYPES = {} + + value = None + """The Python value of this attribute (type depends on the class)""" + + language = None + """Language""" + + stream = None + """Stream""" + + def __init__(self, value=None, data=None, language=None, + stream=None, **kwargs): + self.language = language + self.stream = stream + if data: + self.value = self.parse(data, **kwargs) + else: + if value is None: + # we used to support not passing any args and instead assign + # them later, keep that working.. + self.value = None + else: + self.value = self._validate(value) + + @classmethod + def _register(cls, other): + cls._TYPES[other.TYPE] = other + return other + + @classmethod + def _get_type(cls, type_): + """Raises KeyError""" + + return cls._TYPES[type_] + + def _validate(self, value): + """Raises TypeError or ValueError in case the user supplied value + isn't valid. 
+ """ + + return value + + def data_size(self): + raise NotImplementedError + + def __repr__(self): + name = "%s(%r" % (type(self).__name__, self.value) + if self.language: + name += ", language=%d" % self.language + if self.stream: + name += ", stream=%d" % self.stream + name += ")" + return name + + def render(self, name): + name = name.encode("utf-16-le") + b"\x00\x00" + data = self._render() + return (struct.pack("" % ( + type(self).__name__, bytes2guid(self.GUID), self.objects) + + def pprint(self): + l = [] + l.append("%s(%s)" % (type(self).__name__, bytes2guid(self.GUID))) + for o in self.objects: + for e in o.pprint().splitlines(): + l.append(" " + e) + return "\n".join(l) + + +class UnknownObject(BaseObject): + """Unknown ASF object.""" + + def __init__(self, guid): + super(UnknownObject, self).__init__() + assert isinstance(guid, bytes) + self.GUID = guid + + +@BaseObject._register +class HeaderObject(BaseObject): + """ASF header.""" + + GUID = guid2bytes("75B22630-668E-11CF-A6D9-00AA0062CE6C") + + @classmethod + def parse_full(cls, asf, fileobj): + """Raises ASFHeaderError""" + + header = cls() + + remaining_header, num_objects = cls.parse_size(fileobj) + remaining_header -= 30 + + for i in xrange(num_objects): + obj_header_size = 24 + if remaining_header < obj_header_size: + raise ASFHeaderError("invalid header size") + data = fileobj.read(obj_header_size) + if len(data) != obj_header_size: + raise ASFHeaderError("truncated") + remaining_header -= obj_header_size + + guid, size = struct.unpack("<16sQ", data) + obj = BaseObject._get_object(guid) + + payload_size = size - obj_header_size + if remaining_header < payload_size: + raise ASFHeaderError("invalid object size") + remaining_header -= payload_size + + try: + data = fileobj.read(payload_size) + except OverflowError: + # read doesn't take 64bit values + raise ASFHeaderError("invalid header size") + if len(data) != payload_size: + raise ASFHeaderError("truncated") + + obj.parse(asf, data) + 
header.objects.append(obj) + + return header + + @classmethod + def parse_size(cls, fileobj): + """Returns (size, num_objects) + + Raises ASFHeaderError + """ + + header = fileobj.read(30) + if len(header) != 30 or header[:16] != HeaderObject.GUID: + raise ASFHeaderError("Not an ASF file.") + + return struct.unpack("= 0 + info = PaddingInfo(available - needed_size, content_size) + + # add padding + padding = info._get_padding(padding_func) + padding_obj.parse(asf, b"\x00" * padding) + data += padding_obj.render(asf) + num_objects += 1 + + data = (HeaderObject.GUID + + struct.pack(" 0: + texts.append(data[pos:end].decode("utf-16-le").strip(u"\x00")) + else: + texts.append(None) + pos = end + + for key, value in izip(self.NAMES, texts): + if value is not None: + value = ASFUnicodeAttribute(value=value) + asf._tags.setdefault(self.GUID, []).append((key, value)) + + def render(self, asf): + def render_text(name): + value = asf.to_content_description.get(name) + if value is not None: + return text_type(value).encode("utf-16-le") + b"\x00\x00" + else: + return b"" + + texts = [render_text(x) for x in self.NAMES] + data = struct.pack("= 0 + asf.info.length = max((length / 10000000.0) - (preroll / 1000.0), 0.0) + + +@BaseObject._register +class StreamPropertiesObject(BaseObject): + """Stream properties.""" + + GUID = guid2bytes("B7DC0791-A9B7-11CF-8EE6-00C00C205365") + + def parse(self, asf, data): + super(StreamPropertiesObject, self).parse(asf, data) + channels, sample_rate, bitrate = struct.unpack("H", int(s[19:23], 16)), + p(">Q", int(s[24:], 16))[2:], + ]) + + +def bytes2guid(s): + """Converts a serialized GUID to a text GUID""" + + assert isinstance(s, bytes) + + u = struct.unpack + v = [] + v.extend(u("HQ", s[8:10] + b"\x00\x00" + s[10:])) + return "%08X-%04X-%04X-%04X-%012X" % tuple(v) + + +# Names from http://windows.microsoft.com/en-za/windows7/c00d10d1-[0-9A-F]{1,4} +CODECS = { + 0x0000: u"Unknown Wave Format", + 0x0001: u"Microsoft PCM Format", + 0x0002: 
u"Microsoft ADPCM Format", + 0x0003: u"IEEE Float", + 0x0004: u"Compaq Computer VSELP", + 0x0005: u"IBM CVSD", + 0x0006: u"Microsoft CCITT A-Law", + 0x0007: u"Microsoft CCITT u-Law", + 0x0008: u"Microsoft DTS", + 0x0009: u"Microsoft DRM", + 0x000A: u"Windows Media Audio 9 Voice", + 0x000B: u"Windows Media Audio 10 Voice", + 0x000C: u"OGG Vorbis", + 0x000D: u"FLAC", + 0x000E: u"MOT AMR", + 0x000F: u"Nice Systems IMBE", + 0x0010: u"OKI ADPCM", + 0x0011: u"Intel IMA ADPCM", + 0x0012: u"Videologic MediaSpace ADPCM", + 0x0013: u"Sierra Semiconductor ADPCM", + 0x0014: u"Antex Electronics G.723 ADPCM", + 0x0015: u"DSP Solutions DIGISTD", + 0x0016: u"DSP Solutions DIGIFIX", + 0x0017: u"Dialogic OKI ADPCM", + 0x0018: u"MediaVision ADPCM", + 0x0019: u"Hewlett-Packard CU codec", + 0x001A: u"Hewlett-Packard Dynamic Voice", + 0x0020: u"Yamaha ADPCM", + 0x0021: u"Speech Compression SONARC", + 0x0022: u"DSP Group True Speech", + 0x0023: u"Echo Speech EchoSC1", + 0x0024: u"Ahead Inc. Audiofile AF36", + 0x0025: u"Audio Processing Technology APTX", + 0x0026: u"Ahead Inc. AudioFile AF10", + 0x0027: u"Aculab Prosody 1612", + 0x0028: u"Merging Technologies S.A. 
LRC", + 0x0030: u"Dolby Labs AC2", + 0x0031: u"Microsoft GSM 6.10", + 0x0032: u"Microsoft MSNAudio", + 0x0033: u"Antex Electronics ADPCME", + 0x0034: u"Control Resources VQLPC", + 0x0035: u"DSP Solutions Digireal", + 0x0036: u"DSP Solutions DigiADPCM", + 0x0037: u"Control Resources CR10", + 0x0038: u"Natural MicroSystems VBXADPCM", + 0x0039: u"Crystal Semiconductor IMA ADPCM", + 0x003A: u"Echo Speech EchoSC3", + 0x003B: u"Rockwell ADPCM", + 0x003C: u"Rockwell DigiTalk", + 0x003D: u"Xebec Multimedia Solutions", + 0x0040: u"Antex Electronics G.721 ADPCM", + 0x0041: u"Antex Electronics G.728 CELP", + 0x0042: u"Intel G.723", + 0x0043: u"Intel G.723.1", + 0x0044: u"Intel G.729 Audio", + 0x0045: u"Sharp G.726 Audio", + 0x0050: u"Microsoft MPEG-1", + 0x0052: u"InSoft RT24", + 0x0053: u"InSoft PAC", + 0x0055: u"MP3 - MPEG Layer III", + 0x0059: u"Lucent G.723", + 0x0060: u"Cirrus Logic", + 0x0061: u"ESS Technology ESPCM", + 0x0062: u"Voxware File-Mode", + 0x0063: u"Canopus Atrac", + 0x0064: u"APICOM G.726 ADPCM", + 0x0065: u"APICOM G.722 ADPCM", + 0x0066: u"Microsoft DSAT", + 0x0067: u"Microsoft DSAT Display", + 0x0069: u"Voxware Byte Aligned", + 0x0070: u"Voxware AC8", + 0x0071: u"Voxware AC10", + 0x0072: u"Voxware AC16", + 0x0073: u"Voxware AC20", + 0x0074: u"Voxware RT24 MetaVoice", + 0x0075: u"Voxware RT29 MetaSound", + 0x0076: u"Voxware RT29HW", + 0x0077: u"Voxware VR12", + 0x0078: u"Voxware VR18", + 0x0079: u"Voxware TQ40", + 0x007A: u"Voxware SC3", + 0x007B: u"Voxware SC3", + 0x0080: u"Softsound", + 0x0081: u"Voxware TQ60", + 0x0082: u"Microsoft MSRT24", + 0x0083: u"AT&T Labs G.729A", + 0x0084: u"Motion Pixels MVI MV12", + 0x0085: u"DataFusion Systems G.726", + 0x0086: u"DataFusion Systems GSM610", + 0x0088: u"Iterated Systems ISIAudio", + 0x0089: u"Onlive", + 0x008A: u"Multitude FT SX20", + 0x008B: u"Infocom ITS ACM G.721", + 0x008C: u"Convedia G.729", + 0x008D: u"Congruency Audio", + 0x0091: u"Siemens Business Communications SBC24", + 0x0092: u"Sonic Foundry Dolby 
AC3 SPDIF", + 0x0093: u"MediaSonic G.723", + 0x0094: u"Aculab Prosody 8KBPS", + 0x0097: u"ZyXEL ADPCM", + 0x0098: u"Philips LPCBB", + 0x0099: u"Studer Professional Audio AG Packed", + 0x00A0: u"Malden Electronics PHONYTALK", + 0x00A1: u"Racal Recorder GSM", + 0x00A2: u"Racal Recorder G720.a", + 0x00A3: u"Racal Recorder G723.1", + 0x00A4: u"Racal Recorder Tetra ACELP", + 0x00B0: u"NEC AAC", + 0x00FF: u"CoreAAC Audio", + 0x0100: u"Rhetorex ADPCM", + 0x0101: u"BeCubed Software IRAT", + 0x0111: u"Vivo G.723", + 0x0112: u"Vivo Siren", + 0x0120: u"Philips CELP", + 0x0121: u"Philips Grundig", + 0x0123: u"Digital G.723", + 0x0125: u"Sanyo ADPCM", + 0x0130: u"Sipro Lab Telecom ACELP.net", + 0x0131: u"Sipro Lab Telecom ACELP.4800", + 0x0132: u"Sipro Lab Telecom ACELP.8V3", + 0x0133: u"Sipro Lab Telecom ACELP.G.729", + 0x0134: u"Sipro Lab Telecom ACELP.G.729A", + 0x0135: u"Sipro Lab Telecom ACELP.KELVIN", + 0x0136: u"VoiceAge AMR", + 0x0140: u"Dictaphone G.726 ADPCM", + 0x0141: u"Dictaphone CELP68", + 0x0142: u"Dictaphone CELP54", + 0x0150: u"Qualcomm PUREVOICE", + 0x0151: u"Qualcomm HALFRATE", + 0x0155: u"Ring Zero Systems TUBGSM", + 0x0160: u"Windows Media Audio Standard", + 0x0161: u"Windows Media Audio 9 Standard", + 0x0162: u"Windows Media Audio 9 Professional", + 0x0163: u"Windows Media Audio 9 Lossless", + 0x0164: u"Windows Media Audio Pro over SPDIF", + 0x0170: u"Unisys NAP ADPCM", + 0x0171: u"Unisys NAP ULAW", + 0x0172: u"Unisys NAP ALAW", + 0x0173: u"Unisys NAP 16K", + 0x0174: u"Sycom ACM SYC008", + 0x0175: u"Sycom ACM SYC701 G725", + 0x0176: u"Sycom ACM SYC701 CELP54", + 0x0177: u"Sycom ACM SYC701 CELP68", + 0x0178: u"Knowledge Adventure ADPCM", + 0x0180: u"Fraunhofer IIS MPEG-2 AAC", + 0x0190: u"Digital Theater Systems DTS", + 0x0200: u"Creative Labs ADPCM", + 0x0202: u"Creative Labs FastSpeech8", + 0x0203: u"Creative Labs FastSpeech10", + 0x0210: u"UHER informatic GmbH ADPCM", + 0x0215: u"Ulead DV Audio", + 0x0216: u"Ulead DV Audio", + 0x0220: u"Quarterdeck", + 
0x0230: u"I-link Worldwide ILINK VC", + 0x0240: u"Aureal Semiconductor RAW SPORT", + 0x0249: u"Generic Passthru", + 0x0250: u"Interactive Products HSX", + 0x0251: u"Interactive Products RPELP", + 0x0260: u"Consistent Software CS2", + 0x0270: u"Sony SCX", + 0x0271: u"Sony SCY", + 0x0272: u"Sony ATRAC3", + 0x0273: u"Sony SPC", + 0x0280: u"Telum Audio", + 0x0281: u"Telum IA Audio", + 0x0285: u"Norcom Voice Systems ADPCM", + 0x0300: u"Fujitsu TOWNS SND", + 0x0350: u"Micronas SC4 Speech", + 0x0351: u"Micronas CELP833", + 0x0400: u"Brooktree BTV Digital", + 0x0401: u"Intel Music Coder", + 0x0402: u"Intel Audio", + 0x0450: u"QDesign Music", + 0x0500: u"On2 AVC0 Audio", + 0x0501: u"On2 AVC1 Audio", + 0x0680: u"AT&T Labs VME VMPCM", + 0x0681: u"AT&T Labs TPC", + 0x08AE: u"ClearJump Lightwave Lossless", + 0x1000: u"Olivetti GSM", + 0x1001: u"Olivetti ADPCM", + 0x1002: u"Olivetti CELP", + 0x1003: u"Olivetti SBC", + 0x1004: u"Olivetti OPR", + 0x1100: u"Lernout & Hauspie", + 0x1101: u"Lernout & Hauspie CELP", + 0x1102: u"Lernout & Hauspie SBC8", + 0x1103: u"Lernout & Hauspie SBC12", + 0x1104: u"Lernout & Hauspie SBC16", + 0x1400: u"Norris Communication", + 0x1401: u"ISIAudio", + 0x1500: u"AT&T Labs Soundspace Music Compression", + 0x1600: u"Microsoft MPEG ADTS AAC", + 0x1601: u"Microsoft MPEG RAW AAC", + 0x1608: u"Nokia MPEG ADTS AAC", + 0x1609: u"Nokia MPEG RAW AAC", + 0x181C: u"VoxWare MetaVoice RT24", + 0x1971: u"Sonic Foundry Lossless", + 0x1979: u"Innings Telecom ADPCM", + 0x1FC4: u"NTCSoft ALF2CD ACM", + 0x2000: u"Dolby AC3", + 0x2001: u"DTS", + 0x4143: u"Divio AAC", + 0x4201: u"Nokia Adaptive Multi-Rate", + 0x4243: u"Divio G.726", + 0x4261: u"ITU-T H.261", + 0x4263: u"ITU-T H.263", + 0x4264: u"ITU-T H.264", + 0x674F: u"Ogg Vorbis Mode 1", + 0x6750: u"Ogg Vorbis Mode 2", + 0x6751: u"Ogg Vorbis Mode 3", + 0x676F: u"Ogg Vorbis Mode 1+", + 0x6770: u"Ogg Vorbis Mode 2+", + 0x6771: u"Ogg Vorbis Mode 3+", + 0x7000: u"3COM NBX Audio", + 0x706D: u"FAAD AAC Audio", + 0x77A1: 
u"True Audio Lossless Audio", + 0x7A21: u"GSM-AMR CBR 3GPP Audio", + 0x7A22: u"GSM-AMR VBR 3GPP Audio", + 0xA100: u"Comverse Infosys G723.1", + 0xA101: u"Comverse Infosys AVQSBC", + 0xA102: u"Comverse Infosys SBC", + 0xA103: u"Symbol Technologies G729a", + 0xA104: u"VoiceAge AMR WB", + 0xA105: u"Ingenient Technologies G.726", + 0xA106: u"ISO/MPEG-4 Advanced Audio Coding (AAC)", + 0xA107: u"Encore Software Ltd's G.726", + 0xA108: u"ZOLL Medical Corporation ASAO", + 0xA109: u"Speex Voice", + 0xA10A: u"Vianix MASC Speech Compression", + 0xA10B: u"Windows Media 9 Spectrum Analyzer Output", + 0xA10C: u"Media Foundation Spectrum Analyzer Output", + 0xA10D: u"GSM 6.10 (Full-Rate) Speech", + 0xA10E: u"GSM 6.20 (Half-Rate) Speech", + 0xA10F: u"GSM 6.60 (Enchanced Full-Rate) Speech", + 0xA110: u"GSM 6.90 (Adaptive Multi-Rate) Speech", + 0xA111: u"GSM Adaptive Multi-Rate WideBand Speech", + 0xA112: u"Polycom G.722", + 0xA113: u"Polycom G.728", + 0xA114: u"Polycom G.729a", + 0xA115: u"Polycom Siren", + 0xA116: u"Global IP Sound ILBC", + 0xA117: u"Radio Time Time Shifted Radio", + 0xA118: u"Nice Systems ACA", + 0xA119: u"Nice Systems ADPCM", + 0xA11A: u"Vocord Group ITU-T G.721", + 0xA11B: u"Vocord Group ITU-T G.726", + 0xA11C: u"Vocord Group ITU-T G.722.1", + 0xA11D: u"Vocord Group ITU-T G.728", + 0xA11E: u"Vocord Group ITU-T G.729", + 0xA11F: u"Vocord Group ITU-T G.729a", + 0xA120: u"Vocord Group ITU-T G.723.1", + 0xA121: u"Vocord Group LBC", + 0xA122: u"Nice G.728", + 0xA123: u"France Telecom G.729 ACM Audio", + 0xA124: u"CODIAN Audio", + 0xCC12: u"Intel YUV12 Codec", + 0xCFCC: u"Digital Processing Systems Perception Motion JPEG", + 0xD261: u"DEC H.261", + 0xD263: u"DEC H.263", + 0xFFFE: u"Extensible Wave Format", + 0xFFFF: u"Unregistered", +} diff --git a/libs/mutagen/easyid3.py b/libs/mutagen/easyid3.py index e69a5453..f8dd2de0 100644 --- a/libs/mutagen/easyid3.py +++ b/libs/mutagen/easyid3.py @@ -1,5 +1,6 @@ -# Simpler (but far more limited) API for ID3 editing -# 
Copyright 2006 Joe Wreschnig +# -*- coding: utf-8 -*- + +# Copyright (C) 2006 Joe Wreschnig # # This program is free software; you can redistribute it and/or modify # it under the terms of version 2 of the GNU General Public License as @@ -13,6 +14,7 @@ more like Vorbis or APEv2 tags. import mutagen.id3 +from ._compat import iteritems, text_type, PY2 from mutagen import Metadata from mutagen._util import DictMixin, dict_match from mutagen.id3 import ID3, error, delete, ID3FileType @@ -154,6 +156,8 @@ class EasyID3(DictMixin, Metadata): for v in value: if v and max(v) > u'\x7f': enc = 3 + break + id3.add(mutagen.id3.TXXX(encoding=enc, text=value, desc=desc)) else: frame.text = value @@ -171,8 +175,10 @@ class EasyID3(DictMixin, Metadata): load = property(lambda s: s.__id3.load, lambda s, v: setattr(s.__id3, 'load', v)) - save = property(lambda s: s.__id3.save, - lambda s, v: setattr(s.__id3, 'save', v)) + def save(self, *args, **kwargs): + # ignore v2_version until we support 2.3 here + kwargs.pop("v2_version", None) + self.__id3.save(*args, **kwargs) delete = property(lambda s: s.__id3.delete, lambda s, v: setattr(s.__id3, 'delete', v)) @@ -193,8 +199,12 @@ class EasyID3(DictMixin, Metadata): def __setitem__(self, key, value): key = key.lower() - if isinstance(value, basestring): - value = [value] + if PY2: + if isinstance(value, basestring): + value = [value] + else: + if isinstance(value, text_type): + value = [value] func = dict_match(self.Set, key, self.SetFallback) if func is not None: return func(self.__id3, key, value) @@ -263,6 +273,18 @@ def date_delete(id3, key): del(id3["TDRC"]) +def original_date_get(id3, key): + return [stamp.text for stamp in id3["TDOR"].text] + + +def original_date_set(id3, key, value): + id3.add(mutagen.id3.TDOR(encoding=3, text=value)) + + +def original_date_delete(id3, key): + del(id3["TDOR"]) + + def performer_get(id3, key): people = [] wanted_role = key.split(":", 1)[1] @@ -433,7 +455,7 @@ def peakgain_list(id3, key): 
keys.append("replaygain_%s_peak" % frame.desc) return keys -for frameid, key in { +for frameid, key in iteritems({ "TALB": "album", "TBPM": "bpm", "TCMP": "compilation", # iTunes extension @@ -461,18 +483,20 @@ for frameid, key in { "TSOT": "titlesort", "TSRC": "isrc", "TSST": "discsubtitle", -}.iteritems(): + "TLAN": "language", +}): EasyID3.RegisterTextKey(key, frameid) EasyID3.RegisterKey("genre", genre_get, genre_set, genre_delete) EasyID3.RegisterKey("date", date_get, date_set, date_delete) +EasyID3.RegisterKey("originaldate", original_date_get, original_date_set, + original_date_delete) EasyID3.RegisterKey( "performer:*", performer_get, performer_set, performer_delete, performer_list) EasyID3.RegisterKey("musicbrainz_trackid", musicbrainz_trackid_get, musicbrainz_trackid_set, musicbrainz_trackid_delete) EasyID3.RegisterKey("website", website_get, website_set, website_delete) -EasyID3.RegisterKey("website", website_get, website_set, website_delete) EasyID3.RegisterKey( "replaygain_*_gain", gain_get, gain_set, gain_delete, peakgain_list) EasyID3.RegisterKey("replaygain_*_peak", peak_get, peak_set, peak_delete) @@ -481,7 +505,7 @@ EasyID3.RegisterKey("replaygain_*_peak", peak_get, peak_set, peak_delete) # http://musicbrainz.org/docs/specs/metadata_tags.html # http://bugs.musicbrainz.org/ticket/1383 # http://musicbrainz.org/doc/MusicBrainzTag -for desc, key in { +for desc, key in iteritems({ u"MusicBrainz Artist Id": "musicbrainz_artistid", u"MusicBrainz Album Id": "musicbrainz_albumid", u"MusicBrainz Album Artist Id": "musicbrainz_albumartistid", @@ -495,7 +519,13 @@ for desc, key in { u"ASIN": "asin", u"ALBUMARTISTSORT": "albumartistsort", u"BARCODE": "barcode", -}.iteritems(): + u"CATALOGNUMBER": "catalognumber", + u"MusicBrainz Release Track Id": "musicbrainz_releasetrackid", + u"MusicBrainz Release Group Id": "musicbrainz_releasegroupid", + u"MusicBrainz Work Id": "musicbrainz_workid", + u"Acoustid Fingerprint": "acoustid_fingerprint", + u"Acoustid Id": 
"acoustid_id", +}): EasyID3.RegisterTXXXKey(key, desc) diff --git a/libs/mutagen/easymp4.py b/libs/mutagen/easymp4.py index 3abacccc..8ad7fd0e 100644 --- a/libs/mutagen/easymp4.py +++ b/libs/mutagen/easymp4.py @@ -1,12 +1,16 @@ -# Copyright 2009 Joe Wreschnig +# -*- coding: utf-8 -*- + +# Copyright (C) 2009 Joe Wreschnig # # This program is free software; you can redistribute it and/or modify # it under the terms of version 2 of the GNU General Public License as # published by the Free Software Foundation. -from mutagen import Metadata -from mutagen._util import DictMixin, dict_match, utf8 +from mutagen import Tags +from mutagen._util import DictMixin, dict_match from mutagen.mp4 import MP4, MP4Tags, error, delete +from ._compat import PY2, text_type, PY3 + __all__ = ["EasyMP4Tags", "EasyMP4", "delete", "error"] @@ -15,14 +19,14 @@ class EasyMP4KeyError(error, KeyError, ValueError): pass -class EasyMP4Tags(DictMixin, Metadata): +class EasyMP4Tags(DictMixin, Tags): """A file with MPEG-4 iTunes metadata. Like Vorbis comments, EasyMP4Tags keys are case-insensitive ASCII strings, and values are a list of Unicode strings (and these lists are always of length 0 or 1). - If you need access to the full MP4 metadata feature set, you should use + If you need access to the full MP4 metadata feature set, you should use MP4, not EasyMP4. """ @@ -36,6 +40,7 @@ class EasyMP4Tags(DictMixin, Metadata): self.load = self.__mp4.load self.save = self.__mp4.save self.delete = self.__mp4.delete + self._padding = self.__mp4._padding filename = property(lambda s: s.__mp4.filename, lambda s, fn: setattr(s.__mp4, 'filename', fn)) @@ -91,16 +96,16 @@ class EasyMP4Tags(DictMixin, Metadata): cls.RegisterKey(key, getter, setter, deleter) @classmethod - def RegisterIntKey(cls, key, atomid, min_value=0, max_value=2**16-1): + def RegisterIntKey(cls, key, atomid, min_value=0, max_value=(2 ** 16) - 1): """Register a scalar integer key. 
""" def getter(tags, key): - return map(unicode, tags[atomid]) + return list(map(text_type, tags[atomid])) def setter(tags, key, value): clamp = lambda x: int(min(max(min_value, x), max_value)) - tags[atomid] = map(clamp, map(int, value)) + tags[atomid] = [clamp(v) for v in map(int, value)] def deleter(tags, key): del(tags[atomid]) @@ -108,14 +113,15 @@ class EasyMP4Tags(DictMixin, Metadata): cls.RegisterKey(key, getter, setter, deleter) @classmethod - def RegisterIntPairKey(cls, key, atomid, min_value=0, max_value=2**16-1): + def RegisterIntPairKey(cls, key, atomid, min_value=0, + max_value=(2 ** 16) - 1): def getter(tags, key): ret = [] for (track, total) in tags[atomid]: if total: ret.append(u"%d/%d" % (track, total)) else: - ret.append(unicode(track)) + ret.append(text_type(track)) return ret def setter(tags, key, value): @@ -148,13 +154,20 @@ class EasyMP4Tags(DictMixin, Metadata): EasyMP4Tags.RegisterFreeformKey( "musicbrainz_artistid", "MusicBrainz Artist Id") """ - atomid = "----:%s:%s" % (mean, name) + atomid = "----:" + mean + ":" + name def getter(tags, key): return [s.decode("utf-8", "replace") for s in tags[atomid]] def setter(tags, key, value): - tags[atomid] = map(utf8, value) + encoded = [] + for v in value: + if not isinstance(v, text_type): + if PY3: + raise TypeError("%r not str" % v) + v = v.decode("utf-8") + encoded.append(v.encode("utf-8")) + tags[atomid] = encoded def deleter(tags, key): del(tags[atomid]) @@ -171,8 +184,14 @@ class EasyMP4Tags(DictMixin, Metadata): def __setitem__(self, key, value): key = key.lower() - if isinstance(value, basestring): - value = [value] + + if PY2: + if isinstance(value, basestring): + value = [value] + else: + if isinstance(value, text_type): + value = [value] + func = dict_match(self.Set, key) if func is not None: return func(self.__mp4, key, value) diff --git a/libs/mutagen/flac.py b/libs/mutagen/flac.py index f8e014bc..f3cc5ab5 100644 --- a/libs/mutagen/flac.py +++ b/libs/mutagen/flac.py @@ -1,5 +1,6 @@ 
-# FLAC comment support for Mutagen -# Copyright 2005 Joe Wreschnig +# -*- coding: utf-8 -*- + +# Copyright (C) 2005 Joe Wreschnig # # This program is free software; you can redistribute it and/or modify # it under the terms of version 2 of the GNU General Public License as @@ -22,17 +23,17 @@ http://flac.sourceforge.net/format.html __all__ = ["FLAC", "Open", "delete"] import struct -from cStringIO import StringIO -from _vorbis import VCommentDict -from mutagen import FileType -from mutagen._util import insert_bytes +from ._vorbis import VCommentDict +import mutagen + +from ._compat import cBytesIO, endswith, chr_, xrange +from mutagen._util import resize_bytes, MutagenError, get_size +from mutagen._tags import PaddingInfo from mutagen.id3 import BitPaddedInt -import sys -if sys.version_info >= (2, 6): - from functools import reduce +from functools import reduce -class error(IOError): +class error(IOError, MutagenError): pass @@ -44,10 +45,10 @@ class FLACVorbisError(ValueError, error): pass -def to_int_be(string): +def to_int_be(data): """Convert an arbitrarily-long string to a long using big-endian byte order.""" - return reduce(lambda a, b: (a << 8) + ord(b), string, 0L) + return reduce(lambda a, b: (a << 8) + b, bytearray(data), 0) class StrictFileObject(object): @@ -83,14 +84,23 @@ class MetadataBlock(object): """ _distrust_size = False + """For block types setting this, we don't trust the size field and + use the size of the content instead.""" + + _invalid_overflow_size = -1 + """In case the real size was bigger than what is representable by the + 24 bit size field, we save the wrong specified size here. This can + only be set if _distrust_size is True""" + + _MAX_SIZE = 2 ** 24 - 1 def __init__(self, data): """Parse the given data string or file-like as a metadata block. 
The metadata header should not be included.""" if data is not None: if not isinstance(data, StrictFileObject): - if isinstance(data, str): - data = StringIO(data) + if isinstance(data, bytes): + data = cBytesIO(data) elif not hasattr(data, 'read'): raise TypeError( "StreamInfo requires string data or a file-like") @@ -103,37 +113,61 @@ class MetadataBlock(object): def write(self): return self.data - @staticmethod - def writeblocks(blocks): - """Render metadata block as a byte string.""" - data = [] - codes = [[block.code, block.write()] for block in blocks] - codes[-1][0] |= 128 - for code, datum in codes: - byte = chr(code) - if len(datum) > 2**24: + @classmethod + def _writeblock(cls, block, is_last=False): + """Returns the block content + header. + + Raises error. + """ + + data = bytearray() + code = (block.code | 128) if is_last else block.code + datum = block.write() + size = len(datum) + if size > cls._MAX_SIZE: + if block._distrust_size and block._invalid_overflow_size != -1: + # The original size of this block was (1) wrong and (2) + # the real size doesn't allow us to save the file + # according to the spec (too big for 24 bit uint). Instead + # simply write back the original wrong size.. at least + # we don't make the file more "broken" as it is. + size = block._invalid_overflow_size + else: raise error("block is too long to write") - length = struct.pack(">I", len(datum))[-3:] - data.append(byte + length + datum) - return "".join(data) + assert not size > cls._MAX_SIZE + length = struct.pack(">I", size)[-3:] + data.append(code) + data += length + data += datum + return data - @staticmethod - def group_padding(blocks): - """Consolidate FLAC padding metadata blocks. 
+ @classmethod + def _writeblocks(cls, blocks, available, cont_size, padding_func): + """Render metadata block as a byte string.""" - The overall size of the rendered blocks does not change, so - this adds several bytes of padding for each merged block.""" - paddings = filter(lambda x: isinstance(x, Padding), blocks) - map(blocks.remove, paddings) - # total padding size is the sum of padding sizes plus 4 bytes - # per removed header. - size = sum([padding.length for padding in paddings]) - padding = Padding() - padding.length = size + 4 * (len(paddings) - 1) - blocks.append(padding) + # write everything except padding + data = bytearray() + for block in blocks: + if isinstance(block, Padding): + continue + data += cls._writeblock(block) + blockssize = len(data) + + # take the padding overhead into account. we always add one + # to make things simple. + padding_block = Padding() + blockssize += len(cls._writeblock(padding_block)) + + # finally add a padding block + info = PaddingInfo(available - blockssize, cont_size) + padding_block.length = min(info._get_padding(padding_func), + cls._MAX_SIZE) + data += cls._writeblock(padding_block, is_last=True) + + return data -class StreamInfo(MetadataBlock): +class StreamInfo(MetadataBlock, mutagen.StreamInfo): """FLAC stream information. This contains information about the audio data in the FLAC file. 
@@ -188,13 +222,13 @@ class StreamInfo(MetadataBlock): bps_tail = bps_total >> 36 bps_head = (sample_channels_bps & 1) << 4 self.bits_per_sample = int(bps_head + bps_tail + 1) - self.total_samples = bps_total & 0xFFFFFFFFFL + self.total_samples = bps_total & 0xFFFFFFFFF self.length = self.total_samples / float(self.sample_rate) self.md5_signature = to_int_be(data.read(16)) def write(self): - f = StringIO() + f = cBytesIO() f.write(struct.pack(">I", self.min_blocksize)[-2:]) f.write(struct.pack(">I", self.max_blocksize)[-2:]) f.write(struct.pack(">I", self.min_framesize)[-3:]) @@ -206,22 +240,22 @@ class StreamInfo(MetadataBlock): byte = (self.sample_rate & 0xF) << 4 byte += ((self.channels - 1) & 7) << 1 byte += ((self.bits_per_sample - 1) >> 4) & 1 - f.write(chr(byte)) + f.write(chr_(byte)) # 4 bits of bps, 4 of sample count byte = ((self.bits_per_sample - 1) & 0xF) << 4 byte += (self.total_samples >> 32) & 0xF - f.write(chr(byte)) + f.write(chr_(byte)) # last 32 of sample count - f.write(struct.pack(">I", self.total_samples & 0xFFFFFFFFL)) + f.write(struct.pack(">I", self.total_samples & 0xFFFFFFFF)) # MD5 signature sig = self.md5_signature f.write(struct.pack( - ">4I", (sig >> 96) & 0xFFFFFFFFL, (sig >> 64) & 0xFFFFFFFFL, - (sig >> 32) & 0xFFFFFFFFL, sig & 0xFFFFFFFFL)) + ">4I", (sig >> 96) & 0xFFFFFFFF, (sig >> 64) & 0xFFFFFFFF, + (sig >> 32) & 0xFFFFFFFF, sig & 0xFFFFFFFF)) return f.getvalue() def pprint(self): - return "FLAC, %.2f seconds, %d Hz" % (self.length, self.sample_rate) + return u"FLAC, %.2f seconds, %d Hz" % (self.length, self.sample_rate) class SeekPoint(tuple): @@ -284,7 +318,7 @@ class SeekTable(MetadataBlock): sp = data.tryread(self.__SEEKPOINT_SIZE) def write(self): - f = StringIO() + f = cBytesIO() for seekpoint in self.seekpoints: packed = struct.pack( self.__SEEKPOINT_FORMAT, @@ -378,10 +412,10 @@ class CueSheetTrack(object): __hash__ = object.__hash__ def __repr__(self): - return ("<%s number=%r, offset=%d, isrc=%r, type=%r, " - 
"pre_emphasis=%r, indexes=%r)>") % ( - type(self).__name__, self.track_number, self.start_offset, - self.isrc, self.type, self.pre_emphasis, self.indexes) + return (("<%s number=%r, offset=%d, isrc=%r, type=%r, " + "pre_emphasis=%r, indexes=%r)>") % + (type(self).__name__, self.track_number, self.start_offset, + self.isrc, self.type, self.pre_emphasis, self.indexes)) class CueSheet(MetadataBlock): @@ -409,7 +443,7 @@ class CueSheet(MetadataBlock): code = 5 - media_catalog_number = '' + media_catalog_number = b'' lead_in_samples = 88200 compact_disc = True @@ -432,20 +466,20 @@ class CueSheet(MetadataBlock): header = data.read(self.__CUESHEET_SIZE) media_catalog_number, lead_in_samples, flags, num_tracks = \ struct.unpack(self.__CUESHEET_FORMAT, header) - self.media_catalog_number = media_catalog_number.rstrip('\0') + self.media_catalog_number = media_catalog_number.rstrip(b'\0') self.lead_in_samples = lead_in_samples self.compact_disc = bool(flags & 0x80) self.tracks = [] - for i in range(num_tracks): + for i in xrange(num_tracks): track = data.read(self.__CUESHEET_TRACK_SIZE) start_offset, track_number, isrc_padded, flags, num_indexes = \ struct.unpack(self.__CUESHEET_TRACK_FORMAT, track) - isrc = isrc_padded.rstrip('\0') + isrc = isrc_padded.rstrip(b'\0') type_ = (flags & 0x80) >> 7 pre_emphasis = bool(flags & 0x40) val = CueSheetTrack( track_number, start_offset, isrc, type_, pre_emphasis) - for j in range(num_indexes): + for j in xrange(num_indexes): index = data.read(self.__CUESHEET_TRACKINDEX_SIZE) index_offset, index_number = struct.unpack( self.__CUESHEET_TRACKINDEX_FORMAT, index) @@ -454,7 +488,7 @@ class CueSheet(MetadataBlock): self.tracks.append(val) def write(self): - f = StringIO() + f = cBytesIO() flags = 0 if self.compact_disc: flags |= 0x80 @@ -480,10 +514,10 @@ class CueSheet(MetadataBlock): return f.getvalue() def __repr__(self): - return ("<%s media_catalog_number=%r, lead_in=%r, compact_disc=%r, " - "tracks=%r>") % ( - type(self).__name__, 
self.media_catalog_number, - self.lead_in_samples, self.compact_disc, self.tracks) + return (("<%s media_catalog_number=%r, lead_in=%r, compact_disc=%r, " + "tracks=%r>") % + (type(self).__name__, self.media_catalog_number, + self.lead_in_samples, self.compact_disc, self.tracks)) class Picture(MetadataBlock): @@ -500,6 +534,21 @@ class Picture(MetadataBlock): * colors -- number of colors for indexed palettes (like GIF), 0 for non-indexed * data -- picture data + + To create a picture from file (in order to add to a FLAC file), + instantiate this object without passing anything to the constructor and + then set the properties manually:: + + p = Picture() + + with open("Folder.jpg", "rb") as f: + pic.data = f.read() + + pic.type = id3.PictureType.COVER_FRONT + pic.mime = u"image/jpeg" + pic.width = 500 + pic.height = 500 + pic.depth = 16 # color depth """ code = 6 @@ -513,7 +562,7 @@ class Picture(MetadataBlock): self.height = 0 self.depth = 0 self.colors = 0 - self.data = '' + self.data = b'' super(Picture, self).__init__(data) def __eq__(self, other): @@ -541,7 +590,7 @@ class Picture(MetadataBlock): self.data = data.read(length) def write(self): - f = StringIO() + f = cBytesIO() mime = self.mime.encode('UTF-8') f.write(struct.pack('>2I', self.type, len(mime))) f.write(mime) @@ -563,13 +612,12 @@ class Padding(MetadataBlock): To avoid rewriting the entire FLAC file when editing comments, metadata is often padded. Padding should occur at the end, and no - more than one padding block should be in any FLAC file. Mutagen - handles this with MetadataBlock.group_padding. + more than one padding block should be in any FLAC file. 
""" code = 1 - def __init__(self, data=""): + def __init__(self, data=b""): super(Padding, self).__init__(data) def load(self, data): @@ -577,7 +625,7 @@ class Padding(MetadataBlock): def write(self): try: - return "\x00" * self.length + return b"\x00" * self.length # On some 64 bit platforms this won't generate a MemoryError # or OverflowError since you might have enough RAM, but it # still generates a ValueError. On other 64 bit platforms, @@ -596,28 +644,32 @@ class Padding(MetadataBlock): return "<%s (%d bytes)>" % (type(self).__name__, self.length) -class FLAC(FileType): +class FLAC(mutagen.FileType): """A FLAC audio file. Attributes: - * info -- stream information (length, bitrate, sample rate) - * tags -- metadata tags, if any * cuesheet -- CueSheet object, if any * seektable -- SeekTable object, if any * pictures -- list of embedded pictures """ - _mimes = ["audio/x-flac", "application/x-flac"] + _mimes = ["audio/flac", "audio/x-flac", "application/x-flac"] + + info = None + """A `StreamInfo`""" + + tags = None + """A `VCommentDict`""" METADATA_BLOCKS = [StreamInfo, Padding, None, SeekTable, VCFLACDict, CueSheet, Picture] """Known metadata block types, indexed by ID.""" @staticmethod - def score(filename, fileobj, header): - return (header.startswith("fLaC") + - filename.lower().endswith(".flac") * 3) + def score(filename, fileobj, header_data): + return (header_data.startswith(b"fLaC") + + endswith(filename.lower(), ".flac") * 3) def __read_metadata_block(self, fileobj): byte = ord(fileobj.read(1)) @@ -637,10 +689,14 @@ class FLAC(FileType): # so we have to too. Instead of parsing the size # given, parse an actual Vorbis comment, leaving # fileobj in the right position. 
- # http://code.google.com/p/mutagen/issues/detail?id=52 + # https://github.com/quodlibet/mutagen/issues/52 # ..same for the Picture block: - # http://code.google.com/p/mutagen/issues/detail?id=106 + # https://github.com/quodlibet/mutagen/issues/106 + start = fileobj.tell() block = block_type(fileobj) + real_size = fileobj.tell() - start + if real_size > MetadataBlock._MAX_SIZE: + block._invalid_overflow_size = size else: data = fileobj.read(size) block = block_type(data) @@ -681,12 +737,12 @@ class FLAC(FileType): """ if filename is None: filename = self.filename - for s in list(self.metadata_blocks): - if isinstance(s, VCFLACDict): - self.metadata_blocks.remove(s) - self.tags = None - self.save() - break + + if self.tags is not None: + self.metadata_blocks.remove(self.tags) + self.save(padding=lambda x: 0) + self.metadata_blocks.append(self.tags) + self.tags.clear() vc = property(lambda s: s.tags, doc="Alias for tags; don't use this.") @@ -721,15 +777,17 @@ class FLAC(FileType): def clear_pictures(self): """Delete all pictures from the file.""" - self.metadata_blocks = filter(lambda b: b.code != Picture.code, - self.metadata_blocks) + + blocks = [b for b in self.metadata_blocks if b.code != Picture.code] + self.metadata_blocks = blocks @property def pictures(self): """List of embedded pictures""" - return filter(lambda b: b.code == Picture.code, self.metadata_blocks) - def save(self, filename=None, deleteid3=False): + return [b for b in self.metadata_blocks if b.code == Picture.code] + + def save(self, filename=None, deleteid3=False, padding=None): """Save metadata blocks to a file. If no filename is given, the one most recently loaded is used. @@ -737,46 +795,28 @@ class FLAC(FileType): if filename is None: filename = self.filename - f = open(filename, 'rb+') - - try: - # Ensure we've got padding at the end, and only at the end. - # If adding makes it too large, we'll scale it down later. 
- self.metadata_blocks.append(Padding('\x00' * 1020)) - MetadataBlock.group_padding(self.metadata_blocks) + with open(filename, 'rb+') as f: header = self.__check_header(f) + audio_offset = self.__find_audio_offset(f) # "fLaC" and maybe ID3 - available = self.__find_audio_offset(f) - header - data = MetadataBlock.writeblocks(self.metadata_blocks) + available = audio_offset - header # Delete ID3v2 if deleteid3 and header > 4: available += header - 4 header = 4 - if len(data) > available: - # If we have too much data, see if we can reduce padding. - padding = self.metadata_blocks[-1] - newlength = padding.length - (len(data) - available) - if newlength > 0: - padding.length = newlength - data = MetadataBlock.writeblocks(self.metadata_blocks) - assert len(data) == available - - elif len(data) < available: - # If we have too little data, increase padding. - self.metadata_blocks[-1].length += (available - len(data)) - data = MetadataBlock.writeblocks(self.metadata_blocks) - assert len(data) == available - - if len(data) != available: - # We couldn't reduce the padding enough. - diff = (len(data) - available) - insert_bytes(f, diff, header) + content_size = get_size(f) - audio_offset + assert content_size >= 0 + data = MetadataBlock._writeblocks( + self.metadata_blocks, available, content_size, padding) + data_size = len(data) + resize_bytes(f, available, data_size, header) f.seek(header - 4) - f.write("fLaC" + data) + f.write(b"fLaC") + f.write(data) # Delete ID3v1 if deleteid3: @@ -785,11 +825,9 @@ class FLAC(FileType): except IOError: pass else: - if f.read(3) == "TAG": + if f.read(3) == b"TAG": f.seek(-128, 2) f.truncate() - finally: - f.close() def __find_audio_offset(self, fileobj): byte = 0x00 @@ -810,14 +848,19 @@ class FLAC(FileType): return fileobj.tell() def __check_header(self, fileobj): + """Returns the offset of the flac block start + (skipping id3 tags if found). The passed fileobj will be advanced to + that offset as well. 
+ """ + size = 4 header = fileobj.read(4) - if header != "fLaC": + if header != b"fLaC": size = None - if header[:3] == "ID3": + if header[:3] == b"ID3": size = 14 + BitPaddedInt(fileobj.read(6)[2:]) fileobj.seek(size - 4) - if fileobj.read(4) != "fLaC": + if fileobj.read(4) != b"fLaC": size = None if size is None: raise FLACNoHeaderError( diff --git a/libs/mutagen/id3.py b/libs/mutagen/id3/__init__.py similarity index 54% rename from libs/mutagen/id3.py rename to libs/mutagen/id3/__init__.py index 27d30e90..11bf54ed 100644 --- a/libs/mutagen/id3.py +++ b/libs/mutagen/id3/__init__.py @@ -1,4 +1,5 @@ -# id3 support for mutagen +# -*- coding: utf-8 -*- + # Copyright (C) 2005 Michael Urman # 2006 Lukas Lalinsky # 2013 Christoph Reiter @@ -32,15 +33,134 @@ interested in the :class:`ID3` class to start with. __all__ = ['ID3', 'ID3FileType', 'Frames', 'Open', 'delete'] import struct +import errno from struct import unpack, pack, error as StructError import mutagen -from mutagen._util import insert_bytes, delete_bytes, DictProxy +from mutagen._util import insert_bytes, delete_bytes, DictProxy, enum +from mutagen._tags import PaddingInfo +from .._compat import chr_, PY3 -from mutagen._id3util import * -from mutagen._id3frames import * -from mutagen._id3specs import * +from ._util import * +from ._frames import * +from ._specs import * + + +@enum +class ID3v1SaveOptions(object): + + REMOVE = 0 + """ID3v1 tags will be removed""" + + UPDATE = 1 + """ID3v1 tags will be updated but not added""" + + CREATE = 2 + """ID3v1 tags will be created and/or updated""" + + +def _fullread(fileobj, size): + """Read a certain number of bytes from the source file. + + Raises ValueError on invalid size input or EOFError/IOError. 
+ """ + + if size < 0: + raise ValueError('Requested bytes (%s) less than zero' % size) + data = fileobj.read(size) + if len(data) != size: + raise EOFError("Not enough data to read") + return data + + +class ID3Header(object): + + _V24 = (2, 4, 0) + _V23 = (2, 3, 0) + _V22 = (2, 2, 0) + _V11 = (1, 1) + + f_unsynch = property(lambda s: bool(s._flags & 0x80)) + f_extended = property(lambda s: bool(s._flags & 0x40)) + f_experimental = property(lambda s: bool(s._flags & 0x20)) + f_footer = property(lambda s: bool(s._flags & 0x10)) + + def __init__(self, fileobj=None): + """Raises ID3NoHeaderError, ID3UnsupportedVersionError or error""" + + if fileobj is None: + # for testing + self._flags = 0 + return + + fn = getattr(fileobj, "name", "") + try: + data = _fullread(fileobj, 10) + except EOFError: + raise ID3NoHeaderError("%s: too small" % fn) + + id3, vmaj, vrev, flags, size = unpack('>3sBBB4s', data) + self._flags = flags + self.size = BitPaddedInt(size) + 10 + self.version = (2, vmaj, vrev) + + if id3 != b'ID3': + raise ID3NoHeaderError("%r doesn't start with an ID3 tag" % fn) + + if vmaj not in [2, 3, 4]: + raise ID3UnsupportedVersionError("%r ID3v2.%d not supported" + % (fn, vmaj)) + + if not BitPaddedInt.has_valid_padding(size): + raise error("Header size not synchsafe") + + if (self.version >= self._V24) and (flags & 0x0f): + raise error( + "%r has invalid flags %#02x" % (fn, flags)) + elif (self._V23 <= self.version < self._V24) and (flags & 0x1f): + raise error( + "%r has invalid flags %#02x" % (fn, flags)) + + if self.f_extended: + try: + extsize_data = _fullread(fileobj, 4) + except EOFError: + raise error("%s: too small" % fn) + + if PY3: + frame_id = extsize_data.decode("ascii", "replace") + else: + frame_id = extsize_data + + if frame_id in Frames: + # Some tagger sets the extended header flag but + # doesn't write an extended header; in this case, the + # ID3 data follows immediately. 
Since no extended + # header is going to be long enough to actually match + # a frame, and if it's *not* a frame we're going to be + # completely lost anyway, this seems to be the most + # correct check. + # https://github.com/quodlibet/quodlibet/issues/126 + self._flags ^= 0x40 + extsize = 0 + fileobj.seek(-4, 1) + elif self.version >= self._V24: + # "Where the 'Extended header size' is the size of the whole + # extended header, stored as a 32 bit synchsafe integer." + extsize = BitPaddedInt(extsize_data) - 4 + if not BitPaddedInt.has_valid_padding(extsize_data): + raise error( + "Extended header size not synchsafe") + else: + # "Where the 'Extended header size', currently 6 or 10 bytes, + # excludes itself." + extsize = unpack('>L', extsize_data)[0] + + try: + self._extdata = _fullread(fileobj, extsize) + except EOFError: + raise error("%s: too small" % fn) class ID3(DictProxy, mutagen.Metadata): @@ -53,39 +173,53 @@ class ID3(DictProxy, mutagen.Metadata): * size -- the total size of the ID3 tag, including the header """ + __module__ = "mutagen.id3" + PEDANTIC = True - version = (2, 4, 0) + """Deprecated. 
Doesn't have any effect""" filename = None - size = 0 - __flags = 0 - __readbytes = 0 - __crc = None - __unknown_version = None - - _V24 = (2, 4, 0) - _V23 = (2, 3, 0) - _V22 = (2, 2, 0) - _V11 = (1, 1) def __init__(self, *args, **kwargs): self.unknown_frames = [] + self.__unknown_version = None + self._header = None + self._version = (2, 4, 0) super(ID3, self).__init__(*args, **kwargs) - def __fullread(self, size): - try: - if size < 0: - raise ValueError('Requested bytes (%s) less than zero' % size) - if size > self.__filesize: - raise EOFError('Requested %#x of %#x (%s)' % ( - long(size), long(self.__filesize), self.filename)) - except AttributeError: - pass - data = self.__fileobj.read(size) - if len(data) != size: - raise EOFError - self.__readbytes += size - return data + @property + def version(self): + """ID3 tag version as a tuple (of the loaded file)""" + + if self._header is not None: + return self._header.version + return self._version + + @version.setter + def version(self, value): + self._version = value + + @property + def f_unsynch(self): + if self._header is not None: + return self._header.f_unsynch + return False + + @property + def f_extended(self): + if self._header is not None: + return self._header.f_extended + return False + + @property + def size(self): + if self._header is not None: + return self._header.size + return 0 + + def _pre_load_header(self, fileobj): + # XXX: for aiff to adjust the offset.. + pass def load(self, filename, known_frames=None, translate=True, v2_version=4): """Load tags from a filename. 
@@ -107,60 +241,53 @@ class ID3(DictProxy, mutagen.Metadata): mutagen.id3.ID3(filename, known_frames=my_frames) """ - if not v2_version in (3, 4): + if v2_version not in (3, 4): raise ValueError("Only 3 and 4 possible for v2_version") - from os.path import getsize - self.filename = filename + self.unknown_frames = [] self.__known_frames = known_frames - self.__fileobj = open(filename, 'rb') - self.__filesize = getsize(filename) - try: + self._header = None + self._padding = 0 # for testing + + with open(filename, 'rb') as fileobj: + self._pre_load_header(fileobj) + try: - self.__load_header() - except EOFError: - self.size = 0 - raise ID3NoHeaderError("%s: too small (%d bytes)" % ( - filename, self.__filesize)) - except (ID3NoHeaderError, ID3UnsupportedVersionError), err: - self.size = 0 - import sys - stack = sys.exc_info()[2] - try: - self.__fileobj.seek(-128, 2) - except EnvironmentError: - raise err, None, stack - else: - frames = ParseID3v1(self.__fileobj.read(128)) - if frames is not None: - self.version = self._V11 - map(self.add, frames.values()) - else: - raise err, None, stack + self._header = ID3Header(fileobj) + except (ID3NoHeaderError, ID3UnsupportedVersionError): + frames, offset = _find_id3v1(fileobj) + if frames is None: + raise + + self.version = ID3Header._V11 + for v in frames.values(): + self.add(v) else: frames = self.__known_frames if frames is None: - if self._V23 <= self.version: + if self.version >= ID3Header._V23: frames = Frames - elif self._V22 <= self.version: + elif self.version >= ID3Header._V22: frames = Frames_2_2 - data = self.__fullread(self.size - 10) + + try: + data = _fullread(fileobj, self.size - 10) + except (ValueError, EOFError, IOError) as e: + raise error(e) + for frame in self.__read_frames(data, frames=frames): if isinstance(frame, Frame): self.add(frame) else: self.unknown_frames.append(frame) - self.__unknown_version = self.version - finally: - self.__fileobj.close() - del self.__fileobj - del self.__filesize - if 
translate: - if v2_version == 3: - self.update_to_v23() - else: - self.update_to_v24() + self.__unknown_version = self.version[:2] + + if translate: + if v2_version == 3: + self.update_to_v23() + else: + self.update_to_v24() def getall(self, key): """Return all frames with a given name (the list may be empty). @@ -188,8 +315,9 @@ class ID3(DictProxy, mutagen.Metadata): del(self[key]) else: key = key + ":" - for k in filter(lambda s: s.startswith(key), self.keys()): - del(self[k]) + for k in list(self.keys()): + if k.startswith(key): + del(self[k]) def setall(self, key, values): """Delete frames of the given type and add frames in 'values'.""" @@ -209,8 +337,7 @@ class ID3(DictProxy, mutagen.Metadata): ``POPM=user@example.org=3 128/255`` """ - frames = list(map(Frame.pprint, self.values())) - frames.sort() + frames = sorted(Frame.pprint(s) for s in self.values()) return "\n".join(frames) def loaded_frame(self, tag): @@ -227,151 +354,88 @@ class ID3(DictProxy, mutagen.Metadata): """Add a frame to the tag.""" return self.loaded_frame(frame) - def __load_header(self): - fn = self.filename - data = self.__fullread(10) - id3, vmaj, vrev, flags, size = unpack('>3sBBB4s', data) - self.__flags = flags - self.size = BitPaddedInt(size) + 10 - self.version = (2, vmaj, vrev) - - if id3 != 'ID3': - raise ID3NoHeaderError("'%s' doesn't start with an ID3 tag" % fn) - if vmaj not in [2, 3, 4]: - raise ID3UnsupportedVersionError("'%s' ID3v2.%d not supported" - % (fn, vmaj)) - - if self.PEDANTIC: - if not BitPaddedInt.has_valid_padding(size): - raise ValueError("Header size not synchsafe") - - if self._V24 <= self.version and (flags & 0x0f): - raise ValueError("'%s' has invalid flags %#02x" % (fn, flags)) - elif self._V23 <= self.version < self._V24 and (flags & 0x1f): - raise ValueError("'%s' has invalid flags %#02x" % (fn, flags)) - - if self.f_extended: - extsize = self.__fullread(4) - if extsize in Frames: - # Some tagger sets the extended header flag but - # doesn't write an 
extended header; in this case, the - # ID3 data follows immediately. Since no extended - # header is going to be long enough to actually match - # a frame, and if it's *not* a frame we're going to be - # completely lost anyway, this seems to be the most - # correct check. - # http://code.google.com/p/quodlibet/issues/detail?id=126 - self.__flags ^= 0x40 - self.__extsize = 0 - self.__fileobj.seek(-4, 1) - self.__readbytes -= 4 - elif self.version >= self._V24: - # "Where the 'Extended header size' is the size of the whole - # extended header, stored as a 32 bit synchsafe integer." - self.__extsize = BitPaddedInt(extsize) - 4 - if self.PEDANTIC: - if not BitPaddedInt.has_valid_padding(extsize): - raise ValueError("Extended header size not synchsafe") - else: - # "Where the 'Extended header size', currently 6 or 10 bytes, - # excludes itself." - self.__extsize = unpack('>L', extsize)[0] - if self.__extsize: - self.__extdata = self.__fullread(self.__extsize) - else: - self.__extdata = "" - - def __determine_bpi(self, data, frames, EMPTY="\x00" * 10): - if self.version < self._V24: - return int - # have to special case whether to use bitpaddedints here - # spec says to use them, but iTunes has it wrong - - # count number of tags found as BitPaddedInt and how far past - o = 0 - asbpi = 0 - while o < len(data) - 10: - part = data[o:o + 10] - if part == EMPTY: - bpioff = -((len(data) - o) % 10) - break - name, size, flags = unpack('>4sLH', part) - size = BitPaddedInt(size) - o += 10 + size - if name in frames: - asbpi += 1 - else: - bpioff = o - len(data) - - # count number of tags found as int and how far past - o = 0 - asint = 0 - while o < len(data) - 10: - part = data[o:o + 10] - if part == EMPTY: - intoff = -((len(data) - o) % 10) - break - name, size, flags = unpack('>4sLH', part) - o += 10 + size - if name in frames: - asint += 1 - else: - intoff = o - len(data) - - # if more tags as int, or equal and bpi is past and int is not - if asint > asbpi or (asint == asbpi 
and (bpioff >= 1 and intoff <= 1)): - return int - return BitPaddedInt + def __setitem__(self, key, tag): + if not isinstance(tag, Frame): + raise TypeError("%r not a Frame instance" % tag) + super(ID3, self).__setitem__(key, tag) def __read_frames(self, data, frames): - if self.version < self._V24 and self.f_unsynch: + assert self.version >= ID3Header._V22 + + if self.version < ID3Header._V24 and self.f_unsynch: try: data = unsynch.decode(data) except ValueError: pass - if self._V23 <= self.version: - bpi = self.__determine_bpi(data, frames) + if self.version >= ID3Header._V23: + if self.version < ID3Header._V24: + bpi = int + else: + bpi = _determine_bpi(data, frames) + while data: header = data[:10] try: name, size, flags = unpack('>4sLH', header) except struct.error: return # not enough header - if name.strip('\x00') == '': + if name.strip(b'\x00') == b'': return + size = bpi(size) - framedata = data[10:10+size] - data = data[10+size:] + framedata = data[10:10 + size] + data = data[10 + size:] + self._padding = len(data) if size == 0: continue # drop empty frames + + if PY3: + try: + name = name.decode('ascii') + except UnicodeDecodeError: + continue + try: + # someone writes 2.3 frames with 2.2 names + if name[-1] == "\x00": + tag = Frames_2_2[name[:-1]] + name = tag.__base__.__name__ + tag = frames[name] except KeyError: if is_valid_frame_id(name): yield header + framedata else: try: - yield self.__load_framedata(tag, flags, framedata) + yield tag._fromData(self._header, flags, framedata) except NotImplementedError: yield header + framedata except ID3JunkFrameError: pass - - elif self._V22 <= self.version: + elif self.version >= ID3Header._V22: while data: header = data[0:6] try: name, size = unpack('>3s3s', header) except struct.error: return # not enough header - size, = struct.unpack('>L', '\x00'+size) - if name.strip('\x00') == '': + size, = struct.unpack('>L', b'\x00' + size) + if name.strip(b'\x00') == b'': return - framedata = data[6:6+size] - data = 
data[6+size:] + + framedata = data[6:6 + size] + data = data[6 + size:] + self._padding = len(data) if size == 0: continue # drop empty frames + + if PY3: + try: + name = name.decode('ascii') + except UnicodeDecodeError: + continue + try: tag = frames[name] except KeyError: @@ -379,146 +443,139 @@ class ID3(DictProxy, mutagen.Metadata): yield header + framedata else: try: - yield self.__load_framedata(tag, 0, framedata) - except NotImplementedError: + yield tag._fromData(self._header, 0, framedata) + except (ID3EncryptionUnsupportedError, + NotImplementedError): yield header + framedata except ID3JunkFrameError: pass - def __load_framedata(self, tag, flags, framedata): - return tag.fromData(self, flags, framedata) - - f_unsynch = property(lambda s: bool(s.__flags & 0x80)) - f_extended = property(lambda s: bool(s.__flags & 0x40)) - f_experimental = property(lambda s: bool(s.__flags & 0x20)) - f_footer = property(lambda s: bool(s.__flags & 0x10)) - - #f_crc = property(lambda s: bool(s.__extflags & 0x8000)) - - def save(self, filename=None, v1=1, v2_version=4, v23_sep='/'): - """Save changes to a file. - - If no filename is given, the one most recently loaded is used. - - Keyword arguments: - v1 -- if 0, ID3v1 tags will be removed - if 1, ID3v1 tags will be updated but not added - if 2, ID3v1 tags will be created and/or updated - v2 -- version of ID3v2 tags (3 or 4). - - By default Mutagen saves ID3v2.4 tags. If you want to save ID3v2.3 - tags, you must call method update_to_v23 before saving the file. - - v23_sep -- the separator used to join multiple text values - if v2_version == 3. Defaults to '/' but if it's None - will be the ID3v2v2.4 null separator. - - The lack of a way to update only an ID3v1 tag is intentional. 
- """ - + def _prepare_data(self, fileobj, start, available, v2_version, v23_sep, + pad_func): if v2_version == 3: - version = self._V23 + version = ID3Header._V23 elif v2_version == 4: - version = self._V24 + version = ID3Header._V24 else: raise ValueError("Only 3 or 4 allowed for v2_version") # Sort frames by 'importance' order = ["TIT2", "TPE1", "TRCK", "TALB", "TPOS", "TDRC", "TCON"] - order = dict(zip(order, range(len(order)))) + order = dict((b, a) for a, b in enumerate(order)) last = len(order) - frames = self.items() - frames.sort(lambda a, b: cmp(order.get(a[0][:4], last), - order.get(b[0][:4], last))) + frames = sorted(self.items(), + key=lambda a: (order.get(a[0][:4], last), a[0])) framedata = [self.__save_frame(frame, version=version, v23_sep=v23_sep) for (key, frame) in frames] # only write unknown frames if they were loaded from the version # we are saving with or upgraded to it - if self.__unknown_version == version: - framedata.extend([data for data in self.unknown_frames - if len(data) > 10]) + if self.__unknown_version == version[:2]: + framedata.extend(data for data in self.unknown_frames + if len(data) > 10) - if not framedata: - try: - self.delete(filename) - except EnvironmentError, err: - from errno import ENOENT - if err.errno != ENOENT: - raise - return + needed = sum(map(len, framedata)) + 10 - framedata = ''.join(framedata) - framesize = len(framedata) + fileobj.seek(0, 2) + trailing_size = fileobj.tell() - start + + info = PaddingInfo(available - needed, trailing_size) + new_padding = info._get_padding(pad_func) + if new_padding < 0: + raise error("invalid padding") + new_size = needed + new_padding + + new_framesize = BitPaddedInt.to_str(new_size - 10, width=4) + header = pack('>3sBBB4s', b'ID3', v2_version, 0, 0, new_framesize) + + data = bytearray(header) + for frame in framedata: + data += frame + assert new_size >= len(data) + data += (new_size - len(data)) * b'\x00' + assert new_size == len(data) + + return data + + def save(self, 
filename=None, v1=1, v2_version=4, v23_sep='/', + padding=None): + """Save changes to a file. + + Args: + filename: + Filename to save the tag to. If no filename is given, + the one most recently loaded is used. + v1 (ID3v1SaveOptions): + if 0, ID3v1 tags will be removed. + if 1, ID3v1 tags will be updated but not added. + if 2, ID3v1 tags will be created and/or updated + v2 (int): + version of ID3v2 tags (3 or 4). + v23_sep (str): + the separator used to join multiple text values + if v2_version == 3. Defaults to '/' but if it's None + will be the ID3v2v2.4 null separator. + padding (function): + A function taking a PaddingInfo which should + return the amount of padding to use. If None (default) + will default to something reasonable. + + By default Mutagen saves ID3v2.4 tags. If you want to save ID3v2.3 + tags, you must call method update_to_v23 before saving the file. + + The lack of a way to update only an ID3v1 tag is intentional. + + Can raise id3.error. + """ if filename is None: filename = self.filename + try: f = open(filename, 'rb+') - except IOError, err: + except IOError as err: from errno import ENOENT if err.errno != ENOENT: raise f = open(filename, 'ab') # create, then reopen f = open(filename, 'rb+') + try: - idata = f.read(10) try: - id3, vmaj, vrev, flags, insize = unpack('>3sBBB4s', idata) - except struct.error: - id3, insize = '', 0 - insize = BitPaddedInt(insize) - if id3 != 'ID3': - insize = -10 - - if insize >= framesize: - outsize = insize + header = ID3Header(f) + except ID3NoHeaderError: + old_size = 0 else: - outsize = (framesize + 1023) & ~0x3FF - framedata += '\x00' * (outsize - framesize) + old_size = header.size - framesize = BitPaddedInt.to_str(outsize, width=4) - flags = 0 - header = pack('>3sBBB4s', 'ID3', v2_version, 0, flags, framesize) - data = header + framedata + data = self._prepare_data( + f, 0, old_size, v2_version, v23_sep, padding) + new_size = len(data) - if (insize < outsize): - insert_bytes(f, outsize-insize, 
insize+10) + if (old_size < new_size): + insert_bytes(f, new_size - old_size, old_size) + elif (old_size > new_size): + delete_bytes(f, old_size - new_size, new_size) f.seek(0) f.write(data) - try: - f.seek(-128, 2) - except IOError, err: - # If the file is too small, that's OK - it just means - # we're certain it doesn't have a v1 tag. - from errno import EINVAL - if err.errno != EINVAL: - # If we failed to see for some other reason, bail out. - raise - # Since we're sure this isn't a v1 tag, don't read it. - f.seek(0, 2) - - data = f.read(128) - try: - idx = data.index("TAG") - except ValueError: - offset = 0 - has_v1 = False - else: - offset = idx - len(data) - has_v1 = True - - f.seek(offset, 2) - if v1 == 1 and has_v1 or v1 == 2: - f.write(MakeID3v1(self)) - else: - f.truncate() + self.__save_v1(f, v1) finally: f.close() + def __save_v1(self, f, v1): + tag, offset = _find_id3v1(f) + has_v1 = tag is not None + + f.seek(offset, 2) + if v1 == ID3v1SaveOptions.UPDATE and has_v1 or \ + v1 == ID3v1SaveOptions.CREATE: + f.write(MakeID3v1(self)) + else: + f.truncate() + def delete(self, filename=None, delete_v1=True, delete_v2=True): """Remove tags from a file. @@ -534,13 +591,14 @@ class ID3(DictProxy, mutagen.Metadata): delete(filename, delete_v1, delete_v2) self.clear() - def __save_frame(self, frame, name=None, version=_V24, v23_sep=None): + def __save_frame(self, frame, name=None, version=ID3Header._V24, + v23_sep=None): flags = 0 - if self.PEDANTIC and isinstance(frame, TextFrame): + if isinstance(frame, TextFrame): if len(str(frame)) == 0: - return '' + return b'' - if version == self._V23: + if version == ID3Header._V23: framev23 = frame._get_v23_frame(sep=v23_sep) framedata = framev23._writeData() else: @@ -551,19 +609,28 @@ class ID3(DictProxy, mutagen.Metadata): # Disabled as this causes iTunes and other programs # to fail to find these frames, which usually includes # e.g. APIC. 
- #framedata = BitPaddedInt.to_str(usize) + framedata.encode('zlib') - #flags |= Frame.FLAG24_COMPRESS | Frame.FLAG24_DATALEN + # framedata = BitPaddedInt.to_str(usize) + framedata.encode('zlib') + # flags |= Frame.FLAG24_COMPRESS | Frame.FLAG24_DATALEN pass - if version == self._V24: + if version == ID3Header._V24: bits = 7 - elif version == self._V23: + elif version == ID3Header._V23: bits = 8 else: raise ValueError datasize = BitPaddedInt.to_str(len(framedata), width=4, bits=bits) - header = pack('>4s4sH', name or type(frame).__name__, datasize, flags) + + if name is not None: + assert isinstance(name, bytes) + frame_name = name + else: + frame_name = type(frame).__name__ + if PY3: + frame_name = frame_name.encode("ascii") + + header = pack('>4s4sH', frame_name, datasize, flags) return header + framedata def __update_common(self): @@ -573,20 +640,14 @@ class ID3(DictProxy, mutagen.Metadata): # Get rid of "(xx)Foobr" format. self["TCON"].genres = self["TCON"].genres - if self.version < self._V23: - # ID3v2.2 PIC frames are slightly different. - pics = self.getall("APIC") - mimes = {"PNG": "image/png", "JPG": "image/jpeg"} - self.delall("APIC") - for pic in pics: + mimes = {"PNG": "image/png", "JPG": "image/jpeg"} + for pic in self.getall("APIC"): + if pic.mime in mimes: newpic = APIC( - encoding=pic.encoding, mime=mimes.get(pic.mime, pic.mime), + encoding=pic.encoding, mime=mimes[pic.mime], type=pic.type, desc=pic.desc, data=pic.data) self.add(newpic) - # ID3v2.2 LNK frames are just way too different to upgrade. - self.delall("LINK") - def update_to_v24(self): """Convert older tags into an ID3v2.4 tag. 
@@ -597,28 +658,37 @@ class ID3(DictProxy, mutagen.Metadata): self.__update_common() - if self.__unknown_version == (2, 3, 0): + if self.__unknown_version == (2, 3): # convert unknown 2.3 frames (flags/size) to 2.4 converted = [] for frame in self.unknown_frames: try: name, size, flags = unpack('>4sLH', frame[:10]) - frame = BinaryFrame.fromData(self, flags, frame[10:]) - except (struct.error, error): + except struct.error: continue + + try: + frame = BinaryFrame._fromData( + self._header, flags, frame[10:]) + except (error, NotImplementedError): + continue + converted.append(self.__save_frame(frame, name=name)) self.unknown_frames[:] = converted - self.__unknown_version = (2, 4, 0) + self.__unknown_version = (2, 4) # TDAT, TYER, and TIME have been turned into TDRC. try: - if str(self.get("TYER", "")).strip("\x00"): - date = str(self.pop("TYER")) - if str(self.get("TDAT", "")).strip("\x00"): - dat = str(self.pop("TDAT")) + date = text_type(self.get("TYER", "")) + if date.strip(u"\x00"): + self.pop("TYER") + dat = text_type(self.get("TDAT", "")) + if dat.strip("\x00"): + self.pop("TDAT") date = "%s-%s-%s" % (date, dat[2:], dat[:2]) - if str(self.get("TIME", "")).strip("\x00"): - time = str(self.pop("TIME")) + time = text_type(self.get("TIME", "")) + if time.strip("\x00"): + self.pop("TIME") date += "T%s:%s:00" % (time[:2], time[2:]) if "TDRC" not in self: self.add(TDRC(encoding=0, text=date)) @@ -720,45 +790,142 @@ def delete(filename, delete_v1=True, delete_v2=True): * delete_v2 -- delete any ID3v2 tag """ - f = open(filename, 'rb+') + with open(filename, 'rb+') as f: - if delete_v1: - try: - f.seek(-128, 2) - except IOError: - pass - else: - if f.read(3) == "TAG": - f.seek(-128, 2) + if delete_v1: + tag, offset = _find_id3v1(f) + if tag is not None: + f.seek(offset, 2) f.truncate() - # technically an insize=0 tag is invalid, but we delete it anyway - # (primarily because we used to write it) - if delete_v2: - f.seek(0, 0) - idata = f.read(10) - try: - id3, vmaj, 
vrev, flags, insize = unpack('>3sBBB4s', idata) - except struct.error: - id3, insize = '', -1 - insize = BitPaddedInt(insize) - if id3 == 'ID3' and insize >= 0: - delete_bytes(f, insize + 10, 0) + # technically an insize=0 tag is invalid, but we delete it anyway + # (primarily because we used to write it) + if delete_v2: + f.seek(0, 0) + idata = f.read(10) + try: + id3, vmaj, vrev, flags, insize = unpack('>3sBBB4s', idata) + except struct.error: + id3, insize = b'', -1 + insize = BitPaddedInt(insize) + if id3 == b'ID3' and insize >= 0: + delete_bytes(f, insize + 10, 0) # support open(filename) as interface Open = ID3 -# ID3v1.1 support. -def ParseID3v1(string): - """Parse an ID3v1 tag, returning a list of ID3v2.4 frames.""" +def _determine_bpi(data, frames, EMPTY=b"\x00" * 10): + """Takes id3v2.4 frame data and determines if ints or bitpaddedints + should be used for parsing. Needed because iTunes used to write + normal ints for frame sizes. + """ + + # count number of tags found as BitPaddedInt and how far past + o = 0 + asbpi = 0 + while o < len(data) - 10: + part = data[o:o + 10] + if part == EMPTY: + bpioff = -((len(data) - o) % 10) + break + name, size, flags = unpack('>4sLH', part) + size = BitPaddedInt(size) + o += 10 + size + if PY3: + try: + name = name.decode("ascii") + except UnicodeDecodeError: + continue + if name in frames: + asbpi += 1 + else: + bpioff = o - len(data) + + # count number of tags found as int and how far past + o = 0 + asint = 0 + while o < len(data) - 10: + part = data[o:o + 10] + if part == EMPTY: + intoff = -((len(data) - o) % 10) + break + name, size, flags = unpack('>4sLH', part) + o += 10 + size + if PY3: + try: + name = name.decode("ascii") + except UnicodeDecodeError: + continue + if name in frames: + asint += 1 + else: + intoff = o - len(data) + + # if more tags as int, or equal and bpi is past and int is not + if asint > asbpi or (asint == asbpi and (bpioff >= 1 and intoff <= 1)): + return int + return BitPaddedInt + + +def 
_find_id3v1(fileobj): + """Returns a tuple of (id3tag, offset_to_end) or (None, 0) + + offset mainly because we used to write too short tags in some cases and + we need the offset to delete them. + """ + + # id3v1 is always at the end (after apev2) + + extra_read = b"APETAGEX".index(b"TAG") try: - string = string[string.index("TAG"):] + fileobj.seek(-128 - extra_read, 2) + except IOError as e: + if e.errno == errno.EINVAL: + # If the file is too small, might be ok since we wrote too small + # tags at some point. let's see how the parsing goes.. + fileobj.seek(0, 0) + else: + raise + + data = fileobj.read(128 + extra_read) + try: + idx = data.index(b"TAG") + except ValueError: + return (None, 0) + else: + # FIXME: make use of the apev2 parser here + # if TAG is part of APETAGEX assume this is an APEv2 tag + try: + ape_idx = data.index(b"APETAGEX") + except ValueError: + pass + else: + if idx == ape_idx + extra_read: + return (None, 0) + + tag = ParseID3v1(data[idx:]) + if tag is None: + return (None, 0) + + offset = idx - len(data) + return (tag, offset) + + +# ID3v1.1 support. +def ParseID3v1(data): + """Parse an ID3v1 tag, returning a list of ID3v2.4 frames. + + Returns a {frame_name: frame} dict or None. + """ + + try: + data = data[data.index(b"TAG"):] except ValueError: return None - if 128 < len(string) or len(string) < 124: + if 128 < len(data) or len(data) < 124: return None # Issue #69 - Previous versions of Mutagen, when encountering @@ -766,19 +933,19 @@ def ParseID3v1(string): # wrote only the characters available - e.g. "1" or "" - into the # year field. To parse those, reduce the size of the year field. # Amazingly, "0s" works as a struct format string. 
- unpack_fmt = "3s30s30s30s%ds29sBB" % (len(string) - 124) + unpack_fmt = "3s30s30s30s%ds29sBB" % (len(data) - 124) try: tag, title, artist, album, year, comment, track, genre = unpack( - unpack_fmt, string) + unpack_fmt, data) except StructError: return None - if tag != "TAG": + if tag != b"TAG": return None - def fix(string): - return string.split("\x00")[0].strip().decode('latin1') + def fix(data): + return data.split(b"\x00")[0].strip().decode('latin1') title, artist, album, year, comment = map( fix, [title, artist, album, year, comment]) @@ -797,7 +964,7 @@ def ParseID3v1(string): encoding=0, lang="eng", desc="ID3v1 Comment", text=comment) # Don't read a track number if it looks like the comment was # padded with spaces instead of nulls (thanks, WinAmp). - if track and (track != 32 or string[-3] == '\x00'): + if track and ((track != 32) or (data[-3] == b'\x00'[0])): frames["TRCK"] = TRCK(encoding=0, text=str(track)) if genre != 255: frames["TCON"] = TCON(encoding=0, text=str(genre)) @@ -814,22 +981,22 @@ def MakeID3v1(id3): if v2id in id3: text = id3[v2id].text[0].encode('latin1', 'replace')[:30] else: - text = "" - v1[name] = text + ("\x00" * (30 - len(text))) + text = b"" + v1[name] = text + (b"\x00" * (30 - len(text))) if "COMM" in id3: cmnt = id3["COMM"].text[0].encode('latin1', 'replace')[:28] else: - cmnt = "" - v1["comment"] = cmnt + ("\x00" * (29 - len(cmnt))) + cmnt = b"" + v1["comment"] = cmnt + (b"\x00" * (29 - len(cmnt))) if "TRCK" in id3: try: - v1["track"] = chr(+id3["TRCK"]) + v1["track"] = chr_(+id3["TRCK"]) except ValueError: - v1["track"] = "\x00" + v1["track"] = b"\x00" else: - v1["track"] = "\x00" + v1["track"] = b"\x00" if "TCON" in id3: try: @@ -838,20 +1005,28 @@ def MakeID3v1(id3): pass else: if genre in TCON.GENRES: - v1["genre"] = chr(TCON.GENRES.index(genre)) + v1["genre"] = chr_(TCON.GENRES.index(genre)) if "genre" not in v1: - v1["genre"] = "\xff" + v1["genre"] = b"\xff" if "TDRC" in id3: - year = str(id3["TDRC"]) + year = 
text_type(id3["TDRC"]).encode('ascii') elif "TYER" in id3: - year = str(id3["TYER"]) + year = text_type(id3["TYER"]).encode('ascii') else: - year = "" - v1["year"] = (year + "\x00\x00\x00\x00")[:4] + year = b"" + v1["year"] = (year + b"\x00\x00\x00\x00")[:4] - return ("TAG%(title)s%(artist)s%(album)s%(year)s%(comment)s" - "%(track)s%(genre)s") % v1 + return ( + b"TAG" + + v1["title"] + + v1["artist"] + + v1["album"] + + v1["year"] + + v1["comment"] + + v1["track"] + + v1["genre"] + ) class ID3FileType(mutagen.FileType): @@ -859,7 +1034,7 @@ class ID3FileType(mutagen.FileType): ID3 = ID3 - class _Info(object): + class _Info(mutagen.StreamInfo): length = 0 def __init__(self, fileobj, offset): @@ -870,8 +1045,8 @@ class ID3FileType(mutagen.FileType): return "Unknown format with ID3 tag" @staticmethod - def score(filename, fileobj, header): - return header.startswith("ID3") + def score(filename, fileobj, header_data): + return header_data.startswith(b"ID3") def add_tags(self, ID3=None): """Add an empty ID3 tag to the file. 
@@ -903,8 +1078,9 @@ class ID3FileType(mutagen.FileType): self.filename = filename try: self.tags = ID3(filename, **kwargs) - except error: + except ID3NoHeaderError: self.tags = None + if self.tags is not None: try: offset = self.tags.size @@ -912,8 +1088,6 @@ class ID3FileType(mutagen.FileType): offset = None else: offset = None - try: - fileobj = open(filename, "rb") + + with open(filename, "rb") as fileobj: self.info = self._Info(fileobj, offset) - finally: - fileobj.close() diff --git a/libs/mutagen/_id3frames.py b/libs/mutagen/id3/_frames.py similarity index 82% rename from libs/mutagen/_id3frames.py rename to libs/mutagen/id3/_frames.py index c6130f6b..33ecf5cd 100644 --- a/libs/mutagen/_id3frames.py +++ b/libs/mutagen/id3/_frames.py @@ -1,28 +1,35 @@ +# -*- coding: utf-8 -*- + # Copyright (C) 2005 Michael Urman # # This program is free software; you can redistribute it and/or modify # it under the terms of version 2 of the GNU General Public License as # published by the Free Software Foundation. 
-from zlib import error as zlibError -from warnings import warn +import zlib from struct import unpack -from mutagen._id3util import ( - ID3Warning, ID3JunkFrameError, ID3BadCompressedData, - ID3EncryptionUnsupportedError, ID3BadUnsynchData, unsynch) -from mutagen._id3specs import ( +from ._util import ID3JunkFrameError, ID3EncryptionUnsupportedError, unsynch +from ._specs import ( BinaryDataSpec, StringSpec, Latin1TextSpec, EncodedTextSpec, ByteSpec, EncodingSpec, ASPIIndexSpec, SizedIntegerSpec, IntegerSpec, VolumeAdjustmentsSpec, VolumePeakSpec, VolumeAdjustmentSpec, ChannelSpec, MultiSpec, SynchronizedTextSpec, KeyEventSpec, TimeStampSpec, - EncodedNumericPartTextSpec, EncodedNumericTextSpec) + EncodedNumericPartTextSpec, EncodedNumericTextSpec, SpecError, + PictureTypeSpec) +from .._compat import text_type, string_types, swap_to_string, iteritems, izip def is_valid_frame_id(frame_id): return frame_id.isalnum() and frame_id.isupper() +def _bytes2key(b): + assert isinstance(b, bytes) + + return b.decode("latin1") + + class Frame(object): """Fundamental unit of ID3 data. 
@@ -52,24 +59,28 @@ class Frame(object): if len(args) == 1 and len(kwargs) == 0 and \ isinstance(args[0], type(self)): other = args[0] - for checker in self._framespec: - try: - val = checker.validate(self, getattr(other, checker.name)) - except ValueError as e: - e.message = "%s: %s" % (checker.name, e.message) - raise - setattr(self, checker.name, val) + # ask the sub class to fill in our data + other._to_other(self) else: - for checker, val in zip(self._framespec, args): - setattr(self, checker.name, checker.validate(self, val)) + for checker, val in izip(self._framespec, args): + setattr(self, checker.name, val) for checker in self._framespec[len(args):]: - try: - validated = checker.validate( - self, kwargs.get(checker.name, None)) - except ValueError as e: - e.message = "%s: %s" % (checker.name, e.message) - raise - setattr(self, checker.name, validated) + setattr(self, checker.name, kwargs.get(checker.name)) + + def __setattr__(self, name, value): + for checker in self._framespec: + if checker.name == name: + self.__dict__[name] = checker.validate(self, value) + return + super(Frame, self).__setattr__(name, value) + + def _to_other(self, other): + # this impl covers subclasses with the same framespec + if other._framespec is not self._framespec: + raise ValueError + + for checker in other._framespec: + setattr(other, checker.name, getattr(self, checker.name)) def _get_v23_frame(self, **kwargs): """Returns a frame copy which is suitable for writing into a v2.3 tag. 
@@ -104,30 +115,31 @@ class Frame(object): """ kw = [] for attr in self._framespec: - kw.append('%s=%r' % (attr.name, getattr(self, attr.name))) + # so repr works during __init__ + if hasattr(self, attr.name): + kw.append('%s=%r' % (attr.name, getattr(self, attr.name))) return '%s(%s)' % (type(self).__name__, ', '.join(kw)) def _readData(self, data): - odata = data + """Raises ID3JunkFrameError; Returns leftover data""" + for reader in self._framespec: if len(data): try: value, data = reader.read(self, data) - except UnicodeDecodeError: - raise ID3JunkFrameError + except SpecError as e: + raise ID3JunkFrameError(e) else: - raise ID3JunkFrameError + raise ID3JunkFrameError("no data left") setattr(self, reader.name, value) - if data.strip('\x00'): - warn('Leftover data: %s: %r (from %r)' % ( - type(self).__name__, data, odata), - ID3Warning) + + return data def _writeData(self): data = [] for writer in self._framespec: data.append(writer.write(self, getattr(self, writer.name))) - return ''.join(data) + return b''.join(data) def pprint(self): """Return a human-readable representation of the frame.""" @@ -137,10 +149,17 @@ class Frame(object): return "[unrepresentable data]" @classmethod - def fromData(cls, id3, tflags, data): - """Construct this ID3 frame from raw string data.""" + def _fromData(cls, id3, tflags, data): + """Construct this ID3 frame from raw string data. - if id3._V24 <= id3.version: + Raises: + + ID3JunkFrameError in case parsing failed + NotImplementedError in case parsing isn't implemented + ID3EncryptionUnsupportedError in case the frame is encrypted. + """ + + if id3.version >= id3._V24: if tflags & (Frame.FLAG24_COMPRESS | Frame.FLAG24_DATALEN): # The data length int is syncsafe in 2.4 (but not 2.3). 
# However, we don't actually need the data length int, @@ -151,25 +170,28 @@ class Frame(object): if tflags & Frame.FLAG24_UNSYNCH or id3.f_unsynch: try: data = unsynch.decode(data) - except ValueError, err: - if id3.PEDANTIC: - raise ID3BadUnsynchData('%s: %r' % (err, data)) + except ValueError: + # Some things write synch-unsafe data with either the frame + # or global unsynch flag set. Try to load them as is. + # https://github.com/quodlibet/mutagen/issues/210 + # https://github.com/quodlibet/mutagen/issues/223 + pass if tflags & Frame.FLAG24_ENCRYPT: raise ID3EncryptionUnsupportedError if tflags & Frame.FLAG24_COMPRESS: try: - data = data.decode('zlib') - except zlibError, err: + data = zlib.decompress(data) + except zlib.error as err: # the initial mutagen that went out with QL 0.12 did not # write the 4 bytes of uncompressed size. Compensate. data = datalen_bytes + data try: - data = data.decode('zlib') - except zlibError, err: - if id3.PEDANTIC: - raise ID3BadCompressedData('%s: %r' % (err, data)) + data = zlib.decompress(data) + except zlib.error as err: + raise ID3JunkFrameError( + 'zlib: %s: %r' % (err, data)) - elif id3._V23 <= id3.version: + elif id3.version >= id3._V23: if tflags & Frame.FLAG23_COMPRESS: usize, = unpack('>L', data[:4]) data = data[4:] @@ -177,14 +199,11 @@ class Frame(object): raise ID3EncryptionUnsupportedError if tflags & Frame.FLAG23_COMPRESS: try: - data = data.decode('zlib') - except zlibError, err: - if id3.PEDANTIC: - raise ID3BadCompressedData('%s: %r' % (err, data)) + data = zlib.decompress(data) + except zlib.error as err: + raise ID3JunkFrameError('zlib: %s: %r' % (err, data)) frame = cls() - frame._rawdata = data - frame._flags = tflags frame._readData(data) return frame @@ -205,30 +224,53 @@ class FrameOpt(Frame): super(FrameOpt, self).__init__(*args, **kwargs) for spec in self._optionalspec: if spec.name in kwargs: - validated = spec.validate(self, kwargs[spec.name]) - setattr(self, spec.name, validated) + setattr(self, 
spec.name, kwargs[spec.name]) else: break + def __setattr__(self, name, value): + for checker in self._optionalspec: + if checker.name == name: + self.__dict__[name] = checker.validate(self, value) + return + super(FrameOpt, self).__setattr__(name, value) + + def _to_other(self, other): + super(FrameOpt, self)._to_other(other) + + # this impl covers subclasses with the same optionalspec + if other._optionalspec is not self._optionalspec: + raise ValueError + + for checker in other._optionalspec: + if hasattr(self, checker.name): + setattr(other, checker.name, getattr(self, checker.name)) + def _readData(self, data): - odata = data + """Raises ID3JunkFrameError; Returns leftover data""" + for reader in self._framespec: if len(data): - value, data = reader.read(self, data) + try: + value, data = reader.read(self, data) + except SpecError as e: + raise ID3JunkFrameError(e) else: - raise ID3JunkFrameError + raise ID3JunkFrameError("no data left") setattr(self, reader.name, value) + if data: for reader in self._optionalspec: if len(data): - value, data = reader.read(self, data) + try: + value, data = reader.read(self, data) + except SpecError as e: + raise ID3JunkFrameError(e) else: break setattr(self, reader.name, value) - if data.strip('\x00'): - warn('Leftover data: %s: %r (from %r)' % ( - type(self).__name__, data, odata), - ID3Warning) + + return data def _writeData(self): data = [] @@ -239,7 +281,7 @@ class FrameOpt(Frame): data.append(writer.write(self, getattr(self, writer.name))) except AttributeError: break - return ''.join(data) + return b''.join(data) def __repr__(self): kw = [] @@ -251,6 +293,7 @@ class FrameOpt(Frame): return '%s(%s)' % (type(self).__name__, ', '.join(kw)) +@swap_to_string class TextFrame(Frame): """Text strings. 
@@ -271,17 +314,17 @@ class TextFrame(Frame): MultiSpec('text', EncodedTextSpec('text'), sep=u'\u0000'), ] - def __str__(self): - return self.__unicode__().encode('utf-8') + def __bytes__(self): + return text_type(self).encode('utf-8') - def __unicode__(self): + def __str__(self): return u'\u0000'.join(self.text) def __eq__(self, other): - if isinstance(other, str): - return str(self) == other - elif isinstance(other, unicode): - return unicode(self) == other + if isinstance(other, bytes): + return bytes(self) == other + elif isinstance(other, text_type): + return text_type(self) == other return self.text == other __hash__ = Frame.__hash__ @@ -344,6 +387,7 @@ class NumericPartTextFrame(TextFrame): return int(self.text[0].split("/")[0]) +@swap_to_string class TimeStampTextFrame(TextFrame): """A list of time stamps. @@ -356,16 +400,17 @@ class TimeStampTextFrame(TextFrame): MultiSpec('text', TimeStampSpec('stamp'), sep=u','), ] - def __str__(self): - return self.__unicode__().encode('utf-8') + def __bytes__(self): + return text_type(self).encode('utf-8') - def __unicode__(self): - return ','.join([stamp.text for stamp in self.text]) + def __str__(self): + return u','.join([stamp.text for stamp in self.text]) def _pprint(self): - return " / ".join([stamp.text for stamp in self.text]) + return u" / ".join([stamp.text for stamp in self.text]) +@swap_to_string class UrlFrame(Frame): """A frame containing a URL string. 
@@ -380,10 +425,10 @@ class UrlFrame(Frame): _framespec = [Latin1TextSpec('url')] - def __str__(self): + def __bytes__(self): return self.url.encode('utf-8') - def __unicode__(self): + def __str__(self): return self.url def __eq__(self, other): @@ -446,7 +491,7 @@ class TCON(TextFrame): if genreid: for gid in genreid[1:-1].split(")("): if gid.isdigit() and int(gid) < len(self.GENRES): - gid = unicode(self.GENRES[int(gid)]) + gid = text_type(self.GENRES[int(gid)]) newgenres.append(gid) elif gid == "CR": newgenres.append(u"Cover") @@ -467,12 +512,12 @@ class TCON(TextFrame): return genres def __set_genres(self, genres): - if isinstance(genres, basestring): + if isinstance(genres, string_types): genres = [genres] - self.text = map(self.__decode, genres) + self.text = [self.__decode(g) for g in genres] def __decode(self, value): - if isinstance(value, str): + if isinstance(value, bytes): enc = EncodedTextSpec._encodings[self.encoding][0] return value.decode(enc) else: @@ -869,6 +914,7 @@ class SYTC(Frame): __hash__ = Frame.__hash__ +@swap_to_string class USLT(Frame): """Unsynchronised lyrics/text transcription. 
@@ -885,12 +931,12 @@ class USLT(Frame): @property def HashKey(self): - return '%s:%s:%r' % (self.FrameID, self.desc, self.lang) + return '%s:%s:%s' % (self.FrameID, self.desc, self.lang) - def __str__(self): + def __bytes__(self): return self.text.encode('utf-8') - def __unicode__(self): + def __str__(self): return self.text def __eq__(self, other): @@ -899,6 +945,7 @@ class USLT(Frame): __hash__ = Frame.__hash__ +@swap_to_string class SYLT(Frame): """Synchronised lyrics/text.""" @@ -913,7 +960,7 @@ class SYLT(Frame): @property def HashKey(self): - return '%s:%s:%r' % (self.FrameID, self.desc, self.lang) + return '%s:%s:%s' % (self.FrameID, self.desc, self.lang) def __eq__(self, other): return str(self) == other @@ -921,7 +968,10 @@ class SYLT(Frame): __hash__ = Frame.__hash__ def __str__(self): - return "".join([text for (text, time) in self.text]).encode('utf-8') + return u"".join(text for (text, time) in self.text) + + def __bytes__(self): + return text_type(self).encode("utf-8") class COMM(TextFrame): @@ -940,10 +990,10 @@ class COMM(TextFrame): @property def HashKey(self): - return '%s:%s:%r' % (self.FrameID, self.desc, self.lang) + return '%s:%s:%s' % (self.FrameID, self.desc, self.lang) def _pprint(self): - return "%s=%r=%s" % (self.desc, self.lang, " / ".join(self.text)) + return "%s=%s=%s" % (self.desc, self.lang, " / ".join(self.text)) class RVA2(Frame): @@ -1063,7 +1113,7 @@ class APIC(Frame): _framespec = [ EncodingSpec('encoding'), Latin1TextSpec('mime'), - ByteSpec('type'), + PictureTypeSpec('type'), EncodedTextSpec('desc'), BinaryDataSpec('data'), ] @@ -1077,6 +1127,12 @@ class APIC(Frame): def HashKey(self): return '%s:%s' % (self.FrameID, self.desc) + def _validate_from_22(self, other, checker): + if checker.name == "mime": + self.mime = other.mime.decode("ascii", "ignore") + else: + super(APIC, self)._validate_from_22(other, checker) + def _pprint(self): return "%s (%s, %d bytes)" % ( self.desc, self.mime, len(self.data)) @@ -1102,7 +1158,7 @@ 
class PCNT(Frame): return self.count def _pprint(self): - return unicode(self.count) + return text_type(self.count) class POPM(FrameOpt): @@ -1202,6 +1258,7 @@ class RBUF(FrameOpt): return self.size +@swap_to_string class AENC(FrameOpt): """Audio encryption. @@ -1227,10 +1284,10 @@ class AENC(FrameOpt): def HashKey(self): return '%s:%s' % (self.FrameID, self.owner) - def __str__(self): + def __bytes__(self): return self.owner.encode('utf-8') - def __unicode__(self): + def __str__(self): return self.owner def __eq__(self, other): @@ -1259,8 +1316,8 @@ class LINK(FrameOpt): @property def HashKey(self): try: - return "%s:%s:%s:%r" % ( - self.FrameID, self.frameid, self.url, self.data) + return "%s:%s:%s:%s" % ( + self.FrameID, self.frameid, self.url, _bytes2key(self.data)) except AttributeError: return "%s:%s:%s" % (self.FrameID, self.frameid, self.url) @@ -1323,13 +1380,10 @@ class UFID(Frame): __hash__ = Frame.__hash__ def _pprint(self): - isascii = ord(max(self.data)) < 128 - if isascii: - return "%s=%s" % (self.owner, self.data) - else: - return "%s (%d bytes)" % (self.owner, len(self.data)) + return "%s=%r" % (self.owner, self.data) +@swap_to_string class USER(Frame): """Terms of use. 
@@ -1348,12 +1402,12 @@ class USER(Frame): @property def HashKey(self): - return '%s:%r' % (self.FrameID, self.lang) + return '%s:%s' % (self.FrameID, self.lang) - def __str__(self): + def __bytes__(self): return self.text.encode('utf-8') - def __unicode__(self): + def __str__(self): return self.text def __eq__(self, other): @@ -1365,6 +1419,7 @@ class USER(Frame): return "%r=%s" % (self.lang, self.text) +@swap_to_string class OWNE(Frame): """Ownership frame.""" @@ -1375,10 +1430,10 @@ class OWNE(Frame): EncodedTextSpec('seller'), ] - def __str__(self): + def __bytes__(self): return self.seller.encode('utf-8') - def __unicode__(self): + def __str__(self): return self.seller def __eq__(self, other): @@ -1407,7 +1462,7 @@ class COMR(FrameOpt): @property def HashKey(self): - return '%s:%s' % (self.FrameID, self._writeData()) + return '%s:%s' % (self.FrameID, _bytes2key(self._writeData())) def __eq__(self, other): return self._writeData() == other._writeData() @@ -1415,6 +1470,7 @@ class COMR(FrameOpt): __hash__ = FrameOpt.__hash__ +@swap_to_string class ENCR(Frame): """Encryption method registration. 
@@ -1432,7 +1488,7 @@ class ENCR(Frame): def HashKey(self): return "%s:%s" % (self.FrameID, self.owner) - def __str__(self): + def __bytes__(self): return self.data def __eq__(self, other): @@ -1441,6 +1497,7 @@ class ENCR(Frame): __hash__ = Frame.__hash__ +@swap_to_string class GRID(FrameOpt): """Group identification registration.""" @@ -1458,10 +1515,10 @@ class GRID(FrameOpt): def __pos__(self): return self.group - def __str__(self): + def __bytes__(self): return self.owner.encode('utf-8') - def __unicode__(self): + def __str__(self): return self.owner def __eq__(self, other): @@ -1470,6 +1527,7 @@ class GRID(FrameOpt): __hash__ = FrameOpt.__hash__ +@swap_to_string class PRIV(Frame): """Private frame.""" @@ -1481,24 +1539,21 @@ class PRIV(Frame): @property def HashKey(self): return '%s:%s:%s' % ( - self.FrameID, self.owner, self.data.decode('latin1')) + self.FrameID, self.owner, _bytes2key(self.data)) - def __str__(self): + def __bytes__(self): return self.data def __eq__(self, other): return self.data == other def _pprint(self): - isascii = ord(max(self.data)) < 128 - if isascii: - return "%s=%s" % (self.owner, self.data) - else: - return "%s (%d bytes)" % (self.owner, len(self.data)) + return "%s=%r" % (self.owner, self.data) __hash__ = Frame.__hash__ +@swap_to_string class SIGN(Frame): """Signature frame.""" @@ -1509,9 +1564,9 @@ class SIGN(Frame): @property def HashKey(self): - return '%s:%c:%s' % (self.FrameID, self.group, self.sig) + return '%s:%s:%s' % (self.FrameID, self.group, _bytes2key(self.sig)) - def __str__(self): + def __bytes__(self): return self.sig def __eq__(self, other): @@ -1557,15 +1612,6 @@ class ASPI(Frame): __hash__ = Frame.__hash__ -Frames = dict([(k, v) for (k, v) in globals().items() - if len(k) == 4 and isinstance(v, type) and - issubclass(v, Frame)]) -"""All supported ID3v2 frames, keyed by frame name.""" - -del(k) -del(v) - - # ID3v2.2 frames class UFI(UFID): "Unique File Identifier" @@ -1779,8 +1825,8 @@ class COM(COMM): "Comment" 
-#class RVA(RVAD) -#class EQU(EQUA) +# class RVA(RVAD) +# class EQU(EQUA) class REV(RVRB): @@ -1793,9 +1839,24 @@ class PIC(APIC): The 'mime' attribute of an ID3v2.2 attached picture must be either 'PNG' or 'JPG'. """ - _framespec = [EncodingSpec('encoding'), StringSpec('mime', 3), - ByteSpec('type'), EncodedTextSpec('desc'), - BinaryDataSpec('data')] + + _framespec = [ + EncodingSpec('encoding'), + StringSpec('mime', 3), + PictureTypeSpec('type'), + EncodedTextSpec('desc'), + BinaryDataSpec('data') + ] + + def _to_other(self, other): + if not isinstance(other, APIC): + raise TypeError + + other.encoding = self.encoding + other.mime = self.mime + other.type = self.type + other.desc = self.desc + other.data = self.data class GEO(GEOB): @@ -1830,13 +1891,50 @@ class CRA(AENC): class LNK(LINK): """Linked information""" - _framespec = [StringSpec('frameid', 3), Latin1TextSpec('url')] + + _framespec = [ + StringSpec('frameid', 3), + Latin1TextSpec('url') + ] + _optionalspec = [BinaryDataSpec('data')] + def _to_other(self, other): + if not isinstance(other, LINK): + raise TypeError -Frames_2_2 = dict([(k, v) for (k, v) in globals().items() - if len(k) == 3 and isinstance(v, type) and - issubclass(v, Frame)]) + if isinstance(other, LNK): + other.frameid = self.frameid + else: + try: + other.frameid = Frames_2_2[self.frameid].__bases__[0].__name__ + except KeyError: + other.frameid = self.frameid.ljust(4) + other.url = self.url + if hasattr(self, "data"): + other.data = self.data -del k -del v + +Frames = {} +"""All supported ID3v2.3/4 frames, keyed by frame name.""" + + +Frames_2_2 = {} +"""All supported ID3v2.2 frames, keyed by frame name.""" + + +k, v = None, None +for k, v in iteritems(globals()): + if isinstance(v, type) and issubclass(v, Frame): + v.__module__ = "mutagen.id3" + + if len(k) == 3: + Frames_2_2[k] = v + elif len(k) == 4: + Frames[k] = v + +try: + del k + del v +except NameError: + pass diff --git a/libs/mutagen/_id3specs.py b/libs/mutagen/id3/_specs.py 
similarity index 55% rename from libs/mutagen/_id3specs.py rename to libs/mutagen/id3/_specs.py index 32ef3afe..22e4335b 100644 --- a/libs/mutagen/_id3specs.py +++ b/libs/mutagen/id3/_specs.py @@ -1,3 +1,5 @@ +# -*- coding: utf-8 -*- + # Copyright (C) 2005 Michael Urman # # This program is free software; you can redistribute it and/or modify @@ -6,12 +8,89 @@ import struct from struct import unpack, pack -from warnings import warn -from mutagen._id3util import ID3JunkFrameError, ID3Warning, BitPaddedInt +from .._compat import text_type, chr_, PY3, swap_to_string, string_types, \ + xrange +from .._util import total_ordering, decode_terminated, enum, izip +from ._util import BitPaddedInt + + +@enum +class PictureType(object): + """Enumeration of image types defined by the ID3 standard for the APIC + frame, but also reused in WMA/FLAC/VorbisComment. + """ + + OTHER = 0 + """Other""" + + FILE_ICON = 1 + """32x32 pixels 'file icon' (PNG only)""" + + OTHER_FILE_ICON = 2 + """Other file icon""" + + COVER_FRONT = 3 + """Cover (front)""" + + COVER_BACK = 4 + """Cover (back)""" + + LEAFLET_PAGE = 5 + """Leaflet page""" + + MEDIA = 6 + """Media (e.g. 
label side of CD)""" + + LEAD_ARTIST = 7 + """Lead artist/lead performer/soloist""" + + ARTIST = 8 + """Artist/performer""" + + CONDUCTOR = 9 + """Conductor""" + + BAND = 10 + """Band/Orchestra""" + + COMPOSER = 11 + """Composer""" + + LYRICIST = 12 + """Lyricist/text writer""" + + RECORDING_LOCATION = 13 + """Recording Location""" + + DURING_RECORDING = 14 + """During recording""" + + DURING_PERFORMANCE = 15 + """During performance""" + + SCREEN_CAPTURE = 16 + """Movie/video screen capture""" + + FISH = 17 + """A bright coloured fish""" + + ILLUSTRATION = 18 + """Illustration""" + + BAND_LOGOTYPE = 19 + """Band/artist logotype""" + + PUBLISHER_LOGOTYPE = 20 + """Publisher/Studio logotype""" + + +class SpecError(Exception): + pass class Spec(object): + def __init__(self, name): self.name = name @@ -25,23 +104,49 @@ class Spec(object): return value + def read(self, frame, data): + """Returns the (value, left_data) or raises SpecError""" + + raise NotImplementedError + + def write(self, frame, value): + raise NotImplementedError + + def validate(self, frame, value): + """Returns the validated data or raises ValueError/TypeError""" + + raise NotImplementedError + class ByteSpec(Spec): def read(self, frame, data): - return ord(data[0]), data[1:] + return bytearray(data)[0], data[1:] def write(self, frame, value): - return chr(value) + return chr_(value) def validate(self, frame, value): if value is not None: - chr(value) + chr_(value) + return value + + +class PictureTypeSpec(ByteSpec): + + def read(self, frame, data): + value, data = ByteSpec.read(self, frame, data) + return PictureType(value), data + + def validate(self, frame, value): + value = ByteSpec.validate(self, frame, value) + if value is not None: + return PictureType(value) return value class IntegerSpec(Spec): def read(self, frame, data): - return int(BitPaddedInt(data, bits=8)), '' + return int(BitPaddedInt(data, bits=8)), b'' def write(self, frame, value): return BitPaddedInt.to_str(value, bits=8, 
width=-1) @@ -64,98 +169,148 @@ class SizedIntegerSpec(Spec): return value +@enum +class Encoding(object): + """Text Encoding""" + + LATIN1 = 0 + """ISO-8859-1""" + + UTF16 = 1 + """UTF-16 with BOM""" + + UTF16BE = 2 + """UTF-16BE without BOM""" + + UTF8 = 3 + """UTF-8""" + + class EncodingSpec(ByteSpec): + def read(self, frame, data): enc, data = super(EncodingSpec, self).read(frame, data) - if enc < 16: - return enc, data - else: - return 0, chr(enc)+data + if enc not in (Encoding.LATIN1, Encoding.UTF16, Encoding.UTF16BE, + Encoding.UTF8): + raise SpecError('Invalid Encoding: %r' % enc) + return Encoding(enc), data def validate(self, frame, value): - if 0 <= value <= 3: - return value if value is None: return None - raise ValueError('Invalid Encoding: %r' % value) + if value not in (Encoding.LATIN1, Encoding.UTF16, Encoding.UTF16BE, + Encoding.UTF8): + raise ValueError('Invalid Encoding: %r' % value) + return Encoding(value) def _validate23(self, frame, value, **kwargs): # only 0, 1 are valid in v2.3, default to utf-16 - return min(1, value) + if value not in (Encoding.LATIN1, Encoding.UTF16): + value = Encoding.UTF16 + return value class StringSpec(Spec): + """A fixed size ASCII only payload.""" + def __init__(self, name, length): super(StringSpec, self).__init__(name) self.len = length def read(s, frame, data): - return data[:s.len], data[s.len:] + chunk = data[:s.len] + try: + ascii = chunk.decode("ascii") + except UnicodeDecodeError: + raise SpecError("not ascii") + else: + if PY3: + chunk = ascii + + return chunk, data[s.len:] def write(s, frame, value): if value is None: - return '\x00' * s.len + return b'\x00' * s.len else: - return (str(value) + '\x00' * s.len)[:s.len] + if PY3: + value = value.encode("ascii") + return (bytes(value) + b'\x00' * s.len)[:s.len] def validate(s, frame, value): if value is None: return None - if isinstance(value, basestring) and len(value) == s.len: + + if PY3: + if not isinstance(value, str): + raise TypeError("%s has to be 
str" % s.name) + value.encode("ascii") + else: + if not isinstance(value, bytes): + value = value.encode("ascii") + + if len(value) == s.len: return value + raise ValueError('Invalid StringSpec[%d] data: %r' % (s.len, value)) class BinaryDataSpec(Spec): def read(self, frame, data): - return data, '' + return data, b'' def write(self, frame, value): - return str(value) + if value is None: + return b"" + if isinstance(value, bytes): + return value + value = text_type(value).encode("ascii") + return value def validate(self, frame, value): - return str(value) + if value is None: + return None + + if isinstance(value, bytes): + return value + elif PY3: + raise TypeError("%s has to be bytes" % self.name) + + value = text_type(value).encode("ascii") + return value class EncodedTextSpec(Spec): - # Okay, seriously. This is private and defined explicitly and - # completely by the ID3 specification. You can't just add - # encodings here however you want. - _encodings = ( - ('latin1', '\x00'), - ('utf16', '\x00\x00'), - ('utf_16_be', '\x00\x00'), - ('utf8', '\x00') - ) + + _encodings = { + Encoding.LATIN1: ('latin1', b'\x00'), + Encoding.UTF16: ('utf16', b'\x00\x00'), + Encoding.UTF16BE: ('utf_16_be', b'\x00\x00'), + Encoding.UTF8: ('utf8', b'\x00'), + } def read(self, frame, data): enc, term = self._encodings[frame.encoding] - ret = '' - if len(term) == 1: - if term in data: - data, ret = data.split(term, 1) - else: - offset = -1 - try: - while True: - offset = data.index(term, offset+1) - if offset & 1: - continue - data, ret = data[0:offset], data[offset+2:] - break - except ValueError: - pass + try: + # allow missing termination + return decode_terminated(data, enc, strict=False) + except ValueError: + # utf-16 termination with missing BOM, or single NULL + if not data[:len(term)].strip(b"\x00"): + return u"", data[len(term):] - if len(data) < len(term): - return u'', ret - return data.decode(enc), ret + # utf-16 data with single NULL, see issue 169 + try: + return 
decode_terminated(data + b"\x00", enc) + except ValueError: + raise SpecError("Decoding error") def write(self, frame, value): enc, term = self._encodings[frame.encoding] return value.encode(enc) + term def validate(self, frame, value): - return unicode(value) + return text_type(value) class MultiSpec(Spec): @@ -184,28 +339,28 @@ class MultiSpec(Spec): data.append(self.specs[0].write(frame, v)) else: for record in value: - for v, s in zip(record, self.specs): + for v, s in izip(record, self.specs): data.append(s.write(frame, v)) - return ''.join(data) + return b''.join(data) def validate(self, frame, value): if value is None: return [] - if self.sep and isinstance(value, basestring): + if self.sep and isinstance(value, string_types): value = value.split(self.sep) if isinstance(value, list): if len(self.specs) == 1: return [self.specs[0].validate(frame, v) for v in value] else: return [ - [s.validate(frame, v) for (v, s) in zip(val, self.specs)] + [s.validate(frame, v) for (v, s) in izip(val, self.specs)] for val in value] raise ValueError('Invalid MultiSpec data: %r' % value) def _validate23(self, frame, value, **kwargs): if len(self.specs) != 1: return [[s._validate23(frame, v, **kwargs) - for (v, s) in zip(val, self.specs)] + for (v, s) in izip(val, self.specs)] for val in value] spec = self.specs[0] @@ -232,19 +387,21 @@ class EncodedNumericPartTextSpec(EncodedTextSpec): class Latin1TextSpec(EncodedTextSpec): def read(self, frame, data): - if '\x00' in data: - data, ret = data.split('\x00', 1) + if b'\x00' in data: + data, ret = data.split(b'\x00', 1) else: - ret = '' + ret = b'' return data.decode('latin1'), ret def write(self, data, value): - return value.encode('latin1') + '\x00' + return value.encode('latin1') + b'\x00' def validate(self, frame, value): - return unicode(value) + return text_type(value) +@swap_to_string +@total_ordering class ID3TimeStamp(object): """A time stamp in ID3v2 format. 
@@ -261,6 +418,11 @@ class ID3TimeStamp(object): def __init__(self, text): if isinstance(text, ID3TimeStamp): text = text.text + elif not isinstance(text, text_type): + if PY3: + raise TypeError("not a str") + text = text.decode("utf-8") + self.text = text __formats = ['%04d'] + ['%02d'] * 5 @@ -270,7 +432,9 @@ class ID3TimeStamp(object): parts = [self.year, self.month, self.day, self.hour, self.minute, self.second] pieces = [] - for i, part in enumerate(iter(iter(parts).next, None)): + for i, part in enumerate(parts): + if part is None: + break pieces.append(self.__formats[i] % part + self.__seps[i]) return u''.join(pieces)[:-1] @@ -289,11 +453,17 @@ class ID3TimeStamp(object): def __str__(self): return self.text + def __bytes__(self): + return self.text.encode("utf-8") + def __repr__(self): return repr(self.text) - def __cmp__(self, other): - return cmp(self.text, other.text) + def __eq__(self, other): + return self.text == other.text + + def __lt__(self, other): + return self.text < other.text __hash__ = object.__hash__ @@ -319,22 +489,26 @@ class TimeStampSpec(EncodedTextSpec): class ChannelSpec(ByteSpec): (OTHER, MASTER, FRONTRIGHT, FRONTLEFT, BACKRIGHT, BACKLEFT, FRONTCENTRE, - BACKCENTRE, SUBWOOFER) = range(9) + BACKCENTRE, SUBWOOFER) = xrange(9) class VolumeAdjustmentSpec(Spec): def read(self, frame, data): value, = unpack('>h', data[0:2]) - return value/512.0, data[2:] + return value / 512.0, data[2:] def write(self, frame, value): - return pack('>h', int(round(value * 512))) + number = int(round(value * 512)) + # pack only fails in 2.7, do it manually in 2.6 + if not -32768 <= number <= 32767: + raise SpecError("not in range") + return pack('>h', number) def validate(self, frame, value): if value is not None: try: self.write(frame, value) - except struct.error: + except SpecError: raise ValueError("out of range") return value @@ -343,27 +517,32 @@ class VolumePeakSpec(Spec): def read(self, frame, data): # 
http://bugs.xmms.org/attachment.cgi?id=113&action=view peak = 0 - bits = ord(data[0]) - bytes = min(4, (bits + 7) >> 3) + data_array = bytearray(data) + bits = data_array[0] + vol_bytes = min(4, (bits + 7) >> 3) # not enough frame data - if bytes + 1 > len(data): - raise ID3JunkFrameError - shift = ((8 - (bits & 7)) & 7) + (4 - bytes) * 8 - for i in range(1, bytes+1): + if vol_bytes + 1 > len(data): + raise SpecError("not enough frame data") + shift = ((8 - (bits & 7)) & 7) + (4 - vol_bytes) * 8 + for i in xrange(1, vol_bytes + 1): peak *= 256 - peak += ord(data[i]) + peak += data_array[i] peak *= 2 ** shift - return (float(peak) / (2**31-1)), data[1+bytes:] + return (float(peak) / (2 ** 31 - 1)), data[1 + vol_bytes:] def write(self, frame, value): + number = int(round(value * 32768)) + # pack only fails in 2.7, do it manually in 2.6 + if not 0 <= number <= 65535: + raise SpecError("not in range") # always write as 16 bits for sanity. - return "\x10" + pack('>H', int(round(value * 32768))) + return b"\x10" + pack('>H', number) def validate(self, frame, value): if value is not None: try: self.write(frame, value) - except struct.error: + except SpecError: raise ValueError("out of range") return value @@ -373,26 +552,26 @@ class SynchronizedTextSpec(EncodedTextSpec): texts = [] encoding, term = self._encodings[frame.encoding] while data: - l = len(term) try: - value_idx = data.index(term) + value, data = decode_terminated(data, encoding) except ValueError: - raise ID3JunkFrameError - value = data[:value_idx].decode(encoding) - if len(data) < value_idx + l + 4: - raise ID3JunkFrameError - time, = struct.unpack(">I", data[value_idx+l:value_idx+l+4]) + raise SpecError("decoding error") + + if len(data) < 4: + raise SpecError("not enough data") + time, = struct.unpack(">I", data[:4]) + texts.append((value, time)) - data = data[value_idx+l+4:] - return texts, "" + data = data[4:] + return texts, b"" def write(self, frame, value): data = [] encoding, term = 
self._encodings[frame.encoding] - for text, time in frame.text: + for text, time in value: text = text.encode(encoding) + term data.append(text + struct.pack(">I", time)) - return "".join(data) + return b"".join(data) def validate(self, frame, value): return value @@ -407,7 +586,7 @@ class KeyEventSpec(Spec): return events, data def write(self, frame, value): - return "".join([struct.pack(">bI", *event) for event in value]) + return b"".join(struct.pack(">bI", *event) for event in value) def validate(self, frame, value): return value @@ -423,14 +602,13 @@ class VolumeAdjustmentsSpec(Spec): freq /= 2.0 adj /= 512.0 adjustments[freq] = adj - adjustments = adjustments.items() - adjustments.sort() + adjustments = sorted(adjustments.items()) return adjustments, data def write(self, frame, value): value.sort() - return "".join([struct.pack(">Hh", int(freq * 2), int(adj * 512)) - for (freq, adj) in value]) + return b"".join(struct.pack(">Hh", int(freq * 2), int(adj * 512)) + for (freq, adj) in value) def validate(self, frame, value): return value @@ -445,12 +623,14 @@ class ASPIIndexSpec(Spec): format = "B" size = 1 else: - warn("invalid bit count in ASPI (%d)" % frame.b, ID3Warning) - return [], data + raise SpecError("invalid bit count in ASPI (%d)" % frame.b) indexes = data[:frame.N * size] data = data[frame.N * size:] - return list(struct.unpack(">" + format * frame.N, indexes)), data + try: + return list(struct.unpack(">" + format * frame.N, indexes)), data + except struct.error as e: + raise SpecError(e) def write(self, frame, values): if frame.b == 16: @@ -458,8 +638,11 @@ class ASPIIndexSpec(Spec): elif frame.b == 8: format = "B" else: - raise ValueError("frame.b must be 8 or 16") - return struct.pack(">" + format * frame.N, *values) + raise SpecError("frame.b must be 8 or 16") + try: + return struct.pack(">" + format * frame.N, *values) + except struct.error as e: + raise SpecError(e) def validate(self, frame, values): return values diff --git 
a/libs/mutagen/_id3util.py b/libs/mutagen/id3/_util.py similarity index 64% rename from libs/mutagen/_id3util.py rename to libs/mutagen/id3/_util.py index de82e36a..29f7241d 100644 --- a/libs/mutagen/_id3util.py +++ b/libs/mutagen/id3/_util.py @@ -1,12 +1,18 @@ +# -*- coding: utf-8 -*- + # Copyright (C) 2005 Michael Urman # 2013 Christoph Reiter +# 2014 Ben Ockmore # # This program is free software; you can redistribute it and/or modify # it under the terms of version 2 of the GNU General Public License as # published by the Free Software Foundation. +from .._compat import long_, integer_types, PY3 +from .._util import MutagenError -class error(Exception): + +class error(MutagenError): pass @@ -14,18 +20,6 @@ class ID3NoHeaderError(error, ValueError): pass -class ID3BadUnsynchData(error, ValueError): - pass - - -class ID3BadCompressedData(error, ValueError): - pass - - -class ID3TagError(error, ValueError): - pass - - class ID3UnsupportedVersionError(error, NotImplementedError): pass @@ -38,50 +32,29 @@ class ID3JunkFrameError(error, ValueError): pass -class ID3Warning(error, UserWarning): - pass - - class unsynch(object): @staticmethod def decode(value): - output = [] - safe = True - append = output.append - for val in value: - if safe: - append(val) - safe = val != '\xFF' - else: - if val >= '\xE0': - raise ValueError('invalid sync-safe string') - elif val != '\x00': - append(val) - safe = True - if not safe: + fragments = bytearray(value).split(b'\xff') + if len(fragments) > 1 and not fragments[-1]: raise ValueError('string ended unsafe') - return ''.join(output) + + for f in fragments[1:]: + if (not f) or (f[0] >= 0xE0): + raise ValueError('invalid sync-safe string') + + if f[0] == 0x00: + del f[0] + + return bytes(bytearray(b'\xff').join(fragments)) @staticmethod def encode(value): - output = [] - safe = True - append = output.append - for val in value: - if safe: - append(val) - if val == '\xFF': - safe = False - elif val == '\x00' or val >= '\xE0': - 
append('\x00') - append(val) - safe = val != '\xFF' - else: - append(val) - safe = True - if not safe: - append('\x00') - return ''.join(output) + fragments = bytearray(value).split(b'\xff') + for f in fragments[1:]: + if (not f) or (f[0] >= 0xE0) or (f[0] == 0x00): + f.insert(0, 0x00) + return bytes(bytearray(b'\xff').join(fragments)) class _BitPaddedMixin(object): @@ -111,11 +84,11 @@ class _BitPaddedMixin(object): while value: append(value & mask) value >>= bits - bytes_ = bytes_.ljust(minwidth, "\x00") + bytes_ = bytes_.ljust(minwidth, b"\x00") if bigendian: bytes_.reverse() - return str(bytes_) + return bytes(bytes_) @staticmethod def has_valid_padding(value, bits=7): @@ -125,14 +98,14 @@ class _BitPaddedMixin(object): mask = (((1 << (8 - bits)) - 1) << bits) - if isinstance(value, (int, long)): + if isinstance(value, integer_types): while value: if value & mask: return False value >>= 8 - elif isinstance(value, str): - for byte in value: - if ord(byte) & mask: + elif isinstance(value, bytes): + for byte in bytearray(value): + if byte & mask: return False else: raise TypeError @@ -148,29 +121,47 @@ class BitPaddedInt(int, _BitPaddedMixin): numeric_value = 0 shift = 0 - if isinstance(value, (int, long)): + if isinstance(value, integer_types): while value: numeric_value += (value & mask) << shift value >>= 8 shift += bits - elif isinstance(value, str): + elif isinstance(value, bytes): if bigendian: value = reversed(value) - for byte in value: - numeric_value += (ord(byte) & mask) << shift + for byte in bytearray(value): + numeric_value += (byte & mask) << shift shift += bits else: raise TypeError - if isinstance(numeric_value, long): - self = long.__new__(BitPaddedLong, numeric_value) - else: + if isinstance(numeric_value, int): self = int.__new__(BitPaddedInt, numeric_value) + else: + self = long_.__new__(BitPaddedLong, numeric_value) self.bits = bits self.bigendian = bigendian return self +if PY3: + BitPaddedLong = BitPaddedInt +else: + class 
BitPaddedLong(long_, _BitPaddedMixin): + pass -class BitPaddedLong(long, _BitPaddedMixin): - pass + +class ID3BadUnsynchData(error, ValueError): + """Deprecated""" + + +class ID3BadCompressedData(error, ValueError): + """Deprecated""" + + +class ID3TagError(error, ValueError): + """Deprecated""" + + +class ID3Warning(error, UserWarning): + """Deprecated""" diff --git a/libs/mutagen/m4a.py b/libs/mutagen/m4a.py index 64b89679..3ed148c5 100644 --- a/libs/mutagen/m4a.py +++ b/libs/mutagen/m4a.py @@ -1,36 +1,27 @@ +# -*- coding: utf-8 -*- # Copyright 2006 Joe Wreschnig # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 as # published by the Free Software Foundation. -"""Read and write MPEG-4 audio files with iTunes metadata. - -This module will read MPEG-4 audio information and metadata, -as found in Apple's M4A (aka MP4, M4B, M4P) files. - -There is no official specification for this format. The source code -for TagLib, FAAD, and various MPEG specifications at -http://developer.apple.com/documentation/QuickTime/QTFF/, -http://www.geocities.com/xhelmboyx/quicktime/formats/mp4-layout.txt, -and http://wiki.multimedia.cx/index.php?title=Apple_QuickTime were all -consulted. - -This module does not support 64 bit atom sizes, and so will not -work on metadata over 4GB. +""" +since 1.9: mutagen.m4a is deprecated; use mutagen.mp4 instead. +since 1.31: mutagen.m4a will no longer work; any operation that could fail + will fail now. 
""" -import struct -import sys +import warnings -from cStringIO import StringIO +from mutagen import FileType, Tags, StreamInfo +from ._util import DictProxy, MutagenError -from mutagen import FileType, Metadata -from mutagen._constants import GENRES -from mutagen._util import cdata, insert_bytes, delete_bytes, DictProxy +warnings.warn( + "mutagen.m4a is deprecated; use mutagen.mp4 instead.", + DeprecationWarning) -class error(IOError): +class error(IOError, MutagenError): pass @@ -46,493 +37,65 @@ class M4AMetadataValueError(ValueError, M4AMetadataError): pass -import warnings -warnings.warn( - "mutagen.m4a is deprecated; use mutagen.mp4 instead.", DeprecationWarning) - - -# This is not an exhaustive list of container atoms, but just the -# ones this module needs to peek inside. -_CONTAINERS = ["moov", "udta", "trak", "mdia", "meta", "ilst", - "stbl", "minf", "stsd"] -_SKIP_SIZE = {"meta": 4} - __all__ = ['M4A', 'Open', 'delete', 'M4ACover'] -class M4ACover(str): - """A cover artwork. +class M4ACover(bytes): - Attributes: - imageformat -- format of the image (either FORMAT_JPEG or FORMAT_PNG) - """ FORMAT_JPEG = 0x0D FORMAT_PNG = 0x0E def __new__(cls, data, imageformat=None): - self = str.__new__(cls, data) + self = bytes.__new__(cls, data) if imageformat is None: imageformat = M4ACover.FORMAT_JPEG self.imageformat = imageformat - try: - self.format - except AttributeError: - self.format = imageformat return self -class Atom(object): - """An individual atom. - - Attributes: - children -- list child atoms (or None for non-container atoms) - length -- length of this atom, including length and name - name -- four byte name of the atom, as a str - offset -- location in the constructor-given fileobj of this atom - - This structure should only be used internally by Mutagen. 
- """ - - children = None - - def __init__(self, fileobj): - self.offset = fileobj.tell() - self.length, self.name = struct.unpack(">I4s", fileobj.read(8)) - if self.length == 1: - raise error("64 bit atom sizes are not supported") - elif self.length < 8: - return - - if self.name in _CONTAINERS: - self.children = [] - fileobj.seek(_SKIP_SIZE.get(self.name, 0), 1) - while fileobj.tell() < self.offset + self.length: - self.children.append(Atom(fileobj)) - else: - fileobj.seek(self.offset + self.length, 0) - - @staticmethod - def render(name, data): - """Render raw atom data.""" - # this raises OverflowError if Py_ssize_t can't handle the atom data - size = len(data) + 8 - if size <= 0xFFFFFFFF: - return struct.pack(">I4s", size, name) + data - else: - return struct.pack(">I4sQ", 1, name, size + 8) + data - - def __getitem__(self, remaining): - """Look up a child atom, potentially recursively. - - e.g. atom['udta', 'meta'] => - """ - if not remaining: - return self - elif self.children is None: - raise KeyError("%r is not a container" % self.name) - for child in self.children: - if child.name == remaining[0]: - return child[remaining[1:]] - else: - raise KeyError("%r not found" % remaining[0]) - - def __repr__(self): - klass = self.__class__.__name__ - if self.children is None: - return "<%s name=%r length=%r offset=%r>" % ( - klass, self.name, self.length, self.offset) - else: - children = "\n".join([" " + line for child in self.children - for line in repr(child).splitlines()]) - return "<%s name=%r length=%r offset=%r\n%s>" % ( - klass, self.name, self.length, self.offset, children) - - -class Atoms(object): - """Root atoms in a given file. - - Attributes: - atoms -- a list of top-level atoms as Atom objects - - This structure should only be used internally by Mutagen. 
- """ - def __init__(self, fileobj): - self.atoms = [] - fileobj.seek(0, 2) - end = fileobj.tell() - fileobj.seek(0) - while fileobj.tell() < end: - self.atoms.append(Atom(fileobj)) - - def path(self, *names): - """Look up and return the complete path of an atom. - - For example, atoms.path('moov', 'udta', 'meta') will return a - list of three atoms, corresponding to the moov, udta, and meta - atoms. - """ - path = [self] - for name in names: - path.append(path[-1][name, ]) - return path[1:] - - def __getitem__(self, names): - """Look up a child atom. - - 'names' may be a list of atoms (['moov', 'udta']) or a string - specifying the complete path ('moov.udta'). - """ - if isinstance(names, basestring): - names = names.split(".") - for child in self.atoms: - if child.name == names[0]: - return child[names[1:]] - else: - raise KeyError("%s not found" % names[0]) - - def __repr__(self): - return "\n".join([repr(child) for child in self.atoms]) - - -class M4ATags(DictProxy, Metadata): - """Dictionary containing Apple iTunes metadata list key/values. - - Keys are four byte identifiers, except for freeform ('----') - keys. Values are usually unicode strings, but some atoms have a - special structure: - cpil -- boolean - trkn, disk -- tuple of 16 bit ints (current, total) - tmpo -- 16 bit int - covr -- list of M4ACover objects (which are tagged strs) - gnre -- not supported. Use '\\xa9gen' instead. - - The freeform '----' frames use a key in the format '----:mean:name' - where 'mean' is usually 'com.apple.iTunes' and 'name' is a unique - identifier for this frame. The value is a str, but is probably - text that can be decoded as UTF-8. - - M4A tag data cannot exist outside of the structure of an M4A file, - so this class should not be manually instantiated. - - Unknown non-text tags are removed. 
- """ +class M4ATags(DictProxy, Tags): def load(self, atoms, fileobj): - try: - ilst = atoms["moov.udta.meta.ilst"] - except KeyError, key: - raise M4AMetadataError(key) - for atom in ilst.children: - fileobj.seek(atom.offset + 8) - data = fileobj.read(atom.length - 8) - parse = self.__atoms.get(atom.name, (M4ATags.__parse_text,))[0] - parse(self, atom, data) - - @staticmethod - def __key_sort(item1, item2): - (key1, v1) = item1 - (key2, v2) = item2 - # iTunes always writes the tags in order of "relevance", try - # to copy it as closely as possible. - order = ["\xa9nam", "\xa9ART", "\xa9wrt", "\xa9alb", - "\xa9gen", "gnre", "trkn", "disk", - "\xa9day", "cpil", "tmpo", "\xa9too", - "----", "covr", "\xa9lyr"] - order = dict(zip(order, range(len(order)))) - last = len(order) - # If there's no key-based way to distinguish, order by length. - # If there's still no way, go by string comparison on the - # values, so we at least have something determinstic. - return (cmp(order.get(key1[:4], last), order.get(key2[:4], last)) or - cmp(len(v1), len(v2)) or cmp(v1, v2)) + raise error("deprecated") def save(self, filename): - """Save the metadata to the given filename.""" - values = [] - items = self.items() - items.sort(self.__key_sort) - for key, value in items: - render = self.__atoms.get( - key[:4], (None, M4ATags.__render_text))[1] - values.append(render(self, key, value)) - data = Atom.render("ilst", "".join(values)) - - # Find the old atoms. - fileobj = open(filename, "rb+") - try: - atoms = Atoms(fileobj) - - moov = atoms["moov"] - - if moov != atoms.atoms[-1]: - # "Free" the old moov block. Something in the mdat - # block is not happy when its offset changes and it - # won't play back. So, rather than try to figure that - # out, just move the moov atom to the end of the file. 
- offset = self.__move_moov(fileobj, moov) - else: - offset = 0 - - try: - path = atoms.path("moov", "udta", "meta", "ilst") - except KeyError: - self.__save_new(fileobj, atoms, data, offset) - else: - self.__save_existing(fileobj, atoms, path, data, offset) - finally: - fileobj.close() - - def __move_moov(self, fileobj, moov): - fileobj.seek(moov.offset) - data = fileobj.read(moov.length) - fileobj.seek(moov.offset) - free = Atom.render("free", "\x00" * (moov.length - 8)) - fileobj.write(free) - fileobj.seek(0, 2) - # Figure out how far we have to shift all our successive - # seek calls, relative to what the atoms say. - old_end = fileobj.tell() - fileobj.write(data) - return old_end - moov.offset - - def __save_new(self, fileobj, atoms, ilst, offset): - hdlr = Atom.render("hdlr", "\x00" * 8 + "mdirappl" + "\x00" * 9) - meta = Atom.render("meta", "\x00\x00\x00\x00" + hdlr + ilst) - moov, udta = atoms.path("moov", "udta") - insert_bytes(fileobj, len(meta), udta.offset + offset + 8) - fileobj.seek(udta.offset + offset + 8) - fileobj.write(meta) - self.__update_parents(fileobj, [moov, udta], len(meta), offset) - - def __save_existing(self, fileobj, atoms, path, data, offset): - # Replace the old ilst atom. - ilst = path.pop() - delta = len(data) - ilst.length - fileobj.seek(ilst.offset + offset) - if delta > 0: - insert_bytes(fileobj, delta, ilst.offset + offset) - elif delta < 0: - delete_bytes(fileobj, -delta, ilst.offset + offset) - fileobj.seek(ilst.offset + offset) - fileobj.write(data) - self.__update_parents(fileobj, path, delta, offset) - - def __update_parents(self, fileobj, path, delta, offset): - # Update all parent atoms with the new size. 
- for atom in path: - fileobj.seek(atom.offset + offset) - size = cdata.uint_be(fileobj.read(4)) + delta - fileobj.seek(atom.offset + offset) - fileobj.write(cdata.to_uint_be(size)) - - def __render_data(self, key, flags, data): - data = struct.pack(">2I", flags, 0) + data - return Atom.render(key, Atom.render("data", data)) - - def __parse_freeform(self, atom, data): - try: - fileobj = StringIO(data) - mean_length = cdata.uint_be(fileobj.read(4)) - # skip over 8 bytes of atom name, flags - mean = fileobj.read(mean_length - 4)[8:] - name_length = cdata.uint_be(fileobj.read(4)) - name = fileobj.read(name_length - 4)[8:] - value_length = cdata.uint_be(fileobj.read(4)) - # Name, flags, and reserved bytes - value = fileobj.read(value_length - 4)[12:] - except struct.error: - # Some ---- atoms have no data atom, I have no clue why - # they actually end up in the file. - pass - else: - self["%s:%s:%s" % (atom.name, mean, name)] = value - - def __render_freeform(self, key, value): - dummy, mean, name = key.split(":", 2) - mean = struct.pack(">I4sI", len(mean) + 12, "mean", 0) + mean - name = struct.pack(">I4sI", len(name) + 12, "name", 0) + name - value = struct.pack(">I4s2I", len(value) + 16, "data", 0x1, 0) + value - final = mean + name + value - return Atom.render("----", final) - - def __parse_pair(self, atom, data): - self[atom.name] = struct.unpack(">2H", data[18:22]) - - def __render_pair(self, key, value): - track, total = value - if 0 <= track < 1 << 16 and 0 <= total < 1 << 16: - data = struct.pack(">4H", 0, track, total, 0) - return self.__render_data(key, 0, data) - else: - raise M4AMetadataValueError("invalid numeric pair %r" % (value,)) - - def __render_pair_no_trailing(self, key, value): - track, total = value - if 0 <= track < 1 << 16 and 0 <= total < 1 << 16: - data = struct.pack(">3H", 0, track, total) - return self.__render_data(key, 0, data) - else: - raise M4AMetadataValueError("invalid numeric pair %r" % (value,)) - - def __parse_genre(self, atom, 
data): - # Translate to a freeform genre. - genre = cdata.short_be(data[16:18]) - if "\xa9gen" not in self: - try: - self["\xa9gen"] = GENRES[genre - 1] - except IndexError: - pass - - def __parse_tempo(self, atom, data): - self[atom.name] = cdata.short_be(data[16:18]) - - def __render_tempo(self, key, value): - if 0 <= value < 1 << 16: - return self.__render_data(key, 0x15, cdata.to_ushort_be(value)) - else: - raise M4AMetadataValueError("invalid short integer %r" % value) - - def __parse_compilation(self, atom, data): - try: - self[atom.name] = bool(ord(data[16:17])) - except TypeError: - self[atom.name] = False - - def __render_compilation(self, key, value): - return self.__render_data(key, 0x15, chr(bool(value))) - - def __parse_cover(self, atom, data): - length, name, imageformat = struct.unpack(">I4sI", data[:12]) - if name != "data": - raise M4AMetadataError( - "unexpected atom %r inside 'covr'" % name) - if imageformat not in (M4ACover.FORMAT_JPEG, M4ACover.FORMAT_PNG): - imageformat = M4ACover.FORMAT_JPEG - self[atom.name] = M4ACover(data[16:length], imageformat) - - def __render_cover(self, key, value): - try: - imageformat = value.imageformat - except AttributeError: - imageformat = M4ACover.FORMAT_JPEG - data = Atom.render("data", struct.pack(">2I", imageformat, 0) + value) - return Atom.render(key, data) - - def __parse_text(self, atom, data): - flags = cdata.uint_be(data[8:12]) - if flags == 1: - self[atom.name] = data[16:].decode('utf-8', 'replace') - - def __render_text(self, key, value): - return self.__render_data(key, 0x1, value.encode('utf-8')) + raise error("deprecated") def delete(self, filename): - self.clear() - self.save(filename) - - __atoms = { - "----": (__parse_freeform, __render_freeform), - "trkn": (__parse_pair, __render_pair), - "disk": (__parse_pair, __render_pair_no_trailing), - "gnre": (__parse_genre, None), - "tmpo": (__parse_tempo, __render_tempo), - "cpil": (__parse_compilation, __render_compilation), - "covr": (__parse_cover, 
__render_cover), - } + raise error("deprecated") def pprint(self): - values = [] - for key, value in self.iteritems(): - key = key.decode('latin1') - try: - values.append("%s=%s" % (key, value)) - except UnicodeDecodeError: - values.append("%s=[%d bytes of data]" % (key, len(value))) - return "\n".join(values) + return u"" -class M4AInfo(object): - """MPEG-4 stream information. - - Attributes: - bitrate -- bitrate in bits per second, as an int - length -- file length in seconds, as a float - """ +class M4AInfo(StreamInfo): bitrate = 0 def __init__(self, atoms, fileobj): - hdlr = atoms["moov.trak.mdia.hdlr"] - fileobj.seek(hdlr.offset) - if "soun" not in fileobj.read(hdlr.length): - raise M4AStreamInfoError("track has no audio data") - - mdhd = atoms["moov.trak.mdia.mdhd"] - fileobj.seek(mdhd.offset) - data = fileobj.read(mdhd.length) - if ord(data[8]) == 0: - offset = 20 - fmt = ">2I" - else: - offset = 28 - fmt = ">IQ" - end = offset + struct.calcsize(fmt) - unit, length = struct.unpack(fmt, data[offset:end]) - self.length = float(length) / unit - - try: - atom = atoms["moov.trak.mdia.minf.stbl.stsd"] - fileobj.seek(atom.offset) - data = fileobj.read(atom.length) - self.bitrate = cdata.uint_be(data[-17:-13]) - except (ValueError, KeyError): - # Bitrate values are optional. - pass + raise error("deprecated") def pprint(self): - return "MPEG-4 audio, %.2f seconds, %d bps" % ( - self.length, self.bitrate) + return u"" class M4A(FileType): - """An MPEG-4 audio file, probably containing AAC. - - If more than one track is present in the file, the first is used. - Only audio ('soun') tracks will be read. 
- """ _mimes = ["audio/mp4", "audio/x-m4a", "audio/mpeg4", "audio/aac"] def load(self, filename): - self.filename = filename - fileobj = open(filename, "rb") - try: - atoms = Atoms(fileobj) - try: - self.info = M4AInfo(atoms, fileobj) - except StandardError, err: - raise M4AStreamInfoError, err, sys.exc_info()[2] - try: - self.tags = M4ATags(atoms, fileobj) - except M4AMetadataError: - self.tags = None - except StandardError, err: - raise M4AMetadataError, err, sys.exc_info()[2] - finally: - fileobj.close() + raise error("deprecated") def add_tags(self): self.tags = M4ATags() @staticmethod def score(filename, fileobj, header): - return ("ftyp" in header) + ("mp4" in header) + return 0 Open = M4A def delete(filename): - """Remove tags from a file.""" - - M4A(filename).delete() + raise error("deprecated") diff --git a/libs/mutagen/monkeysaudio.py b/libs/mutagen/monkeysaudio.py index 355749b9..0e29273f 100644 --- a/libs/mutagen/monkeysaudio.py +++ b/libs/mutagen/monkeysaudio.py @@ -1,6 +1,6 @@ -# A Monkey's Audio (APE) reader/tagger -# -# Copyright 2006 Lukas Lalinsky +# -*- coding: utf-8 -*- + +# Copyright (C) 2006 Lukas Lalinsky # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 as @@ -18,6 +18,8 @@ __all__ = ["MonkeysAudio", "Open", "delete"] import struct +from ._compat import endswith +from mutagen import StreamInfo from mutagen.apev2 import APEv2File, error, delete from mutagen._util import cdata @@ -26,7 +28,7 @@ class MonkeysAudioHeaderError(error): pass -class MonkeysAudioInfo(object): +class MonkeysAudioInfo(StreamInfo): """Monkey's Audio stream information. 
Attributes: @@ -40,7 +42,7 @@ class MonkeysAudioInfo(object): def __init__(self, fileobj): header = fileobj.read(76) - if len(header) != 76 or not header.startswith("MAC "): + if len(header) != 76 or not header.startswith(b"MAC "): raise MonkeysAudioHeaderError("not a Monkey's Audio file") self.version = cdata.ushort_le(header[4:6]) if self.version >= 3980: @@ -62,13 +64,13 @@ class MonkeysAudioInfo(object): blocks_per_frame = 9216 self.version /= 1000.0 self.length = 0.0 - if self.sample_rate != 0 and total_frames > 0: + if (self.sample_rate != 0) and (total_frames > 0): total_blocks = ((total_frames - 1) * blocks_per_frame + final_frame_blocks) self.length = float(total_blocks) / self.sample_rate def pprint(self): - return "Monkey's Audio %.2f, %.2f seconds, %d Hz" % ( + return u"Monkey's Audio %.2f, %.2f seconds, %d Hz" % ( self.version, self.length, self.sample_rate) @@ -78,7 +80,7 @@ class MonkeysAudio(APEv2File): @staticmethod def score(filename, fileobj, header): - return header.startswith("MAC ") + filename.lower().endswith(".ape") + return header.startswith(b"MAC ") + endswith(filename.lower(), ".ape") Open = MonkeysAudio diff --git a/libs/mutagen/mp3.py b/libs/mutagen/mp3.py index 2426610b..afb600cf 100644 --- a/libs/mutagen/mp3.py +++ b/libs/mutagen/mp3.py @@ -1,5 +1,6 @@ -# MP3 stream header information support for Mutagen. 
-# Copyright 2006 Joe Wreschnig +# -*- coding: utf-8 -*- + +# Copyright (C) 2006 Joe Wreschnig # # This program is free software; you can redistribute it and/or modify # it under the terms of version 2 of the GNU General Public License as @@ -10,12 +11,16 @@ import os import struct +from ._compat import endswith, xrange +from ._mp3util import XingHeader, XingHeaderError, VBRIHeader, VBRIHeaderError +from mutagen import StreamInfo +from mutagen._util import MutagenError, enum from mutagen.id3 import ID3FileType, BitPaddedInt, delete __all__ = ["MP3", "Open", "delete", "MP3"] -class error(RuntimeError): +class error(RuntimeError, MutagenError): pass @@ -27,11 +32,50 @@ class InvalidMPEGHeader(error, IOError): pass +@enum +class BitrateMode(object): + + UNKNOWN = 0 + """Probably a CBR file, but not sure""" + + CBR = 1 + """Constant Bitrate""" + + VBR = 2 + """Variable Bitrate""" + + ABR = 3 + """Average Bitrate (a variant of VBR)""" + + +def _guess_xing_bitrate_mode(xing): + + if xing.lame_header: + lame = xing.lame_header + if lame.vbr_method in (1, 8): + return BitrateMode.CBR + elif lame.vbr_method in (2, 9): + return BitrateMode.ABR + elif lame.vbr_method in (3, 4, 5, 6): + return BitrateMode.VBR + # everything else undefined, continue guessing + + # info tags get only written by lame for cbr files + if xing.is_info: + return BitrateMode.CBR + + # older lame and non-lame with some variant of vbr + if xing.vbr_scale != -1 or xing.lame_version: + return BitrateMode.VBR + + return BitrateMode.UNKNOWN + + # Mode values. -STEREO, JOINTSTEREO, DUALCHANNEL, MONO = range(4) +STEREO, JOINTSTEREO, DUALCHANNEL, MONO = xrange(4) -class MPEGInfo(object): +class MPEGInfo(StreamInfo): """MPEG audio stream information Parse information about an MPEG audio file. 
This also reads the @@ -43,8 +87,18 @@ class MPEGInfo(object): Useful attributes: * length -- audio length, in seconds + * channels -- number of audio channels * bitrate -- audio bitrate, in bits per second * sketchy -- if true, the file may not be valid MPEG audio + * encoder_info -- a string containing encoder name and possibly version. + In case a lame tag is present this will start with + ``"LAME "``, if unknown it is empty, otherwise the + text format is undefined. + * bitrate_mode -- a :class:`BitrateMode` + + * track_gain -- replaygain track gain (89db) or None + * track_peak -- replaygain track peak or None + * album_gain -- replaygain album gain (89db) or None Useless attributes: @@ -58,19 +112,20 @@ class MPEGInfo(object): # Map (version, layer) tuples to bitrates. __BITRATE = { - (1, 1): range(0, 480, 32), + (1, 1): [0, 32, 64, 96, 128, 160, 192, 224, + 256, 288, 320, 352, 384, 416, 448], (1, 2): [0, 32, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 256, 320, 384], (1, 3): [0, 32, 40, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 256, 320], (2, 1): [0, 32, 48, 56, 64, 80, 96, 112, 128, 144, 160, 176, 192, 224, 256], - (2, 2): [0, 8, 16, 24, 32, 40, 48, 56, 64, + (2, 2): [0, 8, 16, 24, 32, 40, 48, 56, 64, 80, 96, 112, 128, 144, 160], } __BITRATE[(2, 3)] = __BITRATE[(2, 2)] - for i in range(1, 4): + for i in xrange(1, 4): __BITRATE[(2.5, i)] = __BITRATE[(2, i)] # Map version to sample rates. @@ -81,6 +136,9 @@ class MPEGInfo(object): } sketchy = False + encoder_info = u"" + bitrate_mode = BitrateMode.UNKNOWN + track_gain = track_peak = album_gain = album_peak = None def __init__(self, fileobj, offset=None): """Parse MPEG stream information from a file-like object. 
@@ -104,9 +162,9 @@ class MPEGInfo(object): try: id3, insize = struct.unpack('>3sxxx4s', idata) except struct.error: - id3, insize = '', 0 + id3, insize = b'', 0 insize = BitPaddedInt(insize) - if id3 == 'ID3' and insize > 0: + if id3 == b'ID3' and insize > 0: offset = insize + 10 else: offset = 0 @@ -138,11 +196,11 @@ class MPEGInfo(object): # is assuming the offset didn't lie. data = fileobj.read(32768) - frame_1 = data.find("\xff") - while 0 <= frame_1 <= len(data) - 4: + frame_1 = data.find(b"\xff") + while 0 <= frame_1 <= (len(data) - 4): frame_data = struct.unpack(">I", data[frame_1:frame_1 + 4])[0] - if (frame_data >> 16) & 0xE0 != 0xE0: - frame_1 = data.find("\xff", frame_1 + 2) + if ((frame_data >> 16) & 0xE0) != 0xE0: + frame_1 = data.find(b"\xff", frame_1 + 2) else: version = (frame_data >> 19) & 0x3 layer = (frame_data >> 17) & 0x3 @@ -150,20 +208,22 @@ class MPEGInfo(object): bitrate = (frame_data >> 12) & 0xF sample_rate = (frame_data >> 10) & 0x3 padding = (frame_data >> 9) & 0x1 - #private = (frame_data >> 8) & 0x1 + # private = (frame_data >> 8) & 0x1 self.mode = (frame_data >> 6) & 0x3 - #mode_extension = (frame_data >> 4) & 0x3 - #copyright = (frame_data >> 3) & 0x1 - #original = (frame_data >> 2) & 0x1 - #emphasis = (frame_data >> 0) & 0x3 + # mode_extension = (frame_data >> 4) & 0x3 + # copyright = (frame_data >> 3) & 0x1 + # original = (frame_data >> 2) & 0x1 + # emphasis = (frame_data >> 0) & 0x3 if (version == 1 or layer == 0 or sample_rate == 0x3 or bitrate == 0 or bitrate == 0xF): - frame_1 = data.find("\xff", frame_1 + 2) + frame_1 = data.find(b"\xff", frame_1 + 2) else: break else: raise HeaderNotFoundError("can't sync to an MPEG frame") + self.channels = 1 if self.mode == MONO else 2 + # There is a serious problem here, which is that many flags # in an MPEG header are backwards. 
self.version = [2.5, None, 2, 1][version] @@ -176,17 +236,18 @@ class MPEGInfo(object): self.sample_rate = self.__RATES[self.version][sample_rate] if self.layer == 1: - frame_length = (12 * self.bitrate / self.sample_rate + padding) * 4 + frame_length = ( + (12 * self.bitrate // self.sample_rate) + padding) * 4 frame_size = 384 elif self.version >= 2 and self.layer == 3: - frame_length = 72 * self.bitrate / self.sample_rate + padding + frame_length = (72 * self.bitrate // self.sample_rate) + padding frame_size = 576 else: - frame_length = 144 * self.bitrate / self.sample_rate + padding + frame_length = (144 * self.bitrate // self.sample_rate) + padding frame_size = 1152 if check_second: - possible = frame_1 + frame_length + possible = int(frame_1 + frame_length) if possible > len(data) + 4: raise HeaderNotFoundError("can't sync to second MPEG frame") try: @@ -194,51 +255,70 @@ class MPEGInfo(object): ">H", data[possible:possible + 2])[0] except struct.error: raise HeaderNotFoundError("can't sync to second MPEG frame") - if frame_data & 0xFFE0 != 0xFFE0: + if (frame_data & 0xFFE0) != 0xFFE0: raise HeaderNotFoundError("can't sync to second MPEG frame") self.length = 8 * real_size / float(self.bitrate) # Try to find/parse the Xing header, which trumps the above length # and bitrate calculation. - fileobj.seek(offset, 0) - data = fileobj.read(32768) + + if self.layer != 3: + return + + # Xing + xing_offset = XingHeader.get_offset(self) + fileobj.seek(offset + frame_1 + xing_offset, 0) try: - xing = data[:-4].index("Xing") - except ValueError: - # Try to find/parse the VBRI header, which trumps the above length - # calculation. - try: - vbri = data[:-24].index("VBRI") - except ValueError: - pass - else: - # If a VBRI header was found, this is definitely MPEG audio. 
- self.sketchy = False - vbri_version = struct.unpack('>H', data[vbri + 4:vbri + 6])[0] - if vbri_version == 1: - frame_count = struct.unpack( - '>I', data[vbri + 14:vbri + 18])[0] - samples = float(frame_size * frame_count) - self.length = (samples / self.sample_rate) or self.length + xing = XingHeader(fileobj) + except XingHeaderError: + pass else: - # If a Xing header was found, this is definitely MPEG audio. + lame = xing.lame_header self.sketchy = False - flags = struct.unpack('>I', data[xing + 4:xing + 8])[0] - if flags & 0x1: - frame_count = struct.unpack('>I', data[xing + 8:xing + 12])[0] - samples = float(frame_size * frame_count) - self.length = (samples / self.sample_rate) or self.length - if flags & 0x2: - bytes = struct.unpack('>I', data[xing + 12:xing + 16])[0] - self.bitrate = int((bytes * 8) // self.length) + self.bitrate_mode = _guess_xing_bitrate_mode(xing) + if xing.frames != -1: + samples = frame_size * xing.frames + if lame is not None: + samples -= lame.encoder_delay_start + samples -= lame.encoder_padding_end + self.length = float(samples) / self.sample_rate + if xing.bytes != -1 and self.length: + self.bitrate = int((xing.bytes * 8) / self.length) + if xing.lame_version: + self.encoder_info = u"LAME %s" % xing.lame_version + if lame is not None: + self.track_gain = lame.track_gain_adjustment + self.track_peak = lame.track_peak + self.album_gain = lame.album_gain_adjustment + return + + # VBRI + vbri_offset = VBRIHeader.get_offset(self) + fileobj.seek(offset + frame_1 + vbri_offset, 0) + try: + vbri = VBRIHeader(fileobj) + except VBRIHeaderError: + pass + else: + self.bitrate_mode = BitrateMode.VBR + self.encoder_info = u"FhG" + self.sketchy = False + self.length = float(frame_size * vbri.frames) / self.sample_rate + if self.length: + self.bitrate = int((vbri.bytes * 8) / self.length) def pprint(self): - s = "MPEG %s layer %d, %d bps, %s Hz, %.2f seconds" % ( - self.version, self.layer, self.bitrate, self.sample_rate, - self.length) + info = 
str(self.bitrate_mode).split(".", 1)[-1] + if self.bitrate_mode == BitrateMode.UNKNOWN: + info = u"CBR?" + if self.encoder_info: + info += ", %s" % self.encoder_info + s = u"MPEG %s layer %d, %d bps (%s), %s Hz, %d chn, %.2f seconds" % ( + self.version, self.layer, self.bitrate, info, + self.sample_rate, self.channels, self.length) if self.sketchy: - s += " (sketchy)" + s += u" (sketchy)" return s @@ -250,15 +330,22 @@ class MP3(ID3FileType): """ _Info = MPEGInfo - _mimes = ["audio/mp3", "audio/x-mp3", "audio/mpeg", "audio/mpg", - "audio/x-mpeg"] + + _mimes = ["audio/mpeg", "audio/mpg", "audio/x-mpeg"] + + @property + def mime(self): + l = self.info.layer + return ["audio/mp%d" % l, "audio/x-mp%d" % l] + super(MP3, self).mime @staticmethod - def score(filename, fileobj, header): + def score(filename, fileobj, header_data): filename = filename.lower() - return (header.startswith("ID3") * 2 + filename.endswith(".mp3") + - filename.endswith(".mp2") + filename.endswith(".mpg") + - filename.endswith(".mpeg")) + + return (header_data.startswith(b"ID3") * 2 + + endswith(filename, b".mp3") + + endswith(filename, b".mp2") + endswith(filename, b".mpg") + + endswith(filename, b".mpeg")) Open = MP3 diff --git a/libs/mutagen/mp4.py b/libs/mutagen/mp4.py deleted file mode 100644 index 984a38c4..00000000 --- a/libs/mutagen/mp4.py +++ /dev/null @@ -1,822 +0,0 @@ -# Copyright 2006 Joe Wreschnig -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 2 as -# published by the Free Software Foundation. - -"""Read and write MPEG-4 audio files with iTunes metadata. - -This module will read MPEG-4 audio information and metadata, -as found in Apple's MP4 (aka M4A, M4B, M4P) files. - -There is no official specification for this format. 
The source code -for TagLib, FAAD, and various MPEG specifications at - -* http://developer.apple.com/documentation/QuickTime/QTFF/ -* http://www.geocities.com/xhelmboyx/quicktime/formats/mp4-layout.txt -* http://standards.iso.org/ittf/PubliclyAvailableStandards/\ -c041828_ISO_IEC_14496-12_2005(E).zip -* http://wiki.multimedia.cx/index.php?title=Apple_QuickTime - -were all consulted. -""" - -import struct -import sys - -from mutagen import FileType, Metadata -from mutagen._constants import GENRES -from mutagen._util import cdata, insert_bytes, DictProxy, utf8 - - -class error(IOError): - pass - - -class MP4MetadataError(error): - pass - - -class MP4StreamInfoError(error): - pass - - -class MP4MetadataValueError(ValueError, MP4MetadataError): - pass - - -# This is not an exhaustive list of container atoms, but just the -# ones this module needs to peek inside. -_CONTAINERS = ["moov", "udta", "trak", "mdia", "meta", "ilst", - "stbl", "minf", "moof", "traf"] -_SKIP_SIZE = {"meta": 4} - -__all__ = ['MP4', 'Open', 'delete', 'MP4Cover', 'MP4FreeForm'] - - -class MP4Cover(str): - """A cover artwork. - - Attributes: - - * imageformat -- format of the image (either FORMAT_JPEG or FORMAT_PNG) - """ - FORMAT_JPEG = 0x0D - FORMAT_PNG = 0x0E - - def __new__(cls, data, *args, **kwargs): - return str.__new__(cls, data) - - def __init__(self, data, imageformat=FORMAT_JPEG): - self.imageformat = imageformat - try: - self.format - except AttributeError: - self.format = imageformat - - -class MP4FreeForm(str): - """A freeform value. - - Attributes: - - * dataformat -- format of the data (either FORMAT_TEXT or FORMAT_DATA) - """ - - FORMAT_DATA = 0x0 - FORMAT_TEXT = 0x1 - - def __new__(cls, data, *args, **kwargs): - return str.__new__(cls, data) - - def __init__(self, data, dataformat=FORMAT_TEXT): - self.dataformat = dataformat - - -class Atom(object): - """An individual atom. 
- - Attributes: - children -- list child atoms (or None for non-container atoms) - length -- length of this atom, including length and name - name -- four byte name of the atom, as a str - offset -- location in the constructor-given fileobj of this atom - - This structure should only be used internally by Mutagen. - """ - - children = None - - def __init__(self, fileobj, level=0): - self.offset = fileobj.tell() - self.length, self.name = struct.unpack(">I4s", fileobj.read(8)) - if self.length == 1: - self.length, = struct.unpack(">Q", fileobj.read(8)) - if self.length < 16: - raise MP4MetadataError( - "64 bit atom length can only be 16 and higher") - elif self.length == 0: - if level != 0: - raise MP4MetadataError( - "only a top-level atom can have zero length") - # Only the last atom is supposed to have a zero-length, meaning it - # extends to the end of file. - fileobj.seek(0, 2) - self.length = fileobj.tell() - self.offset - fileobj.seek(self.offset + 8, 0) - elif self.length < 8: - raise MP4MetadataError( - "atom length can only be 0, 1 or 8 and higher") - - if self.name in _CONTAINERS: - self.children = [] - fileobj.seek(_SKIP_SIZE.get(self.name, 0), 1) - while fileobj.tell() < self.offset + self.length: - self.children.append(Atom(fileobj, level + 1)) - else: - fileobj.seek(self.offset + self.length, 0) - - @staticmethod - def render(name, data): - """Render raw atom data.""" - # this raises OverflowError if Py_ssize_t can't handle the atom data - size = len(data) + 8 - if size <= 0xFFFFFFFF: - return struct.pack(">I4s", size, name) + data - else: - return struct.pack(">I4sQ", 1, name, size + 8) + data - - def findall(self, name, recursive=False): - """Recursively find all child atoms by specified name.""" - if self.children is not None: - for child in self.children: - if child.name == name: - yield child - if recursive: - for atom in child.findall(name, True): - yield atom - - def __getitem__(self, remaining): - """Look up a child atom, potentially 
recursively. - - e.g. atom['udta', 'meta'] => - """ - if not remaining: - return self - elif self.children is None: - raise KeyError("%r is not a container" % self.name) - for child in self.children: - if child.name == remaining[0]: - return child[remaining[1:]] - else: - raise KeyError("%r not found" % remaining[0]) - - def __repr__(self): - klass = self.__class__.__name__ - if self.children is None: - return "<%s name=%r length=%r offset=%r>" % ( - klass, self.name, self.length, self.offset) - else: - children = "\n".join([" " + line for child in self.children - for line in repr(child).splitlines()]) - return "<%s name=%r length=%r offset=%r\n%s>" % ( - klass, self.name, self.length, self.offset, children) - - -class Atoms(object): - """Root atoms in a given file. - - Attributes: - atoms -- a list of top-level atoms as Atom objects - - This structure should only be used internally by Mutagen. - """ - - def __init__(self, fileobj): - self.atoms = [] - fileobj.seek(0, 2) - end = fileobj.tell() - fileobj.seek(0) - while fileobj.tell() + 8 <= end: - self.atoms.append(Atom(fileobj)) - - def path(self, *names): - """Look up and return the complete path of an atom. - - For example, atoms.path('moov', 'udta', 'meta') will return a - list of three atoms, corresponding to the moov, udta, and meta - atoms. - """ - - path = [self] - for name in names: - path.append(path[-1][name, ]) - return path[1:] - - def __contains__(self, names): - try: - self[names] - except KeyError: - return False - return True - - def __getitem__(self, names): - """Look up a child atom. - - 'names' may be a list of atoms (['moov', 'udta']) or a string - specifying the complete path ('moov.udta'). 
- """ - - if isinstance(names, basestring): - names = names.split(".") - for child in self.atoms: - if child.name == names[0]: - return child[names[1:]] - else: - raise KeyError("%s not found" % names[0]) - - def __repr__(self): - return "\n".join([repr(child) for child in self.atoms]) - - -class MP4Tags(DictProxy, Metadata): - r"""Dictionary containing Apple iTunes metadata list key/values. - - Keys are four byte identifiers, except for freeform ('----') - keys. Values are usually unicode strings, but some atoms have a - special structure: - - Text values (multiple values per key are supported): - - * '\\xa9nam' -- track title - * '\\xa9alb' -- album - * '\\xa9ART' -- artist - * 'aART' -- album artist - * '\\xa9wrt' -- composer - * '\\xa9day' -- year - * '\\xa9cmt' -- comment - * 'desc' -- description (usually used in podcasts) - * 'purd' -- purchase date - * '\\xa9grp' -- grouping - * '\\xa9gen' -- genre - * '\\xa9lyr' -- lyrics - * 'purl' -- podcast URL - * 'egid' -- podcast episode GUID - * 'catg' -- podcast category - * 'keyw' -- podcast keywords - * '\\xa9too' -- encoded by - * 'cprt' -- copyright - * 'soal' -- album sort order - * 'soaa' -- album artist sort order - * 'soar' -- artist sort order - * 'sonm' -- title sort order - * 'soco' -- composer sort order - * 'sosn' -- show sort order - * 'tvsh' -- show name - - Boolean values: - - * 'cpil' -- part of a compilation - * 'pgap' -- part of a gapless album - * 'pcst' -- podcast (iTunes reads this only on import) - - Tuples of ints (multiple values per key are supported): - - * 'trkn' -- track number, total tracks - * 'disk' -- disc number, total discs - - Others: - - * 'tmpo' -- tempo/BPM, 16 bit int - * 'covr' -- cover artwork, list of MP4Cover objects (which are - tagged strs) - * 'gnre' -- ID3v1 genre. Not supported, use '\\xa9gen' instead. 
- - The freeform '----' frames use a key in the format '----:mean:name' - where 'mean' is usually 'com.apple.iTunes' and 'name' is a unique - identifier for this frame. The value is a str, but is probably - text that can be decoded as UTF-8. Multiple values per key are - supported. - - MP4 tag data cannot exist outside of the structure of an MP4 file, - so this class should not be manually instantiated. - - Unknown non-text tags are removed. - """ - - def load(self, atoms, fileobj): - try: - ilst = atoms["moov.udta.meta.ilst"] - except KeyError, key: - raise MP4MetadataError(key) - for atom in ilst.children: - fileobj.seek(atom.offset + 8) - data = fileobj.read(atom.length - 8) - if len(data) != atom.length - 8: - raise MP4MetadataError("Not enough data") - - if atom.name in self.__atoms: - info = self.__atoms[atom.name] - info[0](self, atom, data, *info[2:]) - else: - # unknown atom, try as text and skip if it fails - # FIXME: keep them somehow - try: - self.__parse_text(atom, data) - except MP4MetadataError: - continue - - @classmethod - def _can_load(cls, atoms): - return "moov.udta.meta.ilst" in atoms - - @staticmethod - def __key_sort(item1, item2): - (key1, v1) = item1 - (key2, v2) = item2 - # iTunes always writes the tags in order of "relevance", try - # to copy it as closely as possible. - order = ["\xa9nam", "\xa9ART", "\xa9wrt", "\xa9alb", - "\xa9gen", "gnre", "trkn", "disk", - "\xa9day", "cpil", "pgap", "pcst", "tmpo", - "\xa9too", "----", "covr", "\xa9lyr"] - order = dict(zip(order, range(len(order)))) - last = len(order) - # If there's no key-based way to distinguish, order by length. - # If there's still no way, go by string comparison on the - # values, so we at least have something determinstic. 
- return (cmp(order.get(key1[:4], last), order.get(key2[:4], last)) or - cmp(len(v1), len(v2)) or cmp(v1, v2)) - - def save(self, filename): - """Save the metadata to the given filename.""" - values = [] - items = self.items() - items.sort(self.__key_sort) - for key, value in items: - info = self.__atoms.get(key[:4], (None, type(self).__render_text)) - try: - values.append(info[1](self, key, value, *info[2:])) - except (TypeError, ValueError), s: - raise MP4MetadataValueError, s, sys.exc_info()[2] - data = Atom.render("ilst", "".join(values)) - - # Find the old atoms. - fileobj = open(filename, "rb+") - try: - atoms = Atoms(fileobj) - try: - path = atoms.path("moov", "udta", "meta", "ilst") - except KeyError: - self.__save_new(fileobj, atoms, data) - else: - self.__save_existing(fileobj, atoms, path, data) - finally: - fileobj.close() - - def __pad_ilst(self, data, length=None): - if length is None: - length = ((len(data) + 1023) & ~1023) - len(data) - return Atom.render("free", "\x00" * length) - - def __save_new(self, fileobj, atoms, ilst): - hdlr = Atom.render("hdlr", "\x00" * 8 + "mdirappl" + "\x00" * 9) - meta = Atom.render( - "meta", "\x00\x00\x00\x00" + hdlr + ilst + self.__pad_ilst(ilst)) - try: - path = atoms.path("moov", "udta") - except KeyError: - # moov.udta not found -- create one - path = atoms.path("moov") - meta = Atom.render("udta", meta) - offset = path[-1].offset + 8 - insert_bytes(fileobj, len(meta), offset) - fileobj.seek(offset) - fileobj.write(meta) - self.__update_parents(fileobj, path, len(meta)) - self.__update_offsets(fileobj, atoms, len(meta), offset) - - def __save_existing(self, fileobj, atoms, path, data): - # Replace the old ilst atom. 
- ilst = path.pop() - offset = ilst.offset - length = ilst.length - - # Check for padding "free" atoms - meta = path[-1] - index = meta.children.index(ilst) - try: - prev = meta.children[index-1] - if prev.name == "free": - offset = prev.offset - length += prev.length - except IndexError: - pass - try: - next = meta.children[index+1] - if next.name == "free": - length += next.length - except IndexError: - pass - - delta = len(data) - length - if delta > 0 or (delta < 0 and delta > -8): - data += self.__pad_ilst(data) - delta = len(data) - length - insert_bytes(fileobj, delta, offset) - elif delta < 0: - data += self.__pad_ilst(data, -delta - 8) - delta = 0 - - fileobj.seek(offset) - fileobj.write(data) - self.__update_parents(fileobj, path, delta) - self.__update_offsets(fileobj, atoms, delta, offset) - - def __update_parents(self, fileobj, path, delta): - """Update all parent atoms with the new size.""" - for atom in path: - fileobj.seek(atom.offset) - size = cdata.uint_be(fileobj.read(4)) - if size == 1: # 64bit - # skip name (4B) and read size (8B) - size = cdata.ulonglong_be(fileobj.read(12)[4:]) - fileobj.seek(atom.offset + 8) - fileobj.write(cdata.to_ulonglong_be(size + delta)) - else: # 32bit - fileobj.seek(atom.offset) - fileobj.write(cdata.to_uint_be(size + delta)) - - def __update_offset_table(self, fileobj, fmt, atom, delta, offset): - """Update offset table in the specified atom.""" - if atom.offset > offset: - atom.offset += delta - fileobj.seek(atom.offset + 12) - data = fileobj.read(atom.length - 12) - fmt = fmt % cdata.uint_be(data[:4]) - offsets = struct.unpack(fmt, data[4:]) - offsets = [o + (0, delta)[offset < o] for o in offsets] - fileobj.seek(atom.offset + 16) - fileobj.write(struct.pack(fmt, *offsets)) - - def __update_tfhd(self, fileobj, atom, delta, offset): - if atom.offset > offset: - atom.offset += delta - fileobj.seek(atom.offset + 9) - data = fileobj.read(atom.length - 9) - flags = cdata.uint_be("\x00" + data[:3]) - if flags & 1: - o = 
cdata.ulonglong_be(data[7:15]) - if o > offset: - o += delta - fileobj.seek(atom.offset + 16) - fileobj.write(cdata.to_ulonglong_be(o)) - - def __update_offsets(self, fileobj, atoms, delta, offset): - """Update offset tables in all 'stco' and 'co64' atoms.""" - if delta == 0: - return - moov = atoms["moov"] - for atom in moov.findall('stco', True): - self.__update_offset_table(fileobj, ">%dI", atom, delta, offset) - for atom in moov.findall('co64', True): - self.__update_offset_table(fileobj, ">%dQ", atom, delta, offset) - try: - for atom in atoms["moof"].findall('tfhd', True): - self.__update_tfhd(fileobj, atom, delta, offset) - except KeyError: - pass - - def __parse_data(self, atom, data): - pos = 0 - while pos < atom.length - 8: - length, name, flags = struct.unpack(">I4sI", data[pos:pos+12]) - if name != "data": - raise MP4MetadataError( - "unexpected atom %r inside %r" % (name, atom.name)) - yield flags, data[pos+16:pos+length] - pos += length - - def __render_data(self, key, flags, value): - return Atom.render(key, "".join([ - Atom.render("data", struct.pack(">2I", flags, 0) + data) - for data in value])) - - def __parse_freeform(self, atom, data): - length = cdata.uint_be(data[:4]) - mean = data[12:length] - pos = length - length = cdata.uint_be(data[pos:pos+4]) - name = data[pos+12:pos+length] - pos += length - value = [] - while pos < atom.length - 8: - length, atom_name = struct.unpack(">I4s", data[pos:pos+8]) - if atom_name != "data": - raise MP4MetadataError( - "unexpected atom %r inside %r" % (atom_name, atom.name)) - - version = ord(data[pos+8]) - if version != 0: - raise MP4MetadataError("Unsupported version: %r" % version) - - flags = struct.unpack(">I", "\x00" + data[pos+9:pos+12])[0] - value.append(MP4FreeForm(data[pos+16:pos+length], - dataformat=flags)) - pos += length - if value: - self["%s:%s:%s" % (atom.name, mean, name)] = value - - def __render_freeform(self, key, value): - dummy, mean, name = key.split(":", 2) - mean = 
struct.pack(">I4sI", len(mean) + 12, "mean", 0) + mean - name = struct.pack(">I4sI", len(name) + 12, "name", 0) + name - if isinstance(value, basestring): - value = [value] - data = "" - for v in value: - flags = MP4FreeForm.FORMAT_TEXT - if isinstance(v, MP4FreeForm): - flags = v.dataformat - data += struct.pack(">I4s2I", len(v) + 16, "data", flags, 0) - data += v - return Atom.render("----", mean + name + data) - - def __parse_pair(self, atom, data): - self[atom.name] = [struct.unpack(">2H", d[2:6]) for - flags, d in self.__parse_data(atom, data)] - - def __render_pair(self, key, value): - data = [] - for (track, total) in value: - if 0 <= track < 1 << 16 and 0 <= total < 1 << 16: - data.append(struct.pack(">4H", 0, track, total, 0)) - else: - raise MP4MetadataValueError( - "invalid numeric pair %r" % ((track, total),)) - return self.__render_data(key, 0, data) - - def __render_pair_no_trailing(self, key, value): - data = [] - for (track, total) in value: - if 0 <= track < 1 << 16 and 0 <= total < 1 << 16: - data.append(struct.pack(">3H", 0, track, total)) - else: - raise MP4MetadataValueError( - "invalid numeric pair %r" % ((track, total),)) - return self.__render_data(key, 0, data) - - def __parse_genre(self, atom, data): - # Translate to a freeform genre. 
- genre = cdata.short_be(data[16:18]) - if "\xa9gen" not in self: - try: - self["\xa9gen"] = [GENRES[genre - 1]] - except IndexError: - pass - - def __parse_tempo(self, atom, data): - self[atom.name] = [cdata.ushort_be(value[1]) for - value in self.__parse_data(atom, data)] - - def __render_tempo(self, key, value): - try: - if len(value) == 0: - return self.__render_data(key, 0x15, "") - - if min(value) < 0 or max(value) >= 2**16: - raise MP4MetadataValueError( - "invalid 16 bit integers: %r" % value) - except TypeError: - raise MP4MetadataValueError( - "tmpo must be a list of 16 bit integers") - - values = map(cdata.to_ushort_be, value) - return self.__render_data(key, 0x15, values) - - def __parse_bool(self, atom, data): - try: - self[atom.name] = bool(ord(data[16:17])) - except TypeError: - self[atom.name] = False - - def __render_bool(self, key, value): - return self.__render_data(key, 0x15, [chr(bool(value))]) - - def __parse_cover(self, atom, data): - self[atom.name] = [] - pos = 0 - while pos < atom.length - 8: - length, name, imageformat = struct.unpack(">I4sI", - data[pos:pos+12]) - if name != "data": - if name == "name": - pos += length - continue - raise MP4MetadataError( - "unexpected atom %r inside 'covr'" % name) - if imageformat not in (MP4Cover.FORMAT_JPEG, MP4Cover.FORMAT_PNG): - imageformat = MP4Cover.FORMAT_JPEG - cover = MP4Cover(data[pos+16:pos+length], imageformat) - self[atom.name].append(cover) - pos += length - - def __render_cover(self, key, value): - atom_data = [] - for cover in value: - try: - imageformat = cover.imageformat - except AttributeError: - imageformat = MP4Cover.FORMAT_JPEG - atom_data.append(Atom.render( - "data", struct.pack(">2I", imageformat, 0) + cover)) - return Atom.render(key, "".join(atom_data)) - - def __parse_text(self, atom, data, expected_flags=1): - value = [text.decode('utf-8', 'replace') for flags, text - in self.__parse_data(atom, data) - if flags == expected_flags] - if value: - self[atom.name] = value - - 
def __render_text(self, key, value, flags=1): - if isinstance(value, basestring): - value = [value] - return self.__render_data( - key, flags, map(utf8, value)) - - def delete(self, filename): - """Remove the metadata from the given filename.""" - - self.clear() - self.save(filename) - - __atoms = { - "----": (__parse_freeform, __render_freeform), - "trkn": (__parse_pair, __render_pair), - "disk": (__parse_pair, __render_pair_no_trailing), - "gnre": (__parse_genre, None), - "tmpo": (__parse_tempo, __render_tempo), - "cpil": (__parse_bool, __render_bool), - "pgap": (__parse_bool, __render_bool), - "pcst": (__parse_bool, __render_bool), - "covr": (__parse_cover, __render_cover), - "purl": (__parse_text, __render_text, 0), - "egid": (__parse_text, __render_text, 0), - } - - # the text atoms we know about which should make loading fail if parsing - # any of them fails - for name in ["\xa9nam", "\xa9alb", "\xa9ART", "aART", "\xa9wrt", "\xa9day", - "\xa9cmt", "desc", "purd", "\xa9grp", "\xa9gen", "\xa9lyr", - "catg", "keyw", "\xa9too", "cprt", "soal", "soaa", "soar", - "sonm", "soco", "sosn", "tvsh"]: - __atoms[name] = (__parse_text, __render_text) - - def pprint(self): - values = [] - for key, value in self.iteritems(): - key = key.decode('latin1') - if key == "covr": - values.append("%s=%s" % (key, ", ".join( - ["[%d bytes of data]" % len(data) for data in value]))) - elif isinstance(value, list): - values.append("%s=%s" % (key, " / ".join(map(unicode, value)))) - else: - values.append("%s=%s" % (key, value)) - return "\n".join(values) - - -class MP4Info(object): - """MPEG-4 stream information. 
- - Attributes: - - * bitrate -- bitrate in bits per second, as an int - * length -- file length in seconds, as a float - * channels -- number of audio channels - * sample_rate -- audio sampling rate in Hz - * bits_per_sample -- bits per sample - """ - - bitrate = 0 - channels = 0 - sample_rate = 0 - bits_per_sample = 0 - - def __init__(self, atoms, fileobj): - for trak in list(atoms["moov"].findall("trak")): - hdlr = trak["mdia", "hdlr"] - fileobj.seek(hdlr.offset) - data = fileobj.read(hdlr.length) - if data[16:20] == "soun": - break - else: - raise MP4StreamInfoError("track has no audio data") - - mdhd = trak["mdia", "mdhd"] - fileobj.seek(mdhd.offset) - data = fileobj.read(mdhd.length) - if ord(data[8]) == 0: - offset = 20 - fmt = ">2I" - else: - offset = 28 - fmt = ">IQ" - end = offset + struct.calcsize(fmt) - unit, length = struct.unpack(fmt, data[offset:end]) - self.length = float(length) / unit - - try: - atom = trak["mdia", "minf", "stbl", "stsd"] - fileobj.seek(atom.offset) - data = fileobj.read(atom.length) - if data[20:24] == "mp4a": - length = cdata.uint_be(data[16:20]) - (self.channels, self.bits_per_sample, _, - self.sample_rate) = struct.unpack(">3HI", data[40:50]) - # ES descriptor type - if data[56:60] == "esds" and ord(data[64:65]) == 0x03: - pos = 65 - # skip extended descriptor type tag, length, ES ID - # and stream priority - if data[pos:pos+3] == "\x80\x80\x80": - pos += 3 - pos += 4 - # decoder config descriptor type - if ord(data[pos]) == 0x04: - pos += 1 - # skip extended descriptor type tag, length, - # object type ID, stream type, buffer size - # and maximum bitrate - if data[pos:pos+3] == "\x80\x80\x80": - pos += 3 - pos += 10 - # average bitrate - self.bitrate = cdata.uint_be(data[pos:pos+4]) - except (ValueError, KeyError): - # stsd atoms are optional - pass - - def pprint(self): - return "MPEG-4 audio, %.2f seconds, %d bps" % ( - self.length, self.bitrate) - - -class MP4(FileType): - """An MPEG-4 audio file, probably containing AAC. 
- - If more than one track is present in the file, the first is used. - Only audio ('soun') tracks will be read. - - :ivar info: :class:`MP4Info` - :ivar tags: :class:`MP4Tags` - """ - - MP4Tags = MP4Tags - - _mimes = ["audio/mp4", "audio/x-m4a", "audio/mpeg4", "audio/aac"] - - def load(self, filename): - self.filename = filename - fileobj = open(filename, "rb") - try: - atoms = Atoms(fileobj) - - # ftyp is always the first atom in a valid MP4 file - if not atoms.atoms or atoms.atoms[0].name != "ftyp": - raise error("Not a MP4 file") - - try: - self.info = MP4Info(atoms, fileobj) - except StandardError, err: - raise MP4StreamInfoError, err, sys.exc_info()[2] - - if not MP4Tags._can_load(atoms): - self.tags = None - else: - try: - self.tags = self.MP4Tags(atoms, fileobj) - except StandardError, err: - raise MP4MetadataError, err, sys.exc_info()[2] - finally: - fileobj.close() - - def add_tags(self): - if self.tags is None: - self.tags = self.MP4Tags() - else: - raise error("an MP4 tag already exists") - - @staticmethod - def score(filename, fileobj, header): - return ("ftyp" in header) + ("mp4" in header) - - -Open = MP4 - - -def delete(filename): - """Remove tags from a file.""" - - MP4(filename).delete() diff --git a/libs/mutagen/mp4/__init__.py b/libs/mutagen/mp4/__init__.py new file mode 100644 index 00000000..e3c16a7f --- /dev/null +++ b/libs/mutagen/mp4/__init__.py @@ -0,0 +1,1023 @@ +# -*- coding: utf-8 -*- + +# Copyright (C) 2006 Joe Wreschnig +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 as +# published by the Free Software Foundation. + +"""Read and write MPEG-4 audio files with iTunes metadata. + +This module will read MPEG-4 audio information and metadata, +as found in Apple's MP4 (aka M4A, M4B, M4P) files. + +There is no official specification for this format. 
The source code +for TagLib, FAAD, and various MPEG specifications at + +* http://developer.apple.com/documentation/QuickTime/QTFF/ +* http://www.geocities.com/xhelmboyx/quicktime/formats/mp4-layout.txt +* http://standards.iso.org/ittf/PubliclyAvailableStandards/\ +c041828_ISO_IEC_14496-12_2005(E).zip +* http://wiki.multimedia.cx/index.php?title=Apple_QuickTime + +were all consulted. +""" + +import struct +import sys + +from mutagen import FileType, Tags, StreamInfo, PaddingInfo +from mutagen._constants import GENRES +from mutagen._util import (cdata, insert_bytes, DictProxy, MutagenError, + hashable, enum, get_size, resize_bytes) +from mutagen._compat import (reraise, PY2, string_types, text_type, chr_, + iteritems, PY3, cBytesIO, izip, xrange) +from ._atom import Atoms, Atom, AtomError +from ._util import parse_full_atom +from ._as_entry import AudioSampleEntry, ASEntryError + + +class error(IOError, MutagenError): + pass + + +class MP4MetadataError(error): + pass + + +class MP4StreamInfoError(error): + pass + + +class MP4MetadataValueError(ValueError, MP4MetadataError): + pass + + +__all__ = ['MP4', 'Open', 'delete', 'MP4Cover', 'MP4FreeForm', 'AtomDataType'] + + +@enum +class AtomDataType(object): + """Enum for `dataformat` attribute of MP4FreeForm. + + .. 
versionadded:: 1.25 + """ + + IMPLICIT = 0 + """for use with tags for which no type needs to be indicated because + only one type is allowed""" + + UTF8 = 1 + """without any count or null terminator""" + + UTF16 = 2 + """also known as UTF-16BE""" + + SJIS = 3 + """deprecated unless it is needed for special Japanese characters""" + + HTML = 6 + """the HTML file header specifies which HTML version""" + + XML = 7 + """the XML header must identify the DTD or schemas""" + + UUID = 8 + """also known as GUID; stored as 16 bytes in binary (valid as an ID)""" + + ISRC = 9 + """stored as UTF-8 text (valid as an ID)""" + + MI3P = 10 + """stored as UTF-8 text (valid as an ID)""" + + GIF = 12 + """(deprecated) a GIF image""" + + JPEG = 13 + """a JPEG image""" + + PNG = 14 + """PNG image""" + + URL = 15 + """absolute, in UTF-8 characters""" + + DURATION = 16 + """in milliseconds, 32-bit integer""" + + DATETIME = 17 + """in UTC, counting seconds since midnight, January 1, 1904; + 32 or 64-bits""" + + GENRES = 18 + """a list of enumerated values""" + + INTEGER = 21 + """a signed big-endian integer with length one of { 1,2,3,4,8 } bytes""" + + RIAA_PA = 24 + """RIAA parental advisory; { -1=no, 1=yes, 0=unspecified }, + 8-bit ingteger""" + + UPC = 25 + """Universal Product Code, in text UTF-8 format (valid as an ID)""" + + BMP = 27 + """Windows bitmap image""" + + +@hashable +class MP4Cover(bytes): + """A cover artwork. 
+ + Attributes: + + * imageformat -- format of the image (either FORMAT_JPEG or FORMAT_PNG) + """ + + FORMAT_JPEG = AtomDataType.JPEG + FORMAT_PNG = AtomDataType.PNG + + def __new__(cls, data, *args, **kwargs): + return bytes.__new__(cls, data) + + def __init__(self, data, imageformat=FORMAT_JPEG): + self.imageformat = imageformat + + __hash__ = bytes.__hash__ + + def __eq__(self, other): + if not isinstance(other, MP4Cover): + return bytes(self) == other + + return (bytes(self) == bytes(other) and + self.imageformat == other.imageformat) + + def __ne__(self, other): + return not self.__eq__(other) + + def __repr__(self): + return "%s(%r, %r)" % ( + type(self).__name__, bytes(self), + AtomDataType(self.imageformat)) + + +@hashable +class MP4FreeForm(bytes): + """A freeform value. + + Attributes: + + * dataformat -- format of the data (see AtomDataType) + """ + + FORMAT_DATA = AtomDataType.IMPLICIT # deprecated + FORMAT_TEXT = AtomDataType.UTF8 # deprecated + + def __new__(cls, data, *args, **kwargs): + return bytes.__new__(cls, data) + + def __init__(self, data, dataformat=AtomDataType.UTF8, version=0): + self.dataformat = dataformat + self.version = version + + __hash__ = bytes.__hash__ + + def __eq__(self, other): + if not isinstance(other, MP4FreeForm): + return bytes(self) == other + + return (bytes(self) == bytes(other) and + self.dataformat == other.dataformat and + self.version == other.version) + + def __ne__(self, other): + return not self.__eq__(other) + + def __repr__(self): + return "%s(%r, %r)" % ( + type(self).__name__, bytes(self), + AtomDataType(self.dataformat)) + + + +def _name2key(name): + if PY2: + return name + return name.decode("latin-1") + + +def _key2name(key): + if PY2: + return key + return key.encode("latin-1") + + +def _find_padding(atom_path): + # Check for padding "free" atom + # XXX: we only use them if they are adjacent to ilst, and only one. + # and there also is a top level free atom which we could use maybe..? 
+ + meta, ilst = atom_path[-2:] + assert meta.name == b"meta" and ilst.name == b"ilst" + index = meta.children.index(ilst) + try: + prev = meta.children[index - 1] + if prev.name == b"free": + return prev + except IndexError: + pass + + try: + next_ = meta.children[index + 1] + if next_.name == b"free": + return next_ + except IndexError: + pass + + +def _item_sort_key(key, value): + # iTunes always writes the tags in order of "relevance", try + # to copy it as closely as possible. + order = ["\xa9nam", "\xa9ART", "\xa9wrt", "\xa9alb", + "\xa9gen", "gnre", "trkn", "disk", + "\xa9day", "cpil", "pgap", "pcst", "tmpo", + "\xa9too", "----", "covr", "\xa9lyr"] + order = dict(izip(order, xrange(len(order)))) + last = len(order) + # If there's no key-based way to distinguish, order by length. + # If there's still no way, go by string comparison on the + # values, so we at least have something determinstic. + return (order.get(key[:4], last), len(repr(value)), repr(value)) + + +class MP4Tags(DictProxy, Tags): + r"""Dictionary containing Apple iTunes metadata list key/values. + + Keys are four byte identifiers, except for freeform ('----') + keys. 
Values are usually unicode strings, but some atoms have a + special structure: + + Text values (multiple values per key are supported): + + * '\\xa9nam' -- track title + * '\\xa9alb' -- album + * '\\xa9ART' -- artist + * 'aART' -- album artist + * '\\xa9wrt' -- composer + * '\\xa9day' -- year + * '\\xa9cmt' -- comment + * 'desc' -- description (usually used in podcasts) + * 'purd' -- purchase date + * '\\xa9grp' -- grouping + * '\\xa9gen' -- genre + * '\\xa9lyr' -- lyrics + * 'purl' -- podcast URL + * 'egid' -- podcast episode GUID + * 'catg' -- podcast category + * 'keyw' -- podcast keywords + * '\\xa9too' -- encoded by + * 'cprt' -- copyright + * 'soal' -- album sort order + * 'soaa' -- album artist sort order + * 'soar' -- artist sort order + * 'sonm' -- title sort order + * 'soco' -- composer sort order + * 'sosn' -- show sort order + * 'tvsh' -- show name + + Boolean values: + + * 'cpil' -- part of a compilation + * 'pgap' -- part of a gapless album + * 'pcst' -- podcast (iTunes reads this only on import) + + Tuples of ints (multiple values per key are supported): + + * 'trkn' -- track number, total tracks + * 'disk' -- disc number, total discs + + Others: + + * 'tmpo' -- tempo/BPM, 16 bit int + * 'covr' -- cover artwork, list of MP4Cover objects (which are + tagged strs) + * 'gnre' -- ID3v1 genre. Not supported, use '\\xa9gen' instead. + + The freeform '----' frames use a key in the format '----:mean:name' + where 'mean' is usually 'com.apple.iTunes' and 'name' is a unique + identifier for this frame. The value is a str, but is probably + text that can be decoded as UTF-8. Multiple values per key are + supported. + + MP4 tag data cannot exist outside of the structure of an MP4 file, + so this class should not be manually instantiated. + + Unknown non-text tags and tags that failed to parse will be written + back as is. 
+ """ + + def __init__(self, *args, **kwargs): + self._failed_atoms = {} + super(MP4Tags, self).__init__() + if args or kwargs: + self.load(*args, **kwargs) + + def load(self, atoms, fileobj): + try: + path = atoms.path(b"moov", b"udta", b"meta", b"ilst") + except KeyError as key: + raise MP4MetadataError(key) + + free = _find_padding(path) + self._padding = free.datalength if free is not None else 0 + + ilst = path[-1] + for atom in ilst.children: + ok, data = atom.read(fileobj) + if not ok: + raise MP4MetadataError("Not enough data") + + try: + if atom.name in self.__atoms: + info = self.__atoms[atom.name] + info[0](self, atom, data) + else: + # unknown atom, try as text + self.__parse_text(atom, data, implicit=False) + except MP4MetadataError: + # parsing failed, save them so we can write them back + key = _name2key(atom.name) + self._failed_atoms.setdefault(key, []).append(data) + + def __setitem__(self, key, value): + if not isinstance(key, str): + raise TypeError("key has to be str") + self._render(key, value) + super(MP4Tags, self).__setitem__(key, value) + + @classmethod + def _can_load(cls, atoms): + return b"moov.udta.meta.ilst" in atoms + + def _render(self, key, value): + atom_name = _key2name(key)[:4] + if atom_name in self.__atoms: + render_func = self.__atoms[atom_name][1] + else: + render_func = type(self).__render_text + + return render_func(self, key, value) + + def save(self, filename, padding=None): + """Save the metadata to the given filename.""" + + values = [] + items = sorted(self.items(), key=lambda kv: _item_sort_key(*kv)) + for key, value in items: + try: + values.append(self._render(key, value)) + except (TypeError, ValueError) as s: + reraise(MP4MetadataValueError, s, sys.exc_info()[2]) + + for key, failed in iteritems(self._failed_atoms): + # don't write atoms back if we have added a new one with + # the same name, this excludes freeform which can have + # multiple atoms with the same key (most parsers seem to be able + # to handle 
that) + if key in self: + assert _key2name(key) != b"----" + continue + for data in failed: + values.append(Atom.render(_key2name(key), data)) + + data = Atom.render(b"ilst", b"".join(values)) + + # Find the old atoms. + with open(filename, "rb+") as fileobj: + try: + atoms = Atoms(fileobj) + except AtomError as err: + reraise(error, err, sys.exc_info()[2]) + + self.__save(fileobj, atoms, data, padding) + + def __save(self, fileobj, atoms, data, padding): + try: + path = atoms.path(b"moov", b"udta", b"meta", b"ilst") + except KeyError: + self.__save_new(fileobj, atoms, data, padding) + else: + self.__save_existing(fileobj, atoms, path, data, padding) + + def __save_new(self, fileobj, atoms, ilst_data, padding_func): + hdlr = Atom.render(b"hdlr", b"\x00" * 8 + b"mdirappl" + b"\x00" * 9) + meta_data = b"\x00\x00\x00\x00" + hdlr + ilst_data + + try: + path = atoms.path(b"moov", b"udta") + except KeyError: + path = atoms.path(b"moov") + + offset = path[-1]._dataoffset + + # ignoring some atom overhead... but we don't have padding left anyway + # and padding_size is guaranteed to be less than zero + content_size = get_size(fileobj) - offset + padding_size = -len(meta_data) + assert padding_size < 0 + info = PaddingInfo(padding_size, content_size) + new_padding = info._get_padding(padding_func) + new_padding = min(0xFFFFFFFF, new_padding) + + free = Atom.render(b"free", b"\x00" * new_padding) + meta = Atom.render(b"meta", meta_data + free) + if path[-1].name != b"udta": + # moov.udta not found -- create one + data = Atom.render(b"udta", meta) + else: + data = meta + + insert_bytes(fileobj, len(data), offset) + fileobj.seek(offset) + fileobj.write(data) + self.__update_parents(fileobj, path, len(data)) + self.__update_offsets(fileobj, atoms, len(data), offset) + + def __save_existing(self, fileobj, atoms, path, ilst_data, padding_func): + # Replace the old ilst atom. 
+ ilst = path[-1] + offset = ilst.offset + length = ilst.length + + # Use adjacent free atom if there is one + free = _find_padding(path) + if free is not None: + offset = min(offset, free.offset) + length += free.length + + # Always add a padding atom to make things easier + padding_overhead = len(Atom.render(b"free", b"")) + content_size = get_size(fileobj) - (offset + length) + padding_size = length - (len(ilst_data) + padding_overhead) + info = PaddingInfo(padding_size, content_size) + new_padding = info._get_padding(padding_func) + # Limit padding size so we can be sure the free atom overhead is as we + # calculated above (see Atom.render) + new_padding = min(0xFFFFFFFF, new_padding) + + ilst_data += Atom.render(b"free", b"\x00" * new_padding) + + resize_bytes(fileobj, length, len(ilst_data), offset) + delta = len(ilst_data) - length + + fileobj.seek(offset) + fileobj.write(ilst_data) + self.__update_parents(fileobj, path[:-1], delta) + self.__update_offsets(fileobj, atoms, delta, offset) + + def __update_parents(self, fileobj, path, delta): + """Update all parent atoms with the new size.""" + + if delta == 0: + return + + for atom in path: + fileobj.seek(atom.offset) + size = cdata.uint_be(fileobj.read(4)) + if size == 1: # 64bit + # skip name (4B) and read size (8B) + size = cdata.ulonglong_be(fileobj.read(12)[4:]) + fileobj.seek(atom.offset + 8) + fileobj.write(cdata.to_ulonglong_be(size + delta)) + else: # 32bit + fileobj.seek(atom.offset) + fileobj.write(cdata.to_uint_be(size + delta)) + + def __update_offset_table(self, fileobj, fmt, atom, delta, offset): + """Update offset table in the specified atom.""" + if atom.offset > offset: + atom.offset += delta + fileobj.seek(atom.offset + 12) + data = fileobj.read(atom.length - 12) + fmt = fmt % cdata.uint_be(data[:4]) + offsets = struct.unpack(fmt, data[4:]) + offsets = [o + (0, delta)[offset < o] for o in offsets] + fileobj.seek(atom.offset + 16) + fileobj.write(struct.pack(fmt, *offsets)) + + def 
__update_tfhd(self, fileobj, atom, delta, offset): + if atom.offset > offset: + atom.offset += delta + fileobj.seek(atom.offset + 9) + data = fileobj.read(atom.length - 9) + flags = cdata.uint_be(b"\x00" + data[:3]) + if flags & 1: + o = cdata.ulonglong_be(data[7:15]) + if o > offset: + o += delta + fileobj.seek(atom.offset + 16) + fileobj.write(cdata.to_ulonglong_be(o)) + + def __update_offsets(self, fileobj, atoms, delta, offset): + """Update offset tables in all 'stco' and 'co64' atoms.""" + if delta == 0: + return + moov = atoms[b"moov"] + for atom in moov.findall(b'stco', True): + self.__update_offset_table(fileobj, ">%dI", atom, delta, offset) + for atom in moov.findall(b'co64', True): + self.__update_offset_table(fileobj, ">%dQ", atom, delta, offset) + try: + for atom in atoms[b"moof"].findall(b'tfhd', True): + self.__update_tfhd(fileobj, atom, delta, offset) + except KeyError: + pass + + def __parse_data(self, atom, data): + pos = 0 + while pos < atom.length - 8: + head = data[pos:pos + 12] + if len(head) != 12: + raise MP4MetadataError("truncated atom % r" % atom.name) + length, name = struct.unpack(">I4s", head[:8]) + version = ord(head[8:9]) + flags = struct.unpack(">I", b"\x00" + head[9:12])[0] + if name != b"data": + raise MP4MetadataError( + "unexpected atom %r inside %r" % (name, atom.name)) + + chunk = data[pos + 16:pos + length] + if len(chunk) != length - 16: + raise MP4MetadataError("truncated atom % r" % atom.name) + yield version, flags, chunk + pos += length + + def __add(self, key, value, single=False): + assert isinstance(key, str) + + if single: + self[key] = value + else: + self.setdefault(key, []).extend(value) + + def __render_data(self, key, version, flags, value): + return Atom.render(_key2name(key), b"".join([ + Atom.render( + b"data", struct.pack(">2I", version << 24 | flags, 0) + data) + for data in value])) + + def __parse_freeform(self, atom, data): + length = cdata.uint_be(data[:4]) + mean = data[12:length] + pos = length + 
length = cdata.uint_be(data[pos:pos + 4]) + name = data[pos + 12:pos + length] + pos += length + value = [] + while pos < atom.length - 8: + length, atom_name = struct.unpack(">I4s", data[pos:pos + 8]) + if atom_name != b"data": + raise MP4MetadataError( + "unexpected atom %r inside %r" % (atom_name, atom.name)) + + version = ord(data[pos + 8:pos + 8 + 1]) + flags = struct.unpack(">I", b"\x00" + data[pos + 9:pos + 12])[0] + value.append(MP4FreeForm(data[pos + 16:pos + length], + dataformat=flags, version=version)) + pos += length + + key = _name2key(atom.name + b":" + mean + b":" + name) + self.__add(key, value) + + def __render_freeform(self, key, value): + if isinstance(value, bytes): + value = [value] + + dummy, mean, name = _key2name(key).split(b":", 2) + mean = struct.pack(">I4sI", len(mean) + 12, b"mean", 0) + mean + name = struct.pack(">I4sI", len(name) + 12, b"name", 0) + name + + data = b"" + for v in value: + flags = AtomDataType.UTF8 + version = 0 + if isinstance(v, MP4FreeForm): + flags = v.dataformat + version = v.version + + data += struct.pack( + ">I4s2I", len(v) + 16, b"data", version << 24 | flags, 0) + data += v + + return Atom.render(b"----", mean + name + data) + + def __parse_pair(self, atom, data): + key = _name2key(atom.name) + values = [struct.unpack(">2H", d[2:6]) for + version, flags, d in self.__parse_data(atom, data)] + self.__add(key, values) + + def __render_pair(self, key, value): + data = [] + for v in value: + try: + track, total = v + except TypeError: + raise ValueError + if 0 <= track < 1 << 16 and 0 <= total < 1 << 16: + data.append(struct.pack(">4H", 0, track, total, 0)) + else: + raise MP4MetadataValueError( + "invalid numeric pair %r" % ((track, total),)) + return self.__render_data(key, 0, AtomDataType.IMPLICIT, data) + + def __render_pair_no_trailing(self, key, value): + data = [] + for (track, total) in value: + if 0 <= track < 1 << 16 and 0 <= total < 1 << 16: + data.append(struct.pack(">3H", 0, track, total)) + else: + 
raise MP4MetadataValueError( + "invalid numeric pair %r" % ((track, total),)) + return self.__render_data(key, 0, AtomDataType.IMPLICIT, data) + + def __parse_genre(self, atom, data): + values = [] + for version, flags, data in self.__parse_data(atom, data): + # version = 0, flags = 0 + if len(data) != 2: + raise MP4MetadataValueError("invalid genre") + genre = cdata.short_be(data) + # Translate to a freeform genre. + try: + genre = GENRES[genre - 1] + except IndexError: + # this will make us write it back at least + raise MP4MetadataValueError("unknown genre") + values.append(genre) + key = _name2key(b"\xa9gen") + self.__add(key, values) + + def __parse_tempo(self, atom, data): + values = [] + for version, flags, data in self.__parse_data(atom, data): + # version = 0, flags = 0 or 21 + if len(data) != 2: + raise MP4MetadataValueError("invalid tempo") + values.append(cdata.ushort_be(data)) + key = _name2key(atom.name) + self.__add(key, values) + + def __render_tempo(self, key, value): + try: + if len(value) == 0: + return self.__render_data(key, 0, AtomDataType.INTEGER, b"") + + if (min(value) < 0) or (max(value) >= 2 ** 16): + raise MP4MetadataValueError( + "invalid 16 bit integers: %r" % value) + except TypeError: + raise MP4MetadataValueError( + "tmpo must be a list of 16 bit integers") + + values = [cdata.to_ushort_be(v) for v in value] + return self.__render_data(key, 0, AtomDataType.INTEGER, values) + + def __parse_bool(self, atom, data): + for version, flags, data in self.__parse_data(atom, data): + if len(data) != 1: + raise MP4MetadataValueError("invalid bool") + + value = bool(ord(data)) + key = _name2key(atom.name) + self.__add(key, value, single=True) + + def __render_bool(self, key, value): + return self.__render_data( + key, 0, AtomDataType.INTEGER, [chr_(bool(value))]) + + def __parse_cover(self, atom, data): + values = [] + pos = 0 + while pos < atom.length - 8: + length, name, imageformat = struct.unpack(">I4sI", + data[pos:pos + 12]) + if name != 
b"data": + if name == b"name": + pos += length + continue + raise MP4MetadataError( + "unexpected atom %r inside 'covr'" % name) + if imageformat not in (MP4Cover.FORMAT_JPEG, MP4Cover.FORMAT_PNG): + # Sometimes AtomDataType.IMPLICIT or simply wrong. + # In all cases it was jpeg, so default to it + imageformat = MP4Cover.FORMAT_JPEG + cover = MP4Cover(data[pos + 16:pos + length], imageformat) + values.append(cover) + pos += length + + key = _name2key(atom.name) + self.__add(key, values) + + def __render_cover(self, key, value): + atom_data = [] + for cover in value: + try: + imageformat = cover.imageformat + except AttributeError: + imageformat = MP4Cover.FORMAT_JPEG + atom_data.append(Atom.render( + b"data", struct.pack(">2I", imageformat, 0) + cover)) + return Atom.render(_key2name(key), b"".join(atom_data)) + + def __parse_text(self, atom, data, implicit=True): + # implicit = False, for parsing unknown atoms only take utf8 ones. + # For known ones we can assume the implicit are utf8 too. 
+ values = [] + for version, flags, atom_data in self.__parse_data(atom, data): + if implicit: + if flags not in (AtomDataType.IMPLICIT, AtomDataType.UTF8): + raise MP4MetadataError( + "Unknown atom type %r for %r" % (flags, atom.name)) + else: + if flags != AtomDataType.UTF8: + raise MP4MetadataError( + "%r is not text, ignore" % atom.name) + + try: + text = atom_data.decode("utf-8") + except UnicodeDecodeError as e: + raise MP4MetadataError("%s: %s" % (_name2key(atom.name), e)) + + values.append(text) + + key = _name2key(atom.name) + self.__add(key, values) + + def __render_text(self, key, value, flags=AtomDataType.UTF8): + if isinstance(value, string_types): + value = [value] + + encoded = [] + for v in value: + if not isinstance(v, text_type): + if PY3: + raise TypeError("%r not str" % v) + try: + v = v.decode("utf-8") + except (AttributeError, UnicodeDecodeError) as e: + raise TypeError(e) + encoded.append(v.encode("utf-8")) + + return self.__render_data(key, 0, flags, encoded) + + def delete(self, filename): + """Remove the metadata from the given filename.""" + + self._failed_atoms.clear() + self.clear() + self.save(filename, padding=lambda x: 0) + + __atoms = { + b"----": (__parse_freeform, __render_freeform), + b"trkn": (__parse_pair, __render_pair), + b"disk": (__parse_pair, __render_pair_no_trailing), + b"gnre": (__parse_genre, None), + b"tmpo": (__parse_tempo, __render_tempo), + b"cpil": (__parse_bool, __render_bool), + b"pgap": (__parse_bool, __render_bool), + b"pcst": (__parse_bool, __render_bool), + b"covr": (__parse_cover, __render_cover), + b"purl": (__parse_text, __render_text), + b"egid": (__parse_text, __render_text), + } + + # these allow implicit flags and parse as text + for name in [b"\xa9nam", b"\xa9alb", b"\xa9ART", b"aART", b"\xa9wrt", + b"\xa9day", b"\xa9cmt", b"desc", b"purd", b"\xa9grp", + b"\xa9gen", b"\xa9lyr", b"catg", b"keyw", b"\xa9too", + b"cprt", b"soal", b"soaa", b"soar", b"sonm", b"soco", + b"sosn", b"tvsh"]: + __atoms[name] = 
(__parse_text, __render_text) + + def pprint(self): + + def to_line(key, value): + assert isinstance(key, text_type) + if isinstance(value, text_type): + return u"%s=%s" % (key, value) + return u"%s=%r" % (key, value) + + values = [] + for key, value in sorted(iteritems(self)): + if not isinstance(key, text_type): + key = key.decode("latin-1") + if key == "covr": + values.append(u"%s=%s" % (key, u", ".join( + [u"[%d bytes of data]" % len(data) for data in value]))) + elif isinstance(value, list): + for v in value: + values.append(to_line(key, v)) + else: + values.append(to_line(key, value)) + return u"\n".join(values) + + +class MP4Info(StreamInfo): + """MPEG-4 stream information. + + Attributes: + + * bitrate -- bitrate in bits per second, as an int + * length -- file length in seconds, as a float + * channels -- number of audio channels + * sample_rate -- audio sampling rate in Hz + * bits_per_sample -- bits per sample + * codec (string): + * if starting with ``"mp4a"`` uses an mp4a audio codec + (see the codec parameter in rfc6381 for details e.g. ``"mp4a.40.2"``) + * for everything else see a list of possible values at + http://www.mp4ra.org/codecs.html + + e.g. ``"mp4a"``, ``"alac"``, ``"mp4a.40.2"``, ``"ac-3"`` etc. + * codec_description (string): + Name of the codec used (ALAC, AAC LC, AC-3...). Values might change in + the future, use for display purposes only. 
+ """ + + bitrate = 0 + channels = 0 + sample_rate = 0 + bits_per_sample = 0 + codec = u"" + codec_name = u"" + + def __init__(self, atoms, fileobj): + try: + moov = atoms[b"moov"] + except KeyError: + raise MP4StreamInfoError("not a MP4 file") + + for trak in moov.findall(b"trak"): + hdlr = trak[b"mdia", b"hdlr"] + ok, data = hdlr.read(fileobj) + if not ok: + raise MP4StreamInfoError("Not enough data") + if data[8:12] == b"soun": + break + else: + raise MP4StreamInfoError("track has no audio data") + + mdhd = trak[b"mdia", b"mdhd"] + ok, data = mdhd.read(fileobj) + if not ok: + raise MP4StreamInfoError("Not enough data") + + try: + version, flags, data = parse_full_atom(data) + except ValueError as e: + raise MP4StreamInfoError(e) + + if version == 0: + offset = 8 + fmt = ">2I" + elif version == 1: + offset = 16 + fmt = ">IQ" + else: + raise MP4StreamInfoError("Unknown mdhd version %d" % version) + + end = offset + struct.calcsize(fmt) + unit, length = struct.unpack(fmt, data[offset:end]) + try: + self.length = float(length) / unit + except ZeroDivisionError: + self.length = 0 + + try: + atom = trak[b"mdia", b"minf", b"stbl", b"stsd"] + except KeyError: + pass + else: + self._parse_stsd(atom, fileobj) + + def _parse_stsd(self, atom, fileobj): + """Sets channels, bits_per_sample, sample_rate and optionally bitrate. + + Can raise MP4StreamInfoError. 
+ """ + + assert atom.name == b"stsd" + + ok, data = atom.read(fileobj) + if not ok: + raise MP4StreamInfoError("Invalid stsd") + + try: + version, flags, data = parse_full_atom(data) + except ValueError as e: + raise MP4StreamInfoError(e) + + if version != 0: + raise MP4StreamInfoError("Unsupported stsd version") + + try: + num_entries, offset = cdata.uint32_be_from(data, 0) + except cdata.error as e: + raise MP4StreamInfoError(e) + + if num_entries == 0: + return + + # look at the first entry if there is one + entry_fileobj = cBytesIO(data[offset:]) + try: + entry_atom = Atom(entry_fileobj) + except AtomError as e: + raise MP4StreamInfoError(e) + + try: + entry = AudioSampleEntry(entry_atom, entry_fileobj) + except ASEntryError as e: + raise MP4StreamInfoError(e) + else: + self.channels = entry.channels + self.bits_per_sample = entry.sample_size + self.sample_rate = entry.sample_rate + self.bitrate = entry.bitrate + self.codec = entry.codec + self.codec_description = entry.codec_description + + def pprint(self): + return "MPEG-4 audio (%s), %.2f seconds, %d bps" % ( + self.codec_description, self.length, self.bitrate) + + +class MP4(FileType): + """An MPEG-4 audio file, probably containing AAC. + + If more than one track is present in the file, the first is used. + Only audio ('soun') tracks will be read. 
+ + :ivar info: :class:`MP4Info` + :ivar tags: :class:`MP4Tags` + """ + + MP4Tags = MP4Tags + + _mimes = ["audio/mp4", "audio/x-m4a", "audio/mpeg4", "audio/aac"] + + def load(self, filename): + self.filename = filename + with open(filename, "rb") as fileobj: + try: + atoms = Atoms(fileobj) + except AtomError as err: + reraise(error, err, sys.exc_info()[2]) + + try: + self.info = MP4Info(atoms, fileobj) + except error: + raise + except Exception as err: + reraise(MP4StreamInfoError, err, sys.exc_info()[2]) + + if not MP4Tags._can_load(atoms): + self.tags = None + self._padding = 0 + else: + try: + self.tags = self.MP4Tags(atoms, fileobj) + except error: + raise + except Exception as err: + reraise(MP4MetadataError, err, sys.exc_info()[2]) + else: + self._padding = self.tags._padding + + def save(self, filename=None, padding=None): + super(MP4, self).save(filename, padding=padding) + + def delete(self, filename=None): + super(MP4, self).delete(filename) + + def add_tags(self): + if self.tags is None: + self.tags = self.MP4Tags() + else: + raise error("an MP4 tag already exists") + + @staticmethod + def score(filename, fileobj, header_data): + return (b"ftyp" in header_data) + (b"mp4" in header_data) + + +Open = MP4 + + +def delete(filename): + """Remove tags from a file.""" + + MP4(filename).delete() diff --git a/libs/mutagen/mp4/_as_entry.py b/libs/mutagen/mp4/_as_entry.py new file mode 100644 index 00000000..306d5720 --- /dev/null +++ b/libs/mutagen/mp4/_as_entry.py @@ -0,0 +1,542 @@ +# -*- coding: utf-8 -*- +# Copyright (C) 2014 Christoph Reiter +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 as +# published by the Free Software Foundation. 
+ +from mutagen._compat import cBytesIO, xrange +from mutagen.aac import ProgramConfigElement +from mutagen._util import BitReader, BitReaderError, cdata +from mutagen._compat import text_type +from ._util import parse_full_atom +from ._atom import Atom, AtomError + + +class ASEntryError(Exception): + pass + + +class AudioSampleEntry(object): + """Parses an AudioSampleEntry atom. + + Private API. + + Attrs: + channels (int): number of channels + sample_size (int): sample size in bits + sample_rate (int): sample rate in Hz + bitrate (int): bits per second (0 means unknown) + codec (string): + audio codec, either 'mp4a[.*][.*]' (rfc6381) or 'alac' + codec_description (string): descriptive codec name e.g. "AAC LC+SBR" + + Can raise ASEntryError. + """ + + channels = 0 + sample_size = 0 + sample_rate = 0 + bitrate = 0 + codec = None + codec_description = None + + def __init__(self, atom, fileobj): + ok, data = atom.read(fileobj) + if not ok: + raise ASEntryError("too short %r atom" % atom.name) + + fileobj = cBytesIO(data) + r = BitReader(fileobj) + + try: + # SampleEntry + r.skip(6 * 8) # reserved + r.skip(2 * 8) # data_ref_index + + # AudioSampleEntry + r.skip(8 * 8) # reserved + self.channels = r.bits(16) + self.sample_size = r.bits(16) + r.skip(2 * 8) # pre_defined + r.skip(2 * 8) # reserved + self.sample_rate = r.bits(32) >> 16 + except BitReaderError as e: + raise ASEntryError(e) + + assert r.is_aligned() + + try: + extra = Atom(fileobj) + except AtomError as e: + raise ASEntryError(e) + + self.codec = atom.name.decode("latin-1") + self.codec_description = None + + if atom.name == b"mp4a" and extra.name == b"esds": + self._parse_esds(extra, fileobj) + elif atom.name == b"alac" and extra.name == b"alac": + self._parse_alac(extra, fileobj) + elif atom.name == b"ac-3" and extra.name == b"dac3": + self._parse_dac3(extra, fileobj) + + if self.codec_description is None: + self.codec_description = self.codec.upper() + + def _parse_dac3(self, atom, fileobj): + # ETSI TS 
102 366 + + assert atom.name == b"dac3" + + ok, data = atom.read(fileobj) + if not ok: + raise ASEntryError("truncated %s atom" % atom.name) + fileobj = cBytesIO(data) + r = BitReader(fileobj) + + # sample_rate in AudioSampleEntry covers values in + # fscod2 and not just fscod, so ignore fscod here. + try: + r.skip(2 + 5 + 3) # fscod, bsid, bsmod + acmod = r.bits(3) + lfeon = r.bits(1) + bit_rate_code = r.bits(5) + r.skip(5) # reserved + except BitReaderError as e: + raise ASEntryError(e) + + self.channels = [2, 1, 2, 3, 3, 4, 4, 5][acmod] + lfeon + + try: + self.bitrate = [ + 32, 40, 48, 56, 64, 80, 96, 112, 128, 160, 192, + 224, 256, 320, 384, 448, 512, 576, 640][bit_rate_code] * 1000 + except IndexError: + pass + + def _parse_alac(self, atom, fileobj): + # https://alac.macosforge.org/trac/browser/trunk/ + # ALACMagicCookieDescription.txt + + assert atom.name == b"alac" + + ok, data = atom.read(fileobj) + if not ok: + raise ASEntryError("truncated %s atom" % atom.name) + + try: + version, flags, data = parse_full_atom(data) + except ValueError as e: + raise ASEntryError(e) + + if version != 0: + raise ASEntryError("Unsupported version %d" % version) + + fileobj = cBytesIO(data) + r = BitReader(fileobj) + + try: + # for some files the AudioSampleEntry values default to 44100/2chan + # and the real info is in the alac cookie, so prefer it + r.skip(32) # frameLength + compatibleVersion = r.bits(8) + if compatibleVersion != 0: + return + self.sample_size = r.bits(8) + r.skip(8 + 8 + 8) + self.channels = r.bits(8) + r.skip(16 + 32) + self.bitrate = r.bits(32) + self.sample_rate = r.bits(32) + except BitReaderError as e: + raise ASEntryError(e) + + def _parse_esds(self, esds, fileobj): + assert esds.name == b"esds" + + ok, data = esds.read(fileobj) + if not ok: + raise ASEntryError("truncated %s atom" % esds.name) + + try: + version, flags, data = parse_full_atom(data) + except ValueError as e: + raise ASEntryError(e) + + if version != 0: + raise 
ASEntryError("Unsupported version %d" % version) + + fileobj = cBytesIO(data) + r = BitReader(fileobj) + + try: + tag = r.bits(8) + if tag != ES_Descriptor.TAG: + raise ASEntryError("unexpected descriptor: %d" % tag) + assert r.is_aligned() + except BitReaderError as e: + raise ASEntryError(e) + + try: + decSpecificInfo = ES_Descriptor.parse(fileobj) + except DescriptorError as e: + raise ASEntryError(e) + dec_conf_desc = decSpecificInfo.decConfigDescr + + self.bitrate = dec_conf_desc.avgBitrate + self.codec += dec_conf_desc.codec_param + self.codec_description = dec_conf_desc.codec_desc + + decSpecificInfo = dec_conf_desc.decSpecificInfo + if decSpecificInfo is not None: + if decSpecificInfo.channels != 0: + self.channels = decSpecificInfo.channels + + if decSpecificInfo.sample_rate != 0: + self.sample_rate = decSpecificInfo.sample_rate + + +class DescriptorError(Exception): + pass + + +class BaseDescriptor(object): + + TAG = None + + @classmethod + def _parse_desc_length_file(cls, fileobj): + """May raise ValueError""" + + value = 0 + for i in xrange(4): + try: + b = cdata.uint8(fileobj.read(1)) + except cdata.error as e: + raise ValueError(e) + value = (value << 7) | (b & 0x7f) + if not b >> 7: + break + else: + raise ValueError("invalid descriptor length") + + return value + + @classmethod + def parse(cls, fileobj): + """Returns a parsed instance of the called type. + The file position is right after the descriptor after this returns. 
+ + Raises DescriptorError + """ + + try: + length = cls._parse_desc_length_file(fileobj) + except ValueError as e: + raise DescriptorError(e) + pos = fileobj.tell() + instance = cls(fileobj, length) + left = length - (fileobj.tell() - pos) + if left < 0: + raise DescriptorError("descriptor parsing read too much data") + fileobj.seek(left, 1) + return instance + + +class ES_Descriptor(BaseDescriptor): + + TAG = 0x3 + + def __init__(self, fileobj, length): + """Raises DescriptorError""" + + r = BitReader(fileobj) + try: + self.ES_ID = r.bits(16) + self.streamDependenceFlag = r.bits(1) + self.URL_Flag = r.bits(1) + self.OCRstreamFlag = r.bits(1) + self.streamPriority = r.bits(5) + if self.streamDependenceFlag: + self.dependsOn_ES_ID = r.bits(16) + if self.URL_Flag: + URLlength = r.bits(8) + self.URLstring = r.bytes(URLlength) + if self.OCRstreamFlag: + self.OCR_ES_Id = r.bits(16) + + tag = r.bits(8) + except BitReaderError as e: + raise DescriptorError(e) + + if tag != DecoderConfigDescriptor.TAG: + raise DescriptorError("unexpected DecoderConfigDescrTag %d" % tag) + + assert r.is_aligned() + self.decConfigDescr = DecoderConfigDescriptor.parse(fileobj) + + +class DecoderConfigDescriptor(BaseDescriptor): + + TAG = 0x4 + + decSpecificInfo = None + """A DecoderSpecificInfo, optional""" + + def __init__(self, fileobj, length): + """Raises DescriptorError""" + + r = BitReader(fileobj) + + try: + self.objectTypeIndication = r.bits(8) + self.streamType = r.bits(6) + self.upStream = r.bits(1) + self.reserved = r.bits(1) + self.bufferSizeDB = r.bits(24) + self.maxBitrate = r.bits(32) + self.avgBitrate = r.bits(32) + + if (self.objectTypeIndication, self.streamType) != (0x40, 0x5): + return + + # all from here is optional + if length * 8 == r.get_position(): + return + + tag = r.bits(8) + except BitReaderError as e: + raise DescriptorError(e) + + if tag == DecoderSpecificInfo.TAG: + assert r.is_aligned() + self.decSpecificInfo = DecoderSpecificInfo.parse(fileobj) + + @property 
+ def codec_param(self): + """string""" + + param = u".%X" % self.objectTypeIndication + info = self.decSpecificInfo + if info is not None: + param += u".%d" % info.audioObjectType + return param + + @property + def codec_desc(self): + """string or None""" + + info = self.decSpecificInfo + desc = None + if info is not None: + desc = info.description + return desc + + +class DecoderSpecificInfo(BaseDescriptor): + + TAG = 0x5 + + _TYPE_NAMES = [ + None, "AAC MAIN", "AAC LC", "AAC SSR", "AAC LTP", "SBR", + "AAC scalable", "TwinVQ", "CELP", "HVXC", None, None, "TTSI", + "Main synthetic", "Wavetable synthesis", "General MIDI", + "Algorithmic Synthesis and Audio FX", "ER AAC LC", None, "ER AAC LTP", + "ER AAC scalable", "ER Twin VQ", "ER BSAC", "ER AAC LD", "ER CELP", + "ER HVXC", "ER HILN", "ER Parametric", "SSC", "PS", "MPEG Surround", + None, "Layer-1", "Layer-2", "Layer-3", "DST", "ALS", "SLS", + "SLS non-core", "ER AAC ELD", "SMR Simple", "SMR Main", "USAC", + "SAOC", "LD MPEG Surround", "USAC" + ] + + _FREQS = [ + 96000, 88200, 64000, 48000, 44100, 32000, 24000, 22050, 16000, + 12000, 11025, 8000, 7350, + ] + + @property + def description(self): + """string or None if unknown""" + + name = None + try: + name = self._TYPE_NAMES[self.audioObjectType] + except IndexError: + pass + if name is None: + return + if self.sbrPresentFlag == 1: + name += "+SBR" + if self.psPresentFlag == 1: + name += "+PS" + return text_type(name) + + @property + def sample_rate(self): + """0 means unknown""" + + if self.sbrPresentFlag == 1: + return self.extensionSamplingFrequency + elif self.sbrPresentFlag == 0: + return self.samplingFrequency + else: + # these are all types that support SBR + aot_can_sbr = (1, 2, 3, 4, 6, 17, 19, 20, 22) + if self.audioObjectType not in aot_can_sbr: + return self.samplingFrequency + # there shouldn't be SBR for > 48KHz + if self.samplingFrequency > 24000: + return self.samplingFrequency + # either samplingFrequency or samplingFrequency * 2 + return 0 + + 
@property + def channels(self): + """channel count or 0 for unknown""" + + # from ProgramConfigElement() + if hasattr(self, "pce_channels"): + return self.pce_channels + + conf = getattr( + self, "extensionChannelConfiguration", self.channelConfiguration) + + if conf == 1: + if self.psPresentFlag == -1: + return 0 + elif self.psPresentFlag == 1: + return 2 + else: + return 1 + elif conf == 7: + return 8 + elif conf > 7: + return 0 + else: + return conf + + def _get_audio_object_type(self, r): + """Raises BitReaderError""" + + audioObjectType = r.bits(5) + if audioObjectType == 31: + audioObjectTypeExt = r.bits(6) + audioObjectType = 32 + audioObjectTypeExt + return audioObjectType + + def _get_sampling_freq(self, r): + """Raises BitReaderError""" + + samplingFrequencyIndex = r.bits(4) + if samplingFrequencyIndex == 0xf: + samplingFrequency = r.bits(24) + else: + try: + samplingFrequency = self._FREQS[samplingFrequencyIndex] + except IndexError: + samplingFrequency = 0 + return samplingFrequency + + def __init__(self, fileobj, length): + """Raises DescriptorError""" + + r = BitReader(fileobj) + try: + self._parse(r, length) + except BitReaderError as e: + raise DescriptorError(e) + + def _parse(self, r, length): + """Raises BitReaderError""" + + def bits_left(): + return length * 8 - r.get_position() + + self.audioObjectType = self._get_audio_object_type(r) + self.samplingFrequency = self._get_sampling_freq(r) + self.channelConfiguration = r.bits(4) + + self.sbrPresentFlag = -1 + self.psPresentFlag = -1 + if self.audioObjectType in (5, 29): + self.extensionAudioObjectType = 5 + self.sbrPresentFlag = 1 + if self.audioObjectType == 29: + self.psPresentFlag = 1 + self.extensionSamplingFrequency = self._get_sampling_freq(r) + self.audioObjectType = self._get_audio_object_type(r) + if self.audioObjectType == 22: + self.extensionChannelConfiguration = r.bits(4) + else: + self.extensionAudioObjectType = 0 + + if self.audioObjectType in (1, 2, 3, 4, 6, 7, 17, 19, 20, 21, 
22, 23): + try: + GASpecificConfig(r, self) + except NotImplementedError: + # unsupported, (warn?) + return + else: + # unsupported + return + + if self.audioObjectType in ( + 17, 19, 20, 21, 22, 23, 24, 25, 26, 27, 39): + epConfig = r.bits(2) + if epConfig in (2, 3): + # unsupported + return + + if self.extensionAudioObjectType != 5 and bits_left() >= 16: + syncExtensionType = r.bits(11) + if syncExtensionType == 0x2b7: + self.extensionAudioObjectType = self._get_audio_object_type(r) + + if self.extensionAudioObjectType == 5: + self.sbrPresentFlag = r.bits(1) + if self.sbrPresentFlag == 1: + self.extensionSamplingFrequency = \ + self._get_sampling_freq(r) + if bits_left() >= 12: + syncExtensionType = r.bits(11) + if syncExtensionType == 0x548: + self.psPresentFlag = r.bits(1) + + if self.extensionAudioObjectType == 22: + self.sbrPresentFlag = r.bits(1) + if self.sbrPresentFlag == 1: + self.extensionSamplingFrequency = \ + self._get_sampling_freq(r) + self.extensionChannelConfiguration = r.bits(4) + + +def GASpecificConfig(r, info): + """Reads GASpecificConfig which is needed to get the data after that + (there is no length defined to skip it) and to read program_config_element + which can contain channel counts. + + May raise BitReaderError on error or + NotImplementedError if some reserved data was set. 
+ """ + + assert isinstance(info, DecoderSpecificInfo) + + r.skip(1) # frameLengthFlag + dependsOnCoreCoder = r.bits(1) + if dependsOnCoreCoder: + r.skip(14) + extensionFlag = r.bits(1) + if not info.channelConfiguration: + pce = ProgramConfigElement(r) + info.pce_channels = pce.channels + if info.audioObjectType == 6 or info.audioObjectType == 20: + r.skip(3) + if extensionFlag: + if info.audioObjectType == 22: + r.skip(5 + 11) + if info.audioObjectType in (17, 19, 20, 23): + r.skip(1 + 1 + 1) + extensionFlag3 = r.bits(1) + if extensionFlag3 != 0: + raise NotImplementedError("extensionFlag3 set") diff --git a/libs/mutagen/mp4/_atom.py b/libs/mutagen/mp4/_atom.py new file mode 100644 index 00000000..f73eb556 --- /dev/null +++ b/libs/mutagen/mp4/_atom.py @@ -0,0 +1,194 @@ +# -*- coding: utf-8 -*- + +# Copyright (C) 2006 Joe Wreschnig +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 as +# published by the Free Software Foundation. + +import struct + +from mutagen._compat import PY2 + +# This is not an exhaustive list of container atoms, but just the +# ones this module needs to peek inside. +_CONTAINERS = [b"moov", b"udta", b"trak", b"mdia", b"meta", b"ilst", + b"stbl", b"minf", b"moof", b"traf"] +_SKIP_SIZE = {b"meta": 4} + + +class AtomError(Exception): + pass + + +class Atom(object): + """An individual atom. + + Attributes: + children -- list child atoms (or None for non-container atoms) + length -- length of this atom, including length and name + datalength = -- length of this atom without length, name + name -- four byte name of the atom, as a str + offset -- location in the constructor-given fileobj of this atom + + This structure should only be used internally by Mutagen. 
+ """ + + children = None + + def __init__(self, fileobj, level=0): + """May raise AtomError""" + + self.offset = fileobj.tell() + try: + self.length, self.name = struct.unpack(">I4s", fileobj.read(8)) + except struct.error: + raise AtomError("truncated data") + self._dataoffset = self.offset + 8 + if self.length == 1: + try: + self.length, = struct.unpack(">Q", fileobj.read(8)) + except struct.error: + raise AtomError("truncated data") + self._dataoffset += 8 + if self.length < 16: + raise AtomError( + "64 bit atom length can only be 16 and higher") + elif self.length == 0: + if level != 0: + raise AtomError( + "only a top-level atom can have zero length") + # Only the last atom is supposed to have a zero-length, meaning it + # extends to the end of file. + fileobj.seek(0, 2) + self.length = fileobj.tell() - self.offset + fileobj.seek(self.offset + 8, 0) + elif self.length < 8: + raise AtomError( + "atom length can only be 0, 1 or 8 and higher") + + if self.name in _CONTAINERS: + self.children = [] + fileobj.seek(_SKIP_SIZE.get(self.name, 0), 1) + while fileobj.tell() < self.offset + self.length: + self.children.append(Atom(fileobj, level + 1)) + else: + fileobj.seek(self.offset + self.length, 0) + + @property + def datalength(self): + return self.length - (self._dataoffset - self.offset) + + def read(self, fileobj): + """Return if all data could be read and the atom payload""" + + fileobj.seek(self._dataoffset, 0) + data = fileobj.read(self.datalength) + return len(data) == self.datalength, data + + @staticmethod + def render(name, data): + """Render raw atom data.""" + # this raises OverflowError if Py_ssize_t can't handle the atom data + size = len(data) + 8 + if size <= 0xFFFFFFFF: + return struct.pack(">I4s", size, name) + data + else: + return struct.pack(">I4sQ", 1, name, size + 8) + data + + def findall(self, name, recursive=False): + """Recursively find all child atoms by specified name.""" + if self.children is not None: + for child in self.children: + 
if child.name == name: + yield child + if recursive: + for atom in child.findall(name, True): + yield atom + + def __getitem__(self, remaining): + """Look up a child atom, potentially recursively. + + e.g. atom['udta', 'meta'] => + """ + if not remaining: + return self + elif self.children is None: + raise KeyError("%r is not a container" % self.name) + for child in self.children: + if child.name == remaining[0]: + return child[remaining[1:]] + else: + raise KeyError("%r not found" % remaining[0]) + + def __repr__(self): + cls = self.__class__.__name__ + if self.children is None: + return "<%s name=%r length=%r offset=%r>" % ( + cls, self.name, self.length, self.offset) + else: + children = "\n".join([" " + line for child in self.children + for line in repr(child).splitlines()]) + return "<%s name=%r length=%r offset=%r\n%s>" % ( + cls, self.name, self.length, self.offset, children) + + +class Atoms(object): + """Root atoms in a given file. + + Attributes: + atoms -- a list of top-level atoms as Atom objects + + This structure should only be used internally by Mutagen. + """ + + def __init__(self, fileobj): + self.atoms = [] + fileobj.seek(0, 2) + end = fileobj.tell() + fileobj.seek(0) + while fileobj.tell() + 8 <= end: + self.atoms.append(Atom(fileobj)) + + def path(self, *names): + """Look up and return the complete path of an atom. + + For example, atoms.path('moov', 'udta', 'meta') will return a + list of three atoms, corresponding to the moov, udta, and meta + atoms. + """ + + path = [self] + for name in names: + path.append(path[-1][name, ]) + return path[1:] + + def __contains__(self, names): + try: + self[names] + except KeyError: + return False + return True + + def __getitem__(self, names): + """Look up a child atom. + + 'names' may be a list of atoms (['moov', 'udta']) or a string + specifying the complete path ('moov.udta'). 
+ """ + + if PY2: + if isinstance(names, basestring): + names = names.split(b".") + else: + if isinstance(names, bytes): + names = names.split(b".") + + for child in self.atoms: + if child.name == names[0]: + return child[names[1:]] + else: + raise KeyError("%r not found" % names[0]) + + def __repr__(self): + return "\n".join([repr(child) for child in self.atoms]) diff --git a/libs/mutagen/mp4/_util.py b/libs/mutagen/mp4/_util.py new file mode 100644 index 00000000..9583334a --- /dev/null +++ b/libs/mutagen/mp4/_util.py @@ -0,0 +1,21 @@ +# -*- coding: utf-8 -*- +# Copyright (C) 2014 Christoph Reiter +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 as +# published by the Free Software Foundation. + +from mutagen._util import cdata + + +def parse_full_atom(data): + """Some atoms are versioned. Split them up in (version, flags, payload). + Can raise ValueError. + """ + + if len(data) < 4: + raise ValueError("not enough data") + + version = ord(data[0:1]) + flags = cdata.uint_be(b"\x00" + data[1:4]) + return version, flags, data[4:] diff --git a/libs/mutagen/musepack.py b/libs/mutagen/musepack.py index 9804deb3..7880958b 100644 --- a/libs/mutagen/musepack.py +++ b/libs/mutagen/musepack.py @@ -1,7 +1,7 @@ -# A Musepack reader/tagger -# -# Copyright 2006 Lukas Lalinsky -# Copyright 2012 Christoph Reiter +# -*- coding: utf-8 -*- + +# Copyright (C) 2006 Lukas Lalinsky +# Copyright (C) 2012 Christoph Reiter # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 as @@ -19,6 +19,8 @@ __all__ = ["Musepack", "Open", "delete"] import struct +from ._compat import endswith, xrange +from mutagen import StreamInfo from mutagen.apev2 import APEv2File, error, delete from mutagen.id3 import BitPaddedInt from mutagen._util import cdata @@ -46,8 +48,9 @@ def _parse_sv8_int(fileobj, limit=9): c = fileobj.read(1) if 
len(c) != 1: raise EOFError - num = (num << 7) | (ord(c) & 0x7F) - if not ord(c) & 0x80: + c = bytearray(c) + num = (num << 7) | (c[0] & 0x7F) + if not c[0] & 0x80: return num, i + 1 if limit > 0: raise ValueError @@ -63,7 +66,7 @@ def _calc_sv8_peak(peak): return (10 ** (peak / (256.0 * 20.0)) / 65535.0) -class MusepackInfo(object): +class MusepackInfo(StreamInfo): """Musepack stream information. Attributes: @@ -91,7 +94,7 @@ class MusepackInfo(object): raise MusepackHeaderError("not a Musepack file") # Skip ID3v2 tags - if header[:3] == "ID3": + if header[:3] == b"ID3": header = fileobj.read(6) if len(header) != 6: raise MusepackHeaderError("not a Musepack file") @@ -101,7 +104,7 @@ class MusepackInfo(object): if len(header) != 4: raise MusepackHeaderError("not a Musepack file") - if header.startswith("MPCK"): + if header.startswith(b"MPCK"): self.__parse_sv8(fileobj) else: self.__parse_sv467(fileobj) @@ -111,29 +114,31 @@ class MusepackInfo(object): self.bitrate = int(round(fileobj.tell() * 8 / self.length)) def __parse_sv8(self, fileobj): - #SV8 http://trac.musepack.net/trac/wiki/SV8Specification + # SV8 http://trac.musepack.net/trac/wiki/SV8Specification key_size = 2 - mandatory_packets = ["SH", "RG"] + mandatory_packets = [b"SH", b"RG"] def check_frame_key(key): - if len(frame_type) != key_size or not 'AA' <= frame_type <= 'ZZ': + if ((len(frame_type) != key_size) or + (not b'AA' <= frame_type <= b'ZZ')): raise MusepackHeaderError("Invalid frame key.") frame_type = fileobj.read(key_size) check_frame_key(frame_type) - while frame_type not in ("AP", "SE") and mandatory_packets: + while frame_type not in (b"AP", b"SE") and mandatory_packets: try: frame_size, slen = _parse_sv8_int(fileobj) except (EOFError, ValueError): raise MusepackHeaderError("Invalid packet size.") data_size = frame_size - key_size - slen + # packets can be at maximum data_size big and are padded with zeros - if frame_type == "SH": + if frame_type == b"SH": 
mandatory_packets.remove(frame_type) self.__parse_stream_header(fileobj, data_size) - elif frame_type == "RG": + elif frame_type == b"RG": mandatory_packets.remove(frame_type) self.__parse_replaygain_packet(fileobj, data_size) else: @@ -143,37 +148,43 @@ class MusepackInfo(object): check_frame_key(frame_type) if mandatory_packets: - raise MusepackHeaderError("Missing mandatory packets: %s." - % ", ".join(mandatory_packets)) + raise MusepackHeaderError("Missing mandatory packets: %s." % + ", ".join(map(repr, mandatory_packets))) self.length = float(self.samples) / self.sample_rate self.bitrate = 0 def __parse_stream_header(self, fileobj, data_size): + # skip CRC fileobj.seek(4, 1) + remaining_size = data_size - 4 + try: - self.version = ord(fileobj.read(1)) + self.version = bytearray(fileobj.read(1))[0] except TypeError: raise MusepackHeaderError("SH packet ended unexpectedly.") + + remaining_size -= 1 + try: samples, l1 = _parse_sv8_int(fileobj) samples_skip, l2 = _parse_sv8_int(fileobj) except (EOFError, ValueError): raise MusepackHeaderError( "SH packet: Invalid sample counts.") - left_size = data_size - 5 - l1 - l2 - if left_size != 2: - raise MusepackHeaderError("Invalid SH packet size.") - data = fileobj.read(left_size) - if len(data) != left_size: - raise MusepackHeaderError("SH packet ended unexpectedly.") - self.sample_rate = RATES[ord(data[-2]) >> 5] - self.channels = (ord(data[-1]) >> 4) + 1 + self.samples = samples - samples_skip + remaining_size -= l1 + l2 + + data = fileobj.read(remaining_size) + if len(data) != remaining_size: + raise MusepackHeaderError("SH packet ended unexpectedly.") + self.sample_rate = RATES[bytearray(data)[0] >> 5] + self.channels = (bytearray(data)[1] >> 4) + 1 def __parse_replaygain_packet(self, fileobj, data_size): data = fileobj.read(data_size) - if data_size != 9: + if data_size < 9: raise MusepackHeaderError("Invalid RG packet size.") if len(data) != data_size: raise MusepackHeaderError("RG packet ended unexpectedly.") @@ 
-197,8 +208,8 @@ class MusepackInfo(object): raise MusepackHeaderError("not a Musepack file") # SV7 - if header.startswith("MP+"): - self.version = ord(header[3]) & 0xF + if header.startswith(b"MP+"): + self.version = bytearray(header)[3] & 0xF if self.version < 7: raise MusepackHeaderError("not a Musepack file") frames = cdata.uint_le(header[4:8]) @@ -235,12 +246,12 @@ class MusepackInfo(object): def pprint(self): rg_data = [] if hasattr(self, "title_gain"): - rg_data.append("%+0.2f (title)" % self.title_gain) + rg_data.append(u"%+0.2f (title)" % self.title_gain) if hasattr(self, "album_gain"): - rg_data.append("%+0.2f (album)" % self.album_gain) + rg_data.append(u"%+0.2f (album)" % self.album_gain) rg_data = (rg_data and ", Gain: " + ", ".join(rg_data)) or "" - return "Musepack SV%d, %.2f seconds, %d Hz, %d bps%s" % ( + return u"Musepack SV%d, %.2f seconds, %d Hz, %d bps%s" % ( self.version, self.length, self.sample_rate, self.bitrate, rg_data) @@ -250,8 +261,10 @@ class Musepack(APEv2File): @staticmethod def score(filename, fileobj, header): - return (header.startswith("MP+") + header.startswith("MPCK") + - filename.lower().endswith(".mpc")) + filename = filename.lower() + + return (header.startswith(b"MP+") + header.startswith(b"MPCK") + + endswith(filename, b".mpc")) Open = Musepack diff --git a/libs/mutagen/ogg.py b/libs/mutagen/ogg.py index 657eb7f7..9961a966 100644 --- a/libs/mutagen/ogg.py +++ b/libs/mutagen/ogg.py @@ -1,4 +1,6 @@ -# Copyright 2006 Joe Wreschnig +# -*- coding: utf-8 -*- + +# Copyright (C) 2006 Joe Wreschnig # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 as @@ -18,13 +20,12 @@ import struct import sys import zlib -from cStringIO import StringIO - from mutagen import FileType -from mutagen._util import cdata, insert_bytes, delete_bytes +from mutagen._util import cdata, resize_bytes, MutagenError +from ._compat import cBytesIO, reraise, chr_, izip, 
xrange -class error(IOError): +class error(IOError, MutagenError): """Ogg stream parsing errors.""" pass @@ -59,7 +60,7 @@ class OggPage(object): version = 0 __type_flags = 0 - position = 0L + position = 0 serial = 0 sequence = 0 offset = None @@ -78,15 +79,15 @@ class OggPage(object): raise EOFError try: - (oggs, self.version, self.__type_flags, self.position, - self.serial, self.sequence, crc, segments) = struct.unpack( - "<4sBBqIIiB", header) + (oggs, self.version, self.__type_flags, + self.position, self.serial, self.sequence, + crc, segments) = struct.unpack("<4sBBqIIiB", header) except struct.error: raise error("unable to read full header; got %r" % header) - if oggs != "OggS": + if oggs != b"OggS": raise error("read %r, expected %r, at 0x%x" % ( - oggs, "OggS", fileobj.tell() - 27)) + oggs, b"OggS", fileobj.tell() - 27)) if self.version != 0: raise error("version %r unsupported" % self.version) @@ -96,7 +97,7 @@ class OggPage(object): lacing_bytes = fileobj.read(segments) if len(lacing_bytes) != segments: raise error("unable to read %r lacing bytes" % segments) - for c in map(ord, lacing_bytes): + for c in bytearray(lacing_bytes): total += c if c < 255: lacings.append(total) @@ -105,8 +106,8 @@ class OggPage(object): lacings.append(total) self.complete = False - self.packets = map(fileobj.read, lacings) - if map(len, self.packets) != lacings: + self.packets = [fileobj.read(l) for l in lacings] + if [len(p) for p in self.packets] != lacings: raise error("unable to read full data") def __eq__(self, other): @@ -134,21 +135,21 @@ class OggPage(object): """ data = [ - struct.pack("<4sBBqIIi", "OggS", self.version, self.__type_flags, + struct.pack("<4sBBqIIi", b"OggS", self.version, self.__type_flags, self.position, self.serial, self.sequence, 0) ] lacing_data = [] for datum in self.packets: quot, rem = divmod(len(datum), 255) - lacing_data.append("\xff" * quot + chr(rem)) - lacing_data = "".join(lacing_data) - if not self.complete and 
lacing_data.endswith("\x00"): + lacing_data.append(b"\xff" * quot + chr_(rem)) + lacing_data = b"".join(lacing_data) + if not self.complete and lacing_data.endswith(b"\x00"): lacing_data = lacing_data[:-1] - data.append(chr(len(lacing_data))) + data.append(chr_(len(lacing_data))) data.append(lacing_data) data.extend(self.packets) - data = "".join(data) + data = b"".join(data) # Python's CRC is swapped relative to Ogg's needs. # crc32 returns uint prior to py2.6 on some platforms, so force uint @@ -196,8 +197,8 @@ class OggPage(object): lambda self, v: self.__set_flag(2, v), doc="This is the last page of a logical bitstream.") - @classmethod - def renumber(klass, fileobj, serial, start): + @staticmethod + def renumber(fileobj, serial, start): """Renumber pages belonging to a specified logical stream. fileobj must be opened with mode r+b or w+b. @@ -235,8 +236,8 @@ class OggPage(object): fileobj.seek(page.offset + page.size, 0) number += 1 - @classmethod - def to_packets(klass, pages, strict=False): + @staticmethod + def to_packets(pages, strict=False): """Construct a list of packet data from a list of Ogg pages. 
If strict is true, the first page must start a new packet, @@ -253,7 +254,7 @@ class OggPage(object): if not pages[-1].complete: raise ValueError("last packet does not complete") elif pages and pages[0].continued: - packets.append([""]) + packets.append([b""]) for page in pages: if serial != page.serial: @@ -267,13 +268,46 @@ class OggPage(object): packets[-1].append(page.packets[0]) else: packets.append([page.packets[0]]) - packets.extend([[p] for p in page.packets[1:]]) + packets.extend([p] for p in page.packets[1:]) - return ["".join(p) for p in packets] + return [b"".join(p) for p in packets] @classmethod - def from_packets(klass, packets, sequence=0, - default_size=4096, wiggle_room=2048): + def _from_packets_try_preserve(cls, packets, old_pages): + """Like from_packets but in case the size and number of the packets + is the same as in the given pages the layout of the pages will + be copied (the page size and number will match). + + If the packets don't match this behaves like:: + + OggPage.from_packets(packets, sequence=old_pages[0].sequence) + """ + + old_packets = cls.to_packets(old_pages) + + if [len(p) for p in packets] != [len(p) for p in old_packets]: + # doesn't match, fall back + return cls.from_packets(packets, old_pages[0].sequence) + + new_data = b"".join(packets) + new_pages = [] + for old in old_pages: + new = OggPage() + new.sequence = old.sequence + new.complete = old.complete + new.continued = old.continued + new.position = old.position + for p in old.packets: + data, new_data = new_data[:len(p)], new_data[len(p):] + new.packets.append(data) + new_pages.append(new) + assert not new_data + + return new_pages + + @staticmethod + def from_packets(packets, sequence=0, default_size=4096, + wiggle_room=2048): """Construct a list of Ogg pages from a list of packet data. 
The algorithm will generate pages of approximately @@ -300,7 +334,7 @@ class OggPage(object): page.sequence = sequence for packet in packets: - page.packets.append("") + page.packets.append(b"") while packet: data, packet = packet[:chunk_size], packet[chunk_size:] if page.size < default_size and len(page.packets) < 255: @@ -314,7 +348,7 @@ class OggPage(object): if page.packets[-1]: page.complete = False if len(page.packets) == 1: - page.position = -1L + page.position = -1 else: page.packets.pop(-1) pages.append(page) @@ -325,7 +359,7 @@ class OggPage(object): if len(packet) < wiggle_room: page.packets[-1] += packet - packet = "" + packet = b"" if page.packets: pages.append(page) @@ -333,7 +367,7 @@ class OggPage(object): return pages @classmethod - def replace(klass, fileobj, old_pages, new_pages): + def replace(cls, fileobj, old_pages, new_pages): """Replace old_pages with new_pages within fileobj. old_pages must have come from reading fileobj originally. @@ -345,9 +379,13 @@ class OggPage(object): such, it must be opened r+b or w+b. """ + if not len(old_pages) or not len(new_pages): + raise ValueError("empty pages list not allowed") + # Number the new pages starting from the first old page. first = old_pages[0].sequence - for page, seq in zip(new_pages, range(first, first + len(new_pages))): + for page, seq in izip(new_pages, + xrange(first, first + len(new_pages))): page.sequence = seq page.serial = old_pages[0].serial @@ -359,26 +397,30 @@ class OggPage(object): new_pages[-1].last = old_pages[-1].last new_pages[-1].complete = old_pages[-1].complete if not new_pages[-1].complete and len(new_pages[-1].packets) == 1: - new_pages[-1].position = -1L + new_pages[-1].position = -1 - new_data = "".join(map(klass.write, new_pages)) + new_data = [cls.write(p) for p in new_pages] - # Make room in the file for the new data. 
- delta = len(new_data) - fileobj.seek(old_pages[0].offset, 0) - insert_bytes(fileobj, delta, old_pages[0].offset) - fileobj.seek(old_pages[0].offset, 0) - fileobj.write(new_data) - new_data_end = old_pages[0].offset + delta + # Add dummy data or merge the remaining data together so multiple + # new pages replace an old one + pages_diff = len(old_pages) - len(new_data) + if pages_diff > 0: + new_data.extend([b""] * pages_diff) + elif pages_diff < 0: + new_data[pages_diff - 1:] = [b"".join(new_data[pages_diff - 1:])] - # Go through the old pages and delete them. Since we shifted - # the data down the file, we need to adjust their offsets. We - # also need to go backwards, so we don't adjust the deltas of - # the other pages. - old_pages.reverse() - for old_page in old_pages: - adj_offset = old_page.offset + delta - delete_bytes(fileobj, old_page.size, adj_offset) + # Replace pages one by one. If the sizes match no resize happens. + offset_adjust = 0 + new_data_end = None + assert len(old_pages) == len(new_data) + for old_page, data in izip(old_pages, new_data): + offset = old_page.offset + offset_adjust + data_size = len(data) + resize_bytes(fileobj, old_page.size, data_size, offset) + fileobj.seek(offset, 0) + fileobj.write(data) + new_data_end = offset + data_size + offset_adjust += (data_size - old_page.size) # Finally, if there's any discrepency in length, we need to # renumber the pages for the logical stream. @@ -386,10 +428,10 @@ class OggPage(object): fileobj.seek(new_data_end, 0) serial = new_pages[-1].serial sequence = new_pages[-1].sequence + 1 - klass.renumber(fileobj, serial, sequence) + cls.renumber(fileobj, serial, sequence) - @classmethod - def find_last(klass, fileobj, serial): + @staticmethod + def find_last(fileobj, serial): """Find the last page of the stream 'serial'. If the file is not multiplexed this function is fast. If it is, @@ -401,19 +443,19 @@ class OggPage(object): # For non-muxed streams, look at the last page. 
try: - fileobj.seek(-256*256, 2) + fileobj.seek(-256 * 256, 2) except IOError: # The file is less than 64k in length. fileobj.seek(0) data = fileobj.read() try: - index = data.rindex("OggS") + index = data.rindex(b"OggS") except ValueError: raise error("unable to find final Ogg header") - stringobj = StringIO(data[index:]) + bytesobj = cBytesIO(data[index:]) best_page = None try: - page = OggPage(stringobj) + page = OggPage(bytesobj) except error: pass else: @@ -453,18 +495,15 @@ class OggFileType(FileType): """Load file information from a filename.""" self.filename = filename - fileobj = open(filename, "rb") - try: + with open(filename, "rb") as fileobj: try: self.info = self._Info(fileobj) self.tags = self._Tags(fileobj, self.info) self.info._post_tags(fileobj) - except error, e: - raise self._Error, e, sys.exc_info()[2] + except error as e: + reraise(self._Error, e, sys.exc_info()[2]) except EOFError: - raise self._Error, "no appropriate stream found" - finally: - fileobj.close() + raise self._Error("no appropriate stream found") def delete(self, filename=None): """Remove tags from a file. @@ -476,18 +515,20 @@ class OggFileType(FileType): filename = self.filename self.tags.clear() - fileobj = open(filename, "rb+") - try: + # TODO: we should delegate the deletion to the subclass and not through + # _inject. + with open(filename, "rb+") as fileobj: try: - self.tags._inject(fileobj) - except error, e: - raise self._Error, e, sys.exc_info()[2] + self.tags._inject(fileobj, lambda x: 0) + except error as e: + reraise(self._Error, e, sys.exc_info()[2]) except EOFError: - raise self._Error, "no appropriate stream found" - finally: - fileobj.close() + raise self._Error("no appropriate stream found") - def save(self, filename=None): + def add_tags(self): + raise self._Error + + def save(self, filename=None, padding=None): """Save a tag to a file. If no filename is given, the one most recently loaded is used. 
@@ -498,10 +539,10 @@ class OggFileType(FileType): fileobj = open(filename, "rb+") try: try: - self.tags._inject(fileobj) - except error, e: - raise self._Error, e, sys.exc_info()[2] + self.tags._inject(fileobj, padding) + except error as e: + reraise(self._Error, e, sys.exc_info()[2]) except EOFError: - raise self._Error, "no appropriate stream found" + raise self._Error("no appropriate stream found") finally: fileobj.close() diff --git a/libs/mutagen/oggflac.py b/libs/mutagen/oggflac.py index 14ecec00..b86226ca 100644 --- a/libs/mutagen/oggflac.py +++ b/libs/mutagen/oggflac.py @@ -1,6 +1,6 @@ -# Ogg FLAC support. -# -# Copyright 2006 Joe Wreschnig +# -*- coding: utf-8 -*- + +# Copyright (C) 2006 Joe Wreschnig # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 as @@ -19,9 +19,11 @@ __all__ = ["OggFLAC", "Open", "delete"] import struct -from cStringIO import StringIO +from ._compat import cBytesIO -from mutagen.flac import StreamInfo, VCFLACDict, StrictFileObject +from mutagen import StreamInfo +from mutagen.flac import StreamInfo as FLACStreamInfo, error as FLACError +from mutagen._vorbis import VCommentDict from mutagen.ogg import OggPage, OggFileType, error as OggError @@ -34,31 +36,24 @@ class OggFLACHeaderError(error): class OggFLACStreamInfo(StreamInfo): - """Ogg FLAC general header and stream info. + """Ogg FLAC stream info.""" - This encompasses the Ogg wrapper for the FLAC STREAMINFO metadata - block, as well as the Ogg codec setup that precedes it. 
+ length = 0 + """File length in seconds, as a float""" - Attributes (in addition to StreamInfo's): + channels = 0 + """Number of channels""" - * packets -- number of metadata packets - * serial -- Ogg logical stream serial number - """ + sample_rate = 0 + """Sample rate in Hz""" - packets = 0 - serial = 0 - - def load(self, data): - # Ogg expects file objects that don't raise on read - if isinstance(data, StrictFileObject): - data = data._fileobj - - page = OggPage(data) - while not page.packets[0].startswith("\x7FFLAC"): - page = OggPage(data) + def __init__(self, fileobj): + page = OggPage(fileobj) + while not page.packets[0].startswith(b"\x7FFLAC"): + page = OggPage(fileobj) major, minor, self.packets, flac = struct.unpack( ">BBH4s", page.packets[0][5:13]) - if flac != "fLaC": + if flac != b"fLaC": raise OggFLACHeaderError("invalid FLAC marker (%r)" % flac) elif (major, minor) != (1, 0): raise OggFLACHeaderError( @@ -66,8 +61,16 @@ class OggFLACStreamInfo(StreamInfo): self.serial = page.serial # Skip over the block header. 
- stringobj = StrictFileObject(StringIO(page.packets[0][17:])) - super(OggFLACStreamInfo, self).load(stringobj) + stringobj = cBytesIO(page.packets[0][17:]) + + try: + flac_info = FLACStreamInfo(stringobj) + except FLACError as e: + raise OggFLACHeaderError(e) + + for attr in ["min_blocksize", "max_blocksize", "sample_rate", + "channels", "bits_per_sample", "total_samples", "length"]: + setattr(self, attr, getattr(flac_info, attr)) def _post_tags(self, fileobj): if self.length: @@ -76,31 +79,33 @@ class OggFLACStreamInfo(StreamInfo): self.length = page.position / float(self.sample_rate) def pprint(self): - return "Ogg " + super(OggFLACStreamInfo, self).pprint() + return u"Ogg FLAC, %.2f seconds, %d Hz" % ( + self.length, self.sample_rate) -class OggFLACVComment(VCFLACDict): - def load(self, data, info, errors='replace'): +class OggFLACVComment(VCommentDict): + + def __init__(self, fileobj, info): # data should be pointing at the start of an Ogg page, after # the first FLAC page. pages = [] complete = False while not complete: - page = OggPage(data) + page = OggPage(fileobj) if page.serial == info.serial: pages.append(page) complete = page.complete or (len(page.packets) > 1) - comment = StringIO(OggPage.to_packets(pages)[0][4:]) - super(OggFLACVComment, self).load(comment, errors=errors) + comment = cBytesIO(OggPage.to_packets(pages)[0][4:]) + super(OggFLACVComment, self).__init__(comment, framing=False) - def _inject(self, fileobj): + def _inject(self, fileobj, padding_func): """Write tag data into the FLAC Vorbis comment packet/page.""" # Ogg FLAC has no convenient data marker like Vorbis, but the # second packet - and second page - must be the comment data. 
fileobj.seek(0) page = OggPage(fileobj) - while not page.packets[0].startswith("\x7FFLAC"): + while not page.packets[0].startswith(b"\x7FFLAC"): page = OggPage(fileobj) first_page = page @@ -116,8 +121,8 @@ class OggFLACVComment(VCFLACDict): packets = OggPage.to_packets(old_pages, strict=False) # Set the new comment block. - data = self.write() - data = packets[0][0] + struct.pack(">I", len(data))[-3:] + data + data = self.write(framing=False) + data = packets[0][:1] + struct.pack(">I", len(data))[-3:] + data packets[0] = data new_pages = OggPage.from_packets(packets, old_pages[0].sequence) @@ -132,10 +137,19 @@ class OggFLAC(OggFileType): _Error = OggFLACHeaderError _mimes = ["audio/x-oggflac"] + info = None + """A `OggFLACStreamInfo`""" + + tags = None + """A `VCommentDict`""" + + def save(self, filename=None): + return super(OggFLAC, self).save(filename) + @staticmethod def score(filename, fileobj, header): - return (header.startswith("OggS") * ( - ("FLAC" in header) + ("fLaC" in header))) + return (header.startswith(b"OggS") * ( + (b"FLAC" in header) + (b"fLaC" in header))) Open = OggFLAC diff --git a/libs/mutagen/oggopus.py b/libs/mutagen/oggopus.py index 6de44391..7154e479 100644 --- a/libs/mutagen/oggopus.py +++ b/libs/mutagen/oggopus.py @@ -1,4 +1,6 @@ -# Copyright 2012 Christoph Reiter +# -*- coding: utf-8 -*- + +# Copyright (C) 2012, 2013 Christoph Reiter # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 as @@ -16,6 +18,10 @@ __all__ = ["OggOpus", "Open", "delete"] import struct +from mutagen import StreamInfo +from mutagen._compat import BytesIO +from mutagen._util import get_size +from mutagen._tags import PaddingInfo from mutagen._vorbis import VCommentDict from mutagen.ogg import OggPage, OggFileType, error as OggError @@ -28,20 +34,18 @@ class OggOpusHeaderError(error): pass -class OggOpusInfo(object): - """Ogg Opus stream information. 
- - Attributes: - - * length - file length in seconds, as a float - * channels - number of channels - """ +class OggOpusInfo(StreamInfo): + """Ogg Opus stream information.""" length = 0 + """File length in seconds, as a float""" + + channels = 0 + """Number of channels""" def __init__(self, fileobj): page = OggPage(fileobj) - while not page.packets[0].startswith("OpusHead"): + while not page.packets[0].startswith(b"OpusHead"): page = OggPage(fileobj) self.serial = page.serial @@ -56,7 +60,7 @@ class OggOpusInfo(object): self.__pre_skip = pre_skip # only the higher 4 bits change on incombatible changes - major, minor = version >> 4, version & 0xF + major = version >> 4 if major != 0: raise OggOpusHeaderError("version %r unsupported" % major) @@ -65,7 +69,7 @@ class OggOpusInfo(object): self.length = (page.position - self.__pre_skip) / float(48000) def pprint(self): - return "Ogg Opus, %.2f seconds" % (self.length) + return u"Ogg Opus, %.2f seconds" % (self.length) class OggOpusVComment(VCommentDict): @@ -74,8 +78,8 @@ class OggOpusVComment(VCommentDict): def __get_comment_pages(self, fileobj, info): # find the first tags page with the right serial page = OggPage(fileobj) - while info.serial != page.serial or \ - not page.packets[0].startswith("OpusTags"): + while ((info.serial != page.serial) or + not page.packets[0].startswith(b"OpusTags")): page = OggPage(fileobj) # get all comment pages @@ -90,16 +94,39 @@ class OggOpusVComment(VCommentDict): def __init__(self, fileobj, info): pages = self.__get_comment_pages(fileobj, info) data = OggPage.to_packets(pages)[0][8:] # Strip OpusTags - super(OggOpusVComment, self).__init__(data, framing=False) + fileobj = BytesIO(data) + super(OggOpusVComment, self).__init__(fileobj, framing=False) + self._padding = len(data) - self._size - def _inject(self, fileobj): + # in case the LSB of the first byte after v-comment is 1, preserve the + # following data + padding_flag = fileobj.read(1) + if padding_flag and ord(padding_flag) & 
0x1: + self._pad_data = padding_flag + fileobj.read() + self._padding = 0 # we have to preserve, so no padding + else: + self._pad_data = b"" + + def _inject(self, fileobj, padding_func): fileobj.seek(0) info = OggOpusInfo(fileobj) old_pages = self.__get_comment_pages(fileobj, info) packets = OggPage.to_packets(old_pages) - packets[0] = "OpusTags" + self.write(framing=False) - new_pages = OggPage.from_packets(packets, old_pages[0].sequence) + vcomment_data = b"OpusTags" + self.write(framing=False) + + if self._pad_data: + # if we have padding data to preserver we can't add more padding + # as long as we don't know the structure of what follows + packets[0] = vcomment_data + self._pad_data + else: + content_size = get_size(fileobj) - len(packets[0]) # approx + padding_left = len(packets[0]) - len(vcomment_data) + info = PaddingInfo(padding_left, content_size) + new_padding = info._get_padding(padding_func) + packets[0] = vcomment_data + b"\x00" * new_padding + + new_pages = OggPage._from_packets_try_preserve(packets, old_pages) OggPage.replace(fileobj, old_pages, new_pages) @@ -111,9 +138,15 @@ class OggOpus(OggFileType): _Error = OggOpusHeaderError _mimes = ["audio/ogg", "audio/ogg; codecs=opus"] + info = None + """A `OggOpusInfo`""" + + tags = None + """A `VCommentDict`""" + @staticmethod def score(filename, fileobj, header): - return (header.startswith("OggS") * ("OpusHead" in header)) + return (header.startswith(b"OggS") * (b"OpusHead" in header)) Open = OggOpus diff --git a/libs/mutagen/oggspeex.py b/libs/mutagen/oggspeex.py index 4f208521..9b16930b 100644 --- a/libs/mutagen/oggspeex.py +++ b/libs/mutagen/oggspeex.py @@ -1,5 +1,5 @@ -# Ogg Speex support. -# +# -*- coding: utf-8 -*- + # Copyright 2006 Joe Wreschnig # # This program is free software; you can redistribute it and/or modify @@ -19,9 +19,11 @@ http://lists.xiph.org/pipermail/speex-dev/2006-July/004676.html. 
__all__ = ["OggSpeex", "Open", "delete"] +from mutagen import StreamInfo from mutagen._vorbis import VCommentDict from mutagen.ogg import OggPage, OggFileType, error as OggError -from mutagen._util import cdata +from mutagen._util import cdata, get_size +from mutagen._tags import PaddingInfo class error(OggError): @@ -32,24 +34,25 @@ class OggSpeexHeaderError(error): pass -class OggSpeexInfo(object): - """Ogg Speex stream information. +class OggSpeexInfo(StreamInfo): + """Ogg Speex stream information.""" - Attributes: + length = 0 + """file length in seconds, as a float""" - * bitrate - nominal bitrate in bits per second - * channels - number of channels - * length - file length in seconds, as a float + channels = 0 + """number of channels""" + + bitrate = 0 + """nominal bitrate in bits per second. The reference encoder does not set the bitrate; in this case, the bitrate will be 0. """ - length = 0 - def __init__(self, fileobj): page = OggPage(fileobj) - while not page.packets[0].startswith("Speex "): + while not page.packets[0].startswith(b"Speex "): page = OggPage(fileobj) if not page.first: raise OggSpeexHeaderError( @@ -64,7 +67,7 @@ class OggSpeexInfo(object): self.length = page.position / float(self.sample_rate) def pprint(self): - return "Ogg Speex, %.2f seconds" % self.length + return u"Ogg Speex, %.2f seconds" % self.length class OggSpeexVComment(VCommentDict): @@ -78,10 +81,11 @@ class OggSpeexVComment(VCommentDict): if page.serial == info.serial: pages.append(page) complete = page.complete or (len(page.packets) > 1) - data = OggPage.to_packets(pages)[0] + "\x01" + data = OggPage.to_packets(pages)[0] super(OggSpeexVComment, self).__init__(data, framing=False) + self._padding = len(data) - self._size - def _inject(self, fileobj): + def _inject(self, fileobj, padding_func): """Write tag data into the Speex comment packet/page.""" fileobj.seek(0) @@ -89,7 +93,7 @@ class OggSpeexVComment(VCommentDict): # Find the first header page, with the stream info. 
# Use it to get the serial number. page = OggPage(fileobj) - while not page.packets[0].startswith("Speex "): + while not page.packets[0].startswith(b"Speex "): page = OggPage(fileobj) # Look for the next page with that serial number, it'll start @@ -108,10 +112,17 @@ class OggSpeexVComment(VCommentDict): packets = OggPage.to_packets(old_pages, strict=False) - # Set the new comment packet. - packets[0] = self.write(framing=False) + content_size = get_size(fileobj) - len(packets[0]) # approx + vcomment_data = self.write(framing=False) + padding_left = len(packets[0]) - len(vcomment_data) - new_pages = OggPage.from_packets(packets, old_pages[0].sequence) + info = PaddingInfo(padding_left, content_size) + new_padding = info._get_padding(padding_func) + + # Set the new comment packet. + packets[0] = vcomment_data + b"\x00" * new_padding + + new_pages = OggPage._from_packets_try_preserve(packets, old_pages) OggPage.replace(fileobj, old_pages, new_pages) @@ -123,9 +134,15 @@ class OggSpeex(OggFileType): _Error = OggSpeexHeaderError _mimes = ["audio/x-speex"] + info = None + """A `OggSpeexInfo`""" + + tags = None + """A `VCommentDict`""" + @staticmethod def score(filename, fileobj, header): - return (header.startswith("OggS") * ("Speex " in header)) + return (header.startswith(b"OggS") * (b"Speex " in header)) Open = OggSpeex diff --git a/libs/mutagen/oggtheora.py b/libs/mutagen/oggtheora.py index edf221a7..122e7d4b 100644 --- a/libs/mutagen/oggtheora.py +++ b/libs/mutagen/oggtheora.py @@ -1,5 +1,5 @@ -# Ogg Theora support. 
-# +# -*- coding: utf-8 -*- + # Copyright 2006 Joe Wreschnig # # This program is free software; you can redistribute it and/or modify @@ -18,8 +18,10 @@ __all__ = ["OggTheora", "Open", "delete"] import struct +from mutagen import StreamInfo from mutagen._vorbis import VCommentDict -from mutagen._util import cdata +from mutagen._util import cdata, get_size +from mutagen._tags import PaddingInfo from mutagen.ogg import OggPage, OggFileType, error as OggError @@ -31,20 +33,21 @@ class OggTheoraHeaderError(error): pass -class OggTheoraInfo(object): - """Ogg Theora stream information. - - Attributes: - - * length - file length in seconds, as a float - * fps - video frames per second, as a float - """ +class OggTheoraInfo(StreamInfo): + """Ogg Theora stream information.""" length = 0 + """File length in seconds, as a float""" + + fps = 0 + """Video frames per second, as a float""" + + bitrate = 0 + """Bitrate in bps (int)""" def __init__(self, fileobj): page = OggPage(fileobj) - while not page.packets[0].startswith("\x80theora"): + while not page.packets[0].startswith(b"\x80theora"): page = OggPage(fileobj) if not page.first: raise OggTheoraHeaderError( @@ -56,7 +59,7 @@ class OggTheoraInfo(object): "found Theora version %d.%d != 3.2" % (vmaj, vmin)) fps_num, fps_den = struct.unpack(">2I", data[22:30]) self.fps = fps_num / float(fps_den) - self.bitrate = cdata.uint_be("\x00" + data[37:40]) + self.bitrate = cdata.uint_be(b"\x00" + data[37:40]) self.granule_shift = (cdata.ushort_be(data[40:42]) >> 5) & 0x1F self.serial = page.serial @@ -68,7 +71,8 @@ class OggTheoraInfo(object): self.length = frames / float(self.fps) def pprint(self): - return "Ogg Theora, %.2f seconds, %d bps" % (self.length, self.bitrate) + return u"Ogg Theora, %.2f seconds, %d bps" % (self.length, + self.bitrate) class OggTheoraCommentDict(VCommentDict): @@ -83,14 +87,15 @@ class OggTheoraCommentDict(VCommentDict): pages.append(page) complete = page.complete or (len(page.packets) > 1) data = 
OggPage.to_packets(pages)[0][7:] - super(OggTheoraCommentDict, self).__init__(data + "\x01") + super(OggTheoraCommentDict, self).__init__(data, framing=False) + self._padding = len(data) - self._size - def _inject(self, fileobj): + def _inject(self, fileobj, padding_func): """Write tag data into the Theora comment packet/page.""" fileobj.seek(0) page = OggPage(fileobj) - while not page.packets[0].startswith("\x81theora"): + while not page.packets[0].startswith(b"\x81theora"): page = OggPage(fileobj) old_pages = [page] @@ -101,9 +106,16 @@ class OggTheoraCommentDict(VCommentDict): packets = OggPage.to_packets(old_pages, strict=False) - packets[0] = "\x81theora" + self.write(framing=False) + content_size = get_size(fileobj) - len(packets[0]) # approx + vcomment_data = b"\x81theora" + self.write(framing=False) + padding_left = len(packets[0]) - len(vcomment_data) - new_pages = OggPage.from_packets(packets, old_pages[0].sequence) + info = PaddingInfo(padding_left, content_size) + new_padding = info._get_padding(padding_func) + + packets[0] = vcomment_data + b"\x00" * new_padding + + new_pages = OggPage._from_packets_try_preserve(packets, old_pages) OggPage.replace(fileobj, old_pages, new_pages) @@ -115,10 +127,16 @@ class OggTheora(OggFileType): _Error = OggTheoraHeaderError _mimes = ["video/x-theora"] + info = None + """A `OggTheoraInfo`""" + + tags = None + """A `VCommentDict`""" + @staticmethod def score(filename, fileobj, header): - return (header.startswith("OggS") * - (("\x80theora" in header) + ("\x81theora" in header))) + return (header.startswith(b"OggS") * + ((b"\x80theora" in header) + (b"\x81theora" in header)) * 2) Open = OggTheora diff --git a/libs/mutagen/oggvorbis.py b/libs/mutagen/oggvorbis.py index 509fd966..b058a0c1 100644 --- a/libs/mutagen/oggvorbis.py +++ b/libs/mutagen/oggvorbis.py @@ -1,5 +1,5 @@ -# Ogg Vorbis support. 
-# +# -*- coding: utf-8 -*- + # Copyright 2006 Joe Wreschnig # # This program is free software; you can redistribute it and/or modify @@ -19,7 +19,10 @@ __all__ = ["OggVorbis", "Open", "delete"] import struct +from mutagen import StreamInfo from mutagen._vorbis import VCommentDict +from mutagen._util import get_size +from mutagen._tags import PaddingInfo from mutagen.ogg import OggPage, OggFileType, error as OggError @@ -31,20 +34,24 @@ class OggVorbisHeaderError(error): pass -class OggVorbisInfo(object): - """Ogg Vorbis stream information. - - Attributes: - - * length - file length in seconds, as a float - * bitrate - nominal ('average') bitrate in bits per second, as an int - """ +class OggVorbisInfo(StreamInfo): + """Ogg Vorbis stream information.""" length = 0 + """File length in seconds, as a float""" + + channels = 0 + """Number of channels""" + + bitrate = 0 + """Nominal ('average') bitrate in bits per second, as an int""" + + sample_rate = 0 + """Sample rate in Hz""" def __init__(self, fileobj): page = OggPage(fileobj) - while not page.packets[0].startswith("\x01vorbis"): + while not page.packets[0].startswith(b"\x01vorbis"): page = OggPage(fileobj) if not page.first: raise OggVorbisHeaderError( @@ -73,7 +80,8 @@ class OggVorbisInfo(object): self.length = page.position / float(self.sample_rate) def pprint(self): - return "Ogg Vorbis, %.2f seconds, %d bps" % (self.length, self.bitrate) + return u"Ogg Vorbis, %.2f seconds, %d bps" % ( + self.length, self.bitrate) class OggVCommentDict(VCommentDict): @@ -89,15 +97,16 @@ class OggVCommentDict(VCommentDict): complete = page.complete or (len(page.packets) > 1) data = OggPage.to_packets(pages)[0][7:] # Strip off "\x03vorbis". 
super(OggVCommentDict, self).__init__(data) + self._padding = len(data) - self._size - def _inject(self, fileobj): + def _inject(self, fileobj, padding_func): """Write tag data into the Vorbis comment packet/page.""" # Find the old pages in the file; we'll need to remove them, # plus grab any stray setup packet data out of them. fileobj.seek(0) page = OggPage(fileobj) - while not page.packets[0].startswith("\x03vorbis"): + while not page.packets[0].startswith(b"\x03vorbis"): page = OggPage(fileobj) old_pages = [page] @@ -108,10 +117,17 @@ class OggVCommentDict(VCommentDict): packets = OggPage.to_packets(old_pages, strict=False) - # Set the new comment packet. - packets[0] = "\x03vorbis" + self.write() + content_size = get_size(fileobj) - len(packets[0]) # approx + vcomment_data = b"\x03vorbis" + self.write() + padding_left = len(packets[0]) - len(vcomment_data) - new_pages = OggPage.from_packets(packets, old_pages[0].sequence) + info = PaddingInfo(padding_left, content_size) + new_padding = info._get_padding(padding_func) + + # Set the new comment packet. 
+ packets[0] = vcomment_data + b"\x00" * new_padding + + new_pages = OggPage._from_packets_try_preserve(packets, old_pages) OggPage.replace(fileobj, old_pages, new_pages) @@ -123,9 +139,15 @@ class OggVorbis(OggFileType): _Error = OggVorbisHeaderError _mimes = ["audio/vorbis", "audio/x-vorbis"] + info = None + """A `OggVorbisInfo`""" + + tags = None + """A `VCommentDict`""" + @staticmethod def score(filename, fileobj, header): - return (header.startswith("OggS") * ("\x01vorbis" in header)) + return (header.startswith(b"OggS") * (b"\x01vorbis" in header)) Open = OggVorbis diff --git a/libs/mutagen/optimfrog.py b/libs/mutagen/optimfrog.py index 24a87af8..0d85a818 100644 --- a/libs/mutagen/optimfrog.py +++ b/libs/mutagen/optimfrog.py @@ -1,6 +1,6 @@ -# OptimFROG reader/tagger -# -# Copyright 2006 Lukas Lalinsky +# -*- coding: utf-8 -*- + +# Copyright (C) 2006 Lukas Lalinsky # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 as @@ -22,6 +22,8 @@ __all__ = ["OptimFROG", "Open", "delete"] import struct +from ._compat import endswith +from mutagen import StreamInfo from mutagen.apev2 import APEv2File, error, delete @@ -29,7 +31,7 @@ class OptimFROGHeaderError(error): pass -class OptimFROGInfo(object): +class OptimFROGInfo(StreamInfo): """OptimFROG stream information. 
Attributes: @@ -41,7 +43,7 @@ class OptimFROGInfo(object): def __init__(self, fileobj): header = fileobj.read(76) - if (len(header) != 76 or not header.startswith("OFR ") or + if (len(header) != 76 or not header.startswith(b"OFR ") or struct.unpack("I", b"\x00" + bytes(data))[0] + tempos.append((deltasum, TEMPO, tempo)) + off += num + elif event_type in (0xF0, 0xF7): + val, off = _var_int(chunk, off) + off += val + else: + if event_type < 0x80: + # if < 0x80 take the type from the previous midi event + off += 1 + event_type = status + elif event_type < 0xF0: + off += 2 + status = event_type + else: + raise SMFError("invalid event") + + if event_type >> 4 in (0xD, 0xC): + off -= 1 + + events.append((deltasum, MIDI, delta)) + + return events, tempos + + +def _read_midi_length(fileobj): + """Returns the duration in seconds. Can raise all kind of errors...""" + + TEMPO, MIDI = range(2) + + def read_chunk(fileobj): + info = fileobj.read(8) + if len(info) != 8: + raise SMFError("truncated") + chunklen = struct.unpack(">I", info[4:])[0] + data = fileobj.read(chunklen) + if len(data) != chunklen: + raise SMFError("truncated") + return info[:4], data + + identifier, chunk = read_chunk(fileobj) + if identifier != b"MThd": + raise SMFError("Not a MIDI file") + + if len(chunk) != 6: + raise SMFError("truncated") + + format_, ntracks, tickdiv = struct.unpack(">HHH", chunk) + if format_ > 1: + raise SMFError("Not supported format %d" % format_) + + if tickdiv >> 15: + # fps = (-(tickdiv >> 8)) & 0xFF + # subres = tickdiv & 0xFF + # never saw one of those + raise SMFError("Not supported timing interval") + + # get a list of events and tempo changes for each track + tracks = [] + first_tempos = None + for tracknum in xrange(ntracks): + identifier, chunk = read_chunk(fileobj) + if identifier != b"MTrk": + continue + events, tempos = _read_track(chunk) + + # In case of format == 1, copy the first tempo list to all tracks + first_tempos = first_tempos or tempos + if format_ == 1: + 
tempos = list(first_tempos) + events += tempos + events.sort() + tracks.append(events) + + # calculate the duration of each track + durations = [] + for events in tracks: + tempo = 500000 + parts = [] + deltasum = 0 + for (dummy, type_, data) in events: + if type_ == TEMPO: + parts.append((deltasum, tempo)) + tempo = data + deltasum = 0 + else: + deltasum += data + parts.append((deltasum, tempo)) + + duration = 0 + for (deltasum, tempo) in parts: + quarter, tpq = deltasum / float(tickdiv), tempo + duration += (quarter * tpq) + duration /= 10 ** 6 + + durations.append(duration) + + # return the longest one + return max(durations) + + +class SMFInfo(StreamInfo): + + def __init__(self, fileobj): + """Raises SMFError""" + + self.length = _read_midi_length(fileobj) + """Length in seconds""" + + def pprint(self): + return u"SMF, %.2f seconds" % self.length + + +class SMF(FileType): + """Standard MIDI File (SMF)""" + + _mimes = ["audio/midi", "audio/x-midi"] + + def load(self, filename): + self.filename = filename + try: + with open(filename, "rb") as h: + self.info = SMFInfo(h) + except IOError as e: + raise SMFError(e) + + def add_tags(self): + raise SMFError("doesn't support tags") + + @staticmethod + def score(filename, fileobj, header): + filename = filename.lower() + return header.startswith(b"MThd") and ( + endswith(filename, ".mid") or endswith(filename, ".midi")) + + +Open = SMF +error = SMFError + +__all__ = ["SMF"] diff --git a/libs/mutagen/trueaudio.py b/libs/mutagen/trueaudio.py index 264d13a8..1c8d56c4 100644 --- a/libs/mutagen/trueaudio.py +++ b/libs/mutagen/trueaudio.py @@ -1,5 +1,6 @@ -# True Audio support for Mutagen -# Copyright 2006 Joe Wreschnig +# -*- coding: utf-8 -*- + +# Copyright (C) 2006 Joe Wreschnig # # This program is free software; you can redistribute it and/or modify # it under the terms of version 2 of the GNU General Public License as @@ -16,11 +17,13 @@ True Audio files use ID3 tags. 
__all__ = ["TrueAudio", "Open", "delete", "EasyTrueAudio"] +from ._compat import endswith +from mutagen import StreamInfo from mutagen.id3 import ID3FileType, delete -from mutagen._util import cdata +from mutagen._util import cdata, MutagenError -class error(RuntimeError): +class error(RuntimeError, MutagenError): pass @@ -28,7 +31,7 @@ class TrueAudioHeaderError(error, IOError): pass -class TrueAudioInfo(object): +class TrueAudioInfo(StreamInfo): """True Audio stream information. Attributes: @@ -40,14 +43,14 @@ class TrueAudioInfo(object): def __init__(self, fileobj, offset): fileobj.seek(offset or 0) header = fileobj.read(18) - if len(header) != 18 or not header.startswith("TTA"): + if len(header) != 18 or not header.startswith(b"TTA"): raise TrueAudioHeaderError("TTA header not found") self.sample_rate = cdata.int_le(header[10:14]) samples = cdata.uint_le(header[14:18]) self.length = float(samples) / self.sample_rate def pprint(self): - return "True Audio, %.2f seconds, %d Hz." % ( + return u"True Audio, %.2f seconds, %d Hz." % ( self.length, self.sample_rate) @@ -63,8 +66,8 @@ class TrueAudio(ID3FileType): @staticmethod def score(filename, fileobj, header): - return (header.startswith("ID3") + header.startswith("TTA") + - filename.lower().endswith(".tta") * 2) + return (header.startswith(b"ID3") + header.startswith(b"TTA") + + endswith(filename.lower(), b".tta") * 2) Open = TrueAudio diff --git a/libs/mutagen/wavpack.py b/libs/mutagen/wavpack.py index 1a2db818..80710f6d 100644 --- a/libs/mutagen/wavpack.py +++ b/libs/mutagen/wavpack.py @@ -1,6 +1,7 @@ -# A WavPack reader/tagger -# +# -*- coding: utf-8 -*- + # Copyright 2006 Joe Wreschnig +# 2014 Christoph Reiter # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 as @@ -9,11 +10,16 @@ """WavPack reading and writing. WavPack is a lossless format that uses APEv2 tags. Read -http://www.wavpack.com/ for more information. 
+ +* http://www.wavpack.com/ +* http://www.wavpack.com/file_format.txt + +for more information. """ __all__ = ["WavPack", "Open", "delete"] +from mutagen import StreamInfo from mutagen.apev2 import APEv2File, error, delete from mutagen._util import cdata @@ -25,7 +31,46 @@ RATES = [6000, 8000, 9600, 11025, 12000, 16000, 22050, 24000, 32000, 44100, 48000, 64000, 88200, 96000, 192000] -class WavPackInfo(object): +class _WavPackHeader(object): + + def __init__(self, block_size, version, track_no, index_no, total_samples, + block_index, block_samples, flags, crc): + + self.block_size = block_size + self.version = version + self.track_no = track_no + self.index_no = index_no + self.total_samples = total_samples + self.block_index = block_index + self.block_samples = block_samples + self.flags = flags + self.crc = crc + + @classmethod + def from_fileobj(cls, fileobj): + """A new _WavPackHeader or raises WavPackHeaderError""" + + header = fileobj.read(32) + if len(header) != 32 or not header.startswith(b"wvpk"): + raise WavPackHeaderError("not a WavPack header: %r" % header) + + block_size = cdata.uint_le(header[4:8]) + version = cdata.ushort_le(header[8:10]) + track_no = ord(header[10:11]) + index_no = ord(header[11:12]) + samples = cdata.uint_le(header[12:16]) + if samples == 2 ** 32 - 1: + samples = -1 + block_index = cdata.uint_le(header[16:20]) + block_samples = cdata.uint_le(header[20:24]) + flags = cdata.uint_le(header[24:28]) + crc = cdata.uint_le(header[28:32]) + + return _WavPackHeader(block_size, version, track_no, index_no, + samples, block_index, block_samples, flags, crc) + + +class WavPackInfo(StreamInfo): """WavPack stream information. 
Attributes: @@ -37,18 +82,35 @@ class WavPackInfo(object): """ def __init__(self, fileobj): - header = fileobj.read(28) - if len(header) != 28 or not header.startswith("wvpk"): + try: + header = _WavPackHeader.from_fileobj(fileobj) + except WavPackHeaderError: raise WavPackHeaderError("not a WavPack file") - samples = cdata.uint_le(header[12:16]) - flags = cdata.uint_le(header[24:28]) - self.version = cdata.short_le(header[8:10]) - self.channels = bool(flags & 4) or 2 - self.sample_rate = RATES[(flags >> 23) & 0xF] + + self.version = header.version + self.channels = bool(header.flags & 4) or 2 + self.sample_rate = RATES[(header.flags >> 23) & 0xF] + + if header.total_samples == -1 or header.block_index != 0: + # TODO: we could make this faster by using the tag size + # and search backwards for the last block, then do + # last.block_index + last.block_samples - initial.block_index + samples = header.block_samples + while 1: + fileobj.seek(header.block_size - 32 + 8, 1) + try: + header = _WavPackHeader.from_fileobj(fileobj) + except WavPackHeaderError: + break + samples += header.block_samples + else: + samples = header.total_samples + self.length = float(samples) / self.sample_rate def pprint(self): - return "WavPack, %.2f seconds, %d Hz" % (self.length, self.sample_rate) + return u"WavPack, %.2f seconds, %d Hz" % (self.length, + self.sample_rate) class WavPack(APEv2File): @@ -57,7 +119,7 @@ class WavPack(APEv2File): @staticmethod def score(filename, fileobj, header): - return header.startswith("wvpk") * 2 + return header.startswith(b"wvpk") * 2 Open = WavPack diff --git a/libs/unidecode/__init__.py b/libs/unidecode/__init__.py index 82eb5a3f..3b68de4c 100644 --- a/libs/unidecode/__init__.py +++ b/libs/unidecode/__init__.py @@ -1,4 +1,5 @@ # -*- coding: utf-8 -*- +# vi:tabstop=4:expandtab:sw=4 """Transliterate Unicode text into plain 7-bit ASCII. 
Example usage: @@ -18,19 +19,53 @@ from sys import version_info Cache = {} -def unidecode(string): + +def _warn_if_not_unicode(string): + if version_info[0] < 3 and not isinstance(string, unicode): + warnings.warn( "Argument %r is not an unicode object. " + "Passing an encoded string will likely have " + "unexpected results." % (type(string),), + RuntimeWarning, 2) + + +def unidecode_expect_ascii(string): + """Transliterate an Unicode object into an ASCII string + + >>> unidecode(u"\u5317\u4EB0") + "Bei Jing " + + This function first tries to convert the string using ASCII codec. + If it fails (because of non-ASCII characters), it falls back to + transliteration using the character tables. + + This is approx. five times faster if the string only contains ASCII + characters, but slightly slower than using unidecode directly if non-ASCII + chars are present. + """ + + _warn_if_not_unicode(string) + try: + bytestring = string.encode('ASCII') + except UnicodeEncodeError: + return _unidecode(string) + if version_info[0] >= 3: + return string + else: + return bytestring + +def unidecode_expect_nonascii(string): """Transliterate an Unicode object into an ASCII string >>> unidecode(u"\u5317\u4EB0") "Bei Jing " """ - if version_info[0] < 3 and not isinstance(string, unicode): - warnings.warn( "Argument %r is not an unicode object. " - "Passing an encoded string will likely have " - "unexpected results." % (type(string),), - RuntimeWarning, 2) + _warn_if_not_unicode(string) + return _unidecode(string) +unidecode = unidecode_expect_ascii + +def _unidecode(string): retval = [] for char in string: @@ -43,6 +78,11 @@ def unidecode(string): if codepoint > 0xeffff: continue # Characters in Private Use Area and above are ignored + if 0xd800 <= codepoint <= 0xdfff: + warnings.warn( "Surrogate character %r will be ignored. " + "You might be using a narrow Python build." 
% (char,), + RuntimeWarning, 2) + section = codepoint >> 8 # Chop off the last two hex digits position = codepoint % 256 # Last two hex digits @@ -50,7 +90,7 @@ def unidecode(string): table = Cache[section] except KeyError: try: - mod = __import__('unidecode.x%03x'%(section), [], [], ['data']) + mod = __import__('unidecode.x%03x'%(section), globals(), locals(), ['data']) except ImportError: Cache[section] = None continue # No match: ignore this character and carry on. diff --git a/libs/unidecode/util.py b/libs/unidecode/util.py new file mode 100644 index 00000000..477280d1 --- /dev/null +++ b/libs/unidecode/util.py @@ -0,0 +1,58 @@ +# vim:ts=4 sw=4 expandtab softtabstop=4 +from __future__ import print_function +import optparse +import locale +import os +import sys +import warnings + +from unidecode import unidecode + +PY3 = sys.version_info[0] >= 3 + +def fatal(msg): + sys.stderr.write(msg + "\n") + sys.exit(1) + +def main(): + default_encoding = locale.getpreferredencoding() + + parser = optparse.OptionParser('%prog [options] [FILE]', + description="Transliterate Unicode text into ASCII. FILE is path to file to transliterate. " + "Standard input is used if FILE is omitted and -c is not specified.") + parser.add_option('-e', '--encoding', metavar='ENCODING', default=default_encoding, + help='Specify an encoding (default is %s)' % (default_encoding,)) + parser.add_option('-c', metavar='TEXT', dest='text', + help='Transliterate TEXT instead of FILE') + + options, args = parser.parse_args() + + encoding = options.encoding + + if args: + if options.text: + fatal("Can't use both FILE and -c option") + else: + with open(args[0], 'rb') as f: + stream = f.read() + elif options.text: + if PY3: + stream = os.fsencode(options.text) + else: + stream = options.text + # add a newline to the string if it comes from the + # command line so that the result is printed nicely + # on the console. 
+ stream += '\n'.encode('ascii') + else: + if PY3: + stream = sys.stdin.buffer.read() + else: + stream = sys.stdin.read() + + try: + stream = stream.decode(encoding) + except UnicodeDecodeError as e: + fatal('Unable to decode input: %s, start: %d, end: %d' % (e.reason, e.start, e.end)) + + sys.stdout.write(unidecode(stream)) diff --git a/libs/unidecode/x000.py b/libs/unidecode/x000.py index 6821df47..c3f8f515 100644 --- a/libs/unidecode/x000.py +++ b/libs/unidecode/x000.py @@ -1,132 +1,15 @@ data = ( -'\x00', # 0x00 -'\x01', # 0x01 -'\x02', # 0x02 -'\x03', # 0x03 -'\x04', # 0x04 -'\x05', # 0x05 -'\x06', # 0x06 -'\x07', # 0x07 -'\x08', # 0x08 -'\x09', # 0x09 -'\x0a', # 0x0a -'\x0b', # 0x0b -'\x0c', # 0x0c -'\x0d', # 0x0d -'\x0e', # 0x0e -'\x0f', # 0x0f -'\x10', # 0x10 -'\x11', # 0x11 -'\x12', # 0x12 -'\x13', # 0x13 -'\x14', # 0x14 -'\x15', # 0x15 -'\x16', # 0x16 -'\x17', # 0x17 -'\x18', # 0x18 -'\x19', # 0x19 -'\x1a', # 0x1a -'\x1b', # 0x1b -'\x1c', # 0x1c -'\x1d', # 0x1d -'\x1e', # 0x1e -'\x1f', # 0x1f -' ', # 0x20 -'!', # 0x21 -'"', # 0x22 -'#', # 0x23 -'$', # 0x24 -'%', # 0x25 -'&', # 0x26 -'\'', # 0x27 -'(', # 0x28 -')', # 0x29 -'*', # 0x2a -'+', # 0x2b -',', # 0x2c -'-', # 0x2d -'.', # 0x2e -'/', # 0x2f -'0', # 0x30 -'1', # 0x31 -'2', # 0x32 -'3', # 0x33 -'4', # 0x34 -'5', # 0x35 -'6', # 0x36 -'7', # 0x37 -'8', # 0x38 -'9', # 0x39 -':', # 0x3a -';', # 0x3b -'<', # 0x3c -'=', # 0x3d -'>', # 0x3e -'?', # 0x3f -'@', # 0x40 -'A', # 0x41 -'B', # 0x42 -'C', # 0x43 -'D', # 0x44 -'E', # 0x45 -'F', # 0x46 -'G', # 0x47 -'H', # 0x48 -'I', # 0x49 -'J', # 0x4a -'K', # 0x4b -'L', # 0x4c -'M', # 0x4d -'N', # 0x4e -'O', # 0x4f -'P', # 0x50 -'Q', # 0x51 -'R', # 0x52 -'S', # 0x53 -'T', # 0x54 -'U', # 0x55 -'V', # 0x56 -'W', # 0x57 -'X', # 0x58 -'Y', # 0x59 -'Z', # 0x5a -']', # 0x5b -'\\', # 0x5c -']', # 0x5d -'^', # 0x5e -'_', # 0x5f -'`', # 0x60 -'a', # 0x61 -'b', # 0x62 -'c', # 0x63 -'d', # 0x64 -'e', # 0x65 -'f', # 0x66 -'g', # 0x67 -'h', # 0x68 -'i', # 0x69 -'j', # 0x6a 
-'k', # 0x6b -'l', # 0x6c -'m', # 0x6d -'n', # 0x6e -'o', # 0x6f -'p', # 0x70 -'q', # 0x71 -'r', # 0x72 -'s', # 0x73 -'t', # 0x74 -'u', # 0x75 -'v', # 0x76 -'w', # 0x77 -'x', # 0x78 -'y', # 0x79 -'z', # 0x7a -'{', # 0x7b -'|', # 0x7c -'}', # 0x7d -'~', # 0x7e -'', # 0x7f +# Code points u+007f and below are equivalent to ASCII and are handled by a +# special case in the code. Hence they are not present in this table. +'', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', +'', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', +'', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', +'', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', +'', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', +'', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', +'', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', +'', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', + '', # 0x80 '', # 0x81 '', # 0x82 @@ -162,7 +45,10 @@ data = ( ' ', # 0xa0 '!', # 0xa1 'C/', # 0xa2 + +# Not "GBP" - Pound Sign is used for more than just British Pounds. 
'PS', # 0xa3 + '$?', # 0xa4 'Y=', # 0xa5 '|', # 0xa6 @@ -177,8 +63,11 @@ data = ( '-', # 0xaf 'deg', # 0xb0 '+-', # 0xb1 + +# These might be combined with other superscript digits (u+2070 - u+2079) '2', # 0xb2 '3', # 0xb3 + '\'', # 0xb4 'u', # 0xb5 'P', # 0xb6 @@ -195,7 +84,10 @@ data = ( 'A', # 0xc1 'A', # 0xc2 'A', # 0xc3 + +# Not "AE" - used in languages other than German 'A', # 0xc4 + 'A', # 0xc5 'AE', # 0xc6 'C', # 0xc7 @@ -213,13 +105,19 @@ data = ( 'O', # 0xd3 'O', # 0xd4 'O', # 0xd5 + +# Not "OE" - used in languages other than German 'O', # 0xd6 + 'x', # 0xd7 'O', # 0xd8 'U', # 0xd9 'U', # 0xda 'U', # 0xdb + +# Not "UE" - used in languages other than German 'U', # 0xdc + 'Y', # 0xdd 'Th', # 0xde 'ss', # 0xdf @@ -227,7 +125,10 @@ data = ( 'a', # 0xe1 'a', # 0xe2 'a', # 0xe3 + +# Not "ae" - used in languages other than German 'a', # 0xe4 + 'a', # 0xe5 'ae', # 0xe6 'c', # 0xe7 @@ -245,13 +146,19 @@ data = ( 'o', # 0xf3 'o', # 0xf4 'o', # 0xf5 + +# Not "oe" - used in languages other than German 'o', # 0xf6 + '/', # 0xf7 'o', # 0xf8 'u', # 0xf9 'u', # 0xfa 'u', # 0xfb + +# Not "ue" - used in languages other than German 'u', # 0xfc + 'y', # 0xfd 'th', # 0xfe 'y', # 0xff diff --git a/libs/unidecode/x020.py b/libs/unidecode/x020.py index f67264c8..b6494730 100644 --- a/libs/unidecode/x020.py +++ b/libs/unidecode/x020.py @@ -171,7 +171,7 @@ data = ( 'W', # 0xa9 'NS', # 0xaa 'D', # 0xab -'EU', # 0xac +'EUR', # 0xac 'K', # 0xad 'T', # 0xae 'Dr', # 0xaf diff --git a/libs/unidecode/x021.py b/libs/unidecode/x021.py index fcb651ba..067d9bdc 100644 --- a/libs/unidecode/x021.py +++ b/libs/unidecode/x021.py @@ -1,7 +1,7 @@ data = ( '', # 0x00 '', # 0x01 -'', # 0x02 +'C', # 0x02 '', # 0x03 '', # 0x04 '', # 0x05 @@ -12,7 +12,7 @@ data = ( '', # 0x0a '', # 0x0b '', # 0x0c -'', # 0x0d +'H', # 0x0d '', # 0x0e '', # 0x0f '', # 0x10 @@ -20,22 +20,22 @@ data = ( '', # 0x12 '', # 0x13 '', # 0x14 -'', # 0x15 +'N', # 0x15 '', # 0x16 '', # 0x17 '', # 0x18 -'', # 0x19 -'', # 0x1a +'P', # 
0x19 +'Q', # 0x1a '', # 0x1b '', # 0x1c -'', # 0x1d +'R', # 0x1d '', # 0x1e '', # 0x1f '(sm)', # 0x20 'TEL', # 0x21 '(tm)', # 0x22 '', # 0x23 -'', # 0x24 +'Z', # 0x24 '', # 0x25 '', # 0x26 '', # 0x27 @@ -45,12 +45,12 @@ data = ( 'A', # 0x2b '', # 0x2c '', # 0x2d -'', # 0x2e -'', # 0x2f -'', # 0x30 -'', # 0x31 +'e', # 0x2e +'e', # 0x2f +'E', # 0x30 +'F', # 0x31 'F', # 0x32 -'', # 0x33 +'M', # 0x33 '', # 0x34 '', # 0x35 '', # 0x36 @@ -59,20 +59,20 @@ data = ( '', # 0x39 '', # 0x3a 'FAX', # 0x3b -'[?]', # 0x3c -'[?]', # 0x3d -'[?]', # 0x3e -'[?]', # 0x3f +'', # 0x3c +'', # 0x3d +'', # 0x3e +'', # 0x3f '[?]', # 0x40 '[?]', # 0x41 '[?]', # 0x42 '[?]', # 0x43 '[?]', # 0x44 -'[?]', # 0x45 -'[?]', # 0x46 -'[?]', # 0x47 -'[?]', # 0x48 -'[?]', # 0x49 +'D', # 0x45 +'d', # 0x46 +'e', # 0x47 +'i', # 0x48 +'j', # 0x49 '[?]', # 0x4a '[?]', # 0x4b '[?]', # 0x4c diff --git a/libs/unidecode/x04e.py b/libs/unidecode/x04e.py index e346f67b..b472b855 100644 --- a/libs/unidecode/x04e.py +++ b/libs/unidecode/x04e.py @@ -1,5 +1,5 @@ data = ( -'[?] 
', # 0x00 +'Yi ', # 0x00 'Ding ', # 0x01 'Kao ', # 0x02 'Qi ', # 0x03 From 0abc7e17cdc66ae587a8bcb0f77f899eaf3698e1 Mon Sep 17 00:00:00 2001 From: Labrys Date: Mon, 6 Jun 2016 12:16:22 -0400 Subject: [PATCH 53/82] Update configobj to v5.0.6: Dependencies * six 1.10.0 --- libs/configobj/_version.py | 2 +- libs/configobj/configobj.py | 54 ++++++++++++++++++++----------------- libs/configobj/validate.py | 2 +- 3 files changed, 32 insertions(+), 26 deletions(-) diff --git a/libs/configobj/_version.py b/libs/configobj/_version.py index 742c20e4..6d013711 100644 --- a/libs/configobj/_version.py +++ b/libs/configobj/_version.py @@ -1 +1 @@ -__version__ = '5.0.4' \ No newline at end of file +__version__ = '5.0.6' \ No newline at end of file diff --git a/libs/configobj/configobj.py b/libs/configobj/configobj.py index 76ff78f1..ba886e86 100644 --- a/libs/configobj/configobj.py +++ b/libs/configobj/configobj.py @@ -19,9 +19,10 @@ import sys from codecs import BOM_UTF8, BOM_UTF16, BOM_UTF16_BE, BOM_UTF16_LE -# imported lazily to avoid startup performance hit if it isn't used import six +from _version import __version__ +# imported lazily to avoid startup performance hit if it isn't used compiler = None # A dictionary mapping BOM to @@ -1580,7 +1581,7 @@ class ConfigObj(Section): self.indent_type = indent cur_depth = sect_open.count('[') if cur_depth != sect_close.count(']'): - self._handle_error("Cannot compute the section depth at line %s.", + self._handle_error("Cannot compute the section depth", NestingError, infile, cur_index) continue @@ -1590,7 +1591,7 @@ class ConfigObj(Section): parent = self._match_depth(this_section, cur_depth).parent except SyntaxError: - self._handle_error("Cannot compute nesting level at line %s.", + self._handle_error("Cannot compute nesting level", NestingError, infile, cur_index) continue elif cur_depth == this_section.depth: @@ -1600,12 +1601,13 @@ class ConfigObj(Section): # the new section is a child the current section parent = this_section 
else: - self._handle_error("Section too nested at line %s.", + self._handle_error("Section too nested", NestingError, infile, cur_index) + continue sect_name = self._unquote(sect_name) if sect_name in parent: - self._handle_error('Duplicate section name at line %s.', + self._handle_error('Duplicate section name', DuplicateError, infile, cur_index) continue @@ -1625,7 +1627,7 @@ class ConfigObj(Section): mat = self._keyword.match(line) if mat is None: self._handle_error( - 'Invalid line ({0!r}) (matched as neither section nor keyword) at line "%s".'.format(line), + 'Invalid line ({0!r}) (matched as neither section nor keyword)'.format(line), ParseError, infile, cur_index) else: # is a keyword value @@ -1640,7 +1642,7 @@ class ConfigObj(Section): value, infile, cur_index, maxline) except SyntaxError: self._handle_error( - 'Parse error in multiline value at line %s.', + 'Parse error in multiline value', ParseError, infile, cur_index) continue else: @@ -1650,9 +1652,9 @@ class ConfigObj(Section): value = unrepr(value) except Exception as e: if type(e) == UnknownType: - msg = 'Unknown name or type in value at line %s.' + msg = 'Unknown name or type in value' else: - msg = 'Parse error from unrepr-ing multiline value at line %s.' + msg = 'Parse error from unrepr-ing multiline value' self._handle_error(msg, UnreprError, infile, cur_index) continue @@ -1663,9 +1665,9 @@ class ConfigObj(Section): value = unrepr(value) except Exception as e: if isinstance(e, UnknownType): - msg = 'Unknown name or type in value at line %s.' + msg = 'Unknown name or type in value' else: - msg = 'Parse error from unrepr-ing value at line %s.' 
+ msg = 'Parse error from unrepr-ing value' self._handle_error(msg, UnreprError, infile, cur_index) continue @@ -1675,14 +1677,14 @@ class ConfigObj(Section): (value, comment) = self._handle_value(value) except SyntaxError: self._handle_error( - 'Parse error in value at line %s.', + 'Parse error in value', ParseError, infile, cur_index) continue # key = self._unquote(key) if key in this_section: self._handle_error( - 'Duplicate keyword name at line %s.', + 'Duplicate keyword name', DuplicateError, infile, cur_index) continue # add the key. @@ -1733,7 +1735,7 @@ class ConfigObj(Section): """ line = infile[cur_index] cur_index += 1 - message = text % cur_index + message = '{0} at line {1}.'.format(text, cur_index) error = ErrorClass(message, cur_index, line) if self.raise_errors: # raise the error - parsing stops here @@ -2106,21 +2108,25 @@ class ConfigObj(Section): # Windows specific hack to avoid writing '\r\r\n' newline = '\n' output = self._a_to_u(newline).join(out) - if self.encoding: - output = output.encode(self.encoding) - if self.BOM and ((self.encoding is None) or match_utf8(self.encoding)): - # Add the UTF8 BOM - output = BOM_UTF8 + output - if not output.endswith(newline): output += newline + + if isinstance(output, six.binary_type): + output_bytes = output + else: + output_bytes = output.encode(self.encoding or + self.default_encoding or + 'ascii') + + if self.BOM and ((self.encoding is None) or match_utf8(self.encoding)): + # Add the UTF8 BOM + output_bytes = BOM_UTF8 + output_bytes + if outfile is not None: - outfile.write(output) + outfile.write(output_bytes) else: with open(self.filename, 'wb') as h: - h.write(output.encode(self.encoding or - self.default_encoding or - 'ascii')) + h.write(output_bytes) def validate(self, validator, preserve_errors=False, copy=False, section=None): diff --git a/libs/configobj/validate.py b/libs/configobj/validate.py index 11ab2ef3..b7a964c4 100644 --- a/libs/configobj/validate.py +++ b/libs/configobj/validate.py @@ 
-538,7 +538,7 @@ class Validator(object): ConfigObj, an alternative to ConfigParser which supports lists and can validate a config file using a config schema. For more details on using Validator with ConfigObj see: - http://www.voidspace.org.uk/python/configobj.html + https://configobj.readthedocs.org/en/latest/configobj.html """ # this regex does the initial parsing of the checks From 6bb4ae56bdc5b10ea87e307f15eeb6e421eb6650 Mon Sep 17 00:00:00 2001 From: Labrys Date: Mon, 6 Jun 2016 12:38:26 -0400 Subject: [PATCH 54/82] Update jaraco.windows to v3.6: Dependencies: * backports.functools-lru-cache 1.2.1 * jaraco.classes 1.3 * jaraco.collections 1.3.2 * jaraco.functools 1.11 * jaraco.structures 1.0 * jaraco.text 1.7 * jaraco.ui 1.4 * jaraco.windows 3.6 * more-itertools 2.2 * path.py 8.2.1 * six 1.10.0 --- ....functools_lru_cache-1.2.1-py3.5-nspkg.pth | 1 + libs/backports/functools_lru_cache.py | 184 ++ libs/jaraco.collections-1.3.2-py3.5-nspkg.pth | 1 + libs/jaraco.functools-1.11-py2.7-nspkg.pth | 1 + libs/jaraco.text-1.7-py3.5-nspkg.pth | 1 + libs/jaraco.windows-3.6-py3.5-nspkg.pth | 1 + libs/jaraco/__init__.py | 11 +- libs/jaraco/classes/__init__.py | 0 libs/jaraco/classes/ancestry.py | 67 + libs/jaraco/classes/meta.py | 40 + libs/jaraco/classes/properties.py | 65 + libs/jaraco/collections.py | 773 ++++++++ libs/jaraco/functools.py | 268 +++ libs/jaraco/structures/__init__.py | 0 libs/jaraco/structures/binary.py | 130 ++ libs/jaraco/text.py | 371 ++++ libs/jaraco/ui/__init__.py | 0 libs/jaraco/ui/cmdline.py | 62 + libs/jaraco/ui/editor.py | 108 ++ libs/jaraco/ui/input.py | 26 + libs/jaraco/ui/menu.py | 34 + libs/jaraco/ui/progress.py | 150 ++ libs/more_itertools/__init__.py | 2 + libs/more_itertools/more.py | 237 +++ libs/more_itertools/recipes.py | 331 ++++ libs/more_itertools/tests/__init__.py | 0 libs/more_itertools/tests/test_more.py | 143 ++ libs/more_itertools/tests/test_recipes.py | 433 +++++ libs/path.py | 1722 +++++++++++++++++ libs/test_path.py | 1119 
+++++++++++ 30 files changed, 6271 insertions(+), 10 deletions(-) create mode 100644 libs/backports.functools_lru_cache-1.2.1-py3.5-nspkg.pth create mode 100644 libs/backports/functools_lru_cache.py create mode 100644 libs/jaraco.collections-1.3.2-py3.5-nspkg.pth create mode 100644 libs/jaraco.functools-1.11-py2.7-nspkg.pth create mode 100644 libs/jaraco.text-1.7-py3.5-nspkg.pth create mode 100644 libs/jaraco.windows-3.6-py3.5-nspkg.pth create mode 100644 libs/jaraco/classes/__init__.py create mode 100644 libs/jaraco/classes/ancestry.py create mode 100644 libs/jaraco/classes/meta.py create mode 100644 libs/jaraco/classes/properties.py create mode 100644 libs/jaraco/collections.py create mode 100644 libs/jaraco/functools.py create mode 100644 libs/jaraco/structures/__init__.py create mode 100644 libs/jaraco/structures/binary.py create mode 100644 libs/jaraco/text.py create mode 100644 libs/jaraco/ui/__init__.py create mode 100644 libs/jaraco/ui/cmdline.py create mode 100644 libs/jaraco/ui/editor.py create mode 100644 libs/jaraco/ui/input.py create mode 100644 libs/jaraco/ui/menu.py create mode 100644 libs/jaraco/ui/progress.py create mode 100644 libs/more_itertools/__init__.py create mode 100644 libs/more_itertools/more.py create mode 100644 libs/more_itertools/recipes.py create mode 100644 libs/more_itertools/tests/__init__.py create mode 100644 libs/more_itertools/tests/test_more.py create mode 100644 libs/more_itertools/tests/test_recipes.py create mode 100644 libs/path.py create mode 100644 libs/test_path.py diff --git a/libs/backports.functools_lru_cache-1.2.1-py3.5-nspkg.pth b/libs/backports.functools_lru_cache-1.2.1-py3.5-nspkg.pth new file mode 100644 index 00000000..0b1f79dd --- /dev/null +++ b/libs/backports.functools_lru_cache-1.2.1-py3.5-nspkg.pth @@ -0,0 +1 @@ +import sys, types, os;p = os.path.join(sys._getframe(1).f_locals['sitedir'], *('backports',));ie = os.path.exists(os.path.join(p,'__init__.py'));m = not ie and sys.modules.setdefault('backports', 
types.ModuleType('backports'));mp = (m or []) and m.__dict__.setdefault('__path__',[]);(p not in mp) and mp.append(p) diff --git a/libs/backports/functools_lru_cache.py b/libs/backports/functools_lru_cache.py new file mode 100644 index 00000000..707c6c76 --- /dev/null +++ b/libs/backports/functools_lru_cache.py @@ -0,0 +1,184 @@ +from __future__ import absolute_import + +import functools +from collections import namedtuple +from threading import RLock + +_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"]) + + +@functools.wraps(functools.update_wrapper) +def update_wrapper(wrapper, + wrapped, + assigned = functools.WRAPPER_ASSIGNMENTS, + updated = functools.WRAPPER_UPDATES): + """ + Patch two bugs in functools.update_wrapper. + """ + # workaround for http://bugs.python.org/issue3445 + assigned = tuple(attr for attr in assigned if hasattr(wrapped, attr)) + wrapper = functools.update_wrapper(wrapper, wrapped, assigned, updated) + # workaround for https://bugs.python.org/issue17482 + wrapper.__wrapped__ = wrapped + return wrapper + + +class _HashedSeq(list): + __slots__ = 'hashvalue' + + def __init__(self, tup, hash=hash): + self[:] = tup + self.hashvalue = hash(tup) + + def __hash__(self): + return self.hashvalue + + +def _make_key(args, kwds, typed, + kwd_mark=(object(),), + fasttypes=set([int, str, frozenset, type(None)]), + sorted=sorted, tuple=tuple, type=type, len=len): + 'Make a cache key from optionally typed positional and keyword arguments' + key = args + if kwds: + sorted_items = sorted(kwds.items()) + key += kwd_mark + for item in sorted_items: + key += item + if typed: + key += tuple(type(v) for v in args) + if kwds: + key += tuple(type(v) for k, v in sorted_items) + elif len(key) == 1 and type(key[0]) in fasttypes: + return key[0] + return _HashedSeq(key) + + +def lru_cache(maxsize=100, typed=False): + """Least-recently-used cache decorator. 
+ + If *maxsize* is set to None, the LRU features are disabled and the cache + can grow without bound. + + If *typed* is True, arguments of different types will be cached separately. + For example, f(3.0) and f(3) will be treated as distinct calls with + distinct results. + + Arguments to the cached function must be hashable. + + View the cache statistics named tuple (hits, misses, maxsize, currsize) with + f.cache_info(). Clear the cache and statistics with f.cache_clear(). + Access the underlying function with f.__wrapped__. + + See: http://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used + + """ + + # Users should only access the lru_cache through its public API: + # cache_info, cache_clear, and f.__wrapped__ + # The internals of the lru_cache are encapsulated for thread safety and + # to allow the implementation to change (including a possible C version). + + def decorating_function(user_function): + + cache = dict() + stats = [0, 0] # make statistics updateable non-locally + HITS, MISSES = 0, 1 # names for the stats fields + make_key = _make_key + cache_get = cache.get # bound method to lookup key or return None + _len = len # localize the global len() function + lock = RLock() # because linkedlist updates aren't threadsafe + root = [] # root of the circular doubly linked list + root[:] = [root, root, None, None] # initialize by pointing to self + nonlocal_root = [root] # make updateable non-locally + PREV, NEXT, KEY, RESULT = 0, 1, 2, 3 # names for the link fields + + if maxsize == 0: + + def wrapper(*args, **kwds): + # no caching, just do a statistics update after a successful call + result = user_function(*args, **kwds) + stats[MISSES] += 1 + return result + + elif maxsize is None: + + def wrapper(*args, **kwds): + # simple caching without ordering or size limit + key = make_key(args, kwds, typed) + result = cache_get(key, root) # root used here as a unique not-found sentinel + if result is not root: + stats[HITS] += 1 + return result + result = 
user_function(*args, **kwds) + cache[key] = result + stats[MISSES] += 1 + return result + + else: + + def wrapper(*args, **kwds): + # size limited caching that tracks accesses by recency + key = make_key(args, kwds, typed) if kwds or typed else args + with lock: + link = cache_get(key) + if link is not None: + # record recent use of the key by moving it to the front of the list + root, = nonlocal_root + link_prev, link_next, key, result = link + link_prev[NEXT] = link_next + link_next[PREV] = link_prev + last = root[PREV] + last[NEXT] = root[PREV] = link + link[PREV] = last + link[NEXT] = root + stats[HITS] += 1 + return result + result = user_function(*args, **kwds) + with lock: + root, = nonlocal_root + if key in cache: + # getting here means that this same key was added to the + # cache while the lock was released. since the link + # update is already done, we need only return the + # computed result and update the count of misses. + pass + elif _len(cache) >= maxsize: + # use the old root to store the new key and result + oldroot = root + oldroot[KEY] = key + oldroot[RESULT] = result + # empty the oldest link and make it the new root + root = nonlocal_root[0] = oldroot[NEXT] + oldkey = root[KEY] + root[KEY] = root[RESULT] = None + # now update the cache dictionary for the new links + del cache[oldkey] + cache[key] = oldroot + else: + # put result in a new link at the front of the list + last = root[PREV] + link = [last, root, key, result] + last[NEXT] = root[PREV] = cache[key] = link + stats[MISSES] += 1 + return result + + def cache_info(): + """Report cache statistics""" + with lock: + return _CacheInfo(stats[HITS], stats[MISSES], maxsize, len(cache)) + + def cache_clear(): + """Clear the cache and cache statistics""" + with lock: + cache.clear() + root = nonlocal_root[0] + root[:] = [root, root, None, None] + stats[:] = [0, 0] + + wrapper.__wrapped__ = user_function + wrapper.cache_info = cache_info + wrapper.cache_clear = cache_clear + return 
update_wrapper(wrapper, user_function) + + return decorating_function diff --git a/libs/jaraco.collections-1.3.2-py3.5-nspkg.pth b/libs/jaraco.collections-1.3.2-py3.5-nspkg.pth new file mode 100644 index 00000000..c8127a57 --- /dev/null +++ b/libs/jaraco.collections-1.3.2-py3.5-nspkg.pth @@ -0,0 +1 @@ +import sys, types, os;p = os.path.join(sys._getframe(1).f_locals['sitedir'], *('jaraco',));ie = os.path.exists(os.path.join(p,'__init__.py'));m = not ie and sys.modules.setdefault('jaraco', types.ModuleType('jaraco'));mp = (m or []) and m.__dict__.setdefault('__path__',[]);(p not in mp) and mp.append(p) diff --git a/libs/jaraco.functools-1.11-py2.7-nspkg.pth b/libs/jaraco.functools-1.11-py2.7-nspkg.pth new file mode 100644 index 00000000..c8127a57 --- /dev/null +++ b/libs/jaraco.functools-1.11-py2.7-nspkg.pth @@ -0,0 +1 @@ +import sys, types, os;p = os.path.join(sys._getframe(1).f_locals['sitedir'], *('jaraco',));ie = os.path.exists(os.path.join(p,'__init__.py'));m = not ie and sys.modules.setdefault('jaraco', types.ModuleType('jaraco'));mp = (m or []) and m.__dict__.setdefault('__path__',[]);(p not in mp) and mp.append(p) diff --git a/libs/jaraco.text-1.7-py3.5-nspkg.pth b/libs/jaraco.text-1.7-py3.5-nspkg.pth new file mode 100644 index 00000000..c8127a57 --- /dev/null +++ b/libs/jaraco.text-1.7-py3.5-nspkg.pth @@ -0,0 +1 @@ +import sys, types, os;p = os.path.join(sys._getframe(1).f_locals['sitedir'], *('jaraco',));ie = os.path.exists(os.path.join(p,'__init__.py'));m = not ie and sys.modules.setdefault('jaraco', types.ModuleType('jaraco'));mp = (m or []) and m.__dict__.setdefault('__path__',[]);(p not in mp) and mp.append(p) diff --git a/libs/jaraco.windows-3.6-py3.5-nspkg.pth b/libs/jaraco.windows-3.6-py3.5-nspkg.pth new file mode 100644 index 00000000..c8127a57 --- /dev/null +++ b/libs/jaraco.windows-3.6-py3.5-nspkg.pth @@ -0,0 +1 @@ +import sys, types, os;p = os.path.join(sys._getframe(1).f_locals['sitedir'], *('jaraco',));ie = 
os.path.exists(os.path.join(p,'__init__.py'));m = not ie and sys.modules.setdefault('jaraco', types.ModuleType('jaraco'));mp = (m or []) and m.__dict__.setdefault('__path__',[]);(p not in mp) and mp.append(p) diff --git a/libs/jaraco/__init__.py b/libs/jaraco/__init__.py index 1b2910c2..5284146e 100644 --- a/libs/jaraco/__init__.py +++ b/libs/jaraco/__init__.py @@ -1,10 +1 @@ -# this is a namespace package -__import__('pkg_resources').declare_namespace(__name__) - -try: - # py2exe support (http://www.py2exe.org/index.cgi/ExeWithEggs) - import modulefinder - for p in __path__: - modulefinder.AddPackagePath(__name__, p) -except ImportError: - pass +__import__("pkg_resources").declare_namespace(__name__) diff --git a/libs/jaraco/classes/__init__.py b/libs/jaraco/classes/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/libs/jaraco/classes/ancestry.py b/libs/jaraco/classes/ancestry.py new file mode 100644 index 00000000..905c18fd --- /dev/null +++ b/libs/jaraco/classes/ancestry.py @@ -0,0 +1,67 @@ +""" +Routines for obtaining the class names +of an object and its parent classes. +""" + +from __future__ import unicode_literals + +def all_bases(c): + """ + return a tuple of all base classes the class c has as a parent. + >>> object in all_bases(list) + True + """ + return c.mro()[1:] + +def all_classes(c): + """ + return a tuple of all classes to which c belongs + >>> list in all_classes(list) + True + """ + return c.mro() + +# borrowed from http://code.activestate.com/recipes/576949-find-all-subclasses-of-a-given-class/ +def iter_subclasses(cls, _seen=None): + """ + Generator over all subclasses of a given class, in depth-first order. + + >>> bool in list(iter_subclasses(int)) + True + >>> class A(object): pass + >>> class B(A): pass + >>> class C(A): pass + >>> class D(B,C): pass + >>> class E(D): pass + >>> + >>> for cls in iter_subclasses(A): + ... 
print(cls.__name__) + B + D + E + C + >>> # get ALL (new-style) classes currently defined + >>> res = [cls.__name__ for cls in iter_subclasses(object)] + >>> 'type' in res + True + >>> 'tuple' in res + True + >>> len(res) > 100 + True + """ + + if not isinstance(cls, type): + raise TypeError('iter_subclasses must be called with ' + 'new-style classes, not %.100r' % cls) + if _seen is None: _seen = set() + try: + subs = cls.__subclasses__() + except TypeError: # fails only when cls is type + subs = cls.__subclasses__(cls) + for sub in subs: + if sub in _seen: + continue + _seen.add(sub) + yield sub + for sub in iter_subclasses(sub, _seen): + yield sub diff --git a/libs/jaraco/classes/meta.py b/libs/jaraco/classes/meta.py new file mode 100644 index 00000000..cdb744d7 --- /dev/null +++ b/libs/jaraco/classes/meta.py @@ -0,0 +1,40 @@ +""" +meta.py + +Some useful metaclasses. +""" + +from __future__ import unicode_literals + +class LeafClassesMeta(type): + """ + A metaclass for classes that keeps track of all of them that + aren't base classes. + """ + + _leaf_classes = set() + + def __init__(cls, name, bases, attrs): + if not hasattr(cls, '_leaf_classes'): + cls._leaf_classes = set() + leaf_classes = getattr(cls, '_leaf_classes') + leaf_classes.add(cls) + # remove any base classes + leaf_classes -= set(bases) + + +class TagRegistered(type): + """ + As classes of this metaclass are created, they keep a registry in the + base class of all classes by a class attribute, indicated by attr_name. 
+ """ + attr_name = 'tag' + + def __init__(cls, name, bases, namespace): + super(TagRegistered, cls).__init__(name, bases, namespace) + if not hasattr(cls, '_registry'): + cls._registry = {} + meta = cls.__class__ + attr = getattr(cls, meta.attr_name, None) + if attr: + cls._registry[attr] = cls diff --git a/libs/jaraco/classes/properties.py b/libs/jaraco/classes/properties.py new file mode 100644 index 00000000..d64262a3 --- /dev/null +++ b/libs/jaraco/classes/properties.py @@ -0,0 +1,65 @@ +from __future__ import unicode_literals + +import six + + +class NonDataProperty(object): + """Much like the property builtin, but only implements __get__, + making it a non-data property, and can be subsequently reset. + + See http://users.rcn.com/python/download/Descriptor.htm for more + information. + + >>> class X(object): + ... @NonDataProperty + ... def foo(self): + ... return 3 + >>> x = X() + >>> x.foo + 3 + >>> x.foo = 4 + >>> x.foo + 4 + """ + + def __init__(self, fget): + assert fget is not None, "fget cannot be none" + assert six.callable(fget), "fget must be callable" + self.fget = fget + + def __get__(self, obj, objtype=None): + if obj is None: + return self + return self.fget(obj) + + +# from http://stackoverflow.com/a/5191224 +class ClassPropertyDescriptor(object): + + def __init__(self, fget, fset=None): + self.fget = fget + self.fset = fset + + def __get__(self, obj, klass=None): + if klass is None: + klass = type(obj) + return self.fget.__get__(obj, klass)() + + def __set__(self, obj, value): + if not self.fset: + raise AttributeError("can't set attribute") + type_ = type(obj) + return self.fset.__get__(obj, type_)(value) + + def setter(self, func): + if not isinstance(func, (classmethod, staticmethod)): + func = classmethod(func) + self.fset = func + return self + + +def classproperty(func): + if not isinstance(func, (classmethod, staticmethod)): + func = classmethod(func) + + return ClassPropertyDescriptor(func) diff --git a/libs/jaraco/collections.py 
b/libs/jaraco/collections.py new file mode 100644 index 00000000..6af6ad45 --- /dev/null +++ b/libs/jaraco/collections.py @@ -0,0 +1,773 @@ +# -*- coding: utf-8 -*- + +from __future__ import absolute_import, unicode_literals, division + +import re +import operator +import collections +import itertools +import copy + +import six +from jaraco.classes.properties import NonDataProperty +import jaraco.text + + +class DictFilter(object): + """ + Takes a dict, and simulates a sub-dict based on the keys. + + >>> sample = {'a': 1, 'b': 2, 'c': 3} + >>> filtered = DictFilter(sample, ['a', 'c']) + >>> filtered == {'a': 1, 'c': 3} + True + + One can also filter by a regular expression pattern + + >>> sample['d'] = 4 + >>> sample['ef'] = 5 + + Here we filter for only single-character keys + + >>> filtered = DictFilter(sample, include_pattern='.$') + >>> filtered == {'a': 1, 'b': 2, 'c': 3, 'd': 4} + True + + Also note that DictFilter keeps a reference to the original dict, so + if you modify the original dict, that could modify the filtered dict. 
+ + >>> del sample['d'] + >>> del sample['a'] + >>> filtered == {'b': 2, 'c': 3} + True + + """ + def __init__(self, dict, include_keys=[], include_pattern=None): + self.dict = dict + self.specified_keys = set(include_keys) + if include_pattern is not None: + self.include_pattern = re.compile(include_pattern) + else: + # for performance, replace the pattern_keys property + self.pattern_keys = set() + + def get_pattern_keys(self): + #key_matches = lambda k, v: self.include_pattern.match(k) + keys = filter(self.include_pattern.match, self.dict.keys()) + return set(keys) + pattern_keys = NonDataProperty(get_pattern_keys) + + @property + def include_keys(self): + return self.specified_keys.union(self.pattern_keys) + + def keys(self): + return self.include_keys.intersection(self.dict.keys()) + + def values(self): + keys = self.keys() + values = map(self.dict.get, keys) + return values + + def __getitem__(self, i): + if not i in self.include_keys: + return KeyError, i + return self.dict[i] + + def items(self): + keys = self.keys() + values = map(self.dict.get, keys) + return zip(keys, values) + + def __eq__(self, other): + return dict(self) == other + + def __ne__(self, other): + return dict(self) != other + + +def dict_map(function, dictionary): + """ + dict_map is much like the built-in function map. It takes a dictionary + and applys a function to the values of that dictionary, returning a + new dictionary with the mapped values in the original keys. + + >>> d = dict_map(lambda x:x+1, dict(a=1, b=2)) + >>> d == dict(a=2,b=3) + True + """ + return dict((key, function(value)) for key, value in dictionary.items()) + + +class RangeMap(dict): + """ + A dictionary-like object that uses the keys as bounds for a range. + Inclusion of the value for that range is determined by the + key_match_comparator, which defaults to less-than-or-equal. + A value is returned for a key if it is the first key that matches in + the sorted list of keys. 
+ + One may supply keyword parameters to be passed to the sort function used + to sort keys (i.e. cmp [python 2 only], keys, reverse) as sort_params. + + Let's create a map that maps 1-3 -> 'a', 4-6 -> 'b' + + >>> r = RangeMap({3: 'a', 6: 'b'}) # boy, that was easy + >>> r[1], r[2], r[3], r[4], r[5], r[6] + ('a', 'a', 'a', 'b', 'b', 'b') + + Even float values should work so long as the comparison operator + supports it. + + >>> r[4.5] + 'b' + + But you'll notice that the way rangemap is defined, it must be open-ended + on one side. + + >>> r[0] + 'a' + >>> r[-1] + 'a' + + One can close the open-end of the RangeMap by using undefined_value + + >>> r = RangeMap({0: RangeMap.undefined_value, 3: 'a', 6: 'b'}) + >>> r[0] + Traceback (most recent call last): + ... + KeyError: 0 + + One can get the first or last elements in the range by using RangeMap.Item + + >>> last_item = RangeMap.Item(-1) + >>> r[last_item] + 'b' + + .last_item is a shortcut for Item(-1) + + >>> r[RangeMap.last_item] + 'b' + + Sometimes it's useful to find the bounds for a RangeMap + + >>> r.bounds() + (0, 6) + + RangeMap supports .get(key, default) + + >>> r.get(0, 'not found') + 'not found' + + >>> r.get(7, 'not found') + 'not found' + """ + def __init__(self, source, sort_params = {}, key_match_comparator = operator.le): + dict.__init__(self, source) + self.sort_params = sort_params + self.match = key_match_comparator + + def __getitem__(self, item): + sorted_keys = sorted(self.keys(), **self.sort_params) + if isinstance(item, RangeMap.Item): + result = self.__getitem__(sorted_keys[item]) + else: + key = self._find_first_match_(sorted_keys, item) + result = dict.__getitem__(self, key) + if result is RangeMap.undefined_value: + raise KeyError(key) + return result + + def get(self, key, default=None): + """ + Return the value for key if key is in the dictionary, else default. + If default is not given, it defaults to None, so that this method + never raises a KeyError. 
+ """ + try: + return self[key] + except KeyError: + return default + + def _find_first_match_(self, keys, item): + is_match = lambda k: self.match(item, k) + matches = list(filter(is_match, keys)) + if matches: + return matches[0] + raise KeyError(item) + + def bounds(self): + sorted_keys = sorted(self.keys(), **self.sort_params) + return ( + sorted_keys[RangeMap.first_item], + sorted_keys[RangeMap.last_item], + ) + + # some special values for the RangeMap + undefined_value = type(str('RangeValueUndefined'), (object,), {})() + class Item(int): pass + first_item = Item(0) + last_item = Item(-1) + + +__identity = lambda x: x + + +def sorted_items(d, key=__identity, reverse=False): + """ + Return the items of the dictionary sorted by the keys + + >>> sample = dict(foo=20, bar=42, baz=10) + >>> tuple(sorted_items(sample)) + (('bar', 42), ('baz', 10), ('foo', 20)) + + >>> reverse_string = lambda s: ''.join(reversed(s)) + >>> tuple(sorted_items(sample, key=reverse_string)) + (('foo', 20), ('bar', 42), ('baz', 10)) + + >>> tuple(sorted_items(sample, reverse=True)) + (('foo', 20), ('baz', 10), ('bar', 42)) + """ + # wrap the key func so it operates on the first element of each item + pairkey_key = lambda item: key(item[0]) + return sorted(d.items(), key=pairkey_key, reverse=reverse) + + +class KeyTransformingDict(dict): + """ + A dict subclass that transforms the keys before they're used. + Subclasses may override the default transform_key to customize behavior. + """ + @staticmethod + def transform_key(key): + return key + + def __init__(self, *args, **kargs): + super(KeyTransformingDict, self).__init__() + # build a dictionary using the default constructs + d = dict(*args, **kargs) + # build this dictionary using transformed keys. 
+ for item in d.items(): + self.__setitem__(*item) + + def __setitem__(self, key, val): + key = self.transform_key(key) + super(KeyTransformingDict, self).__setitem__(key, val) + + def __getitem__(self, key): + key = self.transform_key(key) + return super(KeyTransformingDict, self).__getitem__(key) + + def __contains__(self, key): + key = self.transform_key(key) + return super(KeyTransformingDict, self).__contains__(key) + + def __delitem__(self, key): + key = self.transform_key(key) + return super(KeyTransformingDict, self).__delitem__(key) + + def get(self, key, *args, **kwargs): + key = self.transform_key(key) + return super(KeyTransformingDict, self).get(key, *args, **kwargs) + + def setdefault(self, key, *args, **kwargs): + key = self.transform_key(key) + return super(KeyTransformingDict, self).setdefault(key, *args, **kwargs) + + def pop(self, key, *args, **kwargs): + key = self.transform_key(key) + return super(KeyTransformingDict, self).pop(key, *args, **kwargs) + + def matching_key_for(self, key): + """ + Given a key, return the actual key stored in self that matches. + Raise KeyError if the key isn't found. + """ + try: + return next(e_key for e_key in self.keys() if e_key == key) + except StopIteration: + raise KeyError(key) + + +class FoldedCaseKeyedDict(KeyTransformingDict): + """ + A case-insensitive dictionary (keys are compared as insensitive + if they are strings). 
+ + >>> d = FoldedCaseKeyedDict() + >>> d['heLlo'] = 'world' + >>> list(d.keys()) == ['heLlo'] + True + >>> list(d.values()) == ['world'] + True + >>> d['hello'] == 'world' + True + >>> 'hello' in d + True + >>> 'HELLO' in d + True + >>> print(repr(FoldedCaseKeyedDict({'heLlo': 'world'})).replace("u'", "'")) + {'heLlo': 'world'} + >>> d = FoldedCaseKeyedDict({'heLlo': 'world'}) + >>> print(d['hello']) + world + >>> print(d['Hello']) + world + >>> list(d.keys()) + ['heLlo'] + >>> d = FoldedCaseKeyedDict({'heLlo': 'world', 'Hello': 'world'}) + >>> list(d.values()) + ['world'] + >>> key, = d.keys() + >>> key in ['heLlo', 'Hello'] + True + >>> del d['HELLO'] + >>> d + {} + + get should work + + >>> d['Sumthin'] = 'else' + >>> d.get('SUMTHIN') + 'else' + >>> d.get('OTHER', 'thing') + 'thing' + >>> del d['sumthin'] + + setdefault should also work + + >>> d['This'] = 'that' + >>> print(d.setdefault('this', 'other')) + that + >>> len(d) + 1 + >>> print(d['this']) + that + >>> print(d.setdefault('That', 'other')) + other + >>> print(d['THAT']) + other + + Make it pop! + + >>> print(d.pop('THAT')) + other + + To retrieve the key in its originally-supplied form, use matching_key_for + + >>> print(d.matching_key_for('this')) + This + """ + @staticmethod + def transform_key(key): + return jaraco.text.FoldedCase(key) + + +class DictAdapter(object): + """ + Provide a getitem interface for attributes of an object. + + Let's say you want to get at the string.lowercase property in a formatted + string. It's easy with DictAdapter. + + >>> import string + >>> print("lowercase is %(ascii_lowercase)s" % DictAdapter(string)) + lowercase is abcdefghijklmnopqrstuvwxyz + """ + def __init__(self, wrapped_ob): + self.object = wrapped_ob + + def __getitem__(self, name): + return getattr(self.object, name) + + +class ItemsAsAttributes(object): + """ + Mix-in class to enable a mapping object to provide items as + attributes. 
+ + >>> C = type(str('C'), (dict, ItemsAsAttributes), dict()) + >>> i = C() + >>> i['foo'] = 'bar' + >>> i.foo + 'bar' + + Natural attribute access takes precedence + + >>> i.foo = 'henry' + >>> i.foo + 'henry' + + But as you might expect, the mapping functionality is preserved. + + >>> i['foo'] + 'bar' + + A normal attribute error should be raised if an attribute is + requested that doesn't exist. + + >>> i.missing + Traceback (most recent call last): + ... + AttributeError: 'C' object has no attribute 'missing' + + It also works on dicts that customize __getitem__ + + >>> missing_func = lambda self, key: 'missing item' + >>> C = type(str('C'), (dict, ItemsAsAttributes), dict(__missing__ = missing_func)) + >>> i = C() + >>> i.missing + 'missing item' + >>> i.foo + 'missing item' + """ + def __getattr__(self, key): + try: + return getattr(super(ItemsAsAttributes, self), key) + except AttributeError as e: + # attempt to get the value from the mapping (return self[key]) + # but be careful not to lose the original exception context. + noval = object() + def _safe_getitem(cont, key, missing_result): + try: + return cont[key] + except KeyError: + return missing_result + result = _safe_getitem(self, key, noval) + if result is not noval: + return result + # raise the original exception, but use the original class + # name, not 'super'. + message, = e.args + message = message.replace('super', self.__class__.__name__, 1) + e.args = message, + raise + + +def invert_map(map): + """ + Given a dictionary, return another dictionary with keys and values + switched. If any of the values resolve to the same key, raises + a ValueError. + + >>> numbers = dict(a=1, b=2, c=3) + >>> letters = invert_map(numbers) + >>> letters[1] + 'a' + >>> numbers['d'] = 3 + >>> invert_map(numbers) + Traceback (most recent call last): + ... 
+ ValueError: Key conflict in inverted mapping + """ + res = dict((v,k) for k, v in map.items()) + if not len(res) == len(map): + raise ValueError('Key conflict in inverted mapping') + return res + + +class IdentityOverrideMap(dict): + """ + A dictionary that by default maps each key to itself, but otherwise + acts like a normal dictionary. + + >>> d = IdentityOverrideMap() + >>> d[42] + 42 + >>> d['speed'] = 'speedo' + >>> print(d['speed']) + speedo + """ + + def __missing__(self, key): + return key + + +class DictStack(list, collections.Mapping): + """ + A stack of dictionaries that behaves as a view on those dictionaries, + giving preference to the last. + + >>> stack = DictStack([dict(a=1, c=2), dict(b=2, a=2)]) + >>> stack['a'] + 2 + >>> stack['b'] + 2 + >>> stack['c'] + 2 + >>> stack.push(dict(a=3)) + >>> stack['a'] + 3 + >>> set(stack.keys()) == set(['a', 'b', 'c']) + True + >>> d = stack.pop() + >>> stack['a'] + 2 + >>> d = stack.pop() + >>> stack['a'] + 1 + """ + + def keys(self): + return list(set(itertools.chain.from_iterable(c.keys() for c in self))) + + def __getitem__(self, key): + for scope in reversed(self): + if key in scope: return scope[key] + raise KeyError(key) + + push = list.append + + +class BijectiveMap(dict): + """ + A Bijective Map (two-way mapping). + + Implemented as a simple dictionary of 2x the size, mapping values back + to keys. + + Note, this implementation may be incomplete. If there's not a test for + your use case below, it's likely to fail, so please test and send pull + requests or patches for additional functionality needed. + + + >>> m = BijectiveMap() + >>> m['a'] = 'b' + >>> m == {'a': 'b', 'b': 'a'} + True + >>> print(m['b']) + a + + >>> m['c'] = 'd' + >>> len(m) + 2 + + Some weird things happen if you map an item to itself or overwrite a + single key of a pair, so it's disallowed. 
+ + >>> m['e'] = 'e' + Traceback (most recent call last): + ValueError: Key cannot map to itself + + >>> m['d'] = 'e' + Traceback (most recent call last): + ValueError: Key/Value pairs may not overlap + + >>> print(m.pop('d')) + c + + >>> 'c' in m + False + + >>> m = BijectiveMap(dict(a='b')) + >>> len(m) + 1 + >>> print(m['b']) + a + + >>> m = BijectiveMap() + >>> m.update(a='b') + >>> m['b'] + 'a' + + >>> del m['b'] + >>> len(m) + 0 + >>> 'a' in m + False + """ + def __init__(self, *args, **kwargs): + super(BijectiveMap, self).__init__() + self.update(*args, **kwargs) + + def __setitem__(self, item, value): + if item == value: + raise ValueError("Key cannot map to itself") + if (value in self or item in self) and self[item] != value: + raise ValueError("Key/Value pairs may not overlap") + super(BijectiveMap, self).__setitem__(item, value) + super(BijectiveMap, self).__setitem__(value, item) + + def __delitem__(self, item): + self.pop(item) + + def __len__(self): + return super(BijectiveMap, self).__len__() // 2 + + def pop(self, key, *args, **kwargs): + mirror = self[key] + super(BijectiveMap, self).__delitem__(mirror) + return super(BijectiveMap, self).pop(key, *args, **kwargs) + + def update(self, *args, **kwargs): + # build a dictionary using the default constructs + d = dict(*args, **kwargs) + # build this dictionary using transformed keys. + for item in d.items(): + self.__setitem__(*item) + + +class FrozenDict(collections.Mapping, collections.Hashable): + """ + An immutable mapping. + + >>> a = FrozenDict(a=1, b=2) + >>> b = FrozenDict(a=1, b=2) + >>> a == b + True + + >>> a == dict(a=1, b=2) + True + >>> dict(a=1, b=2) == a + True + + >>> a['c'] = 3 + Traceback (most recent call last): + ... + TypeError: 'FrozenDict' object does not support item assignment + + >>> a.update(y=3) + Traceback (most recent call last): + ... 
+ AttributeError: 'FrozenDict' object has no attribute 'update' + + Copies should compare equal + + >>> copy.copy(a) == a + True + + Copies should be the same type + + >>> isinstance(copy.copy(a), FrozenDict) + True + + FrozenDict supplies .copy(), even though collections.Mapping doesn't + demand it. + + >>> a.copy() == a + True + >>> a.copy() is not a + True + """ + __slots__ = ['__data'] + + def __new__(cls, *args, **kwargs): + self = super(FrozenDict, cls).__new__(cls) + self.__data = dict(*args, **kwargs) + return self + + # Container + def __contains__(self, key): + return key in self.__data + + # Hashable + def __hash__(self): + return hash(tuple(sorted(self.__data.iteritems()))) + + # Mapping + def __iter__(self): + return iter(self.__data) + + def __len__(self): + return len(self.__data) + + def __getitem__(self, key): + return self.__data[key] + + # override get for efficiency provided by dict + def get(self, *args, **kwargs): + return self.__data.get(*args, **kwargs) + + # override eq to recognize underlying implementation + def __eq__(self, other): + if isinstance(other, FrozenDict): + other = other.__data + return self.__data.__eq__(other) + + def copy(self): + "Return a shallow copy of self" + return copy.copy(self) + + +class Enumeration(ItemsAsAttributes, BijectiveMap): + """ + A convenient way to provide enumerated values + + >>> e = Enumeration('a b c') + >>> e['a'] + 0 + + >>> e.a + 0 + + >>> e[1] + 'b' + + >>> set(e.names) == set('abc') + True + + >>> set(e.codes) == set(range(3)) + True + + >>> e.get('d') is None + True + + Codes need not start with 0 + + >>> e = Enumeration('a b c', range(1, 4)) + >>> e['a'] + 1 + + >>> e[3] + 'c' + """ + def __init__(self, names, codes=None): + if isinstance(names, six.string_types): + names = names.split() + if codes is None: + codes = itertools.count() + super(Enumeration, self).__init__(zip(names, codes)) + + @property + def names(self): + return (key for key in self if isinstance(key, six.string_types)) + 
+ @property + def codes(self): + return (self[name] for name in self.names) + + +class Everything(object): + """ + A collection "containing" every possible thing. + + >>> 'foo' in Everything() + True + + >>> import random + >>> random.randint(1, 999) in Everything() + True + """ + def __contains__(self, other): + return True + + +class InstrumentedDict(six.moves.UserDict): + """ + Instrument an existing dictionary with additional + functionality, but always reference and mutate + the original dictionary. + + >>> orig = {'a': 1, 'b': 2} + >>> inst = InstrumentedDict(orig) + >>> inst['a'] + 1 + >>> inst['c'] = 3 + >>> orig['c'] + 3 + >>> inst.keys() == orig.keys() + True + """ + def __init__(self, data): + six.moves.UserDict.__init__(self) + self.data = data diff --git a/libs/jaraco/functools.py b/libs/jaraco/functools.py new file mode 100644 index 00000000..d9ccf3a6 --- /dev/null +++ b/libs/jaraco/functools.py @@ -0,0 +1,268 @@ +from __future__ import absolute_import, unicode_literals, print_function, division + +import functools +import time +import warnings + +try: + from functools import lru_cache +except ImportError: + try: + from backports.functools_lru_cache import lru_cache + except ImportError: + try: + from functools32 import lru_cache + except ImportError: + warnings.warn("No lru_cache available") + + +def compose(*funcs): + """ + Compose any number of unary functions into a single unary function. + + >>> import textwrap + >>> from six import text_type + >>> text_type.strip(textwrap.dedent(compose.__doc__)) == compose(text_type.strip, textwrap.dedent)(compose.__doc__) + True + + Compose also allows the innermost function to take arbitrary arguments. 
+ + >>> round_three = lambda x: round(x, ndigits=3) + >>> f = compose(round_three, int.__truediv__) + >>> [f(3*x, x+1) for x in range(1,10)] + [1.5, 2.0, 2.25, 2.4, 2.5, 2.571, 2.625, 2.667, 2.7] + """ + + compose_two = lambda f1, f2: lambda *args, **kwargs: f1(f2(*args, **kwargs)) + return functools.reduce(compose_two, funcs) + + +def method_caller(method_name, *args, **kwargs): + """ + Return a function that will call a named method on the + target object with optional positional and keyword + arguments. + + >>> lower = method_caller('lower') + >>> lower('MyString') + 'mystring' + """ + def call_method(target): + func = getattr(target, method_name) + return func(*args, **kwargs) + return call_method + + +def once(func): + """ + Decorate func so it's only ever called the first time. + + This decorator can ensure that an expensive or non-idempotent function + will not be expensive on subsequent calls and is idempotent. + + >>> func = once(lambda a: a+3) + >>> func(3) + 6 + >>> func(9) + 6 + >>> func('12') + 6 + """ + @functools.wraps(func) + def wrapper(*args, **kwargs): + if not hasattr(func, 'always_returns'): + func.always_returns = func(*args, **kwargs) + return func.always_returns + return wrapper + + +def method_cache(method, cache_wrapper=None): + """ + Wrap lru_cache to support storing the cache data in the object instances. + + Abstracts the common paradigm where the method explicitly saves an + underscore-prefixed protected property on first call and returns that + subsequently. + + >>> class MyClass: + ... calls = 0 + ... + ... @method_cache + ... def method(self, value): + ... self.calls += 1 + ... return value + + >>> a = MyClass() + >>> a.method(3) + 3 + >>> for x in range(75): + ... 
res = a.method(x) + >>> a.calls + 75 + + Note that the apparent behavior will be exactly like that of lru_cache + except that the cache is stored on each instance, so values in one + instance will not flush values from another, and when an instance is + deleted, so are the cached values for that instance. + + >>> b = MyClass() + >>> for x in range(35): + ... res = b.method(x) + >>> b.calls + 35 + >>> a.method(0) + 0 + >>> a.calls + 75 + + Note that if method had been decorated with ``functools.lru_cache()``, + a.calls would have been 76 (due to the cached value of 0 having been + flushed by the 'b' instance). + + Clear the cache with ``.cache_clear()`` + + >>> a.method.cache_clear() + + Another cache wrapper may be supplied: + + >>> cache = lru_cache(maxsize=2) + >>> MyClass.method2 = method_cache(lambda self: 3, cache_wrapper=cache) + >>> a = MyClass() + >>> a.method2() + 3 + + See also + http://code.activestate.com/recipes/577452-a-memoize-decorator-for-instance-methods/ + for another implementation and additional justification. + """ + cache_wrapper = cache_wrapper or lru_cache() + def wrapper(self, *args, **kwargs): + # it's the first call, replace the method with a cached, bound method + bound_method = functools.partial(method, self) + cached_method = cache_wrapper(bound_method) + setattr(self, method.__name__, cached_method) + return cached_method(*args, **kwargs) + return _special_method_cache(method, cache_wrapper) or wrapper + + +def _special_method_cache(method, cache_wrapper): + """ + Because Python treats special methods differently, it's not + possible to use instance attributes to implement the cached + methods. + + Instead, install the wrapper method under a different name + and return a simple proxy to that wrapper. 
+ + https://github.com/jaraco/jaraco.functools/issues/5 + """ + name = method.__name__ + special_names = '__getattr__', '__getitem__' + if name not in special_names: + return + + wrapper_name = '__cached' + name + + def proxy(self, *args, **kwargs): + if wrapper_name not in vars(self): + bound = functools.partial(method, self) + cache = cache_wrapper(bound) + setattr(self, wrapper_name, cache) + else: + cache = getattr(self, wrapper_name) + return cache(*args, **kwargs) + + return proxy + + +def apply(transform): + """ + Decorate a function with a transform function that is + invoked on results returned from the decorated function. + + >>> @apply(reversed) + ... def get_numbers(start): + ... return range(start, start+3) + >>> list(get_numbers(4)) + [6, 5, 4] + """ + def wrap(func): + return compose(transform, func) + return wrap + + +def call_aside(f, *args, **kwargs): + """ + Call a function for its side effect after initialization. + + >>> @call_aside + ... def func(): print("called") + called + >>> func() + called + + Use functools.partial to pass parameters to the initial call + + >>> @functools.partial(call_aside, name='bingo') + ... 
def func(name): print("called with", name) + called with bingo + """ + f(*args, **kwargs) + return f + + +class Throttler(object): + """ + Rate-limit a function (or other callable) + """ + def __init__(self, func, max_rate=float('Inf')): + if isinstance(func, Throttler): + func = func.func + self.func = func + self.max_rate = max_rate + self.reset() + + def reset(self): + self.last_called = 0 + + def __call__(self, *args, **kwargs): + self._wait() + return self.func(*args, **kwargs) + + def _wait(self): + "ensure at least 1/max_rate seconds from last call" + elapsed = time.time() - self.last_called + must_wait = 1 / self.max_rate - elapsed + time.sleep(max(0, must_wait)) + self.last_called = time.time() + + def __get__(self, obj, type=None): + return first_invoke(self._wait, functools.partial(self.func, obj)) + + +def first_invoke(func1, func2): + """ + Return a function that when invoked will invoke func1 without + any parameters (for its side-effect) and then invoke func2 + with whatever parameters were passed, returning its result. + """ + def wrapper(*args, **kwargs): + func1() + return func2(*args, **kwargs) + return wrapper + + +def retry_call(func, cleanup=lambda: None, retries=0, trap=()): + """ + Given a callable func, trap the indicated exceptions + for up to 'retries' times, invoking cleanup on the + exception. On the final attempt, allow any exceptions + to propagate. 
+ """ + for attempt in range(retries): + try: + return func() + except trap: + cleanup() + + return func() diff --git a/libs/jaraco/structures/__init__.py b/libs/jaraco/structures/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/libs/jaraco/structures/binary.py b/libs/jaraco/structures/binary.py new file mode 100644 index 00000000..e4db2c65 --- /dev/null +++ b/libs/jaraco/structures/binary.py @@ -0,0 +1,130 @@ +from __future__ import absolute_import, unicode_literals + +from functools import reduce + + +def get_bit_values(number, size=32): + """ + Get bit values as a list for a given number + + >>> get_bit_values(1) == [0]*31 + [1] + True + + >>> get_bit_values(0xDEADBEEF) + [1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1] + + You may override the default word size of 32-bits to match your actual + application. + + >>> get_bit_values(0x3, 2) + [1, 1] + + >>> get_bit_values(0x3, 4) + [0, 0, 1, 1] + """ + number += 2**size + return list(map(int, bin(number)[-size:])) + +def gen_bit_values(number): + """ + Return a zero or one for each bit of a numeric value up to the most + significant 1 bit, beginning with the least significant bit. + + >>> list(gen_bit_values(16)) + [0, 0, 0, 0, 1] + """ + digits = bin(number)[2:] + return map(int, reversed(digits)) + +def coalesce(bits): + """ + Take a sequence of bits, most significant first, and + coalesce them into a number. + + >>> coalesce([1,0,1]) + 5 + """ + operation = lambda a, b: (a << 1 | b) + return reduce(operation, bits) + +class Flags(object): + """ + Subclasses should define _names, a list of flag names beginning + with the least-significant bit. + + >>> class MyFlags(Flags): + ... 
_names = 'a', 'b', 'c' + >>> mf = MyFlags.from_number(5) + >>> mf['a'] + 1 + >>> mf['b'] + 0 + >>> mf['c'] == mf[2] + True + >>> mf['b'] = 1 + >>> mf['a'] = 0 + >>> mf.number + 6 + """ + def __init__(self, values): + self._values = list(values) + if hasattr(self, '_names'): + n_missing_bits = len(self._names) - len(self._values) + self._values.extend([0] * n_missing_bits) + + @classmethod + def from_number(cls, number): + return cls(gen_bit_values(number)) + + @property + def number(self): + return coalesce(reversed(self._values)) + + def __setitem__(self, key, value): + # first try by index, then by name + try: + self._values[key] = value + except TypeError: + index = self._names.index(key) + self._values[index] = value + + def __getitem__(self, key): + # first try by index, then by name + try: + return self._values[key] + except TypeError: + index = self._names.index(key) + return self._values[index] + +class BitMask(type): + """ + A metaclass to create a bitmask with attributes. Subclass an int and + set this as the metaclass to use. 
+ + Here's how to create such a class on Python 3: + + class MyBits(int, metaclass=BitMask): + a = 0x1 + b = 0x4 + c = 0x3 + + For testing purposes, construct explicitly to support Python 2 + + >>> ns = dict(a=0x1, b=0x4, c=0x3) + >>> MyBits = BitMask(str('MyBits'), (int,), ns) + + >>> b1 = MyBits(3) + >>> b1.a, b1.b, b1.c + (True, False, True) + >>> b2 = MyBits(8) + >>> any([b2.a, b2.b, b2.c]) + False + """ + + def __new__(cls, name, bases, attrs): + newattrs = dict( + (attr, property(lambda self, value=value: bool(self & value))) + for attr, value in attrs.items() + if not attr.startswith('_') + ) + return type.__new__(cls, name, bases, newattrs) diff --git a/libs/jaraco/text.py b/libs/jaraco/text.py new file mode 100644 index 00000000..c459e6e0 --- /dev/null +++ b/libs/jaraco/text.py @@ -0,0 +1,371 @@ +from __future__ import absolute_import, unicode_literals, print_function + +import sys +import re +import inspect +import itertools +import textwrap +import functools + +import six + +import jaraco.collections +from jaraco.functools import compose + + +def substitution(old, new): + """ + Return a function that will perform a substitution on a string + """ + return lambda s: s.replace(old, new) + + +def multi_substitution(*substitutions): + """ + Take a sequence of pairs specifying substitutions, and create + a function that performs those substitutions. + + >>> multi_substitution(('foo', 'bar'), ('bar', 'baz'))('foo') + 'baz' + """ + substitutions = itertools.starmap(substitution, substitutions) + # compose function applies last function first, so reverse the + # substitutions to get the expected order. + substitutions = reversed(tuple(substitutions)) + return compose(*substitutions) + + +class FoldedCase(six.text_type): + """ + A case insensitive string class; behaves just like str + except compares equal when the only variation is case. 
+ >>> s = FoldedCase('hello world') + + >>> s == 'Hello World' + True + + >>> 'Hello World' == s + True + + >>> s.index('O') + 4 + + >>> s.split('O') + ['hell', ' w', 'rld'] + + >>> sorted(map(FoldedCase, ['GAMMA', 'alpha', 'Beta'])) + ['alpha', 'Beta', 'GAMMA'] + """ + def __lt__(self, other): + return self.lower() < other.lower() + + def __gt__(self, other): + return self.lower() > other.lower() + + def __eq__(self, other): + return self.lower() == other.lower() + + def __hash__(self): + return hash(self.lower()) + + # cache lower since it's likely to be called frequently. + def lower(self): + self._lower = super(FoldedCase, self).lower() + self.lower = lambda: self._lower + return self._lower + + def index(self, sub): + return self.lower().index(sub.lower()) + + def split(self, splitter=' ', maxsplit=0): + pattern = re.compile(re.escape(splitter), re.I) + return pattern.split(self, maxsplit) + + +def local_format(string): + """ + format the string using variables in the caller's local namespace. + + >>> a = 3 + >>> local_format("{a:5}") + ' 3' + """ + context = inspect.currentframe().f_back.f_locals + if sys.version_info < (3, 2): + return string.format(**context) + return string.format_map(context) + + +def global_format(string): + """ + format the string using variables in the caller's global namespace. + + >>> a = 3 + >>> fmt = "The func name: {global_format.__name__}" + >>> global_format(fmt) + 'The func name: global_format' + """ + context = inspect.currentframe().f_back.f_globals + if sys.version_info < (3, 2): + return string.format(**context) + return string.format_map(context) + + +def namespace_format(string): + """ + Format the string using variable in the caller's scope (locals + globals). 
+ + >>> a = 3 + >>> fmt = "A is {a} and this func is {namespace_format.__name__}" + >>> namespace_format(fmt) + 'A is 3 and this func is namespace_format' + """ + context = jaraco.collections.DictStack() + context.push(inspect.currentframe().f_back.f_globals) + context.push(inspect.currentframe().f_back.f_locals) + if sys.version_info < (3, 2): + return string.format(**context) + return string.format_map(context) + + +def is_decodable(value): + r""" + Return True if the supplied value is decodable (using the default + encoding). + + >>> is_decodable(b'\xff') + False + >>> is_decodable(b'\x32') + True + """ + # TODO: This code could be expressed more consisely and directly + # with a jaraco.context.ExceptionTrap, but that adds an unfortunate + # long dependency tree, so for now, use boolean literals. + try: + value.decode() + except UnicodeDecodeError: + return False + return True + +def is_binary(value): + """ + Return True if the value appears to be binary (that is, it's a byte + string and isn't decodable). + """ + return isinstance(value, bytes) and not is_decodable(value) + +def trim(s): + r""" + Trim something like a docstring to remove the whitespace that + is common due to indentation and formatting. + + >>> trim("\n\tfoo = bar\n\t\tbar = baz\n") + 'foo = bar\n\tbar = baz' + """ + return textwrap.dedent(s).strip() + +class Splitter(object): + """object that will split a string with the given arguments for each call + >>> s = Splitter(',') + >>> s('hello, world, this is your, master calling') + ['hello', ' world', ' this is your', ' master calling'] + """ + def __init__(self, *args): + self.args = args + + def __call__(self, s): + return s.split(*self.args) + +def indent(string, prefix=' ' * 4): + return prefix + string + +class WordSet(tuple): + """ + Given a Python identifier, return the words that identifier represents, + whether in camel case, underscore-separated, etc. 
+ + >>> WordSet.parse("camelCase") + ('camel', 'Case') + + >>> WordSet.parse("under_sep") + ('under', 'sep') + + Acronyms should be retained + + >>> WordSet.parse("firstSNL") + ('first', 'SNL') + + >>> WordSet.parse("you_and_I") + ('you', 'and', 'I') + + >>> WordSet.parse("A simple test") + ('A', 'simple', 'test') + + Multiple caps should not interfere with the first cap of another word. + + >>> WordSet.parse("myABCClass") + ('my', 'ABC', 'Class') + + The result is a WordSet, so you can get the form you need. + + >>> WordSet.parse("myABCClass").underscore_separated() + 'my_ABC_Class' + + >>> WordSet.parse('a-command').camel_case() + 'ACommand' + + >>> WordSet.parse('someIdentifier').lowered().space_separated() + 'some identifier' + + Slices of the result should return another WordSet. + + >>> WordSet.parse('taken-out-of-context')[1:].underscore_separated() + 'out_of_context' + + >>> WordSet.from_class_name(WordSet()).lowered().space_separated() + 'word set' + """ + _pattern = re.compile('([A-Z]?[a-z]+)|([A-Z]+(?![a-z]))') + + def capitalized(self): + return WordSet(word.capitalize() for word in self) + + def lowered(self): + return WordSet(word.lower() for word in self) + + def camel_case(self): + return ''.join(self.capitalized()) + + def headless_camel_case(self): + words = iter(self) + first = next(words).lower() + return itertools.chain((first,), WordSet(words).camel_case()) + + def underscore_separated(self): + return '_'.join(self) + + def dash_separated(self): + return '-'.join(self) + + def space_separated(self): + return ' '.join(self) + + def __getitem__(self, item): + result = super(WordSet, self).__getitem__(item) + if isinstance(item, slice): + result = WordSet(result) + return result + + # for compatibility with Python 2 + def __getslice__(self, i, j): + return self.__getitem__(slice(i, j)) + + @classmethod + def parse(cls, identifier): + matches = cls._pattern.finditer(identifier) + return WordSet(match.group(0) for match in matches) + + @classmethod 
+ def from_class_name(cls, subject): + return cls.parse(subject.__class__.__name__) + +# for backward compatibility +words = WordSet.parse + + +def simple_html_strip(s): + r""" + Remove HTML from the string `s`. + + >>> str(simple_html_strip('')) + '' + + >>> print(simple_html_strip('A stormy day in paradise')) + A stormy day in paradise + + >>> print(simple_html_strip('Somebody tell the truth.')) + Somebody tell the truth. + + >>> print(simple_html_strip('What about
\nmultiple lines?')) + What about + multiple lines? + """ + html_stripper = re.compile('()|(<[^>]*>)|([^<]+)', re.DOTALL) + texts = ( + match.group(3) or '' + for match + in html_stripper.finditer(s) + ) + return ''.join(texts) + + +class SeparatedValues(six.text_type): + """ + A string separated by a separator. Overrides __iter__ for getting + the values. + + >>> list(SeparatedValues('a,b,c')) + ['a', 'b', 'c'] + + Whitespace is stripped and empty values are discarded. + + >>> list(SeparatedValues(' a, b , c, ')) + ['a', 'b', 'c'] + """ + separator = ',' + + def __iter__(self): + parts = self.split(self.separator) + return six.moves.filter(None, (part.strip() for part in parts)) + +class Stripper: + r""" + Given a series of lines, find the common prefix and strip it from them. + + >>> lines = [ + ... 'abcdefg\n', + ... 'abc\n', + ... 'abcde\n', + ... ] + >>> res = Stripper.strip_prefix(lines) + >>> res.prefix + 'abc' + >>> list(res.lines) + ['defg\n', '\n', 'de\n'] + + If no prefix is common, nothing should be stripped. + + >>> lines = [ + ... 'abcd\n', + ... '1234\n', + ... ] + >>> res = Stripper.strip_prefix(lines) + >>> res.prefix = '' + >>> list(res.lines) + ['abcd\n', '1234\n'] + """ + def __init__(self, prefix, lines): + self.prefix = prefix + self.lines = map(self, lines) + + @classmethod + def strip_prefix(cls, lines): + prefix_lines, lines = itertools.tee(lines) + prefix = functools.reduce(cls.common_prefix, prefix_lines) + return cls(prefix, lines) + + def __call__(self, line): + if not self.prefix: + return line + null, prefix, rest = line.partition(self.prefix) + return rest + + @staticmethod + def common_prefix(s1, s2): + """ + Return the common prefix of two lines. 
+ """ + index = min(len(s1), len(s2)) + while s1[:index] != s2[:index]: + index -= 1 + return s1[:index] diff --git a/libs/jaraco/ui/__init__.py b/libs/jaraco/ui/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/libs/jaraco/ui/cmdline.py b/libs/jaraco/ui/cmdline.py new file mode 100644 index 00000000..0634f21d --- /dev/null +++ b/libs/jaraco/ui/cmdline.py @@ -0,0 +1,62 @@ +import argparse + +import six +from jaraco.classes import meta +from jaraco import text + + +@six.add_metaclass(meta.LeafClassesMeta) +class Command(object): + """ + A general-purpose base class for creating commands for a command-line + program using argparse. Each subclass of Command represents a separate + sub-command of a program. + + For example, one might use Command subclasses to implement the Mercurial + command set:: + + class Commit(Command): + @staticmethod + def add_arguments(cls, parser): + parser.add_argument('-m', '--message') + + @classmethod + def run(cls, args): + "Run the 'commit' command with args (parsed)" + + class Merge(Command): pass + class Pull(Command): pass + ... 
+ + Then one could create an entry point for Mercurial like so:: + + def hg_command(): + Command.invoke() + """ + + @classmethod + def add_subparsers(cls, parser): + subparsers = parser.add_subparsers() + [cmd_class.add_parser(subparsers) for cmd_class in cls._leaf_classes] + + @classmethod + def add_parser(cls, subparsers): + cmd_string = text.words(cls.__name__).lowered().dash_separated() + parser = subparsers.add_parser(cmd_string) + parser.set_defaults(action=cls) + cls.add_arguments(parser) + return parser + + @classmethod + def add_arguments(cls, parser): + pass + + @classmethod + def invoke(cls): + """ + Invoke the command using ArgumentParser + """ + parser = argparse.ArgumentParser() + cls.add_subparsers(parser) + args = parser.parse_args() + args.action.run(args) diff --git a/libs/jaraco/ui/editor.py b/libs/jaraco/ui/editor.py new file mode 100644 index 00000000..b37c759d --- /dev/null +++ b/libs/jaraco/ui/editor.py @@ -0,0 +1,108 @@ +from __future__ import unicode_literals, absolute_import + +import tempfile +import os +import sys +import subprocess +import mimetypes +import collections +import io +import difflib + +import six + +class EditProcessException(RuntimeError): pass + +class EditableFile(object): + """ + EditableFile saves some data to a temporary file, launches a + platform editor for interactive editing, and then reloads the data, + setting .changed to True if the data was edited. + + e.g.:: + + x = EditableFile('foo') + x.edit() + + if x.changed: + print(x.data) + + The EDITOR environment variable can define which executable to use + (also XML_EDITOR if the content-type to edit includes 'xml'). If no + EDITOR is defined, defaults to 'notepad' on Windows and 'edit' on + other platforms. 
+ """ + platform_default_editors = collections.defaultdict( + lambda: 'edit', + win32 = 'notepad', + linux2 = 'vi', + ) + encoding = 'utf-8' + + def __init__(self, data='', content_type='text/plain'): + self.data = six.text_type(data) + self.content_type = content_type + + def __enter__(self): + extension = mimetypes.guess_extension(self.content_type) or '' + fobj, self.name = tempfile.mkstemp(extension) + os.write(fobj, self.data.encode(self.encoding)) + os.close(fobj) + return self + + def read(self): + with open(self.name, 'rb') as f: + return f.read().decode(self.encoding) + + def __exit__(self, *tb_info): + os.remove(self.name) + + def edit(self): + """ + Edit the file + """ + self.changed = False + with self: + editor = self.get_editor() + cmd = [editor, self.name] + try: + res = subprocess.call(cmd) + except Exception as e: + print("Error launching editor %(editor)s" % locals()) + print(e) + return + if res != 0: + msg = '%(editor)s returned error status %(res)d' % locals() + raise EditProcessException(msg) + new_data = self.read() + if new_data != self.data: + self.changed = self._save_diff(self.data, new_data) + self.data = new_data + + @staticmethod + def _search_env(keys): + """ + Search the environment for the supplied keys, returning the first + one found or None if none was found. + """ + matches = (os.environ[key] for key in keys if key in os.environ) + return next(matches, None) + + def get_editor(self): + """ + Give preference to an XML_EDITOR or EDITOR defined in the + environment. Otherwise use a default editor based on platform. 
+ """ + env_search = ['EDITOR'] + if 'xml' in self.content_type: + env_search.insert(0, 'XML_EDITOR') + default_editor = self.platform_default_editors[sys.platform] + return self._search_env(env_search) or default_editor + + @staticmethod + def _save_diff(*versions): + def get_lines(content): + return list(io.StringIO(content)) + lines = map(get_lines, versions) + diff = difflib.context_diff(*lines) + return tuple(diff) diff --git a/libs/jaraco/ui/input.py b/libs/jaraco/ui/input.py new file mode 100644 index 00000000..3d108fc0 --- /dev/null +++ b/libs/jaraco/ui/input.py @@ -0,0 +1,26 @@ +""" +This module currently provides a cross-platform getch function +""" + +try: + # Windows + from msvcrt import getch +except ImportError: + pass + +try: + # Unix + import sys + import tty + import termios + + def getch(): + fd = sys.stdin.fileno() + old = termios.tcgetattr(fd) + try: + tty.setraw(fd) + return sys.stdin.read(1) + finally: + termios.tcsetattr(fd, termios.TCSADRAIN, old) +except ImportError: + pass diff --git a/libs/jaraco/ui/menu.py b/libs/jaraco/ui/menu.py new file mode 100644 index 00000000..aede93b3 --- /dev/null +++ b/libs/jaraco/ui/menu.py @@ -0,0 +1,34 @@ +from __future__ import print_function, absolute_import, unicode_literals + +import itertools + +import six + +class Menu(object): + """ + A simple command-line based menu + """ + def __init__(self, choices=None, formatter=str): + self.choices = choices or list() + self.formatter = formatter + + def get_choice(self, prompt="> "): + n = len(self.choices) + number_width = len(str(n)) + 1 + menu_fmt = '{number:{number_width}}) {choice}' + formatted_choices = map(self.formatter, self.choices) + for number, choice in zip(itertools.count(1), formatted_choices): + print(menu_fmt.format(**locals())) + print() + try: + answer = int(six.moves.input(prompt)) + result = self.choices[answer - 1] + except ValueError: + print('invalid selection') + result = None + except IndexError: + print('invalid selection') + result = 
None + except KeyboardInterrupt: + result = None + return result diff --git a/libs/jaraco/ui/progress.py b/libs/jaraco/ui/progress.py new file mode 100644 index 00000000..a00adf47 --- /dev/null +++ b/libs/jaraco/ui/progress.py @@ -0,0 +1,150 @@ +from __future__ import (print_function, absolute_import, unicode_literals, + division) + +import time +import sys +import itertools +import abc +import datetime + +import six + + +@six.add_metaclass(abc.ABCMeta) +class AbstractProgressBar(object): + def __init__(self, unit='', size=70): + """ + Size is the nominal size in characters + """ + self.unit = unit + self.size = size + + def report(self, amt): + sys.stdout.write('\r%s' % self.get_bar(amt)) + sys.stdout.flush() + + @abc.abstractmethod + def get_bar(self, amt): + "Return the string to be printed. Should be size >= self.size" + + def summary(self, str): + return ' (' + self.unit_str(str) + ')' + + def unit_str(self, str): + if self.unit: + str += ' ' + self.unit + return str + + def finish(self): + print() + + def __enter__(self): + self.report(0) + return self + + def __exit__(self, exc, exc_val, tb): + if exc is None: + self.finish() + else: + print() + + def iterate(self, iterable): + """ + Report the status as the iterable is consumed. 
+ """ + with self: + for n, item in enumerate(iterable, 1): + self.report(n) + yield item + + +class SimpleProgressBar(AbstractProgressBar): + + _PROG_DISPGLYPH = itertools.cycle(['|', '/', '-', '\\']) + + def get_bar(self, amt): + bar = next(self._PROG_DISPGLYPH) + template = ' [{bar:^{bar_len}}]' + summary = self.summary('{amt}') + template += summary + empty = template.format( + bar='', + bar_len=0, + amt=amt, + ) + bar_len = self.size - len(empty) + return template.format(**locals()) + + @classmethod + def demo(cls): + bar3 = cls(unit='cubes', size=30) + with bar3: + for x in six.moves.range(1, 759): + bar3.report(x) + time.sleep(0.01) + + +class TargetProgressBar(AbstractProgressBar): + def __init__(self, total=None, unit='', size=70): + """ + Size is the nominal size in characters + """ + self.total = total + super(TargetProgressBar, self).__init__(unit, size) + + def get_bar(self, amt): + template = ' [{bar:<{bar_len}}]' + completed = amt / self.total + percent = int(completed * 100) + percent_str = ' {percent:3}%' + template += percent_str + summary = self.summary('{amt}/{total}') + template += summary + empty = template.format( + total=self.total, + bar='', + bar_len=0, + **locals() + ) + bar_len = self.size - len(empty) + bar = '=' * int(completed * bar_len) + return template.format(total=self.total, **locals()) + + @classmethod + def demo(cls): + bar1 = cls(100, 'blocks') + with bar1: + for x in six.moves.range(1, 101): + bar1.report(x) + time.sleep(0.05) + + bar2 = cls(758, size=50) + with bar2: + for x in six.moves.range(1, 759): + bar2.report(x) + time.sleep(0.01) + + def finish(self): + self.report(self.total) + super(TargetProgressBar, self).finish() + + +def countdown(template, duration=datetime.timedelta(seconds=5)): + """ + Do a countdown for duration, printing the template (which may accept one + positional argument). 
Template should be something like + ``countdown complete in {} seconds.`` + """ + now = datetime.datetime.now() + deadline = now + duration + remaining = deadline - datetime.datetime.now() + while remaining: + remaining = deadline - datetime.datetime.now() + remaining = max(datetime.timedelta(), remaining) + msg = template.format(remaining.total_seconds()) + print(msg, end=' '*10) + sys.stdout.flush() + time.sleep(.1) + print('\b'*80, end='') + sys.stdout.flush() + print() diff --git a/libs/more_itertools/__init__.py b/libs/more_itertools/__init__.py new file mode 100644 index 00000000..5a3467fe --- /dev/null +++ b/libs/more_itertools/__init__.py @@ -0,0 +1,2 @@ +from more_itertools.more import * +from more_itertools.recipes import * diff --git a/libs/more_itertools/more.py b/libs/more_itertools/more.py new file mode 100644 index 00000000..56512ce4 --- /dev/null +++ b/libs/more_itertools/more.py @@ -0,0 +1,237 @@ +from functools import partial, wraps +from itertools import izip_longest +from recipes import * + +__all__ = ['chunked', 'first', 'peekable', 'collate', 'consumer', 'ilen', + 'iterate', 'with_iter'] + + +_marker = object() + + +def chunked(iterable, n): + """Break an iterable into lists of a given length:: + + >>> list(chunked([1, 2, 3, 4, 5, 6, 7], 3)) + [[1, 2, 3], [4, 5, 6], [7]] + + If the length of ``iterable`` is not evenly divisible by ``n``, the last + returned list will be shorter. + + This is useful for splitting up a computation on a large number of keys + into batches, to be pickled and sent off to worker processes. One example + is operations on rows in MySQL, which does not implement server-side + cursors properly and would otherwise load the entire dataset into RAM on + the client. + + """ + # Doesn't seem to run into any number-of-args limits. 
+ for group in (list(g) for g in izip_longest(*[iter(iterable)] * n, + fillvalue=_marker)): + if group[-1] is _marker: + # If this is the last group, shuck off the padding: + del group[group.index(_marker):] + yield group + + +def first(iterable, default=_marker): + """Return the first item of an iterable, ``default`` if there is none. + + >>> first(xrange(4)) + 0 + >>> first(xrange(0), 'some default') + 'some default' + + If ``default`` is not provided and there are no items in the iterable, + raise ``ValueError``. + + ``first()`` is useful when you have a generator of expensive-to-retrieve + values and want any arbitrary one. It is marginally shorter than + ``next(iter(...))`` but saves you an entire ``try``/``except`` when you + want to provide a fallback value. + + """ + try: + return next(iter(iterable)) + except StopIteration: + # I'm on the edge about raising ValueError instead of StopIteration. At + # the moment, ValueError wins, because the caller could conceivably + # want to do something different with flow control when I raise the + # exception, and it's weird to explicitly catch StopIteration. + if default is _marker: + raise ValueError('first() was called on an empty iterable, and no ' + 'default value was provided.') + return default + + +class peekable(object): + """Wrapper for an iterator to allow 1-item lookahead + + Call ``peek()`` on the result to get the value that will next pop out of + ``next()``, without advancing the iterator: + + >>> p = peekable(xrange(2)) + >>> p.peek() + 0 + >>> p.next() + 0 + >>> p.peek() + 1 + >>> p.next() + 1 + + Pass ``peek()`` a default value, and it will be returned in the case where + the iterator is exhausted: + + >>> p = peekable([]) + >>> p.peek('hi') + 'hi' + + If no default is provided, ``peek()`` raises ``StopIteration`` when there + are no items left. + + To test whether there are more items in the iterator, examine the + peekable's truth value. If it is truthy, there are more items. 
+ + >>> assert peekable(xrange(1)) + >>> assert not peekable([]) + + """ + # Lowercase to blend in with itertools. The fact that it's a class is an + # implementation detail. + + def __init__(self, iterable): + self._it = iter(iterable) + + def __iter__(self): + return self + + def __nonzero__(self): + try: + self.peek() + except StopIteration: + return False + return True + + def peek(self, default=_marker): + """Return the item that will be next returned from ``next()``. + + Return ``default`` if there are no items left. If ``default`` is not + provided, raise ``StopIteration``. + + """ + if not hasattr(self, '_peek'): + try: + self._peek = self._it.next() + except StopIteration: + if default is _marker: + raise + return default + return self._peek + + def next(self): + ret = self.peek() + del self._peek + return ret + + +def collate(*iterables, **kwargs): + """Return a sorted merge of the items from each of several already-sorted + ``iterables``. + + >>> list(collate('ACDZ', 'AZ', 'JKL')) + ['A', 'A', 'C', 'D', 'J', 'K', 'L', 'Z', 'Z'] + + Works lazily, keeping only the next value from each iterable in memory. Use + ``collate()`` to, for example, perform a n-way mergesort of items that + don't fit in memory. + + :arg key: A function that returns a comparison value for an item. Defaults + to the identity function. + :arg reverse: If ``reverse=True``, yield results in descending order + rather than ascending. ``iterables`` must also yield their elements in + descending order. + + If the elements of the passed-in iterables are out of order, you might get + unexpected results. + + """ + key = kwargs.pop('key', lambda a: a) + reverse = kwargs.pop('reverse', False) + + min_or_max = partial(max if reverse else min, key=lambda (a, b): a) + peekables = [peekable(it) for it in iterables] + peekables = [p for p in peekables if p] # Kill empties. 
+ while peekables: + _, p = min_or_max((key(p.peek()), p) for p in peekables) + yield p.next() + peekables = [p for p in peekables if p] + + +def consumer(func): + """Decorator that automatically advances a PEP-342-style "reverse iterator" + to its first yield point so you don't have to call ``next()`` on it + manually. + + >>> @consumer + ... def tally(): + ... i = 0 + ... while True: + ... print 'Thing number %s is %s.' % (i, (yield)) + ... i += 1 + ... + >>> t = tally() + >>> t.send('red') + Thing number 0 is red. + >>> t.send('fish') + Thing number 1 is fish. + + Without the decorator, you would have to call ``t.next()`` before + ``t.send()`` could be used. + + """ + @wraps(func) + def wrapper(*args, **kwargs): + gen = func(*args, **kwargs) + gen.next() + return gen + return wrapper + + +def ilen(iterable): + """Return the number of items in ``iterable``. + + >>> from itertools import ifilter + >>> ilen(ifilter(lambda x: x % 3 == 0, xrange(1000000))) + 333334 + + This does, of course, consume the iterable, so handle it with care. + + """ + return sum(1 for _ in iterable) + + +def iterate(func, start): + """Return ``start``, ``func(start)``, ``func(func(start))``, ... + + >>> from itertools import islice + >>> list(islice(iterate(lambda x: 2*x, 1), 10)) + [1, 2, 4, 8, 16, 32, 64, 128, 256, 512] + + """ + while True: + yield start + start = func(start) + + +def with_iter(context_manager): + """Wrap an iterable in a ``with`` statement, so it closes once exhausted. + + Example:: + + upper_lines = (line.upper() for line in with_iter(open('foo'))) + + """ + with context_manager as iterable: + for item in iterable: + yield item diff --git a/libs/more_itertools/recipes.py b/libs/more_itertools/recipes.py new file mode 100644 index 00000000..c92373c6 --- /dev/null +++ b/libs/more_itertools/recipes.py @@ -0,0 +1,331 @@ +"""Imported from the recipes section of the itertools documentation. + +All functions taken from the recipes section of the itertools library docs +[1]_. 
+Some backward-compatible usability improvements have been made. + +.. [1] http://docs.python.org/library/itertools.html#recipes + +""" +from collections import deque +from itertools import chain, combinations, count, cycle, groupby, ifilterfalse, imap, islice, izip, izip_longest, repeat, starmap, tee # Wrapping breaks 2to3. +import operator +from random import randrange, sample, choice + + +__all__ = ['take', 'tabulate', 'consume', 'nth', 'quantify', 'padnone', + 'ncycles', 'dotproduct', 'flatten', 'repeatfunc', 'pairwise', + 'grouper', 'roundrobin', 'powerset', 'unique_everseen', + 'unique_justseen', 'iter_except', 'random_product', + 'random_permutation', 'random_combination', + 'random_combination_with_replacement'] + + +def take(n, iterable): + """Return first n items of the iterable as a list + + >>> take(3, range(10)) + [0, 1, 2] + >>> take(5, range(3)) + [0, 1, 2] + + Effectively a short replacement for ``next`` based iterator consumption + when you want more than one item, but less than the whole iterator. + + """ + return list(islice(iterable, n)) + + +def tabulate(function, start=0): + """Return an iterator mapping the function over linear input. + + The start argument will be increased by 1 each time the iterator is called + and fed into the function. + + >>> t = tabulate(lambda x: x**2, -3) + >>> take(3, t) + [9, 4, 1] + + """ + return imap(function, count(start)) + + +def consume(iterator, n=None): + """Advance the iterator n-steps ahead. If n is none, consume entirely. + + Efficiently exhausts an iterator without returning values. Defaults to + consuming the whole iterator, but an optional second argument may be + provided to limit consumption. + + >>> i = (x for x in range(10)) + >>> next(i) + 0 + >>> consume(i, 3) + >>> next(i) + 4 + >>> consume(i) + >>> next(i) + Traceback (most recent call last): + File "", line 1, in + StopIteration + + If the iterator has fewer items remaining than the provided limit, the + whole iterator will be consumed. 
+ + >>> i = (x for x in range(3)) + >>> consume(i, 5) + >>> next(i) + Traceback (most recent call last): + File "", line 1, in + StopIteration + + """ + # Use functions that consume iterators at C speed. + if n is None: + # feed the entire iterator into a zero-length deque + deque(iterator, maxlen=0) + else: + # advance to the empty slice starting at position n + next(islice(iterator, n, n), None) + + +def nth(iterable, n, default=None): + """Returns the nth item or a default value + + >>> l = range(10) + >>> nth(l, 3) + 3 + >>> nth(l, 20, "zebra") + 'zebra' + + """ + return next(islice(iterable, n, None), default) + + +def quantify(iterable, pred=bool): + """Return the how many times the predicate is true + + >>> quantify([True, False, True]) + 2 + + """ + return sum(imap(pred, iterable)) + + +def padnone(iterable): + """Returns the sequence of elements and then returns None indefinitely. + + >>> take(5, padnone(range(3))) + [0, 1, 2, None, None] + + Useful for emulating the behavior of the built-in map() function. + + """ + return chain(iterable, repeat(None)) + + +def ncycles(iterable, n): + """Returns the sequence elements n times + + >>> list(ncycles(["a", "b"], 3)) + ['a', 'b', 'a', 'b', 'a', 'b'] + + """ + return chain.from_iterable(repeat(tuple(iterable), n)) + + +def dotproduct(vec1, vec2): + """Returns the dot product of the two iterables + + >>> dotproduct([10, 10], [20, 20]) + 400 + + """ + return sum(imap(operator.mul, vec1, vec2)) + + +def flatten(listOfLists): + """Return an iterator flattening one level of nesting in a list of lists + + >>> list(flatten([[0, 1], [2, 3]])) + [0, 1, 2, 3] + + """ + return chain.from_iterable(listOfLists) + + +def repeatfunc(func, times=None, *args): + """Repeat calls to func with specified arguments. 
+ + >>> list(repeatfunc(lambda: 5, 3)) + [5, 5, 5] + >>> list(repeatfunc(lambda x: x ** 2, 3, 3)) + [9, 9, 9] + + """ + if times is None: + return starmap(func, repeat(args)) + return starmap(func, repeat(args, times)) + + +def pairwise(iterable): + """Returns an iterator of paired items, overlapping, from the original + + >>> take(4, pairwise(count())) + [(0, 1), (1, 2), (2, 3), (3, 4)] + + """ + a, b = tee(iterable) + next(b, None) + return izip(a, b) + + +def grouper(n, iterable, fillvalue=None): + """Collect data into fixed-length chunks or blocks + + >>> list(grouper(3, 'ABCDEFG', 'x')) + [('A', 'B', 'C'), ('D', 'E', 'F'), ('G', 'x', 'x')] + + """ + args = [iter(iterable)] * n + return izip_longest(fillvalue=fillvalue, *args) + + +def roundrobin(*iterables): + """Yields an item from each iterable, alternating between them + + >>> list(roundrobin('ABC', 'D', 'EF')) + ['A', 'D', 'E', 'B', 'F', 'C'] + + """ + # Recipe credited to George Sakkis + pending = len(iterables) + nexts = cycle(iter(it).next for it in iterables) + while pending: + try: + for next in nexts: + yield next() + except StopIteration: + pending -= 1 + nexts = cycle(islice(nexts, pending)) + + +def powerset(iterable): + """Yields all possible subsets of the iterable + + >>> list(powerset([1,2,3])) + [(), (1,), (2,), (3,), (1, 2), (1, 3), (2, 3), (1, 2, 3)] + + """ + s = list(iterable) + return chain.from_iterable(combinations(s, r) for r in range(len(s)+1)) + + +def unique_everseen(iterable, key=None): + """Yield unique elements, preserving order. 
+ + >>> list(unique_everseen('AAAABBBCCDAABBB')) + ['A', 'B', 'C', 'D'] + >>> list(unique_everseen('ABBCcAD', str.lower)) + ['A', 'B', 'C', 'D'] + + """ + seen = set() + seen_add = seen.add + if key is None: + for element in ifilterfalse(seen.__contains__, iterable): + seen_add(element) + yield element + else: + for element in iterable: + k = key(element) + if k not in seen: + seen_add(k) + yield element + + +def unique_justseen(iterable, key=None): + """Yields elements in order, ignoring serial duplicates + + >>> list(unique_justseen('AAAABBBCCDAABBB')) + ['A', 'B', 'C', 'D', 'A', 'B'] + >>> list(unique_justseen('ABBCcAD', str.lower)) + ['A', 'B', 'C', 'A', 'D'] + + """ + return imap(next, imap(operator.itemgetter(1), groupby(iterable, key))) + + +def iter_except(func, exception, first=None): + """Yields results from a function repeatedly until an exception is raised. + + Converts a call-until-exception interface to an iterator interface. + Like __builtin__.iter(func, sentinel) but uses an exception instead + of a sentinel to end the loop. + + >>> l = range(3) + >>> list(iter_except(l.pop, IndexError)) + [2, 1, 0] + + """ + try: + if first is not None: + yield first() + while 1: + yield func() + except exception: + pass + + +def random_product(*args, **kwds): + """Returns a random pairing of items from each iterable argument + + If `repeat` is provided as a kwarg, it's value will be used to indicate + how many pairings should be chosen. + + >>> random_product(['a', 'b', 'c'], [1, 2], repeat=2) # doctest:+SKIP + ('b', '2', 'c', '2') + + """ + pools = map(tuple, args) * kwds.get('repeat', 1) + return tuple(choice(pool) for pool in pools) + + +def random_permutation(iterable, r=None): + """Returns a random permutation. + + If r is provided, the permutation is truncated to length r. 
+ + >>> random_permutation(range(5)) # doctest:+SKIP + (3, 4, 0, 1, 2) + + """ + pool = tuple(iterable) + r = len(pool) if r is None else r + return tuple(sample(pool, r)) + + +def random_combination(iterable, r): + """Returns a random combination of length r, chosen without replacement. + + >>> random_combination(range(5), 3) # doctest:+SKIP + (2, 3, 4) + + """ + pool = tuple(iterable) + n = len(pool) + indices = sorted(sample(xrange(n), r)) + return tuple(pool[i] for i in indices) + + +def random_combination_with_replacement(iterable, r): + """Returns a random combination of length r, chosen with replacement. + + >>> random_combination_with_replacement(range(3), 5) # # doctest:+SKIP + (0, 0, 1, 2, 2) + + """ + pool = tuple(iterable) + n = len(pool) + indices = sorted(randrange(n) for i in xrange(r)) + return tuple(pool[i] for i in indices) diff --git a/libs/more_itertools/tests/__init__.py b/libs/more_itertools/tests/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/libs/more_itertools/tests/test_more.py b/libs/more_itertools/tests/test_more.py new file mode 100644 index 00000000..53b10618 --- /dev/null +++ b/libs/more_itertools/tests/test_more.py @@ -0,0 +1,143 @@ +from contextlib import closing +from itertools import islice, ifilter +from StringIO import StringIO +from unittest import TestCase + +from nose.tools import eq_, assert_raises + +from more_itertools import * # Test all the symbols are in __all__. 
+ + +class CollateTests(TestCase): + """Unit tests for ``collate()``""" + # Also accidentally tests peekable, though that could use its own tests + + def test_default(self): + """Test with the default `key` function.""" + iterables = [xrange(4), xrange(7), xrange(3, 6)] + eq_(sorted(reduce(list.__add__, [list(it) for it in iterables])), + list(collate(*iterables))) + + def test_key(self): + """Test using a custom `key` function.""" + iterables = [xrange(5, 0, -1), xrange(4, 0, -1)] + eq_(list(sorted(reduce(list.__add__, + [list(it) for it in iterables]), + reverse=True)), + list(collate(*iterables, key=lambda x: -x))) + + def test_empty(self): + """Be nice if passed an empty list of iterables.""" + eq_([], list(collate())) + + def test_one(self): + """Work when only 1 iterable is passed.""" + eq_([0, 1], list(collate(xrange(2)))) + + def test_reverse(self): + """Test the `reverse` kwarg.""" + iterables = [xrange(4, 0, -1), xrange(7, 0, -1), xrange(3, 6, -1)] + eq_(sorted(reduce(list.__add__, [list(it) for it in iterables]), + reverse=True), + list(collate(*iterables, reverse=True))) + + +class ChunkedTests(TestCase): + """Tests for ``chunked()``""" + + def test_even(self): + """Test when ``n`` divides evenly into the length of the iterable.""" + eq_(list(chunked('ABCDEF', 3)), [['A', 'B', 'C'], ['D', 'E', 'F']]) + + def test_odd(self): + """Test when ``n`` does not divide evenly into the length of the + iterable. + + """ + eq_(list(chunked('ABCDE', 3)), [['A', 'B', 'C'], ['D', 'E']]) + + +class FirstTests(TestCase): + """Tests for ``first()``""" + + def test_many(self): + """Test that it works on many-item iterables.""" + # Also try it on a generator expression to make sure it works on + # whatever those return, across Python versions. 
+ eq_(first(x for x in xrange(4)), 0) + + def test_one(self): + """Test that it doesn't raise StopIteration prematurely.""" + eq_(first([3]), 3) + + def test_empty_stop_iteration(self): + """It should raise StopIteration for empty iterables.""" + assert_raises(ValueError, first, []) + + def test_default(self): + """It should return the provided default arg for empty iterables.""" + eq_(first([], 'boo'), 'boo') + + +class PeekableTests(TestCase): + """Tests for ``peekable()`` behavor not incidentally covered by testing + ``collate()`` + + """ + def test_peek_default(self): + """Make sure passing a default into ``peek()`` works.""" + p = peekable([]) + eq_(p.peek(7), 7) + + def test_truthiness(self): + """Make sure a ``peekable`` tests true iff there are items remaining in + the iterable. + + """ + p = peekable([]) + self.failIf(p) + p = peekable(xrange(3)) + self.failUnless(p) + + def test_simple_peeking(self): + """Make sure ``next`` and ``peek`` advance and don't advance the + iterator, respectively. 
+ + """ + p = peekable(xrange(10)) + eq_(p.next(), 0) + eq_(p.peek(), 1) + eq_(p.next(), 1) + + +class ConsumerTests(TestCase): + """Tests for ``consumer()``""" + + def test_consumer(self): + @consumer + def eater(): + while True: + x = yield + + e = eater() + e.send('hi') # without @consumer, would raise TypeError + + +def test_ilen(): + """Sanity-check ``ilen()``.""" + eq_(ilen(ifilter(lambda x: x % 10 == 0, range(101))), 11) + + +def test_with_iter(): + """Make sure ``with_iter`` iterates over and closes things correctly.""" + s = StringIO('One fish\nTwo fish') + initial_words = [line.split()[0] for line in with_iter(closing(s))] + eq_(initial_words, ['One', 'Two']) + + # Make sure closing happened: + try: + list(s) + except ValueError: # "I/O operation on closed file" + pass + else: + raise AssertionError('StringIO object was not closed.') diff --git a/libs/more_itertools/tests/test_recipes.py b/libs/more_itertools/tests/test_recipes.py new file mode 100644 index 00000000..485d9d30 --- /dev/null +++ b/libs/more_itertools/tests/test_recipes.py @@ -0,0 +1,433 @@ +from random import seed +from unittest import TestCase + +from nose.tools import eq_, assert_raises, ok_ + +from more_itertools import * + + +def setup_module(): + seed(1337) + + +class TakeTests(TestCase): + """Tests for ``take()``""" + + def test_simple_take(self): + """Test basic usage""" + t = take(5, xrange(10)) + eq_(t, [0, 1, 2, 3, 4]) + + def test_null_take(self): + """Check the null case""" + t = take(0, xrange(10)) + eq_(t, []) + + def test_negative_take(self): + """Make sure taking negative items results in a ValueError""" + assert_raises(ValueError, take, -3, xrange(10)) + + def test_take_too_much(self): + """Taking more than an iterator has remaining should return what the + iterator has remaining. 
+ + """ + t = take(10, xrange(5)) + eq_(t, [0, 1, 2, 3, 4]) + + +class TabulateTests(TestCase): + """Tests for ``tabulate()``""" + + def test_simple_tabulate(self): + """Test the happy path""" + t = tabulate(lambda x: x) + f = tuple([next(t) for _ in range(3)]) + eq_(f, (0, 1, 2)) + + def test_count(self): + """Ensure tabulate accepts specific count""" + t = tabulate(lambda x: 2 * x, -1) + f = (next(t), next(t), next(t)) + eq_(f, (-2, 0, 2)) + + +class ConsumeTests(TestCase): + """Tests for ``consume()``""" + + def test_sanity(self): + """Test basic functionality""" + r = (x for x in range(10)) + consume(r, 3) + eq_(3, next(r)) + + def test_null_consume(self): + """Check the null case""" + r = (x for x in range(10)) + consume(r, 0) + eq_(0, next(r)) + + def test_negative_consume(self): + """Check that negative consumsion throws an error""" + r = (x for x in range(10)) + assert_raises(ValueError, consume, r, -1) + + def test_total_consume(self): + """Check that iterator is totally consumed by default""" + r = (x for x in range(10)) + consume(r) + assert_raises(StopIteration, next, r) + + +class NthTests(TestCase): + """Tests for ``nth()``""" + + def test_basic(self): + """Make sure the nth item is returned""" + l = range(10) + for i, v in enumerate(l): + eq_(nth(l, i), v) + + def test_default(self): + """Ensure a default value is returned when nth item not found""" + l = range(3) + eq_(nth(l, 100, "zebra"), "zebra") + + def test_negative_item_raises(self): + """Ensure asking for a negative item raises an exception""" + assert_raises(ValueError, nth, range(10), -3) + + +class QuantifyTests(TestCase): + """Tests for ``quantify()``""" + + def test_happy_path(self): + """Make sure True count is returned""" + q = [True, False, True] + eq_(quantify(q), 2) + + def test_custom_predicate(self): + """Ensure non-default predicates return as expected""" + q = range(10) + eq_(quantify(q, lambda x: x % 2 == 0), 5) + + +class PadnoneTests(TestCase): + """Tests for ``padnone()``""" 
+ + def test_happy_path(self): + """wrapper iterator should return None indefinitely""" + r = range(2) + p = padnone(r) + eq_([0, 1, None, None], [next(p) for _ in range(4)]) + + +class NcyclesTests(TestCase): + """Tests for ``nyclces()``""" + + def test_happy_path(self): + """cycle a sequence three times""" + r = ["a", "b", "c"] + n = ncycles(r, 3) + eq_(["a", "b", "c", "a", "b", "c", "a", "b", "c"], + list(n)) + + def test_null_case(self): + """asking for 0 cycles should return an empty iterator""" + n = ncycles(range(100), 0) + assert_raises(StopIteration, next, n) + + def test_pathalogical_case(self): + """asking for negative cycles should return an empty iterator""" + n = ncycles(range(100), -10) + assert_raises(StopIteration, next, n) + + +class DotproductTests(TestCase): + """Tests for ``dotproduct()``'""" + + def test_happy_path(self): + """simple dotproduct example""" + eq_(400, dotproduct([10, 10], [20, 20])) + + +class FlattenTests(TestCase): + """Tests for ``flatten()``""" + + def test_basic_usage(self): + """ensure list of lists is flattened one level""" + f = [[0, 1, 2], [3, 4, 5]] + eq_(range(6), list(flatten(f))) + + def test_single_level(self): + """ensure list of lists is flattened only one level""" + f = [[0, [1, 2]], [[3, 4], 5]] + eq_([0, [1, 2], [3, 4], 5], list(flatten(f))) + + +class RepeatfuncTests(TestCase): + """Tests for ``repeatfunc()``""" + + def test_simple_repeat(self): + """test simple repeated functions""" + r = repeatfunc(lambda: 5) + eq_([5, 5, 5, 5, 5], [next(r) for _ in range(5)]) + + def test_finite_repeat(self): + """ensure limited repeat when times is provided""" + r = repeatfunc(lambda: 5, times=5) + eq_([5, 5, 5, 5, 5], list(r)) + + def test_added_arguments(self): + """ensure arguments are applied to the function""" + r = repeatfunc(lambda x: x, 2, 3) + eq_([3, 3], list(r)) + + def test_null_times(self): + """repeat 0 should return an empty iterator""" + r = repeatfunc(range, 0, 3) + assert_raises(StopIteration, next, r) + 
+ +class PairwiseTests(TestCase): + """Tests for ``pairwise()``""" + + def test_base_case(self): + """ensure an iterable will return pairwise""" + p = pairwise([1, 2, 3]) + eq_([(1, 2), (2, 3)], list(p)) + + def test_short_case(self): + """ensure an empty iterator if there's not enough values to pair""" + p = pairwise("a") + assert_raises(StopIteration, next, p) + + +class GrouperTests(TestCase): + """Tests for ``grouper()``""" + + def test_even(self): + """Test when group size divides evenly into the length of + the iterable. + + """ + eq_(list(grouper(3, 'ABCDEF')), [('A', 'B', 'C'), ('D', 'E', 'F')]) + + def test_odd(self): + """Test when group size does not divide evenly into the length of the + iterable. + + """ + eq_(list(grouper(3, 'ABCDE')), [('A', 'B', 'C'), ('D', 'E', None)]) + + def test_fill_value(self): + """Test that the fill value is used to pad the final group""" + eq_(list(grouper(3, 'ABCDE', 'x')), [('A', 'B', 'C'), ('D', 'E', 'x')]) + + +class RoundrobinTests(TestCase): + """Tests for ``roundrobin()``""" + + def test_even_groups(self): + """Ensure ordered output from evenly populated iterables""" + eq_(list(roundrobin('ABC', [1, 2, 3], range(3))), + ['A', 1, 0, 'B', 2, 1, 'C', 3, 2]) + + def test_uneven_groups(self): + """Ensure ordered output from unevenly populated iterables""" + eq_(list(roundrobin('ABCD', [1, 2], range(0))), + ['A', 1, 'B', 2, 'C', 'D']) + + +class PowersetTests(TestCase): + """Tests for ``powerset()``""" + + def test_combinatorics(self): + """Ensure a proper enumeration""" + p = powerset([1, 2, 3]) + eq_(list(p), + [(), (1,), (2,), (3,), (1, 2), (1, 3), (2, 3), (1, 2, 3)]) + + +class UniqueEverseenTests(TestCase): + """Tests for ``unique_everseen()``""" + + def test_everseen(self): + """ensure duplicate elements are ignored""" + u = unique_everseen('AAAABBBBCCDAABBB') + eq_(['A', 'B', 'C', 'D'], + list(u)) + + def test_custom_key(self): + """ensure the custom key comparison works""" + u = unique_everseen('aAbACCc', 
key=str.lower) + eq_(list('abC'), list(u)) + + +class UniqueJustseenTests(TestCase): + """Tests for ``unique_justseen()``""" + + def test_justseen(self): + """ensure only last item is remembered""" + u = unique_justseen('AAAABBBCCDABB') + eq_(list('ABCDAB'), list(u)) + + def test_custom_key(self): + """ensure the custom key comparison works""" + u = unique_justseen('AABCcAD', str.lower) + eq_(list('ABCAD'), list(u)) + + +class IterExceptTests(TestCase): + """Tests for ``iter_except()``""" + + def test_exact_exception(self): + """ensure the exact specified exception is caught""" + l = [1, 2, 3] + i = iter_except(l.pop, IndexError) + eq_(list(i), [3, 2, 1]) + + def test_generic_exception(self): + """ensure the generic exception can be caught""" + l = [1, 2] + i = iter_except(l.pop, Exception) + eq_(list(i), [2, 1]) + + def test_uncaught_exception_is_raised(self): + """ensure a non-specified exception is raised""" + l = [1, 2, 3] + i = iter_except(l.pop, KeyError) + assert_raises(IndexError, list, i) + + def test_first(self): + """ensure first is run before the function""" + l = [1, 2, 3] + f = lambda: 25 + i = iter_except(l.pop, IndexError, f) + eq_(list(i), [25, 3, 2, 1]) + + +class RandomProductTests(TestCase): + """Tests for ``random_product()`` + + Since random.choice() has different results with the same seed across + python versions 2.x and 3.x, these tests use highly probably events to + create predictable outcomes across platforms. + """ + + def test_simple_lists(self): + """Ensure that one item is chosen from each list in each pair. + Also ensure that each item from each list eventually appears in + the chosen combinations. + + Odds are roughly 1 in 7.1 * 10e16 that one item from either list will + not be chosen after 100 samplings of one item from each list. Just to + be safe, better use a known random seed, too. 
+ + """ + nums = [1, 2, 3] + lets = ['a', 'b', 'c'] + n, m = zip(*[random_product(nums, lets) for _ in range(100)]) + n, m = set(n), set(m) + eq_(n, set(nums)) + eq_(m, set(lets)) + eq_(len(n), len(nums)) + eq_(len(m), len(lets)) + + def test_list_with_repeat(self): + """ensure multiple items are chosen, and that they appear to be chosen + from one list then the next, in proper order. + + """ + nums = [1, 2, 3] + lets = ['a', 'b', 'c'] + r = list(random_product(nums, lets, repeat=100)) + eq_(2 * 100, len(r)) + n, m = set(r[::2]), set(r[1::2]) + eq_(n, set(nums)) + eq_(m, set(lets)) + eq_(len(n), len(nums)) + eq_(len(m), len(lets)) + + +class RandomPermutationTests(TestCase): + """Tests for ``random_permutation()``""" + + def test_full_permutation(self): + """ensure every item from the iterable is returned in a new ordering + + 15 elements have a 1 in 1.3 * 10e12 of appearing in sorted order, so + we fix a seed value just to be sure. + + """ + i = range(15) + r = random_permutation(i) + eq_(set(i), set(r)) + if i == r: + raise AssertionError("Values were not permuted") + + def test_partial_permutation(self): + """ensure all returned items are from the iterable, that the returned + permutation is of the desired length, and that all items eventually + get returned. + + Sampling 100 permutations of length 5 from a set of 15 leaves a + (2/3)^100 chance that an item will not be chosen. Multiplied by 15 + items, there is a 1 in 2.6e16 chance that at least 1 item will not + show up in the resulting output. Using a random seed will fix that. 
+ + """ + items = range(15) + item_set = set(items) + all_items = set() + for _ in xrange(100): + permutation = random_permutation(items, 5) + eq_(len(permutation), 5) + permutation_set = set(permutation) + ok_(permutation_set <= item_set) + all_items |= permutation_set + eq_(all_items, item_set) + + +class RandomCombinationTests(TestCase): + """Tests for ``random_combination()``""" + + def test_psuedorandomness(self): + """ensure different subsets of the iterable get returned over many + samplings of random combinations""" + items = range(15) + all_items = set() + for _ in xrange(50): + combination = random_combination(items, 5) + all_items |= set(combination) + eq_(all_items, set(items)) + + def test_no_replacement(self): + """ensure that elements are sampled without replacement""" + items = range(15) + for _ in xrange(50): + combination = random_combination(items, len(items)) + eq_(len(combination), len(set(combination))) + assert_raises(ValueError, random_combination, items, len(items) + 1) + + +class RandomCombinationWithReplacementTests(TestCase): + """Tests for ``random_combination_with_replacement()``""" + + def test_replacement(self): + """ensure that elements are sampled with replacement""" + items = range(5) + combo = random_combination_with_replacement(items, len(items) * 2) + eq_(2 * len(items), len(combo)) + if len(set(combo)) == len(combo): + raise AssertionError("Combination contained no duplicates") + + def test_psuedorandomness(self): + """ensure different subsets of the iterable get returned over many + samplings of random combinations""" + items = range(15) + all_items = set() + for _ in xrange(50): + combination = random_combination_with_replacement(items, 5) + all_items |= set(combination) + eq_(all_items, set(items)) diff --git a/libs/path.py b/libs/path.py new file mode 100644 index 00000000..1e92a490 --- /dev/null +++ b/libs/path.py @@ -0,0 +1,1722 @@ +# +# Copyright (c) 2010 Mikhail Gusarov +# +# Permission is hereby granted, free of 
charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. +# + +""" +path.py - An object representing a path to a file or directory. 
+ +https://github.com/jaraco/path.py + +Example:: + + from path import Path + d = Path('/home/guido/bin') + for f in d.files('*.py'): + f.chmod(0o755) +""" + +from __future__ import unicode_literals + +import sys +import warnings +import os +import fnmatch +import glob +import shutil +import codecs +import hashlib +import errno +import tempfile +import functools +import operator +import re +import contextlib +import io +from distutils import dir_util +import importlib + +try: + import win32security +except ImportError: + pass + +try: + import pwd +except ImportError: + pass + +try: + import grp +except ImportError: + pass + +############################################################################## +# Python 2/3 support +PY3 = sys.version_info >= (3,) +PY2 = not PY3 + +string_types = str, +text_type = str +getcwdu = os.getcwd + +def surrogate_escape(error): + """ + Simulate the Python 3 ``surrogateescape`` handler, but for Python 2 only. + """ + chars = error.object[error.start:error.end] + assert len(chars) == 1 + val = ord(chars) + val += 0xdc00 + return __builtin__.unichr(val), error.end + +if PY2: + import __builtin__ + string_types = __builtin__.basestring, + text_type = __builtin__.unicode + getcwdu = os.getcwdu + codecs.register_error('surrogateescape', surrogate_escape) + +@contextlib.contextmanager +def io_error_compat(): + try: + yield + except IOError as io_err: + # On Python 2, io.open raises IOError; transform to OSError for + # future compatibility. 
+ os_err = OSError(*io_err.args) + os_err.filename = getattr(io_err, 'filename', None) + raise os_err + +############################################################################## + +__all__ = ['Path', 'CaseInsensitivePattern'] + + +LINESEPS = ['\r\n', '\r', '\n'] +U_LINESEPS = LINESEPS + ['\u0085', '\u2028', '\u2029'] +NEWLINE = re.compile('|'.join(LINESEPS)) +U_NEWLINE = re.compile('|'.join(U_LINESEPS)) +NL_END = re.compile(r'(?:{0})$'.format(NEWLINE.pattern)) +U_NL_END = re.compile(r'(?:{0})$'.format(U_NEWLINE.pattern)) + + +try: + import pkg_resources + __version__ = pkg_resources.require('path.py')[0].version +except Exception: + __version__ = 'unknown' + + +class TreeWalkWarning(Warning): + pass + + +# from jaraco.functools +def compose(*funcs): + compose_two = lambda f1, f2: lambda *args, **kwargs: f1(f2(*args, **kwargs)) + return functools.reduce(compose_two, funcs) + + +def simple_cache(func): + """ + Save results for the :meth:'path.using_module' classmethod. + When Python 3.2 is available, use functools.lru_cache instead. + """ + saved_results = {} + + def wrapper(cls, module): + if module in saved_results: + return saved_results[module] + saved_results[module] = func(cls, module) + return saved_results[module] + return wrapper + + +class ClassProperty(property): + def __get__(self, cls, owner): + return self.fget.__get__(None, owner)() + + +class multimethod(object): + """ + Acts like a classmethod when invoked from the class and like an + instancemethod when invoked from the instance. + """ + def __init__(self, func): + self.func = func + + def __get__(self, instance, owner): + return ( + functools.partial(self.func, owner) if instance is None + else functools.partial(self.func, owner, instance) + ) + + +class Path(text_type): + """ + Represents a filesystem path. + + For documentation on individual methods, consult their + counterparts in :mod:`os.path`. + + Some methods are additionally included from :mod:`shutil`. 
+ The functions are linked directly into the class namespace + such that they will be bound to the Path instance. For example, + ``Path(src).copy(target)`` is equivalent to + ``shutil.copy(src, target)``. Therefore, when referencing + the docs for these methods, assume `src` references `self`, + the Path instance. + """ + + module = os.path + """ The path module to use for path operations. + + .. seealso:: :mod:`os.path` + """ + + def __init__(self, other=''): + if other is None: + raise TypeError("Invalid initial value for path: None") + + @classmethod + @simple_cache + def using_module(cls, module): + subclass_name = cls.__name__ + '_' + module.__name__ + if PY2: + subclass_name = str(subclass_name) + bases = (cls,) + ns = {'module': module} + return type(subclass_name, bases, ns) + + @ClassProperty + @classmethod + def _next_class(cls): + """ + What class should be used to construct new instances from this class + """ + return cls + + @classmethod + def _always_unicode(cls, path): + """ + Ensure the path as retrieved from a Python API, such as :func:`os.listdir`, + is a proper Unicode string. + """ + if PY3 or isinstance(path, text_type): + return path + return path.decode(sys.getfilesystemencoding(), 'surrogateescape') + + # --- Special Python methods. + + def __repr__(self): + return '%s(%s)' % (type(self).__name__, super(Path, self).__repr__()) + + # Adding a Path and a string yields a Path. + def __add__(self, more): + try: + return self._next_class(super(Path, self).__add__(more)) + except TypeError: # Python bug + return NotImplemented + + def __radd__(self, other): + if not isinstance(other, string_types): + return NotImplemented + return self._next_class(other.__add__(self)) + + # The / operator joins Paths. + def __div__(self, rel): + """ fp.__div__(rel) == fp / rel == fp.joinpath(rel) + + Join two path components, adding a separator character if + needed. + + .. 
seealso:: :func:`os.path.join` + """ + return self._next_class(self.module.join(self, rel)) + + # Make the / operator work even when true division is enabled. + __truediv__ = __div__ + + # The / operator joins Paths the other way around + def __rdiv__(self, rel): + """ fp.__rdiv__(rel) == rel / fp + + Join two path components, adding a separator character if + needed. + + .. seealso:: :func:`os.path.join` + """ + return self._next_class(self.module.join(rel, self)) + + # Make the / operator work even when true division is enabled. + __rtruediv__ = __rdiv__ + + def __enter__(self): + self._old_dir = self.getcwd() + os.chdir(self) + return self + + def __exit__(self, *_): + os.chdir(self._old_dir) + + @classmethod + def getcwd(cls): + """ Return the current working directory as a path object. + + .. seealso:: :func:`os.getcwdu` + """ + return cls(getcwdu()) + + # + # --- Operations on Path strings. + + def abspath(self): + """ .. seealso:: :func:`os.path.abspath` """ + return self._next_class(self.module.abspath(self)) + + def normcase(self): + """ .. seealso:: :func:`os.path.normcase` """ + return self._next_class(self.module.normcase(self)) + + def normpath(self): + """ .. seealso:: :func:`os.path.normpath` """ + return self._next_class(self.module.normpath(self)) + + def realpath(self): + """ .. seealso:: :func:`os.path.realpath` """ + return self._next_class(self.module.realpath(self)) + + def expanduser(self): + """ .. seealso:: :func:`os.path.expanduser` """ + return self._next_class(self.module.expanduser(self)) + + def expandvars(self): + """ .. seealso:: :func:`os.path.expandvars` """ + return self._next_class(self.module.expandvars(self)) + + def dirname(self): + """ .. seealso:: :attr:`parent`, :func:`os.path.dirname` """ + return self._next_class(self.module.dirname(self)) + + def basename(self): + """ .. 
seealso:: :attr:`name`, :func:`os.path.basename` """ + return self._next_class(self.module.basename(self)) + + def expand(self): + """ Clean up a filename by calling :meth:`expandvars()`, + :meth:`expanduser()`, and :meth:`normpath()` on it. + + This is commonly everything needed to clean up a filename + read from a configuration file, for example. + """ + return self.expandvars().expanduser().normpath() + + @property + def namebase(self): + """ The same as :meth:`name`, but with one file extension stripped off. + + For example, + ``Path('/home/guido/python.tar.gz').name == 'python.tar.gz'``, + but + ``Path('/home/guido/python.tar.gz').namebase == 'python.tar'``. + """ + base, ext = self.module.splitext(self.name) + return base + + @property + def ext(self): + """ The file extension, for example ``'.py'``. """ + f, ext = self.module.splitext(self) + return ext + + @property + def drive(self): + """ The drive specifier, for example ``'C:'``. + + This is always empty on systems that don't use drive specifiers. + """ + drive, r = self.module.splitdrive(self) + return self._next_class(drive) + + parent = property( + dirname, None, None, + """ This path's parent directory, as a new Path object. + + For example, + ``Path('/usr/local/lib/libpython.so').parent == + Path('/usr/local/lib')`` + + .. seealso:: :meth:`dirname`, :func:`os.path.dirname` + """) + + name = property( + basename, None, None, + """ The name of this file or directory without the full path. + + For example, + ``Path('/usr/local/lib/libpython.so').name == 'libpython.so'`` + + .. seealso:: :meth:`basename`, :func:`os.path.basename` + """) + + def splitpath(self): + """ p.splitpath() -> Return ``(p.parent, p.name)``. + + .. seealso:: :attr:`parent`, :attr:`name`, :func:`os.path.split` + """ + parent, child = self.module.split(self) + return self._next_class(parent), child + + def splitdrive(self): + """ p.splitdrive() -> Return ``(p.drive, )``. + + Split the drive specifier from this path. 
If there is + no drive specifier, :samp:`{p.drive}` is empty, so the return value + is simply ``(Path(''), p)``. This is always the case on Unix. + + .. seealso:: :func:`os.path.splitdrive` + """ + drive, rel = self.module.splitdrive(self) + return self._next_class(drive), rel + + def splitext(self): + """ p.splitext() -> Return ``(p.stripext(), p.ext)``. + + Split the filename extension from this path and return + the two parts. Either part may be empty. + + The extension is everything from ``'.'`` to the end of the + last path segment. This has the property that if + ``(a, b) == p.splitext()``, then ``a + b == p``. + + .. seealso:: :func:`os.path.splitext` + """ + filename, ext = self.module.splitext(self) + return self._next_class(filename), ext + + def stripext(self): + """ p.stripext() -> Remove one file extension from the path. + + For example, ``Path('/home/guido/python.tar.gz').stripext()`` + returns ``Path('/home/guido/python.tar')``. + """ + return self.splitext()[0] + + def splitunc(self): + """ .. seealso:: :func:`os.path.splitunc` """ + unc, rest = self.module.splitunc(self) + return self._next_class(unc), rest + + @property + def uncshare(self): + """ + The UNC mount point for this path. + This is empty for paths on local drives. + """ + unc, r = self.module.splitunc(self) + return self._next_class(unc) + + @multimethod + def joinpath(cls, first, *others): + """ + Join first to zero or more :class:`Path` components, adding a separator + character (:samp:`{first}.module.sep`) if needed. Returns a new instance of + :samp:`{first}._next_class`. + + .. seealso:: :func:`os.path.join` + """ + if not isinstance(first, cls): + first = cls(first) + return first._next_class(first.module.join(first, *others)) + + def splitall(self): + r""" Return a list of the path components in this path. + + The first item in the list will be a Path. 
Its value will be + either :data:`os.curdir`, :data:`os.pardir`, empty, or the root + directory of this path (for example, ``'/'`` or ``'C:\\'``). The + other items in the list will be strings. + + ``path.Path.joinpath(*result)`` will yield the original path. + """ + parts = [] + loc = self + while loc != os.curdir and loc != os.pardir: + prev = loc + loc, child = prev.splitpath() + if loc == prev: + break + parts.append(child) + parts.append(loc) + parts.reverse() + return parts + + def relpath(self, start='.'): + """ Return this path as a relative path, + based from `start`, which defaults to the current working directory. + """ + cwd = self._next_class(start) + return cwd.relpathto(self) + + def relpathto(self, dest): + """ Return a relative path from `self` to `dest`. + + If there is no relative path from `self` to `dest`, for example if + they reside on different drives in Windows, then this returns + ``dest.abspath()``. + """ + origin = self.abspath() + dest = self._next_class(dest).abspath() + + orig_list = origin.normcase().splitall() + # Don't normcase dest! We want to preserve the case. + dest_list = dest.splitall() + + if orig_list[0] != self.module.normcase(dest_list[0]): + # Can't get here from there. + return dest + + # Find the location where the two paths start to differ. + i = 0 + for start_seg, dest_seg in zip(orig_list, dest_list): + if start_seg != self.module.normcase(dest_seg): + break + i += 1 + + # Now i is the point where the two paths diverge. + # Need a certain number of "os.pardir"s to work up + # from the origin to the point of divergence. + segments = [os.pardir] * (len(orig_list) - i) + # Need to add the diverging part of dest_list. + segments += dest_list[i:] + if len(segments) == 0: + # If they happen to be identical, use os.curdir. 
+ relpath = os.curdir + else: + relpath = self.module.join(*segments) + return self._next_class(relpath) + + # --- Listing, searching, walking, and matching + + def listdir(self, pattern=None): + """ D.listdir() -> List of items in this directory. + + Use :meth:`files` or :meth:`dirs` instead if you want a listing + of just files or just subdirectories. + + The elements of the list are Path objects. + + With the optional `pattern` argument, this only lists + items whose names match the given pattern. + + .. seealso:: :meth:`files`, :meth:`dirs` + """ + if pattern is None: + pattern = '*' + return [ + self / child + for child in map(self._always_unicode, os.listdir(self)) + if self._next_class(child).fnmatch(pattern) + ] + + def dirs(self, pattern=None): + """ D.dirs() -> List of this directory's subdirectories. + + The elements of the list are Path objects. + This does not walk recursively into subdirectories + (but see :meth:`walkdirs`). + + With the optional `pattern` argument, this only lists + directories whose names match the given pattern. For + example, ``d.dirs('build-*')``. + """ + return [p for p in self.listdir(pattern) if p.isdir()] + + def files(self, pattern=None): + """ D.files() -> List of the files in this directory. + + The elements of the list are Path objects. + This does not walk into subdirectories (see :meth:`walkfiles`). + + With the optional `pattern` argument, this only lists files + whose names match the given pattern. For example, + ``d.files('*.pyc')``. + """ + + return [p for p in self.listdir(pattern) if p.isfile()] + + def walk(self, pattern=None, errors='strict'): + """ D.walk() -> iterator over files and subdirs, recursively. + + The iterator yields Path objects naming each child item of + this directory and its descendants. This requires that + ``D.isdir()``. + + This performs a depth-first traversal of the directory tree. + Each directory is returned just before all its children. 
+ + The `errors=` keyword argument controls behavior when an + error occurs. The default is ``'strict'``, which causes an + exception. Other allowed values are ``'warn'`` (which + reports the error via :func:`warnings.warn()`), and ``'ignore'``. + `errors` may also be an arbitrary callable taking a msg parameter. + """ + class Handlers: + def strict(msg): + raise + + def warn(msg): + warnings.warn(msg, TreeWalkWarning) + + def ignore(msg): + pass + + if not callable(errors) and errors not in vars(Handlers): + raise ValueError("invalid errors parameter") + errors = vars(Handlers).get(errors, errors) + + try: + childList = self.listdir() + except Exception: + exc = sys.exc_info()[1] + tmpl = "Unable to list directory '%(self)s': %(exc)s" + msg = tmpl % locals() + errors(msg) + return + + for child in childList: + if pattern is None or child.fnmatch(pattern): + yield child + try: + isdir = child.isdir() + except Exception: + exc = sys.exc_info()[1] + tmpl = "Unable to access '%(child)s': %(exc)s" + msg = tmpl % locals() + errors(msg) + isdir = False + + if isdir: + for item in child.walk(pattern, errors): + yield item + + def walkdirs(self, pattern=None, errors='strict'): + """ D.walkdirs() -> iterator over subdirs, recursively. + + With the optional `pattern` argument, this yields only + directories whose names match the given pattern. For + example, ``mydir.walkdirs('*test')`` yields only directories + with names ending in ``'test'``. + + The `errors=` keyword argument controls behavior when an + error occurs. The default is ``'strict'``, which causes an + exception. The other allowed values are ``'warn'`` (which + reports the error via :func:`warnings.warn()`), and ``'ignore'``. 
+ """ + if errors not in ('strict', 'warn', 'ignore'): + raise ValueError("invalid errors parameter") + + try: + dirs = self.dirs() + except Exception: + if errors == 'ignore': + return + elif errors == 'warn': + warnings.warn( + "Unable to list directory '%s': %s" + % (self, sys.exc_info()[1]), + TreeWalkWarning) + return + else: + raise + + for child in dirs: + if pattern is None or child.fnmatch(pattern): + yield child + for subsubdir in child.walkdirs(pattern, errors): + yield subsubdir + + def walkfiles(self, pattern=None, errors='strict'): + """ D.walkfiles() -> iterator over files in D, recursively. + + The optional argument `pattern` limits the results to files + with names that match the pattern. For example, + ``mydir.walkfiles('*.tmp')`` yields only files with the ``.tmp`` + extension. + """ + if errors not in ('strict', 'warn', 'ignore'): + raise ValueError("invalid errors parameter") + + try: + childList = self.listdir() + except Exception: + if errors == 'ignore': + return + elif errors == 'warn': + warnings.warn( + "Unable to list directory '%s': %s" + % (self, sys.exc_info()[1]), + TreeWalkWarning) + return + else: + raise + + for child in childList: + try: + isfile = child.isfile() + isdir = not isfile and child.isdir() + except: + if errors == 'ignore': + continue + elif errors == 'warn': + warnings.warn( + "Unable to access '%s': %s" + % (self, sys.exc_info()[1]), + TreeWalkWarning) + continue + else: + raise + + if isfile: + if pattern is None or child.fnmatch(pattern): + yield child + elif isdir: + for f in child.walkfiles(pattern, errors): + yield f + + def fnmatch(self, pattern, normcase=None): + """ Return ``True`` if `self.name` matches the given `pattern`. + + `pattern` - A filename pattern with wildcards, + for example ``'*.py'``. If the pattern contains a `normcase` + attribute, it is applied to the name and path prior to comparison. + + `normcase` - (optional) A function used to normalize the pattern and + filename before matching. 
Defaults to :meth:`self.module`, which defaults + to :meth:`os.path.normcase`. + + .. seealso:: :func:`fnmatch.fnmatch` + """ + default_normcase = getattr(pattern, 'normcase', self.module.normcase) + normcase = normcase or default_normcase + name = normcase(self.name) + pattern = normcase(pattern) + return fnmatch.fnmatchcase(name, pattern) + + def glob(self, pattern): + """ Return a list of Path objects that match the pattern. + + `pattern` - a path relative to this directory, with wildcards. + + For example, ``Path('/users').glob('*/bin/*')`` returns a list + of all the files users have in their :file:`bin` directories. + + .. seealso:: :func:`glob.glob` + """ + cls = self._next_class + return [cls(s) for s in glob.glob(self / pattern)] + + # + # --- Reading or writing an entire file at once. + + def open(self, *args, **kwargs): + """ Open this file and return a corresponding :class:`file` object. + + Keyword arguments work as in :func:`io.open`. If the file cannot be + opened, an :class:`~exceptions.OSError` is raised. + """ + with io_error_compat(): + return io.open(self, *args, **kwargs) + + def bytes(self): + """ Open this file, read all bytes, return them as a string. """ + with self.open('rb') as f: + return f.read() + + def chunks(self, size, *args, **kwargs): + """ Returns a generator yielding chunks of the file, so it can + be read piece by piece with a simple for loop. + + Any argument you pass after `size` will be passed to :meth:`open`. + + :example: + + >>> hash = hashlib.md5() + >>> for chunk in Path("path.py").chunks(8192, mode='rb'): + ... hash.update(chunk) + + This will read the file by chunks of 8192 bytes. + """ + with self.open(*args, **kwargs) as f: + for chunk in iter(lambda: f.read(size) or None, None): + yield chunk + + def write_bytes(self, bytes, append=False): + """ Open this file and write the given bytes to it. + + Default behavior is to overwrite any existing file. + Call ``p.write_bytes(bytes, append=True)`` to append instead. 
+ """ + if append: + mode = 'ab' + else: + mode = 'wb' + with self.open(mode) as f: + f.write(bytes) + + def text(self, encoding=None, errors='strict'): + r""" Open this file, read it in, return the content as a string. + + All newline sequences are converted to ``'\n'``. Keyword arguments + will be passed to :meth:`open`. + + .. seealso:: :meth:`lines` + """ + with self.open(mode='r', encoding=encoding, errors=errors) as f: + return U_NEWLINE.sub('\n', f.read()) + + def write_text(self, text, encoding=None, errors='strict', + linesep=os.linesep, append=False): + r""" Write the given text to this file. + + The default behavior is to overwrite any existing file; + to append instead, use the `append=True` keyword argument. + + There are two differences between :meth:`write_text` and + :meth:`write_bytes`: newline handling and Unicode handling. + See below. + + Parameters: + + `text` - str/unicode - The text to be written. + + `encoding` - str - The Unicode encoding that will be used. + This is ignored if `text` isn't a Unicode string. + + `errors` - str - How to handle Unicode encoding errors. + Default is ``'strict'``. See ``help(unicode.encode)`` for the + options. This is ignored if `text` isn't a Unicode + string. + + `linesep` - keyword argument - str/unicode - The sequence of + characters to be used to mark end-of-line. The default is + :data:`os.linesep`. You can also specify ``None`` to + leave all newlines as they are in `text`. + + `append` - keyword argument - bool - Specifies what to do if + the file already exists (``True``: append to the end of it; + ``False``: overwrite it.) The default is ``False``. + + + --- Newline handling. + + ``write_text()`` converts all standard end-of-line sequences + (``'\n'``, ``'\r'``, and ``'\r\n'``) to your platform's default + end-of-line sequence (see :data:`os.linesep`; on Windows, for example, + the end-of-line marker is ``'\r\n'``). 
+ + If you don't like your platform's default, you can override it + using the `linesep=` keyword argument. If you specifically want + ``write_text()`` to preserve the newlines as-is, use ``linesep=None``. + + This applies to Unicode text the same as to 8-bit text, except + there are three additional standard Unicode end-of-line sequences: + ``u'\x85'``, ``u'\r\x85'``, and ``u'\u2028'``. + + (This is slightly different from when you open a file for + writing with ``fopen(filename, "w")`` in C or ``open(filename, 'w')`` + in Python.) + + + --- Unicode + + If `text` isn't Unicode, then apart from newline handling, the + bytes are written verbatim to the file. The `encoding` and + `errors` arguments are not used and must be omitted. + + If `text` is Unicode, it is first converted to :func:`bytes` using the + specified `encoding` (or the default encoding if `encoding` + isn't specified). The `errors` argument applies only to this + conversion. + + """ + if isinstance(text, text_type): + if linesep is not None: + text = U_NEWLINE.sub(linesep, text) + text = text.encode(encoding or sys.getdefaultencoding(), errors) + else: + assert encoding is None + text = NEWLINE.sub(linesep, text) + self.write_bytes(text, append=append) + + def lines(self, encoding=None, errors='strict', retain=True): + r""" Open this file, read all lines, return them in a list. + + Optional arguments: + `encoding` - The Unicode encoding (or character set) of + the file. The default is ``None``, meaning the content + of the file is read as 8-bit characters and returned + as a list of (non-Unicode) str objects. + `errors` - How to handle Unicode errors; see help(str.decode) + for the options. Default is ``'strict'``. + `retain` - If ``True``, retain newline characters; but all newline + character combinations (``'\r'``, ``'\n'``, ``'\r\n'``) are + translated to ``'\n'``. If ``False``, newline characters are + stripped off. Default is ``True``. + + This uses ``'U'`` mode. + + .. 
seealso:: :meth:`text` + """ + if encoding is None and retain: + with self.open('U') as f: + return f.readlines() + else: + return self.text(encoding, errors).splitlines(retain) + + def write_lines(self, lines, encoding=None, errors='strict', + linesep=os.linesep, append=False): + r""" Write the given lines of text to this file. + + By default this overwrites any existing file at this path. + + This puts a platform-specific newline sequence on every line. + See `linesep` below. + + `lines` - A list of strings. + + `encoding` - A Unicode encoding to use. This applies only if + `lines` contains any Unicode strings. + + `errors` - How to handle errors in Unicode encoding. This + also applies only to Unicode strings. + + linesep - The desired line-ending. This line-ending is + applied to every line. If a line already has any + standard line ending (``'\r'``, ``'\n'``, ``'\r\n'``, + ``u'\x85'``, ``u'\r\x85'``, ``u'\u2028'``), that will + be stripped off and this will be used instead. The + default is os.linesep, which is platform-dependent + (``'\r\n'`` on Windows, ``'\n'`` on Unix, etc.). + Specify ``None`` to write the lines as-is, like + :meth:`file.writelines`. + + Use the keyword argument ``append=True`` to append lines to the + file. The default is to overwrite the file. + + .. warning :: + + When you use this with Unicode data, if the encoding of the + existing data in the file is different from the encoding + you specify with the `encoding=` parameter, the result is + mixed-encoding data, which can really confuse someone trying + to read the file later. + """ + with self.open('ab' if append else 'wb') as f: + for l in lines: + isUnicode = isinstance(l, text_type) + if linesep is not None: + pattern = U_NL_END if isUnicode else NL_END + l = pattern.sub('', l) + linesep + if isUnicode: + l = l.encode(encoding or sys.getdefaultencoding(), errors) + f.write(l) + + def read_md5(self): + """ Calculate the md5 hash for this file. + + This reads through the entire file. 
+ + .. seealso:: :meth:`read_hash` + """ + return self.read_hash('md5') + + def _hash(self, hash_name): + """ Returns a hash object for the file at the current path. + + `hash_name` should be a hash algo name (such as ``'md5'`` or ``'sha1'``) + that's available in the :mod:`hashlib` module. + """ + m = hashlib.new(hash_name) + for chunk in self.chunks(8192, mode="rb"): + m.update(chunk) + return m + + def read_hash(self, hash_name): + """ Calculate given hash for this file. + + List of supported hashes can be obtained from :mod:`hashlib` package. + This reads the entire file. + + .. seealso:: :meth:`hashlib.hash.digest` + """ + return self._hash(hash_name).digest() + + def read_hexhash(self, hash_name): + """ Calculate given hash for this file, returning hexdigest. + + List of supported hashes can be obtained from :mod:`hashlib` package. + This reads the entire file. + + .. seealso:: :meth:`hashlib.hash.hexdigest` + """ + return self._hash(hash_name).hexdigest() + + # --- Methods for querying the filesystem. + # N.B. On some platforms, the os.path functions may be implemented in C + # (e.g. isdir on Windows, Python 3.2.2), and compiled functions don't get + # bound. Playing it safe and wrapping them all in method calls. + + def isabs(self): + """ .. seealso:: :func:`os.path.isabs` """ + return self.module.isabs(self) + + def exists(self): + """ .. seealso:: :func:`os.path.exists` """ + return self.module.exists(self) + + def isdir(self): + """ .. seealso:: :func:`os.path.isdir` """ + return self.module.isdir(self) + + def isfile(self): + """ .. seealso:: :func:`os.path.isfile` """ + return self.module.isfile(self) + + def islink(self): + """ .. seealso:: :func:`os.path.islink` """ + return self.module.islink(self) + + def ismount(self): + """ .. seealso:: :func:`os.path.ismount` """ + return self.module.ismount(self) + + def samefile(self, other): + """ .. 
seealso:: :func:`os.path.samefile` """ + if not hasattr(self.module, 'samefile'): + other = Path(other).realpath().normpath().normcase() + return self.realpath().normpath().normcase() == other + return self.module.samefile(self, other) + + def getatime(self): + """ .. seealso:: :attr:`atime`, :func:`os.path.getatime` """ + return self.module.getatime(self) + + atime = property( + getatime, None, None, + """ Last access time of the file. + + .. seealso:: :meth:`getatime`, :func:`os.path.getatime` + """) + + def getmtime(self): + """ .. seealso:: :attr:`mtime`, :func:`os.path.getmtime` """ + return self.module.getmtime(self) + + mtime = property( + getmtime, None, None, + """ Last-modified time of the file. + + .. seealso:: :meth:`getmtime`, :func:`os.path.getmtime` + """) + + def getctime(self): + """ .. seealso:: :attr:`ctime`, :func:`os.path.getctime` """ + return self.module.getctime(self) + + ctime = property( + getctime, None, None, + """ Creation time of the file. + + .. seealso:: :meth:`getctime`, :func:`os.path.getctime` + """) + + def getsize(self): + """ .. seealso:: :attr:`size`, :func:`os.path.getsize` """ + return self.module.getsize(self) + + size = property( + getsize, None, None, + """ Size of the file, in bytes. + + .. seealso:: :meth:`getsize`, :func:`os.path.getsize` + """) + + if hasattr(os, 'access'): + def access(self, mode): + """ Return ``True`` if current user has access to this path. + + mode - One of the constants :data:`os.F_OK`, :data:`os.R_OK`, + :data:`os.W_OK`, :data:`os.X_OK` + + .. seealso:: :func:`os.access` + """ + return os.access(self, mode) + + def stat(self): + """ Perform a ``stat()`` system call on this path. + + .. seealso:: :meth:`lstat`, :func:`os.stat` + """ + return os.stat(self) + + def lstat(self): + """ Like :meth:`stat`, but do not follow symbolic links. + + .. 
seealso:: :meth:`stat`, :func:`os.lstat` + """ + return os.lstat(self) + + def __get_owner_windows(self): + """ + Return the name of the owner of this file or directory. Follow + symbolic links. + + Return a name of the form ``r'DOMAIN\\User Name'``; may be a group. + + .. seealso:: :attr:`owner` + """ + desc = win32security.GetFileSecurity( + self, win32security.OWNER_SECURITY_INFORMATION) + sid = desc.GetSecurityDescriptorOwner() + account, domain, typecode = win32security.LookupAccountSid(None, sid) + return domain + '\\' + account + + def __get_owner_unix(self): + """ + Return the name of the owner of this file or directory. Follow + symbolic links. + + .. seealso:: :attr:`owner` + """ + st = self.stat() + return pwd.getpwuid(st.st_uid).pw_name + + def __get_owner_not_implemented(self): + raise NotImplementedError("Ownership not available on this platform.") + + if 'win32security' in globals(): + get_owner = __get_owner_windows + elif 'pwd' in globals(): + get_owner = __get_owner_unix + else: + get_owner = __get_owner_not_implemented + + owner = property( + get_owner, None, None, + """ Name of the owner of this file or directory. + + .. seealso:: :meth:`get_owner`""") + + if hasattr(os, 'statvfs'): + def statvfs(self): + """ Perform a ``statvfs()`` system call on this path. + + .. seealso:: :func:`os.statvfs` + """ + return os.statvfs(self) + + if hasattr(os, 'pathconf'): + def pathconf(self, name): + """ .. seealso:: :func:`os.pathconf` """ + return os.pathconf(self, name) + + # + # --- Modifying operations on files and directories + + def utime(self, times): + """ Set the access and modified times of this file. + + .. seealso:: :func:`os.utime` + """ + os.utime(self, times) + return self + + def chmod(self, mode): + """ + Set the mode. May be the new mode (os.chmod behavior) or a `symbolic + mode `_. + + .. 
seealso:: :func:`os.chmod` + """ + if isinstance(mode, string_types): + mask = _multi_permission_mask(mode) + mode = mask(self.stat().st_mode) + os.chmod(self, mode) + return self + + def chown(self, uid=-1, gid=-1): + """ + Change the owner and group by names rather than the uid or gid numbers. + + .. seealso:: :func:`os.chown` + """ + if hasattr(os, 'chown'): + if 'pwd' in globals() and isinstance(uid, string_types): + uid = pwd.getpwnam(uid).pw_uid + if 'grp' in globals() and isinstance(gid, string_types): + gid = grp.getgrnam(gid).gr_gid + os.chown(self, uid, gid) + else: + raise NotImplementedError("Ownership not available on this platform.") + return self + + def rename(self, new): + """ .. seealso:: :func:`os.rename` """ + os.rename(self, new) + return self._next_class(new) + + def renames(self, new): + """ .. seealso:: :func:`os.renames` """ + os.renames(self, new) + return self._next_class(new) + + # + # --- Create/delete operations on directories + + def mkdir(self, mode=0o777): + """ .. seealso:: :func:`os.mkdir` """ + os.mkdir(self, mode) + return self + + def mkdir_p(self, mode=0o777): + """ Like :meth:`mkdir`, but does not raise an exception if the + directory already exists. """ + try: + self.mkdir(mode) + except OSError: + _, e, _ = sys.exc_info() + if e.errno != errno.EEXIST: + raise + return self + + def makedirs(self, mode=0o777): + """ .. seealso:: :func:`os.makedirs` """ + os.makedirs(self, mode) + return self + + def makedirs_p(self, mode=0o777): + """ Like :meth:`makedirs`, but does not raise an exception if the + directory already exists. """ + try: + self.makedirs(mode) + except OSError: + _, e, _ = sys.exc_info() + if e.errno != errno.EEXIST: + raise + return self + + def rmdir(self): + """ .. seealso:: :func:`os.rmdir` """ + os.rmdir(self) + return self + + def rmdir_p(self): + """ Like :meth:`rmdir`, but does not raise an exception if the + directory is not empty or does not exist. 
""" + try: + self.rmdir() + except OSError: + _, e, _ = sys.exc_info() + if e.errno != errno.ENOTEMPTY and e.errno != errno.EEXIST: + raise + return self + + def removedirs(self): + """ .. seealso:: :func:`os.removedirs` """ + os.removedirs(self) + return self + + def removedirs_p(self): + """ Like :meth:`removedirs`, but does not raise an exception if the + directory is not empty or does not exist. """ + try: + self.removedirs() + except OSError: + _, e, _ = sys.exc_info() + if e.errno != errno.ENOTEMPTY and e.errno != errno.EEXIST: + raise + return self + + # --- Modifying operations on files + + def touch(self): + """ Set the access/modified times of this file to the current time. + Create the file if it does not exist. + """ + fd = os.open(self, os.O_WRONLY | os.O_CREAT, 0o666) + os.close(fd) + os.utime(self, None) + return self + + def remove(self): + """ .. seealso:: :func:`os.remove` """ + os.remove(self) + return self + + def remove_p(self): + """ Like :meth:`remove`, but does not raise an exception if the + file does not exist. """ + try: + self.unlink() + except OSError: + _, e, _ = sys.exc_info() + if e.errno != errno.ENOENT: + raise + return self + + def unlink(self): + """ .. seealso:: :func:`os.unlink` """ + os.unlink(self) + return self + + def unlink_p(self): + """ Like :meth:`unlink`, but does not raise an exception if the + file does not exist. """ + self.remove_p() + return self + + # --- Links + + if hasattr(os, 'link'): + def link(self, newpath): + """ Create a hard link at `newpath`, pointing to this file. + + .. seealso:: :func:`os.link` + """ + os.link(self, newpath) + return self._next_class(newpath) + + if hasattr(os, 'symlink'): + def symlink(self, newlink): + """ Create a symbolic link at `newlink`, pointing here. + + .. seealso:: :func:`os.symlink` + """ + os.symlink(self, newlink) + return self._next_class(newlink) + + if hasattr(os, 'readlink'): + def readlink(self): + """ Return the path to which this symbolic link points. 
+ + The result may be an absolute or a relative path. + + .. seealso:: :meth:`readlinkabs`, :func:`os.readlink` + """ + return self._next_class(os.readlink(self)) + + def readlinkabs(self): + """ Return the path to which this symbolic link points. + + The result is always an absolute path. + + .. seealso:: :meth:`readlink`, :func:`os.readlink` + """ + p = self.readlink() + if p.isabs(): + return p + else: + return (self.parent / p).abspath() + + # High-level functions from shutil + # These functions will be bound to the instance such that + # Path(name).copy(target) will invoke shutil.copy(name, target) + + copyfile = shutil.copyfile + copymode = shutil.copymode + copystat = shutil.copystat + copy = shutil.copy + copy2 = shutil.copy2 + copytree = shutil.copytree + if hasattr(shutil, 'move'): + move = shutil.move + rmtree = shutil.rmtree + + def rmtree_p(self): + """ Like :meth:`rmtree`, but does not raise an exception if the + directory does not exist. """ + try: + self.rmtree() + except OSError: + _, e, _ = sys.exc_info() + if e.errno != errno.ENOENT: + raise + return self + + def chdir(self): + """ .. seealso:: :func:`os.chdir` """ + os.chdir(self) + + cd = chdir + + def merge_tree(self, dst, symlinks=False, *args, **kwargs): + """ + Copy entire contents of self to dst, overwriting existing + contents in dst with those in self. + + If the additional keyword `update` is True, each + `src` will only be copied if `dst` does not exist, + or `src` is newer than `dst`. + + Note that the technique employed stages the files in a temporary + directory first, so this function is not suitable for merging + trees with large files, especially if the temporary directory + is not capable of storing a copy of the entire source tree. + """ + update = kwargs.pop('update', False) + with tempdir() as _temp_dir: + # first copy the tree to a stage directory to support + # the parameters and behavior of copytree. 
+ stage = _temp_dir / str(hash(self)) + self.copytree(stage, symlinks, *args, **kwargs) + # now copy everything from the stage directory using + # the semantics of dir_util.copy_tree + dir_util.copy_tree(stage, dst, preserve_symlinks=symlinks, + update=update) + + # + # --- Special stuff from os + + if hasattr(os, 'chroot'): + def chroot(self): + """ .. seealso:: :func:`os.chroot` """ + os.chroot(self) + + if hasattr(os, 'startfile'): + def startfile(self): + """ .. seealso:: :func:`os.startfile` """ + os.startfile(self) + return self + + # in-place re-writing, courtesy of Martijn Pieters + # http://www.zopatista.com/python/2013/11/26/inplace-file-rewriting/ + @contextlib.contextmanager + def in_place(self, mode='r', buffering=-1, encoding=None, errors=None, + newline=None, backup_extension=None): + """ + A context in which a file may be re-written in-place with new content. + + Yields a tuple of :samp:`({readable}, {writable})` file objects, where `writable` + replaces `readable`. + + If an exception occurs, the old file is restored, removing the + written data. + + Mode *must not* use ``'w'``, ``'a'``, or ``'+'``; only read-only-modes are + allowed. A :exc:`ValueError` is raised on invalid modes. + + For example, to add line numbers to a file:: + + p = Path(filename) + assert p.isfile() + with p.in_place() as (reader, writer): + for number, line in enumerate(reader, 1): + writer.write('{0:3}: '.format(number))) + writer.write(line) + + Thereafter, the file at `filename` will have line numbers in it. 
+ """ + import io + + if set(mode).intersection('wa+'): + raise ValueError('Only read-only file modes can be used') + + # move existing file to backup, create new file with same permissions + # borrowed extensively from the fileinput module + backup_fn = self + (backup_extension or os.extsep + 'bak') + try: + os.unlink(backup_fn) + except os.error: + pass + os.rename(self, backup_fn) + readable = io.open(backup_fn, mode, buffering=buffering, + encoding=encoding, errors=errors, newline=newline) + try: + perm = os.fstat(readable.fileno()).st_mode + except OSError: + writable = open(self, 'w' + mode.replace('r', ''), + buffering=buffering, encoding=encoding, errors=errors, + newline=newline) + else: + os_mode = os.O_CREAT | os.O_WRONLY | os.O_TRUNC + if hasattr(os, 'O_BINARY'): + os_mode |= os.O_BINARY + fd = os.open(self, os_mode, perm) + writable = io.open(fd, "w" + mode.replace('r', ''), + buffering=buffering, encoding=encoding, errors=errors, + newline=newline) + try: + if hasattr(os, 'chmod'): + os.chmod(self, perm) + except OSError: + pass + try: + yield readable, writable + except Exception: + # move backup back + readable.close() + writable.close() + try: + os.unlink(self) + except os.error: + pass + os.rename(backup_fn, self) + raise + else: + readable.close() + writable.close() + finally: + try: + os.unlink(backup_fn) + except os.error: + pass + + @ClassProperty + @classmethod + def special(cls): + """ + Return a SpecialResolver object suitable referencing a suitable + directory for the relevant platform for the given + type of content. + + For example, to get a user config directory, invoke: + + dir = Path.special().user.config + + Uses the `appdirs + `_ to resolve + the paths in a platform-friendly way. + + To create a config directory for 'My App', consider: + + dir = Path.special("My App").user.config.makedirs_p() + + If the ``appdirs`` module is not installed, invocation + of special will raise an ImportError. 
+ """ + return functools.partial(SpecialResolver, cls) + + +class SpecialResolver(object): + class ResolverScope: + def __init__(self, paths, scope): + self.paths = paths + self.scope = scope + + def __getattr__(self, class_): + return self.paths.get_dir(self.scope, class_) + + def __init__(self, path_class, *args, **kwargs): + appdirs = importlib.import_module('appdirs') + + # let appname default to None until + # https://github.com/ActiveState/appdirs/issues/55 is solved. + not args and kwargs.setdefault('appname', None) + + vars(self).update( + path_class=path_class, + wrapper=appdirs.AppDirs(*args, **kwargs), + ) + + def __getattr__(self, scope): + return self.ResolverScope(self, scope) + + def get_dir(self, scope, class_): + """ + Return the callable function from appdirs, but with the + result wrapped in self.path_class + """ + prop_name = '{scope}_{class_}_dir'.format(**locals()) + value = getattr(self.wrapper, prop_name) + MultiPath = Multi.for_class(self.path_class) + return MultiPath.detect(value) + + +class Multi: + """ + A mix-in for a Path which may contain multiple Path separated by pathsep. + """ + @classmethod + def for_class(cls, path_cls): + name = 'Multi' + path_cls.__name__ + if PY2: + name = str(name) + return type(name, (cls, path_cls), {}) + + @classmethod + def detect(cls, input): + if os.pathsep not in input: + cls = cls._next_class + return cls(input) + + def __iter__(self): + return iter(map(self._next_class, self.split(os.pathsep))) + + @ClassProperty + @classmethod + def _next_class(cls): + """ + Multi-subclasses should use the parent class + """ + return next( + class_ + for class_ in cls.__mro__ + if not issubclass(class_, Multi) + ) + + +class tempdir(Path): + """ + A temporary directory via :func:`tempfile.mkdtemp`, and constructed with the + same parameters that you can use as a context manager. + + Example: + + with tempdir() as d: + # do stuff with the Path object "d" + + # here the directory is deleted automatically + + .. 
seealso:: :func:`tempfile.mkdtemp`
+    """
+
+    @ClassProperty
+    @classmethod
+    def _next_class(cls):
+        return Path
+
+    def __new__(cls, *args, **kwargs):
+        dirname = tempfile.mkdtemp(*args, **kwargs)
+        return super(tempdir, cls).__new__(cls, dirname)
+
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, exc_type, exc_value, traceback):
+        if not exc_value:
+            self.rmtree()
+
+
+def _multi_permission_mask(mode):
+    """
+    Support multiple, comma-separated Unix chmod symbolic modes.
+
+    >>> _multi_permission_mask('a=r,u+w')(0) == 0o644
+    True
+    """
+    compose = lambda f, g: lambda *args, **kwargs: g(f(*args, **kwargs))
+    return functools.reduce(compose, map(_permission_mask, mode.split(',')))
+
+
+def _permission_mask(mode):
+    """
+    Convert a Unix chmod symbolic mode like ``'ugo+rwx'`` to a function
+    suitable for applying to a mask to affect that change.
+
+    >>> mask = _permission_mask('ugo+rwx')
+    >>> mask(0o554) == 0o777
+    True
+
+    >>> _permission_mask('go-x')(0o777) == 0o766
+    True
+
+    >>> _permission_mask('o-x')(0o445) == 0o444
+    True
+
+    >>> _permission_mask('a+x')(0) == 0o111
+    True
+
+    >>> _permission_mask('a=rw')(0o057) == 0o666
+    True
+
+    >>> _permission_mask('u=x')(0o666) == 0o166
+    True
+
+    >>> _permission_mask('g=')(0o157) == 0o107
+    True
+    """
+    # parse the symbolic mode
+    parsed = re.match('(?P<who>[ugoa]+)(?P<op>[-+=])(?P<what>[rwx]*)$', mode)
+    if not parsed:
+        raise ValueError("Unrecognized symbolic mode", mode)
+
+    # generate a mask representing the specified permission
+    spec_map = dict(r=4, w=2, x=1)
+    specs = (spec_map[perm] for perm in parsed.group('what'))
+    spec = functools.reduce(operator.or_, specs, 0)
+
+    # now apply spec to each subject in who
+    shift_map = dict(u=6, g=3, o=0)
+    who = parsed.group('who').replace('a', 'ugo')
+    masks = (spec << shift_map[subj] for subj in who)
+    mask = functools.reduce(operator.or_, masks)
+
+    op = parsed.group('op')
+
+    # if op is -, invert the mask
+    if op == '-':
+        mask 
^= 0o777 + + # if op is =, retain extant values for unreferenced subjects + if op == '=': + masks = (0o7 << shift_map[subj] for subj in who) + retain = functools.reduce(operator.or_, masks) ^ 0o777 + + op_map = { + '+': operator.or_, + '-': operator.and_, + '=': lambda mask, target: target & retain ^ mask, + } + return functools.partial(op_map[op], mask) + + +class CaseInsensitivePattern(text_type): + """ + A string with a ``'normcase'`` property, suitable for passing to + :meth:`listdir`, :meth:`dirs`, :meth:`files`, :meth:`walk`, + :meth:`walkdirs`, or :meth:`walkfiles` to match case-insensitive. + + For example, to get all files ending in .py, .Py, .pY, or .PY in the + current directory:: + + from path import Path, CaseInsensitivePattern as ci + Path('.').files(ci('*.py')) + """ + + @property + def normcase(self): + return __import__('ntpath').normcase + +######################## +# Backward-compatibility +class path(Path): + def __new__(cls, *args, **kwargs): + msg = "path is deprecated. Use Path instead." + warnings.warn(msg, DeprecationWarning) + return Path.__new__(cls, *args, **kwargs) + + +__all__ += ['path'] +######################## diff --git a/libs/test_path.py b/libs/test_path.py new file mode 100644 index 00000000..f6aa1b67 --- /dev/null +++ b/libs/test_path.py @@ -0,0 +1,1119 @@ +# -*- coding: utf-8 -*- + +""" +Tests for the path module. + +This suite runs on Linux, OS X, and Windows right now. To extend the +platform support, just add appropriate pathnames for your +platform (os.name) in each place where the p() function is called. +Then report the result. If you can't get the test to run at all on +your platform, there's probably a bug in path.py -- please report the issue +in the issue tracker at https://github.com/jaraco/path.py. + +TestScratchDir.test_touch() takes a while to run. It sleeps a few +seconds to allow some time to pass between calls to check the modify +time on files. 
+""" + +from __future__ import unicode_literals, absolute_import, print_function + +import codecs +import os +import sys +import shutil +import time +import ntpath +import posixpath +import textwrap +import platform +import importlib + +import pytest + +from path import Path, tempdir +from path import CaseInsensitivePattern as ci +from path import SpecialResolver +from path import Multi + + +def p(**choices): + """ Choose a value from several possible values, based on os.name """ + return choices[os.name] + + +class TestBasics: + def test_relpath(self): + root = Path(p(nt='C:\\', posix='/')) + foo = root / 'foo' + quux = foo / 'quux' + bar = foo / 'bar' + boz = bar / 'Baz' / 'Boz' + up = Path(os.pardir) + + # basics + assert root.relpathto(boz) == Path('foo')/'bar'/'Baz'/'Boz' + assert bar.relpathto(boz) == Path('Baz')/'Boz' + assert quux.relpathto(boz) == up/'bar'/'Baz'/'Boz' + assert boz.relpathto(quux) == up/up/up/'quux' + assert boz.relpathto(bar) == up/up + + # Path is not the first element in concatenation + assert root.relpathto(boz) == 'foo'/Path('bar')/'Baz'/'Boz' + + # x.relpathto(x) == curdir + assert root.relpathto(root) == os.curdir + assert boz.relpathto(boz) == os.curdir + # Make sure case is properly noted (or ignored) + assert boz.relpathto(boz.normcase()) == os.curdir + + # relpath() + cwd = Path(os.getcwd()) + assert boz.relpath() == cwd.relpathto(boz) + + if os.name == 'nt': + # Check relpath across drives. + d = Path('D:\\') + assert d.relpathto(boz) == boz + + def test_construction_from_none(self): + """ + + """ + try: + Path(None) + except TypeError: + pass + else: + raise Exception("DID NOT RAISE") + + def test_construction_from_int(self): + """ + Path class will construct a path as a string of the number + """ + assert Path(1) == '1' + + def test_string_compatibility(self): + """ Test compatibility with ordinary strings. 
""" + x = Path('xyzzy') + assert x == 'xyzzy' + assert x == str('xyzzy') + + # sorting + items = [Path('fhj'), + Path('fgh'), + 'E', + Path('d'), + 'A', + Path('B'), + 'c'] + items.sort() + assert items == ['A', 'B', 'E', 'c', 'd', 'fgh', 'fhj'] + + # Test p1/p1. + p1 = Path("foo") + p2 = Path("bar") + assert p1/p2 == p(nt='foo\\bar', posix='foo/bar') + + def test_properties(self): + # Create sample path object. + f = p(nt='C:\\Program Files\\Python\\Lib\\xyzzy.py', + posix='/usr/local/python/lib/xyzzy.py') + f = Path(f) + + # .parent + nt_lib = 'C:\\Program Files\\Python\\Lib' + posix_lib = '/usr/local/python/lib' + expected = p(nt=nt_lib, posix=posix_lib) + assert f.parent == expected + + # .name + assert f.name == 'xyzzy.py' + assert f.parent.name == p(nt='Lib', posix='lib') + + # .ext + assert f.ext == '.py' + assert f.parent.ext == '' + + # .drive + assert f.drive == p(nt='C:', posix='') + + def test_methods(self): + # .abspath() + assert Path(os.curdir).abspath() == os.getcwd() + + # .getcwd() + cwd = Path.getcwd() + assert isinstance(cwd, Path) + assert cwd == os.getcwd() + + def test_UNC(self): + if hasattr(os.path, 'splitunc'): + p = Path(r'\\python1\share1\dir1\file1.txt') + assert p.uncshare == r'\\python1\share1' + assert p.splitunc() == os.path.splitunc(str(p)) + + def test_explicit_module(self): + """ + The user may specify an explicit path module to use. + """ + nt_ok = Path.using_module(ntpath)(r'foo\bar\baz') + posix_ok = Path.using_module(posixpath)(r'foo/bar/baz') + posix_wrong = Path.using_module(posixpath)(r'foo\bar\baz') + + assert nt_ok.dirname() == r'foo\bar' + assert posix_ok.dirname() == r'foo/bar' + assert posix_wrong.dirname() == '' + + assert nt_ok / 'quux' == r'foo\bar\baz\quux' + assert posix_ok / 'quux' == r'foo/bar/baz/quux' + + def test_explicit_module_classes(self): + """ + Multiple calls to path.using_module should produce the same class. 
+ """ + nt_path = Path.using_module(ntpath) + assert nt_path is Path.using_module(ntpath) + assert nt_path.__name__ == 'Path_ntpath' + + def test_joinpath_on_instance(self): + res = Path('foo') + foo_bar = res.joinpath('bar') + assert foo_bar == p(nt='foo\\bar', posix='foo/bar') + + def test_joinpath_to_nothing(self): + res = Path('foo') + assert res.joinpath() == res + + def test_joinpath_on_class(self): + "Construct a path from a series of strings" + foo_bar = Path.joinpath('foo', 'bar') + assert foo_bar == p(nt='foo\\bar', posix='foo/bar') + + def test_joinpath_fails_on_empty(self): + "It doesn't make sense to join nothing at all" + try: + Path.joinpath() + except TypeError: + pass + else: + raise Exception("did not raise") + + def test_joinpath_returns_same_type(self): + path_posix = Path.using_module(posixpath) + res = path_posix.joinpath('foo') + assert isinstance(res, path_posix) + res2 = res.joinpath('bar') + assert isinstance(res2, path_posix) + assert res2 == 'foo/bar' + + +class TestSelfReturn: + """ + Some methods don't necessarily return any value (e.g. makedirs, + makedirs_p, rename, mkdir, touch, chroot). These methods should return + self anyhow to allow methods to be chained. 
+ """ + def test_makedirs_p(self, tmpdir): + """ + Path('foo').makedirs_p() == Path('foo') + """ + p = Path(tmpdir) / "newpath" + ret = p.makedirs_p() + assert p == ret + + def test_makedirs_p_extant(self, tmpdir): + p = Path(tmpdir) + ret = p.makedirs_p() + assert p == ret + + def test_rename(self, tmpdir): + p = Path(tmpdir) / "somefile" + p.touch() + target = Path(tmpdir) / "otherfile" + ret = p.rename(target) + assert target == ret + + def test_mkdir(self, tmpdir): + p = Path(tmpdir) / "newdir" + ret = p.mkdir() + assert p == ret + + def test_touch(self, tmpdir): + p = Path(tmpdir) / "empty file" + ret = p.touch() + assert p == ret + + +class TestScratchDir: + """ + Tests that run in a temporary directory (does not test tempdir class) + """ + def test_context_manager(self, tmpdir): + """Can be used as context manager for chdir.""" + d = Path(tmpdir) + subdir = d / 'subdir' + subdir.makedirs() + old_dir = os.getcwd() + with subdir: + assert os.getcwd() == os.path.realpath(subdir) + assert os.getcwd() == old_dir + + def test_touch(self, tmpdir): + # NOTE: This test takes a long time to run (~10 seconds). + # It sleeps several seconds because on Windows, the resolution + # of a file's mtime and ctime is about 2 seconds. + # + # atime isn't tested because on Windows the resolution of atime + # is something like 24 hours. 
+ + threshold = 1 + + d = Path(tmpdir) + f = d / 'test.txt' + t0 = time.time() - threshold + f.touch() + t1 = time.time() + threshold + + assert f.exists() + assert f.isfile() + assert f.size == 0 + assert t0 <= f.mtime <= t1 + if hasattr(os.path, 'getctime'): + ct = f.ctime + assert t0 <= ct <= t1 + + time.sleep(threshold*2) + fobj = open(f, 'ab') + fobj.write('some bytes'.encode('utf-8')) + fobj.close() + + time.sleep(threshold*2) + t2 = time.time() - threshold + f.touch() + t3 = time.time() + threshold + + assert t0 <= t1 < t2 <= t3 # sanity check + + assert f.exists() + assert f.isfile() + assert f.size == 10 + assert t2 <= f.mtime <= t3 + if hasattr(os.path, 'getctime'): + ct2 = f.ctime + if os.name == 'nt': + # On Windows, "ctime" is CREATION time + assert ct == ct2 + assert ct2 < t2 + else: + # On other systems, it might be the CHANGE time + # (especially on Unix, time of inode changes) + assert ct == ct2 or ct2 == f.mtime + + def test_listing(self, tmpdir): + d = Path(tmpdir) + assert d.listdir() == [] + + f = 'testfile.txt' + af = d / f + assert af == os.path.join(d, f) + af.touch() + try: + assert af.exists() + + assert d.listdir() == [af] + + # .glob() + assert d.glob('testfile.txt') == [af] + assert d.glob('test*.txt') == [af] + assert d.glob('*.txt') == [af] + assert d.glob('*txt') == [af] + assert d.glob('*') == [af] + assert d.glob('*.html') == [] + assert d.glob('testfile') == [] + finally: + af.remove() + + # Try a test with 20 files + files = [d / ('%d.txt' % i) for i in range(20)] + for f in files: + fobj = open(f, 'w') + fobj.write('some text\n') + fobj.close() + try: + files2 = d.listdir() + files.sort() + files2.sort() + assert files == files2 + finally: + for f in files: + try: + f.remove() + except: + pass + + def test_listdir_other_encoding(self, tmpdir): + """ + Some filesystems allow non-character sequences in path names. + ``.listdir`` should still function in this case. + See issue #61 for details. 
+ """ + assert Path(tmpdir).listdir() == [] + tmpdir_bytes = str(tmpdir).encode('ascii') + + filename = 'r\xe9\xf1emi'.encode('latin-1') + pathname = os.path.join(tmpdir_bytes, filename) + with open(pathname, 'wb'): + pass + # first demonstrate that os.listdir works + assert os.listdir(tmpdir_bytes) + + # now try with path.py + results = Path(tmpdir).listdir() + assert len(results) == 1 + res, = results + assert isinstance(res, Path) + # OS X seems to encode the bytes in the filename as %XX characters. + if platform.system() == 'Darwin': + assert res.basename() == 'r%E9%F1emi' + return + assert len(res.basename()) == len(filename) + + def test_makedirs(self, tmpdir): + d = Path(tmpdir) + + # Placeholder file so that when removedirs() is called, + # it doesn't remove the temporary directory itself. + tempf = d / 'temp.txt' + tempf.touch() + try: + foo = d / 'foo' + boz = foo / 'bar' / 'baz' / 'boz' + boz.makedirs() + try: + assert boz.isdir() + finally: + boz.removedirs() + assert not foo.exists() + assert d.exists() + + foo.mkdir(0o750) + boz.makedirs(0o700) + try: + assert boz.isdir() + finally: + boz.removedirs() + assert not foo.exists() + assert d.exists() + finally: + os.remove(tempf) + + def assertSetsEqual(self, a, b): + ad = {} + + for i in a: + ad[i] = None + + bd = {} + + for i in b: + bd[i] = None + + assert ad == bd + + def test_shutil(self, tmpdir): + # Note: This only tests the methods exist and do roughly what + # they should, neglecting the details as they are shutil's + # responsibility. + + d = Path(tmpdir) + testDir = d / 'testdir' + testFile = testDir / 'testfile.txt' + testA = testDir / 'A' + testCopy = testA / 'testcopy.txt' + testLink = testA / 'testlink.txt' + testB = testDir / 'B' + testC = testB / 'C' + testCopyOfLink = testC / testA.relpathto(testLink) + + # Create test dirs and a file + testDir.mkdir() + testA.mkdir() + testB.mkdir() + + f = open(testFile, 'w') + f.write('x' * 10000) + f.close() + + # Test simple file copying. 
+ testFile.copyfile(testCopy) + assert testCopy.isfile() + assert testFile.bytes() == testCopy.bytes() + + # Test copying into a directory. + testCopy2 = testA / testFile.name + testFile.copy(testA) + assert testCopy2.isfile() + assert testFile.bytes() == testCopy2.bytes() + + # Make a link for the next test to use. + if hasattr(os, 'symlink'): + testFile.symlink(testLink) + else: + testFile.copy(testLink) # fallback + + # Test copying directory tree. + testA.copytree(testC) + assert testC.isdir() + self.assertSetsEqual( + testC.listdir(), + [testC / testCopy.name, + testC / testFile.name, + testCopyOfLink]) + assert not testCopyOfLink.islink() + + # Clean up for another try. + testC.rmtree() + assert not testC.exists() + + # Copy again, preserving symlinks. + testA.copytree(testC, True) + assert testC.isdir() + self.assertSetsEqual( + testC.listdir(), + [testC / testCopy.name, + testC / testFile.name, + testCopyOfLink]) + if hasattr(os, 'symlink'): + assert testCopyOfLink.islink() + assert testCopyOfLink.readlink() == testFile + + # Clean up. 
+ testDir.rmtree() + assert not testDir.exists() + self.assertList(d.listdir(), []) + + def assertList(self, listing, expected): + assert sorted(listing) == sorted(expected) + + def test_patterns(self, tmpdir): + d = Path(tmpdir) + names = ['x.tmp', 'x.xtmp', 'x2g', 'x22', 'x.txt'] + dirs = [d, d/'xdir', d/'xdir.tmp', d/'xdir.tmp'/'xsubdir'] + + for e in dirs: + if not e.isdir(): + e.makedirs() + + for name in names: + (e/name).touch() + self.assertList(d.listdir('*.tmp'), [d/'x.tmp', d/'xdir.tmp']) + self.assertList(d.files('*.tmp'), [d/'x.tmp']) + self.assertList(d.dirs('*.tmp'), [d/'xdir.tmp']) + self.assertList(d.walk(), [e for e in dirs + if e != d] + [e/n for e in dirs + for n in names]) + self.assertList(d.walk('*.tmp'), + [e/'x.tmp' for e in dirs] + [d/'xdir.tmp']) + self.assertList(d.walkfiles('*.tmp'), [e/'x.tmp' for e in dirs]) + self.assertList(d.walkdirs('*.tmp'), [d/'xdir.tmp']) + + def test_unicode(self, tmpdir): + d = Path(tmpdir) + p = d/'unicode.txt' + + def test(enc): + """ Test that path works with the specified encoding, + which must be capable of representing the entire range of + Unicode codepoints. 
+ """ + + given = ('Hello world\n' + '\u0d0a\u0a0d\u0d15\u0a15\r\n' + '\u0d0a\u0a0d\u0d15\u0a15\x85' + '\u0d0a\u0a0d\u0d15\u0a15\u2028' + '\r' + 'hanging') + clean = ('Hello world\n' + '\u0d0a\u0a0d\u0d15\u0a15\n' + '\u0d0a\u0a0d\u0d15\u0a15\n' + '\u0d0a\u0a0d\u0d15\u0a15\n' + '\n' + 'hanging') + givenLines = [ + ('Hello world\n'), + ('\u0d0a\u0a0d\u0d15\u0a15\r\n'), + ('\u0d0a\u0a0d\u0d15\u0a15\x85'), + ('\u0d0a\u0a0d\u0d15\u0a15\u2028'), + ('\r'), + ('hanging')] + expectedLines = [ + ('Hello world\n'), + ('\u0d0a\u0a0d\u0d15\u0a15\n'), + ('\u0d0a\u0a0d\u0d15\u0a15\n'), + ('\u0d0a\u0a0d\u0d15\u0a15\n'), + ('\n'), + ('hanging')] + expectedLines2 = [ + ('Hello world'), + ('\u0d0a\u0a0d\u0d15\u0a15'), + ('\u0d0a\u0a0d\u0d15\u0a15'), + ('\u0d0a\u0a0d\u0d15\u0a15'), + (''), + ('hanging')] + + # write bytes manually to file + f = codecs.open(p, 'w', enc) + f.write(given) + f.close() + + # test all 3 path read-fully functions, including + # path.lines() in unicode mode. + assert p.bytes() == given.encode(enc) + assert p.text(enc) == clean + assert p.lines(enc) == expectedLines + assert p.lines(enc, retain=False) == expectedLines2 + + # If this is UTF-16, that's enough. + # The rest of these will unfortunately fail because append=True + # mode causes an extra BOM to be written in the middle of the file. + # UTF-16 is the only encoding that has this problem. + if enc == 'UTF-16': + return + + # Write Unicode to file using path.write_text(). + cleanNoHanging = clean + '\n' # This test doesn't work with a + # hanging line. + p.write_text(cleanNoHanging, enc) + p.write_text(cleanNoHanging, enc, append=True) + # Check the result. 
+ expectedBytes = 2 * cleanNoHanging.replace('\n', + os.linesep).encode(enc) + expectedLinesNoHanging = expectedLines[:] + expectedLinesNoHanging[-1] += '\n' + assert p.bytes() == expectedBytes + assert p.text(enc) == 2 * cleanNoHanging + assert p.lines(enc) == 2 * expectedLinesNoHanging + assert p.lines(enc, retain=False) == 2 * expectedLines2 + + # Write Unicode to file using path.write_lines(). + # The output in the file should be exactly the same as last time. + p.write_lines(expectedLines, enc) + p.write_lines(expectedLines2, enc, append=True) + # Check the result. + assert p.bytes() == expectedBytes + + # Now: same test, but using various newline sequences. + # If linesep is being properly applied, these will be converted + # to the platform standard newline sequence. + p.write_lines(givenLines, enc) + p.write_lines(givenLines, enc, append=True) + # Check the result. + assert p.bytes() == expectedBytes + + # Same test, using newline sequences that are different + # from the platform default. + def testLinesep(eol): + p.write_lines(givenLines, enc, linesep=eol) + p.write_lines(givenLines, enc, linesep=eol, append=True) + expected = 2 * cleanNoHanging.replace('\n', eol).encode(enc) + assert p.bytes() == expected + + testLinesep('\n') + testLinesep('\r') + testLinesep('\r\n') + testLinesep('\x0d\x85') + + # Again, but with linesep=None. + p.write_lines(givenLines, enc, linesep=None) + p.write_lines(givenLines, enc, linesep=None, append=True) + # Check the result. 
+ expectedBytes = 2 * given.encode(enc) + assert p.bytes() == expectedBytes + assert p.text(enc) == 2 * clean + expectedResultLines = expectedLines[:] + expectedResultLines[-1] += expectedLines[0] + expectedResultLines += expectedLines[1:] + assert p.lines(enc) == expectedResultLines + + test('UTF-8') + test('UTF-16BE') + test('UTF-16LE') + test('UTF-16') + + def test_chunks(self, tmpdir): + p = (tempdir() / 'test.txt').touch() + txt = "0123456789" + size = 5 + p.write_text(txt) + for i, chunk in enumerate(p.chunks(size)): + assert chunk == txt[i * size:i * size + size] + + assert i == len(txt) / size - 1 + + @pytest.mark.skipif(not hasattr(os.path, 'samefile'), + reason="samefile not present") + def test_samefile(self, tmpdir): + f1 = (tempdir() / '1.txt').touch() + f1.write_text('foo') + f2 = (tempdir() / '2.txt').touch() + f1.write_text('foo') + f3 = (tempdir() / '3.txt').touch() + f1.write_text('bar') + f4 = (tempdir() / '4.txt') + f1.copyfile(f4) + + assert os.path.samefile(f1, f2) == f1.samefile(f2) + assert os.path.samefile(f1, f3) == f1.samefile(f3) + assert os.path.samefile(f1, f4) == f1.samefile(f4) + assert os.path.samefile(f1, f1) == f1.samefile(f1) + + def test_rmtree_p(self, tmpdir): + d = Path(tmpdir) + sub = d / 'subfolder' + sub.mkdir() + (sub / 'afile').write_text('something') + sub.rmtree_p() + assert not sub.exists() + try: + sub.rmtree_p() + except OSError: + self.fail("Calling `rmtree_p` on non-existent directory " + "should not raise an exception.") + + +class TestMergeTree: + @pytest.fixture(autouse=True) + def testing_structure(self, tmpdir): + self.test_dir = Path(tmpdir) + self.subdir_a = self.test_dir / 'A' + self.test_file = self.subdir_a / 'testfile.txt' + self.test_link = self.subdir_a / 'testlink.txt' + self.subdir_b = self.test_dir / 'B' + + self.subdir_a.mkdir() + self.subdir_b.mkdir() + + with open(self.test_file, 'w') as f: + f.write('x' * 10000) + + if hasattr(os, 'symlink'): + self.test_file.symlink(self.test_link) + else: + 
self.test_file.copy(self.test_link) + + def test_with_nonexisting_dst_kwargs(self): + self.subdir_a.merge_tree(self.subdir_b, symlinks=True) + assert self.subdir_b.isdir() + expected = set(( + self.subdir_b / self.test_file.name, + self.subdir_b / self.test_link.name, + )) + assert set(self.subdir_b.listdir()) == expected + assert Path(self.subdir_b / self.test_link.name).islink() + + def test_with_nonexisting_dst_args(self): + self.subdir_a.merge_tree(self.subdir_b, True) + assert self.subdir_b.isdir() + expected = set(( + self.subdir_b / self.test_file.name, + self.subdir_b / self.test_link.name, + )) + assert set(self.subdir_b.listdir()) == expected + assert Path(self.subdir_b / self.test_link.name).islink() + + def test_with_existing_dst(self): + self.subdir_b.rmtree() + self.subdir_a.copytree(self.subdir_b, True) + + self.test_link.remove() + test_new = self.subdir_a / 'newfile.txt' + test_new.touch() + with open(self.test_file, 'w') as f: + f.write('x' * 5000) + + self.subdir_a.merge_tree(self.subdir_b, True) + + assert self.subdir_b.isdir() + expected = set(( + self.subdir_b / self.test_file.name, + self.subdir_b / self.test_link.name, + self.subdir_b / test_new.name, + )) + assert set(self.subdir_b.listdir()) == expected + assert Path(self.subdir_b / self.test_link.name).islink() + assert len(Path(self.subdir_b / self.test_file.name).bytes()) == 5000 + + def test_copytree_parameters(self): + """ + merge_tree should accept parameters to copytree, such as 'ignore' + """ + ignore = shutil.ignore_patterns('testlink*') + self.subdir_a.merge_tree(self.subdir_b, ignore=ignore) + + assert self.subdir_b.isdir() + assert self.subdir_b.listdir() == [self.subdir_b / self.test_file.name] + + +class TestChdir: + def test_chdir_or_cd(self, tmpdir): + """ tests the chdir or cd method """ + d = Path(str(tmpdir)) + cwd = d.getcwd() + + # ensure the cwd isn't our tempdir + assert str(d) != str(cwd) + # now, we're going to chdir to tempdir + d.chdir() + + # we now ensure that 
our cwd is the tempdir + assert str(d.getcwd()) == str(tmpdir) + # we're resetting our path + d = Path(cwd) + + # we ensure that our cwd is still set to tempdir + assert str(d.getcwd()) == str(tmpdir) + + # we're calling the alias cd method + d.cd() + # now, we ensure cwd isn'r tempdir + assert str(d.getcwd()) == str(cwd) + assert str(d.getcwd()) != str(tmpdir) + + +class TestSubclass: + class PathSubclass(Path): + pass + + def test_subclass_produces_same_class(self): + """ + When operations are invoked on a subclass, they should produce another + instance of that subclass. + """ + p = self.PathSubclass('/foo') + subdir = p / 'bar' + assert isinstance(subdir, self.PathSubclass) + + +class TestTempDir: + + def test_constructor(self): + """ + One should be able to readily construct a temporary directory + """ + d = tempdir() + assert isinstance(d, Path) + assert d.exists() + assert d.isdir() + d.rmdir() + assert not d.exists() + + def test_next_class(self): + """ + It should be possible to invoke operations on a tempdir and get + Path classes. + """ + d = tempdir() + sub = d / 'subdir' + assert isinstance(sub, Path) + d.rmdir() + + def test_context_manager(self): + """ + One should be able to use a tempdir object as a context, which will + clean up the contents after. + """ + d = tempdir() + res = d.__enter__() + assert res is d + (d / 'somefile.txt').touch() + assert not isinstance(d / 'somefile.txt', tempdir) + d.__exit__(None, None, None) + assert not d.exists() + + def test_context_manager_exception(self): + """ + The context manager will not clean up if an exception occurs. + """ + d = tempdir() + d.__enter__() + (d / 'somefile.txt').touch() + assert not isinstance(d / 'somefile.txt', tempdir) + d.__exit__(TypeError, TypeError('foo'), None) + assert d.exists() + + def test_context_manager_using_with(self): + """ + The context manager will allow using the with keyword and + provide a temporry directory that will be deleted after that. 
+ """ + + with tempdir() as d: + assert d.isdir() + assert not d.isdir() + + +class TestUnicode: + @pytest.fixture(autouse=True) + def unicode_name_in_tmpdir(self, tmpdir): + # build a snowman (dir) in the temporary directory + Path(tmpdir).joinpath('☃').mkdir() + + def test_walkdirs_with_unicode_name(self, tmpdir): + for res in Path(tmpdir).walkdirs(): + pass + + +class TestPatternMatching: + def test_fnmatch_simple(self): + p = Path('FooBar') + assert p.fnmatch('Foo*') + assert p.fnmatch('Foo[ABC]ar') + + def test_fnmatch_custom_mod(self): + p = Path('FooBar') + p.module = ntpath + assert p.fnmatch('foobar') + assert p.fnmatch('FOO[ABC]AR') + + def test_fnmatch_custom_normcase(self): + normcase = lambda path: path.upper() + p = Path('FooBar') + assert p.fnmatch('foobar', normcase=normcase) + assert p.fnmatch('FOO[ABC]AR', normcase=normcase) + + def test_listdir_simple(self): + p = Path('.') + assert len(p.listdir()) == len(os.listdir('.')) + + def test_listdir_empty_pattern(self): + p = Path('.') + assert p.listdir('') == [] + + def test_listdir_patterns(self, tmpdir): + p = Path(tmpdir) + (p/'sub').mkdir() + (p/'File').touch() + assert p.listdir('s*') == [p / 'sub'] + assert len(p.listdir('*')) == 2 + + def test_listdir_custom_module(self, tmpdir): + """ + Listdir patterns should honor the case sensitivity of the path module + used by that Path class. + """ + always_unix = Path.using_module(posixpath) + p = always_unix(tmpdir) + (p/'sub').mkdir() + (p/'File').touch() + assert p.listdir('S*') == [] + + always_win = Path.using_module(ntpath) + p = always_win(tmpdir) + assert p.listdir('S*') == [p/'sub'] + assert p.listdir('f*') == [p/'File'] + + def test_listdir_case_insensitive(self, tmpdir): + """ + Listdir patterns should honor the case sensitivity of the path module + used by that Path class. 
+ """ + p = Path(tmpdir) + (p/'sub').mkdir() + (p/'File').touch() + assert p.listdir(ci('S*')) == [p/'sub'] + assert p.listdir(ci('f*')) == [p/'File'] + assert p.files(ci('S*')) == [] + assert p.dirs(ci('f*')) == [] + + def test_walk_case_insensitive(self, tmpdir): + p = Path(tmpdir) + (p/'sub1'/'foo').makedirs_p() + (p/'sub2'/'foo').makedirs_p() + (p/'sub1'/'foo'/'bar.Txt').touch() + (p/'sub2'/'foo'/'bar.TXT').touch() + (p/'sub2'/'foo'/'bar.txt.bz2').touch() + files = list(p.walkfiles(ci('*.txt'))) + assert len(files) == 2 + assert p/'sub2'/'foo'/'bar.TXT' in files + assert p/'sub1'/'foo'/'bar.Txt' in files + +@pytest.mark.skipif(sys.version_info < (2, 6), + reason="in_place requires io module in Python 2.6") +class TestInPlace: + reference_content = textwrap.dedent(""" + The quick brown fox jumped over the lazy dog. + """.lstrip()) + reversed_content = textwrap.dedent(""" + .god yzal eht revo depmuj xof nworb kciuq ehT + """.lstrip()) + alternate_content = textwrap.dedent(""" + Lorem ipsum dolor sit amet, consectetur adipisicing elit, + sed do eiusmod tempor incididunt ut labore et dolore magna + aliqua. Ut enim ad minim veniam, quis nostrud exercitation + ullamco laboris nisi ut aliquip ex ea commodo consequat. + Duis aute irure dolor in reprehenderit in voluptate velit + esse cillum dolore eu fugiat nulla pariatur. Excepteur + sint occaecat cupidatat non proident, sunt in culpa qui + officia deserunt mollit anim id est laborum. 
+ """.lstrip()) + + @classmethod + def create_reference(cls, tmpdir): + p = Path(tmpdir)/'document' + with p.open('w') as stream: + stream.write(cls.reference_content) + return p + + def test_line_by_line_rewrite(self, tmpdir): + doc = self.create_reference(tmpdir) + # reverse all the text in the document, line by line + with doc.in_place() as (reader, writer): + for line in reader: + r_line = ''.join(reversed(line.strip())) + '\n' + writer.write(r_line) + with doc.open() as stream: + data = stream.read() + assert data == self.reversed_content + + def test_exception_in_context(self, tmpdir): + doc = self.create_reference(tmpdir) + with pytest.raises(RuntimeError) as exc: + with doc.in_place() as (reader, writer): + writer.write(self.alternate_content) + raise RuntimeError("some error") + assert "some error" in str(exc) + with doc.open() as stream: + data = stream.read() + assert not 'Lorem' in data + assert 'lazy dog' in data + + +class TestSpecialPaths: + @pytest.fixture(autouse=True, scope='class') + def appdirs_installed(cls): + pytest.importorskip('appdirs') + + @pytest.fixture + def feign_linux(self, monkeypatch): + monkeypatch.setattr("platform.system", lambda: "Linux") + monkeypatch.setattr("sys.platform", "linux") + monkeypatch.setattr("os.pathsep", ":") + # remove any existing import of appdirs, as it sets up some + # state during import. 
+ sys.modules.pop('appdirs') + + def test_basic_paths(self): + appdirs = importlib.import_module('appdirs') + + expected = appdirs.user_config_dir() + assert SpecialResolver(Path).user.config == expected + + expected = appdirs.site_config_dir() + assert SpecialResolver(Path).site.config == expected + + expected = appdirs.user_config_dir('My App', 'Me') + assert SpecialResolver(Path, 'My App', 'Me').user.config == expected + + def test_unix_paths(self, tmpdir, monkeypatch, feign_linux): + fake_config = tmpdir / '_config' + monkeypatch.setitem(os.environ, 'XDG_CONFIG_HOME', str(fake_config)) + expected = str(tmpdir / '_config') + assert SpecialResolver(Path).user.config == expected + + def test_unix_paths_fallback(self, tmpdir, monkeypatch, feign_linux): + "Without XDG_CONFIG_HOME set, ~/.config should be used." + fake_home = tmpdir / '_home' + monkeypatch.setitem(os.environ, 'HOME', str(fake_home)) + expected = str(tmpdir / '_home' / '.config') + assert SpecialResolver(Path).user.config == expected + + def test_property(self): + assert isinstance(Path.special().user.config, Path) + assert isinstance(Path.special().user.data, Path) + assert isinstance(Path.special().user.cache, Path) + + def test_other_parameters(self): + """ + Other parameters should be passed through to appdirs function. 
+ """ + res = Path.special(version="1.0", multipath=True).site.config + assert isinstance(res, Path) + + def test_multipath(self, feign_linux, monkeypatch, tmpdir): + """ + If multipath is provided, on Linux return the XDG_CONFIG_DIRS + """ + fake_config_1 = str(tmpdir / '_config1') + fake_config_2 = str(tmpdir / '_config2') + config_dirs = os.pathsep.join([fake_config_1, fake_config_2]) + monkeypatch.setitem(os.environ, 'XDG_CONFIG_DIRS', config_dirs) + res = Path.special(multipath=True).site.config + assert isinstance(res, Multi) + assert fake_config_1 in res + assert fake_config_2 in res + assert '_config1' in str(res) + + def test_reused_SpecialResolver(self): + """ + Passing additional args and kwargs to SpecialResolver should be + passed through to each invocation of the function in appdirs. + """ + appdirs = importlib.import_module('appdirs') + + adp = SpecialResolver(Path, version="1.0") + res = adp.user.config + + expected = appdirs.user_config_dir(version="1.0") + assert res == expected + + +class TestMultiPath: + def test_for_class(self): + """ + Multi.for_class should return a subclass of the Path class provided. + """ + cls = Multi.for_class(Path) + assert issubclass(cls, Path) + assert issubclass(cls, Multi) + assert cls.__name__ == 'MultiPath' + + def test_detect_no_pathsep(self): + """ + If no pathsep is provided, multipath detect should return an instance + of the parent class with no Multi mix-in. + """ + path = Multi.for_class(Path).detect('/foo/bar') + assert isinstance(path, Path) + assert not isinstance(path, Multi) + + def test_detect_with_pathsep(self): + """ + If a pathsep appears in the input, detect should return an instance + of a Path with the Multi mix-in. + """ + inputs = '/foo/bar', '/baz/bing' + input = os.pathsep.join(inputs) + path = Multi.for_class(Path).detect(input) + + assert isinstance(path, Multi) + + def test_iteration(self): + """ + Iterating over a MultiPath should yield instances of the + parent class. 
+ """ + inputs = '/foo/bar', '/baz/bing' + input = os.pathsep.join(inputs) + path = Multi.for_class(Path).detect(input) + + items = iter(path) + first = next(items) + assert first == '/foo/bar' + assert isinstance(first, Path) + assert not isinstance(first, Multi) + assert next(items) == '/baz/bing' + assert path == input + + +if __name__ == '__main__': + pytest.main() From 1085c5851cb3c36dcde4a402cd332260feb84a59 Mon Sep 17 00:00:00 2001 From: Labrys Date: Tue, 7 Jun 2016 08:51:56 -0400 Subject: [PATCH 55/82] Apply dict type-casting in autoFork too. * Fixes #1056 --- core/nzbToMediaAutoFork.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/nzbToMediaAutoFork.py b/core/nzbToMediaAutoFork.py index 3e213fd8..105e758c 100644 --- a/core/nzbToMediaAutoFork.py +++ b/core/nzbToMediaAutoFork.py @@ -12,7 +12,7 @@ def autoFork(section, inputCategory): # auto-detect correct section # config settings - cfg = core.CFG[section][inputCategory] + cfg = dict(core.CFG[section][inputCategory]) host = cfg.get("host") port = cfg.get("port") From ef8701ed59d8dd51c1ea5837ce25f946c7b9a89c Mon Sep 17 00:00:00 2001 From: clinton-hall Date: Wed, 8 Jun 2016 22:22:07 +0930 Subject: [PATCH 56/82] fix autofork detection. 
Fixes #1056 --- core/nzbToMediaAutoFork.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/core/nzbToMediaAutoFork.py b/core/nzbToMediaAutoFork.py index 105e758c..1f5b418f 100644 --- a/core/nzbToMediaAutoFork.py +++ b/core/nzbToMediaAutoFork.py @@ -21,7 +21,10 @@ def autoFork(section, inputCategory): apikey = cfg.get("apikey") ssl = int(cfg.get("ssl", 0)) web_root = cfg.get("web_root", "") - fork = core.FORKS.items()[core.FORKS.keys().index(cfg.get("fork", "auto"))] + try: + fork = core.FORKS.items()[core.FORKS.keys().index(cfg.get("fork", "auto"))] + except: + fork = "auto" protocol = "https://" if ssl else "http://" detected = False @@ -68,7 +71,7 @@ def autoFork(section, inputCategory): r = [] if r and r.ok: for param in params: - if not 'name={param!r}'.format(param=param) in r.text: + if not 'name="{param}"'.format(param=param) in r.text: rem_params.append(param) for param in rem_params: params.pop(param) From d500c61b957491e7d30a0d855f7e1c0c16bb0092 Mon Sep 17 00:00:00 2001 From: andrzejc Date: Sat, 11 Jun 2016 14:22:20 +0200 Subject: [PATCH 57/82] TorrentToMedia: fix invalid indexing scope, select first elem of pair not char Fix for issue manifesting itself with the following exception: Traceback (most recent call last): File "/opt/nzbToMedia/TorrentToMedia.py", line 366, in exit(main(sys.argv)) File "/opt/nzbToMedia/TorrentToMedia.py", line 303, in main result = processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID, clientAgent) File "/opt/nzbToMedia/TorrentToMedia.py", line 118, in processTorrent outputDestination = os.path.join(core.OUTPUTDIRECTORY, inputCategory, basename) File "/usr/local/lib/python2.7/posixpath.py", line 68, in join if b.startswith('/'): AttributeError: 'tuple' object has no attribute 'startswith' The indexing operator was applied to wrong object: should be selecting first element of tuple returned by splitext() instead of first char of string passed to splitext. 
--- TorrentToMedia.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/TorrentToMedia.py b/TorrentToMedia.py index b1a317ae..7b366052 100755 --- a/TorrentToMedia.py +++ b/TorrentToMedia.py @@ -114,7 +114,7 @@ def processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID, if not os.path.isdir(os.path.join(inputDirectory, inputName)): basename = os.path.basename(inputDirectory) basename = core.sanitizeName(inputName) \ - if inputName == basename else os.path.splitext(core.sanitizeName(inputName)[0]) + if inputName == basename else os.path.splitext(core.sanitizeName(inputName))[0] outputDestination = os.path.join(core.OUTPUTDIRECTORY, inputCategory, basename) elif uniquePath: outputDestination = os.path.normpath( From 9beeeaa7529bfee35fd0eb22b33a123807eeb57e Mon Sep 17 00:00:00 2001 From: clinton-hall Date: Sat, 18 Jun 2016 10:27:23 +0930 Subject: [PATCH 58/82] not all categories support uniquePath. Fixes #1064 --- TorrentToMedia.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/TorrentToMedia.py b/TorrentToMedia.py index 286ff1a6..6ef75ef3 100755 --- a/TorrentToMedia.py +++ b/TorrentToMedia.py @@ -89,7 +89,10 @@ def processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID, Torrent_NoLink = int(section[usercat].get("Torrent_NoLink", 0)) keep_archive = int(section[usercat].get("keep_archive", 0)) extract = int(section[usercat].get('extract', 0)) - uniquePath = int(section[usercat].get("unique_path", 1)) + try: + uniquePath = int(section[usercat].get("unique_path", 1)) + except: + uniquePath = 1 if clientAgent != 'manual': core.pause_torrent(clientAgent, inputHash, inputID, inputName) From 5a7d525f6cedf1f672bfd4be2f280e2ab1e620ae Mon Sep 17 00:00:00 2001 From: clinton-hall Date: Sat, 18 Jun 2016 12:38:48 +0930 Subject: [PATCH 59/82] fix NZBGet script headers. 
--- nzbToCouchPotato.py | 154 +++++++++++++------------- nzbToGamez.py | 54 ++++----- nzbToHeadPhones.py | 62 +++++------ nzbToMedia.py | 264 ++++++++++++++++++++++---------------------- nzbToMylar.py | 66 +++++------ nzbToNzbDrone.py | 146 ++++++++++++------------ nzbToSickBeard.py | 158 +++++++++++++------------- 7 files changed, 452 insertions(+), 452 deletions(-) diff --git a/nzbToCouchPotato.py b/nzbToCouchPotato.py index 32416471..15705898 100755 --- a/nzbToCouchPotato.py +++ b/nzbToCouchPotato.py @@ -1,8 +1,8 @@ #!/usr/bin/env python2 # coding=utf-8 - -# ############################################################################## -# ### NZBGET POST-PROCESSING SCRIPT ### +# +############################################################################## +### NZBGET POST-PROCESSING SCRIPT ### # Post-Process to CouchPotato, SickBeard, NzbDrone, Mylar, Gamez, HeadPhones. # @@ -10,239 +10,239 @@ # # NOTE: This script requires Python to be installed on your system. -# ############################################################################## -# ### OPTIONS ### +############################################################################## +### OPTIONS ### -# ## General +## General # Auto Update nzbToMedia (0, 1). # # Set to 1 if you want nzbToMedia to automatically check for and update to the latest version -# auto_update=0 +#auto_update=0 # Check Media for corruption (0, 1). # # Enable/Disable media file checking using ffprobe. -# check_media=1 +#check_media=1 # Safe Mode protection of DestDir (0, 1). # # Enable/Disable a safety check to ensure we don't process all downloads in the default_downloadDirectory by mistake. -# safe_mode=1 +#safe_mode=1 -# ## CouchPotato +## CouchPotato # CouchPotato script category. # # category that gets called for post-processing with CouchPotatoServer. -# cpsCategory=movie +#cpsCategory=movie # CouchPotato api key. -# cpsapikey= +#cpsapikey= # CouchPotato host. # # The ipaddress for your CouchPotato server. 
e.g For the Same system use localhost or 127.0.0.1 -# cpshost=localhost +#cpshost=localhost # CouchPotato port. -# cpsport=5050 +#cpsport=5050 # CouchPotato uses ssl (0, 1). # # Set to 1 if using ssl, else set to 0. -# cpsssl=0 +#cpsssl=0 # CouchPotato URL_Base # # set this if using a reverse proxy. -# cpsweb_root= +#cpsweb_root= # CouchPotato watch directory. # # set this to where your CouchPotato completed downloads are. -# cpswatch_dir= +#cpswatch_dir= # CouchPotato Postprocess Method (renamer, manage). # # use "renamer" for CPS renamer (default) or "manage" to call a manage update. -# cpsmethod=renamer +#cpsmethod=renamer # CouchPotato Delete Failed Downloads (0, 1). # # set to 1 to delete failed, or 0 to leave files in place. -# cpsdelete_failed=0 +#cpsdelete_failed=0 # CouchPotato wait_for # # Set the number of minutes to wait after calling the renamer, to check the movie has changed status. -# cpswait_for=2 +#cpswait_for=2 # CouchPotatoServer and NZBGet are a different system (0, 1). # # Enable to replace local path with the path as per the mountPoints below. -# cpsremote_path=0 +#cpsremote_path=0 -# ## Network +## Network # Network Mount Points (Needed for remote path above) # # Enter Mount points as LocalPath,RemotePath and separate each pair with '|' # e.g. mountPoints=/volume1/Public/,E:\|/volume2/share/,\\NAS\ -# mountPoints= +#mountPoints= -# ## Extensions +## Extensions # Media Extensions # # This is a list of media extensions that are used to verify that the download does contain valid media. -# mediaExtensions=.mkv,.avi,.divx,.xvid,.mov,.wmv,.mp4,.mpg,.mpeg,.vob,.iso,.ts +#mediaExtensions=.mkv,.avi,.divx,.xvid,.mov,.wmv,.mp4,.mpg,.mpeg,.vob,.iso,.ts -# ## Posix +## Posix # Niceness for external tasks Extractor and Transcoder. # # Set the Niceness value for the nice command. These range from -20 (most favorable to the process) to 19 (least favorable to the process). -# niceness=10 +#niceness=10 # ionice scheduling class (0, 1, 2, 3). 
# # Set the ionice scheduling class. 0 for none, 1 for real time, 2 for best-effort, 3 for idle. -# ionice_class=2 +#ionice_class=2 # ionice scheduling class data. # # Set the ionice scheduling class data. This defines the class data, if the class accepts an argument. For real time and best-effort, 0-7 is valid data. -# ionice_classdata=4 +#ionice_classdata=4 -# ## Transcoder +## Transcoder # getSubs (0, 1). # # set to 1 to download subtitles. -# getSubs=0 +#getSubs=0 # subLanguages. # # subLanguages. create a list of languages in the order you want them in your subtitles. -# subLanguages=eng,spa,fra +#subLanguages=eng,spa,fra # Transcode (0, 1). # # set to 1 to transcode, otherwise set to 0. -# transcode=0 +#transcode=0 # create a duplicate, or replace the original (0, 1). # -# set to 1 to create a new file or 0 to replace the original -# duplicate=1 +# set to 1 to cretae a new file or 0 to replace the original +#duplicate=1 # ignore extensions. # # list of extensions that won't be transcoded. -# ignoreExtensions=.avi,.mkv +#ignoreExtensions=.avi,.mkv # outputFastStart (0,1). # # outputFastStart. 1 will use -movflags + faststart. 0 will disable this from being used. -# outputFastStart=0 +#outputFastStart=0 # outputVideoPath. # # outputVideoPath. Set path you want transcoded videos moved to. Leave blank to disable. -# outputVideoPath= +#outputVideoPath= # processOutput (0,1). # # processOutput. 1 will send the outputVideoPath to SickBeard/CouchPotato. 0 will send original files. -# processOutput=0 +#processOutput=0 # audioLanguage. # # audioLanguage. set the 3 letter language code you want as your primary audio track. -# audioLanguage=eng +#audioLanguage=eng # allAudioLanguages (0,1). # # allAudioLanguages. 1 will keep all audio tracks (uses AudioCodec3) where available. -# allAudioLanguages=0 +#allAudioLanguages=0 # allSubLanguages (0,1). # -# allSubLanguages. 1 will keep all existing sub languages. 0 will discard those not in your list above. 
-# allSubLanguages=0 +# allSubLanguages. 1 will keep all exisiting sub languages. 0 will discare those not in your list above. +#allSubLanguages=0 # embedSubs (0,1). # -# embedSubs. 1 will embed external sub/srt subs into your video if this is supported. -# embedSubs=1 +# embedSubs. 1 will embded external sub/srt subs into your video if this is supported. +#embedSubs=1 # burnInSubtitle (0,1). # # burnInSubtitle. burns the default sub language into your video (needed for players that don't support subs) -# burnInSubtitle=0 +#burnInSubtitle=0 # extractSubs (0,1). # # extractSubs. 1 will extract subs from the video file and save these as external srt files. -# extractSubs=0 +#extractSubs=0 # externalSubDir. # # externalSubDir. set the directory where subs should be saved (if not the same directory as the video) -# externalSubDir= +#externalSubDir= # outputDefault (None, iPad, iPad-1080p, iPad-720p, Apple-TV2, iPod, iPhone, PS3, xbox, Roku-1080p, Roku-720p, Roku-480p, mkv, mp4-scene-release). # # outputDefault. Loads default configs for the selected device. The remaining options below are ignored. # If you want to use your own profile, set None and set the remaining options below. -# outputDefault=None +#outputDefault=None # hwAccel (0,1). # # hwAccel. 1 will set ffmpeg to enable hardware acceleration (this requires a recent ffmpeg). -# hwAccel=0 +#hwAccel=0 # ffmpeg output settings. 
-# outputVideoExtension=.mp4 -# outputVideoCodec=libx264 -# VideoCodecAllow= -# outputVideoPreset=medium -# outputVideoFramerate=24 -# outputVideoBitrate=800k -# outputAudioCodec=ac3 -# AudioCodecAllow= -# outputAudioChannels=6 -# outputAudioBitrate=640k -# outputQualityPercent= -# outputAudioTrack2Codec=libfaac -# AudioCodec2Allow= -# outputAudioTrack2Channels=2 -# outputAudioTrack2Bitrate=160k -# outputAudioOtherCodec=libmp3lame -# AudioOtherCodecAllow= -# outputAudioOtherChannels=2 -# outputAudioOtherBitrate=128k -# outputSubtitleCodec= +#outputVideoExtension=.mp4 +#outputVideoCodec=libx264 +#VideoCodecAllow= +#outputVideoPreset=medium +#outputVideoFramerate=24 +#outputVideoBitrate=800k +#outputAudioCodec=ac3 +#AudioCodecAllow= +#outputAudioChannels=6 +#outputAudioBitrate=640k +#outputQualityPercent= +#outputAudioTrack2Codec=libfaac +#AudioCodec2Allow= +#outputAudioTrack2Channels=2 +#outputAudioTrack2Bitrate=160k +#outputAudioOtherCodec=libmp3lame +#AudioOtherCodecAllow= +#outputAudioOtherChannels=2 +#outputAudioOtherBitrate=128k +#outputSubtitleCodec= -# ## WakeOnLan +## WakeOnLan # use WOL (0, 1). # # set to 1 to send WOL broadcast to the mac and test the server (e.g. xbmc) on the host and port specified. -# wolwake=0 +#wolwake=0 # WOL MAC # # enter the mac address of the system to be woken. -# wolmac=00:01:2e:2D:64:e1 +#wolmac=00:01:2e:2D:64:e1 # Set the Host and Port of a server to verify system has woken. 
-# wolhost=192.168.1.37 -# wolport=80 +#wolhost=192.168.1.37 +#wolport=80 -# ### NZBGET POST-PROCESSING SCRIPT ### -# ############################################################################## +### NZBGET POST-PROCESSING SCRIPT ### +############################################################################## import sys import nzbToMedia diff --git a/nzbToGamez.py b/nzbToGamez.py index 9b1cb355..b98bf89a 100755 --- a/nzbToGamez.py +++ b/nzbToGamez.py @@ -1,8 +1,8 @@ #!/usr/bin/env python2 # coding=utf-8 # -# ############################################################################## -# ### NZBGET POST-PROCESSING SCRIPT ### +############################################################################## +### NZBGET POST-PROCESSING SCRIPT ### # Post-Process to CouchPotato, SickBeard, NzbDrone, Mylar, Gamez, HeadPhones. # @@ -10,95 +10,95 @@ # # NOTE: This script requires Python to be installed on your system. -# ############################################################################## +############################################################################## # -# ### OPTIONS ### +### OPTIONS ### -# ## General +## General # Auto Update nzbToMedia (0, 1). # # Set to 1 if you want nzbToMedia to automatically check for and update to the latest version -# auto_update=0 +#auto_update=0 # Safe Mode protection of DestDir (0, 1). # # Enable/Disable a safety check to ensure we don't process all downloads in the default_downloadDirectory by mistake. -# safe_mode=1 +#safe_mode=1 -# ## Gamez +## Gamez # Gamez script category. # # category that gets called for post-processing with Gamez. -# gzCategory=games +#gzCategory=games # Gamez api key. -# gzapikey= +#gzapikey= # Gamez host. # # The ipaddress for your Gamez server. e.g For the Same system use localhost or 127.0.0.1 -# gzhost=localhost +#gzhost=localhost # Gamez port. -# gzport=8085 +#gzport=8085 # Gamez uses ssl (0, 1). # # Set to 1 if using ssl, else set to 0. 
-# gzssl=0 +#gzssl=0 # Gamez library # # move downloaded games here. -# gzlibrary +#gzlibrary # Gamez web_root # # set this if using a reverse proxy. -# gzweb_root= +#gzweb_root= # Gamez watch directory. # # set this to where your Gamez completed downloads are. -# gzwatch_dir= +#gzwatch_dir= -# ## Posix +## Posix # Niceness for external tasks Extractor and Transcoder. # # Set the Niceness value for the nice command. These range from -20 (most favorable to the process) to 19 (least favorable to the process). -# niceness=10 +#niceness=10 # ionice scheduling class (0, 1, 2, 3). # # Set the ionice scheduling class. 0 for none, 1 for real time, 2 for best-effort, 3 for idle. -# ionice_class=2 +#ionice_class=2 # ionice scheduling class data. # # Set the ionice scheduling class data. This defines the class data, if the class accepts an argument. For real time and best-effort, 0-7 is valid data. -# ionice_classdata=4 +#ionice_classdata=4 -# ## WakeOnLan +## WakeOnLan # use WOL (0, 1). # # set to 1 to send WOL broadcast to the mac and test the server (e.g. xbmc) on the host and port specified. -# wolwake=0 +#wolwake=0 # WOL MAC # # enter the mac address of the system to be woken. -# wolmac=00:01:2e:2D:64:e1 +#wolmac=00:01:2e:2D:64:e1 # Set the Host and Port of a server to verify system has woken. 
-# wolhost=192.168.1.37 -# wolport=80 +#wolhost=192.168.1.37 +#wolport=80 -# ### NZBGET POST-PROCESSING SCRIPT ### -# ############################################################################## +### NZBGET POST-PROCESSING SCRIPT ### +############################################################################## import sys import nzbToMedia diff --git a/nzbToHeadPhones.py b/nzbToHeadPhones.py index 26cf2e3e..8b1b3067 100755 --- a/nzbToHeadPhones.py +++ b/nzbToHeadPhones.py @@ -1,8 +1,8 @@ #!/usr/bin/env python2 # coding=utf-8 - -# ############################################################################## -# ### NZBGET POST-PROCESSING SCRIPT ### +# +############################################################################## +### NZBGET POST-PROCESSING SCRIPT ### # Post-Process to HeadPhones. # @@ -10,107 +10,107 @@ # # NOTE: This script requires Python to be installed on your system. -# ############################################################################## -# ### OPTIONS +############################################################################## +### OPTIONS -# ## General +## General # Auto Update nzbToMedia (0, 1). # # Set to 1 if you want nzbToMedia to automatically check for and update to the latest version -# auto_update=0 +#auto_update=0 # Safe Mode protection of DestDir (0, 1). # # Enable/Disable a safety check to ensure we don't process all downloads in the default_downloadDirectory by mistake. -# safe_mode=1 +#safe_mode=1 -# ## HeadPhones +## HeadPhones # HeadPhones script category. # # category that gets called for post-processing with HeadHones. -# hpCategory=music +#hpCategory=music # HeadPhones api key. -# hpapikey= +#hpapikey= # HeadPhones host. # # The ipaddress for your HeadPhones server. e.g For the Same system use localhost or 127.0.0.1 -# hphost=localhost +#hphost=localhost # HeadPhones port. -# hpport=8181 +#hpport=8181 # HeadPhones uses ssl (0, 1). # # Set to 1 if using ssl, else set to 0. 
-# hpssl=0 +#hpssl=0 # HeadPhones web_root # # set this if using a reverse proxy. -# hpweb_root= +#hpweb_root= # HeadPhones watch directory. # # set this to where your HeadPhones completed downloads are. -# hpwatch_dir= +#hpwatch_dir= # HeadPhones wait_for # # Set the number of minutes to wait after initiating HeadPhones post-processing to check if the album status has changed. -# hpwait_for=2 +#hpwait_for=2 # HeadPhones and NZBGet are a different system (0, 1). # # Enable to replace local path with the path as per the mountPoints below. -# hpremote_path=0 +#hpremote_path=0 -# ## Posix +## Posix # Niceness for external tasks Extractor and Transcoder. # # Set the Niceness value for the nice command. These range from -20 (most favorable to the process) to 19 (least favorable to the process). -# niceness=10 +#niceness=10 # ionice scheduling class (0, 1, 2, 3). # # Set the ionice scheduling class. 0 for none, 1 for real time, 2 for best-effort, 3 for idle. -# ionice_class=2 +#ionice_class=2 # ionice scheduling class data. # # Set the ionice scheduling class data. This defines the class data, if the class accepts an argument. For real time and best-effort, 0-7 is valid data. -# ionice_classdata=4 +#ionice_classdata=4 -# ## Network +## Network # Network Mount Points (Needed for remote path above) # # Enter Mount points as LocalPath,RemotePath and separate each pair with '|' # e.g. mountPoints=/volume1/Public/,E:\|/volume2/share/,\\NAS\ -# mountPoints= +#mountPoints= -# ## WakeOnLan +## WakeOnLan # use WOL (0, 1). # # set to 1 to send WOL broadcast to the mac and test the server (e.g. xbmc) on the host and port specified. -# wolwake=0 +#wolwake=0 # WOL MAC # # enter the mac address of the system to be woken. -# wolmac=00:01:2e:2D:64:e1 +#wolmac=00:01:2e:2D:64:e1 # Set the Host and Port of a server to verify system has woken. 
-# wolhost=192.168.1.37 -# wolport=80 +#wolhost=192.168.1.37 +#wolport=80 -# ### NZBGET POST-PROCESSING SCRIPT ### -# ############################################################################## +### NZBGET POST-PROCESSING SCRIPT ### +############################################################################## import sys import nzbToMedia diff --git a/nzbToMedia.py b/nzbToMedia.py index 8be0110b..65bb70c1 100755 --- a/nzbToMedia.py +++ b/nzbToMedia.py @@ -1,8 +1,8 @@ #!/usr/bin/env python2 # coding=utf-8 # -# ############################################################################## -# ### NZBGET POST-PROCESSING SCRIPT ### +############################################################################## +### NZBGET POST-PROCESSING SCRIPT ### # Post-Process to CouchPotato, SickBeard, NzbDrone, Mylar, Gamez, HeadPhones. # @@ -10,494 +10,494 @@ # # NOTE: This script requires Python to be installed on your system. -# ############################################################################## -# ### OPTIONS ### +############################################################################## +### OPTIONS ### -# ## General +## General # Auto Update nzbToMedia (0, 1). # # Set to 1 if you want nzbToMedia to automatically check for and update to the latest version -# auto_update=0 +#auto_update=0 # Check Media for corruption (0, 1). # # Enable/Disable media file checking using ffprobe. -# check_media=1 +#check_media=1 # Safe Mode protection of DestDir (0, 1). # # Enable/Disable a safety check to ensure we don't process all downloads in the default_downloadDirectory by mistake. -# safe_mode=1 +#safe_mode=1 -# ## CouchPotato +## CouchPotato # CouchPotato script category. # # category that gets called for post-processing with CouchPotatoServer. -# cpsCategory=movie +#cpsCategory=movie # CouchPotato api key. -# cpsapikey= +#cpsapikey= # CouchPotato host. # # The ipaddress for your CouchPotato server. 
e.g For the Same system use localhost or 127.0.0.1 -# cpshost=localhost +#cpshost=localhost # CouchPotato port. -# cpsport=5050 +#cpsport=5050 # CouchPotato uses ssl (0, 1). # # Set to 1 if using ssl, else set to 0. -# cpsssl=0 +#cpsssl=0 # CouchPotato URL_Base # # set this if using a reverse proxy. -# cpsweb_root= +#cpsweb_root= # CouchPotato Postprocess Method (renamer, manage). # # use "renamer" for CPS renamer (default) or "manage" to call a manage update. -# cpsmethod=renamer +#cpsmethod=renamer # CouchPotato Delete Failed Downloads (0, 1). # # set to 1 to delete failed, or 0 to leave files in place. -# cpsdelete_failed=0 +#cpsdelete_failed=0 # CouchPotato wait_for # # Set the number of minutes to wait after calling the renamer, to check the movie has changed status. -# cpswait_for=2 +#cpswait_for=2 # Couchpotato and NZBGet are a different system (0, 1). # # Enable to replace local path with the path as per the mountPoints below. -# cpsremote_path=0 +#cpsremote_path=0 -# ## SickBeard +## SickBeard # SickBeard script category. # # category that gets called for post-processing with SickBeard. -# sbCategory=tv +#sbCategory=tv # SickBeard host. # # The ipaddress for your SickBeard/SickRage server. e.g For the Same system use localhost or 127.0.0.1 -# sbhost=localhost +#sbhost=localhost # SickBeard port. -# sbport=8081 +#sbport=8081 # SickBeard username. -# sbusername= +#sbusername= # SickBeard password. -# sbpassword= +#sbpassword= # SickBeard uses ssl (0, 1). # # Set to 1 if using ssl, else set to 0. -# sbssl=0 +#sbssl=0 # SickBeard web_root # # set this if using a reverse proxy. -# sbweb_root= +#sbweb_root= # SickBeard watch directory. # # set this if SickBeard and nzbGet are on different systems. -# sbwatch_dir= +#sbwatch_dir= # SickBeard fork. # # set to default or auto to auto-detect the custom fork type. -# sbfork=auto +#sbfork=auto # SickBeard Delete Failed Downloads (0, 1). # # set to 1 to delete failed, or 0 to leave files in place. 
-# sbdelete_failed=0 +#sbdelete_failed=0 # SickBeard process method. # # set this to move, copy, hardlink, symlink as appropriate if you want to over-ride SB defaults. Leave blank to use SB default. -# sbprocess_method= +#sbprocess_method= # SickBeard and NZBGet are a different system (0, 1). # # Enable to replace local path with the path as per the mountPoints below. -# sbremote_path=0 +#sbremote_path=0 -# ## NzbDrone +## NzbDrone # NzbDrone script category. # # category that gets called for post-processing with NzbDrone. -# ndCategory=tv2 +#ndCategory=tv2 # NzbDrone host. # # The ipaddress for your NzbDrone/Sonarr server. e.g For the Same system use localhost or 127.0.0.1 -# ndhost=localhost +#ndhost=localhost # NzbDrone port. -# ndport=8989 +#ndport=8989 # NzbDrone API key. -# ndapikey= +#ndapikey= # NzbDrone uses SSL (0, 1). # # Set to 1 if using SSL, else set to 0. -# ndssl=0 +#ndssl=0 # NzbDrone web root. # # set this if using a reverse proxy. -# ndweb_root= +#ndweb_root= # NzbDrone wait_for # # Set the number of minutes to wait after calling the renamer, to check the episode has changed status. -# ndwait_for=2 +#ndwait_for=2 # NzbDrone Delete Failed Downloads (0, 1). # # set to 1 to delete failed, or 0 to leave files in place. -# nddelete_failed=0 +#nddelete_failed=0 # NzbDrone and NZBGet are a different system (0, 1). # # Enable to replace local path with the path as per the mountPoints below. -# ndremote_path=0 +#ndremote_path=0 -# ## HeadPhones +## HeadPhones # HeadPhones script category. # # category that gets called for post-processing with HeadHones. -# hpCategory=music +#hpCategory=music # HeadPhones api key. -# hpapikey= +#hpapikey= # HeadPhones host. # # The ipaddress for your HeadPhones server. e.g For the Same system use localhost or 127.0.0.1 -# hphost=localhost +#hphost=localhost # HeadPhones port. -# hpport=8181 +#hpport=8181 # HeadPhones uses ssl (0, 1). # # Set to 1 if using ssl, else set to 0. 
-# hpssl=0 +#hpssl=0 # HeadPhones web_root # # set this if using a reverse proxy. -# hpweb_root= +#hpweb_root= # HeadPhones and NZBGet are a different system (0, 1). # # Enable to replace local path with the path as per the mountPoints below. -# hpremote_path=0 +#hpremote_path=0 -# ## Mylar +## Mylar # Mylar script category. # # category that gets called for post-processing with Mylar. -# myCategory=comics +#myCategory=comics # Mylar host. # # The ipaddress for your Mylar server. e.g For the Same system use localhost or 127.0.0.1 -# myhost=localhost +#myhost=localhost # Mylar port. -# myport=8090 +#myport=8090 # Mylar username. -# myusername= +#myusername= # Mylar password. -# mypassword= +#mypassword= # Mylar uses ssl (0, 1). # # Set to 1 if using ssl, else set to 0. -# myssl=0 +#myssl=0 # Mylar web_root # # set this if using a reverse proxy. -# myweb_root= +#myweb_root= # Mylar wait_for # # Set the number of minutes to wait after calling the force process, to check the issue has changed status. -# myswait_for=1 +#myswait_for=1 # Mylar and NZBGet are a different system (0, 1). # # Enable to replace local path with the path as per the mountPoints below. -# myremote_path=0 +#myremote_path=0 -# ## Gamez +## Gamez # Gamez script category. # # category that gets called for post-processing with Gamez. -# gzCategory=games +#gzCategory=games # Gamez api key. -# gzapikey= +#gzapikey= # Gamez host. # # The ipaddress for your Gamez server. e.g For the Same system use localhost or 127.0.0.1 -# gzhost=localhost +#gzhost=localhost # Gamez port. -# gzport=8085 +#gzport=8085 # Gamez uses ssl (0, 1). # # Set to 1 if using ssl, else set to 0. -# gzssl=0 +#gzssl=0 # Gamez library # # move downloaded games here. -# gzlibrary +#gzlibrary # Gamez web_root # # set this if using a reverse proxy. -# gzweb_root= +#gzweb_root= # Gamez and NZBGet are a different system (0, 1). # # Enable to replace local path with the path as per the mountPoints below. 
-# gzremote_path=0 +#gzremote_path=0 -# ## Network +## Network # Network Mount Points (Needed for remote path above) # # Enter Mount points as LocalPath,RemotePath and separate each pair with '|' # e.g. mountPoints=/volume1/Public/,E:\|/volume2/share/,\\NAS\ -# mountPoints= +#mountPoints= -# ## Extensions +## Extensions # Media Extensions # # This is a list of media extensions that are used to verify that the download does contain valid media. -# mediaExtensions=.mkv,.avi,.divx,.xvid,.mov,.wmv,.mp4,.mpg,.mpeg,.vob,.iso,.ts +#mediaExtensions=.mkv,.avi,.divx,.xvid,.mov,.wmv,.mp4,.mpg,.mpeg,.vob,.iso,.ts -# ## Posix +## Posix # Niceness for external tasks Extractor and Transcoder. # # Set the Niceness value for the nice command. These range from -20 (most favorable to the process) to 19 (least favorable to the process). -# niceness=10 +#niceness=10 # ionice scheduling class (0, 1, 2, 3). # # Set the ionice scheduling class. 0 for none, 1 for real time, 2 for best-effort, 3 for idle. -# ionice_class=2 +#ionice_class=2 # ionice scheduling class data. # # Set the ionice scheduling class data. This defines the class data, if the class accepts an argument. For real time and best-effort, 0-7 is valid data. -# ionice_classdata=4 +#ionice_classdata=4 -# ## Transcoder +## Transcoder # getSubs (0, 1). # # set to 1 to download subtitles. -# getSubs=0 +#getSubs=0 # subLanguages. # # subLanguages. create a list of languages in the order you want them in your subtitles. -# subLanguages=eng,spa,fra +#subLanguages=eng,spa,fra # Transcode (0, 1). # # set to 1 to transcode, otherwise set to 0. -# transcode=0 +#transcode=0 # create a duplicate, or replace the original (0, 1). # # set to 1 to cretae a new file or 0 to replace the original -# duplicate=1 +#duplicate=1 # ignore extensions. # # list of extensions that won't be transcoded. -# ignoreExtensions=.avi,.mkv +#ignoreExtensions=.avi,.mkv # outputFastStart (0,1). # # outputFastStart. 1 will use -movflags + faststart. 
0 will disable this from being used. -# outputFastStart=0 +#outputFastStart=0 # outputVideoPath. # # outputVideoPath. Set path you want transcoded videos moved to. Leave blank to disable. -# outputVideoPath= +#outputVideoPath= # processOutput (0,1). # # processOutput. 1 will send the outputVideoPath to SickBeard/CouchPotato. 0 will send original files. -# processOutput=0 +#processOutput=0 # audioLanguage. # # audioLanguage. set the 3 letter language code you want as your primary audio track. -# audioLanguage=eng +#audioLanguage=eng # allAudioLanguages (0,1). # # allAudioLanguages. 1 will keep all audio tracks (uses AudioCodec3) where available. -# allAudioLanguages=0 +#allAudioLanguages=0 # allSubLanguages (0,1). # # allSubLanguages. 1 will keep all exisiting sub languages. 0 will discare those not in your list above. -# allSubLanguages=0 +#allSubLanguages=0 # embedSubs (0,1). # # embedSubs. 1 will embded external sub/srt subs into your video if this is supported. -# embedSubs=1 +#embedSubs=1 # burnInSubtitle (0,1). # # burnInSubtitle. burns the default sub language into your video (needed for players that don't support subs) -# burnInSubtitle=0 +#burnInSubtitle=0 # extractSubs (0,1). # # extractSubs. 1 will extract subs from the video file and save these as external srt files. -# extractSubs=0 +#extractSubs=0 # externalSubDir. # # externalSubDir. set the directory where subs should be saved (if not the same directory as the video) -# externalSubDir= +#externalSubDir= # outputDefault (None, iPad, iPad-1080p, iPad-720p, Apple-TV2, iPod, iPhone, PS3, xbox, Roku-1080p, Roku-720p, Roku-480p, mkv, mp4-scene-release). # # outputDefault. Loads default configs for the selected device. The remaining options below are ignored. # If you want to use your own profile, set None and set the remaining options below. -# outputDefault=None +#outputDefault=None # hwAccel (0,1). # # hwAccel. 1 will set ffmpeg to enable hardware acceleration (this requires a recent ffmpeg). 
-# hwAccel=0 +#hwAccel=0 # ffmpeg output settings. -# outputVideoExtension=.mp4 -# outputVideoCodec=libx264 -# VideoCodecAllow= -# outputVideoPreset=medium -# outputVideoFramerate=24 -# outputVideoBitrate=800k -# outputAudioCodec=ac3 -# AudioCodecAllow= -# outputAudioChannels=6 -# outputAudioBitrate=640k -# outputQualityPercent= -# outputAudioTrack2Codec=libfaac -# AudioCodec2Allow= -# outputAudioTrack2Channels=2 -# outputAudioTrack2Bitrate=160k -# outputAudioOtherCodec=libmp3lame -# AudioOtherCodecAllow= -# outputAudioOtherChannels=2 -# outputAudioOtherBitrate=128k -# outputSubtitleCodec= +#outputVideoExtension=.mp4 +#outputVideoCodec=libx264 +#VideoCodecAllow= +#outputVideoPreset=medium +#outputVideoFramerate=24 +#outputVideoBitrate=800k +#outputAudioCodec=ac3 +#AudioCodecAllow= +#outputAudioChannels=6 +#outputAudioBitrate=640k +#outputQualityPercent= +#outputAudioTrack2Codec=libfaac +#AudioCodec2Allow= +#outputAudioTrack2Channels=2 +#outputAudioTrack2Bitrate=160k +#outputAudioOtherCodec=libmp3lame +#AudioOtherCodecAllow= +#outputAudioOtherChannels=2 +#outputAudioOtherBitrate=128k +#outputSubtitleCodec= -# ## WakeOnLan +## WakeOnLan # use WOL (0, 1). # # set to 1 to send WOL broadcast to the mac and test the server (e.g. xbmc) on the host and port specified. -# wolwake=0 +#wolwake=0 # WOL MAC # # enter the mac address of the system to be woken. -# wolmac=00:01:2e:2D:64:e1 +#wolmac=00:01:2e:2D:64:e1 # Set the Host and Port of a server to verify system has woken. -# wolhost=192.168.1.37 -# wolport=80 +#wolhost=192.168.1.37 +#wolport=80 -# ## UserScript +## UserScript # User Script category. # # category that gets called for post-processing with user script (accepts "UNCAT", "ALL", or a defined category). -# usCategory=mine +#usCategory=mine # User Script Remote Path (0,1). # # Script calls commands on another system. -# usremote_path=0 +#usremote_path=0 # User Script extensions. # # What extension do you want to process? 
Specify all the extension, or use "ALL" to process all files. -# user_script_mediaExtensions=.mkv,.avi,.divx,.xvid,.mov,.wmv,.mp4,.mpg,.mpeg +#user_script_mediaExtensions=.mkv,.avi,.divx,.xvid,.mov,.wmv,.mp4,.mpg,.mpeg # User Script Path # # Specify the path to your custom script. -# user_script_path=/nzbToMedia/userscripts/script.sh +#user_script_path=/nzbToMedia/userscripts/script.sh # User Script arguments. # # Specify the argument(s) passed to script, comma separated in order. # for example FP,FN,DN, TN, TL for file path (absolute file name with path), file name, absolute directory name (with path), Torrent Name, Torrent Label/Category. # So the result is /media/test/script/script.sh FP FN DN TN TL. Add other arguments as needed eg -f, -r -# user_script_param=FN +#user_script_param=FN # User Script Run Once (0,1). # # Set user_script_runOnce = 0 to run for each file, or 1 to only run once (presumably on teh entire directory). -# user_script_runOnce=0 +#user_script_runOnce=0 # User Script Success Codes. # # Specify the successcodes returned by the user script as a comma separated list. Linux default is 0 -# user_script_successCodes=0 +#user_script_successCodes=0 # User Script Clean After (0,1). # # Clean after? Note that delay function is used to prevent possible mistake :) Delay is intended as seconds -# user_script_clean=1 +#user_script_clean=1 # User Script Delay. # # Delay in seconds after processing. 
-# usdelay=120 +#usdelay=120 -# ### NZBGET POST-PROCESSING SCRIPT ### -# ############################################################################## +### NZBGET POST-PROCESSING SCRIPT ### +############################################################################## from __future__ import print_function diff --git a/nzbToMylar.py b/nzbToMylar.py index 5d566123..9af60f9a 100755 --- a/nzbToMylar.py +++ b/nzbToMylar.py @@ -1,8 +1,8 @@ #!/usr/bin/env python2 # coding=utf-8 - -# ############################################################################## -# ### NZBGET POST-PROCESSING SCRIPT ### +# +############################################################################## +### NZBGET POST-PROCESSING SCRIPT ### # Post-Process to Mylar. # @@ -10,111 +10,111 @@ # # NOTE: This script requires Python to be installed on your system. -# ############################################################################## +############################################################################## # -# ### OPTIONS +### OPTIONS -# ## General +## General # Auto Update nzbToMedia (0, 1). # # Set to 1 if you want nzbToMedia to automatically check for and update to the latest version -# auto_update=0 +#auto_update=0 # Safe Mode protection of DestDir (0, 1). # # Enable/Disable a safety check to ensure we don't process all downloads in the default_downloadDirectory by mistake. -# safe_mode=1 +#safe_mode=1 -# ## Mylar +## Mylar # Mylar script category. # # category that gets called for post-processing with Mylar. -# myCategory=comics +#myCategory=comics # Mylar host. # -# The ip address for your Mylar server. e.g For the Same system use localhost or 127.0.0.1 -# myhost=localhost +# The ipaddress for your Mylar server. e.g For the Same system use localhost or 127.0.0.1 +#myhost=localhost # Mylar port. -# myport=8090 +#myport=8090 # Mylar username. -# myusername= +#myusername= # Mylar password. -# mypassword= +#mypassword= # Mylar uses ssl (0, 1). 
# # Set to 1 if using ssl, else set to 0. -# myssl=0 +#myssl=0 # Mylar web_root # # set this if using a reverse proxy. -# myweb_root= +#myweb_root= # Mylar wait_for # # Set the number of minutes to wait after calling the force process, to check the issue has changed status. -# myswait_for=1 +#myswait_for=1 # Mylar watch directory. # # set this to where your Mylar completed downloads are. -# mywatch_dir= +#mywatch_dir= # Mylar and NZBGet are a different system (0, 1). # # Enable to replace local path with the path as per the mountPoints below. -# myremote_path=0 +#myremote_path=0 -# ## Posix +## Posix # Niceness for external tasks Extractor and Transcoder. # # Set the Niceness value for the nice command. These range from -20 (most favorable to the process) to 19 (least favorable to the process). -# niceness=10 +#niceness=10 # ionice scheduling class (0, 1, 2, 3). # # Set the ionice scheduling class. 0 for none, 1 for real time, 2 for best-effort, 3 for idle. -# ionice_class=2 +#ionice_class=2 # ionice scheduling class data. # # Set the ionice scheduling class data. This defines the class data, if the class accepts an argument. For real time and best-effort, 0-7 is valid data. -# ionice_classdata=4 +#ionice_classdata=4 -# ## Network +## Network # Network Mount Points (Needed for remote path above) # # Enter Mount points as LocalPath,RemotePath and separate each pair with '|' # e.g. mountPoints=/volume1/Public/,E:\|/volume2/share/,\\NAS\ -# mountPoints= +#mountPoints= -# ## WakeOnLan +## WakeOnLan # use WOL (0, 1). # # set to 1 to send WOL broadcast to the mac and test the server (e.g. xbmc) on the host and port specified. -# wolwake=0 +#wolwake=0 # WOL MAC # # enter the mac address of the system to be woken. -# wolmac=00:01:2e:2D:64:e1 +#wolmac=00:01:2e:2D:64:e1 # Set the Host and Port of a server to verify system has woken. 
-# wolhost=192.168.1.37 -# wolport=80 +#wolhost=192.168.1.37 +#wolport=80 -# ### NZBGET POST-PROCESSING SCRIPT ### -# ############################################################################## +### NZBGET POST-PROCESSING SCRIPT ### +############################################################################## import sys import nzbToMedia diff --git a/nzbToNzbDrone.py b/nzbToNzbDrone.py index 549bb9e3..e8183a27 100755 --- a/nzbToNzbDrone.py +++ b/nzbToNzbDrone.py @@ -1,8 +1,8 @@ #!/usr/bin/env python2 # coding=utf-8 - -# ############################################################################## -# ### NZBGET POST-PROCESSING SCRIPT ### +# +############################################################################## +### NZBGET POST-PROCESSING SCRIPT ### # Post-Process to NzbDrone. # @@ -10,226 +10,226 @@ # # NOTE: This script requires Python to be installed on your system. -# ############################################################################## -# ### OPTIONS ### +############################################################################## +### OPTIONS ### -# ## General +## General # Auto Update nzbToMedia (0, 1). # # Set to 1 if you want nzbToMedia to automatically check for and update to the latest version -# auto_update=0 +#auto_update=0 # Check Media for corruption (0, 1). # # Enable/Disable media file checking using ffprobe. -# check_media=1 +#check_media=1 # Safe Mode protection of DestDir (0, 1). # # Enable/Disable a safety check to ensure we don't process all downloads in the default_downloadDirectory by mistake. -# safe_mode=1 +#safe_mode=1 -# ## NzbDrone +## NzbDrone # NzbDrone script category. # # category that gets called for post-processing with NzbDrone. -# ndCategory=tv2 +#ndCategory=tv2 # NzbDrone host. # -# The ip address for your NzbDrone/Sonarr server. e.g For the Same system use localhost or 127.0.0.1 -# ndhost=localhost +# The ipaddress for your NzbDrone/Sonarr server. 
e.g For the Same system use localhost or 127.0.0.1 +#ndhost=localhost # NzbDrone port. -# ndport=8989 +#ndport=8989 # NzbDrone API key. -# ndapikey= +#ndapikey= # NzbDrone uses ssl (0, 1). # # Set to 1 if using ssl, else set to 0. -# ndssl=0 +#ndssl=0 # NzbDrone web_root # # set this if using a reverse proxy. -# ndweb_root= +#ndweb_root= # NzbDrone wait_for # # Set the number of minutes to wait after calling the renamer, to check the episode has changed status. -# ndwait_for=2 +#ndwait_for=2 # NzbDrone Delete Failed Downloads (0, 1). # # set to 1 to delete failed, or 0 to leave files in place. -# nddelete_failed=0 +#nddelete_failed=0 # NzbDrone and NZBGet are a different system (0, 1). # # Enable to replace local path with the path as per the mountPoints below. -# ndremote_path=0 +#ndremote_path=0 -# ## Network +## Network # Network Mount Points (Needed for remote path above) # # Enter Mount points as LocalPath,RemotePath and separate each pair with '|' # e.g. mountPoints=/volume1/Public/,E:\|/volume2/share/,\\NAS\ -# mountPoints= +#mountPoints= -# ## Extensions +## Extensions # Media Extensions # # This is a list of media extensions that are used to verify that the download does contain valid media. -# mediaExtensions=.mkv,.avi,.divx,.xvid,.mov,.wmv,.mp4,.mpg,.mpeg,.vob,.iso,.ts +#mediaExtensions=.mkv,.avi,.divx,.xvid,.mov,.wmv,.mp4,.mpg,.mpeg,.vob,.iso,.ts -# ## Posix +## Posix # Niceness for external tasks Extractor and Transcoder. # # Set the Niceness value for the nice command. These range from -20 (most favorable to the process) to 19 (least favorable to the process). -# niceness=10 +#niceness=10 # ionice scheduling class (0, 1, 2, 3). # # Set the ionice scheduling class. 0 for none, 1 for real time, 2 for best-effort, 3 for idle. -# ionice_class=2 +#ionice_class=2 # ionice scheduling class data. # # Set the ionice scheduling class data. This defines the class data, if the class accepts an argument. For real time and best-effort, 0-7 is valid data. 
-# ionice_classdata=4 +#ionice_classdata=4 -# ## Transcoder +## Transcoder # getSubs (0, 1). # # set to 1 to download subtitles. -# getSubs = 0 +#getSubs = 0 # subLanguages. # # subLanguages. create a list of languages in the order you want them in your subtitles. -# subLanguages = eng,spa,fra +#subLanguages = eng,spa,fra # Transcode (0, 1). # # set to 1 to transcode, otherwise set to 0. -# transcode=0 +#transcode=0 # create a duplicate, or replace the original (0, 1). # -# set to 1 to create a new file or 0 to replace the original -# duplicate=1 +# set to 1 to cretae a new file or 0 to replace the original +#duplicate=1 # ignore extensions. # # list of extensions that won't be transcoded. -# ignoreExtensions=.avi,.mkv +#ignoreExtensions=.avi,.mkv # outputFastStart (0,1). # # outputFastStart. 1 will use -movflags + faststart. 0 will disable this from being used. -# outputFastStart = 0 +#outputFastStart = 0 # outputVideoPath. # # outputVideoPath. Set path you want transcoded videos moved to. Leave blank to disable. -# outputVideoPath = +#outputVideoPath = # processOutput (0,1). # # processOutput. 1 will send the outputVideoPath to SickBeard/CouchPotato. 0 will send original files. -# processOutput = 0 +#processOutput = 0 # audioLanguage. # # audioLanguage. set the 3 letter language code you want as your primary audio track. -# audioLanguage = eng +#audioLanguage = eng # allAudioLanguages (0,1). # # allAudioLanguages. 1 will keep all audio tracks (uses AudioCodec3) where available. -# allAudioLanguages = 0 +#allAudioLanguages = 0 # allSubLanguages (0,1). # -# allSubLanguages. 1 will keep all existing sub languages. 0 will discard those not in your list above. -# allSubLanguages = 0 +# allSubLanguages. 1 will keep all exisiting sub languages. 0 will discare those not in your list above. +#allSubLanguages = 0 # embedSubs (0,1). # -# embedSubs. 1 will embed external sub/srt subs into your video if this is supported. -# embedSubs = 1 +# embedSubs. 
1 will embded external sub/srt subs into your video if this is supported. +#embedSubs = 1 # burnInSubtitle (0,1). # # burnInSubtitle. burns the default sub language into your video (needed for players that don't support subs) -# burnInSubtitle = 0 +#burnInSubtitle = 0 # extractSubs (0,1). # # extractSubs. 1 will extract subs from the video file and save these as external srt files. -# extractSubs = 0 +#extractSubs = 0 # externalSubDir. # # externalSubDir. set the directory where subs should be saved (if not the same directory as the video) -# externalSubDir = +#externalSubDir = # outputDefault (None, iPad, iPad-1080p, iPad-720p, Apple-TV2, iPod, iPhone, PS3, xbox, Roku-1080p, Roku-720p, Roku-480p, mkv, mp4-scene-release). # # outputDefault. Loads default configs for the selected device. The remaining options below are ignored. # If you want to use your own profile, set None and set the remaining options below. -# outputDefault = None +#outputDefault = None # hwAccel (0,1). # # hwAccel. 1 will set ffmpeg to enable hardware acceleration (this requires a recent ffmpeg). -# hwAccel=0 +#hwAccel=0 # ffmpeg output settings. 
-# outputVideoExtension=.mp4 -# outputVideoCodec=libx264 -# VideoCodecAllow = -# outputVideoPreset=medium -# outputVideoFramerate=24 -# outputVideoBitrate=800k -# outputAudioCodec=libmp3lame -# AudioCodecAllow = -# outputAudioBitrate=128k -# outputQualityPercent = 0 -# outputAudioTrack2Codec = libfaac -# AudioCodec2Allow = -# outputAudioTrack2Bitrate = 128k -# outputAudioOtherCodec = libmp3lame -# AudioOtherCodecAllow = -# outputAudioOtherBitrate = 128k -# outputSubtitleCodec = +#outputVideoExtension=.mp4 +#outputVideoCodec=libx264 +#VideoCodecAllow = +#outputVideoPreset=medium +#outputVideoFramerate=24 +#outputVideoBitrate=800k +#outputAudioCodec=libmp3lame +#AudioCodecAllow = +#outputAudioBitrate=128k +#outputQualityPercent = 0 +#outputAudioTrack2Codec = libfaac +#AudioCodec2Allow = +#outputAudioTrack2Bitrate = 128k +#outputAudioOtherCodec = libmp3lame +#AudioOtherCodecAllow = +#outputAudioOtherBitrate = 128k +#outputSubtitleCodec = -# ## WakeOnLan +## WakeOnLan # use WOL (0, 1). # # set to 1 to send WOL broadcast to the mac and test the server (e.g. xbmc) on the host and port specified. -# wolwake=0 +#wolwake=0 # WOL MAC # # enter the mac address of the system to be woken. -# wolmac=00:01:2e:2D:64:e1 +#wolmac=00:01:2e:2D:64:e1 # Set the Host and Port of a server to verify system has woken. 
-# wolhost=192.168.1.37 -# wolport=80 +#wolhost=192.168.1.37 +#wolport=80 -# ### NZBGET POST-PROCESSING SCRIPT ### -# ############################################################################## +### NZBGET POST-PROCESSING SCRIPT ### +############################################################################## import sys import nzbToMedia diff --git a/nzbToSickBeard.py b/nzbToSickBeard.py index 5a0a03a6..aa249aa2 100755 --- a/nzbToSickBeard.py +++ b/nzbToSickBeard.py @@ -1,8 +1,8 @@ #!/usr/bin/env python2 # coding=utf-8 - -# ############################################################################## -# ### NZBGET POST-PROCESSING SCRIPT ### +# +############################################################################## +### NZBGET POST-PROCESSING SCRIPT ### # Post-Process to SickBeard. # @@ -10,242 +10,242 @@ # # NOTE: This script requires Python to be installed on your system. -# ############################################################################## -# ### OPTIONS ### +############################################################################## +### OPTIONS ### -# ## General +## General # Auto Update nzbToMedia (0, 1). # # Set to 1 if you want nzbToMedia to automatically check for and update to the latest version -# auto_update=0 +#auto_update=0 # Check Media for corruption (0, 1). # # Enable/Disable media file checking using ffprobe. -# check_media=1 +#check_media=1 # Safe Mode protection of DestDir (0, 1). # # Enable/Disable a safety check to ensure we don't process all downloads in the default_downloadDirectory by mistake. -# safe_mode=1 +#safe_mode=1 -# ## SickBeard +## SickBeard # SickBeard script category. # # category that gets called for post-processing with SickBeard. -# sbCategory=tv +#sbCategory=tv # SickBeard host. # -# The ip address for your SickBeard/SickRage server. e.g For the Same system use localhost or 127.0.0.1 -# sbhost=localhost +# The ipaddress for your SickBeard/SickRage server. 
e.g For the Same system use localhost or 127.0.0.1 +#sbhost=localhost # SickBeard port. -# sbport=8081 +#sbport=8081 # SickBeard username. -# sbusername= +#sbusername= # SickBeard password. -# sbpassword= +#sbpassword= # SickBeard uses ssl (0, 1). # # Set to 1 if using ssl, else set to 0. -# sbssl=0 +#sbssl=0 # SickBeard web_root # # set this if using a reverse proxy. -# sbweb_root= +#sbweb_root= # SickBeard watch directory. # # set this to where your SickBeard completed downloads are. -# sbwatch_dir= +#sbwatch_dir= # SickBeard fork. # # set to default or auto to auto-detect the custom fork type. -# sbfork=auto +#sbfork=auto # SickBeard Delete Failed Downloads (0, 1). # # set to 1 to delete failed, or 0 to leave files in place. -# sbdelete_failed=0 +#sbdelete_failed=0 # SickBeard process method. # # set this to move, copy, hardlink, symlink as appropriate if you want to over-ride SB defaults. Leave blank to use SB default. -# sbprocess_method= +#sbprocess_method= # SickBeard and NZBGet are a different system (0, 1). # # Enable to replace local path with the path as per the mountPoints below. -# sbremote_path=0 +#sbremote_path=0 -# ## Network +## Network # Network Mount Points (Needed for remote path above) # # Enter Mount points as LocalPath,RemotePath and separate each pair with '|' # e.g. mountPoints=/volume1/Public/,E:\|/volume2/share/,\\NAS\ -# mountPoints= +#mountPoints= -# ## Extensions +## Extensions # Media Extensions # # This is a list of media extensions that are used to verify that the download does contain valid media. -# mediaExtensions=.mkv,.avi,.divx,.xvid,.mov,.wmv,.mp4,.mpg,.mpeg,.vob,.iso,.ts +#mediaExtensions=.mkv,.avi,.divx,.xvid,.mov,.wmv,.mp4,.mpg,.mpeg,.vob,.iso,.ts -# ## Posix +## Posix # Niceness for external tasks Extractor and Transcoder. # # Set the Niceness value for the nice command. These range from -20 (most favorable to the process) to 19 (least favorable to the process). 
-# niceness=10 +#niceness=10 # ionice scheduling class (0, 1, 2, 3). # # Set the ionice scheduling class. 0 for none, 1 for real time, 2 for best-effort, 3 for idle. -# ionice_class=2 +#ionice_class=2 # ionice scheduling class data. # # Set the ionice scheduling class data. This defines the class data, if the class accepts an argument. For real time and best-effort, 0-7 is valid data. -# ionice_classdata=4 +#ionice_classdata=4 -# ## Transcoder +## Transcoder # getSubs (0, 1). # # set to 1 to download subtitles. -# getSubs=0 +#getSubs=0 # subLanguages. # # subLanguages. create a list of languages in the order you want them in your subtitles. -# subLanguages=eng,spa,fra +#subLanguages=eng,spa,fra # Transcode (0, 1). # # set to 1 to transcode, otherwise set to 0. -# transcode=0 +#transcode=0 # create a duplicate, or replace the original (0, 1). # -# set to 1 to create a new file or 0 to replace the original -# duplicate=1 +# set to 1 to cretae a new file or 0 to replace the original +#duplicate=1 # ignore extensions. # # list of extensions that won't be transcoded. -# ignoreExtensions=.avi,.mkv +#ignoreExtensions=.avi,.mkv # outputFastStart (0,1). # # outputFastStart. 1 will use -movflags + faststart. 0 will disable this from being used. -# outputFastStart=0 +#outputFastStart=0 # outputVideoPath. # # outputVideoPath. Set path you want transcoded videos moved to. Leave blank to disable. -# outputVideoPath= +#outputVideoPath= # processOutput (0,1). # # processOutput. 1 will send the outputVideoPath to SickBeard/CouchPotato. 0 will send original files. -# processOutput=0 +#processOutput=0 # audioLanguage. # # audioLanguage. set the 3 letter language code you want as your primary audio track. -# audioLanguage=eng +#audioLanguage=eng # allAudioLanguages (0,1). # # allAudioLanguages. 1 will keep all audio tracks (uses AudioCodec3) where available. -# allAudioLanguages=0 +#allAudioLanguages=0 # allSubLanguages (0,1). # -# allSubLanguages. 
1 will keep all existing sub languages. 0 will discard those not in your list above. -# allSubLanguages=0 +# allSubLanguages. 1 will keep all exisiting sub languages. 0 will discare those not in your list above. +#allSubLanguages=0 # embedSubs (0,1). # -# embedSubs. 1 will embed external sub/srt subs into your video if this is supported. -# embedSubs=1 +# embedSubs. 1 will embded external sub/srt subs into your video if this is supported. +#embedSubs=1 # burnInSubtitle (0,1). # # burnInSubtitle. burns the default sub language into your video (needed for players that don't support subs) -# burnInSubtitle=0 +#burnInSubtitle=0 # extractSubs (0,1). # # extractSubs. 1 will extract subs from the video file and save these as external srt files. -# extractSubs=0 +#extractSubs=0 # externalSubDir. # # externalSubDir. set the directory where subs should be saved (if not the same directory as the video) -# externalSubDir= +#externalSubDir= # outputDefault (None, iPad, iPad-1080p, iPad-720p, Apple-TV2, iPod, iPhone, PS3, xbox, Roku-1080p, Roku-720p, Roku-480p, mkv, mp4-scene-release). # # outputDefault. Loads default configs for the selected device. The remaining options below are ignored. # If you want to use your own profile, set None and set the remaining options below. -# outputDefault=None +#outputDefault=None # hwAccel (0,1). # # hwAccel. 1 will set ffmpeg to enable hardware acceleration (this requires a recent ffmpeg). -# hwAccel=0 +#hwAccel=0 # ffmpeg output settings. 
-# outputVideoExtension=.mp4 -# outputVideoCodec=libx264 -# VideoCodecAllow= -# outputVideoPreset=medium -# outputVideoFramerate=24 -# outputVideoBitrate=800k -# outputAudioCodec=ac3 -# AudioCodecAllow= -# outputAudioChannels=6 -# outputAudioBitrate=640k -# outputQualityPercent= -# outputAudioTrack2Codec=libfaac -# AudioCodec2Allow= -# outputAudioTrack2Channels=2 -# outputAudioTrack2Bitrate=160k -# outputAudioOtherCodec=libmp3lame -# AudioOtherCodecAllow= -# outputAudioOtherChannels=2 -# outputAudioOtherBitrate=128k -# outputSubtitleCodec= +#outputVideoExtension=.mp4 +#outputVideoCodec=libx264 +#VideoCodecAllow= +#outputVideoPreset=medium +#outputVideoFramerate=24 +#outputVideoBitrate=800k +#outputAudioCodec=ac3 +#AudioCodecAllow= +#outputAudioChannels=6 +#outputAudioBitrate=640k +#outputQualityPercent= +#outputAudioTrack2Codec=libfaac +#AudioCodec2Allow= +#outputAudioTrack2Channels=2 +#outputAudioTrack2Bitrate=160k +#outputAudioOtherCodec=libmp3lame +#AudioOtherCodecAllow= +#outputAudioOtherChannels=2 +#outputAudioOtherBitrate=128k +#outputSubtitleCodec= -# ## WakeOnLan +## WakeOnLan # use WOL (0, 1). # # set to 1 to send WOL broadcast to the mac and test the server (e.g. xbmc) on the host and port specified. -# wolwake=0 +#wolwake=0 # WOL MAC # # enter the mac address of the system to be woken. -# wolmac=00:01:2e:2D:64:e1 +#wolmac=00:01:2e:2D:64:e1 # Set the Host and Port of a server to verify system has woken. 
-# wolhost=192.168.1.37 -# wolport=80 +#wolhost=192.168.1.37 +#wolport=80 -# ### NZBGET POST-PROCESSING SCRIPT ### -# ############################################################################## +### NZBGET POST-PROCESSING SCRIPT ### +############################################################################## import sys import nzbToMedia From 8861e3afa5a962a0d13d793238aa41ebe4bd8221 Mon Sep 17 00:00:00 2001 From: Labrys Date: Sat, 18 Jun 2016 20:57:34 -0400 Subject: [PATCH 60/82] PEP 8: Fix long line length --- TorrentToMedia.py | 101 ++++++++++++++++++++++++++-------------------- 1 file changed, 57 insertions(+), 44 deletions(-) diff --git a/TorrentToMedia.py b/TorrentToMedia.py index 6ef75ef3..8fb02d64 100755 --- a/TorrentToMedia.py +++ b/TorrentToMedia.py @@ -42,9 +42,9 @@ def processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID, logger.debug("Received Directory: {0} | Name: {1} | Category: {2}".format(inputDirectory, inputName, inputCategory)) - inputDirectory, inputName, inputCategory, root = core.category_search(inputDirectory, inputName, - inputCategory, root, - core.CATEGORIES) # Confirm the category by parsing directory structure + # Confirm the category by parsing directory structure + inputDirectory, inputName, inputCategory, root = core.category_search(inputDirectory, inputName, inputCategory, + root, core.CATEGORIES) if inputCategory == "": inputCategory = "UNCAT" @@ -58,32 +58,36 @@ def processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID, except UnicodeError: pass - logger.debug("Determined Directory: {0} | Name: {1} | Category: {2}".format(inputDirectory, inputName, inputCategory)) + logger.debug("Determined Directory: {0} | Name: {1} | Category: {2}".format + (inputDirectory, inputName, inputCategory)) # auto-detect section section = core.CFG.findsection(inputCategory).isenabled() if section is None: section = core.CFG.findsection("ALL").isenabled() if section is None: - logger.error( - 
'Category:[{0}] is not defined or is not enabled. Please rename it or ensure it is enabled for the appropriate section in your autoProcessMedia.cfg and try again.'.format( - inputCategory)) + logger.error('Category:[{0}] is not defined or is not enabled. ' + 'Please rename it or ensure it is enabled for the appropriate section ' + 'in your autoProcessMedia.cfg and try again.'.format + (inputCategory)) return [-1, ""] else: usercat = "ALL" if len(section) > 1: - logger.error( - 'Category:[{0}] is not unique, {1} are using it. Please rename it or disable all other sections using the same category name in your autoProcessMedia.cfg and try again.'.format( - usercat, section.keys())) + logger.error('Category:[{0}] is not unique, {1} are using it. ' + 'Please rename it or disable all other sections using the same category name ' + 'in your autoProcessMedia.cfg and try again.'.format + (usercat, section.keys())) return [-1, ""] if section: sectionName = section.keys()[0] logger.info('Auto-detected SECTION:{0}'.format(sectionName)) else: - logger.error("Unable to locate a section with subsection:{0} enabled in your autoProcessMedia.cfg, exiting!".format( - inputCategory)) + logger.error("Unable to locate a section with subsection:{0} " + "enabled in your autoProcessMedia.cfg, exiting!".format + (inputCategory)) return [-1, ""] Torrent_NoLink = int(section[usercat].get("Torrent_NoLink", 0)) @@ -91,7 +95,7 @@ def processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID, extract = int(section[usercat].get('extract', 0)) try: uniquePath = int(section[usercat].get("unique_path", 1)) - except: + except TypeError: uniquePath = 1 if clientAgent != 'manual': @@ -121,9 +125,9 @@ def processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID, logger.info("Output directory set to: {0}".format(outputDestination)) if core.SAFE_MODE and outputDestination == core.TORRENT_DEFAULTDIR: - logger.error( - 'The output directory:[{0}] is the Download 
Directory. Edit outputDirectory in autoProcessMedia.cfg. Exiting'.format( - inputDirectory)) + logger.error('The output directory:[{0}] is the Download Directory. ' + 'Edit outputDirectory in autoProcessMedia.cfg. Exiting'.format + (inputDirectory)) return [-1, ""] logger.debug("Scanning files in directory: {0}".format(inputDirectory)) @@ -149,8 +153,8 @@ def processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID, if not os.path.basename(filePath) in outputDestination: targetFile = core.os.path.join( core.os.path.join(outputDestination, os.path.basename(filePath)), fullFileName) - logger.debug( - "Setting outputDestination to {0} to preserve folder structure".format(os.path.dirname(targetFile))) + logger.debug("Setting outputDestination to {0} to preserve folder structure".format + (os.path.dirname(targetFile))) try: targetFile = targetFile.encode(core.SYS_ENCODING) except UnicodeError: @@ -161,7 +165,8 @@ def processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID, if any([core.sanitizeName(inputName) in core.sanitizeName(inputFile), core.sanitizeName(fileName) in core.sanitizeName(inputName)]): foundFile = True - logger.debug("Found file {0} that matches Torrent Name {1}".format(fullFileName, inputName)) + logger.debug("Found file {0} that matches Torrent Name {1}".format + (fullFileName, inputName)) else: continue @@ -173,7 +178,8 @@ def processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID, logger.debug("Looking for files with modified/created dates less than 5 minutes old.") if (mtime_lapse < datetime.timedelta(minutes=5)) or (ctime_lapse < datetime.timedelta(minutes=5)): foundFile = True - logger.debug("Found file {0} with date modified/created less than 5 minutes ago.".format(fullFileName)) + logger.debug("Found file {0} with date modified/created less than 5 minutes ago.".format + (fullFileName)) else: continue # This file has not been recently moved or created, skip it @@ -190,7 +196,8 @@ def 
processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID, logger.debug('Checking for archives to extract in directory: {0}'.format(inputDirectory)) core.extractFiles(inputDirectory, outputDestination, keep_archive) - if inputCategory not in core.NOFLATTEN: # don't flatten hp in case multi cd albums, and we need to copy this back later. + if inputCategory not in core.NOFLATTEN: + # don't flatten hp in case multi cd albums, and we need to copy this back later. core.flatten(outputDestination) # Now check if video files exist in destination: @@ -206,7 +213,8 @@ def processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID, else: logger.warning("Found no media files in {0}".format(outputDestination)) - # Only these sections can handling failed downloads so make sure everything else gets through without the check for failed + # Only these sections can handling failed downloads + # so make sure everything else gets through without the check for failed if sectionName not in ['CouchPotato', 'SickBeard', 'NzbDrone']: status = 0 @@ -220,29 +228,32 @@ def processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID, result = external_script(outputDestination, inputName, inputCategory, section[usercat]) elif sectionName == 'CouchPotato': - result = core.autoProcessMovie().process(sectionName, outputDestination, inputName, status, clientAgent, inputHash, - inputCategory) + result = core.autoProcessMovie().process(sectionName, outputDestination, inputName, + status, clientAgent, inputHash, inputCategory) elif sectionName in ['SickBeard', 'NzbDrone']: if inputHash: inputHash = inputHash.upper() - result = core.autoProcessTV().processEpisode(sectionName, outputDestination, inputName, status, clientAgent, - inputHash, inputCategory) + result = core.autoProcessTV().processEpisode(sectionName, outputDestination, inputName, + status, clientAgent, inputHash, inputCategory) elif sectionName == 'HeadPhones': - result = 
core.autoProcessMusic().process(sectionName, outputDestination, inputName, status, clientAgent, inputCategory) + result = core.autoProcessMusic().process(sectionName, outputDestination, inputName, + status, clientAgent, inputCategory) elif sectionName == 'Mylar': - result = core.autoProcessComics().processEpisode(sectionName, outputDestination, inputName, status, clientAgent, - inputCategory) + result = core.autoProcessComics().processEpisode(sectionName, outputDestination, inputName, + status, clientAgent, inputCategory) elif sectionName == 'Gamez': - result = core.autoProcessGames().process(sectionName, outputDestination, inputName, status, clientAgent, inputCategory) + result = core.autoProcessGames().process(sectionName, outputDestination, inputName, + status, clientAgent, inputCategory) plex_update(inputCategory) if result[0] != 0: if not core.TORRENT_RESUME_ON_FAILURE: - logger.error("A problem was reported in the autoProcess* script. torrent won't resume seeding (settings)") + logger.error("A problem was reported in the autoProcess* script. " + "Torrent won't resume seeding (settings)") elif clientAgent != 'manual': - logger.error( - "A problem was reported in the autoProcess* script. If torrent was paused we will resume seeding") + logger.error("A problem was reported in the autoProcess* script. 
" + "If torrent was paused we will resume seeding") core.resume_torrent(clientAgent, inputHash, inputID, inputName) else: @@ -259,7 +270,8 @@ def processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID, replace_links(os.path.join(dirpath, file)) core.remove_torrent(clientAgent, inputHash, inputID, inputName) - if not sectionName == 'UserScript': # for user script, we assume this is cleaned by the script or option USER_SCRIPT_CLEAN + if not sectionName == 'UserScript': + # for user script, we assume this is cleaned by the script or option USER_SCRIPT_CLEAN # cleanup our processing folders of any misc unwanted files and empty directories core.cleanDir(outputDestination, sectionName, inputCategory) @@ -300,18 +312,19 @@ def main(args): if not core.CFG[section][subsection].isenabled(): continue for dirName in core.getDirs(section, subsection, link='hard'): - logger.info("Starting manual run for {0}:{1} - Folder:{2}".format(section, subsection, dirName)) + logger.info("Starting manual run for {0}:{1} - Folder:{2}".format + (section, subsection, dirName)) - logger.info("Checking database for download info for {0} ...".format(os.path.basename(dirName))) + logger.info("Checking database for download info for {0} ...".format + (os.path.basename(dirName))) core.DOWNLOADINFO = core.get_downloadInfo(os.path.basename(dirName), 0) if core.DOWNLOADINFO: - logger.info( - "Found download info for {0}, setting variables now ...".format(os.path.basename(dirName))) + logger.info("Found download info for {0}, " + "setting variables now ...".format(os.path.basename(dirName))) else: - logger.info( - 'Unable to locate download info for {0}, continuing to try and process this release ...'.format( - os.path.basename(dirName)) - ) + logger.info('Unable to locate download info for {0}, ' + 'continuing to try and process this release ...'.format + (os.path.basename(dirName))) clientAgent = text_type(core.DOWNLOADINFO[0].get('client_agent', '')) inputHash = 
text_type(core.DOWNLOADINFO[0].get('input_hash', '')) @@ -333,8 +346,8 @@ def main(args): results = processTorrent(dirName, inputName, subsection, inputHash or None, inputID or None, clientAgent or 'manual') if results[0] != 0: - logger.error("A problem was reported when trying to perform a manual run for {0}:{1}.".format( - section, subsection)) + logger.error("A problem was reported when trying to perform a manual run for {0}:{1}.".format + (section, subsection)) result = results if result[0] == 0: From 390b401ee2674fe9618fc79fa2c4264debd7025a Mon Sep 17 00:00:00 2001 From: Labrys Date: Sat, 18 Jun 2016 20:59:18 -0400 Subject: [PATCH 61/82] Apply dict type-casting to section[usercat] in TorrentToMedia.py * Fixes #1064 --- TorrentToMedia.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/TorrentToMedia.py b/TorrentToMedia.py index 8fb02d64..8309b178 100755 --- a/TorrentToMedia.py +++ b/TorrentToMedia.py @@ -90,13 +90,12 @@ def processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID, (inputCategory)) return [-1, ""] + section = dict(section) # Type cast to dict() to allow effective usage of .get() + Torrent_NoLink = int(section[usercat].get("Torrent_NoLink", 0)) keep_archive = int(section[usercat].get("keep_archive", 0)) extract = int(section[usercat].get('extract', 0)) - try: - uniquePath = int(section[usercat].get("unique_path", 1)) - except TypeError: - uniquePath = 1 + uniquePath = int(section[usercat].get("unique_path", 1)) if clientAgent != 'manual': core.pause_torrent(clientAgent, inputHash, inputID, inputName) From a2247345d46b2df0711d7b4482ec0bdc3724bcf3 Mon Sep 17 00:00:00 2001 From: labrys Date: Tue, 21 Jun 2016 23:47:49 -0400 Subject: [PATCH 62/82] Fix #1071: IndexError when unable to get download info --- nzbToMedia.py | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/nzbToMedia.py b/nzbToMedia.py index 65bb70c1..dcb9f1b7 100755 --- a/nzbToMedia.py +++ 
b/nzbToMedia.py @@ -745,21 +745,22 @@ def main(args, section=None): if not core.CFG[section][subsection].isenabled(): continue for dirName in getDirs(section, subsection, link='move'): - logger.info("Starting manual run for {0}:{1} - Folder:{2}".format(section, subsection, dirName)) - + logger.info("Starting manual run for {0}:{1} - Folder: {2}".format(section, subsection, dirName)) logger.info("Checking database for download info for {0} ...".format(os.path.basename(dirName))) + core.DOWNLOADINFO = get_downloadInfo(os.path.basename(dirName), 0) if core.DOWNLOADINFO: - logger.info( - "Found download info for {0}, setting variables now ...".format(os.path.basename(dirName))) + logger.info("Found download info for {0}, " + "setting variables now ...".format + (os.path.basename(dirName))) + clientAgent = text_type(core.DOWNLOADINFO[0].get('client_agent', '')) + download_id = text_type(core.DOWNLOADINFO[0].get('input_id', '')) else: - logger.info( - 'Unable to locate download info for {0}, continuing to try and process this release ...'.format( - os.path.basename(dirName)) - ) - - clientAgent = text_type(core.DOWNLOADINFO[0].get('client_agent', '')) - download_id = text_type(core.DOWNLOADINFO[0].get('input_id', '')) + logger.info('Unable to locate download info for {0}, ' + 'continuing to try and process this release ...'.format + (os.path.basename(dirName))) + clientAgent = '' + download_id = '' if clientAgent and clientAgent.lower() not in core.NZB_CLIENTS: continue From d2767292fc7f6332a4a9618dbd02d5f343cf7fa4 Mon Sep 17 00:00:00 2001 From: labrys Date: Wed, 22 Jun 2016 01:27:50 -0400 Subject: [PATCH 63/82] Fix #1064: TypeError when key not found in config --- TorrentToMedia.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/TorrentToMedia.py b/TorrentToMedia.py index 8309b178..2cd6427f 100755 --- a/TorrentToMedia.py +++ b/TorrentToMedia.py @@ -90,12 +90,12 @@ def processTorrent(inputDirectory, inputName, inputCategory, inputHash, 
inputID, (inputCategory)) return [-1, ""] - section = dict(section) # Type cast to dict() to allow effective usage of .get() + section = dict(section[sectionName][usercat]) # Type cast to dict() to allow effective usage of .get() - Torrent_NoLink = int(section[usercat].get("Torrent_NoLink", 0)) - keep_archive = int(section[usercat].get("keep_archive", 0)) - extract = int(section[usercat].get('extract', 0)) - uniquePath = int(section[usercat].get("unique_path", 1)) + Torrent_NoLink = int(section.get("Torrent_NoLink", 0)) + keep_archive = int(section.get("keep_archive", 0)) + extract = int(section.get('extract', 0)) + uniquePath = int(section.get("unique_path", 1)) if clientAgent != 'manual': core.pause_torrent(clientAgent, inputHash, inputID, inputName) @@ -224,7 +224,7 @@ def processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID, result = [0, ""] if sectionName == 'UserScript': - result = external_script(outputDestination, inputName, inputCategory, section[usercat]) + result = external_script(outputDestination, inputName, inputCategory, section) elif sectionName == 'CouchPotato': result = core.autoProcessMovie().process(sectionName, outputDestination, inputName, From e3fb399fadf653759869011f78d5d5cfac9aa954 Mon Sep 17 00:00:00 2001 From: labrys Date: Wed, 22 Jun 2016 01:43:27 -0400 Subject: [PATCH 64/82] Apply fix #1071 to TorrentToMedia as well --- TorrentToMedia.py | 14 ++++++++------ nzbToMedia.py | 6 +++--- 2 files changed, 11 insertions(+), 9 deletions(-) diff --git a/TorrentToMedia.py b/TorrentToMedia.py index 2cd6427f..514575f1 100755 --- a/TorrentToMedia.py +++ b/TorrentToMedia.py @@ -318,18 +318,20 @@ def main(args): (os.path.basename(dirName))) core.DOWNLOADINFO = core.get_downloadInfo(os.path.basename(dirName), 0) if core.DOWNLOADINFO: + clientAgent = text_type(core.DOWNLOADINFO[0].get('client_agent', 'manual')) + inputHash = text_type(core.DOWNLOADINFO[0].get('input_hash', '')) + inputID = 
text_type(core.DOWNLOADINFO[0].get('input_id', '')) logger.info("Found download info for {0}, " "setting variables now ...".format(os.path.basename(dirName))) else: logger.info('Unable to locate download info for {0}, ' 'continuing to try and process this release ...'.format (os.path.basename(dirName))) + clientAgent = 'manual' + inputHash = '' + inputID = '' - clientAgent = text_type(core.DOWNLOADINFO[0].get('client_agent', '')) - inputHash = text_type(core.DOWNLOADINFO[0].get('input_hash', '')) - inputID = text_type(core.DOWNLOADINFO[0].get('input_id', '')) - - if clientAgent and clientAgent.lower() not in core.TORRENT_CLIENTS: + if clientAgent.lower() not in core.TORRENT_CLIENTS: continue try: @@ -343,7 +345,7 @@ def main(args): pass results = processTorrent(dirName, inputName, subsection, inputHash or None, inputID or None, - clientAgent or 'manual') + clientAgent) if results[0] != 0: logger.error("A problem was reported when trying to perform a manual run for {0}:{1}.".format (section, subsection)) diff --git a/nzbToMedia.py b/nzbToMedia.py index dcb9f1b7..bce75147 100755 --- a/nzbToMedia.py +++ b/nzbToMedia.py @@ -753,13 +753,13 @@ def main(args, section=None): logger.info("Found download info for {0}, " "setting variables now ...".format (os.path.basename(dirName))) - clientAgent = text_type(core.DOWNLOADINFO[0].get('client_agent', '')) + clientAgent = text_type(core.DOWNLOADINFO[0].get('client_agent', 'manual')) download_id = text_type(core.DOWNLOADINFO[0].get('input_id', '')) else: logger.info('Unable to locate download info for {0}, ' 'continuing to try and process this release ...'.format (os.path.basename(dirName))) - clientAgent = '' + clientAgent = 'manual' download_id = '' if clientAgent and clientAgent.lower() not in core.NZB_CLIENTS: @@ -775,7 +775,7 @@ def main(args, section=None): except UnicodeError: pass - results = process(dirName, inputName, 0, clientAgent=clientAgent or 'manual', + results = process(dirName, inputName, 0, 
clientAgent=clientAgent, download_id=download_id or None, inputCategory=subsection) if results[0] != 0: logger.error("A problem was reported when trying to perform a manual run for {0}:{1}.".format From c145d3abde4111d795b62c4f41c1415a4cfb8f67 Mon Sep 17 00:00:00 2001 From: clinton-hall Date: Sat, 6 Aug 2016 08:16:22 +0930 Subject: [PATCH 65/82] fix issues with import of cfg. --- nzbToMedia.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/nzbToMedia.py b/nzbToMedia.py index bce75147..10c68a46 100755 --- a/nzbToMedia.py +++ b/nzbToMedia.py @@ -581,16 +581,18 @@ def process(inputDirectory, inputName=None, status=0, clientAgent='manual', down inputCategory)) return [-1, ""] - extract = int(section[usercat].get('extract', 0)) + cfg = dict(core.CFG[sectionName][usercat]) + + extract = int(cfg.get("extract", 0)) try: - if int(section[usercat]['remote_path']) and not core.REMOTEPATHS: + if int(cfg.get("remote_path")) and not core.REMOTEPATHS: logger.error('Remote Path is enabled for {0}:{1} but no Network mount points are defined. 
Please check your autoProcessMedia.cfg, exiting!'.format( sectionName, inputCategory)) return [-1, ""] except: logger.error('Remote Path {0} is not valid for {1}:{2} Please set this to either 0 to disable or 1 to enable!'.format( - section[usercat]['remote_path'], sectionName, inputCategory)) + core.get("remote_path"), sectionName, inputCategory)) inputName, inputDirectory = convert_to_ascii(inputName, inputDirectory) From 613ddb129af6c31499b76e9355a5ddfdb9848dfd Mon Sep 17 00:00:00 2001 From: Fernando Date: Mon, 5 Sep 2016 14:53:34 -0300 Subject: [PATCH 66/82] Add Medusa fork and new param "ignore_subs" --- autoProcessMedia.cfg.spec | 4 +++- core/__init__.py | 4 +++- core/autoProcess/autoProcessTV.py | 7 +++++++ nzbToMedia.py | 5 +++++ 4 files changed, 18 insertions(+), 2 deletions(-) diff --git a/autoProcessMedia.cfg.spec b/autoProcessMedia.cfg.spec index e8be8ac4..f1f47058 100644 --- a/autoProcessMedia.cfg.spec +++ b/autoProcessMedia.cfg.spec @@ -85,8 +85,10 @@ process_method = # force processing of already processed content when running a manual scan. force = 0 - # tell SickRage to delete all source files after processing. + # tell SickRage/Medusa to delete all source files after processing. 
delete_on = 0 + # tell Medusa to ignore check for associated subtitle check when postponing release + ignore_subs = 0 extract = 1 nzbExtractionBy = Downloader # Set this to minimum required size to consider a media file valid (in MB) diff --git a/core/__init__.py b/core/__init__.py index 39b6500d..c2851748 100644 --- a/core/__init__.py +++ b/core/__init__.py @@ -63,15 +63,17 @@ FORK_FAILED = "failed" FORK_FAILED_TORRENT = "failed-torrent" FORK_SICKRAGETV = "sickragetv" FORK_SICKRAGE = "sickrage" +FORK_MEDUSA = "medusa" FORK_SICKGEAR = "sickgear" FORKS[FORK_DEFAULT] = {"dir": None} FORKS[FORK_FAILED] = {"dirName": None, "failed": None} FORKS[FORK_FAILED_TORRENT] = {"dir": None, "failed": None, "process_method": None} FORKS[FORK_SICKRAGETV] = {"proc_dir": None, "failed": None, "process_method": None, "force": None, "delete_on": None} FORKS[FORK_SICKRAGE] = {"proc_dir": None, "failed": None, "process_method": None, "force": None, "delete_on": None} +FORKS[FORK_MEDUSA] = {"proc_dir": None, "failed": None, "process_method": None, "force": None, "delete_on": None, "ignore_subs":None} FORKS[FORK_SICKGEAR] = {"dir": None, "failed": None, "process_method": None, "force": None} ALL_FORKS = {"dir": None, "dirName": None, "proc_dir": None, "failed": None, "process_method": None, "force": None, - "delete_on": None} + "delete_on": None, "ignore_subs": None} # NZBGet Exit Codes NZBGET_POSTPROCESS_PARCHECK = 92 diff --git a/core/autoProcess/autoProcessTV.py b/core/autoProcess/autoProcessTV.py index 153439ff..4a3aeaa9 100644 --- a/core/autoProcess/autoProcessTV.py +++ b/core/autoProcess/autoProcessTV.py @@ -78,6 +78,7 @@ class autoProcessTV(object): wait_for = int(cfg.get("wait_for", 2)) force = int(cfg.get("force", 0)) delete_on = int(cfg.get("delete_on", 0)) + ignore_subs = int(cfg.get("ignore_subs", 0)) extract = int(cfg.get("extract", 0)) if not os.path.isdir(dirName) and os.path.isfile(dirName): # If the input directory is a file, assume single file download and split 
dir/name. @@ -198,6 +199,12 @@ class autoProcessTV(object): else: del fork_params[param] + if param == "ignore_subs": + if ignore_subs: + fork_params[param] = ignore_subs + else: + del fork_params[param] + # delete any unused params so we don't pass them to SB by mistake [fork_params.pop(k) for k, v in fork_params.items() if v is None] diff --git a/nzbToMedia.py b/nzbToMedia.py index 65bb70c1..88131913 100755 --- a/nzbToMedia.py +++ b/nzbToMedia.py @@ -124,6 +124,11 @@ # set to 1 to delete failed, or 0 to leave files in place. #sbdelete_failed=0 +# SickBeard Ignore associated subtitle check (0, 1). +# +# set to 1 to ignore subtitles check, or 0 to don't check. +#sbignore_subs=0 + # SickBeard process method. # # set this to move, copy, hardlink, symlink as appropriate if you want to over-ride SB defaults. Leave blank to use SB default. From adcc061a67b0bc99cdcc41d30edbdba3cf6947b3 Mon Sep 17 00:00:00 2001 From: Billie Thompson Date: Sun, 18 Sep 2016 11:50:19 +0100 Subject: [PATCH 67/82] Update autoProcessTV.py There is a minor typo in the user messages. This will resolve it. --- core/autoProcess/autoProcessTV.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/autoProcess/autoProcessTV.py b/core/autoProcess/autoProcessTV.py index 153439ff..a4327c4c 100644 --- a/core/autoProcess/autoProcessTV.py +++ b/core/autoProcess/autoProcessTV.py @@ -211,7 +211,7 @@ class autoProcessTV(object): logger.postprocess("FAILED: The download failed. Sending 'failed' process request to {0} branch".format(fork), section) elif section == "NzbDrone": logger.postprocess("FAILED: The download failed. Sending failed download to {0} for CDH processing".format(fork), section) - return [1, "{0}: Downlaod Failed. Sending back to {1}".format(section, section)] # Return as failed to flag this in the downloader. + return [1, "{0}: Download Failed. Sending back to {1}".format(section, section)] # Return as failed to flag this in the downloader. 
else: logger.postprocess("FAILED: The download failed. {0} branch does not handle failed downloads. Nothing to process".format(fork), section) if delete_failed and os.path.isdir(dirName) and not os.path.dirname(dirName) == dirName: From e5a46f581d2cadab95af73bcc0888c20276a3bbd Mon Sep 17 00:00:00 2001 From: clinton-hall Date: Wed, 21 Sep 2016 09:42:32 +0930 Subject: [PATCH 68/82] added check for language tag size. Fixes #1087 --- core/transcoder/transcoder.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/core/transcoder/transcoder.py b/core/transcoder/transcoder.py index 3356c6ef..6264d9f1 100644 --- a/core/transcoder/transcoder.py +++ b/core/transcoder/transcoder.py @@ -430,8 +430,10 @@ def buildCommands(file, newDir, movieName, bitbucket): continue lan = os.path.splitext(os.path.splitext(subfile)[0])[1] command.extend(['-i', subfile]) - meta_cmd.extend(['-metadata:s:s:{x}'.format(x=len(s_mapped) + n), - 'language={lang}'.format(lang=lan[1:])]) + lansplit = lan.split('-') + if len(lansplit[0]) == 3 and ( len(lansplit) == 1 or ( len(lansplit) == 2 and len(lansplit[1]) == 2 ) ): + meta_cmd.extend(['-metadata:s:s:{x}'.format(x=len(s_mapped) + n), + 'language={lang}'.format(lang=lan[1:])]) n += 1 map_cmd.extend(['-map', '{x}:0'.format(x=n)]) From 319d418af82f00b5e3583bc468530603f11c6ee2 Mon Sep 17 00:00:00 2001 From: clinton-hall Date: Wed, 21 Sep 2016 11:12:42 +0930 Subject: [PATCH 69/82] convert to 3 letter language code. 
Fixes #1088 --- core/transcoder/transcoder.py | 17 ++++++++++++----- tests/general.py | 4 +++- 2 files changed, 15 insertions(+), 6 deletions(-) diff --git a/core/transcoder/transcoder.py b/core/transcoder/transcoder.py index 6264d9f1..cdb8b233 100644 --- a/core/transcoder/transcoder.py +++ b/core/transcoder/transcoder.py @@ -11,6 +11,7 @@ import shutil import re from core import logger from core.nzbToMediaUtil import makeDir +from babelfish import Language def isVideoGood(videofile, status): @@ -428,12 +429,18 @@ def buildCommands(file, newDir, movieName, bitbucket): sub_details, result = getVideoDetails(subfile) if not sub_details or not sub_details.get("streams"): continue - lan = os.path.splitext(os.path.splitext(subfile)[0])[1] command.extend(['-i', subfile]) - lansplit = lan.split('-') - if len(lansplit[0]) == 3 and ( len(lansplit) == 1 or ( len(lansplit) == 2 and len(lansplit[1]) == 2 ) ): + lan = os.path.splitext(os.path.splitext(subfile)[0])[1][1:].split('-')[0] + metlan = None + try: + if len(lan) == 3: + metlan = Language(lan) + if len(lan) == 2: + metlan = Language.fromalpha2(lan) + except: pass + if metlan: meta_cmd.extend(['-metadata:s:s:{x}'.format(x=len(s_mapped) + n), - 'language={lang}'.format(lang=lan[1:])]) + 'language={lang}'.format(lang=metlan.alpha3)]) n += 1 map_cmd.extend(['-map', '{x}:0'.format(x=n)]) @@ -492,7 +499,7 @@ def extract_subs(file, newfilePath, bitbucket): for n in range(num): sub = subStreams[n] idx = sub["index"] - lan = sub.geet("tags", {}).get("language", "unk") + lan = sub.get("tags", {}).get("language", "unk") if num == 1: outputFile = os.path.join(subdir, "{0}.srt".format(name)) diff --git a/tests/general.py b/tests/general.py index cfcee84e..e51ef9c9 100755 --- a/tests/general.py +++ b/tests/general.py @@ -39,7 +39,9 @@ if server_responding("http://127.0.0.1:8090"): print "Mylar Running" from babelfish import Language -print Language('eng') +lan = 'pt' +lan = Language.fromalpha2(lan) +print lan.alpha3 import subliminal 
From 0625f7f3c04342dba4025fa9d340ecad53fb3828 Mon Sep 17 00:00:00 2001 From: clinton-hall Date: Wed, 21 Sep 2016 13:31:41 +0930 Subject: [PATCH 70/82] updated libs to fix guessit and subliminal. Fixes #1080 --- .gitignore | 1 + core/nzbToMediaUtil.py | 8 +- libs/concurrent/__init__.py | 3 + libs/concurrent/futures/__init__.py | 18 + libs/concurrent/futures/_base.py | 574 +++++ libs/concurrent/futures/_compat.py | 101 + libs/concurrent/futures/process.py | 363 +++ libs/concurrent/futures/thread.py | 138 ++ libs/dateutil/__init__.py | 9 + libs/dateutil/easter.py | 92 + libs/dateutil/parser.py | 886 +++++++ libs/dateutil/relativedelta.py | 432 ++++ libs/dateutil/rrule.py | 1097 +++++++++ libs/dateutil/tz.py | 951 ++++++++ libs/dateutil/tzwin.py | 180 ++ libs/dateutil/zoneinfo/__init__.py | 87 + libs/dateutil/zoneinfo/zoneinfo-2010g.tar.gz | Bin 0 -> 171995 bytes libs/guessit/__init__.py | 359 +-- libs/guessit/__main__.py | 312 +-- libs/guessit/__version__.py | 23 +- libs/guessit/api.py | 150 ++ libs/guessit/backports.py | 27 + libs/guessit/containers.py | 771 ------- libs/guessit/date.py | 129 -- libs/guessit/fileutils.py | 87 - libs/guessit/guess.py | 514 ----- libs/guessit/hash_ed2k.py | 69 - libs/guessit/hash_mpc.py | 58 - libs/guessit/jsonutils.py | 32 + libs/guessit/language.py | 311 --- libs/guessit/matcher.py | 306 --- libs/guessit/matchtree.py | 426 ---- libs/guessit/options.py | 93 +- libs/guessit/patterns/__init__.py | 77 - libs/guessit/patterns/extension.py | 32 - libs/guessit/patterns/numeral.py | 150 -- libs/guessit/plugins/__init__.py | 21 - libs/guessit/plugins/transformers.py | 219 -- libs/guessit/quality.py | 65 - libs/guessit/reutils.py | 35 + libs/guessit/rules/__init__.py | 88 + libs/guessit/rules/common/__init__.py | 14 + libs/guessit/rules/common/comparators.py | 68 + libs/guessit/rules/common/date.py | 125 + libs/guessit/rules/common/formatters.py | 136 ++ libs/guessit/rules/common/numeral.py | 165 ++ libs/guessit/rules/common/validators.py | 51 
+ libs/guessit/rules/common/words.py | 77 + libs/guessit/rules/markers/__init__.py | 5 + libs/guessit/rules/markers/groups.py | 49 + libs/guessit/rules/markers/path.py | 43 + libs/guessit/rules/processors.py | 198 ++ libs/guessit/rules/properties/__init__.py | 5 + libs/guessit/rules/properties/audio_codec.py | 164 ++ libs/guessit/rules/properties/bonus.py | 50 + libs/guessit/rules/properties/cds.py | 35 + libs/guessit/rules/properties/container.py | 53 + libs/guessit/rules/properties/country.py | 109 + libs/guessit/rules/properties/crc.py | 85 + libs/guessit/rules/properties/date.py | 72 + libs/guessit/rules/properties/edition.py | 31 + .../guessit/rules/properties/episode_title.py | 196 ++ libs/guessit/rules/properties/episodes.py | 516 +++++ libs/guessit/rules/properties/film.py | 42 + libs/guessit/rules/properties/format.py | 67 + libs/guessit/rules/properties/language.py | 249 ++ libs/guessit/rules/properties/mimetype.py | 48 + libs/guessit/rules/properties/other.py | 181 ++ libs/guessit/rules/properties/part.py | 41 + .../guessit/rules/properties/release_group.py | 171 ++ libs/guessit/rules/properties/screen_size.py | 77 + libs/guessit/rules/properties/title.py | 347 +++ libs/guessit/rules/properties/type.py | 75 + libs/guessit/rules/properties/video_codec.py | 87 + libs/guessit/rules/properties/website.py | 67 + libs/guessit/slogging.py | 89 - libs/guessit/test/1MB | Bin 1048576 -> 0 bytes libs/guessit/test/__init__.py | 25 +- libs/guessit/test/__main__.py | 40 - libs/guessit/test/autodetect.yaml | 489 ---- libs/guessit/test/dummy.srt | 1 - libs/guessit/test/episodes.yaml | 1174 ---------- libs/guessit/test/episodes.yml | 2048 +++++++++++++++++ libs/guessit/test/guessittest.py | 187 -- libs/guessit/test/{movies.yaml => movies.yml} | 675 +++--- .../opensubtitles_languages_2012_05_09.txt | 473 ---- libs/guessit/test/rules/__init__.py | 3 + libs/guessit/test/rules/audio_codec.yml | 83 + libs/guessit/test/rules/bonus.yml | 9 + libs/guessit/test/rules/cds.yml | 10 
+ libs/guessit/test/rules/country.yml | 10 + libs/guessit/test/rules/date.yml | 50 + libs/guessit/test/rules/edition.yml | 25 + libs/guessit/test/rules/episodes.yml | 247 ++ libs/guessit/test/rules/film.yml | 9 + libs/guessit/test/rules/format.yml | 112 + libs/guessit/test/rules/language.yml | 39 + libs/guessit/test/rules/other.yml | 137 ++ libs/guessit/test/rules/part.yml | 18 + libs/guessit/test/rules/processors.yml | 8 + libs/guessit/test/rules/release_group.yml | 41 + libs/guessit/test/rules/screen_size.yml | 69 + libs/guessit/test/rules/title.yml | 32 + libs/guessit/test/rules/video_codec.yml | 54 + libs/guessit/test/rules/website.yml | 23 + libs/guessit/test/test-input-file.txt | 2 + libs/guessit/test/test_api.py | 89 +- .../guessit/test/test_api_unicode_literals.py | 66 + libs/guessit/test/test_autodetect.py | 45 - libs/guessit/test/test_autodetect_all.py | 46 - libs/guessit/test/test_benchmark.py | 52 + libs/guessit/test/test_doctests.py | 45 - libs/guessit/test/test_episode.py | 35 - libs/guessit/test/test_hashes.py | 46 - libs/guessit/test/test_language.py | 130 -- libs/guessit/test/test_main.py | 113 +- libs/guessit/test/test_matchtree.py | 93 - libs/guessit/test/test_movie.py | 35 - libs/guessit/test/test_quality.py | 126 - libs/guessit/test/test_utils.py | 163 -- libs/guessit/test/test_yml.py | 285 +++ libs/guessit/test/various.yml | 800 +++++++ libs/guessit/textutils.py | 355 --- libs/guessit/transfo/__init__.py | 30 - libs/guessit/transfo/expected_series.py | 60 - libs/guessit/transfo/expected_title.py | 61 - libs/guessit/transfo/guess_bonus_features.py | 67 - libs/guessit/transfo/guess_country.py | 124 - libs/guessit/transfo/guess_date.py | 49 - libs/guessit/transfo/guess_episode_details.py | 64 - .../guess_episode_info_from_position.py | 181 -- libs/guessit/transfo/guess_episodes_rexps.py | 193 -- libs/guessit/transfo/guess_filetype.py | 237 -- libs/guessit/transfo/guess_idnumber.py | 79 - libs/guessit/transfo/guess_language.py | 186 -- 
.../guess_movie_title_from_position.py | 173 -- libs/guessit/transfo/guess_properties.py | 288 --- libs/guessit/transfo/guess_release_group.py | 204 -- libs/guessit/transfo/guess_video_rexps.py | 58 - .../transfo/guess_weak_episodes_rexps.py | 81 - libs/guessit/transfo/guess_website.py | 56 - libs/guessit/transfo/guess_year.py | 57 - libs/guessit/transfo/split_explicit_groups.py | 49 - libs/guessit/transfo/split_on_dash.py | 47 - libs/guessit/transfo/split_path_components.py | 45 - libs/guessit/yamlutils.py | 71 + libs/rarfile.py | 2002 ++++++++++++++++ libs/rarfile1/LICENSE | 15 + libs/rarfile1/MANIFEST.in | 3 + libs/rarfile1/Makefile | 31 + libs/rarfile1/PKG-INFO | 56 + libs/rarfile1/README.rst | 39 + libs/rarfile1/doc/Makefile | 153 ++ libs/rarfile1/doc/api.rst | 111 + libs/rarfile1/doc/conf.py | 249 ++ libs/rarfile1/doc/faq.rst | 87 + libs/rarfile1/doc/index.rst | 42 + libs/rarfile1/doc/make.bat | 190 ++ libs/rarfile1/doc/news.rst | 243 ++ libs/rarfile1/dumprar.py | 361 +++ libs/rarfile1/setup.py | 33 + libs/rarfile1/test/Makefile | 9 + libs/rarfile1/test/files/ctime0.rar | Bin 0 -> 73 bytes libs/rarfile1/test/files/ctime0.rar.exp | 7 + libs/rarfile1/test/files/ctime1.rar | Bin 0 -> 77 bytes libs/rarfile1/test/files/ctime1.rar.exp | 8 + libs/rarfile1/test/files/ctime2.rar | Bin 0 -> 78 bytes libs/rarfile1/test/files/ctime2.rar.exp | 8 + libs/rarfile1/test/files/ctime3.rar | Bin 0 -> 79 bytes libs/rarfile1/test/files/ctime3.rar.exp | 8 + libs/rarfile1/test/files/ctime4.rar | Bin 0 -> 80 bytes libs/rarfile1/test/files/ctime4.rar.exp | 8 + .../test/files/rar15-comment-lock.rar | Bin 0 -> 210 bytes .../test/files/rar15-comment-lock.rar.exp | 14 + libs/rarfile1/test/files/rar15-comment.rar | Bin 0 -> 210 bytes .../rarfile1/test/files/rar15-comment.rar.exp | 14 + .../test/files/rar202-comment-nopsw.rar | Bin 0 -> 204 bytes .../test/files/rar202-comment-nopsw.rar.exp | 14 + .../test/files/rar202-comment-psw.rar | Bin 0 -> 254 bytes 
.../test/files/rar202-comment-psw.rar.exp | 14 + .../rarfile1/test/files/rar3-comment-hpsw.rar | Bin 0 -> 484 bytes .../test/files/rar3-comment-hpsw.rar.exp | 16 + .../test/files/rar3-comment-plain.rar | Bin 0 -> 300 bytes .../test/files/rar3-comment-plain.rar.exp | 16 + libs/rarfile1/test/files/rar3-comment-psw.rar | Bin 0 -> 332 bytes .../test/files/rar3-comment-psw.rar.exp | 16 + libs/rarfile1/test/files/seektest.rar | Bin 0 -> 2253 bytes libs/rarfile1/test/files/seektest.rar.exp | 13 + libs/rarfile1/test/files/unicode.rar | Bin 0 -> 163 bytes libs/rarfile1/test/files/unicode.rar.exp | 11 + libs/rarfile1/test/test1.sh | 32 + libs/rarfile1/test/test2.sh | 19 + libs/rarfile1/test/testcorrupt.py | 85 + libs/rarfile1/test/testio.py | 35 + libs/rarfile1/test/testseek.py | 103 + libs/rebulk/__init__.py | 10 + libs/rebulk/__version__.py | 7 + libs/rebulk/chain.py | 440 ++++ libs/rebulk/debug.py | 56 + libs/rebulk/formatters.py | 23 + libs/rebulk/introspector.py | 126 + libs/rebulk/loose.py | 198 ++ libs/rebulk/match.py | 784 +++++++ libs/rebulk/pattern.py | 471 ++++ libs/rebulk/processors.py | 106 + libs/rebulk/rebulk.py | 350 +++ libs/rebulk/remodule.py | 17 + libs/rebulk/rules.py | 375 +++ libs/rebulk/test/__init__.py | 3 + libs/rebulk/test/default_rules_module.py | 79 + libs/rebulk/test/rebulk_rules_module.py | 38 + libs/rebulk/test/rules_module.py | 54 + libs/rebulk/test/test_chain.py | 303 +++ libs/rebulk/test/test_debug.py | 83 + libs/rebulk/test/test_introspector.py | 138 ++ libs/rebulk/test/test_loose.py | 83 + libs/rebulk/test/test_match.py | 565 +++++ libs/rebulk/test/test_pattern.py | 848 +++++++ libs/rebulk/test/test_processors.py | 215 ++ libs/rebulk/test/test_rebulk.py | 419 ++++ libs/rebulk/test/test_rules.py | 197 ++ libs/rebulk/test/test_toposort.py | 111 + libs/rebulk/test/test_validators.py | 64 + libs/rebulk/toposort.py | 84 + libs/rebulk/utils.py | 153 ++ libs/rebulk/validators.py | 70 + libs/subliminal/__init__.py | 20 +- libs/subliminal/api.py | 
140 -- libs/subliminal/cache.py | 52 +- libs/subliminal/cli.py | 592 +++-- libs/subliminal/compat.py | 21 - libs/subliminal/converters/addic7ed.py | 5 +- libs/subliminal/converters/legendastv.py | 27 + libs/subliminal/converters/podnapisi.py | 32 - libs/subliminal/converters/shooter.py | 23 + libs/subliminal/converters/thesubdb.py | 26 + libs/subliminal/converters/tvsubtitles.py | 5 +- libs/subliminal/core.py | 705 ++++++ libs/subliminal/exceptions.py | 23 +- libs/subliminal/extensions.py | 106 + libs/subliminal/providers/__init__.py | 341 +-- libs/subliminal/providers/addic7ed.py | 309 ++- libs/subliminal/providers/legendastv.py | 448 ++++ libs/subliminal/providers/napiprojekt.py | 103 + libs/subliminal/providers/opensubtitles.py | 230 +- libs/subliminal/providers/podnapisi.py | 199 +- libs/subliminal/providers/shooter.py | 79 + libs/subliminal/providers/subscenter.py | 235 ++ libs/subliminal/providers/thesubdb.py | 82 +- libs/subliminal/providers/tvsubtitles.py | 251 +- libs/subliminal/refiners/__init__.py | 12 + libs/subliminal/refiners/metadata.py | 99 + libs/subliminal/refiners/omdb.py | 187 ++ libs/subliminal/refiners/tvdb.py | 350 +++ libs/subliminal/score.py | 292 ++- libs/subliminal/subtitle.py | 322 ++- libs/subliminal/tests/__init__.py | 14 - libs/subliminal/tests/common.py | 22 - libs/subliminal/tests/test_providers.py | 475 ---- libs/subliminal/tests/test_subliminal.py | 191 -- libs/subliminal/utils.py | 152 ++ libs/subliminal/video.py | 434 +--- tests/general.py | 28 +- 263 files changed, 28711 insertions(+), 12615 deletions(-) create mode 100644 libs/concurrent/__init__.py create mode 100644 libs/concurrent/futures/__init__.py create mode 100644 libs/concurrent/futures/_base.py create mode 100644 libs/concurrent/futures/_compat.py create mode 100644 libs/concurrent/futures/process.py create mode 100644 libs/concurrent/futures/thread.py create mode 100644 libs/dateutil/__init__.py create mode 100644 libs/dateutil/easter.py create mode 100644 
libs/dateutil/parser.py create mode 100644 libs/dateutil/relativedelta.py create mode 100644 libs/dateutil/rrule.py create mode 100644 libs/dateutil/tz.py create mode 100644 libs/dateutil/tzwin.py create mode 100644 libs/dateutil/zoneinfo/__init__.py create mode 100644 libs/dateutil/zoneinfo/zoneinfo-2010g.tar.gz create mode 100644 libs/guessit/api.py create mode 100644 libs/guessit/backports.py delete mode 100644 libs/guessit/containers.py delete mode 100644 libs/guessit/date.py delete mode 100644 libs/guessit/fileutils.py delete mode 100644 libs/guessit/guess.py delete mode 100644 libs/guessit/hash_ed2k.py delete mode 100644 libs/guessit/hash_mpc.py create mode 100644 libs/guessit/jsonutils.py delete mode 100644 libs/guessit/language.py delete mode 100644 libs/guessit/matcher.py delete mode 100644 libs/guessit/matchtree.py delete mode 100755 libs/guessit/patterns/__init__.py delete mode 100644 libs/guessit/patterns/extension.py delete mode 100644 libs/guessit/patterns/numeral.py delete mode 100644 libs/guessit/plugins/__init__.py delete mode 100644 libs/guessit/plugins/transformers.py delete mode 100644 libs/guessit/quality.py create mode 100644 libs/guessit/reutils.py create mode 100644 libs/guessit/rules/__init__.py create mode 100644 libs/guessit/rules/common/__init__.py create mode 100644 libs/guessit/rules/common/comparators.py create mode 100644 libs/guessit/rules/common/date.py create mode 100644 libs/guessit/rules/common/formatters.py create mode 100644 libs/guessit/rules/common/numeral.py create mode 100644 libs/guessit/rules/common/validators.py create mode 100644 libs/guessit/rules/common/words.py create mode 100644 libs/guessit/rules/markers/__init__.py create mode 100644 libs/guessit/rules/markers/groups.py create mode 100644 libs/guessit/rules/markers/path.py create mode 100644 libs/guessit/rules/processors.py create mode 100644 libs/guessit/rules/properties/__init__.py create mode 100644 libs/guessit/rules/properties/audio_codec.py create mode 
100644 libs/guessit/rules/properties/bonus.py create mode 100644 libs/guessit/rules/properties/cds.py create mode 100644 libs/guessit/rules/properties/container.py create mode 100644 libs/guessit/rules/properties/country.py create mode 100644 libs/guessit/rules/properties/crc.py create mode 100644 libs/guessit/rules/properties/date.py create mode 100644 libs/guessit/rules/properties/edition.py create mode 100644 libs/guessit/rules/properties/episode_title.py create mode 100644 libs/guessit/rules/properties/episodes.py create mode 100644 libs/guessit/rules/properties/film.py create mode 100644 libs/guessit/rules/properties/format.py create mode 100644 libs/guessit/rules/properties/language.py create mode 100644 libs/guessit/rules/properties/mimetype.py create mode 100644 libs/guessit/rules/properties/other.py create mode 100644 libs/guessit/rules/properties/part.py create mode 100644 libs/guessit/rules/properties/release_group.py create mode 100644 libs/guessit/rules/properties/screen_size.py create mode 100644 libs/guessit/rules/properties/title.py create mode 100644 libs/guessit/rules/properties/type.py create mode 100644 libs/guessit/rules/properties/video_codec.py create mode 100644 libs/guessit/rules/properties/website.py delete mode 100644 libs/guessit/slogging.py delete mode 100644 libs/guessit/test/1MB delete mode 100644 libs/guessit/test/__main__.py delete mode 100644 libs/guessit/test/autodetect.yaml delete mode 100644 libs/guessit/test/dummy.srt delete mode 100644 libs/guessit/test/episodes.yaml create mode 100644 libs/guessit/test/episodes.yml delete mode 100644 libs/guessit/test/guessittest.py rename libs/guessit/test/{movies.yaml => movies.yml} (60%) delete mode 100644 libs/guessit/test/opensubtitles_languages_2012_05_09.txt create mode 100644 libs/guessit/test/rules/__init__.py create mode 100644 libs/guessit/test/rules/audio_codec.yml create mode 100644 libs/guessit/test/rules/bonus.yml create mode 100644 libs/guessit/test/rules/cds.yml create mode 
100644 libs/guessit/test/rules/country.yml create mode 100644 libs/guessit/test/rules/date.yml create mode 100644 libs/guessit/test/rules/edition.yml create mode 100644 libs/guessit/test/rules/episodes.yml create mode 100644 libs/guessit/test/rules/film.yml create mode 100644 libs/guessit/test/rules/format.yml create mode 100644 libs/guessit/test/rules/language.yml create mode 100644 libs/guessit/test/rules/other.yml create mode 100644 libs/guessit/test/rules/part.yml create mode 100644 libs/guessit/test/rules/processors.yml create mode 100644 libs/guessit/test/rules/release_group.yml create mode 100644 libs/guessit/test/rules/screen_size.yml create mode 100644 libs/guessit/test/rules/title.yml create mode 100644 libs/guessit/test/rules/video_codec.yml create mode 100644 libs/guessit/test/rules/website.yml create mode 100644 libs/guessit/test/test-input-file.txt create mode 100644 libs/guessit/test/test_api_unicode_literals.py delete mode 100644 libs/guessit/test/test_autodetect.py delete mode 100644 libs/guessit/test/test_autodetect_all.py create mode 100644 libs/guessit/test/test_benchmark.py delete mode 100644 libs/guessit/test/test_doctests.py delete mode 100644 libs/guessit/test/test_episode.py delete mode 100644 libs/guessit/test/test_hashes.py delete mode 100644 libs/guessit/test/test_language.py delete mode 100644 libs/guessit/test/test_matchtree.py delete mode 100644 libs/guessit/test/test_movie.py delete mode 100644 libs/guessit/test/test_quality.py delete mode 100644 libs/guessit/test/test_utils.py create mode 100644 libs/guessit/test/test_yml.py create mode 100644 libs/guessit/test/various.yml delete mode 100644 libs/guessit/textutils.py delete mode 100644 libs/guessit/transfo/__init__.py delete mode 100644 libs/guessit/transfo/expected_series.py delete mode 100644 libs/guessit/transfo/expected_title.py delete mode 100644 libs/guessit/transfo/guess_bonus_features.py delete mode 100644 libs/guessit/transfo/guess_country.py delete mode 100644 
libs/guessit/transfo/guess_date.py delete mode 100644 libs/guessit/transfo/guess_episode_details.py delete mode 100644 libs/guessit/transfo/guess_episode_info_from_position.py delete mode 100644 libs/guessit/transfo/guess_episodes_rexps.py delete mode 100644 libs/guessit/transfo/guess_filetype.py delete mode 100644 libs/guessit/transfo/guess_idnumber.py delete mode 100644 libs/guessit/transfo/guess_language.py delete mode 100644 libs/guessit/transfo/guess_movie_title_from_position.py delete mode 100644 libs/guessit/transfo/guess_properties.py delete mode 100644 libs/guessit/transfo/guess_release_group.py delete mode 100644 libs/guessit/transfo/guess_video_rexps.py delete mode 100644 libs/guessit/transfo/guess_weak_episodes_rexps.py delete mode 100644 libs/guessit/transfo/guess_website.py delete mode 100644 libs/guessit/transfo/guess_year.py delete mode 100644 libs/guessit/transfo/split_explicit_groups.py delete mode 100644 libs/guessit/transfo/split_on_dash.py delete mode 100644 libs/guessit/transfo/split_path_components.py create mode 100644 libs/guessit/yamlutils.py create mode 100644 libs/rarfile.py create mode 100644 libs/rarfile1/LICENSE create mode 100644 libs/rarfile1/MANIFEST.in create mode 100644 libs/rarfile1/Makefile create mode 100644 libs/rarfile1/PKG-INFO create mode 100644 libs/rarfile1/README.rst create mode 100644 libs/rarfile1/doc/Makefile create mode 100644 libs/rarfile1/doc/api.rst create mode 100644 libs/rarfile1/doc/conf.py create mode 100644 libs/rarfile1/doc/faq.rst create mode 100644 libs/rarfile1/doc/index.rst create mode 100644 libs/rarfile1/doc/make.bat create mode 100644 libs/rarfile1/doc/news.rst create mode 100755 libs/rarfile1/dumprar.py create mode 100644 libs/rarfile1/setup.py create mode 100644 libs/rarfile1/test/Makefile create mode 100644 libs/rarfile1/test/files/ctime0.rar create mode 100644 libs/rarfile1/test/files/ctime0.rar.exp create mode 100644 libs/rarfile1/test/files/ctime1.rar create mode 100644 
libs/rarfile1/test/files/ctime1.rar.exp create mode 100644 libs/rarfile1/test/files/ctime2.rar create mode 100644 libs/rarfile1/test/files/ctime2.rar.exp create mode 100644 libs/rarfile1/test/files/ctime3.rar create mode 100644 libs/rarfile1/test/files/ctime3.rar.exp create mode 100644 libs/rarfile1/test/files/ctime4.rar create mode 100644 libs/rarfile1/test/files/ctime4.rar.exp create mode 100644 libs/rarfile1/test/files/rar15-comment-lock.rar create mode 100644 libs/rarfile1/test/files/rar15-comment-lock.rar.exp create mode 100644 libs/rarfile1/test/files/rar15-comment.rar create mode 100644 libs/rarfile1/test/files/rar15-comment.rar.exp create mode 100644 libs/rarfile1/test/files/rar202-comment-nopsw.rar create mode 100644 libs/rarfile1/test/files/rar202-comment-nopsw.rar.exp create mode 100644 libs/rarfile1/test/files/rar202-comment-psw.rar create mode 100644 libs/rarfile1/test/files/rar202-comment-psw.rar.exp create mode 100644 libs/rarfile1/test/files/rar3-comment-hpsw.rar create mode 100644 libs/rarfile1/test/files/rar3-comment-hpsw.rar.exp create mode 100644 libs/rarfile1/test/files/rar3-comment-plain.rar create mode 100644 libs/rarfile1/test/files/rar3-comment-plain.rar.exp create mode 100644 libs/rarfile1/test/files/rar3-comment-psw.rar create mode 100644 libs/rarfile1/test/files/rar3-comment-psw.rar.exp create mode 100644 libs/rarfile1/test/files/seektest.rar create mode 100644 libs/rarfile1/test/files/seektest.rar.exp create mode 100644 libs/rarfile1/test/files/unicode.rar create mode 100644 libs/rarfile1/test/files/unicode.rar.exp create mode 100755 libs/rarfile1/test/test1.sh create mode 100755 libs/rarfile1/test/test2.sh create mode 100755 libs/rarfile1/test/testcorrupt.py create mode 100755 libs/rarfile1/test/testio.py create mode 100755 libs/rarfile1/test/testseek.py create mode 100644 libs/rebulk/__init__.py create mode 100644 libs/rebulk/__version__.py create mode 100644 libs/rebulk/chain.py create mode 100644 libs/rebulk/debug.py create mode 
100644 libs/rebulk/formatters.py create mode 100644 libs/rebulk/introspector.py create mode 100644 libs/rebulk/loose.py create mode 100644 libs/rebulk/match.py create mode 100644 libs/rebulk/pattern.py create mode 100644 libs/rebulk/processors.py create mode 100644 libs/rebulk/rebulk.py create mode 100644 libs/rebulk/remodule.py create mode 100644 libs/rebulk/rules.py create mode 100644 libs/rebulk/test/__init__.py create mode 100644 libs/rebulk/test/default_rules_module.py create mode 100644 libs/rebulk/test/rebulk_rules_module.py create mode 100644 libs/rebulk/test/rules_module.py create mode 100644 libs/rebulk/test/test_chain.py create mode 100644 libs/rebulk/test/test_debug.py create mode 100644 libs/rebulk/test/test_introspector.py create mode 100644 libs/rebulk/test/test_loose.py create mode 100644 libs/rebulk/test/test_match.py create mode 100644 libs/rebulk/test/test_pattern.py create mode 100644 libs/rebulk/test/test_processors.py create mode 100644 libs/rebulk/test/test_rebulk.py create mode 100644 libs/rebulk/test/test_rules.py create mode 100644 libs/rebulk/test/test_toposort.py create mode 100644 libs/rebulk/test/test_validators.py create mode 100644 libs/rebulk/toposort.py create mode 100644 libs/rebulk/utils.py create mode 100644 libs/rebulk/validators.py delete mode 100644 libs/subliminal/api.py delete mode 100644 libs/subliminal/compat.py create mode 100644 libs/subliminal/converters/legendastv.py delete mode 100644 libs/subliminal/converters/podnapisi.py create mode 100644 libs/subliminal/converters/shooter.py create mode 100644 libs/subliminal/converters/thesubdb.py create mode 100644 libs/subliminal/core.py create mode 100644 libs/subliminal/extensions.py create mode 100644 libs/subliminal/providers/legendastv.py create mode 100644 libs/subliminal/providers/napiprojekt.py create mode 100644 libs/subliminal/providers/shooter.py create mode 100644 libs/subliminal/providers/subscenter.py create mode 100644 libs/subliminal/refiners/__init__.py 
create mode 100644 libs/subliminal/refiners/metadata.py create mode 100644 libs/subliminal/refiners/omdb.py create mode 100644 libs/subliminal/refiners/tvdb.py delete mode 100644 libs/subliminal/tests/__init__.py delete mode 100644 libs/subliminal/tests/common.py delete mode 100644 libs/subliminal/tests/test_providers.py delete mode 100644 libs/subliminal/tests/test_subliminal.py create mode 100644 libs/subliminal/utils.py diff --git a/.gitignore b/.gitignore index fdd8ce76..c708d315 100644 --- a/.gitignore +++ b/.gitignore @@ -5,6 +5,7 @@ *.log *.pid *.db +*.dbm /userscripts/ /logs/ /.idea/ diff --git a/core/nzbToMediaUtil.py b/core/nzbToMediaUtil.py index 204766a3..4cfd73fb 100644 --- a/core/nzbToMediaUtil.py +++ b/core/nzbToMediaUtil.py @@ -1124,7 +1124,7 @@ def import_subs(filename): if not core.GETSUBS: return try: - subliminal.cache_region.configure('dogpile.cache.memory') + subliminal.region.configure('dogpile.cache.dbm', arguments={'filename': 'cachefile.dbm'}) except: pass @@ -1139,9 +1139,9 @@ def import_subs(filename): logger.debug("Attempting to download subtitles for {0}".format(filename), 'SUBTITLES') try: - video = subliminal.scan_video(filename, subtitles=True, embedded_subtitles=True) - subtitles = subliminal.download_best_subtitles({video}, languages, hearing_impaired=False) - subliminal.save_subtitles(subtitles) + video = subliminal.scan_video(filename) + subtitles = subliminal.download_best_subtitles({video}, languages) + subliminal.save_subtitles(video, subtitles[video]) except Exception as e: logger.error("Failed to download subtitles for {0} due to: {1}".format(filename, e), 'SUBTITLES') diff --git a/libs/concurrent/__init__.py b/libs/concurrent/__init__.py new file mode 100644 index 00000000..b36383a6 --- /dev/null +++ b/libs/concurrent/__init__.py @@ -0,0 +1,3 @@ +from pkgutil import extend_path + +__path__ = extend_path(__path__, __name__) diff --git a/libs/concurrent/futures/__init__.py b/libs/concurrent/futures/__init__.py new file mode 
100644 index 00000000..b5231f8a --- /dev/null +++ b/libs/concurrent/futures/__init__.py @@ -0,0 +1,18 @@ +# Copyright 2009 Brian Quinlan. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +"""Execute computations asynchronously using threads or processes.""" + +__author__ = 'Brian Quinlan (brian@sweetapp.com)' + +from concurrent.futures._base import (FIRST_COMPLETED, + FIRST_EXCEPTION, + ALL_COMPLETED, + CancelledError, + TimeoutError, + Future, + Executor, + wait, + as_completed) +from concurrent.futures.process import ProcessPoolExecutor +from concurrent.futures.thread import ThreadPoolExecutor diff --git a/libs/concurrent/futures/_base.py b/libs/concurrent/futures/_base.py new file mode 100644 index 00000000..8ed69b7d --- /dev/null +++ b/libs/concurrent/futures/_base.py @@ -0,0 +1,574 @@ +# Copyright 2009 Brian Quinlan. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +from __future__ import with_statement +import logging +import threading +import time + +try: + from collections import namedtuple +except ImportError: + from concurrent.futures._compat import namedtuple + +__author__ = 'Brian Quinlan (brian@sweetapp.com)' + +FIRST_COMPLETED = 'FIRST_COMPLETED' +FIRST_EXCEPTION = 'FIRST_EXCEPTION' +ALL_COMPLETED = 'ALL_COMPLETED' +_AS_COMPLETED = '_AS_COMPLETED' + +# Possible future states (for internal use by the futures package). +PENDING = 'PENDING' +RUNNING = 'RUNNING' +# The future was cancelled by the user... +CANCELLED = 'CANCELLED' +# ...and _Waiter.add_cancelled() was called by a worker. +CANCELLED_AND_NOTIFIED = 'CANCELLED_AND_NOTIFIED' +FINISHED = 'FINISHED' + +_FUTURE_STATES = [ + PENDING, + RUNNING, + CANCELLED, + CANCELLED_AND_NOTIFIED, + FINISHED +] + +_STATE_TO_DESCRIPTION_MAP = { + PENDING: "pending", + RUNNING: "running", + CANCELLED: "cancelled", + CANCELLED_AND_NOTIFIED: "cancelled", + FINISHED: "finished" +} + +# Logger for internal use by the futures package. 
+LOGGER = logging.getLogger("concurrent.futures") + +class Error(Exception): + """Base class for all future-related exceptions.""" + pass + +class CancelledError(Error): + """The Future was cancelled.""" + pass + +class TimeoutError(Error): + """The operation exceeded the given deadline.""" + pass + +class _Waiter(object): + """Provides the event that wait() and as_completed() block on.""" + def __init__(self): + self.event = threading.Event() + self.finished_futures = [] + + def add_result(self, future): + self.finished_futures.append(future) + + def add_exception(self, future): + self.finished_futures.append(future) + + def add_cancelled(self, future): + self.finished_futures.append(future) + +class _AsCompletedWaiter(_Waiter): + """Used by as_completed().""" + + def __init__(self): + super(_AsCompletedWaiter, self).__init__() + self.lock = threading.Lock() + + def add_result(self, future): + with self.lock: + super(_AsCompletedWaiter, self).add_result(future) + self.event.set() + + def add_exception(self, future): + with self.lock: + super(_AsCompletedWaiter, self).add_exception(future) + self.event.set() + + def add_cancelled(self, future): + with self.lock: + super(_AsCompletedWaiter, self).add_cancelled(future) + self.event.set() + +class _FirstCompletedWaiter(_Waiter): + """Used by wait(return_when=FIRST_COMPLETED).""" + + def add_result(self, future): + super(_FirstCompletedWaiter, self).add_result(future) + self.event.set() + + def add_exception(self, future): + super(_FirstCompletedWaiter, self).add_exception(future) + self.event.set() + + def add_cancelled(self, future): + super(_FirstCompletedWaiter, self).add_cancelled(future) + self.event.set() + +class _AllCompletedWaiter(_Waiter): + """Used by wait(return_when=FIRST_EXCEPTION and ALL_COMPLETED).""" + + def __init__(self, num_pending_calls, stop_on_exception): + self.num_pending_calls = num_pending_calls + self.stop_on_exception = stop_on_exception + self.lock = threading.Lock() + 
super(_AllCompletedWaiter, self).__init__() + + def _decrement_pending_calls(self): + with self.lock: + self.num_pending_calls -= 1 + if not self.num_pending_calls: + self.event.set() + + def add_result(self, future): + super(_AllCompletedWaiter, self).add_result(future) + self._decrement_pending_calls() + + def add_exception(self, future): + super(_AllCompletedWaiter, self).add_exception(future) + if self.stop_on_exception: + self.event.set() + else: + self._decrement_pending_calls() + + def add_cancelled(self, future): + super(_AllCompletedWaiter, self).add_cancelled(future) + self._decrement_pending_calls() + +class _AcquireFutures(object): + """A context manager that does an ordered acquire of Future conditions.""" + + def __init__(self, futures): + self.futures = sorted(futures, key=id) + + def __enter__(self): + for future in self.futures: + future._condition.acquire() + + def __exit__(self, *args): + for future in self.futures: + future._condition.release() + +def _create_and_install_waiters(fs, return_when): + if return_when == _AS_COMPLETED: + waiter = _AsCompletedWaiter() + elif return_when == FIRST_COMPLETED: + waiter = _FirstCompletedWaiter() + else: + pending_count = sum( + f._state not in [CANCELLED_AND_NOTIFIED, FINISHED] for f in fs) + + if return_when == FIRST_EXCEPTION: + waiter = _AllCompletedWaiter(pending_count, stop_on_exception=True) + elif return_when == ALL_COMPLETED: + waiter = _AllCompletedWaiter(pending_count, stop_on_exception=False) + else: + raise ValueError("Invalid return condition: %r" % return_when) + + for f in fs: + f._waiters.append(waiter) + + return waiter + +def as_completed(fs, timeout=None): + """An iterator over the given futures that yields each as it completes. + + Args: + fs: The sequence of Futures (possibly created by different Executors) to + iterate over. + timeout: The maximum number of seconds to wait. If None, then there + is no limit on the wait time. 
+ + Returns: + An iterator that yields the given Futures as they complete (finished or + cancelled). + + Raises: + TimeoutError: If the entire result iterator could not be generated + before the given timeout. + """ + if timeout is not None: + end_time = timeout + time.time() + + with _AcquireFutures(fs): + finished = set( + f for f in fs + if f._state in [CANCELLED_AND_NOTIFIED, FINISHED]) + pending = set(fs) - finished + waiter = _create_and_install_waiters(fs, _AS_COMPLETED) + + try: + for future in finished: + yield future + + while pending: + if timeout is None: + wait_timeout = None + else: + wait_timeout = end_time - time.time() + if wait_timeout < 0: + raise TimeoutError( + '%d (of %d) futures unfinished' % ( + len(pending), len(fs))) + + waiter.event.wait(wait_timeout) + + with waiter.lock: + finished = waiter.finished_futures + waiter.finished_futures = [] + waiter.event.clear() + + for future in finished: + yield future + pending.remove(future) + + finally: + for f in fs: + f._waiters.remove(waiter) + +DoneAndNotDoneFutures = namedtuple( + 'DoneAndNotDoneFutures', 'done not_done') +def wait(fs, timeout=None, return_when=ALL_COMPLETED): + """Wait for the futures in the given sequence to complete. + + Args: + fs: The sequence of Futures (possibly created by different Executors) to + wait upon. + timeout: The maximum number of seconds to wait. If None, then there + is no limit on the wait time. + return_when: Indicates when this function should return. The options + are: + + FIRST_COMPLETED - Return when any future finishes or is + cancelled. + FIRST_EXCEPTION - Return when any future finishes by raising an + exception. If no future raises an exception + then it is equivalent to ALL_COMPLETED. + ALL_COMPLETED - Return when all futures finish or are cancelled. + + Returns: + A named 2-tuple of sets. The first set, named 'done', contains the + futures that completed (is finished or cancelled) before the wait + completed. 
The second set, named 'not_done', contains uncompleted + futures. + """ + with _AcquireFutures(fs): + done = set(f for f in fs + if f._state in [CANCELLED_AND_NOTIFIED, FINISHED]) + not_done = set(fs) - done + + if (return_when == FIRST_COMPLETED) and done: + return DoneAndNotDoneFutures(done, not_done) + elif (return_when == FIRST_EXCEPTION) and done: + if any(f for f in done + if not f.cancelled() and f.exception() is not None): + return DoneAndNotDoneFutures(done, not_done) + + if len(done) == len(fs): + return DoneAndNotDoneFutures(done, not_done) + + waiter = _create_and_install_waiters(fs, return_when) + + waiter.event.wait(timeout) + for f in fs: + f._waiters.remove(waiter) + + done.update(waiter.finished_futures) + return DoneAndNotDoneFutures(done, set(fs) - done) + +class Future(object): + """Represents the result of an asynchronous computation.""" + + def __init__(self): + """Initializes the future. Should not be called by clients.""" + self._condition = threading.Condition() + self._state = PENDING + self._result = None + self._exception = None + self._waiters = [] + self._done_callbacks = [] + + def _invoke_callbacks(self): + for callback in self._done_callbacks: + try: + callback(self) + except Exception: + LOGGER.exception('exception calling callback for %r', self) + + def __repr__(self): + with self._condition: + if self._state == FINISHED: + if self._exception: + return '' % ( + hex(id(self)), + _STATE_TO_DESCRIPTION_MAP[self._state], + self._exception.__class__.__name__) + else: + return '' % ( + hex(id(self)), + _STATE_TO_DESCRIPTION_MAP[self._state], + self._result.__class__.__name__) + return '' % ( + hex(id(self)), + _STATE_TO_DESCRIPTION_MAP[self._state]) + + def cancel(self): + """Cancel the future if possible. + + Returns True if the future was cancelled, False otherwise. A future + cannot be cancelled if it is running or has already completed. 
+ """ + with self._condition: + if self._state in [RUNNING, FINISHED]: + return False + + if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]: + return True + + self._state = CANCELLED + self._condition.notify_all() + + self._invoke_callbacks() + return True + + def cancelled(self): + """Return True if the future has cancelled.""" + with self._condition: + return self._state in [CANCELLED, CANCELLED_AND_NOTIFIED] + + def running(self): + """Return True if the future is currently executing.""" + with self._condition: + return self._state == RUNNING + + def done(self): + """Return True of the future was cancelled or finished executing.""" + with self._condition: + return self._state in [CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED] + + def __get_result(self): + if self._exception: + raise self._exception + else: + return self._result + + def add_done_callback(self, fn): + """Attaches a callable that will be called when the future finishes. + + Args: + fn: A callable that will be called with this future as its only + argument when the future completes or is cancelled. The callable + will always be called by a thread in the same process in which + it was added. If the future has already completed or been + cancelled then the callable will be called immediately. These + callables are called in the order that they were added. + """ + with self._condition: + if self._state not in [CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED]: + self._done_callbacks.append(fn) + return + fn(self) + + def result(self, timeout=None): + """Return the result of the call that the future represents. + + Args: + timeout: The number of seconds to wait for the result if the future + isn't done. If None, then there is no limit on the wait time. + + Returns: + The result of the call that the future represents. + + Raises: + CancelledError: If the future was cancelled. + TimeoutError: If the future didn't finish executing before the given + timeout. 
+ Exception: If the call raised then that exception will be raised. + """ + with self._condition: + if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]: + raise CancelledError() + elif self._state == FINISHED: + return self.__get_result() + + self._condition.wait(timeout) + + if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]: + raise CancelledError() + elif self._state == FINISHED: + return self.__get_result() + else: + raise TimeoutError() + + def exception(self, timeout=None): + """Return the exception raised by the call that the future represents. + + Args: + timeout: The number of seconds to wait for the exception if the + future isn't done. If None, then there is no limit on the wait + time. + + Returns: + The exception raised by the call that the future represents or None + if the call completed without raising. + + Raises: + CancelledError: If the future was cancelled. + TimeoutError: If the future didn't finish executing before the given + timeout. + """ + + with self._condition: + if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]: + raise CancelledError() + elif self._state == FINISHED: + return self._exception + + self._condition.wait(timeout) + + if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]: + raise CancelledError() + elif self._state == FINISHED: + return self._exception + else: + raise TimeoutError() + + # The following methods should only be used by Executors and in tests. + def set_running_or_notify_cancel(self): + """Mark the future as running or process any cancel notifications. + + Should only be used by Executor implementations and unit tests. + + If the future has been cancelled (cancel() was called and returned + True) then any threads waiting on the future completing (though calls + to as_completed() or wait()) are notified and False is returned. + + If the future was not cancelled then it is put in the running state + (future calls to running() will return True) and True is returned. 
+ + This method should be called by Executor implementations before + executing the work associated with this future. If this method returns + False then the work should not be executed. + + Returns: + False if the Future was cancelled, True otherwise. + + Raises: + RuntimeError: if this method was already called or if set_result() + or set_exception() was called. + """ + with self._condition: + if self._state == CANCELLED: + self._state = CANCELLED_AND_NOTIFIED + for waiter in self._waiters: + waiter.add_cancelled(self) + # self._condition.notify_all() is not necessary because + # self.cancel() triggers a notification. + return False + elif self._state == PENDING: + self._state = RUNNING + return True + else: + LOGGER.critical('Future %s in unexpected state: %s', + id(self.future), + self.future._state) + raise RuntimeError('Future in unexpected state') + + def set_result(self, result): + """Sets the return value of work associated with the future. + + Should only be used by Executor implementations and unit tests. + """ + with self._condition: + self._result = result + self._state = FINISHED + for waiter in self._waiters: + waiter.add_result(self) + self._condition.notify_all() + self._invoke_callbacks() + + def set_exception(self, exception): + """Sets the result of the future as being the given exception. + + Should only be used by Executor implementations and unit tests. + """ + with self._condition: + self._exception = exception + self._state = FINISHED + for waiter in self._waiters: + waiter.add_exception(self) + self._condition.notify_all() + self._invoke_callbacks() + +class Executor(object): + """This is an abstract base class for concrete asynchronous executors.""" + + def submit(self, fn, *args, **kwargs): + """Submits a callable to be executed with the given arguments. + + Schedules the callable to be executed as fn(*args, **kwargs) and returns + a Future instance representing the execution of the callable. 
+ + Returns: + A Future representing the given call. + """ + raise NotImplementedError() + + def map(self, fn, *iterables, **kwargs): + """Returns a iterator equivalent to map(fn, iter). + + Args: + fn: A callable that will take as many arguments as there are + passed iterables. + timeout: The maximum number of seconds to wait. If None, then there + is no limit on the wait time. + + Returns: + An iterator equivalent to: map(func, *iterables) but the calls may + be evaluated out-of-order. + + Raises: + TimeoutError: If the entire result iterator could not be generated + before the given timeout. + Exception: If fn(*args) raises for any values. + """ + timeout = kwargs.get('timeout') + if timeout is not None: + end_time = timeout + time.time() + + fs = [self.submit(fn, *args) for args in zip(*iterables)] + + try: + for future in fs: + if timeout is None: + yield future.result() + else: + yield future.result(end_time - time.time()) + finally: + for future in fs: + future.cancel() + + def shutdown(self, wait=True): + """Clean-up the resources associated with the Executor. + + It is safe to call this method several times. Otherwise, no other + methods can be called after this one. + + Args: + wait: If True then shutdown will not return until all running + futures have finished executing and the resources used by the + executor have been reclaimed. + """ + pass + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.shutdown(wait=True) + return False diff --git a/libs/concurrent/futures/_compat.py b/libs/concurrent/futures/_compat.py new file mode 100644 index 00000000..11462326 --- /dev/null +++ b/libs/concurrent/futures/_compat.py @@ -0,0 +1,101 @@ +from keyword import iskeyword as _iskeyword +from operator import itemgetter as _itemgetter +import sys as _sys + + +def namedtuple(typename, field_names): + """Returns a new subclass of tuple with named fields. 
+ + >>> Point = namedtuple('Point', 'x y') + >>> Point.__doc__ # docstring for the new class + 'Point(x, y)' + >>> p = Point(11, y=22) # instantiate with positional args or keywords + >>> p[0] + p[1] # indexable like a plain tuple + 33 + >>> x, y = p # unpack like a regular tuple + >>> x, y + (11, 22) + >>> p.x + p.y # fields also accessable by name + 33 + >>> d = p._asdict() # convert to a dictionary + >>> d['x'] + 11 + >>> Point(**d) # convert from a dictionary + Point(x=11, y=22) + >>> p._replace(x=100) # _replace() is like str.replace() but targets named fields + Point(x=100, y=22) + + """ + + # Parse and validate the field names. Validation serves two purposes, + # generating informative error messages and preventing template injection attacks. + if isinstance(field_names, basestring): + field_names = field_names.replace(',', ' ').split() # names separated by whitespace and/or commas + field_names = tuple(map(str, field_names)) + for name in (typename,) + field_names: + if not all(c.isalnum() or c=='_' for c in name): + raise ValueError('Type names and field names can only contain alphanumeric characters and underscores: %r' % name) + if _iskeyword(name): + raise ValueError('Type names and field names cannot be a keyword: %r' % name) + if name[0].isdigit(): + raise ValueError('Type names and field names cannot start with a number: %r' % name) + seen_names = set() + for name in field_names: + if name.startswith('_'): + raise ValueError('Field names cannot start with an underscore: %r' % name) + if name in seen_names: + raise ValueError('Encountered duplicate field name: %r' % name) + seen_names.add(name) + + # Create and fill-in the class template + numfields = len(field_names) + argtxt = repr(field_names).replace("'", "")[1:-1] # tuple repr without parens or quotes + reprtxt = ', '.join('%s=%%r' % name for name in field_names) + dicttxt = ', '.join('%r: t[%d]' % (name, pos) for pos, name in enumerate(field_names)) + template = '''class %(typename)s(tuple): + 
'%(typename)s(%(argtxt)s)' \n + __slots__ = () \n + _fields = %(field_names)r \n + def __new__(_cls, %(argtxt)s): + return _tuple.__new__(_cls, (%(argtxt)s)) \n + @classmethod + def _make(cls, iterable, new=tuple.__new__, len=len): + 'Make a new %(typename)s object from a sequence or iterable' + result = new(cls, iterable) + if len(result) != %(numfields)d: + raise TypeError('Expected %(numfields)d arguments, got %%d' %% len(result)) + return result \n + def __repr__(self): + return '%(typename)s(%(reprtxt)s)' %% self \n + def _asdict(t): + 'Return a new dict which maps field names to their values' + return {%(dicttxt)s} \n + def _replace(_self, **kwds): + 'Return a new %(typename)s object replacing specified fields with new values' + result = _self._make(map(kwds.pop, %(field_names)r, _self)) + if kwds: + raise ValueError('Got unexpected field names: %%r' %% kwds.keys()) + return result \n + def __getnewargs__(self): + return tuple(self) \n\n''' % locals() + for i, name in enumerate(field_names): + template += ' %s = _property(_itemgetter(%d))\n' % (name, i) + + # Execute the template string in a temporary namespace and + # support tracing utilities by setting a value for frame.f_globals['__name__'] + namespace = dict(_itemgetter=_itemgetter, __name__='namedtuple_%s' % typename, + _property=property, _tuple=tuple) + try: + exec(template, namespace) + except SyntaxError: + e = _sys.exc_info()[1] + raise SyntaxError(e.message + ':\n' + template) + result = namespace[typename] + + # For pickling to work, the __module__ variable needs to be set to the frame + # where the named tuple is created. Bypass this step in enviroments where + # sys._getframe is not defined (Jython for example). 
+ if hasattr(_sys, '_getframe'): + result.__module__ = _sys._getframe(1).f_globals.get('__name__', '__main__') + + return result diff --git a/libs/concurrent/futures/process.py b/libs/concurrent/futures/process.py new file mode 100644 index 00000000..98684f8e --- /dev/null +++ b/libs/concurrent/futures/process.py @@ -0,0 +1,363 @@ +# Copyright 2009 Brian Quinlan. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +"""Implements ProcessPoolExecutor. + +The follow diagram and text describe the data-flow through the system: + +|======================= In-process =====================|== Out-of-process ==| + ++----------+ +----------+ +--------+ +-----------+ +---------+ +| | => | Work Ids | => | | => | Call Q | => | | +| | +----------+ | | +-----------+ | | +| | | ... | | | | ... | | | +| | | 6 | | | | 5, call() | | | +| | | 7 | | | | ... | | | +| Process | | ... | | Local | +-----------+ | Process | +| Pool | +----------+ | Worker | | #1..n | +| Executor | | Thread | | | +| | +----------- + | | +-----------+ | | +| | <=> | Work Items | <=> | | <= | Result Q | <= | | +| | +------------+ | | +-----------+ | | +| | | 6: call() | | | | ... | | | +| | | future | | | | 4, result | | | +| | | ... | | | | 3, except | | | ++----------+ +------------+ +--------+ +-----------+ +---------+ + +Executor.submit() called: +- creates a uniquely numbered _WorkItem and adds it to the "Work Items" dict +- adds the id of the _WorkItem to the "Work Ids" queue + +Local worker thread: +- reads work ids from the "Work Ids" queue and looks up the corresponding + WorkItem from the "Work Items" dict: if the work item has been cancelled then + it is simply removed from the dict, otherwise it is repackaged as a + _CallItem and put in the "Call Q". New _CallItems are put in the "Call Q" + until "Call Q" is full. NOTE: the size of the "Call Q" is kept small because + calls placed in the "Call Q" can no longer be cancelled with Future.cancel(). 
+- reads _ResultItems from "Result Q", updates the future stored in the + "Work Items" dict and deletes the dict entry + +Process #1..n: +- reads _CallItems from "Call Q", executes the calls, and puts the resulting + _ResultItems in "Request Q" +""" + +from __future__ import with_statement +import atexit +import multiprocessing +import threading +import weakref +import sys + +from concurrent.futures import _base + +try: + import queue +except ImportError: + import Queue as queue + +__author__ = 'Brian Quinlan (brian@sweetapp.com)' + +# Workers are created as daemon threads and processes. This is done to allow the +# interpreter to exit when there are still idle processes in a +# ProcessPoolExecutor's process pool (i.e. shutdown() was not called). However, +# allowing workers to die with the interpreter has two undesirable properties: +# - The workers would still be running during interpretor shutdown, +# meaning that they would fail in unpredictable ways. +# - The workers could be killed while evaluating a work item, which could +# be bad if the callable being evaluated has external side-effects e.g. +# writing to a file. +# +# To work around this problem, an exit handler is installed which tells the +# workers to exit when their work queues are empty and then waits until the +# threads/processes finish. + +_threads_queues = weakref.WeakKeyDictionary() +_shutdown = False + +def _python_exit(): + global _shutdown + _shutdown = True + items = list(_threads_queues.items()) + for t, q in items: + q.put(None) + for t, q in items: + t.join() + +# Controls how many more calls than processes will be queued in the call queue. +# A smaller number will mean that processes spend more time idle waiting for +# work while a larger number will make Future.cancel() succeed less frequently +# (Futures in the call queue cannot be cancelled). 
+EXTRA_QUEUED_CALLS = 1 + +class _WorkItem(object): + def __init__(self, future, fn, args, kwargs): + self.future = future + self.fn = fn + self.args = args + self.kwargs = kwargs + +class _ResultItem(object): + def __init__(self, work_id, exception=None, result=None): + self.work_id = work_id + self.exception = exception + self.result = result + +class _CallItem(object): + def __init__(self, work_id, fn, args, kwargs): + self.work_id = work_id + self.fn = fn + self.args = args + self.kwargs = kwargs + +def _process_worker(call_queue, result_queue): + """Evaluates calls from call_queue and places the results in result_queue. + + This worker is run in a separate process. + + Args: + call_queue: A multiprocessing.Queue of _CallItems that will be read and + evaluated by the worker. + result_queue: A multiprocessing.Queue of _ResultItems that will written + to by the worker. + shutdown: A multiprocessing.Event that will be set as a signal to the + worker that it should exit when call_queue is empty. + """ + while True: + call_item = call_queue.get(block=True) + if call_item is None: + # Wake up queue management thread + result_queue.put(None) + return + try: + r = call_item.fn(*call_item.args, **call_item.kwargs) + except BaseException: + e = sys.exc_info()[1] + result_queue.put(_ResultItem(call_item.work_id, + exception=e)) + else: + result_queue.put(_ResultItem(call_item.work_id, + result=r)) + +def _add_call_item_to_queue(pending_work_items, + work_ids, + call_queue): + """Fills call_queue with _WorkItems from pending_work_items. + + This function never blocks. + + Args: + pending_work_items: A dict mapping work ids to _WorkItems e.g. + {5: <_WorkItem...>, 6: <_WorkItem...>, ...} + work_ids: A queue.Queue of work ids e.g. Queue([5, 6, ...]). Work ids + are consumed and the corresponding _WorkItems from + pending_work_items are transformed into _CallItems and put in + call_queue. 
+ call_queue: A multiprocessing.Queue that will be filled with _CallItems + derived from _WorkItems. + """ + while True: + if call_queue.full(): + return + try: + work_id = work_ids.get(block=False) + except queue.Empty: + return + else: + work_item = pending_work_items[work_id] + + if work_item.future.set_running_or_notify_cancel(): + call_queue.put(_CallItem(work_id, + work_item.fn, + work_item.args, + work_item.kwargs), + block=True) + else: + del pending_work_items[work_id] + continue + +def _queue_management_worker(executor_reference, + processes, + pending_work_items, + work_ids_queue, + call_queue, + result_queue): + """Manages the communication between this process and the worker processes. + + This function is run in a local thread. + + Args: + executor_reference: A weakref.ref to the ProcessPoolExecutor that owns + this thread. Used to determine if the ProcessPoolExecutor has been + garbage collected and that this function can exit. + process: A list of the multiprocessing.Process instances used as + workers. + pending_work_items: A dict mapping work ids to _WorkItems e.g. + {5: <_WorkItem...>, 6: <_WorkItem...>, ...} + work_ids_queue: A queue.Queue of work ids e.g. Queue([5, 6, ...]). + call_queue: A multiprocessing.Queue that will be filled with _CallItems + derived from _WorkItems for processing by the process workers. + result_queue: A multiprocessing.Queue of _ResultItems generated by the + process workers. 
+ """ + nb_shutdown_processes = [0] + def shutdown_one_process(): + """Tell a worker to terminate, which will in turn wake us again""" + call_queue.put(None) + nb_shutdown_processes[0] += 1 + while True: + _add_call_item_to_queue(pending_work_items, + work_ids_queue, + call_queue) + + result_item = result_queue.get(block=True) + if result_item is not None: + work_item = pending_work_items[result_item.work_id] + del pending_work_items[result_item.work_id] + + if result_item.exception: + work_item.future.set_exception(result_item.exception) + else: + work_item.future.set_result(result_item.result) + # Check whether we should start shutting down. + executor = executor_reference() + # No more work items can be added if: + # - The interpreter is shutting down OR + # - The executor that owns this worker has been collected OR + # - The executor that owns this worker has been shutdown. + if _shutdown or executor is None or executor._shutdown_thread: + # Since no new work items can be added, it is safe to shutdown + # this thread if there are no pending work items. + if not pending_work_items: + while nb_shutdown_processes[0] < len(processes): + shutdown_one_process() + # If .join() is not called on the created processes then + # some multiprocessing.Queue methods may deadlock on Mac OS + # X. 
+ for p in processes: + p.join() + call_queue.close() + return + del executor + +_system_limits_checked = False +_system_limited = None +def _check_system_limits(): + global _system_limits_checked, _system_limited + if _system_limits_checked: + if _system_limited: + raise NotImplementedError(_system_limited) + _system_limits_checked = True + try: + import os + nsems_max = os.sysconf("SC_SEM_NSEMS_MAX") + except (AttributeError, ValueError): + # sysconf not available or setting not available + return + if nsems_max == -1: + # indetermine limit, assume that limit is determined + # by available memory only + return + if nsems_max >= 256: + # minimum number of semaphores available + # according to POSIX + return + _system_limited = "system provides too few semaphores (%d available, 256 necessary)" % nsems_max + raise NotImplementedError(_system_limited) + +class ProcessPoolExecutor(_base.Executor): + def __init__(self, max_workers=None): + """Initializes a new ProcessPoolExecutor instance. + + Args: + max_workers: The maximum number of processes that can be used to + execute the given calls. If None or not given then as many + worker processes will be created as the machine has processors. + """ + _check_system_limits() + + if max_workers is None: + self._max_workers = multiprocessing.cpu_count() + else: + self._max_workers = max_workers + + # Make the call queue slightly larger than the number of processes to + # prevent the worker processes from idling. But don't make it too big + # because futures in the call queue cannot be cancelled. + self._call_queue = multiprocessing.Queue(self._max_workers + + EXTRA_QUEUED_CALLS) + self._result_queue = multiprocessing.Queue() + self._work_ids = queue.Queue() + self._queue_management_thread = None + self._processes = set() + + # Shutdown is a two-step process. 
+ self._shutdown_thread = False + self._shutdown_lock = threading.Lock() + self._queue_count = 0 + self._pending_work_items = {} + + def _start_queue_management_thread(self): + # When the executor gets lost, the weakref callback will wake up + # the queue management thread. + def weakref_cb(_, q=self._result_queue): + q.put(None) + if self._queue_management_thread is None: + self._queue_management_thread = threading.Thread( + target=_queue_management_worker, + args=(weakref.ref(self, weakref_cb), + self._processes, + self._pending_work_items, + self._work_ids, + self._call_queue, + self._result_queue)) + self._queue_management_thread.daemon = True + self._queue_management_thread.start() + _threads_queues[self._queue_management_thread] = self._result_queue + + def _adjust_process_count(self): + for _ in range(len(self._processes), self._max_workers): + p = multiprocessing.Process( + target=_process_worker, + args=(self._call_queue, + self._result_queue)) + p.start() + self._processes.add(p) + + def submit(self, fn, *args, **kwargs): + with self._shutdown_lock: + if self._shutdown_thread: + raise RuntimeError('cannot schedule new futures after shutdown') + + f = _base.Future() + w = _WorkItem(f, fn, args, kwargs) + + self._pending_work_items[self._queue_count] = w + self._work_ids.put(self._queue_count) + self._queue_count += 1 + # Wake up queue management thread + self._result_queue.put(None) + + self._start_queue_management_thread() + self._adjust_process_count() + return f + submit.__doc__ = _base.Executor.submit.__doc__ + + def shutdown(self, wait=True): + with self._shutdown_lock: + self._shutdown_thread = True + if self._queue_management_thread: + # Wake up queue management thread + self._result_queue.put(None) + if wait: + self._queue_management_thread.join() + # To reduce the risk of openning too many files, remove references to + # objects that use file descriptors. 
+ self._queue_management_thread = None + self._call_queue = None + self._result_queue = None + self._processes = None + shutdown.__doc__ = _base.Executor.shutdown.__doc__ + +atexit.register(_python_exit) diff --git a/libs/concurrent/futures/thread.py b/libs/concurrent/futures/thread.py new file mode 100644 index 00000000..a45959d3 --- /dev/null +++ b/libs/concurrent/futures/thread.py @@ -0,0 +1,138 @@ +# Copyright 2009 Brian Quinlan. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +"""Implements ThreadPoolExecutor.""" + +from __future__ import with_statement +import atexit +import threading +import weakref +import sys + +from concurrent.futures import _base + +try: + import queue +except ImportError: + import Queue as queue + +__author__ = 'Brian Quinlan (brian@sweetapp.com)' + +# Workers are created as daemon threads. This is done to allow the interpreter +# to exit when there are still idle threads in a ThreadPoolExecutor's thread +# pool (i.e. shutdown() was not called). However, allowing workers to die with +# the interpreter has two undesirable properties: +# - The workers would still be running during interpretor shutdown, +# meaning that they would fail in unpredictable ways. +# - The workers could be killed while evaluating a work item, which could +# be bad if the callable being evaluated has external side-effects e.g. +# writing to a file. +# +# To work around this problem, an exit handler is installed which tells the +# workers to exit when their work queues are empty and then waits until the +# threads finish. 
+ +_threads_queues = weakref.WeakKeyDictionary() +_shutdown = False + +def _python_exit(): + global _shutdown + _shutdown = True + items = list(_threads_queues.items()) + for t, q in items: + q.put(None) + for t, q in items: + t.join() + +atexit.register(_python_exit) + +class _WorkItem(object): + def __init__(self, future, fn, args, kwargs): + self.future = future + self.fn = fn + self.args = args + self.kwargs = kwargs + + def run(self): + if not self.future.set_running_or_notify_cancel(): + return + + try: + result = self.fn(*self.args, **self.kwargs) + except BaseException: + e = sys.exc_info()[1] + self.future.set_exception(e) + else: + self.future.set_result(result) + +def _worker(executor_reference, work_queue): + try: + while True: + work_item = work_queue.get(block=True) + if work_item is not None: + work_item.run() + continue + executor = executor_reference() + # Exit if: + # - The interpreter is shutting down OR + # - The executor that owns the worker has been collected OR + # - The executor that owns the worker has been shutdown. + if _shutdown or executor is None or executor._shutdown: + # Notice other workers + work_queue.put(None) + return + del executor + except BaseException: + _base.LOGGER.critical('Exception in worker', exc_info=True) + +class ThreadPoolExecutor(_base.Executor): + def __init__(self, max_workers): + """Initializes a new ThreadPoolExecutor instance. + + Args: + max_workers: The maximum number of threads that can be used to + execute the given calls. 
+ """ + self._max_workers = max_workers + self._work_queue = queue.Queue() + self._threads = set() + self._shutdown = False + self._shutdown_lock = threading.Lock() + + def submit(self, fn, *args, **kwargs): + with self._shutdown_lock: + if self._shutdown: + raise RuntimeError('cannot schedule new futures after shutdown') + + f = _base.Future() + w = _WorkItem(f, fn, args, kwargs) + + self._work_queue.put(w) + self._adjust_thread_count() + return f + submit.__doc__ = _base.Executor.submit.__doc__ + + def _adjust_thread_count(self): + # When the executor gets lost, the weakref callback will wake up + # the worker threads. + def weakref_cb(_, q=self._work_queue): + q.put(None) + # TODO(bquinlan): Should avoid creating new threads if there are more + # idle threads than items in the work queue. + if len(self._threads) < self._max_workers: + t = threading.Thread(target=_worker, + args=(weakref.ref(self, weakref_cb), + self._work_queue)) + t.daemon = True + t.start() + self._threads.add(t) + _threads_queues[t] = self._work_queue + + def shutdown(self, wait=True): + with self._shutdown_lock: + self._shutdown = True + self._work_queue.put(None) + if wait: + for t in self._threads: + t.join() + shutdown.__doc__ = _base.Executor.shutdown.__doc__ diff --git a/libs/dateutil/__init__.py b/libs/dateutil/__init__.py new file mode 100644 index 00000000..290814cf --- /dev/null +++ b/libs/dateutil/__init__.py @@ -0,0 +1,9 @@ +""" +Copyright (c) 2003-2010 Gustavo Niemeyer + +This module offers extensions to the standard python 2.3+ +datetime module. +""" +__author__ = "Gustavo Niemeyer " +__license__ = "PSF License" +__version__ = "1.5" diff --git a/libs/dateutil/easter.py b/libs/dateutil/easter.py new file mode 100644 index 00000000..d7944104 --- /dev/null +++ b/libs/dateutil/easter.py @@ -0,0 +1,92 @@ +""" +Copyright (c) 2003-2007 Gustavo Niemeyer + +This module offers extensions to the standard python 2.3+ +datetime module. 
+""" +__author__ = "Gustavo Niemeyer <gustavo@niemeyer.net>" +__license__ = "PSF License" + +import datetime + +__all__ = ["easter", "EASTER_JULIAN", "EASTER_ORTHODOX", "EASTER_WESTERN"] + +EASTER_JULIAN = 1 +EASTER_ORTHODOX = 2 +EASTER_WESTERN = 3 + +def easter(year, method=EASTER_WESTERN): + """ + This method was ported from the work done by GM Arts, + on top of the algorithm by Claus Tondering, which was + based in part on the algorithm of Ouding (1940), as + quoted in "Explanatory Supplement to the Astronomical + Almanac", P. Kenneth Seidelmann, editor. + + This algorithm implements three different easter + calculation methods: + + 1 - Original calculation in Julian calendar, valid in + dates after 326 AD + 2 - Original method, with date converted to Gregorian + calendar, valid in years 1583 to 4099 + 3 - Revised method, in Gregorian calendar, valid in + years 1583 to 4099 as well + + These methods are represented by the constants: + + EASTER_JULIAN = 1 + EASTER_ORTHODOX = 2 + EASTER_WESTERN = 3 + + The default method is method 3. 
+ + More about the algorithm may be found at: + + http://users.chariot.net.au/~gmarts/eastalg.htm + + and + + http://www.tondering.dk/claus/calendar.html + + """ + + if not (1 <= method <= 3): + raise ValueError, "invalid method" + + # g - Golden year - 1 + # c - Century + # h - (23 - Epact) mod 30 + # i - Number of days from March 21 to Paschal Full Moon + # j - Weekday for PFM (0=Sunday, etc) + # p - Number of days from March 21 to Sunday on or before PFM + # (-6 to 28 methods 1 & 3, to 56 for method 2) + # e - Extra days to add for method 2 (converting Julian + # date to Gregorian date) + + y = year + g = y % 19 + e = 0 + if method < 3: + # Old method + i = (19*g+15)%30 + j = (y+y//4+i)%7 + if method == 2: + # Extra dates to convert Julian to Gregorian date + e = 10 + if y > 1600: + e = e+y//100-16-(y//100-16)//4 + else: + # New method + c = y//100 + h = (c-c//4-(8*c+13)//25+19*g+15)%30 + i = h-(h//28)*(1-(h//28)*(29//(h+1))*((21-g)//11)) + j = (y+y//4+i+2-c+c//4)%7 + + # p can be from -6 to 56 corresponding to dates 22 March to 23 May + # (later dates apply to method 2, although 23 May never actually occurs) + p = i-j+e + d = 1+(p+27+(p+6)//40)%31 + m = 3+(p+26)//30 + return datetime.date(int(y),int(m),int(d)) + diff --git a/libs/dateutil/parser.py b/libs/dateutil/parser.py new file mode 100644 index 00000000..5d824e41 --- /dev/null +++ b/libs/dateutil/parser.py @@ -0,0 +1,886 @@ +# -*- coding:iso-8859-1 -*- +""" +Copyright (c) 2003-2007 Gustavo Niemeyer + +This module offers extensions to the standard python 2.3+ +datetime module. 
+""" +__author__ = "Gustavo Niemeyer " +__license__ = "PSF License" + +import datetime +import string +import time +import sys +import os + +try: + from cStringIO import StringIO +except ImportError: + from StringIO import StringIO + +import relativedelta +import tz + + +__all__ = ["parse", "parserinfo"] + + +# Some pointers: +# +# http://www.cl.cam.ac.uk/~mgk25/iso-time.html +# http://www.iso.ch/iso/en/prods-services/popstds/datesandtime.html +# http://www.w3.org/TR/NOTE-datetime +# http://ringmaster.arc.nasa.gov/tools/time_formats.html +# http://search.cpan.org/author/MUIR/Time-modules-2003.0211/lib/Time/ParseDate.pm +# http://stein.cshl.org/jade/distrib/docs/java.text.SimpleDateFormat.html + + +class _timelex(object): + + def __init__(self, instream): + if isinstance(instream, basestring): + instream = StringIO(instream) + self.instream = instream + self.wordchars = ('abcdfeghijklmnopqrstuvwxyz' + 'ABCDEFGHIJKLMNOPQRSTUVWXYZ_' + 'ßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿ' + 'ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖØÙÚÛÜÝÞ') + self.numchars = '0123456789' + self.whitespace = ' \t\r\n' + self.charstack = [] + self.tokenstack = [] + self.eof = False + + def get_token(self): + if self.tokenstack: + return self.tokenstack.pop(0) + seenletters = False + token = None + state = None + wordchars = self.wordchars + numchars = self.numchars + whitespace = self.whitespace + while not self.eof: + if self.charstack: + nextchar = self.charstack.pop(0) + else: + nextchar = self.instream.read(1) + while nextchar == '\x00': + nextchar = self.instream.read(1) + if not nextchar: + self.eof = True + break + elif not state: + token = nextchar + if nextchar in wordchars: + state = 'a' + elif nextchar in numchars: + state = '0' + elif nextchar in whitespace: + token = ' ' + break # emit token + else: + break # emit token + elif state == 'a': + seenletters = True + if nextchar in wordchars: + token += nextchar + elif nextchar == '.': + token += nextchar + state = 'a.' 
+ else: + self.charstack.append(nextchar) + break # emit token + elif state == '0': + if nextchar in numchars: + token += nextchar + elif nextchar == '.': + token += nextchar + state = '0.' + else: + self.charstack.append(nextchar) + break # emit token + elif state == 'a.': + seenletters = True + if nextchar == '.' or nextchar in wordchars: + token += nextchar + elif nextchar in numchars and token[-1] == '.': + token += nextchar + state = '0.' + else: + self.charstack.append(nextchar) + break # emit token + elif state == '0.': + if nextchar == '.' or nextchar in numchars: + token += nextchar + elif nextchar in wordchars and token[-1] == '.': + token += nextchar + state = 'a.' + else: + self.charstack.append(nextchar) + break # emit token + if (state in ('a.', '0.') and + (seenletters or token.count('.') > 1 or token[-1] == '.')): + l = token.split('.') + token = l[0] + for tok in l[1:]: + self.tokenstack.append('.') + if tok: + self.tokenstack.append(tok) + return token + + def __iter__(self): + return self + + def next(self): + token = self.get_token() + if token is None: + raise StopIteration + return token + + def split(cls, s): + return list(cls(s)) + split = classmethod(split) + + +class _resultbase(object): + + def __init__(self): + for attr in self.__slots__: + setattr(self, attr, None) + + def _repr(self, classname): + l = [] + for attr in self.__slots__: + value = getattr(self, attr) + if value is not None: + l.append("%s=%s" % (attr, `value`)) + return "%s(%s)" % (classname, ", ".join(l)) + + def __repr__(self): + return self._repr(self.__class__.__name__) + + +class parserinfo(object): + + # m from a.m/p.m, t from ISO T separator + JUMP = [" ", ".", ",", ";", "-", "/", "'", + "at", "on", "and", "ad", "m", "t", "of", + "st", "nd", "rd", "th"] + + WEEKDAYS = [("Mon", "Monday"), + ("Tue", "Tuesday"), + ("Wed", "Wednesday"), + ("Thu", "Thursday"), + ("Fri", "Friday"), + ("Sat", "Saturday"), + ("Sun", "Sunday")] + MONTHS = [("Jan", "January"), + ("Feb", 
"February"), + ("Mar", "March"), + ("Apr", "April"), + ("May", "May"), + ("Jun", "June"), + ("Jul", "July"), + ("Aug", "August"), + ("Sep", "September"), + ("Oct", "October"), + ("Nov", "November"), + ("Dec", "December")] + HMS = [("h", "hour", "hours"), + ("m", "minute", "minutes"), + ("s", "second", "seconds")] + AMPM = [("am", "a"), + ("pm", "p")] + UTCZONE = ["UTC", "GMT", "Z"] + PERTAIN = ["of"] + TZOFFSET = {} + + def __init__(self, dayfirst=False, yearfirst=False): + self._jump = self._convert(self.JUMP) + self._weekdays = self._convert(self.WEEKDAYS) + self._months = self._convert(self.MONTHS) + self._hms = self._convert(self.HMS) + self._ampm = self._convert(self.AMPM) + self._utczone = self._convert(self.UTCZONE) + self._pertain = self._convert(self.PERTAIN) + + self.dayfirst = dayfirst + self.yearfirst = yearfirst + + self._year = time.localtime().tm_year + self._century = self._year//100*100 + + def _convert(self, lst): + dct = {} + for i in range(len(lst)): + v = lst[i] + if isinstance(v, tuple): + for v in v: + dct[v.lower()] = i + else: + dct[v.lower()] = i + return dct + + def jump(self, name): + return name.lower() in self._jump + + def weekday(self, name): + if len(name) >= 3: + try: + return self._weekdays[name.lower()] + except KeyError: + pass + return None + + def month(self, name): + if len(name) >= 3: + try: + return self._months[name.lower()]+1 + except KeyError: + pass + return None + + def hms(self, name): + try: + return self._hms[name.lower()] + except KeyError: + return None + + def ampm(self, name): + try: + return self._ampm[name.lower()] + except KeyError: + return None + + def pertain(self, name): + return name.lower() in self._pertain + + def utczone(self, name): + return name.lower() in self._utczone + + def tzoffset(self, name): + if name in self._utczone: + return 0 + return self.TZOFFSET.get(name) + + def convertyear(self, year): + if year < 100: + year += self._century + if abs(year-self._year) >= 50: + if year < self._year: 
+ year += 100 + else: + year -= 100 + return year + + def validate(self, res): + # move to info + if res.year is not None: + res.year = self.convertyear(res.year) + if res.tzoffset == 0 and not res.tzname or res.tzname == 'Z': + res.tzname = "UTC" + res.tzoffset = 0 + elif res.tzoffset != 0 and res.tzname and self.utczone(res.tzname): + res.tzoffset = 0 + return True + + +class parser(object): + + def __init__(self, info=None): + self.info = info or parserinfo() + + def parse(self, timestr, default=None, + ignoretz=False, tzinfos=None, + **kwargs): + if not default: + default = datetime.datetime.now().replace(hour=0, minute=0, + second=0, microsecond=0) + res = self._parse(timestr, **kwargs) + if res is None: + raise ValueError, "unknown string format" + repl = {} + for attr in ["year", "month", "day", "hour", + "minute", "second", "microsecond"]: + value = getattr(res, attr) + if value is not None: + repl[attr] = value + ret = default.replace(**repl) + if res.weekday is not None and not res.day: + ret = ret+relativedelta.relativedelta(weekday=res.weekday) + if not ignoretz: + if callable(tzinfos) or tzinfos and res.tzname in tzinfos: + if callable(tzinfos): + tzdata = tzinfos(res.tzname, res.tzoffset) + else: + tzdata = tzinfos.get(res.tzname) + if isinstance(tzdata, datetime.tzinfo): + tzinfo = tzdata + elif isinstance(tzdata, basestring): + tzinfo = tz.tzstr(tzdata) + elif isinstance(tzdata, int): + tzinfo = tz.tzoffset(res.tzname, tzdata) + else: + raise ValueError, "offset must be tzinfo subclass, " \ + "tz string, or int offset" + ret = ret.replace(tzinfo=tzinfo) + elif res.tzname and res.tzname in time.tzname: + ret = ret.replace(tzinfo=tz.tzlocal()) + elif res.tzoffset == 0: + ret = ret.replace(tzinfo=tz.tzutc()) + elif res.tzoffset: + ret = ret.replace(tzinfo=tz.tzoffset(res.tzname, res.tzoffset)) + return ret + + class _result(_resultbase): + __slots__ = ["year", "month", "day", "weekday", + "hour", "minute", "second", "microsecond", + "tzname", 
"tzoffset"] + + def _parse(self, timestr, dayfirst=None, yearfirst=None, fuzzy=False): + info = self.info + if dayfirst is None: + dayfirst = info.dayfirst + if yearfirst is None: + yearfirst = info.yearfirst + res = self._result() + l = _timelex.split(timestr) + try: + + # year/month/day list + ymd = [] + + # Index of the month string in ymd + mstridx = -1 + + len_l = len(l) + i = 0 + while i < len_l: + + # Check if it's a number + try: + value_repr = l[i] + value = float(value_repr) + except ValueError: + value = None + + if value is not None: + # Token is a number + len_li = len(l[i]) + i += 1 + if (len(ymd) == 3 and len_li in (2, 4) + and (i >= len_l or (l[i] != ':' and + info.hms(l[i]) is None))): + # 19990101T23[59] + s = l[i-1] + res.hour = int(s[:2]) + if len_li == 4: + res.minute = int(s[2:]) + elif len_li == 6 or (len_li > 6 and l[i-1].find('.') == 6): + # YYMMDD or HHMMSS[.ss] + s = l[i-1] + if not ymd and l[i-1].find('.') == -1: + ymd.append(info.convertyear(int(s[:2]))) + ymd.append(int(s[2:4])) + ymd.append(int(s[4:])) + else: + # 19990101T235959[.59] + res.hour = int(s[:2]) + res.minute = int(s[2:4]) + res.second, res.microsecond = _parsems(s[4:]) + elif len_li == 8: + # YYYYMMDD + s = l[i-1] + ymd.append(int(s[:4])) + ymd.append(int(s[4:6])) + ymd.append(int(s[6:])) + elif len_li in (12, 14): + # YYYYMMDDhhmm[ss] + s = l[i-1] + ymd.append(int(s[:4])) + ymd.append(int(s[4:6])) + ymd.append(int(s[6:8])) + res.hour = int(s[8:10]) + res.minute = int(s[10:12]) + if len_li == 14: + res.second = int(s[12:]) + elif ((i < len_l and info.hms(l[i]) is not None) or + (i+1 < len_l and l[i] == ' ' and + info.hms(l[i+1]) is not None)): + # HH[ ]h or MM[ ]m or SS[.ss][ ]s + if l[i] == ' ': + i += 1 + idx = info.hms(l[i]) + while True: + if idx == 0: + res.hour = int(value) + if value%1: + res.minute = int(60*(value%1)) + elif idx == 1: + res.minute = int(value) + if value%1: + res.second = int(60*(value%1)) + elif idx == 2: + res.second, res.microsecond = \ + 
_parsems(value_repr) + i += 1 + if i >= len_l or idx == 2: + break + # 12h00 + try: + value_repr = l[i] + value = float(value_repr) + except ValueError: + break + else: + i += 1 + idx += 1 + if i < len_l: + newidx = info.hms(l[i]) + if newidx is not None: + idx = newidx + elif i+1 < len_l and l[i] == ':': + # HH:MM[:SS[.ss]] + res.hour = int(value) + i += 1 + value = float(l[i]) + res.minute = int(value) + if value%1: + res.second = int(60*(value%1)) + i += 1 + if i < len_l and l[i] == ':': + res.second, res.microsecond = _parsems(l[i+1]) + i += 2 + elif i < len_l and l[i] in ('-', '/', '.'): + sep = l[i] + ymd.append(int(value)) + i += 1 + if i < len_l and not info.jump(l[i]): + try: + # 01-01[-01] + ymd.append(int(l[i])) + except ValueError: + # 01-Jan[-01] + value = info.month(l[i]) + if value is not None: + ymd.append(value) + assert mstridx == -1 + mstridx = len(ymd)-1 + else: + return None + i += 1 + if i < len_l and l[i] == sep: + # We have three members + i += 1 + value = info.month(l[i]) + if value is not None: + ymd.append(value) + mstridx = len(ymd)-1 + assert mstridx == -1 + else: + ymd.append(int(l[i])) + i += 1 + elif i >= len_l or info.jump(l[i]): + if i+1 < len_l and info.ampm(l[i+1]) is not None: + # 12 am + res.hour = int(value) + if res.hour < 12 and info.ampm(l[i+1]) == 1: + res.hour += 12 + elif res.hour == 12 and info.ampm(l[i+1]) == 0: + res.hour = 0 + i += 1 + else: + # Year, month or day + ymd.append(int(value)) + i += 1 + elif info.ampm(l[i]) is not None: + # 12am + res.hour = int(value) + if res.hour < 12 and info.ampm(l[i]) == 1: + res.hour += 12 + elif res.hour == 12 and info.ampm(l[i]) == 0: + res.hour = 0 + i += 1 + elif not fuzzy: + return None + else: + i += 1 + continue + + # Check weekday + value = info.weekday(l[i]) + if value is not None: + res.weekday = value + i += 1 + continue + + # Check month name + value = info.month(l[i]) + if value is not None: + ymd.append(value) + assert mstridx == -1 + mstridx = len(ymd)-1 + i += 1 + 
if i < len_l: + if l[i] in ('-', '/'): + # Jan-01[-99] + sep = l[i] + i += 1 + ymd.append(int(l[i])) + i += 1 + if i < len_l and l[i] == sep: + # Jan-01-99 + i += 1 + ymd.append(int(l[i])) + i += 1 + elif (i+3 < len_l and l[i] == l[i+2] == ' ' + and info.pertain(l[i+1])): + # Jan of 01 + # In this case, 01 is clearly year + try: + value = int(l[i+3]) + except ValueError: + # Wrong guess + pass + else: + # Convert it here to become unambiguous + ymd.append(info.convertyear(value)) + i += 4 + continue + + # Check am/pm + value = info.ampm(l[i]) + if value is not None: + if value == 1 and res.hour < 12: + res.hour += 12 + elif value == 0 and res.hour == 12: + res.hour = 0 + i += 1 + continue + + # Check for a timezone name + if (res.hour is not None and len(l[i]) <= 5 and + res.tzname is None and res.tzoffset is None and + not [x for x in l[i] if x not in string.ascii_uppercase]): + res.tzname = l[i] + res.tzoffset = info.tzoffset(res.tzname) + i += 1 + + # Check for something like GMT+3, or BRST+3. Notice + # that it doesn't mean "I am 3 hours after GMT", but + # "my time +3 is GMT". If found, we reverse the + # logic so that timezone parsing code will get it + # right. + if i < len_l and l[i] in ('+', '-'): + l[i] = ('+', '-')[l[i] == '+'] + res.tzoffset = None + if info.utczone(res.tzname): + # With something like GMT+3, the timezone + # is *not* GMT. 
+ res.tzname = None + + continue + + # Check for a numbered timezone + if res.hour is not None and l[i] in ('+', '-'): + signal = (-1,1)[l[i] == '+'] + i += 1 + len_li = len(l[i]) + if len_li == 4: + # -0300 + res.tzoffset = int(l[i][:2])*3600+int(l[i][2:])*60 + elif i+1 < len_l and l[i+1] == ':': + # -03:00 + res.tzoffset = int(l[i])*3600+int(l[i+2])*60 + i += 2 + elif len_li <= 2: + # -[0]3 + res.tzoffset = int(l[i][:2])*3600 + else: + return None + i += 1 + res.tzoffset *= signal + + # Look for a timezone name between parenthesis + if (i+3 < len_l and + info.jump(l[i]) and l[i+1] == '(' and l[i+3] == ')' and + 3 <= len(l[i+2]) <= 5 and + not [x for x in l[i+2] + if x not in string.ascii_uppercase]): + # -0300 (BRST) + res.tzname = l[i+2] + i += 4 + continue + + # Check jumps + if not (info.jump(l[i]) or fuzzy): + return None + + i += 1 + + # Process year/month/day + len_ymd = len(ymd) + if len_ymd > 3: + # More than three members!? + return None + elif len_ymd == 1 or (mstridx != -1 and len_ymd == 2): + # One member, or two members with a month string + if mstridx != -1: + res.month = ymd[mstridx] + del ymd[mstridx] + if len_ymd > 1 or mstridx == -1: + if ymd[0] > 31: + res.year = ymd[0] + else: + res.day = ymd[0] + elif len_ymd == 2: + # Two members with numbers + if ymd[0] > 31: + # 99-01 + res.year, res.month = ymd + elif ymd[1] > 31: + # 01-99 + res.month, res.year = ymd + elif dayfirst and ymd[1] <= 12: + # 13-01 + res.day, res.month = ymd + else: + # 01-13 + res.month, res.day = ymd + if len_ymd == 3: + # Three members + if mstridx == 0: + res.month, res.day, res.year = ymd + elif mstridx == 1: + if ymd[0] > 31 or (yearfirst and ymd[2] <= 31): + # 99-Jan-01 + res.year, res.month, res.day = ymd + else: + # 01-Jan-01 + # Give precendence to day-first, since + # two-digit years is usually hand-written. + res.day, res.month, res.year = ymd + elif mstridx == 2: + # WTF!? 
+ if ymd[1] > 31: + # 01-99-Jan + res.day, res.year, res.month = ymd + else: + # 99-01-Jan + res.year, res.day, res.month = ymd + else: + if ymd[0] > 31 or \ + (yearfirst and ymd[1] <= 12 and ymd[2] <= 31): + # 99-01-01 + res.year, res.month, res.day = ymd + elif ymd[0] > 12 or (dayfirst and ymd[1] <= 12): + # 13-01-01 + res.day, res.month, res.year = ymd + else: + # 01-13-01 + res.month, res.day, res.year = ymd + + except (IndexError, ValueError, AssertionError): + return None + + if not info.validate(res): + return None + return res + +DEFAULTPARSER = parser() +def parse(timestr, parserinfo=None, **kwargs): + if parserinfo: + return parser(parserinfo).parse(timestr, **kwargs) + else: + return DEFAULTPARSER.parse(timestr, **kwargs) + + +class _tzparser(object): + + class _result(_resultbase): + + __slots__ = ["stdabbr", "stdoffset", "dstabbr", "dstoffset", + "start", "end"] + + class _attr(_resultbase): + __slots__ = ["month", "week", "weekday", + "yday", "jyday", "day", "time"] + + def __repr__(self): + return self._repr("") + + def __init__(self): + _resultbase.__init__(self) + self.start = self._attr() + self.end = self._attr() + + def parse(self, tzstr): + res = self._result() + l = _timelex.split(tzstr) + try: + + len_l = len(l) + + i = 0 + while i < len_l: + # BRST+3[BRDT[+2]] + j = i + while j < len_l and not [x for x in l[j] + if x in "0123456789:,-+"]: + j += 1 + if j != i: + if not res.stdabbr: + offattr = "stdoffset" + res.stdabbr = "".join(l[i:j]) + else: + offattr = "dstoffset" + res.dstabbr = "".join(l[i:j]) + i = j + if (i < len_l and + (l[i] in ('+', '-') or l[i][0] in "0123456789")): + if l[i] in ('+', '-'): + # Yes, that's right. See the TZ variable + # documentation. 
+ signal = (1,-1)[l[i] == '+'] + i += 1 + else: + signal = -1 + len_li = len(l[i]) + if len_li == 4: + # -0300 + setattr(res, offattr, + (int(l[i][:2])*3600+int(l[i][2:])*60)*signal) + elif i+1 < len_l and l[i+1] == ':': + # -03:00 + setattr(res, offattr, + (int(l[i])*3600+int(l[i+2])*60)*signal) + i += 2 + elif len_li <= 2: + # -[0]3 + setattr(res, offattr, + int(l[i][:2])*3600*signal) + else: + return None + i += 1 + if res.dstabbr: + break + else: + break + + if i < len_l: + for j in range(i, len_l): + if l[j] == ';': l[j] = ',' + + assert l[i] == ',' + + i += 1 + + if i >= len_l: + pass + elif (8 <= l.count(',') <= 9 and + not [y for x in l[i:] if x != ',' + for y in x if y not in "0123456789"]): + # GMT0BST,3,0,30,3600,10,0,26,7200[,3600] + for x in (res.start, res.end): + x.month = int(l[i]) + i += 2 + if l[i] == '-': + value = int(l[i+1])*-1 + i += 1 + else: + value = int(l[i]) + i += 2 + if value: + x.week = value + x.weekday = (int(l[i])-1)%7 + else: + x.day = int(l[i]) + i += 2 + x.time = int(l[i]) + i += 2 + if i < len_l: + if l[i] in ('-','+'): + signal = (-1,1)[l[i] == "+"] + i += 1 + else: + signal = 1 + res.dstoffset = (res.stdoffset+int(l[i]))*signal + elif (l.count(',') == 2 and l[i:].count('/') <= 2 and + not [y for x in l[i:] if x not in (',','/','J','M', + '.','-',':') + for y in x if y not in "0123456789"]): + for x in (res.start, res.end): + if l[i] == 'J': + # non-leap year day (1 based) + i += 1 + x.jyday = int(l[i]) + elif l[i] == 'M': + # month[-.]week[-.]weekday + i += 1 + x.month = int(l[i]) + i += 1 + assert l[i] in ('-', '.') + i += 1 + x.week = int(l[i]) + if x.week == 5: + x.week = -1 + i += 1 + assert l[i] in ('-', '.') + i += 1 + x.weekday = (int(l[i])-1)%7 + else: + # year day (zero based) + x.yday = int(l[i])+1 + + i += 1 + + if i < len_l and l[i] == '/': + i += 1 + # start time + len_li = len(l[i]) + if len_li == 4: + # -0300 + x.time = (int(l[i][:2])*3600+int(l[i][2:])*60) + elif i+1 < len_l and l[i+1] == ':': + # -03:00 + 
x.time = int(l[i])*3600+int(l[i+2])*60 + i += 2 + if i+1 < len_l and l[i+1] == ':': + i += 2 + x.time += int(l[i]) + elif len_li <= 2: + # -[0]3 + x.time = (int(l[i][:2])*3600) + else: + return None + i += 1 + + assert i == len_l or l[i] == ',' + + i += 1 + + assert i >= len_l + + except (IndexError, ValueError, AssertionError): + return None + + return res + + +DEFAULTTZPARSER = _tzparser() +def _parsetz(tzstr): + return DEFAULTTZPARSER.parse(tzstr) + + +def _parsems(value): + """Parse a I[.F] seconds value into (seconds, microseconds).""" + if "." not in value: + return int(value), 0 + else: + i, f = value.split(".") + return int(i), int(f.ljust(6, "0")[:6]) + + +# vim:ts=4:sw=4:et diff --git a/libs/dateutil/relativedelta.py b/libs/dateutil/relativedelta.py new file mode 100644 index 00000000..0c72a818 --- /dev/null +++ b/libs/dateutil/relativedelta.py @@ -0,0 +1,432 @@ +""" +Copyright (c) 2003-2010 Gustavo Niemeyer + +This module offers extensions to the standard python 2.3+ +datetime module. +""" +__author__ = "Gustavo Niemeyer " +__license__ = "PSF License" + +import datetime +import calendar + +__all__ = ["relativedelta", "MO", "TU", "WE", "TH", "FR", "SA", "SU"] + +class weekday(object): + __slots__ = ["weekday", "n"] + + def __init__(self, weekday, n=None): + self.weekday = weekday + self.n = n + + def __call__(self, n): + if n == self.n: + return self + else: + return self.__class__(self.weekday, n) + + def __eq__(self, other): + try: + if self.weekday != other.weekday or self.n != other.n: + return False + except AttributeError: + return False + return True + + def __repr__(self): + s = ("MO", "TU", "WE", "TH", "FR", "SA", "SU")[self.weekday] + if not self.n: + return s + else: + return "%s(%+d)" % (s, self.n) + +MO, TU, WE, TH, FR, SA, SU = weekdays = tuple([weekday(x) for x in range(7)]) + +class relativedelta: + """ +The relativedelta type is based on the specification of the excelent +work done by M.-A. Lemburg in his mx.DateTime extension. 
However, +notice that this type does *NOT* implement the same algorithm as +his work. Do *NOT* expect it to behave like mx.DateTime's counterpart. + +There's two different ways to build a relativedelta instance. The +first one is passing it two date/datetime classes: + + relativedelta(datetime1, datetime2) + +And the other way is to use the following keyword arguments: + + year, month, day, hour, minute, second, microsecond: + Absolute information. + + years, months, weeks, days, hours, minutes, seconds, microseconds: + Relative information, may be negative. + + weekday: + One of the weekday instances (MO, TU, etc). These instances may + receive a parameter N, specifying the Nth weekday, which could + be positive or negative (like MO(+1) or MO(-2). Not specifying + it is the same as specifying +1. You can also use an integer, + where 0=MO. + + leapdays: + Will add given days to the date found, if year is a leap + year, and the date found is post 28 of february. + + yearday, nlyearday: + Set the yearday or the non-leap year day (jump leap days). + These are converted to day/month/leapdays information. + +Here is the behavior of operations with relativedelta: + +1) Calculate the absolute year, using the 'year' argument, or the + original datetime year, if the argument is not present. + +2) Add the relative 'years' argument to the absolute year. + +3) Do steps 1 and 2 for month/months. + +4) Calculate the absolute day, using the 'day' argument, or the + original datetime day, if the argument is not present. Then, + subtract from the day until it fits in the year and month + found after their operations. + +5) Add the relative 'days' argument to the absolute day. Notice + that the 'weeks' argument is multiplied by 7 and added to + 'days'. + +6) Do steps 1 and 2 for hour/hours, minute/minutes, second/seconds, + microsecond/microseconds. + +7) If the 'weekday' argument is present, calculate the weekday, + with the given (wday, nth) tuple. 
wday is the index of the + weekday (0-6, 0=Mon), and nth is the number of weeks to add + forward or backward, depending on its signal. Notice that if + the calculated date is already Monday, for example, using + (0, 1) or (0, -1) won't change the day. + """ + + def __init__(self, dt1=None, dt2=None, + years=0, months=0, days=0, leapdays=0, weeks=0, + hours=0, minutes=0, seconds=0, microseconds=0, + year=None, month=None, day=None, weekday=None, + yearday=None, nlyearday=None, + hour=None, minute=None, second=None, microsecond=None): + if dt1 and dt2: + if not isinstance(dt1, datetime.date) or \ + not isinstance(dt2, datetime.date): + raise TypeError, "relativedelta only diffs datetime/date" + if type(dt1) is not type(dt2): + if not isinstance(dt1, datetime.datetime): + dt1 = datetime.datetime.fromordinal(dt1.toordinal()) + elif not isinstance(dt2, datetime.datetime): + dt2 = datetime.datetime.fromordinal(dt2.toordinal()) + self.years = 0 + self.months = 0 + self.days = 0 + self.leapdays = 0 + self.hours = 0 + self.minutes = 0 + self.seconds = 0 + self.microseconds = 0 + self.year = None + self.month = None + self.day = None + self.weekday = None + self.hour = None + self.minute = None + self.second = None + self.microsecond = None + self._has_time = 0 + + months = (dt1.year*12+dt1.month)-(dt2.year*12+dt2.month) + self._set_months(months) + dtm = self.__radd__(dt2) + if dt1 < dt2: + while dt1 > dtm: + months += 1 + self._set_months(months) + dtm = self.__radd__(dt2) + else: + while dt1 < dtm: + months -= 1 + self._set_months(months) + dtm = self.__radd__(dt2) + delta = dt1 - dtm + self.seconds = delta.seconds+delta.days*86400 + self.microseconds = delta.microseconds + else: + self.years = years + self.months = months + self.days = days+weeks*7 + self.leapdays = leapdays + self.hours = hours + self.minutes = minutes + self.seconds = seconds + self.microseconds = microseconds + self.year = year + self.month = month + self.day = day + self.hour = hour + self.minute = 
minute + self.second = second + self.microsecond = microsecond + + if type(weekday) is int: + self.weekday = weekdays[weekday] + else: + self.weekday = weekday + + yday = 0 + if nlyearday: + yday = nlyearday + elif yearday: + yday = yearday + if yearday > 59: + self.leapdays = -1 + if yday: + ydayidx = [31,59,90,120,151,181,212,243,273,304,334,366] + for idx, ydays in enumerate(ydayidx): + if yday <= ydays: + self.month = idx+1 + if idx == 0: + self.day = yday + else: + self.day = yday-ydayidx[idx-1] + break + else: + raise ValueError, "invalid year day (%d)" % yday + + self._fix() + + def _fix(self): + if abs(self.microseconds) > 999999: + s = self.microseconds//abs(self.microseconds) + div, mod = divmod(self.microseconds*s, 1000000) + self.microseconds = mod*s + self.seconds += div*s + if abs(self.seconds) > 59: + s = self.seconds//abs(self.seconds) + div, mod = divmod(self.seconds*s, 60) + self.seconds = mod*s + self.minutes += div*s + if abs(self.minutes) > 59: + s = self.minutes//abs(self.minutes) + div, mod = divmod(self.minutes*s, 60) + self.minutes = mod*s + self.hours += div*s + if abs(self.hours) > 23: + s = self.hours//abs(self.hours) + div, mod = divmod(self.hours*s, 24) + self.hours = mod*s + self.days += div*s + if abs(self.months) > 11: + s = self.months//abs(self.months) + div, mod = divmod(self.months*s, 12) + self.months = mod*s + self.years += div*s + if (self.hours or self.minutes or self.seconds or self.microseconds or + self.hour is not None or self.minute is not None or + self.second is not None or self.microsecond is not None): + self._has_time = 1 + else: + self._has_time = 0 + + def _set_months(self, months): + self.months = months + if abs(self.months) > 11: + s = self.months//abs(self.months) + div, mod = divmod(self.months*s, 12) + self.months = mod*s + self.years = div*s + else: + self.years = 0 + + def __radd__(self, other): + if not isinstance(other, datetime.date): + raise TypeError, "unsupported type for add operation" + elif 
self._has_time and not isinstance(other, datetime.datetime): + other = datetime.datetime.fromordinal(other.toordinal()) + year = (self.year or other.year)+self.years + month = self.month or other.month + if self.months: + assert 1 <= abs(self.months) <= 12 + month += self.months + if month > 12: + year += 1 + month -= 12 + elif month < 1: + year -= 1 + month += 12 + day = min(calendar.monthrange(year, month)[1], + self.day or other.day) + repl = {"year": year, "month": month, "day": day} + for attr in ["hour", "minute", "second", "microsecond"]: + value = getattr(self, attr) + if value is not None: + repl[attr] = value + days = self.days + if self.leapdays and month > 2 and calendar.isleap(year): + days += self.leapdays + ret = (other.replace(**repl) + + datetime.timedelta(days=days, + hours=self.hours, + minutes=self.minutes, + seconds=self.seconds, + microseconds=self.microseconds)) + if self.weekday: + weekday, nth = self.weekday.weekday, self.weekday.n or 1 + jumpdays = (abs(nth)-1)*7 + if nth > 0: + jumpdays += (7-ret.weekday()+weekday)%7 + else: + jumpdays += (ret.weekday()-weekday)%7 + jumpdays *= -1 + ret += datetime.timedelta(days=jumpdays) + return ret + + def __rsub__(self, other): + return self.__neg__().__radd__(other) + + def __add__(self, other): + if not isinstance(other, relativedelta): + raise TypeError, "unsupported type for add operation" + return relativedelta(years=other.years+self.years, + months=other.months+self.months, + days=other.days+self.days, + hours=other.hours+self.hours, + minutes=other.minutes+self.minutes, + seconds=other.seconds+self.seconds, + microseconds=other.microseconds+self.microseconds, + leapdays=other.leapdays or self.leapdays, + year=other.year or self.year, + month=other.month or self.month, + day=other.day or self.day, + weekday=other.weekday or self.weekday, + hour=other.hour or self.hour, + minute=other.minute or self.minute, + second=other.second or self.second, + microsecond=other.second or self.microsecond) + + 
def __sub__(self, other): + if not isinstance(other, relativedelta): + raise TypeError, "unsupported type for sub operation" + return relativedelta(years=other.years-self.years, + months=other.months-self.months, + days=other.days-self.days, + hours=other.hours-self.hours, + minutes=other.minutes-self.minutes, + seconds=other.seconds-self.seconds, + microseconds=other.microseconds-self.microseconds, + leapdays=other.leapdays or self.leapdays, + year=other.year or self.year, + month=other.month or self.month, + day=other.day or self.day, + weekday=other.weekday or self.weekday, + hour=other.hour or self.hour, + minute=other.minute or self.minute, + second=other.second or self.second, + microsecond=other.second or self.microsecond) + + def __neg__(self): + return relativedelta(years=-self.years, + months=-self.months, + days=-self.days, + hours=-self.hours, + minutes=-self.minutes, + seconds=-self.seconds, + microseconds=-self.microseconds, + leapdays=self.leapdays, + year=self.year, + month=self.month, + day=self.day, + weekday=self.weekday, + hour=self.hour, + minute=self.minute, + second=self.second, + microsecond=self.microsecond) + + def __nonzero__(self): + return not (not self.years and + not self.months and + not self.days and + not self.hours and + not self.minutes and + not self.seconds and + not self.microseconds and + not self.leapdays and + self.year is None and + self.month is None and + self.day is None and + self.weekday is None and + self.hour is None and + self.minute is None and + self.second is None and + self.microsecond is None) + + def __mul__(self, other): + f = float(other) + return relativedelta(years=self.years*f, + months=self.months*f, + days=self.days*f, + hours=self.hours*f, + minutes=self.minutes*f, + seconds=self.seconds*f, + microseconds=self.microseconds*f, + leapdays=self.leapdays, + year=self.year, + month=self.month, + day=self.day, + weekday=self.weekday, + hour=self.hour, + minute=self.minute, + second=self.second, + 
microsecond=self.microsecond) + + def __eq__(self, other): + if not isinstance(other, relativedelta): + return False + if self.weekday or other.weekday: + if not self.weekday or not other.weekday: + return False + if self.weekday.weekday != other.weekday.weekday: + return False + n1, n2 = self.weekday.n, other.weekday.n + if n1 != n2 and not ((not n1 or n1 == 1) and (not n2 or n2 == 1)): + return False + return (self.years == other.years and + self.months == other.months and + self.days == other.days and + self.hours == other.hours and + self.minutes == other.minutes and + self.seconds == other.seconds and + self.leapdays == other.leapdays and + self.year == other.year and + self.month == other.month and + self.day == other.day and + self.hour == other.hour and + self.minute == other.minute and + self.second == other.second and + self.microsecond == other.microsecond) + + def __ne__(self, other): + return not self.__eq__(other) + + def __div__(self, other): + return self.__mul__(1/float(other)) + + def __repr__(self): + l = [] + for attr in ["years", "months", "days", "leapdays", + "hours", "minutes", "seconds", "microseconds"]: + value = getattr(self, attr) + if value: + l.append("%s=%+d" % (attr, value)) + for attr in ["year", "month", "day", "weekday", + "hour", "minute", "second", "microsecond"]: + value = getattr(self, attr) + if value is not None: + l.append("%s=%s" % (attr, `value`)) + return "%s(%s)" % (self.__class__.__name__, ", ".join(l)) + +# vim:ts=4:sw=4:et diff --git a/libs/dateutil/rrule.py b/libs/dateutil/rrule.py new file mode 100644 index 00000000..6bd83cad --- /dev/null +++ b/libs/dateutil/rrule.py @@ -0,0 +1,1097 @@ +""" +Copyright (c) 2003-2010 Gustavo Niemeyer + +This module offers extensions to the standard python 2.3+ +datetime module. 
+""" +__author__ = "Gustavo Niemeyer " +__license__ = "PSF License" + +import itertools +import datetime +import calendar +import thread +import sys + +__all__ = ["rrule", "rruleset", "rrulestr", + "YEARLY", "MONTHLY", "WEEKLY", "DAILY", + "HOURLY", "MINUTELY", "SECONDLY", + "MO", "TU", "WE", "TH", "FR", "SA", "SU"] + +# Every mask is 7 days longer to handle cross-year weekly periods. +M366MASK = tuple([1]*31+[2]*29+[3]*31+[4]*30+[5]*31+[6]*30+ + [7]*31+[8]*31+[9]*30+[10]*31+[11]*30+[12]*31+[1]*7) +M365MASK = list(M366MASK) +M29, M30, M31 = range(1,30), range(1,31), range(1,32) +MDAY366MASK = tuple(M31+M29+M31+M30+M31+M30+M31+M31+M30+M31+M30+M31+M31[:7]) +MDAY365MASK = list(MDAY366MASK) +M29, M30, M31 = range(-29,0), range(-30,0), range(-31,0) +NMDAY366MASK = tuple(M31+M29+M31+M30+M31+M30+M31+M31+M30+M31+M30+M31+M31[:7]) +NMDAY365MASK = list(NMDAY366MASK) +M366RANGE = (0,31,60,91,121,152,182,213,244,274,305,335,366) +M365RANGE = (0,31,59,90,120,151,181,212,243,273,304,334,365) +WDAYMASK = [0,1,2,3,4,5,6]*55 +del M29, M30, M31, M365MASK[59], MDAY365MASK[59], NMDAY365MASK[31] +MDAY365MASK = tuple(MDAY365MASK) +M365MASK = tuple(M365MASK) + +(YEARLY, + MONTHLY, + WEEKLY, + DAILY, + HOURLY, + MINUTELY, + SECONDLY) = range(7) + +# Imported on demand. 
+easter = None +parser = None + +class weekday(object): + __slots__ = ["weekday", "n"] + + def __init__(self, weekday, n=None): + if n == 0: + raise ValueError, "Can't create weekday with n == 0" + self.weekday = weekday + self.n = n + + def __call__(self, n): + if n == self.n: + return self + else: + return self.__class__(self.weekday, n) + + def __eq__(self, other): + try: + if self.weekday != other.weekday or self.n != other.n: + return False + except AttributeError: + return False + return True + + def __repr__(self): + s = ("MO", "TU", "WE", "TH", "FR", "SA", "SU")[self.weekday] + if not self.n: + return s + else: + return "%s(%+d)" % (s, self.n) + +MO, TU, WE, TH, FR, SA, SU = weekdays = tuple([weekday(x) for x in range(7)]) + +class rrulebase: + def __init__(self, cache=False): + if cache: + self._cache = [] + self._cache_lock = thread.allocate_lock() + self._cache_gen = self._iter() + self._cache_complete = False + else: + self._cache = None + self._cache_complete = False + self._len = None + + def __iter__(self): + if self._cache_complete: + return iter(self._cache) + elif self._cache is None: + return self._iter() + else: + return self._iter_cached() + + def _iter_cached(self): + i = 0 + gen = self._cache_gen + cache = self._cache + acquire = self._cache_lock.acquire + release = self._cache_lock.release + while gen: + if i == len(cache): + acquire() + if self._cache_complete: + break + try: + for j in range(10): + cache.append(gen.next()) + except StopIteration: + self._cache_gen = gen = None + self._cache_complete = True + break + release() + yield cache[i] + i += 1 + while i < self._len: + yield cache[i] + i += 1 + + def __getitem__(self, item): + if self._cache_complete: + return self._cache[item] + elif isinstance(item, slice): + if item.step and item.step < 0: + return list(iter(self))[item] + else: + return list(itertools.islice(self, + item.start or 0, + item.stop or sys.maxint, + item.step or 1)) + elif item >= 0: + gen = iter(self) + try: + for i 
in range(item+1): + res = gen.next() + except StopIteration: + raise IndexError + return res + else: + return list(iter(self))[item] + + def __contains__(self, item): + if self._cache_complete: + return item in self._cache + else: + for i in self: + if i == item: + return True + elif i > item: + return False + return False + + # __len__() introduces a large performance penality. + def count(self): + if self._len is None: + for x in self: pass + return self._len + + def before(self, dt, inc=False): + if self._cache_complete: + gen = self._cache + else: + gen = self + last = None + if inc: + for i in gen: + if i > dt: + break + last = i + else: + for i in gen: + if i >= dt: + break + last = i + return last + + def after(self, dt, inc=False): + if self._cache_complete: + gen = self._cache + else: + gen = self + if inc: + for i in gen: + if i >= dt: + return i + else: + for i in gen: + if i > dt: + return i + return None + + def between(self, after, before, inc=False): + if self._cache_complete: + gen = self._cache + else: + gen = self + started = False + l = [] + if inc: + for i in gen: + if i > before: + break + elif not started: + if i >= after: + started = True + l.append(i) + else: + l.append(i) + else: + for i in gen: + if i >= before: + break + elif not started: + if i > after: + started = True + l.append(i) + else: + l.append(i) + return l + +class rrule(rrulebase): + def __init__(self, freq, dtstart=None, + interval=1, wkst=None, count=None, until=None, bysetpos=None, + bymonth=None, bymonthday=None, byyearday=None, byeaster=None, + byweekno=None, byweekday=None, + byhour=None, byminute=None, bysecond=None, + cache=False): + rrulebase.__init__(self, cache) + global easter + if not dtstart: + dtstart = datetime.datetime.now().replace(microsecond=0) + elif not isinstance(dtstart, datetime.datetime): + dtstart = datetime.datetime.fromordinal(dtstart.toordinal()) + else: + dtstart = dtstart.replace(microsecond=0) + self._dtstart = dtstart + self._tzinfo = 
dtstart.tzinfo + self._freq = freq + self._interval = interval + self._count = count + if until and not isinstance(until, datetime.datetime): + until = datetime.datetime.fromordinal(until.toordinal()) + self._until = until + if wkst is None: + self._wkst = calendar.firstweekday() + elif type(wkst) is int: + self._wkst = wkst + else: + self._wkst = wkst.weekday + if bysetpos is None: + self._bysetpos = None + elif type(bysetpos) is int: + if bysetpos == 0 or not (-366 <= bysetpos <= 366): + raise ValueError("bysetpos must be between 1 and 366, " + "or between -366 and -1") + self._bysetpos = (bysetpos,) + else: + self._bysetpos = tuple(bysetpos) + for pos in self._bysetpos: + if pos == 0 or not (-366 <= pos <= 366): + raise ValueError("bysetpos must be between 1 and 366, " + "or between -366 and -1") + if not (byweekno or byyearday or bymonthday or + byweekday is not None or byeaster is not None): + if freq == YEARLY: + if not bymonth: + bymonth = dtstart.month + bymonthday = dtstart.day + elif freq == MONTHLY: + bymonthday = dtstart.day + elif freq == WEEKLY: + byweekday = dtstart.weekday() + # bymonth + if not bymonth: + self._bymonth = None + elif type(bymonth) is int: + self._bymonth = (bymonth,) + else: + self._bymonth = tuple(bymonth) + # byyearday + if not byyearday: + self._byyearday = None + elif type(byyearday) is int: + self._byyearday = (byyearday,) + else: + self._byyearday = tuple(byyearday) + # byeaster + if byeaster is not None: + if not easter: + from dateutil import easter + if type(byeaster) is int: + self._byeaster = (byeaster,) + else: + self._byeaster = tuple(byeaster) + else: + self._byeaster = None + # bymonthay + if not bymonthday: + self._bymonthday = () + self._bynmonthday = () + elif type(bymonthday) is int: + if bymonthday < 0: + self._bynmonthday = (bymonthday,) + self._bymonthday = () + else: + self._bymonthday = (bymonthday,) + self._bynmonthday = () + else: + self._bymonthday = tuple([x for x in bymonthday if x > 0]) + 
self._bynmonthday = tuple([x for x in bymonthday if x < 0]) + # byweekno + if byweekno is None: + self._byweekno = None + elif type(byweekno) is int: + self._byweekno = (byweekno,) + else: + self._byweekno = tuple(byweekno) + # byweekday / bynweekday + if byweekday is None: + self._byweekday = None + self._bynweekday = None + elif type(byweekday) is int: + self._byweekday = (byweekday,) + self._bynweekday = None + elif hasattr(byweekday, "n"): + if not byweekday.n or freq > MONTHLY: + self._byweekday = (byweekday.weekday,) + self._bynweekday = None + else: + self._bynweekday = ((byweekday.weekday, byweekday.n),) + self._byweekday = None + else: + self._byweekday = [] + self._bynweekday = [] + for wday in byweekday: + if type(wday) is int: + self._byweekday.append(wday) + elif not wday.n or freq > MONTHLY: + self._byweekday.append(wday.weekday) + else: + self._bynweekday.append((wday.weekday, wday.n)) + self._byweekday = tuple(self._byweekday) + self._bynweekday = tuple(self._bynweekday) + if not self._byweekday: + self._byweekday = None + elif not self._bynweekday: + self._bynweekday = None + # byhour + if byhour is None: + if freq < HOURLY: + self._byhour = (dtstart.hour,) + else: + self._byhour = None + elif type(byhour) is int: + self._byhour = (byhour,) + else: + self._byhour = tuple(byhour) + # byminute + if byminute is None: + if freq < MINUTELY: + self._byminute = (dtstart.minute,) + else: + self._byminute = None + elif type(byminute) is int: + self._byminute = (byminute,) + else: + self._byminute = tuple(byminute) + # bysecond + if bysecond is None: + if freq < SECONDLY: + self._bysecond = (dtstart.second,) + else: + self._bysecond = None + elif type(bysecond) is int: + self._bysecond = (bysecond,) + else: + self._bysecond = tuple(bysecond) + + if self._freq >= HOURLY: + self._timeset = None + else: + self._timeset = [] + for hour in self._byhour: + for minute in self._byminute: + for second in self._bysecond: + self._timeset.append( + datetime.time(hour, 
minute, second, + tzinfo=self._tzinfo)) + self._timeset.sort() + self._timeset = tuple(self._timeset) + + def _iter(self): + year, month, day, hour, minute, second, weekday, yearday, _ = \ + self._dtstart.timetuple() + + # Some local variables to speed things up a bit + freq = self._freq + interval = self._interval + wkst = self._wkst + until = self._until + bymonth = self._bymonth + byweekno = self._byweekno + byyearday = self._byyearday + byweekday = self._byweekday + byeaster = self._byeaster + bymonthday = self._bymonthday + bynmonthday = self._bynmonthday + bysetpos = self._bysetpos + byhour = self._byhour + byminute = self._byminute + bysecond = self._bysecond + + ii = _iterinfo(self) + ii.rebuild(year, month) + + getdayset = {YEARLY:ii.ydayset, + MONTHLY:ii.mdayset, + WEEKLY:ii.wdayset, + DAILY:ii.ddayset, + HOURLY:ii.ddayset, + MINUTELY:ii.ddayset, + SECONDLY:ii.ddayset}[freq] + + if freq < HOURLY: + timeset = self._timeset + else: + gettimeset = {HOURLY:ii.htimeset, + MINUTELY:ii.mtimeset, + SECONDLY:ii.stimeset}[freq] + if ((freq >= HOURLY and + self._byhour and hour not in self._byhour) or + (freq >= MINUTELY and + self._byminute and minute not in self._byminute) or + (freq >= SECONDLY and + self._bysecond and second not in self._bysecond)): + timeset = () + else: + timeset = gettimeset(hour, minute, second) + + total = 0 + count = self._count + while True: + # Get dayset with the right frequency + dayset, start, end = getdayset(year, month, day) + + # Do the "hard" work ;-) + filtered = False + for i in dayset[start:end]: + if ((bymonth and ii.mmask[i] not in bymonth) or + (byweekno and not ii.wnomask[i]) or + (byweekday and ii.wdaymask[i] not in byweekday) or + (ii.nwdaymask and not ii.nwdaymask[i]) or + (byeaster and not ii.eastermask[i]) or + ((bymonthday or bynmonthday) and + ii.mdaymask[i] not in bymonthday and + ii.nmdaymask[i] not in bynmonthday) or + (byyearday and + ((i < ii.yearlen and i+1 not in byyearday + and -ii.yearlen+i not in byyearday) 
or + (i >= ii.yearlen and i+1-ii.yearlen not in byyearday + and -ii.nextyearlen+i-ii.yearlen + not in byyearday)))): + dayset[i] = None + filtered = True + + # Output results + if bysetpos and timeset: + poslist = [] + for pos in bysetpos: + if pos < 0: + daypos, timepos = divmod(pos, len(timeset)) + else: + daypos, timepos = divmod(pos-1, len(timeset)) + try: + i = [x for x in dayset[start:end] + if x is not None][daypos] + time = timeset[timepos] + except IndexError: + pass + else: + date = datetime.date.fromordinal(ii.yearordinal+i) + res = datetime.datetime.combine(date, time) + if res not in poslist: + poslist.append(res) + poslist.sort() + for res in poslist: + if until and res > until: + self._len = total + return + elif res >= self._dtstart: + total += 1 + yield res + if count: + count -= 1 + if not count: + self._len = total + return + else: + for i in dayset[start:end]: + if i is not None: + date = datetime.date.fromordinal(ii.yearordinal+i) + for time in timeset: + res = datetime.datetime.combine(date, time) + if until and res > until: + self._len = total + return + elif res >= self._dtstart: + total += 1 + yield res + if count: + count -= 1 + if not count: + self._len = total + return + + # Handle frequency and interval + fixday = False + if freq == YEARLY: + year += interval + if year > datetime.MAXYEAR: + self._len = total + return + ii.rebuild(year, month) + elif freq == MONTHLY: + month += interval + if month > 12: + div, mod = divmod(month, 12) + month = mod + year += div + if month == 0: + month = 12 + year -= 1 + if year > datetime.MAXYEAR: + self._len = total + return + ii.rebuild(year, month) + elif freq == WEEKLY: + if wkst > weekday: + day += -(weekday+1+(6-wkst))+self._interval*7 + else: + day += -(weekday-wkst)+self._interval*7 + weekday = wkst + fixday = True + elif freq == DAILY: + day += interval + fixday = True + elif freq == HOURLY: + if filtered: + # Jump to one iteration before next day + hour += ((23-hour)//interval)*interval + 
while True: + hour += interval + div, mod = divmod(hour, 24) + if div: + hour = mod + day += div + fixday = True + if not byhour or hour in byhour: + break + timeset = gettimeset(hour, minute, second) + elif freq == MINUTELY: + if filtered: + # Jump to one iteration before next day + minute += ((1439-(hour*60+minute))//interval)*interval + while True: + minute += interval + div, mod = divmod(minute, 60) + if div: + minute = mod + hour += div + div, mod = divmod(hour, 24) + if div: + hour = mod + day += div + fixday = True + filtered = False + if ((not byhour or hour in byhour) and + (not byminute or minute in byminute)): + break + timeset = gettimeset(hour, minute, second) + elif freq == SECONDLY: + if filtered: + # Jump to one iteration before next day + second += (((86399-(hour*3600+minute*60+second)) + //interval)*interval) + while True: + second += self._interval + div, mod = divmod(second, 60) + if div: + second = mod + minute += div + div, mod = divmod(minute, 60) + if div: + minute = mod + hour += div + div, mod = divmod(hour, 24) + if div: + hour = mod + day += div + fixday = True + if ((not byhour or hour in byhour) and + (not byminute or minute in byminute) and + (not bysecond or second in bysecond)): + break + timeset = gettimeset(hour, minute, second) + + if fixday and day > 28: + daysinmonth = calendar.monthrange(year, month)[1] + if day > daysinmonth: + while day > daysinmonth: + day -= daysinmonth + month += 1 + if month == 13: + month = 1 + year += 1 + if year > datetime.MAXYEAR: + self._len = total + return + daysinmonth = calendar.monthrange(year, month)[1] + ii.rebuild(year, month) + +class _iterinfo(object): + __slots__ = ["rrule", "lastyear", "lastmonth", + "yearlen", "nextyearlen", "yearordinal", "yearweekday", + "mmask", "mrange", "mdaymask", "nmdaymask", + "wdaymask", "wnomask", "nwdaymask", "eastermask"] + + def __init__(self, rrule): + for attr in self.__slots__: + setattr(self, attr, None) + self.rrule = rrule + + def rebuild(self, year, 
month): + # Every mask is 7 days longer to handle cross-year weekly periods. + rr = self.rrule + if year != self.lastyear: + self.yearlen = 365+calendar.isleap(year) + self.nextyearlen = 365+calendar.isleap(year+1) + firstyday = datetime.date(year, 1, 1) + self.yearordinal = firstyday.toordinal() + self.yearweekday = firstyday.weekday() + + wday = datetime.date(year, 1, 1).weekday() + if self.yearlen == 365: + self.mmask = M365MASK + self.mdaymask = MDAY365MASK + self.nmdaymask = NMDAY365MASK + self.wdaymask = WDAYMASK[wday:] + self.mrange = M365RANGE + else: + self.mmask = M366MASK + self.mdaymask = MDAY366MASK + self.nmdaymask = NMDAY366MASK + self.wdaymask = WDAYMASK[wday:] + self.mrange = M366RANGE + + if not rr._byweekno: + self.wnomask = None + else: + self.wnomask = [0]*(self.yearlen+7) + #no1wkst = firstwkst = self.wdaymask.index(rr._wkst) + no1wkst = firstwkst = (7-self.yearweekday+rr._wkst)%7 + if no1wkst >= 4: + no1wkst = 0 + # Number of days in the year, plus the days we got + # from last year. + wyearlen = self.yearlen+(self.yearweekday-rr._wkst)%7 + else: + # Number of days in the year, minus the days we + # left in last year. + wyearlen = self.yearlen-no1wkst + div, mod = divmod(wyearlen, 7) + numweeks = div+mod//4 + for n in rr._byweekno: + if n < 0: + n += numweeks+1 + if not (0 < n <= numweeks): + continue + if n > 1: + i = no1wkst+(n-1)*7 + if no1wkst != firstwkst: + i -= 7-firstwkst + else: + i = no1wkst + for j in range(7): + self.wnomask[i] = 1 + i += 1 + if self.wdaymask[i] == rr._wkst: + break + if 1 in rr._byweekno: + # Check week number 1 of next year as well + # TODO: Check -numweeks for next year. + i = no1wkst+numweeks*7 + if no1wkst != firstwkst: + i -= 7-firstwkst + if i < self.yearlen: + # If week starts in next year, we + # don't care about it. + for j in range(7): + self.wnomask[i] = 1 + i += 1 + if self.wdaymask[i] == rr._wkst: + break + if no1wkst: + # Check last week number of last year as + # well. 
If no1wkst is 0, either the year + # started on week start, or week number 1 + # got days from last year, so there are no + # days from last year's last week number in + # this year. + if -1 not in rr._byweekno: + lyearweekday = datetime.date(year-1,1,1).weekday() + lno1wkst = (7-lyearweekday+rr._wkst)%7 + lyearlen = 365+calendar.isleap(year-1) + if lno1wkst >= 4: + lno1wkst = 0 + lnumweeks = 52+(lyearlen+ + (lyearweekday-rr._wkst)%7)%7//4 + else: + lnumweeks = 52+(self.yearlen-no1wkst)%7//4 + else: + lnumweeks = -1 + if lnumweeks in rr._byweekno: + for i in range(no1wkst): + self.wnomask[i] = 1 + + if (rr._bynweekday and + (month != self.lastmonth or year != self.lastyear)): + ranges = [] + if rr._freq == YEARLY: + if rr._bymonth: + for month in rr._bymonth: + ranges.append(self.mrange[month-1:month+1]) + else: + ranges = [(0, self.yearlen)] + elif rr._freq == MONTHLY: + ranges = [self.mrange[month-1:month+1]] + if ranges: + # Weekly frequency won't get here, so we may not + # care about cross-year weekly periods. + self.nwdaymask = [0]*self.yearlen + for first, last in ranges: + last -= 1 + for wday, n in rr._bynweekday: + if n < 0: + i = last+(n+1)*7 + i -= (self.wdaymask[i]-wday)%7 + else: + i = first+(n-1)*7 + i += (7-self.wdaymask[i]+wday)%7 + if first <= i <= last: + self.nwdaymask[i] = 1 + + if rr._byeaster: + self.eastermask = [0]*(self.yearlen+7) + eyday = easter.easter(year).toordinal()-self.yearordinal + for offset in rr._byeaster: + self.eastermask[eyday+offset] = 1 + + self.lastyear = year + self.lastmonth = month + + def ydayset(self, year, month, day): + return range(self.yearlen), 0, self.yearlen + + def mdayset(self, year, month, day): + set = [None]*self.yearlen + start, end = self.mrange[month-1:month+1] + for i in range(start, end): + set[i] = i + return set, start, end + + def wdayset(self, year, month, day): + # We need to handle cross-year weeks here. 
+ set = [None]*(self.yearlen+7) + i = datetime.date(year, month, day).toordinal()-self.yearordinal + start = i + for j in range(7): + set[i] = i + i += 1 + #if (not (0 <= i < self.yearlen) or + # self.wdaymask[i] == self.rrule._wkst): + # This will cross the year boundary, if necessary. + if self.wdaymask[i] == self.rrule._wkst: + break + return set, start, i + + def ddayset(self, year, month, day): + set = [None]*self.yearlen + i = datetime.date(year, month, day).toordinal()-self.yearordinal + set[i] = i + return set, i, i+1 + + def htimeset(self, hour, minute, second): + set = [] + rr = self.rrule + for minute in rr._byminute: + for second in rr._bysecond: + set.append(datetime.time(hour, minute, second, + tzinfo=rr._tzinfo)) + set.sort() + return set + + def mtimeset(self, hour, minute, second): + set = [] + rr = self.rrule + for second in rr._bysecond: + set.append(datetime.time(hour, minute, second, tzinfo=rr._tzinfo)) + set.sort() + return set + + def stimeset(self, hour, minute, second): + return (datetime.time(hour, minute, second, + tzinfo=self.rrule._tzinfo),) + + +class rruleset(rrulebase): + + class _genitem: + def __init__(self, genlist, gen): + try: + self.dt = gen() + genlist.append(self) + except StopIteration: + pass + self.genlist = genlist + self.gen = gen + + def next(self): + try: + self.dt = self.gen() + except StopIteration: + self.genlist.remove(self) + + def __cmp__(self, other): + return cmp(self.dt, other.dt) + + def __init__(self, cache=False): + rrulebase.__init__(self, cache) + self._rrule = [] + self._rdate = [] + self._exrule = [] + self._exdate = [] + + def rrule(self, rrule): + self._rrule.append(rrule) + + def rdate(self, rdate): + self._rdate.append(rdate) + + def exrule(self, exrule): + self._exrule.append(exrule) + + def exdate(self, exdate): + self._exdate.append(exdate) + + def _iter(self): + rlist = [] + self._rdate.sort() + self._genitem(rlist, iter(self._rdate).next) + for gen in [iter(x).next for x in self._rrule]: + 
self._genitem(rlist, gen) + rlist.sort() + exlist = [] + self._exdate.sort() + self._genitem(exlist, iter(self._exdate).next) + for gen in [iter(x).next for x in self._exrule]: + self._genitem(exlist, gen) + exlist.sort() + lastdt = None + total = 0 + while rlist: + ritem = rlist[0] + if not lastdt or lastdt != ritem.dt: + while exlist and exlist[0] < ritem: + exlist[0].next() + exlist.sort() + if not exlist or ritem != exlist[0]: + total += 1 + yield ritem.dt + lastdt = ritem.dt + ritem.next() + rlist.sort() + self._len = total + +class _rrulestr: + + _freq_map = {"YEARLY": YEARLY, + "MONTHLY": MONTHLY, + "WEEKLY": WEEKLY, + "DAILY": DAILY, + "HOURLY": HOURLY, + "MINUTELY": MINUTELY, + "SECONDLY": SECONDLY} + + _weekday_map = {"MO":0,"TU":1,"WE":2,"TH":3,"FR":4,"SA":5,"SU":6} + + def _handle_int(self, rrkwargs, name, value, **kwargs): + rrkwargs[name.lower()] = int(value) + + def _handle_int_list(self, rrkwargs, name, value, **kwargs): + rrkwargs[name.lower()] = [int(x) for x in value.split(',')] + + _handle_INTERVAL = _handle_int + _handle_COUNT = _handle_int + _handle_BYSETPOS = _handle_int_list + _handle_BYMONTH = _handle_int_list + _handle_BYMONTHDAY = _handle_int_list + _handle_BYYEARDAY = _handle_int_list + _handle_BYEASTER = _handle_int_list + _handle_BYWEEKNO = _handle_int_list + _handle_BYHOUR = _handle_int_list + _handle_BYMINUTE = _handle_int_list + _handle_BYSECOND = _handle_int_list + + def _handle_FREQ(self, rrkwargs, name, value, **kwargs): + rrkwargs["freq"] = self._freq_map[value] + + def _handle_UNTIL(self, rrkwargs, name, value, **kwargs): + global parser + if not parser: + from dateutil import parser + try: + rrkwargs["until"] = parser.parse(value, + ignoretz=kwargs.get("ignoretz"), + tzinfos=kwargs.get("tzinfos")) + except ValueError: + raise ValueError, "invalid until date" + + def _handle_WKST(self, rrkwargs, name, value, **kwargs): + rrkwargs["wkst"] = self._weekday_map[value] + + def _handle_BYWEEKDAY(self, rrkwargs, name, value, 
**kwarsg): + l = [] + for wday in value.split(','): + for i in range(len(wday)): + if wday[i] not in '+-0123456789': + break + n = wday[:i] or None + w = wday[i:] + if n: n = int(n) + l.append(weekdays[self._weekday_map[w]](n)) + rrkwargs["byweekday"] = l + + _handle_BYDAY = _handle_BYWEEKDAY + + def _parse_rfc_rrule(self, line, + dtstart=None, + cache=False, + ignoretz=False, + tzinfos=None): + if line.find(':') != -1: + name, value = line.split(':') + if name != "RRULE": + raise ValueError, "unknown parameter name" + else: + value = line + rrkwargs = {} + for pair in value.split(';'): + name, value = pair.split('=') + name = name.upper() + value = value.upper() + try: + getattr(self, "_handle_"+name)(rrkwargs, name, value, + ignoretz=ignoretz, + tzinfos=tzinfos) + except AttributeError: + raise ValueError, "unknown parameter '%s'" % name + except (KeyError, ValueError): + raise ValueError, "invalid '%s': %s" % (name, value) + return rrule(dtstart=dtstart, cache=cache, **rrkwargs) + + def _parse_rfc(self, s, + dtstart=None, + cache=False, + unfold=False, + forceset=False, + compatible=False, + ignoretz=False, + tzinfos=None): + global parser + if compatible: + forceset = True + unfold = True + s = s.upper() + if not s.strip(): + raise ValueError, "empty string" + if unfold: + lines = s.splitlines() + i = 0 + while i < len(lines): + line = lines[i].rstrip() + if not line: + del lines[i] + elif i > 0 and line[0] == " ": + lines[i-1] += line[1:] + del lines[i] + else: + i += 1 + else: + lines = s.split() + if (not forceset and len(lines) == 1 and + (s.find(':') == -1 or s.startswith('RRULE:'))): + return self._parse_rfc_rrule(lines[0], cache=cache, + dtstart=dtstart, ignoretz=ignoretz, + tzinfos=tzinfos) + else: + rrulevals = [] + rdatevals = [] + exrulevals = [] + exdatevals = [] + for line in lines: + if not line: + continue + if line.find(':') == -1: + name = "RRULE" + value = line + else: + name, value = line.split(':', 1) + parms = name.split(';') + if not 
parms: + raise ValueError, "empty property name" + name = parms[0] + parms = parms[1:] + if name == "RRULE": + for parm in parms: + raise ValueError, "unsupported RRULE parm: "+parm + rrulevals.append(value) + elif name == "RDATE": + for parm in parms: + if parm != "VALUE=DATE-TIME": + raise ValueError, "unsupported RDATE parm: "+parm + rdatevals.append(value) + elif name == "EXRULE": + for parm in parms: + raise ValueError, "unsupported EXRULE parm: "+parm + exrulevals.append(value) + elif name == "EXDATE": + for parm in parms: + if parm != "VALUE=DATE-TIME": + raise ValueError, "unsupported RDATE parm: "+parm + exdatevals.append(value) + elif name == "DTSTART": + for parm in parms: + raise ValueError, "unsupported DTSTART parm: "+parm + if not parser: + from dateutil import parser + dtstart = parser.parse(value, ignoretz=ignoretz, + tzinfos=tzinfos) + else: + raise ValueError, "unsupported property: "+name + if (forceset or len(rrulevals) > 1 or + rdatevals or exrulevals or exdatevals): + if not parser and (rdatevals or exdatevals): + from dateutil import parser + set = rruleset(cache=cache) + for value in rrulevals: + set.rrule(self._parse_rfc_rrule(value, dtstart=dtstart, + ignoretz=ignoretz, + tzinfos=tzinfos)) + for value in rdatevals: + for datestr in value.split(','): + set.rdate(parser.parse(datestr, + ignoretz=ignoretz, + tzinfos=tzinfos)) + for value in exrulevals: + set.exrule(self._parse_rfc_rrule(value, dtstart=dtstart, + ignoretz=ignoretz, + tzinfos=tzinfos)) + for value in exdatevals: + for datestr in value.split(','): + set.exdate(parser.parse(datestr, + ignoretz=ignoretz, + tzinfos=tzinfos)) + if compatible and dtstart: + set.rdate(dtstart) + return set + else: + return self._parse_rfc_rrule(rrulevals[0], + dtstart=dtstart, + cache=cache, + ignoretz=ignoretz, + tzinfos=tzinfos) + + def __call__(self, s, **kwargs): + return self._parse_rfc(s, **kwargs) + +rrulestr = _rrulestr() + +# vim:ts=4:sw=4:et diff --git a/libs/dateutil/tz.py 
b/libs/dateutil/tz.py new file mode 100644 index 00000000..0e28d6b3 --- /dev/null +++ b/libs/dateutil/tz.py @@ -0,0 +1,951 @@ +""" +Copyright (c) 2003-2007 Gustavo Niemeyer + +This module offers extensions to the standard python 2.3+ +datetime module. +""" +__author__ = "Gustavo Niemeyer " +__license__ = "PSF License" + +import datetime +import struct +import time +import sys +import os + +relativedelta = None +parser = None +rrule = None + +__all__ = ["tzutc", "tzoffset", "tzlocal", "tzfile", "tzrange", + "tzstr", "tzical", "tzwin", "tzwinlocal", "gettz"] + +try: + from dateutil.tzwin import tzwin, tzwinlocal +except (ImportError, OSError): + tzwin, tzwinlocal = None, None + +ZERO = datetime.timedelta(0) +EPOCHORDINAL = datetime.datetime.utcfromtimestamp(0).toordinal() + +class tzutc(datetime.tzinfo): + + def utcoffset(self, dt): + return ZERO + + def dst(self, dt): + return ZERO + + def tzname(self, dt): + return "UTC" + + def __eq__(self, other): + return (isinstance(other, tzutc) or + (isinstance(other, tzoffset) and other._offset == ZERO)) + + def __ne__(self, other): + return not self.__eq__(other) + + def __repr__(self): + return "%s()" % self.__class__.__name__ + + __reduce__ = object.__reduce__ + +class tzoffset(datetime.tzinfo): + + def __init__(self, name, offset): + self._name = name + self._offset = datetime.timedelta(seconds=offset) + + def utcoffset(self, dt): + return self._offset + + def dst(self, dt): + return ZERO + + def tzname(self, dt): + return self._name + + def __eq__(self, other): + return (isinstance(other, tzoffset) and + self._offset == other._offset) + + def __ne__(self, other): + return not self.__eq__(other) + + def __repr__(self): + return "%s(%s, %s)" % (self.__class__.__name__, + `self._name`, + self._offset.days*86400+self._offset.seconds) + + __reduce__ = object.__reduce__ + +class tzlocal(datetime.tzinfo): + + _std_offset = datetime.timedelta(seconds=-time.timezone) + if time.daylight: + _dst_offset = 
datetime.timedelta(seconds=-time.altzone) + else: + _dst_offset = _std_offset + + def utcoffset(self, dt): + if self._isdst(dt): + return self._dst_offset + else: + return self._std_offset + + def dst(self, dt): + if self._isdst(dt): + return self._dst_offset-self._std_offset + else: + return ZERO + + def tzname(self, dt): + return time.tzname[self._isdst(dt)] + + def _isdst(self, dt): + # We can't use mktime here. It is unstable when deciding if + # the hour near to a change is DST or not. + # + # timestamp = time.mktime((dt.year, dt.month, dt.day, dt.hour, + # dt.minute, dt.second, dt.weekday(), 0, -1)) + # return time.localtime(timestamp).tm_isdst + # + # The code above yields the following result: + # + #>>> import tz, datetime + #>>> t = tz.tzlocal() + #>>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname() + #'BRDT' + #>>> datetime.datetime(2003,2,16,0,tzinfo=t).tzname() + #'BRST' + #>>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname() + #'BRST' + #>>> datetime.datetime(2003,2,15,22,tzinfo=t).tzname() + #'BRDT' + #>>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname() + #'BRDT' + # + # Here is a more stable implementation: + # + timestamp = ((dt.toordinal() - EPOCHORDINAL) * 86400 + + dt.hour * 3600 + + dt.minute * 60 + + dt.second) + return time.localtime(timestamp+time.timezone).tm_isdst + + def __eq__(self, other): + if not isinstance(other, tzlocal): + return False + return (self._std_offset == other._std_offset and + self._dst_offset == other._dst_offset) + return True + + def __ne__(self, other): + return not self.__eq__(other) + + def __repr__(self): + return "%s()" % self.__class__.__name__ + + __reduce__ = object.__reduce__ + +class _ttinfo(object): + __slots__ = ["offset", "delta", "isdst", "abbr", "isstd", "isgmt"] + + def __init__(self): + for attr in self.__slots__: + setattr(self, attr, None) + + def __repr__(self): + l = [] + for attr in self.__slots__: + value = getattr(self, attr) + if value is not None: + l.append("%s=%s" % (attr, `value`)) + 
return "%s(%s)" % (self.__class__.__name__, ", ".join(l)) + + def __eq__(self, other): + if not isinstance(other, _ttinfo): + return False + return (self.offset == other.offset and + self.delta == other.delta and + self.isdst == other.isdst and + self.abbr == other.abbr and + self.isstd == other.isstd and + self.isgmt == other.isgmt) + + def __ne__(self, other): + return not self.__eq__(other) + + def __getstate__(self): + state = {} + for name in self.__slots__: + state[name] = getattr(self, name, None) + return state + + def __setstate__(self, state): + for name in self.__slots__: + if name in state: + setattr(self, name, state[name]) + +class tzfile(datetime.tzinfo): + + # http://www.twinsun.com/tz/tz-link.htm + # ftp://elsie.nci.nih.gov/pub/tz*.tar.gz + + def __init__(self, fileobj): + if isinstance(fileobj, basestring): + self._filename = fileobj + fileobj = open(fileobj) + elif hasattr(fileobj, "name"): + self._filename = fileobj.name + else: + self._filename = `fileobj` + + # From tzfile(5): + # + # The time zone information files used by tzset(3) + # begin with the magic characters "TZif" to identify + # them as time zone information files, followed by + # sixteen bytes reserved for future use, followed by + # six four-byte values of type long, written in a + # ``standard'' byte order (the high-order byte + # of the value is written first). + + if fileobj.read(4) != "TZif": + raise ValueError, "magic not found" + + fileobj.read(16) + + ( + # The number of UTC/local indicators stored in the file. + ttisgmtcnt, + + # The number of standard/wall indicators stored in the file. + ttisstdcnt, + + # The number of leap seconds for which data is + # stored in the file. + leapcnt, + + # The number of "transition times" for which data + # is stored in the file. + timecnt, + + # The number of "local time types" for which data + # is stored in the file (must not be zero). 
+ typecnt, + + # The number of characters of "time zone + # abbreviation strings" stored in the file. + charcnt, + + ) = struct.unpack(">6l", fileobj.read(24)) + + # The above header is followed by tzh_timecnt four-byte + # values of type long, sorted in ascending order. + # These values are written in ``standard'' byte order. + # Each is used as a transition time (as returned by + # time(2)) at which the rules for computing local time + # change. + + if timecnt: + self._trans_list = struct.unpack(">%dl" % timecnt, + fileobj.read(timecnt*4)) + else: + self._trans_list = [] + + # Next come tzh_timecnt one-byte values of type unsigned + # char; each one tells which of the different types of + # ``local time'' types described in the file is associated + # with the same-indexed transition time. These values + # serve as indices into an array of ttinfo structures that + # appears next in the file. + + if timecnt: + self._trans_idx = struct.unpack(">%dB" % timecnt, + fileobj.read(timecnt)) + else: + self._trans_idx = [] + + # Each ttinfo structure is written as a four-byte value + # for tt_gmtoff of type long, in a standard byte + # order, followed by a one-byte value for tt_isdst + # and a one-byte value for tt_abbrind. In each + # structure, tt_gmtoff gives the number of + # seconds to be added to UTC, tt_isdst tells whether + # tm_isdst should be set by localtime(3), and + # tt_abbrind serves as an index into the array of + # time zone abbreviation characters that follow the + # ttinfo structure(s) in the file. + + ttinfo = [] + + for i in range(typecnt): + ttinfo.append(struct.unpack(">lbb", fileobj.read(6))) + + abbr = fileobj.read(charcnt) + + # Then there are tzh_leapcnt pairs of four-byte + # values, written in standard byte order; the + # first value of each pair gives the time (as + # returned by time(2)) at which a leap second + # occurs; the second gives the total number of + # leap seconds to be applied after the given time. 
+ # The pairs of values are sorted in ascending order + # by time. + + # Not used, for now + if leapcnt: + leap = struct.unpack(">%dl" % (leapcnt*2), + fileobj.read(leapcnt*8)) + + # Then there are tzh_ttisstdcnt standard/wall + # indicators, each stored as a one-byte value; + # they tell whether the transition times associated + # with local time types were specified as standard + # time or wall clock time, and are used when + # a time zone file is used in handling POSIX-style + # time zone environment variables. + + if ttisstdcnt: + isstd = struct.unpack(">%db" % ttisstdcnt, + fileobj.read(ttisstdcnt)) + + # Finally, there are tzh_ttisgmtcnt UTC/local + # indicators, each stored as a one-byte value; + # they tell whether the transition times associated + # with local time types were specified as UTC or + # local time, and are used when a time zone file + # is used in handling POSIX-style time zone envi- + # ronment variables. + + if ttisgmtcnt: + isgmt = struct.unpack(">%db" % ttisgmtcnt, + fileobj.read(ttisgmtcnt)) + + # ** Everything has been read ** + + # Build ttinfo list + self._ttinfo_list = [] + for i in range(typecnt): + gmtoff, isdst, abbrind = ttinfo[i] + # Round to full-minutes if that's not the case. Python's + # datetime doesn't accept sub-minute timezones. Check + # http://python.org/sf/1447945 for some information. + gmtoff = (gmtoff+30)//60*60 + tti = _ttinfo() + tti.offset = gmtoff + tti.delta = datetime.timedelta(seconds=gmtoff) + tti.isdst = isdst + tti.abbr = abbr[abbrind:abbr.find('\x00', abbrind)] + tti.isstd = (ttisstdcnt > i and isstd[i] != 0) + tti.isgmt = (ttisgmtcnt > i and isgmt[i] != 0) + self._ttinfo_list.append(tti) + + # Replace ttinfo indexes for ttinfo objects. + trans_idx = [] + for idx in self._trans_idx: + trans_idx.append(self._ttinfo_list[idx]) + self._trans_idx = tuple(trans_idx) + + # Set standard, dst, and before ttinfos. 
before will be + # used when a given time is before any transitions, + # and will be set to the first non-dst ttinfo, or to + # the first dst, if all of them are dst. + self._ttinfo_std = None + self._ttinfo_dst = None + self._ttinfo_before = None + if self._ttinfo_list: + if not self._trans_list: + self._ttinfo_std = self._ttinfo_first = self._ttinfo_list[0] + else: + for i in range(timecnt-1,-1,-1): + tti = self._trans_idx[i] + if not self._ttinfo_std and not tti.isdst: + self._ttinfo_std = tti + elif not self._ttinfo_dst and tti.isdst: + self._ttinfo_dst = tti + if self._ttinfo_std and self._ttinfo_dst: + break + else: + if self._ttinfo_dst and not self._ttinfo_std: + self._ttinfo_std = self._ttinfo_dst + + for tti in self._ttinfo_list: + if not tti.isdst: + self._ttinfo_before = tti + break + else: + self._ttinfo_before = self._ttinfo_list[0] + + # Now fix transition times to become relative to wall time. + # + # I'm not sure about this. In my tests, the tz source file + # is setup to wall time, and in the binary file isstd and + # isgmt are off, so it should be in wall time. OTOH, it's + # always in gmt time. Let me know if you have comments + # about this. + laststdoffset = 0 + self._trans_list = list(self._trans_list) + for i in range(len(self._trans_list)): + tti = self._trans_idx[i] + if not tti.isdst: + # This is std time. + self._trans_list[i] += tti.offset + laststdoffset = tti.offset + else: + # This is dst time. Convert to std. 
+ self._trans_list[i] += laststdoffset + self._trans_list = tuple(self._trans_list) + + def _find_ttinfo(self, dt, laststd=0): + timestamp = ((dt.toordinal() - EPOCHORDINAL) * 86400 + + dt.hour * 3600 + + dt.minute * 60 + + dt.second) + idx = 0 + for trans in self._trans_list: + if timestamp < trans: + break + idx += 1 + else: + return self._ttinfo_std + if idx == 0: + return self._ttinfo_before + if laststd: + while idx > 0: + tti = self._trans_idx[idx-1] + if not tti.isdst: + return tti + idx -= 1 + else: + return self._ttinfo_std + else: + return self._trans_idx[idx-1] + + def utcoffset(self, dt): + if not self._ttinfo_std: + return ZERO + return self._find_ttinfo(dt).delta + + def dst(self, dt): + if not self._ttinfo_dst: + return ZERO + tti = self._find_ttinfo(dt) + if not tti.isdst: + return ZERO + + # The documentation says that utcoffset()-dst() must + # be constant for every dt. + return tti.delta-self._find_ttinfo(dt, laststd=1).delta + + # An alternative for that would be: + # + # return self._ttinfo_dst.offset-self._ttinfo_std.offset + # + # However, this class stores historical changes in the + # dst offset, so I belive that this wouldn't be the right + # way to implement this. 
+ + def tzname(self, dt): + if not self._ttinfo_std: + return None + return self._find_ttinfo(dt).abbr + + def __eq__(self, other): + if not isinstance(other, tzfile): + return False + return (self._trans_list == other._trans_list and + self._trans_idx == other._trans_idx and + self._ttinfo_list == other._ttinfo_list) + + def __ne__(self, other): + return not self.__eq__(other) + + + def __repr__(self): + return "%s(%s)" % (self.__class__.__name__, `self._filename`) + + def __reduce__(self): + if not os.path.isfile(self._filename): + raise ValueError, "Unpickable %s class" % self.__class__.__name__ + return (self.__class__, (self._filename,)) + +class tzrange(datetime.tzinfo): + + def __init__(self, stdabbr, stdoffset=None, + dstabbr=None, dstoffset=None, + start=None, end=None): + global relativedelta + if not relativedelta: + from dateutil import relativedelta + self._std_abbr = stdabbr + self._dst_abbr = dstabbr + if stdoffset is not None: + self._std_offset = datetime.timedelta(seconds=stdoffset) + else: + self._std_offset = ZERO + if dstoffset is not None: + self._dst_offset = datetime.timedelta(seconds=dstoffset) + elif dstabbr and stdoffset is not None: + self._dst_offset = self._std_offset+datetime.timedelta(hours=+1) + else: + self._dst_offset = ZERO + if dstabbr and start is None: + self._start_delta = relativedelta.relativedelta( + hours=+2, month=4, day=1, weekday=relativedelta.SU(+1)) + else: + self._start_delta = start + if dstabbr and end is None: + self._end_delta = relativedelta.relativedelta( + hours=+1, month=10, day=31, weekday=relativedelta.SU(-1)) + else: + self._end_delta = end + + def utcoffset(self, dt): + if self._isdst(dt): + return self._dst_offset + else: + return self._std_offset + + def dst(self, dt): + if self._isdst(dt): + return self._dst_offset-self._std_offset + else: + return ZERO + + def tzname(self, dt): + if self._isdst(dt): + return self._dst_abbr + else: + return self._std_abbr + + def _isdst(self, dt): + if not 
self._start_delta: + return False + year = datetime.datetime(dt.year,1,1) + start = year+self._start_delta + end = year+self._end_delta + dt = dt.replace(tzinfo=None) + if start < end: + return dt >= start and dt < end + else: + return dt >= start or dt < end + + def __eq__(self, other): + if not isinstance(other, tzrange): + return False + return (self._std_abbr == other._std_abbr and + self._dst_abbr == other._dst_abbr and + self._std_offset == other._std_offset and + self._dst_offset == other._dst_offset and + self._start_delta == other._start_delta and + self._end_delta == other._end_delta) + + def __ne__(self, other): + return not self.__eq__(other) + + def __repr__(self): + return "%s(...)" % self.__class__.__name__ + + __reduce__ = object.__reduce__ + +class tzstr(tzrange): + + def __init__(self, s): + global parser + if not parser: + from dateutil import parser + self._s = s + + res = parser._parsetz(s) + if res is None: + raise ValueError, "unknown string format" + + # Here we break the compatibility with the TZ variable handling. + # GMT-3 actually *means* the timezone -3. + if res.stdabbr in ("GMT", "UTC"): + res.stdoffset *= -1 + + # We must initialize it first, since _delta() needs + # _std_offset and _dst_offset set. Use False in start/end + # to avoid building it two times. 
+ tzrange.__init__(self, res.stdabbr, res.stdoffset, + res.dstabbr, res.dstoffset, + start=False, end=False) + + if not res.dstabbr: + self._start_delta = None + self._end_delta = None + else: + self._start_delta = self._delta(res.start) + if self._start_delta: + self._end_delta = self._delta(res.end, isend=1) + + def _delta(self, x, isend=0): + kwargs = {} + if x.month is not None: + kwargs["month"] = x.month + if x.weekday is not None: + kwargs["weekday"] = relativedelta.weekday(x.weekday, x.week) + if x.week > 0: + kwargs["day"] = 1 + else: + kwargs["day"] = 31 + elif x.day: + kwargs["day"] = x.day + elif x.yday is not None: + kwargs["yearday"] = x.yday + elif x.jyday is not None: + kwargs["nlyearday"] = x.jyday + if not kwargs: + # Default is to start on first sunday of april, and end + # on last sunday of october. + if not isend: + kwargs["month"] = 4 + kwargs["day"] = 1 + kwargs["weekday"] = relativedelta.SU(+1) + else: + kwargs["month"] = 10 + kwargs["day"] = 31 + kwargs["weekday"] = relativedelta.SU(-1) + if x.time is not None: + kwargs["seconds"] = x.time + else: + # Default is 2AM. + kwargs["seconds"] = 7200 + if isend: + # Convert to standard time, to follow the documented way + # of working with the extra hour. See the documentation + # of the tzinfo class. 
+ delta = self._dst_offset-self._std_offset + kwargs["seconds"] -= delta.seconds+delta.days*86400 + return relativedelta.relativedelta(**kwargs) + + def __repr__(self): + return "%s(%s)" % (self.__class__.__name__, `self._s`) + +class _tzicalvtzcomp: + def __init__(self, tzoffsetfrom, tzoffsetto, isdst, + tzname=None, rrule=None): + self.tzoffsetfrom = datetime.timedelta(seconds=tzoffsetfrom) + self.tzoffsetto = datetime.timedelta(seconds=tzoffsetto) + self.tzoffsetdiff = self.tzoffsetto-self.tzoffsetfrom + self.isdst = isdst + self.tzname = tzname + self.rrule = rrule + +class _tzicalvtz(datetime.tzinfo): + def __init__(self, tzid, comps=[]): + self._tzid = tzid + self._comps = comps + self._cachedate = [] + self._cachecomp = [] + + def _find_comp(self, dt): + if len(self._comps) == 1: + return self._comps[0] + dt = dt.replace(tzinfo=None) + try: + return self._cachecomp[self._cachedate.index(dt)] + except ValueError: + pass + lastcomp = None + lastcompdt = None + for comp in self._comps: + if not comp.isdst: + # Handle the extra hour in DST -> STD + compdt = comp.rrule.before(dt-comp.tzoffsetdiff, inc=True) + else: + compdt = comp.rrule.before(dt, inc=True) + if compdt and (not lastcompdt or lastcompdt < compdt): + lastcompdt = compdt + lastcomp = comp + if not lastcomp: + # RFC says nothing about what to do when a given + # time is before the first onset date. We'll look for the + # first standard component, or the first component, if + # none is found. 
+ for comp in self._comps: + if not comp.isdst: + lastcomp = comp + break + else: + lastcomp = comp[0] + self._cachedate.insert(0, dt) + self._cachecomp.insert(0, lastcomp) + if len(self._cachedate) > 10: + self._cachedate.pop() + self._cachecomp.pop() + return lastcomp + + def utcoffset(self, dt): + return self._find_comp(dt).tzoffsetto + + def dst(self, dt): + comp = self._find_comp(dt) + if comp.isdst: + return comp.tzoffsetdiff + else: + return ZERO + + def tzname(self, dt): + return self._find_comp(dt).tzname + + def __repr__(self): + return "" % `self._tzid` + + __reduce__ = object.__reduce__ + +class tzical: + def __init__(self, fileobj): + global rrule + if not rrule: + from dateutil import rrule + + if isinstance(fileobj, basestring): + self._s = fileobj + fileobj = open(fileobj) + elif hasattr(fileobj, "name"): + self._s = fileobj.name + else: + self._s = `fileobj` + + self._vtz = {} + + self._parse_rfc(fileobj.read()) + + def keys(self): + return self._vtz.keys() + + def get(self, tzid=None): + if tzid is None: + keys = self._vtz.keys() + if len(keys) == 0: + raise ValueError, "no timezones defined" + elif len(keys) > 1: + raise ValueError, "more than one timezone available" + tzid = keys[0] + return self._vtz.get(tzid) + + def _parse_offset(self, s): + s = s.strip() + if not s: + raise ValueError, "empty offset" + if s[0] in ('+', '-'): + signal = (-1,+1)[s[0]=='+'] + s = s[1:] + else: + signal = +1 + if len(s) == 4: + return (int(s[:2])*3600+int(s[2:])*60)*signal + elif len(s) == 6: + return (int(s[:2])*3600+int(s[2:4])*60+int(s[4:]))*signal + else: + raise ValueError, "invalid offset: "+s + + def _parse_rfc(self, s): + lines = s.splitlines() + if not lines: + raise ValueError, "empty string" + + # Unfold + i = 0 + while i < len(lines): + line = lines[i].rstrip() + if not line: + del lines[i] + elif i > 0 and line[0] == " ": + lines[i-1] += line[1:] + del lines[i] + else: + i += 1 + + tzid = None + comps = [] + invtz = False + comptype = None + for 
line in lines: + if not line: + continue + name, value = line.split(':', 1) + parms = name.split(';') + if not parms: + raise ValueError, "empty property name" + name = parms[0].upper() + parms = parms[1:] + if invtz: + if name == "BEGIN": + if value in ("STANDARD", "DAYLIGHT"): + # Process component + pass + else: + raise ValueError, "unknown component: "+value + comptype = value + founddtstart = False + tzoffsetfrom = None + tzoffsetto = None + rrulelines = [] + tzname = None + elif name == "END": + if value == "VTIMEZONE": + if comptype: + raise ValueError, \ + "component not closed: "+comptype + if not tzid: + raise ValueError, \ + "mandatory TZID not found" + if not comps: + raise ValueError, \ + "at least one component is needed" + # Process vtimezone + self._vtz[tzid] = _tzicalvtz(tzid, comps) + invtz = False + elif value == comptype: + if not founddtstart: + raise ValueError, \ + "mandatory DTSTART not found" + if tzoffsetfrom is None: + raise ValueError, \ + "mandatory TZOFFSETFROM not found" + if tzoffsetto is None: + raise ValueError, \ + "mandatory TZOFFSETFROM not found" + # Process component + rr = None + if rrulelines: + rr = rrule.rrulestr("\n".join(rrulelines), + compatible=True, + ignoretz=True, + cache=True) + comp = _tzicalvtzcomp(tzoffsetfrom, tzoffsetto, + (comptype == "DAYLIGHT"), + tzname, rr) + comps.append(comp) + comptype = None + else: + raise ValueError, \ + "invalid component end: "+value + elif comptype: + if name == "DTSTART": + rrulelines.append(line) + founddtstart = True + elif name in ("RRULE", "RDATE", "EXRULE", "EXDATE"): + rrulelines.append(line) + elif name == "TZOFFSETFROM": + if parms: + raise ValueError, \ + "unsupported %s parm: %s "%(name, parms[0]) + tzoffsetfrom = self._parse_offset(value) + elif name == "TZOFFSETTO": + if parms: + raise ValueError, \ + "unsupported TZOFFSETTO parm: "+parms[0] + tzoffsetto = self._parse_offset(value) + elif name == "TZNAME": + if parms: + raise ValueError, \ + "unsupported TZNAME parm: 
"+parms[0] + tzname = value + elif name == "COMMENT": + pass + else: + raise ValueError, "unsupported property: "+name + else: + if name == "TZID": + if parms: + raise ValueError, \ + "unsupported TZID parm: "+parms[0] + tzid = value + elif name in ("TZURL", "LAST-MODIFIED", "COMMENT"): + pass + else: + raise ValueError, "unsupported property: "+name + elif name == "BEGIN" and value == "VTIMEZONE": + tzid = None + comps = [] + invtz = True + + def __repr__(self): + return "%s(%s)" % (self.__class__.__name__, `self._s`) + +if sys.platform != "win32": + TZFILES = ["/etc/localtime", "localtime"] + TZPATHS = ["/usr/share/zoneinfo", "/usr/lib/zoneinfo", "/etc/zoneinfo"] +else: + TZFILES = [] + TZPATHS = [] + +def gettz(name=None): + tz = None + if not name: + try: + name = os.environ["TZ"] + except KeyError: + pass + if name is None or name == ":": + for filepath in TZFILES: + if not os.path.isabs(filepath): + filename = filepath + for path in TZPATHS: + filepath = os.path.join(path, filename) + if os.path.isfile(filepath): + break + else: + continue + if os.path.isfile(filepath): + try: + tz = tzfile(filepath) + break + except (IOError, OSError, ValueError): + pass + else: + tz = tzlocal() + else: + if name.startswith(":"): + name = name[:-1] + if os.path.isabs(name): + if os.path.isfile(name): + tz = tzfile(name) + else: + tz = None + else: + for path in TZPATHS: + filepath = os.path.join(path, name) + if not os.path.isfile(filepath): + filepath = filepath.replace(' ','_') + if not os.path.isfile(filepath): + continue + try: + tz = tzfile(filepath) + break + except (IOError, OSError, ValueError): + pass + else: + tz = None + if tzwin: + try: + tz = tzwin(name) + except OSError: + pass + if not tz: + from dateutil.zoneinfo import gettz + tz = gettz(name) + if not tz: + for c in name: + # name must have at least one offset to be a tzstr + if c in "0123456789": + try: + tz = tzstr(name) + except ValueError: + pass + break + else: + if name in ("GMT", "UTC"): + tz = 
tzutc() + elif name in time.tzname: + tz = tzlocal() + return tz + +# vim:ts=4:sw=4:et diff --git a/libs/dateutil/tzwin.py b/libs/dateutil/tzwin.py new file mode 100644 index 00000000..073e0ff6 --- /dev/null +++ b/libs/dateutil/tzwin.py @@ -0,0 +1,180 @@ +# This code was originally contributed by Jeffrey Harris. +import datetime +import struct +import _winreg + +__author__ = "Jeffrey Harris & Gustavo Niemeyer " + +__all__ = ["tzwin", "tzwinlocal"] + +ONEWEEK = datetime.timedelta(7) + +TZKEYNAMENT = r"SOFTWARE\Microsoft\Windows NT\CurrentVersion\Time Zones" +TZKEYNAME9X = r"SOFTWARE\Microsoft\Windows\CurrentVersion\Time Zones" +TZLOCALKEYNAME = r"SYSTEM\CurrentControlSet\Control\TimeZoneInformation" + +def _settzkeyname(): + global TZKEYNAME + handle = _winreg.ConnectRegistry(None, _winreg.HKEY_LOCAL_MACHINE) + try: + _winreg.OpenKey(handle, TZKEYNAMENT).Close() + TZKEYNAME = TZKEYNAMENT + except WindowsError: + TZKEYNAME = TZKEYNAME9X + handle.Close() + +_settzkeyname() + +class tzwinbase(datetime.tzinfo): + """tzinfo class based on win32's timezones available in the registry.""" + + def utcoffset(self, dt): + if self._isdst(dt): + return datetime.timedelta(minutes=self._dstoffset) + else: + return datetime.timedelta(minutes=self._stdoffset) + + def dst(self, dt): + if self._isdst(dt): + minutes = self._dstoffset - self._stdoffset + return datetime.timedelta(minutes=minutes) + else: + return datetime.timedelta(0) + + def tzname(self, dt): + if self._isdst(dt): + return self._dstname + else: + return self._stdname + + def list(): + """Return a list of all time zones known to the system.""" + handle = _winreg.ConnectRegistry(None, _winreg.HKEY_LOCAL_MACHINE) + tzkey = _winreg.OpenKey(handle, TZKEYNAME) + result = [_winreg.EnumKey(tzkey, i) + for i in range(_winreg.QueryInfoKey(tzkey)[0])] + tzkey.Close() + handle.Close() + return result + list = staticmethod(list) + + def display(self): + return self._display + + def _isdst(self, dt): + dston = 
picknthweekday(dt.year, self._dstmonth, self._dstdayofweek, + self._dsthour, self._dstminute, + self._dstweeknumber) + dstoff = picknthweekday(dt.year, self._stdmonth, self._stddayofweek, + self._stdhour, self._stdminute, + self._stdweeknumber) + if dston < dstoff: + return dston <= dt.replace(tzinfo=None) < dstoff + else: + return not dstoff <= dt.replace(tzinfo=None) < dston + + +class tzwin(tzwinbase): + + def __init__(self, name): + self._name = name + + handle = _winreg.ConnectRegistry(None, _winreg.HKEY_LOCAL_MACHINE) + tzkey = _winreg.OpenKey(handle, "%s\%s" % (TZKEYNAME, name)) + keydict = valuestodict(tzkey) + tzkey.Close() + handle.Close() + + self._stdname = keydict["Std"].encode("iso-8859-1") + self._dstname = keydict["Dlt"].encode("iso-8859-1") + + self._display = keydict["Display"] + + # See http://ww_winreg.jsiinc.com/SUBA/tip0300/rh0398.htm + tup = struct.unpack("=3l16h", keydict["TZI"]) + self._stdoffset = -tup[0]-tup[1] # Bias + StandardBias * -1 + self._dstoffset = self._stdoffset-tup[2] # + DaylightBias * -1 + + (self._stdmonth, + self._stddayofweek, # Sunday = 0 + self._stdweeknumber, # Last = 5 + self._stdhour, + self._stdminute) = tup[4:9] + + (self._dstmonth, + self._dstdayofweek, # Sunday = 0 + self._dstweeknumber, # Last = 5 + self._dsthour, + self._dstminute) = tup[12:17] + + def __repr__(self): + return "tzwin(%s)" % repr(self._name) + + def __reduce__(self): + return (self.__class__, (self._name,)) + + +class tzwinlocal(tzwinbase): + + def __init__(self): + + handle = _winreg.ConnectRegistry(None, _winreg.HKEY_LOCAL_MACHINE) + + tzlocalkey = _winreg.OpenKey(handle, TZLOCALKEYNAME) + keydict = valuestodict(tzlocalkey) + tzlocalkey.Close() + + self._stdname = keydict["StandardName"].encode("iso-8859-1") + self._dstname = keydict["DaylightName"].encode("iso-8859-1") + + try: + tzkey = _winreg.OpenKey(handle, "%s\%s"%(TZKEYNAME, self._stdname)) + _keydict = valuestodict(tzkey) + self._display = _keydict["Display"] + tzkey.Close() + except 
OSError: + self._display = None + + handle.Close() + + self._stdoffset = -keydict["Bias"]-keydict["StandardBias"] + self._dstoffset = self._stdoffset-keydict["DaylightBias"] + + + # See http://ww_winreg.jsiinc.com/SUBA/tip0300/rh0398.htm + tup = struct.unpack("=8h", keydict["StandardStart"]) + + (self._stdmonth, + self._stddayofweek, # Sunday = 0 + self._stdweeknumber, # Last = 5 + self._stdhour, + self._stdminute) = tup[1:6] + + tup = struct.unpack("=8h", keydict["DaylightStart"]) + + (self._dstmonth, + self._dstdayofweek, # Sunday = 0 + self._dstweeknumber, # Last = 5 + self._dsthour, + self._dstminute) = tup[1:6] + + def __reduce__(self): + return (self.__class__, ()) + +def picknthweekday(year, month, dayofweek, hour, minute, whichweek): + """dayofweek == 0 means Sunday, whichweek 5 means last instance""" + first = datetime.datetime(year, month, 1, hour, minute) + weekdayone = first.replace(day=((dayofweek-first.isoweekday())%7+1)) + for n in xrange(whichweek): + dt = weekdayone+(whichweek-n)*ONEWEEK + if dt.month == month: + return dt + +def valuestodict(key): + """Convert a registry key's values to a dictionary.""" + dict = {} + size = _winreg.QueryInfoKey(key)[1] + for i in range(size): + data = _winreg.EnumValue(key, i) + dict[data[0]] = data[1] + return dict diff --git a/libs/dateutil/zoneinfo/__init__.py b/libs/dateutil/zoneinfo/__init__.py new file mode 100644 index 00000000..9bed6264 --- /dev/null +++ b/libs/dateutil/zoneinfo/__init__.py @@ -0,0 +1,87 @@ +""" +Copyright (c) 2003-2005 Gustavo Niemeyer + +This module offers extensions to the standard python 2.3+ +datetime module. 
+""" +from dateutil.tz import tzfile +from tarfile import TarFile +import os + +__author__ = "Gustavo Niemeyer " +__license__ = "PSF License" + +__all__ = ["setcachesize", "gettz", "rebuild"] + +CACHE = [] +CACHESIZE = 10 + +class tzfile(tzfile): + def __reduce__(self): + return (gettz, (self._filename,)) + +def getzoneinfofile(): + filenames = os.listdir(os.path.join(os.path.dirname(__file__))) + filenames.sort() + filenames.reverse() + for entry in filenames: + if entry.startswith("zoneinfo") and ".tar." in entry: + return os.path.join(os.path.dirname(__file__), entry) + return None + +ZONEINFOFILE = getzoneinfofile() + +del getzoneinfofile + +def setcachesize(size): + global CACHESIZE, CACHE + CACHESIZE = size + del CACHE[size:] + +def gettz(name): + tzinfo = None + if ZONEINFOFILE: + for cachedname, tzinfo in CACHE: + if cachedname == name: + break + else: + tf = TarFile.open(ZONEINFOFILE) + try: + zonefile = tf.extractfile(name) + except KeyError: + tzinfo = None + else: + tzinfo = tzfile(zonefile) + tf.close() + CACHE.insert(0, (name, tzinfo)) + del CACHE[CACHESIZE:] + return tzinfo + +def rebuild(filename, tag=None, format="gz"): + import tempfile, shutil + tmpdir = tempfile.mkdtemp() + zonedir = os.path.join(tmpdir, "zoneinfo") + moduledir = os.path.dirname(__file__) + if tag: tag = "-"+tag + targetname = "zoneinfo%s.tar.%s" % (tag, format) + try: + tf = TarFile.open(filename) + for name in tf.getnames(): + if not (name.endswith(".sh") or + name.endswith(".tab") or + name == "leapseconds"): + tf.extract(name, tmpdir) + filepath = os.path.join(tmpdir, name) + os.system("zic -d %s %s" % (zonedir, filepath)) + tf.close() + target = os.path.join(moduledir, targetname) + for entry in os.listdir(moduledir): + if entry.startswith("zoneinfo") and ".tar." 
in entry: + os.unlink(os.path.join(moduledir, entry)) + tf = TarFile.open(target, "w:%s" % format) + for entry in os.listdir(zonedir): + entrypath = os.path.join(zonedir, entry) + tf.add(entrypath, entry) + tf.close() + finally: + shutil.rmtree(tmpdir) diff --git a/libs/dateutil/zoneinfo/zoneinfo-2010g.tar.gz b/libs/dateutil/zoneinfo/zoneinfo-2010g.tar.gz new file mode 100644 index 0000000000000000000000000000000000000000..8bd4f96402be50779e4b2749688d077347a6eef0 GIT binary patch literal 171995 zcmYJ4XIPU>u&@N-#BP#=jW)VN@f(YM7=O<_n!8MN z8Ez8`oxgXoN?WUnin>)M7UsIf5_JkzwzB>iKT7*EAkYIJK<+CT$A;l%mprGrMgshe z^D_Qpl?ue6e=&UX8&AoZS04DtXCF24yLRBVi1C_kzf;CcZLDYJ1@*YE{mwn_A2Uk3 z4kyWVN7aF|?W9g>M=?y^|MAuyx-9*RRbhPnzR7Qqeo?0SMwdWz_J zicr!(bZH=zG!b2zh~zgd6sFhZ8*YVpNIgs4j=M2zl^rEO2{UDpZ%_>LNFmwKJ`fMR z*VFfNAgS45doqu~hWl+4v-E;t`dCU;yI5@RsL(-%X@x`GtNPpp@mB`htfkWt$x-ev zSoK}J7MFMW4uf-^2l^m0)#I^cUI#Acl2Fl{%rZ9X-MKjeogJk)IVc%qyr=xh(kuRu z&w6PrtmhBkqC2rR2bO~+;Z#KDCuz}MEPYBGRc}zdR2ZIAT3q-laS6Aq+=%E*qF9D7 zW!M-mf@M{y`D zqQ06Z>K%#CKZ%()!<%mt(3~-=ZZ~wqI)!=IR2#4F6G%UvS5Ei1&6rNK2`xTZK1jiu zTAl|}<0L+ec@GgQ{qXwm{wX1}w|N?ipK3IL@}wd7-|0H(fB@{whYxh_uM)8}#Bqfe zgG~J)JNZ^td6{_oZm%dx@=sA}u>x;`$6AlNG2Y&12^Ya5+E74;rnVFN7-oKd!JO~x z&)>AnDr&M%Pm0HB`8?{mG^sIXAKs67YgXam24~B-;LJ+=h&c|sk}3X89S z_C!t~YbvhVZJkE^M1=PT%ES}FEvO2^ij0qSm}`x}!`&YkLIuUp&&Q@GFWdiOM2A|7 z)GP792HxKPKfPX)PwTj@TkXxV3#;@TJ@gp1a#gz}xI1S3B*CTcEW^Wm28LMsLcJjq zX6`N^Wgk$5XMFcgC1O2iL>bDmff@@|@U0$`c4jvRB#d> zDL+w?kE%q3eWS+tH#GdhK<1m!){cQH@B|P+QhwZF9EUZu3@>T>`DPdwQh5fgoIPn=6M>Nu+yTKH|)0%h$U!|_9Z`#&Z8Y@4mLQ?2xDslY?q z?M#yzLd0joVtJ!)wYj=oMdTL7uJ~dC_DRM-(S{bJ7*t{;W;0Z z$NqdH59dlO9vXcbhACh0*t+I~F3(a1gK>ye6Fskm~$ zaDe{ui2T$KYnF(T!GBvWf``*eEp>Gf{^wsmo{S|){M!;?EOB0?zP(Tye1`!ZAlChx z|K&>K?5*&}CG2!3@Ly6rif)wNQ(kQr3*>pKoGLW^a07sWBL7Qp#myzZ)RcCF2S=K58oN zUWrV%|Ly%P>oer)X&(}mzV5*dtB%4}Pz7kjS?=#V>+&ZS) zp0)k2*B)!~3sCN+m`Z1s*CmRj&Dob!`A8Ov`X|Yi9tC(jGv-_}GEHRXtJZVI78ad$ z4^D43Vz%tCOhqO2`EN`0J=9BH2bf11G}^fDu9kENvrrjA#S_TZI`Ck(9$>^BNa 
zlAo538pr=OS@7!`ezE<~v2&!Ubf_uWsMRysGb$o;O#IxcQG>BsYo>Zo-c%$@vLtv* ztKrSRw>7_uJVt8DNr%7lKkOJjEVXgEA7*5KT^bRnNBmRF!(h$!uY$hijgqU#9&_4hx($Vk~cWri1m|`GtCdaO2`orHv~l zRhs)V>PKC6req z4NJI9!XtJ=+m8)(E5tg{FqI9Z`xvniiqyUi%cW#fXAVn{d&3`oL;Efar7Mrv2t#U< z!G_5k6O3#uHi%BcbMHLAoAmlw1OFJCL8ZsrP-~tDaW(0%1V0iU+?74mn6El_2`Y6J z5gYH3+BC3SPLGe}ql%2V-12H#VF}+zc=U#pH$Eb@Z#yP@C*kqDsf{71yrs;TE5xR5 z6_zkS!jqfros=skujW{-<;RY?rGl`HMrt!VChU^%tPCmJ#vrv>VYzU5wUBD9yVojp zNshXQVaQxXd9}!}guAvm#|Q*Z9CDb$&LoMKl*g1tyO#r1t%@j!M-Fqta-YeoImCpq_!X|*IZt$JuKl}2tOUI z_QUIy2~QBVDM)P*Sgy6a+7Jkblt+(NyMYUp@DyR2hSZjTEM)Hw<#trR6 zdBhkQQty74Zu1RoIt9cS1x!yYOm_~9fDRR`kmdUag~7i8a8aq~BTB02YjUVzqy22T zs`dyg99@9V0c^2z?~It$mX<>9@CEU{mO(&!uu17v)@C@#8ly;!?0|%qt?7Q2-vj}x z#COrUY`vTN2QR4>X?gkDf}K2w172koEyl|T|3k<7vUF#~ZQVxS>a*G}&^!%+UQJWs zw<@d_NB-Ro)r3Z_$SE)p$+>IaLi3wS?AL`XxwK53sQe3Xe|UT)%UF51vndt7c(lV9 zBKY4jRmEs*ECLB} zS&EjgM+=L583rMcmiKjZp+kH-RD4W7aJJS5cWDtb%zIlVB}*Exg9};Q!fz7cCmu7S z7h1nxH*akcN55+sYmIevoAbEd(riI}p7`kh-wp~z6mMmz>7LKM{8(TCm6C`d;a^ywVR4uia z!Pm)yECNN*zenO-Ta1so{|3}To0LJ;&#CttTfbjzrQfCwYIXK>v1j()Cw3$Yf%C@_ zW3}|Cuob___qn^s{sn06uKYaDBt2LhJcSbn2W{L#N9N|Wn>$UB;qA8l_9xs|-?oyb zk>b7fN%noBsJ=Z@PV zg8@Bp?ZOtx?t>{RnUTUu4>`G`FRvuCmVWdv8a|v3wT@~bE-m(DmPu=6knzu+Ta+>} z2wn5JkoFPCBa{p(-I8|2&Xz?j`g-B7XP#|e+YcaDdAnxLN@(^%@lM^y@$E&TM$xN+ zqpiX|FU_r8^jzH2%icE#4H#5MbNdM8-fi;v5U|))GS#j=-9#YhyD#KZz_-?>E(oVG zdkv8@wQi3_NCwsCTDSF`$&s18+`hj5u|)r^0a+SRX{ij|?9?8yOL9|SyD6Usfp_3M z)vRpo`=fgE?YBq1GL+ZOW@m3V9lEma^H$lvF)%fyX_}@EZ!gjuATd)Id^_=}naE7t zoxx;eh-&SKmp0~Ttkjoqb_Rd1PBU9}QBI;Zbi%jsTGQ_C`C`jjFhSYb49Px5I=K6Q z@yX|iB?%MB+f7=dE-tM%Z!Siqg};&57`|n)HGHM1pp4I_YI~&}r@lgP7W|p0seYJY z%|7TG|G-2C8N9m|%NP{x?0EwK$A@KFcr69+T@b*D37~}%AX@@p2-r>3gk~+BbSOsom?LDAF=6!30hctqBcv~zgYQe*n)n~44r0+t4bpy<7a*8#-A?7jIp zTA6KPw3J+vmfSvxESPM#OXoukmS9cXVDhQ9BlbDodAe9}3Vd&Cp`}{Zo&rqp0-QMk z3@QTbIfK#GJD>&qhn5O@bOp-(SZFGL?-!(LA1hB{n20QP*23i&uzzjYX0=hK-l$;>m13h3z z`hDLWJ9rs>)*f&de+zU`gaFHZ00~wAqa^@>a4_0B0Q}{J1AikCk|BdO>Er>gy}^c) 
zxIwrRBtg+x8N>iOQ~*b}0Fp@oyqN%kL9)!t?*i0$3!uODjI;+E6T!O$S+2pmA-S7C zKYWS1gg{qxIY9j;Ybh!m?2yh0EdS95ErpLsw`zmxp4|!?+t-o&vMK8R5m$g)D z4x;nE114w85Vm?r}LK}k^S0v)wDnDqM`q)vg(>Ic zJQ{2#ymH^YDfc#(DR>B2wd&ph@E`|ZqK0>yPl42!H(voT27b*KEQy0GbY;-LFM}_! z0SP%p0BDE+IMD-Sk^@Hzppt7}gOyklj41Gne@*sP7=FHiKa`=sR?r1S2OolC0F45F z@U-gy5}-;1G2lrHUS_lCvNpC5*837;y`im+VEf(l5m~f#y!-A1&V+Do5)k{fzhv0- zFFsUo?n#I6ELsOm7Q)&z|0^<`(_)JZ5qH{mtUCuuspkH!g*}0{4U(oXwhxj<`=O;^ z>b?L)^k8y71)STGZ4CTNSHK$< z4hDz_zy|XEs4o)D4ZNYZyAJ2RB|rk_&Nl(-%NohH@d8+v0+>_)Kwk&=@e*K}0${ZF z+5RvgL3`hmw^_8kUhe=-E?W~I5Z>xbB!vO!0o|zAzI&+qRkT$5`CV-Kq8*67js*

zm7;?n#_1Fg&onpK^PV*DF?bi)56A)R-U92LX@XWJh-x(J8k}4756A+g1(=`=%-RL7 z1Zmiq+yVFpf*1V&T&AwhUYkGeFxr3Z3_L>MkjS@PCgA)YK-XmgRzRI(k6k9flLVXo z=k*nU?<@eNfz11-x%Q#n`Q&hJkAC2BT@7Tpff)Gs!i-J7;R80m?m-!(L1J_j&V9`p zEccKRL?8eX5QdZkX6u&;m>37By-a{V9eBvaLSSkpb&$sUd=Q3W)0-?>Kg$o-_FQ>y z$!wa5?5}%Lwp(a`JXW{^Uzzp*W^aMRc^%M_3jw|s%m7-a0ifH1ZN#@KLa9fA{aay> zj91`sK5im{b7OB%!?`Ku!IPn=zXD(i%8Ft}lrjJvL))&>fA@-9rM>397CZ3FiUSye z?Yno}F+fYbbdW_${mo+n-MZmRr~sfpDYbXA@?4Sodlj3$e*`M_7woQ9PBzwi&ZvFYV@)L<)X9Kh?zD}bl`KzFJFTBhzG##w#(m0q zY`a7Q@Xxpa@C}3?x*-G1&u;=)u>+id@+f!Y0tj3&-e)Gv4B1Nu2Yq{!0AQCGpa2xg zw&NxENCC<~$uO0J<3e>CJY*^}(CROH7s5I%bOXB&!vFir4PYIF|94Un01d+bt9T8Y z?&9_zKp`o>Lr`OqI-~$w@uXN2ea8JGIuPs8AUI=@U{^1sz)B`f;4zyRU1A>W!z2al z$i(y#Tc8q5E>Ftr-1LkOCTX z=Mi}FISWAD%?KuP=YUq82xwVngMp)~0FizGeWw6Lm$kCh0FdSYFcWtPa8$?!sDd=w znM1R5_9qtCZgqfK34cY5P49ROvfJSm!ZKT`!njWXc5sVfrfraCn;o>Mz|Z1aDXE|< zycUtABdHIjOSXh%=~(_IgXjJ70~^n7@+Op-X^32$((D-IaqADs(pfek1cPGW{Kxu( zx8Zx5Yyi%JXep%@a9BQK?*peqAAxncDCmZ!fQ71sD^Zt7UT;0Mk z2We>aUjtpayP$PXfY_(UDWzRK3hX3->J0#CIDqF{fFfdGmPH86vPH4!Upv^rin*VF zV>K19#O7rZ)=mJD6#ynS0BY#~&hP*+2LOZM?et@>EHKt9>>T7N6fYq)``ts^mkhj>rvU+mc79yA;t=pFb% zjq(ci-Ffg>=^+}b2EQv&A3;=SzB2aguh)J*$Kep|dt%U@3oFIe`9G0gnxugeIDe&x zjhtaSyr6;g+VT(8PBEKn&| zCmunP4=IEBqg4nd8z~18Z?3uh@Vjyxf1G|;MX;a_J${@$#N1495`r0>XpQXV&V-UD zAoRi?C~woh`x>q+u*~1<`6mr4;_goi+G&5;EBdrGd2kYBl%L2nvb&+ym2C=vKI@Az+t^*OD+V*eBQ(hRoMAHWk{w;5-eUaV%tjYxECl!pEdVG|E7lej89<>iV>Gp>)~w?0R_JxrX`9_(!_Bl8fZj zTI;b7Gs&i-yX6|oUU%J6y_tKuccL*}P1btsu4PcrPK)Zj+J%e@^iO74Gm-;FS_509 zGc#ETHH}McrHm}7Y>h$UmDw(&NjIE{DqEA0_}6R~jTP^CFJ>xycrPvvUT|Dc`8K>|*0xGb~dthy}U zjvCt?8_$-;&Dra>w+{|tK5~!$)mFkjmAE?3Y*u)bSEFun?Z)8s;PZlIKRo;Z${rfR zJQo0ew++C-Ya)uFi>aQLNHf*3SwbI|Z;Az}{u0b*24nNAvrtY>&QC0; zq>qpj#`(z+bnV@@SiEff2~_{<2>OKD@ap40{=%|v-Wch}4> z>5D%ww9HIi;rDe;tXY*F(Mk?paI+7gH1N8U$aX&eD@8P~@ry3x`eNHG%FgUET>J=xlnESZn z)P>XqKYv^o;+^3g!xV5*n`@%D@EO@61`BkT$DBUWCkWFed!$eE`5!XzI`b#BFr*q0 
znkL4{UH4&_u6&rTT9~djTE4h?zq_1<9i`2ViepC&v!iG@P}&@*I1bb>2a1LhrOk-3yC)-o3!Qn-#oi8rWV*-i?rQE|b_qjRQF^opSDl78C_05Yp zD5IaV8M>g$i(?FeLhy_#KQib0M>PwB@z&>Nh5o`~AT_C{QIp!_~ToGgex2 zj*fcJt0L1it2?9Kpchl9n6NsQbP>zYNJ%HMOH%c^AvUG2_^jsPZYk&M^Np95iDOqiQo4MP z(sCgK*#gHD7&6?@mRgJ~uVh0VvY~_@Q{UjDKRzz6grN?vRtn!kxe)|)SGv7m{l%a& z0P#Z;8}mcztasCd$=?KZqTvSC^$>S6y4Cv(8lR{#5@r^@``?(%$BX0jrXPwv{|rLq;-PHF`@%w*Eb3f0d1vAb3R->P208O? zwOZNuJVPkD)K>q27vbmP${@Z$C_V{Os}8=3HSGwBZug{qw{0x0JPQ}OSBaXFR}W{X z<)>4~NJzXsyF7X_&zsRW`B5PEzS=~?gi@_T8RaggcjPYh0He%K5WOsQ3PoaB6kSpA zXN&v>cKKrQp!{sip^!-wO9rJ_ces1aG=-Mkz3Oy>u@_c5TmtFdLFT!dx#{VHeZsvB zhM8`jn%3GFxj}*P8Kd53&<~4>b`yVvPOe{^f# z+3R~WwVNN2(<5zDD(eebC$QfiPsP~}*32^II#?BS)!=0xP5smjvzMM$nA)wq`|85U zH<4MH?zZ&xItGvIrhM-{N0MT1!|jBndRYFkm(cow&$zrzy_%>y5kRlg{g&3Xs%U{nfwXy&-R@UEg9$wYqCN#<{G_ zn3DONwdK2%S!>A9u!{aW{s$CM6^rx3IdLPyOk+ya8|#hLcG+=){O^-?zfQ zh53APOQ{ZG52wXhknzZq@yV0jmnRea?^F04SU3YL{4Ok<5f;t_3ui`#v%tbxk>PBx za2PE79xR+47R~_+=XAW48g?tQTI)TH_UC(^mCra)F1bKpeHPKpP$?d>TGe!B`54Bb zA|J;A>xo06R1t?993N=np4hS%(8QV8#&ZefqGwP?Uq4JRyy_ zx2EgL#$b_zYaAU$$+wl4E`zVI@1p+ZukTw>7G%#QB z{kPLkTN=gC5Y$1{Y3iwbQ$*~r48HyK+~r=LbNU76U%AE4*~5Wt%-1)h_RHjQWg~s+)D=aOl`)$0(9$XoIG(Netf{B0w!TLDA#ym)wcv~GcFUb_mEg-{uc5rrCDw&u zsK_gR3-ZW=qNpp8mKmc9InY@Zar{NFjv<7{ypO(m^QEmQenB<`Z#rj%%ag`w)`=_i zba$)A2*(<0GKHO`5C(2~2_7$GV;F0ekEb3_Id4~6Zo>l|d|k}dwiZ+tp0XzDh=SoCYeNxnytnNSH@Qvqh`41B^=It z^5=(&65XulX?~iP4Dxt4n>cWY?Mb9!vvW;;QfcZ0C3eX|XF8a` z2Px*@C~_ZMfKP!pZhobvrjDli6tx+L!JMJ%pzh8togh44b2a2Zd+Hb;4XLLS-w5J& zePC!PCg`~u)Xx7#)fqPpzT;Q#{Lv%|i9X?<+I~}hk15$)AtLr?SCg?PddZexM_e372IJ=1S{G9xUb^o3wBP6nRj2abCGkaeiY_d!pE5J4-|#~V*`B|I4WPfoe><1xzmlNjrmvE8WB9`#KAaeLd|;=hW?|}N z=>pn4#0c6J2Fv~B=FzkvQI&9L@v1XdtY_YtB*r#c&BjIV#qOa|HF&~ToeCp%*w)Cr zNcYME$mtM64K9exfD0l<+}qO86SisF`45LHtHz$p4Tvb+_ZJ{~ekwp&v)efTtRAYB zd51H0bH6xaNF-jZb0_Am0sq#LZq4dw8O;gk505ubd(Sx9zV*AFKMdR(z;j?)$tH7?; zR-IOniBYGH!f2eR@{~`HgF^hiORiV{4TX^;_}x@bYH_r`m&7&ELow08k;y+_0~iz{ zt?d+wOgs}yG%O{mgS3sNUY}Sf%vezMdbv&|>=?{xdEGfiOu7EMnp?UglpEA`U*ScG 
zSg*#;8AB);^p&EtqW_Q7Qr0^GBFc9H65sB~&nq&>d!Gn-$kM-9|DIoxeG$#lTOKP$ z@s}K%coP;*8QVFRD4VKppzJx2^T|s9^{L=Uo*^wt=d?JX@6U*z>XdY!h=TKxxK);a z9z|djc><0K>$nc}-R^v2B|<@X(Jw-Qu|+<9OdZ(DI@d|{D9}Ir^B%>V1%K}!*`N0)L1X%|K^5r3 zze&P#NWz05;UOU9AtU9XA?3MC%5%>)C!YiLivzW;nDOi0tzTagkoOXiUlWn{l8|4M zkoS_2Uz3sdJ|n+=M&3(7er1>lQ4)nIiNTb_kxCLUB}t@`6i44ZX8H&vX_%4>Oi31| zBnMN1!+72xbsr(r*f_*)#N8uNr3*t*$ZHeBc&_~d_a)R|95pxMUXiGZg`t?_wW;Iy zbZ)>3-Xjwf5q9@Dly1fqlBgOY=AW2`p&qd5Fu-^ye~l_5KC*L+-HbaVQ4I`3J(Aak z!FXb8v#skFsNHVf0%>(U{rvI`Da~AV)A6kwi+|FA@~7(xS}g{gYi)A*i%(j4_tuPt zGBRhq^l9}mo8LA6;aCHobJ$yAWd%^#6UbThf9sc zD#V{o247&E!Cfyb)*O7e&y-VRrAv=K-s2jCvZzj4Mc(gDN(HjwF0YN@!AFPr_6i{$ zJsgyVpS$z)6q}Vd3neFEG%@>qZDF`=k(h4ffmG&FV+Vh@+rdfncy1%TQSyEM*QP8J z9K(_w;|;HLCANBheU0T4jHYk3lYJv}-RdtM{-54lPJ+_3C%8*;>kGerb9AMTFSQkE zo_23s$`P;nlpmv-tgA>ui&4k3)#pwMQ#%Y4EBdk$30dAF&tCqsRp`q0r%Ze`&N%v5 zopNADlgIR{K98wAw#36;+;GFp<=t>+Ztd{WZ#JtH0$R$%!Hq;cvlCI>xy6!VuWyOj zn9%5VcY_VYMwxc4^2L7leAzJRUlgnUSm|YI{MP%~)fM=O(m1?aeQNP=7CM(x&Gese zZ*TRVnw-uZ;1lA=wQiV~o$BJ;Xi4zX*tUJWpY#F8xi0xQPgTB^bP!S~wtj+s+^yLW zM#_k=!+Bq5>>`W-(qf=D!lq6I>c`K z5$U0Xn3fj=AAG;641XRa=Y4vNFj5e-d41Vu+W&BA6oWjDxGpim2HQX)s}Tr44vwcZ zaRRaTQp8DBv%*kX^20nZ+eGBC8X}dbC6>#NV~1c+l~y+H9jR(}7|Ke1SOjL9iu8Dj znC9XL%-OZLt0L%Zdg&iidwi(P!iF+oJHz0U&oC{mxVYur4<)AWMtv(RktpgXem;kD z>rzQS(pUq2~;^-x} zawpnYW4BE%HZK9Eg1hMUD5js4m6Rb07;l}24$za3Z$9`_=I{b8tEwii=HJqixq#ZJ z-CO50=;lasI92V7Tf|hn2$e4XxQ+R{zuX^vOT}+(=G+@zJ=Zi?J&lWk+>-YoR5Aws zutJT6S-ecI^i?foy?eUYvevf5vTyYe^6bz}p@rS`9oD#;k*s*(i-%F=6GImae{O&# zcgDiVc~GZe>oFZG^lGR(FwLYR{?Q)hLGH^^$fs;I-c@ysq>C#MaiEG2QbV|sSyrea z4%84rk^Wr9xk^?4d6Du2k@CD-@#i^gzIATa8|)^?8Mt<*n04+DXBw+E@Zn5}2@jpO zzHSd}@Ekj%*h$*N4|YkNW1R;Mu?x1FP@LS~T-C`zaU%)Kf5!*mbqo>8cV8=u(OEO= z{O65cS@ZJx9yR0pFviteb+2yDSiSIcI`REXBg$*GrRqZOqWe~iba_zy<_qse$)n?g zLIq0aAIvP@MRyVPvaTv`yStc++z_5k8ZmXW=lEwC{C`=!54$+VHuz%;NAmm%226g_ zdV87LkJS@86&|_2S`y&zE2*S#V(@m|b|1@a<8@8$)F>()6}aI&=G?q#6f5C6A*=Co z?pK1jr^uPPtAum2{+!=K+1=C3qeAI!;-7P-4%&0ZC6SHL5>uk5+fE&^X+$9l)n>lm 
zGAsRAT~6}mhpolByqb>%nJXk8{mmxzPq12Zb@a|f6-19xS&~_s;lem*tc+ zcIc9;tk=*%9ZTcBk4cPa!0}SW6*d$b8|o1o>O~tZ3qKnP&wnI558m)glJGow!;c{0 zdG;r{Ei@cS@D3UJ4oMJ!42?h%yhnz2)!LI*gN66`iwP+Ni{_ib)YLU9mFKLo%NY3mEvTOqUY+rDa!?}ZXiI=Nj~T;Zh6K%hNm3olIri@9O{NP#%3u)EaQERIQ&Bh<|yo zw2-Lo?W`kUoUbJiv-SEx^nZmWLUt%OE6b;ytXfADMb*5Wcav{)P8SR;`{BRpw@254??Z9I>MR3J5)a# zc&P;#dQ;MVul65F+@TU1%#{)SCgMG5MdIx<$=?|}PM@f<#j4Lz)pK1n7>OB0YNvTx9y8Sxm) zt==#i3i!4~v}xmHdH846M^DZD$jj_^iO6Z{jK7)PF4oi7-TT1sceDAK{Vr6eWwFy} za`eSMef4Et^EVaz*3+2go5KA|NU8(Ecqndw~p;CR@l0e zAjI;{;c+g-6rtMBmHi5dWskl)$wiZqb+b}VT`OAYANr1L%uVL|{G-mJ`6K&UJ@rjO zPW<&W)W?g+G|UTmMPGj_bb(xX^}Mz8P^Q%2qG?Fst1+BjSKmdxnq+;E+G~LVUaJ|= z($V;7*W9v(pNUfXGNs$6Ca!L*rklq@fl(vXxb?M2GX}^?@@uogw*)_-cs(Pdg$V4P z75N&{gY_HCp2mBdt(l#5MGfOC`&y`JTJ)e>dY;Az6Svd$;1>ri1J0uRD|>R?bCT{1 z^-00^p1pRr-WYsaRzZ;7_!Y+{Bc>jYvw@YORZ_0NxzM049$W)g-M-J-Pbn#})ZqZAMj zT5(u{NeF*CjdolYSs(upjdo?19T)%KS7NkD9u=vi>P^pohjE%0zwV%UpfB=L^-fVr zx#Fj>C(nhGzG2Y{Mqtp2O}*bnx_p_!lGG@b!14y3nbVFC@TpNiEl<%-xFzDvDHZZBrMQIPBRTXDr7>5tGKi2(P*c2(jpRx zrkRR69;Ln2y9@dX^7_7Cez)`9sFr%pd9OywjG#a`cACjLM7IFp7#;Pf&;-n7Lh>>c z{H|a@Bu0&{ekMh!so1%{e^eL%hQ5%7zD&FU(^Cx7Ww@yw9yWfZD1oHl)jd@22PE%* z1vz-7^ii!~K8?+MI6JJU92JdeRxZd1G6Y)qrFNkx?&0P{5zy!I0{u6&&3x88pi)X4 z)ml*C^c*EFL`C}rgWg^aP|xXUs|2cEj){WFWz5{3qm4gRnNPZwfCo@yD$TQ65PVDv z5^E;(V#Ngv^~ovx9Bg3{w$dn|SI^aVyW8*F#K`eeMmY&}QGQ)2?%2jMcK#*tAtx>! 
zBPq8gC1>?;U7rxAZjH_Rkw&@4+JWfPvfY~o--vx#{FA_71v&2odu!HOZoz$fkAhL) z|2lL!G+$k0osvf_VBge07avCESwx;>;nmVB{F@}*k^c=CHDyF@-T7~T1!t1J(D&FM1TtN~lPOQr(KUUI2bozSKYU)we;N{&ob(YY3_sBR zQg+~0)%%mLrteN!!?J`KUuus}^P4=3^0(nl#f&PQcCmF2+1$zQ= zUs}$#xK41aQFitVQ=7!5-1)3t(=P&+Tl>+I{z(TPirUfDlhi-0rM`X|q|5j;$VzmF zIgjqecRrCSlgzKe->N^~|1ayaEL#~Y;*3uyZ%bX_Q0O7!zh$9^ZWoaGG;~T;fPO_8 zH_SRq?v8eWN5)M<+sq|_0#L->u~f;ujkT4f81JTY&e;Vh{~X$jf>6yFYc!{kYEckV z3F;^GHtc6GT+0rt9%a)Ts=Z|H_cg6DdlbDepc(}|a_Rj9R7Rok!l+TSU>i`;r5{_K zGXnd;MKIY;!w%c%qP^7G{GS-!-3>GZxyA5fUq{hd{9u7?&~Ro4jU}4OZC)|_#7FI= zU}9h~0p`G;B;18s-{_1d-i2_I1qTZm@pqvese*$n1u#$pG;s5VkYB=;qv%#X;Nvdv z!7qmQ2UE=jVSS@uv6?XJ+`})$_r>tW>h{4E+n{l#ZvUo&UqB3RQ3BQq4dc&IkAf)D z!9HxkIzlv5@ul=uz6A%ZQD8NC;AEjV=`NHu28?sV_%DF*2RpDACE$wtPVP_WM$RZY zMhGnb0yLaof}OP}7aW*Rfq47CcyJI<4|3e!=(VE&Y9o1lcSKY$G20lM_7W6rWQWbt z0WJxF%T`t3G72={#+SbHf$!^JuK8==vI=B*;RR^GFI^^p?0Ux`$K*hSS3t5jr=C6K z5W}lSf}CZ6b&tNrJ{F_B3l)1|Dz*GTDQNk*EtxxEp*=CY&TeK0w^`ipHvi#vaznXU zYp;96^FyQEp=+x18%IB^is1H=anjMp@LvyljI@+1yBw%FNSeHEN8pA_u0cdmA3ML z*rJy9LLaISJL{R`JX1R$uN{1Kr!=AKV4`08eCp;@VsEKvVkYx>S-&deIGE6M6kPK* zr{mSz;eYs;BJ0D0EEYTKDJVA^QpZf@$K27P=rskYqH8B1rc{>W)<&+T4{A>yy+YZY7L!TpS@ zwYlop@`;}R{9IxhOKrAAX43S<^Pz{Cfy>cr$80(-)4x;hHHDKAN%!CsBexxcMzxE# z9Nb;c#*%0#-+fkJO||3E6Q4BM+{tPXrfOLgUdvnWo#(X>sI7=8#l|*FlZni1^5V6O zjBQ^~W?-y|OWc#kZztzV8yPIv-L8F2T)mr1W+Y?s$w;%&2i(HG=ua#&t=9Fe={NE0 z)>p3)x$b_la@#bjHK$*!+i_?t_4bQ##r_dr%AM?zIm@~M>0jnsIy{O7-QA|fYeQwX z7s#IeZc)J)Z%Q|$Zb3us!;4M=?wuPrkErNI8yu!~Ld+~fX9i_@Kf`NS$8ls+$>aAc zO*>r1t%f(N!M22EreifOS*4DyiM7?d)#I~vGgj(bwXLZ${WDIs{w=NbbAQ5? 
zOJ9qs^hhrd+QQymKW3k{C*=Dy>ba?{&bU0ON55^X$QV!?TdK|z{=E6$sN^PK(v6oY_kz|1rh=z7D)D$l; zl+0;U{So(<;tIEr`nxLPXw9!l6nSvT4~*~PNtu2rQswl7QCYr<7_tl{79Q$!5&jG0 zpwp%D+Lg|pxGNOg>en8m^E0%$REAwD2!MhDD5The3`4I}a$L>np&>$YlHYln1b)%8 z1!h040yAP@#s=$}e?U=iX>q9_2Z{$k(Q|1*XxVdJIi8b=P5t46RQ`i2 zhKC`4ElEk<#dE^$Xe5D*84Amr_R`RRIci{zP`gXz^0m$$np;4j4HQ~e4J$*3EJ?|I z%!c_m1s0W6%M#jNNLs7;f?3(3r8#Gk_@eDSr_$HA%-&~|e99=b}id($|rTFCL}S$?fSq0@BiR-vEu z{CH}{L6hsmkzMNCnjqs@I)DCDqidtL@!)N{iU*zG6xWWQ>uCz6CsHF%n7T@FJcK6NKsEs`AqfhxCTy zza!s-$JIA0`6kkOQ)&zPG(X!GzA*VxdWSQWmqEF3>^H&FfxuM;(?ZJOy6-Gb9(-RW z>I_JQ8d?v>eOB#X9Lwm)>zph3S|qlL*c4{++Lww4#pGgcn>$9asPl`hB@B(*RT<|o zq^oY}op_r%;!MGRvsOH0EqiN}m3*_IK}p|cE7Pg4Xsvv~#M%CHF1}=C^}2i7^SiqY z&Q78~rvBARPS4iqP^rhWrlr-?WIaz+RL}m-TlIe*@O98hPXGyy8Y0JvGaIiP1UvU8MeC# zRo^)CQ*EgHiYq+?

Jk^IwowFrX->o;|1cQ!3ke_;$i`X0>Ez9;y^@Fe7#JY20ae zpUqGmJ4V0h7e*(dA{8~PbTO4`;jm`>s?S`a+l=|;Mxt+R{lw`r^*!mk5%DV8%iFjO zjVB7he&pzepl!TKT%wwSilwn8LMZAcC4Z^V*N`r8Wzll<&-d4FFWu>B`^WU_RR;1O zGyC1={T7OZk^Y}7UsS~0q~OO3xrNwX(U0DEv?*=koKQR~e*8F&i-WKB(cOW8>Z^R+ z$3R30__{rSR0BB%B1#DQK&pWp6I$Mt&w`EIMB0)9H$ZI3z$*}2a_|bomIAy|*p{Y< zVzZ8~gW1GjHjru{$HbO*!xAkXUK76G(JLei^`78caYIveW z1CVJT7$RZHoIT{monO^Y5PsK%pLT+|KpcSt0*L~W0i*;-Gmzgv=78(~!LbV%QaxIL z^$|WwA}8Tty<({GCegz8s($p!_2*JatS?rcM_(!kfPxGt1c74UQb7n5R6qd-3aSvG zxc1A96hmbwceOG-R8Aqu;0=FmTWs+>L(%C<5Nv#jY@ciQZu1E|_>u z3_P&`MZ}B0&jsXb`VMa*S2*N*)gL9L0h6H6L<=d9#uXl2WqyOl{ru+by%p(1eBFn) zfmc&3u};SqYr(PCUsRu%ZOgWI!NR5PgYfPi$nlv2{o zDzUUkOA8|1-7AW83)1jPN(<7pNH@~mCEdMo$KSp8kC`+3o#&Z(X3jaw?w*+soB4G6 z1#{J1YPE+xG8^cn6H3B?V;%bR<$M7m?(Y&$5)HgXUVJ{ea7kPl%7*ndYTB|NMwR?% zT59|`N}a7GW!1^Sa3iH9b<$MgXSnm@L@{%}uRy7IMUO>%gMZgl<^oHzL=@?la59~Z z$*7Q-kUErxMeE4GV5#Yfq2jp5&{?2r599-vFZa=E* z+gjFsysFVWon4$*mn6+AD^Ip7guhFtlJO83LSZ-1RY_<)6Ffksj^&IFVzsQOjIL(Y zXpSUx4B?tvCf>YYHOQ5UEEjFWnk!vq-TDL{hi8Q*Cs*lV4mHtB+5u=b$s4$8`P?h$37+=pIoWwx$$B-6fafY;-31hNSp1Z4HCRng62Did_%!MUGY^;!(S`;E zHYf&?bhE81A5>#~W8XQvVGPsrpREj)vwsFye7U%kj~S*1kPfieTn;7tD0+v4=HD|D z|G!HMG6xuC_zkp_=*bcRURm*m-Oa^*|ATRq5DI?rj?k{)6Ikl4HHPpE1#6t#@G{m+ z&JR=U(8ARrei)wSGt9$>w^Z^3enLk6`{{8F^G)TE%mV(4{-&*i+^ZM23nkGd6{>+oywgs*<13!A9X`o(yw^%jy%_##B-h!qit3 z6VF(yzDjL$eT1W~9EFLhizU$FiRFk|DKXVej7DQa9jggO?Es_7gMx=o6{ysmNl+K&q}|8xBl;zr+AE}De3M)thly+?Ql ztmNVsuN)M6lX9Ytml@s$F9}XoG`qiz3l74Aha&OHs86NbV*GYu48ZJJ4WwzKqWEjv= zmJBy#t7z>U%^nLCGy6SAX!e~uucJ*<$E9cztMlpV-3qIHYr~2^bB4#+Sal@lm#!>d zq74iA`s+1fbY1ffji2<>X7nX4AhbDai+FScw*4yFIy?+JD?cZ-4xqZ;ARl zY9_L!pVRPnlen#(f|J>2-J#XHX+?IpV57dzk|#`?eDO(f*%GVHoR&_B6y~Y!dmW34 z&7Y-)rGb~Ssz-i9#h>*`j4bbO8M)^D)pRa0~l37AJDJ^f>tS2)SuBdA44k%H;c%bJ~ z_m>WbOOM{M&&84+y~ng2C^UNCJ{Nyhfu<@>$_f&7C*`#jr1WvTu@z+SalC^SWdCt| zfE9#*I6gX-FXbDqqFPYyvsne*pj@t51(Tp$UdAyhChI7nQ5q&dzHpOQn1wK-Mqt#; z%ygqLeP(9KY;N;ZW@e*N*eo-%KS@07p<+Q*oKE0y?ygd5UX&X?e|uGgj*{3^IhMc1 
z?-{)sO$_1|Zr1*^?Y&>B{Q0B(MU-!9EBp|H0?(uy>U-DVqehs+k#ot;QwOny^PNvU zh{nOBSkq~7zDNMiIz6^Vm!c4EgwkR}J|A0F4GF7Ee(SYFT-o1T9+Hl5a7|(~8|zqh zG3QQka{$zXQCsM)MYLp3+`hVd*28W62jTv;JmAjDojKIr2(6V`z@kmT%;T}z>)q9T^y$y|wI>(nkovw4q`?Q>f-gK@RrzXiuM3&byq#2iA>LT<; zRZcw`8pQ%4(^%W*vAZHWA*&}prf5!QtA#ft$J>$*^|>_BWQkD^2y4bfqJ|wg>zeZO z->lodUvYjOWfLoH#4P>iN$H7_QOS^F{_6Emp|aMd3dGtWrj$y0yjZzyghTR2#CN?^)986+jr_IYPNBQ)L?4x0lY_G#56EgcngG z&}BPaYZ(Hnvl|+=w>m|}~QO3mDoFZ7iRA3a-~R$bWIQ_FARM~$N<5yw~I zDjEkd^Ge54F)0%Ctiu%IArPTT90aGAYt%(|;9aQcRCEf32Xw z#$1n;vi?x<;w7uUbUc(P?~b$*R!{;S?lZc_8cK}0RWB6^17kH!Pg@`$9Bn1T{uxX%(FYlt!BCP*t-e~Hz=XUe0HR^kmxu)uvb@mQk) zRLCe;#$f2QX7qLnYhp7h&585=4#0){ErL7 zl*h%WY!sBx0w6@h8q|^6!+qrJ?7Mq%D5nAzeI<-;x%gAAA}A2n#B*;)i3Kbys+I z@e{-IhP%k0UH1Z*$DXAh{;JVTz>6?9hFe(X z?3=P4?b6seA!$MmJ}spO5FjVRKQGtb9^OOn2Ryi1@QC<*crN+zbfH`ddw#YH7v1`d z7HK;1a5=_Ke$6lDwEH8pg&G^Z{sI4}U@q+=RNROPdF^L;L}7sC)RNTh@)41`dlxPC z_u*9p`qZ@PMj8j@6HA*am0i<7kW;=iO(s@*&B!rojnQa?DP4lm(eDz`wotG zhJRKucy%@ZYTaj3%R4ST4hiiOdBjQ!~ zg{)QCaq3eN?|Aoh-;n2B9tu6eCgjibWwNU1WwQ1R=N6^JgU`h@TX#H%d~oOU+DM&} zwl1Cggu6+X9oPn!4?L#=eM^iU9@N9By*3I-e;1b+e?HaTUm$IoT{mehuC;k!oMQ<+ zN;T#cF)1Ynl#YB{B<r(THaOq)g)gV4lcKg5JK%fZob90sX`w z1C$P$eFH*3-_!-llYw}14gg7oR|3ethytQxnn5&|hW9`= z3os>@^q{9pJ%AOt+y_{ZOPYV4|9~zyL7*qB1x4+qjK{o{eR8JqAwlpKD{D|xCiz#Eh#8XWsL z2$zUgO_({Jay2ZuV2)a;uq9it>Urt+??um)RLDw6@@{PCcXgslndFMYFA$ev4@b^y zw{@e?2rsDP0_kzE?~uN4hX17`&VFs#@8Lpa{8o@%whtHf1H4FK|J0LLzMTLFa-MO0 z34_tff%x`>VdU9ousGc^worZ^7>Gpps4ceYUqKmt7HR<_2+H-HH} z4e*~C*S0#WZW}JVeQNm~RsSI1^}-Th51`sWe_}geywO#YRt)SQ1bPudkTqe0Z1&&F zMaNgSJszJviktFO5I%&RC+uHdRB;E(2ol8 zfY;;c0JUNW1|O#1ys{;`52zmM-QZFg-v^YAc>pd6^N9j|Hs6BqG{8xJ%MDm2myO`n zdvJLkNdT)R9UvWYas9bS0r(KVq<`2GK-$Ru4d^!`Kl5HTs)LxU3_+t;{|3(rm{77~ z|3t+jT!0ob3*P0zJOB|Rs2X(ilNwmI6M^hKAZq+|1D8M$3ut+07cSe_55OuT z=U^&EN_jw~6quqjs|LdT%ztC&`Pcj$2=ffwCZzrxz^Nq(gyTgI!py{jvfUT~C<=-c zn09aj=^C_8`SV`DYgq!oNf{XfrYZm=#_RO*LlsUvi0SwK%2vl?5N2p8AlsOYwMftD zfN-X1pIH$#f~i^+0wY_w1??fhJ;H0I`g9i34SH2S1%7`Ij$F+Kz`F!*%`4{FHNbfZ 
zJJw#_>bMWa&Z`cr6!T`YCAR?|=8xl)%x!%P@HxdsrY9r8m7g3BF1&oHOS;)H65xaX z8DwXQpg|b8Mvl#vK3ff!5J1^19)Kv5(t)QP7(4H+f%vQk8(_^~9?=AZ>T-kVM0bJb zycVc*DmJ@)Jb!rlrMT##5EWWe@i>Jf!R)|F~n@#HU0k1hdfJFqLk%h5r zn*1?#AqjZRoJ(bes0QyGP5!jNz~3K#w*TMgH`d5=n$4I%(`;ZOnWI6g9RGqXI?((R zPzzu@Zxu-w@6v*J@AMm6~=8Q&?xE76KZ0OwJl1NfN>0T5`G`^hU`NARJcq8tE{rZ4e9 zW&(x}Hjk3-^BjQNpc??jG%fL1A+62s{r3-N4HvzZV+H++3)r~*!yZ}6$i64U|xwF!PGH;Th!V2Dd_YWAXUw8gZT^aDExe}zzX^_2KT8m z0#w$)sRCXjPn6ZG;0st#OAYR};goT}#rrx<0JY>(4awr)0K6&7KFLP`LkX7PtUhxD zH_F#BFaw%Wo}lE~|HW#H=N{DZ`5T6SA+2Tb*+BC<_)L~BZQ}8&@yH#Rc!T?>u!jO@ z*%8ZIDgaB7#ZSh74_qql0GCPt=qVfrlc2ec4Q%+pM--J0#UPHp>)#*))F#a@8lVCX zTrw(Tj)$_1quocYtQ1(l_u3xRCW2|uoY=~eY1F?5nz~^GFA3;CEk6LftrvktY$gJ; z_$M4B1sM%^`Zpw8=rI_2wJoz>r~Jlk3D@XD^moYW)66#3R=5YIwmii5+s#t?IYVVN zn(Z{4)91$d`zZ$x$FIjYSc?9BL72#aHN zq>+=Cr|NwEnm63j*8U0R!$ z&ovx-4%?V|&LSd^^i8+A??%5goFz+L&;p@edgKW^_bJkm5#YBWN5YD|Yf@Ky&NWLX zyS*c4vk*BG-nB(Km(5gQz0JISa}u`oIgar-r;N1VrI4>c3^#AE<-VsNPuAo1xoeie zg50zfSJ%9i+xMpJhs!ECzgG^Uf_H-{qiQHd!MaJuD@01ZAk~3`K!(>#{J#^g@%Mfi z+fGhr3p$>pl54g6n9N?aD(ze_(C;_#u_|>?6a0j*DXEsC{!n%OBb&UkqI%>)RSrMH zq}xh(rDQTd>s7nhGDXg39fRHJ(HCM5p8RRE@w0O)b5q@ow@ew?&<&Y9ZxhIwOR3hs zG#Y`$?4MR;9;!|Fu&`1!n?#S^sARoAqnp(q%U+8*x^S-w_-z!4KF7BqE(?H}f8U+` z_%x&XE_6dUyXA5sJH}Yhtc5d&VE*b0FZ*_xkQW-K>+!8Mm&dr_`ycj=7Gv>MrVDD+ z&Uam_RtRdSj=7I#L-~{=Jp{8u4-?sNi7nD zSfv zdbE-j^>(DhER}ahPCz0|5)$JFqen#!+<{r#h42T)&9wGJ-XYi5A%SS0;# z&@w7tH=y2sEV@9&+Yn$$4=nwGC3ZFta7~$6!ErhiR4{{zL{MSzPY447d+ghAK1Q?W zKraP)*FT2{pmPJE`Ckh;K!^ka9}osZy*jKCFprDX(J}+DYoZF856})L#)|Z~l<1SJ zxk{6iV|I+_^V`Ifn3JriN)x@iZ_xZE7aSaS```)$AkOfQCk(3KHC7+|o;jjY`afhr z{GP53-f}rG=6!>9g@H;OU@QrY@jjg}Xb>l0V&lP#jNjjY=CXr;v|n8lJp*&4VD!%f z%##LH0xEX&Mdj=rMQ0!t{gdc{*A4J`f(yK)fftMPHBlk)y=TuzfM+5w>jM+2!3NBv z!JKK~eSqd_;g09(u-`Iu-ix;ZS^_?BH3qH^WPmFraO(wbHo?G60Js%>M{b!E*bU9Q zxLz*93u$Km%k-2~UU*aPb8yaio>gZib&AT8k?)YdtR|$RO<6z>eCxeH*?D0I^{%$7z z5<`xy!7qF2dg4|pW;)<%CTiuJw@|OGBdGYb2|J4Ck=Kp$w)O||2PP8gt@_!UZ3*#t 
zmj+6r{`7BlECmbCYL%2yzAk>Nyv@%~m=o(}NBC5#uS9v}td=O`j9zgE_b=i-Y0Q6ne zK(nl^wtMr)&MbLO^|oNnIl2>;<_M!Rzicl0GDIjmTS}i-9b@nC*3x}S%x$77 z`ZJAnhF_YssXd2PhO+ip!}zuKu8VlgWT~uZ&Rm?b{-wcX6g`d9m=C?!`!ju)-+!9} zLPjh#ibgyaHDq=h`*5>jv(IhC`u{H7+H+-Vln`%2$yo_iNkd0_zDCx|t znZnL2D1%H zbKh>luqcn@2?b{?<}iR=snpz@nA62tSI{K*sDCRm>xduq+LV^U_bfT!*NXstRC2rA zB{|)(>{#y0X!`HlySP7|tEFmbqjK&7?`5 zSWs^=f{tX&9YZ5}W*;*Ib|~I=Uu8L}O8>$SjccA^1x;&$%3{!!NnA72P@BbLM{qoN zYD^3VM?#3Nj0Ef=xGu7U;FwB}W>uh4sKb`gnFbVvV@0C(2##rIXja8QAD*C@Q_#%e zKX>4Ul)nfTxHCgDde_`h0slpr0#R%;lj_UKV7I#pvmZ+cwy|Ob?#-kb@Xht;2ksGU zg9*u{>RBvFnt@Fcnmcax+Cgk_fd=VWjQz&wE93KNkH8ua2pm1Lu{DggGB@{_qh4KX zwhRbWPBQws>RaT!bp>54yt)Xk!ZD3K!vTFWg1$q5$OR_t3>OH@u|K{52O%clf1f!@ z8ia$Vp#KH*PX?OS1ARF#`O3`x6DWXS+Ovp^*-hq0eM_Uj=r6ocOL^#0`$Ed%IWD?f z@9IBdHE%RV9zEQ>R=bPv^}4ursMns^UL>W$^P8&9xlxN=aySccWuxX^0k!7s-7jY! z@3Ne=K67W+nV(FPf09e-Z0IiA5vbr@X`4jpS^JMzCVATHrVT{j!ye1&42XkbMoZh-to*W z6i$dcKiz?h5ynLuPs)o6#ReQqbhC0XF8HZ2Ae@?|Ob7dKI(W;szgiHDVHDU-w>XY! zp*?bTC=Tjt1EYlQlIX#EGDjUd)1NVD7#)AqGR7|la~N&EeQZo{qw6EFI>g{Du}RWI zbG)ribeZ{7a^6kOr!I+Xdj(sGIa85HST%{h?CVoW!zymc^npKom$@Ok|0xsc>L$?- zk@gq{g+Qc~h;-%S=nsTu2UNFLqIXBzjdXKE=CYNEF7rThyr4OElCav*_U0l5q7c=N z-=^Cutj|Z=2Pg7E+PCi&{%<`UCpe60FhoUiaU6M7h8_gj<2gQz?nw;a^Yk4QEdLAaD}5?UlumybxdGr`y42 zPQ*a|45amj?eV-4Txj{tFo)Ly*_P2_Qw~{{W|0Qg>3a!bzJf_5>!v+F7dnU>OStRk5T4(76Pt_I=0Hn>`Eoc^`#+3n3|hWx!ShUv0qG9uJ#xG6rP z9qw@X5Y!Lp>6G7|&SHkaM&rnZ5aGdn4v#I7mls3vn9H-1-W9me3G{T4%X=)Q-`~SU zf3#)3UFpzw0&?uO8u%a^WtJoq<&i2w&4AxNL|z|OL`V1Y<%r7M9|yx8lKhI!{H^RJ zwk&l3z}m) z^9f@*!YgAp0;!C^AV@L(%`TI0|IJm*3O>s!M@R^(iyI-wM=BlR&4(5DMpCT*aPF0Z zEi3K=!3?)_>Dw#4tv}nT3VvBJg z^F54T+*{LO;Nu+X!9fmVDEP>DY`l;{=z4LDrnz3?l@PM|`^PQ~T7Ovk-;TOjBV;GAj z9)_fMf`2PbyLeNmC>QZ>rAK}F^AS*#1Cj2!QL&`k?inxMY8|d^7aYmnv~9$m5`wud zl0I6ZrW1ElFtIhabGx{LA1)nWmNPUlw|hs>wYt``@2@6|w|xhBJ$G;}q5=)bG6l!% zDARl|5N(LD;V#mG0tP3bjLvXy%INH3KdE z)Zum>29to1YH86!B6NpPOEbNg0p5Cr8|S!O6XS(cKPSxJg^R3n@jPQy4k7aeH8SB_ z_Czz*8ADdsZlXcRT+x|zj&AA1yiTchf=08|QWdqJ0f(T&{rvjP(wsW_AAQaSWc7TK 
zPd+efIOOCPq@nz9SDl{NyvlbuU4H4adQSGK4ZX@{IkBuZ+_UsC4)NgIVS!^CVqhsa z4nd4raqR3_dgT4^#v>9*Ky_`vcOtw2McPwc(_n2I@Sh0dpi7q)Qv%LB2Lf9$uSW## zh7!i~a-+ek9>64YXzbrM7x7 z6_Q(E(N*%%lEicxv^THgX^LhT@)gx}`@L-}{*#bzNw_|_lJ-TIJf0iRikpYmOAfbp2}?)g{c^^A)E{zcGWv@fdj{v^5&KqU#I0Vjz){5PL4cJ$H4OD2T}d z(xYM=pyA=65$s zE_tY;_hj2UnwfIWvFvYW2npx^dRjT8b+ix0BcnNI6TGSv-Kt0p#u7Ruqdo=?|JV|$9&jRA>GQt%gzqN?-cQ_wS60@-BasHwOtRU zn!ch?CmV|HgO_U#&@KF8!&3&mZQXZMoKv4@EN;m3iFfta&#?pKLI__!@q4w3FnOq= zj=kZ-tjKs8_$KUKOEsg2tnmPn1Jy0o;~%Ppdk)cH8c(XLQ9||h`1kdo`g;5$b~txT zUp1*6oXX^Tv8a_??t&348B+of?(A1jK%=ImO3^Jsf z8S~w_xApzkxNbHVV*%L6Z-LjuS5_r_Cte@cdGT|d`+MF~D6CQNe(8DviFpsB?~jzo zT*qfQhCL@Yxq%&1w&g5M;Ox;-kQB)jO*puQJ)WrryFBwi5zZ1C)wUqj3u2C zJ;!P5o+XU#&0(;)FrHnZCrE^7BWmdEn~X%iS$^F2`a*Ma9DzUf8mTWMnsQUWpDGA#K#fI6)MvbyrO2GDJ zMOFV2t!9g)Ve7?0!Sy)@O=`YTv%gw>%{$B4^R@HyDqiT!$>MIdh>`!ux5KkX3*MNT zDl-Ejj!ypp>+qXoqHm0HI^-4}3o*_q-*qMgyO>-uF{aSNaK>kt(rOPDc39GJj?2_dogw!~A9(R$gv+|w{%@}%PHP;q3&>G7do z0H~vd0CyO0j{7xtob&#G@7t*spSxc)4jou6EM zysNz*s7aliRy|5{Nc}ll%QemMTdK@t z3196(?E<}+@3;Deo!Y(q_wNTM^}g=YHMCeAiinIZ_Im!5EV1YC-O||KGuoM~^sKk9 z@Z=a7o!_@o_q}8=9F!EAue_AlvpYz9(=qRhB6H1B6dv^7+84I6lD(xs~8wSUg#a1YkXFU9=o&W)}hlAyC-NSGdz@|t>a8oQ0Ym?p~@ zkjdegu^($1Y!?#{>Ly_SK!0F7Lc6pY?JY~(DNz~l@V2Mw*XGCJ7gQ;cT;Y?*WgAmDp2N0q?yPIJl3gx+ zw^cSPF>(X-2{x)ZS{iRZNb(4!yw-ot(eGgI>3&CciGQMs^1jbceTi!!a6vp^g@hy% z<2)V+#lf}ccyOP86$|3}DbD+M&-@({C}j#Ha)1=Ji3d6Ta=g-@f%PY#u1xF2EYVbA zQPf4ddmi3@C?}&rTPvuc?sMqk;qL~G4t^D%Fk6x94q@0PE#x#huD-hm`QDsRnj|bF zazGBY30eDGEfba$Ni;=B7xjs5ll_y_@e>8Z59Td=e3n!xwUGlxuuUF_YjT|TK#w7; z1R}-HS3s3A6**uJ+Z2Yl(uTRNipJIr_4whCY*D3LMh^JHHlf^Sao8^^313u_zo?{t zQOWwElKVxa(8~x#21-2$r5@YIT(Za#ikG90V3P#kM+4X-5p41?Y?2r@NdlWBg-t$z zO_IST$zhWeut`eTBo%Cu8a7G8XiW&&C4|@#L3W8CwvQpZk0G|i((K#B5L*(+E(yez z6tYVSv3&yBeIhMjO$ON|ixr57bI1Q1%OUzRfVVQ3w=$Zy@*8hu=5O-&{6Hi|!^ky1 zKMe;X(>1G}Ni-RK;)2FU^OTqjEGL8SOzU7%++MbBFDM3T_7w=)I?r4%Ol9NDv~Wa%Usy}Q%Xi2qd(DL~$c#h)m^-!V)5p!P90(5DU^y1x2d{2F6HyyN%!0sW&+ z!|mPNjrKbnL3h9Ltck9%Ayp8cR|>ujiR8n%qLw%p$=8mFus05)l{!-bx(7Mf4$+Q- 
znEUEi@bJUqf?cS2JtiXBWe1U&h*ayU@E-ayDt^g*rXc=jBb5tNOK$L4jr_!ade2P$ z-EZ=i|H4xV3nCjSx7i*rKDYm1Eq^*`3O^qFAk=*J`U%HKNb3XL8^+}S#zF!)F|`am zDFJ(vt=I4<43i8F5nc?@un@Yj(XtdXw1WBNg5p`Ld$)PcK-58D#5nb?r(ya1u$ABv z#1?{n&_WS+aZWY`)gR?W$-Gz+H?uw}*%cbKe3ty7v`ZzZAV2LAbM*YhGCb|*)r#{> zH{4N8nH+5ZhEe$y1GZ$?#wF$yP^T#e6lm0zP*%!NdWrlzeCk`=VixryA4}gh5;^np6Rp40 z+xs_>ROGKF4fkF83=I;|Db|%A`o122ha7$1&Jkb9q3Dq3dCEh#mLsExK3e;l{U-(^ zhd1}6YLxXR&@EFHVOSG85jNwUSrRM-&!O{uhB+N_F z2rE<#%gs>u&zRiHhQoW6c0|J#^~R*9z0^0cN-+(`Y$(40TXdub&d{CfU+5G6`?^@KwDxOeN$8J zb@YR2s?0@L@K$#KhoueLapM3jgbtB|b$_v(yNSs>(7VK(T@0mQpd6=|(6hDd2y8V4 zNvBFHo9|qXI26&I_%DVDe zic(Kp7b~Xw?VjXz?3!k;R9Y~sPzD_ro+fD-99|H6F_ELEUs=$7u2sGRiEob3CAjGQ zH9EW%!d=)xp~iAc=gnL9oBo$cyXu$EG|jg~CTnto8Ys&=R$(e(zPjyL-+;T#I{DZ& z4>J;XiY<&vRXk?d+Z#4A8JGeQwLpYeUJel{mpV;JC6JI>Q7A+uGmY&-(FMwM(sX2ieNY36s&A*TkmNs{e)wP zn&;U$Ygx9Emq#SW1gO$o{T^R`^)U1I=PkPvnv?mDH?K=_}!-;bj6f&jUm9hKdgQ=!LtaJ;UcN@k6w}B7 zK9~hH{eqY#x7m=7(?yBPt7%c{!na}ro) z^>?<~p5^h_|N166Bk|u}OMSG0E?=d3r`;CmZC6Up^I3~o&Z_Te^nqtf z#prU6Z1YOlcb}Xn+r+LzPHiMb`c#?`_rDi-(C4iYP;K_K4S%^ zz^R=z!woGjb^KE7I&%JQpe!NVQVguVRmD!o`R|BH0QOBL&ANAd%JV zxEJ!w*Z`pnO+s z^|DoG&S;;)1irT6lSljRkt#K(8F zFG5F)Lm8euMgPLd`1L`=^&yqPyczOIk6&a|IX#^kQB6GBpazk6jp(T(Zg0ntC+sCh zU7qwbpnZqV5#Ub&R2)^;llF~#OtZ0E@FHW$6y(H5R|Ep`SKOez zafjWGf?H&n(I*TjbFJj#=A6|QlWL_|>EixV2CGB^_}?F+;#$S}&k#BKL=ieh);1ru zi=5X#X&L&8F8Yzrxt78zQ2DbvryN^8#58rN~s$Lr#oc&A}4zf^M`rbHJwK$RB1TU`&;KDjX z2Q=Cd3l{y2`x25%(DPL}M#D2-E(7tOJOhng#I=An!&N{<)vJEp?a#VgwFD{624QqY z<3a)+d1kh?&+OkerYISvJ)S%Akx97H=&d3CGRpxeJ6EOg_KlU=Ik~JEk(W8=rMaE% zawsW}G4PeNMiZ!ZRnwSby;72X*{&5GGk%9Y z&I3vHiP`=>$lN!7FmD(Jt*A#D=>lrwHYWp z1qHEx1!hpt|5soI1)tS?l5F;GL{C%i@$}BUODTJq%?TMJcaHdhA0mDBMTQBl{rlsn zl{cY%PVBOlB2TgfXz@m<3BUXx0M)@VOyQOu>?-a+7pKKjr*{@w6Vl0+qU|H!4t;ws zS0*kxH17`Khjcet&WBFy%=jP?fJ6imF-W8@%rtOA!#8vAeb%)tr)v6N3AN}Gk6Fff5KG z6drMPO$FrwtrBR8G$b<36(~k#LeQ8fXlw^GRxcwR%1rc-DPEE}Na5xqGaNK>|0%C5 zUnp>30*! 
zXy6&JEdWC9zXtvZ96%@sLWhNMh(ywh50{@6cJJe1m`l0TVim0MUs?}RX}ckcYectm z?CQ|MZhw-$s~t+BtI)%e{WpiknNp%?3{^cki)ZZ~itV;wHd?6A3hI-W8?g6U@%FeG zy3KZ~^CHXdGALlDt$cHgzji0ZZc=vM9Ln)Z74e|Jnnc!mtq%Q1OrT7#L}xz+-UcU%NGfO9y@87&;W{UXoY5G3 zhqpc*acmBeakZ_DIt4Z@sbdr6v_thc)I%musy7i{3+zs2K{?BrMf%@GvqbUA2TD`c zbj14*o^OkPGH2zirr+x4Z4neJ&r_AUAYw|j#SZ=y1^aFMAg|CaHfHNoZ}KIM%}^U6TZR5 z)L)x9ugCnlhbH_G{&Pp-;)_;JPK}dCYf`WrcgGQ0|L9GqC*R=6T6I4DtnFP?qg7x9 zlzMQsi)xVOQuxzOFcsImT^cU@E6UR66ukOI&+Mx_$_lC%Z-0$!8vG(ZIe3<{pGl+t zw@F;TF)^g-;+RcvHvhItb^Hx9AczaBpE zBPbYAPIrw{&Q@R-th{qDYSEU1+MiWR_@-r1Va#Vsu&rDB&rNkn{`ui?F}XSeZi_WRa5}Le`69YrHyAaYTiQJ43|12kBP2 zK{};7MLJa)q@_zrB&3w?k`C!^>Fx%RknVTEUTPKVaOvcUXE9tk^c?S`4E5ExQr;*u z;a76S(L2D0n}Vi((3JU3O4+C0f$w;5*Lre((t*CgxU&36ZSb+nthS2Rv zV!fCdYhOw16sAukqHwDR_G$wQl`c{~81_5tzzt(i>;T1Q1EPueOZE~wHR%%^@Z6Q# zA=g5+DeEp$EhGb?N%%{O5<4yF6Q2=NZd|06DFz-v)xN|Wyim?)B>zER`h*T*HOBzD zJ=OCAM26aYyySs+wE^@aD~UNwp&a9{{0E=YC&uK`uB6qhb>a2pw?f>BVy|S>tjz+; zSY4#-kgMAV(92Pm%6Ne54+&RtYSycPWg;$8hFAml!RYKZ{{f}az)b@CIa;~4VTRhQ z$n}`(3sqc1#xQ#LLaimV^nH3=`jq8a$bSbbDE#>h`wfBMx=}7YOh?6@~DBl z70zg>g4#(xg=FL0Tk_dS2;4femh`5xz}w9uOV5miG>p8qeQUnjb-xxIr>j2E&YMh5 zG^2|p`i+O*nz-NO{81jCpDJ%GwG*ESAFxzP`}j?ZSqBugM_hkaI>~MOZJ4sKbbFxO zxi!86FOU%@+~75yt?2pmIvq+@s_swimOj$6DSqTwi1r)t1ot)f#@}qj3uPiM5A&n~nB-^Nrm<`C~^7 zoCyAtZ}!uB_^EC!Xp3D@n>jlVDA*t<1F#5r*%;flxf_D^wc&@o$+=jRO5T zFA~<{JT0(Kb1xV@Ga?@jtxfNB`oe}w|4hKHYltUc$Fqx_tflWi>B=vBnm56MH#u3s zOXi=zzHMtDK9wuv_smG6SIYGpuHjf}a2P~7jroO<^YllT{!@cvK77EEJGtj>C;Y~E zWDMd-g)wfcz&p%S*a1%K7-ySB-|4)w9k$BoBcqKIw%BeCyKO5DFCQ8DNP-mtmq(Ms zGtNuoou=jgl09^vPwW}#wyL!gYEQbagHa^d((7F0|#(Whxt1PI@ z8v$pxBDHB$-t@#UamC8ms#Z>aDq?$Nzn*Jsb$`>@9C-u3nv08YpxOz%eA>VbOkC50 zUb}f@;9gqaQJM5lqr+6LO3t{KOp?5FkG@%vEWI*~M!7fni*)&rjat>k;rO;kmkg1- zoC*1ZXc^4}WTD=w_6-MPNi{4{LL~d0*W0%(%bk`M{_y;j9!8s#hzq$BMB)A7DD|zM z8!PLVYOzRsKk}F~Ty@)6H!R+wEWH@#M6S;e$0J$7J@76$lC$Z&9<8bHD^{UDnh2E~_YAp7|SWX_m)H?wE|$ia|Z z&>AlJG?_UA!gS99_+e%@6}T`?3`jRdT^qJDz>NrNYkj{J0>>i}0Jnt#Y-VQ92lOMY 
z;mCX1WC8x{D1dAZkY)SI8K7Yi5oAdKjneW-+wRm=0$#a_f+`_^jTB!*0R0#30E{zj zLl-Y>zXnmKg&`2XxOoFihud#qkWB&x5z;UbtHn2hvL@92s0kDed3b0V&3m_BYI)DTDd4OQk&E5c= zxiQXeiw7M=QP*L*zzX&tu-6CR zp_wgYaQ0&sWIK}}OVt1Jus>G;&g%cosC;|N#M&CLsOk&|Lk~~jIrYDb$4H;3zzYVH zAufvg$omh4bR%&#UCjWF|L_OMiJUGKN3^^6$sZHa}4G-rVRmLl%_w)Jl~T52^X@> zcJC5IaOMrB;6CAjsdl^;(4b$dfui}VFr`P}@zW0{)Nl}ue#UiPEO1xgA1WCQt`#wO zwf=)9H|Xm%!f74Q0(hRM3UN@hAOKk%DagtJDC*q?<~4Pv1^u=SIN{g`+z}XizgDuQ z2{ao1TQAfg^88i52RK`124>ZHz?lfT-zRb0f0X1t_whO=n6eW+D2ZP~vhcSV0QGe)3D`iX03iBv3B8t;53qY2$biAyWA_eF z)V#vQK}hzPO@K-MQE;dga|AG0CE%C4AHW7V)dwJ(FCfd%JHmm2E z*8wy>`T*XGfzT>vwHABfW%D_uq4(DikY;a$SDpSZUMC0-#1{jU0wm*seq2`R`ynY z1d+!tAAp;e>+s@?G&r=!kpm5YJ^KX4PNO6q>3#`z1`Pw?c(Kv}V~0`)ECxRSV=utQ zkokZQI741)z;J4()185p`ehi5eDhi+PwWv;W?xJ}N$wDIq5a3JzCHojr1L*Y5_90s zm!Fh?S!-nK^?SJnSYeeaaFN1gEAqq!!F)1+yDI)TVf*tlusA$n!T^wL1+J^yAvl|= z7=sJ@gC5KVao?D%&piV0A+H~;)|BEOAjuDAqvqeplS+WXMSox@Nfk_?$&&G=05Ba5 z2EOB~Np#d?3N(0?0v}36z<}*)K=z0P*ub{{=hG_!6CwKoAahsXrf?>g5i?=30;Z}p zMI8Q!9t^!V-VijF`(n~jf&JlV3dX2B4-{JXfSDiO1@;N;fNGI5_6fCgu-0_0fkY_) zm3v!Yzza8dpw$E!SnHFH^8^Gd9ib5G8Q9jh8(?4jbU!euozDQv;~)v>#07VRaO^ig zU7swt7(xI({6H2d8ojUr?zA8F0I6E9(~#3v4n8SvnLOlc8+5_sEmr~`D1Xd7y8)eJ zIwYy&eXGG$JbXm*qBz_80F2lHPp#M)? 
zKOSQ$xCr0C2U2gb5|FJ0cdy$aAeLUNA-4(G$9F4WGn*Cyp_)Q4Hv<1wh^ryq&96IR zPz(h?`{sKsnx&*4Kw?-2%$YqM&@cv8NJTA}9^FwOU~mO{_7CMX_;-ie0k!L3s0ap3qMY zU?$_@dMat0{=1NVpOL$%id0iM?VF*>XrA#)bd35A^Dl93{LjIg z1h??V1{gEq)J0>}UeS8M(Rv`zdLYqypkTZylGaF;$_VNv*e4dU zq>>XY^$x?02#ECj6Zs581pkS^7bCuoby09=?WGJ4BG^|U$QNBmG^64=sHb?{8D%fj znD;tXD&s3?a{AYl*<-vJd?d8=8xmj1>Xm< z_);(ZZr9<}7?ch3AmMQz*~oFm`@NV8Q4#%n?s@BIFQo=@b>9!zdCF>?iW#n8;bH&X zQ*eW*G?Sj9RsAs{at2C6FW_8PU~#Sfp}gpq7r&L6NlW3PK3;*-RA_}5wlZClj_Bh( z*K`u=2Xi5}sb0Z4h^A zB@*CHQ)!A`sQ+!``fEY~-4DEU70-9W6^(=Y_;(Fa9k*d|Fdt2Q?G5)A73Ek-nF!vS zR4f{1ju@188+k?Jk(*kC3JD}XoknUJ1#B`krRp6nwqVYDzf`OpzH32T8)5(NVEcO+V$OIzRZpp2`rXN z^el*jaw>~Q@q~8hfi8nq8g}?Mz-^!q{<-v%Zw=NJ>9Vlp2i}aI1-hgqcCNai#XeaBePG6RZ z5%uB`Q`y6>LO`y0rSRdPnQ`x)ku(VuC=M)O-b1B|owEU<@^QTAMFzT_yo%pG zmAHkUvs;>n~|nWOyTK z24Xv2Nql~cVdviB^oV;wEl|QNs$f3Q|J4=)mx%OyyBl%bdjOipk+6b&O=p52dYC{Y zDU}$L3yA0%BmNblYS+IB7%zsrb2JN(Tss2c50qa3n@cFr1L+7A}W?G%#O!?xf z5R_BP81rTgH3s~}Ja)4#HnJ2Z>8zf(^5HDX>^@^N*~INIig9WwR>u=#n64q9F2KAf z%NnMe5dC_c#|5IDnI`e1o0-;&&4D4;dZ`R(PzUqJV3Y1kPwc!5(op{+^ZA~%-@GWP zh3hOiE@zEX`}6&T8TX6_PGU|w>?P^`c7?iVqInS~KAW^(n*-VvZ-sf0fXBBS*vBfY zD}jj}=g%7IX4to!q>f{k>Ox8PQ2%B?;Xn>;=6i$<_4$CStD$&^Q8^YLs^NrLnLs}! 
zz=PQ{P5APRPv@Ej=ijgL#_KB++^-IV?(25X3ktkq=1w?0V1pJmvE?ph9Xb{+*oL$2 z3B20xe!ZD7y43jodcn$=14Cr8VrVs$uu0+rn%xv_q=dNK;K#1ZKL{=@>0c(J9!W{> zT)EG!e!aiG^di0E;W^*6+!`Mn$f7=rYDe7EFDt9xJPGpbTbF`G0w8Ia#fvobgbg$qP(Y=1P_<=Y_QJlE59VrKR@y!*-lmUv7x9dCu z%~ou%uqYBjD*P<_v-O^75#^^WM;xf;8$|QO$$$=eGIqBWYUCY@sZ6lNzYE;M@`)$z zKIRR$d>=48XDJjwl>`~Tr!o6u)@?>x69#3j{adxe#n7M{%nE0mv>*8guDy+4!3kgg zN-_0<0E&aQ`RVMb@q=mHBAP7OTfKF))_m7$$J^DvIW=1`>M+I!RWfcG3Ny}sE z$bw>`!pY-+bCKPCbGMFzzagGykW~Tou8&A{$Ip)|7jqUzAML%Suc8TO?Z{4M(zg`K z+@=*C9m=mXkBa*W3?1eiFC3@Z#ma=*TU>rMR=>-<+9&RbvcnwKt7K%KpX#f3+cKDx zA2zo7POI)Z9k8an8GJXleoX)RT1PBzCCO26i2ArtTkoJsXXUiywN2*WZ|;B?9j}ne zx}m9Nx!DTC^4Z*b+u5fI5nEoEMyY;%$!uLWeb1ExvAn&-m)AZ4ufobWH}0h>{x5sL zoFk#isT9;O-QZsdyblV>psa5HbOKrt+B-fSVBW$Ifan8eDZ@>Oju;ax7C#|}a2J6~#w;^?N{;F1_Pd?){a#|65G*9vdsDh(2xhx-RD56Mn+`!FSFwgN)Oo?#MOs-~_EDSbX!^g4B+!brRx$ncVk=}&>{ z`0j7S<>%3FF2{gKiysuH*fr4m*4%}QTaqM&PJ#x*U6HODE}Sb$h3q+ASlnRG0Fu;Z zghEI!AzpagV2BqN9!8uiG7+CeFDPFLAP8;zH3p4ISjVf{;XM z1oGa#dvEUi-aPQVdE9&RocHEG-k^7#c-7NB^hGB?$WOpo5B0{b?=2K(Wv5e&J(E9olvDKMDy3%a@P1W&2^rgOI zQujxxjgJxc(Y2ANPe>y!Zx(-B(SKR%v+&1HxbU(gC1&_1Y3ko;FGAV?O*vR?)MGg5 z-5Z{B#(tvZE*HBE)b;1;4);I&TAu%R<)5sb&8hjHX)pS}pL~*bh2p3WJz4&wnu_;1 zX;~LM0}yQTrTW4Cde+7I{Jfa*lj+zM^kF+-=iuq@G^n)&JJ;}l;?aNb&C1z)^z7UAp(s|AtFdJ5w8Wd zU%&J&DcpUHhX4hKMMrr?m>rITu+R0L=4}voR9Bq!4)WgX4=to@6{vKAIpQmszIxB; z%H5&qZmRCnRIYk?Uix%IxaKs;I#yy{$!qeLBUsyKJ9GDv<;D6!DgRP|Zs|pFn|G5; zn;gvJkk8exWTv%l`q2BBa{IS|>GY^u5_T^0gM@1va}KYzqg}nsOd+#(?J6y=%*~~v z*^P_qbQ|kT-2(wznLukRZSmy&DEikVd!_TN8#$?O%$1CMU$c_``|sroW-b+FYoZx} z;wIgikZjvPMRt};x7S%tC--BzPL{m-3Z0^6B0Vb@4(rJ38Qric^bo$BZ(n>dG8E)q zr9}Bk#p&jJi)h8j(A(1fyG2te#Yo~sC_Fe&IuR~s5~{|3&hoMQpGY(i@%$%3TPa2P zsTIQw;hbd#PhuZyOS?$=R()=}{zsaHiR>D6X9nE`OHRZ{X)WJytCFAWj&|-5cjdEO zP#|v^DQ(>2uGF08xL^sL+!>Qu3lS#p!r#~0QY|uz6UZ5p1r;Qq!W@m*jrW{kW^o`* zBaGHjDkHjeaQfKO3bxn*i+^_6aj|iFy5Rr&cYetDXPA$V&-My*hP4mX)rVeWRVTHl zmm~=}bCw^rYqYKCM3z79-+PA&L@cq4vE=HKc?|Dx|NUXow2Nn$5ZmKDD>qng>&Cx7 
zJIr1^JJ6v~nPLZmml~2QI1uH}3fF!zxBOs`ab>1x;$6@*dLUapMOW_ciyx1l#Cx+Q z_Ay2yAgwUKY<0LiezZkOcC|3u*3Q$*SSs(T^yq6+TiS-WV}0A;_oQd3eanaNc+&U? zJ;F*Gjr~{HgdgF=GXZJF`_KeZF+_Q!35n1tJl#|) zKi#Or-b3F-)=~nlqege2T$n%%71=bW?|<5 zn}=bVytUuxbVugsaQT-VrD|%Mycf9Y8zx$Q+THX1#{BJ2_0Pi|Jl^l? z&Z_5a`Wmi2u9alo8g={EM1-lVN zEK@l2Y5NT;ZIA5ydJNV)7Aa}!lDT0|TGz_Xv)0 z{#U(9LyK5n@TSq{f6^Y`qOcYc)qWqx$H6a8F|XKjbH6nd4sB8Yzs^Gg8Z)tRIpwPV zD>n@NPnn6#XM}&<91`Pxs~XFF{soMF1V=@1yx-&I4q3J^N1L)Rw{}R1%lXVfEA7@2 zg*EDYUSp#tcrl)p()i+{i4QfJr+ZlP#ViA-wY$|i_9N6HX zlC1f&mOic$y+MA}-CDr%)7AH~rVpyLZ5e!roUPo$)E&a+hdJyE|Bx(P4`D8x_)EW+0jL1;vq376pY4i^-ISxn#c({q>EH!F!ig z=kPmQ_o+@yH+&R(Xo1_aVTXI`NGEY2Vb}Px8daKkn!fNmy1=ht#gxjp}puXxeI zaQZn7vOz8C_npAnnURj1=DCL!L~t9KDvn|*X(Igi+%^F9yB*Qn&XKj)g|~h95YDdOBA18H_p+}D@)?^Ph-79sA@w`K^>y*D z-Qac|cWmEX5%GwaBrV|_4l?)m}!C=B7(jei==CYP}xACca13nZU2_qPd z0Oc7I76aSf!Y71KNn{K}Oac^QCM+%u67!-D*@4|}r7aL~2v8`QutYUTe6zXWCgm5N zBZUT{VFcq5pwKa4DQS=-XLAWo7B0Xche~2#AQBRwu(FU>0XvfKHiEi0kVE0IFoH=4 zP`Lk!@FfpdaW>v}Hjvd_Yuq)Kf&&6GO`w4HlVkBgrB5)_m$eFQZ(m?QKHfy5P#f!f zK+i=C>(NPbpZD#)+Grf1I-&f{t161LiD6s)M8`YdqyBn+ZiD>yz7oTpb3$6Crowqp{6W-L=DdMI^PY0~qR9s4DDBXv( z_iu-dcN4_ee*aEoELwEC_uB04r>pDN)y|(fEsPAxd(`cdiTZ~Y&I_=-y6e9uY;HK> z)55k^){VG2c6X;&L@z47>U?CgbKd_kHK&ba^VVn5yRcF}-I20pPGH%ZEWNedzHwx7 zvzmT#$@=5V$b+&(orYTrvKx5pvJ#yveXV(AeX(Z-)?&dPHf>S|c5Qc|2GoVz2J7!> zYun9E44+E&YGL%!e6Vp8ee(5LgH&HiM|)jw|1S3YKfgqxqQ}Aijm%5%J~GZ z{p>D|=gVJoWew$2MAp4a$g(!j5;U`SrhQ|3XY65Ds&2e}DlM*WNN%W%$GVC^bM-Z! 
zpPB`8$(B{&=)PIlx|@9HD34Y6qqT-w^!eRbPTneI)0Et%zHK`!&lB~k7_t8vOVRvqewViRVZ$H!?8ueg{-y$1IA%;Urt=4~7%N}mv;7$k zh1)TceeIHP>B<^^mnk>Yu&q(P?eA^sl-}c>Y=s>4!ui{#vD5O0E(3p?rN>4dOC)Ul zP4=h9jAH3OBvr5O5^oU*C^Zp=5pJ2>OYHkJ=t_1Nqie4H)Iq+xFjROFU=YS4S(c^} zY%A!=p3(4fGC>S`fp@i)m>EI3iyw`LP|K1n`z?ncem541l~nTJ^E943Nq#HKV4fEvPEHIV zDL7*xTz|SEMR=&w(chtjgu?N>04H)Gpgl^WD^??g`S%DK4}-bCrP{##NOtU1isFj{ zo;;~JE6dP{ek@G=tQ4Z^S3Df%ewHRcpc4p$0|DBe$9I;W%f_*=6H>^y#d-LF0te8( z0on)uDUb#V!azGNP(TF~yaEdRfdE|GRSG%{P#^;Yz5oHdgcKUkE~jM~4Vs@zcF_Te z*Fd5MNPG@28&|>C&mv{yM&wZkDzbr$3=oI|0z9%nAOZ*wri75Pvhe5vAF2i(N6bO{ zY?uNCoj^edCh3M_MbIr`AE(2cXKRtmkMisK!geP2m56I>-LL9< z6gZ5++tqF|o)W1$xVChUr7zrc`CfbR+DzLUucm>{?UQ@a?-TEfbA=1izvyMW9!F#t=jt&)5j|s zXt|>}p`$VhGGVDS-R;nwggKt5z=%_!ta6r@mhq&PyV|M57w(NdX{(WoEgMB0;*dCb z%+djbDcjQf6n5Srt<=u(;;wuf0u=58K{@1`b_a<(h~HP3Wi?VT7(pI40gu6^I^P!| zYnXh2Io)3VNLfQIlvi13H3^G3(Sbp8IG7zRP*2aOy&T~ZV(!d=Jwn$j{}Ydfpf9$I zXKtppd%F-J?Do#`zt@W zPen=a73_z_kDCEs!}$Pe&Pv^z044+Ip<)C|@O}vixLosxNv~4uQ)h0z7tJiaxzQ^t zI$4zO3PtgIlSQxe^I832&>Gpz`$7vi%DRcZykBrH2e}HSUY9bh@7!^Jaw;mLHFXqH z{>@C-(*1^IlH5`-u-mT3+_Ewzv7wRVW%1%c@0_Sl`9i8yC9&bgWvVhk1N&fXK3|3L zTh2D5_Kya28aSrHF!+f;bI!JBTNl*rz6;j;7)10wPf2C^osYP)1qanWJC+?8;N4sP zS1{8AQ#`1H^$Mybe(LbLc{P>>wYT_7-YiZ@8Ph-IcD|T;f93ZSFDz`s?4i_AHuLIK!{XHyZYoY<`$?@P6l^AcmvbWftH-MSJP*T)Fowe+F2H+6>CAK9cyHHL zomnBuQ6x;^`D)&csD4OjIX)PwZWg0jUf4|F1hOl_+T z;)50+d3c$aIzBx2j8Y;~J9&p(86pGz0_1&JQo!lpETRmy-WDBrg04 zu5-5go_I`KyL78xL1tgMW3c)uB9)3gy;iKnuTarR*APD~ed4l&N$;C&`8?e!`#aU2 zcPsHOm#K9(+jWUE+7IP6Erohhg~qd%Y=5l=4;$23v5#!`roB@5ORe50*RSi@tC`h~YrBZ#TQS8*c`VS12f~fP%jN@426B{Sq(6 zol-Nm$<(H6eDiGX@4ZuhP_s?)J}Es?oX7mw<7BZ;T?4l!qkNltc5c<_hVJnfoRTjD z_7)Yu11CF7&-hkJ(3&FL#D;atuJp$?smIQVM7-EMein|O6X(!`lk~J9s6*Ka)*mS1?RYdn7x5~ zisBE4kqHWz*&Eo|8@SmUc-b5HMI)>dU7QkKJWf?y>u|^?aL5qclLaaK1s;eC)B2H1 zTvF~`W>8eOFET+ZGQlG@&o}DD64yPF+0`UH$((W1I$( zR@oai*&BA*8}`{7j@cW|$&QrPm`10<8mA&NjJ3!dw^aLl_Ya+rL>HoD7pi0zreqhc zWEZ|<7qMg)`Oh743B!#b`Jf>6pdj6#Ak&~AtDqpqT<#MjWJpF#3ADuo1tkRqWd#Kl z(%7gt)^C4}9u 
zgO9rcNcYcO{|S4gkQUUJ3Hdq&?RaFy4BPtgnRuA)k~SV%u>>K9bk{iOzx`=;piAxk zpbPupU3RYV6tHcU04??yeD<5Hv(A_eNP`6CE$9j?f3(-#t?OojCvbExAICi$VIKY` z`C^CPjz>*Yj(IsRW6L|5S7^O^PQ~<)dN|o(n+?^szTU_U)lW%bo`hoE2ddivPVO%8 z75(JP@SEee$*|dmFV=0Xg4h9l)(PB z|Lhy}`H{Mt-eeH=kaF07QzKY5P&dQz*P@uGqUM+LTUN}ZxrOe2{#vxSYoed}LXqEw z@s*7qL=wF%I_d?N6K4g*Z}s0mLuU5DQ>Zi8=S0>4xO#^qUv3?KsfFF&U25(T-%6eR z(tSQPUZrKdYUQ2zOk`uB_B|@)l|A(>_aV9L;cenRMAiIV;KuRs>s5fsE7`}Jv9tA7 zAD+Z&_11RZ-$>9JOg!LpIN}<*lfMvGN~#!Enzf*7O?bRH_1(U!eDxMx_6~A5GdO8v$;H2M{I~O{fu5)_0LH5teROtkb9{TBSbLDTN@$R&^VhmNC z5WW0Oegj1}A8yU`Mm6X_9o4Yu{+>K**sk#fL^f=PQ|DuLAs+e!H4JO;QeVZK#+!Hy zA&C2*Sdd0U_jFhK?XFuR%y_;MGrutv61y~@e<DW8dXjjV8DEmL>hupSzRo-Qs&~*n_txu^4n`zZ1~3 z3trdm4RbE{O3)p0{0I|p46N@ObTjG^oBqymFp?^VscUD%I6mKq{h;tClSd^_X<*i~ zmy|tIACb)jo?5R}q*ve9mQl}FESRlb-9Yd0$czt0yl+S*jx)o%sM9xfNl|E1zo3s_ zRnW-bBx9pGF~Umbw#lw~$`xARwScFpFs__9(VS)7Ep@GDu%I?mOF(RBu%>T=-au-B zE-u_Pb?@S++*|T0aiE8m&%RGyf2l%YvUd3Nr*)sK^^(VN_Fn8xN%;B2+{dW0qCKs) zREDK(5!Fjvaoc7scE>%J`?4Pg!7Ygn6ruVP(`b-Y8bU3?brWd0%U9C)2XwxThM?bP+X z;gtNMEjQ78wK;?Fyh2Yv{iThg*`U(nP<(l_?I1djp-aL8o8Kyj_5IK=n-`(G^*Ngv z`<=&*&3)PwJ7P4>ELQqUIUZ~6TuUeaRLgf4=qRiSlQ9XkYdWjH9 zfEdb)7^;OB`cYcfj2RWf1{I?i6=M$-gBcCO1`VV5%g_|wIG3`bR$^jyY-)CFW_E0D zcI=Pr*plqnitO0x?AUtGruW5|7<-r)%vcyUSQy1o>==n-*|AeaY~_+K>R#)62TMf+ zOQi-&l>|#Q2TKhFODzRU?FUQU2L~a43c}vFjmS5Y?ZlDjdZv-W{watro7)EtIT8*z z4Gy^!4!H#mc^D3P84mdX4*3BN8516vyi4 z6`4jAxkeR*Mipf%2KNCx@&i0FCIT`!0x~NCvIqjQa(DjwdL)dwkfB3#0yd0*I;B!#5&JhX;G_uKhM0uNxdouRAn`S8jhQt(qT< z@BG+hDNSRqD8^ZMl%N!R{3&d`V^b%579{NXSIqUIigG_TlW{c1ZMV2bia*(K0Y|%m zpfJsFEyWI}Vb2jq{Jy>EYFp2>w4&OvV~w8-yejTBayHYk$j<& zyvKd;+5nZTBgSgfIv##blPmT;EN&x%t2+x#sH4sYL@6z} z`tPE%M5oHWQajFJ8x$Sm(WUtGebrzTbmwA3p)2)Sv?%#z+n~d^YF(-3Kc$dSy{^3a zAcgF5M@wD8LzKj0#gOq@x#uML!Ma_*ir!#kv4nF(=$+k2(~#;;yF>qs z-?sivD@N(8SEA(3qX>2JL)zvjV)RZ4>KI88Rc%R)x0AoeRqWZ<*$%5r`Ut2+9qBIE z!@aHkF*F?M19ojNsTol&Y;IJvBkEm8lPVewN~#(S%PJZk6X1LRq;3@rfhmyIz`2I124f2PzFZOdzHl=7empRe z0-T%6AK^jUCj^}3*9>Xmw0pAXE1O~{99h_W*TQaYbjOW<3nndkOncjCc41aPjP-@Td 
za2M+{R2nd#7fWV2BDve#+rLamV!)yf-mbSN{`y-WDdsT&{wvo23SZZTZ1h{MX$h`I z4N0!XPua29AA`E4QG&XvF@m~C5WC)1hrg{V1*tClZEPkFTDSz4-m(mr9z0=u&|3|D zJgg~K=(!{sibon63cAhoZ zmegCclTE%Lk)FZykj?XZ_p?$mxI?Hl4rTE_$n=M#0&}i}HyLj}(d)O>5lmI35ll|W z;Y?g>G6V|hS)aetVtlZm!5~o7zsEx^(Uu9^R*?w|ieNg84ri)D5;IOOpntu=NBDZ9 zfhD)|2_!R+SQ&CVXTh-z9AmU(0+m%|0-HejB`Al3#H*P&CKG56YJtZDw@?+|?XlsU z_;Bbz0G@vHVbqB4s^LD=CJY_?Ie1eG4YvqD4@HkLkoX;^_N--VR`F2;e~CLOpK7Y} zamjUtpEl(bOL6X_ZILhlPTfkbzp*TBe`mQ5yC~t%Zs&0mYBQd_>Oz0hfGwvzGI;f4 zp~+Cy`QS%NGj57mL{_@vRNsMWv*a6Xu^{hc@4oHgXB5(^;qa?kZHFIA@|Q-n6a}QT z2G#eAG_U8iuiCRU3OLvcl$HncHt0mc8CCa_yI8^OygyaQfJ?GSxjor1P8L-Qv87Q@(8F1Cb+qx@3J7+0IJGaf2 z@~7SUNx?WEB0J=g@%%&tUvkZzP;w0&Uo!KxY;41tOl-rfOsv2tNc}RgNscrF(`jhu zKR|!)WVG{hEhMiThM-mlmLSoLi0750%okWet->rpt<9h&6x4i=cs|}G6YGI5jw=3Q zKqQ}Lpe7aVyaMQOPeD7!0Xm34`4vCj){z$8mY+4vfMGVxz%Vb~7B=v+P5^I<9{9-( z?ED5AzeYSi3Ft{0>bx;}I!!yde022Gv2{?pUS5Kxhv)w!?213!5gtM3;CGeBAwtyy z;&crix4FI2*<>2shP3XAIMjSHftv%n%qRT{x5lq){&oU+JrQVTM?sc-!hejuMk5}O zfd9p~^{`VWdWKuypcrj}gGI(tFv4s#8Fp507GE$81;nF3oPM#LVz9Hr9 z!25ZQax64-gtxv*b<-dM@hkfY7T@RPj{={mRe9^%%lPHWY;B@MDf;V3*E<*7W&fM! 
z$SZrz6TQ3SQ8DhOQ1d(Yamb01w!$T&WOXOk_}Y0=oMO9KtLej`%TlkK-)YLv<d$s5SMihX^)=31Nq&d5Orp_4I>1lryj2SRX9nYBgT5k&pKVGJfw3 z6c%&dAV?8(!Z>?Q{--za;p~BHE*x?BZ#ZHLX*eQnRBzxYNELFMox&hR$Zbl5#tEK^ z&^m9>gMaR3m=v4U`6HRZ;)=8EOeQV+Y9@ z7(@id{*$NBn^T||aP1Ae655m?j~1-L0=g1taau?5P>*RXgw!cK7ZoV1H@_f?^E^XU z1IHwia70}&5Ax*Uh}dBKT0l=?oS+kdJjKGK0>wfaEzTw1GgLX3-oV!s&e`P57dV>| z^uQ1$p)Zn9tQR^6CJ3fSC|Xd1{~4;opD&0pK+`zTIuEoqfMX$9IHDD(n+GbS!A!pq z+?2=w(?KmyLGqps=WqxQ)mCV;^9LA&YqVhO-J7##{Fg9Jk=F^;HFeCdr><4oI=GmK z0v7c@rD%FBnQqAXo)LcGtS^IMm)_5Nt#h9m>jpeti@=3`ges5A&(|GSpx?W?wXl-$DWHAiba%Ia`X6|W6xv2j=tu<9^Zu-y8D z$S6=*u#F5}B2v4KKbJA$|Dvi+mTvTlXQpobZIP~fcvn*|dcj4o*wDSMyDRp&W)lh{ zsvi0Ao`bW#fYPG%yY?wJ7srC-Dv|;pwS3Kl|FSQ?DD!(-hLW@*7?8J$%<$dLIGCSa zv00q@X?ATmp5$MObCQ#Za}8msP#jIy^^@Sf_~H^I!3plcL-0T#cz{4~Slky`Tm!)!65QPhvJl)McyMca_34Kx!zM1LT^pN0Bt>Ylhcn)~Af@c> zBiC)R{!6WV&o_bCk2e{+)tH5-FK@)44dq1YedLxbCeh>$4o+D$LjUFf;=3*Z8Y+r z8h5svf_AD@VUye(A0-zp`hEi82gRRQ6k714LignUT&G(n*Icb2Xxay z5&l4p*XQ>E1!BUh{pZC-=P>$4XCh$ZxP5+9fGGpk_bZ;R&#z5nqf`51GJBuj3TK~R z!H+(_yAK=6$-uOlAj*diXTslp!$fqpX3-oJ@>=m%141wMZZB+`Pu zA&K{9SZ_m89Z1+5FfnFGPDfz#I0~?|&T`_TuC%O+LwZ5k61Ob~StAE>s{+Dgyw|HF zzR3bo8Pz4a@w1^np(MFP9`b-GD=ilRLK;n?8zaEEUp~{xk~#f}*jM@!1K>Ou04JHs zg8n4q3(yBfe+Y0?B}^w#z_Gu85&8l_2!SB@A2uX)fpIh?H2?o!c&bgIwf$!k*K{t1 zUyhdUYCB6Uz{SDM*$1h}9q5|Cb6Mk0TjkMvG^(xAG1EmGfeX&roh0cjnxR(;eFlVXSTLbcT*if`=JlF<UT7$2?>xr-Zp3$TAh2n-(2!5vre^qUK3weR-YLL{isPvDjJJ)Y+;l$J z@1CoOweIluTfZhezcggr?7&#o&p3mW%2P=uMLMaLEMqFVig~;b=Cr3(rxf$aUOt?D zW&x32t#p@{LnFhsYza{R-1iGNsdqQlJ1gWQ);TNFMRCjBkM}<%`K)O#_wnu})9vre z`}XhcW)1z+K12N~FZ}#Wnyg2nd=4(zw)hC}r+3#POji%f>74ijyc}L%8Yu2GAX3N4 zwAz_-Xab0(w@Ud`{}pXUZ&inMy~MHlJN{4qVDHZtTulOV8yj7WW_D71S}ZaS^-=~h00#Qgz)2AC?)mKwn3aU-m-HZ>2@`ejdyk8NPayQEPv{)MJk@XC)ZzENv!n|T z9Ff69Q3u>`=LThg&`)yNsq094etTi}{Kf+u*aNl?SPNj!fZYJjr;F#u%ENegOr(MT z1uJf}CJ0>=NErnfR34D8BAV$J31H;N9}-D{bP$BbIzb1Y@a^LrUCCf;{SToAjJ=$T zoN-_u#3gXG<yG zZ0SQ^xa%frc}`+J%Abohn%Ee{yWtmVDOXOS;*3r*Vr}}q5{fo-B63p87MLip!N@$$ 
z@okX(wSDJ}ps1G0!I!w&_)A%Sl-^V(Fj{|w57d?(V(tCQ2-V$uCHva2Gn>R2B%F@lFhL(a)*|l`KW^qY45>Cc#HVLVFbNsB8sh(L|A%8&*+ZB_|y`pD8}z)HNK8jp2e zI0W-#XOd|bPXjr&%Z1OYvh*ba(DE&VF-8Qas!T3?S9Qoa8B~=n7p`l-*mv27Bydgz zRYj_Vdk{*6`6~38i>QX*{89JU3uQg=tTVf*Iz)Qh8w^V|8D!pY` zrH=qPrpkqra}#bL5-`t$m}S~^h!~7BRl?^r82fO5WQTww+f~AKO<4Ns7=U>}xW^}! zzBW$cjpIbD^H?BNT_zdvL?DhXV_yan@djrS=DFl|%=7;DFi&E@9e)If8%UChnRvqu zh~Pa8^YjNI`~+NTG4{CtPJF-0hy%&$(jcJl|10_>#LwDJ_fUCOkY&oJY30L8vmNY@ zyPNxZSC6M2vB+fC)YflLL21dQV|X*OHhpQOOYrnEsr2Gf$aHR{mwQ~v+ zDcomq5skUtt>3@OjoL|d_?Lg)w)wSfNVuwg&AxL*;xLE3(F0`B+XBNnOPQlVR4Uc9 z0&3B^Os~!9)vaj>RCwCL?>r(!S9uPTq3ypj_*`z< zsljYSL3y|Au2n`DqA~Pp##Z)FLBChV+sfc?tDZq*-eR%`WBs}yqJCY`sG)*;$@-Pf zrx~*IwbORw6HNn)`E;0}TQJ(AYe^b)CLPkIq~Th*L=d;0RtFz=RR^0z*yPeUJcx4aLk475u?>jRhzpos$P)F1+>pcjGM zv5G)?1N|$&&wIc<0q1#6M$#>}w(=u+9`kdVBJjn(9amF+)YUZTgCYs;PHE16y*-oY zhu*Guq#=X&T(R!-9FNv+FA>nHE2OV)WH$01*ax;FiEQ4uNN>QI;|71LJg}=O)hWoT zX*Dv?B1AsjT$)6f9X-wSyXExKy=7m$C#e30CZNa1Rr(2c()e6#VJ5uan@chf*1lKoj5y>r+T)P;;aqd!LW zfL|9VIxH@&OVO#H>dqm(JV}7%tID&r0bqsL68!LVrMF#b`z>qHAuZ?cbnBPj80jJQ zh(&qnLyo9JQG*m=f*Ou06O*>b4dUT}I0XaUqw9$yI%mZ;g{j`uH&#o0v3lcQVdu4*{&v~2=0{??(f-#PKJN}B}aglxWq`xn1;Em@RV*p3HF=_Eu>%&T}Bd$b{pDp$*npWer zm%hAjMV(@DT_oY`mGq+Q%HpOsXD*-Y~I8l zmr51Z{YtjCN0d#}Jv`{_K)&d6&HUdNJ|X!N_sEQ!t?{l~`(5{EjkK14pzeU6!GNI2fS`qdp!I;D{eYnJfc(4P zRDAT}QzchFjd>U~cwTAnFlq2GYw)mW@UYGdv}`qx{M1O(T*orAM8ggZ8B)O^iN+xr z!6CuLB~ig8iN+-v!Cn15QQd4>7z&TMyxbAw6XLK*?KaiqOV6gpRS1LQ@`-U+rmj!s zhZ3CK1fHAnCWeN?(8Gbxr8z9qCtGECWH~IUtyUOP8OeAXT~i2CUyLbZa0W0M@h?WpbQSp#>NGOGtzqrHJ+2#-Jk&)i< z?myNm2O2091~kFgg|WbJ+}qFiTn5ZSDa3*3TbgN;=7hZ?v`G|_WPBr6C8=HkIl*`4 zyuDoiX^$-HeB^a8QG$Vo=>RWBSRD(zQ21-qeKbt;8?2Jde;YE;_S#poAYY@bJ}ad61^jy8hexNW~R zcrP(Yx%fZ#RY&2G@}=xqy1P&2nS~0P2SgE&@?C+{nlUwb>xO|ac0d>$ym|+>@b#3! 
zYZD*n!UC7W<^8B#MpxVHT@>El~PLkX&}$I)4<7HySk(%GW+u}RnS#U?ow!_ydI z-=s9(3_etyFBt7hPpf3)K-RZtyj!IX|uDtDv^IZ^qzLl zH{i@Q^T}G;$c~yzx`fP(??2KLzF+B#bq%Ft9hqc7rA$uAccqv__+FKaQ~5yTcTIL_ zOAi;cZg!qyq{NVtlB-?*&nXy3Na@#e2a&QN)*9K#?;MmCh*>(aoyYTfm8Bl+pN^2j0$CC z6>d?aOy3e5*Roy?Fn1Ng`J}v$@BhWh1xoTR(0E-%t$7?;`QGq0S$?bDX}2Xy}6B$`jnDY zG0X;pI(aPGf4h-!==`>BRpHs^wpbkM_uTR5zbfj=ut)L&fNerHPl&GL~@SK?eWetVN;9MVBlMe zi!i*?((s(JNH}Aa!Z4!ZS98L4jA6-be`eOcux_CzcctA7U*&RsT=NA%R!d|>qtj5@ zar1{JE|*iZtIblC*MpxL#bYbHW>G+j;$l+$N(pL>}7P@VRtbUs7~5L{ZMU0;dr|=+WLRD2>z7 zNL=Q8%oVe}=<_t<2F@On&w7qL)b64o&*};}^erZwpEVVn=PY&B{(5A( zipCB*Oa8Xl6v&6`ZH7y*a`|?soS5@HGy2Js zBD^`TqORGrj?1OD>v$*zSNu=RTmEeY6T{IM1W_}jx6p+yn`>0niMEI0qH~vpR%KhV zw(F0o+UbbufK*e zq94K%7O|X8qt)V;bku^7h+YnLb8Crd9Bxs~T4@WoKsx$YJq zDT=Bam6_Wuwp?^B^B4l_4%92Xe!UIb4v3cDE2~+3DNAsjXTKudqu5w$W)N z`c#{}xOtksU+#6cZKm55*rZ)q8kE4d?|u@8_eiv?M)>SNlXasbcI^#`Z;65(FWeU> zeD74>`2I@Ly&W3gxMNGN^PRoRy0y9C^o^;;?A)2@@U5U4^$hFJxo8+@y>J`j{wLCu zE-v-;CAY7`uoxD~ZR^6?IYRQVd%o(Ld5vBq)3Mo_Z>_i-=akamcUuM9c$*Z}h>z9D zMfPPw`}|XHyZYz+Cycud?U1 z6vxYY;&3Yds8ef&N@Z@2(@bl|N=bkJ_cWVvoKw{+nyFZqg!k4ud+)RuAmdWqO0OD?D6{6`w@Sd*8khNu)pq% zx&KW*#bb6@_T#D!ng7}du0QhF`LR1-`!Nrz;uKL!~ zNRGI0h}7{vIQ09o2y#1C<=O7n#p8miVkBwP8&iYYAOG7CIh<6bp{ZAL;q$3fr_8v( zlG7(_(T!Tf7?gD|=TfOVuS9a-f=AiP@7z$ey;FH{c6D)mUST*m-UUf49DVsRc&6KZ zR92=}zut~?Rg|$-b;&*bnkHY=J@RDvmu*w(-{zBZeWSQ+tcZ?;1TF8P!e8IMen_L( zw=U_>N=Wo(z8)MJ8$4dB@-ehiX=;CbMnT6v5cUjzej*6Pr;;?=Dh{87_uA{nyy0~> z{2R8Ztj43lG3z^}n0U+rJSadZdArBiYYzoxSpHEL*J0{xBKG#LwOcdOme9tXpZ^ z=D)ck)Z_qVM|38x2%R`|ioY&>nxHsJ*t%dg%rpE^ou0cc5&ezFp<9@4lP9_aEcV4N z`g`wZY_ni7@VMD>1cbSt0vu+36wHfnF5J%oJ~SBEww?&9yUo^AlCmbx`SuOj5-D)*iQ-`1;?4TN1rpur6hoDP3;1B z|1)Rm2S=xYG0Y!9o8r+d(PBKbaL*T;ZqZWj?7l|TF!qar56!Q7!O^@g%~_+*!CE|) z=G3rrl;}+!wLW3eO^#@V=qMhpfm^J8H!vR$%a@~2&EOieeoL^#xxiY#C)OrbGzO1D z|1EF75?Il6WZ*AV0hzf^G$-%9xohuUcnurvT@J5*C`6#28C+ye9S6bee+Dy0i$>Rg z#mpZuH+8{||4u!6_Cjhz`l+L3dDVKG!J8hpy7PT|kr1(dZl2YiTipCZA25}vw)s&w zFU_l+5O&KLcFRQeofLME(w_6~uK;Jg=4~Lza0Jm(MZV3dCAAc|8U;_ha=o 
zV^_Chci}XAe`v;kh-JU>7~%F17d!EEU*>&SUiu>U&7}%fBiyNfB_uTB(8LRVFzZ|P z&@z?7aOsF7eCi{W7$Wr>bffRj2KVxanwJNB`_S)6ZBDCzmc1lD z6rf0wX7h^6$>c2%ui46!H1&)khitU8b#efTG-ucLKyk2@Sp%x5AsYj2oi%_WPnvB3V#(X9jK_wh1E{08{2Dlro2@KRQ;#Nk$i`G# zhoGP~9o&Gn(uUQ*%T^YrsTUJHbnsPMhrOWIn<8m;q`2Hc!J>hWtt>}V4=-luz)D+3 z8K5YWW@n1aD*;M?t*i=A0hFD#jx9h@CCx4um!ANX5L;Onpo$$jaMIR^0x0UF*{$Mo zE=7w55w@~%Km|~)+B#(bMUymp1jGU;F}AWbKowV7^sP;rGj8a>OIv5UpthAZY4*Cf zyi?IaK$5NOQd6%2p!~FTE&+-zY4*9e{8=$8Q*8h;bp5J$`UgYtwA+&AxAI?`qGf%o z#b}dwn%}tdHpRoTDRBH=&Um3UkMmvOXpmF%=cQBcT?&X5=2w<|jce`1*xm;wGnzt+ zT#73*6ge_4CD3L#rPimFa+?BL{RAZ&P`z=B#b$5$iT4C@PdZh6IQ(~>h!0i?do^zS zls5O@p@^zsU`#;7TvKB=zl*vW##v55BpZo!S!GRLB0OKltPvm5`%qv}iqe~^ zSSa>H`YE*6FFS8M(A!7op7}~l6n5Sg)^?H0%u_4Ln!M8ppMhDS3`T`qtZ|GehHhyJc2; z$SP4fDBaaZ4O`fH=gNeZbcGv=3Bi?)hHadie1!|GiPV*z#vQ}$jdT~5qZ8?sza|O+u{8mR39PlrxNmmG-3K^oKpi38ZvPx-(V7CgSys6?A!x>R!6oXl&i> z<-JTXZ!kn2bQPx2cDeU$f>mbrB(JMY@zZ^q&vlP@U?-tX)zp;-D|})ttKAIEW@wtS zxa3%&ppas+Dz^xhPH}-&%KwYhCdW2IqwZ;6cq>`~8E8yw)1|QYFySIIDy1-B@!yTW z!EX*q$6)tOxBu=nitmk^X9=zfG{TY#Hw8J+Z=hkTBnRO|{HqG%1)CyV^^6C7yjOQ$ zb?={lbYER+ab6+ScT@7m{uG{zxe&zc2HEg;eq){V%&Eh2tP08|J81vx{dB3js%Uf; z{oE;kynkvWPUlDBZB#tmW0lE~qp-QNNwpURtX>%j;8zh$pvW2>$KHfUbK|-lLnw<5 z(3VxQnD+T@v)by zIzMh6EE7zQx?&ykF9U9i_5$`P-U>c6&gUR8{FbiArMS9t4$pa^{zk8nNyp63hhyHWt>rAx z^CE%A?e}<4i)+>AgB60?!|vQ_vogdn^!!;u##Zj9X&^8gmbBIw&cUHQ zZ09)x7SD?pGU1{XLRJY6Ne-?H(HZC@)4!x-V6-g*wG?$uG#=pQ86{NYF8o#8)3{Fn zkxuLEHJgP^?3YAMv={|WSY&5SAdEXf#Ll0>vxqqRjFwpY9(=GCCvnhLT1sWc6<)z$ z(gY!W=7Tas9t!WrY&d@&3F4G|l7Q74E5oj01Yjdmn`k9Ve0k%Mh75;;oD4--YJUX$ z8Lm3!rT^e8aBrz&k=&asG2vc$kfX;KbKy$r>P`r~#XcoyB$pHx`~>dTB~mzg!>Tf) zO(fZBqMAx1s+=mCL|kZO!Zc#FtdT0RLu@+dGN^=;Be|qaPWUIPbY=rtBFg+reQcje&hSZoQ3V@7lRET60AI~^C+fIfndgO3&`XN{686SMerCbg8Tutmiy z-}M^pTj$~;r=6{$o)+)!o;!b^5F89fSe30Fs;xfj`bJ}2&vxW_59u-K`awF$`p$+} z5Opf!a$73B_}QcW?fQ(~y5g={PeHqEk2;0{rqt3YN%qeNEcK=;64If2Gu{>In%hM- zvj`${xh1tYc!KhM_%Hu= z)_M6z!Nk8m^Mn6ti+!+byblCh5x+urTV^dK_lf8x*o|aZw}4NiJ@r)^2=yhupn>ab 
zj+!;k%0{21aw_0g&^L-KnAqvwBp2_Kvp23m<)TYTU%RV3$sN_l5R2)izILBXCTgo3 zBQ}p*BQzId35P_fMXb=-$k~^5f?4p>O{KNu@atgj}y|YvFa&sEV0lCV)j`OE8;V!e?6BjgpWbSAj9@Lh$k@-aoa`MwF|NKL|@Y=32r29|jMh_n;9FC_O zsf?}=t{k^ak|GvMB6ZRm!R~HPuKOv1R7&YliQiAu$WUuh*Kq0hXS0KRa`SV^$H)2= zHppiCd&qQ$V;^UpEWAR?3u5pO-ZkM<%zc*m%AIc6X72Y%ug|Jv=TM~obIh=S^cY6>m)d*`ZSu~(_^(}~xG7mF{-6|LbE;nfw@n5XD8zy#?e;z+Fg7J;gy^K0m|TNQCw@mf zU0$;I@wcq}kkq7@)GC5s&oQgz@Dkq>y2M->H{sMk*H$IkJdXKKg@HRT3904`B&GZ} z5)+HmxJ|C%Dc{JlB|K$P(qX4)n~<`(rI4@{{*;j? zCLMM&NuI(xF@J2;m{di(rH&3i#JeYBn_V))Wz(J!Yv;H=G_TP@I^^y2?#pln8HMO$ zlwk|d?Q|+!np);v!#ic{V_ZTOX$Y_dWDlOydIKrnct;asHyCn&tJ>{``TKfGO=Y9) zu}SlKv6IRSum#Af(Y8L8YrHuyDW;Bnqs_5PZo@)C!d59n&eppBNan!u8e4$ahRlY= zvr~bOp+Ey7Hwya>ElcC6)X>UzQVvX~`eZC9VMyD>QJ>R9;!a|dF~Y8$5XUz2WsqY= zC7f#}K2K0)AqD>2#Shvcw|O9OpxGj`=|r&2yxY?#4t$4~kH@{_F*D(_S{u$WbCo;S zeVLSHQ(KM!9#dXf zhBUFKSAoU|%B;IfV=A1=sN#-@3Ar)}jeKaT2+?Om;bAeA<96e=#Kz7Ds3L|Uff`H$poo%4 zlZ(rn0g8o<{VkvZC{>Nu!kT)M5kuHO4d$C)`;sJy6sW;K@)q#dZ0zqf^)w@guz?yZ z6QD?wNP!wm1EAR0*gpd*fC6f;c7P&FA_Z!&c!1(yW48iSQA5~34YmwW1&j0>QslStQr zSc(>KVK(+ZfGTzf+XWaGK+zd(1UmyLx+GGd1iMwtnoJ#A8{ut!k4gEWxXx(bZG<8y zDl+#?fT()zyo6Lne{ayZQvzK&YH`$lr-Kz)E*HD8kAIe{P?A48_q^ zQMIR0D06J}Cp?YBj?oWVGn&H*C~=B&VtbYBt$1=B+y~%P6ISn5ys7Hpptp0%dG;WxQTFK>}L={0A*W~W}s)g@g@v~?Ua32JDZ(&dt4d_ocH;nvN za<?|r#S(yyevp>focbRnMe**#y4g!Bt6YC09M*-rnm}&| z6&F_kI>rGhC)Ze@0>;58yx9t$lD7DUYh#1f$~X7fDe@Ufm$iQ*G1LnunZT5OTA(E- zkd{S7?7JJPyoe#3F%vbUet}0cL&V&lyXfB2MVHQ8{L8=Z5T9RFV|b)z6JwNk<|1&f zZbDAF!jtfcF#Sz&QrNngf>mj*j$8jF)+p)B0S2Uoj9*W~_E!N?x(=>RSM0Rktfq2X3E-FL zqm1Dq*v%bkn<*Y^Y+alro{uqz%cP%u8 zjl*(Zgothl?vV56Ye=OA+@a#VnOVOAsslkf)eaOXMtx@!M1A$JdS3I3Od6)T2oVAwreb08{uMB5%ROWwpTr(|+ z@qD?HA);#-wsuyYd0gVblBhA=~wO9TPiHgr<2)$Qghj`DXIb^)*o{iUW0K-m%-{ zk_t>jQd-I5)gv2c1y5HEfFdbo?H597RsImV+CWy_e0+}&QQQ=s6yL#s?A{G-I0)%N z#^XCSzU2%=J-}Euo-<1}0y|DN@($YIy>DtZzK^a&4GhJQajw`#x8F-y|i z*W4ayOB9(QurnifKY7%OoaH4}bi`;4*83KHTsjd=s!ZL}R`3}sIZD4)+qQQX0UkDd zstq}R%eoyGtCrM@r*=BZwxhl6o~+dnFswGq>grl*`ZQ9KTvV0#G;ybfU$p8{s^yrP 
zXfCx7A(rbxWJ3cXntqv;*~sZpsyn$ls3$g6@_g{1hG$QdX(+0nNy+K3vvu$EE78YO zm34N6SBJgoe=5`2Y@E~cSShff39lOyq*+{C^mZqxKnf8xgEX$VgRY%bB~#voIC8Sd(8q+%Kt~t9-as)#%(c#}(3|Ra zYcr|pbd6s1onF~=2u{(fnI=Oo1>_9ErcA%X5Nd$Vky>c~<3km@Ri00;e z`W54U9nO9)!uoKyoBXUN#r>@M^oVkIg@esG!F{Clsmtn+>}jJt2B9pPGi4yTS7~y8 zb92AFE{k9%Ydg?g^;7O@)wF&}*DJfC^Cd1v@_2Unulz{9N2G8c3h5kHtnnYj`d{v^ z-*axGKIuTx{g)W%o?;oJ5Mt(~dmi>CRcjSkv#0y5id{FomiK-)>IVe%u`_gr&YI9Y zW1*+-{U&=-X~Y#$w3KC15CLPT`ycx!v>eB)6nEdeuA>E2=&vRDr&+h}_qGJF2afgD zKatC|vPk&&i+k>j@<}MdLGNk4X=l%I?tE%3oo&CXj~n-xj93e@e<(iEOD-LJr58AiBKz_n@clO2~oM{(C7pyGi6qVdgVklpYkh>i5*8 zdVfea)?x;72%)%uIS4kZahf}|iO$A1-8-Vk@oJ*0UO<9VxcYqTuW%aWc)<{jkm$RZe*@C3}*z%~xhC5@%cyzZ(p zt1}ay6_Aru4~V2@9Mt}^kwLBze8A6TSuExgxt`?s%s+z@AvBY75wkwazJiNa_6tlI z<6vp!>m8S8-SVGoYV+#Ekp`)36k`+~r}+m;<0Z)f&?JIJc4^Ky)LnU%*EN09e9!k% zLUw+lHq@bTJM=`i4D79K;BerUqykGyP7ZwD@~q!h*dwd&ierG3+I zM5ZlxV&x0C*&&7DWQ{EJR&X9KI3*JOd^aoj)Nd4U!hsU*ze$9SFs$ImRQF>im{^Br(eK>lSo ze~MM_l{*10pAH9qJHp4PS8P;@FLjN|GWAZDu=mJ21J7mE!QAsEiVpAcv50C%z1YU{ zw1>C1=Yv9TzRAq~s`Af*CU)@S@((PmF($<@kLKd$g z!EC{mg30J6Oa6;g$qLA7lj^BT+7X%H;B4l%!r_dm@&)rjOI28o=5sc~-sa6WKu_rB5X5 zQmkCXR$Vq;MSW<@pweaE!icEJ$SXKT3Mb~(Q}bICBpLhmNoecHgSv* z=koeyMf$YzY`wWw{~|SDz4?q4HTs13E;9Ry@*3rKn%0$=Q%Tl%iCS$#f<9uhHs)ix zk%D9loVT^V1lZW7C=nzt^nWt0gN}d9feYT-j(GOe_p8X4{!C#EnfmZuAL~>7((P;g zU$=>wZf`9eLt#GPtD|Oa`WHH_YNwb_!;rEekwNW zm1yfn5jTm=>Q`CCC6Fj(=#oUxY~-i+$p~=2p{x}a;MbdMg)99evW_UEoZmmMW(=JB ztZGt>kOKclBm;Vn8A#878Vm!0h-5SXz`%eS1_RL($;1PIkpVRd2I3)-nE=2m2Glqh z=pCR@!VF|uXCAV_0c!vv05LP5rocd701*IK7*I1{ASXZs09FRn92h775PiT5e9eGb z00YGX8UVauKrMoSasdqh*cedDV4!M7vk3sOGoV(%K)(TvGG-tL18N-%Gy!M;z{!Bx z1Oxp8Gyvc-C3LoXiQWzaT@lIrQ^xG(W*1sdO!mJF$UB# z7^n}>sAG1EGoW63q^p9Whm*&qkN-w+RFxD3s>MP^02zr#oZXdXPE|Ab)R7fA)`SUL5@r!dY78y*b zCJhN97CFo=I~fEx70mE>Pj-i)5)WEU|A5F(ea+Br6dX!n+5V^F~MA^OfczT!p$21Z-}V$gS_{PY4H=^!dAluJ9$K8Kl6k>SEl}C z;3&`QqGGYd^ppHK{({9GGpea@9FN5jQ+c(Z3xx%OsXU&S$C5h2z(Fp9h)$hh;DCmB z;wFm0y!B7Xc?M-aw@}}yt6YP0B>GONJSQd_$|^d9 
z_{Kv}-S3Ciyb$?R;go}MoMn$dKDprKCl!`eXDt3gi74LjQv4E>6 zug}R@S}|GsrZirwcoM2z?~Jei$bR(Tv1jrbKn1T~x&PedAyl}XJKe(IVNkI0k2?_M zH^&RZRpRTBcZ{d)-J-}#f2T!kW%E*1HMazd zUm&oMWnu<3T_1Ug>%p4^b@mwl1`LrJ(4wcV;B}2d zq_Y#&pI-NYrSZngM;!KwCXV*4N(0ya@r&ok-eh>xY7kbc9psq(Gc z1+fM?^{du;3;*a=*n%~iS}!zL(rpA>ZT)YjYjuPj^?YiIPkNVX9ad_DSMJrv_XW!x zDweD$Oc(g!>P=ock=w6$mus2VUk%Uw(NDJLY};>qu;vtd8EW88_P9`6=0rYR*+$Mm zw?^jL#=SOu+%l)v^aPD{ThE13)=6tY z74;0wq4S{gQEAb(T+N3a;>*@_^=wXCNkl(#q(}&7=c_sSF~z0s&X3#dojNRd{Wm1m zM0V7Zu1BZ(r{R!fofUZPHYYii6pzVcPie=Dch<>NS7q{W!|I&T^P$no?92R<0|F1% zc?Ys4L#ed#HpmwD@UB5ulW$F4>X_*6kL_U?S#lLR{;c0hW%_mozUA=fe~!Zy_lIt4 z$)%+aV@1K+ZP1H-2Pts=!}CF0E1g6A{L0;J^vM7x)WBcATF>KV)St|25WLb+oSd99 zfS7u^&^KAB>Y&{BXU|!?@ClA=m4dY;OAWUiKG$bAJR(k;TDu7BY924M%C?`22{;L2 zkI9VS1)@!*My@B{ZC$-2OeVcooPG1va(_0h#*HdHpnY zuapU$N}XxLyC!;e5pj50i~%T7TidL^ohFPV*MB_cbbLizP>ND2pk3rFH(WThML zyus|SA@qiQN0(LTtpxrs7foaYRwmuOs_yr#a1 zYMjsfT2V7EZ$FcII@&>XG;zHT8KO2ik+4^J!rSRgx8L!%7}@b(8|Ax!#(#;Gkn9ov zr+{(!TVQ1rq8Mg{k#>-=soOA*e_}Gmp@u()Z*8ESG11g4IN4S1ZuQq>(KH10|MB*f zZEZJCxVRQcu@)z|w-nmq4uRrs1xj&uhhQzCc(LLR#oZ-P+>5&ucX!MAJ^%CKe1r4i z-kIH5zpf;ko!ObA(*@~~uC0n%IrQf@%=wh#QMY?PyfzCo7gPsYMsimLO{r`C`CZI` z*oQY(s4c$JA_k&_iZDfo?h$+Y$B#Lw54lmT(a-0TR6#0YHsNwhXxX)+(!mvL>RSi5 zbB%tO*iQhmcGq637>+9N=@KxWXZ+4g_OyFi2{MSUL=n34;JZ+j3!$ zd>Et<1}TC;N??#O7^DIQse(ajV30Z(q#;DC2?l8i5o?1%;4nxh4AKRI^uQo}FvtK5 zG6aK+z#wBV$T$o#34=_-AhR&YJPfiJBDM^JtimAwV2}+MWD5q_fkF0QkOLUx2nIQU zLC#>13mD`I2DyPj?n1;KV34N}F$8QEg>W2?HiunI$|C^Ag$!Co0XJhvI1<2i!Gz9^bNCVrI!0LP6vZrH8~;rIy9LR`ut z7{-MOTE+)AQ%kvu!ghmzF+f4=5>g%^FfMG+@=I_tla#A6Y&QoOgD$585PXAi;ewV) z!OdJ!u12ukKZN6vbUEzwXdd5TTzH^ma&Yr|DOU&B?)u-|zaz0nK_!w>9-%O<=-L;J z!V->tu-#{@PINUw0((NxXF71Rq?Buz6ljeUG|mUk@sW}u3xnCe1bt=%H-D0HO@{5h z2aUUda~>GbfJl4x#Guct;AV9x*CN=i7HGT@ zZK2AYIF@Tkybgq^1rctdf#q=|Vn|>toP?@|v^m{kQptb-6|{u`mdBThr-HEv6RP^t z=Aem7y$*t@p@Fur!16EuoZQ~$@QO=$1;JjTgUoQi@+4C6?8I*aO~s|=gJ2Wrpechu@S_k@pIo0@bj&> z*yR2tpYt7=;h;84$z5iG3prNPTTIvNEq=})m!HbWY(z^`r3<&6rwm5JQ-++JM&i61 
zTbe|kX%*M@6YILVChw#pcXIJ>WTAI^_%LKE>V$$c;E7El+X~LvPwxz+DbEk+uP3Yh zQu4m=(_>lLOmT4~#~6zMpZW&d9g$aF?ji?n>F)&f&7IAejj|8>4`~C7UuTqBc*e{n zyne;tDaGK$w~=FRFclG=;-vbHYwW|u`cGLp>T`3mx?Xy2O5M-%d^e4IS zM^{WoreEMa;tvz7wH=!h%QUwQiqjlLJ6YC&=<9G8rc@^4 zh=Jx|7CjD0@M7ZJ(dQKG)7Knk}*WvT|29UtL~_PzKN7 zZoH~>HRJ5bI#y)CD080dc)QC?O&MI?IP!M4JB~6~rs6}%+0MHt(RU+I3%*`o3`~|k zcWLYHIx7w7^fm9XyL@~o@-J5BoO1Eynk(TC{9ac3lR$T(nt5EN!u$?&qx zSP`g@gVk9KISeub1-ZrUqT|w!<7#cwdUq~R0`<~^chL`A2-}Rm;RD@{i(dw5vPpC6 z6#~WVZiH>@_JThQEA=peo=bjw);!#{uX@O-&L(TsnxrwR9guJNT}t(v>!9uN02;ZT zi^%r3}o^gvxuKrgkya;@6yu{P|&++y`5-eB#C& zA;xbsD9~iO>ckhZ#yx+($9e&?4(fpYX2NdxQa=f3>P-9Su+XhoSWa2kea41I`LEIjtBM#(iXSOw5umEG zFw~j@3!(aF`VKEdg^C-ZB5evWII#n&zo@qPR9;qNCoQ)TmSOTEuJ>l}D~Fz!8xW0S zmV$KFAe`gbqZ5^}ojXSDTrci>d!cz@{KD7RPZg=GNxHm-T!QqfTptAVW_sM>hu|v; z<%HC1+NIjkGqWtL6!Ja;-+RJh6QJyLT1~nJ#oqs-sup4?diV9=N!F!}Ow(yD^2K~k z6&gOf7z5niVVkQP42`>*YZ&o~;#ht?z- z0v;Y;8SJ#Iy(bGCTH=sy6(%*hyQ|j6}et?f4M)^_mR7)r3+QhZo z_*OwMdkw^ouO#2!toPGXdn(%%l#sKBnuZ0haa@7CkxAl*-~#57h8?TF=Bvg+iJu>v zcpCHd)_w|gQdxD9Jkh?Ge15^zFGpbb}$aj!Geqxu}t+A-7|eI z&1rbUG}&RAgQ7Lg`|B4=pys6x0NXH4-lusOSVuZKQAgSXz)d{REztD`fF_xs-5ZlA zQhX$gto(6Jq9eVUvjbr*&GF*LDEbCW2aWm17ZOF(#4u|e>B;YeQvCm$uqr{x?)1x& z-Lr_*vrYFru+}&#qt^HcgVs0*C>22|*fpK$1{aW{% z8h1-rlZvaE%RhX^VQFxRwI2%NeTfYzx7>0B1*e+O00XJsl;Z|^)ZwoG=t8V6iPA#T zCx2Oru{^ax7we`d=bsIq3fSoeb*vhWmXf~abKBG5H&Zp>fcAp5WnQeul}6Cksoe1X z#Vw0R>iH{={r3gHO#fGc=cb6dcst63Ua;`-0M(M^d2G>Ks@rhczXasgR{Cm8T+=rz(}FCY7gd=w)5Jl~4#D*&mE+ zKF=!`+A988)mr|F*N2TKI9kvWJwqQ_!(Zv|`u9kNpU6`o-J?L^jX=g%YgI^kSivK8 zN_1koVOb4$&V31~i_wGIetsQ(9sFRv3VZ4+M}*w-mm$zXf1erQ_d+x31?S;Mt&TEo zpIMwxQ58JSkinMiUFZ_LXkVhPVA^?d1f&kEC zx*O|rcv@*2Rlg-|OXDu@Y)a3YetyMSe89@>LIAm^$0Qf4&c&8$x*8JXJi}Fg1 zAa+QfL|HWvN$JSLF?FF-S!1NlNEQoo=1Z z26M^%bN=F|De;At&DL^lwtED;JTqtNsw!^GR+@6%?*r^oNC)lU-f!wTF!79cWWkaz z4~TINX$ZcGvB$(!v!@G7l){>kYNq6npWZ6w(suJYp4sxEydC7Txb$pSuWj~XYopd? 
zbCMNak(%B>fSy+A*todfi=@6s@-2NPz;gq>VYBuTzQ(o+SFF-m$KSXai%Ej=-^ll6Kc$5Kk@2b3(L?2wr-(82 ze@n79KH5n_I>*7&qQ}&}dDj%CDqnOv1mO+({wEeL0a|!3(fZ`T*Ir;0&3mkGhenEa zINY04#2KPT4|Zj7jDjrbDQzx1WnXyQ8078~f(Z>%~+$5*y-CcZ59UJ6}>_kGHY|NVaLSXmd}PW%sk9bY2j-j zAj$D>0Me4tRC|5hc9>u7uw+UqX!?vnC&B zY55SDx%Wo=(SE;3|6=h~8=c3kA&+FyhkShXwatJjJ6JIkqe6>5*fmhd}kO71)_Bpe#PHzj9V zS)EbLQ#Hl4-C+rsB>HJ=B;t!*Th{h0$VT`6ZHs^J#t!1PfdoH52mwOYYPrux4 z$7wUPXtVm#11%(65SNf)i+h*F1$(-luW$Qi z&xCmXyzio2)(D18wd;0%Av+(w^7IF89R%%mwp2U_gq{~%rA$G^$;f~v_kDJXr!jGH z7^YUX0skA^t--+0l)VU~A))D88sn_P*9dj-O+eKY+lOcj6M8Ou??*Ks-iHVY7kb|L zNdM+_G(wmC{FWwncmoUxLf{6LzHj|ns8ezqGaZBR? zbagubR4xCnY8d_oGyt!rb_VH%92Q5i|04kjDbm-U2S_t{1VIAVvXNs`w})G>v87B` zb;t)iQYx=zFK??>9vw6!j12ui1x=>v&@k6up%FL zU9n}m&Q0mD3EW|XY?c8BU%1l?Kcxy+^@zlCz$b7SZ2^Du^V;{dG}f?@QmV$3%<#ge zx51=AOhv%6h|rNg3}wmn7k^q+4K%K=F$z}+fp}Aw0y??X+H+RHA1jqsXpcD}(WTiL z(LVW^61>$ZuMX8YQdDtkD?TuSMQ+-Lw%{JoC(V9qi1)e9U8?3e99fKSKUDWR{y|QPUjMZRbIw&cM=Y`ZI_AigVQ&>O1!@hSqIL9(zXGLtAwzQtEnjJoAPW$alcINc*a{D4K;;6jT2>cT}ZyH4q5-=3K)I!&c=`Z?p z)-R6h_@mnG7Yt)>o$0tA`jH?_>es4^{E_NMzB5CSC^T6l5?Q1;0Re^|kpvWAypqU- zDtMe|$jg5v2vzU_=IB3$0AQZ}V+gs?Uc0ptx@z=2h)o%z$>*TcW-&@+G3jBUc&~rU z`bfo%mOgnVp<)9tk(H+sF*L%m3-^da)WF*9N&>XMsa+z3VlMa(Vo1xv&NNtAqM?4Y z0T?ya1ZZmXbbskdhLNS!vaosO0-018(Nn~dh%Hgf%xh`TCYaF^#j+`NwCYX)BXrDo zvBD6sqQPzVJS~YVZ5)n36YM~9P^>j}pgkzo85HXdiuDG?`h#MFK(XIJu|GhuQJ~nL zpx8uEYzing0~DJBiY=HH)5O!57UPpeon{s?Q2)Q%Cwx*iK_l|`&@Ner;j!dn(K;1h z)u-9GWC5C9Scl6^Rk-VU7YzW`=P-1 z-i77>`-uGb!H|NBnVbEK@H;`kn!ev;gG{g$EPQgwyUl)9@^*C(FX(24QR10|Dj(&Y zHw2yeyZi$3P2IN~*@171WeMTD6b_fzLc~SoXRW+X4DljpwGPpt8mUCd-6mHPH@{fE zrbPHCle2zmoBWtGaDApP%hE8CW-eH$V__K#IR5BGCIXicm`h<5guu3?7AO=3zc^WN zk~L&|#cpE|(c9sg%U~7AT;puZNb49ohZwtSM4%V+_2^JBo(mZV8AOoC!wUL)baE}L zFcg21+I4Vvjc=>uHCyIdf6>Iyl6%plQUnJf&my9;BD~SlO>^j{hldR( z?&w~kO!OOWqLST2j|ZuTy*BS{g`036?g8)PvW(t~*b+}8SaYbC5$q>Bm32q?v(BRL zk(S4iHlR;7YIvHz9SJ#D9}x?_n)%DU<$!DV!GHE&+UaterYh9Rj>Wrb*q6o8uQTFY=c}epPnK5E&2ROPd%^B$I@xa?vQHIo}y? 
z$EOfh^KD&7w28-v%?2Z6@~hzh`wp)9VSNp z34#FoRQn3bHIA@Rzk=?vC0`<#nYV5lj{dD6dbFkXpJ-b24X(X^Hwv}>@D$?Gai4gm zZi`?&iBe-v%Gx9SzKNh|6RUZieBpDEyU&5Gn)E!vr;BNU?gm;JM2#n?X5Gmn2K z_BBgtcHOGW4?ky>L;N0Bb+%9@sr%8DMG5O9{dy!0;DophaPDG1CT>4P(tBbNH+%Kr z&YTkFl^CwNmNzyO+NDIq6xvg(+KCu{fA}yF=CBHOlNe7BYX|5G@~#GZ{g))G3n3RM zouT~_{1EF>^YnECI}5q|wKazTZ!9XKiV$+`8lf_hu%po1==Lby>GI2{3#`ioDhaxxIYc{oWDhSyv9n zK^`WLwGJ+0*`LMgC+<&YN>2u7N8fJBxky^;k9?|mp5&@G(opJwiX)3FU91L4N>1N$ zV{X7v>II$Bj#~e$e`Xr_8FpgHQDDwAG=QCQwYu_feA{ce5w`L+_I3HyufiYv&6tpG zYPz+9ld|MqO`6rZ;aW7Im9dzK798M8|FEOiMQLJG--g77HV5l{v*!hU*(zS}t>s-bHrAQ|v(MFXUTE z`uCofMja*`1rdTFmI@)pDpPhSGSVQDSEQODflk8X@_4VPq_GgwoZ=n3*qx`DY-&hq zD)eZ45}pMhlsekR@otF62RxeOW6=7bk5A(^R4X-vxUz;>s2LRMjFm+Wov!kc$?yD6bWBuZX5I|k?lEI7^9xFX?_ggks*9RLSi?A(Iyl>G@rXqlf3~29oKRh$11Z2`C0*&q5}xO_>;peZ1405 zh2TD~?WZ`?Y#(b}NgXtL1HPd~z2!oh%;Qp=r#-!N9G;En3T5nhu`leYBLUy3Km<#o zJWlK(2;s(V&&u{ih&d}k?)Ogf_rkbCqifV_C))a}MrRq8tEeVnSFkdIwSsRqND}}(UJ!Z`h|vJdWgNil@K*}b_>Z_4 zb1MRDAAFs^@>7ApXyB0pz~Na7jsyy?%qi~>H@sNMt2;q>_?t^|D*YKVItu)KjC{MA z{2A?8c!RVmqVUC5*Kb(LT*jBGp*XU>MjEI7azVF>rcGr$)!kU5zzWiWli>=_Vaq)4 z{qM3kP;S#IS|&-R&QtXH5Iw>GRZ2ZjfNo>Yo$%Y-C+-+4O8uAidXzV)5z_+4m~$P6 zjWla|fqfm#ry>Czkf;ab-pSe6bk7&Id25X{#sNT*75!h5xd~)vdON@+{1JiZ?k=xT zaXEJ0V35uo$9^>qe4^~_K*2RPd%?l;HSv$K@zCW1k{T}N8{p6iYTfP63fcD$*jI}t z$AAlM=x8e6#b@w*;FJRH+_07&=|lISK~F)b>DN#KtsT)V)5pR?gdx39__M>Jf5802 z!x06Gx96U;;+aX#wsp+zU1;?Dfr>xtYX0iT%BK_gy9T58WS$qL8ji<(=5|x0MSoAF zxBo=0B>V_*o4wa~fT(OVB)M=$MJ%>Y!q3+|{MZX2ezYtFkT9Py*&WLUk7iCRqhDMO zw8ZyaC`Gj3DngVdT5u;Vcz_l>P79t_`HdPPnT;QV|4l$lhk(Wzd=*~G&%T1-LJyeT*mR<`SWSer{)~A0j@fR6 zedzso6VdRVB2#lY>x+|#X+Bd$LEd#Z1^k^>n`58OQGas(zJ6RJpO$S@DZfQr)lBz% zY+dLm&%HE3_~l7eCwa^pHtEQ|3{}o-{d;e_^0a}hUw?;2=m>q>%c89~W7^?AU6tfT ziw*gI8S>X>{HKdK%Rf)9zLtGpzlLUG+mFZpqxC6`@2r0PEqWv?>kSd^)WWf+&yc%4 zJF}3|LvJqMguqK3hBAsk&GY+lutcqro+t|L)>SS+X{AX3eY_tqf&h+`csxo^8 zOX8)!DbW=WQ@m&iGwuN@4!0li9wQ;*GoF>uwij=f=WzG!9DWnK)|$G zu`%LIfdNx^KnCCYy{z-!nt^`tu|o*HxA|-S-Pfc8;$s->n8LYgQ(3Zj-p<;|xh-Ek 
zH}(@}MgaYPPK71GVgQ36{)ffvLCP94`f^}M@G`IHVD!|n`VudqNlIufdcNk zt}*bITOav=!GpHy_JXKKvJHX_X7I4XWN)LZzhd~1*ho)nu%vt1*m&`e%V~DGZoMxl z;{13>c1yU=quw2zpWTk4 zTZaVdz=;KxWV~U$#_`;=I9tYPsE?HCm$w&IXWSCYq z(p=~71`RenG`4B&LWjl4PlAJLd&L)cy`wIB#r4vL#q|i1gKKAa$nKiLDf65hUQbYy z-IWnhU3+8VFrbm=wXFo=)DYJq3m0(T=Mo^PFqQ?(?@*TX`0wNVB7f0N*=fAOOf?jf zI1n5&o(;Wiq7I5lDv%8D$V=cRqmIm6^&h1`zF>DWj1V$m-QxfLJ^DOmlnb2m=18grK4SUlqiioky!(UHSvpWx1Qhzn)JO+|AXK4^JcyZDbYi zJ-L&$hZjxkdB5(Rinb+%kyvn?xK8^>=RHIu(D}4#ziuAW`Fyt)!6yvQI1B7Me@*i^ zHR5yB5NBRm@|bYb{PoGa6j4=i^L?7r!dAiGp|^l_+QN3lz`a-21owoqX2HuLvUIzR z(`h2&$bV%;P~h-Egr4 z$I6Djh+w%Ib4oow=iclCVMEa+v)`VkOCT6RAWSwA+TSLJ2Zp__TRni+ zjhT`XQ2QQQv76s0iJsoxQPC!AXVo~R28$?q9oJ(M<$fbD){CP_*RHvZHR|h|Ir1Xf zp84YGkZEuEGO1iJPvD;%>3wVc{OFJgET+(L@#D&1^$Zo4MC+x1gIUvL>OK;8_<7ySEM@$drw3I8Sdz;U-B4Kx=j375O1)P+gH z{Vyr!0g?#h!X)AQA^l2}_jOq(l8NhoE6C}pNp3v6E}AJTNkjcbL0})J-6V;z*q)D< zEs`+X-fSZtVOgz>zYaun)fA!rHCVA*Sb=*V(J1BVsClSE1Jbc#?||X2bKL}tI||uW zYQK3~izbn;7tyVJYUHyZCCD&2GW#U5tt{Ghe@C@>i{>0;?^^C8gc;gju3`gqHKf{< z^{1kFyXF(GK0L?1UPD&vJVA=6lZ&88vlHE@dVq)IEg~3^z#YcoK#X!r1C7VOb0T#{ z;Xh7DKzv{(!$?H+&b>#JJS4)KK6D29B$+k~oxUG4sCjN#a5p?`?G0AlW|bj zsQYNa=UX>-98A%@v3=TD_26++FW&$+VD*E#m%H{^lsoQhaQ=z5(Z&+x^we(j$xfw> z_LXdGsLMEYX(<20?J|f1Z$B6P*YLIi0)I#ZeT+I`A^CNTH^{3ofnNqD`N73Xa=ga) zkLgS#&?7I0!~xRtgW*%p=bKMG(0PW0Tkj%`^GA+>n^Ux+sf@_1H=J2q7D3gN1jRfc zVl&>~15pxpB}t}yiDPax`Z}qnGwWQ@D!j?QbYmB~#apaN$$VBC7aQBo_8%Jf(ihto zbd$Z6I5l3Tr3}~9%D42o9}h9+vl73|33T~wxa7ou`${l$e{uMAq^bBup$ZjNfP@n* zG7euL7GUU%!cT}IuF{XqAWp|jh<=5OqB|%1Lj_J7gK>jv&nR`Sa*YcPBSeX%1t$}t zWYdEGJD1Rcn+Q?<-?@Yq{ErsAM+-iq1>ezvk?FwLbYKww_hBkV{N%Q#L26i-b% znr;t_K^!T0JWSDY)FmzWT(u;3WKW*=w~q@34*sQJx^zcdvyN@zmXBGyjTa^uBN(yW z(LNP5P}S!+Qf(r$EfDFqnh0C?bPMi*{;LXW!|bz|9P?CoB>SoQLl z!;ke$nb*R%GK?@>gfJ7Q-evCXb0JZ8&&JDcMw^DAL_@0X{7E=j5??czE1HI*{P!+r zg>8+3;eo%l!Nb8~blHWX(dt9DTbp(}p{QFCr@yx{bH;6PgRir`;N52W){MFu`$5sc z(B|X`#y}xK0}m53M-3CS+DPbK=`UaN&!RT+Enze937>jusGj4;MlG#`$^OoI+fSgK z`Oeaxe@k5440jvb=h76;dnnx`T{asdX&!>Mm9#kfR3G1$mp4<;DAxM6+y2@L*Y)F^ 
zIZNj>Vydn5oo*@LI&h!nSmot1YkIwA*QEIzxV5OJ>9)FcF%3MV6_jo|DFdD3Rt<}k zzrjDJdNOiU8hS7@5em0Ur<23I$fkwFnx#AlNtwuS2ab8pN9XuH|3yFpc3=yu8`7g9 z7Nt@V>Y9_pnzK9|QYH~fp*kC~XSySxP2cpO7Vpme7PG^j?+7Ej%TUK*e-(JcpZ_MA z@Qw!LhXlSymbyfT?Gg}8j}KGyJ&kX_}EW^YpopK$UyIvP|4Q66ytrtd%I1;7CMS-7}VrX-es zC~j+slRWC9@{HQ+`f2FtC*}HkIIz*rC9E&_zQwj^Av7b;JR|?1ukOz3j$jQMUZ z+@JJz-Cst_!{jJa=noLcfl61af%}iMX%UD7dd5QF+D|lmUi8~cCLgK2LnOz`SneL? zH$AttHkBlQQUkdO&_xZ=eLdw)R}I%%g7XJA>h;(W+h(QbBck(rmJxXNOZnwgLJ+n5$k-b36=W9gl1#T zcPtI(*5fNWo0=E=5K-EDRO4|?l#^e>rfqThyZ90PvySW7?=qg;W8%f|`e~`O%4=H! z!32RZrJ-9Pp{F-gm|7%LpOIZMpB+d@$S7ziR=Mfe1TZh5zoGX}h{s?Q&8N)Zd!zXg zz7_Z5RNbQ;r!l(i2yZs^XLrz8yWqH|{la~}%PgDOYQ`FwNOY;-tzr&e^ace^ACzzSG#n+3{eSgWY=>{ ziqsBfg|9S@jWb#6nSGI0PtDpl?qJLnl$D#Wr>!SJv!BtZJ!**0u60gpYx3Wp$(&Yp z<<+OlemYtg_;e)cBTx0`K%Of6L%hH=3!~e8ZqQV|CDmz$z1r-mZ>P=5&t{`Wf|Usp zp{<=YMu(tKA*7YFSDw8GY9|;p0kTWX(n+cK8Gpf7)1`^`39q@Y9_)*!5aPAOKE7GJ zYclVw)>5!ok1c6Vo$K;%HNUJaJZj6(by0Q8Eltx{OB(FPi1d6VN4L}?k;X# zAB^Hj$5zpOA%OQ8vRmWv2lxkPCCz2EQ)I?@9p?knVgynN}U4W#rEup7HaXVAKr2I%x2LXzuC{+Qi$KWpjb2~O#y z2hd^+pxi*~hBqj)l@m~H<9y|=)i4j8y=W4hy|F}{J@qw7p>BC-ZMQtU20$Tzasb%? 
zi0K`P*`EYZ*!@0)CA3#`$RkLFWmin{PuMCa5C+EiP<5a;8t!!-t@~ME0yJ2qmp8!V z3W4D`U$Q?<12~K5a4Wsc1*HE?6) zCt%T^3P@3=`6qN$6L8wf2{>of#41HXP4dY|92pr|=?Y3${5&T2bYj=Caijyr>n`Cq*EwvH6vV!jOO*_o>%L>S175Xs`J@M{JD)CMcfSGjS9Y(BKJ8kvw4WaIKQljqmYQYYKisM50 z2ggMj&(Px)c<50Qpvq>6cfilTmd>^^PzlZm48m1COaly*$xuD)z+N+a!dyKZO{Tlu zK&-nh3$!i?=eR%x#<+VO7^SnVoe#yd3N!N;Nj7^ni#2$8#+u^0|b~G@|7(n4bw={!TwY@y>xPBgZD1aycfdGPsdEl-6Jfc$O z$tP^p!`#4%xn*I*z^SSUvvxN~6;R3U+6^X(oYOpm-d>JO*Mq=I0@|K1D_GeTD zYH;GeS=D)qNa^(HHZos_5wTly&AA<%r&TLQ19<0r+!GYA$$Ei}=TwZq*4jbroWSHRj3Dy`W;4JYoCJJCqrq*Z(J=`e+U0< zM-_6lCR)j$78FXh9XnN5s-_UxKHOVRJDqX@KBTnY}Gji zY`Im^3?0Y;%{?5U~)xO>s<{bT!lnH~SEsgJ&XpUo!{n14Q{W&x}5aZ7E#e#@Ef+>1!~$ClfaP z-aFfLOdV7E-0_x+dRw1w4&~16a>s4#HC&Y(CCcQBCEH6SEBr=30yfq3cuR#FN=;t^~dXZ(cBI&fW zBL4xDv9y%j=Cv*#28Ax4bz~`dI8lyWoHg2m`Ls^QVDVW>tY)sQ*>vM=nsowpv@+qkH@(KaW?r<(Lu1wk5dkN0fNz}VS4Vp-B z{O7Qz%jBo(A>wx7zFc;(ykGdDaUX9EQ+sKaRYn;nI%MpxWvQczeYL*27Fcb4;ylZF9IPn|v%pMmxNT?nu)wVy1=etwZ=Yx;F6bAez?Ye#W{ma3_ zirro$9V-3#YoHauW>DOE=|XmUPP50xP_xI^@3Pb)p2QyeXNUUhH8byx#B7Ed9OH5d zpGZD(izw=wg%?M?N^>_mp{`y}83$YWwO1k6c<4aZ=vWa8h7EFAtG+`6Q z`1BuRK4=~+EvS%(HsnuA#5#UY#RQpI&OAetGe-Nf>iLq12Q|#i7&A01rsE~UOdu;H z4^=+~g4s(Oi(lHAD!Y`s(@)LY33?TBN@^h;D~AQ$I%MX-G3(`Wye3KpylSd7)K`2m zl1&tsD#+eKHl~nrTb1Pdr4k%QW`6$Vdnh}NeK|fTAe4zYDwN=4Y^|}&%QBl&_D?Qu zIQv)^O_fbsuB@tuwaNveZjpd$$-)@X=@je?M$o)4`ecNl${B?Y58RH8xHrrStTpZY zseWhq98UhJ&7dkiJZ7)`X5P~@{={SWaL-l8wnw>I z8sgxUw5x-LHZ)^dwmpTzWY*?c;7kyu@PhE=GM&^D<{2zEprvBBr5DyS0Wno<}?Wo65cpDn=M)7pleGL$UYK0hd=a$fqX1vo0LsUr{8q!55&0i1{68U+;^t z`pmPrXJy3Ym8dwK|Njo56={f@??cG5-rcZZKIxPX z!49WBdrU3TUqbt0pC0g(UZn|BAw_2*UZ#m4Xbx$L_Ynp;2-&$&zjsSJviBXos$HwD z4o5K0Yod_Z5HoW*y~tyN_#Nl6omSO}vW6aXZXF)iQYeGYgj-(bi?;!y9Hy&QU^jS} z1U_}_O-iM5t|F(8?>dWd-cefp>Fd+Uf|ts*j0C^1=zd|7{9fJsbGAf{mvvX98c0sk zdU(ZD?zLnPUt?@5YnU{Zy);imcI(TG6&0B`crolgUv*-&7K32f|LeRz?E6|hlbq=H z2cIY9c&u}%DFQVA7J0ksUX-Qnncxif>Oa$;Nrc&KqQSB39inbkU>k8`__D?!)Mn8EJ@t&~g^sWD%AYAz9A57tuB(FYdpqXW4q0lv)1Gf` 
zE<7d>$kxxI4Z8?ct9$4-{BNUj4Gn4&6{u;tYJK-Y)&bVk0x4=X>v`YnLX{#e!;feG z!b2I+Z?1d&+E7KPQvcg-9(>24R$7?hcZ+!tc6^OUI3&x@eHty&JpwIi4jf1#V8#$2 z^D|)@nKvz|=sgx*Jbj=X9{OX(ulP^w<0Wjxi@^hUp{u?pnyCW><((*v@q!`VW zuV()bObM8F%nP-{bP*b@1r<)XV`D7Y{baim`|04-35v>If z5V+d6;(kpP-sw{eHMP_0(-mC2JIS1J6`ENlP~R)ZZXC-;!4U#-Lfg=z^*|#cvVe$G z%e20+=>Eplr`g$c**(&#+`i(O@uHgX>q86gZ3`U6RLM8VsN6j%&XuDOb~{(YWL4b% z+q$fUhO~3*Rhf$TgvbB&S0`?y^{g;Q_N@jh6nzohn0v35${45`Kir1Da{)^oKiY=Z zc=CFScrTgf#K_E!8k-pDu(b6<_Auvr3PGsNV?_rmh#z@QG+Z$c5|UkOW&U?CkXVNq zMLxd!?bmwpIY@!XK=Y~+`pj5EQWDy*cTNo5z_Hb+Ry+O9I3A1T>HlZMYao+37V`+! z(wH+^HKTR~w;_0|RlG&4wYbZ&h|{jlY!actCifUymh7iyyAzQk)`v`a@W=fo^gtazg1KN;e2DNnijj(?5O2G{BEn?joAC& zlbTb;clFw}@*6rHV}@^x#_Mw*hziKI4W_(m&S*Ieqg!KPi`U%en|lnY7eU|@5x%ux z&j_pSM|LA){l(+{ZpRAu{MWk&AjtSsr#3&;jB(n-!^W2cg00HW{GzOMjl0WZ|Og54c)%}7IS&L>2;3JMxL_J6Lj?X6h_Ks=F97Qw>Gez8m4 zFUb5Ok85U7{ur|C3(_qGvafmen>XFRvi-{JLPh^vAx}DHuy`$^J3(vqXPilPv4y!; zuf3IIwXzudW}t*wS>ycz|E0Klf4y(8_foa-@?k7bgAEU3GSo>@8_Mh5A6h>RtO=de zYqjfOpEGf_MEk_NpH7Zyt@@!et?!UiF6!0%>E?^(dl8(|gqB}?WK#@NTPjT7h~Kzc zFee6Mchs>Dla7bspqRqdKszrYxtb`1BtA zALMZ;6{X@hwh?l2uYj^~J)~mo{kn%u={L>Y22NZ=>gzA+w9d0h^UFsEI zkdd09gD!yQlYQkPNih-_f>FrJLS)_Tx1<9mZ#%`v5IrvQu&}Xmot~T`@7;OycIQOs zFStjFh!pK3@ufrFtXwG*+vb1jbstJLKvQ2AW|yCC*PBf(SZ*bBdp8w#uWfpowhAgn z9W17%oXl@OyST2LCp@yl74@1#ouX0B`}eX@N|~_9>!|Pfy;q}f*g=`j)#~4uiLc`e zwT^=Fn7i)Yuur@Yi(fsTXJS7j7oWlGF5>;=Q{g&lG0$t~wew3`q2wL|HbYiUi9xdu zr zw@1FST}5W$5g*?(EB~fDgWFXrDZ|lGL)BwadMvGt`8OQwxD3~X>15rewuZGL%2UTU z1@3O}+soE?Sd9N^Gj>`n5PiIg>2qA&-C{FcZkkSN^!NPzXYc2I)4EfoyOTbFrEdcI zC=EXzWj^ywsde!cEFEGe;)bSd56pHH4XRu zfqEb2ja2R3C8r;fp_{N7x?ETN+4NMqYaCpO3quJ(EjQ73t&5e+L6H9%QGS8fdZ*6C_GOGZaSQqQaC zQQW&nNvLOO?;d^T7Sojs&f?Fm;LmR3&;HGyJ;LkoUkj*5JeN7-+N+4Uk3fpoA z^}dhvT1kLifjEBj;nBZ!C*9{GxD45f%ya|P`;VyaA5j!{lTwY2T@VTrgz5-FEd-$`f>GClQCb@GoIfa-XegQVDVf4U zD;W$A@$V_xN5jB%4dGNw-R~HYiem|YME$iIcREm zX=(*)YN0i?vNg3T1)95tS@jv1!Wo#l8JK7one-W%!atp;e!9lb%+HVJ=Vun+M+@*X zU*|_(Ux`5b+s53Yf#%jCFyW|L42aBjMtgt|2J&onk3e 
ze1x^hfu?KJ>=9D>zGf#7F01YxkuQpsmwQd*M9DvP_r)IcmEkAaCNGX@TV~|u$PW2t zOz!oA=`}sx8a_SVM1MhCu)SW!_@;rK4+H!tA&#PZyWtaZUG3@v`nPu*&NDyZ_z`=0 z)$m5}Ez65&jKq%^nycDY7dman-%^Ap!2LHE26B=Yni@Zvt@etTtO);Sk{n`ZG8!14 z>$Rgw_D1TVh=ia4SWGDN3FJS*r7ky`_Y6kvT*kVy6`dUBn(ck_&l+24H`yx{2<2Qe zhs!QEi~sR;i2zTZC=QPIw;=E zXU}aVUPTuJZIpw*%8qjHA;ar-S267xAr)%+bdC=%M)~IHMYZQ$Ftj_>dv4{@aQWfu z=s!PO;(tvz-mc$xGE?ak(v)$b%$L1>rFAA-Da6nHd|88d{o?Z(v=ZDcst@xzCd-8Q z{sD$QP%t-IQKHba4T&*g!0V;sr#?0M$J~0QQf0qdi;Vpfu>9&pd45~XCV5>WlT8uj z#LC?lx$|vNH-c%Ss18P%Y@BYtNY%syinrtj9*h~BVqHZ#Gw2eixJ#~sb;2gXx+sP! z8A8wH;&a>PWc9gc&bx0JzX+;pJGG@8_F&D{3u%YU^b9ik-?E^^!NKA{L%I?SfL#wpju_Ir2d z*H>8Uvm2iNzjZB1Ks$SwKQD8L zrYge)G7mlG%l0-&xFhAOv=I`u(U@gD&dc`aJn#9RUuYwA7OxAv)lz8vr-OR@ji9aK z8;t>9mcG$os~CC*7xP%5^)MnsVDyE*=Q~FIu=z-d72+p|C8kHVoSXqEm21MKS}0=|4Cl!VoS@{HUY#=m0mQr z{maV=U}RqSOKE|Tkr!aR(ZvRJO}#lc^s*Is(u$8_{Z`{xJ|?PEp{Yd?e$~M{SkK|Q z?)7(^=57(zvIZLuWF1UJYdKl}JdAFEJNJwJcgS{oGfnf46Wb)2 z6ZX6oiFB*eQ$yXITm-E7T{E*ozlNSAk2Q!M`4t)>sv%$QLD+eu=(P0_DQAg@M6Go- z_w31qLgr<4UJdcRr8e%1+mOADyX2+6CCa4oStRyT*@l2)_LL_1*7y!KwWlp?D~T=q=;4V3^FG57mQ6h4jUNfBQXR+sD~}IQo3vkq zJPqv@p_|>|5{KA|g$}k8{7o1E2F1co1ffiH+F>w+jr|||A08Us{0-vCYw3aPPC;J> zG)w6TgkOY)kH48FLt(ZCgx_^5x^>%7S2twn>+=DjCVvi*c#KUAiHAy0?1w)o4Wi_i zA+-Vsnti2*?11{Zl1fiJ79pP*===l7;KZ2b$nFrLTZ26O(&5hru;13nO|`TV(JNt&DN(J`eVT^WcfMJyrXRGj@-9ukM$|w{Px)?e6xY zfvV<+3JQFr(miVlCeovm>8sL3j!lg*Evg9UV^#0k8U$-lvO0nUW}$+(jY}}JqH}|? zGK;q@564q`jA6;?X4!vlETcx6X;0d>(CB=V0Y`pVNOU}V0W%ni6wGcr8MzY%hig7HbdW)O9IRW5~*2<4O4;@>kCoO8`|3) zd%KLxL0)oOS2S3@VjsRbRAET|SGY){#JlEVk%6OWiOc!e)g^@5(8g%MUjh3it&g|W zisG->ph}xPqpTMH%yty*(69Y%wT`z4_#x8r-JFF*Mf1k0EOqIJ(nX$PzgJ0&)=l3i z{*D)$yQa%Lc$MiO<`BS(EF5l86#0FC_+aPZQ`d3qlDa~7Mdt2qs+s-)-%`@Z7jspW zJ9{79Fo=REa&CuI#L@$Dp2|)+vE93LJ=>CUH;+oj);B9gmX_!KUZEcDteudycB|R! 
z>KLMxUvs-ruJ5YkpEo7`+AEuRKbd!;+1P>D$g@Y!Tgsx8P?QrkVs5-1R};e7W7YY7 zYUg2lzj-8`nwiN)bIm>1#6^xjWmOig^1X(#-~6TIOjJG>&31=RTH6fg_~V533#yGj zinpjp-M}%oIK6)pu7cz!K4?_ECAI(9r{&;aHauWR{{y~p>2f3lc)1Qy01fGl=1^blWX4X@3X4X?-d6L-k zLy`vL<{cdcc-X`SY}iuu?=Z z&<`~Bfp04C%~imxC)xqOTKXA&Rkjm+3M=6Od{Vg)6azoQnYEB+PvVa?N+^EI7E%0e z2MmTaW-W6;IHPT@0b_d7P4$=A;_5G-009fz;D7HdXtG(%XtG-z1W^zF3ZiOJ@mG&? zfq+>y#cy`o;AeGOG}(PGX|m~9fWUwtO7RAqu`l0%aqnaN)zvIG<15f1#TNaU-f+mKDQnQKWi3Ie`yO0jRH^jK!5ikK~$N3+*L)>#H)(1`@#R58YzCO0vDB5 znYFBi;Ebg?28^W{H`S{{MAWMvlu+FFco6)o2Dm8rfF?T>xcKy*Aga>BGPku5&e*~( zs=j;9p^};=d-xSi_A)TAIS7mx$6amI0tO_2fwK_5fCwgmz$`G7`j1&F>^qz>{^(9E zzj}2AFf(OPXMOmn=2^&p-@r>Q@?LPv2&(73NM=JEMGRi3#ndjh>_jYmz(J7SNdhAW5YqMz|Gv$CE@HAB%x)g<+t^wd z3zg%bG;SMP`v7Qqdg0G8DZjacWSA15MZ&x9K@6uP-MpdGOuHs1^#@XZ*nA2)@bOk$ zBW7+h%SX`Ag#Y_3tB^e(76{olL1D46IU)0+c!<~p3*S3Mx>v+Vof#;H#HUEpglVF6 z+SLE_2eqj*QtH0TCxfuHr2HAA)4`eEI)%c$Q{y+`A>Wj=bag_J+&gMMb>=(Jsco8j z0Ay-6_w-ufOXy%253MZBXbnROfvD72t7#}Mv0nRyR+!%TQKmLNWoXiVUd`P6;LP50 zqtlOA=zJT`H>FUYf;lkamh?bVn$hDM!Zs|zc#JeL;p=`C;n(~E(vOc=^3k$p#RrYBdFoB9#7B6#W6SMaSEoFxVAAH^{xA6zAA#KIk8y#zCdVrJD?XtS(}{6`P6dZ8fC!72 z=95T4(0!hJ^;NyoMIByFE!=Vgv)lM-z(o^Y&LG^PgV|M#8F0~tm$M1CJiSL~1Ozo} zIUjMy=)=N7Kkx29c;as=i5xmR9e=H3Vj{W`ze=sBXfeJ{>O^J`k?C9gvv}_lsQ0*h z&1D{&x<^9tML|w_qqRg4VnmVq11}f%uFRXfne{h$U2r$|suBiQCHk^uMqXJcn4<6ecbYU)7bosCO8u^>M>Ar^lu5Ix|8@U_e-nQ!F$V4(|gjKi-5Wv@x&vVH! z162Y6M5kw>a=l85qonbjX`y;+;+C3P)3gi8v;yyQvb_GZ5Bl>HnX#Qn`T~9v=gC*c zE>LidBwld%EK*L`D_mha9P-PyA+5EH4m=bT_4O|}IiQVR{xLemVQ`FPW+yiJB@bR2Q4i@pXC`!XHil z?H=_##pxzNhQr|>HY$X2DMCKbl~^A9|87HL?P2p!(u{kdEV$Ru?Np7;hnmfWgmgW! 
zA1yBZmSNwcooIFVOSb7hL$c}W?dNw#zuTyG9i1upec<5lIQwqDoS-+)?ofEk7GC%1Lgsm~}~ zIyjxI++dqdidwirj2R4QkVx5#EJb>!Z#d$RcvSSY`x}ngzWEv?C8bu+*FNA}y;Ktx;;aA&b*LSm^5jZbSLgSw0Y!0GKe#W+iDuC%b|A}63iW+>7jmp0+0g3O z@kOlti{+$(A7&{~lOi?&dPsu)ts>>RHFqc_txN=;T=du?t_?tZ$$GxYi?E;4y`A)( z*q(FB$1Ek>wb|qnPB?IxAp+pP8R;j$U9S(BU%|v4MM`C9J>t9m| zh@4*_`0uBWf{<-`TFD;vqa@_~hXK4u-TlZOQE~YF@aytCZ1s@NbQMus{b1_%?-Z!8 z6N*vE4mK49D=qQlqg8O}5#;$3%Jj&;rr^WHy4rn7g6;gOyzBP&2j}J^3n{p-VS9Uj zezf(j*y$$POHel}L~LCgtM%Sado%*8mxw% zP8W&2VtGzkV5vL0&eRRRz1zC0)@OAvZm62ET!NgxquGMBpEzassNH&}t9=sPA*H`k zzpK{LTjACzU(y+b{Q><*a=%vi2U?zc&I7hSC&;6a#+Sn5ujhF;Q2qXkO>b|hbIlnx zcFI)9Qw8RJN~MBL%%@2tNbr@ z*G6yU=+l6Auo`Mv8;*~0PJCX~_Cuz( zp~1Y9Ts2eE+NzFHD^IsKIsf&Uci7Qeue`Ff{Oj*Cvg*(jFUvdqu2eQKr=*IHwRUsJ z$SuI3?hAfj;+I_7Gxl1-oUGg6KxVnyRIB;8^ioI7AuO_{lxf;_T<+P2+DYVC4G)i* zn^P!h__Zv1>3V;O&A6$!$>>hK{m87T{hOy>pa;EQ5E)PXd@|6U$G61>!%I_ap(?AT zSKigP+~#Em7p_?itya~zR%BW22Zz*f_XWtl`Iud4RaP8wnaKZ&l{k3~Zud~9yXmWL zw~uNp8foF2d*_5F_t7(*++RN_a$ls6!?m^Pn>;HFM4a@7F1O)5cqT zr*v-ap}ltvPig(4NO^I2S^>&av2GMB7RbF0G8#p+))bKL8)D{*O` zWtybTTms!g2OP|khP-+axqmCK!Pi1Ptx_wp;N!FN)vse(Bbcw0Qp(>dt}f@oyefE> z|MJ#iR_No_;+H*RW-DlftF5Zu^#WVft-OGVvR4VnPZ*a4-GP6c2Ays9x~Xu=V1K$mv7hdgX5BeA=T6G zhHFYqEdw6!T85yiv7WbIgi>aWhtJJ9o1C@|_wGxEr#}nN-;kyG`OqkAF$ZbDsckiY z7^vLub?oMvw0>#T^ZGUC#`7|_FQ4KnKgLM|_$f|0Cay9jP8uCoiH?(wjjLo4k&cV2 zjEj?wkE@K2lTL`MOo)?CjH^tHlTM1OOp23Ej;l zcv$q}qKOx6!HY{KUUUU7=uNyBOuQIPyqHY9m`%J`OuR0ec(IyzT`}=uGx1_K@!~M? 
z;xzH%DtN(d;>Ba)#cSflXX16$#Os=g7r%*@fQi@jOM2hp^}3VqXnsovpDexZT)pnX zdx1>ul&sO(@G@<9zczeR8&0nS7uSIs>A>A};L$qpG97ro4t!GwPOl3W*M%GD!riG^ zqjlkBy6}Eo_@*wLUJov=2RG7#yX(QD_26ZC@cs(}Oq+UedVRRKKHMn0`Leq{JX#-K zMq`+O7i|y{hTRlyxG4-17H$w0hKUF_hzP?(g&RbLVYh@EZVAK0gd4FFi+@K%~g9$glgkg%p4T{1rCE*4oVVJUT!-@T5h z6jqg511&oiLdmAJhUU6%RTa`l$i(g_Wd8!+Lhqx)eU9``*4DAK<*tvu#DBDCu5|Ak zLts+y_QUa{t%)1VxjWTvd`noA^#R=uR^)$6rbFHqK zy+h^8dEpJ79HHn(m%}WymDs2&+k#BQ$+{jFuk`Y*%h6Tu2cM85&LX?FHdXImr5rDzG_+e7y9ifZ1es&j>m z8WsjO5)O4B?sFanaH3Du^5k?MS+6ylpV8lwj~^o3m)SU22XX@N3>lC(7E2oJ>m0R$ zPA}|amO&S{X#AUIc~z{{U97%QXS?HSe!WoRk0B*8`S#fT_GO*XWl)v)?pNX5a(!KM z5s7#iHnUZ5P)YW4dIvl2a%vAwVfy=@;H{b-`X;0)D<>>1I#L!=A4dASZAYKr?Pjk< zv{svDq$$g=Y&=*Wry^28*k>rck{=_5z4R$8F3FwYU&0_)25C%_~%={o_x*W;jJ=Zmd~^1vmTS2}ns8$jX?fWge)7vJ=?}Cu@opOBak3^FDRGSbyESw0 z>I1e98eW3L-Q9=QuqL-5=wC{St3Bq;)6a1(k~xplrXPD)VeVg=*tmqnRq$S{@{6(U zOZhcD7UVInfUB$O#$a6&^r4?y$}20#Y$x@U?YjjM^8Ft$$X=oA(k+&;(5lk~iAgBS z@x?HG8&jLD8Uh?xL#-tI(!w)aXd`DHBG9F)v9(dWAnpiqr~L6<8^=CcC-tU}M$oOX zPVq@YYZN{-(d)Z`SKj)E9b>DCYe)yb|3VACxS+l1LK1%Jr2>ni_qwQ$UlHWLC$tuj zX9|PJ1q3ugazg$lnf*Yr`E-FT@1t}Gd20t+9=3BjY}FNuB${SYd=xlk78%Dd6mZw{ zSM_vqoqxcET-2xNch%~VXk(^(X>h@+%+^kv;IH&0>&KghPrW;l0#lzUKB+!YdXn|y z<(l3~!!Nagool(vT5ue#NCua%irT<3PKwT0K-pQ}_BaDkBzzF}b9;>+LY{p%^ajw1f`aBeE87hrIfc7iGwcv$upjWZdUYq`T2rpc{2Px zalw+>#;naC?;nloq9vy~UGF32v7{Kj90q>t5`zZz^Y7bDGA{5M(wV3(6La!+`HnF7 z@*S+@i$t>~(lWyk(#N}Q3F?v~sdHwz2R-dbUN_CZ*VEt*p zLj#Vb6FvodW$Ty$4;?s`NBHzTAkYIIa5z>>`1CsW@U*k?G4hNS?Ks%*J`- z%H=_B>!Qz8zd?U)dOn67FFiCJcCgk?av;+mR2pw zO_a~h09XjxDT|f}31@JaDJz*MYZftnk(m?r@{O1aCv6Hx=ni@W&>*)Sn@)T>9lw|) zai~_WyPKIgfxiu|?ij&+#w|fxF>1|NMRFhw{Z{ zh5q8rcZXx6T^(d`KYu@Z%!6O?PE`8SGCVlwBP-IuQ)rcsnT- z_1u3NJj3zW(1m+itvYbxZL2r|S=BlaW;b~o_U5AE@7CI1dRrTJ6K3mlkNb}m656ak zP)t=Xp%HqPZ&r?wNbLKnit^c9*|heszm;7nz5F41!jYf(WA%hn^@MZuVjdjw(LbVx zkA}|bK4zD+e#{ZgFHI-t8{@`atNp!uw?^}BGBxX{E}TOTZg~M*{-y^X)q`{B!!7mU z$@=h7eK-dkZV87cgBL!);T)0Sbw8|3dTa`Q*qQV^D)`~>vs3$qqquk_hqq_U@1)4c 
zlJ7P)kL8x;_?x8I*f^pa>VJ3vk-Rez9w0(E6X^mXs!KEc#H?@UCE3`--!|00Ap6-F z%JoQGocp}D=j=ty!X?ZCJ!XLsv%oAFFnbxZa0Ro#j#=QuEO28McrgoCF$?^dh3ofb zZ(tT~VirU&3%4)};+O?V%z`v#K?buRhgnd-EGS|Ylramcn1ws{W;HMiTK8skFbjH^ z1vqBmE@r_9vv3cyV1ijN!z?`fOfzeNS+K$^*kBgy*xlZR#GoT%(6Q>FsWIrB7<6$A zx+(_U6oYP$LHEX>2V>CVG3fai^jZvhCk9Q7L6f7=v}iPoMkp5=Euax9ibmf?qm|HT zO*Gm7jW$K2t;v|*Iik@nXtXyP?Z+P#8iGbgqS3J$p{Zzejz(xP8eN4(H=)t(Xml?c zJ%~n+qtWwd^cotygH8$P5#SwBWShgmpQS%Z672vIP zk)4}OG~(y2SeJ~J_X;ZskK$~)NLcoMV(9vEL94@2r@V{QCHFzwbw!Sqz{YCL&$~R# zKuFwIselNJk zxG@@=2X+#kcG##Z5AJZ!SsET(&9Fs?&cXgl{0Di{c{Acf>uCwfZWrc`^u59%N^l358w#9GuaCg3fiFUuM=YzK=#Om6crma8Ecx`>;R3LRVtJnGFAM1_vj?`1m8 z4T~n+9tI27I|%V&s5-gzD1`OY&5m)E8xGKTCI$bLGhZZPSy;ZKiP@Tes-=ZyAG@se zpko^6som^jtKLDRTGVMqR-PvW?PIT!{(BV2jek@+h+>Jm-VqkzuHipos_B1yUY{G^ z^=+IHLU@zUIlgZ|ecT{&5(sP76%flb{T=^pA`E35by3IvIQ1F|_mZG-GKjj@;Y7~L zjkl?naOlguGPl$EgvtH~fo3k!NL68~Gt8OEzW*U7VNqb1(Y_&*iSXA@ZUm~)tYfmz zd`(G+TBK&0UT>l%lu9HBv`&K8rz!;mekE)^Jms#KlBOrLoG_vA=DcBt>M8dXrrMi- zqxkGL-yi;tz1pFMcSRi%gc&-*cet6LN)^T~Kk4gAbNSvAjB0C#7YOAc>r6sNPMrkw zxLSBuN!d2X#)<>y5^X~I7j#Jj?q``d8?PqqDDms`xlAJ4Pkc2%RwP|K_GAN5UUH);O z+cVmml4;pLiQcnJj2&ySDKN}5Zj61afXk~^u$v+&F)pXRrt>7#?l7_aYhv?M&)iV3 z%NmMgViSikc*YbtGO`)UU-GP&i~IsPn71>r8SlX6=9<<7#64kTp52s-Oo?MO8IHMP zGq8tGc@fd^58Av-CO&bada2(oTZCTFRV#~OA-^cz+(oK*P^a6pMexvO~|IM5H<;@+{0qb8O&Q7U)&>jHHxD~XK5 z4Ai&^s#Bk9`#Y1tBWk;3(ZgS45Yd!}pgreO2&23>2qg+1vTX54af_Q)8MtCt9=vSv z>*5x-OTxJf$G)zhWs67!Kyie+7+Q+bBBE>D)6RQx)hN%^K7h|4Hpsqu-|Ysmw6Wr^ z(Fc-cIPz=*m!^#nMet`@vJ zGDDn!$vu9c)S<}`vOMyoyv^J75Jb)On?botCmKlIcl+wSzGDpdFJ%FL@S<|p_bcac zQU4Q-pJ3FhA}IMYUQsJ|Ej=c~RlwAM^Apy|+W>Vt_+>veG%F9C%Ov}e0?Y$+)aGu06pwj6&B6``4@vVZr6A;G(M<`c=mJw}f zGcdaXkTh(*6dCi{t~>nVUYC`-B2~}f2oA+xy-#96%y$0Dt4z%upd2JdK_~`MT> zoT)SBWs$%)t2&_1MHmr1JjIYPKjrvdpsWh40|CCkk4apc_e32BXj?1dFT>Mk%+sX+ zTlVh2mJcP)6Y83Q@qPkJDY?uRx5w@UG4EZV;`DCXjqeh9jUNfD2if+9glH+{#|#^*m4l zj2d9X@8n#@{BKiW#mzUm zd13DU6EOcfjX-=1R9{ydup%XY3njJ!hPP7wB7qIW>^}kD;)c8!5YZnYX5}uSTdqL} 
z2Zk-~%w0-Ew6Qx`#=L1GaKpU6fzj9v%=6$jpoXJoV5GgmNoFZaOktR ze|72}$ZYu~3LL=@gzY~lu*E&?&~%BAaF!JiSh?!TsBi=~UC>5+SBCN_0g~xbB$Q78 zO~!oFc<}OJ1}}&z;1%dM-a$ub4F+bgB8X_bF>sOZENlifl{@}hATNd1fhx`XHt&-y z|1EB{%JV8NCtX=Hu)vohzC(uirWndX_ss!#DXPyBc z&^?U~NGXA#83Tt9YFIG)65JryA{{~70P_|%mYX(X-dqRR{<{Q}mU1DY?R~)baM6|V zw}i-P8(~s+237{@;jjfn9*6`#zG@c0GD9DT?wKMY8Y%(S z=Sg5m`3EA$R)cI43hm_&v;wt~`2<+Erv)CD4})|U7Ur5;z_)qpv_IHNixH#1aU8jC zrTtdmR(7gRX2`gmw?Yp9&qV-SC;-3%@bm=`b6dNso+c%V=uax-2VjL402nO*NB0=# zZh^>k27P8=jzb{;)Ruqs{=VJ|0CxZYX|ZkI-)+PIn5dY6r8Mhxk2jM^L2XYTR!59=P19ys_06WF$z+&-qWVn+Y zGLS*@7#=%x{N%JUu;nAT3P~ERvg!YZ>;G#QQAtsmI$^A>-gvsf1%cD_`Y-B!9Mqg!w=Rd) z-w9s3s1%6mEQIEdpc6rNrjph=$L7;Ltox=ocsCOibYIGLYB-h%E_>>0CZ8rskv2)w zWky(gsBiT5Z(peE4>x4r8eMGM^O~xuf#+7T?xO4yj&(OF?Iz30*SDOzuRQbU-ZUSN zYd;DXbgD>Auo=Z!cyTdUc&UxJR$%LYqL+W%HJIA4x7c=@c-(_U6c&E1=Ew`qw7wg9 zOfSQ_$@xTNpfRNIS;!sJilV7J+Z}~m<%QL*QqEA28^p8lacMaW*#P2MIWt6RQ4Y1CX zi9nu5+dt%|<_M-=3I_%=5_(db?>;b+uQN-Z?l*DMd}6SdB`YSIw^WCCE%LMwR*x=x zm@adrJo@NvO{9Gd!teBXQS6dmq;ZA!jZ7u;yRF|#Z%p&;C#~A1ITm4G#uT=Z#I9ha zGE-03aFsG2P?D7Xsy_w_M)eHl%dREd*t8W3qVvCH=uNZ^?uoCZPG{P?pZ0;)?^dQ^azrs6T0^N& zGwbIoae+00)o(kQecjN4>%%t<4l||_`qe0{8gIEiS!pb8DjeT>o`5gIBdDHw zHf9G|rd2+BbR}Qxj1>u3$7if>XDq2R)`#y;JQZUHgy~rGWzXm+AmWeAcGMnWTO3bC4zGJU~Rw5OU5$ZTM$TOAMtMh6*r!&zKhgl5&Pz z1W4ly;>nqcGsJl$SI^~4Lw@jDYZ zLy$6(w6u4~tG=8Ei0BzY4iJMggc2Ywcjha z27u7E-4t@Ap!@j^{_E?l^)ti-Ag+`EvHDeiYn?Ku;0ybimY|Ep>Od)1!541OZCt(b z;+i_p!FQ$ujN_MU01?$7-|2GJt#xP*YBlZ0arjKWzO}sk?~XMzy_9WNh(n6gmrp{& zqivIoB;DuR)4gj*I^)rPNm*0(C#%0rkE>%1#O!glZpp1v4#`po^b_Ovn+d1pYpL}9 zvE$qAt`dW7p5l4$a1p8W+KbXW`v=t)4Fv4BDCDXBNZ2v+;e#YsrK<2UYshUp@|F!} zhE4yGn^9oVJrlA!+RUF!_&=FQe=^bdWMcBk#9>B6HHI4#cNG(N9TRsG6L$*}Ct3f} z_IgyjL+NFg+z}*8CrRzLt~FDf&+ajuSEhiww$mE5IFzCm$R`9TS>d54i^#Kk2%t>v zxszw8ovLl@*}VuJXI!WTHg?Pe5iP-e1*cuaoPW@ZiXo`eRZ+{7=OnvkP*Xcb={A+@_?n?kiheJ+$8wjSXo|Lz$R zu{4Q+K9CT#kCWzq=Aux*mjmqU7g}{Q1>!2-g8g)wKY~kB52t^3pB|jxmmF*%r3XjQ z;vP0K&($gY`9ACCKk`>)EvILTu@1pA@1iak|gF-r?pV^ 
z=ffFJS-)@2A6U;sZJ7M+vF`OwR2oUsXs`uq-l)iV>7*v`z?MQ zS}#v#_A-Yzq4lNJXDyKF^JBlVUuSGCw5bN_2^7-?V~fV`54uO{w4+NV}8Cpf+$ z_k%suML3jP$*}baRCuE;| zl>hS>;@+`SG>x*u;mpF{{5ORDz`9eCbRdPS79u5CUkasPm!(rXu*{Y02uaz~RaP)o z>Cl{D?a+L4^QFb6m5IcmMwl9jLeat6^52hn=d7Hki-ToBR{jaCTa>Lj74RpBIXS`Vh52Si0vu%4%4zIjg!AU zp1%tH8rAayIOtA&|I_@p%d7s2qn#(~b}HhI)2oB_z^6*RSKh;-MEEmOH#p|rL|Zc9 z{BBsYUrFNCQxOYr&kDSJ+()(I9ceVxg$q!6a6B=$r^au$UAWtjYdUn*ZfNy(hi8kB zfSsN7@kGRxfX||8F{*V%CNBS;bFFy0M0}rH9r?QFgUzUOvd z#8&bk_!E) z^XkQaqeg34%T&!kNL2P*UKYQ*8nY7i`ENSZ%TU zWn-x`zwE{HJV@bSQmcR|GjQ~MJz{en6=|#bo)ylUX5~8J-Y10Hhh>Tq zM;~=8J`D`5_a_>mM3ad8WfsNa4%Fa{uw9kc?g<$i7kKON6&TEBn|LI=Z+ub^o#{QE z$W%ex`-W1vGO1&;;ul@NeR%1Y2(5&A@jf^W_PpIky;%PLNIL6)CZ9fl(@3L8D5a!; zlysw{NT-0bh_uo@5D7sVq)WO(T3{gE3ew%(jvRa5h#8cYWF!w=Yqb%qccg&3=7u-*MKWKtdXjw5M);CSetX{g8TAC z+JqzC^XrV|c?TA3bLUq(&q3~mC*Bjx3H(?V zTghVV27`N<0pnx`96^$vD8nL$+AqvQ8F);+3ve5S6cF?appfnalK2#$des@Iz90iL zmI2aAZ2&l(g%(Kbi+ixbW(#m%2|%ENu7Col5Pp=4l@cDYE7+Ca! z2-FH5fm$P|IRm@d3<2!3lPSV+R1h$2>=bZUP|lBd{taN2b^M(TBP*xCN?oubvsbwfR`$<;lpAg4PBCz4V7Cdj->fUJ#%%;_ zO^MxV88|)A?`;KSw%(3}0as`XT`7)905+k03f2QJ0|3n@jG)HVth=KEqWFhLZU?|> zF)#3uJ<=Bp>45_)$#O&)BrgF`NDl+y8e;?zR0hIs?iln*wE61_kG=){Xid>a6dqB8=gDRZ;&kzX^g@?OZ=3*-O|V6FGS zR#3wQ-txhKC$%?5?=`_Cz$*v9a&W|sXsj&Tozvz~#8te&Vd^M=Acug*YCnLAu>)!n z6(Mje#$dB#DcOK`%-#~Pug?L>V0$Ruvp;9UFiZnriatJ=*4HIt1MVcO24eJfz-7t^ ziUaV|DL|7%V1Vbm8RKRgZUM6PuMg$ZtN;?;yv2Gxz+Z7VfMJyHB?t^E0dl200jTHK zHo$?39|1mf0$?;e50ExlbE%mU1@Ljz6W zfT^62A2OB$V!ld#>c80_b4jg91|VFY1N<-8{|88LEPs|_0Ojwn%N#}zSm^C$z*JIA@B%2Bx?qV4m-g#(zK9t!l;f_oIT z65zLqH_%dG1-9CQbuj7{m1ib^(`a?G^x5MKc%AtJ5ZujMzdgY#oDXW{>u>B9P*{HQ z_v3{SI03CszzKo7iUoUo1ZSSRua@{&6I@W3q<}bn6j{m501$kgM5AY!1Kgin^k;&( z`nsKM90z0RlOB4u&4?0Q-Xl8}0`=KE=*hO)UoY2^9eKoXlY606^!9 zDZrxogVs=wS#Sy#Tft?8``#^6r2sVn5`;~wA=qIO;1nRQ0g7LwM$cHirU_aw47b4roRk(g2rwrZ+K^03Frb|D7N`OZ2H@;xE{dWb2RVc0kKj_#WCk4m zLE{`A)cDiE=aP(R0z{WDsKB9qX0-rK*K$w5{fZbxt3< zO$X~H*hOn&fEzAT0-S1J7?_j^IAMe{z@e!KfGAViK{CTuxJotV3OdGMH~^;>5@qns 
z)Zvai;G+OMk#8JhI#6{PDmX>aA%KYF2@bkwZK#tBnfXr^Z z0YGwNXNLCULCh^m)w)HgRv@XkC9@T`P*#c+0MWJw;#N;Bt@HPYLtG}JziX(K>W?BQy6PyMHP(u&cnK-+b@UW(eKJf5V& zl})SLFkUOitPR3pQ@K!TUn{Sjw*U>^YJVRR5W#p@SDuZfd?AvhG0vd0nnZ@KazdTZ zmcNLNCD~Gn)u>8vr)jWe1ACI0fAOP+ZOlKN7XyuAwYpZ+mp|nBR<>?3_(ziJ!M}!FQLVmeD0f z%C*h+#c(?vGP3@u+L8XY^5R|}BO}MSI&;kaXK01LwOAMS%yjeyZFpf<&*|h_`{;P` z(dqp0iG!hlZ8#kK58$$P43Th4MqltZ$rQWy)^U%h4Ge6nv-5DwMT36-W>971uL;${ zBjQ|fc_9-noEL4jQgd6VeJeA-+36An=FyXm6|qnotdNeeu~HkHt$MiZfY!Xw?fdaW zoD`id?ES&to^-k>flRv7#Rg;tQ&Q>Qfet)#QJp_PhhCwu<)2;#Rl`DI*W28rP&nwe z-QrgFgInG5x4LheY;T*WLDRq6rmov2=i4UQ!$jfv?3H*0#&F^N?3KT_`As$hHgI7S zjfj(h{I-A&3e2|!3{Vg_dKdmrS|(iaE||~%92Cew!R5B_02FEh!u~#I3|FKtRD8@| z9m@YHOdI&E$O^jRZwnlt@b?pA_`llBlrKMlc(DLL|zsD z{GmN@7VlVc@X{`Sul7{?JfkpokGi}hBwRw9ghQ>CkvH)ly##DDQgA+xo}*b#?4U_q z$}A}_eNaSGs)%S*m6=mrttaHY)o`Rb>}rE=9ei1Gr|~W=xqVZnmt=FKQ#MQv@!gSa zU30c{XZ;;{4Tt19qU3;O-=Od572?f#Ew%OGwvZo6$NO8VO8#DBvapdbuojcdB6R5lzUsir|0sFBsme=vU7UFK>Ra(n+&|75)-OYd zg303dBdXdHuz4-%R|bhDYQ~RnYF@q+rFJu3ZQ-9UWMLERudqo$Q+c`Tsj9MBsq#`t zG%BT?U$3w7ZL>oOiOw?{nZL@cR<>@(*)|)9z}lyagrh22qSpEfpSPyHCdaEouD3)C z;{P~ZeVcIB>Cau~O4q38HZbUmL)B#@T4|pw)a#t4!^_sBTwZZjBC<3;T$0#i>klOS zk|6Sm2{2)oOEvx-?rNOxyV4}XZG1>jCOdrT)%Q%ICidm*_-LIzl5*Hz?$t8eRdY2aUq}?`$?oU-vk+w)#!h5-U4%|% zA1|-NQ5<1W{zT=(N(VFbKm#LFQa$huCEN4FA6r8N2cs#64F&az28Fe0-m;FaZn+a) zM>Z`(8|4PGk#Yar#%=_<91|XjmxhnFB4ul33NuIlaOn~q7ZrYJvzpQ1?HH30?N5WWoC^x{RanpW|)`agv?-f?E-p z!`on}z=WT)n$kUte0KXdsn3uw{B=!U@&&d3^4;}Np@y|(-F`RXm8m*;qw)jxJ(7~2 z_ykPM)J)7AOw3|T%&(Z34Vjp2nV7wqn8TTvQ$OREFfsr9j6cA{Jj=wq%fx)m#7w}< zOwG*9!OSei%>0U(*^rspmYLbR%hxTaD>im0Hg-HVcK$`sdTi|eiy%a7>`iPec3dn` zTr7E9ENxsYb6hNET&zG`tVCR_TwJV5T&#{o?HjX6b+hT*x3lVI^SN&qU(@Qn7S^5; zX44m8n-*cy7iF6kWz!d9n-*i!7uTK=XVaHpo0eeHmt>okWYd>oo0eiDLeJ$Vj2=~& zR_Z`!Bf`t&e;Pgd8&ojai0QDd+(e0yZMvT>e4OBcs$oq)jN2%fAPDK!t=Y_)%J~ld+u4-B{bCG`&C3pTA^N zeQUdC{{F=$>|RfDB8>*=$>+l*?+Y37mhstxpZb?_ZR+s5#Ec%vDalZ=5n1H&$H$C* zkXMp<$VOz_fF?_5f15boCLeDT=Y}I0Lf6~G{WkHuO}y24|1D#X@*-~&tQC+D-X`~N 
z6PnwE={9+KoABQz;?4g>t`ux+!gBUKA!#zn**ylA|}MTYd(zy zK#Q@{!qZ?o5fca2%^~xxZ>Q@iw+$aeOt|vG_03@1vEil++?iy=8p5K8Te!$zFVyD(O8pEmBqPLPo94YQR-W(uVYu3;DoW@PB z11zovy3hGVOol6fmv@1emaX^R^ir@TFa@uwGjI~PT}0~wpG9JkDw-W+a12dW1ZCXlTWv}@6bI^S@n%8-Po zd3L+Q3;9Cc365Ot)QEbU)R+xt^<2F*mJ9)-}aSv+aKrSJer|!3`*#f9qp2$$Fm!_4Llltc};T^o{15k~~%V zkUKNQ)`PpFEJeGgMgkS^??u$3Q&fU`Gx-k9E?t$@)B3)rkrKh27ORMhZ#{&DtHs5$ zI53B)g%6djIToMIth!XMxZp7x^ZGkG-u>f1J}vnp@e-`3x1%w>X^k%!rtB4T~WF(C@X-p0a) zGvjw{doPaD%Oh#T3&>_8ywzbRuP$gXF;!>0;zC>CjVpJd35Wjg4-)my;W5Le4acY0 zHya5A!G=9ogzu;)BJ%9gC3^TV);BkzNnyq4u0{ z10t)`7E$@)S`K1C?@5PTFR9lY(%(}LoZp6_g$6er_N!qF3-2HbRGtY5Z&X|%_U+G1 zRv0tuv0Q}(i0U6s{3xZ$8rh9`?q+VfQ10&D!hy5q=@pe0(kU&j z9HrW1f3VG~x9@qDB{Jbx`0%G^W23uy(u~zr;)a^qU#;u2DBg2JJ=Oiix@6C`wZR?g zp-Yn)!5O4f22>H1a|zY+5pj3x9H%RL@OysY9WVJpqu0{eu!HDtxWd^*&MQsVCD!`q zCRbCa4?ndq)nYC0?WIbjf0K@FTus6KD25tIA6BuVyZ#f^)@#^#0pnr67 zcuB!&iyr8zPY}w}Yaq=E`sGtBIkU=vZlj@U3Q`7O%rBr;0a(X$%2A7Ewp~qe)CPDyTJGjvCPF3H?~3x280S^i;$IsvK7eH(Q1q}W_-JV2WNIM74& z#1_N+BEHt2_%`Zb>HX`$5HeuOZ)31T#vm+(Wgy4|AT06fU?o}4)MUy^vH697SRx>n zof?RRgY~!so9=_k1Xd>xu*DlhC7Sus!&pnsrDPDu?%SYGfR17y{TK+P0z%vDfKXYm zay(O@C*rM&G~pu9`=1OjjK~r+Y*te&6%H7*F@d2MC7^c}3?&AB)!Z(!ATXd9EXz_h zFdzsR{XrQRa0K?uE|6yGUy6+c{)w$WdaejUNqOtIK5(2CIKGtwr20%hd8`gXSqEff z)dLyMU{n280*+_jI?iH?fx!s2P;@EYoiu48=|@2z2;f;&ZE`^GT2swuDzNKxhu>k@ zWKobZohC%fOkliiz2^l&^BAlYVVpn!6L_Cz0CxQbX3GGFOuvAQ1a{S<(*#nv|3(*s zy~78*d#k%>hl0R|qZFv8)}LAG;ZZX7I0jauzdj+`;HMs%*KhaKwz{IL2b0P!6oe|E zY_6k~Q>eFatJ1AD@TaUiv>M(HzqvYe;9AKn+l9m0P$PN^u!`u=Eh$I^+P;bk5C3_3 zxBzn(Vzq@7Y$jmF8E7gVlISUt@M_EMV3&<#nN!1ezOM7lj0wmr=6H3U>g7Q}x{&OB3^5-^h z$qb3Uza)KN_%LsdCnh*L{O+jgs?At23RPAzQTK3C7iln*6`vVaCCF)`SW?B$WnRSc zCgoc!3*iS(_jMs*16js;H!)Ysr18k_dnLu2R zdr!)G`86)oH(tBubKv)$OBM4KP1bh2v?MOjV~H)MacI^psuZy7S)HzF?PjE%~M+MsF3?jGSx4($WOW(Fa=%nzg!z@ZT9f%YvHPB5R%Gg2m1zPT+h zO-7Vy9|uT;%Ihw_rcm}J|8po6pX$SXrlSm(DE!$J5lk5_OpB`S@mjmcoYq9)gy2E@^?A!v 
zY>%LmfYy-Qx&*_c=R{l|pBI#z?w8Xs<(<(iE+5I4=Zxzd)Evub8aS^H(<^VJAz|SOw0R)f$E9*p>5g*2|InmC%TZrR&Xb4M~9-GNH$$pW1&bmK^~2T za@6o~IcL7`9~%#kI*{xQfJVsa=UZZ0@>j4>P&6iZcmVypM6&Y!z5~ny75&%uIA*OlDVOOVfW{*j2gX<)Qhf1#4bqb z3D}ATE_mu6=f(lko&LPXaKe2>Maqx&)eHGV|n=#K(3VLzHuX!T58j>H$kT=Yw zpq9=~bgIuu5)~1r?C@hPDLO+_+4wun?mA=F!I#)o_R07a-MU1DjHX8sDW^2_jup?9 zbjAFXQ{gfX5KDNqpC4b8bm$MSmS+_65pz?+LZRB zCB8}BPlQ9MnhDMVuWoIL7iW^s5FgQEAG;=VTqi59;5e^5w^TW`4 zt==A(ITgU+A9~cT_OydsS{7}YHGi4?5yR&rvR9TDU)t{>JGCG9tvtZ<`$RiEq7m>p zbL&fqf2jH|wTmS%Ea`|$;3GEaUDb;(x(yhicb*_+iRk<=x<7@nAv>QD`3%)24YlDN_?Z2+0|2O$g zFD%RP`%RSjb`3Sl7PT0z_1Jd|6vCb?L&kWkA#M-IsLAO0z)yleXs{aul74HpyAf4T zdISaABI%2tPHk~n}4e#x@UjW z=MRS=mtytApF8(5>$S$yQ$I#aW$I?Vw-7p^%(vp8WOa;zsaCRG%1{o#WuI#L?IW9f zid@5-8ZQ}8^(l<;96Fj89Em1lMiP9VxO>C?++^JCrz|J=L(a*%ZkJI%ygsz!)A(}S zWxYo*kwey-jr_WyA3VKGhke(1z2DJYtN3KGS4YobqR~dTF(oBmTdE<%{l}2Hjh*7E zZTW?E4_qh^W|vZ0lE9w<6L&}}KWjs8_I?dBN_VrrY+48qjbgc3^U{%UK@4E`BDk+O zh$1HwB+e%_9y(mAIMnP^k9sVd=_gKUo_EHpIK?Rn&Brm49IIds?`G^AV=oT+W)7Fg zu^;Lr6?ptqYIZ*}&g+^s^BMT3qf|2ZcAC{S|K#7p=ML%O&;N|9yy;-Toxbaq_%icX!4}VyJ#mKWq2H1xIVnxXgCVu>~r5TQ7c? 
zzIllHUgQ%aP2FB2#?b_ae5kj*j6Bc#=zZh$NOBWtX8lFN$u<^`XT zOWSy3T>1XxZ_`SLt}Sb!?PTPuX$9+&w7BfPWS)(gR=9bPf4CF$DOl-kTfNg--&s?k zJmp0pZAMc-P%`rL6~1d=_2fyQmi9csw)5rV{IaI(qL6|2|Rcsv=tos8ar zPb@G?w!4|_)_Z<#ZV%pl<>9QM!_3>u!uk%%RX%ymq^7i(!(^lyW1^&@Wjy@wv0<#$ zlZsN3gt(BedMqQ1bBMx>pcf_g5Da6?kvdOynZO}dtvu9zSvS#d`vgT87S50jNJuapdMTMpXD)+Ci3mZW0eU=hN=r-d~J*jP1 zIhG&YlK(w0U69>dr3+Wg9)BuRP414EeKq-B%`}5WL4*pB(gmbE)dccg9~g7U1xy-r zcGyO!FV$*#c$7`L92qczoQk$t_X$Y_`S-v{j5w{w7LFUKrcUhdj|_$^Jvh08TC#oH z*0h(FYC*d7UwZUkda+b{c64OGci*FopU!w!i?(@!kk0tkM7wRo=5lTK_EPPKG8%G4 z56(9n3PJabOX$lDnLpuGV3Q~Ykr|hi6ISRb1WaD31Wz`Vw%INlh_DwP88qld;&U`l zJbv?235&vbmjL9SDU3PDGs5q1HGc!P@9QkpPSrZmkehmxy}j+FGJe%oTOu31T-&rV zqr)(GWS|19=JQsOduYs|iCfWC{&>02lrKt7mfET9ee$9A-Bq0tS3H`#VI?%z z!#-0gJ4Rnf7^R4xp=7RwVdD@YiHC*7>sQA%ba8#PI0ad+{)3VHff9_rpQs+6%IDoI z$ee0%+MXh~-D(AeQSmE{@S@t~iGHP=@r2R{N>t(2z1i)|r0Q4J_ok(xwO+oae02D3 z=FV|#dGfVJi(a^zi*b-%*+Ti@R`U;7Wt}j`jzPX-U0pv?s*BY%8?h~55kJvap^xB; zP)dC*cPO@?cTC`^bGTShcW=pZ6!!{MTvLWDmFzwJX!Lt~+%{xC7ayzZQ5+v#8{H0G`JVOOneKF~zB3Gz{N+*({Uo*2D8rhcWCEfekV;4~}r}@S3fin~oR*Yx#w^|R>agV8v;x;e6X!w-|i1DqQ%w#VF8%#>1&@@%a? 
z>=$=kd@PF(BWI`cVYFv)mqf@z8*G$R9vYNdJ?YZQhh%l8yMS^qkd6hos6$PEi0EG% z#3#B09eOQ6;EAS?hQtv>Hih<`s10&=X@lrX+h7{F$VEGB6qJd9GM7e| zi8hD?NdBR_1a0CU!RpB`pbQKo^cNoqdm!llp6m!lXNUIZ$rALc5?!{$9}0ABgE+xp zChQCN6c}WTj)FU2qujOqp(@=a7$f!teYdfe5hkj>1_O%fz#)ymL{YZmkd|Sh=q)OW zHqfE^ZGUJJ_?aiV1UDiiGqxVxdBAr(&V%KNCGl z={w5YVmX2}(_JjSz(vxx8EesEqw2>op%EYv@vjZ`llTIrLxMCRqL${T0FR0Qh8rxw z$$=YON01RPxDOvG#L)&}14DB$keTM#C@l$=J(FX%C?%Z|WCTg%fKM{}Nqoti3dYkc z39Ypm<#dTDceS%PR;rUl*#cY~F&hNE%}o!V-j&JBpT$#0K`dsZ6pz?63z z65<-dMBb#|WL8&0tLZD72zM1*2W;qi7k`rEHzg;@=*QEU9ixTP3J#&7j8^27blwXW z|5|f;l041CriuP54yE<_>(?bG3h~7p=UYa#E_o?UeFis6X6dQ+0kz%1DorUIt-ntX zIe$wCa_X3HSce@P3%@k+Uiy&kJ;mdj?l$l0+7gJMQ~93YrIWh7o4A`V4F12R2O@Fe zI3E&pk8Wz;A!J}OumbPgdty6Z#Y!IKekbH#c>NMJHUF`sQ7i945ASqrGolTN%My4w z>wt4AR4@7r4ksQ|+n})Ull|oq8E|$k#gG9RxiLT2d#b73S-i8eQyiJ33%6Ko| z`R-tpV$$$S9>|O~iGELdlQjtE-$$czux*q?yv#_jRt3o1V1;QES`JIFWi;SA4CgNf<1cAs;p z)O0sUb9?4KK>BA%l7sq%6{P&RX2;`T9G)Xbb2ZyoF)*y=rxu^rPgoSE0 z_%&p|7b!A3ZK8g&U{}s9)Hk5+XIEMK!bQliOLC|*p-J4bWG+9f_GDJZUgz@1L@@^m z^_$xH37Z`*Lqwp|&b%Fmy@24}=;WK@+5nhi9;(VZlgfr!Wm1anJTm*Nt;*}1=Z|`U zk(95|VJl7)3*Lm6&J7V_08u3>(U!A~&OUdbaqP$`B`3FveV@(-qxZack5ypmv`4@1 zV`y@d>-LE2?h6vry%%9>iRv?WcUoLuWqCA*=eK=b+2k}O{magC$LNI_DTaL{PBsT0 zj>}2=lW4r&uNcy?s#ry;s-OfxsvskfR}k^wb0B!|GpS7P=kJ=`U*K(f@^!r9Nhl6gkRRx81vwVS*H3VC zd32m^>-%Z)?bi2E#5o^kiEY;5jbfEI&k}Zy9n70H>DyXJ=I}7w3kv#UZ``&-Cc30sPq^XXEgR{e+ibe+>b(^_=T{D zkQ=zBZt%SM%H@djuwMnA=NtX(r0(OUs1|d2m|-55-&w!?&J;UbV#YWDZdcNDqL5SK z)~l-`@xH?ED7Jm5?vJrW$8(t>a(#JJOGI!_{>{j1j%}Ha)sLqUwYaK+4I#|byEWm0 ze-RX4=~RrX#!F_#U&H6rQ{$j?B$Rk5)g9{e;xf~x;+LOhx&*v@S~){#Pen=AQB%e% z?%rsE_@S57G~DC)@5W?PDD=h@GX2STO-*D9q3c#kCHs86xu$MI-F!8Lq%o(Y+U}fz z-xI$+uL``mu{VWDiA}+Y#nF;;O`(A!w71otZb+uvP`LZ1&?}ruNM39T%0l1Ew8Xl0 z3D03`Q2v$%Nd$yB`(^|R2pC34Sw=va zPsNanvUKNCw0nd8pzin3*0ciWutTKfn}9m2LFg)AyQNf{>Ot^RIi& z)vTyJQWN_6rf1?Zklah=m!)B1JncmJ-4MHZ<$==nGI*scNyfW<)TyUT=!n8q^q_Ar zPrVSr%yxpxYc1mMc^Z6jVz-TY;==&D;h`(H0VB>x&4bNY4|k2>0ZonCDwP6!e9FBH(HX_|UG_KolhWt-*UcdawN 
z12+%PXzCXo+zhq%*i$a)>a6i#&&od{L{2m2! zkQ-k9A}R&_=o9)y882bmsZ1Kq(mR~2FT^-)?*q|~e*8kam@!40E8{_{wdX-gEagFa zj6>xgs7rHDM2Xh3vx-22cYAaM$q9rnnlCpF(kqK(1n9p*^iI;p%Vv_XAJn|5Qv_H z2BwqYEQwH|f6wAU+c(7ujs1nj4|FT@wxMBx*aTCXqV4ROq5ZDOVJE=!&!^Qfe1{gg z@(XRs6e|og?hMF(Z{tDR2G)NuL%YvGh?e4}o$%lI9YRc=c^ph>ou3)7w z=|pIMtiMe(@V^~No$E)N%bFGXi1rU&^Ji@FA4xqLGt$wA_whUBx!jO;=nBnumU;rZ z78!y0f_J)B@Wx&Bdvn~;4eD5EQjj=TmAd@mtK>Tp%V+$t-lH*-Lv%K3Tp1ozH6JX$bNLc78{2wY)WRCtEE}Q|?A*YR-uAOiPce;o9GN5%UalJ=t z`c=vjG$h&_zIcRe__i`od(6Bup^)5}g~cHV%jla(+}O8{x>j<2}LL zTpoH)BTC{Z@317^{GhR23VE~@Zj(x?)Zb5>|eE`X0smqif3=OL&&R>(leN7 z(*;$vwQyhdtXzfn#EHFR{e{t$_BbMsk8=0D6&@vJ`T5_lZ{$UDb1#N0cX$OFT)$6A zG^GB%*<3dW9Z?rxI_2aXEDL{F$6~x2bDlfnjN-4FL=Pl&c))ySJSiJWMZ@)QQ=9b4 z#g6kV{e#lh=4H|pOgi{Cv@>G~Gqc0dj$0XG>PiUfCc&g(pj$te`@t2d8kR;dA4T$9Y+b2*XPON;-0~ zz>Pa!rVTq^(#3s#xgFk>J1|1Po!83Mnl`Rc$LVM1n`gIk0l)4ZWPw?0ej`K?heO|GADoAZbB*M#mzlIsNI9+|pNJ907w zJu)BOYaTH>OYu9IQk3)^#EenP%E2uz{uAT(a9dF-#w9mX$&E_j^6hAhpXhY<-!(EN zg)25o#>?E5=$+bo#6@%!%h5ZHpzuwOMl+oWR6a6**4qLK6b^0+(sx6&g}!~b<4Q{u zqVC*fJQ&yVF4Jk_4uOD2l?D5h=v~UEv1$@iULsttnKJz<5rz$>F@qSDuFW`gewx^Z zYGmybIOui~N?3+MciSeG8K#;(q18DI8IMh4)cFyD$@(4g&z##PAo8{efsgIxjB`9{ zR`;g78u9Cf|F%sy7MbqWKAaLY#x@)oZ<}b*sYF;YOnJql)nzb#D(k?mL@>RnRD4H2 zb>(@RKb~r01cv;$Yd5cmWe8*SFPr@Vj~Z63tS!K)8-81{t&DA0q0>H5j8m6U@~NzT z<&)eq@Ae6WkL?V}U8cL7x5mG#M3~>TyJ<$N^E1LSbidy=Q3b4yL8~JMzNCHlX{*8h z?WbB=+C+vQY2SS-4O~5&rqv1A{=T=Pe5$QOo!moxYdz@m>C{K`yeNVwI!Znk!R5n2 z=w{0dJ~z-(i>n9wT0zE29f(TL|dOR<4xm3rYLB$qCK@eAe-4RGOAqJFvWWv=5_``UH= z>5z`M%!iXD73<3TBOjwE$huJvSenT(jYsV2&7WOacXd6c6?sFmQn9=eEk8Gu6yX>4 z@ni2tV#*Mj(GMSc-9W)7dwGRar-`~dC)4d)eVm&3z-(Uuo##*XM~=7_zaRJ`yb!W$ z`z5@w)BepjMD2ZFRo{W{`F%5&OuX)ha8PwXDjb`h1p}2k3h&7 z;meBBqs(oGiwdUmpO1UO_j4Bna2+IBMs12BRK~fK7uk){nzfN4W?$l^T#7GVbyANG zIk!ogpJz(3>e$z8thG{KMu~fB7jkd-X)YrA7HQ)|PDFek-B*+x%VUf7tNZ9Xr;9h) z!$}Sb_dr4Awm?1F!W`(!6WfT^PhBR$cDVS`lek7911Z;S=-J-q0X;z8Yy?5=h z$An0a<~}>Mdy1g{AiZa@#2|-LO3A1u$l$tT$Z3OHrnXzNnMlLxYMmh{Y$;Zxz-=v~ 
zOdli4|C;Pqz0x18xK1<4SGmhofwFP4)SAkQth_9Ta%>YNRa8FT*@8i{Y@Sn(3g;Xb%M2>fGF!+dpz~g{$RrjEH5R@Ee}3i$>0$MHbknU(SiW~PX93_OK z2icAm>zAaT*1bI?K)R=a$zyG>S&^f~R}U`^8HtfXuQ4HeAnzr^h2s}26gAT)Xd$`{ zmfjZdR`nC~Fc7nE?(Lky2J1yd0v1Z_@h9k)@R3Oj4$2nqlJ)f4F@5sSCAd8hC60?S zEDnIqJ}$wFmZYB*w8LHj5e|XS0vc|t2r{(WoalgqZ>M@=}q>BQ%sf?ZO6$EcFmEr%;p+9AW%D~~yp zJGv`)h^gTQHq!myXaxNl%~<=y9}%UizL&+c-l{Q=c>k>Scc>)m_q#;7*<*we@9ip^ zCPIw|v+qiTx8!R@MY>%5iBWwnI%>W3l_&k~Mj2_{%Fvg8l1wR?SHfgNhDRnNym;p~ zq|ssr(D~_4znjd=1y9s@#~M7W3%)h!-8Evn7zrKp_(jEyxc;S*sY%`sOG+-ia@_1%UIvz$#-kDOC2E{^qq+4DE)Ziw2Z1%yZxi+z`u4t~Sc z`py>wx0fPOq!GEqxls(b8hdV6vlIH)WBHv%;_EnX^ZplAef|V14+DmK9*oVsPB zBI!`>C-BTnDB>E{8L^i_`GSJz0F0m~>YNs_@?Oc>>>RBHwhWE>#m*jR4n9v|(|Kw-2KwKRxoO7~D4I?ir}TJEuQMyP!akBVxnIE3skxZBhY-8(L$P^u^n z4rKj~F0%eD_$@E%^q$U&d9TF}#A~Py=b%Am9l4a!IoAWdYQGxc^Xn?Ekh#OjRiQ&j zD7NcU>DkQT1EO&$Z#Q)TglpARyIgT9r*3K0W4W@%6@dttH=(-T zrUc^>gN7wzJM+A6d$ZNX07|jMY0j=9FX2FA-)B|N~ zeixk@{46mk+;=}16Qxnr5z9{}6SI&1Yn4CSHB3*aMTA$<#l;pERDSnIr$hdG*1(^0 z8~+AlYca?VPyf7(y0cb+;)_<3^N-ZNGZm92OL(yHUX`8nX&5tuhUll3!Q_6eiPi1#jP?!MD!ovwFZM~}QtQ0V80b_z{4GdLRX?uvNch{Ei1gn&{m%OUm zw8CEiXEF4(MWqtxQSEpbm<)!}OjdS3QUOx;fsjGjN|0=}lf|iZwItNj0K5BfLzM3BglhQTqLma^nLK>AH@m)xsu^{N8k?+B)_yV_4v+Fvl;?n$0C~x9|^i4V6*- z#A&!(dpReoE_AA!$LHMmP2oMh{FHKXx~Kci*eab#28(lsSSIjK*CzX%N*7YX-Qrx2 zhmq2#NHOrOsm#NObGXPPERh@qso_EJ)}p?wbJ5`yx0a$WU$28ocFxfUYM(k}dyOBG z9q%L)7h7anep%}0q)7Mu2BTlJ_$Z(3ai6@*FN~uLQDYqa@ECV;%&VPU4!*MXMLX=E zdzb7p;C3h8A!Ce~!)3%}smEKi>Ue~H^_s&vOd97I6 zrtvr7+Vjp^kNEbQ9j(G2eB` z&3r8dE13>VpH+}i^ay}CO;p5wM}3*#EtB4 z#~j*Trp2_G%r)iO-zdJ19noD#2@aaWhw-WTm&AgW7uPTytl*7xm}sUX}dU zV!Wp>0X$;djWu3$cSX$Q}_Ov&Ci&?3rht_&l*^_Nm$XR3?eB^8ee* z()B2^Pj~5V9v1Kf$I{((&CgFW4FgMU-}GvJ zLSc`|ZS zFL(Xt*w`-Ft@GA=7xqo3y1LY##QV>I-SkgN-Czp;a+k@qJ0Y$Z^Nc+IH=Kg6ghaUZ zcWcAcd3c!i-xy~6+33H>Qwzng>T3DH%9}SKkYDCzDuylIXH~3^scg>d8t>Yw2}ZY4 ze_gXYAk{IGl9U}LGONCV-IxpW4yWq8=;zotqA`}8dxzFA>K!~qNQuePzbMn9d+qx*Y&q%bf2^ARIZ^(^UhK@qg=!a zFIH+Aq>V#ely)2JTlv(;wH0H9$ESsN_;iFf!yxl};slq-_#(@hLV}vj7qESjNacIN 
zLB_>#7H?fR))0&wmekMDO}o-@+*|n3QuiOS-wJQt_YZVp#Xlo5bF}uZ|5hLvi|$nE zJASQ=36M|lrU}_c^?6y|Ai3_~%x^x!FHCL8Kj(Fee&%pwIi3+@;KApCQ(>`>IO~Sv zW*50ia-nXAHk}H*vlZNg^|hr=xcVnH%>M^LAu4F%eEYF0}cC zGrEs5eg35!TEH-wNL}0eRJABC&+?-F&8Ma@UiE(n6L#!l>LWQ8rClMzOE+VOPi8(N zG*cW(9!?OJR8AQ0O-o?+$uL&@>*y~~mfjyvX6?UqCam=j7w2rV$w$RwBVr)M)l<9N zX$j|OG;bXFT8o@gMn*1KJpeY$x_=yFUbREgGg|iJ7)5j>%X!U*U~^aHWgg$p=w%ck zZ`73^+gDHjy(zdNz7sTow^A4kPW6kW#AvhZ$#2Dzh|{DEJ;0wcfBZ$Bk^7^x=$<@% zVvM%zT*Y&m0y|x-IPe0jV3DB^R$g6TB&`ojl47(?_v9^-qor-YC^<&k1&jckHyD8~ z2#iu=v?IU>bn#%67NeaGMxe_DBk8I2e76 z(Vhn*(5-_}R*d%k96KEi%#JQzO^)R@Mx;@TWXL)JzcY%L8QG2xpZU?DIFly!o7k8{ zl<7QLi$HP|8AW7L1+TX)$@1ZEX_*OT+stFSjta)U&9)Jege)oH-4ld>u$QdzDe)(C z=MWh&p3v-a=ZJF&T2i9WYjj=Zyxf@>?~>Ito~@wQGL?T@a?~ep`i&>>Lp#m9nY?q4rKwMzkw$b%MMZFa-Z(sf0F=UBCk>rBwDV>hzDg0h?70+CDRbwo;Gf&h+eCHQ@v0vUvM0uTg$%+$hxF#sqAfsAFf74m=V{FTsF_@)yH zC+IPn@O1SIAs)z&=Nn`5R8%Kzcw1rL$4)bMZ13E9;GO`uXC07R<$Q@6D5ofj6pIZ| zWkJH$sUTqqp(D1)qxAgybiqeox+jS+Ff>LgQ4gvmi>lwHB8;o^IDUp zMX5IL-@V3PrWEf)@(SCW{IKe*=N<|1>1r7jTFwuuELJ8H^O!waYgHd>TzS*~L#?lOxGRGup9^U*W+r&{5MBZ`D@}-`=^6JIUw7S1==T%GE7R++Cp; z?e+x(k0h|lnNQ9}0JIEKR<8Hs^dQ=cq!GQnBWn*; zzkU18<>*Z5HQ|$ir!J^P@S@d_^NJ=Z->*w~aW<}~)os-%lVsGhIxc*J{p5|?zJUaF17$O&0GyR!bOVosp+|Gtu z1TlKpbv6e%qvyHywv4`bDJQ*|jya=`|7m@Lpk_y(!RtIGJ2A^#p}9OZfZ6TK)>gUx zY4WCOvaaZ8tQy7mESJ|&peOMNbyD%0wF=~@fuy4~jjb$yShkNLDjCV40CLQD;DCoa z>rL)$vsy7b#S^}uN@_Xcf2~Slf`Xq}d(<4hNmbl`(R8%Fqef-DH+ydRQ&Hi^>Jxm5 zVX8nI4y2HxM<^H~f6)lN-#WhT3H|27(p@baBA&zJ)WhAj#ThHKOn)yQvC+uSWQLIw;pLd!WkrYD5BOmx6bSsjl_ z8jOXDXwQ$dAwn*Vplw$~nYyUY?>Mg}^XznYy=W)PE3CsiO_bBYcE0gM^| zp2;Mh^LAJ!=SSXyz^;}iG{R&eD)pB;H0rYs5;?eOk>YE#m`EWxz=@+iF4L1Rxty2d zz==I@Vho&I00=r_5Kf;=4iyNiI3rU0o9#oSkQEnDkvhO2h!y`9`6#e!3wT!p5M&X( z7!eV`)TEnMo!?#}=gae0@wFG2NROB)0=xP_SZA(yOul*_#i<2|1G^#sLy{dS1Oni@ z>5<~^J^=7S5a)6=VBp7!Q!fXJuk{1)Hnuks8k`h?LHCRgAU^$jCq1<4eGOFVSNs%# zNG2b}*Y3+jd|w0*@QFRZf;MT?-{^h>z`%_C|+go!@`vzJ?w^q|*Uc zb@m76;7*=957qt~$GxuJfm?yOdK_%w3N<6B`A6qvzVle98&>2+*}0NA?&KW`un 
zv7So}36q0=F&R$=W3LRLgQm`t6_LLel*G!MzSUlz{kNJ}e-%Nx#cH>?Ek6alKKf4; zone*vL`?G>(~*kWHiM5dQE%VuIMnzrx0Jo}OKP6RQVFQF=fBHvGYc`QkG|dC<#XnE z#_KoxJKg`3`QH!>(;_2Mtonnp z-(T+O!zM}h6{nusd-iB+IXsYVBEE}?R+W6Xx3OP z%DLrcmhf<;R4vBP+VS}hRlm|++e^!%GwsT!obHufbwLfbVM{_j6@S*t8`>E)^IQ?2ds3`c)Y5JJC6tN6`Zg{-sU+$7ua1-mq0#WCukG1ip0Nm-SB0S!;_PY0hr zC1y4)W!du!$BTvT{HzN?g(Z?j(Hn=O;0LS^h@6h4FF&Mr3|M8GauFAIZ`UZW3NN!C;n57>N@yTX=(8zoKy+O`i>>D;K_)kW7Jo$c(U8JAW z0M|vxIrSxV;rsnR+MkTjAo;MYn;_@Z(TuK>*%8@<1$k{G5V-D)+vQ|#_(FsYz zf`s3VNv$vcO{V`+$UmQx9N#kjoubKz!C@t0UrUEWXskC%nh^Q{mtnNcx^<^6 z6}CYY+TnM->mK+I^IZZ)luGH3ot{8s`b4p?;PLP(?fO+0j(OSdv{QV7n$dZ*Qv&8s zDlz+>WxcBIc`ki6Yl@ZYV^{TfOtk*Zr(H^ea*0=8qNP#i06WBv&b#laEv||=?U)RZ zTGImQc^um`)$@M9{%Rh{6ZZNZgyUX2?H)wsUXAfyE#Y2m0H~2$0JY?MHMM)S5<9x) zfv4|r?Ja1>bnE3mS+kOck5ns%ShE@AOPnWLypVR)tC#$c0tg>KoZl0l?m4M|h`i_A z6A5$jAJQyXq%-E^&q-RSV=!ap6tl=%sIiM@v8rZbXw7EI8tZJ6$Vp39bgSaL3&|VY z^|Tk~d49&QZdp9kiz-Foy;Pca`)PtgkWU-0w>2#~51Z)D9Gqjk&~Dcs4Z4{~XQ znoQ-~T_k1PUAkiq%|jy&>6T*bHfbNu^}V94j^le|PMa6iCo7B5C)*X>C+m&gC%b+) zqP!_#PD89?PE$!57w%g|L!f6Z$55>*!|*SiJgrZdC`~>J^!g9ezy5Fc=D(f3ISmI!pEOcbpY*bBT=;nvEkRXrpEUfi zPx^E5tj@ZF9K*lrkILOMVal729=0gjfY3iY{Spqhk!JW8{85>1FwBU4>ybGP#7#pW zz)VGu=Ga|B2D@lGMG%{q6hR!c2nebGkH$yPw|CUWvY+;-2JD{~&0J4#X*~+d zI`;o4`VS)W+jN04u74X`r;oS)>R(gE_Hb!T_~rO(4MJ1#)AqW~o`*2io^dZ`zTEzH z4$UumA`;HvGu6MK6~hh4?KeJRWtOwA{j`P=D?|8Tfp56fTE0X7?TRp>_NP1lLVLlA z?&t5M_Luhll%(|mKl<7?P8+a0yUWx5qsYmd& zbla>v;ghdr;8UMYHR@bb?m*}uUUmz|SJu)mZ-@+?7Dk@xsDzF@)lj={G|DlI3r%%S zuxVdEawPu2;+b};ap)^Re%U5?7;v+1QuGVaz;Bn5;V@XDe(K2Iy!n$Xw3_YNPUV

*!MQl}KM&h1x+kTtZzBkDcml0YrYIL9Bp)z63k(w*yA3kyUxa~gEU*JOR>s$4&L!QJzy zo;Qe#Xa8Rp%vhwxI(cEMN9x5_JuXC4{dpR;6Xz4BdN`{C?Y z;D|+tq1d#lE`@M=q1gU@QZW_vBAT#TF6E6@GaJ4kWocc)@fM%9AmPI&3o{fADxc=Jgao0$hax7X)0Pm=6g1>v;ceb8k zrC8z4+Vp;#!f8WJZV~NkH?}jKcdu-&S7&S*%}E~CFYu1#5LYfY(9Yz+m_$9tM{DW@ zV)64Ye=(`j!~7na)XZp;&fNs$#@f+65BGaOStFKhL%)D3ejfi9nJOJ{?w(z$|24mT z5awU)_dr@|PV_-;o*m_bG0l3MkGsr3^HmbiY#z9Lh-=>?`ca9ME$?}x-viXRLj3O* zp}hEQ7VlZc43i7-m+lE;K-}FEZvjDW2?$d_h};wAfH1lzER%Eb{o0p$thFoS(h)f{ zJWD0iJWIY4krTx+HMNP>0-6&RhogDvF_RzC!zRyl9+vDe;gysqh}%48r{VE?Ep9Wf zFKsh#C~kA}%txh;vZN&T^JFc_<-;$6A1BXg#x*r@zNGt?(eMmUeo3#;ic7DU`jT#m zA+G|9lvkMny%<4U`UugN^bx$cbOv;J6~=IR6;#k?6UU{iYR9F!6UL=ugI@MMJ@Ftl zk9{RLifMQ*0Gz1^=661Mm8z_eY4ZHS5G2is6`B0PH={0hGH$gL)G}_y+M$F~UK-?@ z+B-6C@*2<@vE0hVMx6cUarfQdEM$YCh7K-R6iTlhyW`{Y*Vat@bl0I<#wev{W~!?L zVGztC3(~!WLp&?chu>W=U6QB6rTJ_WKe~QwMibW-3^SrrM(>uhl`R_>Ar1-V{~5O3 zn2c6-L;l3K+2<3l zP^I-7#q=L*CB1c6MVFYQFW|ZMi$1D%G&?@_TIohAEA0{s!hUr&2LA%<>E9(c^K22> z8%XCiFTK)U7!y=nB5}a$D4#PPJ|Ur5UWA|-fANOdOTSKNuBLIu9(AdA7U!D&ITHtk z*W%$hqzWCMLy##XcC?tkm~neo#O-9e!h#I_{*>^$nnoAq+50ZQCgosHQHB@?yDV=}^j{Y_bTHslwda|EuH_mb25cSjVyu^w$u(YbMw^Nz8-jGUm7!oG9zNx_diq){; z$Jilsz0`jUEfcmrF<+38d4Uz{B>NZceF9(M6Le4>|I_Xq-oUWf5eFgrA!qIcBuOAl znAm=}J>_N>V?6Y(Q8$HC8tIhfF1*m$(v7TGWa-z*pnt7pr8tG7&%?KXQu6z=M_u}h zW%_f|`<+4zllUbf6vPVF^zyDam^6u+CS?ak*?$xas*ID6BQ63c=g?+ZCagbij~S0ip{>wns+{uB+aNTe3%x@_0)QJaR$FRHP?&iD52FT)OLB! 
zHvg$ri!IxPmC|>v&~VWbf`XzMl*sQ1sERjf!`-}9;hD!*EV(A~gx`W|Oh4-SNTRm% z)F+9c6J?GCsS@EoztXo=!!Ql~m2FKvfswCDx-yoPmc{0V5TAU4&Re21uwXh9CZoxx z74&RTP-dTEQSK0Nwv%5PJ1xnzLOUfaiD_`gRkChhn{~FM*oJ-n^cS0g=CPYMr|Ah* zKYvm3Y>^pb$#JkR_`GLU0)0d_g^-Aux!$%t^Qy)x`MqMJA!JQtTqM%5$zRfbR{N8A zL1D^IfzT-_L&|HRxCMP`r`5zQ%S*aPA?9{D&y#$SrL)^#sPNI z#evu3eOip&8m?XPQ>^2%jP-O=|5B52_hd^qdF|~wy)?;FY&Mo)ai#n^+{D`o;Sx&S z-G^=Cbg_Kh3VUVw`GXu090enZ#v}XG`tgkNU_UcG1CEz!x^U&^H?OY(K1~&9i_6^K zO|;DR?3*^9=6v%VJtBLa4BnxR1bICD35}w!LX!P~Q2)>a=s2kpT9p=5`cG@n3BAUG zi%B3s9c6*g%2p?2i3NAu?u6o=Ux~#dL#XdpAu4RRi3u_^vjcukmpKs=VhJ67byrRNRD{@zAZWpL6KM-W7+ zvW-~iQ$~}wI z8CC?U&?To4#CZlaHFBD*cc;#Vz65uqB0KIvkM{4ro_3hf!&FR`>Z>V&*HWiXC=J2bZWCXLC z#esu6l+}znly!p#TTk{$C@XESRPx#|_24c$_25bTn`GzK(qPsNMs&U36zF=g>A-*w zC~INseIBA7eAWvTN2v#K>CH*}z=6Q8juHgNAULGJp+r_4^$8r&;J^_uCrJPYrF&<+gs1KB=URNE37!q&Jq?aAs68y`ZSRZ+nY-`bu^$@;uGm8D3it#@-Y%{~iO+L{bUhQ+3_UbaC3$-Hwi4E(!-jq-v zAs0Bj`Zx1qqYTeSB`N&xdFhaINsZ7yL8lJC88LAjifvi?Y%eUN|P~bU-8l& z>xf)FK8Sm7I9U4Se{XnQWAvsZM>xy8qg{teP5FDvS%Lx4D$rYA+OnCdVNll|_{LXJNG}Bh$ z=x>`*|EOb{R}-O-=8;Lf&)pal9Ue_eaQ7B`VSUxXYwveg2@7a%AAuK@Uv+$p-`t8` zvGOjgP)i`5?JB6_#G4oNE57l9@y>g^tR-Ot-c!x#rUT|GDHBp;SRs? z_;Kbc&Tfp;3hLo%P2-27oLeqLPN`EUMniw7eWib4JQY zQU-FhBj#*0%w(6(SAC3a845*`-X}%2ZW|+>b?c*sz7e;3iNAL!`ZoW$AMHRxf2W8d zBSeRO>U)+lQVwWa^__uR!$%1}Tgb0Tzv~L`3ML3a=aaNow33JY6U)}5@I(GM^PcNO zFmsK@k9$@EXA|oks>m0<5b|^JCTmWezj#Sz)>{W2!cqo0@3hey#dIDYDd9VbsXji! 
zN%(jxVzKJ~17#5w7kE2Nx9X2W(YS(@swOJlc~`{0+V&gym|I}=P6{DjmAiFgb>6Yk z-H5mSZHXa@l$iTBobRAR^yKEY0~vXNe+Eh zs+HII+w`+?RIRBbIh?|@A@7&BWqrFpGn2+Xz3{_2-p z4o(C9h>-L5i?|b3@IR9pnYa!qc}1#8pRdc4LwkQKFf{17D9fucGjM3cT>OhOg-Q22 zC)p}GzgQTgRWte`JM0w`+c2{V-WP84=D#7*+@xGgv8j_xE)$=HI~UZL%E5K zlL$V&RtldJ>q`)}R7e=-8&pUkC`i#H9^kN&QApmL+0>nGNQn>a?n~J5QnY|&P3Gp# z4ScuV8!p|glfi~@O<2D#s`+b)CTT@M+J9(Pgd0!Nr%Fv%^X03gyd{lL$d@76MM01%Ma7ZM>0R=3`$|*R#j$>Yo&*?23Hd9hirDqo`7fsJ5yA1SfoHPEA=|4Ra&Js&axx#xVe`;sBn?cUsyA2F@aGE<5}wAi zg_yOPIldzW`ur?Hmrv{a0ILUJt+@d$+!uhQ=?QGmmceKMjPy^i+aBd=0xj28DaXL# zWHU!@6ed@SGEV&TSB%#1eUg37l{J(RvmHTnn8OJ|XkQP_j5De5|y~ zteiBapcX3bGM6WhX@w>H(MqMyEBZqKBCXzKzPdMm zDiV^u%Z588qqY_loYm!ec$|j;JPA>VJesC?YZH~#Wd;q%4-Pn-KWrq$_Npcs=S83hc*bjFPOV;4S)&9epJBVwZ{ zxP{+!eVV;o5wlR8g5FzFy5scMCJ!h0Hn<6F_uK!% zFrcN6n9az|HE`I&v}RTZzJNb|-;Z)`m;;KPvR!5w&g80Rn8eDjq(@Y2|983>I1v7$-DTtp_JD$8z_qq~;?aliqHvPR%VuukyQ`Wc5jQJX%bD0$mdUM( z=I))p+HO0RWSgT!L`?Iu(eRMHxq+U3gUxILJ4M8wo_`#8k%=Zk9qcAHUvmu|+QXpl z+h|*gNrZ@jt(gzHqfx7v)utahtb+fA97uhT8B}t62tu$_d z?N*!hNuNGQWD#BGOkBAAt;pJ5IA;Ch#t$c28QcH%Vg8B}8ajWWxP!C2jfF0k$j@oq=iC~VZgSAJXHDAhoivfi?yLeAK+eFx5?Scy!#0S+wa2}a($&NuzY7cK+9Ig2@9?&V%6d)l*&$VhG@Xy@GNIMVHcM@A|sk2KN zaZMl6EHP(${gb)$DCE2ejyv&s>CYscG1}(CgX6wJHLOgasOU2J;-Ugp}NF zZ?`XIuqgF)7QV!d*PmHv)rk7@w!3%N|7O8=yK$FNYb8ag#|;=&b+4-er#@`p*ZccSie=s>^J?-vO> zc-pRB`8bM@df{Mup%U2(hYteJu8o)E71QGqU(qtZ|LwDYpE4p~n3(*=SN#U6@u2_6 z`PIPlNab+yjV+&h)s|%g48Qv+d(f;EE*=d@ob;iFOcGbJz9NbC_bp8&Chxz5JoWP= zgIWdi>PqEakN{(XbS{xJ*Lix1@7Ycp-(VYbJ>oHP4V^i1YlV&XNK?yjo<&m2$go)` zob59y2}&wa|0X97dB#_lNH0>6$GS&75W|K4jdjn4ZOSJmy=2*JT(fNlm3zwPbz1TA zSyI}*NP>#?F^OHFckGz-MV6{%DXf-9qpc_gNRAjvVOIym%M+jXg`yX!(g-9)~Afs z)|+1Z4Q6EtM)FF7ziv|Bd(B$uWAy*tWuiOCE7*GXP*9&@T3d-aJvK)2Rs4;(yJw9U zV+l^a+~6;k45r{R^WUV)pYuh(P=z?rT?9U14yFY{d1^8RKPowlHEdhv;0&8WT!-jn z$;K%`jI}}==3wqUreM6?DE=Y3bwV%#jOa7k>tlA2yr^H z#8?yM)j-poRi<8~1q^4v7)b+eOY?9YNMizhnS+JO3UH4&$U~eA>Il2Yr!>&`vLyv% z2MD`lfNvqwLfncX;JXeO%BC{~XE+q%sygQ4vUe#{8%hDhi4TNbNqib;#4%=6E*NWq 
zyG+3???4C)F@dwp!7mE)aF6&vh-z*C{EsG@Za0WT5inwh3A_ACfa}&Y$;K@PjJ5g< zkg9bq?uZ>o_d$ru5VJZO>=Eb%)X|8Qzmd7(6M{SXHPK+7fPp_SXy?*Gn;Dm;UVO_P z;>4p&*yRQoMLZzeUf{zBV=Gnb8}1PbKxk4&^9Yp`*fwVh7ODU)n_?szvGg(4%0b;M z-!lgJ5v(h{`Ouu&93ET zNUCL?>w&p7h)t`u3x4xyJ4vt1Yti{?UnH2J(#UsNrZ;P7dowDEwPW-%#fx_(5mSgN zx`h4k%CQpq@`O5&TH@Ur9`#j2_9$kFl!%0w4?Mo%Z~q3>I5fBti{ za<}C8*nUvTN+POkQHB{;dr<#}7P`_m%bxEcwxSKbhSzCz@^kB&V}~i_J&jhHRYGbP zJsc+SiezoY98G4S+BV;FIoh16@*^8<*|@#Swd?QDZJYct*|tP@^igx*lMqA@k6!tlBl%FUr&OP8#w#Tb207YOu6ZaqVL(2-?LNZNBP+9 ze91fM1zHl*73Ugy;tBn7s$=*nV@SKDDU-tys&y7M#RofXS9PH*Z}qH2n@{H)7`@h6*PE(F8ZuJPzh(KI944B1bYsFhOqee4(Msl&^}|+}ZhlUqbTR zyb+&S{_4DUj2%b(vZy)NJW;q4dmQpuE*RCk72!%GENrOt7jo4(=m25Gr%<3W3Uhq; z*T1_PI>L70oP=s1w#ZE;4wD#_`s4TK4EF=&xiHESstAt6|AhrBi~Fz4gC_a^R_Xt2 z_hnD?&&Op-DE;MsU1CiQC7bAAkIok z0->Ga$aK<3;ijF@-v2X+-#kvwB4D5^m=UkEtC&*!|8DWWL9$Eq)4LbcF#p%M#to27 z!4)6E|G#?v56%&7w>72UwbS`7OIY^n+&NHPCQZ}k;iPn_n;eJ_c!uD$aG!h@YbrZJp1SgKFgX>Y4M;$!y$V zEYaUEJ^pjyb($sc@9JM0X#L9sLL2kz)BZGLIlr$`Lr;1XHb?=JHEf5=`7ROZ=Hcn8 zUOjf6s>ljkL5-F)Y-_|@Pq%z5?WHW!U8lMncHnIx2d9>M)O~^Fh`($fc3e}X&)a3T zg=gk+r`pK=Vtv~H6|X_LE%0jN=P_ObHk`zHVSqVoVWC_S{(fCty5Q2^H=BITgyx8~ z9?3byglTO{L>G~mFT(-lEP;l zYhhN5BND<5MMCS>h+nuItax}FH^aYY+Fmu0=1%!Bt#5r%!>uGdjPR_}A?fnDz=L#u z1)cpRc$flPoxwx*r*F>fxeqZriWlR}@2H9|AhbA6!<1NY+s)l)qa1#7{AMJ*FMsz7R z#F0ejt4>;#<0`yHFT581!Vp|k)ryrGV>hy>pXz;V4u0W%gKt_Dr-m2VdnnF2c1L5{ zKNpFoG^LBHBo{3+sUvCBmBh&^$(n)XhOS%j(<->*8~Ykd;+|Ba;}cW*3g)%(2i*fH zWmJM`>M2e-{tiKJrox(o%j$EvOx@6d$wp*m*oY*(H1#fyG%;INYEKjIZOBQbLD-Qh z*BZ$reWn%SS1@O>5`SfjEw1f|tWnpLATLps`=m{`;K5_+=?gyU>5OTLD*xe^-D+Lx zqt2$l$nX>UUtCfbV0A1=5uCC>!^@7~T*LP0W{9PvzF}pJnms=h=(aRzTsz=hAqt(% zHKFcKjvS@A;q?idwHbQdJ*{emTVQ;Kx3GDISM}ZF)!Ok2)lJfYf~OsKyISZXE&9wb z_`W)nVk}@Dty?wpIgLB^LEx-~j#qbuJt>uYMj}y_&)1`?W)YeJ>ix*bu+NuzXxWJ} zhg%5JH37Nqk3pzt4Tf@uu8#Dkt24KFw^`yMy^o@sJ|9mwF1U0f+Ae7x2XNy#f8^U2 zU^hnrO)c5Jz{o7t1sUI;hZkhTuSY{pY+iJq#6LpA`IfaoQ-cv)26F#YnU}SZ%K$i^ z4<$r;v9M%8V-VyWL%NIvOifw$1sw8GL8FZZFq^3W^`R^}7zRwuY=#(w1A*r4n0nVm 
z@lK<2KyKE?@6l&LSJxaDc#KgXLcc0>fKo*Y%*I(KT^Ca>fMEhE@L8BBEz+AN=n^~% zI1|E^_{MS~y~9l8YK_DfWH23JV6lx4EXtJt@|>B*sI#lTBmgA;=pB>M@&KS#o;HN7 zL}5r~njl}0HRN!Bbm4fw?A=6r@O>mtbsbc%Fabm1(k8eJ0IV#5h}gaw5;@Pczv3^yPl#2J-=( zmH}bv#aICK%ZSP_0`xv40MZfe)qN-Ql!oFZh``?%)K-WIM1}2lPqF3z-d-r6a%Dt% z_0nMsQRLB{<%$H7N*vt;0xt+0K*V3+q@a2SWFVlD4G_?T6mVwc5Ay!{7ep?^2j+uE z`)l5A%y*%<9LWI4Xdj&Hntwod&^mnbJZGCyyI!#<`=A6NxjX{lzH-UU!eW%w8;HpI1Q-)wwP)lgn(8s3r*;r zf#zIj($%*r8Q89G03G_$3n|vQUXm(pYQW!{WW%uh%mjhT=4Q0!-LG+Nq zgjH|=@Ik8GE`7faz{4xCyQYA9Sug`nqZvWi8Hb=ww@*L}d`6MpF^2d2OGLFA64t61xtX+BH@Ea7u0&gkpS)xEUJJ-shf6&f zY>HMFjpW~D*xtE15dcPEU@=_vQG@2E7+6Png1{(L86=PMJ;XbPk{2v!vDRItZoI!Q z#d>2m0dC&@K6)ikl}oXfZzKTa+|!zLg{R5@h|v*{T(7@Aw)0|aYW7AiYzSQO42MQf z3G{(M$JG-6Rt1XauL)KIJO!+V(*)A*bgkel^Cdy?Cp$!86J%280U&p^j%e?gY)*23 zKlKhM4@m%gy%m6O)gUF(J9-UfFH{FEpZ-S=fVdS2X`PSlm_QU1o8x3i&6DDStmDTY zk*@E+Oa=xVMecxA)FA>~nQ$G_t$UER%NovG5Uf{Mex_x21Pj816F_!Ug9YKQ;{hOH zU_o3>U9{jE1Al`B`vgZvL5-J9*v?BZAfWUI+)C))hmj0~T`2^ueUyWC=lFSsmYC0Hj+m8T{3{`McX} z^{I{yV!UHk#K2`MAV(Ce(j9Oyh5yt9xKfBIv}{m`fRbyxH5o7yw>I8%{q@3)6&3#M zX~zt78;O9G>HXTy2TKB~!vRzxxR3@kqXSC$6=>{$AzVNNP`=Z9VZ_n_T^%5pluR15 zSa7a5c6Q|(9|MD}`%+9X1FyEIKn^yMi1eD6fXN6AgR4w{*>eB@O9$c%WKiUW2w)3Y zy9S<61HM{HHkulkPiALqbc?m*rz2lE!#u#N!Bxj7I2XqZ$leCiLF~*y`U<>Gh_)NbbRie-knx8e1`leC3Y_Swj|%WBi;L{6 zg2|XEmE7JzYiU&boYTNy(iMA-F*nP#Z!O4sgxV?qjGMrkaP3<%#aJT;BT5}|c~8QN z%ul?!LS1#T-M$UNxMu;;-Xq^S(Si6;C@^NKr2wdFBfzC-Rv-|`+XImdD!^fS{RD_k z-GNAvKL?EUU=P=HfV4mc+3F#$j{h6Oqfu)(e7$FBe|tN$4H1yE;5 zfa0RR1Q=7q0P77OfSu(Jn62%LDS8(|7vDrh&*hpYi(TjU=5M+$#6EvPNKlKUhoGS5 z-_o4ueupkk}9*y-@v-pkkzZSaK)HM9rm zm{=W|wxII{Ou30sb~RI8yc!^$x%2%~q|xrtdM-*%{(d50`SU}OUqRlekI${GpT$?* zjDEdiOhTM#&jeT%A_n75K3S%pw03p1gqrZf4!*fsnHg?XRk;TsB2+L15$q*aKl1-R zNv^evQ1dk(OcybD8hY+M+nA#NuV!U-Mlr!Nf&+YK;%iKp)g9Cp8 zwodoL@#PqgLUxP;5T7hjNUs{Sv^p8&4NO&3MUpn{)#w#U*wU~PeuMu4qmC}gQk8su zHmt^YUFozG>SP>#bG)<*sXqMq^V_G}P?QS=h%Kjb0P^Pcr1r+{Pjd6KiDNEKut zFKw<4dE{{#>p?KZDd^Xf(syV<=}v6mwN#y@J<;8tCdUtkTO^(z5=w7H8(kYnoGsxS 
zGV_`ESUtS58D1wGZ|oAD(`UXDkJ^KMCU`bl`Zghz&}hY4^8@!ufVohlvW|OcwN}#C zB?{U*1YV+_m682TrP)f($;b9@?>-@M8A45%70janmmi$Zv@c8;_B-s*#jFF-N0mlm z1(-Olsxs&7`q?-PKd|1c1`lv;AuYM#e5Ff{i8Br~sj=lV$?UN})MW9=vd z-laDt_e|XjNa+F!O9m8V5ru&92A@atk=-5dr`^Dbb@|-xLfgur-PDuT)%a+ui;&*0 zzI$mt6fHbADVHXV$IpzcHvSh|Zvhp>`^AmZAh`kpB8^B&r^M2d(kZE z-eIMc3>erZAwKgaS=t~M^2Y83JKGMeg2V%yFb>XZL;f}=5pxAR7yeTn>i%Tr*7CJ( zf~h}cYEx5QI)3@v6r1$Id4lk^GVo~P`C7K+%fJ1^<}1vBKMurenFj=8>4a|mxnABS z)3#oO{1~V)Q~aO`8NVZkjOW-c#|yeYZZZAgYjbp1{Z;7`^=DVhz|X$4FL9%bFD*nf zUlg59EtrL=v(NZH@#{*v!r;Z$TK+I_wMv~CUv^AW8`6wZ^3_;Q3olPxs)qad^6MV6xDVm!1`HcT)hvpU^nOu@zRI)X?j$Bf6HYn5N zlI-*CGG-*d({59x*~${7_&pnOL9J)VxXC@ch0V{P)@Kzifvn#EEm^3`QnV}2h3j^a`#HYWy*?v2cKAaRwPmJ(n)z-vE1my1j;m;@0i42ClXeQSV={h){ z9@$%tDYyNa4=V;lqMqPHy^SUw#sYx%NP4rp0|b2Z0w8_>74C`IYYjhg4S6E40>FMD zDaBB^5}R9;aYM>+BX0@|pK=7|vcFr|D|Ios>AnkYOk%jbzy6kF@hm$d#1B*L0w`eI zK)19+mpkeKg7CQy5cIPE(7Xxw$!QXtmZQv*Zf7&0)1xC&oA(BLKw(5+^wl3?eERp3 zsP!=xAHdu|WcJ@t6K=1K1m1d#twIj_rD!j5I#TwnJ%bLc^O6!)y|m^|b9cCD9bf~2 zzjm?r@!<1;HafWbN1tTpyi%>XD)jv+jnNN3cJzt|{21W*Nz`$bq%3zlPUe}Ve5tyy z=dvYsnjGpTTB!7D<$G}*Z#|l&FM1ClW|Yy}K|5-OQDNy!k1*at$l@qw)MCt_9RtIt ztaN5vg_@RaJaU9QsL|z4pZ@^fJPE71!;YrHSRcc9XRrh7c&&2)B@y8f(E}FaP>1&% z$PD9kq#S+q-L1NhFrB&=ef6_|UUjLRC-Qec)MtggxF}bTE*M zsOFe&?i|6VK4l}#+b8w+L}q;)r)_H%{o}`kr)+iYlur9~c*T`7Qh~QdDs5k13+~s+ z%z&MS9Q5T$rDeevD51PMjRdunpW1PEPz26p67M%)AqKei)Vqe2p#YF?OD)KCq}SHS z=CIiHvXeQ%A;GCwdEV&hp{3K4j(ml0^f~v&)fTCT%sOC6X~Q3ZKex>tj2hrRIpxzx zo-g8@1Zt+Qa9;YEcIhp<%2ob8A=`Bh3TBZ%jQrC0Hnocjo#0h^bNFwn2%#)ILr^50v#P^FfG$NdV7JKUr zmLYHDNh(65pR5(Bddt^-SW?EHvC@=b5(27Vt zqDsB+nfYXgYEzboGWhSZ4OlxQh>X4-9+{wU%j{0iV6Q0d81*6AvHfRu_|LqUo>B08 zjklMbN6w_fvP8%1ZIgd*Vrj=H2HB2nMB)F;-56~Ddb>&g6HNaJem0(ICDnkeVCw%N zeIx%*Q2i%pE`USOjUnWJ-V8~1Y(s^$%-()n4b%Hi`1~h6Xqi2W|0|H<2#%%Mv6XF1 z&nT}f=ums|9GtjFWXOmBgaos?so8m~Ybd~7T9=@=J_pn>dvKzN@$rd?$J_xP|GK?$ z1hirgx|9Hn-*Eu2KfBV%{qn=d2~U7pRzKP98Agw#aj@diajCqs8UJyy$o|czYgc^2 zKR#&toQ&8CX^|>H_S@d~Z4Nu9Tm#t+sGu@NiG@F}e8b?{Kv1y+Fx0D~c@0}kK>DeY 
z+!DR0pyOFQR22h0_%=?BG35=-O# zrqFE7A1@9CHyW8tH=g*60$!dXrq^fl$dB-(E5+iawkLk84I=d)CRQc%m4XftUaR!Z z^iFQ4jR&Ytfr>dJZT+9_MlD_GY%6XH$DA7iGHT`=_ta*4&bV%42&Y2JI$3hIkuyg+ zU5!JmZ^Emy3KPl~_b1zS=kYV8F4o?s8+eTVP5#*4vCov{_&lxbV=7nP*2fu7QsbIM zy!km(^@G9CBCIKO(V=6X#00@9%p;};ii*aL{#=g5Yoa)ja!EfDlLe;eIyNQs`6UL6BKS zUhUggqZ`0@r!H?Qx|Hb!jqIrw6_)F>wB{>=BwgJ~ zo7%z<#f>3h>HM=RjYsqkm=1kr6}tMBiRbZwR| zD;#5Z=c12_nKB;shygL>Rm9#5 zsfnFbg6;PAD7du!JVvW_@?Bk==L*Rv=)x9+b`;oYaeTn{j{85s^Pk||X&KO}&PnCl zX}Qw|CI3!Kjxz`^_fqVQu`JUS;8QN1nu-sT=7VT7#7R{0Mc~=UK;k za?sU7*a-aj{5@8!9C+|XwgQ!Ma)8KJYXr8(U^h{Ft))S&eR*#FW}BJt)k^I4RW2Qv z7txhuIB}^kL1?GJGgUIn8_M9C1gL8WDq3@LyV9v4# z>m5I7PUe@oDM~>7cfkm<)CU|=)7ziggwX8K-)$?>9!^g;TIV10IEK*aFbRh`cIXKl z25^a>>ORoBB>C`4&5Ix8Kdx8O579q+&zJK}C*QQWH&gDeN~+YZme2l3G=%%m(E3Cm z;cGy;^w!H9X#K9K)H8kY(iEZfsIY~5xol$stlJ+HN4kB+Wm!5d8^*0w^x`}5&$Z{2 z%?0m{7p#K0Lu%W_lKObo=Q2bQF}2&t%m{5~7~e;;7X6xpnI9h?OPZQDAwMj5BIgW8 zf{v1_CW0#(tqM-9IQ1UeR9ysTOtRYtRZjP@^mB-Ge${JE5z!YvDXo!$4jOLZYIL3t zQYcymre@@YZ0)z1o_=_);WD;qUnNunJTf^P(sw@Yd})37DaWm_C&9uh>)$R33;o<8yHdT1YCV|ULbl);eF)uC%S$nof=j&w(but1ykytV z_D_v{9S{0H!s7$iH;_qH!XIawPY#^Z?Oq%#&-e&8CACz$88GEf)*wSW*VEuhv=aKi z#qW9CzQW2o^?_ACD{)D4yW{)gvjuKS_%~mqi=0dIarfLwr+wd5=K65OwW*t6;%UvP zQAkOx!RMMTzdSbUu#|eKU%vL%7RAAc@4Y@rGDg50ZixBt!YM*I1juT%j!appC648K z_@r*PMW_LJZn%|BG#5@cxA1MkXig^DxBqQ3ofQ!*bco8xt3>MWCb6|{DqG0RRbM}& zNA9KEUWyR0xTyg&*ygi`yc#=l@MJ8p(9Zcn!agSD=0?3&`J?B9s*cIqr!@0th_`Yl zRU7tU7SdrB^4|P5_RP72VpQ`A5i;lzT8#X?7+6~vSeTet%$V|WkuVFFKPiuC;$7{$ ze}jb#HgK)`aq<2g)MJN~>|+1F!}@QVEK4T-t)i7RL*|_;3M2MrN(ZoEuRna86j!`r zM$fr(g<-|sJU^q-Dw%IMe5_Zi{bro~qi0ZSRr>PCBh^+()?g^F%kc5el+GI-)-HM~ zvE8f80-ZOfv3Z_Ba4PoZW_^uTNp-HBtCB4$yfDuo=dbC@%e1Pko#mjD5K2&y0hMEZ z?Kd@@A3TFp6VsQ^>$Kmvb)e!J0VKgFP3*xvVw2ptvIC>Bp8&(0QfifS zqrv2}e3g<)#>C7vy3hAs)x3cezyfg|zX1!t6g`rMv9c&-d=}?A5uJpN*nZ zY~n&$6I@oKF~q45^Jz>=&eCtRMJ-prpKJCC{!QBzCI>2!Rl3Xhk5n6x`uY{eH6q%s zaCn~6oj{WA^7X8aD>S~1S-`VsDb)TTc~t4W)16PbI6t#$#c>9|wrl>YA84CDR_K^D 
zz;C}RlFp%$oYHp1x;b>eXcd68<%&)yX{gO+Pa;;6VzpU4kIG?a->s$XPHl!8uEgi`H1Wb zm1RC@AKqONo8m&L0$-05UwmpaV&>F~j%gBw84>vw$42G;bd#lH``(}tsO(&T8}yt9 zPq*i%QgW493yHk(3fw4rKc^rhdr#??(j0o1x(zQv*&e=!nj$mw1FOvusxrXN;#b7Z zI`ohE+43vou+gnX&C+!h>TWQo)PRS!)O>nle){iC7ostbuXO$~v>v5-b2xoiEH!sY z@-}~sO|bDzhu{EI(WH<1e3};+t8J|^q3}Kj&Jxo?56d*}n_Yhvudfv*0vh#hhaWU( z?)|{T@6)fZ5f|J*BVu*2bvnrD)%@x0@xgwG?VRED@v;{JO=LFEVt;BHYEEY(>9BMR zb9fnaXu0KwS`5sawqJHhjivMD&ab3V!U};NVO!xYnXHkZkRhg+J@OLZBkz-WKpvF_q$a}d0)48Vv9%_n`=DC z2^qr*r*w;XnEN=|M%TuBa^F+54HSBhBv#ZN9^N%6fI$|7=0q0DNe;*e;YZt3wc}VLdR(L>vd1)C$Z@mpp!@rhl+pi z-mQCfn&t8~pN<1qHbsh@;t&m}>05J-HvCb??=fp$ArtJHVtgv|chrXP8u+!)9*A-yA^2Nb~lLf(yTyOO4whf^%ex zFhJUDg3B>6|kDZZ6u7?AmYN zdJ)!byVQc#nII)Srd)?eS5o-cMD=+s!>95D87`Az5?v;5Bn1n5$wrBl?ir5i(6G^){LpE zqNH-tF}P#6uyaPW@O^G)v|BcLceY-1K3Ayo)rqHYvtkcKk`*tc4S(Ex*%r_8bEhHw zwT?NLP(!Y6VEn5YpR0GnQGczBPK7^uR#nICjI}+P2o; zqV0VS)x2?GWuXN6x|xp9745?zC)MN5CtN;1v$%ZbY0?gmRCeW1+s=!j*U$v27dlQW zgK|#y*H05PZc|wD&qs>wh*OH}ph_Qi4aSw9?-IQ;rVDg9qg^w0YCK3R_A#JT&b}xt zR$s-?UjF6AbsWE*ac`VlcOxmr;!|e#&Aa8Qxeq>}Gz7a=RxP~?EI)6~;WB+CoZc2H zJTT=d7f>E_YWb1Q-1fsoS3OJbVBN>O*%+11HgV&?y?qYsnf5miT1?IHxmwpr{S8?! 
zLz;R>FC=*`+j)nkV0P@Pf7W)U!jzI&w%g;h9qu?Owp!4qq*GQuev_&ymWl4Fj}q}n z?a4oV&y*YTJe4v-Nh=`txz_T4svK(A{_zW_67|){`o|4s6D4N-{D#CMGgE4zfV7=s z0$iR?KaVn#Nhk`~c_^N`z&Ci*%`YfvuHEcD2D1)!MC}=SdVjFyCA5DQV<{{kPFSC{ zj~;Q$$WQje{X-TblYj!DL6#U3e;O86{SR8EQ9<->3G{CHTKut5^3*I2{erAku(OXt zP{8D?tibd?#T-<~N;n$&W%V5Fh7F^;tO__T=%Ono+Mnx2cQI){Lh!$E;Pt4GJ?-Jk ze4$s6@rz0=PxrfjT*U-ZzCspPSvQ)lRP#~x&s~47G1cz*RiwkeJD`)n6R9;B zcmw}g@jx%x-#NS$0->J?2=^Y_T0F|0#9=BV2JMLN9D;=qP@(Py6;e=n@lPQK72|&j z1*m`w`GSR1ppyMhp{|gHgFxt*rLC+!U-6Vxx!G0Thh}NUPMsQc3aOp-18r}m!(Y4kH2L+ihUmhx%ZJsj@W|Emc27Y(b@26*m!DlK_|qgYd~`l!9f7=- z6{9vO(mT4g&w{X2N;$lIXfPYS+~DiIe7DT1AVY5N$pFnK*^yUbn^!AM>whTm2(-U- zB<=F?|518zG%w~s5Ld7Is(lVflEuY}?nwI$j9tN6Dlh5VM*#KCy|woVDeKN^DM)$% zVRbNklGp_M4M3JKjTm?ZBahcd`Z#S-cY}jtC4I1NH5_MRR%Y8_8@H&oL zLW=IA=og)_2x8%v!5yT%J2Fy(>(yFywq{8=C5B$9CGR-Qd(N%MwLi#jhP%lSxR_WB zMy%t=Wr(xs{W8(|ElWz-MH|gs%Vzn@WRvB+(q>O{--H4~9KkyEbcyl%mN){T#YbrS zY2-?F9v0?7LFBJ7d>NJ8EXFj_aCzjVIv~+}M7Qw?XZ`7p^+12OTZT)hXztvHvTOWo$ay1oo>&Z_n*u9Is9sm5s;S+)@d`(&QT2UW@@B|@{n zlt>RbG)N%v)8Z4W?a?)IX8WstDlz!a9GLWjJHKrpqz6Nmuv5Su*i*fL=d=MM+?O!7 zcSkH!$^dYl0N$ z>3F|GK0Vlj%3==A>_IDTp+S>C(Mm?v;MTX%mqe{T4$ z-I)5eaaM$TRaT`5Wp8`y9s9uZY086Nm=>|CbWfqZy}HK{B$0zs5Qbnj=^Kl%KO1P~ zcbK6!9+6|H;)!p64&KHdXd7xO)r1i?!YD;HSZ<-0_aZ?e}JpD)( ziuUtT-TS{3(Rq&_6EVFpZATaR*<_W%sG-g{hKCQ^+TriS7Gi|Xd8j(JwljK zhd6li#M87o_7k$`zC?Vh-PO!%Q9q6ko;*xzsvK8TTdM9tFXwh_M{v=wx*E_&Y!w|J zm_oq5c{#9e?EwH&UY)kX1@Z5$GkH|snMyk877KQk_5qVWHGK#a4@-sV@oaY7B4U6>2!VBIy@ zCMdF>!;{+7>x2BSzNcf3N>DBk~_B8nrjc#a;s$yQbdH1x1(@} zzNQ*zD_sJ1cB@Z-I?6j98v(_I3fsf^4_kMSYxgH>4vSU&(oXpbg$$ZMD%AQG#!gX!r1RNelt#%C0G)wJHjExze^tpxoWeUYof;m$0p3F7>@I;T8!Kor>RS68 z{%_fb^fJpyT+{E|Px`vHfuI3z#r$oItKSuW$nnTs%=!7ZuB2_7%!zmDL;+1n0cU5<@nIYF?<5xKUT`z>P0tDM*^cNE zyiY8fd$QdBEhX{RL{2c~-G^lOW`t|pAXH@eNa$Dq4`{q9Ak@T$z6;V)-P>husH zwV=A17{_r9hjoHrb+w>PgJL3%rX-G|_Dv?rt0Xd#lcr;KWzm*9`Ys=wr>vxh&`}hw z%}>;6#ud!+^AbGw7Kpnh6v^Cq!)BE**128*s466moGD}_xqMGzcoP5g6xgh~f(@|& zJPKG*(J``^nG=M~ux4G<`t+<^BP=KZ@S+d?Rq;zKAF)i4$GlkG{YSh&{aODS>D~VC 
zn1=4oS2rmxycYZCN zb+qmtETFCI%8ln1-t}PoTw~SH%P9PvSv>b%RH&g=`!$95jrK}EDR>{cla^zm^E%&tp14}*%LP04Ft8k;Cwl%>ya?zbsj3LI!L`LmzA6f6u~mUCJGLY!489A#1bW&+3NV}N5T{mr9xC1s2>;=sWOI}nIgtQ;IClmogFwoL|S z%f^5}RX!rvK_#;^|GYAp{=+aZ|B>fd>H}gVY=CXy!|XxZ1;-IwWE!p|!|!V2avTEh z=No(@T{ZKU5*{BdJz~cM)duPoaqt9)c^&nR%$)N3?C2RrrKK~Y!{UzX7hL4k9P+X2 zNhjx|62@Msoh~ie*BaqxE`Tll??0}LT;?#IHH}-UvYSAJ1OevKtIN#@&9$mHZ;y#=|?*_gWUv^b7l!fk1ec%p4+*C(Kcqz zqCabzx0=JvTmU;5|JRZ9zYaa7r6p&+LvUwmu=V5rH~Z25z>WWbJD26YwhF3ZpLxYy z+o{jin5ODdGn>6M#PSa;2^_5#^}j}Zp|2%Wi?=wPgiLIm4U(AMt!8L#Xk zUeqC)I82t`8u_x~W?biw_Br&53XnnCpSt8MHv6~VoGQxR$6V3}>RpK&-OtM=0sOdVAy|k4u1@_I7Gu(ez&AGS5JU0yxHhm468Uf32L#G9KL0!@CMF2_uhsQJt@6 zkieA#5Xfs}`eG4)!ZV!!@R%DCnP^$iiE!p$L#;|`=K;StT3Y=by}9FJbg-i2aiYqv zfH$}B0!FS5H);3pVT@U0Ky@|uk%sa$U@FY2cM5j}V44;VEzih~0G4a>KYz7PVQ<#} z9LsQY#tj)poIO7;pi@Eub%yHQQON=*R|H>>=mEApNH?m75YiWAFC#yH3zGo}RjXx( zk%z2i=Yp}ao*O>pb@lMrkocCyWw|L3G?r82{`rTJI5Csg&@pYM8{QY~H5$(+Bc zejJN)OSFj?1_!U`gw~D71++Eh{&CSWIlGl0VGMoWp6Tl0B^Gq6>DBu9$@br+$GiAu zemnTJsk!F2YI(1PX?OjL3TH0;jVmxa5s^w?9B(rdb+>PPfd`5rUYrYmw~M*6-t1xUMx z`#Bh%PUP(yJ@79Elk!?oy!aP_tlG6jaq12}Mbc6MS8Mm{+{< z(?<2CHV)n2Ec3_r>pBjcBQ3%o30bT-b#qY7opOsb+N*My{KOr=E zU5999v_O0DDBxdy2tV!z=211bfXMaTG*Hx>Lkp7rBf))H;mMpl^DdO6k z?0c}hgpz$sXWFRrT-3_E7$fX!#XPk9lT9x)vQmXm;*YZ_A{ociQ|BN3WYSXyhk$T+s>GckbB19@rgjr0_nyWFUyq+YK9a&DWP zd)vdX(A2z3m||&-4ln@U(@-y?)p_298fZ_G+!$!0M$5}k8oVDUJ6R`QfmfaK=D7Yw zEhdhX&7v9AvzQZogStx50cJNlc_>>loy!~guK<|W_XLNkC-5=pJkR9$PpoUX*y^xp)sYP(#4bu;hF;0sn} zPYQOHmcE?2ixkYxkXjv9M`JlUs28i}#>g)J>`V9U8^;T6%;N~v2qk|(-dX0|;;V9U zqoj53=k`%N*Qm%Fj}%Ks2?U7w_dU0j{vjl`ozSb|aMxMq zF`@49d4ON+>nfv%W&zK(d-JT-&3Un`4SE_?fJX%H%_%RRQxME@&;#o6JJ{TELBEhc zxTv1j%FM*p<~q2iY~y73(3ysiowm*{AmQb+*up`|zRB#%3ejPCj~|I=#hZgM$<}J; z9C&u$Vn-d&yWP>d{n5L_(7R*NyWgRAXQFq1M(-{|@BW6~-Gbiz9ld+dBact+rAIwTeyGKdbr!+c5?Q%6Ct~2mw zy>$y_FgC9ccFz1MIx6z%CjkUr+zg_|RnvilUe4WJ+QLEc5~gMZgricNP%o;a0LB=mn^EVR@#CaU$d^cvUuHd2a~)4k)9xDPt@AG%3|fc;cJ)si z0rR`o0|1H!jN5Ir{{=iM00W>COy7$k#yegWW}BK=hGa?c|GM0Qk!bKV+kho0NBYVS 
zZs{Lnh<+*EpzR!}{0W5HXvCibpIOQ(0DcXC1q$TOFZ_h@?7TkjK7wZV)UGb)L~v?Z zxm06moH~E-j#xkJk&9pQKD4|))n^y2b>W+j_{}9ghP&ELZ6}e$z_d$5Kjo=<`0%K6?2SHg z)3p_x_Ka4!Xty?QhWvEcA)H9p*kAq5?GZHv^^}OtPr%p1OL+%a-MH77MIHB`I2Vs# zyYXRbd;y1sR(Y-2!t3Q_%bv9Ky%Kqk+>y*^b*5!NuMfK@46OXo1GC6e?68S?F1zO$@ z!Up_@E54b-i6>t;bDR^(KSABN{fbMQBIbp>qRDa^%}A_9g{*O$olReqS6+(SSJVT^ zt#8Wu)BAPRgYS0&q@Vd&g|}223yFm(W;ZzD!td99Wus|z`^fZI&Euq+(o44}W?EX# zKY&y}Vrn-Fb032&`d~&m8LyE^(tn^j4qsmZXvOAkO- z&BxUKGxN-1o?3wJD6lsp?EiW|#(Uml586gOBqVZ=*!B#Pu25(;Strk9X=#mUZ%47sjCQM;4eC6yJBO-QqlTtcss_=WsbZYP*= zh5-lLVSvgf9Z)vB?KvUXfsyLheY=yp&9A%xV(#Q`^Zm2_LidBAQ!@sxuhC(SXlj$(0jthu%aj3 zQXl5-_i&LGC#pSh@ZK+|&0E=kNVit*h~gltX3+-3`AE>{!wc|vUpILRx>vkB zC2!W*+da|+V`{C-(&&;w)Pl1%zjqu9W>^hmUHT&l3w*eo2 zt!%d^rjVHp8kwt-jBB$hBo#y*7`;EPw)F~IaqBrT7N$hAltSzIgccQmj^~95*~MiT zO}*yE2pALlNU@&LfMRWkv5Cba9`i@}oqMV`0t2d=*T5nH@RIcmwt$%51%8S}U8e%j zEW~SwGmU@<6(y_S{S8m9Uruhsb#Gx?f7kJL|J<@&e()AnzPkB-wv^dZksKkz`THY# z&xGImyB|#ugIx$bugaR7;i??D%*WY`|8-C%2mB;R{%(*8I+|=dI4>3iGZzmaRe43T<>`^MipI$98f+x#xJWomUM- zf~hC$=Lc`bX?lW! zMz7zA%=jai32Rf)K1pf4bhLBP^OBq~`9_5aT(x@1D*+P{H~Yb2PCG=a@6IRxPH2>7 z#vI=&QmuJ?4*2n2t7niR&8M)RI;#0sr8k%opYM=GmwY)*N$b3TAoTb;3eg?N=1YDA zA*6yNQqj^1*hur*N%K0$+s%YoEX0ofTQ)t+O2v#Qz>KKHjOfOUn8b|Oz>GM<#6U;G zcz}lS5Dnuo8U`a82K%oL+aFi>2jD0kWGc=4_Gz@ffj0XjPjr{+A)f6>2_yB^3pPEX}0C~j3 z%qh*lCC%_$nt@xIfk&EwSDJxOnt@-MK|q>8@Zyc$5S=2fzx|8u;?zyed7*|z5}(4W zg6fNHA01j@)1>x}qnH}Cf?YmpV`1F{mDPS3ca4QQ^tO2<9%vIR^Y>PAtN)*f8 zH;XjO)s$|qtGZ4;H@vZ`|TXc zkT;pFhL8)Pgm?H+??I6FE4-Q@xd+gC^;oX6deYYOMDVA}!Q!-oX*qa1G(4dJPmMV? 
zVkbToQ=zNfR`sw+2Zkl0N7aFD_c^BK~DR zFRw|IVlK>?Q!mWL!!OJgQZMpxlfz?`fDjOav*sNtD9W1Q*%WLKYYHX;#i(DBPD_r}VIH3fjN+uFJFfT=-R5AN@Y&2Fk z-1f@>rU=5*3iJieap3Gi$rSK2xXjP}4Np}@xGf|C@hBai{LbH9H#d>p`vr(E=DH!h zc4ZZ(y*eQk!#a9-os}Je4~+wzIvRdM*#)^I8eYbP{pu=sTm_g-0Rz*sI;Mad4}!-C z$k%YQ0bf(-^K@2d3px0IV+yS{)%?aowDo*eoc(LBy>9^8}9f`rf38ZY-~cK&G-Fw^x&5H z_ovQyeDZ{WBBy66C_mLzpj&+vumw+eFCGIT!w#tC9Ut>CNt5ZwOVv^fq#}Slr;#_K znsWHn^ZsrCNQ&%XlEp|w&*}!M6S;M+fD7Gu!2TJ)>h`vL`so#LFY)4;8@>8yef99J z`1^*PFUk@o=l1(Y^wGuV3i(Dq-I5_Z5+14CKSQ|8+P-PkAsQ)8$GG3L=G&ItNXLDo z3)MA=bDq|XU-mN!{&cP}vbBT#ffbhiA@9|v!gPsmvK)^to3)o#`oDO!7pFg#)aS@K z%+^*H`ew1_YQn}{WvzVKk<4NybrgzhJkC$|oUg10bA|l_({ZwLhI+7-LBauxb zOHU*gRgEu(rOXs#!SwPwH@;X@52;v`J%uOULnbjS$~c-H>r|Q^#W)&S=TsV54+62M z0`DTmcK>aN({$?FBc}IqEov{R>k^cS3qH#d7krf`woH~Kwv3i1-f)Yik$nPAFabep zK~QH<7Ltl#alUx#=Xs{bW2phIyVx3D_VUUp)l4Mr0W-*aN$kse^Y#tE2AMyQ#h(qB zD@(ddrSo+fK^1^Kj}idm5FV*o1^xkgfC&IP2)TLn0u3n+w72;Ow1Ths*8GLp{e@jc z(cg$bHv;n5cOtKT9wVTR-QT&?DV%UPJg>Wh=83CWtDQgB-v4sboy+ja5S~Hkul$BM zEb1Pm*X=L8eh|_pmFrz5AM4L{BQg@ViMySaYPY{Q*|vG^n6h&t7xPGcAs{Q^9l4py zGw}vLJsKaq9j;501S6qUVHpeC8VoNgI|H~O$ z%Jw{CHti$TNf=_8pP^KJoIhM#`dfaL4Yxpzjpt+4r99ibe=As+8~e4NVJhsYLC2F) z-52$mW%usQn+;w$uXkqrRAvwX8Vyd-6;1AqO(Ffs81bzo;K7&sjT^kr#KWo+ryequlK_j zIzog%eHQdKVF?mJsyOOe)U7Po<}Td0F;_wF6DYIS;>dGci5_LsUs9UuY<>rEl_LU{WyK)EcMIH=^ED- zqStM2*wh#G6ii2tT5+9}t-TAYiKfd>eh4mPvE|0mYV9viWNY3^S)EESRY5yU_3=h$ z4IfAxv0ZfPDL+^$OS_{TEx5=Z8tLYI!;Z;RJ=B8whBR9g{aqqDVVy-Yck)iOY=Y9W zW!Z`E{#Ki@E=)PGLsW+9-Y=7?u@hPa{dCGnxa_3+HpQDlan!jok&Do;2YZya2Gv~@`-?M z8n37K(!QQnJ?BvZ0;(t2SkqWo=U7-|*jNJCSUT8PUf5V^*jV+T9UJQ$8;cAFOF+@4 z5wx77;m#$)JhFFoDIOiOn#9&7g_HFoDCMiOVp73s%f!n80Ju ze84dAfT@sN`tu`s#phwtJYmv&VbTI&(n4iJRI8t#S*J&4Cr%PO@heJ#1|(=8$HEiD z!qdfaFo8cvj~aA%04aL_`TYRGh7WPThm_$n2Pqm*EeZ@{U@c)_9mw+7nZSqG2q6xH zkTOEZZ$bzg5yXLrASQ9?oBQBzqEs!KN5qwzhnV1V!RmFJbX>d!T)Y`vybD~g7V_Vy zNd^W@MurJS22Cc02_~jO#qU2ZKP570P4j1CVO3yZwP9fmV__{}VI5#$A;7Z8*jUuq zSRB|`FR-yx6t0IH9zn_;L4H3<)#759;9}5xZm}8oO0hLex;;#~GfWyDCfyw--5Vy| 
z7bZOrCOs4;JrX8878Z9X_E-TEL!Ftw0tc%N2WuDyYY7MI00#?!gN6Tc*4kuzG0%4k zGZz!1xbU%nF)o%9E>_@68*&mn!IZ%=I>>K22pc`Gha_uZsPt!;v^#p(FC4V)M`#h^ zXo*+l$y<}A1Y0-LDgho6CB%!p_C}79=L`7Je~|rV(B!SNWD*aF&{G^5Sm-ffGV9lZ z%TeADtDWE&aPz|x&kh5P&c8@*^>x6u?fln=UmSX|J)RTWY{3+ zX5L^pX6w=qoY?;rw&lO21;XzmlxYUO7GI9#q^xvbFY|wx69HOwMU$I}KfT>@)e-*X zA7pR5ey{VJi-xmNU36_S5HHz}qVRI8-Rb9#`Sr`pIgwL#(&#Nodr6NDk!peD>*Ft% zV|Nm>Z3G zCM0$%Av7=;t|~L;?&9MXyEQUav3aA?{a3W>;2`qU@6t@42H=5{B?Er!N{A2H@lE$y+(jDZg&!{vbtB)(6Hk>nCj(12AGF_WEf}Tbet~vQht8E-9mE zmq`$HGVqiC;rgjrtvk*7-8vf0I&1}ffaI#7|E#xO$b+W&2lhaU@9oB&B%KM`fCfd- z=8fa&ooJ|V30`u3^Ki^5<@z~aqL}(6+&29+(fC&8cqA&pi>;k@fVf#?LsYfei)nL1 zO!Q}<^ONNI5GdimBFQESlY3xv@lWXMpC1Z6K5=@oC3|TcMQ=&awr$=N{?WQY1V|ok z86L*mJ!}b%=3CnN+iqR{afIHS(j0EljW5dWUrwG(usZT~s|Q*DY)VO?#FG<7;SpP3 zmfZnN1DEYLG?e*%m;)?ru$Zm)y|8`{_I%Tu(Bk&$p4#duQksl%@y7$U^+^LBt@xaO zE=Q1WS8ZDGjh=0^^mI{(R^PxOGzP|CIqMJCAfe4CHIQwKLlvar(n|t0Cu@SumNt+v zsPf@v#OdA<$m&F94AR`Z{tNQfysiW(YhHtVSRN+<|2Cx{;}=~sxXI%y+Y_zn_40V_ zC43-OJ*NorqaA~kKCg4LK+HB2O0=31Bqyp|2N~S1m2qdhJ{f}iJM-T`hNby;AlFbp z21p?lU=D7C;z1fAA$<^Cof~u;tWVxI?}i`)K^L(=yz|?6_Z23Pw`tyU+l=N;`2Q64 z=J8a0U;J=INC`<1x?CYchGZUYhLRydh9X2VRIVX&I%UWhNtwAM$()3YHz6b`vrL&Y zbIsSiXaBa(_xb(zJg?{VVx6;x_1;4=TOrwjWHUoC#jLPE)-=}rgImfSUABBbGHjA-8fteg80e45Rs@iC)0P#s z#GaqL?R)(KDOGZgmX!M9H;b=+OMi^AP!xI!?hOUQJz=eMwr}DZQ0%;z30@8PZJwUN z+i{3ZOXeHt%%PbHhPYj?rmZ#|SmWa$f5)L{d2SX#d40Z)LkbUKXM$4=pw>`ai$~zv zrAdX`b6PDP>LXb#9$!Bd!n}K|u;~DH*fb$XS?KtW^pGjeaF{>%R9VQUD(dUU3;TTa z6LV$kDpwRI6*5<Ec8nCxw6m?KYCa#KnNCW%E4CjRY|F? 
zZHus#p(8Mvei)YGFQ>Decnd8j$-w-tOY|IItc*)4+Q8hHM_mDIt+E$M4>K|0i@sHr+PXY z>IJD-xh@FB!$VL+!8UzlX?*qjea?|m|4PGpbx3g|>D@ip&{YCVrXWx)eKcHI=)Nzq zKi=9Lb_a#(OCMh$eleY`TqRIhs3x(8x$QqHB!;^WwMiI24X=lg6t7Ka8|68Ky2UeZ z=iAo@4a#SNNwZPSbT+G9M%Xwt-0TD8+dM2uVT1MaEjUv{i9-_q4d#EEaPFbs{Q%Rq)=wqqwwRCki@L>P)xjr zNJ4fow|D-E!DQEVfH^A0=BqFNaF4J`$isUUXjwxa%F`Cg456&A!@jj%+9PchlD*PV z)?|)4@6orx1EFH~28&!5Y(49+pv}%8A541Rpq4x;poiW1ND|fyxXvX;Uj76gnTE% z@o~KzI-j?$`@z!llyCH*@e8kSip^y_}YFo_V?o>^kzR|NVhDcG(T0DNZLjO5OFQ(hQg9QAb$dkBB?;Cvm z=y?e46l$g0tr|mw3L)1vO3-5q+)vBR>r!~HT)D9<3$>fa|kNYdPph5OWNZedTytjSdGeJ$R=gQ4S z7O=t?BtVwp0I_=^0of+J9VM|qJ>4#g0(Gb~FnO)87wAp{&bk z2#9pu6*EW-R&Eybf)>P)Az~|>Gv=mRP}7svH(KEoZ--Q~1+?NjbYIMYhQ!?#Km+ra zA*?Y5dh9C=5kCn)He`}oYc|OfYi*L~K%06hE zuPn5jz}Hc7urTztZ~e&@4?UD~#;i1?7}g+OhvL#a1hK`DxT`(+ijKC#`#|eDAHfkd zcmRrB7AUlO_lu1VKqrP}IH2GOQEo>6Kvt<6?*r|#SxMZ+BlSJ|2Wqt-(#Lja&JOf{ zp>7HalxgBa%^L>a=)xK}HVSp1Bg{YGz+p~y^}M+G>&m3UrYk)ZVf%ffRov-d6;{!b znrU7V_ZppR<1veB9+vGjNGjKN*lb)76r{!zMdWyM6HiM|`7qHeumR9UP7+>1t3|&qhzpEMHEyv*?wn zCmojF16hXVS%gB>2RqOK_;~$o-#r#KjYOevXNH1Ag!O*De{Mdq183M82vctMdH~^y zxgl`zi1m}HK3T|ppYAieo#%Tjw6{W0h=rvf zdvEV?#xoy<1DQ|`jUWs|c2sxaBo44*hN2D%6K*(SL**bb5_>F2EJHlt16!kd$U=ZQ z7)mxb%wc!mp@Ye9km=~7yQkVl%OO3ZH{hu7#zChh3ZcoXz#?X98NhLH03KsE8|a~X z!LWa+pW#%co`KbnLZ}6PLV@dm9Uugc;tL6TEW8q@Ys-Ph+y10)Fx98OC-87&+uQ4k znbkg5&gpps1-mS7htohxi^t7H7$5)m#qf7H&39;~cgdOuD(5_qKjy1H=qN%;Eic*c ztIy@RMq}>c4%Z*$PTWw;GDDT=b1!B-wU4$nx<@X@&U_k=gn9X!d>u{}s$t4GwSN4h z)XiiCm}#01EA~ZerFWI5!qG?bB`3aPSn`bCObflcSY$ z%Gx0n-(XDsRI7#UaN2!%5rW7-%68$X?%E{6WAz{~hL(P17I^&h(9O)#O1tkzGwRB3 z^TQVwc5CtAoWGq|aX96wWyi2`!;w7b{da?YYUK!EY*-Dv%$dq__ld-tH$S<6%&)i^ z;iDa7ykFeF31!gnCK@nKY#0CW7^}})71H(X^>nnhg!$JK}@Mtvn2i6hrnuM*yaWL^ZvC=-umb5AYf z@O8meMApQ8lsl)4}9Y=DPW_KO>fc;VL#6MvA?*=AH(3865I!AoGVRm)5MztK^ z#H$w*Mh;0_YWJ_RUJvZ@d+JoFI-1UOGr*h0@ik8F-Gltc7TkiqHs_I332x!|h_wBb z=X_sNvi7eWe&TcEBx_To^L*xIKog-eq#z!nbc8H75I7Ai3jE4|L@K#>7$;;#bSW>} zAig3?5&F5(4{g8MWZz9Gc+}jtUTIBfziT#^A`w$o!GT{1ZyG@J-Pr=pX^lD%$W?l3>#)Y&c&dg=8 
z;??=L#O)#v?hn?4kzalGzsz@;rt8RD^y_8b52iLR$@M$Mpj(Oo6%*e5)w^`NehdBs zn9{Rt%{E(emS<=k3YTfT$FGZ12zlm4h^2z6>_e4#_0Fuj%yr1XyUYf;w+tR2r4lhRB{OMD(POB6JOp2j{};rkkj!7}W_hV8>L?8k=f z$1)tih8@5%&|t%8une@=Fxm;>o8g$o1FW<(teP~e!8ELmG_16=teUi}!L+Q6w5+rT zSv3!`1|MW?JjhB*$Ex|v^Bzrd5}j=jLtqfYn}+C}fKOff!kDoPEZ8s>EWExE#!8Jn zd<+|QYvd<

*(x6q- zpf$8>Y7SvqUHI{JE)9!L{7@J7iK2I%?8GZwy8kU!m$-PmBoa-xp=GH*!W+nUk=FPL z!?%5`KlWoeXf-7dQ7GMeN#>o3yA!2%lAA9PrbqneUDVK&Mfl{*({X#yDpX<4Hfu!*dil4rtfbj%6v zc7Y7jw5&%Mv5ChuB}Kw)Oj+tbe5hMe-=ze)snM=|ZDBfsP2|v&JRfFrm!HXv3g(BPC-Qp##mzy^`6E^!Mp1mUe zR_FzHBh#nwre!PM&TBVs=J*Q^C;^g3rM?kGt_b~Db1%OV?dK|UDq&W+&wNvBec6EE zzs#XJrQ515?+$534Bs1>U zslVQtBCqm}ZKu7DNR71L9+o7HGA~{2d%R-3BC~xEX)ZoAca_nsV{?FN%^ecW8Li&; zs>&K%#mmw!f;Sbp^SDrBz?4UHe!*#F?TM!nU50<#{kylDv;L^A)x@r%eDrod?9HDo7yIMS^-O8D~ZpfMNh`rzXM=>qdl1cM3 zHrV5_32igd`})!DTy+oWYgZ@GUS@+?69EvOiP6Onnqbq)k>Aqj(e28V0N zKi)e9^S^lKKpDdXH>jY4%*I+?-DRk;%95=^u4vzltqtNp26G-@5_}CBa>276Yge(n z#{nBv_PT_}MxWj0<8Yr=sEd(qs|Je(-8kypEepbc<=i0J6<;W^I5h6RLw#-GU2tP| zaOcs0DF{42TBK##NPH<%zcp7guv$FaJ-S7HySj>hxU}G3iV(LePHl7%hf9npgIbES zqm>?4Db-oV0se}M;iDc%{8F`G1-VY?x30ICQ`mBUA!hDzUDdCxwcO3BINaTBeHro2 zG)0?@G*$BFEj{}{w2zQ;|2^f=yW+Vl_TdX?^ZM8F3b}5s-Q2p761mzB*UTqYTZfZX zw~{J$`VOz3J0W$e?YHi_S#?}(Uu|+Gu?`~Jo@~p5q{nC-1_e+ zyV~I_+egCz0W~TjNB`NqIy$ny0WHM%c%d#~KItq8sl z>NL|zz1<=nb&AQ$^Ok-*7#X-`dA(gaM-ipihy1F^o9l`R_RFnt=2UTURa9;`*>nEu zC6jCZ>_3`%iY+vzOk|g@8-!&rgk>;BS#q+d@vx}zv8V~Ks0p*EonujxVo{T6DYaB! zQBz@2Q)5xP#Gax#)`J%f$R+3E1iHjV(Rl6AGWz-L!3sj4Y475M@uG zp)LEmKO&?drt7s#hUNWnj}6;N#>+~1R!hvU^wO3IV_Ry!`!Cg-Tuoch{cvRNn?AnC zbzyy4q*;Ht>{9{fdmf8*%Td_xa$z2l{8*)AlLVR&v=aDnylM zA2j=-e`o1PcQ{2dzMk(ro3EElG*N3+r(* zQ-5-~a6l3LXa}L4Ha>M!0M`5Nd=TN9(Y{J;? 
z)5C^B?o#pf&)>5tu(kcw=?>?Pv3C`k({u^7cNLz~R1LCs6@{tDW%{B^X)QS=N0#7* zaGTRIN-@?O!oW?REvMyk#aQca1m>m7X)X6Y91+(_YdH+yRfAT z@%0CD*?d{*FrWE6vEEJRKVzZ!4$6aUngLa1m z#Tl#Cf|1`rY$vc@tNKmSSJ|y!h?+dqc4nygQD26~JI8L_$rCS=9v(6#=4l02ly+&p zNT)gr2HMz%<8$mO)PKZq}`4~!|~5{fgQJfX5sf)0#D#fWg!J zEA4=g46ad}B=~`8ids8zYZ|%Iy8iOx$|oRk4RHPV0Ok+EhXBC&9?As4=l!6xlBm6p zIT;ZX?AqDs8x%YF3upQECI~$P8k7Z!-WZvwsSWpm6LBNQuLBF`8RV=PqOJe<#@QmH zcHDE|Uge?U@wE$;rkx#i?BSpG>p!yn!F~pnfB6H_KQqfoC1Piuk3VjTKyubTH1+z^zIJ=ak)z2%B>2x5s z^-DyB#NRION0_Vujd$=%5Kf>80d5`?D$*$08G7WGf1M3ArCM)*^F?!6>$8?vva7} ziOyp~XxgYFa$N0JqqKW;H$xB6um_=?1FYH8Y#zBYmsV|zH!xDof5Uze&wWPCW)Q6m zH1gv;RjC9&tK-acp5|U>R*3>m*DlL%;AIa&@=76}#-L2dX~>#-(~{~VAXD<=`qM}aj^lG&hHU{>apPm+26*=3*S11PKGgxwYnI&<#(@7c(S)s ze2vx~M2g;2t~|@DDp@w3H<|5k;xl@?>DmonPcpR>!E zTOA&fpB#xODm=}sxpg)#tmu)++-aGrH@lh5Z%=DIV3$&g*HK}A<(I#slvrUp?{d&KZdRm-wXTGyh=kha0?gR3Y|N>j2EZ0CIMOD!%6A zKYv%xOT_2R+34U@M6mz&?3B~VMz^=_zoZkhdZFUl7+_>u#P5I2GWXSmvr#coyK;p`Vr_!Gw&F7g*Vhvp=zKeAL>EjJy2$ z(d)F4F%eHYBe=BSsaM- zI(}X6=Pk~bn<_O}r48TKk_$M0odrf=Aqg3!3iw9;B<_SoWm&85k#!ojhHio-@>=Q+$qTe=d?Ai1K$d zsbv(4+6xx7S1f9Y9F_l_Lb=DP(`KsEmZ;O#tJD5gr~Q9FgVLbAtwHOiLHksLHdcc+ z({9o>lp!#b;Y}#R*HDJ>PzJhvta|&f9Q!pTX~Jw6Sn7}S21;F|^?1TS_cS&Io<%W6 z4THGdW0UE2+yKUhuNsI)IZ@3Jfud;AINYwIPJ?2q^6`qn2k;Uz?Z8>upFG4lG0%|jx6=vyn!WZv>pvQ5nSAtUEEpf z|4F)+1~EJfjZHbNDXAJ}^O&W6A79`hbz1&lhPw=`M_93mSWU^xVK&h$^}Kw6+Um4f zjVJh>~cbHZ01eW96 zH=RyV4)Mz_RV?*BPz4qoVb$Zna!6@P+J)J)u++ceLpwM;uFH&=b{H=wIZJ-|pTm=Isz0WcX=BQ$p*m~o+qhuv zs&tHd!Djh0E8Tazf)%aO+zz}4xbow>hzsHWHLlcfhymynC#_QG zwcXa5TgmX-%^Fs@-^{zOoEM0h>RC5zJvJO_)=@2B(Qvfprs}aUkoVH8d6|~Haw>PZ zK8J=Xn;yH12lsf#(ltm|nYH&c#(n$vOm5TT^d1S zIIsF7BCc6;QT*~aGOo!Yu!?iThQ)0Y^VQ7ss%r#EhMshSM zqv{$IqA3;yY8n*a{$v|GdA5yQeX@;2!YyAJ&1Y>EFhd&#L4WoS6@&q;J&Y5pM81m@ zqLCnq5r-|dHUhu!Z6r{SGHQfHmnGN$IKv9Og^_LGEf%fNB|yl~Bpk zRaSsx@}bu3yeM*2gMt&fNCBUrm$uMr_M~m19u~ERCDc|2kb69>ASLoa-^HlJ)QP;RAAKzH z)%0Mmd+oI0)zxt_dwmpTDE1>7yZAZo zFLvJ&p*ev&!^}Nqe9U8}HE29uSUc$B-Rh-=nNo3=YRQA9ftJfpqM-}eQnEbQ* 
z{()KOd#*=ifyG@LFwoR{4GL0vg)(|egED&c3~KOo zizb#07+2FaMpHTt#!xyg8BjnWj0D&D&{2Li;C(zCK8?@_OrTXR(5l<9+u#P==3`fh z>Wlkajx~boOiymG0ONy<@6;#~FdS5fG=eM`8kIw~N5$a0I}r|wUx$MnXp9hyDfJLZ z==>_e43Qk1>0L|tP+$(NvOI&1M%znb#(@?zc~rO&9EMwe_Hht;XB;7@Q*Z`?sP#J* z;9v-&(D7Ax0s@t>C_xXJakCM4L96T_m6|Xl=BiT&ylkNQ_&AaVr}*(UoBNHR1Qz1n z^L)7)y$zW5boo*65(c*QFp$f)ZyH9ZU>JsoS13sJvu)rTyG@8spfNs!T8pp&yipjq z1rHyJ2wWwJt%4co zHls8B^_N!lt}r$lhk@m+{{o3|={WjElF$gERJTmfAmojRi?CBo6v>ux~o)nW>Wj%UI%+%#- zugQbIJw16#ieu&PuqnfuIcWn|b7#Ufdoi5Z7dU?P4_*UlohwV4cssY^&h4bMs`pO} zoQz6-53WzB*h+)ik?BF%h;!+WOodcki+y?)BZ};A#QK!wU-NGN{M$DF)sf+{+6T7t zOQyF{=DS{(KOg+}M=G*&DR^KIjqKG4rS|zLEs-zHt`BMBXUgc=Z#hLRRfR~1_BqIQ zc_yLZK0#Cd!_PIWj#ZcY8JrxpE91E3`(sc==R`H;&&;CR!w8Pj2?cxy&D!h!p0?Ur zSqD+KHQgHW6b-w^$oaIAe{@Z+EDNM_7B{Kl zQ-5!$Y?t?U^nDvdc8zVns^rwjsXVN3+3Es^)O;U^>Db@?$@b@6y6|QXW^-XJ?I6hO-G*a`gPG=-N*UGFRI$}Pl zeq`3#M0iIuwJMNwbUdo_PMW`6_P^GJqPV487KfWD{}@$@!!8v@&qin!oQ%vr<*z$I z)tSx2*Om?z7~fs)pE##fX`WQ-m$eli^G?KLW^_KS!t=v<#68x#UgW1q?2UHiHy33* zV)TOgGLy&lLRFW|vZ_`4+gm39^Ck>Da0>YQgagDh3=H~qOo1XeqOm-y;Ck%#Xf8jB z5jzHi=EFcZn;JYtt6&uhWC#bDe4zV3$w{aR;6g*ovC{!E=4@0(z&!hkEI;s~=#xkpMV10ZX#KCP4JUB>@k28^P`A1gf`Hk5R>6tKNAM*W494P!Zs3dg#uFHwoaWw9MYu_3Ryq`bdMfR zzqW^sK<;cKXfmg>gXb{atN>#O7jz{@F2ks)4+BLR3=)^&wj4&%Z@Lul3Z}p7!sQdh z%uUz^+9BIWDBSA95I8S@64YRbGHL|gd+R6mRb>C(T80%cpM5J)&O1oe#WQFzGz|}fyC^hR^wKymgIjyp zEqoHIfXcW^9Mz@Zbm6gJMgYZVL6rDL5CGRxSL3#c{Llv9W55iysn4^D&_kkcXu;L- zF-T>-c`PXW-#W?;vcA!M^gXsVa18PZ&9UACs)Gj>_Wweg)ZS|92wb; zfH}Vh21Y}N9f^nStDkvpcM(V4R2-ZIsT;)3cfP4EefMPmUn}ya>z3uevkVq!p{(6C z^!W#{JU|iBpXl&C{a^9RtoQQee&NHelkgEQgzMZ)0mv-^JaZJK!^2};#GVsxk}Y_ji?~ul=ZWa+Dg1FBk^WEjKLxxv}R{P9{_Qb%vUb znr)NtB}svQ5V|&n9q$Ci!!pjO#!E1FQ}+G_2q7aIGYcSl6FfT)W}`XNuXzEhRPY00 zeVDtqYJCb%amWsNNB>vH8dS&&P0H<)7()rHe~>vUv8okucQ$WEtQ@IkqT578RC>J` zbE7%dsxxQmzh57D5Y`)HOj<05FS#O*9HU3=`hN;~kl z%tb2kn$N30Nh zM3$p{4&(;zivX=;l*{qLhXU>PoT<)^GaFm_VT*;%IHPq8#))jUgFCRn+?L$?=%zST zuM>Hse!nIUT=`8r*SQWK?W3K;l*{Dqe6l1by+th43#wQjHU1ktbsO@uON2kh-nQsU 
zHfYNxXp<`h8*f6ZBhYiP?+{OlH8gDcf>FuT$De1Y7RPMQL8C8$=_}A^U`#uQ8_^e$ zweE$W9_j^(n-aF`_=pSC{0W5r8cR8vId_6(ix`=6mi zDLzx4-IG#eIn89WXBV)(NBXUbr@P!|9ZX|T?J$#O^il+B_cb46>=0!m{{V~|xj2u^ z_e3OG%Ca}bv?6ag*2iBezJ@g~av`a&$=W#^KZ*XtxosWt5t2}25^LF7d$axu=w zPA|bzaCY=nh8MULjkZH4%cz*AWTeaB|2h)rw=3bkjt_@^P@z3=VH+;@&HaYMOZ42K zxfKEgl!1UF0FDIQlmNTMe@)4#@t|A^#t;rV{m|Q+O1b(!YNEFg<$w6B={_Z`_P%-2?_3 zJb|~&rLo`gTo(E*JgL}YfD$I5bdnkhmG z5fKg9NL;(4d*=$?EAMxh4bf)#h6E&?ytWMf=t-gz_fcaW7cK2r zcp2mGl}Y4}rPkysVI+fO%`=T$bbH!xYvKE$P_*zwDM}%aGECrp&LJPNzR2`QS+pZ9 zB*MRkCr>_e-uLAa2`^>Aq_z{fgE*|=zD;>mpK(Zs7ptZ)+Y++FhOd^-{!E%qopSB7 zhliTNzBNu2XtXVGo`b$g(r90jysv6!P`8|#b*5H}PI${4J(KaR-yqLoE7>|p4 zeI6QXAr2gSi_{L7R#OSDlxRrHrfZly3bXgm_!dGO~iFK|l-5SnaFcEOxD2l3D2BG3hR_j zKxPZldR*)M=){{vBytwM$YExWa7g3Jefx`FVS?_pp0I0W4<&T`%4rVr)@O4-;FAY} z1@QM^+>eQ$8~<5kr{S_6Q3j@218=5@`XtPFB|_u}CzFB31hIPu=NSw7ulyit7f%zT zNf^?-+F%OTxjD+-(Jid+@#jwz3wXsbKNE=yPb9N}XKw3r zKm3(Yss&svFC}-l>K~qxv6|hH5P2wn_-is!`kSL0mE$@mvx6BI6BBknF7E(6sh4V$ zY8d6C_y%@>XYq3w4|WgIC$VFLNz)!ltp+L5bhmoB454gkHt zuJqv4sH2iMHB{l~KSray5iF!M{L{4^$a1dNui;kEjsf^d>AM0|5pCTRP}#h)nu)|j z?hs`h;VNt-qqqss=(v+KdUWCEO>nYqD^H?ar$_Hd(@zmjI#1!;lCyl0`7iK1G4n%t zvZH6yp#!#u_pXU|TLJuwu3LH0fVnkRRzzxY*{uX6WNcuL#NUJ$WV^VRYMi23WI*vY z#b5pL?$(<%B2#Y$N|;- -# -# GuessIt is free software; you can redistribute it and/or modify it under -# the terms of the Lesser GNU General Public License as published by -# the Free Software Foundation; either version 3 of the License, or -# (at your option) any later version. -# -# GuessIt is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# Lesser GNU General Public License for more details. -# -# You should have received a copy of the Lesser GNU General Public License -# along with this program. If not, see . -# +""" +Extracts as much information as possible from a video file. 
+""" +from .api import guessit, GuessItApi -from __future__ import absolute_import, division, print_function, unicode_literals - -import pkg_resources from .__version__ import __version__ - -__all__ = ['Guess', 'Language', - 'guess_file_info', 'guess_video_info', - 'guess_movie_info', 'guess_episode_info', - 'default_options'] - - -# Do python3 detection before importing any other module, to be sure that -# it will then always be available -# with code from http://lucumr.pocoo.org/2011/1/22/forwards-compatible-python/ -import sys -if sys.version_info[0] >= 3: # pragma: no cover - PY2, PY3 = False, True - unicode_text_type = str - native_text_type = str - base_text_type = str - - def u(x): - return str(x) - - def s(x): - return x - - class UnicodeMixin(object): - __str__ = lambda x: x.__unicode__() - import binascii - - def to_hex(x): - return binascii.hexlify(x).decode('utf-8') - -else: # pragma: no cover - PY2, PY3 = True, False - __all__ = [str(s) for s in __all__] # fix imports for python2 - unicode_text_type = unicode - native_text_type = str - base_text_type = basestring - - def u(x): - if isinstance(x, str): - return x.decode('utf-8') - if isinstance(x, list): - return [u(s) for s in x] - return unicode(x) - - def s(x): - if isinstance(x, unicode): - return x.encode('utf-8') - if isinstance(x, list): - return [s(y) for y in x] - if isinstance(x, tuple): - return tuple(s(y) for y in x) - if isinstance(x, dict): - return dict((s(key), s(value)) for key, value in x.items()) - return x - - class UnicodeMixin(object): - __str__ = lambda x: unicode(x).encode('utf-8') - - def to_hex(x): - return x.encode('hex') - - range = xrange - - -from guessit.guess import Guess, smart_merge -from guessit.language import Language -from guessit.matcher import IterativeMatcher -from guessit.textutils import clean_default, is_camel, from_camel -import babelfish -import os.path -import logging -from copy import deepcopy - -log = logging.getLogger(__name__) - - -class 
NullHandler(logging.Handler): - def emit(self, record): - pass - -# let's be a nicely behaving library -h = NullHandler() -log.addHandler(h) - - -def _guess_filename(filename, options=None, **kwargs): - mtree = _build_filename_mtree(filename, options=options, **kwargs) - if options.get('split_camel'): - _add_camel_properties(mtree, options=options) - return mtree.matched() - - -def _build_filename_mtree(filename, options=None, **kwargs): - mtree = IterativeMatcher(filename, options=options, **kwargs) - second_pass_options = mtree.second_pass_options - if second_pass_options: - log.debug("Running 2nd pass") - merged_options = dict(options) - merged_options.update(second_pass_options) - mtree = IterativeMatcher(filename, options=merged_options, **kwargs) - return mtree - - -def _add_camel_properties(mtree, options=None, **kwargs): - prop = 'title' if mtree.matched().get('type') != 'episode' else 'series' - value = mtree.matched().get(prop) - _guess_camel_string(mtree, value, options=options, skip_title=False, **kwargs) - - for leaf in mtree.match_tree.unidentified_leaves(): - value = leaf.value - _guess_camel_string(mtree, value, options=options, skip_title=True, **kwargs) - - -def _guess_camel_string(mtree, string, options=None, skip_title=False, **kwargs): - if string and is_camel(string): - log.debug('"%s" is camel cased. Try to detect more properties.' % (string,)) - uncameled_value = from_camel(string) - merged_options = dict(options) - if 'type' in mtree.match_tree.info: - current_type = mtree.match_tree.info.get('type') - if current_type and current_type != 'unknown': - merged_options['type'] = current_type - camel_tree = _build_filename_mtree(uncameled_value, options=merged_options, name_only=True, skip_title=skip_title, **kwargs) - if len(camel_tree.matched()) > 0: - mtree.matched().update(camel_tree.matched()) - return True - return False - - -def guess_video_metadata(filename): - """Gets the video metadata properties out of a given file. 
The file needs to - exist on the filesystem to be able to be analyzed. An empty guess is - returned otherwise. - - You need to have the Enzyme python package installed for this to work.""" - result = Guess() - - def found(prop, value): - result[prop] = value - log.debug('Found with enzyme %s: %s' % (prop, value)) - - # first get the size of the file, in bytes - try: - size = os.stat(filename).st_size - found('fileSize', size) - - except Exception as e: - log.error('Cannot get video file size: %s' % e) - # file probably does not exist, we might as well return now - return result - - # then get additional metadata from the file using enzyme, if available - try: - import enzyme - - with open(filename) as f: - mkv = enzyme.MKV(f) - - found('duration', mkv.info.duration.total_seconds()) - - if mkv.video_tracks: - video_track = mkv.video_tracks[0] - - # resolution - if video_track.height in (480, 720, 1080): - if video_track.interlaced: - found('screenSize', '%di' % video_track.height) - else: - found('screenSize', '%dp' % video_track.height) - else: - # TODO: do we want this? 
- #found('screenSize', '%dx%d' % (video_track.width, video_track.height)) - pass - - # video codec - if video_track.codec_id == 'V_MPEG4/ISO/AVC': - found('videoCodec', 'h264') - elif video_track.codec_id == 'V_MPEG4/ISO/SP': - found('videoCodec', 'DivX') - elif video_track.codec_id == 'V_MPEG4/ISO/ASP': - found('videoCodec', 'XviD') - - else: - log.warning('MKV has no video track') - - if mkv.audio_tracks: - audio_track = mkv.audio_tracks[0] - # audio codec - if audio_track.codec_id == 'A_AC3': - found('audioCodec', 'AC3') - elif audio_track.codec_id == 'A_DTS': - found('audioCodec', 'DTS') - elif audio_track.codec_id == 'A_AAC': - found('audioCodec', 'AAC') - else: - log.warning('MKV has no audio track') - - if mkv.subtitle_tracks: - embedded_subtitle_languages = set() - for st in mkv.subtitle_tracks: - try: - if st.language: - lang = babelfish.Language.fromalpha3b(st.language) - elif st.name: - lang = babelfish.Language.fromname(st.name) - else: - lang = babelfish.Language('und') - - except babelfish.Error: - lang = babelfish.Language('und') - - embedded_subtitle_languages.add(lang) - - found('subtitleLanguage', embedded_subtitle_languages) - else: - log.debug('MKV has no subtitle track') - - return result - - except ImportError: - log.error('Cannot get video file metadata, missing dependency: enzyme') - log.error('Please install it from PyPI, by doing eg: pip install enzyme') - return result - - except IOError as e: - log.error('Could not open file: %s' % filename) - log.error('Make sure it exists and is available for reading on the filesystem') - log.error('Error: %s' % e) - return result - - except enzyme.Error as e: - log.error('Cannot guess video file metadata') - log.error('enzyme.Error while reading file: %s' % filename) - log.error('Error: %s' % e) - return result - -default_options = {} - - -def guess_file_info(filename, info=None, options=None, **kwargs): - """info can contain the names of the various plugins, such as 'filename' to - detect filename 
info, or 'hash_md5' to get the md5 hash of the file. - - >>> testfile = os.path.join(os.path.dirname(__file__), 'test/dummy.srt') - >>> g = guess_file_info(testfile, info = ['hash_md5', 'hash_sha1']) - >>> g['hash_md5'], g['hash_sha1'] - ('64de6b5893cac24456c46a935ef9c359', 'a703fc0fa4518080505809bf562c6fc6f7b3c98c') - """ - info = info or 'filename' - options = options or {} - if default_options: - merged_options = deepcopy(default_options) - merged_options.update(options) - options = merged_options - - result = [] - hashers = [] - - # Force unicode as soon as possible - filename = u(filename) - - if isinstance(info, base_text_type): - info = [info] - - for infotype in info: - if infotype == 'filename': - result.append(_guess_filename(filename, options, **kwargs)) - - elif infotype == 'hash_mpc': - from guessit.hash_mpc import hash_file - try: - result.append(Guess({infotype: hash_file(filename)}, - confidence=1.0)) - except Exception as e: - log.warning('Could not compute MPC-style hash because: %s' % e) - - elif infotype == 'hash_ed2k': - from guessit.hash_ed2k import hash_file - try: - result.append(Guess({infotype: hash_file(filename)}, - confidence=1.0)) - except Exception as e: - log.warning('Could not compute ed2k hash because: %s' % e) - - elif infotype.startswith('hash_'): - import hashlib - hashname = infotype[5:] - try: - hasher = getattr(hashlib, hashname)() - hashers.append((infotype, hasher)) - except AttributeError: - log.warning('Could not compute %s hash because it is not available from python\'s hashlib module' % hashname) - - elif infotype == 'video': - g = guess_video_metadata(filename) - if g: - result.append(g) - - else: - log.warning('Invalid infotype: %s' % infotype) - - # do all the hashes now, but on a single pass - if hashers: - try: - blocksize = 8192 - hasherobjs = dict(hashers).values() - - with open(filename, 'rb') as f: - chunk = f.read(blocksize) - while chunk: - for hasher in hasherobjs: - hasher.update(chunk) - chunk = 
f.read(blocksize) - - for infotype, hasher in hashers: - result.append(Guess({infotype: hasher.hexdigest()}, - confidence=1.0)) - except Exception as e: - log.warning('Could not compute hash because: %s' % e) - - result = smart_merge(result) - - return result - - -def guess_video_info(filename, info=None, options=None, **kwargs): - return guess_file_info(filename, info=info, options=options, type='video', **kwargs) - - -def guess_movie_info(filename, info=None, options=None, **kwargs): - return guess_file_info(filename, info=info, options=options, type='movie', **kwargs) - - -def guess_episode_info(filename, info=None, options=None, **kwargs): - return guess_file_info(filename, info=info, options=options, type='episode', **kwargs) diff --git a/libs/guessit/__main__.py b/libs/guessit/__main__.py index 759c380b..b2b95cfc 100644 --- a/libs/guessit/__main__.py +++ b/libs/guessit/__main__.py @@ -1,58 +1,48 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- -# -# GuessIt - A library for guessing information from filenames -# Copyright (c) 2013 Nicolas Wack -# Copyright (c) 2013 Rémi Alvergnat -# -# GuessIt is free software; you can redistribute it and/or modify it under -# the terms of the Lesser GNU General Public License as published by -# the Free Software Foundation; either version 3 of the License, or -# (at your option) any later version. -# -# GuessIt is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# Lesser GNU General Public License for more details. -# -# You should have received a copy of the Lesser GNU General Public License -# along with this program. If not, see . 
-# +""" +Entry point module +""" +# pragma: no cover +from __future__ import print_function -from __future__ import absolute_import, division, print_function, unicode_literals -from collections import defaultdict +import json import logging import os +import sys -from guessit import PY2, u, guess_file_info, __version__ -from guessit.options import get_opts +import six +from guessit import api from guessit.__version__ import __version__ +from guessit.jsonutils import GuessitEncoder +from guessit.options import argument_parser +from rebulk.__version__ import __version__ as __rebulk_version__ -def guess_file(filename, info='filename', options=None, **kwargs): - options = options or {} - filename = u(filename) - - if not options.get('yaml') and not options.get('show_property'): +def guess_filename(filename, options): + """ + Guess a single filename using given options + """ + if not options.yaml and not options.json and not options.show_property: print('For:', filename) - guess = guess_file_info(filename, info, options, **kwargs) - if not options.get('unidentified'): - try: - del guess['unidentified'] - except KeyError: - pass + cmd_options = vars(options) + cmd_options['implicit'] = True # Force implicit option in CLI - if options.get('show_property'): - print(guess.get(options.get('show_property'), '')) + guess = api.guessit(filename, vars(options)) + + if options.show_property: + print(guess.get(options.show_property, '')) return - if options.get('yaml'): + if options.json: + print(json.dumps(guess, cls=GuessitEncoder, ensure_ascii=False)) + elif options.yaml: import yaml - for k, v in guess.items(): - if isinstance(v, list) and len(v) == 1: - guess[k] = v[0] - ystr = yaml.safe_dump({filename: dict(guess)}, default_flow_style=False) + from guessit import yamlutils + + ystr = yaml.dump({filename: dict(guess)}, Dumper=yamlutils.CustomDumper, default_flow_style=False, + allow_unicode=True) i = 0 for yline in ystr.splitlines(): if i == 0: @@ -62,222 +52,108 @@ def 
guess_file(filename, info='filename', options=None, **kwargs): else: print(yline) i += 1 - return - print('GuessIt found:', guess.nice_string(options.get('advanced'))) - - -def _supported_properties(): - all_properties = defaultdict(list) - transformers_properties = [] - - from guessit.plugins import transformers - for transformer in transformers.all_transformers(): - supported_properties = transformer.supported_properties() - transformers_properties.append((transformer, supported_properties)) - - if isinstance(supported_properties, dict): - for property_name, possible_values in supported_properties.items(): - all_properties[property_name].extend(possible_values) - else: - for property_name in supported_properties: - all_properties[property_name] # just make sure it exists - - return all_properties, transformers_properties - - -def display_transformers(): - print('GuessIt transformers:') - _, transformers_properties = _supported_properties() - for transformer, _ in transformers_properties: - print('[@] %s (%s)' % (transformer.name, transformer.priority)) + else: + print('GuessIt found:', json.dumps(guess, cls=GuessitEncoder, indent=4, ensure_ascii=False)) def display_properties(options): - values = options.values - transformers = options.transformers - name_only = options.name_only + """ + Display properties + """ + properties = api.properties(options) - print('GuessIt properties:') - all_properties, transformers_properties = _supported_properties() - if name_only: - # the 'container' property does not apply when using the --name-only - # option - del all_properties['container'] - - if transformers: - for transformer, properties_list in transformers_properties: - print('[@] %s (%s)' % (transformer.name, transformer.priority)) - for property_name in properties_list: - property_values = all_properties.get(property_name) - print(' [+] %s' % (property_name,)) - if property_values and values: - _display_property_values(property_name, indent=4) - else: - properties_list 
= sorted(all_properties.keys()) - for property_name in properties_list: - property_values = all_properties.get(property_name) - print(' [+] %s' % (property_name,)) - if property_values and values: - _display_property_values(property_name, indent=4) - - -def _display_property_values(property_name, indent=2): - all_properties, _ = _supported_properties() - property_values = all_properties.get(property_name) - for property_value in property_values: - print(indent * ' ' + '[!] %s' % (property_value,)) - - -def run_demo(episodes=True, movies=True, options=None): - # NOTE: tests should not be added here but rather in the tests/ folder - # this is just intended as a quick example - if episodes: - testeps = ['Series/Californication/Season 2/Californication.2x05.Vaginatown.HDTV.XviD-0TV.[tvu.org.ru].avi', - 'Series/dexter/Dexter.5x02.Hello,.Bandit.ENG.-.sub.FR.HDTV.XviD-AlFleNi-TeaM.[tvu.org.ru].avi', - 'Series/Treme/Treme.1x03.Right.Place,.Wrong.Time.HDTV.XviD-NoTV.[tvu.org.ru].avi', - 'Series/Duckman/Duckman - 101 (01) - 20021107 - I, Duckman.avi', - 'Series/Duckman/Duckman - S1E13 Joking The Chicken (unedited).avi', - 'Series/Simpsons/The_simpsons_s13e18_-_i_am_furious_yellow.mpg', - 'Series/Simpsons/Saison 12 Français/Simpsons,.The.12x08.A.Bas.Le.Sergent.Skinner.FR.[tvu.org.ru].avi', - 'Series/Dr._Slump_-_002_DVB-Rip_Catalan_by_kelf.avi', - 'Series/Kaamelott/Kaamelott - Livre V - Second Volet - HD 704x396 Xvid 2 pass - Son 5.1 - TntRip by Slurm.avi'] - - for f in testeps: - print('-' * 80) - guess_file(f, options=options, type='episode') - - if movies: - testmovies = ['Movies/Fear and Loathing in Las Vegas (1998)/Fear.and.Loathing.in.Las.Vegas.720p.HDDVD.DTS.x264-ESiR.mkv', - 'Movies/El Dia de la Bestia (1995)/El.dia.de.la.bestia.DVDrip.Spanish.DivX.by.Artik[SEDG].avi', - 'Movies/Blade Runner (1982)/Blade.Runner.(1982).(Director\'s.Cut).CD1.DVDRip.XviD.AC3-WAF.avi', - 'Movies/Dark City (1998)/Dark.City.(1998).DC.BDRip.720p.DTS.X264-CHD.mkv', - 'Movies/Sin City (BluRay) 
(2005)/Sin.City.2005.BDRip.720p.x264.AC3-SEPTiC.mkv', - 'Movies/Borat (2006)/Borat.(2006).R5.PROPER.REPACK.DVDRip.XviD-PUKKA.avi', - '[XCT].Le.Prestige.(The.Prestige).DVDRip.[x264.HP.He-Aac.{Fr-Eng}.St{Fr-Eng}.Chaps].mkv', - 'Battle Royale (2000)/Battle.Royale.(Batoru.Rowaiaru).(2000).(Special.Edition).CD1of2.DVDRiP.XviD-[ZeaL].avi', - 'Movies/Brazil (1985)/Brazil_Criterion_Edition_(1985).CD2.English.srt', - 'Movies/Persepolis (2007)/[XCT] Persepolis [H264+Aac-128(Fr-Eng)+ST(Fr-Eng)+Ind].mkv', - 'Movies/Toy Story (1995)/Toy Story [HDTV 720p English-Spanish].mkv', - 'Movies/Pirates of the Caribbean: The Curse of the Black Pearl (2003)/Pirates.Of.The.Carribean.DC.2003.iNT.DVDRip.XviD.AC3-NDRT.CD1.avi', - 'Movies/Office Space (1999)/Office.Space.[Dual-DVDRip].[Spanish-English].[XviD-AC3-AC3].[by.Oswald].avi', - 'Movies/The NeverEnding Story (1984)/The.NeverEnding.Story.1.1984.DVDRip.AC3.Xvid-Monteque.avi', - 'Movies/Juno (2007)/Juno KLAXXON.avi', - 'Movies/Chat noir, chat blanc (1998)/Chat noir, Chat blanc - Emir Kusturica (VO - VF - sub FR - Chapters).mkv', - 'Movies/Wild Zero (2000)/Wild.Zero.DVDivX-EPiC.srt', - 'Movies/El Bosque Animado (1987)/El.Bosque.Animado.[Jose.Luis.Cuerda.1987].[Xvid-Dvdrip-720x432].avi', - 'testsmewt_bugs/movies/Baraka_Edition_Collector.avi' - ] - - for f in testmovies: - print('-' * 80) - guess_file(f, options=options, type='movie') - - -def submit_bug(filename, options): - import requests # only import when needed - from requests.exceptions import RequestException - - try: - opts = dict((k, v) for k, v in options.__dict__.items() - if v and k != 'submit_bug') - - r = requests.post('http://localhost:5000/bugs', {'filename': filename, - 'version': __version__, - 'options': str(opts)}) - if r.status_code == 200: - print('Successfully submitted file: %s' % r.text) + if options.json: + if options.values: + print(json.dumps(properties, cls=GuessitEncoder, ensure_ascii=False)) else: - print('Could not submit bug at the moment, please try again 
later.') + print(json.dumps(list(properties.keys()), cls=GuessitEncoder, ensure_ascii=False)) + elif options.yaml: + import yaml + from guessit import yamlutils + if options.values: + print(yaml.dump(properties, Dumper=yamlutils.CustomDumper, default_flow_style=False, allow_unicode=True)) + else: + print(yaml.dump(list(properties.keys()), Dumper=yamlutils.CustomDumper, default_flow_style=False, + allow_unicode=True)) + else: + print('GuessIt properties:') - except RequestException as e: - print('Could not submit bug at the moment, please try again later.') + properties_list = list(sorted(properties.keys())) + for property_name in properties_list: + property_values = properties.get(property_name) + print(2 * ' ' + '[+] %s' % (property_name,)) + if property_values and options.values: + for property_value in property_values: + print(4 * ' ' + '[!] %s' % (property_value,)) -def main(args=None, setup_logging=True): - if setup_logging: - from guessit import slogging - slogging.setup_logging() - - if PY2: # pragma: no cover - import codecs - import locale - import sys - +def main(args=None): # pylint:disable=too-many-branches + """ + Main function for entry point + """ + if six.PY2 and os.name == 'nt': # pragma: no cover # see http://bugs.python.org/issue2128 - if os.name == 'nt': - for i, a in enumerate(sys.argv): - sys.argv[i] = a.decode(locale.getpreferredencoding()) + import locale - # see https://github.com/wackou/guessit/issues/43 - # and http://stackoverflow.com/questions/4545661/unicodedecodeerror-when-redirecting-to-file - # Wrap sys.stdout into a StreamWriter to allow writing unicode. 
- sys.stdout = codecs.getwriter(locale.getpreferredencoding())(sys.stdout) + for i, j in enumerate(sys.argv): + sys.argv[i] = j.decode(locale.getpreferredencoding()) - from guessit.plugins import transformers - - if args: - options = get_opts().parse_args(args) - else: # pragma: no cover - options = get_opts().parse_args() + if args is None: # pragma: no cover + options = argument_parser.parse_args() + else: + options = argument_parser.parse_args(args) if options.verbose: + logging.basicConfig(stream=sys.stdout, format='%(message)s') logging.getLogger().setLevel(logging.DEBUG) help_required = True - if options.properties or options.values: - display_properties(options) - help_required = False - elif options.transformers: - display_transformers() - help_required = False - - if options.demo: - run_demo(episodes=True, movies=True, options=vars(options)) - help_required = False if options.version: print('+-------------------------------------------------------+') - print('+ GuessIt ' + __version__ + (28-len(__version__)) * ' ' + '+') + print('+ GuessIt ' + __version__ + (28 - len(__version__)) * ' ' + '+') + print('+-------------------------------------------------------+') + print('+ Rebulk ' + __rebulk_version__ + (29 - len(__rebulk_version__)) * ' ' + '+') print('+-------------------------------------------------------+') print('| Please report any bug or feature request at |') - print('| https://github.com/wackou/guessit/issues. |') + print('| https://github.com/guessit-io/guessit/issues. |') print('+-------------------------------------------------------+') help_required = False if options.yaml: try: - import yaml, babelfish - def default_representer(dumper, data): - return dumper.represent_str(str(data)) - yaml.SafeDumper.add_representer(babelfish.Language, default_representer) - yaml.SafeDumper.add_representer(babelfish.Country, default_representer) + import yaml # pylint:disable=unused-variable except ImportError: # pragma: no cover - print('PyYAML not found. 
Using default output.') + options.yaml = False + print('PyYAML is not installed. \'--yaml\' option will be ignored ...', file=sys.stderr) + + if options.properties or options.values: + display_properties(options) + help_required = False filenames = [] if options.filename: - filenames.extend(options.filename) + for filename in options.filename: + filenames.append(filename) if options.input_file: - input_file = open(options.input_file, 'r') + if six.PY2: + input_file = open(options.input_file, 'r') + else: + input_file = open(options.input_file, 'r', encoding='utf-8') try: filenames.extend([line.strip() for line in input_file.readlines()]) finally: input_file.close() - filenames = filter(lambda f: f, filenames) + filenames = list(filter(lambda f: f, filenames)) if filenames: - help_required = False - if options.submit_bug: - for filename in filenames: - submit_bug(filename, options) - else: - for filename in filenames: - guess_file(filename, - info=options.info.split(','), - options=vars(options)) + for filename in filenames: + help_required = False + guess_filename(filename, options) if help_required: # pragma: no cover - get_opts().print_help() + argument_parser.print_help() -if __name__ == '__main__': + +if __name__ == '__main__': # pragma: no cover main() diff --git a/libs/guessit/__version__.py b/libs/guessit/__version__.py index f8ec056e..cef422c9 100644 --- a/libs/guessit/__version__.py +++ b/libs/guessit/__version__.py @@ -1,20 +1,7 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- -# -# GuessIt - A library for guessing information from filenames -# Copyright (c) 2013 Nicolas Wack -# -# GuessIt is free software; you can redistribute it and/or modify it under -# the terms of the Lesser GNU General Public License as published by -# the Free Software Foundation; either version 3 of the License, or -# (at your option) any later version. 
-# -# GuessIt is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# Lesser GNU General Public License for more details. -# -# You should have received a copy of the Lesser GNU General Public License -# along with this program. If not, see . -# -__version__ = '0.10.2.dev0' +""" +Version module +""" +# pragma: no cover +__version__ = '2.1.1.dev0' diff --git a/libs/guessit/api.py b/libs/guessit/api.py new file mode 100644 index 00000000..900f6965 --- /dev/null +++ b/libs/guessit/api.py @@ -0,0 +1,150 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +API functions that can be used by external software +""" +try: + from collections import OrderedDict +except ImportError: # pragma: no-cover + from ordereddict import OrderedDict # pylint:disable=import-error + +import traceback + +import six + +from rebulk.introspector import introspect + +from .rules import rebulk_builder +from .options import parse_options +from .__version__ import __version__ + + +class GuessitException(Exception): + """ + Exception raised when guessit fails to perform a guess because of an internal error. 
+ """ + def __init__(self, string, options): + super(GuessitException, self).__init__("An internal error has occured in guessit.\n" + "===================== Guessit Exception Report =====================\n" + "version=%s\n" + "string=%s\n" + "options=%s\n" + "--------------------------------------------------------------------\n" + "%s" + "--------------------------------------------------------------------\n" + "Please report at " + "https://github.com/guessit-io/guessit/issues.\n" + "====================================================================" % + (__version__, str(string), str(options), traceback.format_exc())) + + self.string = string + self.options = options + + +def guessit(string, options=None): + """ + Retrieves all matches from string as a dict + :param string: the filename or release name + :type string: str + :param options: the filename or release name + :type options: str|dict + :return: + :rtype: + """ + return default_api.guessit(string, options) + + +def properties(options=None): + """ + Retrieves all properties with possible values that can be guessed + :param options: + :type options: + :return: + :rtype: + """ + return default_api.properties(options) + + +class GuessItApi(object): + """ + An api class that can be configured with custom Rebulk configuration. + """ + + def __init__(self, rebulk): + """ + :param rebulk: Rebulk instance to use. 
+ :type rebulk: Rebulk + :return: + :rtype: + """ + self.rebulk = rebulk + + @staticmethod + def _fix_option_encoding(value): + if isinstance(value, list): + return [GuessItApi._fix_option_encoding(item) for item in value] + if six.PY2 and isinstance(value, six.text_type): + return value.encode("utf-8") + if six.PY3 and isinstance(value, six.binary_type): + return value.decode('ascii') + return value + + def guessit(self, string, options=None): + """ + Retrieves all matches from string as a dict + :param string: the filename or release name + :type string: str + :param options: the filename or release name + :type options: str|dict + :return: + :rtype: + """ + try: + options = parse_options(options) + result_decode = False + result_encode = False + + fixed_options = {} + for (key, value) in options.items(): + key = GuessItApi._fix_option_encoding(key) + value = GuessItApi._fix_option_encoding(value) + fixed_options[key] = value + options = fixed_options + + if six.PY2 and isinstance(string, six.text_type): + string = string.encode("utf-8") + result_decode = True + if six.PY3 and isinstance(string, six.binary_type): + string = string.decode('ascii') + result_encode = True + matches = self.rebulk.matches(string, options) + if result_decode: + for match in matches: + if isinstance(match.value, six.binary_type): + match.value = match.value.decode("utf-8") + if result_encode: + for match in matches: + if isinstance(match.value, six.text_type): + match.value = match.value.encode("ascii") + return matches.to_dict(options.get('advanced', False), options.get('implicit', False)) + except: + raise GuessitException(string, options) + + def properties(self, options=None): + """ + Grab properties and values that can be generated. 
+ :param options: + :type options: + :return: + :rtype: + """ + unordered = introspect(self.rebulk, options).properties + ordered = OrderedDict() + for k in sorted(unordered.keys(), key=six.text_type): + ordered[k] = list(sorted(unordered[k], key=six.text_type)) + if hasattr(self.rebulk, 'customize_properties'): + ordered = self.rebulk.customize_properties(ordered) + return ordered + + +default_api = GuessItApi(rebulk_builder()) diff --git a/libs/guessit/backports.py b/libs/guessit/backports.py new file mode 100644 index 00000000..3e94e27a --- /dev/null +++ b/libs/guessit/backports.py @@ -0,0 +1,27 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +Backports +""" +# pragma: no-cover +# pylint: disabled + +def cmp_to_key(mycmp): + """functools.cmp_to_key backport""" + class KeyClass(object): + """Key class""" + def __init__(self, obj, *args): # pylint: disable=unused-argument + self.obj = obj + def __lt__(self, other): + return mycmp(self.obj, other.obj) < 0 + def __gt__(self, other): + return mycmp(self.obj, other.obj) > 0 + def __eq__(self, other): + return mycmp(self.obj, other.obj) == 0 + def __le__(self, other): + return mycmp(self.obj, other.obj) <= 0 + def __ge__(self, other): + return mycmp(self.obj, other.obj) >= 0 + def __ne__(self, other): + return mycmp(self.obj, other.obj) != 0 + return KeyClass diff --git a/libs/guessit/containers.py b/libs/guessit/containers.py deleted file mode 100644 index 74847008..00000000 --- a/libs/guessit/containers.py +++ /dev/null @@ -1,771 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# -# GuessIt - A library for guessing information from filenames -# Copyright (c) 2013 Nicolas Wack -# Copyright (c) 2013 Rémi Alvergnat -# -# GuessIt is free software; you can redistribute it and/or modify it under -# the terms of the Lesser GNU General Public License as published by -# the Free Software Foundation; either version 3 of the License, or -# (at your option) any later version. 
-# -# GuessIt is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# Lesser GNU General Public License for more details. -# -# You should have received a copy of the Lesser GNU General Public License -# along with this program. If not, see . -# - -from __future__ import absolute_import, division, print_function, unicode_literals - -from .patterns import compile_pattern, sep -from . import base_text_type -from .guess import Guess -import types - - -def _get_span(prop, match): - """Retrieves span for a match""" - if not prop.global_span and match.re.groups: - start = None - end = None - for i in range(1, match.re.groups + 1): - span = match.span(i) - if start is None or span[0] < start: - start = span[0] - if end is None or span[1] > end: - end = span[1] - return start, end - else: - return match.span() - start = span[0] - end = span[1] - - -def _trim_span(span, value, blanks = sep): - start, end = span - - for i in range(0, len(value)): - if value[i] in blanks: - start += 1 - else: - break - - for i in reversed(range(0, len(value))): - if value[i] in blanks: - end -= 1 - else: - break - if end <= start: - return -1, -1 - return start, end - - -def _get_groups(compiled_re): - """ - Retrieves groups from re - - :return: list of group names - """ - if compiled_re.groups: - indexgroup = {} - for k, i in compiled_re.groupindex.items(): - indexgroup[i] = k - ret = [] - for i in range(1, compiled_re.groups + 1): - ret.append(indexgroup.get(i, i)) - return ret - else: - return [None] - - -class NoValidator(object): - def validate(self, prop, string, node, match, entry_start, entry_end): - return True - - -class LeftValidator(object): - """Make sure our match is starting by separator, or by another entry""" - - def validate(self, prop, string, node, match, entry_start, entry_end): - span = _get_span(prop, match) - span = _trim_span(span, 
string[span[0]:span[1]]) - start, end = span - - sep_start = start <= 0 or string[start - 1] in sep - start_by_other = start in entry_end - if not sep_start and not start_by_other: - return False - return True - - -class RightValidator(object): - """Make sure our match is ended by separator, or by another entry""" - - def validate(self, prop, string, node, match, entry_start, entry_end): - span = _get_span(prop, match) - span = _trim_span(span, string[span[0]:span[1]]) - start, end = span - - sep_end = end >= len(string) or string[end] in sep - end_by_other = end in entry_start - if not sep_end and not end_by_other: - return False - return True - - -class ChainedValidator(object): - def __init__(self, *validators): - self._validators = validators - - def validate(self, prop, string, node, match, entry_start, entry_end): - for validator in self._validators: - if not validator.validate(prop, string, node, match, entry_start, entry_end): - return False - return True - - -class SameKeyValidator(object): - def __init__(self, validator_function): - self.validator_function = validator_function - - def validate(self, prop, string, node, match, entry_start, entry_end): - for key in prop.keys: - for same_value_leaf in node.root.leaves_containing(key): - ret = self.validator_function(same_value_leaf, key, prop, string, node, match, entry_start, entry_end) - if ret is not None: - return ret - return True - - -class OnlyOneValidator(SameKeyValidator): - def __init__(self): - super(OnlyOneValidator, self).__init__(lambda same_value_leaf, key, prop, string, node, match, entry_start, entry_end: False) - - -class DefaultValidator(object): - """Make sure our match is surrounded by separators, or by another entry""" - def validate(self, prop, string, node, match, entry_start, entry_end): - span = _get_span(prop, match) - span = _trim_span(span, string[span[0]:span[1]]) - start, end = span - - sep_start = start <= 0 or string[start - 1] in sep - sep_end = end >= len(string) or 
string[end] in sep - start_by_other = start in entry_end - end_by_other = end in entry_start - if (sep_start or start_by_other) and (sep_end or end_by_other): - return True - return False - - -class FunctionValidator(object): - def __init__(self, function): - self.function = function - - def validate(self, prop, string, node, match, entry_start, entry_end): - return self.function(prop, string, node, match, entry_start, entry_end) - - -class FormatterValidator(object): - def __init__(self, group_name=None, formatted_validator=None): - self.group_name = group_name - self.formatted_validator = formatted_validator - - def validate(self, prop, string, node, match, entry_start, entry_end): - if self.group_name: - formatted = prop.format(match.group(self.group_name), self.group_name) - else: - formatted = prop.format(match.group()) - if self.formatted_validator: - return self.formatted_validator(formatted) - else: - return formatted - - -def _get_positions(prop, string, node, match, entry_start, entry_end): - span = match.span() - start = span[0] - end = span[1] - - at_start = True - at_end = True - - while start > 0: - start -= 1 - if string[start] not in sep: - at_start = False - break - while end < len(string) - 1: - end += 1 - if string[end] not in sep: - at_end = False - break - return at_start, at_end - - -class WeakValidator(DefaultValidator): - """Make sure our match is surrounded by separators and is the first or last element in the string""" - def validate(self, prop, string, node, match, entry_start, entry_end): - if super(WeakValidator, self).validate(prop, string, node, match, entry_start, entry_end): - at_start, at_end = _get_positions(prop, string, node, match, entry_start, entry_end) - return at_start or at_end - return False - - -class NeighborValidator(DefaultValidator): - """Make sure the node is next another one""" - def validate(self, prop, string, node, match, entry_start, entry_end): - at_start, at_end = _get_positions(prop, string, node, match, 
entry_start, entry_end) - - if at_start: - previous_leaf = node.root.previous_leaf(node) - if previous_leaf is not None: - return True - - if at_end: - next_leaf = node.root.next_leaf(node) - if next_leaf is not None: - return True - - return False - - -class LeavesValidator(DefaultValidator): - def __init__(self, lambdas=None, previous_lambdas=None, next_lambdas=None, both_side=False, default_=True): - self.previous_lambdas = previous_lambdas if previous_lambdas is not None else [] - self.next_lambdas = next_lambdas if next_lambdas is not None else [] - if lambdas: - self.previous_lambdas.extend(lambdas) - self.next_lambdas.extend(lambdas) - self.both_side = both_side - self.default_ = default_ - - """Make sure our match is surrounded by separators and validates defined lambdas""" - def validate(self, prop, string, node, match, entry_start, entry_end): - if self.default_: - super_ret = super(LeavesValidator, self).validate(prop, string, node, match, entry_start, entry_end) - else: - super_ret = True - if not super_ret: - return False - - previous_ = self._validate_previous(prop, string, node, match, entry_start, entry_end) - next_ = self._validate_next(prop, string, node, match, entry_start, entry_end) - - if previous_ is None and next_ is None: - return super_ret - if self.both_side: - return previous_ and next_ - else: - return previous_ or next_ - - def _validate_previous(self, prop, string, node, match, entry_start, entry_end): - if self.previous_lambdas: - for leaf in node.root.previous_leaves(node): - for lambda_ in self.previous_lambdas: - ret = self._check_rule(lambda_, leaf) - if ret is not None: - return ret - return False - - def _validate_next(self, prop, string, node, match, entry_start, entry_end): - if self.next_lambdas: - for leaf in node.root.next_leaves(node): - for lambda_ in self.next_lambdas: - ret = self._check_rule(lambda_, leaf) - if ret is not None: - return ret - return False - - def _check_rule(self, lambda_, previous_leaf): - return 
lambda_(previous_leaf) - - -class _Property: - """Represents a property configuration.""" - def __init__(self, keys=None, pattern=None, canonical_form=None, canonical_from_pattern=True, confidence=1.0, enhance=True, global_span=False, validator=DefaultValidator(), formatter=None, disabler=None, confidence_lambda=None): - """ - :param keys: Keys of the property (format, screenSize, ...) - :type keys: string - :param canonical_form: Unique value of the property (DVD, 720p, ...) - :type canonical_form: string - :param pattern: Regexp pattern - :type pattern: string - :param confidence: confidence - :type confidence: float - :param enhance: enhance the pattern - :type enhance: boolean - :param global_span: if True, the whole match span will used to create the Guess. - Else, the span from the capturing groups will be used. - :type global_span: boolean - :param validator: Validator to use - :type validator: :class:`DefaultValidator` - :param formatter: Formater to use - :type formatter: function - """ - if isinstance(keys, list): - self.keys = keys - elif isinstance(keys, base_text_type): - self.keys = [keys] - else: - self.keys = [] - self.canonical_form = canonical_form - if pattern is not None: - self.pattern = pattern - else: - self.pattern = canonical_form - if self.canonical_form is None and canonical_from_pattern: - self.canonical_form = self.pattern - self.compiled = compile_pattern(self.pattern, enhance=enhance) - for group_name in _get_groups(self.compiled): - if isinstance(group_name, base_text_type) and not group_name in self.keys: - self.keys.append(group_name) - if not self.keys: - raise ValueError("No property key is defined") - self.confidence = confidence - self.confidence_lambda = confidence_lambda - self.global_span = global_span - self.validator = validator - self.formatter = formatter - self.disabler = disabler - - def disabled(self, options): - if self.disabler: - return self.disabler(options) - return False - - def format(self, value, 
group_name=None): - """Retrieves the final value from re group match value""" - formatter = None - if isinstance(self.formatter, dict): - formatter = self.formatter.get(group_name) - if formatter is None and group_name is not None: - formatter = self.formatter.get(None) - else: - formatter = self.formatter - if isinstance(formatter, types.FunctionType): - return formatter(value) - elif formatter is not None: - return formatter.format(value) - return value - - def __repr__(self): - return "%s: %s" % (self.keys, self.canonical_form if self.canonical_form else self.pattern) - - -class PropertiesContainer(object): - def __init__(self, **kwargs): - self._properties = [] - self.default_property_kwargs = kwargs - - def unregister_property(self, name, *canonical_forms): - """Unregister a property canonical forms - - If canonical_forms are specified, only those values will be unregistered - - :param name: Property name to unregister - :type name: string - :param canonical_forms: Values to unregister - :type canonical_forms: varargs of string - """ - _properties = [prop for prop in self._properties if prop.name == name and (not canonical_forms or prop.canonical_form in canonical_forms)] - - def register_property(self, name, *patterns, **property_params): - """Register property with defined canonical form and patterns. - - :param name: name of the property (format, screenSize, ...) 
- :type name: string - :param patterns: regular expression patterns to register for the property canonical_form - :type patterns: varargs of string - """ - properties = [] - for pattern in patterns: - params = dict(self.default_property_kwargs) - params.update(property_params) - if isinstance(pattern, dict): - params.update(pattern) - prop = _Property(name, **params) - else: - prop = _Property(name, pattern, **params) - self._properties.append(prop) - properties.append(prop) - return properties - - def register_canonical_properties(self, name, *canonical_forms, **property_params): - """Register properties from their canonical forms. - - :param name: name of the property (releaseGroup, ...) - :type name: string - :param canonical_forms: values of the property ('ESiR', 'WAF', 'SEPTiC', ...) - :type canonical_forms: varargs of strings - """ - properties = [] - for canonical_form in canonical_forms: - params = dict(property_params) - params['canonical_form'] = canonical_form - properties.extend(self.register_property(name, canonical_form, **property_params)) - return properties - - def unregister_all_properties(self): - """Unregister all defined properties""" - self._properties.clear() - - def find_properties(self, string, node, options, name=None, validate=True, re_match=False, sort=True, multiple=False): - """Find all distinct properties for given string - - If no capturing group is defined in the property, value will be grabbed from the entire match. - - If one ore more unnamed capturing group is defined in the property, first capturing group will be used. - - If named capturing group are defined in the property, they will be returned as property key. - - If validate, found properties will be validated by their defined validator - - If re_match, re.match will be used instead of re.search. - - if sort, found properties will be sorted from longer match to shorter match. 
- - If multiple is False and multiple values are found for the same property, the more confident one will be returned. - - If multiple is False and multiple values are found for the same property and the same confidence, the longer will be returned. - - :param string: input string - :type string: string - - :param node: current node of the matching tree - :type node: :class:`guessit.matchtree.MatchTree` - - :param name: name of property to find - :type name: string - - :param re_match: use re.match instead of re.search - :type re_match: bool - - :param multiple: Allows multiple property values to be returned - :type multiple: bool - - :return: found properties - :rtype: list of tuples (:class:`_Property`, match, list of tuples (property_name, tuple(value_start, value_end))) - - :see: `_Property` - :see: `register_property` - :see: `register_canonical_properties` - """ - entry_start = {} - entry_end = {} - - entries = [] - duplicate_matches = {} - - ret = [] - - if not string.strip(): - return ret - - # search all properties - for prop in self.get_properties(name): - if not prop.disabled(options): - valid_match = None - if re_match: - match = prop.compiled.match(string) - if match: - entries.append((prop, match)) - else: - matches = list(prop.compiled.finditer(string)) - duplicate_matches[prop] = matches - for match in matches: - entries.append((prop, match)) - - for prop, match in entries: - # compute confidence - if prop.confidence_lambda: - computed_confidence = prop.confidence_lambda(match) - if computed_confidence is not None: - prop.confidence = computed_confidence - - if validate: - # compute entries start and ends - for prop, match in entries: - start, end = _get_span(prop, match) - - if start not in entry_start: - entry_start[start] = [prop] - else: - entry_start[start].append(prop) - - if end not in entry_end: - entry_end[end] = [prop] - else: - entry_end[end].append(prop) - - # remove invalid values - while True: - invalid_entries = [] - for entry in 
entries: - prop, match = entry - if not prop.validator.validate(prop, string, node, match, entry_start, entry_end): - invalid_entries.append(entry) - if not invalid_entries: - break - for entry in invalid_entries: - prop, match = entry - entries.remove(entry) - prop_duplicate_matches = duplicate_matches.get(prop) - if prop_duplicate_matches: - prop_duplicate_matches.remove(match) - invalid_span = _get_span(prop, match) - start = invalid_span[0] - end = invalid_span[1] - entry_start[start].remove(prop) - if not entry_start.get(start): - del entry_start[start] - entry_end[end].remove(prop) - if not entry_end.get(end): - del entry_end[end] - - for prop, prop_duplicate_matches in duplicate_matches.items(): - # Keeping the last valid match. - # Needed for the.100.109.hdtv-lol.mp4 - for duplicate_match in prop_duplicate_matches[:-1]: - entries.remove((prop, duplicate_match)) - - if multiple: - ret = entries - else: - # keep only best match if multiple values where found - entries_dict = {} - for entry in entries: - for key in prop.keys: - if key not in entries_dict: - entries_dict[key] = [] - entries_dict[key].append(entry) - - for key_entries in entries_dict.values(): - if multiple: - for entry in key_entries: - ret.append(entry) - else: - best_ret = {} - - best_prop, best_match = None, None - if len(key_entries) == 1: - best_prop, best_match = key_entries[0] - else: - for prop, match in key_entries: - start, end = _get_span(prop, match) - if not best_prop or \ - best_prop.confidence < best_prop.confidence or \ - best_prop.confidence == best_prop.confidence and \ - best_match.span()[1] - best_match.span()[0] < match.span()[1] - match.span()[0]: - best_prop, best_match = prop, match - - best_ret[best_prop] = best_match - - for prop, match in best_ret.items(): - ret.append((prop, match)) - - if sort: - def _sorting(x): - _, x_match = x - x_start, x_end = x_match.span() - return x_start - x_end - - ret.sort(key=_sorting) - - return ret - - def as_guess(self, 
found_properties, input=None, filter_=None, sep_replacement=None, multiple=False, *args, **kwargs): - if filter_ is None: - filter_ = lambda property, *args, **kwargs: True - guesses = [] if multiple else None - for prop, match in found_properties: - first_key = None - for key in prop.keys: - # First property key will be used as base for effective name - if isinstance(key, base_text_type): - if first_key is None: - first_key = key - break - property_name = first_key if first_key else None - span = _get_span(prop, match) - guess = Guess(confidence=prop.confidence, input=input, span=span, prop=property_name) - groups = _get_groups(match.re) - for group_name in groups: - name = group_name if isinstance(group_name, base_text_type) else property_name if property_name not in groups else None - if name: - value = self._effective_prop_value(prop, group_name, input, match.span(group_name) if group_name else match.span(), sep_replacement) - if not value is None: - is_string = isinstance(value, base_text_type) - if not is_string or is_string and value: # Keep non empty strings and other defined objects - if isinstance(value, dict): - for k, v in value.items(): - if k is None: - k = name - guess[k] = v - else: - if name in guess: - if not isinstance(guess[name], list): - guess[name] = [guess[name]] - guess[name].append(value) - else: - guess[name] = value - if group_name: - guess.metadata(prop).span = match.span(group_name) - if filter_(guess): - if multiple: - guesses.append(guess) - else: - return guess - return guesses - - def _effective_prop_value(self, prop, group_name, input=None, span=None, sep_replacement=None): - if prop.canonical_form: - return prop.canonical_form - if input is None: - return None - value = input - if span is not None: - value = value[span[0]:span[1]] - value = input[span[0]:span[1]] if input else None - if sep_replacement: - for sep_char in sep: - value = value.replace(sep_char, sep_replacement) - if value: - value = prop.format(value, group_name) - 
return value - - def get_properties(self, name=None, canonical_form=None): - """Retrieve properties - - :return: Properties - :rtype: generator - """ - for prop in self._properties: - if (name is None or name in prop.keys) and (canonical_form is None or prop.canonical_form == canonical_form): - yield prop - - def get_supported_properties(self): - supported_properties = {} - for prop in self.get_properties(): - for k in prop.keys: - values = supported_properties.get(k) - if not values: - values = set() - supported_properties[k] = values - if prop.canonical_form: - values.add(prop.canonical_form) - return supported_properties - - -class QualitiesContainer(): - def __init__(self): - self._qualities = {} - - def register_quality(self, name, canonical_form, rating): - """Register a quality rating. - - :param name: Name of the property - :type name: string - :param canonical_form: Value of the property - :type canonical_form: string - :param rating: Estimated quality rating for the property - :type rating: int - """ - property_qualities = self._qualities.get(name) - - if property_qualities is None: - property_qualities = {} - self._qualities[name] = property_qualities - - property_qualities[canonical_form] = rating - - def unregister_quality(self, name, *canonical_forms): - """Unregister quality ratings for given property name. 
- - If canonical_forms are specified, only those values will be unregistered - - :param name: Name of the property - :type name: string - :param canonical_forms: Value of the property - :type canonical_forms: string - """ - if not canonical_forms: - if name in self._qualities: - del self._qualities[name] - else: - property_qualities = self._qualities.get(name) - if property_qualities is not None: - for property_canonical_form in canonical_forms: - if property_canonical_form in property_qualities: - del property_qualities[property_canonical_form] - if not property_qualities: - del self._qualities[name] - - def clear_qualities(self,): - """Unregister all defined quality ratings. - """ - self._qualities.clear() - - def rate_quality(self, guess, *props): - """Rate the quality of guess. - - :param guess: Guess to rate - :type guess: :class:`guessit.guess.Guess` - :param props: Properties to include in the rating. if empty, rating will be performed for all guess properties. - :type props: varargs of string - - :return: Quality of the guess. The higher, the better. 
- :rtype: int - """ - rate = 0 - if not props: - props = guess.keys() - for prop in props: - prop_value = guess.get(prop) - prop_qualities = self._qualities.get(prop) - if prop_value is not None and prop_qualities is not None: - rate += prop_qualities.get(prop_value, 0) - return rate - - def best_quality_properties(self, props, *guesses): - """Retrieve the best quality guess, based on given properties - - :param props: Properties to include in the rating - :type props: list of strings - :param guesses: Guesses to rate - :type guesses: :class:`guessit.guess.Guess` - - :return: Best quality guess from all passed guesses - :rtype: :class:`guessit.guess.Guess` - """ - best_guess = None - best_rate = None - for guess in guesses: - rate = self.rate_quality(guess, *props) - if best_rate is None or best_rate < rate: - best_rate = rate - best_guess = guess - return best_guess - - def best_quality(self, *guesses): - """Retrieve the best quality guess. - - :param guesses: Guesses to rate - :type guesses: :class:`guessit.guess.Guess` - - :return: Best quality guess from all passed guesses - :rtype: :class:`guessit.guess.Guess` - """ - best_guess = None - best_rate = None - for guess in guesses: - rate = self.rate_quality(guess) - if best_rate is None or best_rate < rate: - best_rate = rate - best_guess = guess - return best_guess - diff --git a/libs/guessit/date.py b/libs/guessit/date.py deleted file mode 100644 index ed38d1ba..00000000 --- a/libs/guessit/date.py +++ /dev/null @@ -1,129 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# -# GuessIt - A library for guessing information from filenames -# Copyright (c) 2013 Nicolas Wack -# -# GuessIt is free software; you can redistribute it and/or modify it under -# the terms of the Lesser GNU General Public License as published by -# the Free Software Foundation; either version 3 of the License, or -# (at your option) any later version. 
-# -# GuessIt is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# Lesser GNU General Public License for more details. -# -# You should have received a copy of the Lesser GNU General Public License -# along with this program. If not, see . -# - -from __future__ import absolute_import, division, print_function, unicode_literals - -import datetime - -import re - -from dateutil import parser - - -_dsep = r'[-/ \.]' -_dsep_bis = r'[-/ \.x]' - -date_regexps = [ - re.compile('[^\d](\d{8})[^\d]', re.IGNORECASE), - re.compile('[^\d](\d{6})[^\d]', re.IGNORECASE), - re.compile('[^\d](\d{2})%s(\d{1,2})%s(\d{1,2})[^\d]' % (_dsep, _dsep), re.IGNORECASE), - re.compile('[^\d](\d{1,2})%s(\d{1,2})%s(\d{2})[^\d]' % (_dsep, _dsep), re.IGNORECASE), - re.compile('[^\d](\d{4})%s(\d{1,2})%s(\d{1,2})[^\d]' % (_dsep_bis, _dsep), re.IGNORECASE), - re.compile('[^\d](\d{1,2})%s(\d{1,2})%s(\d{4})[^\d]' % (_dsep, _dsep_bis), re.IGNORECASE), - re.compile('[^\d](\d{1,2}(?:st|nd|rd|th)?%s(?:[a-z]{3,10})%s\d{4})[^\d]' % (_dsep, _dsep), re.IGNORECASE)] - - -def valid_year(year, today=None): - """Check if number is a valid year""" - if not today: - today = datetime.date.today() - return 1920 < year < today.year + 5 - - -def search_year(string): - """Looks for year patterns, and if found return the year and group span. - - Assumes there are sentinels at the beginning and end of the string that - always allow matching a non-digit delimiting the date. - - Note this only looks for valid production years, that is between 1920 - and now + 5 years, so for instance 2000 would be returned as a valid - year but 1492 would not. - - >>> search_year(' in the year 2000... ') - (2000, (13, 17)) - - >>> search_year(' they arrived in 1492. 
') - (None, None) - """ - match = re.search(r'[^0-9]([0-9]{4})[^0-9]', string) - if match: - year = int(match.group(1)) - if valid_year(year): - return (year, match.span(1)) - - return (None, None) - - -def search_date(string, year_first=None, day_first=True): - """Looks for date patterns, and if found return the date and group span. - - Assumes there are sentinels at the beginning and end of the string that - always allow matching a non-digit delimiting the date. - - Year can be defined on two digit only. It will return the nearest possible - date from today. - - >>> search_date(' This happened on 2002-04-22. ') - (datetime.date(2002, 4, 22), (18, 28)) - - >>> search_date(' And this on 17-06-1998. ') - (datetime.date(1998, 6, 17), (13, 23)) - - >>> search_date(' no date in here ') - (None, None) - """ - start, end = None, None - match = None - for date_re in date_regexps: - s = date_re.search(string) - if s and (match is None or s.end() - s.start() > len(match)): - start, end = s.start(), s.end() - if date_re.groups: - match = '-'.join(s.groups()) - else: - match = s.group() - - if match is None: - return None, None - - today = datetime.date.today() - - # If day_first/year_first is undefined, parse is made using both possible values. 
- yearfirst_opts = [False, True] - if year_first is not None: - yearfirst_opts = [year_first] - - dayfirst_opts = [True, False] - if day_first is not None: - dayfirst_opts = [day_first] - - kwargs_list = ({'dayfirst': d, 'yearfirst': y} for d in dayfirst_opts for y in yearfirst_opts) - for kwargs in kwargs_list: - try: - date = parser.parse(match, **kwargs) - except (ValueError, TypeError) as e: #see https://bugs.launchpad.net/dateutil/+bug/1247643 - date = None - pass - # check date plausibility - if date and valid_year(date.year, today=today): - return date.date(), (start+1, end-1) #compensate for sentinels - - return None, None diff --git a/libs/guessit/fileutils.py b/libs/guessit/fileutils.py deleted file mode 100644 index 40110485..00000000 --- a/libs/guessit/fileutils.py +++ /dev/null @@ -1,87 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# -# GuessIt - A library for guessing information from filenames -# Copyright (c) 2013 Nicolas Wack -# -# GuessIt is free software; you can redistribute it and/or modify it under -# the terms of the Lesser GNU General Public License as published by -# the Free Software Foundation; either version 3 of the License, or -# (at your option) any later version. -# -# GuessIt is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# Lesser GNU General Public License for more details. -# -# You should have received a copy of the Lesser GNU General Public License -# along with this program. If not, see . -# - -from __future__ import absolute_import, division, print_function, unicode_literals - -from guessit import s, u -import os.path -import zipfile -import io - - -def split_path(path): - r"""Splits the given path into the list of folders and the filename (or the - last folder if you gave it a folder path. 
- - If the given path was an absolute path, the first element will always be: - - the '/' root folder on Unix systems - - the drive letter on Windows systems (eg: r'C:\') - - the mount point '\\' on Windows systems (eg: r'\\host\share') - - >>> s(split_path('/usr/bin/smewt')) - ['/', 'usr', 'bin', 'smewt'] - - >>> s(split_path('relative_path/to/my_folder/')) - ['relative_path', 'to', 'my_folder'] - - """ - result = [] - while True: - head, tail = os.path.split(path) - - if not head and not tail: - return result - - if not tail and head == path: - # Make sure we won't have an infinite loop. - result = [head] + result - return result - - # we just split a directory ending with '/', so tail is empty - if not tail: - path = head - continue - - # otherwise, add the last path fragment and keep splitting - result = [tail] + result - path = head - - -def file_in_same_dir(ref_file, desired_file): - """Return the path for a file in the same dir as a given reference file. - - >>> s(file_in_same_dir('~/smewt/smewt.db', 'smewt.settings')) == os.path.normpath('~/smewt/smewt.settings') - True - - """ - return os.path.join(*(split_path(ref_file)[:-1] + [desired_file])) - - -def load_file_in_same_dir(ref_file, filename): - """Load a given file. 
Works even when the file is contained inside a zip.""" - path = split_path(ref_file)[:-1] + [filename] - - for i, p in enumerate(path): - if p.endswith('.zip'): - zfilename = os.path.join(*path[:i + 1]) - zfile = zipfile.ZipFile(zfilename) - return u(zfile.read('/'.join(path[i + 1:]))) - - return u(io.open(os.path.join(*path), encoding='utf-8').read()) diff --git a/libs/guessit/guess.py b/libs/guessit/guess.py deleted file mode 100644 index c0f401f2..00000000 --- a/libs/guessit/guess.py +++ /dev/null @@ -1,514 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# -# GuessIt - A library for guessing information from filenames -# Copyright (c) 2013 Nicolas Wack -# -# GuessIt is free software; you can redistribute it and/or modify it under -# the terms of the Lesser GNU General Public License as published by -# the Free Software Foundation; either version 3 of the License, or -# (at your option) any later version. -# -# GuessIt is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# Lesser GNU General Public License for more details. -# -# You should have received a copy of the Lesser GNU General Public License -# along with this program. If not, see . -# - -from __future__ import absolute_import, division, print_function, unicode_literals - -from guessit import UnicodeMixin, s, u, base_text_type -from babelfish import Language, Country -import json -import datetime -import logging - -log = logging.getLogger(__name__) - - -class GuessMetadata(object): - """GuessMetadata contains confidence, an input string, span and related property. - - If defined on a property of Guess object, it overrides the object defined as global. 
- - :param parent: The parent metadata, used for undefined properties in self object - :type parent: :class: `GuessMedata` - :param confidence: The confidence (from 0.0 to 1.0) - :type confidence: number - :param input: The input string - :type input: string - :param span: The input string - :type span: tuple (int, int) - :param prop: The found property definition - :type prop: :class `guessit.containers._Property` - """ - def __init__(self, parent=None, confidence=None, input=None, span=None, prop=None, *args, **kwargs): - self.parent = parent - if confidence is None and self.parent is None: - self._confidence = 1.0 - else: - self._confidence = confidence - self._input = input - self._span = span - self._prop = prop - - @property - def confidence(self): - """The confidence - - :rtype: int - :return: confidence value - """ - return self._confidence if self._confidence is not None else self.parent.confidence if self.parent else None - - @confidence.setter - def confidence(self, confidence): - self._confidence = confidence - - @property - def input(self): - """The input - - :rtype: string - :return: String used to find this guess value - """ - return self._input if self._input is not None else self.parent.input if self.parent else None - - @input.setter - def input(self, input): - """The input - - :rtype: string - """ - self._input = input - - @property - def span(self): - """The span - - :rtype: tuple (int, int) - :return: span of input string used to find this guess value - """ - return self._span if self._span is not None else self.parent.span if self.parent else None - - @span.setter - def span(self, span): - """The span - - :rtype: tuple (int, int) - :return: span of input string used to find this guess value - """ - self._span = span - - @property - def prop(self): - """The property - - :rtype: :class:`_Property` - :return: The property - """ - return self._prop if self._prop is not None else self.parent.prop if self.parent else None - - @property - def 
raw(self): - """Return the raw information (original match from the string, - not the cleaned version) associated with the given property name.""" - if self.input and self.span: - return self.input[self.span[0]:self.span[1]] - return None - - def __repr__(self, *args, **kwargs): - return object.__repr__(self, *args, **kwargs) - - -def _split_kwargs(**kwargs): - metadata_args = {} - for prop in dir(GuessMetadata): - try: - metadata_args[prop] = kwargs.pop(prop) - except KeyError: - pass - return metadata_args, kwargs - - -class Guess(UnicodeMixin, dict): - """A Guess is a dictionary which has an associated confidence for each of - its values. - - As it is a subclass of dict, you can use it everywhere you expect a - simple dict.""" - - def __init__(self, *args, **kwargs): - metadata_kwargs, kwargs = _split_kwargs(**kwargs) - self._global_metadata = GuessMetadata(**metadata_kwargs) - dict.__init__(self, *args, **kwargs) - - self._metadata = {} - for prop in self: - self._metadata[prop] = GuessMetadata(parent=self._global_metadata) - - def rename(self, old_name, new_name): - if old_name in self._metadata: - metadata = self._metadata[old_name] - del self._metadata[old_name] - self._metadata[new_name] = metadata - if old_name in self: - value = self[old_name] - del self[old_name] - self[new_name] = value - return True - return False - - def to_dict(self, advanced=False): - """Return the guess as a dict containing only base types, ie: - where dates, languages, countries, etc. are converted to strings. 
- - if advanced is True, return the data as a json string containing - also the raw information of the properties.""" - data = dict(self) - for prop, value in data.items(): - if isinstance(value, datetime.date): - data[prop] = value.isoformat() - elif isinstance(value, (UnicodeMixin, base_text_type)): - data[prop] = u(value) - elif isinstance(value, (Language, Country)): - data[prop] = value.guessit - elif isinstance(value, list): - data[prop] = [u(x) for x in value] - if advanced: - metadata = self.metadata(prop) - prop_data = {'value': data[prop]} - if metadata.raw: - prop_data['raw'] = metadata.raw - if metadata.confidence: - prop_data['confidence'] = metadata.confidence - data[prop] = prop_data - - return data - - def nice_string(self, advanced=False): - """Return a string with the property names and their values, - that also displays the associated confidence to each property. - - FIXME: doc with param""" - if advanced: - data = self.to_dict(advanced) - return json.dumps(data, indent=4) - else: - data = self.to_dict() - - parts = json.dumps(data, indent=4).split('\n') - for i, p in enumerate(parts): - if p[:5] != ' "': - continue - - prop = p.split('"')[1] - parts[i] = (' [%.2f] "' % self.confidence(prop)) + p[5:] - - return '\n'.join(parts) - - def __unicode__(self): - return u(self.to_dict()) - - def metadata(self, prop=None): - """Return the metadata associated with the given property name - - If no property name is given, get the global_metadata - """ - if prop is None: - return self._global_metadata - if prop not in self._metadata: - self._metadata[prop] = GuessMetadata(parent=self._global_metadata) - return self._metadata[prop] - - def confidence(self, prop=None): - return self.metadata(prop).confidence - - def set_confidence(self, prop, confidence): - self.metadata(prop).confidence = confidence - - def raw(self, prop): - return self.metadata(prop).raw - - def set(self, prop_name, value, *args, **kwargs): - if value is None: - try: - del self[prop_name] 
- except KeyError: - pass - try: - del self._metadata[prop_name] - except KeyError: - pass - else: - self[prop_name] = value - if 'metadata' in kwargs.keys(): - self._metadata[prop_name] = kwargs['metadata'] - else: - self._metadata[prop_name] = GuessMetadata(parent=self._global_metadata, *args, **kwargs) - - def update(self, other, confidence=None): - dict.update(self, other) - if isinstance(other, Guess): - for prop in other: - try: - self._metadata[prop] = other._metadata[prop] - except KeyError: - pass - if confidence is not None: - for prop in other: - self.set_confidence(prop, confidence) - - def update_highest_confidence(self, other): - """Update this guess with the values from the given one. In case - there is property present in both, only the one with the highest one - is kept.""" - if not isinstance(other, Guess): - raise ValueError('Can only call this function on Guess instances') - - for prop in other: - if prop in self and self.metadata(prop).confidence >= other.metadata(prop).confidence: - continue - self[prop] = other[prop] - self._metadata[prop] = other.metadata(prop) - - -def choose_int(g1, g2): - """Function used by merge_similar_guesses to choose between 2 possible - properties when they are integers.""" - v1, c1 = g1 # value, confidence - v2, c2 = g2 - if (v1 == v2): - return (v1, 1 - (1 - c1) * (1 - c2)) - else: - if c1 > c2: - return (v1, c1 - c2) - else: - return (v2, c2 - c1) - - -def choose_string(g1, g2): - """Function used by merge_similar_guesses to choose between 2 possible - properties when they are strings. - - If the 2 strings are similar, or one is contained in the other, the latter is returned - with an increased confidence. - - If the 2 strings are dissimilar, the one with the higher confidence is returned, with - a weaker confidence. - - Note that here, 'similar' means that 2 strings are either equal, or that they - differ very little, such as one string being the other one with the 'the' word - prepended to it. 
- - >>> s(choose_string(('Hello', 0.75), ('World', 0.5))) - ('Hello', 0.25) - - >>> s(choose_string(('Hello', 0.5), ('hello', 0.5))) - ('Hello', 0.75) - - >>> s(choose_string(('Hello', 0.4), ('Hello World', 0.4))) - ('Hello', 0.64) - - >>> s(choose_string(('simpsons', 0.5), ('The Simpsons', 0.5))) - ('The Simpsons', 0.75) - - """ - v1, c1 = g1 # value, confidence - v2, c2 = g2 - - if not v1: - return g2 - elif not v2: - return g1 - - v1, v2 = v1.strip(), v2.strip() - v1l, v2l = v1.lower(), v2.lower() - - combined_prob = 1 - (1 - c1) * (1 - c2) - - if v1l == v2l: - return v1, combined_prob - - # check for common patterns - elif v1l == 'the ' + v2l: - return v1, combined_prob - elif v2l == 'the ' + v1l: - return v2, combined_prob - - # if one string is contained in the other, return the shortest one - elif v2l in v1l: - return v2, combined_prob - elif v1l in v2l: - return v1, combined_prob - - # in case of conflict, return the one with highest confidence - else: - if c1 > c2: - return v1, c1 - c2 - else: - return v2, c2 - c1 - - -def _merge_similar_guesses_nocheck(guesses, prop, choose): - """Take a list of guesses and merge those which have the same properties, - increasing or decreasing the confidence depending on whether their values - are similar. 
- - This function assumes there are at least 2 valid guesses.""" - - similar = [guess for guess in guesses if prop in guess] - - g1, g2 = similar[0], similar[1] - - # merge only this prop of s2 into s1, updating the confidence for the - # considered property - v1, v2 = g1[prop], g2[prop] - c1, c2 = g1.confidence(prop), g2.confidence(prop) - - new_value, new_confidence = choose((v1, c1), (v2, c2)) - if new_confidence >= c1: - msg = "Updating matching property '%s' with confidence %.2f" - else: - msg = "Updating non-matching property '%s' with confidence %.2f" - log.debug(msg % (prop, new_confidence)) - - g1.set(prop, new_value, confidence=new_confidence) - g2.pop(prop) - - # remove g2 if there are no properties left - if not g2.keys(): - guesses.remove(g2) - - -def merge_similar_guesses(guesses, prop, choose): - """Take a list of guesses and merge those which have the same properties, - increasing or decreasing the confidence depending on whether their values - are similar.""" - - similar = [guess for guess in guesses if prop in guess] - if len(similar) < 2: - # nothing to merge - return - - if len(similar) == 2: - _merge_similar_guesses_nocheck(guesses, prop, choose) - - if len(similar) > 2: - log.debug('complex merge, trying our best...') - before = len(guesses) - _merge_similar_guesses_nocheck(guesses, prop, choose) - after = len(guesses) - if after < before: - # recurse only when the previous call actually did something, - # otherwise we end up in an infinite loop - merge_similar_guesses(guesses, prop, choose) - - -def merge_all(guesses, append=None): - """Merge all the guesses in a single result, remove very unlikely values, - and return it. - You can specify a list of properties that should be appended into a list - instead of being merged. - - >>> s(merge_all([ Guess({'season': 2}, confidence=0.6), - ... Guess({'episodeNumber': 13}, confidence=0.8) ]) - ... 
) == {'season': 2, 'episodeNumber': 13} - True - - - >>> s(merge_all([ Guess({'episodeNumber': 27}, confidence=0.02), - ... Guess({'season': 1}, confidence=0.2) ]) - ... ) == {'season': 1} - True - - >>> s(merge_all([ Guess({'other': 'PROPER'}, confidence=0.8), - ... Guess({'releaseGroup': '2HD'}, confidence=0.8) ], - ... append=['other']) - ... ) == {'releaseGroup': '2HD', 'other': ['PROPER']} - True - - """ - result = Guess() - if not guesses: - return result - - if append is None: - append = [] - - for g in guesses: - # first append our appendable properties - for prop in append: - if prop in g: - if isinstance(g[prop], (list, set)): - new_values = result.get(prop, []) + list(g[prop]) - else: - new_values = result.get(prop, []) + [g[prop]] - - result.set(prop, new_values, - # TODO: what to do with confidence here? maybe an - # arithmetic mean... - confidence=g.metadata(prop).confidence, - input=g.metadata(prop).input, - span=g.metadata(prop).span, - prop=g.metadata(prop).prop) - - del g[prop] - - # then merge the remaining ones - dups = set(result) & set(g) - if dups: - log.debug('duplicate properties %s in merged result...' % [(result[p], g[p]) for p in dups]) - - result.update_highest_confidence(g) - - # delete very unlikely values - for p in list(result.keys()): - if result.confidence(p) < 0.05: - del result[p] - - # make sure our appendable properties contain unique values - for prop in append: - try: - value = result[prop] - if isinstance(value, list): - result[prop] = list(set(value)) - else: - result[prop] = [value] - except KeyError: - pass - - return result - - -def smart_merge(guesses): - """First tries to merge well-known similar properties, and then merges - the rest with a merge_all call. - - Should be the function to call in most cases, unless one wants to have more - control. - - Warning: this function is destructive, ie: it will merge the list in-place. 
- """ - - # 1- try to merge similar information together and give it a higher - # confidence - for int_part in ('year', 'season', 'episodeNumber'): - merge_similar_guesses(guesses, int_part, choose_int) - - for string_part in ('title', 'series', 'container', 'format', - 'releaseGroup', 'website', 'audioCodec', - 'videoCodec', 'screenSize', 'episodeFormat', - 'audioChannels', 'idNumber'): - merge_similar_guesses(guesses, string_part, choose_string) - - # 2- merge the rest, potentially discarding information not properly - # merged before - result = merge_all(guesses, - append=['language', 'subtitleLanguage', 'other', - 'episodeDetails', 'unidentified']) - - return result diff --git a/libs/guessit/hash_ed2k.py b/libs/guessit/hash_ed2k.py deleted file mode 100644 index a1ea562f..00000000 --- a/libs/guessit/hash_ed2k.py +++ /dev/null @@ -1,69 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# -# GuessIt - A library for guessing information from filenames -# Copyright (c) 2013 Nicolas Wack -# -# GuessIt is free software; you can redistribute it and/or modify it under -# the terms of the Lesser GNU General Public License as published by -# the Free Software Foundation; either version 3 of the License, or -# (at your option) any later version. -# -# GuessIt is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# Lesser GNU General Public License for more details. -# -# You should have received a copy of the Lesser GNU General Public License -# along with this program. If not, see . -# - -from __future__ import absolute_import, division, print_function, unicode_literals - -from guessit import s, to_hex -import hashlib -import os.path - -from functools import reduce - - -def hash_file(filename): - """Returns the ed2k hash of a given file. 
- - >>> testfile = os.path.join(os.path.dirname(__file__), 'test/dummy.srt') - >>> s(hash_file(testfile)) - 'ed2k://|file|dummy.srt|59|41F58B913AB3973F593BEBA8B8DF6510|/' - """ - return 'ed2k://|file|%s|%d|%s|/' % (os.path.basename(filename), - os.path.getsize(filename), - hash_filehash(filename).upper()) - - -def hash_filehash(filename): - """Returns the ed2k hash of a given file. - - This function is taken from: - http://www.radicand.org/blog/orz/2010/2/21/edonkey2000-hash-in-python/ - """ - md4 = hashlib.new('md4').copy - - def gen(f): - while True: - x = f.read(9728000) - if x: - yield x - else: - return - - def md4_hash(data): - m = md4() - m.update(data) - return m - - with open(filename, 'rb') as f: - a = gen(f) - hashes = [md4_hash(data).digest() for data in a] - if len(hashes) == 1: - return to_hex(hashes[0]) - else: - return md4_hash(reduce(lambda a, d: a + d, hashes, "")).hexd diff --git a/libs/guessit/hash_mpc.py b/libs/guessit/hash_mpc.py deleted file mode 100644 index fb6c52bd..00000000 --- a/libs/guessit/hash_mpc.py +++ /dev/null @@ -1,58 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# -# GuessIt - A library for guessing information from filenames -# Copyright (c) 2013 Nicolas Wack -# -# GuessIt is free software; you can redistribute it and/or modify it under -# the terms of the Lesser GNU General Public License as published by -# the Free Software Foundation; either version 3 of the License, or -# (at your option) any later version. -# -# GuessIt is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# Lesser GNU General Public License for more details. -# -# You should have received a copy of the Lesser GNU General Public License -# along with this program. If not, see . 
-# - -from __future__ import absolute_import, division, print_function, unicode_literals - -import struct -import os - - -def hash_file(filename): - """This function is taken from: - http://trac.opensubtitles.org/projects/opensubtitles/wiki/HashSourceCodes - and is licensed under the GPL.""" - - longlongformat = b'q' # long long - bytesize = struct.calcsize(longlongformat) - - f = open(filename, "rb") - - filesize = os.path.getsize(filename) - hash_value = filesize - - if filesize < 65536 * 2: - raise Exception("SizeError: size is %d, should be > 132K..." % filesize) - - for x in range(int(65536 / bytesize)): - buf = f.read(bytesize) - (l_value,) = struct.unpack(longlongformat, buf) - hash_value += l_value - hash_value &= 0xFFFFFFFFFFFFFFFF # to remain as 64bit number - - f.seek(max(0, filesize - 65536), 0) - for x in range(int(65536 / bytesize)): - buf = f.read(bytesize) - (l_value,) = struct.unpack(longlongformat, buf) - hash_value += l_value - hash_value &= 0xFFFFFFFFFFFFFFFF - - f.close() - - return "%016x" % hash_value diff --git a/libs/guessit/jsonutils.py b/libs/guessit/jsonutils.py new file mode 100644 index 00000000..7d6ff705 --- /dev/null +++ b/libs/guessit/jsonutils.py @@ -0,0 +1,32 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +JSON Utils +""" +import json +try: + from collections import OrderedDict +except ImportError: # pragma: no-cover + from ordereddict import OrderedDict # pylint:disable=import-error + +from rebulk.match import Match + + +class GuessitEncoder(json.JSONEncoder): + """ + JSON Encoder for guessit response + """ + + def default(self, o): # pylint:disable=method-hidden + if isinstance(o, Match): + ret = OrderedDict() + ret['value'] = o.value + if o.raw: + ret['raw'] = o.raw + ret['start'] = o.start + ret['end'] = o.end + return ret + elif hasattr(o, 'name'): # Babelfish languages/countries long name + return str(o.name) + else: # pragma: no cover + return str(o) diff --git a/libs/guessit/language.py b/libs/guessit/language.py 
deleted file mode 100644 index 7e32af3c..00000000 --- a/libs/guessit/language.py +++ /dev/null @@ -1,311 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# -# GuessIt - A library for guessing information from filenames -# Copyright (c) 2013 Nicolas Wack -# -# GuessIt is free software; you can redistribute it and/or modify it under -# the terms of the Lesser GNU General Public License as published by -# the Free Software Foundation; either version 3 of the License, or -# (at your option) any later version. -# -# GuessIt is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# Lesser GNU General Public License for more details. -# -# You should have received a copy of the Lesser GNU General Public License -# along with this program. If not, see . -# - -from __future__ import absolute_import, division, print_function, unicode_literals - -from guessit import UnicodeMixin, base_text_type, u -from guessit.textutils import find_words -from babelfish import Language, Country -import babelfish -import re -import logging -from guessit.guess import Guess - -__all__ = ['Language', 'UNDETERMINED', - 'search_language', 'guess_language'] - -log = logging.getLogger(__name__) - -UNDETERMINED = babelfish.Language('und') - -SYN = {('und', None): ['unknown', 'inconnu', 'unk', 'un'], - ('ell', None): ['gr', 'greek'], - ('spa', None): ['esp', 'español'], - ('fra', None): ['français', 'vf', 'vff', 'vfi'], - ('swe', None): ['se'], - ('por', 'BR'): ['po', 'pb', 'pob', 'br', 'brazilian'], - ('cat', None): ['català'], - ('ces', None): ['cz'], - ('ukr', None): ['ua'], - ('zho', None): ['cn'], - ('jpn', None): ['jp'], - ('hrv', None): ['scr'], - ('mul', None): ['multi', 'dl'], # http://scenelingo.wordpress.com/2009/03/24/what-does-dl-mean/ - } - - -class GuessitConverter(babelfish.LanguageReverseConverter): - - _with_country_regexp = re.compile('(.*)\((.*)\)') 
- _with_country_regexp2 = re.compile('(.*)-(.*)') - - def __init__(self): - self.guessit_exceptions = {} - for (alpha3, country), synlist in SYN.items(): - for syn in synlist: - self.guessit_exceptions[syn.lower()] = (alpha3, country, None) - - @property - def codes(self): - return (babelfish.language_converters['alpha3b'].codes | - babelfish.language_converters['alpha2'].codes | - babelfish.language_converters['name'].codes | - babelfish.language_converters['opensubtitles'].codes | - babelfish.country_converters['name'].codes | - frozenset(self.guessit_exceptions.keys())) - - def convert(self, alpha3, country=None, script=None): - return str(babelfish.Language(alpha3, country, script)) - - def reverse(self, name): - with_country = (GuessitConverter._with_country_regexp.match(name) or - GuessitConverter._with_country_regexp2.match(name)) - - name = u(name.lower()) - if with_country: - lang = Language.fromguessit(with_country.group(1).strip()) - lang.country = babelfish.Country.fromguessit(with_country.group(2).strip()) - return (lang.alpha3, lang.country.alpha2 if lang.country else None, lang.script or None) - - # exceptions come first, as they need to override a potential match - # with any of the other guessers - try: - return self.guessit_exceptions[name] - except KeyError: - pass - - for conv in [babelfish.Language, - babelfish.Language.fromalpha3b, - babelfish.Language.fromalpha2, - babelfish.Language.fromname, - babelfish.Language.fromopensubtitles]: - try: - c = conv(name) - return c.alpha3, c.country, c.script - except (ValueError, babelfish.LanguageReverseError): - pass - - raise babelfish.LanguageReverseError(name) - - -babelfish.language_converters['guessit'] = GuessitConverter() - -COUNTRIES_SYN = {'ES': ['españa'], - 'GB': ['UK'], - 'BR': ['brazilian', 'bra'], - # FIXME: this one is a bit of a stretch, not sure how to do - # it properly, though... 
- 'MX': ['Latinoamérica', 'latin america'] - } - - -class GuessitCountryConverter(babelfish.CountryReverseConverter): - def __init__(self): - self.guessit_exceptions = {} - - for alpha2, synlist in COUNTRIES_SYN.items(): - for syn in synlist: - self.guessit_exceptions[syn.lower()] = alpha2 - - @property - def codes(self): - return (babelfish.country_converters['name'].codes | - frozenset(babelfish.COUNTRIES.values()) | - frozenset(self.guessit_exceptions.keys())) - - def convert(self, alpha2): - if alpha2 == 'GB': - return 'UK' - return str(Country(alpha2)) - - def reverse(self, name): - # exceptions come first, as they need to override a potential match - # with any of the other guessers - try: - return self.guessit_exceptions[name.lower()] - except KeyError: - pass - - try: - return babelfish.Country(name.upper()).alpha2 - except ValueError: - pass - - for conv in [babelfish.Country.fromname]: - try: - return conv(name).alpha2 - except babelfish.CountryReverseError: - pass - - raise babelfish.CountryReverseError(name) - - -babelfish.country_converters['guessit'] = GuessitCountryConverter() - - -# list of common words which could be interpreted as languages, but which -# are far too common to be able to say they represent a language in the -# middle of a string (where they most likely carry their commmon meaning) -LNG_COMMON_WORDS = frozenset([ - # english words - 'is', 'it', 'am', 'mad', 'men', 'man', 'run', 'sin', 'st', 'to', - 'no', 'non', 'war', 'min', 'new', 'car', 'day', 'bad', 'bat', 'fan', - 'fry', 'cop', 'zen', 'gay', 'fat', 'one', 'cherokee', 'got', 'an', 'as', - 'cat', 'her', 'be', 'hat', 'sun', 'may', 'my', 'mr', 'rum', 'pi', 'bb', 'bt', - 'tv', 'aw', 'by', 'md', 'mp', 'cd', 'lt', 'gt', 'in', 'ad', 'ice', 'ay', - # french words - 'bas', 'de', 'le', 'son', 'ne', 'ca', 'ce', 'et', 'que', - 'mal', 'est', 'vol', 'or', 'mon', 'se', 'je', 'tu', 'me', - 'ne', 'ma', 'va', 'au', - # japanese words, - 'wa', 'ga', 'ao', - # spanish words - 'la', 'el', 'del', 
'por', 'mar', - # other - 'ind', 'arw', 'ts', 'ii', 'bin', 'chan', 'ss', 'san', 'oss', 'iii', - 'vi', 'ben', 'da', 'lt', 'ch', - # new from babelfish - 'mkv', 'avi', 'dmd', 'the', 'dis', 'cut', 'stv', 'des', 'dia', 'and', - 'cab', 'sub', 'mia', 'rim', 'las', 'une', 'par', 'srt', 'ano', 'toy', - 'job', 'gag', 'reel', 'www', 'for', 'ayu', 'csi', 'ren', 'moi', 'sur', - 'fer', 'fun', 'two', 'big', 'psy', 'air', - # movie title - 'brazil', - # release groups - 'bs', # Bosnian - 'kz', - # countries - 'gt', 'lt', - # part/pt - 'pt' - ]) - -LNG_COMMON_WORDS_STRICT = frozenset(['brazil']) - - -subtitle_prefixes = ['sub', 'subs', 'st', 'vost', 'subforced', 'fansub', 'hardsub'] -subtitle_suffixes = ['subforced', 'fansub', 'hardsub'] -lang_prefixes = ['true'] - - -def find_possible_languages(string, allowed_languages=None): - """Find possible languages in the string - - :return: list of tuple (property, Language, lang_word, word) - """ - - common_words = None - if allowed_languages: - common_words = LNG_COMMON_WORDS_STRICT - else: - common_words = LNG_COMMON_WORDS - - words = find_words(string) - - valid_words = [] - for word in words: - lang_word = word.lower() - key = 'language' - for prefix in subtitle_prefixes: - if lang_word.startswith(prefix): - lang_word = lang_word[len(prefix):] - key = 'subtitleLanguage' - for suffix in subtitle_suffixes: - if lang_word.endswith(suffix): - lang_word = lang_word[:len(suffix)] - key = 'subtitleLanguage' - for prefix in lang_prefixes: - if lang_word.startswith(prefix): - lang_word = lang_word[len(prefix):] - if lang_word not in common_words: - try: - lang = Language.fromguessit(lang_word) - if allowed_languages: - if lang.name.lower() in allowed_languages or lang.alpha2.lower() in allowed_languages or lang.alpha3.lower() in allowed_languages: - valid_words.append((key, lang, lang_word, word)) - # Keep language with alpha2 equivalent. Others are probably - # uncommon languages. 
- elif lang == 'mul' or hasattr(lang, 'alpha2'): - valid_words.append((key, lang, lang_word, word)) - except babelfish.Error: - pass - return valid_words - - -def search_language(string, allowed_languages=None): - """Looks for language patterns, and if found return the language object, - its group span and an associated confidence. - - you can specify a list of allowed languages using the lang_filter argument, - as in lang_filter = [ 'fr', 'eng', 'spanish' ] - - >>> search_language('movie [en].avi')['language'] - - - >>> search_language('the zen fat cat and the gay mad men got a new fan', allowed_languages = ['en', 'fr', 'es']) - - """ - - if allowed_languages: - allowed_languages = set(Language.fromguessit(lang) for lang in allowed_languages) - - confidence = 1.0 # for all of them - - for prop, language, lang, word in find_possible_languages(string, allowed_languages): - pos = string.find(word) - end = pos + len(word) - - # only allow those languages that have a 2-letter code, those that - # don't are too esoteric and probably false matches - # if language.lang not in lng3_to_lng2: - # continue - - # confidence depends on alpha2, alpha3, english name, ... - if len(lang) == 2: - confidence = 0.8 - elif len(lang) == 3: - confidence = 0.9 - elif prop == 'subtitleLanguage': - confidence = 0.6 # Subtitle prefix found with language - else: - # Note: we could either be really confident that we found a - # language or assume that full language names are too - # common words and lower their confidence accordingly - confidence = 0.3 # going with the low-confidence route here - - return Guess({prop: language}, confidence=confidence, input=string, span=(pos, end)) - - return None - - -def guess_language(text): # pragma: no cover - """Guess the language in which a body of text is written. - - This uses the external guess-language python module, and will fail and return - Language(Undetermined) if it is not installed. 
- """ - try: - from guess_language import guessLanguage - return Language.fromguessit(guessLanguage(text)) - - except ImportError: - log.error('Cannot detect the language of the given text body, missing dependency: guess-language') - log.error('Please install it from PyPI, by doing eg: pip install guess-language') - return UNDETERMINED diff --git a/libs/guessit/matcher.py b/libs/guessit/matcher.py deleted file mode 100644 index 2e3bc2af..00000000 --- a/libs/guessit/matcher.py +++ /dev/null @@ -1,306 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# -# GuessIt - A library for guessing information from filenames -# Copyright (c) 2013 Nicolas Wack -# Copyright (c) 2013 Rémi Alvergnat -# -# GuessIt is free software; you can redistribute it and/or modify it under -# the terms of the Lesser GNU General Public License as published by -# the Free Software Foundation; either version 3 of the License, or -# (at your option) any later version. -# -# GuessIt is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# Lesser GNU General Public License for more details. -# -# You should have received a copy of the Lesser GNU General Public License -# along with this program. If not, see . -# - -from __future__ import absolute_import, division, print_function, \ - unicode_literals - -import logging - -from guessit import PY3, u -from guessit.transfo import TransformerException -from guessit.matchtree import MatchTree -from guessit.textutils import normalize_unicode, clean_default -from guessit.guess import Guess -import inspect - -log = logging.getLogger(__name__) - - -class IterativeMatcher(object): - """An iterative matcher tries to match different patterns that appear - in the filename. - - The ``filetype`` argument indicates which type of file you want to match. 
- If it is undefined, the matcher will try to see whether it can guess - that the file corresponds to an episode, or otherwise will assume it is - a movie. - - The recognized ``filetype`` values are: - ``['subtitle', 'info', 'movie', 'moviesubtitle', 'movieinfo', 'episode', - 'episodesubtitle', 'episodeinfo']`` - - ``options`` is a dict of options values to be passed to the transformations used - by the matcher. - - The IterativeMatcher works mainly in 2 steps: - - First, it splits the filename into a match_tree, which is a tree of groups - which have a semantic meaning, such as episode number, movie title, - etc... - - The match_tree created looks like the following:: - - 0000000000000000000000000000000000000000000000000000000000000000000000000000000000 111 - 0000011111111111112222222222222233333333444444444444444455555555666777777778888888 000 - 0000000000000000000000000000000001111112011112222333333401123334000011233340000000 000 - __________________(The.Prestige).______.[____.HP.______.{__-___}.St{__-___}.Chaps].___ - xxxxxttttttttttttt ffffff vvvv xxxxxx ll lll xx xxx ccc - [XCT].Le.Prestige.(The.Prestige).DVDRip.[x264.HP.He-Aac.{Fr-Eng}.St{Fr-Eng}.Chaps].mkv - - The first 3 lines indicates the group index in which a char in the - filename is located. So for instance, ``x264`` (in the middle) is the group (0, 4, 1), and - it corresponds to a video codec, denoted by the letter ``v`` in the 4th line. - (for more info, see guess.matchtree.to_string) - - Second, it tries to merge all this information into a single object - containing all the found properties, and does some (basic) conflict - resolution when they arise. 
- """ - def __init__(self, filename, options=None, **kwargs): - options = dict(options or {}) - for k, v in kwargs.items(): - if k not in options or not options[k]: - options[k] = v # options dict has priority over keyword arguments - self._validate_options(options) - if not PY3 and not isinstance(filename, unicode): - log.warning('Given filename to matcher is not unicode...') - filename = filename.decode('utf-8') - - filename = normalize_unicode(filename) - if options and options.get('clean_function'): - clean_function = options.get('clean_function') - if not hasattr(clean_function, '__call__'): - module, function = clean_function.rsplit('.') - if not module: - module = 'guessit.textutils' - clean_function = getattr(__import__(module), function) - if not clean_function: - log.error('Can\'t find clean function %s. Default will be used.' % options.get('clean_function')) - clean_function = clean_default - else: - clean_function = clean_default - - self.match_tree = MatchTree(filename, clean_function=clean_function) - self.options = options - self._transfo_calls = [] - - # sanity check: make sure we don't process a (mostly) empty string - if clean_function(filename).strip() == '': - return - - from guessit.plugins import transformers - - try: - mtree = self.match_tree - if 'type' in self.options: - mtree.guess.set('type', self.options['type'], confidence=0.0) - - # Process - for transformer in transformers.all_transformers(): - disabled = options.get('disabled_transformers') - if not disabled or transformer.name not in disabled: - self._process(transformer, False) - - # Post-process - for transformer in transformers.all_transformers(): - disabled = options.get('disabled_transformers') - if not disabled or transformer.name not in disabled: - self._process(transformer, True) - - log.debug('Found match tree:\n%s' % u(mtree)) - except TransformerException as e: - log.debug('An error has occurred in Transformer %s: %s' % (e.transformer, e)) - - def _process(self, 
transformer, post=False): - - if not hasattr(transformer, 'should_process') or transformer.should_process(self.match_tree, self.options): - if post: - transformer.post_process(self.match_tree, self.options) - else: - transformer.process(self.match_tree, self.options) - self._transfo_calls.append(transformer) - - @property - def second_pass_options(self): - second_pass_options = {} - for transformer in self._transfo_calls: - if hasattr(transformer, 'second_pass_options'): - transformer_second_pass_options = transformer.second_pass_options(self.match_tree, self.options) - if transformer_second_pass_options: - second_pass_options.update(transformer_second_pass_options) - - return second_pass_options - - def _validate_options(self, options): - valid_filetypes = ('subtitle', 'info', 'video', - 'movie', 'moviesubtitle', 'movieinfo', - 'episode', 'episodesubtitle', 'episodeinfo') - - type_ = options.get('type') - if type_ and type_ not in valid_filetypes: - raise ValueError("filetype needs to be one of %s" % (valid_filetypes,)) - - def matched(self): - return self.match_tree.matched() - - -def build_guess(node, name, value=None, confidence=1.0): - guess = Guess({name: node.clean_value if value is None else value}, confidence=confidence) - guess.metadata().input = node.value if value is None else value - if value is None: - left_offset = 0 - right_offset = 0 - - clean_value = node.clean_value - - for i in range(0, len(node.value)): - if clean_value[0] == node.value[i]: - break - left_offset += 1 - - for i in reversed(range(0, len(node.value))): - if clean_value[-1] == node.value[i]: - break - right_offset += 1 - - guess.metadata().span = (node.span[0] - node.offset + left_offset, node.span[1] - node.offset - right_offset) - return guess - - -def found_property(node, name, value=None, confidence=1.0, update_guess=True, logger=None): - # automatically retrieve the log object from the caller frame - if not logger: - caller_frame = inspect.stack()[1][0] - logger = 
caller_frame.f_locals['self'].log - guess = build_guess(node, name, value, confidence) - return found_guess(node, guess, update_guess=update_guess, logger=logger) - - -def found_guess(node, guess, update_guess=True, logger=None): - if node.guess: - if update_guess: - node.guess.update_highest_confidence(guess) - else: - child = node.add_child(guess.metadata().span) - child.guess = guess - else: - node.guess = guess - log_found_guess(guess, logger) - return node.guess - - -def log_found_guess(guess, logger=None): - for k, v in guess.items(): - (logger or log).debug('Property found: %s=%s (%s) (confidence=%.2f)' % - (k, v, guess.raw(k), guess.confidence(k))) - - -def _get_split_spans(node, span): - partition_spans = node.get_partition_spans(span) - for to_remove_span in partition_spans: - if to_remove_span[0] == span[0] and to_remove_span[1] in [span[1], span[1] + 1]: - partition_spans.remove(to_remove_span) - break - return partition_spans - - -class GuessFinder(object): - def __init__(self, guess_func, confidence=None, logger=None, options=None): - self.guess_func = guess_func - self.confidence = confidence - self.logger = logger or log - self.options = options - - def process_nodes(self, nodes): - for node in nodes: - self.process_node(node) - - def process_node(self, node, iterative=True, partial_span=None): - if partial_span: - value = node.value[partial_span[0]:partial_span[1]] - else: - value = node.value - string = ' %s ' % value # add sentinels - - if not self.options: - matcher_result = self.guess_func(string, node) - else: - matcher_result = self.guess_func(string, node, self.options) - - if matcher_result: - if not isinstance(matcher_result, Guess): - result, span = matcher_result - else: - result, span = matcher_result, matcher_result.metadata().span - - if result: - # readjust span to compensate for sentinels - span = (span[0] - 1, span[1] - 1) - - # readjust span to compensate for partial_span - if partial_span: - span = (span[0] + partial_span[0], 
span[1] + partial_span[0]) - - partition_spans = None - if self.options and 'skip_nodes' in self.options: - skip_nodes = self.options.get('skip_nodes') - for skip_node in skip_nodes: - if skip_node.parent.node_idx == node.node_idx[:len(skip_node.parent.node_idx)] and\ - skip_node.span == span or\ - skip_node.span == (span[0] + skip_node.offset, span[1] + skip_node.offset): - if partition_spans is None: - partition_spans = _get_split_spans(node, skip_node.span) - else: - new_partition_spans = [] - for partition_span in partition_spans: - tmp_node = MatchTree(value, span=partition_span, parent=node) - tmp_partitions_spans = _get_split_spans(tmp_node, skip_node.span) - new_partition_spans.extend(tmp_partitions_spans) - partition_spans.extend(new_partition_spans) - - if not partition_spans: - # restore sentinels compensation - - if isinstance(result, Guess): - guess = result - else: - guess = Guess(result, confidence=self.confidence, input=string, span=span) - - if not iterative: - found_guess(node, guess, logger=self.logger) - else: - absolute_span = (span[0] + node.offset, span[1] + node.offset) - node.partition(span) - if node.is_leaf(): - found_guess(node, guess, logger=self.logger) - else: - found_child = None - for child in node.children: - if child.span == absolute_span: - found_guess(child, guess, logger=self.logger) - found_child = child - break - for child in node.children: - if child is not found_child: - self.process_node(child) - else: - for partition_span in partition_spans: - self.process_node(node, partial_span=partition_span) diff --git a/libs/guessit/matchtree.py b/libs/guessit/matchtree.py deleted file mode 100644 index 19c1e759..00000000 --- a/libs/guessit/matchtree.py +++ /dev/null @@ -1,426 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# -# GuessIt - A library for guessing information from filenames -# Copyright (c) 2013 Nicolas Wack -# -# GuessIt is free software; you can redistribute it and/or modify it under -# the terms of the 
Lesser GNU General Public License as published by -# the Free Software Foundation; either version 3 of the License, or -# (at your option) any later version. -# -# GuessIt is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# Lesser GNU General Public License for more details. -# -# You should have received a copy of the Lesser GNU General Public License -# along with this program. If not, see . -# - -from __future__ import absolute_import, division, print_function, unicode_literals - -import guessit # @UnusedImport needed for doctests -from guessit import UnicodeMixin, base_text_type -from guessit.textutils import clean_default, str_fill -from guessit.patterns import group_delimiters -from guessit.guess import (merge_similar_guesses, smart_merge, - choose_int, choose_string, Guess) -from itertools import takewhile -import copy -import logging - -log = logging.getLogger(__name__) - - -class BaseMatchTree(UnicodeMixin): - """A BaseMatchTree is a tree covering the filename, where each - node represents a substring in the filename and can have a ``Guess`` - associated with it that contains the information that has been guessed - in this node. Nodes can be further split into subnodes until a proper - split has been found. 
- - Each node has the following attributes: - - string = the original string of which this node represents a region - - span = a pair of (begin, end) indices delimiting the substring - - parent = parent node - - children = list of children nodes - - guess = Guess() - - BaseMatchTrees are displayed in the following way: - - >>> path = 'Movies/Dark City (1998)/Dark.City.(1998).DC.BDRip.720p.DTS.X264-CHD.mkv' - >>> print(guessit.IterativeMatcher(path).match_tree) - 000000 1111111111111111 2222222222222222222222222222222222222222222 333 - 000000 0000000000111111 0000000000111111222222222222222222222222222 000 - 011112 011112000011111222222222222222222 000 - 011112222222222222 - 0000011112222 - 01112 0111 - Movies/__________(____)/Dark.City.(____).DC._____.____.___.____-___.___ - tttttttttt yyyy yyyy fffff ssss aaa vvvv rrr ccc - Movies/Dark City (1998)/Dark.City.(1998).DC.BDRip.720p.DTS.X264-CHD.mkv - - The last line contains the filename, which you can use a reference. - The previous line contains the type of property that has been found. - The line before that contains the filename, where all the found groups - have been blanked. Basically, what is left on this line are the leftover - groups which could not be identified. - - The lines before that indicate the indices of the groups in the tree. - - For instance, the part of the filename 'BDRip' is the leaf with index - ``(2, 2, 1)`` (read from top to bottom), and its meaning is 'format' - (as shown by the ``f``'s on the last-but-one line). 
- """ - - def __init__(self, string='', span=None, parent=None, clean_function=None): - self.string = string - self.span = span or (0, len(string)) - self.parent = parent - self.children = [] - self.guess = Guess() - self._clean_value = None - self._clean_function = clean_function or clean_default - - @property - def value(self): - """Return the substring that this node matches.""" - return self.string[self.span[0]:self.span[1]] - - @property - def clean_value(self): - """Return a cleaned value of the matched substring, with better - presentation formatting (punctuation marks removed, duplicate - spaces, ...)""" - if self._clean_value is None: - self._clean_value = self.clean_string(self.value) - return self._clean_value - - def clean_string(self, string): - return self._clean_function(string) - - @property - def offset(self): - return self.span[0] - - @property - def info(self): - """Return a dict containing all the info guessed by this node, - subnodes included.""" - result = dict(self.guess) - - for c in self.children: - result.update(c.info) - - return result - - @property - def root(self): - """Return the root node of the tree.""" - if not self.parent: - return self - - return self.parent.root - - @property - def depth(self): - """Return the depth of this node.""" - if self.is_leaf(): - return 0 - - return 1 + max(c.depth for c in self.children) - - def is_leaf(self): - """Return whether this node is a leaf or not.""" - return self.children == [] - - def add_child(self, span): - """Add a new child node to this node with the given span.""" - child = MatchTree(self.string, span=span, parent=self, clean_function=self._clean_function) - self.children.append(child) - return child - - def get_partition_spans(self, indices): - """Return the list of absolute spans for the regions of the original - string defined by splitting this node at the given indices (relative - to this node)""" - indices = sorted(indices) - if indices[0] != 0: - indices.insert(0, 0) - if 
indices[-1] != len(self.value): - indices.append(len(self.value)) - - spans = [] - for start, end in zip(indices[:-1], indices[1:]): - spans.append((self.offset + start, - self.offset + end)) - return spans - - def partition(self, indices): - """Partition this node by splitting it at the given indices, - relative to this node.""" - for partition_span in self.get_partition_spans(indices): - self.add_child(span=partition_span) - - def split_on_components(self, components): - offset = 0 - for c in components: - start = self.value.find(c, offset) - end = start + len(c) - self.add_child(span=(self.offset + start, - self.offset + end)) - offset = end - - def nodes_at_depth(self, depth): - """Return all the nodes at a given depth in the tree""" - if depth == 0: - yield self - - for child in self.children: - for node in child.nodes_at_depth(depth - 1): - yield node - - @property - def node_idx(self): - """Return this node's index in the tree, as a tuple. - If this node is the root of the tree, then return ().""" - if self.parent is None: - return () - return self.parent.node_idx + (self.node_last_idx,) - - @property - def node_last_idx(self): - if self.parent is None: - return None - return self.parent.children.index(self) - - def node_at(self, idx): - """Return the node at the given index in the subtree rooted at - this node.""" - if not idx: - return self - - try: - return self.children[idx[0]].node_at(idx[1:]) - except IndexError: - raise ValueError('Non-existent node index: %s' % (idx,)) - - def nodes(self): - """Return all the nodes and subnodes in this tree.""" - yield self - for child in self.children: - for node in child.nodes(): - yield node - - def leaves(self): - """Return a generator over all the nodes that are leaves.""" - if self.is_leaf(): - yield self - else: - for child in self.children: - # pylint: disable=W0212 - for leaf in child.leaves(): - yield leaf - - def group_node(self): - return self._other_group_node(0) - - def previous_group_node(self): - 
return self._other_group_node(-1) - - def next_group_node(self): - return self._other_group_node(+1) - - def _other_group_node(self, offset): - if len(self.node_idx) > 1: - group_idx = self.node_idx[:2] - if group_idx[1] + offset >= 0: - other_group_idx = (group_idx[0], group_idx[1] + offset) - try: - other_group_node = self.root.node_at(other_group_idx) - return other_group_node - except ValueError: - pass - return None - - def previous_leaf(self, leaf): - """Return previous leaf for this node""" - return self._other_leaf(leaf, -1) - - def next_leaf(self, leaf): - """Return next leaf for this node""" - return self._other_leaf(leaf, +1) - - def _other_leaf(self, leaf, offset): - leaves = list(self.leaves()) - index = leaves.index(leaf) + offset - if index > 0 and index < len(leaves): - return leaves[index] - return None - - def previous_leaves(self, leaf): - """Return previous leaves for this node""" - leaves = list(self.leaves()) - index = leaves.index(leaf) - if index > 0 and index < len(leaves): - previous_leaves = leaves[:index] - previous_leaves.reverse() - return previous_leaves - return [] - - def next_leaves(self, leaf): - """Return next leaves for this node""" - leaves = list(self.leaves()) - index = leaves.index(leaf) - if index > 0 and index < len(leaves): - return leaves[index + 1:len(leaves)] - return [] - - def to_string(self): - """Return a readable string representation of this tree. - - The result is a multi-line string, where the lines are: - - line 1 -> N-2: each line contains the nodes at the given depth in the tree - - line N-2: original string where all the found groups have been blanked - - line N-1: type of property that has been found - - line N: the original string, which you can use a reference. 
- """ - empty_line = ' ' * len(self.string) - - def to_hex(x): - if isinstance(x, int): - return str(x) if x < 10 else chr(55 + x) - return x - - def meaning(result): - mmap = {'episodeNumber': 'E', - 'season': 'S', - 'extension': 'e', - 'format': 'f', - 'language': 'l', - 'country': 'C', - 'videoCodec': 'v', - 'videoProfile': 'v', - 'audioCodec': 'a', - 'audioProfile': 'a', - 'audioChannels': 'a', - 'website': 'w', - 'container': 'c', - 'series': 'T', - 'title': 't', - 'date': 'd', - 'year': 'y', - 'releaseGroup': 'r', - 'screenSize': 's', - 'other': 'o' - } - - if result is None: - return ' ' - - for prop, l in mmap.items(): - if prop in result: - return l - - return 'x' - - lines = [empty_line] * (self.depth + 2) # +2: remaining, meaning - lines[-2] = self.string - - for node in self.nodes(): - if node == self: - continue - - idx = node.node_idx - depth = len(idx) - 1 - if idx: - lines[depth] = str_fill(lines[depth], node.span, - to_hex(idx[-1])) - if node.guess: - lines[-2] = str_fill(lines[-2], node.span, '_') - lines[-1] = str_fill(lines[-1], node.span, meaning(node.guess)) - - lines.append(self.string) - - return '\n'.join(l.rstrip() for l in lines) - - def __unicode__(self): - return self.to_string() - - def __repr__(self): - return '' % self.value - - -class MatchTree(BaseMatchTree): - """The MatchTree contains a few "utility" methods which are not necessary - for the BaseMatchTree, but add a lot of convenience for writing - higher-level rules. 
- """ - - def unidentified_leaves(self, - valid=lambda leaf: len(leaf.clean_value) > 0): - """Return a generator of leaves that are not empty.""" - for leaf in self.leaves(): - if not leaf.guess and valid(leaf): - yield leaf - - def leaves_containing(self, property_name): - """Return a generator of leaves that guessed the given property.""" - if isinstance(property_name, base_text_type): - property_name = [property_name] - - for leaf in self.leaves(): - for prop in property_name: - if prop in leaf.guess: - yield leaf - break - - def first_leaf_containing(self, property_name): - """Return the first leaf containing the given property.""" - try: - return next(self.leaves_containing(property_name)) - except StopIteration: - return None - - def previous_unidentified_leaves(self, node): - """Return a generator of non-empty leaves that are before the given - node (in the string).""" - node_idx = node.node_idx - for leaf in self.unidentified_leaves(): - if leaf.node_idx < node_idx: - yield leaf - - def previous_leaves_containing(self, node, property_name): - """Return a generator of leaves containing the given property that are - before the given node (in the string).""" - node_idx = node.node_idx - for leaf in self.leaves_containing(property_name): - if leaf.node_idx < node_idx: - yield leaf - - def is_explicit(self): - """Return whether the group was explicitly enclosed by - parentheses/square brackets/etc.""" - return (self.value[0] + self.value[-1]) in group_delimiters - - def matched(self): - """Return a single guess that contains all the info found in the - nodes of this tree, trying to merge properties as good as possible. 
- """ - if not getattr(self, '_matched_result', None): - # we need to make a copy here, as the merge functions work in place and - # calling them on the match tree would modify it - parts = [copy.copy(node.guess) for node in self.nodes() if node.guess] - - result = smart_merge(parts) - - log.debug('Final result: ' + result.nice_string()) - self._matched_result = result - - for unidentified_leaves in self.unidentified_leaves(): - if 'unidentified' not in self._matched_result: - self._matched_result['unidentified'] = [] - self._matched_result['unidentified'].append(unidentified_leaves.clean_value) - - return self._matched_result diff --git a/libs/guessit/options.py b/libs/guessit/options.py index 9b8dc0fb..be24af48 100644 --- a/libs/guessit/options.py +++ b/libs/guessit/options.py @@ -1,7 +1,20 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +Options +""" from argparse import ArgumentParser +import shlex + +import six -def build_opts(transformers=None): +def build_argument_parser(): + """ + Builds the argument parser + :return: the argument parser + :rtype: ArgumentParser + """ opts = ArgumentParser() opts.add_argument(dest='filename', help='Filename or release name to guess', nargs='*') @@ -9,61 +22,67 @@ def build_opts(transformers=None): naming_opts.add_argument('-t', '--type', dest='type', default=None, help='The suggested file type: movie, episode. If undefined, type will be guessed.') naming_opts.add_argument('-n', '--name-only', dest='name_only', action='store_true', default=False, - help='Parse files as name only. 
Disable folder parsing, extension parsing, and file content analysis.') - naming_opts.add_argument('-c', '--split-camel', dest='split_camel', action='store_true', default=False, - help='Split camel case part of filename.') + help='Parse files as name only, considering "/" and "\\" like other separators.') + naming_opts.add_argument('-Y', '--date-year-first', action='store_true', dest='date_year_first', default=None, + help='If short date is found, consider the first digits as the year.') + naming_opts.add_argument('-D', '--date-day-first', action='store_true', dest='date_day_first', default=None, + help='If short date is found, consider the second digits as the day.') + naming_opts.add_argument('-L', '--allowed-languages', action='append', dest='allowed_languages', + help='Allowed language (can be used multiple times)') + naming_opts.add_argument('-C', '--allowed-countries', action='append', dest='allowed_countries', + help='Allowed country (can be used multiple times)') + naming_opts.add_argument('-E', '--episode-prefer-number', action='store_true', dest='episode_prefer_number', + default=False, + help='Guess "serie.213.avi" as the episode 213. Without this option, ' + 'it will be guessed as season 2, episode 13') + naming_opts.add_argument('-T', '--expected-title', action='append', dest='expected_title', + help='Expected title to parse (can be used multiple times)') + naming_opts.add_argument('-G', '--expected-group', action='append', dest='expected_group', + help='Expected release group (can be used multiple times)') - naming_opts.add_argument('-X', '--disabled-transformer', action='append', dest='disabled_transformers', - help='Transformer to disable (can be used multiple time)') + input_opts = opts.add_argument_group("Input") + input_opts.add_argument('-f', '--input-file', dest='input_file', default=False, + help='Read filenames from an input text file. 
File should use UTF-8 charset.') output_opts = opts.add_argument_group("Output") output_opts.add_argument('-v', '--verbose', action='store_true', dest='verbose', default=False, help='Display debug output') output_opts.add_argument('-P', '--show-property', dest='show_property', default=None, - help='Display the value of a single property (title, series, videoCodec, year, type ...)'), - output_opts.add_argument('-u', '--unidentified', dest='unidentified', action='store_true', default=False, - help='Display the unidentified parts.'), + help='Display the value of a single property (title, series, video_codec, year, ...)') output_opts.add_argument('-a', '--advanced', dest='advanced', action='store_true', default=False, help='Display advanced information for filename guesses, as json output') + output_opts.add_argument('-j', '--json', dest='json', action='store_true', default=False, + help='Display information for filename guesses as json output') output_opts.add_argument('-y', '--yaml', dest='yaml', action='store_true', default=False, - help='Display information for filename guesses as yaml output (like unit-test)') - output_opts.add_argument('-f', '--input-file', dest='input_file', default=False, - help='Read filenames from an input file.') - output_opts.add_argument('-d', '--demo', action='store_true', dest='demo', default=False, - help='Run a few builtin tests instead of analyzing a file') + help='Display information for filename guesses as yaml output') + + information_opts = opts.add_argument_group("Information") information_opts.add_argument('-p', '--properties', dest='properties', action='store_true', default=False, help='Display properties that can be guessed.') information_opts.add_argument('-V', '--values', dest='values', action='store_true', default=False, help='Display property values that can be guessed.') - information_opts.add_argument('-s', '--transformers', dest='transformers', action='store_true', default=False, - help='Display transformers that can be 
used.') information_opts.add_argument('--version', dest='version', action='store_true', default=False, help='Display the guessit version.') - webservice_opts = opts.add_argument_group("guessit.io") - webservice_opts.add_argument('-b', '--bug', action='store_true', dest='submit_bug', default=False, - help='Submit a wrong detection to the guessit.io service') - - other_opts = opts.add_argument_group("Other features") - other_opts.add_argument('-i', '--info', dest='info', default='filename', - help='The desired information type: filename, video, hash_mpc or a hash from python\'s ' - 'hashlib module, such as hash_md5, hash_sha1, ...; or a list of any of ' - 'them, comma-separated') - - if transformers: - for transformer in transformers: - transformer.register_arguments(opts, naming_opts, output_opts, information_opts, webservice_opts, other_opts) - - return opts, naming_opts, output_opts, information_opts, webservice_opts, other_opts -_opts, _naming_opts, _output_opts, _information_opts, _webservice_opts, _other_opts = None, None, None, None, None, None + return opts -def reload(transformers=None): - global _opts, _naming_opts, _output_opts, _information_opts, _webservice_opts, _other_opts - _opts, _naming_opts, _output_opts, _information_opts, _webservice_opts, _other_opts = build_opts(transformers) +def parse_options(options): + """ + Parse given option string + :param options: + :type options: + :return: + :rtype: + """ + if isinstance(options, six.string_types): + args = shlex.split(options) + options = vars(argument_parser.parse_args(args)) + if options is None: + options = {} + return options -def get_opts(): - return _opts +argument_parser = build_argument_parser() diff --git a/libs/guessit/patterns/__init__.py b/libs/guessit/patterns/__init__.py deleted file mode 100755 index 1816d494..00000000 --- a/libs/guessit/patterns/__init__.py +++ /dev/null @@ -1,77 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# -# GuessIt - A library for guessing information 
from filenames -# Copyright (c) 2013 Nicolas Wack -# Copyright (c) 2013 Rémi Alvergnat -# -# GuessIt is free software; you can redistribute it and/or modify it under -# the terms of the Lesser GNU General Public License as published by -# the Free Software Foundation; either version 3 of the License, or -# (at your option) any later version. -# -# GuessIt is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# Lesser GNU General Public License for more details. -# -# You should have received a copy of the Lesser GNU General Public License -# along with this program. If not, see . -# - -from __future__ import absolute_import, division, print_function, unicode_literals - -import re - -from guessit import base_text_type - -group_delimiters = ['()', '[]', '{}'] - -# separator character regexp -sep = r'[][,)(}:{+ /~/\._-]' # regexp art, hehe :D - -_dash = '-' -_psep = '[\W_]?' - - -def build_or_pattern(patterns, escape=False): - """Build a or pattern string from a list of possible patterns - """ - or_pattern = [] - for pattern in patterns: - if not or_pattern: - or_pattern.append('(?:') - else: - or_pattern.append('|') - or_pattern.append('(?:%s)' % re.escape(pattern) if escape else pattern) - or_pattern.append(')') - return ''.join(or_pattern) - - -def compile_pattern(pattern, enhance=True): - """Compile and enhance a pattern - - :param pattern: Pattern to compile (regexp). - :type pattern: string - - :param pattern: Enhance pattern before compiling. - :type pattern: string - - :return: The compiled pattern - :rtype: regular expression object - """ - return re.compile(enhance_pattern(pattern) if enhance else pattern, re.IGNORECASE) - - -def enhance_pattern(pattern): - """Enhance pattern to match more equivalent values. 
- - '-' are replaced by '[\W_]?', which matches more types of separators (or none) - - :param pattern: Pattern to enhance (regexp). - :type pattern: string - - :return: The enhanced pattern - :rtype: string - """ - return pattern.replace(_dash, _psep) diff --git a/libs/guessit/patterns/extension.py b/libs/guessit/patterns/extension.py deleted file mode 100644 index 40a576b6..00000000 --- a/libs/guessit/patterns/extension.py +++ /dev/null @@ -1,32 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# -# GuessIt - A library for guessing information from filenames -# Copyright (c) 2013 Nicolas Wack -# Copyright (c) 2013 Rémi Alvergnat -# Copyright (c) 2011 Ricard Marxer -# -# GuessIt is free software; you can redistribute it and/or modify it under -# the terms of the Lesser GNU General Public License as published by -# the Free Software Foundation; either version 3 of the License, or -# (at your option) any later version. -# -# GuessIt is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# Lesser GNU General Public License for more details. -# -# You should have received a copy of the Lesser GNU General Public License -# along with this program. If not, see . 
-# - -from __future__ import absolute_import, division, print_function, unicode_literals - -subtitle_exts = ['srt', 'idx', 'sub', 'ssa', 'ass'] - -info_exts = ['nfo'] - -video_exts = ['3g2', '3gp', '3gp2', 'asf', 'avi', 'divx', 'flv', 'm4v', 'mk2', - 'mka', 'mkv', 'mov', 'mp4', 'mp4a', 'mpeg', 'mpg', 'ogg', 'ogm', - 'ogv', 'qt', 'ra', 'ram', 'rm', 'ts', 'wav', 'webm', 'wma', 'wmv', - 'iso'] diff --git a/libs/guessit/patterns/numeral.py b/libs/guessit/patterns/numeral.py deleted file mode 100644 index f254c6b8..00000000 --- a/libs/guessit/patterns/numeral.py +++ /dev/null @@ -1,150 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# -# GuessIt - A library for guessing information from filenames -# Copyright (c) 2013 Rémi Alvergnat -# -# GuessIt is free software; you can redistribute it and/or modify it under -# the terms of the Lesser GNU General Public License as published by -# the Free Software Foundation; either version 3 of the License, or -# (at your option) any later version. -# -# GuessIt is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# Lesser GNU General Public License for more details. -# -# You should have received a copy of the Lesser GNU General Public License -# along with this program. If not, see . 
-# - -from __future__ import absolute_import, division, print_function, unicode_literals - -import re - -digital_numeral = '\d{1,4}' - -roman_numeral = "(?=[MCDLXVI]+)M{0,4}(?:CM|CD|D?C{0,3})(?:XC|XL|L?X{0,3})(?:IX|IV|V?I{0,3})" - -english_word_numeral_list = [ - 'zero', 'one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine', 'ten', - 'eleven', 'twelve', 'thirteen', 'fourteen', 'fifteen', 'sixteen', 'seventeen', 'eighteen', 'nineteen', 'twenty' -] - -french_word_numeral_list = [ - 'zéro', 'un', 'deux', 'trois', 'quatre', 'cinq', 'six', 'sept', 'huit', 'neuf', 'dix', - 'onze', 'douze', 'treize', 'quatorze', 'quinze', 'seize', 'dix-sept', 'dix-huit', 'dix-neuf', 'vingt' -] - -french_alt_word_numeral_list = [ - 'zero', 'une', 'deux', 'trois', 'quatre', 'cinq', 'six', 'sept', 'huit', 'neuf', 'dix', - 'onze', 'douze', 'treize', 'quatorze', 'quinze', 'seize', 'dixsept', 'dixhuit', 'dixneuf', 'vingt' -] - - -def __build_word_numeral(*args, **kwargs): - re_ = None - for word_list in args: - for word in word_list: - if not re_: - re_ = '(?:(?=\w+)' - else: - re_ += '|' - re_ += word - re_ += ')' - return re_ - -word_numeral = __build_word_numeral(english_word_numeral_list, french_word_numeral_list, french_alt_word_numeral_list) - -numeral = '(?:' + digital_numeral + '|' + roman_numeral + '|' + word_numeral + ')' - -__romanNumeralMap = ( - ('M', 1000), - ('CM', 900), - ('D', 500), - ('CD', 400), - ('C', 100), - ('XC', 90), - ('L', 50), - ('XL', 40), - ('X', 10), - ('IX', 9), - ('V', 5), - ('IV', 4), - ('I', 1) - ) - -__romanNumeralPattern = re.compile('^' + roman_numeral + '$') - - -def __parse_roman(value): - """convert Roman numeral to integer""" - if not __romanNumeralPattern.search(value): - raise ValueError('Invalid Roman numeral: %s' % value) - - result = 0 - index = 0 - for num, integer in __romanNumeralMap: - while value[index:index + len(num)] == num: - result += integer - index += len(num) - return result - - -def __parse_word(value): - """Convert 
Word numeral to integer""" - for word_list in [english_word_numeral_list, french_word_numeral_list, french_alt_word_numeral_list]: - try: - return word_list.index(value.lower()) - except ValueError: - pass - raise ValueError - - -_clean_re = re.compile('[^\d]*(\d+)[^\d]*') - - -def parse_numeral(value, int_enabled=True, roman_enabled=True, word_enabled=True, clean=True): - """Parse a numeric value into integer. - - input can be an integer as a string, a roman numeral or a word - - :param value: Value to parse. Can be an integer, roman numeral or word. - :type value: string - - :return: Numeric value, or None if value can't be parsed - :rtype: int - """ - if int_enabled: - try: - if clean: - match = _clean_re.match(value) - if match: - clean_value = match.group(1) - return int(clean_value) - return int(value) - except ValueError: - pass - if roman_enabled: - try: - if clean: - for word in value.split(): - try: - return __parse_roman(word.upper()) - except ValueError: - pass - return __parse_roman(value) - except ValueError: - pass - if word_enabled: - try: - if clean: - for word in value.split(): - try: - return __parse_word(word) - except ValueError: - pass - return __parse_word(value) - except ValueError: - pass - raise ValueError('Invalid numeral: ' + value) diff --git a/libs/guessit/plugins/__init__.py b/libs/guessit/plugins/__init__.py deleted file mode 100644 index 6a63e4e1..00000000 --- a/libs/guessit/plugins/__init__.py +++ /dev/null @@ -1,21 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# -# GuessIt - A library for guessing information from filenames -# Copyright (c) 2013 Nicolas Wack -# -# GuessIt is free software; you can redistribute it and/or modify it under -# the terms of the Lesser GNU General Public License as published by -# the Free Software Foundation; either version 3 of the License, or -# (at your option) any later version. 
-# -# GuessIt is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# Lesser GNU General Public License for more details. -# -# You should have received a copy of the Lesser GNU General Public License -# along with this program. If not, see . -# - -from __future__ import absolute_import, division, print_function, unicode_literals diff --git a/libs/guessit/plugins/transformers.py b/libs/guessit/plugins/transformers.py deleted file mode 100644 index f2f746c0..00000000 --- a/libs/guessit/plugins/transformers.py +++ /dev/null @@ -1,219 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# -# GuessIt - A library for guessing information from filenames -# Copyright (c) 2013 Nicolas Wack -# -# GuessIt is free software; you can redistribute it and/or modify it under -# the terms of the Lesser GNU General Public License as published by -# the Free Software Foundation; either version 3 of the License, or -# (at your option) any later version. -# -# GuessIt is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# Lesser GNU General Public License for more details. -# -# You should have received a copy of the Lesser GNU General Public License -# along with this program. If not, see . 
-# - -from __future__ import absolute_import, division, print_function, unicode_literals -from guessit.options import reload as reload_options - -from stevedore import ExtensionManager -from pkg_resources import EntryPoint - -from stevedore.extension import Extension -from logging import getLogger - -log = getLogger(__name__) - - -class Transformer(object): # pragma: no cover - def __init__(self, priority=0): - self.priority = priority - self.log = getLogger(self.name) - - @property - def name(self): - return self.__class__.__name__ - - def supported_properties(self): - return {} - - def second_pass_options(self, mtree, options=None): - return None - - def should_process(self, mtree, options=None): - return True - - def process(self, mtree, options=None): - pass - - def post_process(self, mtree, options=None): - pass - - def register_arguments(self, opts, naming_opts, output_opts, information_opts, webservice_opts, other_options): - pass - - def rate_quality(self, guess, *props): - return 0 - - -class CustomTransformerExtensionManager(ExtensionManager): - def __init__(self, namespace='guessit.transformer', invoke_on_load=True, - invoke_args=(), invoke_kwds={}, propagate_map_exceptions=True, on_load_failure_callback=None, - verify_requirements=False): - super(CustomTransformerExtensionManager, self).__init__(namespace=namespace, - invoke_on_load=invoke_on_load, - invoke_args=invoke_args, - invoke_kwds=invoke_kwds, - propagate_map_exceptions=propagate_map_exceptions, - on_load_failure_callback=on_load_failure_callback, - verify_requirements=verify_requirements) - - def order_extensions(self, extensions): - """Order the loaded transformers - - It should follow those rules - - website before language (eg: tvu.org.ru vs russian) - - language before episodes_rexps - - properties before language (eg: he-aac vs hebrew) - - release_group before properties (eg: XviD-?? 
vs xvid) - """ - extensions.sort(key=lambda ext: -ext.obj.priority) - return extensions - - def _load_one_plugin(self, ep, invoke_on_load, invoke_args, invoke_kwds, verify_requirements=True): - if not ep.dist: - # `require` argument of ep.load() is deprecated in newer versions of setuptools - if hasattr(ep, 'resolve'): - plugin = ep.resolve() - elif hasattr(ep, '_load'): - plugin = ep._load() - else: - plugin = ep.load(require=False) - else: - plugin = ep.load() - if invoke_on_load: - obj = plugin(*invoke_args, **invoke_kwds) - else: - obj = None - return Extension(ep.name, ep, plugin, obj) - - def _load_plugins(self, invoke_on_load, invoke_args, invoke_kwds, verify_requirements): - return self.order_extensions(super(CustomTransformerExtensionManager, self)._load_plugins(invoke_on_load, invoke_args, invoke_kwds, verify_requirements)) - - def objects(self): - return self.map(self._get_obj) - - def _get_obj(self, ext): - return ext.obj - - def object(self, name): - try: - return self[name].obj - except KeyError: - return None - - def register_module(self, name=None, module_name=None, attrs=(), entry_point=None): - if entry_point: - ep = EntryPoint.parse(entry_point) - else: - ep = EntryPoint(name, module_name, attrs) - loaded = self._load_one_plugin(ep, invoke_on_load=True, invoke_args=(), invoke_kwds={}) - if loaded: - self.extensions.append(loaded) - self.extensions = self.order_extensions(self.extensions) - self._extensions_by_name = None - - -class DefaultTransformerExtensionManager(CustomTransformerExtensionManager): - @property - def _internal_entry_points(self): - return ['split_path_components = guessit.transfo.split_path_components:SplitPathComponents', - 'guess_filetype = guessit.transfo.guess_filetype:GuessFiletype', - 'split_explicit_groups = guessit.transfo.split_explicit_groups:SplitExplicitGroups', - 'guess_date = guessit.transfo.guess_date:GuessDate', - 'guess_website = guessit.transfo.guess_website:GuessWebsite', - 'guess_release_group = 
guessit.transfo.guess_release_group:GuessReleaseGroup', - 'guess_properties = guessit.transfo.guess_properties:GuessProperties', - 'guess_language = guessit.transfo.guess_language:GuessLanguage', - 'guess_video_rexps = guessit.transfo.guess_video_rexps:GuessVideoRexps', - 'guess_episodes_rexps = guessit.transfo.guess_episodes_rexps:GuessEpisodesRexps', - 'guess_weak_episodes_rexps = guessit.transfo.guess_weak_episodes_rexps:GuessWeakEpisodesRexps', - 'guess_bonus_features = guessit.transfo.guess_bonus_features:GuessBonusFeatures', - 'guess_year = guessit.transfo.guess_year:GuessYear', - 'guess_country = guessit.transfo.guess_country:GuessCountry', - 'guess_idnumber = guessit.transfo.guess_idnumber:GuessIdnumber', - 'split_on_dash = guessit.transfo.split_on_dash:SplitOnDash', - 'guess_episode_info_from_position = guessit.transfo.guess_episode_info_from_position:GuessEpisodeInfoFromPosition', - 'guess_movie_title_from_position = guessit.transfo.guess_movie_title_from_position:GuessMovieTitleFromPosition', - 'guess_episode_details = guessit.transfo.guess_episode_details:GuessEpisodeDetails', - 'expected_series = guessit.transfo.expected_series:ExpectedSeries', - 'expected_title = guessit.transfo.expected_title:ExpectedTitle',] - - def _find_entry_points(self, namespace): - entry_points = {} - # Internal entry points - if namespace == self.namespace: - for internal_entry_point_str in self._internal_entry_points: - internal_entry_point = EntryPoint.parse(internal_entry_point_str) - entry_points[internal_entry_point.name] = internal_entry_point - - # Package entry points - setuptools_entrypoints = super(DefaultTransformerExtensionManager, self)._find_entry_points(namespace) - for setuptools_entrypoint in setuptools_entrypoints: - entry_points[setuptools_entrypoint.name] = setuptools_entrypoint - - return list(entry_points.values()) - -_extensions = None - - -def all_transformers(): - return _extensions.objects() - - -def get_transformer(name): - return 
_extensions.object(name) - - -def add_transformer(name, module_name, class_name): - """ - Add a transformer - - :param name: the name of the transformer. ie: 'guess_regexp_id' - :param name: the module name. ie: 'flexget.utils.parsers.transformers.guess_regexp_id' - :param class_name: the class name. ie: 'GuessRegexpId' - """ - - _extensions.register_module(name, module_name, (class_name,)) - - -def add_transformer(entry_point): - """ - Add a transformer - - :param entry_point: entry point spec format. ie: 'guess_regexp_id = flexget.utils.parsers.transformers.guess_regexp_id:GuessRegexpId' - """ - _extensions.register_module(entry_point = entry_point) - - -def reload(custom=False): - """ - Reload extension manager with default or custom one. - :param custom: if True, custom manager will be used, else default one. - Default manager will load default extensions from guessit and setuptools packaging extensions - Custom manager will not load default extensions from guessit, using only setuptools packaging extensions. - :type custom: boolean - """ - global _extensions - if custom: - _extensions = CustomTransformerExtensionManager() - else: - _extensions = DefaultTransformerExtensionManager() - reload_options(all_transformers()) - -reload() diff --git a/libs/guessit/quality.py b/libs/guessit/quality.py deleted file mode 100644 index 870bbdbb..00000000 --- a/libs/guessit/quality.py +++ /dev/null @@ -1,65 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# -# GuessIt - A library for guessing information from filenames -# Copyright (c) 2013 Rémi Alvergnat -# -# GuessIt is free software; you can redistribute it and/or modify it under -# the terms of the Lesser GNU General Public License as published by -# the Free Software Foundation; either version 3 of the License, or -# (at your option) any later version. 
-# -# GuessIt is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# Lesser GNU General Public License for more details. -# -# You should have received a copy of the Lesser GNU General Public License -# along with this program. If not, see . -# - -from __future__ import absolute_import, division, print_function, unicode_literals - -from guessit.plugins.transformers import all_transformers - - -def best_quality_properties(props, *guesses): - """Retrieve the best quality guess, based on given properties - - :param props: Properties to include in the rating - :type props: list of strings - :param guesses: Guesses to rate - :type guesses: :class:`guessit.guess.Guess` - - :return: Best quality guess from all passed guesses - :rtype: :class:`guessit.guess.Guess` - """ - best_guess = None - best_rate = None - for guess in guesses: - for transformer in all_transformers(): - rate = transformer.rate_quality(guess, *props) - if best_rate is None or best_rate < rate: - best_rate = rate - best_guess = guess - return best_guess - - -def best_quality(*guesses): - """Retrieve the best quality guess. 
- - :param guesses: Guesses to rate - :type guesses: :class:`guessit.guess.Guess` - - :return: Best quality guess from all passed guesses - :rtype: :class:`guessit.guess.Guess` - """ - best_guess = None - best_rate = None - for guess in guesses: - for transformer in all_transformers(): - rate = transformer.rate_quality(guess) - if best_rate is None or best_rate < rate: - best_rate = rate - best_guess = guess - return best_guess diff --git a/libs/guessit/reutils.py b/libs/guessit/reutils.py new file mode 100644 index 00000000..0b654d27 --- /dev/null +++ b/libs/guessit/reutils.py @@ -0,0 +1,35 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +Utils for re module +""" + +from rebulk.remodule import re + + +def build_or_pattern(patterns, name=None, escape=False): + """ + Build a or pattern string from a list of possible patterns + + :param patterns: + :type patterns: + :param name: + :type name: + :param escape: + :type escape: + :return: + :rtype: + """ + or_pattern = [] + for pattern in patterns: + if not or_pattern: + or_pattern.append('(?') + if name: + or_pattern.append('P<' + name + '>') + else: + or_pattern.append(':') + else: + or_pattern.append('|') + or_pattern.append('(?:%s)' % re.escape(pattern) if escape else pattern) + or_pattern.append(')') + return ''.join(or_pattern) diff --git a/libs/guessit/rules/__init__.py b/libs/guessit/rules/__init__.py new file mode 100644 index 00000000..f9dc4557 --- /dev/null +++ b/libs/guessit/rules/__init__.py @@ -0,0 +1,88 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +Rebulk object default builder +""" +from rebulk import Rebulk + +from .markers.path import path +from .markers.groups import groups + +from .properties.episodes import episodes +from .properties.container import container +from .properties.format import format_ +from .properties.video_codec import video_codec +from .properties.audio_codec import audio_codec +from .properties.screen_size import screen_size +from .properties.website import 
website +from .properties.date import date +from .properties.title import title +from .properties.episode_title import episode_title +from .properties.language import language +from .properties.country import country +from .properties.release_group import release_group +from .properties.other import other +from .properties.edition import edition +from .properties.cds import cds +from .properties.bonus import bonus +from .properties.film import film +from .properties.part import part +from .properties.crc import crc +from .properties.mimetype import mimetype +from .properties.type import type_ + +from .processors import processors + + +def rebulk_builder(): + """ + Default builder for main Rebulk object used by api. + :return: Main Rebulk object + :rtype: Rebulk + """ + rebulk = Rebulk() + + rebulk.rebulk(path()) + rebulk.rebulk(groups()) + + rebulk.rebulk(episodes()) + rebulk.rebulk(container()) + rebulk.rebulk(format_()) + rebulk.rebulk(video_codec()) + rebulk.rebulk(audio_codec()) + rebulk.rebulk(screen_size()) + rebulk.rebulk(website()) + rebulk.rebulk(date()) + rebulk.rebulk(title()) + rebulk.rebulk(episode_title()) + rebulk.rebulk(language()) + rebulk.rebulk(country()) + rebulk.rebulk(release_group()) + rebulk.rebulk(other()) + rebulk.rebulk(edition()) + rebulk.rebulk(cds()) + rebulk.rebulk(bonus()) + rebulk.rebulk(film()) + rebulk.rebulk(part()) + rebulk.rebulk(crc()) + + rebulk.rebulk(processors()) + + rebulk.rebulk(mimetype()) + rebulk.rebulk(type_()) + + def customize_properties(properties): + """ + Customize default rebulk properties + """ + count = properties['count'] + del properties['count'] + + properties['season_count'] = count + properties['episode_count'] = count + + return properties + + rebulk.customize_properties = customize_properties + + return rebulk diff --git a/libs/guessit/rules/common/__init__.py b/libs/guessit/rules/common/__init__.py new file mode 100644 index 00000000..e9da2aa0 --- /dev/null +++ b/libs/guessit/rules/common/__init__.py 
@@ -0,0 +1,14 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +Common module +""" +import re + +seps = r' [](){}+*|=-_~#/\\.,;:' # list of tags/words separators +seps_no_fs = seps.replace('/', '').replace('\\', '') + +title_seps = r'-+/\|' # separators for title + +dash = (r'-', r'['+re.escape(seps_no_fs)+']') # abbreviation used by many rebulk objects. +alt_dash = (r'@', r'['+re.escape(seps_no_fs)+']') # abbreviation used by many rebulk objects. diff --git a/libs/guessit/rules/common/comparators.py b/libs/guessit/rules/common/comparators.py new file mode 100644 index 00000000..f9db1d3f --- /dev/null +++ b/libs/guessit/rules/common/comparators.py @@ -0,0 +1,68 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +Comparators +""" +try: + from functools import cmp_to_key +except ImportError: + from ...backports import cmp_to_key + + +def marker_comparator_predicate(match): + """ + Match predicate used in comparator + """ + return not match.private and \ + match.name not in ['proper_count', 'title', 'episode_title', 'alternative_title'] and \ + not (match.name == 'container' and 'extension' in match.tags) + + +def marker_weight(matches, marker): + """ + Compute the comparator weight of a marker + :param matches: + :param marker: + :return: + """ + return len(set(match.name for match in matches.range(*marker.span, predicate=marker_comparator_predicate))) + + +def marker_comparator(matches, markers): + """ + Builds a comparator that returns markers sorted from the most valuable to the less. + + Take the parts where matches count is higher, then when length is higher, then when position is at left. + + :param matches: + :type matches: + :return: + :rtype: + """ + def comparator(marker1, marker2): + """ + The actual comparator function. 
+ """ + matches_count = marker_weight(matches, marker2) - marker_weight(matches, marker1) + if matches_count: + return matches_count + len_diff = len(marker2) - len(marker1) + if len_diff: + return len_diff + return markers.index(marker2) - markers.index(marker1) + + return comparator + + +def marker_sorted(markers, matches): + """ + Sort markers from matches, from the most valuable to the less. + + :param fileparts: + :type fileparts: + :param matches: + :type matches: + :return: + :rtype: + """ + return sorted(markers, key=cmp_to_key(marker_comparator(matches, markers))) diff --git a/libs/guessit/rules/common/date.py b/libs/guessit/rules/common/date.py new file mode 100644 index 00000000..779e4b93 --- /dev/null +++ b/libs/guessit/rules/common/date.py @@ -0,0 +1,125 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +Date +""" +from dateutil import parser + +from rebulk.remodule import re + +_dsep = r'[-/ \.]' +_dsep_bis = r'[-/ \.x]' + +date_regexps = [ + re.compile(r'%s((\d{8}))%s' % (_dsep, _dsep), re.IGNORECASE), + re.compile(r'%s((\d{6}))%s' % (_dsep, _dsep), re.IGNORECASE), + re.compile(r'(?:^|[^\d])((\d{2})%s(\d{1,2})%s(\d{1,2}))(?:$|[^\d])' % (_dsep, _dsep), re.IGNORECASE), + re.compile(r'(?:^|[^\d])((\d{1,2})%s(\d{1,2})%s(\d{2}))(?:$|[^\d])' % (_dsep, _dsep), re.IGNORECASE), + re.compile(r'(?:^|[^\d])((\d{4})%s(\d{1,2})%s(\d{1,2}))(?:$|[^\d])' % (_dsep_bis, _dsep), re.IGNORECASE), + re.compile(r'(?:^|[^\d])((\d{1,2})%s(\d{1,2})%s(\d{4}))(?:$|[^\d])' % (_dsep, _dsep_bis), re.IGNORECASE), + re.compile(r'(?:^|[^\d])((\d{1,2}(?:st|nd|rd|th)?%s(?:[a-z]{3,10})%s\d{4}))(?:$|[^\d])' % (_dsep, _dsep), + re.IGNORECASE)] + + +def valid_year(year): + """Check if number is a valid year""" + return 1920 <= year < 2030 + + +def _is_int(string): + """ + Check if the input string is an integer + + :param string: + :type string: + :return: + :rtype: + """ + try: + int(string) + return True + except ValueError: + return False + + +def 
_guess_day_first_parameter(groups): + """ + If day_first is not defined, use some heuristic to fix it. + It helps to solve issues with python dateutils 2.5.3 parser changes. + + :param groups: match groups found for the date + :type groups: list of match objects + :return: day_first option guessed value + :rtype: bool + """ + + # If match starts with a long year, then day_first is force to false. + if _is_int(groups[0]) and valid_year(int(groups[0][:4])): + return False + # If match ends with a long year, the day_first is forced to true. + elif _is_int(groups[-1]) and valid_year(int(groups[-1][-4:])): + return True + # If match starts with a short year, then day_first is force to false. + elif _is_int(groups[0]) and int(groups[0][:2]) > 31: + return False + # If match ends with a short year, then day_first is force to true. + elif _is_int(groups[-1]) and int(groups[-1][-2:]) > 31: + return True + + +def search_date(string, year_first=None, day_first=None): + """Looks for date patterns, and if found return the date and group span. + + Assumes there are sentinels at the beginning and end of the string that + always allow matching a non-digit delimiting the date. + + Year can be defined on two digit only. It will return the nearest possible + date from today. + + >>> search_date(' This happened on 2002-04-22. ') + (18, 28, datetime.date(2002, 4, 22)) + + >>> search_date(' And this on 17-06-1998. 
') + (13, 23, datetime.date(1998, 6, 17)) + + >>> search_date(' no date in here ') + """ + start, end = None, None + match = None + groups = None + for date_re in date_regexps: + search_match = date_re.search(string) + if search_match and (match is None or search_match.end() - search_match.start() > len(match)): + start, end = search_match.start(1), search_match.end(1) + groups = search_match.groups()[1:] + match = '-'.join(groups) + + if match is None: + return + + if year_first and day_first is None: + day_first = False + + if day_first is None: + day_first = _guess_day_first_parameter(groups) + + # If day_first/year_first is undefined, parse is made using both possible values. + yearfirst_opts = [False, True] + if year_first is not None: + yearfirst_opts = [year_first] + + dayfirst_opts = [True, False] + if day_first is not None: + dayfirst_opts = [day_first] + + kwargs_list = ({'dayfirst': d, 'yearfirst': y} for d in dayfirst_opts for y in yearfirst_opts) + for kwargs in kwargs_list: + try: + date = parser.parse(match, **kwargs) + except (ValueError, TypeError): # pragma: no cover + # see https://bugs.launchpad.net/dateutil/+bug/1247643 + date = None + + # check date plausibility + if date and valid_year(date.year): # pylint:disable=no-member + return start, end, date.date() # pylint:disable=no-member diff --git a/libs/guessit/rules/common/formatters.py b/libs/guessit/rules/common/formatters.py new file mode 100644 index 00000000..6bd09b15 --- /dev/null +++ b/libs/guessit/rules/common/formatters.py @@ -0,0 +1,136 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +Formatters +""" +from rebulk.formatters import formatters +from rebulk.remodule import re +from . 
import seps + +_excluded_clean_chars = ',:;-/\\' +clean_chars = "" +for sep in seps: + if sep not in _excluded_clean_chars: + clean_chars += sep + + +def _potential_before(i, input_string): + """ + Check if the character at position i can be a potential single char separator considering what's before it. + + :param i: + :type i: int + :param input_string: + :type input_string: str + :return: + :rtype: bool + """ + return i - 2 >= 0 and input_string[i] == input_string[i - 2] and input_string[i - 1] not in seps + + +def _potential_after(i, input_string): + """ + Check if the character at position i can be a potential single char separator considering what's after it. + + :param i: + :type i: int + :param input_string: + :type input_string: str + :return: + :rtype: bool + """ + return i + 2 >= len(input_string) or \ + input_string[i + 2] == input_string[i] and input_string[i + 1] not in seps + + +def cleanup(input_string): + """ + Removes and strip separators from input_string (but keep ',;' characters) + + It also keep separators for single characters (Mavels Agents of S.H.I.E.L.D.) + + :param input_string: + :type input_string: str + :return: + :rtype: + """ + clean_string = input_string + for char in clean_chars: + clean_string = clean_string.replace(char, ' ') + + # Restore input separator if they separate single characters. + # Useful for Mavels Agents of S.H.I.E.L.D. 
+ # https://github.com/guessit-io/guessit/issues/278 + + indices = [i for i, letter in enumerate(clean_string) if letter in seps] + + dots = set() + if indices: + clean_list = list(clean_string) + + potential_indices = [] + + for i in indices: + if _potential_before(i, input_string) and _potential_after(i, input_string): + potential_indices.append(i) + + replace_indices = [] + + for potential_index in potential_indices: + if potential_index - 2 in potential_indices or potential_index + 2 in potential_indices: + replace_indices.append(potential_index) + + if replace_indices: + for replace_index in replace_indices: + dots.add(input_string[replace_index]) + clean_list[replace_index] = input_string[replace_index] + clean_string = ''.join(clean_list) + + clean_string = strip(clean_string, ''.join([c for c in seps if c not in dots])) + + clean_string = re.sub(' +', ' ', clean_string) + return clean_string + + +def strip(input_string, chars=seps): + """ + Strip separators from input_string + :param input_string: + :param chars: + :type input_string: + :return: + :rtype: + """ + return input_string.strip(chars) + + +def raw_cleanup(raw): + """ + Cleanup a raw value to perform raw comparison + :param raw: + :type raw: + :return: + :rtype: + """ + return formatters(cleanup, strip)(raw.lower()) + + +def reorder_title(title, articles=('the',), separators=(',', ', ')): + """ + Reorder the title + :param title: + :type title: + :param articles: + :type articles: + :param separators: + :type separators: + :return: + :rtype: + """ + ltitle = title.lower() + for article in articles: + for separator in separators: + suffix = separator + article + if ltitle[-len(suffix):] == suffix: + return title[-len(suffix) + len(separator):] + ' ' + title[:-len(suffix)] + return title diff --git a/libs/guessit/rules/common/numeral.py b/libs/guessit/rules/common/numeral.py new file mode 100644 index 00000000..7c064fdb --- /dev/null +++ b/libs/guessit/rules/common/numeral.py @@ -0,0 +1,165 @@ 
+#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +parse numeral from various formats +""" +from rebulk.remodule import re + +digital_numeral = r'\d{1,4}' + +roman_numeral = r'(?=[MCDLXVI]+)M{0,4}(?:CM|CD|D?C{0,3})(?:XC|XL|L?X{0,3})(?:IX|IV|V?I{0,3})' + +english_word_numeral_list = [ + 'zero', 'one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine', 'ten', + 'eleven', 'twelve', 'thirteen', 'fourteen', 'fifteen', 'sixteen', 'seventeen', 'eighteen', 'nineteen', 'twenty' +] + +french_word_numeral_list = [ + 'zéro', 'un', 'deux', 'trois', 'quatre', 'cinq', 'six', 'sept', 'huit', 'neuf', 'dix', + 'onze', 'douze', 'treize', 'quatorze', 'quinze', 'seize', 'dix-sept', 'dix-huit', 'dix-neuf', 'vingt' +] + +french_alt_word_numeral_list = [ + 'zero', 'une', 'deux', 'trois', 'quatre', 'cinq', 'six', 'sept', 'huit', 'neuf', 'dix', + 'onze', 'douze', 'treize', 'quatorze', 'quinze', 'seize', 'dixsept', 'dixhuit', 'dixneuf', 'vingt' +] + + +def __build_word_numeral(*args): + """ + Build word numeral regexp from list. 
+ + :param args: + :type args: + :param kwargs: + :type kwargs: + :return: + :rtype: + """ + re_ = None + for word_list in args: + for word in word_list: + if not re_: + re_ = r'(?:(?=\w+)' + else: + re_ += '|' + re_ += word + re_ += ')' + return re_ + + +word_numeral = __build_word_numeral(english_word_numeral_list, french_word_numeral_list, french_alt_word_numeral_list) + +numeral = '(?:' + digital_numeral + '|' + roman_numeral + '|' + word_numeral + ')' + +__romanNumeralMap = ( + ('M', 1000), + ('CM', 900), + ('D', 500), + ('CD', 400), + ('C', 100), + ('XC', 90), + ('L', 50), + ('XL', 40), + ('X', 10), + ('IX', 9), + ('V', 5), + ('IV', 4), + ('I', 1) +) + +__romanNumeralPattern = re.compile('^' + roman_numeral + '$') + + +def __parse_roman(value): + """ + convert Roman numeral to integer + + :param value: Value to parse + :type value: string + :return: + :rtype: + """ + if not __romanNumeralPattern.search(value): + raise ValueError('Invalid Roman numeral: %s' % value) + + result = 0 + index = 0 + for num, integer in __romanNumeralMap: + while value[index:index + len(num)] == num: + result += integer + index += len(num) + return result + + +def __parse_word(value): + """ + Convert Word numeral to integer + + :param value: Value to parse + :type value: string + :return: + :rtype: + """ + for word_list in [english_word_numeral_list, french_word_numeral_list, french_alt_word_numeral_list]: + try: + return word_list.index(value.lower()) + except ValueError: + pass + raise ValueError # pragma: no cover + + +_clean_re = re.compile(r'[^\d]*(\d+)[^\d]*') + + +def parse_numeral(value, int_enabled=True, roman_enabled=True, word_enabled=True, clean=True): + """ + Parse a numeric value into integer. + + :param value: Value to parse. Can be an integer, roman numeral or word. 
+ :type value: string + :param int_enabled: + :type int_enabled: + :param roman_enabled: + :type roman_enabled: + :param word_enabled: + :type word_enabled: + :param clean: + :type clean: + :return: Numeric value, or None if value can't be parsed + :rtype: int + """ + # pylint: disable=too-many-branches + if int_enabled: + try: + if clean: + match = _clean_re.match(value) + if match: + clean_value = match.group(1) + return int(clean_value) + return int(value) + except ValueError: + pass + if roman_enabled: + try: + if clean: + for word in value.split(): + try: + return __parse_roman(word.upper()) + except ValueError: + pass + return __parse_roman(value) + except ValueError: + pass + if word_enabled: + try: + if clean: + for word in value.split(): + try: + return __parse_word(word) + except ValueError: # pragma: no cover + pass + return __parse_word(value) # pragma: no cover + except ValueError: # pragma: no cover + pass + raise ValueError('Invalid numeral: ' + value) # pragma: no cover diff --git a/libs/guessit/rules/common/validators.py b/libs/guessit/rules/common/validators.py new file mode 100644 index 00000000..0e79b989 --- /dev/null +++ b/libs/guessit/rules/common/validators.py @@ -0,0 +1,51 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +Validators +""" +from functools import partial + +from rebulk.validators import chars_before, chars_after, chars_surround +from . 
import seps + +seps_before = partial(chars_before, seps) +seps_after = partial(chars_after, seps) +seps_surround = partial(chars_surround, seps) + + +def int_coercable(string): + """ + Check if string can be coerced to int + :param string: + :type string: + :return: + :rtype: + """ + try: + int(string) + return True + except ValueError: + return False + + +def compose(*validators): + """ + Compose validators functions + :param validators: + :type validators: + :return: + :rtype: + """ + def composed(string): + """ + Composed validators function + :param string: + :type string: + :return: + :rtype: + """ + for validator in validators: + if not validator(string): + return False + return True + return composed diff --git a/libs/guessit/rules/common/words.py b/libs/guessit/rules/common/words.py new file mode 100644 index 00000000..b73b1eef --- /dev/null +++ b/libs/guessit/rules/common/words.py @@ -0,0 +1,77 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +Words utils +""" +from collections import namedtuple + +from guessit.rules.common import seps + +_Word = namedtuple('_Word', ['span', 'value']) + + +def iter_words(string): + """ + Iterate on all words in a string + :param string: + :type string: + :return: + :rtype: iterable[str] + """ + i = 0 + last_sep_index = -1 + inside_word = False + for char in string: + if ord(char) < 128 and char in seps: # Make sure we don't exclude unicode characters. 
+ if inside_word: + yield _Word(span=(last_sep_index+1, i), value=string[last_sep_index+1:i]) + inside_word = False + last_sep_index = i + else: + inside_word = True + i += 1 + if inside_word: + yield _Word(span=(last_sep_index+1, i), value=string[last_sep_index+1:i]) + + +# list of common words which could be interpreted as properties, but which +# are far too common to be able to say they represent a property in the +# middle of a string (where they most likely carry their commmon meaning) +COMMON_WORDS = frozenset([ + # english words + 'is', 'it', 'am', 'mad', 'men', 'man', 'run', 'sin', 'st', 'to', + 'no', 'non', 'war', 'min', 'new', 'car', 'day', 'bad', 'bat', 'fan', + 'fry', 'cop', 'zen', 'gay', 'fat', 'one', 'cherokee', 'got', 'an', 'as', + 'cat', 'her', 'be', 'hat', 'sun', 'may', 'my', 'mr', 'rum', 'pi', 'bb', + 'bt', 'tv', 'aw', 'by', 'md', 'mp', 'cd', 'lt', 'gt', 'in', 'ad', 'ice', + 'ay', 'at', 'star', 'so', 'he', 'do', 'ax', 'mx', + # french words + 'bas', 'de', 'le', 'son', 'ne', 'ca', 'ce', 'et', 'que', + 'mal', 'est', 'vol', 'or', 'mon', 'se', 'je', 'tu', 'me', + 'ne', 'ma', 'va', 'au', 'lu', + # japanese words, + 'wa', 'ga', 'ao', + # spanish words + 'la', 'el', 'del', 'por', 'mar', 'al', + # other + 'ind', 'arw', 'ts', 'ii', 'bin', 'chan', 'ss', 'san', 'oss', 'iii', + 'vi', 'ben', 'da', 'lt', 'ch', 'sr', 'ps', 'cx', 'vo', + # new from babelfish + 'mkv', 'avi', 'dmd', 'the', 'dis', 'cut', 'stv', 'des', 'dia', 'and', + 'cab', 'sub', 'mia', 'rim', 'las', 'une', 'par', 'srt', 'ano', 'toy', + 'job', 'gag', 'reel', 'www', 'for', 'ayu', 'csi', 'ren', 'moi', 'sur', + 'fer', 'fun', 'two', 'big', 'psy', 'air', + # movie title + 'brazil', 'jordan', + # release groups + 'bs', # Bosnian + 'kz', + # countries + 'gt', 'lt', 'im', + # part/pt + 'pt', + # screener + 'scr', + # quality + 'sd', 'hr' +]) diff --git a/libs/guessit/rules/markers/__init__.py b/libs/guessit/rules/markers/__init__.py new file mode 100644 index 00000000..6a48a13b --- /dev/null +++ 
b/libs/guessit/rules/markers/__init__.py @@ -0,0 +1,5 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +Markers +""" diff --git a/libs/guessit/rules/markers/groups.py b/libs/guessit/rules/markers/groups.py new file mode 100644 index 00000000..bbe69d1c --- /dev/null +++ b/libs/guessit/rules/markers/groups.py @@ -0,0 +1,49 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +Groups markers (...), [...] and {...} +""" +from rebulk import Rebulk + + +def groups(): + """ + Builder for rebulk object. + :return: Created Rebulk object + :rtype: Rebulk + """ + rebulk = Rebulk() + rebulk.defaults(name="group", marker=True) + + starting = '([{' + ending = ')]}' + + def mark_groups(input_string): + """ + Functional pattern to mark groups (...), [...] and {...}. + + :param input_string: + :return: + """ + openings = ([], [], []) + i = 0 + + ret = [] + for char in input_string: + start_type = starting.find(char) + if start_type > -1: + openings[start_type].append(i) + + i += 1 + + end_type = ending.find(char) + if end_type > -1: + try: + start_index = openings[end_type].pop() + ret.append((start_index, i)) + except IndexError: + pass + return ret + + rebulk.functional(mark_groups) + return rebulk diff --git a/libs/guessit/rules/markers/path.py b/libs/guessit/rules/markers/path.py new file mode 100644 index 00000000..5e487ea6 --- /dev/null +++ b/libs/guessit/rules/markers/path.py @@ -0,0 +1,43 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +Path markers +""" +from rebulk import Rebulk + +from rebulk.utils import find_all + + +def path(): + """ + Builder for rebulk object. + :return: Created Rebulk object + :rtype: Rebulk + """ + rebulk = Rebulk() + rebulk.defaults(name="path", marker=True) + + def mark_path(input_string, context): + """ + Functional pattern to mark path elements. 
+ + :param input_string: + :return: + """ + ret = [] + if context.get('name_only', False): + ret.append((0, len(input_string))) + else: + indices = list(find_all(input_string, '/')) + indices += list(find_all(input_string, '\\')) + indices += [-1, len(input_string)] + + indices.sort() + + for i in range(0, len(indices) - 1): + ret.append((indices[i] + 1, indices[i + 1])) + + return ret + + rebulk.functional(mark_path) + return rebulk diff --git a/libs/guessit/rules/processors.py b/libs/guessit/rules/processors.py new file mode 100644 index 00000000..3480a9d1 --- /dev/null +++ b/libs/guessit/rules/processors.py @@ -0,0 +1,198 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +Processors +""" +from collections import defaultdict +import copy + +import six + +from rebulk import Rebulk, Rule, CustomRule, POST_PROCESS, PRE_PROCESS, AppendMatch, RemoveMatch +from guessit.rules.common.words import iter_words +from .common.formatters import cleanup +from .common.comparators import marker_sorted +from .common.date import valid_year + + +class EnlargeGroupMatches(CustomRule): + """ + Enlarge matches that are starting and/or ending group to include brackets in their span. 
+ :param matches: + :type matches: + :return: + :rtype: + """ + priority = PRE_PROCESS + + def when(self, matches, context): + starting = [] + ending = [] + + for group in matches.markers.named('group'): + for match in matches.starting(group.start + 1): + starting.append(match) + + for match in matches.ending(group.end - 1): + ending.append(match) + + if starting or ending: + return starting, ending + + def then(self, matches, when_response, context): + starting, ending = when_response + for match in starting: + matches.remove(match) + match.start -= 1 + match.raw_start += 1 + matches.append(match) + + for match in ending: + matches.remove(match) + match.end += 1 + match.raw_end -= 1 + matches.append(match) + + +class EquivalentHoles(Rule): + """ + Creates equivalent matches for holes that have same values than existing (case insensitive) + """ + priority = POST_PROCESS + consequence = AppendMatch + + def when(self, matches, context): + new_matches = [] + + for filepath in marker_sorted(matches.markers.named('path'), matches): + holes = matches.holes(start=filepath.start, end=filepath.end, formatter=cleanup) + for name in matches.names: + for hole in list(holes): + for current_match in matches.named(name): + if isinstance(current_match.value, six.string_types) and \ + hole.value.lower() == current_match.value.lower(): + if 'equivalent-ignore' in current_match.tags: + continue + new_value = _preferred_string(hole.value, current_match.value) + if hole.value != new_value: + hole.value = new_value + if current_match.value != new_value: + current_match.value = new_value + hole.name = name + hole.tags = ['equivalent'] + new_matches.append(hole) + if hole in holes: + holes.remove(hole) + + return new_matches + + +class RemoveAmbiguous(Rule): + """ + If multiple match are found with same name and different values, keep the one in the most valuable filepart. + Also keep others match with same name and values than those kept ones. 
+ """ + priority = POST_PROCESS + consequence = RemoveMatch + + def when(self, matches, context): + fileparts = marker_sorted(matches.markers.named('path'), matches) + + previous_fileparts_names = set() + values = defaultdict(list) + + to_remove = [] + for filepart in fileparts: + filepart_matches = matches.range(filepart.start, filepart.end) + + filepart_names = set() + for match in filepart_matches: + filepart_names.add(match.name) + if match.name in previous_fileparts_names: + if match.value not in values[match.name]: + to_remove.append(match) + else: + if match.value not in values[match.name]: + values[match.name].append(match.value) + + previous_fileparts_names.update(filepart_names) + + return to_remove + + +def _preferred_string(value1, value2): # pylint:disable=too-many-return-statements + """ + Retrieves preferred title from both values. + :param value1: + :type value1: str + :param value2: + :type value2: str + :return: The preferred title + :rtype: str + """ + if value1 == value2: + return value1 + if value1.istitle() and not value2.istitle(): + return value1 + if not value1.isupper() and value2.isupper(): + return value1 + if not value1.isupper() and value1[0].isupper() and not value2[0].isupper(): + return value1 + if _count_title_words(value1) > _count_title_words(value2): + return value1 + return value2 + + +def _count_title_words(value): + """ + Count only many words are titles in value. + :param value: + :type value: + :return: + :rtype: + """ + ret = 0 + for word in iter_words(value): + if word.value.istitle(): + ret += 1 + return ret + + +class SeasonYear(Rule): + """ + If a season is a valid year and no year was found, create an match with year. 
+ """ + priority = POST_PROCESS + consequence = AppendMatch + + def when(self, matches, context): + ret = [] + if not matches.named('year'): + for season in matches.named('season'): + if valid_year(season.value): + year = copy.copy(season) + year.name = 'year' + ret.append(year) + return ret + + +class Processors(CustomRule): + """ + Empty rule for ordering post_processing properly. + """ + priority = POST_PROCESS + + def when(self, matches, context): + pass + + def then(self, matches, when_response, context): # pragma: no cover + pass + + +def processors(): + """ + Builder for rebulk object. + :return: Created Rebulk object + :rtype: Rebulk + """ + return Rebulk().rules(EnlargeGroupMatches, EquivalentHoles, RemoveAmbiguous, SeasonYear, Processors) diff --git a/libs/guessit/rules/properties/__init__.py b/libs/guessit/rules/properties/__init__.py new file mode 100644 index 00000000..e0a24eaf --- /dev/null +++ b/libs/guessit/rules/properties/__init__.py @@ -0,0 +1,5 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +Properties +""" diff --git a/libs/guessit/rules/properties/audio_codec.py b/libs/guessit/rules/properties/audio_codec.py new file mode 100644 index 00000000..c88a6e7e --- /dev/null +++ b/libs/guessit/rules/properties/audio_codec.py @@ -0,0 +1,164 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +audio_codec, audio_profile and audio_channels property +""" +from rebulk.remodule import re + +from rebulk import Rebulk, Rule, RemoveMatch +from ..common import dash +from ..common.validators import seps_before, seps_after + +audio_properties = ['audio_codec', 'audio_profile', 'audio_channels'] + + +def audio_codec(): + """ + Builder for rebulk object. 
+ :return: Created Rebulk object + :rtype: Rebulk + """ + rebulk = Rebulk().regex_defaults(flags=re.IGNORECASE, abbreviations=[dash]).string_defaults(ignore_case=True) + + def audio_codec_priority(match1, match2): + """ + Gives priority to audio_codec + :param match1: + :type match1: + :param match2: + :type match2: + :return: + :rtype: + """ + if match1.name == 'audio_codec' and match2.name in ['audio_profile', 'audio_channels']: + return match2 + if match1.name in ['audio_profile', 'audio_channels'] and match2.name == 'audio_codec': + return match1 + return '__default__' + + rebulk.defaults(name="audio_codec", conflict_solver=audio_codec_priority) + + rebulk.regex("MP3", "LAME", r"LAME(?:\d)+-?(?:\d)+", value="MP3") + rebulk.regex("Dolby", "DolbyDigital", "Dolby-Digital", "DD", value="DolbyDigital") + rebulk.regex("DolbyAtmos", "Dolby-Atmos", "Atmos", value="DolbyAtmos") + rebulk.regex("AAC", value="AAC") + rebulk.regex("AC3D?", value="AC3") + rebulk.regex("Flac", value="FLAC") + rebulk.regex("DTS", value="DTS") + rebulk.regex("True-?HD", value="TrueHD") + + rebulk.defaults(name="audio_profile") + rebulk.string("HD", value="HD", tags="DTS") + rebulk.regex("HD-?MA", value="HDMA", tags="DTS") + rebulk.string("HE", value="HE", tags="AAC") + rebulk.string("LC", value="LC", tags="AAC") + rebulk.string("HQ", value="HQ", tags="AC3") + + rebulk.defaults(name="audio_channels") + rebulk.regex(r'(7[\W_][01](?:ch)?)(?:[^\d]|$)', value='7.1', children=True) + rebulk.regex(r'(5[\W_][01](?:ch)?)(?:[^\d]|$)', value='5.1', children=True) + rebulk.regex(r'(2[\W_]0(?:ch)?)(?:[^\d]|$)', value='2.0', children=True) + rebulk.string('7ch', '8ch', value='7.1') + rebulk.string('5ch', '6ch', value='5.1') + rebulk.string('2ch', 'stereo', value='2.0') + rebulk.string('1ch', 'mono', value='1.0') + + rebulk.rules(DtsRule, AacRule, Ac3Rule, AudioValidatorRule, HqConflictRule) + + return rebulk + + +class AudioValidatorRule(Rule): + """ + Remove audio properties if not surrounded by separators 
and not next each others + """ + priority = 64 + consequence = RemoveMatch + + def when(self, matches, context): + ret = [] + + audio_list = matches.range(predicate=lambda match: match.name in audio_properties) + for audio in audio_list: + if not seps_before(audio): + valid_before = matches.range(audio.start - 1, audio.start, + lambda match: match.name in audio_properties) + if not valid_before: + ret.append(audio) + continue + if not seps_after(audio): + valid_after = matches.range(audio.end, audio.end + 1, + lambda match: match.name in audio_properties) + if not valid_after: + ret.append(audio) + continue + + return ret + + +class AudioProfileRule(Rule): + """ + Abstract rule to validate audio profiles + """ + priority = 64 + dependency = AudioValidatorRule + consequence = RemoveMatch + + def __init__(self, codec): + super(AudioProfileRule, self).__init__() + self.codec = codec + + def when(self, matches, context): + profile_list = matches.named('audio_profile', lambda match: self.codec in match.tags) + ret = [] + for profile in profile_list: + codec = matches.previous(profile, lambda match: match.name == 'audio_codec' and match.value == self.codec) + if not codec: + codec = matches.next(profile, lambda match: match.name == 'audio_codec' and match.value == self.codec) + if not codec: + ret.append(profile) + return ret + + +class DtsRule(AudioProfileRule): + """ + Rule to validate DTS profile + """ + + def __init__(self): + super(DtsRule, self).__init__("DTS") + + +class AacRule(AudioProfileRule): + """ + Rule to validate AAC profile + """ + + def __init__(self): + super(AacRule, self).__init__("AAC") + + +class Ac3Rule(AudioProfileRule): + """ + Rule to validate AC3 profile + """ + + def __init__(self): + super(Ac3Rule, self).__init__("AC3") + + +class HqConflictRule(Rule): + """ + Solve conflict between HQ from other property and from audio_profile. 
+ """ + + dependency = [DtsRule, AacRule, Ac3Rule] + consequence = RemoveMatch + + def when(self, matches, context): + hq_audio = matches.named('audio_profile', lambda match: match.value == 'HQ') + hq_audio_spans = [match.span for match in hq_audio] + hq_other = matches.named('other', lambda match: match.span in hq_audio_spans) + + if hq_other: + return hq_other diff --git a/libs/guessit/rules/properties/bonus.py b/libs/guessit/rules/properties/bonus.py new file mode 100644 index 00000000..e37613e9 --- /dev/null +++ b/libs/guessit/rules/properties/bonus.py @@ -0,0 +1,50 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +bonus property +""" +from rebulk.remodule import re + +from rebulk import Rebulk, AppendMatch, Rule + +from .title import TitleFromPosition +from ..common.formatters import cleanup +from ..common.validators import seps_surround + + +def bonus(): + """ + Builder for rebulk object. + :return: Created Rebulk object + :rtype: Rebulk + """ + rebulk = Rebulk().regex_defaults(flags=re.IGNORECASE) + + rebulk.regex(r'x(\d+)', name='bonus', private_parent=True, children=True, formatter=int, + validator={'__parent__': lambda match: seps_surround}, + conflict_solver=lambda match, conflicting: match + if conflicting.name in ['video_codec', 'episode'] and 'bonus-conflict' not in conflicting.tags + else '__default__') + + rebulk.rules(BonusTitleRule) + + return rebulk + + +class BonusTitleRule(Rule): + """ + Find bonus title after bonus. 
+ """ + dependency = TitleFromPosition + consequence = AppendMatch + + properties = {'bonus_title': [None]} + + def when(self, matches, context): + bonus_number = matches.named('bonus', lambda match: not match.private, index=0) + if bonus_number: + filepath = matches.markers.at_match(bonus_number, lambda marker: marker.name == 'path', 0) + hole = matches.holes(bonus_number.end, filepath.end + 1, formatter=cleanup, index=0) + if hole and hole.value: + hole.name = 'bonus_title' + return hole diff --git a/libs/guessit/rules/properties/cds.py b/libs/guessit/rules/properties/cds.py new file mode 100644 index 00000000..db1407d6 --- /dev/null +++ b/libs/guessit/rules/properties/cds.py @@ -0,0 +1,35 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +cd and cd_count properties +""" +from rebulk.remodule import re + +from rebulk import Rebulk +from ..common import dash + + +def cds(): + """ + Builder for rebulk object. + :return: Created Rebulk object + :rtype: Rebulk + """ + rebulk = Rebulk().regex_defaults(flags=re.IGNORECASE, abbreviations=[dash]) + + rebulk.regex(r'cd-?(?P\d+)(?:-?of-?(?P\d+))?', + validator={'cd': lambda match: 0 < match.value < 100, + 'cd_count': lambda match: 0 < match.value < 100}, + formatter={'cd': int, 'cd_count': int}, + children=True, + private_parent=True, + properties={'cd': [None], 'cd_count': [None]}) + rebulk.regex(r'(?P\d+)-?cds?', + validator={'cd': lambda match: 0 < match.value < 100, + 'cd_count': lambda match: 0 < match.value < 100}, + formatter={'cd_count': int}, + children=True, + private_parent=True, + properties={'cd': [None], 'cd_count': [None]}) + + return rebulk diff --git a/libs/guessit/rules/properties/container.py b/libs/guessit/rules/properties/container.py new file mode 100644 index 00000000..747a3ebc --- /dev/null +++ b/libs/guessit/rules/properties/container.py @@ -0,0 +1,53 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +container property +""" +from rebulk.remodule import re + +from rebulk import Rebulk 
+from ..common.validators import seps_surround +from ...reutils import build_or_pattern + + +def container(): + """ + Builder for rebulk object. + :return: Created Rebulk object + :rtype: Rebulk + """ + rebulk = Rebulk().regex_defaults(flags=re.IGNORECASE).string_defaults(ignore_case=True) + rebulk.defaults(name='container', + formatter=lambda value: value[1:], + tags=['extension'], + conflict_solver=lambda match, other: other + if other.name in ['format', 'video_codec'] or + other.name == 'container' and 'extension' not in other.tags + else '__default__') + + subtitles = ['srt', 'idx', 'sub', 'ssa', 'ass'] + info = ['nfo'] + videos = ['3g2', '3gp', '3gp2', 'asf', 'avi', 'divx', 'flv', 'm4v', 'mk2', + 'mka', 'mkv', 'mov', 'mp4', 'mp4a', 'mpeg', 'mpg', 'ogg', 'ogm', + 'ogv', 'qt', 'ra', 'ram', 'rm', 'ts', 'wav', 'webm', 'wma', 'wmv', + 'iso', 'vob'] + torrent = ['torrent'] + + rebulk.regex(r'\.'+build_or_pattern(subtitles)+'$', exts=subtitles, tags=['extension', 'subtitle']) + rebulk.regex(r'\.'+build_or_pattern(info)+'$', exts=info, tags=['extension', 'info']) + rebulk.regex(r'\.'+build_or_pattern(videos)+'$', exts=videos, tags=['extension', 'video']) + rebulk.regex(r'\.'+build_or_pattern(torrent)+'$', exts=torrent, tags=['extension', 'torrent']) + + rebulk.defaults(name='container', + validator=seps_surround, + formatter=lambda s: s.upper(), + conflict_solver=lambda match, other: match + if other.name in ['format', + 'video_codec'] or other.name == 'container' and 'extension' in other.tags + else '__default__') + + rebulk.string(*[sub for sub in subtitles if sub not in ['sub']], tags=['subtitle']) + rebulk.string(*videos, tags=['video']) + rebulk.string(*torrent, tags=['torrent']) + + return rebulk diff --git a/libs/guessit/rules/properties/country.py b/libs/guessit/rules/properties/country.py new file mode 100644 index 00000000..8f03b498 --- /dev/null +++ b/libs/guessit/rules/properties/country.py @@ -0,0 +1,109 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- 
+""" +country property +""" +# pylint: disable=no-member +import babelfish + +from rebulk import Rebulk +from ..common.words import COMMON_WORDS, iter_words + + +def country(): + """ + Builder for rebulk object. + :return: Created Rebulk object + :rtype: Rebulk + """ + rebulk = Rebulk().defaults(name='country') + + rebulk.functional(find_countries, + #  Prefer language and any other property over country if not US or GB. + conflict_solver=lambda match, other: match + if other.name != 'language' or match.value not in [babelfish.Country('US'), + babelfish.Country('GB')] + else other, + properties={'country': [None]}) + + return rebulk + + +COUNTRIES_SYN = {'ES': ['españa'], + 'GB': ['UK'], + 'BR': ['brazilian', 'bra'], + 'CA': ['québec', 'quebec', 'qc'], + # FIXME: this one is a bit of a stretch, not sure how to do it properly, though... + 'MX': ['Latinoamérica', 'latin america']} + + +class GuessitCountryConverter(babelfish.CountryReverseConverter): # pylint: disable=missing-docstring + def __init__(self): + self.guessit_exceptions = {} + + for alpha2, synlist in COUNTRIES_SYN.items(): + for syn in synlist: + self.guessit_exceptions[syn.lower()] = alpha2 + + @property + def codes(self): # pylint: disable=missing-docstring + return (babelfish.country_converters['name'].codes | + frozenset(babelfish.COUNTRIES.values()) | + frozenset(self.guessit_exceptions.keys())) + + def convert(self, alpha2): + if alpha2 == 'GB': + return 'UK' + return str(babelfish.Country(alpha2)) + + def reverse(self, name): + # exceptions come first, as they need to override a potential match + # with any of the other guessers + try: + return self.guessit_exceptions[name.lower()] + except KeyError: + pass + + try: + return babelfish.Country(name.upper()).alpha2 + except ValueError: + pass + + for conv in [babelfish.Country.fromname]: + try: + return conv(name).alpha2 + except babelfish.CountryReverseError: + pass + + raise babelfish.CountryReverseError(name) + + 
+babelfish.country_converters['guessit'] = GuessitCountryConverter() + + +def is_allowed_country(country_object, context=None): + """ + Check if country is allowed. + """ + if context and context.get('allowed_countries'): + allowed_countries = context.get('allowed_countries') + return country_object.name.lower() in allowed_countries or country_object.alpha2.lower() in allowed_countries + return True + + +def find_countries(string, context=None): + """ + Find countries in given string. + """ + ret = [] + for word_match in iter_words(string.strip().lower()): + word = word_match.value + if word.lower() in COMMON_WORDS: + continue + try: + country_object = babelfish.Country.fromguessit(word) + if is_allowed_country(country_object, context): + ret.append((word_match.span[0], word_match.span[1], {'value': country_object})) + except babelfish.Error: + continue + return ret diff --git a/libs/guessit/rules/properties/crc.py b/libs/guessit/rules/properties/crc.py new file mode 100644 index 00000000..f655bc13 --- /dev/null +++ b/libs/guessit/rules/properties/crc.py @@ -0,0 +1,85 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +crc and uuid properties +""" +from rebulk.remodule import re + +from rebulk import Rebulk +from ..common.validators import seps_surround + + +def crc(): + """ + Builder for rebulk object. 
+ :return: Created Rebulk object + :rtype: Rebulk + """ + rebulk = Rebulk().regex_defaults(flags=re.IGNORECASE) + rebulk.defaults(validator=seps_surround) + + rebulk.regex('(?:[a-fA-F]|[0-9]){8}', name='crc32', + conflict_solver=lambda match, other: match + if other.name in ['episode', 'season'] + else '__default__') + + rebulk.functional(guess_idnumber, name='uuid', + conflict_solver=lambda match, other: match + if other.name in ['episode', 'season'] + else '__default__') + return rebulk + + +_DIGIT = 0 +_LETTER = 1 +_OTHER = 2 + +_idnum = re.compile(r'(?P[a-zA-Z0-9-]{20,})') # 1.0, (0, 0)) + + +def guess_idnumber(string): + """ + Guess id number function + :param string: + :type string: + :return: + :rtype: + """ + # pylint:disable=invalid-name + ret = [] + + matches = list(_idnum.finditer(string)) + for match in matches: + result = match.groupdict() + switch_count = 0 + switch_letter_count = 0 + letter_count = 0 + last_letter = None + + last = _LETTER + for c in result['uuid']: + if c in '0123456789': + ci = _DIGIT + elif c in 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ': + ci = _LETTER + if c != last_letter: + switch_letter_count += 1 + last_letter = c + letter_count += 1 + else: + ci = _OTHER + + if ci != last: + switch_count += 1 + + last = ci + + # only return the result as probable if we alternate often between + # char type (more likely for hash values than for common words) + switch_ratio = float(switch_count) / len(result['uuid']) + letters_ratio = (float(switch_letter_count) / letter_count) if letter_count > 0 else 1 + + if switch_ratio > 0.4 and letters_ratio > 0.4: + ret.append(match.span()) + + return ret diff --git a/libs/guessit/rules/properties/date.py b/libs/guessit/rules/properties/date.py new file mode 100644 index 00000000..0b6083bd --- /dev/null +++ b/libs/guessit/rules/properties/date.py @@ -0,0 +1,72 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +date and year properties +""" +from rebulk import Rebulk, RemoveMatch, Rule 
+ +from ..common.date import search_date, valid_year +from ..common.validators import seps_surround + + +def date(): + """ + Builder for rebulk object. + :return: Created Rebulk object + :rtype: Rebulk + """ + rebulk = Rebulk().defaults(validator=seps_surround) + + rebulk.regex(r"\d{4}", name="year", formatter=int, + validator=lambda match: seps_surround(match) and valid_year(match.value)) + + def date_functional(string, context): + """ + Search for date in the string and retrieves match + + :param string: + :return: + """ + + ret = search_date(string, context.get('date_year_first'), context.get('date_day_first')) + if ret: + return ret[0], ret[1], {'value': ret[2]} + + rebulk.functional(date_functional, name="date", properties={'date': [None]}, + conflict_solver=lambda match, other: other + if other.name in ['episode', 'season'] + else '__default__') + + rebulk.rules(KeepMarkedYearInFilepart) + + return rebulk + + +class KeepMarkedYearInFilepart(Rule): + """ + Keep first years marked with [](){} in filepart, or if no year is marked, ensure it won't override titles. + """ + priority = 64 + consequence = RemoveMatch + + def when(self, matches, context): + ret = [] + if len(matches.named('year')) > 1: + for filepart in matches.markers.named('path'): + years = matches.range(filepart.start, filepart.end, lambda match: match.name == 'year') + if len(years) > 1: + group_years = [] + ungroup_years = [] + for year in years: + if matches.markers.at_match(year, lambda marker: marker.name == 'group'): + group_years.append(year) + else: + ungroup_years.append(year) + if group_years and ungroup_years: + ret.extend(ungroup_years) + ret.extend(group_years[1:]) # Keep the first year in marker. + elif not group_years: + ret.append(ungroup_years[0]) # Keep first year for title. 
+ if len(ungroup_years) > 2: + ret.extend(ungroup_years[2:]) + return ret diff --git a/libs/guessit/rules/properties/edition.py b/libs/guessit/rules/properties/edition.py new file mode 100644 index 00000000..429ba8d3 --- /dev/null +++ b/libs/guessit/rules/properties/edition.py @@ -0,0 +1,31 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +edition property +""" +from rebulk.remodule import re + +from rebulk import Rebulk +from ..common import dash +from ..common.validators import seps_surround + + +def edition(): + """ + Builder for rebulk object. + :return: Created Rebulk object + :rtype: Rebulk + """ + rebulk = Rebulk().regex_defaults(flags=re.IGNORECASE, abbreviations=[dash]).string_defaults(ignore_case=True) + rebulk.defaults(name='edition', validator=seps_surround) + + rebulk.regex('collector', 'collector-edition', 'edition-collector', value='Collector Edition') + rebulk.regex('special-edition', 'edition-special', value='Special Edition', + conflict_solver=lambda match, other: other + if other.name == 'episode_details' and other.value == 'Special' + else '__default__') + rebulk.regex('criterion-edition', 'edition-criterion', value='Criterion Edition') + rebulk.regex('deluxe', 'deluxe-edition', 'edition-deluxe', value='Deluxe Edition') + rebulk.regex('director\'?s?-cut', 'director\'?s?-cut-edition', 'edition-director\'?s?-cut', value='Director\'s cut') + + return rebulk diff --git a/libs/guessit/rules/properties/episode_title.py b/libs/guessit/rules/properties/episode_title.py new file mode 100644 index 00000000..9d6e4abf --- /dev/null +++ b/libs/guessit/rules/properties/episode_title.py @@ -0,0 +1,196 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +Episode title +""" +from collections import defaultdict + +from rebulk import Rebulk, Rule, AppendMatch, RenameMatch +from ..common import seps, title_seps +from ..properties.title import TitleFromPosition, TitleBaseRule +from ..common.formatters import cleanup + + +def episode_title(): + """ + 
Builder for rebulk object. + :return: Created Rebulk object + :rtype: Rebulk + """ + rebulk = Rebulk().rules(EpisodeTitleFromPosition, + AlternativeTitleReplace, + TitleToEpisodeTitle, + Filepart3EpisodeTitle, + Filepart2EpisodeTitle) + return rebulk + + +class TitleToEpisodeTitle(Rule): + """ + If multiple different title are found, convert the one following episode number to episode_title. + """ + dependency = TitleFromPosition + + def when(self, matches, context): + titles = matches.named('title') + + if len(titles) < 2: + return + + title_groups = defaultdict(list) + for title in titles: + title_groups[title.value].append(title) + + episode_titles = [] + main_titles = [] + for title in titles: + if matches.previous(title, lambda match: match.name == 'episode'): + episode_titles.append(title) + else: + main_titles.append(title) + + if episode_titles: + return episode_titles + + def then(self, matches, when_response, context): + for title in when_response: + matches.remove(title) + title.name = 'episode_title' + matches.append(title) + + +class EpisodeTitleFromPosition(TitleBaseRule): + """ + Add episode title match in existing matches + Must run after TitleFromPosition rule. + """ + dependency = TitleToEpisodeTitle + + def hole_filter(self, hole, matches): + episode = matches.previous(hole, + lambda previous: any(name in previous.names + for name in ['episode', 'episode_details', + 'episode_count', 'season', 'season_count', + 'date', 'title', 'year']), + 0) + + crc32 = matches.named('crc32') + + return episode or crc32 + + def filepart_filter(self, filepart, matches): + # Filepart where title was found. 
+ if matches.range(filepart.start, filepart.end, lambda match: match.name == 'title'): + return True + return False + + def should_remove(self, match, matches, filepart, hole, context): + if match.name == 'episode_details': + return False + return super(EpisodeTitleFromPosition, self).should_remove(match, matches, filepart, hole, context) + + def __init__(self): + super(EpisodeTitleFromPosition, self).__init__('episode_title', ['title']) + + def when(self, matches, context): + if matches.named('episode_title'): + return + return super(EpisodeTitleFromPosition, self).when(matches, context) + + +class AlternativeTitleReplace(Rule): + """ + If alternateTitle was found and title is next to episode, season or date, replace it with episode_title. + """ + dependency = EpisodeTitleFromPosition + consequence = RenameMatch + + def when(self, matches, context): + if matches.named('episode_title'): + return + + alternative_title = matches.range(predicate=lambda match: match.name == 'alternative_title', index=0) + if alternative_title: + main_title = matches.chain_before(alternative_title.start, seps=seps, + predicate=lambda match: 'title' in match.tags, index=0) + if main_title: + episode = matches.previous(main_title, + lambda previous: any(name in previous.names + for name in ['episode', 'episode_details', + 'episode_count', 'season', + 'season_count', + 'date', 'title', 'year']), + 0) + + crc32 = matches.named('crc32') + + if episode or crc32: + return alternative_title + + def then(self, matches, when_response, context): + matches.remove(when_response) + when_response.name = 'episode_title' + matches.append(when_response) + + +class Filepart3EpisodeTitle(Rule): + """ + If we have at least 3 filepart structured like this: + + Serie name/SO1/E01-episode_title.mkv + AAAAAAAAAA/BBB/CCCCCCCCCCCCCCCCCCCC + + If CCCC contains episode and BBB contains seasonNumber + Then title is to be found in AAAA. 
+ """ + consequence = AppendMatch('title') + + def when(self, matches, context): + fileparts = matches.markers.named('path') + if len(fileparts) < 3: + return + + filename = fileparts[-1] + directory = fileparts[-2] + subdirectory = fileparts[-3] + + episode_number = matches.range(filename.start, filename.end, lambda match: match.name == 'episode', 0) + if episode_number: + season = matches.range(directory.start, directory.end, lambda match: match.name == 'season', 0) + + if season: + hole = matches.holes(subdirectory.start, subdirectory.end, + formatter=cleanup, seps=title_seps, predicate=lambda match: match.value, + index=0) + if hole: + return hole + + +class Filepart2EpisodeTitle(Rule): + """ + If we have at least 2 filepart structured like this: + + Serie name SO1/E01-episode_title.mkv + AAAAAAAAAAAAA/BBBBBBBBBBBBBBBBBBBBB + + If BBBB contains episode and AAA contains a hole followed by seasonNumber + Then title is to be found in AAAA. + """ + consequence = AppendMatch('title') + + def when(self, matches, context): + fileparts = matches.markers.named('path') + if len(fileparts) < 2: + return + + filename = fileparts[-1] + directory = fileparts[-2] + + episode_number = matches.range(filename.start, filename.end, lambda match: match.name == 'episode', 0) + if episode_number: + season = matches.range(directory.start, directory.end, lambda match: match.name == 'season', 0) + if season: + hole = matches.holes(directory.start, directory.end, formatter=cleanup, seps=title_seps, + predicate=lambda match: match.value, index=0) + if hole: + return hole diff --git a/libs/guessit/rules/properties/episodes.py b/libs/guessit/rules/properties/episodes.py new file mode 100644 index 00000000..65722835 --- /dev/null +++ b/libs/guessit/rules/properties/episodes.py @@ -0,0 +1,516 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +episode, season, episode_count, season_count and episode_details properties +""" +import copy +from collections import defaultdict + +from rebulk 
import Rebulk, RemoveMatch, Rule, AppendMatch, RenameMatch +from rebulk.match import Match +from rebulk.remodule import re +from rebulk.utils import is_iterable + +from .title import TitleFromPosition +from ..common import dash, alt_dash, seps +from ..common.formatters import strip +from ..common.numeral import numeral, parse_numeral +from ..common.validators import compose, seps_surround, seps_before, int_coercable +from ...reutils import build_or_pattern + + +def episodes(): + """ + Builder for rebulk object. + :return: Created Rebulk object + :rtype: Rebulk + """ + # pylint: disable=too-many-branches,too-many-statements,too-many-locals + rebulk = Rebulk() + rebulk.regex_defaults(flags=re.IGNORECASE).string_defaults(ignore_case=True) + rebulk.defaults(private_names=['episodeSeparator', 'seasonSeparator']) + + def season_episode_conflict_solver(match, other): + """ + Conflict solver for episode/season patterns + + :param match: + :param other: + :return: + """ + if match.name in ['season', 'episode'] and other.name in ['screen_size', 'video_codec', + 'audio_codec', 'audio_channels', + 'container', 'date']: + return match + elif match.name in ['season', 'episode'] and other.name in ['season', 'episode'] \ + and match.initiator != other.initiator: + if 'weak-episode' in match.tags: + return match + if 'weak-episode' in other.tags: + return other + if 'x' in match.initiator.raw.lower(): + return match + if 'x' in other.initiator.raw.lower(): + return other + return '__default__' + + season_episode_seps = [] + season_episode_seps.extend(seps) + season_episode_seps.extend(['x', 'X', 'e', 'E']) + + season_words = ['season', 'saison', 'serie', 'seasons', 'saisons', 'series'] + episode_words = ['episode', 'episodes', 'ep'] + of_words = ['of', 'sur'] + all_words = ['All'] + season_markers = ["S"] + season_ep_markers = ["x"] + episode_markers = ["xE", "Ex", "EP", "E", "x"] + range_separators = ['-', '~', 'to', 'a'] + weak_discrete_separators = list(sep for sep in seps if 
sep not in range_separators) + strong_discrete_separators = ['+', '&', 'and', 'et'] + discrete_separators = strong_discrete_separators + weak_discrete_separators + + def ordering_validator(match): + """ + Validator for season list. They should be in natural order to be validated. + + episode/season separated by a weak discrete separator should be consecutive, unless a strong discrete separator + or a range separator is present in the chain (1.3&5 is valid, but 1.3-5 is not valid and 1.3.5 is not valid) + """ + values = match.children.to_dict(implicit=True) + if 'season' in values and is_iterable(values['season']): + # Season numbers must be in natural order to be validated. + if not list(sorted(values['season'])) == values['season']: + return False + if 'episode' in values and is_iterable(values['episode']): + # Season numbers must be in natural order to be validated. + if not list(sorted(values['episode'])) == values['episode']: + return False + + def is_consecutive(property_name): + """ + Check if the property season or episode has valid consecutive values. 
+ :param property_name: + :type property_name: + :return: + :rtype: + """ + previous_match = None + valid = True + for current_match in match.children.named(property_name): + if previous_match: + match.children.previous(current_match, + lambda m: m.name == property_name + 'Separator') + separator = match.children.previous(current_match, + lambda m: m.name == property_name + 'Separator', 0) + if separator.raw not in range_separators and separator.raw in weak_discrete_separators: + if not current_match.value - previous_match.value == 1: + valid = False + if separator.raw in strong_discrete_separators: + valid = True + break + previous_match = current_match + return valid + + return is_consecutive('episode') and is_consecutive('season') + + # S01E02, 01x02, S01S02S03 + rebulk.chain(formatter={'season': int, 'episode': int}, + tags=['SxxExx'], + abbreviations=[alt_dash], + children=True, + private_parent=True, + validate_all=True, + validator={'__parent__': ordering_validator}, + conflict_solver=season_episode_conflict_solver) \ + .regex(build_or_pattern(season_markers) + r'(?P\d+)@?' + + build_or_pattern(episode_markers) + r'@?(?P\d+)', + validate_all=True, + validator={'__parent__': seps_before}).repeater('+') \ + .regex(build_or_pattern(episode_markers + discrete_separators + range_separators, + name='episodeSeparator', + escape=True) + + r'(?P\d+)').repeater('*') \ + .chain() \ + .regex(r'(?P\d+)@?' + + build_or_pattern(season_ep_markers) + + r'@?(?P\d+)', + validate_all=True, + validator={'__parent__': seps_before}) \ + .chain() \ + .regex(r'(?P\d+)@?' 
+ + build_or_pattern(season_ep_markers) + + r'@?(?P\d+)', + validate_all=True, + validator={'__parent__': seps_before}) \ + .regex(build_or_pattern(season_ep_markers + discrete_separators + range_separators, + name='episodeSeparator', + escape=True) + + r'(?P\d+)').repeater('*') \ + .chain() \ + .regex(build_or_pattern(season_markers) + r'(?P\d+)', + validate_all=True, + validator={'__parent__': seps_before}) \ + .regex(build_or_pattern(season_markers + discrete_separators + range_separators, + name='seasonSeparator', + escape=True) + + r'(?P\d+)').repeater('*') + + # episode_details property + for episode_detail in ('Special', 'Bonus', 'Omake', 'Ova', 'Oav', 'Pilot', 'Unaired'): + rebulk.string(episode_detail, value=episode_detail, name='episode_details') + rebulk.regex(r'Extras?', name='episode_details', value='Extras') + + rebulk.defaults(private_names=['episodeSeparator', 'seasonSeparator'], + validate_all=True, validator={'__parent__': seps_surround}, children=True, private_parent=True) + + def validate_roman(match): + """ + Validate a roman match if surrounded by separators + :param match: + :type match: + :return: + :rtype: + """ + if int_coercable(match.raw): + return True + return seps_surround(match) + + rebulk.chain(abbreviations=[alt_dash], + formatter={'season': parse_numeral, 'count': parse_numeral}, + validator={'__parent__': compose(seps_surround, ordering_validator), + 'season': validate_roman, + 'count': validate_roman}) \ + .defaults(validator=None) \ + .regex(build_or_pattern(season_words) + '@?(?P' + numeral + ')') \ + .regex(r'' + build_or_pattern(of_words) + '@?(?P' + numeral + ')').repeater('?') \ + .regex(r'@?(?P' + + build_or_pattern(range_separators + discrete_separators + ['@'], escape=True) + + r')@?(?P\d+)').repeater('*') + + rebulk.regex(build_or_pattern(episode_words) + r'-?(?P\d+)' + + r'(?:v(?P\d+))?' + + r'(?:-?' 
+ build_or_pattern(of_words) + r'-?(?P\d+))?', # Episode 4 + abbreviations=[dash], formatter=int, + disabled=lambda context: context.get('type') == 'episode') + + rebulk.regex(build_or_pattern(episode_words) + r'-?(?P' + numeral + ')' + + r'(?:v(?P\d+))?' + + r'(?:-?' + build_or_pattern(of_words) + r'-?(?P\d+))?', # Episode 4 + abbreviations=[dash], + validator={'episode': validate_roman}, + formatter={'episode': parse_numeral, 'version': int, 'count': int}, + disabled=lambda context: context.get('type') != 'episode') + + rebulk.regex(r'S?(?P\d+)-?(?:xE|Ex|E|x)-?(?P' + build_or_pattern(all_words) + ')', + tags=['SxxExx'], + abbreviations=[dash], + validator=None, + formatter={'season': int, 'other': lambda match: 'Complete'}) + + rebulk.defaults(private_names=['episodeSeparator', 'seasonSeparator'], validate_all=True, + validator={'__parent__': seps_surround}, children=True, private_parent=True) + + # 12, 13 + rebulk.chain(tags=['bonus-conflict', 'weak-movie', 'weak-episode'], formatter={'episode': int, 'version': int}) \ + .defaults(validator=None) \ + .regex(r'(?P\d{2})') \ + .regex(r'v(?P\d+)').repeater('?') \ + .regex(r'(?P[x-])(?P\d{2})').repeater('*') + + # 012, 013 + rebulk.chain(tags=['bonus-conflict', 'weak-movie', 'weak-episode'], formatter={'episode': int, 'version': int}) \ + .defaults(validator=None) \ + .regex(r'0(?P\d{1,2})') \ + .regex(r'v(?P\d+)').repeater('?') \ + .regex(r'(?P[x-])0(?P\d{1,2})').repeater('*') + + # 112, 113 + rebulk.chain(tags=['bonus-conflict', 'weak-movie', 'weak-episode'], formatter={'episode': int, 'version': int}, + disabled=lambda context: not context.get('episode_prefer_number', False)) \ + .defaults(validator=None) \ + .regex(r'(?P\d{3,4})') \ + .regex(r'v(?P\d+)').repeater('?') \ + .regex(r'(?P[x-])(?P\d{3,4})').repeater('*') + + # 1, 2, 3 + rebulk.chain(tags=['bonus-conflict', 'weak-movie', 'weak-episode'], formatter={'episode': int, 'version': int}, + disabled=lambda context: context.get('type') != 'episode') \ + 
.defaults(validator=None) \ + .regex(r'(?P\d)') \ + .regex(r'v(?P\d+)').repeater('?') \ + .regex(r'(?P[x-])(?P\d{1,2})').repeater('*') + + # e112, e113 + # TODO: Enhance rebulk for validator to be used globally (season_episode_validator) + rebulk.chain(formatter={'episode': int, 'version': int}) \ + .defaults(validator=None) \ + .regex(r'e(?P\d{1,4})') \ + .regex(r'v(?P\d+)').repeater('?') \ + .regex(r'(?Pe|x|-)(?P\d{1,4})').repeater('*') + + # ep 112, ep113, ep112, ep113 + rebulk.chain(abbreviations=[dash], formatter={'episode': int, 'version': int}) \ + .defaults(validator=None) \ + .regex(r'ep-?(?P\d{1,4})') \ + .regex(r'v(?P\d+)').repeater('?') \ + .regex(r'(?Pep|e|x|-)(?P\d{1,4})').repeater('*') + + # 102, 0102 + rebulk.chain(tags=['bonus-conflict', 'weak-movie', 'weak-episode', 'weak-duplicate'], + formatter={'season': int, 'episode': int, 'version': int}, + conflict_solver=lambda match, other: match if other.name == 'year' else '__default__', + disabled=lambda context: context.get('episode_prefer_number', False)) \ + .defaults(validator=None) \ + .regex(r'(?P\d{1,2})(?P\d{2})') \ + .regex(r'v(?P\d+)').repeater('?') \ + .regex(r'(?Px|-)(?P\d{2})').repeater('*') + + rebulk.regex(r'v(?P\d+)', children=True, private_parent=True, formatter=int) + + rebulk.defaults(private_names=['episodeSeparator', 'seasonSeparator']) + + # TODO: List of words + # detached of X count (season/episode) + rebulk.regex(r'(?P\d+)?-?' + build_or_pattern(of_words) + + r'-?(?P\d+)-?' 
+ build_or_pattern(episode_words) + '?', + abbreviations=[dash], children=True, private_parent=True, formatter=int) + + rebulk.regex(r'Minisodes?', name='episode_format', value="Minisode") + + # Harcoded movie to disable weak season/episodes + rebulk.regex('OSS-?117', + abbreviations=[dash], name="hardcoded-movies", marker=True, + conflict_solver=lambda match, other: None) + + rebulk.rules(EpisodeNumberSeparatorRange(range_separators), + SeasonSeparatorRange(range_separators), RemoveWeakIfMovie, RemoveWeakIfSxxExx, + RemoveWeakDuplicate, EpisodeDetailValidator, RemoveDetachedEpisodeNumber, VersionValidator, + CountValidator, EpisodeSingleDigitValidator) + + return rebulk + + +class CountValidator(Rule): + """ + Validate count property and rename it + """ + priority = 64 + consequence = [RemoveMatch, RenameMatch('episode_count'), RenameMatch('season_count')] + + properties = {'episode_count': [None], 'season_count': [None]} + + def when(self, matches, context): + to_remove = [] + episode_count = [] + season_count = [] + + for count in matches.named('count'): + previous = matches.previous(count, lambda match: match.name in ['episode', 'season'], 0) + if previous: + if previous.name == 'episode': + episode_count.append(count) + elif previous.name == 'season': + season_count.append(count) + else: + to_remove.append(count) + return to_remove, episode_count, season_count + + +class AbstractSeparatorRange(Rule): + """ + Remove separator matches and create matches for season range. 
+ """ + priority = 128 + consequence = [RemoveMatch, AppendMatch] + + def __init__(self, range_separators, property_name): + super(AbstractSeparatorRange, self).__init__() + self.range_separators = range_separators + self.property_name = property_name + + def when(self, matches, context): + to_remove = [] + to_append = [] + + for separator in matches.named(self.property_name + 'Separator'): + previous_match = matches.previous(separator, lambda match: match.name == self.property_name, 0) + next_match = matches.next(separator, lambda match: match.name == self.property_name, 0) + + if previous_match and next_match and separator.value in self.range_separators: + for episode_number in range(previous_match.value + 1, next_match.value): + match = copy.copy(next_match) + match.value = episode_number + to_append.append(match) + to_remove.append(separator) + + previous_match = None + for next_match in matches.named(self.property_name): + if previous_match: + separator = matches.input_string[previous_match.initiator.end:next_match.initiator.start] + if separator not in self.range_separators: + separator = strip(separator) + if separator in self.range_separators: + for episode_number in range(previous_match.value + 1, next_match.value): + match = copy.copy(next_match) + match.value = episode_number + to_append.append(match) + to_append.append(Match(previous_match.end, next_match.start - 1, + name=self.property_name + 'Separator', + private=True, + input_string=matches.input_string)) + to_remove.append(next_match) # Remove and append match to support proper ordering + to_append.append(next_match) + + previous_match = next_match + + return to_remove, to_append + + +class EpisodeNumberSeparatorRange(AbstractSeparatorRange): + """ + Remove separator matches and create matches for episoderNumber range. 
+ """ + priority = 128 + consequence = [RemoveMatch, AppendMatch] + + def __init__(self, range_separators): + super(EpisodeNumberSeparatorRange, self).__init__(range_separators, "episode") + + +class SeasonSeparatorRange(AbstractSeparatorRange): + """ + Remove separator matches and create matches for season range. + """ + priority = 128 + consequence = [RemoveMatch, AppendMatch] + + def __init__(self, range_separators): + super(SeasonSeparatorRange, self).__init__(range_separators, "season") + + +class RemoveWeakIfMovie(Rule): + """ + Remove weak-movie tagged matches if it seems to be a movie. + """ + priority = 64 + consequence = RemoveMatch + + def when(self, matches, context): + if matches.named('year') or matches.markers.named('hardcoded-movies'): + return matches.tagged('weak-movie') + + +class RemoveWeakIfSxxExx(Rule): + """ + Remove weak-movie tagged matches if SxxExx pattern is matched. + """ + priority = 64 + consequence = RemoveMatch + + def when(self, matches, context): + if matches.tagged('SxxExx', lambda match: not match.private): + return matches.tagged('weak-movie') + + +class RemoveWeakDuplicate(Rule): + """ + Remove weak-duplicate tagged matches if duplicate patterns, for example The 100.109 + """ + priority = 64 + consequence = RemoveMatch + + def when(self, matches, context): + to_remove = [] + for filepart in matches.markers.named('path'): + patterns = defaultdict(list) + for match in reversed(matches.range(filepart.start, filepart.end, + predicate=lambda match: 'weak-duplicate' in match.tags)): + if match.pattern in patterns[match.name]: + to_remove.append(match) + else: + patterns[match.name].append(match.pattern) + return to_remove + + +class EpisodeDetailValidator(Rule): + """ + Validate episode_details if they are detached or next to season or episode. 
+ """ + priority = 64 + consequence = RemoveMatch + + def when(self, matches, context): + ret = [] + for detail in matches.named('episode_details'): + if not seps_surround(detail) \ + and not matches.previous(detail, lambda match: match.name in ['season', 'episode']) \ + and not matches.next(detail, lambda match: match.name in ['season', 'episode']): + ret.append(detail) + return ret + + +class RemoveDetachedEpisodeNumber(Rule): + """ + If multiple episode are found, remove those that are not detached from a range and less than 10. + + Fairy Tail 2 - 16-20, 2 should be removed. + """ + priority = 64 + consequence = RemoveMatch + dependency = [RemoveWeakIfSxxExx, RemoveWeakDuplicate] + + def when(self, matches, context): + ret = [] + + episode_numbers = [] + episode_values = set() + for match in matches.named('episode', lambda match: not match.private and 'weak-movie' in match.tags): + if match.value not in episode_values: + episode_numbers.append(match) + episode_values.add(match.value) + + episode_numbers = list(sorted(episode_numbers, key=lambda match: match.value)) + if len(episode_numbers) > 1 and \ + episode_numbers[0].value < 10 and \ + episode_numbers[1].value - episode_numbers[0].value != 1: + parent = episode_numbers[0] + while parent: # TODO: Add a feature in rebulk to avoid this ... + ret.append(parent) + parent = parent.parent + return ret + + +class VersionValidator(Rule): + """ + Validate version if previous match is episode or if surrounded by separators. 
+ """ + priority = 64 + dependency = [RemoveWeakIfMovie, RemoveWeakIfSxxExx] + consequence = RemoveMatch + + def when(self, matches, context): + ret = [] + for version in matches.named('version'): + episode_number = matches.previous(version, lambda match: match.name == 'episode', 0) + if not episode_number and not seps_surround(version.initiator): + ret.append(version) + return ret + + +class EpisodeSingleDigitValidator(Rule): + """ + Remove single digit episode when inside a group that doesn't own title. + """ + dependency = [TitleFromPosition] + + consequence = RemoveMatch + + def when(self, matches, context): + ret = [] + for episode in matches.named('episode', lambda match: len(match.initiator) == 1): + group = matches.markers.at_match(episode, lambda marker: marker.name == 'group', index=0) + if group: + if not matches.range(*group.span, predicate=lambda match: match.name == 'title'): + ret.append(episode) + return ret diff --git a/libs/guessit/rules/properties/film.py b/libs/guessit/rules/properties/film.py new file mode 100644 index 00000000..21a56d29 --- /dev/null +++ b/libs/guessit/rules/properties/film.py @@ -0,0 +1,42 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +film property +""" +from rebulk.remodule import re + +from rebulk import Rebulk, AppendMatch, Rule +from ..common.formatters import cleanup + + +def film(): + """ + Builder for rebulk object. 
+ :return: Created Rebulk object + :rtype: Rebulk + """ + rebulk = Rebulk().regex_defaults(flags=re.IGNORECASE) + + rebulk.regex(r'f(\d{1,2})', name='film', private_parent=True, children=True, formatter=int) + + rebulk.rules(FilmTitleRule) + + return rebulk + + +class FilmTitleRule(Rule): + """ + Rule to find out film_title (hole after film property + """ + consequence = AppendMatch + + properties = {'film_title': [None]} + + def when(self, matches, context): + bonus_number = matches.named('film', lambda match: not match.private, index=0) + if bonus_number: + filepath = matches.markers.at_match(bonus_number, lambda marker: marker.name == 'path', 0) + hole = matches.holes(filepath.start, bonus_number.start + 1, formatter=cleanup, index=0) + if hole and hole.value: + hole.name = 'film_title' + return hole diff --git a/libs/guessit/rules/properties/format.py b/libs/guessit/rules/properties/format.py new file mode 100644 index 00000000..aa75f824 --- /dev/null +++ b/libs/guessit/rules/properties/format.py @@ -0,0 +1,67 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +format property +""" +from rebulk.remodule import re + +from rebulk import Rebulk, RemoveMatch, Rule +from ..common import dash +from ..common.validators import seps_before, seps_after + + +def format_(): + """ + Builder for rebulk object. 
+ :return: Created Rebulk object + :rtype: Rebulk + """ + rebulk = Rebulk().regex_defaults(flags=re.IGNORECASE, abbreviations=[dash]) + rebulk.defaults(name="format") + + rebulk.regex("VHS", "VHS-?Rip", value="VHS") + rebulk.regex("CAM", "CAM-?Rip", "HD-?CAM", value="Cam") + rebulk.regex("TELESYNC", "TS", "HD-?TS", value="Telesync") + rebulk.regex("WORKPRINT", "WP", value="Workprint") + rebulk.regex("TELECINE", "TC", value="Telecine") + rebulk.regex("PPV", "PPV-?Rip", value="PPV") # Pay Per View + rebulk.regex("SD-?TV", "SD-?TV-?Rip", "Rip-?SD-?TV", "TV-?Rip", + "Rip-?TV", value="TV") # TV is too common to allow matching + rebulk.regex("DVB-?Rip", "DVB", "PD-?TV", value="DVB") + rebulk.regex("DVD", "DVD-?Rip", "VIDEO-?TS", "DVD-?R(?:$|(?!E))", # "DVD-?R(?:$|^E)" => DVD-Real ... + "DVD-?9", "DVD-?5", value="DVD") + + rebulk.regex("HD-?TV", "TV-?RIP-?HD", "HD-?TV-?RIP", "HD-?RIP", value="HDTV") + rebulk.regex("VOD", "VOD-?Rip", value="VOD") + rebulk.regex("WEB-?Rip", value="WEBRip") + rebulk.regex("WEB-?DL", "WEB-?HD", "WEB", value="WEB-DL") + rebulk.regex("HD-?DVD-?Rip", "HD-?DVD", value="HD-DVD") + rebulk.regex("Blu-?ray(?:-?Rip)?", "B[DR]", "B[DR]-?Rip", "BD[59]", "BD25", "BD50", value="BluRay") + + rebulk.rules(ValidateFormat) + + return rebulk + + +class ValidateFormat(Rule): + """ + Validate format with screener property, with video_codec property or separated + """ + priority = 64 + consequence = RemoveMatch + + def when(self, matches, context): + ret = [] + for format_match in matches.named('format'): + if not seps_before(format_match) and \ + not matches.range(format_match.start - 1, format_match.start - 2, + lambda match: match.name == 'other' and match.value == 'Screener'): + ret.append(format_match) + continue + if not seps_after(format_match) and \ + not matches.range(format_match.end, format_match.end + 1, + lambda match: match.name == 'video_codec' or ( + match.name == 'other' and match.value == 'Screener')): + ret.append(format_match) + continue + 
return ret diff --git a/libs/guessit/rules/properties/language.py b/libs/guessit/rules/properties/language.py new file mode 100644 index 00000000..3476d60a --- /dev/null +++ b/libs/guessit/rules/properties/language.py @@ -0,0 +1,249 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +language and subtitle_language properties +""" +# pylint: disable=no-member +import copy + +import babelfish + +from rebulk.remodule import re +from rebulk import Rebulk, Rule, RemoveMatch, RenameMatch +from ..common.words import iter_words, COMMON_WORDS +from ..common.validators import seps_surround + + +def language(): + """ + Builder for rebulk object. + :return: Created Rebulk object + :rtype: Rebulk + """ + rebulk = Rebulk() + + rebulk.string(*subtitle_prefixes, name="subtitle_language.prefix", ignore_case=True, private=True, + validator=seps_surround) + rebulk.string(*subtitle_suffixes, name="subtitle_language.suffix", ignore_case=True, private=True, + validator=seps_surround) + rebulk.functional(find_languages, properties={'language': [None]}) + rebulk.rules(SubtitlePrefixLanguageRule, SubtitleSuffixLanguageRule, SubtitleExtensionRule) + + return rebulk + + +COMMON_WORDS_STRICT = frozenset(['brazil']) + +UNDETERMINED = babelfish.Language('und') + +SYN = {('und', None): ['unknown', 'inconnu', 'unk', 'un'], + ('ell', None): ['gr', 'greek'], + ('spa', None): ['esp', 'español'], + ('fra', None): ['français', 'vf', 'vff', 'vfi', 'vfq'], + ('swe', None): ['se'], + ('por', 'BR'): ['po', 'pb', 'pob', 'br', 'brazilian'], + ('cat', None): ['català'], + ('ces', None): ['cz'], + ('ukr', None): ['ua'], + ('zho', None): ['cn'], + ('jpn', None): ['jp'], + ('hrv', None): ['scr'], + ('mul', None): ['multi', 'dl']} # http://scenelingo.wordpress.com/2009/03/24/what-does-dl-mean/ + + +class GuessitConverter(babelfish.LanguageReverseConverter): # pylint: disable=missing-docstring + _with_country_regexp = re.compile(r'(.*)\((.*)\)') + _with_country_regexp2 = re.compile(r'(.*)-(.*)') + + def 
__init__(self): + self.guessit_exceptions = {} + for (alpha3, country), synlist in SYN.items(): + for syn in synlist: + self.guessit_exceptions[syn.lower()] = (alpha3, country, None) + + @property + def codes(self): # pylint: disable=missing-docstring + return (babelfish.language_converters['alpha3b'].codes | + babelfish.language_converters['alpha2'].codes | + babelfish.language_converters['name'].codes | + babelfish.language_converters['opensubtitles'].codes | + babelfish.country_converters['name'].codes | + frozenset(self.guessit_exceptions.keys())) + + def convert(self, alpha3, country=None, script=None): + return str(babelfish.Language(alpha3, country, script)) + + def reverse(self, name): + with_country = (GuessitConverter._with_country_regexp.match(name) or + GuessitConverter._with_country_regexp2.match(name)) + + name = name.lower() + if with_country: + lang = babelfish.Language.fromguessit(with_country.group(1).strip()) + lang.country = babelfish.Country.fromguessit(with_country.group(2).strip()) + return lang.alpha3, lang.country.alpha2 if lang.country else None, lang.script or None + + # exceptions come first, as they need to override a potential match + # with any of the other guessers + try: + return self.guessit_exceptions[name] + except KeyError: + pass + + for conv in [babelfish.Language, + babelfish.Language.fromalpha3b, + babelfish.Language.fromalpha2, + babelfish.Language.fromname, + babelfish.Language.fromopensubtitles]: + try: + reverse = conv(name) + return reverse.alpha3, reverse.country, reverse.script + except (ValueError, babelfish.LanguageReverseError): + pass + + raise babelfish.LanguageReverseError(name) + + +babelfish.language_converters['guessit'] = GuessitConverter() + +subtitle_both = ['sub', 'subs', 'subbed', 'custom subbed', 'custom subs', 'custom sub', 'customsubbed', 'customsubs', + 'customsub'] +subtitle_prefixes = subtitle_both + ['st', 'vost', 'subforced', 'fansub', 'hardsub'] +subtitle_suffixes = subtitle_both + ['subforced', 
'fansub', 'hardsub'] +lang_prefixes = ['true'] + +all_lang_prefixes_suffixes = subtitle_prefixes + subtitle_suffixes + lang_prefixes + + +def find_languages(string, context=None): + """Find languages in the string + + :return: list of tuple (property, Language, lang_word, word) + """ + allowed_languages = context.get('allowed_languages') + common_words = COMMON_WORDS_STRICT if allowed_languages else COMMON_WORDS + + matches = [] + for word_match in iter_words(string): + word = word_match.value + start, end = word_match.span + + lang_word = word.lower() + key = 'language' + for prefix in subtitle_prefixes: + if lang_word.startswith(prefix): + lang_word = lang_word[len(prefix):] + key = 'subtitle_language' + for suffix in subtitle_suffixes: + if lang_word.endswith(suffix): + lang_word = lang_word[:len(lang_word) - len(suffix)] + key = 'subtitle_language' + for prefix in lang_prefixes: + if lang_word.startswith(prefix): + lang_word = lang_word[len(prefix):] + if lang_word not in common_words and word.lower() not in common_words: + try: + lang = babelfish.Language.fromguessit(lang_word) + match = (start, end, {'name': key, 'value': lang}) + if allowed_languages: + if lang.name.lower() in allowed_languages \ + or lang.alpha2.lower() in allowed_languages \ + or lang.alpha3.lower() in allowed_languages: + matches.append(match) + # Keep language with alpha2 equivalent. Others are probably + # uncommon languages. 
+ elif lang == 'mul' or hasattr(lang, 'alpha2'): + matches.append(match) + except babelfish.Error: + pass + return matches + + +class SubtitlePrefixLanguageRule(Rule): + """ + Convert language guess as subtitle_language if previous match is a subtitle language prefix + """ + consequence = RemoveMatch + + properties = {'subtitle_language': [None]} + + def when(self, matches, context): + to_rename = [] + to_remove = matches.named('subtitle_language.prefix') + for lang in matches.named('language'): + prefix = matches.previous(lang, lambda match: match.name == 'subtitle_language.prefix', 0) + if not prefix: + group_marker = matches.markers.at_match(lang, lambda marker: marker.name == 'group', 0) + if group_marker: + # Find prefix if placed just before the group + prefix = matches.previous(group_marker, lambda match: match.name == 'subtitle_language.prefix', + 0) + if not prefix: + # Find prefix if placed before in the group + prefix = matches.range(group_marker.start, lang.start, + lambda match: match.name == 'subtitle_language.prefix', 0) + if prefix: + to_rename.append((prefix, lang)) + if prefix in to_remove: + to_remove.remove(prefix) + return to_rename, to_remove + + def then(self, matches, when_response, context): + to_rename, to_remove = when_response + super(SubtitlePrefixLanguageRule, self).then(matches, to_remove, context) + for prefix, match in to_rename: + # Remove suffix equivalent of prefix. 
+ suffix = copy.copy(prefix) + suffix.name = 'subtitle_language.suffix' + if suffix in matches: + matches.remove(suffix) + matches.remove(match) + match.name = 'subtitle_language' + matches.append(match) + + +class SubtitleSuffixLanguageRule(Rule): + """ + Convert language guess as subtitle_language if next match is a subtitle language suffix + """ + dependency = SubtitlePrefixLanguageRule + consequence = RemoveMatch + + properties = {'subtitle_language': [None]} + + def when(self, matches, context): + to_append = [] + to_remove = matches.named('subtitle_language.suffix') + for lang in matches.named('language'): + suffix = matches.next(lang, lambda match: match.name == 'subtitle_language.suffix', 0) + if suffix: + to_append.append(lang) + if suffix in to_remove: + to_remove.remove(suffix) + return to_append, to_remove + + def then(self, matches, when_response, context): + to_rename, to_remove = when_response + super(SubtitleSuffixLanguageRule, self).then(matches, to_remove, context) + for match in to_rename: + matches.remove(match) + match.name = 'subtitle_language' + matches.append(match) + + +class SubtitleExtensionRule(Rule): + """ + Convert language guess as subtitle_language if next match is a subtitle extension + """ + consequence = RenameMatch('subtitle_language') + + properties = {'subtitle_language': [None]} + + def when(self, matches, context): + subtitle_extension = matches.named('container', + lambda match: 'extension' in match.tags and 'subtitle' in match.tags, + 0) + if subtitle_extension: + subtitle_lang = matches.previous(subtitle_extension, lambda match: match.name == 'language', 0) + if subtitle_lang: + return subtitle_lang diff --git a/libs/guessit/rules/properties/mimetype.py b/libs/guessit/rules/properties/mimetype.py new file mode 100644 index 00000000..c57ada77 --- /dev/null +++ b/libs/guessit/rules/properties/mimetype.py @@ -0,0 +1,48 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +mimetype property +""" +import mimetypes + +from 
rebulk import Rebulk, CustomRule, POST_PROCESS +from rebulk.match import Match + +from ...rules.processors import Processors + + +def mimetype(): + """ + Builder for rebulk object. + :return: Created Rebulk object + :rtype: Rebulk + """ + return Rebulk().rules(Mimetype) + + +class Mimetype(CustomRule): + """ + Mimetype post processor + :param matches: + :type matches: + :return: + :rtype: + """ + priority = POST_PROCESS + + dependency = Processors + + def when(self, matches, context): + mime, _ = mimetypes.guess_type(matches.input_string, strict=False) + return mime + + def then(self, matches, when_response, context): + mime = when_response + matches.append(Match(len(matches.input_string), len(matches.input_string), name='mimetype', value=mime)) + + @property + def properties(self): + """ + Properties for this rule. + """ + return {'mimetype': [None]} diff --git a/libs/guessit/rules/properties/other.py b/libs/guessit/rules/properties/other.py new file mode 100644 index 00000000..1c51eea7 --- /dev/null +++ b/libs/guessit/rules/properties/other.py @@ -0,0 +1,181 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +other property +""" +import copy + +from rebulk.remodule import re + +from rebulk import Rebulk, Rule, RemoveMatch, POST_PROCESS, AppendMatch +from ..common import dash +from ..common import seps +from ..common.validators import seps_surround, compose +from ...rules.common.formatters import raw_cleanup +from ...reutils import build_or_pattern + + +def other(): + """ + Builder for rebulk object. 
+ :return: Created Rebulk object + :rtype: Rebulk + """ + rebulk = Rebulk().regex_defaults(flags=re.IGNORECASE, abbreviations=[dash]).string_defaults(ignore_case=True) + rebulk.defaults(name="other", validator=seps_surround) + + rebulk.regex('Audio-?Fix', 'Audio-?Fixed', value='AudioFix') + rebulk.regex('Sync-?Fix', 'Sync-?Fixed', value='SyncFix') + rebulk.regex('Dual-?Audio', value='DualAudio') + rebulk.regex('ws', 'wide-?screen', value='WideScreen') + rebulk.string('Netflix', 'NF', value='Netflix') + + rebulk.string('Real', 'Fix', 'Fixed', value='Proper', tags=['has-neighbor-before', 'has-neighbor-after']) + rebulk.string('Proper', 'Repack', 'Rerip', value='Proper') + rebulk.string('Fansub', value='Fansub', tags='has-neighbor') + rebulk.string('Fastsub', value='Fastsub', tags='has-neighbor') + + season_words = build_or_pattern(["seasons?", "series?"]) + complete_articles = build_or_pattern(["The"]) + + def validate_complete(match): + """ + Make sure season word is are defined. + :param match: + :type match: + :return: + :rtype: + """ + children = match.children + if not children.named('completeWordsBefore') and not children.named('completeWordsAfter'): + return False + return True + + rebulk.regex('(?P' + complete_articles + '-)?' + + '(?P' + season_words + '-)?' 
+ + 'Complete' + '(?P-' + season_words + ')?', + private_names=['completeArticle', 'completeWordsBefore', 'completeWordsAfter'], + value={'other': 'Complete'}, + tags=['release-group-prefix'], + validator={'__parent__': compose(seps_surround, validate_complete)}) + rebulk.string('R5', 'RC', value='R5') + rebulk.regex('Pre-?Air', value='Preair') + + for value in ( + 'Screener', 'Remux', 'Remastered', '3D', 'HD', 'mHD', 'HDLight', 'HQ', 'DDC', 'HR', 'PAL', 'SECAM', 'NTSC', + 'CC', 'LD', 'MD', 'XXX'): + rebulk.string(value, value=value) + + for value in ('Limited', 'Complete', 'Classic', 'Unrated', 'LiNE', 'Bonus', 'Trailer', 'FINAL', 'Retail', 'Uncut', + 'Extended', 'Extended Cut'): + rebulk.string(value, value=value, tags=['has-neighbor', 'release-group-prefix']) + + rebulk.string('VO', 'OV', value='OV', tags='has-neighbor') + + rebulk.regex('Scr(?:eener)?', value='Screener', validator=None, tags='other.validate.screener') + + rebulk.rules(ValidateHasNeighbor, ValidateHasNeighborAfter, ValidateHasNeighborBefore, ValidateScreenerRule, + ProperCountRule) + + return rebulk + + +class ProperCountRule(Rule): + """ + Add proper_count property + """ + priority = POST_PROCESS + + consequence = AppendMatch + + properties = {'proper_count': [None]} + + def when(self, matches, context): + propers = matches.named('other', lambda match: match.value == 'Proper') + if propers: + raws = {} # Count distinct raw values + for proper in propers: + raws[raw_cleanup(proper.raw)] = proper + proper_count_match = copy.copy(propers[-1]) + proper_count_match.name = 'proper_count' + proper_count_match.value = len(raws) + return proper_count_match + + +class ValidateHasNeighbor(Rule): + """ + Validate tag has-neighbor + """ + consequence = RemoveMatch + + def when(self, matches, context): + ret = [] + for to_check in matches.range(predicate=lambda match: 'has-neighbor' in match.tags): + previous_match = matches.previous(to_check, index=0) + previous_group = matches.markers.previous(to_check, 
lambda marker: marker.name == 'group', 0) + if previous_group and (not previous_match or previous_group.end > previous_match.end): + previous_match = previous_group + if previous_match and not matches.input_string[previous_match.end:to_check.start].strip(seps): + break + next_match = matches.next(to_check, index=0) + next_group = matches.markers.next(to_check, lambda marker: marker.name == 'group', 0) + if next_group and (not next_match or next_group.start < next_match.start): + next_match = next_group + if next_match and not matches.input_string[to_check.end:next_match.start].strip(seps): + break + ret.append(to_check) + return ret + + +class ValidateHasNeighborBefore(Rule): + """ + Validate tag has-neighbor-before that previous match exists. + """ + consequence = RemoveMatch + + def when(self, matches, context): + ret = [] + for to_check in matches.range(predicate=lambda match: 'has-neighbor-before' in match.tags): + next_match = matches.next(to_check, index=0) + next_group = matches.markers.next(to_check, lambda marker: marker.name == 'group', 0) + if next_group and (not next_match or next_group.start < next_match.start): + next_match = next_group + if next_match and not matches.input_string[to_check.end:next_match.start].strip(seps): + break + ret.append(to_check) + return ret + + +class ValidateHasNeighborAfter(Rule): + """ + Validate tag has-neighbor-after that next match exists. 
+ """ + consequence = RemoveMatch + + def when(self, matches, context): + ret = [] + for to_check in matches.range(predicate=lambda match: 'has-neighbor-after' in match.tags): + previous_match = matches.previous(to_check, index=0) + previous_group = matches.markers.previous(to_check, lambda marker: marker.name == 'group', 0) + if previous_group and (not previous_match or previous_group.end > previous_match.end): + previous_match = previous_group + if previous_match and not matches.input_string[previous_match.end:to_check.start].strip(seps): + break + ret.append(to_check) + return ret + + +class ValidateScreenerRule(Rule): + """ + Validate tag other.validate.screener + """ + consequence = RemoveMatch + priority = 64 + + def when(self, matches, context): + ret = [] + for screener in matches.named('other', lambda match: 'other.validate.screener' in match.tags): + format_match = matches.previous(screener, lambda match: match.name == 'format', 0) + if not format_match or matches.input_string[format_match.end:screener.start].strip(seps): + ret.append(screener) + return ret diff --git a/libs/guessit/rules/properties/part.py b/libs/guessit/rules/properties/part.py new file mode 100644 index 00000000..d274f7fb --- /dev/null +++ b/libs/guessit/rules/properties/part.py @@ -0,0 +1,41 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +part property +""" +from rebulk.remodule import re + +from rebulk import Rebulk +from ..common import dash +from ..common.validators import seps_surround, int_coercable, compose +from ..common.numeral import numeral, parse_numeral +from ...reutils import build_or_pattern + + +def part(): + """ + Builder for rebulk object. 
+ :return: Created Rebulk object + :rtype: Rebulk + """ + rebulk = Rebulk().regex_defaults(flags=re.IGNORECASE, abbreviations=[dash], validator={'__parent__': seps_surround}) + + prefixes = ['pt', 'part'] + + def validate_roman(match): + """ + Validate a roman match if surrounded by separators + :param match: + :type match: + :return: + :rtype: + """ + if int_coercable(match.raw): + return True + return seps_surround(match) + + rebulk.regex(build_or_pattern(prefixes) + r'-?(?P' + numeral + r')', + prefixes=prefixes, validate_all=True, private_parent=True, children=True, formatter=parse_numeral, + validator={'part': compose(validate_roman, lambda m: 0 < m.value < 100)}) + + return rebulk diff --git a/libs/guessit/rules/properties/release_group.py b/libs/guessit/rules/properties/release_group.py new file mode 100644 index 00000000..b92ad168 --- /dev/null +++ b/libs/guessit/rules/properties/release_group.py @@ -0,0 +1,171 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +release_group property +""" +import copy + +from rebulk.remodule import re + +from rebulk import Rebulk, Rule, AppendMatch +from ..common.validators import int_coercable +from ..properties.title import TitleFromPosition +from ..common.formatters import cleanup +from ..common import seps, dash +from ..common.comparators import marker_sorted + + +def release_group(): + """ + Builder for rebulk object. 
+ :return: Created Rebulk object + :rtype: Rebulk + """ + return Rebulk().rules(SceneReleaseGroup, AnimeReleaseGroup, ExpectedReleaseGroup) + + +forbidden_groupnames = ['rip', 'by', 'for', 'par', 'pour', 'bonus'] + +groupname_ignore_seps = '[]{}()' +groupname_seps = ''.join([c for c in seps if c not in groupname_ignore_seps]) + + +def clean_groupname(string): + """ + Removes and strip separators from input_string + :param input_string: + :type input_string: + :return: + :rtype: + """ + string = string.strip(groupname_seps) + if not (string.endswith(tuple(groupname_ignore_seps)) and string.startswith(tuple(groupname_ignore_seps)))\ + and not any(i in string.strip(groupname_ignore_seps) for i in groupname_ignore_seps): + string = string.strip(groupname_ignore_seps) + for forbidden in forbidden_groupnames: + if string.lower().startswith(forbidden): + string = string[len(forbidden):] + string = string.strip(groupname_seps) + if string.lower().endswith(forbidden): + string = string[:len(forbidden)] + string = string.strip(groupname_seps) + return string + + +_scene_previous_names = ['video_codec', 'format', 'video_api', 'audio_codec', 'audio_profile', 'video_profile', + 'audio_channels', 'screen_size', 'other', 'container', 'language', 'subtitle_language', + 'subtitle_language.suffix', 'subtitle_language.prefix'] + +_scene_previous_tags = ['release-group-prefix'] + + +class ExpectedReleaseGroup(Rule): + """ + Add release_group match from expected_group option + """ + consequence = AppendMatch + + properties = {'release_group': [None]} + + def enabled(self, context): + return context.get('expected_group') + + def when(self, matches, context): + expected_rebulk = Rebulk().defaults(name='release_group') + + for expected_group in context.get('expected_group'): + if expected_group.startswith('re:'): + expected_group = expected_group[3:] + expected_group = expected_group.replace(' ', '-') + expected_rebulk.regex(expected_group, abbreviations=[dash], flags=re.IGNORECASE) + 
else: + expected_rebulk.string(expected_group, ignore_case=True) + + matches = expected_rebulk.matches(matches.input_string, context) + return matches + + +class SceneReleaseGroup(Rule): + """ + Add release_group match in existing matches (scene format). + + Something.XViD-ReleaseGroup.mkv + """ + dependency = [TitleFromPosition, ExpectedReleaseGroup] + consequence = AppendMatch + + properties = {'release_group': [None]} + + def when(self, matches, context): + # If a release_group is found before, ignore this kind of release_group rule. + + ret = [] + + for filepart in marker_sorted(matches.markers.named('path'), matches): + start, end = filepart.span + + last_hole = matches.holes(start, end + 1, formatter=clean_groupname, + predicate=lambda hole: cleanup(hole.value), index=-1) + + if last_hole: + previous_match = matches.previous(last_hole, + lambda match: not match.private or + match.name in _scene_previous_names, + index=0) + if previous_match and (previous_match.name in _scene_previous_names or + any(tag in previous_match.tags for tag in _scene_previous_tags)) and \ + not matches.input_string[previous_match.end:last_hole.start].strip(seps) \ + and not int_coercable(last_hole.value.strip(seps)): + + last_hole.name = 'release_group' + last_hole.tags = ['scene'] + + # if hole is inside a group marker with same value, remove [](){} ... 
+ group = matches.markers.at_match(last_hole, lambda marker: marker.name == 'group', 0) + if group: + group.formatter = clean_groupname + if group.value == last_hole.value: + last_hole.start = group.start + 1 + last_hole.end = group.end - 1 + last_hole.tags = ['anime'] + + ret.append(last_hole) + return ret + + +class AnimeReleaseGroup(Rule): + """ + Add release_group match in existing matches (anime format) + ...[ReleaseGroup] Something.mkv + """ + dependency = [SceneReleaseGroup, TitleFromPosition] + consequence = AppendMatch + + properties = {'release_group': [None]} + + def when(self, matches, context): + ret = [] + + # If a release_group is found before, ignore this kind of release_group rule. + if not matches.named('episode') and not matches.named('season') and matches.named('release_group'): + # This doesn't seems to be an anime + return + + for filepart in marker_sorted(matches.markers.named('path'), matches): + + # pylint:disable=bad-continuation + empty_group_marker = matches.markers \ + .range(filepart.start, filepart.end, lambda marker: marker.name == 'group' + and not matches.range(marker.start, marker.end) + and not int_coercable(marker.value.strip(seps)), + 0) + + if empty_group_marker: + group = copy.copy(empty_group_marker) + group.marker = False + group.raw_start += 1 + group.raw_end -= 1 + group.tags = ['anime'] + group.name = 'release_group' + ret.append(group) + return ret diff --git a/libs/guessit/rules/properties/screen_size.py b/libs/guessit/rules/properties/screen_size.py new file mode 100644 index 00000000..80d68c29 --- /dev/null +++ b/libs/guessit/rules/properties/screen_size.py @@ -0,0 +1,77 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +screen_size property +""" +from rebulk.remodule import re + +from rebulk import Rebulk, Rule, RemoveMatch +from ..common.validators import seps_surround +from ..common import dash + + +def screen_size(): + """ + Builder for rebulk object. 
+ :return: Created Rebulk object + :rtype: Rebulk + """ + def conflict_solver(match, other): + """ + Conflict solver for most screen_size. + """ + if other.name == 'screen_size': + if 'resolution' in other.tags: + # The chtouile to solve conflict in "720 x 432" string matching both 720p pattern + int_value = _digits_re.findall(match.raw)[-1] + if other.value.startswith(int_value): + return match + return other + return '__default__' + + rebulk = Rebulk().regex_defaults(flags=re.IGNORECASE) + rebulk.defaults(name="screen_size", validator=seps_surround, conflict_solver=conflict_solver) + + rebulk.regex(r"(?:\d{3,}(?:x|\*))?360(?:i|p?x?)", value="360p") + rebulk.regex(r"(?:\d{3,}(?:x|\*))?368(?:i|p?x?)", value="368p") + rebulk.regex(r"(?:\d{3,}(?:x|\*))?480(?:i|p?x?)", value="480p") + rebulk.regex(r"(?:\d{3,}(?:x|\*))?576(?:i|p?x?)", value="576p") + rebulk.regex(r"(?:\d{3,}(?:x|\*))?720(?:i|p?(?:50|60)?x?)", value="720p") + rebulk.regex(r"(?:\d{3,}(?:x|\*))?720(?:p(?:50|60)?x?)", value="720p") + rebulk.regex(r"(?:\d{3,}(?:x|\*))?720p?hd", value="720p") + rebulk.regex(r"(?:\d{3,}(?:x|\*))?900(?:i|p?x?)", value="900p") + rebulk.regex(r"(?:\d{3,}(?:x|\*))?1080i", value="1080i") + rebulk.regex(r"(?:\d{3,}(?:x|\*))?1080p?x?", value="1080p") + rebulk.regex(r"(?:\d{3,}(?:x|\*))?1080(?:p(?:50|60)?x?)", value="1080p") + rebulk.regex(r"(?:\d{3,}(?:x|\*))?1080p?hd", value="1080p") + rebulk.regex(r"(?:\d{3,}(?:x|\*))?2160(?:i|p?x?)", value="4K") + + _digits_re = re.compile(r'\d+') + + rebulk.defaults(name="screen_size", validator=seps_surround) + rebulk.regex(r'\d{3,}-?(?:x|\*)-?\d{3,}', + formatter=lambda value: 'x'.join(_digits_re.findall(value)), + abbreviations=[dash], + tags=['resolution'], + conflict_solver=lambda match, other: '__default__' if other.name == 'screen_size' else other) + + rebulk.rules(ScreenSizeOnlyOne) + + return rebulk + + +class ScreenSizeOnlyOne(Rule): + """ + Keep a single screen_size pet filepath part. 
+ """ + consequence = RemoveMatch + + def when(self, matches, context): + to_remove = [] + for filepart in matches.markers.named('path'): + screensize = list(reversed(matches.range(filepart.start, filepart.end, + lambda match: match.name == 'screen_size'))) + if len(screensize) > 1: + to_remove.extend(screensize[1:]) + + return to_remove diff --git a/libs/guessit/rules/properties/title.py b/libs/guessit/rules/properties/title.py new file mode 100644 index 00000000..067d432d --- /dev/null +++ b/libs/guessit/rules/properties/title.py @@ -0,0 +1,347 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +title property +""" +import re + +from rebulk import Rebulk, Rule, AppendMatch, RemoveMatch, AppendTags +from rebulk.formatters import formatters +from rebulk.pattern import RePattern +from rebulk.utils import find_all + +from .film import FilmTitleRule +from .language import SubtitlePrefixLanguageRule, SubtitleSuffixLanguageRule, SubtitleExtensionRule +from ..common.formatters import cleanup, reorder_title +from ..common.comparators import marker_sorted +from ..common import seps, title_seps, dash + + +def title(): + """ + Builder for rebulk object. + :return: Created Rebulk object + :rtype: Rebulk + """ + rebulk = Rebulk().rules(TitleFromPosition, PreferTitleWithYear) + + def expected_title(input_string, context): + """ + Expected title functional pattern. + :param input_string: + :type input_string: + :param context: + :type context: + :return: + :rtype: + """ + ret = [] + for search in context.get('expected_title'): + if search.startswith('re:'): + search = search[3:] + search = search.replace(' ', '-') + matches = RePattern(search, abbreviations=[dash], flags=re.IGNORECASE).matches(input_string, context) + for match in matches: + # Instance of 'list' has no 'span' member (no-member). Seems to be a pylint bug. 
+ # pylint: disable=no-member + ret.append(match.span) + else: + for start in find_all(input_string, search, ignore_case=True): + ret.append((start, start+len(search))) + return ret + + rebulk.functional(expected_title, name='title', tags=['expected'], + conflict_solver=lambda match, other: other, + disabled=lambda context: not context.get('expected_title')) + + return rebulk + + +class TitleBaseRule(Rule): + """ + Add title match in existing matches + """ + # pylint:disable=no-self-use,unused-argument + consequence = [AppendMatch, RemoveMatch] + + def __init__(self, match_name, match_tags=None, alternative_match_name=None): + super(TitleBaseRule, self).__init__() + self.match_name = match_name + self.match_tags = match_tags + self.alternative_match_name = alternative_match_name + + def hole_filter(self, hole, matches): + """ + Filter holes for titles. + :param hole: + :type hole: + :param matches: + :type matches: + :return: + :rtype: + """ + return True + + def filepart_filter(self, filepart, matches): + """ + Filter filepart for titles. + :param filepart: + :type filepart: + :param matches: + :type matches: + :return: + :rtype: + """ + return True + + def holes_process(self, holes, matches): + """ + process holes + :param holes: + :type holes: + :param matches: + :type matches: + :return: + :rtype: + """ + cropped_holes = [] + for hole in holes: + group_markers = matches.markers.named('group') + cropped_holes.extend(hole.crop(group_markers)) + return cropped_holes + + def is_ignored(self, match): + """ + Ignore matches when scanning for title (hole). + + Full word language and countries won't be ignored if they are uppercase. + """ + return not (len(match) > 3 and match.raw.isupper()) and match.name in ['language', 'country', 'episode_details'] + + def should_keep(self, match, to_keep, matches, filepart, hole, starting): + """ + Check if this match should be accepted when ending or starting a hole. 
+ :param match: + :type match: + :param to_keep: + :type to_keep: list[Match] + :param matches: + :type matches: Matches + :param hole: the filepart match + :type hole: Match + :param hole: the hole match + :type hole: Match + :param starting: true if match is starting the hole + :type starting: bool + :return: + :rtype: + """ + if match.name in ['language', 'country']: + # Keep language if exactly matching the hole. + if len(hole.value) == len(match.raw): + return True + + # Keep language if other languages exists in the filepart. + outside_matches = filepart.crop(hole) + other_languages = [] + for outside in outside_matches: + other_languages.extend(matches.range(outside.start, outside.end, + lambda c_match: c_match.name == match.name and + c_match not in to_keep)) + + if not other_languages: + return True + + return False + + def should_remove(self, match, matches, filepart, hole, context): + """ + Check if this match should be removed after beeing ignored. + :param match: + :param matches: + :param filepart: + :param hole: + :return: + """ + if context.get('type') == 'episode' and match.name == 'episode_details': + return False + return True + + def check_titles_in_filepart(self, filepart, matches, context): + """ + Find title in filepart (ignoring language) + """ + # pylint:disable=too-many-locals,too-many-branches,too-many-statements + start, end = filepart.span + + holes = matches.holes(start, end + 1, formatter=formatters(cleanup, reorder_title), + ignore=self.is_ignored, + predicate=lambda hole: hole.value) + + holes = self.holes_process(holes, matches) + + for hole in holes: + # pylint:disable=cell-var-from-loop + if not hole or (self.hole_filter and not self.hole_filter(hole, matches)): + continue + + to_remove = [] + to_keep = [] + + ignored_matches = matches.range(hole.start, hole.end, self.is_ignored) + + if ignored_matches: + for ignored_match in reversed(ignored_matches): + # pylint:disable=undefined-loop-variable + trailing = 
matches.chain_before(hole.end, seps, predicate=lambda match: match == ignored_match) + if trailing: + should_keep = self.should_keep(ignored_match, to_keep, matches, filepart, hole, False) + if should_keep: + # pylint:disable=unpacking-non-sequence + try: + append, crop = should_keep + except TypeError: + append, crop = should_keep, should_keep + if append: + to_keep.append(ignored_match) + if crop: + hole.end = ignored_match.start + + for ignored_match in ignored_matches: + if ignored_match not in to_keep: + starting = matches.chain_after(hole.start, seps, + predicate=lambda match: match == ignored_match) + if starting: + should_keep = self.should_keep(ignored_match, to_keep, matches, filepart, hole, True) + if should_keep: + # pylint:disable=unpacking-non-sequence + try: + append, crop = should_keep + except TypeError: + append, crop = should_keep, should_keep + if append: + to_keep.append(ignored_match) + if crop: + hole.start = ignored_match.end + + for match in ignored_matches: + if self.should_remove(match, matches, filepart, hole, context): + to_remove.append(match) + for keep_match in to_keep: + if keep_match in to_remove: + to_remove.remove(keep_match) + + if hole and hole.value: + hole.name = self.match_name + hole.tags = self.match_tags + if self.alternative_match_name: + # Split and keep values that can be a title + titles = hole.split(title_seps, lambda match: match.value) + for title_match in list(titles[1:]): + previous_title = titles[titles.index(title_match) - 1] + separator = matches.input_string[previous_title.end:title_match.start] + if len(separator) == 1 and separator == '-' \ + and previous_title.raw[-1] not in seps \ + and title_match.raw[0] not in seps: + titles[titles.index(title_match) - 1].end = title_match.end + titles.remove(title_match) + else: + title_match.name = self.alternative_match_name + + else: + titles = [hole] + return titles, to_remove + + def when(self, matches, context): + if matches.named(self.match_name, lambda match: 
'expected' in match.tags): + return + + fileparts = [filepart for filepart in list(marker_sorted(matches.markers.named('path'), matches)) + if not self.filepart_filter or self.filepart_filter(filepart, matches)] + + to_remove = [] + + # Priorize fileparts containing the year + years_fileparts = [] + for filepart in fileparts: + year_match = matches.range(filepart.start, filepart.end, lambda match: match.name == 'year', 0) + if year_match: + years_fileparts.append(filepart) + + ret = [] + for filepart in fileparts: + try: + years_fileparts.remove(filepart) + except ValueError: + pass + titles = self.check_titles_in_filepart(filepart, matches, context) + if titles: + titles, to_remove_c = titles + ret.extend(titles) + to_remove.extend(to_remove_c) + break + + # Add title match in all fileparts containing the year. + for filepart in years_fileparts: + titles = self.check_titles_in_filepart(filepart, matches, context) + if titles: + # pylint:disable=unbalanced-tuple-unpacking + titles, to_remove_c = titles + ret.extend(titles) + to_remove.extend(to_remove_c) + + return ret, to_remove + + +class TitleFromPosition(TitleBaseRule): + """ + Add title match in existing matches + """ + dependency = [FilmTitleRule, SubtitlePrefixLanguageRule, SubtitleSuffixLanguageRule, SubtitleExtensionRule] + + properties = {'title': [None], 'alternative_title': [None]} + + def __init__(self): + super(TitleFromPosition, self).__init__('title', ['title'], 'alternative_title') + + +class PreferTitleWithYear(Rule): + """ + Prefer title where filepart contains year. 
+ """ + dependency = TitleFromPosition + consequence = [RemoveMatch, AppendTags(['equivalent-ignore'])] + + properties = {'title': [None]} + + def when(self, matches, context): + with_year_in_group = [] + with_year = [] + titles = matches.named('title') + + for title_match in titles: + filepart = matches.markers.at_match(title_match, lambda marker: marker.name == 'path', 0) + if filepart: + year_match = matches.range(filepart.start, filepart.end, lambda match: match.name == 'year', 0) + if year_match: + group = matches.markers.at_match(year_match, lambda group: group.name == 'group') + if group: + with_year_in_group.append(title_match) + else: + with_year.append(title_match) + + to_tag = [] + if with_year_in_group: + title_values = set([title_match.value for title_match in with_year_in_group]) + to_tag.extend(with_year_in_group) + elif with_year: + title_values = set([title_match.value for title_match in with_year]) + to_tag.extend(with_year) + else: + title_values = set([title_match.value for title_match in titles]) + + to_remove = [] + for title_match in titles: + if title_match.value not in title_values: + to_remove.append(title_match) + return to_remove, to_tag diff --git a/libs/guessit/rules/properties/type.py b/libs/guessit/rules/properties/type.py new file mode 100644 index 00000000..6d798b64 --- /dev/null +++ b/libs/guessit/rules/properties/type.py @@ -0,0 +1,75 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +type property +""" +from rebulk import CustomRule, Rebulk, POST_PROCESS +from rebulk.match import Match + +from ...rules.processors import Processors + + +def _type(matches, value): + """ + Define type match with given value. + :param matches: + :param value: + :return: + """ + matches.append(Match(len(matches.input_string), len(matches.input_string), name='type', value=value)) + + +def type_(): + """ + Builder for rebulk object. 
+ :return: Created Rebulk object + :rtype: Rebulk + """ + return Rebulk().rules(TypeProcessor) + + +class TypeProcessor(CustomRule): + """ + Post processor to find file type based on all others found matches. + """ + priority = POST_PROCESS + + dependency = Processors + + properties = {'type': ['episode', 'movie']} + + def when(self, matches, context): # pylint:disable=too-many-return-statements + option_type = context.get('type', None) + if option_type: + return option_type + + episode = matches.named('episode') + season = matches.named('season') + episode_details = matches.named('episode_details') + + if episode or season or episode_details: + return 'episode' + + film = matches.named('film') + if film: + return 'movie' + + year = matches.named('year') + date = matches.named('date') + + if date and not year: + return 'episode' + + bonus = matches.named('bonus') + if bonus and not year: + return 'episode' + + crc32 = matches.named('crc32') + anime_release_group = matches.named('release_group', lambda match: 'anime' in match.tags) + if crc32 and anime_release_group: + return 'episode' + + return 'movie' + + def then(self, matches, when_response, context): + _type(matches, when_response) diff --git a/libs/guessit/rules/properties/video_codec.py b/libs/guessit/rules/properties/video_codec.py new file mode 100644 index 00000000..2ab1cfaf --- /dev/null +++ b/libs/guessit/rules/properties/video_codec.py @@ -0,0 +1,87 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +video_codec and video_profile property +""" +from rebulk.remodule import re + +from rebulk import Rebulk, Rule, RemoveMatch + +from guessit.rules.common.validators import seps_after, seps_before +from ..common import dash +from ..common.validators import seps_surround + + +def video_codec(): + """ + Builder for rebulk object. 
+ :return: Created Rebulk object + :rtype: Rebulk + """ + rebulk = Rebulk().regex_defaults(flags=re.IGNORECASE, abbreviations=[dash]).string_defaults(ignore_case=True) + rebulk.defaults(name="video_codec") + + rebulk.regex(r"Rv\d{2}", value="Real") + rebulk.regex("Mpeg2", value="Mpeg2") + rebulk.regex("DVDivX", "DivX", value="DivX") + rebulk.regex("XviD", value="XviD") + rebulk.regex("[hx]-?264(?:-?AVC(HD)?)?", "MPEG-?4(?:-?AVC(HD)?)", "AVCHD", value="h264") + rebulk.regex("[hx]-?265(?:-?HEVC)?", "HEVC", value="h265") + + # http://blog.mediacoderhq.com/h264-profiles-and-levels/ + # http://fr.wikipedia.org/wiki/H.264 + rebulk.defaults(name="video_profile", validator=seps_surround) + + rebulk.regex('10.?bit', 'Hi10P', value='10bit') + rebulk.regex('8.?bit', value='8bit') + + rebulk.string('BP', value='BP', tags='video_profile.rule') + rebulk.string('XP', 'EP', value='XP', tags='video_profile.rule') + rebulk.string('MP', value='MP', tags='video_profile.rule') + rebulk.string('HP', 'HiP', value='HP', tags='video_profile.rule') + rebulk.regex('Hi422P', value='Hi422P', tags='video_profile.rule') + rebulk.regex('Hi444PP', value='Hi444PP', tags='video_profile.rule') + + rebulk.string('DXVA', value='DXVA', name='video_api') + + rebulk.rules(ValidateVideoCodec, VideoProfileRule) + + return rebulk + + +class ValidateVideoCodec(Rule): + """ + Validate video_codec with format property or separated + """ + priority = 64 + consequence = RemoveMatch + + def when(self, matches, context): + ret = [] + for codec in matches.named('video_codec'): + if not seps_before(codec) and \ + not matches.at_index(codec.start - 1, lambda match: match.name == 'format'): + ret.append(codec) + continue + if not seps_after(codec): + ret.append(codec) + continue + return ret + + +class VideoProfileRule(Rule): + """ + Rule to validate video_profile + """ + consequence = RemoveMatch + + def when(self, matches, context): + profile_list = matches.named('video_profile', lambda match: 'video_profile.rule' in 
match.tags) + ret = [] + for profile in profile_list: + codec = matches.previous(profile, lambda match: match.name == 'video_codec') + if not codec: + codec = matches.next(profile, lambda match: match.name == 'video_codec') + if not codec: + ret.append(profile) + return ret diff --git a/libs/guessit/rules/properties/website.py b/libs/guessit/rules/properties/website.py new file mode 100644 index 00000000..8563ea16 --- /dev/null +++ b/libs/guessit/rules/properties/website.py @@ -0,0 +1,67 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +Website property. +""" +from pkg_resources import resource_stream # @UnresolvedImport +from rebulk.remodule import re + +from rebulk import Rebulk, Rule, RemoveMatch +from ...reutils import build_or_pattern + + +def website(): + """ + Builder for rebulk object. + :return: Created Rebulk object + :rtype: Rebulk + """ + rebulk = Rebulk().regex_defaults(flags=re.IGNORECASE) + rebulk.defaults(name="website") + + tlds = [l.strip().decode('utf-8') + for l in resource_stream('guessit', 'tlds-alpha-by-domain.txt').readlines() + if b'--' not in l][1:] # All registered domain extension + + safe_tlds = ['com', 'org', 'net'] # For sure a website extension + safe_subdomains = ['www'] # For sure a website subdomain + safe_prefix = ['co', 'com', 'org', 'net'] # Those words before a tlds are sure + + rebulk.regex(r'(?:[^a-z0-9]|^)((?:'+build_or_pattern(safe_subdomains) + + r'\.)+(?:[a-z-]+\.)+(?:'+build_or_pattern(tlds) + + r'))(?:[^a-z0-9]|$)', + children=True) + rebulk.regex(r'(?:[^a-z0-9]|^)((?:'+build_or_pattern(safe_subdomains) + + r'\.)*[a-z-]+\.(?:'+build_or_pattern(safe_tlds) + + r'))(?:[^a-z0-9]|$)', + safe_subdomains=safe_subdomains, safe_tlds=safe_tlds, children=True) + rebulk.regex(r'(?:[^a-z0-9]|^)((?:'+build_or_pattern(safe_subdomains) + + r'\.)*[a-z-]+\.(?:'+build_or_pattern(safe_prefix) + + r'\.)+(?:'+build_or_pattern(tlds) + + r'))(?:[^a-z0-9]|$)', + safe_subdomains=safe_subdomains, safe_prefix=safe_prefix, tlds=tlds, 
children=True) + + class PreferTitleOverWebsite(Rule): + """ + If found match is more likely a title, remove website. + """ + consequence = RemoveMatch + + @staticmethod + def valid_followers(match): + """ + Validator for next website matches + """ + return any(name in ['season', 'episode', 'year'] for name in match.names) + + def when(self, matches, context): + to_remove = [] + for website_match in matches.named('website'): + suffix = matches.next(website_match, PreferTitleOverWebsite.valid_followers, 0) + if suffix: + to_remove.append(website_match) + return to_remove + + rebulk.rules(PreferTitleOverWebsite) + + return rebulk diff --git a/libs/guessit/slogging.py b/libs/guessit/slogging.py deleted file mode 100644 index 00fb80f7..00000000 --- a/libs/guessit/slogging.py +++ /dev/null @@ -1,89 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# -# GuessIt - A library for guessing information from filenames -# Copyright (c) 2013 Nicolas Wack -# -# GuessIt is free software; you can redistribute it and/or modify it under -# the terms of the Lesser GNU General Public License as published by -# the Free Software Foundation; either version 3 of the License, or -# (at your option) any later version. -# -# GuessIt is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# Lesser GNU General Public License for more details. -# -# You should have received a copy of the Lesser GNU General Public License -# along with this program. If not, see . 
-# - -from __future__ import absolute_import, division, print_function, unicode_literals - -import logging -import sys -import os - -GREEN_FONT = "\x1B[0;32m" -YELLOW_FONT = "\x1B[0;33m" -BLUE_FONT = "\x1B[0;34m" -RED_FONT = "\x1B[0;31m" -RESET_FONT = "\x1B[0m" - - -def setup_logging(colored=True, with_time=False, with_thread=False, filename=None, with_lineno=False): # pragma: no cover - """Set up a nice colored logger as the main application logger.""" - - class SimpleFormatter(logging.Formatter): - def __init__(self, with_time, with_thread): - self.fmt = (('%(asctime)s ' if with_time else '') + - '%(levelname)-8s ' + - '[%(name)s:%(funcName)s' + - (':%(lineno)s' if with_lineno else '') + ']' + - ('[%(threadName)s]' if with_thread else '') + - ' -- %(message)s') - logging.Formatter.__init__(self, self.fmt) - - class ColoredFormatter(logging.Formatter): - def __init__(self, with_time, with_thread): - self.fmt = (('%(asctime)s ' if with_time else '') + - '-CC-%(levelname)-8s ' + - BLUE_FONT + '[%(name)s:%(funcName)s' + - (':%(lineno)s' if with_lineno else '') + ']' + - RESET_FONT + ('[%(threadName)s]' if with_thread else '') + - ' -- %(message)s') - - logging.Formatter.__init__(self, self.fmt) - - def format(self, record): - modpath = record.name.split('.') - record.mname = modpath[0] - record.mmodule = '.'.join(modpath[1:]) - result = logging.Formatter.format(self, record) - if record.levelno == logging.DEBUG: - color = BLUE_FONT - elif record.levelno == logging.INFO: - color = GREEN_FONT - elif record.levelno == logging.WARNING: - color = YELLOW_FONT - else: - color = RED_FONT - - result = result.replace('-CC-', color) - return result - - if filename is not None: - # make sure we can write to our log file - logdir = os.path.dirname(filename) - if not os.path.exists(logdir): - os.makedirs(logdir) - ch = logging.FileHandler(filename, mode='w') - ch.setFormatter(SimpleFormatter(with_time, with_thread)) - else: - ch = logging.StreamHandler() - if colored and 
sys.platform != 'win32': - ch.setFormatter(ColoredFormatter(with_time, with_thread)) - else: - ch.setFormatter(SimpleFormatter(with_time, with_thread)) - - logging.getLogger().addHandler(ch) diff --git a/libs/guessit/test/1MB b/libs/guessit/test/1MB deleted file mode 100644 index 66d50a84dfddf2af162389d19170d62caa342668..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 1048576 zcmV(xKp~ZHj@|BFH zh`fmM(rw)bHIT)64(bNM{DrP4KcNQtR?;RbeZH55vy(PtC1i}wT;AfigX)~NzqF@p z=3{2&wvL*cw4t1s_~6T7zD{>+Hg9am7Al`lEKjbM$c=`pRBOOPBOwAF z7Y}m=mjx$i7ca@!M8RRrpLb7W@&?aqSs)Bkjx$LVY)!BRwwSjjC`Nvn#I0tM3G$<> z(Vi?3KnSgO_|w|t<+N<~5L*@oz$iSdhc%|Fxq3>QkDWu#A)m#r>pv}2Y?`zXL2rVs z+@0#FVStJYVFA8=i9S?Fah=w2m1r7(`shFGftG1X_ng1-vYBk56uM06fv?IfMQom= znGhIsf@-dLK%I)qmvo2ie`1x_F*RJ<8!-P%9TKM%_r+)cCFM(4HC*7^% z*2i;Wv^-c+YGPY1^>x$H!=Pw1@=WVK)a0v~-SHzpWW+u&QcW^P@*UlXmNJ35u7OI2 zba?QJ+cb~QkiT?!J{!p~HllxZZXrU|ZepjRV)5%+r<}{{)3a0^y<)a9sANieXitL+ z0@N8(HX9pwjqPlo(;3mCPu^aFPGPtUY7Z<47%VvxZQohaf_SyKnA?xg^%h=_lBX+* z@c_M`91Z9u?h#*Mvsp6qzyBcDD5dIh(dQLYu$*>r0InfyBDl76kdMiRd@dcE41Fu8F%ZB%@|`J zn8-@o4aje?RV!75~=i@iU4T zVF0k4s)DV;ND4UZRk>%?fPo+N7Bc+Fd0?_Hbd_^O-M&y2pAlLWVB0)fY?NqRmjG#S z6O{oj1Lh-1(0)y_mh%smX;C$ACJK}~_Mu$l_g0s4D+4h|ydn{}?h!=djfus1h0^jy z%lb(P0mnFEiat?#=Gupo!N%G~KYW1|#j&hx+v8s)V^OFvxhO~V>aH%Yzsmcw0y4M0 z_^Sp_cr-q0>J-YXWBF;>}`f8<<{P0?2Rw)kqPETAp7N?5ps{$EPiom91l3O#7V0K!I4cfL^$VH#-ER}rS0U%9Lu4o}5fe}JvUK6!@laQt^_gEQO#@LcEorwZ5 zDuLaE-@o?0U^L8qbuE!}gL8a?HNr-17E2Hu6MBuKx^YjIZYoa|nfuq}35WS+VCgl~VMpc6Fu~1E zFNS|*&aSE?U))J#puG*3T1ZM^#}1d~z3C+|>qypV2grCmY_{0Q33CyJShwU%9PAM zGl;f`=2~Hi%dU{j{#(oVW)%rHtn`;J{@~KNFo&n`()kI9$E^xHKEzt1a3iMSUK5

+)qhX3Cd8BL%gVYZjf*D4~xW z;^L~loT{b$$*#+lO*>f=JH4KVs_~Wu7h-*bR_(HxVm>+uN2Z^=C7E_~IUh+cyor=* z0tj;POr@tfEg`4d`!X{R0Z_}79pYgSdLhjB8BNB`SxA;y%SpVV|7DH@@|t-D>pCuM zz5h%freSs}*smeqTH1oMo7qFSlPHzAR9zk4sKyK5A6gGt`**Wy(viwgL$9eA7{zK5 z_I22lYLv@JLzaRPqe8&{$|$%bCfMZyUPW}IvjxWDTlwgzfqL&|tZF7a4!5iU?x{`i z4Myu8kJdIMA10JO&!D%dS{8m#I8k@Jv8nT?m~yoWx7O4Z4#^p-M9W5o)#z+)00XDk zEB&{;&M$1{==R~0tj*f9TD&sJ3sx?0+?!{};zupB0foraW@?qWh-l16xPqRP znePOvnneY z@OO`sq7#H`(W($OiKek>vx!|tUZd~VO(upw`Rpu|S6k z?b<1z?J~tB{e1DsI*5^VvfdC4efxj5SmWU1qo);A*r@x+v1@$ec*IEOGMs| zgc_|>p@>5$L}N{n(~GECLAn~_4i8j+?2y+jctvK+yueE8ToE#uW{n_sl@Ma_Hyiqq zDf)nQ#w}+Mu@$fX{eaXHK)wAhhk<>7@7(I#F6Q-JO4OpkKh-2IsUhZ2-5`1tJ5I2A z9?J=*egB2(LW|+cAX2m_-V@Vnna+h}n=PWxVb-AFHG4jP%lbq>P&(@#KpQZNYf&~R zdqv?-MWYN)T(*d8>j9Od6Et1p&yyNn;%6m){TE{opm=5Qn(jF#?eycCvVSr@hKqML z^io*pDuQj^eT`B@Qh3aBo1_XQ`!)63uYjy$H0Jqope!0rAAQCbfm^9yp`J%MvcIF@ zfKfZYTzxpH)d{MXli;K!U4B#^-SeZLi)nQuoBI{`61gh zYlH)huA4K*Lnm-zST$37?o~7ajI!Fmu`BL)$9WeQmwO4n6l9=PCn;VXtlu7czfsYo z?b)cgszcj?t-Alo5LwFIER<)7wcpxeQ@j(=40E}4r*kU3(@+%9i5;hHDwPaV6hTib z>PPMk|3zl)FA8-z0g54=FAWLXf0h84H*7e)u)w^^BaVw-gdDw&>L;Lnr)_JXb8jOD zICHF1Mpo6545&SCO{*1i{=pa$P!*>wH?Rr%$wS!aOhAGLC3AS3?jX_K7Gl z=tLCNYvTz+K7h-IeptQX3GY^`Fj-=A!u^34nN-taR4K$`JKqK0GV7hP!{N97hA$Te zGJOG#;h+<4V)Cl#t!OhUVfVys)t0MS_uPJj^fGCpCea%$9vWL9+!~tzu~` ziR~M^1iaEQ?=dLfVL+|nIb1DY*a}`$`Zxm!s#P=N8D!v2uyfLDVo$4@Zk^r$1|uO+ zGzwFGkH^8xxygo^{YA$7UtTc9iNgGdU&H2HSpouwG~urf5n{jSvIVl3$Pt_<*W#h4 z^IOMp4ay(O;bd~Q$Nyy;mAC8tpGXsg5M;kiCcTAsVOx{f89o6-VW>04=nQcD&TF#q~`d6*c3{4081YDivRSXgf@nSdtl`!Cb?e+`3@K_);>=kgy7 zOxh$Ibq?yT+=M?NHY3$Q<&#eo7Zgy9bFp4r)$sE>rlqTO=swZlZ~d@KwiTf*?XQE_ zR4TY{e=wAtcErTEhoTsV1E_kEVKv3=_e0uV4>4{gZ5ro$>^$|isWWM+Irqd9{TeDn zHSl*HWy^RwRW^-9Rh>(0yPTTFvcl;u%Nn%*f5&0<1}X4-qsSH^#JXZ~r6g3FtXNlI ze`Atz2S`#&7I?dk)tEjkt}&C6pf2A)1sgS!{i~Znyf3*nGjAD|MF&CU8UkY~-BEMGhruwaM#C zl)i)`=O34-pMF3a}{1dLQSCzta&$4x2l91O9kah@c@N^>v*_vPHD_P-PJ#=oN z8$0v<{0~z!sW%o6t*Nqd-ekas@Tb?kHrq*d(H9~fAvN<oqA?{tFgitwwr*3}P 
zwC1sMV>ZVi#@%C*75MFVwO#Mj^WSCxN;Wy>p~uc+m`G_mgMjf(LQfe(i^_|OV!_Nw zeyZivfUqeo{NaE94*&VgjM$X&>1z0(Z2n# z&6eHU7(otQIjMtBU7rMr8w+Xq_aNV>QZUr3?dq=Uu+kD_s0q~}hFzMiKL7(T6{~Q? zR0FxKw={|9ZDVgw`fb6p(?MktyVR)l; z%3hBQkRK<7%Qw8r%q`Rs0`hSu@@$bwlu)cYfWh#y{~&f7S&1C_Jt(P2FX>jlcAuqg z2YiEG%td!Ua_j^NMhVKRwz#->)_3X2!$=yc`s~Eu9D%EYofuCP65NMTBcB2$AF2qw z<}H|zlDV3LE6hcPCnW8~T}BO_X{2TP@Rz0}M)ZqM@DgN6+Rw*BzyC7rlZKiSxE3>kBw74fqd-l)>j_`D;E`fc^IL618HaXIR5*kW$iC4* z8CX0sC4^7FEF~fD8RrkkW-nr}d{5s05Q$^wRkkZs;~$PMA2x}K-YhI~`z2D#t~pF8L&kev9>egQz%h2(9NNTb5V=_ zTgq)|TX*%*i580ES=my)MPnIiAscgm4BE^XYa5}LjLsacgNlrom4D;fKTW#ooW|f$Sg6ls-=7!;rxwPJ)O?GQ znOKSg_-!B?sUmqWYk4?%{X>fD8#4i{i-#nBxm5 z@}PHSR^51UNTOm?8@6slDD*rb>Yig~c0`7w?DwE-#dXjjuj=hzW9OsgQD~uXc~%AK z5N@VaJrM5_R@H@30riij^`h67+qZTQZ@wZ%7&wS&b``ZbQVN&E=*!qer z!pP$@v7%YP`rvOstngn-m^jyIY4t)Y(=VZ@nYlHnjQr9m)G4GaBO|36Y{=qq@|nY+ zBO$`gg|}aV0I?@PtH!gh9X57uld48WPbj_$Ofa?o^_7HcIt8N~$s#Or`($pV6*E-3 zBUB7|+IxLn_*6B}`zT5kLP&KNlUk&3ebh_qxf%5t-1*y~i3*Uuh(#lH?Ob-#8d+n^ zy*wz%ubcY@Fp{I+UmNrROsR`ExVMf?HZOxAU3bu6wJBSl(Gv0Q4~dQTqC*zZXLW{f z%`6iJt=p1-7ul(}!9yxFz*I0gxSc32DVVGC8L_$N@nDjP6;$!{e{j|{;M6r<8Q>8U zMNGIa1m)coFrQ{+Ai>9Au%IAoV>-G=$Ma5AV@%pPpr)dwr(=aMcihC_3UCx24AiZ7 zzoxGWk#J}cmWQ(A#4W{n9wNC-8}RdocG*2v-Y`gxpJ93V_m~ql`gED^Dd8L3s+<4n z8(&<{vnZ6eGZHgpKO;nAv&@2q4}w#tRs&s?zF@0DFkN#$1gS;N1X}~>K{K!hoVY07 z8F|~747GW%%KrJ&X7DxQuLNERhO>gRe`{tUQ&;!}H_a)xg4#1rFqH%aVlRPE{?1mI zS|3+s_9z*lPgh-Y2K1Ym$9-Sb&=1-0uY6EX=@lb|0AH&ZcEiA0!IA*kIFSBsl<0|p zZh^>4MP{~o*UGm!GuLycNP;5+yag0K4gk0wed4+SjG`8QmtWHl4Mu|Y+Q+3}9akk0 z#f8OaZHYWZu9*Dw54Nn4!SfI3uM_rlf*oT=T2}xjys0||+^!J~BaL8mN{#0v8WW!> zNIYS&k5A@MCiLo8m(@6BdZn}z;sM1XvVkh}H9svzx7OW%M~Zii$HC_>qY2%0Re`B= zUyMgacR&6OCGi^O$D^OY_qZMKE+ zW4T2)K_@HWz*T8)QD;#NymT^PuN7p)zjS#TE`L9WI0H}IbFb-VYbQrG0QiMKuUbO( z=^X4%Re*TRt`V|97T`Bd7j&8pipune%`L^p?>TDJJV~A#76FFyHXXTIbOs3!q~u=4gGq10MyF`D%9NUvvGp6awNd>d?9K0!`e&a--EX zEQpgre(_JMvct6m=dWPk|7IKszBN7-uZp>~LPa1}l|j1K-?}*>vo?Gu8ng>j?fbwB 
zhs<{ONq&>`IGJc{qhD{P^@ssQE%{Z4xflBTerCuea)HA-ibvi5(6*D&FgpMG8&CMS-s&X-lnD+H8=n|%>`g7R@`7Y(tX{yY38M4 zV(}J%Obsx<>~Zlo;wtmm0`&UFi%yPKx>Rc1;3>ZN0YbP#B7W__)u27)>L^yHi37HD z>)qHBW5CDwqf_6*viLTia<8|820HQU5U6XC{lik+a!Cy%YNX#{xAh!?zk~E2&{fM39d9a@b$Oshj(Ctj+gvt%p(G4~`#gcYDai)j zfoBT#W?Yhe!E8m_l(SNZaSx4-+_x*MNJtgn?QLwVa>2wVZkpPD%DKBW-ZAb->iA0< zJ9h%gJYKCGN3oAo|Cngf?kgJCH4elg`}NHiTFmzKAyRSszxwJB?7TM^DrPoC$QxR} z-gM+Fvfs}}k(R=GyP7ksCv6k+K?>@4*R1%IzlTmgayJ&NZA3`mE2Mr^`Mg|@Gm0Ks z2WEZLiELE{<~oN8M}r81gNHuiBs0jIi^io7*-@cWOZ>_~WIgiYIoN*|hX`hNUA5^fzG7|I_%0N+i+dN#)4D3R z4#`IiNmHA}VKZ&y(?1|2Xmyq7kMc=1C_Prb!L&ln-LGhqZQJ8&$EbG2!7$-WTJmFF zBta+~fDz>j{M-bs@Sp}H){VD}kt!kPdxLcNTMA^(QRJwbhfES%_SbIfq{-DFDZO>f zJJ-zlAVHw~ob9mkcr{xK_iCMrdRvvcyf#9yu2jyQl$Vlv7p{9os}{ZqCVX;kZOJIV zAnQ`#K@mOb{_jpm9ui&QqT%yM?^fPqos5$}!_-6ND-V6CM6x&qagay5$93uyACb$8 z)!)y_4DLd-e*7jXtY{q%m9w1hsqWWa4yzSkcen7Dj+2W@7a657!1DWIQLe3TAfOHezExrT6$Fs) zu9e^OO$1*5X&w!CQHVU=?NtYb`GhrBREjzt0Ai*p5(gDcPpt(4l9#eI7=sv_ zQmg@4javH1&IYUBZsa-PgnU`?I34NQJW0VW=#AjDG# zw>vlO7r_dMXqflT$l7d1A|s8JbQ8}P=1e{)=T1NktT!1pX5KuXLljSRRj+_2xz~aH zHocOhY#;{9g5jVVSLz~=!8YGR=I&Bo!&|~P%NK7C%tO}hq;$$g*l-+Tb2S`mjqqlJ z_&pX#Yct>xUCsb&agj|F6Eo{fYnf|y(eUezK3A&Su z1Q{1z(x7&GM^VxSBqX|pdcNm{uzI+%uwext{|Tlx|4-JJ)VREi7Y8ar=i=l`_G)M; zAJM6bVP&mxnpS*0tD6SYO(U_v0$XmWPj<`Be7Y)?p?wxErc5?(3QO_9i|v+6+z_WD-YOEe&-+%FJyjR@&R#M8P5=+%9RuGk>QE0~m z;r!-y?{HbR;@1lr?uR7P3ZU3A4Tbb|)Mw`vbtLQ=1@Ydk-gD>q1<`)LWM>zCz!$5X(Rp#iWCIIUWj*mcG_E z?~t4#<9R-5(Ys6)HfIL$8=gTaIjN>ySJ2`tGV7eMD89&BpqATo z5=L?m#I0O+dZst%C2L2jq(qN(dwe4klaF`>6oTG`f!g%*H zs;96bWa!F@{+%y+$!jb{HA`t1f|>S4a^?pQ(q56nc4|x{(Ob6olttTnHThATkAKVp z)-9kAv1+e=;k&q?3v2^l9tU&TlyQPQzIzdQlxl!ek&o<1$vkprt2Y+djXQ71_40m) z3-WDct7B3&SvKl+J7o@hf4i3Al?*OjTB)ys3rK^Z?D2ri7pX>{38hqv1WH=hNF0)vqT0M%X%e%){1=YazUd45L4N_VGCeHFr2Y& zMhOIbqOzS~Vg$QFj0ZPf`~Gro!*o?M%Dg|Q;MDYOj3F3%`vi)`f>F zk|-GhpfnO#Y$y+?YbrOl;Q%xC;O6?FsNX=Pkdyw^2k4wGPq&8veYtf)pwwdEh}B5d zQjkilE_hkN^K8f2DmkHojRaJK=}eP12AODThPo$vtHUB(O$@tYWCGvHHy 
zZ5~`67b81%c7;QePYsR$wkqVE2$0LMCTKR)pPQka44q9Z%w%4Aw25KZf_T8>4HvHn z`?h!8QmXf_?Pk~8zSceCUDsyZLf_@vW4~A@eU-b z;$AzE&G@cgdr1eIpHJ{f1u=_Hh!f)--I~m){!_e}EF7hbT@Q#Z!Fr`6r}3_1l&jXj zg4H6=dLFbgD+FOEjBRg%s$}f;s9lW?ZNGKv>uD5HeEb(z`PC9WT2=$S7<`vGf-Jc( zzTg-nNEBhZsmMmb?+&bxRv3uXQmva(TF~|JC_8Dtu;i1R-CZ>9*1G6HTwuxCo*YOah#H%AB9gOL6w)2* zyOsyZLGL0Zt_xWHo&ANyS^t@@`668MP5+5aImAX@0{St(~) z#D98G;zzoF7pcImHVMpV7v3g%05{kBJ5Ae9A}>*+*E1e+;*f5k?aX5^KXU#SjeymA z1MFFir+*feYieERSRB^_>sp3~aDKK>C(zq@H0_#nR5)Cg1?|_ zpGr(xOWsgmZE+}3`L{QsaY8Ubw_a*4B@iqT)Mq?yZ~WP*FeiQsfokf13jccHJZUUI zk6*Mv-yStPu|A9iG`ZsZ-i~oe!zI5>bm}7|l}puXJN(}dNSMhI@ z_~LwNpJvST`VXdNX(7rt+0wK{#9mi{%YAQ;pV9~yNGEYwfOT)-%+wg;ob%bd8WVR) z$-U{Biu;}-_dHc6296y;uW+|yW72rfAQQ6)t3ya zTodQ_MEX<~7RG%;$I8q|L6QX6qB4vX=jd=$m)8Ofr^+rz*HuPCgST%H&!@<=C6vrU zk~0S?hS3jOiT{5-!*HAYn}l%dOTbj>!+S+uIsb9{Yq)# zk%Ha0JC@21Y1G}LndU1&o(MWkl#}=k?=G?%9Qc*veNw|8u_f;^-Gn^J*w@|2zi}J% z5FAeZiqFXm%wDAUZ~?CRN%?H|@9T zR7X5AhNDkwz!WO$)Nq>S`z&YqBtM0Z1jS(S_F z?&&yTc@SjNMqvi0D9b8KUSlW@!qXN8BT|F?@-+^5U)f1}KR?NnC?F+mnCWJDP531B zZG9&j(lI?JjAr2xQjADWg02f5nv~_Dt*LD~>0(TDA=H>*Xl8As+x2`+9B!0_u^W;H7Yv23*Irq1cpM>G=G6@9=joUK7iN1N zV?7=X4HG`i&_rAmPw9#Z4g4&>)FNg^VX2+Sgp%*Dk!MSqKTKm}-BXI9c=((CUybq` zI=e|h*4V3Kb#+NTRjh*Oo$t!Pn8)FPn8eL!4xecEpVW5M9SXx|paAY?3QJ7Pb+TY@ z!ZCt!+Q_L93PM!Y4N;zfHLc2$EMp&-*?M+NcL4KTKlwFnQ3Lx`VX<9Zs~B3it;?6l z=gmW25$c&=QCyh`PxzJ-95664v2`(e2Ey#XAHJWgX)1Jawboo_YMTQD7*h_ifkvBr zn`9Ppo5@&D{n$X0z3rbRge6`^)XB#daV=u9v^qc8A|eq56av-keGF-lTX04zwCOHf z=vAoEod5uCXQrFOnP+gD+nNx9PjSh|U^cn|<~IJz3*zw2c|T_q0%wKRaHaV40IE+J z*sKkAi{No)MgE8|sL;2zcA_DhGW>w(>+)8*KE?q%=j8HV)pm8>-&HQ4?m>NB zh_mP0EG3JXiMJ?z{Y**3-B{uMV2I8!Eqn-;!?w))Nu#qf{1-E@?x+tI*iZ$N3~Iua zV1NAp<+iS`9Q4-H>nk)`nH9&zB`^=SdAC^Hc6Ewb^fsKyrLs*%XoHmb(mX1y8>{*D zqWf?!a7BkMUC}tEX?@^P!@7=l zhWwwRScHeyc;wX2YKbNPnZj-|iugzpRM{$HK6vZQ0cVhraWN0#41<02Dlm5R5Qszp zOI+=avy4uEStOYN`3KDd`IL@lvWq1KdQ;~kVm4B>GwFb(dI8V5koVsrX@ z`P@#wH|7*zq$?dF(c{bzhA4^nwyA|90z4f3Sj55aJJ!2ao560A?R-GDa%L9>vFn^V 
z=>o-2S1Y~x$`=UVYAxsbCv|kiy7dY=Xw9&kB^66;D24?&5NrQ$1arvNbYV?Ie;nRB zcGiFM_lxI=dLY%c%bT|2D6ZBV0+kgb~7g7d$(^lEV(Ck@N*rIeH_{?>%qxEF^>=n5)?CIvm6#gFf=&2DZw9VYj)4-KxImHZ; z%n!G4vw27(x;=vLsFc=DK{PdKoYpFpu6z##nfFCGGogEX*Ux#TwnE>h!wFbNuYSt} zkfQeXr=%el^|xanIi=_wy8BsA?nAi~;3-hWhlQ(6V}uVhMSR0qM${&A4?@SBW_ zAY|R9;Q?~r8}cD|RF*HB#ad zr(N?F!dbN5YOBI2?uK7N)HjG3B3Nw%wW?Tq-ffV|4)F$WqUk|fMBFeH&?*R|7vWX@ z%E<3e@9$t30675s8u%LhfA65{_AJy!Cs#Czp|oTC7n1)*0k?m$h$vbM+cf&v4+TS;>G1&(wyhZ_x72~IPMk#Edgv^aUi zhpdCiL|o)YQUCkYmYt;m+pm>9eZ1I3cz4Pe5P+#7(5-M;3ajrbHpT@CQeXA+%FBQR zqdQ2-)ICwnlFT;IRoDaQEll<>z-k7zx6Seyg4az&|D=DEWu}GASs|Eek2JMJd(^70fFe*44zKh`r?kiQuZbZOR?qFx7sNHW;l^nKkgcWxGCUnW zv=HCeOK$|k-*D;SZ`ysV=h&FL?Upb|M)9}#4#mQ9YvNx<@D+xqz;N1e+=!F4F!~x< zq+wAV;FgkEw+glyt#);ddN;e$QtI_69zl^ zK1c_&X4GLJD%AV7L7{F#sA~K<(PB>}|3t(4`v$65#?RJbVj85p;6yI2GCFh_QGdOt z2>~bD`Qo%85N0JLDWmUk$_(L-#z0t%GobRt(5b0=NCHm8sQPJtWrM4E9tnkyN&`+s zoiF0ashg-+@9t4&ysFzKR4U2OqhcsRl-_MA8e@CSTV&lCAcPwYQV@`LGDN&|E?+ty zIewu#2A?&`Eq*?WbA$_6&A|oG54?So(XkLvA^H2eDO;1=!l4s^Pf_30(F6ppIAo`6 zS!xh{0S;qU^m1 z&U?Mo=u@CrOr5AK&g$UvAfwINRN?dRkrxy%S8^)Ar48@a(v8;lb%JM_6~>#s%U&-2 zAL6y_h|EIKKXCxsa_5G?`U)VE&DCYmdRf}bdsSrlL@S}fSOZQ4N(8tBf{iFHKik8> zJ(@8|edBR2@^@D03$Avgfv=}j8MnEBO07SqR+$v(tp11S>;eG+)6KcF%6k}MleR|{vz5@ zbSUVO#yLHxx!9`TmDfgl#$Q4Ex+9Cbm=UHju(=$a2JyMh^tH5O(ZA7WlOJsNv z1mFl%QW#Bn)iJ*0b-7uN|45hDVt9H7uqvvRcq)~G;8))BD)G4`1shd*0)VHq>KfAm z&7rT1WdNinZMOlcn>z#@+Gb=8k8#pQBmV_2gZ8UkSCh3rr%{e6O_8@v8miyNe2LkF zw#9N=&uM99`*fst45L#BhQ)S>*gC-ex`ZrvAU?AB9VVxdAX#ue=qE1N>_94{K4leRX3op4CfJfL$x7E{rl$3dMk{pxFrGi;&2v0=39D| z?-XL&u>5ZwV;(a)3TI*eNAnF<_!yKhL49e60_uuptg z@Tp(3pE9HeF4RktlORn)q1P?BaqJbv^Wzb|f9LCgQabjLET>6lW)x88BenefE9FJm zBE#ndlbOK0Am)cg>B&G?D!Uf>j>c15oGqa}<`j~eq7h$@Q}X_j=Zr>wj0yf)b)?g8 z&P+;NZ;NMT_kz_6Q|+YJYOn=3y@L?}f78#CFGt8iK_bgm#sVP7ikO;ZwmU1IWGC;$ zo}02tI!WD|yVN$Yx|7N=Ax;oykUgy>>8wgbr!OuCR|BJ^aDW?qC4>yNVd|?RaQc20 z7Kk)q?TlfK{+aZQrY=~v?q!-sR8 zCnO8b>olB}oCBYDcB!0_fO(d`k7UyWL<{)d{vWAmz-vBtS1^Bbc;Hz1TY?z~Ml%9Q 
z@v{%YPzZ_NJH42ESvR)|Ya}~Q`6EZJ4BDr{^6;3;9ba}0Tze-^r=T~xNZX$z$_|_% z__9#WY+$x_V8-oBCCp#?z^bnB{p_e&>x*SDI)nJ+RCm*B4yd5}#If$+vh0<7e5GU` zU#lYv3HRdQ`-VK-&I$Ie`Sj}-3bQLr{m7>O_35;fAJ~MbQ^#Q!#lZhQbrBJ%_2+|! zx=aj*6nP~QX3?u zj8XOF`pfx}03nK0+1#w9&FeF*CEj>)4sdhDfeD$L*J6PbHuTu{-IjZom2e39LB;@Q z=y!mv*n-!JSTqs$hwpuMT9y!fYD$))=&zmz%}z zJ$~Nfx<65Z`6XCJ%^vn?7}t`MZ96S4+FsnW!LR{FE+nwZ$xYXfUK=Ew6x1lVB1?e4 z2{C{IP_GGFW1PTABM}&=ZY!V4{{u8q7myhRH6xOCFhM>1qpo@yM0_m5THAR8ynjje ztF^%e{VwXil>;R|3Q8E_(;EfAv4w_%*Kzke7Zypi+MCLHfH(ei!@7h)R_wSC!j>0Yxn8x#xO;=cqbRRisNo33ELZ5(S3lhe?nm zGn0hsj7}u!5JS>XG$xl3H53`!QXrD4Qt0ISzD8niS5{3_BT&Q!e_=7jpzYx6ZFRA8 zFtpJV0Na?OYA7qp;bZvT*+cH+PZL}4?#&2f2>6akmF_>HggH}4w;`NYHfQ8L^#T$p zhRh@!S>Ply(lncKn&C9^BUV|T@TDy+cLchJ=*w>g}D&Z^BeDr>J)Nj4ZSn<}s!NZ$D7d%Ik4g>)&S5Zl;d`Sd{3%B{1+@snt+P`+GVFPrE_PRA z5bO0X=YXd%N<#GVPv4t>168zhu=F6?_tMqk=#xroBR>@pGZ#^X4cB>UzhLcCQ23bG z!Q`?V${IKotHemQ_=wLB$%;+u121OTd$t>v2*_k=}{L ztF6?Lec7NC{-Q7`GFZEX-jIUNLo^J>RUJyWL*)bp_{>F3ofyHhSuz?u3$PY82JBY> zT)KjGp$t1%28d6>QrLZ(&>IJ6loHp|il9Us2v zJpYeY4DOTxUgLcAqI&z;M10wSHoPfFCZ@4W(qQ7J3%Tri31|aU5ml{KI3tO?(D}fj zMm@5C)kZI6JV!770M+X0j)2eHc!Y1^4%83=9rb+JpFv>G25Y3kIGq^EC9hOtIiN02 zDdZ@@&CziIHZ4d;%T`{#bR6O$+YO`JY^A$jf2_%T#w@P88J_?(`lE~rRMG#>QI+o< zy$p05KulC977~GpWXph_r=q0P8?7iffS87xrR)ysy{o^vxRQF!OV}UhAp)j}P+=#o z@EQUS<2#$E#<_wj1eeUX48`RYDA$QYjzAWE@%>6f#9#TMsj$|4_R%9eO^>U)# zbT9O~^Wg)n^t8qL-75N+j_8iR@E;?f3FXgHKhd3HGb^ZXM`&z7vJvd5O&s6vm4&`miy@7GY4nl6_6tkieFC?&m zlr*2m*QOHIFWK+Qj*6Gay~)wb1-#y(iLEg>(>>v?% zRy~lJh!L}^tya^{z6l6@>u4NhIXbQl7s%6pY(Z-mD7>Sq>c2MW^4Oaa_0-jkoWCOy zA{ZqbkAs20Ju^RR>$|1_W?hpjGBa~wpPUy%@jjIA$+$LdEHlndi}Vh@RbRyRQ5^)~!FpGy0Kf}dh2>rB*ig_4-*dNmOHWcEH9 zN?d_itW$(mu=D7B?!%Pysk)CAI~DrGo>peSB>~r|3*Z1LK-RxGCLRfVn8@ix_*!^#t1a`~DpPC}!k2hZf5O2%CfWFx62#^*3J6s*n@rzfg7sU5OY(4O3(Jp**U(-tZX9g|DfX=e$B6*fKje9g~6V9D{+xn;W$<|8cdyv$y6#;DL3Ln zI{cfpDMQG5gIH?K^Z|B@)v0}MRH0ciFD2D0CRqth-&;K{)!Q$gaND%O;m?)F-JXAc zx^wQCqG^Q$ZJbmpPaW==!$-FHdf)zd?3Ykqj}1!uqBSUbQ6I!4-2ZXTjH`02o5qOLdGL-r 
zXlc`#yrHRW8&0WCA!|E_95T8si*!Wb!+V}D>&0WTr|c2uOe<(QRs*v=0V9eqivhSt zHDR#yCG*ul0(pW~>-VQMSyMal%74~~zz5!+@NT2(3>aiNiYPN36gE(l)|g4pdSUuIkm8%7NBIV0wbUbZL><*Q5sN6M~N# zTbrljE8A|7AFq#6TLuSSs-vJp&9Prkr&U zQQLHMNxMa^56hkee>D3FbONO_=xtkk^*64XaH#xNs6XLpl*_RR`)jj|i|v{T2m-C7 z_CY7)fB1hA4XQq8&PQ87QjvA3P@Fh?xzF$ioTQO%`&VFQ7yRm+OrHBtElj;cky^HQ zj096z39Box2X}U87|&~W=k+L6r`cVyq3BO+5v+AV?i%9u0|s4e(K(dVCz3Z^;6_}W zF<{TKr=hqZTfU!L0yKL4?R0bDn7o8-MTm+Hur|wwLvHfgktO)S9vB!ZAyv%cbDOEvU{c zhOtM&=cmbQjbUehwD{=~-8WQ?j}G4s$jI@}*fi`c(MDTf%FFN5yv^tJ zQ$+H;wNw507sK0sfmyff2N^o4N+-DVZJR0i|Gi1UC+WI`)-xG|H}n+_YTVv%6a>qn zlRc{}i~dj=Dg^jhc^O!ko-!2F0JU;e5~_)jytw51@}@57J&yR;uy^RIy~?JTR&P>6*vub<^l}*15h64bQWVIpy(6+RKaBx z>%ph92BCtC`@_sUvFM8&@NCbTP;;9k*vnvZD_j9n()WraTUqmB&XMb|Nhfl+T=os< z^2^0{C4$;IK@MP_!gy-BU1q_Tcx}tRi0az0Sf>^>P8neaG86vTez+mOF-z{yytrES zHFa-={|4?mrzzr@_$yGT7h{J?Xy_ThObd-Y`w8@*O(6f-s@w)EB*QRO)`!Oi`bN1} zFb|DQT(3Cvd9gp9G(k()D|h}oTwErkx5VkhM2AAPP@8W(lCpjKqY|=mgpweYJnq_q zM}7R%>FJf2#dNRoZi!8p(hf#=`2(0rsk0y?qg01htiNs3pPEys9pyDHV!#9XMTo*J zr(uai<*_O?$Z_;>>{&&x8+rIQx}94T&1vW9uO;3aky@@xHYo1xa30^Q@WVj~dQ{%b zkBf5&1$1|N%0Y#rlG)&rs`_~Iiwnn`zON$+DBW&bMiPq#=cGl!v2r3Vm^>L5;;vqR z-0MJu!`4exP;64|(kfhlP1>skw6;MQ=$He6mJBC(e(ir+(FZ~hS?_T~A2nQU-mzu1_Mf~`(JeHyTLLPP|B3ZDiYZpC_FfIQ^$9KWq z>Vt4-tOnn$F6r((D8T_HV@SeLQ3Tq6T^*Ge{juxC)WZ|Mt_^t`dIEcaC;?|~EZO6J zv~P^&sqhPraK^BF{wZf5|)>y)`Wdg%Ly93Po&c!5B5MbHGPw?iJ z)<3?bJIfJuKGS+Y?Iz^d$ZpmZHZihhmjBb5NjN*@aVy;>LmdR~@m9S1OXFqgd;$F6 z8qr^*jaLsl_;)J?mz2-JNxhV-YUU*1bha9@n&=)#JX*`2%s83s4#@X{6QrTO zY*BSHt$#1pJY1m=SmVk)$uIQ~B&eLC@2R%FQe>a?M_=3On82w zuaKFcDhEk#TPXZQ|^S9KGyVl9w*c6W)d^-sYl&TBXA=k^#JK#FveZE6h1DEOKZ3nb1M1fbMh1n z^-W@i-&o@p6{yu%QX47P`>F0Hg;hc08^E>)mSNWY*fdS{8VsmS4qJ03z$k z1nnqKs3Z6ped?ju`qFK5u}5-&i-9Dy^=Ple>`IE&JsY8A#8IkXT zkr1qVf9~AH1(i&di)S3Oru#0YlV+THZ7^i8-7Rv*^|~I-k}3=NP#08VMlXp$C(DB# zVo2{Rb21H2iYw>WB7~sPNlzeq?=@C?5;!wGry_|FsZ?aF%kx@b9lUNOFML74kGS~Q zs|YVG@^cwF{?DjqqSh_WAM*xP^uS#+$1zKl_i?W7DzIjuB{#ZP+8<&~6bFM@PIIA~ 
zvZCYas{3WZCxb(bA-O#9Z;YwvIj(!vUJxI^uJT8N&=Wj}&3pDK(fBV`+|?{v%8}{9 z%DG|#Ck~97+@%vWJiojSe6%CO86Msh zZuKonqalO>76Y~zu=gyt1*Tm?RAmG>GwND|3EOYQ<>0LoS8sBUwOy#|rZju*&SMvI zz?#7b@bnkC#{c;{l<+doNf=+2GtG|nycv=HZfv%t!}|qG9;G5*ukg_54101=*2L!rT4A#{i*1yAnQASxNvuZf$E2^}Wj) z`T@TOLdNYgIs6re8;6rgvNhetwiF9Px>d%wqY-vEzs(33HRrUQL+*8g+9bV7BAFu{a4Qchjk`L}MPa!l0_!IY z8AXcuKGayppNMFdn0u88?ODf@z1@m=tmRkYG*N)>{jf3J4=UfB(npL1Q=~S^%&#`7 zBa=@&yAVuSe0Gyf2zZ$2Nx1sMc`LznP=zK3u8kFh8rRHfVD%#Y-G(1rv_AaW3{HR) zwQ?}`(%I`Z=|!sVVGc4etS*OCmHX!M3}wL)iA?)o4FS)mr_`8-3hWh_+%Wp349hLu z#w&igj6LgZ7}>0yNlD>gKZ=UjmH|+8%AkN$sQ&~C3@Co)kz)tnAH8u{I?}jAw85CoUF6r?(oe|Vq zFw5yhy{&F9a|u_Rcch3s_6Va^{ki&7rdaZd76G|NSVoK>)C4)h8b_t$UEdR zy*sc+&aQtV?vpahWZut%+_i4Iq3o^EeCW8_JjFiT*lzJY={K918K!wyrl7859?2|b9XerdWPobHvus*ql>}{MGRnxw7OVA`84C+ z)?lDDn-lkfID(u~x|3uB|G8EwnIIVB54?ccJm{0}h5YB&VEmSllt0nJZw=2ORa8Ph z-nd1zN4zH9k{(7-@2$+ftc{`rBgU3mPgQdP>|TTCrBe~jw=s{T)wlh1`GM2IKoNDw zvOXLbBI{=INF?zzn_sG%oc{-U{{Aj&$RJ?Ng0aEd>AE#%H~6c}%AH%W#>v{6*Ec0W z3i`^k5U@`HikWYhViro^Dnn$WR64L=qI{+QM#lC_i-6GX@PSr_sj5LSEyPZ*e)@2f z(}L~KN@)?33sAenq9hCAMO^Qw?A`jQ`F=CqF7<#*R$_MvlL4fMaKd|7?tP94xoc`7 z3dg7Qh(p%PHZx@w&u?u&tO8F%w`e#iVxhZx@F^t1i(WyFx`KgFE7?&zU1f=X4^3RR z942a_kA9Qph+5hV52A{2E#HPJfd3fY7yHcp7dvaC%x$J*^q>Ab?wZHY{zNUh+89zW z$1*k?4UCJEv6jwE?>9f9{3d*k;7U8@H%Q;o%@QO2={ww-Hi=hf6l;oD%X{F*1&h72 zoWoCi_fX};t+>XYbI)V5TcvAr&lP%gzv3FaXBW&tRBvJ>>|dhhq>{Q@r0eDT7cFaF zAWGziN!h&=H7H$`>x4=JZ9d2R0));yS{~8V{T`({tZX3(ird=~qK>e8H6(8!py7_# zW^Y}ad#5Zb2Ws{)`@Lh|N%UVUg-0GuQ!0M>oP#JDGHg8*DdBjO9kV?#n3%nGP9!iL z;g|_Iz+QdtLkresWU+XTfKIk&d&>>c3hlJJYzV7(QVKblV*^ymV~J4_+}6zJATRd+ zZOEY&iZ?IQsltOhU|thicit;}`s)Ppdi?b>AkCUR5v&V}Xe19S)PW(O1r@P=!*Xvz z19=32#IKuQ;G3Y&i|-+%46+hN_KFqU06Aor-{Y6h=J+Y&P8~??ENb` zfg5uK(TlfW!Z$<9iv@nq43P}U!#K-L7SF1L>n(RoOEoM)P0<1jszP!BxrpT zmRF*~I#hmn6x)MLgq&E*(1G(72XxNl*YXqYO~VQx`Z=#iCth|0IGM=~!SK5Bt59pA zb!H19h`sDcL69p@&|^_c?GU7K3h&)+s=p)WHz7m)SZJz~EsVcuaxTKCZFoLio^J%2 ztNE9PiJW-JngtLbO)dCmxV{}=Q9A$IEHwc!fyGG39H>%W(Ig6y6!y+-9-&)6fYznp 
zQKLz4`ny?Fn?^^~yhjA2+nSY>vx`hxYm&W-*}$=?A82=YUg2aG%C_~+uKBRnoVH}; zKoBg&SMLO9{?2;Zl$^Cw&x=O-HGj(dm42&o;pOflTgzzq+(fk-S#b^jaXoN(Tr zMd_!ROmI(ozzIV_3{tQ&XOw8y*tZ`nVisi!Vt6GVBnQl5D3BR>Bow)}kv=D*{009pVJl@#qByoU1 z3~U;={@LcC(WeveGuo!}X*lkBW4G$6OL{)Kl~DYcKXofGo=+#+D&RR;?H=4=H{^lr zf(G1Ffp2Vtnkx%|t9r4$uXa=!5fe{>ev!N^p5216&JPthnbxl_;-WzQ)wr#4Y#y<} zkZ0R(wprHivOwm&s9Lg{WOyXm#w9jx(owC(8L+4UmH-UszEWwZ?VkPj#q4> zhycz0Ei8PZ|BK7Zdnj{9o>%Jg@jb_-9mopaEKsWPX$*Sl+du^}pRK7Mc4kaRK7=+m zYlc+DYe8Zrp8FGa1SuU`%U!f5KHu*iB1l^gm z7}({tC2qHn_$ugK{LhtcRB{!6*|`3&rcn)=Ymu^^D(sIzjL*yKb|AYoxU&UkWX?&E z#~IpR+BFZ;U>zi8cS$l3KUCp_rzr`!{mA$7OQtrQNFmk{5ie#}!1sz+`&7Pffb=I1 z?8NGx4DRUXJOFAka(J=P&Wk;5A;BD=LQe)7g*uh zoEn0*+7SQA#br6#7WHa=_PZg_TeC$;{l1MJ=?d)+fc7YL_TrywS@)Lg?8mCyGO%v9Hf>k6xm87XuWN1e%JMN0 z84v;(2;QIjd>S^))*8EJLuK3IILx0c4}%NsilQRRqSfL}NS_ zPQ$H|I{dhA$#bJybnfXnS?zt{=-;4?0Qd4XSt(I|Iwv$P5l{`oy+IG!hA5}}bZ&}P z=36aqA|H}M3V{}&AtfPAxwI`Na$IzRI~fD{r}B2Uil80hUK0jnc&O-Tt_}!Ykc!~(hKin{*&b=K3Zw%US}BFLdJNA z^|2}?f(f+mA4E?wB|90a)cjKb;FJc*`18ajJ;L73j-}MG>c-xnvhlGK)d;sjIm97BvAG)K5RQNS`w zI*lnyaefY2B@$Uol*%mPr3?E{DG4UoHucpgyNXGJ=-|5rs!I)I_WIBX=|G8T8`hRa z#p59{W=|A*sO6oS$kk)C4<^@X*SMy|7hd&-h}UR|s?47$Z$_sayrg!ucHy%w@*?uNLQ7j21|@RS@2U zOhcmQlp(IrJBxnSY=W=nqH%l^b|=E(X`3kl+LarbeSm#_S2cL6xAwv$m=Ns=XJHlO zl*zAOc8f2)~OF%>AnH(UKHQV`w^?K0~ ziCF{J^l_&h4R!Y3gq^*u(@yQ6Vv`XT*datfSNl_O#f~GuVA12*m?qaFB|#6D9!s&O zj4-hENA3R8EoQ|MB83_CdHhWQYZC`MyYBIq5v(E%Nv!0Q-Y$7_(-5pOV8sESC>b3g z{jWTDDReqn`)9#`L%GOxpoAAogW4`zXVu&G>``v`jsOmtaBFL6^dEO@PAl%F{EPmh z0@9u%f#DHUm_u!<_{s4@sxAdJ8Ld}iV!{kO<~90YQp@(dVd@x3A07IxYJ+^4XXPKE+HIV{?UK%qe1?!iLrHU_ zT7VA_rTbHxFbR`?321oI!z`$1HZuo?9+%TkpW-U^mwc^M?_XDWcKwJT*&FqM^Wm}< zp$l#%c!}{2C&4-0p1Fs68FbNL+n1NPYH<-hMZ6rf|yd|mRZ2(9^LMagv~*N4eE3ZJ1wV`(PS)H4U>;wmF|I}7 zdXDQyXo`*A1Tf}^w7Bjk=S~(ruH6X~K=y!j=#b|BX>Op! 
z3c#Z{f4%GI7c!)wm7B1gB}s_N$q zem%m+F?bh(%uS31%-6cvvSDi~wg%-0?^Qhne<{@q!Q|xxS-+h9mhJax$SP2Q(Ya=4if& zzH*5qv^!|pc@oyHO~T&r3CjnA3E>O_s}bJ0iuvB0&O3on{ZqrF1=1zd>k~C7b^<*c zVLE`_W`=m9T(3}oG&$cAK#lUX2V#CUyEhn^+qOc8yTUt^yQJ!N+8{?&f~26aGb88e z4~G=~yGbE0Zs5nJZO6oN{!3S!qkP-u8!5CGmu6(R&RBUftv%Rrg1QL$S)u2;-!8M~ z%u7X+tS;h`lnR&!dk@$YPLRaGbLAT8bl{D8LHajovwiG?6Xu71T1DJ=A^?6#$kJ<1 zY4LOBF(=Sy1BUAxs2}Ncu38BVwVneZRj_Civ2?kQP$GhF}^FgHZtbitT z(E>#zi$&azm8)r9x8n=zFOu{e0!g7xXLUNq{nN+c96g+jqT4pZDZ(NfQGYoQHo3zY ziwbdwVfVZh3KWvUcB9w50CFkd&xJ-^Af zac{j_7BOJk`qbLsgHEzGGBB<(RGc+JQOO)MHsot|vUcl~E=7Q%QL!Ra69TIOb( zxn0-%7hLE|j7948e?KaV@w762s@fuQn`T4-%?(6m#(tDP#&EM66dT~t*n*mYzmxW* zwCC~OJ7r+87w3L|0fR08tLnvp0TyYF`B9k}%fmrA7oM`;jkN?}4*~=&ong;aQ<~M) z&UB-mePZS6%SYu?r9^0A(NA*q=3{aH55D<9I%L3`E7#) zC*baD1EY*UaZPE4fwX2g*~MqQ?MNid&8XvIw3wa~!NCd*e*d0VhakCwun$IPNl$(= z2F4cNyCR)hz$%Q`u2sdrsad+(Cl5BvkD(jAVR{&38fp#tsw+`kVMLZ)l;G4`N`}7h zm81;zk4aA&2H^Nb5h%VyNXiy|2VHpLdDu9v&A@ZpyeH0fUKVR%+c#E$OHfN|A&?S( za6srdD=B@p5TQ&(Y#fQ|T1e$FU$mpp2ltjJjm?ZbuJdbrKbA@~nY2+l>JSt!#$gfJ zs^B2Iofj}Pl2s`~6c3kpunnri{jr#;V`$~~;%8wiyR(wb)3DwrW)-1BiVY>(d7FN@ zwoTjlNOe~8N#W*O`T0SgQ3c!*)=OhQwa4 z_I`>p@bl^ORRZxRy9+@}h#86FL`J8CjxZ#c1d`8wtj1Mm5r*#Fhxh6mJz=-Uu z8I-CV`Ra%u%qkH{mVHMQh-m*`sh8U6*f)3*v@aXla5=x)xFV7M-J92aWV;Zl~I2q#Q2 z$Wd_KyOsVcX!M_!!?74Y`RE>k;@d?+5VcJWq=d!ew*`V4Xw5C_45OQx_LuWGkma=? 
z3CU0G3ogwDNbrZ_FTCEqF8*5m;_wHU@0=`?k6R;&KN7%=3GA;$KLVSk>91F-<8G)@qTyD^ zEOMc8T+&DP>mwl0leQ~`8P7`KRba0`MS!m%veD;VyZB(1OFigpyQ90sj)oU4Ln1iR zmv(Z?!(c7jA|*u{Ji-;5Rg^m1=G`Q+rRlQ1X@cI5m=wlDg_YCm5DZC`UZ9E0aeUaW z=v?f%QottJu5NOmN40?`{LmJ_z)#EBla5mbU*%2gsxPNJCk*n$Z}~l#O1JAATq|6|twnd%9y`U6Iss4F%I4-1?%=W@##heScCtCbQ1!VDBo zV4OgA*A^PX;Of4p1$(nE-+hgYn7w#asb2zR@I%_17U!sQ%OzgXSO++qhMA-=;^HGk zeu%cBBGnWYWo9S~mx zmyodf1@6Yg#x(h`wuZ`3HpULB$>QbezRC@L9`MXR3j?0zFqN@?lXOJ04HV;~vmiHD z0kt}{s>VAz;76v`XS$`c=;a`w6pt2FoiW|2=4s3Qxph5N$mlw0T6UgH&TUImZ!o*g z0G2{>p5+&K>5x=*e5f7rd{65Cs zMY|(OP^5y>UgwfuR-MRytNOpPm~G&efiZ(|f% zj$as~ECZh1D7@wIVEK=AFR~$uM(TZ4>$LTgqRx%~d_zJ KF6>-UzvHY!0or^Kwi zLW&fm;lsCh4huS3F6DI{wob9rd&izmQ@&8$brS=6>AY3?JD1+}8@31eue7}(wFU<^3G?L0lk3t4$PCQ$#x%R(c12HHv zG;L^Ose}G3G&%Fv>9rq?lJCyte1^%hRDlGB|1lH&rP>?EDQBMxMA|eJNQ1-!)-uf$ zVnVg8q_qwwe|IbcCG|27nOI{hP{X02QnYH&y#TMxWB;0BByA&fS$B)ft}yRYQM74( z&Z5MhpcGs1N$0AN@dB=^*e>llAV`2KHX6BW~`q z-_hv{S0D&swcHwriBmPA2h}iF`f@DP}cX7AESsmv*2B7Sz2hQ&d;J5iuiStX{oY zX1X`Gcdgr1K84C8UI}Ks`M- z=T^O0F(yNiwT9%WHx@0`0?o(3>~JlejPwAkm;(oftSkloB(}WXvJ6>Wtr#6mKtOe> z%aBxeC?Cx|C71cKeqQ_w5iYPy5avBn`ei7(|M7OB|WQ zVBq%O*s&~Hg>yLPsG+qD z?4_f>Ilcf%7g%#Rl(h;p*51=mQ>|Kl? 
z+sQ0~iZ)B_3}a|KkoZ2FqiR(~tzj9@yuDu29n7$UPsq-MV|kNL%IsKgsc_8~Vv<6E z2u5>xuUy8=*muxBvgPf^DY`f+*9CjP392Sb??!Noi7t8xRaDdmvc`8(92mUAg9YuV zSs1DU%Cc|;HG15U21T`mYU6U$;jVYw??PAC3SjaJOgLglpE=wq|WdbUOp$M-Oi1*2`5%R1_;jS0-lv)w*Q6ptq#H+ zH)j0ADXYq3bIrp)@?y!0oA@^ib{6jiIgDRlxiI?93vYd*E0PvkK5KS0+E#H?HTqk$ z*0-l1clL{%OKPb?)AcigvK8w)AE)kXMI$XqKm2YCz#nS%Bg`rKQ$p9#_AgXcaN-%zoT*fW^ty1UVHeq2(>Z_(010x3FdU7ENu|m zy6DTKI)$*U_b!X$ZsH|wThoD9zy?M> zPyi#Ty2|TS0oQ*hT#|yL8L+(vaG47;{frjHid+{ch?A&bryjcHX(oUz1?ojuaJCH) zNIuTC^zW=Kv?a=N=5-5ARUuEV2WORvIfMupJ(>E;J?p|XIsqFXr$IVzfW{+Y{7_YCBra0F(07%ankDvxPb=FNs{GLk0zzoC3 zR80q4%=eokh?z9Dhb#BEL2eT;XuJ$(&eA@pe7&ur591D z4)N%6Hg9A`sMei#$2u9e7Z>kOc4ppXBp_vxraPkJHK=#P{dNLc~RPf&EZ^)#bD5I9M_xQrKkmRJ?YQ} z*@{wVjSSHJxbliZASot=y;}-cx640dBb9%rzTa<;fz+u&kM#e!kJGm)Ojy2qfq%8) z-*-<9OYKDWj3_?Yg8RWtf8_SbTTsd46crcB)&nIzDvYlRg`D@HuLvw=l-{OMmvQ`^ zi0g-su50-<)_Ng`1OKk}Ot;75>{S_4Bw|lBZpAWW2Z**l1t41)qQc5G=A&bjFt7#S zsu-~|(KKpH9K^;DN4$V~NpJ8hTdhGUisWp7{<;Oez*4&J)`D%(wGjQ?{JQNbBIrxN zz}bcT)`zI=$LdUBzX(tC0gisuw*UJ_d#72Z$$m)&Ak{;+JFhl!7LQl$jdh$(UI2Y|@g zZ=~)4TMUYR>jqNad{yKCmAA{<9G)-^rwEFPBx{1&oTGv++w!_LWuSvhQY2Y2L~vM7 zwC%mobKhB0fdE;5gm^>Q)3qd{{gick8|1r!uUAF15;7*P=CHc^m}j+QM+3*&;eTI_ zOSMuTcR{s}B-f)VyQOSVQIzu{`X~yO-w`wy9O0oa__k+1b`9n9J2)D$;&y&W42z6sEeHj42Wl02c(o&+1$N-CV9*sm|<~&Kbhj zD-;wu=NzIHsp`Fj1CwglG>lb1b$*U=*C=B~xggDQXFBKW0?OI4M@LrjpD0-tV-#gU z#DQY1hN5D=xm#O_9ZDX_tbW#MvZMJeC!5o+&rIW`x{R*n*EwZhXd3wJ>&?|v3Xp2$ zJ^YR!hc$-k&$I7rc(%@woH+4v77#4zP)OP7sq0+D^Zn=1goJnG9Kh#?zncF`&Z+7vrynRX?VB0{Q3f zv8nb@I4iC5v~(xi$z5S~b_OpN#Uo;XORrt%w~C2G$d+)uItzZHDH0m-wmwQFf) zu{ok7vj4IF<~j$LJVC%{)NNkD>_qK04>$GIg_rR=h_~+R$g67Ip0XW;4&{Tu2us~a z45)Z)OluT-3EO&4Nz4v}Q`Mj-0)ZkqCut}uvo*~*xu`DSKY^cIm+#ZUH_kgk*8glW ztF%Rng+4^%L+QxW4ndWmoh?MsHbapTtzZFJE#_0R$eNA?W1?FDJiHjKC;qhdIhv%X zx|{rGBQxD3JnOYxmJ|$gvX4Fc(nFMbE92|0tP3ELL96&Io+sij#5TlG2D=v$@jyhW zU8&Y~27)_3M=p3V%OV@<8{pN8tqnGue?)U|%-Fqb#&$KB)q*A*>!RnDy^@9GpDgzg zYPkGQayj?H4$E?Ha13fy?Sd7Vxw)STC7|BzzK_tL;H&`-dgUgYi)0NTFnA@gCouLL 
z7G7Ae8SyF6TL}Yy`qGZX3S8Owtbf|Ado0C{n6rw4R8aJYyHr1cV2veR>0qbI zRqnu>PXYv)7kpMPM0DpyNDOefXi-cDJB{=nS$xDAh59+E^6oNGOj>b2{t!Itrr#CZ z@azmxJ3Uib*&l5U;0*~YPzZQ>X6GuHmo*4Ej&DL`8rF!yX7jIT zi@XESe^nxsurzW7ZpJjv5fQNlH5tlj+z0asg+|0(Sr6{2VkZ)|B5T9qvm2exc+HQ~I2=eKOziEH z79d3}rvgH}6>DN1^Ep+CYIo&9mkz`}a+?s&JP|x{jfQ9TYn4JZ{DyD%BW*r6XSN_nHQ^ z5RT=T9~*uoFzyk#0A=mP29yL}?8X2k-pUY$5~ouii0pHYFQLsL@S^ZL#9o3xHkj#S zmZr%{j=#B73-=L;oAEId@m`i>#^Y{D)x}|eN6=4sz3++PBPjrH=Xn+QkY=CKEpJZO z0)(BiIDd4>UO5aqsF=&EXHf9RUM74^&33qMC5}_rfp>Q}Qr4)Wv^Ej0Y}!!xoAYbj zeujD62vFt1#|5O%-r%e+l70%%ECBM@#BX(N){+loY@#fpFl0nEgARpKX zn%Xrnb5h1RZ}FT5GIFeM+P^Y|VOF2Oab%R!2B^~p=gO8yK`4f-)c2DAO4EEFlv!S2 z?NX;hJ9NMgz49~19+n_z++!`VIEArk#GiGY=X5`;NpcvJB2o>^Xm{@6#LMBdvlK%x zE37AHp*??_{vszxvxa@EeTznkR$)oOm(Qj6zQO6!F%E&9OR)3g({jy4Po^zBPBk|t z&br#lV`b3Mn1|dwIaC*<1mIxJm|?c#lLK<_p=hd!iopbTG`BhB-yuv)m5&Vij)^#J z)M<}w1NJsw^WQKp+*7l6P@r&4rhDyn6>3cb_jh6#FFdp$F_?2bfZ6f?@f>eL+?A^C z80*Jbd>K}ms4{4KeHQ?rpOZzmEHli_iS2~>`U@vv&2s=mby}ksLD`f}aCL&(9M7Xj zQ1Zd#9{SLalwF>k3kR+V7Q8D&@GNyxpMG2uYpUHVXx0q$9ThE zqh44eeQrgdoxCLJfO9k~_0w1Tmaw_psF4_`JMv5N=Y59>hVETN)r8&7@0w)C!WMJ% zA;-w`rF?qpRh?w2a4`Y`>nf!ObrU5K((CLFEK%hoepk^Sw3K8Cb8+rCATnLOmtowP zacq7Kp|&BqkZivLxIilzVB$_!#+#G`B<=B^jV^Y}bxgAo*~5PQa;bR+vg&`|2Si=9 z5`@v!*m>!*#epng_4AHvjTt|jWKrZCB8^cwZW5SD(ZRUzd)+!;+R8Zbd>9bEToh97 z1i(En+yJDu>a-r&KS81kPAe3RT-zp6zLarRKfEAKZegoOy1h}?s} zBcl%^{IQ+J4;?lenjsOoO`@=ZA);8Bk;wZA z89s6OA0K*XpcTRoCoP4vwAiG4Z3y?Oc>&D_J$wNn3V4UntUddCr>Pn@-=SLc9i!yA zBz~I^N5+30QgbRJ6rcpIrL+il+$hzA5Y6MC!bz@l6Z z`1z0xLli&}N2?D$q^f^aT!h68=F*+GUA7j!^{bCm`FcwBgb^n$MP^1>%rSv!`c{rR z-A^YbT-dOU{(>8u9J)m*Ux?94U6#jxj6>@zG_1p)9aMLzQzM$JVVN+)OR7B)>sJbK zghSp&h^U$v?1WfRzI=YjL!8z-?j-2LW&YSWeb9b|75k13-DXi&bohFPn$N-Z>*WcFP3hI zBfHdA92NUdmsy1m1Az!rAljd%CHBML_2K@walBV(08K!$zxFL{PaFlfxW6yM_7*11 z?xaSgRrbeXEcoko=D58!S2D=U#F2Q9M10IKN=(P0jDrABI_O4!fA95pE$jL%|ET}UhZ5>}W` zpCt26B2NSUsQHWFk{tr`O+<$#5T#k}-0WKo${kXS((P1PQ96uO<_OUjmF%pS_HE 
z-6>#lc^kHI36JCbY}Bgfh{roC+v|N?JtAh1q#yKyiIQzK~mSDP+b`(3EDXSR2%^)_@R?kPA)SoZLx*pqC-mS z7{rz@iv6KY>kUrY?ah>ZUQ67c*fzHk9;!2+*fxd6kWA)~hSHw<|3zv*;8;JP90!C} zXQ~t74TBKh$pLhB-M%XTuwYLkP}u`XWaIRt_N!M-v6(6+c(4xhOf z5BRBu3xE}uQA%|+exn0fDP(8jg7nJnuA#vdzS_3S+HAqNgxA`*?(4C3N&A2;vR*E# zH`bIJF1puhzW>>Gld4-0;!4#~im&sRLsO-)A(UfZs|=I`*rtlf0C4&55jx|apl(WM ziQONE&abrFC(lxtZd{2; zH4dkJj0Dl&cg|`K5|-bfqyCu*HeGHIR(12{vF(}^eaL(}R9HdiIzeRrM5f4)@3Qr9 z!haLjpq3^U5$i3AG**yG8Kg`~y|B;p`EJXwtYY^6_$g8Tw56NAF)G$h6WPk{6@GV1 zW~#FhupZqp%64h5^)8YM1(KC?362<&+JNC7qurY>>#LYec0Wy)$7%Dsz$aPcN5JWV z=mJ96ksvoOJ-v?X+dIQbl|+-pMgOgznqAVeC;Zss^cY3yXu!Kx2pw4o+Xmo-WDZXQ zEF(SXRH8jJPK5|S<*N9?_S)gph&{b0rK7}?d`(Rlt<<*A+FO*ygN6r!6&NEh4jqn) zEqZ$HW)?)T-l@0&H}8`W^e5+TE}U`7vtyI&IU>)g%UjG zHuFRgDR{Un1h+6sBw0dq1You-z9WNTGj}sD@9YO{SDz@$e%T4(4?VwMvYoYc#MU7s zTMsA_Z{gmtMTk5Yu*33&F?aPUSJfSWVUy~+RX?)~?s}LdE1+VOgzRon?Xe0*J@}<`Ir>)!25j`- zCKamUbS0TiHYYHMkU{Kk@VpQ?Z*%Efnm^L>B99)wtb%QCLX6zV7rPG@$Q|VO4(Aw3 zTf1Ql%@V2yxpE(|j#s-$)^}W7Cv;THED2chqF@61qe5M5lIfkZBE8LDnOY zy0mH&nY>R}xXaph!0<0L4e9(DI5;0T(yt@>#XUNNw^V?`X;4^IYwJ6B%>S{UI@w8B z+6q{Klrll-m{gK`kXFBIZ)?iDk*2Lo{HJTv{)E8n^Wv(l`~NOC4{p5|S-|LZKSeLj z4~@n%T?>Tm#%}8rl%|KRtP*f=J(H{==$7+5zV(y1RPxiZgQulEM?=BVPS@ND~Q%I5}(FU>EY@Z zsgr9{$dy>s0}Hd*M60BAPAWiWnivW>s>JAnL{3&RRqNWA`j5=Z2(fe@)&A{g`%wYD zpn`mmi7}}JtksaTwSj_J&X{n~IAyQfTygv8obu&^S4?Ef zs1NcbY0B02$_O03&gen2fnDW_01qmPw($95Mn#M?t7Dtmr}m(2kGK(3RXrNBu`&Ys zXWGk|2rO=b*=y?6RY{?;^k)<&`vLz2zWm;&}chbNsK2aMQmDz9Zq zC3ijW--DVCGWDX0z??zp89~uYaLogi+1x;J{l-ov;b-p4RNV0tXXgx&gc4nAOYT(Y z-5J?fbf*>qLn#q#r+cYbt#y?=P`%`dz;Y1Af7B)pab-xyjh0ue#_rHP)}ZPMX$8I2 zSs0igHBgrVmQRCj0|NU&GU3V>9_V&q?WVFIhX-AG7f`fW%_x#R{#6xrYhgOmrKr}2 zJEgaAp*J&*op*YV@(EY6PNH-QPmLJK(*O30Yu|g{Fx5VBnM1dg^#h-V+lGW;3W$UP zG|V`zjP0JGK&T?V{F+}K%HfRJg?UL&4~pDi zF{jU=20=HAE~8st+H>sd{Cco!IY-lYhS{ONON!e~EQ5)K(iTpm#r`#}0bNz^%}Osgi}OfW8ASVq-wGw9SxQtQb)H;cc3n+DkULklDW6&74vaGq1L<#6r5TP=ukUQ}^%+dwzNWX#Id z*o2L{{^v2&9}J|lm={;ap8h3nerB5PnzAU=-2M`w 
zr(0HW>YgA>zg+c(6ae5GJoe;lJg{=h6G%VZ*kkH%bCUgdCTMm<5p}hmz05tX+`fnN zi?}KF>5E;WUqf_Ta)uIt184NpfS_B)`_m0a3XlL9+xO$UXa&7q8oLO$CiAn0V3s?53`4aJ7I@EEc0UyWoKq{;@?%h1T$&G2U3wCSrE7Y5U zyXaz z3~pzH4E^VS>vFEu4x#x>K@**TI~axa&Y!Hj9l^C*DFYxC+_Xr;H2K6eca$+5^VCoK zZ`3>V=yJ7eP%2_6yi=UaoW>!O0wFQmuXhEyF*I7|W^SR`%idfCgizkO~pEmoC;{oWEwQUxs-}EQW!j*4kJo;{!<9 z|E>vbd^^o5E_AOS5McgmPa#LHkS^i_^Cn5Pvgy>6<^5ppw)ik4xK0|l)?$-&-0j)I zyP8SS%msQ?@fdhTpBp(701vfR@FVp>6e%o`lGEA55E9r+vyLLLe%dmi*3=lfD1UX$ zID*lbqzApC+33O^KeIKBL+)U4>;%{q@KtL+5prk~T?>qjKtHCd(SX;L7ZYJ3pNn@n z9|Yhtsk^67oPLpJfUm$aCZ|jTNVK4AEkpj?^huwF`Y!WXD_P@(m{$){?{E}nQ!e}W zK6CMFV{-*&y~XkE|!VLO{YhPJ&5(UQ+&tG{JeDJ1jDQA zCN4B~2mr+v_D47-L6ikH!{ih+a!+|i|DE*d*_pX&>Ykq-bJ2c0!r#FRM4^mmwmu*q zu6KFYwe5Cl4BR|jjm|ux_2lavms-flc63BfleLR*@r;PC`?u_{;8Dk$v;pJ{We<7MfyRbU|ev4zA zYerB$nPx6{^c`7V9WZ7_ft(R6T0N{>p;DLkQShu|C|$hL6XCCWw)IP5$U7y9N%Vf< zc{f5Wne?1S&o?FmaP>C8bdxin?Ur_A@=B9tEex$(a?9%R8;&_1L};si&PVan1!NYr z!QJ%Yz}-7eU^W@TiOP|s3Ph#lS`=G^>i3qbS-@b(_V=5V*9S2@Q|i4APU!RmwM)pr zzc5>@fp7Q7MvwKN)=|unFZ|8}6K9feR!ubm1CsR(_@4>IobD254`}zp^c^?-8)MbD zeZ{O-p6gaKIrYV~n&_uCNC)*n>Spt+aL#)<6*%UgygZLG^06nenBFcLqe3YQbpf>$ zZIsRy(E;YqZyg>04khpz%C1ZO3ShV zz8*9w>%!jm`M4_oSqXhS^3Lzy6H?5G0WM1k&WYpMy}?#wod`XvnrinEEkYo+u+EEn z+|%uAQmiPvchx9x)lfp1vl*Z2kV8bNTe@1m4U|~`{%8E@)anxP;D9v6L54is0Eh#Z zFQDp}jzkcbQE0E?dnj^kR9M5w2CWJY4}WOXeSW@#k;2ejd}LdX*?ORN(NRel6wipL z#W|3HmfMIA+&mi6sw+^rSQPN@{WR~44U#^+HBY~-^%lQZF;2#z+e@;YU(L;>0Fb}{ z-ZCfl(|##hJaNkH5}cYc35f+QJ18aU~$~@HsdK z3AVD3g!vBm*E2o?4K@9}o_{Dk>{>31NdrGzF*;C-V6~gr4L#U#9~4!lg8~9PC}UG_ z5&Ijjh$csYy0XWeK_E)xTOt{0)U#p@6 zvhv7U+(WAX9{_;2e)_iqI6b|s)5>a30I-Wo#5U(uU@sc;7axdMK~To^*8lEO0l%0b z?es9{hj>~nw_iKy7%Emz-^A<~wPtLC19f-+!Z}I>p2lcw|1oQx#ao}^6y4mcsh@K_ z(|>D+h^*xj|5<5%6gEmy!s<`0SWQz|#)@twn9Nrr#tkwaTHxbSOj(|6@*&OLl8WiS zPsQYgEE2%}J-~S~0!Fw81S|OA2K-?EujXf^ypWO(0in-{8SEMvYD#TWJHG-`+d|n98#|_Clq3rmCVU7u_QEt=%M30m@MYSXLDU_z zS~1TuZ-X($`fBy{-~;mRLNr`?i>8Rq6Rsh)U<@O`23NfGt<}kMkZodM7dc7obL>|P 
zc%&$_r~n?oVR~imM>Xz=M*Ki*Yv|}5LnpS{Wi&!`v=&HVE(lDmwg?dw2y__YO2DRf ztLmYTybExFNkm&MIPf_!?TuSKuamZ7*3uQv4%CpOjmyc&bhxAvtmNGnI_C7f zWN611G(WiN_Mvigv$mjYCvUwp=sBk2FhjNS^!=14;L|`P1F@Af0J<#pS?<2}LvioR zDZtb#!sO@hiN*E!3`&&Xc6(@c+K4nQ46uqpg}<+Z4$1x>{uN;#$)u|~#53s+THSxg z2zl_JjHezFTBd8s7INuAOLXF38c3`)-T7$Q#(U?dKj(Ut*j|%X_fN$mIN>BxNvFf* z`yZ{q%)7#pK1EC4zQlgNXe6)fSH)w*jmmIl0Ikn*kWH1LkFPSM*u+ zJ=Fwnwc|ZQzQ(;LwJj&S7isoBj7w49bGOEvBZ#Enyqe(R{5@+`e1%`tL-738zo=a9 z{u*fVgC+a@9H1XHA>C^mOhv!i{-DAJ>wKGoyYSq5E4AUFG45X1L2WqOWht{}h2e^N z=z{QGSYieTiFiUR!=nmo{K@HE=IEJ_XXc^8$u<4jWnz=5#ezuB=vGb0DhwCFA4h7t z_bikD>0kG9z(hS%dT7UC5MicyfEsm=o6u%k&flVe%R=Kg&y&Dri`EXL82L`ZM7FTL zq3}E0w*zDt`JANSp}dGVg^rSapg``a3($6jKY>xg^aSk+paQx4aQFS)Bktg&KfanW?%FA#(>y#HJU$c5Ep5tdqjc4 zz3jtoMjib(KpKScP-vb$6Y)im?n#(YQ~kzFzpj6|d`GmFpTszz6EELQjlrUd z8U)Yb#E}PdQ*j$fRscGcJRH93DE0WRbF znzro1&S6dT++M`^LhW&^L@cylXH|s&veQ3{@r1}MHw%K`6oT>u(O`fV5Z<&$$kW@@ zdV`ZhG3*cfBEkb$mYBdcYmk@1Ik1ig)Nb$89{9o@sPnS4cRjBta`)j6T_}Kf9)8?7 zI~kkF^owu8{5#z#|KzWu&?7{j%j=X8W5f=! z#~O#;DznRl1GOOkR;(Hnvc=a8jO>;NX)gej|PHaiwostSd2*lIZtzpbE4)_Pm+cU`{jPi!f9z4$A!^_%lCDZp{~zt%@) zM^4{9U)lr-&b{*4d?*Dy;S99jKZhD4x^E#C31Tmx6<4PLjOrQV^daLos2Q{Yx-%EE zA!jGQGrNTFxa|OJj#0xUfdsMk^3m%Zq~d#^yC7WEN_>B5xP!H7=ZwA(%pbC<>C$XBug_#%LWOL@~nTs7? 
zLEl@-8pcq7A(JQv7kGP+c0j@=T#_D3x9wEjDe8OV=yzVgug9HXVH%?GaS+Gr_*QgT z$oB)T!4DHGadTET7egMO8oJ@HqG82B@GRM;CTQf-b|M?_bg%!Pv~ch3!)n=ooqR&@($XnbfNNV(`pa7~ zuOf1};YfZf2wwA8Qs3ek!_p{;|J5c`cw38_W!L~#3FJ}$&{#24{TAqR-q8f0i9(Jj zqtY|l`yN4syyM0%UY$;Djp0dapwo-!?6Y;JSLzWXO4zeEv3Lct7OR&UP`DLAf|e7g z`67qUMoR5SsF^O((xc^t>KD8e+?Q1JJ8_sZoiruD)@UgwQRDDuioFSaeZ@zt(~nka z+W*XA&XSYVvAL>rbZz@-dA>9TCRK?ajn{W}DeGVk-Mi-3oX1$cP#m_=5|s!K%HA^- zT2MoUb*$9(8Xc;|S(}!*ha=6hWp=g-pSTS8&S$JkxJ$H+cVS}h@?oPhaqxI|{!fUe z5rD7cnmFdYpxh|4a9<4#=Ltb3z}>$C(u0|}<7$xg z4q@dpz^oA$o;Q69}wccep(esT|8_y-bf{!AZJaYZj5_w;Q)fp|A^F zhE}p%WFwtfaqX+p2Un&6=wYu!^+NRcs5W0)0Q7J85N_dSnlt^Kt6}VYYro!Kb+%wY z0mf%UVY#y|!$MhoezL)sc0XseV-c8?R(6c(SO4WolrQ&52Al=pf6v)z2yg$n{YL3F zPO9ASF4OMnEy~x#T@L9_x_FY=IfgZqH!q9kchc(-7Owm0QU@C|t{bzp_Z1BJhOhpG zv=9Brbw{}VM+b6R-o~x26aW0PTwW_3(}0~JBa&dp)Mi}=u`+ZEP95u~s9DxAXIJYI;jP+!P4Rmi;+lf-9m6kF5MWgLkJ&qxfa0-%+AeB*SLIG$PwAEi`7; zC1*Y~Vec|I?WWxG(gB;~u3+p%TID>y#opI!(asM~5YZ$%xiLoY9%*KN=X3BghxUHW zME!g!HTVPVu`{B+c(O)qKGIO}!3Ij=l2BA)}F_E@gLgc$3zg zL|xh1MO`K>n~Rt$U6v*zeu2fu0pHiEU>7I8R(ifcrX;!V<1`2{mc%fX3uQ)_KU4Ph z&JUYqhAw(7HfPr%%~pju+%`Z;c#|+Zu0fyr(T;xtgU551h*W8{x!e0M?GMPc-wnG7 z6RdtZ-d?sukAGaJ=+vDC;_k4B%!`nnm;g_odFmR&({}IINjbPGoh2nYe^W;oalz8@ zT{Aq08{nbc4;RX%Wth})<;A_JHihb@WmX+Txi&4h7$|0+I%m6Rm)aVJT7cB&f7LUr z1Sq@LNR=YR!(1fT=@N}RF*G-Q3RMwXYh+C2fi}hC1A(*eZH`x@MD?XRwLzRn^O&_t zp0%N`7hi1TNj*f2mq68MUY*XO-IF*Ju-2t1$OG}{fO|zv-vrnVB6C^?u^6LuY&7Xr z$r5Y}e2xA4BPZlDt*b`6TEw%F*43>MoAW`!u8ipf~c=?oWd zdbllhJ7Ssf?yl4v++x$NprE5C!2nt@4Yi}CNJKhn^&2q#s&Y@MeCZyyunWQ`^{OsN zXz;ChUHoT;zx1i8bZRHTGS&kvQ2MIZ;-Y{8@_VqI7V-fFZMUDU3d!k+Ky*V!WB7RY7-D8cNU0Xs6siZiGG@DuYiSb>M=aL>{ zIR#+yAC@m(AM-}qo4w=fV6g~sMgOD5Vq(ZJA-;l2MJZ%h`%vo>blPb^ReoH{4RJCx zimVpa6(m=>joj(^R&q)uB4vc~6I3BZUm-|qjhe)de6w1#p z+lcriM~2x}GBZ`0|T#DunjIn-#Tb@=D&C z*H&DwK>65+dnWkGUZe*-BwWD>2rYdEzT;5QyFD4&5{yN`&hl0N83XnMiz{OH4tPYC zh}4m)*;Vjds0YkF4Ia4CUq)^+2g2_KHhFuKfLX#r_<~VA`kUu?0r*xDHn{d|bC8(G z2@;RS<%@G)+FjdEM2SU;%zD?uUiL5!YuwL$gAO0?!TCAm^NUz-D2JV-90$#;b8Mjb 
zxzuSA;ec8QyY<}6J?oB2P+yioB4?Y|Sd8&x4%?6bv!S{wTO<+Ev}h}aCcx{9$M!-HPvx`yY_8)vRT@X;bWeTP z$T7IEO$40KXEQI)3T@H#f=$QG?!CUOLDqbkAw?3`LKMT6J(0poNaL0`e-kk1Nm64b zhp*`ha|>^FhXbx5AXkj`synywihP9%CxtC7rj$?5;_Oe7D!3vtAxy(XG;|mzhB`eC zn{k^9D&FJn-a^pxv)(^62>Q$M7=+J0xIfmiZ3U)lRzWmyP(x=+)ihM!d(e_rHRB(Q zZcO$TpPg@O(q0vwK7rG#{&H z|62L)FB0aln`=IZv-k3WXptw2|MAooJa{m*tE;n()D?+~APw3$I2~4mKD+R{h{CR- z_#bBuh2pcf;xGm9x3ILGZf0Oxw(e8nno!t)hjcCJ{1BA1u6qVPStC{l7iP?j22)KT zVpHo0%Y}0_2{JHk103|G8~^X~U?KsNm`utvgG+idCk%$DC5z}N2hlVq^5_HlawTyG zWLdyJQCM()yFanMxco);l}mw;7su<4?b@KQNYBDu*>ca#OGj4B8fb0EA9io`qYPyx zq~Y(JZZT4KxWG2+d{pH{EqAO=^tE=bSt4(DYpM)8YMZ(EgEC(k_1_d(F@a3VkSWM3 zU=}j&YCuCw!rBZCwEBSHo9)5+7?W8Qpd5VI)}-b^;^5mEaLYUicW7{hq}SSiorg4Oq}E~ClhzJXCT)OOtw_fxe0pOkSSa7EYdThZQy zkQ{sN9aC1$`768Wm2du{2~y5`?(Wvh*k;^@)}K*Bfx zj1qum&w}>|d9D81%4$mttbGsSW;>C`)p@Mo{R*6h0-_7R6}$O3?$`6Tt+T~=oW@E?qbnYyOq#~ubsfUQGvr4W8!~7=n)yopvzWfR0>A_L!ETbT ze{{q%re9@5NJ4YCD2j%lYJ#1r;UMs{+Y8;b8OQ_0QkR#eZZ(-YlLfYLB?gjsIb)7L z(C5xygAiAmg0L%pKdohu^58Nx>0%b!X@HK^cw~IFI>hyB$v7fi-Tt`a*)}kDpaO+i zF}8Gz4#tyZOGsb0bE!XFKIMl^xI5Dw2Vo^ z6j)%>WwhClaO`-M$fqlJ!H~qP&C9OJ3IG~m$)uCPg+#4k$>($k-%8>&W^4~luhoMj z$DH*^s+P%RnRmy_Shf#@`O2-@8@$bf=y_wF@XfLFA#eKWPGh$-9uID4gBRSdT#HlX zC#eVv&vIcfGb(h<#nY0)_zdnxdkwZ1eLq zRK3xy?EJlIa*vaL=nqo)2Qq@p3|f1W;_2S`Trz=i z+lPHs#Wg5x6&?MViJ#tMfR!f|V<-=`2Eiqir&!`)npQ6Lm6MZ@-p3y#qSN>tL<|^e&pe@G;Bi z1}L(<5jD39GvIxdu5FJ=rrI&(@qCj*zc8`P(Pu^CsYz8v4A?A)OD~cO;uuqho>3VV z|K<0Im2UpvgzGuXnw#rn7f3wKt*M&9>4;#a-6<`9PA}rbQp>TO+Dijq_Qr5VEbnht z6-;PVOEsVj4OatNy}5pBxhE1#mY(urr8&zZ7aDr*y4Pv~$BcO#E2WQvNT{PcNCu4r z+#|qtSq85WX?^wc+!UbYPJ5y(uJs`C4sRL!mXF&JjCB!~7xbvX+-I_`ubiec2)*sh zV_{b30?uga?6Z2>q2p#}0}0Mr)8J1>LUKWJ$mR6ep%1MqA1ahcjK0(%JAvaQS$~bvGxev_j-a?UGZGlTNv@Kgr+oRkAdZUwc z{fpVa1Q(u*YBw^aY&{!)nkkks_jLTAl zSH_t@A!{IX1$dHY)D zF7pc4dv?&b*{Vwtx$HN4oQ`%iST@nJO%W0AH$Jr(aFNu8uOmPnoNw|?jm3o!d(i#r zgd~snB3FuOW#NFD^fRI;7qHBDPFR`fLxUxUi~w|ILCTX$0^DO)nbx?w?^jq_B3<~( z1caL`i>9=mG6LOXY#Lc9pI0S2O?2OQpWgZFD$8Xq2&rMcFJ{18C&eX}_*72R{x7=A 
zH@tn1q$7>2tjTj{ovvKNxSRSomdF!_Pe^}DkR|N<5SjK%EYTPX*%Po{QN8&}_lg^0 zG=em4sr5fwqe2{q+;~<4w-A@-gYI907_~K#Pc>5o35mvzWrw0Rk-TV^_bGwdXZ4cj z^EBT^BC%HVz^)8#?iPJgvB#b-ZeUf$hR2KVGh8ZKqH7V=s(@>=F|Kywu=%Lqt1l>@ zR$_UQ-x$AJC1${5gR#qk1gNMmTTwo~AFD$IGl~O1@_+S_zz~kh?Gj)9E+yLo zhWA5jWVonp0x~;#BD^s9u8WX`vKDYlTr$$w5f@(h&p>@VN0-|<)`yOb)D);O;$~2y zl~qx2hxZRM=FeQ})IzG_Kl8E{uU4-rh~jofilO$a$Gc;C?+vw2hpAHrE2O!SYQ^#H zv^nxWrY7g5fyk#s*{~uQ-Z1kqK4Ojr0;vDvDCFJ5uLt*y$RyLqB@oTO0S_&DfuXcI z(ZH`!tL#1gCIoz)>*AHsxs^}4Tq+DMmCgk7dW2$zFrxDBSEl0J=G37flQ7gCd0WBk zQTg4bvw^(a=ShR2q4jJ?@1=PYvU1ahiR+tW`sR5b!*(?`#Y{Wa>IU_lQU8FQ!;3D| z;jKB}CRH8C=2zhX&}82#z&3-UX`?BueKt0UYW<*Yx1>E4Jys8IE_3jbB=gMdAXVIr z2#RVxgpFbFACq0^w{uD>C&}$*mjYbFKWMq-7r)Wu#jw`5`41GMF6^;zU9!&z1ym=MQFC4YDA@e}go} zITga4ty+r2V1sZ|$-bR0YtqvWVXrh*J^>v@0b`f1V^`PBfu($O3_lQLpA(Qy74TQA zuqI`S;cQ>5Jww9$>%s4F=g_UjRbB!ip3t@`aLzFh3+nzn@=WU$MbJ0Md=q%xBOhl( zLXlTeBtz>Ewlq6$5wh_C!n1#xZ zpUhyY5Yj5^o?yA%E$Rm)<92Qqo6tH5IB)M?g4$@|y_d0@j3a5kfh-XJA%f$oz>(&w z2n5v6l2&n{gnx|$}@XV=N$fC3tR8g+%UofwgnR$lmIQdYygT4A-u`MaVxiuI1$bf?RC zv&`ZIl0EqU$MQK_qQZU<6AXqPO;m+n(@iL+iM1cDlYg*TjU3^Y6~2e3*c!@rzCkOO22n zEYq!$fp^=>|4`N`s8@GX+()Wdkj+-nk(j<1^MV8x!45Ec6av34<;SR@wIpQo z7Cb@ZC{+QpAqhH7kDA7POuuNFC;LF!h|K_{e#6gMG?C%`I}`14IeK_(o~C}GM@jUjgw4tm zjIAaxx<0hkcdRq`{^tR8-qv0bSX+?XW~CR9^>rVFowzuS?7X=9#-sG|a)qN`NPo$# zZ@IcL9ZAU)CnMkjjjvlpuXBkZ9%4(iW}Rx`0=GwYt8oGjQ=~6h80Z?icAS3B1;+WX(e*=&Gp>K-Lh_B@4rj6Mo{9JEI z5R|dRp1bc=Dplipep$CI+-k^r$7%{uY=dr-O2jONDURLhQp;cEe>J2s%xP#%)~rX7 zll6k8ieFe7OXbsJI1f>yMTn^Z^`Yro3;KqH8NcbG_Spa^#%-UAw{}ZBR&W|PVKeU# zhM#~PdtlU+!wi%vtDvx74=LT)=p9LaR;>h@;7%gu(AgVTS>ml_m8)DPgjoeFe0M+HRi2$T98TILCF+m4u(JYK8r}(BRa3$<#lJCU zwfnr@)=Cq%Vv({AUyn}z9-tx7R;&@7ZQ+Ok+-Y!J9CL6gF0kTepG)?MEc^b(1;Q01 zo?s9N=^k+P!V%i|AXV|~9wgCiy}B_Awuo?Y;=%kGR7<|RV`xARK<%WCaQ$f42@$7z!_WA<5-Ghad!@CJzXzKUixucqHPihYC(h3=% z5Q7i!0Bs1~gm6;|HM)N2sz=e|^|@V07zf&n>l5hGY2<}KtQlVW;Tc>>P>(s3>n22t zIX0_TCdb$Ll6>-Th=8C_H9b6$=+yz-Tod<*%|@Wb&s~`_IV~FGL1k!VC+oq`@918w 
z>awC_G|u`G&rT~$+bp0e=4=-SBUev@Y@D@mQYV?^=knVfx;6i5GYAy(IUI@lOP1+^ z(N>C*VjuUH8HvBBv_@pZLHKALR(2s6T2q7Jr@*_Xi2)8J!es=NNhjzxS*)V zCpQ!%fo@L+)u)7BV*_fOG17nt$kcNNS}M}k99EWd5?+)B0`Cv``Lo5A;`2sD0iwa? zAE5tJ#16_tk{bqj2e9zS^ZMBBdM&DUD3WSW>^Dwl{GVjk@1;{aXpbMo!&WT_h6C zoT__cVi|A*oqUd%q|jJv{?pS0`Y$iWoLa-$u=8p&Kk)?&ybiK+O550g&2Sxd_HkN? zQ4Z240 ze{rI&&H22?TY*gZ;DW4N|3E4*b2dr7+7!1*&$5AaL(Yd8g-lsesHQ2sKvcK7XU*ko zVy?+aC*;$(L38)00M@n=`;ynQ=GTbL42*q+UH#z!%@q@r1U>Sgc%Tq9u7SO2o}|Ck z_A!NuCHy^`W=>l~k@ni_!yyg5pEf#0##0`O&lBgkJ`4+-kcn;w09QIHMa-kPr_rW- zmH{=u_dmnZ-LlM6XQBm!>ONaJvjx3J{15>8uV&-@Y-%Aq)=Q;O#hfWki|4|l#vjdT zc5TU#L+-=oKFK0?&d%oifyl0(nF@3k+;6-TO3F+aX8b8@OK;A#}A4nVZX6Sh^xi~1W02Xj`mrH0y2LWDgPhy-(C ze?>U~9E6)`D~S8GIQg-JBol zt8+$)yx?(I91_u@Y+f2e)nVYNGCgM3FtbZ7jP8$`Vkpl zG%I2ByStH=q76SbwRQwGMN`C|+hTj8&E-OZ`%?p?Mo>wczrG}CQI zvAIz<-Ls3$0<=Iny*6LlVdfw5IN1*rEobt#S`5SIGDogfejQ zV@g5SsyzvHz|(T}@mL6Q2)pJv`^ zAJhltT4eZBx#0*fBPW73T?*@P(TJFbRKh1}cnxi%CE>#cVh8olZ2KrVFwY9cf#ZvGCuwi6yh-nv1~jxNsm^!ES3;_3gT8evyDG*xld( zHcbuD&J%*Qrcy$jOYTVR)<`9MS2;q%Hg!1nrHKo?tG>BvV^52&>$b6@svRAVNs?2L zw`8gP3QC$;+KLbyM~3l?G~J4aymyeHh@Ti4SE#NMHG2g|6}(^>v@+9Rrj%PGE09`j z2+Y$!(+G5HF@OOw-2LavfU0+Rd~wo>q%ZpPL-Xy&O+gB2r^Ry&NA>43F`hh_M#C!!g4%0x86RX9&Ri% zbl-+R94SmxQW0%T2H2KS61^#jb;ij{BH%!J!|x7jH1iM`dg4dSR=n)61n@K7NbHVa zM5Dc}7CId~=+Tj40s63 zLa7mY-)-{?Ft(8XEHpvb z^F8-A*oHprr|vj88`4j2ae^Ui{)m&ZHQ@5Pm$1(L%@x5bitQ3Zq2#IXG`jCyW+9BK z6V=*lsYLl?2fkh0MEPU=iS zJP0~93c!p9$At#|Fx}JqP;<(1&ML7)<{LT(0 zzYkQ=qH}sYriz?k5;XuxK(@bX1{(jV2MrPH^$UgM=p`(G^2jDX%Ka1+03vQ&B=h4} z56=hq{Uiq&ts`H>9FLM0dJ3wT>-L`r2I{Efp0&pTC@$3*9y5Z?!3~k7o=*31k5}Ot zNlR-OFaGipdm?-ITwR^+xp??ThuqiRWcg~h^EOpR)2+DO!o2DqKyXo>l9)+Au?V*p zUz(C^bB(k$lsXd!muTV~}$XH2jC>u*<4zYsSB!8~S}waiJa-C+Tr4Z3#x- z4)zKkfC@f7(A5U1GWQXOdfwB3BtTKZa$V?4i&}Z7h09!Yyr3Zwygq=Nn|0$a*y^#4 zHpRNmq)4qJA@j3!TV5k)nw-koy}`uhzqTg)dAM?*sq1mS5suGu^ZoG1iwZwHQG1m; zAJ)-y0n`ufA+V`vfD+7b?POL9%va~X(feAocIvAABR7H48T`IIf8kL=REJ1-KF?3> zC5djx?-8N}d0Kck2CGF#k@;5frWuUu1*B_Av*LE9n!VVW$hhJNS-8U`Xc|1ILjUJP 
zd0s{g{e#0xrdUyy~S^h1p;;+6$V8evVE}0k&yEp zBF~0IE37>*(v5HwYH;}8Xs@iH>9g<&^u^6@<-C`b_)kzA>+rx>#6?Vql`yhGgwbK* z<5Dwk-hEGI-_A-E_1YX7AKUYI?o-^PW@vN75{v5-TeY#J9N|80pjvtM`!*vDiP1tk;2d{UC0 znd1yv5nr{C=GdT7+}i4)0Z4uZMsa4TdY@eq8CKEU+J8{A^ z)QndseK$H!&;Y&xB^@lw`eke;=p31*%Vw?&ieq_TB7U!#xwr@Ypb(L4;iTCK=WQ_6 z1)EH_^08bNh%Uu!hF#yLZjuo~U2cZgiY&w?jin_VGoxFEGWVtkTk&;rOC;um#WI^z zTkUw36?HAe>^6zx2&g&Y0$Fr+>jQyn zz9eW;1`M15>BoMn)i2pN=kvEc=(ns3?ZN|Rs_1${zV(~rXl_3qC~wmPVQ)`3xG}u< zOhkhRcwA&e2{l#$Ogrr0ll0Gcw3!rn}oHD}(oXag=Ne3i}oF8PT^Uf^Vh6 zVM9Z+FA5{hv*yu0h1}K1L~?Hbh{SHWgCSrM@`L*WCF~gJKSWAraGh8qDc5a0{O;ip z0Hr&f)QT?-suJ8U>&+OM#O#RxCkBq{YfF!;vm+G*q3QyEv3nG?6g<6N!6-LezSMUQ zWZ~%g7f>;Y`+{1A{n(BG%b4d$*wpLmNiZox6Ep4KtOcLI0tjeeNsGFX;x}RFnl_Op z>@GHt_5HBsC+>B}m3!LvInMsVFNgn48d(Su&p@f%qQ~-m&JW52SAjTN74rONTxze& zIL`KEF6nlg^qv>zM42Vch+HTs1NmyMPNO#`U7@vQklm!y(Tz^!m@W{xYEc$^%r4#l zL50XZWJnf^u}(fx=(1L4^iur$4MULC>99lnjJsPd!?Ys`6lQOTcEdAD@4eBu`mcw2 zGUN09^z&4+1Q`DI@(_nUiG3r}E#u&`6eRJbW$M0w3XB#FE+3N07;9)67Dn%DDM%bo=?XgjhKn&M` zrM4}Ojc#agg#Haj{0CuW?uLXnT(Si2NaOXk$(PT}5^b(3G@H1)-KVTb%U_7*Q0$<( zhoB_$u|Gwsd9kMLE5qnW>9f=nT)D26NA&V$+dzS-b6$p4IH zweDMgP%SnYxu#NrobKlSr%my8mub&CVgdlJOniQ$3|Op_@uFD!PZ1#}F;`vC^5T$s z4i?T!9v~b}EQriJD6VxwLQ)I__4X!)NT>-5q%gZ`>;skdiC}fng$d5aMdbg2;Tnyg z)r^H0???i677(fX&0Thjf#@ag-j>A5Wx9=3Y7Ak{@W+fe_w@C(Br@M!Jk6DBaGq$pxfgK zOL9rkC?RnWt!ZFcx$x611V8?wj4=9eeL7oxvG4pM_Z@`|7PKj@aj!{NKIsKAxwQUZ z-9ddDc-T|!#q(diY&~I{YsD8(7EdtU=b|x`G%3XV@tj4rplXyoVLR!4P>bsUhib?3 zovuob5TG;Fr@y(JFJjEl_^SE4Vr`Ejz{fZpdjy*67K3X zo5t65{!xK)7Z3P!JAqDulb=#(2As3K4QbFKUBm`=v}OQcSmm6&84${TPk199(QE*6 zn}RR@6B%fffL|3;34y@Xf$l1<1C>wp(KK$9MzK)v!;{-FHUUjOeF!|c`*(HCxK5qp zK~ir9GdeBas?<%+4`v zupL5HA$6>sLDC)x_*S~1bskdCkfEzGSBOCD>mqW%d9p|LDp*jiC}6t*Pm8c*vIt?x ztXG*1auEKRG{CYRU|x_$4#ajH&ZMU==Bnb$*W5Yc!Kf1<7&wC(S+;p-t(>9p`)XD0 zl0*9luxTjBEi=JahR|KNN^ULIKwQHYDn5cq2=UNthf7d>tqXCl5WDRr^&#TjKkBB8 zJ%6n>f8^b9=ia|OVpPyOfZqx~{e8uSZk-|sT%-yfOGGaRof`efE&s?d`>?~HQKwyC z-)D}$xhKcs!qpW*)S>VmCCHs+;Xx7pwUmm!*@3sTN2)&KwQo*#QyT?X>4jZdz@>VK 
z3HN@URgeiJoe2p*9$@<_Vhw4vdM6UbJGm4vH!uA(2QI5mE1-@fW+~W85yd%(iZb`J zJH-y-<+x&;eA(@73)MOAwX6QZenn4EPRO)&e~YCAI7;|JAjxWTKJloGJZe8Yj7rnq zG&MB#b-t4v1nV8}GwC(J-1pDyL5qWpoWd03iFfKXxW~`}76Hs*HBE0))VNqvHGg1~ zdgq;}vZh+Is;`rb0q8eu7&EEcDs8s&95A`$X~3@v7gf5^Dko5Irx~|~x#Cgmfb2Q} z)r4sbFsY1z?R81%0Eu<0EoMUS;yE)BwJ$k2-^^ouhOIP`>#m@Ho6mwV(9VW1ZKb{% zs!1N4QCL<0Z&INk5zJyzRpP*Z#lch)wgExssR_(F4N1~Gj+a4`SDz6{E@2=8Yct!5 z;XQkHEUa2DqID8d$pmj#wM1V63E$RF&3w&PBx}dAtn0|s{+F=Wo_#_> z@omXNAITIQY~8Fu<%O_rs-ZVFu3)9!JDXV^lZRuAos?Z)vxzrbSE@$OK8Nmj=0Yv} zMhlaW$La@zaQ2Dic_E`wCs4W}YTVxh8rGn+tl&3MS!YF&0L+-kg0t&jAYoM;G2WC|`VMLww7;4(N!^k*Qe@;3C*i)zb65siN;}*dNHzo`A zFhVrIp)OU2@M$_Xs(mnt_I<3gzF8)LN*xmdsMLa;yh0>A8hT=?{T~acq{7x^iPco$p?(gt@tatmEvyVOC~p)05`D6PSA#DkL-pd)kBn4^y` z5&<5<9AgBQIx67+tYyjy#3uI2Rq94gb6}iKmx{(1MG?iI4dxTYs4F*`qLOMlS!oV& zti;+Vy3bC$#jW68Ulpep^|5-=euvaGD&^CfFG%kD4)CQYU+y zYnJM)=%Pt;u}PRuWuTn;4(J(eUEJJjrX4Jp{i4}TS8Tg7+!kU|rD@gOK+44L{Wnc% z4Gunzxqb%AcCmI3a7y_2eB&1k~IpAGX|{yjralNq`nH}CZpU!3~lQo z0#Kn?YxP}oNJb|eAL9iO0=RM!t>qLmwgSgS_P#YGN$88>WmT1!n~s}Zm+i(voy5dqUTSR<>gBieNzO17KHJcX$$e&H%_b8n{ivny(a5fMz48IJ0D zA(1a2WL46I)7UJhY*Gs^*JQ)#e6#xaZ}1K|Y6Y;Zn=c9MvIz) zpYK{Gw4>d3J_MWhX3T~xoN^4T$N_f4%}7%@ZFu(m+Dolhp}Xaczui4k`rw{A(1hbe z&Bz{}c$QdfP@@UO4xStu9xjXEjDPLx!z0M;su^kNm9Z}pEDzrcV|YCq(|AN%!-E^4 z`2x63+B4f#&1=FoIUp}xZwl@YPJn$ALedvY-)$-;{m1W<`QbPCGm1U95n6j)j%q@tH1`5d88v$GfE|=J+UzU~@neOge49JX- zqZYn?3q3`Zc!)OluM&&zIWi7#$>n@X&SNBY)o;2zeK$1(L*DxFQA01*U!G@&V*er9 zxf{p6nscwG7|*~6&iQ$F+=r}NHys~^wx;Va{GAilP2YrKaCd6YE>hWogC+-NB=4%* zQpKa3`#}ivh<~elyXm{`bruF>9o;SDM4bYUo`eew@;HNsJm+UGZ+1qmr(MRGY8mkj z=tjMDm%dnDK6q3Ebe@QUWizpjuh;damVZ6w)@dY#ll>#Cdk*h@j|lMgB11U{)k8Bk zXJcIvP!`@8fO3b`Q_paAdfW$BLo@!T=Kq9KMKlqUHSHZQpAuYTFW3DYff}Mx)5DGU zJlyj$Wwt6%l8e(!Vf3J8u|U^ofTAaCHsL1ATrq!wrWjUO`s&H)=L^!K1F^e|dOV?$ z>?L@awTAtKkY4m)3{a&=okwfOcI|+5(1U1&C7DqlknNU%puMAW>_u3J7jl=g40G=? 
z(b^pTqyot?9e7{&4b;ybx+ACHr1#n}mG@m<>$Q2YsUJ?Z(U{zw^l?^n0j?-L*HUNT4L?j(0MJRL^*{%_h?LxU;)}1qQ-!LF6+$a}i^6}SIwjOaAov%ehpukuq`kbd3xfn>bhqQTa=YZbLto8- zh|4Dn*W#n;a}7A87Q*LOg?MlWy&NxsK^@X$6kA>>;O$`8ma*F@-y82{emZ2CyRfl8he9adrl07swWN8;0H6xvhTt+>h11@{<@H^6%5QuKc<$K;NwkKI$Hqh z0g=?)9M{h`$w<8|jQwH3yw(ahvJRp=$e&>;!~u<+PDKJd}X;>1Yb$f{dBn z>0*vO{hDfl?g5owOfEo(c=X!|%QIreLY+_ukOoT?&>p3Vc1LhaN)S&w{~@WnD1vH^ zv5*nA0Lc1ZNn!EmwSbN;h_w+hgpaNa{q|-)nn%wsOutZ(?K;=13pPCXNmru5$d!c=af<{ZsNU zN{40b9L-nZ$5f10?;iLrWu|kfYma0c~y&&_k z2RxtBwWU&sa<-Hk`g3hZxv+YciVzF7yz3kQE98MZQG0m%u5+a$JOI26 zM>#{%ObN`=jV%3QtKDA@R&5*AFPv!dy!7UhKJ)2{Q~hs(r%AIS7q6})eHBp96PC@- z=eO`i`r#~c(6U%`at$O8T+N{fl@_-Lzce|hDmv}^J6e5A%>KnD_6hRznykE& zlrIQI76XXU|LKRusDwwB#a%fVkJm$92zwEXBg1VHccl~iF{b1=(X374cK&^G!CWpp zbEE^j>#9DR@%*hkd~{9%a7$z!c>zY_ieTpn{h!LmlrW#uqE-)wXh=rY{t#vYuzLb^ z_YlHzULZ1x!Ze;yGUlgC)O%nVJlwS~x^d)r`s7)Hom9_+ZY_0!B!$E@s`aB7X&Wsy z8t6G1T9LQyJGhYD+Z(jq79!oJ1>W?x7lDSw&C3wpwj^h?8ni0hb%;i6^ygTPK*a$@ zK87zm^nwG}3-TXtsIn%vwEx0k_W&_{mx#hJDffg2%E_QG^eVuZu~N~4Y1YI-z@XYd z1E5YlRk5XOz|yZ1Z5O595%$7~KPs@LFRm28#DH&z976;tdSlu{g;dwyqP-PSG37AI z-cbSB@2L%^oaCoJVkh<~5_@E>8-4@pQqco)n8J_VP^GEMLMuAaRWqlcteAXz#_T_} zfq|}HP8Nn9rJ4@eRmktXeJkeYL6TVK*J3`oREMjqgV|lq_DX#$q#G-`B+W|XslWc4 z1y^+LD94BE*v!cu?qXGZY9PikuDU`cXP&|e7+DaNi4dnOWpAY~!;)0kQVq+(cg}f; z_{{7|6i$YU1botdyfl>FyD1m@dzNn;Wopw|(g2f5;E0^PBwQC3=o%|Vzt$Tbc4}~+m)E&p5&;rmYbacgPPO3V`9+gqdOBcJc5RxwVkcZSH|K_+ zGp9SXY^?S(b5&c$Ow$RY^*y;)@0%qCZ6EADErMaE zY%4?fol1C(SX5JvXs0;KU#IrRq`stvwz{_Heig+b6s;NAeET6_d}7~5N#SP-sXGvb zijb2Y=hL%e~-d;V`QmZ~f1VEK~scTcdxjoG6Q=7LJXRUln z>#YCvPQuS9fNYWIQOrM1oc|Fb3y`Zw%3tk6rX_BqF7sDdL7yz=oGLy{tmB%7Txfe| zoLWCx0k$je)Qo{z{_MJ4b>|KI(r9-dxOf%!%f19kH;3NSFIiP2TuK@0q+i zjI&|Eeit9(OX2Kd3Vwyx^wIDDq$LTr6v6mJGd5SRhax zsl1kFs;R8K9^0?e?bHZmHf^uZ;puMJ?xGBPdMWD@DAjJA4&iQpcQfa<9b|ddk>Qx@ zaezF(O}=;toH0W6@0+R^W@=Yd{xDar0=T=0C^qVukN}~ln66MzXlCL@@$$xjxnXc7 zibL!{D2~0A0`K2tMyO=&`2&Br83yc_O%c2g@j1`O#lsIw6kvY}@ViZThJF7?jn{~_ 
z;kXThS5x#C(XOM(AQ+&7nHbucgLKDUxpNZG5p;qYuo&hIwv6?_YiKz_7lT-B+RD=#S9(6<#`+k!V-T*W84;wYEe2# zuc1=o!51)UW|PO9F>jV1Y*rz;cEn2%n&!7&Z@A0JYN_TVxjF#NNV1_XO5+vtM)o2U zFoDzRRzgVHVAA@o$Nn*giE(8yR#R}qBOAuH(Zm>embaPc)%(tO@g{9{xASL9v5jif zk?_9U5f8^Ydyxp6xq%lDWJ$4(qwPR1<@pRRw0Z?YT14lj`MlP~GnD&Z9n!ru+g9gj z+uSjuua5T@0+@yYjKx89nQ<4Va_d?S@!%2@Jl9lRlAULM6GZlF#lFOw4*Jtqkw}|r zKS1^Hfg-Wbd*jIigqqe~aXMQDPnZC$+4x?8C*n7ZqUi69f7Ih@LivrWteJ9Qvz7F7-pGl}F47{h zo>)q`+hW*Wv?!>LxUn5_wvN6l)uhY*l#@4oKw^tkO1T(A0Xfr))&|{(QXOGJt3_-S2;`$%iopd6iQOm;BhyZcp23Hlu)zeu%8g??7K~Ba*F8 zv268Whnm6?7&u`9qwR3wrT_jqZ}Kle+hrFxHRI?7PKg}>T6*G};caw9CaNa^&wa`y zlyl&}J+y`)gNfWVP+YSa&~Aq-9b#Aked!u7Jr!uU7{hSesa?dS^S~Hakw5jx_6{&$ z^`8bZ8s0)vnN{R%AS4`&>(_8|s^K?XNdRy@^iVl3)tP~w$pajVavG5*P_7XZR;XMT62gHdA{`2T znK23YWqv)*CrM9h=hD{!BVow1lDk%INkAnN7g~I)_?_}V5t+f#aYE}1Sn)7F>wp8m zRaflY+MBN>SSq&7&7*Qk;Fj=-p!Gse&q@_n59KJAR=ifAu@q)?>YI8-j2=F;e&%0P zk9r_r6`T!^g;y1F0v>h|P0^UkFnXT^di%q%3t-qOhSLb#ob`(o2iEVBh6qG$s4Zu$ zO_U@Q+8gB>*<6Gre(UTm7MXQdMufy#iwu5pwp9q-`77Ma% zxDRH@(8-C-opWP@Q>H6Z5Bh&2mh?!~NEryg_~cfh{`n>8#1kRqX*A-T4YyaTp1_JB zl{9$7$B}@y!O7IQ9KwZJfI^P@*_ydx5v$0M!dxOaSv3pFfng#ul4>NNUnze|GW{xC zF^)(3dKI!8$nW)Jrkr5rwz}6JA6sc}br=HZEPY+XGQ|eU?GR6FuLo+Pb-?+<$(WRQ zB?>*&CD?^w6UpvMk}xvhm6iFXYin>9@W%SJ9zPzF>E)g$h%lrTitSnD7$6Wtju@qPT08Xv>ecmo(1WT=1sC zqWExP9FPe^$gU)^yVz*hbuy#ky3rL8a#8TefMW)_D)OZE2@a|ft|-m9r``L%%l2d{ zOvDJ#<6rR0ph3cAsA`DGGjh&nhmkXq16*m9RWSUD|J!Y5>eLge9^Z0pWoQsRR56QL zDEykZL(sa#Gw77Q!YQ0u7}H-}Z8 zt@q~V(6ZDq7YM=nH8U)*y!bzu5_~X-ke`uN@jLFi~)wuZwN4dLR8?l%j5mZLYYG) zxC1pCD3f$2NJ#`cniUO~kI^vDw`EdVllvjpvkc7%5D^xqJE?Eih-*hE8S*l2{;WF3p;&^f!frHBYi_s)d zn-xVAH^3xU(3nm2Q%+ih3u$Oxt=K}c;UFxkjtyfCPt~47Wi)po4Syg?D}Bc@C`R!j zX~v_%(~>e5lX=L)G(e?(uooWI@kdcDxA=!+So(rU96_XsG23v`0bq@jw_O(N*GYj9 zCmJvzbae{a3eLWn_kgDIn;cBkrcBk5y1sT=;+%{RcyJ&B5Q7xE(Ur$Cq4S$Jwj)ow z{H$u*M4x80|F8INLa!i)U!>s}2#^C2gre`qBR-?PP0-EDzF1p_!yE zD_={ylVKyfEWZ2FIlBuq2C)_rEI#Mg4``mAtd%!1Zo>#iD!~;4hau=tUi64?1f4PkP^H> zlKoS02KScX`jJeO>;6MO8i%!)^i(gh(TVp2&E$&Aaaep-^F7tdh3&h9ovalU4vBgQ 
zT_sFC&Y_@u|B)0H27e-wf$dD(vFZ^An5q;cbK>?xX@YM{3_plbomtBC*rk3Rii49N zJ)Kr^6GfyWje7_kn$WC-5Y~o5W%@r@ARR9AEcKf3Pt^s?EB!dfnqEMy<(zjM$wJbNy4Z$#{dqfou z>oliyleaGDCIVw7M+ODgGsGxtc}h}6^qEghj_i+^x`;uwI2>3TiEcTw$q0Isc#nfy ziu?kdY;MR$rpWROh3mhbpf@tRK-TH%Gb(XWGcC&lGxr%T|H3Q1e-SU?Y;Flp0E}@! zT*n)<5*KPh{C5<1{qER~(vY6s3w1m$rU-t-ARev@|jAB>yd?rQo}~WOioan)4C7D0270iPtt%Z|>q zIM{K#1Ngp&MW}o*)_)9B5EYNC50V+{T=iPQRWcO-;>JK6>+2M!$;)2Pb z^+J57(mYQ649L@rb3aQw;PORRojU#{<@HHdQOb>89HdksPlv#@y&n7Z!2J@1a5OuA5;A4Pl*OgLCWVYWT45 zZwy`_!O`<)ZgS{!)E^O)6gw17MA8B;e@(4Z@B-u>W`SAbLm@5Pr}~G z<6*0v2``{@B%pR|j>!#E#x7Cw4mhVuH)pn0z)A6$FSR-Z*1OphfVsE^gDwoO8#IMs@-rY4 zy_4R_Y|JbgnO;b85G^n8N$W%az6Z4(p*&8ZNFWs^Y z?;upR-oPg?eTvd~NFj~sWEc>D2z;H6E4?mx7k2dF!wxsa(GocyqO(fZU1F}ynmcrr zR^0w_g2svc|&S-tpa3F~%{-4q@Kg0bYvd55z(Xh!~CZnL3)v8#9p#{y+2ODzo3J)CDV2>PNgA_s$$j^ zc*ggK8BKv$Gx$)DpS%-hD)0m3quUx1O>waB;4fWbhnxcTN5-deA1i_^gXNKulDlQO zUh;9BtK>PSoRN0}o9+n%xf~v2NRh>OSniK!;2vEi%A*67D`iS!jR4h+w0@A4+>Y3G zodIyoEcryeRnfT}mr(Yn@1yf?(!UHXesM(2pgGB#*3AdL>YI9FO>MVP8+ zwu7YND<~24HELj6Ms+t#%5cQA3Z*ndtQ!wx-C~u*Pa>PbLZzGvBy{|?EwgmP9}Sn( z)Ad~uPhd3=7_1i4)td_>*>njc)4GqAsC^dsrc6c9%E0hi#ENs_Z;bMIT`Ay^r%>vu z1aklxWyfgIN<3-IXGRDVsJw_sda+Z}ykae2kbotk1t49~omyqa@b8xhg{5vcZP7^%yF{ID zcoRB~ZAnW$ldni_jAAE{uA|(F8cR(V^!D<;Hx}o?gYH?0Al6x@q*-*q*HskmPk zXA)H`#Uq+`R*eRH$A;>j2%c})N;Kxc)_1z}OXn1Rw$NE|nJvTpJK<3mQuhwl6$}9j zNQcZbE0k4EXr?S~c!$RKEO-Um+H#y}taw`CU)h7$L)x3QkU}vGXMl+-UhS)F`}(eIzl(U*Iz`S*6H10j}OkrS}u5+ zBBE&rA_$jNxswrBwWU{P=DvgWtV5*GB3fKwn+Z1b~%U>nAOCAzEvoG6}|KXPt zn~G)#`c!VnlN)p&AiU<4y|(HFjPpATdrdzv&iBbFK=xBnu41Tn@LeS@s`4H8NJiI4 z_K}9DPFjtv6g~NyQcK<{XqDz#4s#VR&J1CQFi7Z#Ab~QkZ>s`S)%#+yV~N~f6!VU< zUwk=(d{)^)_yv8;RdIXK7%#fLlahrK5M#?kF9CEtxiu)GN3Jp$(ZE~KewJMF4hu%) zIME->_a<@~i zfZXVpku#cGa22ix1gDLn#P zGQ0~Ht_6cNCBw`*14x5mNnBdaCO;>@+v?jm(`BePRhgG^@{BN|>CL19om4=+SbBEN z<1;n$luA3pDNA9%8kFT}u@G*(E4buXnk6HKsT|)~@7q)%WTf7xP;GdgYvkt$Z`Tb- z47s5yPuy5;C$$+2YY5OkK*IzKF~)4ZKSk}fJ{uO&50_0gIV#eJOmBdjQ*1M06Q{_l 
z(g7AlOylbb@1igZo=5%DyP5?S@qEC-MbT^15{jahQ)~YXfCuSTKFFAIZ(pZ* zqOrMEKvV#Gco_F(0GYQlNAFX#u&>uC zSzTD<#NQZg3=->YydW5~(hEu5=byl_c;}X9AWFxa+~Y!i65+0*ndQ zVQk*E+>Pd{r*E@*mM_)648$u{xHfuP5lf#?to@c^(U`5QOTm{r&k*USmDL0J7+E*!&`{tY-go%{UJ?70KjU`-NjGA`1e zT&^&qV`IA-%aVoEJsBwMZhZEW{569FzY=wy$-q;XMB>7x+0?p`}mJH7y1n>kE+~vo}x~3_34#@4VrGRNm z7CR9F*wItEDnOe8_q8{BfOI|h5l57M&*W!0-#PRaqk{;^-U2)ek`0}D3MPP9k#R0D zLU20Hr;{@DX&Kt^ON3u^tZtosH!@`dsTy;rV-Kfp3&PVfNID`)&q$$oGOR9!#|*0@ z)Uo*w3N|E-Ac~M#a%|;^u`DZbOj51yknPd^>+RuJV}zPn$ipWQ7L+PQw@3!X$EDPo z;5pZU0}6f}WhbFBsX6xU(u=_D1sPknC}=oH<_k7P7)Ey~tQc0lL<+3#^Y@#5QW*0) z<>!^g-XK|<(l$0(@mm^V!puRrH36vlY;D9a%)0i@82Vu`v9L>wzq{-X(^LJJ-8j32 z*E$8`KAKr@Tq({MKxaYr`RK6`fwcO16`Cd)Z!1%j{&4r*^?mafR@yL@?9|R|5eJ_1 zhp7Z2KZ?$zb3gzv)jAl@aAN>_^_sh;gk+9D%en_>jb02@Y}KA>uTG;D*#N+@y#M>I z1|t6K?p~eS{EgVSIGCb$EyH7)#g@#b_N2^&quYeczFA^xUms=;H^Ocqb?V+hT}Xxa zn_=j1R#^2)kr_832@Lt(H8&RxT~uQOC#o?F#fv6eR*NuQaq<-hDI9zqicJ`4@Bj=Z zF?X9r{pCokC+UO+^W4gP+nj7dA`s(1*hP5Sk;fawegG=o@abTTs%hXrmv$C57?S;6lFPnz+$aJcwBiO+=7J5($PwH9%CzgjJz^%g=ZyjP2%3LYn5;Ewgrj0#U zNP)oKy)%mj1@`647CB67JQTlkm~El72cL>R(E@0x^*V60W#iS%3^!;fM_h5;nu_wa#@(0aV1z2N59B(vI}rjgs( zp5EZ!t>~TKdOwTI2WI{NU*F~B@UkAL4TJg*wZWeCCQ3sY524`b-Q6gCR2k8|wAn$% ztM%#Zzc|q;?u{zAp_Zn-BjxbD|NRIPiJ?>UvOQYcfY(VeVOLq#(6;(evvAwKa}$}* z>4V3u?bc%+kMKS>>AHxY6E zx6MP)mVc==c1l@KT-Xh#Tsyn4G11siCiw}vn*8CjBZB7DMB zR>92X_4BEncJG^dXr(OL@i$*P)hoUuye@8nVxfN|^yFP(wg2E$cgYz7a+*OrcD!{1 z7Rx6q1F0&;g(o`HaCe1-N6IDfRtv)m5xcM=?J*0cD|-zny)s=4)f&%GjzyCD6ukAT zmxQbf2pbtl_y^fj8;}HniY{y<`Uty2i89fYoxB)F9uD zVkTsOsy5HzMsq5J!&uz?w~Wmk9lc3?tOp8wAvIR^lnMBHW*0&Xj$qcc!K;vPAi3K7Gl~`mqjZzxO9Ze&~ z^o)~(y&g7%Vk8jvqx9X;x65O!;z5-_-;(NtolR_e=UQI^_EV!PG_mIuxRnmLGbw@C1^3-Wnk4vg{!tH8JQn zv|?sHdt-L{)U%x7%j^`t`CM64E>O(YMhF`>X6@Wp4`9aIlDm7BI0Q{}J3|Qwnk{() zcOJB4m5}#yM?aP3)OH@;<$@Z${+RrFHR&;(~GzsWxXI01FweAGq zrU6a8iDl|Nv9-sReD3?Rd@UYCVBt8kWuynp3j&_~EPdJ7f@`(#i9oM@K~nB{SrcQ2 z(7%Ai$eCBoBaU*6i5B1oK6xkQ~4{#Dm@cS28(B%fB?bIp~1n@?`GWU 
z?qeaGRcQsaO<}t&7-*$zD8u;=mScbug)If!jXy!`9YNT~9C!AehOti`MU=KU8&s++ zZOl}P;tdujV<{*NAkGihkls+R&0LA(6k^WtcFe#yl@)(c>`vqkV9q&Rh(Z2#gx{z< z1o>VNLvM9}KP9o3Xz+LnPrm9w`9Vg|f%a)ONauqDFA&=8 zphXpi{jE=we{A4SnX9%Cb+w~Q=>1^112asEk=#)x=^M}Mk{rO3B45@N6&akIFmjH0 zZI1<_#~o8qWS#$iF>b*Rd6w?~uRURDYw<+gL{^CJ~(lx_`o= z$+C>;5=rB=Mj*qVbE|&R{pTPzmmJE%hSo5*A~>X2?{d*S1lGz5obHOdl&G|NlgbFw zUWy8>=*eO{87qTR7dB%rMV?`~98jpt%DRbtBVd4RO$0EVFFr4-PjxJ7WxwkHMmOo@Pf8{hHkG{^1u;OHh{kAU5 zI0!ug#O~no9Pa9`mpf5Sj81*g0a9^s&1X=n%EoL0h+;jC0%CBG1wsa()E@VBkQV!| zI9$u-mdi9wcSyzA^`>B`&t6Bw5b< zB+>%3)CEzMXZ#X;&Ol+!ijmo$U=rt<4OFGeME-YKuIKdUxiU=g}Ip%xY2@DZ1Zzk?ze`zD@ewQr_`tp64FDGn|JF z(do{`|6U*No9^7)lTrzW18d@v>KE`&uw5#JuIK?xi-Y2Pl9CL)r(G>hTonR?-tj6WrLp1SQc&zgz|!MMZ+2UDlqU3d3{> z7?RhdDx=`NFr8Ucp@EHQhR{dM z&(v0hkaLC(EMSVgVF~QgwBCR$AlM^nrdoIK9d-F*D{p|3cp>51rVsBMzAi@MKr2kB zP#diFvFqP8{Fd;xXD5>JiCI~b{H28u=?AQL6O^yw$93_sQxx02JIEhzq-c64dpJ$oigqa=s-1%x zCRlF;XnJrhIZMo%0ErJV$(;-j0NPR} z9;4Bhm@R30r^&DYB`eM`{DDzXXE%vX!avcaz_q^f=VOaUSDl^<;!*QA*$)h!&)O4z zi2%Rn8G_YWT*JEY3cMJ?poZFB;ze&3LsE3JGc(raqCaZqq@yj=$VIOnmI5ps_DTIo z|FxV$kf@}`4k9Hf?bA=oQZXI;SO%4ymQa5|CRe=aHI3%%qb64T|7OOfABHi*vSG@^ zECBHNAOEaCC}W_uWjym>>|!S?+adk(Kkt1IAH0sljrYAYobAn{dKS~0#ScK^lF`;s zIiBeX0{yu!ft(<0)!Zm;f4;;2l!URwPGKxCubO>L<~@t=4YDzwGo2#d^=8KD=F?=< z4c5E0>?ORR(TRZO9@2)FQ8{1CjU>$XVbjm&n?p(4HmN@kg4F`(_K?l09Xr1w;4hf1 zbC*Wx3dBMaGR&6(q)JP{H+a98VIryr zNoz(t*`_^wy&k?l-$ep(@DBcS4gJ7Vtf`o zxu?|wk0y(tuB-oyu>>%cy`Xlx-l2hm))k@lrgn2IpB396AE#4pQ7c9$N-mvK7Z3x& z23uYiCQ>YUg_QX?H`DE$C=YfOObBVSTUa=#cuwDya#WT zn(cP}@vFSe8H52gwPeMIt*PZ@6{(tX9;|^)XTfH7B-i_eIww&I2Q0Go*|G=CNNLHJ z8^rSPQv=^cDQ;$19f=a&Xh2MbSK40a_oSF?rL+b8_s?8kg5`)XtZEG%F_=zIz zE(rMu2K7du=v?9L684iIRPS5@Tu4i^; zTOvuL*VE!Fh972yZ#FSrC+8iNSJRmCq#Y7nl<4r$8{&A+57a%@TwD$LsQj)No}k#r zN#PHNiC6x)!GxEdFO+GFy<$#NsYW^a#-2*3Z~4FT4=9K_)A2>P)`Jz-#Ba(Ik|A3= zQH34?s1(8K4OU9W#H5Y}3@*9M9F}}0F<~}V(>4`No`a!@3Wk!tFj}jCIZhHF;KT=H zXpSGd2>8)P-f9-sK>aB}0tEbqoBEu=>iq2DP8D?zF0tefWaWVpv`gT{MASb!CD9(- 
z$AQRh@?Ovj3-0*H^arPp7#ZChgX+;w8rFRly|KM~_nAno=yn_i6?F#2Qy0-ZEU+RA z^Z{7iuy|7(;JQv(F?`WlzWS!3{nrN$;|lx&1VHHxj2AkP(J0g-&OI*b>%zYB_V7Xb${U&t66{b7Y&rW)0mQ4V>gCGWn_2(5*@k_-2vPIk;(!| zi#|mfw_J3HA;zQKQ{a8=nkTan2H*sao^KcxFp??>l}OJd@oS9<@9^JCdECAHgYRZ( z7V$%>Ik$XALtj^poSH`p5Lo;#nyDcb0J#;xvKx^>E5P!k1tjY_??uIsm{70Yl(OtA z^Eb|H&@HNfp}LeP%@$+ZsG|hyZNWcP%U&HKVO06S4GsT})v{F5hu^ew0yq+7Us z=@KD*Rc&tPP7zodI+6@_aXQD$ki_&uSZ#s987*uF2CiJJtd=QvgB*fm9wJ{Zzq5 z9%Zl2a-ye!$mjM_)qkGUEj_p#c>y0Pjg;kjOZ=8pm-6jM83n_cE`e=p3GgogwMz28 zoTbejMub7#y(~4%QKGuzl2*DXdLHffld~b5W#h#G3D>vsCC>ts!{yuoOraWQHB4_` zkIF@Tx7CW0W2AW5XEVZj>c_srQzc}SLhGyYnYm{ebY*fskvck0MQ|}6Z33DZ-k^MK zj;jo5Ww1KSDwp`GJzr%4!Gh?pjc$J8^hG8>QIQk)Jo$>qSP0tT|8FI&cZcB)oR?Tz zxXj=mEh%&#iTZh%ga2(q)SRifs0OheNYHC-V;G~kIk!M?7;Bp^le$*~^^Gcb8J;-+ z?w_-<8^hLIz6ZQ^)~vb`t9u7F%P| z>Z8LA}eYYk4l*yuE59Pv{643$MHOIn7I_#Xaop#t_60Jk=Wow?Ck{NRB364`;nH zdO|Spxo!+34@Ao?j6fBv_*&rV}4g#*Kr|g(rf4Xyn-_%yl$@cbgb8{>RczR~E zfJ0<>;AG*pzhQkMtZuH-&63O{0~xT_4DVQ*)}=BnhU48kRr;fQ@}CZ9c+FHoDvkaB&}?ol#h3P@i#DO1;cYD<4bGIprQkFsyAzw|jLH~n-` z+-dk*4^3Q3Ei=csZe*|H(N&fj%Cti~;)J9Y4>$lT--Vw6QzJIz8eN)O>b5>xMO9Ye zXIh6SCA)cws6{*VGMw4y#I2C6AF%&y8RtMYEDF7b%7HT*;MIG(fI>&bSg@8y#r3~vN-iQTnsAYXq+uuXWU<)kri}^ zY)Hn-rB6aH&zgW{|5dC!jM}%%Dh1>;%cl#M%cdv%aw(Gx%K8+}9L8G`FC!GU zU8r7XW03&;L#kvZx7gP?(nHB(MydiY@D>;ZOZjK0lq*W+Xd4CLD3!?{mhKiq?jk?z zS+Q`lr7m|(c>A$QL%flUj`7jAJ?J{I;p3SB0M!*Kx_VCIx|cvXqqt}-(bj^^uU-9*LDn)f^yL}4#c^@-QuMxJ%pks?>;VZ>!OdKhLVB!{ z7Tj*(ix5jCZO|Hy>h7a#zRo?N0o&E0Y)h=h1b#NOO>h27V+TrZEdT!>ZqNq>DI zc^&h*IW82frJV+hy;{5=>kd~sNlP+s%moZ>$te)ODFgWd%rp%; zC_~sT>xAu1O3oxJ!2N_~i4---(*JNDI&wwH^5JGQao8;Tw(R1h$=l_X=qphbz~15i#P zX4Dva*nwq|8<~-jG!#moWa7Ksi^+xbVWhSqf{c%urIs~DaiUSutDTwqlhKWg`|5uo zm2j4vP9z#M%R&&23Ha-t-UBVdk?1n*Fp6Q17AYr}4Q>N8|Cskph#UrJjSBIa=I^cd zyuFXCKB;WP!-^iF8+d`5^tYZy3*b3);>lnon8v9md@tKi-&gikEHjDz{$xxPiB_gB zP-g7ggc{0pNJlLF;r1o$oie+f!AKg|Ry%fmd`z4{A9~gmL;uUfjDZd12aFsCK45lM zn+a|QOcRAkq)hnys_NZmAP}jZXZH|5J2lbC9&SGz)eblj zS(WEKvt24A4lV%boLjbLaZ!Ts?n){?TptKQ 
zGHh~Z5Bm|_Kc#N<26T4-D6>U7wiXG!dg%D5g$=>XRXt;&yO`LaN!TaL;*IHg(kd3I z+0BX^W3=rj0%=$=^e>|9;h4_ZRYrvPV7!tFfQpfc57_# z37=-Dpr$a$Q8Ugz0ZDGViN}NUXc4b{5jmgf#MnJL3MSNbam>)7jODZEO6{pYt&;lm zmz;0-@adLj2S)^W_LDT)bofcU_TguPUB@#dkX}17-bkZvx7>w?HEEhe;5+fQxX#SA zNIm_wH0+uvwsO{}3(}gc@WmsSa>9p63wzw|N#`LuBgNd?9fCMIGFxV}W#_)o>jKwG>HNGUaE_7DYCEPvdw&usAsH6H}QYgC8=e!L^EUw_%WDynSPlarkQUuEF~ z+K#wLSLYAWfNrKu=a=I->{a%xrXPkS02y?mfdgU%a)l@F1lw45a0ooS10V4GuVfPn zZ#7-J2$M)Xa&1q144~R#Z}T+}{C+&0;>sc5DR}Z6?^l*RCv4|%lN$vCxWKPtM2?KS zE&~nm_`ldaa%U9!dNPc(3Y|Iw3&eckc{$}G8oisE>^AP~Nu@++l&StK1`CLVGx}Ml zQ;D!?hAel#*|s9I%Xtk%M4xJ9S<-G|#!%aCuwd>WWb{X3g@k4uErgEVhDl(I{X{^M zkn>G}8cw4%Q3(%u#{y8-GOE77IZZd!Ly+5j@J?`RA6k!aro~7%;V{f2brcqa`G{Il zhJRW^ym&r|b0tLsn4?;4a^Em*Vus%cfD>OYdRK&J^B(1&08F$lhQ7qR>&c7Jx~UNv zSnTispi#o87!MB|q1_L9zPfEDD+1=D!XzBFh>mp8MrscRF695NiVGEYuc#Uhx)Brq zJnHUM224JCGz%b#nmyGv%?M1{yS|$r>~8cK3J&~sdUcgsd$|DNrh|HZ>WkfR(&Ip} z#W_TMPtIwVS0r&I4yg|%sCv5;2Wm1mL>)C?k%LDk&PuUCFc%A~P-%*S5>#(Me7MM0 zCoHO32MswgnF$5d+)7u&_|Ei@ZVtqr;IJj|t@PvBy6^v?pjJ8~-q{mIW3{En{HsW= z!#C&|XbXB)=SYO4B`g~4OSL1Rl+m)4`+Yhp3SIO8tTh&b2j)4I+~YfyRBjr;){RD<9;Kt#+0BqzW2(U2${!& zT`p;$dz>(+PkD@_d1mT1d*8#0&mt@akz;StZ++WCr;^LGBHm6%O?qNY2>iZS43i-# z)jPC10%H#tEL}O#ok{%Xnl==jk$XKTU7(p*NHwix$~M%Vb- z$%X^!LU&L=kfh_W1=Sd&;gKn)y!#r^dNSIZH_rahRELs<->dr$9*|lsd6kyo08*^A ztxz4!Ycp^UoU{nSr4n3zLzBccl^cjFpzK%WocWvNAxR1L=f8z6XJ0YY74RI$b7kqb8=v1b)-;fsSZ742^|9_w)k+Bwj5xaa>F5g zv#U9MTsiT^v&Fxkz;a^+r3?p52#Yk&3?$Q55ybPdgVVMC)W8VOEIe}YInmWaxm)&S zyVP|kcWQArWtz0^(G48!yks!s1UDed7($ggPmrCq&5ic1U5$onPtaVf=#-R+W7`!* zSXXvqelE5qWqT27uz<_8a%C8#blb6vF?&lm`noYkR6h^0(I`FryVFhrKPqTv$ln(I z!*J1jrs^^R<9#fuNy-&*aT()EC>!XZjIK-u6XS0hqcnu%ZITP-^*S0pr=+-b6!-S8 z`^?m*hcNK?o-6PF4G{(vMll244gFN^0jm1w_15tm?Z6+`U))_iq7z5SOByhioue+} z^B1B1cjl(tH3$O7I@1wM@-MY&)W$sewTq+Xd1Fdas17LBA0S_OHGl`25Py$%2;-w} z?Zgi(+owqELcn9uy|5oEsyi*w&pb6#zM<(g{43p+z}#fx|NHuQ*K;;>95vivU}VkA zw$h%lkGpO;y%X>k;>{7^OFJvBLACi@1;93m#fjCIUJ&2yKC@IrjlsIaf*+DkBl{F2 
zI39}t*+Hw%gynQJK3KWY?j)m^=jS1R`!w0*F6U**bg*APh29hV2Zg|>O@)RkX24+n zSA9HD{R{=9XU3qMf2zQZ_<2kZ5T=h}CwMY-uCIS?hh}m2a}Gjy*>99V`>-eUkb%c)$smxgQ_J*0CbQQy-CG+gA!<9lidJYjphtNy zZt^@`LOg0LBxy^LGV&uWX9d>W!gN+tMMv)5h3=FLQpD3Uc6#%m@a_k8D)jvOO9`Cp95;g48ZJsroyj8189Qm5HR@s-0Vfj*HN7CZoY8`n) zgXD-iu$hegvV1=H;awQpq>2|sS1F7c;)slzyj3xt+-Jr(abE!-OP?Egmf&hdg<1E#PN!ao1*74$8&R03d~DzX2Hdb*S$>%tJ_%CT zob_6)JT42c5YHlM4~#+j26O!>|7bp`sheca4nj)&PzV?$_Uvm;VK4DExkgOPJf0NF z3@O+&wy^@C$vdLWgeDmVa~uR32d~J_b_efcPM4E!Er9SRFywAU>B@v9jU%y>EJax$ zn)$+9UbFBf@7WZ5T8VWBae^MUvv+^r z5}6lNdFybo{D0mo;JNeUY$bcuYq$t^nmMq{kI`(56`IRYpzp~8X<~odt4Sxgk2*<> z{HkXaK?V?0|TX$>-I4sLh68Ak-hu)8BC2+q0HHpZ?KvcIk)Mzb3X3U~9!mA&A;zLK#uBXW3V{#oZEKpy&tP3o$LMy)nXHI^3a0yo3p9qbjyKT2usVR~P8%;x`WLwYPiDUQjuO7a5-nkgNy!ByL(R`O zcSIW0UmpzwQcMri@=N}ub&Dl@c?+$B*!Q1=777V1M}dh7OzN9%1i5j>9*&p&%V4j! zr&GRcmY4fzfQar#LgI^X!t+>*@V9rrndfCnV4=CSTTzAd!yR4X9>A1LamnEpb1ji#H{ZT&JZl@Gz6;0*DF&rWCgUx<*dy zSi*)z!lYx%U!a{nMgdBR(~q3p6@EZ_+ESqyxjyPd=r}i;Azp1v%*Tjg>dU>P$u2G{ zsc_fXfeUPJ0!k4fwpdmU$*^a1Ax{xa_TG>WmHbYN55AgsU|tv%^8QJIwq~o6b+6kQ z#<~ewUy0(QdtKTF`-T$tz1^*Amr;Qu1&^1IGX9z7AvMHRwLloxdvuzWBtwNTQ?%5V z0dRqIIOSB&@=|ep!}I0F!Ms9z#n$g5x9k7H3T8ZhDFNiLvrin)>`3G&0v*yYUA}IY zaaYnSfL^^!<&njU7V}fW4m1;SpL39W@x~yT8P8=)_oWQdF1avNlF#W|gIN*}$`2tZ ztNr)=$5uJHJtQ+0D|wS~=75t_1is(Ui8T0^v=Pn;tvsOmtqu#=fOZ#doYOA}#-S)m ze9R>59Vc{>UZMpoRViri*_gxEb-UD!r*Sj31@6%j=!A{n7_{Anns|BLaq1XzYCte8 z-cZ!R_?Jbi^Sz93(DOskn}^mUyF5MoLG~NGSinrhy`(djugvIyZ!weI>k18ir%2c0 zX$x|q6WD(Ndx%pramTVOzpEh6o>jGqs9+E`>75l2=xJNAGLeMWOy>pW-e7AaSK+?< zCkI0lRp|M%$4D35b3;l8YXJH!CrN|0P|vGK^z7dV-WllOB}iLJn9vUc&?j=56=b+q z=aJLhgN(z5Jg~ppX%GKeT4~`Uq_CA&I)LUVT@f>nGlwBfMGIY+gw+k`5Hp&=C>TJj zf|av2{CuMWrkg1%eJ`lt?Xnb43rL>MqOC>;3h(1U!PN}WAMz~`qVqxEpokXIeMSruu(_bcfXS@MkZ7^}(!3L_8x5 zV{adkA+`bUdjHvGo<1ribE=#jry5`S>@I2cZY-cHH!5m3+U{b$h2?wkjmY7)mxk7v zp9N(|4<)4XObUQbUal^4)bdM(NZyyhpKfdMLh(YdVzw#mdIcZU4R$=J`%H95VU zz%AOP6?aupM>oYQ^v1{X>hq#lIEy<(nQ;%3O^rOF(C#0j-w5tLbaL82z|qwM5#8fK zmt(Vnj51e{x8+dGu#}~!z1HmPXSz4IKzwo95JQEB 
zRG)hR9D=7zNAf=`^E|1m?q@_n0*_Uft~pOk=Jz_L$b(KGn_M*6xso=KAAr{ zGoZ}&G9%h&hUtcqT97!KR! zCdGTh2rg^LWAU1o{UHEJXbtjO({>9Zab6%@3*psV=0>N=2j(fkW2*+6RCQi{h{d7@NuMs zwA`So$jBbRAY5?(BRw$hu=K&ghhcNNuKV&uZc%_Ar-5CI-r&}H4sbBGH+p=)aQP$q zu;TqwY3BRQ*4a_c+frDf-=z@G9Y?bkFuZ~^e=*c_E`@Hzgb5<}@Fy8kdi!h`^h zy9r3pXvoH|?p4div#C=0QN*p^{DANcgv?r_O|}bMoP_B|z`AnZKIX5Hsm*E;tv2ue z*CCNR89PjzZX;nhNpC4HzzaE+QO=aIHy-LxosU51fb7by`Z_f#8gV0`;`LvpRxoJ;IF1qx60 z|31V>Ffpt7>JX!@gc2>;F9h6fv%}A822tM;nO0Q{*&8RgkkULZ&;gAy&i+z~9TN)x zF5yBuZ&*R!Qkq|ryO-5z>UBw6$F7~RHk_OY{#EmaOH|$W*z8F+kzLz1qWX6>DCwT4 z$*(woM3Dv3yE)+frr1bkx!sWdD-z3U(q<1lBxc_gwoAR68(p(U=XQHnecq4Bu@QVr zS)hHl~uPDo7T!mUHd%*G7sH>6E+{&IPzlu+_#M$R+-=m^7^g z#~>gPiN_&u;JOAoKALRB4lj)s^!%$-f%oiibvjz2n1xKpbE}L7RP^w0#Tm4|jAQ=tt;G$JjE83h8N0eWLfDb#2wkHBe z;t|C=`W?h)196@$iitW_X}dy?12cuYOTs5h!qXD?7sU?F+!-q zYtovr`F4lfMb~e|0?2+zlX65GDW)es9Nj)hI&}V&v$T>s88P<%+M0e^Aw^pN5%&jvZ&5qNV#k_deL85TJ zUyI+j9e%3Q&db-pRFWBjH@zyUY^-ECWvcvKUbKgy1}m;CckDGy0$@=Aw-ZD%qxGPK zc*jz*CZ$nxflhQ8Lp8c7o>lFSQ<{Y z>s9=mAVycjBK{4yl?w$w40}-L=Qyv$5c41-qyYI!dIEjE2a6t(q(QHpv=biRGJbX4 z%4W{8rZ$W%CNv)+1j0R%jYc?tUA;r5i4qa~)IjkjiX49vG|L##|L|;YA&i*u){If> zMf89TBPS@7B#&9;L;0BKLMdLtE@pvjP}GI}c^@W7?gikLoO19CqlBz)mxnAT^RTY0 zEswn3a}&K!qAMB%X%P247y?VsI8q;~x)IgMr(Frjad{eENC6>;w4rKuX{QE;kM0ie zWZiSya1tb9KJ=$VAyJd>^_cV*;lmu4=%+vu3Qjq>P8ej920!jL+`dY{`XyEv6c>>9 z`S%Z@!QAu6(GpnjyYBS}?EmPsslGZQd0d^sP$>QKy+OZM3B%GVOeJK=W?BUdFitEK zF4|2yyzD4tKQMQx6=gDcE}j1pLMM(p-_dY0^*Iq(@oW%ySw+xv;MRA0#tA*qEG9RUOap_4T)aX z1~QLqb|sr6^g)kw5kmT)!L%t&-&+W?_WNR&(43JnT_D7OZbyRLxJxx)zjuHeLfdy( zUlI4KD%^H^!vs`Uuv{1LeR(l@GE9_|Ca3d%dPY_BB{|YdDH&Fh(FYl`_ua5!Uyz;1 zsr@Dsd}D=!==K17uU|5DiOa`iBcG@%GU1#^_K_dzsEkS{6Ti>Kl+&u(2Au<#t2x+{ z$T_46RQlkW#ypHx*A~ho8eU#F_+2v|_V!ISF~IcZqYl3Iu)k`+j_k)RZ~`xJ}8 zU|s|L1b>3begiMJh^J^3W^WLWYcdK-U-egLM3~2{2P(4KWn0Y>*T1HY<&iAvH~&$L z1N>GkRJffECmMw2JA$uX^o_3zqW-w7jC*Cy$!#%3Mmwns(}jhty}s(bI=@q6;?4D` zPethkL#mFZ4wamk+d1S##GmB7NyuSvVmYo$oqB%<^J6*9)<6JUg+TPJ=xAa&T$Q;7n|@Fi2_O 
z9B+NF;!H?*kjcdvH6a#JPk)zHJhEh!(uJ~DC9vg=q0v;$z5u%cI>TzHs!x}`HXCEG zuoRCkf6e)A}2&` z8B3D}%ARySSX4FFFl4-WLtMfCvPeZd{A6OX^HXsq_Tg7JlPn03fxahsnA9jMA9d@ri_I3A!7RQz7HTKa9f*l#tchnZ9f2dSGfjD1^23&^?WyZ$$$n(TFj=1b z9H)iV^%*~4OKgY=s%B?&vtmh7(|hA{oFbboC8Gbt;wF-d$fIZ2gEnA*A>q7Z7Vi?y z4@)Sa!x1^sei1vVY{Msn)7-2N`f$&k8)H$sW;Yi#k1@2D{qe`PYL!}X`?eHbxj2tH zLj%*Fe2ax(zkO^*`UM#~50Hl974@WL6L3xzd_JBX6OG>bbrg24{x1=rai~NU5R4Z`lby?yL z`c$)?Hg->^F^4xkSequK!=h$yVDD~uj4WBwM^lWA$+>(`B;Cb9RffF;#7OeF1V<~b4x9eiNk`>J)G9@T&5+$9>>F=@3!I-kvYEV!Ch3vp@c%ey1kCf^Af(^H!- zOh5L`Sk&2kgwD@>ujR2q4!v9{F2)M8 z!0p4hB-P9pAcq_2Ug>xex5R)#B3u@JE*~{o7;+<;kh~=q*>%Y50Ob`A(tVu*Ef$Gs zMaYq3*F}m2pmP7MV}V$r!(li<{FFgFMh2{~fio-t>AF%vQp=4Y!=B>Fzf z-bZb*hE*KUnAwAEMg7F7ERc8sA*5D0NV8SnTU(5W^k5^QOrt^6rEmKZKi)~{qXHz9 zfjp@>@ib`C06ld*O#FD{*&|$tPNQ71s6_c{YHK`MHuwd%x9P7KJ8#ry`n|1~&BW)Rqpj@mR1YGBPguu@G-80&zu(^*5K2qAS# z&waYeh?@F6j(}WbaUdC)miv?z9aXme?Bt#d%{7H>WavKar2@1TR}j9Jj~hXqU+2fCwJfly2U2wBoG>DpkO$`AMAxtrY4f@U;1`BUCV^nQ2g3Yz+8o6z*u+e0i;(wbK4+%(AG#HW~ z>a-2236QExv#=z9SQhqY-h$FRmNPkqa9Ac<3>f}uJCp!rtiz-qCK%^3TN=Lj65w{R zMOKQodxko&YEVC#vbsdW7LmIe-Le&aOa?1enk`2wp-d#?3Q6iFi1W49*i3++*81Xs zC0o;yK?`bhTICiz2o(5bm0-Edr<9WF{XVAuhKhclOU4C8d6A?3-*wBP-FVK#R2mC* zKo#1N$?25>WoquvW_IyH4zlb6yu0i%$nOkvVmO{Ggb1hWtEchKshL zE(f`VD5=QV+z^t3zk*O+7`s=QHCzWAB6q6Hv!?yzD+xV!PuUGZtfR8)4HUHltKLq_ zG#qTv|D9s6{`)3260KOZMc5BMt`zK%sv8at!loeASTe8=oNITnc3ecTe0ulw#3 zlPPwz$ci)t$v_-F1#Q#jKsrwbwtZML6ILKQYC%Vdwo;4V4P%f=7Ord^Ln_ekO!--L ztms@7&_khdY5ALn`I3mT;`7+tss--|5j*hchB5DO=+WKH_H_=*!XDrp6XYbYp&boa zAbiTE`BVgzQn>3l3M!r_w7a})iMg9Bc zV4-vqVm^Q-U)FvSae@_d+A<$~<{J8Il@VBezYFIXL|_MkAv_Amwyi>%`(>A~lAaC| z-G2W@rU`ve<^pC^?_|yqdXrI|C|1Tf*I#cid8g|k{M0jNzbQALFwAq>TaqQHJ>9r1-Qj~vk;-s7bmXL0a z)O<%1lYA1S)P9rrNH-XqiV&?0KdQg*K&}^ zU;LE26(dh9=!`Fnc*2B(^Qkf$9tBv;rV2ZX%;*H z?cnm9Ed*(N0AGD!ODKY-tmrac_X0@=MQ&4Z{gIKBQT7&qm?uAR`eT6yQ5zH!U-jgG zEWL;l#*aiW9!O&H6ySJz+spd%T)fC_Isw;MOX%`V8p&4lLq-%QeLX zE?p7+LlG>MdrKVT(|ui}GA@6!=n25V5ksyKe!_@G_mn8gE0a~us+Bpf@B=B4&OS_> 
zwh|pAolxumVjCB^!`AISRg!t1*susv&=JrM8XZDFb?lGicLOckrd4Z?et+v373w=X z#jV2y3CdK9f8OOq*w#eWMhVAKWu5thMS0XvCAax7;RkxiD2m&yuAyAWZzi{=I~CIm zBk9&Bvp~R)^#f+e>34g<0tZjrnwe(t>Guj>$3 zMnhPkCHtmLiZ?aUMe*mEtQhH~xPY0~3CU!1JEQ(#Nd%Rh{0OEJTx66sTk7MC1|v9^ z0&AYese+eVmogFNIn;|%Nz<=1T(YiC~FWzY0_a7ueBQm*}mVxlm- zhLj~NBTL#w2YGhD#QA&DcBOMK{C1;^{sOE)VgI^GAj_c z$D?LsVE@nsN}S;oy)zjSXQYrr@4=U-uRKwpxTgy`;{$sFOD%RnVYSd$%w9R6p;GdV zn#{-s3%<1@tuIWXkf$)Zw-(1bE>7V(@ITv81F-*QwSAu!I(7Dqcv6%yXf4MjEr02X zi??4)>Wvl5;$tBFFy`$O2O57uA3DO@xv$d0ceW#h#&p~L9aAf35p1r!0_=6)PB^JU`2RQ zzG)*Z#dX0)+i#3)&ytfb7#OpF=Unst;M5=x*%QKP*D(Yy>{nQm$iTf&V}(6J>!f{0 zk@sm~btFviRk3Zyts|A5jbuW>z#q3Jo}-&vm)#{bmV4LV$D_H(3THhRQud=j@1L}W zVZ4WOKSRROiSb!?j5shdOSRVOs(Q9q;-J*1|5|JJDW22X^ktuf;O9OW4#tWZ#c}jH z$jaR21ul@DZyM4rg<($aWL7=q6;cVqJe^4!7P{h&W-%{b!R*z=)c_R66c3ZEc$~Fw zGYuqmz!<7$rfz5vzaCzr|uztYr^M23h-%w=L#OeGuGcq!6X z1A(CN)xGXCG!;@&o#1rGc|Sav<@{{@#B&~X+E;R+#}Ya*MYeO)i4C! zXC;pY6rwC*o_Si`qOpmJnkfPYzpP^aSqdgYxf!3SQ!YAL@|l>Ma# z7q|u!NbeTYP^@2sza}kUyf>^06{2)XK1^N@rr5p0@==19HBVMb6iTMnUsAh7`(Kz` zP@D2}L%Y@dkp?Vx?wyftq?Rge;bHa5{GRLUks1?6Kw)uLUky`wdRn;+9WLj=!AzfJ zG%9-Ur6zZ!(CKP-^W`^+JM+Kmzyi|AMb>21 zv+Dk52aJ5wTHx^m(~uGyABwj_Lb7j_7Zub5kj^3Y5CEyFq@Qa45(=4H0=pP6@VBYE z9wCzfdudSexOYU1zWVag=4W<*AGa_!7!#6P{G=4wBxcv|ZKy9hPIyO~#tobY$VN$s zK`EPO@LcF;*7WuhVrpktBLihVI65(>aFuWicZ-v@Ta1|d%5eV?zsO`-f-^W)h{4VL!Om(rb>g1XO=;*4HFFToq zv@yYqi{LOH&$|BBcDtmLYG;~3X1FVJqCVe(vUDChNqZ!Ab1t8%> z-L`RwXi!RirGLK8D#+tW(*^RlvfDSyEAZZ!vKrbCnFE_sJ*eCVIsOG|7(~R|HhsgRQP~e2uaqCRlCKj1f0l{Ya^x9874TMJo=nW z=o#$$!4dGVUec(kpS&5A(2~Y6`ywco&kUP&&lcXtQRwxd;Bz94W!Mg zGiy-A@l`9eWy0`$vzq)A72d=KsMLkz?~fbP*G?cD6m!}@I^jxroivQ;Cbs3^{~t#$ z-^T%8=4;ksI#}Kpq10$Uzq{HrluKxIN1MGksXEiQ2k|^ia(i0N3g?;5dCAr`C%%99wi7A4VY z|0K>56(rdMxx12vMNYd^POPqWk$jYP>82{JszbC5k zoLZDY2Ez4l#?RvosDvFcyuNiosL@NrCREnblf~VT@lBELKCB6erwdv5=9X%c;ifYH zvS~@m4$vF1>ml6$@lzaH^Dpxdwdg1jgTOkXW6CE?5VWc@Utn zvm;B!&#Y3s@{7l)Fz{{_b(B^tP=$Y8O%!e)OG_2mqCYuqkr%jEbR&8Nk@ZzR>GB-q 
zxpZMJr0saSJ~D8pcI^i6xBr7)-2b>O3VO^~zc4mgE-)Z2-3rR1p8Od>veS;E?qr7Q zZoUY*yfh;zAs_n<{1nLhlsZ8r{CuMS}=b(Xq0SiT&T1eIw zQQEbR{wUN%x$8dpwV-6MED0WGW@*HfUO?X2FyGvxQBw)dmRxcFku2}~Vu;CLrv>zYQ(rid9v1^9WZ0OFOt7)Vf zs8Ti9{MDgaR0?s0WH%S6SeUT3eUim-9np7zaf7*X#oK3M>POW5%I?^w`T%V;(IrL_ zxbB@6jl;VtT_EVuQU1V*&$PV}#Ix&;xgB>LWI?b?J*T?XK2X&rjH;ii1qW69KjUQ_ zXf#d*0}fnoOrFLZZ=I8>L+VR@-+mqr5ShdqH^kv%a~c z*#opKUN;IH2x72g5f<t2qn0NPuuPIN^^sXcA>a6L;5;A1-wa66YD=Z9|NNy$SABwwLtO zT{BQ0y$pUtTjJyk1ubBG!~dvYXO748R111uBoB>zg99_`-BnHnXw-&^Ux9v*sc@0E z?~`oKY+K8+k{};TRA-0+`E;Ldu%h^q2t1Y3jq0_te%6iemSlf|iJWstwK_KU_04ycFWyKZMPj? z5CK24KJgvw{rD&-9u!pSh;go&M6dUxVN;%C(UV|KfO-H@KrbEMB51y)Cz5kwcJsje z34%-7$)2O;rs|>d;Sf zNoS}z3_fqTAA$=Q%siGfm8(N@xdnS6MGlUeLoehH?0J=Jy9@9jFpq^>CUTq+zuC7x zjN7_H>xT3Gu+(+$p8P9PrYU%dx+6m>fFS$Qaedoqccg<5zfpiR7x8dg8kM{%MoDUw zU@nAdmypBCiy)0M_zQUXcpH;dsYG$fvgZ%#WYLT#5=IG0AQ^Zsj9kC4U z=u(JMPWgaARzNF&3&ff;^deUkqICP&v=O7N7-d+~^WeD6*dDK7Y)cIyoi3@0Q!fkp zS@%kFR<~3dp7`YHn$i^{)fcUyH~6+_oz5QNgs)`~ z=(sMS;D>cRy~5ynGwY(KDs#2R%arHFJ~ol5E(~5x3KbhoVN4yrwq8%O;IRIBYJu!Q`N?6 za{%FzdAZ{dh~+zUW6{k;{qwlq2QFydKiN>0o+0bv>W2oK)nR+>DX2_n6|?73tTZTK z9q404mjY6sX-siv`B{qG$}w=9c*?Y2;sD5u^8M**_>05=B*6sI4%>v>H|1`;C<}*s z#Ej@tGX!IoI>xMPV+U?-l!&(pEuM{>M%XC)k_=<#c5S9>qjAn4UTl|0FBo|+b^s@z zQ+`e+|4R&8+hzqVV#W0JVFJq|x|XWI21IQwUigNR<-?QsAAf9DWOGFLXKCeWB!ka$ zGz8_m(~Tdx-rxM=-*QNvG5poxmUF&FQnFc`Ujqr-C=y-Z-qt0NV6u3HcVHOwAx1bS zuI}BzMV@LPG=p2))*-sh6=%R&*F%8g`Z>?>nq+$z%i%0KHJh71Yy@51SDmj`rAyzr zh22nUZTZFw&v=H=2SN|@^naLn%z^IG=A(>Uig`;;g~F2Uk;2V8)sAoTevQ=>jk;nb zvt{fA?tO#FZE1p5EjLR=upwhHvR$M-68ltH&Uenw7bcq@)sFuZJ_F5b3>n5oL62i= zS1@x`z^?N1C@TB)o3cL!n=5Z3B2V(Jk2x{;Gz}L%6JvPhvPEAeoD1s|2-nY9+P|w7 zA3;e7KE%d`efho{620O$I#^Uy%t?k8W8S8?Y1bZgVgVk~8E>fVf7q#_P0s;K^_L%k zYgJ0tyE3F-9t|C)+ex?)o(~?%ux8ctpySDt+*`uYxSR?NwnrA{%Sg|0x&y}p7w@d) zc~#K_e^xfwXb^(JOH1xPKC4<*`8WP&mSj1d8^wI>uiaQ<%LR>K5k7vq#tbHJ6p6)b zl~wFQR%vY;9htnNla&)~^NVsh9{EMOxI>|dJ;{iY01#MN@f8eWmvKXPg4-uB`MG8q5h;$>Y(I9`KN{5jAh92A*2YuKA5>_BNqn@a3_j7Z`uM=7{do=J6nt10p3naq=E1M 
zD)lnhg6c)pE-h?6FKWQu1YT4u1nWi`%%&t~vcy+jDFe=Wrz+4ceKI%>?@X?j9dJfP zEyMaguNVv5#@-@n)eRLan?^R;jU}(qvN$V%_l-$GeiATd2$0K;AM={1Upk-PoVsq0^WSqBSZcP!No$*V~T*6s)$J@bF`m;1Ki?AEIzQKqu-ozO#m z-Sm^*dmB+c>wvSPvfq}rY-;FUQ7Lwzfw8)W( zmhm{Wy%x+6;^rOje1<~h59h&G#p`+pYNL{2IfdCv!#AQfSz<(7p{ZxG-1oyuvg57ar{JDawJ)SN= zc|?^17(vyGr$|@?RHI+GQwl)rOk}_kSta!xXx_sjQPsM+zIqD6A~S$Fg=`O5TFO^4 zT|x5|goCN_Ot>S-g*LK&=%xAD0Da0CK|bidvg z^esXYc1hS3&;=~GidKf`xjU7M2$Rq|c=W77?tr)vU>cGA%ej#h41_ps35|&5K8nyd zDN+w$TnC$hoR%g}O&VBm#EBgLs)2ulTVpZmY{vs*uYcza3NWC<9uRnTUg#h0H5eeZ zZqbdX(~Jc9)vy)Rlft0RGfHKY=vd&I2?-^0LqoMwr%mYH^=sCW$NZhsmHDf#B@@pKuqN@mq2^b)C(H zbjRDwB#NqkR5_#FX2T0HAQ8jt)!tN|&&^Q}{$=opyrq4>?$r18HEpS7%{ntJP(c}F z+TgF2ay6Xq9f0wsIRlpNPHOahc>UQuIp+^K%Bu~Z{+qNC!U8Ty4!%bO?i z;&ncYveX|}JC?6{gF0?O7AtYC0SGJbIyV|od_|nXDshr#YxDixHOw{(^k^L3i$pk} zDR}VON=;CExy{=3`Rq>-s0h^~E|hsS(I5QM8#~504f~j~fVdnUqJkr5@j(#>7z#Cm zZWM8q|GNk(P<~&22cqcxQ=O6U?6-lea1le04fMGX1wzUBiUYF5u<^|#BBWqXSQNB- zc}17ElEozLcy}FK#7}s{*za>t>dZj<>0OG;vGh0jKt7kis}W|z-o;Ef({W$ZESq*| zpk$G)1f0$_J6Jn|KO-B3xVRX`h_7)Ze+MUb{kESKe$H9kt?9cDLotbliOBh|R@obK zg0JB5@@y^rbyhcoxFrQ~%0Jl7{Rx9tCF+~E4t60Vn=F&g?P%#hyuO<3`2@{l!ieP- z0Zn#!D~Zbo(01wFNfR(CRh}NXSk76JHoMAVHIP^vKRWwe6 z0yMnQ01<3nL!WFlM-Mo>y>5&*0;#XBr8usyOE(HjMkZHO8Pmdk_QAXS^#ML8yofmF zs26})gbXalW4sd!l_2jRx8tMgs|`Rw?y|b0`i1ht|0AO>rT)(iru!Q?t*>X*5*~5b z)3G+90ku%`8xNchFv?>(dS-+x-07OC9NK@sfRH?|Q8NyU-QAE^1ZEl1F zWeJi*&!^rLxKL)u@aV>R=v`z&vw4!J*ekboHGt-jKQS7nkxh z3w2+!K%j0XfFS6q%TWY&?CyN6VH%sT6qyr6?wS2lZRv(f+`s-HGO)-~4f11*i$Vq| z8b#Ca*{+_ZjVU?iB9%Pi%ea{=?_7GZQb(gAHio&ZlGHdT7M5`?Ay<OZ zsMCqkOtp&zP47ThsSsqjo)LEpo{KJhH;0#z1iDZ(>6&aL8mHeWw$g!7L9p;L_N%iz@ zgCTwiVl3(w)E^uEHwK0ftSdXZzD(an{5J+^8o@A@$NJG%Stvn zc^tQnG&A}R3J)I@>nu3|+9sqN4D02z^4VEE0RJvGV2&D7Yq`K@bMuDe;_N5Hv@#*2 zr5P=aHv*em+Pn=5;pnto5$2U4W<04GWC79!v6P9<-HDM$V2vAS9b}ER<$D79H_Efl zoB%y&ZxksuFqNrAGe0d?R&D%*$}Ix{uk$nM{@Bjv;kz;2Oeoh(W92x9yGMhjPOGEJ z%`O9P{5!dP;XpT6$HHWI4NnR!m9QY>^?OEWaE+Q%Gz1y4x5D8NZ(D04uHeA<;tH7m 
zpmsDJ4IX)b^-&B{rQ{>RsIkCHZTh}CMEo`tx=w{<^_#N3&-|pBNnjfuP8WX3y#7xy zN=%WaxB1u+=w!yG%%@%oF=g_s3?~8&-k|F&er#C0{!<#L3JeC;Ba^Y@nsia8xpbFT z3!^0?N`uE24XcuS3kj%rjJE$JUL1*db{%qQPDFDG)c~d@k0ZY49H0+1o%o-(*3Y@ryk~`#hTB4DL72sMnxylUv`H~ z%A|6}Jc0L`K14P*ymE)3ZTtdPbcJivd~4#q(RlV-c4Vsps~~1WqcuO|0u&+By@h0A zbTHRNIiE_=pE$_HP1?a&#)S4wmcO$@bY{1n$TGA z{H%SYpURmv#5BVt%>|Bdzq0Ycv)K8htc3Ci72^RAbUlPsN*G`Ln3FFuQWqr&gTgNs z?Q$a|ve@_<*N`9KGP1CbfH@|oQ}ZYvsK-*2Ug?BVwCf`R8epPwA4DYwb58%CXO)Ob zoQT8uNdtT9hGe|$49qM?kFIqfmb8Zt%^<8=$g(ZSVLG-eC@Q!X(MDhR!N<{*S)HlNW@Ot0|Em zm#f3TEz}=0ES3XO=Mpl(OO#7T*Q;h`pm$5KNA&9ZbQ*Z|vFFROuJuS~i)`G1<-$(fkpWB=MSo65?BtA3gpOgzJs4cU zoJfZ!aD>y5KgwF1VD%hh%_>nHmTvGsVBnS_-SGty>8}awf2+==A*js*qHEBOa{c-e z#y=`8SVzW>?Xj~?#7MHR>@VXYTde!;q3xdZ5D2i}_VP=rgxi!j z^OC1-o0}%MLub2XmW_Xt4m+01pyR)C94lHO8`%Cr<%|B)##e!$3a$m|Z7F;+Qri;g z){+ko(lt?VKn0v?XnBANkF3qNx%I*F-qr;3t|rbu#T9E(d+q>{1rc8b5t@MWsBxsG z4qGqa(H$`RFkZnLHe97s4lR$Mt2?o%mb~CZRuD=(fhaL*<%)q_y#-*WPejhO^(^Ik z;RO29s)kAxJ?y0)6NH!zD?4q2tTE!I+6h=u?JG|yE6!VeE?&Q0Kx`n;U_q42tiXx6 zzhTd?AyoLGF8C)AoqO~|C>r0*dk#ZC5D`Y1b&aJgp_OXN|EpZtW*#s{L=H3LOi!bc z;JiMt9gm+64qhyvQ~GZR5h+D{z6NJq-TeCj-eF87=@U!kR=KQi3r))h@{jaMpDNQL z3MSLqRpHVhl$|iF5(c+Gu8Of&ebsSsI$&x|2eD$ajwu_%KXH(C=8M~I93J%R*t41x zd3&WawPK+4c7++@I(;7K>+}FU`8#m(`2tiMYkzfA7EU@6gXRt0(VPEaariTz01N=+ zvt1iK3bQ)04TOpI55>&vLJMRB<|1rfJP&{Y&b1p~evqKKGyDk9t6I=Z5eQMNEJs}R zr~Ceq@Vx>1GCAZ|XexJqBoets1C&{dQ6Ee*z0}o*SthrGk`Kvs`V5_Mc6sCHk#lh?i6*~jWcmMkE@ZXA zlctFlD}M~B(`4af3i%o*PBgu>K-EO;4&CfTAgzgYM4Jamd+xAoqR?a|L|a{I-q$Pn zrT~GxVw?z0lE;W9+G`(g@zASagi#W3a2}|$e}ZO6fsfBb8cOHiC1W|cQ`0A)X#R130NGq8hTa<{iE!VlgW7pnEyRt znb9L*P*U8r=o)V%-aZBhBknLEgG;JL-|CO9z9E^_krnnhG`nWVo}nS*G{>@3!f@lN z&K%uNCf$K%!Y=m4fSHoXN6yq}h-iZ1D#j8QH#+N0J?{r~H>eR;IRFK3s#;^3H0cfW zoO&T;?~omP16?sYPK{LS+vr%h7R18|Eb@O>_w#C&V~S9Jxa{0VQ;r9R zQl!0u!s#b#(RS7!T9jX6?Ty3sF!ch&J=HVs3=m|8ygRmtHT#tf%>TvgvoIthjCxwe z%Mn&xq4-7Zwm4tG5l>?t-q$Sg_qHi8FclCCun%IAwVu1G_lZ zO=->*=OrzS=r}y5)y)fYlejsB@b5ZLUzL$8I;%gb$*n|8&!)n^D_T{3Yp=f2_-D`+ 
z%)rE6E`PRClmJmY`H|fyiN2)m^?F&Bn!V$&*S<5;+A7`&>E#|z@NgYPcDvuI0)ZBb@~wZ3XP+?A+5CP^;BMox3wt! zSEZP4=9&RGo^xVSW}wDW~6_APtyoQ=z5Ue6&9?#(>`T|0)k_`ZhvcH z8c_C~Xa#J3B~FAl{v@*hCDblD@JpT|h!eDB3tbYMEiHI^|q*wXGjPH@NSEm~n;1YPvPJ4={fdR#12Ty(6C_IBYrV7UO9+e`1 z^X17Z>|t@;9SLj4Z3vh11O1kdEKY-!a-r3l)9bu07I3pa%e>$w22@v|Y2d z(HPPw+##dmIyEK5=M`!Qw`iwj99Wm8hNh*XrT&b$BR+7@(hR6RTR;QVqxev#hrVqE@esjwnG_H` zsTA1v?5%85XI4v#F?Zl-Xv_qxNsR}!P+R-^YE81%;E)_fXbb7}jQl_`a^cf?FIf%E z4`lQl3?rtHvT*1bq;;#ITA2V`;tn57P1Ztghk?rxKjp@JoB%pRWmrQS6DE!QHZmTT zS+VK>M5oU-;W!8|rPtZNuPy{yU%U5;%LF(lNkuqSV>P&r`o%Z&Fa%>QPZD-KOwLXR zEyFgQxnMOvZm8^>|Fg|+A4|mP_7EytSIRoLjr`hs_b?}){u@ANi=hh}!4dw(KArR-_+F3#Z>Qo`WW%&t{*u^#^;_Y& zAvJ^yfzpT7`elW@C++~~g7rO;=1vO7?XVm!IeyUNyF35G*@S_eAPigoKKL^alJETh z9Z%BJE4?4kwOE{dl%W9Z?>!Ixbf9tB^9zafeGQyRfY8QlXe^f;WU%MQaNcU1_eUUA z;Qx$#-T2=hG$~W3yi)tb+mnOBrSqA?xTWLrITsOnB!zT|f&S;-IZ=z4&X1QT^F3teV-o=P$LE1GhTmwg+1gk0Z&`JWFzg_bpF zCN5vF=z`%0VoQ`onS>mXsw>@VUj+@PGxIZSPA2mR70J~7I>M(EoD$eQSMgME}) zxXB41Ah>+=PjtCESd1^wupU?FgvifK?9|_x3%=;kk(p0Rzq8zeMzX~xS>50~Y8b?1 zCal%)`Moqu*y8DsjOeMqJmV1jLDizs0;uv2^P?{E`h=?G!(UMXe~_tT$u*jAGa>(n z2*)B(2?Xs?$|YDewDy(=KEwOnkqv#ZEI$X$4T9Uzqo?XJVOiV1aR4}3ICHMko{}+e zB_Z|tNpU2~{2xO{NrsXQ3uLC{=sawZ)FgPgkTfrQP;0D#vhe8eG!ep5z3>mQ;h@Jl z>`aI0wPPhE_-r}I@W}Vx+7MeCd$zFFdKuK?6vx-^Z1j!;4zUcpexZmpH?*^*CHCSjeQ!u`WvBflvo@KiR zHouKHTL*WuvF_urS1v(~pPI(tipj$hPRIxNv~uB6Bjl_wd2wYf;I)8cO%5@Fgr@BT zp4xkC`+4e;|9}2I4Uvp;f)EofHJAX^NM6Q2mxOc;h1|1AX+Jgphte)#wn10Bg1-NA zlE2Re+NC6%v#R@h3h=ow-9iaM+Z3i^YMvej2MWE|gaCWS4Xtm(8<|Yp*0WZ|h$h`X zw7r*p7;Kw7h8~ou?IQLGb@L-I(?%yk$jOVhM_7L0EPW)$Y5lIC5i>mf1fuV>y-ZP* z0on2`W7$+SmH}2kG8_5w$TBMqj@zJ`P<`CV;h$ ztl3Y1j>}|h7m;)c>G~(O)csvZ{f#l@!tr7I&FubJA!|-9nO^WkQN{eV>$XHh=carq zBW;l%r)s^OX=$D+k{aiItoR*v3>Wt|kbC(ItQ{04Z`C%m+AoGLpi|rDCeb6&&c>MT zhz1=Cby)q7G~|6 zBN}{~Fl5kXN6*wpa|y;T+EE0#^da0BFXvUOePgHbJEFsCsRKDvTG%%yy@y4vU4_r!V;3yUj=>Uk(_-2+%vljTG76FaqUrBfH>B09n1dB0DwKl$11OU} zP}h-hwFu2*b_sh@^3FYZDAbhd>t)_hO|=U+*Ah4hcT=#-Sbd={)^RqD2f{I1R|wWF 
zl&Dkd`9UQA{FRMk(Q@0L9&sV6Gi4NdE>B2{gsk|)tQ>GakX^ht+I z8m)`*yEH0%N38eVPs~5PYTn9;V5UI@cOp)ljA{;D<;+H;CHvy$xI0hgFZAjiwHd4x ze*WupYlV85<&E#$otpQ9g_@Xd{NvCiR$JDBI5FpQz4_ke;nF#ufk^=?OL9n+{AZXN zfIS$SEyW!WulHcHrF>^c2zagZC=iJrrESBCvL2x7{VAPJCfXPOJL6lySgi!m;HD}fkTGOb3n z;9`?L=zo9hnj>f8sf`BLglNMaZb$4USAq?ONpfOx!`qGo2F%Ho>q6jX$xP9mdqLC5 z>L*5+c5UZoarmPs!Sx=_{{hgrpDi;iW`tY|uy6|qo|uT7l=hC8rWZ3Lm)43o|c#td*QS{6Dt6q!hpmDf5b@dm#T@uev=m^Jo_mO&6pzsS6)IpH2njCdX#EMG<%cD6t@38f1Z#KnW zt@p`2yc@f(Wus;@WM?8loP~>Cy~C|kS3zj-`=F(rd@uu}Q#g*P&YPYnJOZm8WPk2t_TrEsB!w4IYv5u>(z@~$ju~0rQ4{t9=h`WBPog6Z z#cK1()`7*tU6c;eXl?RC)}f?IB@hzh>y0`}TcZKVyi|f{$0WX}}jk5%5s^1xzs1ys!AYt0b1OZ3IqF2^dr4u+3i zY4J&9;#nmEF&0*a81l0|crmCQdc|OF(jH;QDjv9OFa-=03TFWlrwxTQoj$-k6YNLa z-f`ojklT{P_g&W#UFud7<)1SUFqU>~Kq}CaT_vC8U#D@dn`81QXQ(W&Oe2qFGa(v1Rf;T)<~b6H&<3s zyxCc_`?$=`K$a5iQ?&zX6Pr;M(?xju-(?&#jGC7F##!|B<fdGu>;3^#%xL^X(; z1wVdwVP4dho0&nS02e0u!48B%_S&7^*$VHd;{5Jc68iOq+kKOV{SDjSvc+xm5rlA)g;adi_M`|Gp~}ocoqqoVcaSRm$W$;5``8SvxeyGH23q zXdAn<0lX{ZbBVfhm01JwMaQsKyW?a%Z3*F~&1R%VS_J}ZCu;uVSP?^6z-t$ zsbq8cGkaQ>AqN!>M(bKp0X@k@`V^pTJ-1JSwmMl9u^a%1)Jrot`bwE)N6eshBK>vN zBwP2C!v(y1E7;-Yt^i#s>(YF-3Xn^$dpag?7ujFA#e(lvBx}UakGBpYX|lIQZ*_}M zfWOR?4@Vh^-b-Xscqbq$5e5Z+4y`6qfha}DgWmz7P7gQ+wpvEqQakWOQ@g4`k&88G zZwo@wn0gZD0#FA2Nx}nY?;3Tsz$h)#z2g4Hegii5s+lrF&1P;qM_<|kH1Yb8xIt2P z7JsNl-ftDQdrWBl84gsikHDLVtT=99PAi_v=WPu=vA*Fe@atq=P`APR_uS|vok{`s zrpe3Q5J&j4(Miq@KPyaHEGfUFTRk`QcXMfRLU%5mYZjO1^8^~G^RbNw>9f4dkB<6t zid~y%lGkOO-iL5m8!@6OlVxWo4;HbiurLb5x1Hg%e7X`2dM;HPo_xw{Kr`#O5tBG7 zC(>kdtns*E%%SE$rM%r>9+R$R5KD=YOhsvYL{hUPWw(u~R-a;sb@NXBmWsxNR*RW^ z4LGK(xR>s6I>&75?4`Y zhu2X26Bm{$xVJ&AQb7|EEF{hVmanTY*7PFElh73;*Q_77vO9{>?1$(ZVFAF=NDxTw zh*8kjEFdKx^vjm#p&ZqwAzNVZn*0+F$B@s}G*E(qh$oze+eeF$6>Hrf@Bn#4D;luu zuvb%yJ;4y1QBdooNE2RqfKSTV(9YX0AMzf?7mBGFpqA1x(-24+H}62je>vFPqW;6& zPu3jbdn9a1qDh6urqKklK`o|m2e6^wjTNuv8bKr&$IZ#fdGEEZqyBjAYI+{7H6@#M zS5wuWG&@vgb%rlsC2jxN2Rb-WclgW*IJ0&W_5iT;rmw(r_$rh>n@>0#&KJo2dPS%D 
z?MnS$hRkPx#X)a%)jGO%tc&5@rTVrgzppwV>Uo&4S+)mTv0&{h!xMQ~;KN6gT~bi* zWXV+Xl+~Zskw%tFsl6K?GG4?>p0yc~fLuuYXr{xMD5*`4VHC&Q!4{f)eJ!verzc?k zjs#Jzu)}gUjiYVrgZ?A`Z;ZVjm*L+d+)sOTRRiR*!R2jwJF!=$no?>EY2tD$V%8fnzRGy>bUem31^0+2w z+WEKd8;=>xUtMj8#RU?VoMTMLWMne|tOpCqdJNb?dh68mvb8wyZF&Z6Nz!lk! zY+eI)y8(JR4ZMKVpN37#b^{TZ?h#b;#z%x!6Lu`hbv_83!zh&wE|@X1!H%~9gK z13;i!2@kJyd?0k@A4wu`3~$w5u!|6~lFzt4246%ZulL4VW7P8^&obUC~f~V`91YB1PJTCUX)Ve>RsAk!=TxQk9rCE^l z1F^!5k2>{<0oiG;Y1$a@ZZ(y|HbY;Mr3y- zb`}vGpT#8jQ;lvkzf^6j0!VX!hWVs^81(E`BcAsgASH`qaG)!zSVBDb zO-*TBWIiJE2ug8T=E7ACS1jq9-{hTC2;q*a&yW}nG3=?@q*#s z3E^m-RnUC7j-Tt3t*hNnTgYQHzI<(0y2-GQa);TZdgY8fIa>@>^F1ijhc}<@E_2=~ z9;_gZbP?o?AM8g4#zz>(9-tNfybeHtQ<~3%DZ*j@sjr32#Q(@Bc7HYo_8|;F9@x)~ zUEggGY@a&WL1ANDi$`4+!b$@l*X)Qq*Ex$ZjgMqe^3D^iB1#=($z1(e<5s-XA9x!qdJT4KuqBR!^Z^Q^M zA~EXsR?KVT5ju4?o3)^9l_;2WA;J*4%biBhTNRG^(>-+HriAKJ#g701x!NG@l&f?{ zER+slATTSrhMI@_tb1c#LBPfkJw~sR!VduPd{{fMphnF_X^59l%!GeMJ3mquO$a}a^378k$xL6d+GbAP~lQxk*d-=hXSlV859MdL? z#>a~o#)6C!=8veF%ZfOhTUBJ$jOa_xx+M87I;6kkF^L6hnGR+VseM`OQ76dFp}`l% zZ&Ci;e7s$vh+Wx2&%fu)~N6`z#oxy26Gez`cLl*v0|KR zkVt8-X6+Gq{IcR-yW5^vX@FOUV?Xa%34RVZ#iZ^5iLPvCG+2_;4H$S-k+n)6u$Q4Q z3Ka)YSr4V?-r2KQ#NgEpZZ;$k?u2M^UuzQiN6p>R@J5C;whUk45IE3m9|uJ`B5YdvMw6kx<=W_66bK$%)2) z)?HF_GXC~f*5a}+-o3Cz$>fT8hkU}0;x=25Ld4m(Ko~?p8Rh$;& z{!I8hfIi@LGyDZYRRPGcdfTsx{|mE)4%&1PcPGEVV=VH ztHl4q45F`CfXp0vHrXVLbBItGALRqcQEW^S9}UCH2CO!-t@BJ6<6npeqJiah3PC<2vKv2qH*4TJUBMMUHm1_hSmqy8tZ*r)N%55fUCkqCKdlt4O`Qg zX>*(wa~NmD5~3P^m0?&N(-*cee`x-ZcJP@<{nNrL18;7pPG{-62`yvZkwdV>z2ScL zq@o!CbPJz?ejmp^;O=`S^03ZpE&3-?1R(!1K-7vGRm_H9R%|mv$7Vh0WhDNh$BEJm z5)#O%d&d1!3vEKGl&t9!XB*IleT$^Wy*=&UB3x^>0e%QV1l;A1Z{RmtB$8FQJL)^D zxlE6TZUO8)*&Q&h0gI^?q^A#xHb54gR7s*L=rp7_A;0{!?t#<;@Tav>uZUQf={^*b z+pu+cHS#hfUcyzSdH9V%kMk%Z(+rx9ip1^9@Crbx9}rz}NrQjo(BY07{cBPmWt+lnwajdr+*Vi*Fm> z9{qso9i3;Ys&s|TmReIb7gre!z?F7qGwi4qzm@KL8t}RU$*B;sCa||4hbmAZ{C1Bt zRu3MDh8ean^ib*)g$lB!pHOUVM`z^^-d5iToQD}n2D&8+(J_3m^54DI#-f^ic&dS{ z2_fw6DG#bJI<1s#HlYp61#Lg&%{Jn~hD>%uOHiLBl+T&b 
z)f3rFrG-Tq#cy=j9inK-VJH2q!x83&Mm8K-a3rSd$i5u$lk-1F@yY!c*qtY+=hnk* z((4VxEK>$=XzZz>$%zkNhITb>=(N206wj`x4s&stU^yAxuvf%D?9ku@A6!mOj>sZ$ zpjCt&FB7m`q+MY2dk;X5a5AngvGsu$)VJHXfnOR%PyqQ0wEL}{r)QgvZ|jK<8@}FK z8T7xa(``w2H}~5`Lm8ub7B$;+gb{{HS@NjpEz6w3fW9qUKu$MDu{C9{i&aZiNDlet z<7w9jZgf%M#9&HnTNuTJ>_-ki2kj7{L z)G+8zrifgZwM{y4zbl%hIj)_@SZ>Rw5prU(f=qJY>YqxX&5&wM}O3HU|#O(Zo?ailB)%4dD=eEcHRfr!+ zNZ_i9IG*IdE>*5_w7#u_g%Vz zEK0I-xZ)M|ft9Bv3_L%VyxS+!i4NqNPZZfX(>jG8*>(VXsV7a%Ph}QVC@d4r zojq%AZ^Yckj_I@gw9AwNW&d9=-=AKId8+2yfgSiG0Sud!&lj*2nK)Ub#`t}lM`~A^Al{lOSCOz4YNPfDAcP~TATbJiDEfUvGo{zWr9OL1A}9vPv_2t zfJ0|9fVFU-%NX?c7E%$A*&PX1o3Q_iZ~xDvaRvr*&32$g`vuMXgm|(|jY}!9anj2& z-goNHMBT%I<;yO_5rQh8vCNgzPz})|{;~Wc)*dpRJJgg_jL-lbLQEF8N$orfA&UQ{ zJon;WV*njX zcOBTpX5QM3?7$xHonEI4E=Ec#CQRa9I`3ho`5~=z)3($Bw4B@%=porYPfY|us1u4E zwelq^J9&zBd^D5G6R8~LiltYPYBoXya-$FI>mU8*mXN;Sk*UTy5@nXOjSefNFdeu9 zy!o?#uOPUXYMqN+Hmx;+>O589-yDAgpN@BAcWS)NX8J0e-9IpJ@Ga1E+BkjS*{Hkp z$krpDdamFgsZ=2U;l1}fgP5hO3*_*zOA0Dx5tfS2uE+(D)`}u7UJ%Ee4bYPuK|x(W z0}$xr<#JPRgn17V$x%@lxMpSec6Uts98{xwcMc@bIZDf&-yy1FHnOSZ0JYM*e|lrv z+hod&#O%hA098P$zs}hBc2z&ve}%I+mb?PO!_8*&>_UAi-m z!Q4|f%R$W^*rEo$UGZNK=iU3v3rYk3McPF>q-|sgnb$dsEw3-S+$$S1d4PKX`v|On zzGsAX*i`ZH{E>QVJ^dwyx6|S`twHbLF3^R z6yxslh{h%rTFa#>Vd)T8?W<0JfY-P=eDrtpel<4`SG@ffjPoL=Cw)7_7BmX~b%<;C zT_^3j46BSy>PHd*6iEZ+88&44h^ANqobw__>Ue(j$v?6%&O!>{X4JgkYB{jYF(u_c zByn%H&g&i-iA7^_$Q?J-meUs3D}Ufu8Lsb>Tl0MN`(EMTIBk@)ta7q_V0^BxU z4h|Hb-Mh?IQ@hqQFQ4FwOx^>UaWk_zEpkJkU`k+S9=ElFDlvN%FAI-n8zS-1X^a3Q z*_`a#TiJ4Icr5h^TW=P3>RezOV(n{Xq)Q(QfpIp|Nj!~$%i1VVzY(1R7KIF}40-1b zEak!LF@`n|x@PGH_94F?;gqZoG!I-+P-sscKXd;FiYTXJdZJN8qZpQ0CzlNL#q7#= z%-7i*iEo?8Hy6-HXW@IzdR(MGLTGCnc>^lATv}ODsfRkVPm03DNsL{9t8bAOD`f$In@(i5I45eaz!MoU~)2wTh8&AX^>;b!2?HUCyI zh0W2`fw|p3+}?WM|8g4n@}LC9}zOy}E= zIX->R$qSO_ZE*|>>1Zss_o07l(07?9xfO(ZDcv7S^<~#}%B!w!^zGmbfA0m@oVa(Y zXK%m3q*WKRhZnt%Qot62khbhh@)368myM^%53AmBW_06)r54e2lltb}FHyR`*nvmo z%qzH7`Q0iOu1%%fI7}$r-I3mU<#@RGDw(rJ(&+GwWcj5#yo`*<>$=88RSKlyDBR&k 
zqs;$PbH6KJD&#}OT^Dyhr|lt71r?7fv2vpz_0S#m2C3ob^z!-l4LtN zx{*PSJ=CE991Ch$#!bGoE#O5~Bqi_IcZamFie-+FmEt>fXIoh$ugBn3!bCtRLNgGt zALJF4qG%VjAn7IqAjehtJ_qGtVg8B^VVf&ksN0sfrl-zOD}(X zM~cWomeclwoieG>;J$e3-dh>l<%nd4oAfLi0QzHmT-QM%e_}8n-{Q(%a+N^B4nx!M z7NeKGCPAj6r`v;~&n}tTKIJ zZ%baPlqa08@o1=AxpEkL;i|2DtsHlL9H#z6#0u*~QiTlI^*KC;tzfztG_zqWBe=H# zg#fft)ZdNnC|B#}ePaqVWj}_eZ6?r6VqHpUMA9J)Q|Z4_(7FT_tWI84b<&*BsV&Ci zG7pqrFKF*~l_aTFHQ$t!q_|_NI&k|R$9mR~z&?mJNwd~`q)}g4E@cOL|S9)AYObYCUe${P-g-%u4L|JbAYv(Mv&$36N6vbmyXY3 z#H7q>Ycr?EB}Pw^)hDB<4`(66rOKv|d5aXH6#ewVm!NJ6mxTL3+3j@yKv5d&;I_&y zZ412m{XFpB9|W=|QjPmHxtS4)YvJm+Ri{rVaVf%GGsIMk<8usJlN=$`I!vsyRpl{9 zr9Z@sAOxgU?8t5QLRIvRIB!u#jJEerzZQK7`<&8@_jrh zlk2+fNOa5!hMHPW_2K@I3h=zJ{+>GB8CRRnXO7(#g|R@7*@Ph3zJ2-TO`B9FJl+iOhU@XO*k z!gTVO_L?|~@-Fw;TgHFI%Vuw-$+-0n%)qnS z%Fo`F#X3CceO)@yL^}+i)x|JomODrFy$b!4xVGRn=U%`fP+=1wiIzxiE1qDEtW`M& zk4!_3hw0b4J*hFqd<5#|dg)Meu@Kw<^V!NDs&D6Feb}d(8Znz^iCN>|Wr6V~w0RT4 z1;ti=Z$g``9=H(A!OlseTu;kLP3Y-ZEc}~s=c8&cTO?h#f+#B2mF-B;NEIHII8DlA zt;%&A+x<~|*0(TgC(MIsqk$%; zER{aG7ZrTN?mCsD@hqZ8T;T06wVPdT!$&yNm8ElA-jQrW$H;k^#2A;HA&q+aDmx9r zl=bqFIMDXc-Ki}|$2r7czh8ipXH#sP1i(x-%sr`En6Ss_pW6!e(y6=~>rU0^$QnD# zyLwwdiB*hugTMoz|I@G*aooY=REWX8mr=2`>tt=R0^Oe-Rfk_mPLgvGr5hU&r$Y)7~BM09~(KmvGH zE#QJQrVb+_=+QjqzY|)!#=wd9>BtelQG$$K+S2-Dnnu=y@yqZW6v7Xh3eYq#8OE4^ zcz|tX&%xp&g2hJ_*4RvWJ=V$l&urr+xOTOHO@lFrA(;{a(v+;fvxEJTX8;$R$>4sH#atza_jAog9cD#R z8z>@Uimo{bn(d{BV?)4z96yo_vknHubBhaK$7P{RJ*##;-m?)Krgrz&x1A%ivP>!7 z)H$9FAzknPquM|;Ojt>{%z>h5Eq7%-7A_9fXIHFxJi>Sc3~3yI!xDJRHN%TuQ)}lj zslc#Y(_#1XP8pa>EynkW-$krrWFn;-{+S3;qpyPeaC*>u5gakAaVasXc`Tl9%L0E6 zzJtw&8kJr-Sfo#%mYdtN^?P^269Mr$@cN4m^ou{g9j z<;^vZw%-qJF*!Ie3NWALnER;SlNA0fNsQs@f;0qY|v5Q!aj_YT6!r^6hxVYCeIb z=YQ%uGbQ;@jsG~J%u!hEmr0h`&pxMO@A0$~2@0`nSb@PKCva~rG%XiuqI|HRm5eSR zsHxVd*oaF)aOOsXP0UJ4kTk?5+GwPqCp^G&Koov`+>yhts~Ah4BrLs7=ad}PF=Y^7 z#|K3~1T}%1$JhZ}XUV6$2%DdH=p+KIs#dy^AnHNe10J^2OM|L*may1MD`Dmy>|o87 z#+P@Z1elerdhdJOmnK+964G#l@F?OqOtykZZ=9yT9jX*;rW4Pz?)>|wHT$##37!iY 
z??|SdwLVr(6-++fNV~r55$mD<0=<^=Ejj~G0ewHwkc-dN19rA(y@8#fz8qe8J;(DM z{0tW|v9oY`I|WRb4Qx)m>*?v5H90|nAN4g5+=&x8iXzp7be$tm20SddGzFAV&Y_M| z-dO0u%9azneqWuk@e>Z}m&r&@#+a1&4T(${VdFwO`oCK-Iup6wiDuHKo_WS0_o=!B zSU`F6Fk+5!%|mH-v{XmYrb&w3W*wz4_$R1D;R{Ql1L(j$o#qy!Pzp z@wO7G`F|+U($>yjTe7(QrGey%TYccPL6K>G!0uaVIrZ!C9luYE> zH-nwdviimaani+XiGPj~IMN});sqACA8&I;aNtqqVEOOolOfg^r^mVf&q{S97(){-3 znY!sD2cA>Y1dV3%_13q3&y#{s@U)W`l#0)rXGy3P64viYU7>pYRQ%}}d zC4QA0lkJ?=w93szykr_QdU2Wj&e+r&LRIg)C$_*p?rcUycN^M8UJ>R)ztsSzJ3?EtwS1azMrmYzLlPA~}oqyodvxMmN6bvgn zLT+Jc4o_7aa#6$aencGSRMnW>YFCK6-%3b_H)Qro|FU8?ORw%k(!8QfyzW(QXaS)< zbWYLg=BA3o2hHUUTX^!z;^dp9=Ti!;wLJdt^w}RZ!?)J3;dfD`Bq}`x*Oc)?{E3W& zW&2kkL?kt-sF}|#uCGg@qCx6vlJy{4g3bn124_=O$gJSL&sWCfYP{w?4mc)F` zWFhkrIvu3mEyVP(-r>2;nxCf&fBlkFj4vQ18J<^0INvYsYkXg3z7Xr>Jc)yD4X@Sr z9dS)(j-G`J{eH!cY_56!vC3i{KtWA!60H~mtg#qs$zUkPlE`1p_f%*Y>l^wt~_sZ64@bACs5^&HvomTU-J z7}M6ck;(%XxSy>1jW$@1KC@yt5Vv*hPOtX6Gc+E@f|d^;|73vPNQ(Q^(M#yQ#7VK^ z3C~PmUg>J#)JWrmfaQ7jCpVDUGKBAiyrCQd$2a%tf;+ylNv^>b`ko2^r*~8A((spz zx!ACU-^Qf9&1V{&Q27BxCdRFKc6#5FdGFpuO)xOlpldLyia67Q3Is@!gKe;`bq>V~ zLLC-A`5X)I=U#7}H$?p9$X2@U^D1v~F-#B)J}}QdYb29`RPnKGEaIE1l&e}laLeT% zQ`Q(5uA~jD4F9pWk1;V5v3ii!rA@*RkHg%B^3a@fOR(yc40_bB;{{+XG2|R$c}I=* zIu3aT6bFgw$cVZ;Vm@r%Mx6&XrQq|R?3LfEr+EviLZvx8HspW=@#*JVOC-wkcs(54 zKvz&%8`&9tDBVI&d=L4&1TUvz90EkV`IuD5%Iq_0TrO(sRo@_8$^qOIrkLJ4D-G^Q zmAb$^6O&+rgi*9Rs@U^Rqzz{!4oa9BCNLFYxfq?j{_51CZYDerI`NmKPJ~uYK==*$g`~f{=Jz znNEY5ys4e~D$}WBas9a5CWoHK^CrtvC*NgN>uG|?{ltVXc9botw414+4991A=rkHDB5A^XWWkd~oaS4zP8 z56`7|VNo8PPkgFcX}epzP+nn^%B?vlCEWSmSAPw;h`vZnWy8fjq5I9HP;SuGI^+xa z=q)f}mVHx%S3pJ2Zt>Bpa}QUHm?QDe;FQ?53?!p-bQ95Aj^(xIa6sl^wf#0;og`u_ zjDBFmJ79tWq?~v&oY7lbAaxy$S#KLxJdhgVU3nSk!guHPvkV|DW3xlk$g{-bqT4Sd@DRqraMEJ%-u%QirQJMrP|rOK14gmmY^4j524L7(_N zGbzy*sw?jrdH&!JUn1|4u7rOe(fyF&2WOWQ_f|(NJp)b=Ajv{`+IcXAl-OhIDZYxm zAJ}U(neY%pliMOAwzJV()7^39K?O!w%?eB|1gz^h%ZUtSml=34xc}w;Rt?YK1_l>P zOr(4nk`rf+qv1d2@aR`8r)&rXKCRz4hQiL{$%-98Pvzi>1uqD;Bz$Wvn$ol-Nw% 
zP~20cg~^Z&%nz7kFS32Q5>rVG5e}D-V^mlXz@M#a2{KJT`q0oMZ-ccaDPlJVJ%wxWIew@e24c^`g#%!>ZRgFactblW(@i^j;B$Sap$^F`)p3| zyX?gXNO(ueur)=>od&w}P?42bLx{7~HDlotz`_xISMiF4r__M%#4mEXDt6 z*7dA1IPlZumoy6QB*${Ado@jb5vP+B0$X#gYj)-O!>FcVL|9F?0h?BjB$qRq6ie>+ zc!O9kVVE%7d?k4yP7o`6AQ5IG=aqYQQ3UX6C)t@FhwgztPj>*UC~!L5AvHl0Y`1v> z<;xG_2;tpbr^U3Ri&>dqkLxU`5W4$!TAoxK!`%LFUzR$&-<>yZnxX_^yOvSMo8W|j zi_r2a&zOj5jlr7B$3#T*AKZZQw7w2*wV1oZO^krzSbXGCV6{&V@HyS)-8E-?{X}u`Zd* zH^~Ic3~;?eaf(JnUn+-yhc0{O>M^Zg_AlcfHHIvB2y}h3IWAs{)D_@UuoYv-{BTW4 zgM`io8>7_RjGhhZUd}w~tkgzX(C`t|P+73y%o`%aIpXz$rFWN=S%LmG2ln;h*?1f% zDfD@ga{9Hx^YRdJdr7f-*tkVCJh1;x^4rW>WX=dv{`#6lX<`wp!PTN?ag4gYM0p7y zGw17g^edp3E?QkB>AZ4*u4@u?rkZnS4wv0!APp7j#pfUw4k1uJ!g+mXcYS`e88{Hx zW|MlEbA)PLPCIrWMRH#OoLh5@8`QHc91QT087NEW{XB#+ZHA+YxAX9a16(2m9efzR z#e=<)-%WgHlh{Kg?~Wn}?k$uRx9HR-g;r#5R4*ue(_Qu2|V`Su*#QoIT;o2 z=mH1aua>Q}BD{U4WTh2jY^jO-+j9(i^7VHbl(mM?)`YbUg+iC4Qn(4rLr`CuLEtG9(iVZe(AJ}C zDrvlN7UUOJ78#EuB3)l~xAgrTv(;`8y0`klh5;4P2f4QC{Fk*~I0%C$PzCXIwhRW` z*oL@O`mDBI=~d4a1ic+-rgnHBKAz;NQb)hF#e@rb`#AZ40AcP>;8%6xIc+17=!woh zljfZ|p$-DG>;af$N>v)m#GGeEh$uhiC<1ZhBrVN6w#?=fn!euhUOQyozY&<@Q&-JP zA&9qYrneHGoF0$AGh42HZUry~zIEqJ#9=2EX^oK0Ux6Df6nbvstUgvitfZ=AV;Xf% zCdZAep=zf^kNY+cjlo~BK!IuWmrPoJ9;cumw#2he7(%9)^lyu*(nTcp2PiM}4 zJ5ucOUr6A?m$U>DuhA-ZNnpNCo7e7Q-qzG_Kvjs1=UahJt{|ajm?|KUh&uO zses~RWQr7tb8&(}EO#qYPu!B8DSKiwh{4qmI}8v{ewpKEcpm9mm#xrxwF6FOU^By#lf&CF@;C;2s_0I8z`kwsl#cL4)3r*;Lq?^OUuhY#Np6DQ zYetVIGD?FrU|-_)mRi!%LVmv9coOcblXDzF&YUt)JdTG>{>l@HG8AazWUl=Lmq%~Q zu%u>LSL2etE<$F*ERnef(uC7@YBj55stQNsb{DCKEN7vYCoc9HI8<8juB5lp_4P6n zbNFC;_yVp?o2^I}nBSMh@FL)&jkrG*+Pk&|Q+4lv*f2n_p3`1DX4e{dB z6^@0vxk{itOsV-MzQ-~F%wxaGg1RH*IkX9uW9Rr`k{oMR5LkMSrv6qvgTioz-=mSL|;Y~H+=){H$Jx*tWF&UBr~m z>qk2p>mO;2HySgwl#t*O@M0ol!kP`vFeOUGuo^87OR0G#HBnLTSl0zVZc_M2)u9dg zp)U+kI&A62)uY@U?IUXI7rNY@5Hkzvl+9q?ECKHh3^Q&UoG0tZXNwxN`sh+h=M6C% z#BS+sBq6RccfL&gXhnZ&OswzoYjm1WGLm{VT zDBwZuyZu*O(1LbuI}mob9CKIe3W^FWi7djBlO2yV9B#Ou8*$T$K?xNov)dhfPmWs8 z$-7&g)f8lpI>pGc@Vrf4n;!*!@p}_5*o?|4MR_SL1Q*?h)*SX 
z@~@%bOW20 zmFR3-64HL+niffuk-^osvD%?$p#n-aDMgthGeVuGY>g>5U9|ze@NFI2jM&BjghR`j zVKeoTo;EVgI9sk)c%dc(n4MYs+2eOJCnJ_yuz(7jNwn9yuILbTkUHA1TuJ~UEy>e48EYdF1H4zL&tp8>S>yxw>mMCaF!OEnAgB~33EFgFWY0r)lW|+rlN;jM$BMPa??tx zz5G+bQAQqrTBy<5QU#u6FRL$S>Iv-54!Km!c5ekWD3D+?h@y0kG0)BZ(X9$WE&CwG zq~pM|4~4Ur@=*xtqMNb7q-)%bHlQ^cJnsEquM(bnPd|}G(5)l`@7$c<#^PHw`$(O_ z9+Qs*t)+J$hf?U@vyGc$3T*NBN7!NM))g_ zSEYJH^)&!kQXY-K?SX36FWau(oYoJ|VHn1Fj6c{HcDk=}QC13O zL#?`x(6}!+3-Tita+-7&89VWm@2bqBQS73KuS(-ak{yLlo=6P1@;~q!UQROg-m$;%Fv}6LcC6@P zoec(-5C(Ripi!G5HtFhvYz+ZT0R|KPi}Du63)0!~U#T1AI9Ur<*OOF@6`i_9;EqLT ztD9k$`3MUg@UN53c&WmT&IL*m0;zRwv4-gGIM3yMjc`W8M}+GyPVHwW0EQK8+AVn7 zLo#LpCG+KTA06Z%$`(KeV*f=+8XCkDK~8NeaeNdDRth;<3Phe%o_G*32u5A%MGXnt z=q|x2ZBfA2g}MQv3E9fO+~xfqVDPG;3X==_@Pp-Kb2uX?ND%F~gvQV0c5Nsxe4Pzo z5bApOgC8H-Ma1R-)SWKSwxM>+_(YnSAjT@3YutZRc7;=kzAL0D@e*>s;LQ)&KUa9# zCP8P*P~g_R^n!0`MU#t9Nu?2s{Y~5E(b~CEwozRZW27uJ9*8*IROU2pq6vCULcoui zM(y3%Wr6v2Cxi#0C0queW>zu5Y%?|%8EiwyU~3Td!o$sV$5YOnj>5Lk-;qd01KH;5 zkIgI+x5mHuJunu#a}nTTkC53DhmcsOgd}G4mb$(Glht2ctNL8cvF5jO z|I=gon}L;3k2lmnYmYZhtLw|u(b<2pHPrrs32D|nMN)>b!E0F*C2nwA03Pxj!ww?F z8{?rSQ?bRygAzt1VVCU=Ni;#30iguAy4=vw(0OyXYw`&BY@kpw+F-DNQ)-lDYQzm2IHzYVNKsuPU(;}%Z>bDjGyoyH5*ov$Ui&E_zd;}G9{go=v9`Lczx0kBSj8sktFXXiW;}I_g$Drsdh>+hz4m8^G zlUuM?uH$@){()krfCQPCPZW>rW+MrH2Zl%|xMsdsd0TbUG+k2vm8A$pJwd93_D1yj zr0$GwQZY~{npDx4!wD@Vj>Tj0!jD$K8^l1y+r%(&al^EW&IW)24Jl+m({F(^6NL3J z;{rGom&;R*@rg{9_<3W&EI=q?6AV)8w{s*6rYx-IhxwtB z4J*aln3*!W-WQhGaF37ASw9^#-*wC~AdQ8fC+Li}u>%hnxbQK!o z>Dw)ZE{v_u><kM%KPG*v~u z`I!k+@LvO)JwMM|U}skca*ofZ;w|Cd1|U9*Ve2@mOYo&2C&COvD>$%UD>7;)ohk68 zhMwEYw<=B0A$U1a2P6=1!8hQrf{c5re1fyowHEC+`;#QB6xwM0bUPuk{$E%IBKl@# zNkAj{A~8-ic6l2MiUCz~0ApL{&f_~qYRRh|Dy*@)uZQltZz_u8j5CJ&AQdS53FHR8 zo_?2Q1l*2Ug)j=r$6s3m6Pg3n*mzr;T|gyOp|2E0rt=&z?})dPn=xHmD7!Pkxg_as z;4tMSBm4-%w>h`g9K*+5=g|uxS!r)O^0@U@nBGv&JLs*%H-PNmf>DleX?NgWJg=8XRh%3hfqp?{ zJr*;JN$6whk9aC(O~DL#`ulIxyyOv2*3OO+2?wLf zENH4)U#j1wAOUkt-ppOJlo4z&-KrnC2y&~chyn9#B|>s)O`e%_E! 
zy!XYEs0;JE3d&pn2{7z!3Eq5dG5s6EUN{fPA!pDH42 zVIv;D+W|Ch`zx8E`A+nGKRkb=@wV?$r%X?+3shGjD=@3+ZZXsAFn!5UMYGf;y{~&S zfS0bSGs_o`vW8@*(|2~sq6-fWX_Rzy?Vb(vcPa2(*d(Epf&jIm8ALg$7$SYmJPU%c zplr(QB00U4y&@NgmIuzS*5OB@R&4YoTFYC@AV9ZlrarrDS+#OWLm%b#EU*#~Q_c2T z#Nb;qcchV4-STyPYpUT_Ok!0bZ~b02hyM7gxk`6ZNnNPM;)wLMBpYl{s%MLW&$`6% zc-8C1fhDYYtmd7PdLlFp0vmM0ZJ_*9kq(O=a(3JPhKAa6a^Xb1+j|H-ecIKCGkt%v zMFE4?*q1ePnv4cD3!e=1NWhyR*0<;Km%r7wI0NkT_b0f70|T1ctk5G*bki~3&f!0D z;a%L08IV62!F@ZHpbNY=w7`Nz4_Vmgt-+ixhOA}w2#XIcQIo$? zF)@QOwnZh3PRXxXBf#wO6)#tvKa@91mH4D&D4AR!0WTEY)ul0KoDbo={0!4apB@<^ zJ0cRC`Z%4wp}=FbZH`y-tPUG*{qF%UvwKd7C0xV`fI8*h9e3lM2y({TtSnD$wBMVN z0;wcRAmZtM=@Xem{8=+Hsd&~jDt>2+DJFx4E8*2e!7~Tf7MZ>}j%2j-*@50t`5nVM z31()5LYN}@Ut&@RK2w9pQCu4kx|^oO$#Su$M=^~K9hp2@&4QGDcEsB&60Ej{{uTh8 z0kVN@z@WO&=Xcb%?41<6(SrTuHhzmqkIx1I2cG2Y2$uu+GzWN=*FY5H@vz~iO=;hH_96T=St@k zdg>LQ*&SvPU-!`5{f5uUgSf+UumbBT{7q7x$bN;8faVDOu>{x}RjKik{unf|`cix@ zhje6j{_LWQ%Ew9IazI=be$p3z;HVu25m_tb8KI+f0#*nNf{^~jVN6esU=bj_yB#wq zI{Z%FR$XSL!M$=RBh%g+H&B%A(^4IiS&yawIc+KXZhJcg2r9KDDp>@i+9hg@G6aQc ziA7cC-n)dyw`OZolyy70#Kr!nKo^X|>dJV)K+7oIMONI;4H4Zw10GHjWnTnk~+}p-p?BESk>#;QF{(vVtQg8Y3mWWC0Mm+;zeW zO5r6q_9aUIRRdu1X!AnWVZ|yu+Dfhxgls|b znPY7Ij$fLSVHikYfB$K%A#eoaQ!j~bCB`uNNZZJRS)mE1!Y#T8S+8_Se9FDs-~jLw zXI`Fa+5XqZFP}<=Ps9e@^}*}(w)=^sx$DvE0-nyqRr1-oWbXO5!{6l} z75iOl)cUZP41B%Xg#wzJs0-Uy^CzFfBF{g|M>>}(Zu&CsSFk4t?l!mBH2yVWR9+6Z z1dc53T$xD(yW~;P<&$fQ&#FWBFXCh4sT^!7ln|)gW?62Go-$Z zoq(_Kj#dIqpof(@Tt>ba;AXTFPoL?j4|?pBc%a3y5nC0mS+cvn!s) zXM?PV_BkHAKKc}6y>dUKvH{_@U~XBMjTtbHr_R$|9R(FldvHlAwW^V*vR>Z$OZVTg zN#enT=6MFxV8bc-LU2^IyFPD?;#U5o*N@*c55LgmLY-s877@t z&Yby^W@h9HDS{_PsGkO~MqT+@Plfk#`PVE6Sq5_+w4 zoir}u>3gok9HfLZQc-4SEf3)ndsN%%?NlOPR4+t|e97^qBDPIW}IQn*R$ zTv#<^ne#Ecp7t)TuB1z3dLYE7l?g>O#SMp*lZD5Su3dvV>Zq#drUynLd7Y=KD&RRW z^rBW%69Au!Ynv$X6A+UiGk+FyD&~Qd3Rj*@9|6pEskC`t90iURv3w z51KC3on^YK7y7?u>lQrSC+WX`iU?1$?KoR;8b>$Q1?2y?BzQ)1$i63~cRZ!zu>C-E znpzzON_*&K2}zL9!s%cK7lpX5qf55S;i@~Zl6b%!2Ese5$N6ljkKk&5E?44>+@8xT 
zMUl}@qfsQFKR45niF3*SuCzZM84e%9;WwR%$gb1#td-Yw@#_e6)XQpE4cPX&jpE-Z zU#$@~aSv{5w3^tEWZ&fUn^X?MtIA4Eq{p-fLF4@8&a0IFgP{cCeVxHM^V#$*^8p?? z)E#`3V#AWDT4gHHo$|uCOXYC-P%K3b2@qP!(}om3#KRG>FaMmt4qAs7a_^e~7dzq( zkt6A+*KLbZX~)*nQR)oZ;1GyQNU-bmq%NBobXl{Pqy?^^+G*OsbV^r8gS5qn4@fD=KzJSx{`S|JQ@0~rwo*42>_a4l=@$iMQB+xJg*L)2wb?=r$-H zc~$6gvCjUvDNkJcGjU6LMN?|CM05!gw13+!SpI&2-f8QwpVSJpjwp;t_Vrw|@irfT z()(crOVqwG^Uq-_(b^r!g`3-PNVX*#=f54kBPcx~-~O-S2wl0Qq*D>}iRHV|8H^k!6sM*-!K`sT(l$CC`g$=DMW%l0gY6{hf= z(T^WWQpYE6@s>ok8hy~Kcp3_qF8|eD{6?hyIm>XN5JS#GL{;V8Q$+Wd4X7&QDYs_dq~8h9dL}3tnVe5JpEQ7xcPz5x9wf zUo2uUH1O~A1xrkG;766BsP--bT%i{pw_pU?mEH$Gw!hk1zr)rVZ*Wz_`O`+vUl+|9 ziH9V;6+?N`>l9umLk+wjZ93xQ9}|=7Oj0G|88v%;5bd=un;k`bN&+uh5iWr17A9gO zH0b(u^f+3)53XNiCUSbY%G-KvD7)NI5}wiS8!u<``B zQ!7}@Pt~4kIL%OlF6d0XpzY*(l0im5wM&%Q54Ll3g~+ZgBt(S=3>s_(8$ch;$h=s? z4v8ibuTHO9-IUku@Xxwgi)juS3EXNU3Uaw~Rp`1tyE`Y~JzUiz#Lb#Nv zBK%r08?8L0^P$*zvj`)qb_2wm*NrUa6(_mtljSmVGG{PhRW#r9V4@g}s7;i%#R14a z^!L$~&~qI0^iUEGZ)HYH*TPjnBhWlFbe=!RlL94@9P)lUmYR^^80-HJkP03{21wom ztA`vUx||>~G*e*lD}eEQ<_onwF>@(5&@H5DfBV5oBO>ON)>?u2d0wr=iVoxu7@`0- zK*+yb`;(#3H(lgYjQF;)IFc642$@`^jEcL%)5c}2!AX=--Jiy{f>EwG@doq(!>e^N zbV1v8y!_B`*WLYb_&Ab7V`J5s%U(*i>m;*Ek%UPk-7-sh5OLazwK4sT-JiaCq6N=QnC!ixBB zg4x+pq{D;C^xSdRN=gig?mR9ONg7m7|NMj%{CD_M?;_@C*0mo>?3n1S$FpnyJHeKP zT;nEZgo;zBK5fo?;~wZIWTUe<9pfjlGNy=2k%ieh;d1v3`+nm~Vs{LG$!yB0%0ru% z4qa$?yL2_)0tXhkla!lrCfqCKz-QH^*Qn^l+}?mq=|L-l34c+CNCik+U{mZg(OH7- z7q-`@h{K^0jBo)XhRM%SXcuRvi=>^p9(hHByR-CzT#_|2YJ~en-9Gx*10~d!gPY5_ z@!Ml^7#xTWeH~ZXdw$3Cl$__}93aU|s>HYXrLF)V=Q@#qF2Wj3`|QcU=OA2#UEl)d zwCA1C5*El)Wshmj95(Vm>_)}Wf|=+qR|sW_Xfd0_tQu_poxpAv`o`yMgZiYfE*zK_ z8!%)V{%v^)u|4ZF^Q9sJ%JQQu*R9vPvU{vuWVKsNn1iW&wNe2vMrCHa+Ov4x*rDb- zcx*pyS*U1&!5pRg;h&pt`=(5I@pI*JM;hV&0?vfO&mhw*L_8u5>3(i0ziZk%7YO1q zRBcKHhRh~D$7z7_m*OnJ%x^2dgFlXQ1iP}^Kc(jR>r!rE)y<`M734*L{d|$#KNxSQ z-6F0O-`4@l8cO~lf`$e~bjs}h%CxDU`VMu6Db<`OHa;B5qgjIf%h_GaBX+5DENbNK5SKhhS5&aU~Y$*E1VDnIuKe3!qI2aucIJgcs zs!c3DJ?+t&RztsJXT;Zq+$H37?G4ez^k5E?%FaR`@4v8BDFpH@i^&pL6|WU 
z)0j$~ah2{{Hj~+_rDk=Fk+_K=oN`k*(BP#Q_e1`w%`7~Q+mcm^!(8Ht z4-a->MH0=B@YQ4Tma#D#;vfC|3L;c*HN;#!iyBp%U~?I26e19Q#~gL{$w~MsU^=Ly zU*>-7y9X~n8ydd4t_N5tz4$;%re|jaj`sk^enCn_m9Xp7oT?kw$+iD zGwGG!v5-GsU_Z5kv$SN-j2SL#82ZS6w*1!(<)t?Eph6vp8(YX^?N6(?+C_z8I;V7< zrX_$mAr>jLxiNXV3BKZRmfW*2P|AJDP7kDTcXZP|F_=($!sKTKF%zr2)$y(zV-?I^ z^g~SCsJH47ak)K2UOb3!ZD+2rb*=tAAHnopVTcW4sB}o~OLNi+`c;&8)n;CpXg;p| zW*X>5L=L^?!;P@%tJ<9D^O_|2l%uyF8HH%LVq5hh*%AAVivg)Tw(Li2rK-*P%5|j8 z8MJ4^W63SPrUis&n2isb-ws{!@aRLRTuORwxNYt2wkHWBsW>Xa)asS7Xy4Qqn=>1{ z7qZ+jaldC6Prn8^7nZPagzE68LJqnCyJ+HH4obY0x-@SrEUdz$vs0Ke%1LD9B>sU$NX$~a_O@1uA_ri3?tmlzwhheEWIXOXZ^G|H$(rFS1Q zKJT9IL+GH7om&PTeJ);3iTz1@fsM~OhZQCy`!b#{WCK8`c`S0Cp(OSTijmOqO_!yXCxR(kHLf%F#E*^ zG>LQwtW?q!+O?v7?%dmcz`!!#TOTI0JxMwfUD#-}lCLT8G)R9<;g7qqftg#g<^@)8 z$>pmRW53H|8osy!Oz!MIv*}5AK;-_+WOOp4?T0N`f%xjJ0~3vPd>k_(*STbqtsMXS zyWTJ5=HIdIjVE=IkzasR1U@^TTh#j`u-o*i5TEwZhcKL@$ppqEVxH(!rlF8`IS`I+ z;b5WW;tDkj>IUBJF{BG&!rhAhco#gp?52GWN_}zv5Qy+6kG885`3GEylee*1sWII>E!V%OLz!CP;e1 zWWdulSEe7?b=WD(G_U05=Yy!GI=fOGW1Gx*iEQi8p%J8P^M=VVphx6lnD>7iJQi-F zpVWw`Zne|A*`-hj>{u!y3wZKix_Aci*YJgF8?Zfc{zx#*iTnLNvkX@H_uF+p&0G+~ zBMs;$&6Ux8FK3`BXx#~yreQ$!dEhCkE-w3^FW-_R1Uy$$d?bU5q@#?v zjQJ5A7*ELl`VUCEtXR95<>{%G{5S8v!|vP zIPjwtJv9H7tc}Km|04i5iGu}Vp(+BXt7`;gPa<>)k(9ZKj-@nR6IA@LM{W&d>3wnu3 ze4D)Ne0m7ZWp9`~wzUXSXIm=K%6}LfZ9$p3o7k(_GfZ;4Cci2i&VIH2AqH^E4Am@w?=&u*Vb{7ePLMeux@#12OC^}w%2V%hy2Qt0yr@bn_ zc0Xmf7#HKit5It{Onhv>4^xrmhztdW?FGC;SsB`e|Hdb>eX4U1R-4qe_JYKV#{J(^ zD?>JFIo`_>TAyi=cIH2YRJlTB;DC_rH&2giKrnn`QPG8X~BMn&ajDz)-Kcj|!%Gta04}T{C`EQ}W60QeEi;hr#j*F8d>P~~d zNubRZ^Yq}^jXm1VH_$P&Yna~Nd#?+zQwQ-rMwbb}{AjmWqKnKP%`Ui2vw=swTHm-* zZ=^$-lpMnA0-}@kGa1s)7}ycu%fJy$_Pzbmz%%4#-?mX$B^|?s$l)6+n!_5Iu6U(^ z&e(^qlJ%t59`GyAy)71e$a<>#6pI+@rRK9UqhljTj$@byFQLURdF*G{wo7entew#` z&8cicznluJL2xs=%;G^=l@ClNw0eRlBtJA+ct?f?lJ0us_Ekrcml^H|xvUG*3KQ3y zP^y1ZCqy2AXs|Q-=`ASTXk24I$hwc4jCpT&G+|F_AO_@@U02U$w{6i{bXs16LhqKY?mrq()p>_ro;ckZ%i7)kk2Fx#}$lN*3 zotICIe6v!MtwDnQKKbs74-FuWyB6FyvcD9&gDN4q7K^(HCV7`G8}w#Kt@l6;-K3Yg 
zys8q1yBFKMo|g}UAIImiKILPUb@6h?uak?qeh1y(lV<*Fc{EKAo!gIu<>M&k5RK0G zK>81Ed6to|oEGQwVCuj^3a)E&xSM_LFpcdgJY3PT_O$5mRBll{!h!HFNHJ)oei zr9i>FyzKzJ=jasvE&DLkiAaVQT?60J7MC1)kZL_=JlR8YQ`_+muoB z!o|izLMhiy{O4hK-FGP4YApoTjMhe#HS*Z&=|ZTgujPmhD#$BLQ|A`>*C{oO#V zk9%h8e=!YDQ0rv5KbP`sFOQ7dswfWuJ2i45y*2F&0s1)!3S1c;fisKSCgISEAjorW zP-O#WY-F%Wzv_}3{a3d|Ug%-db#7UAoip27Wak3(!hgG*_tKrEtj}MTpm5#JDvK;P zXQ24v42;P*iB{#;=$=`An;K1vOS>ke55m?hRSYlKy336rhuM&731l2r>W8iZ7hf_D zJ)!KYc~a9!wVdO8CJY@N5I6V2isORjpU}I96Vn#<+8>)3-{Gh<1xB=NfuAqlYLW5Z zXK#gC9|a7lmrFI`hU}sSaRfatj>5e|E)`|}pxhmT;Wm$Inn3y4)&8Bi5z^+B{V8o_ zQ^HWBvOz2gpA87QF0%knT{vqn3uIc5B?B^BF3u+ET+R)1@3BxxyC03_t0s&CMfQ#O&{gm!9T;JiG&XC)h9@JaCeDo5PJPM| zs++QM{=A|);^4st-sM%-P`VU^w8~$D1LNFwX|<_Sir#&JX784GlQ7_>WgN;ud9{d> zXhvP&JT7k<;4INCHg0_XXwz_EB%JYs>A4-|8JhjHEfwoTfSg4)e|wl`U$bxbpv&2& zL?>^{RoeD~5QqUqFC70NgExN5jX2?`1c@XzN+w25!B%hvrXTO7rNSrodQvz-vrcnq zOb!?eMaIX=wZJ&)L9>L|tND6kN7yCH5$P_mQQ?C7vfTo*K4ie(SS0Y}i9NN)&R~|h z(-0wZ=uP@um=$`{K0P;NvC*1GIGPJ1=RiT!GiKxQOUEdpypGE8YGX%2svM_G1yL6K z#!rDb1pSMr3neHqGKh$F6qKkU2B_WCQ^%sLuV{)o{yGVJFFFt`+S^I61cUZC0}~DyjO@JDgw3sZjzczgcrn5c3m^e z-dMKo*RoVTFly1{>~ye326Zqw_7)*%;!w@{^JOc?E3VpOp_)o~u6ND^z_nixXMIQk zE&A2RH%$zOD5xEd#~3{2_K4 z*xc82Z&iO9q~De%#ni17Zir)sAW+?ABsEb)SMe~;M|qtn|8P(`13SaBugBj@H*LX4$Kdl1pe{MMrTFfqsk{(ZfD}OD)rNmpPW=C6j6@>n z@Bgda{Oae?HkgzPO&kjHw}{IkEZ=fKV4J|CF4QYw)&;kz5HeboEpMwm@pK9?@eHj5 zc>lW(jB0&gDMF%RY%?@RWntokEj9GLG&N4^k&m(=CMW6FNv9m8wfb!8%t+9_K*GzA zDk#=M3jW(y8BOa~H0)aNqmE^QPs77? 
zne!&MfpjJ*vP=5jy($Wr4vnIN%;_2G6j<|gf2IYkn!M1e z;fO-RZF^E*9Oi6Nmo#$X!`p9sxnd5D;6OLi()`jJhZRFN1qBHfn8!s0n`DtnG$ZQ1 zOjGcGdbHv!M1PGR6H&{8@H^eT(pcp390~D!uI!($5);ldB*eT>Bizy%IVSN+nGag{ zMJ(*}Z~kI&!exC}-`)-XgO^<_evFy*hReA27pLQ&k)~Idg5JhBbSK!T(>TgA|J#-gkzCy-;Nk6KEwNHBZ6MzD@p3zWvjOJt z867TpV@A2RU|E11uLx27z>=CU@?BVkFBt*)F4GZ-i6?hzOff(VXBmQgP$Uf`kT*f5Mo- ztPk$dSKyYj*}NWq*b~D3)Z_C>7+b>yuY_8Orw9ZHK+xXkwHcI;6 z(Obb>sMTk@bx^TAwzZw}z?i(H`S)o`lNI zZYH#w$x~BNi^n2wTMQ1aToE_UJz`GYjG~?Db+?ct!LIviLGo_~Oyv>pV>*(=5H%Ey zO#G1x*wqdvwrShcH~p=w5u!{O2_3pPLBRU*vo~rKm?p!EsP2UG0)gscC53taMe{T7TMa?ooyDm%6 zXx0U-p=H>~YR--fp|Wn%8z2;NYM1ISuw$_xD&?^rlQy{j93}Os*asT1;O*&?UtTAh z8rWvh>(=}|w6~vy9B+7P`ER%OD{7izd^t|TdL6;bz7_<#(B~9JD7f_@XqA+3b$^=h zr!a7@J_A&;hd9EQ_0z`&YrL5eT5!#Bb$R;drQJ^Yg3%+5rc`~*l3~8^^<68J&W#1Q z=7^OxV*NS932hCk>c|u%usL8BKg@plvfX_bO=cMv*p(FdJnSo|8o0wHR1^j*NeAPV zE$(?43Y>(5fFgIJRQ1tRv>WHc4Y&J-30S0_*HF$zf`sYm8~g8|Xr?ImK_JmyLa}%* zA0UTz^79X|_vNiHQ!pUU4CpzzD@R*qj{`;Nhecc^*Pte1{B!mw>piG~`S+v{{{d23 zc)tSJ#31JHttKk6iHBIT3ck~s@es}Jp4e@dUNq{3@&c_Go=%Jm_;Uu6tO5gbpeTQH*%L> z+)5rctbWqyI*|;wS0zi4O@{828H=ZH&(L&}e`;}TWtorTiy@2}b3RUkq;cg?w)Y~j zgaSrUqpoJ$3O;GjAfQpcmS_em`P3oXJR{3q>LlHWNWbV}QawN1y08y4)qY|GA1As^ z@bUQhWmmo&$1i)(NH$o0ZG$jqF};bJ84<_6dJhH&YSPS@O^t%k0p075uL7&T zs?EYVF};+?msS3vgbX2kA^?DT3}o4Gk=4K*#%A7mJvldgF%vMCU1lLS@$4aS!4LADh~ z$qAw3{O;UmwP>>PCs|mxO1;;@?+#pFWRhPtntg4VE**OsTahQDfD!Zr-DV z-&14CKMH@ql$FPJ*h!;RhRv$9i4%1?N}fajW~JD`n+%EjZU{;O7IQAQ;XYCbzke;D zGw=D#-6(*e#eZ7wy$9ROz^CVZgQ%Sr#Fh@te_YX<$L#D1s6^e9co8UR`^*8g`CRI^v4$aG*d z%|(y*yF(yL z2xc9kjPZ&v0x~PqQr1+oe;A}Q!ADT1epArL_gl}1u{`~*NS z$-sjc@$%FFLvSz~sUK9rp2_hFO8*b?@f|-4gWR{_B$jMprqDV!t)@n>-z8*q@{rjx z6E^0_v5+Uzv&nsDxKH;Iv+^9yj=EfyE>Xe& zbPJ?d*+URgeq0&pu!?0gfeB#{K?Wr#Hh6j2W`Ue%1{i+!-*psikzJNdOqw&FdcqGS z@Up*c=6oVx$v5l#zp~<(3vN{l^z(k>Z`j+LMcQAZa(8!BSP&3mQ?sO6a+-~rxSEp- zZ38g;Gu!?RpoSXm*hh&+I*mX3Uh=p>Pb6{GOF`qSM^iR$MtdNA5(C1_n6-f_5Jg`q zT9>VQO7E->YAC{Aqi?5$5gGx%Y{KYUFzcwAqDRFA;ki(+rMx6o&Jh#pKw~K>4g47O 
zL!iBwuTn`~M-przW4wLp9{1l=^{eq)ul^d%!hejVF3-V1`?vs8bH)z4r0|A>7yS)WG^{Ig0!|GU+s|Jw-|xJFD0G2KKG6#D4@^l>J3)^cQtZp! z0+5`hZ=Ajkl&chylIfA7d%+cAp*Uo=1iagsKSka0Bj13R*V^2n6Z~FdH@3b~uH|I(w*q1+x&cjoN3slVEtuYiI@YpEES4qPd|9$3KeXb*QznW)u*W z`Zg#gepFe~gy35Up!v zHp7GUO4{`kspJh=zG|i2$1eO=?<-;`7)Q>-?2y|1TK^!G?E@b#`uzz7+LPoJ=D6+Q zu@iL)VOre+3^6Ad3K0IjNf7YF56oTiAzV#_W4YZkqxefki4`-FCLANk*75qvlqJj7 z$0*X|wS(vc3|OC1RP%Ka#kVyx++LpYsgxTqVT15J?FMUlC1;TaEiQ%4e9&~_d+7z- z@R#{%ir+gSXSkw?Xkukehx+SXCN2i8+0b>^A33ymeiKYp3gyCh;=SjCsV=*`k*9-*|q z7kLk7!=#N0w-MfI$n&`4_em9r&zdrZ=v3BJGlsK{gFZGT|uM`$84qU(?(w;AmOv9O#_U1g@kJvxtO!IxbgY)~0a z_*(C!0G#Sh=VYVM)(jK)D-(wY*_*5k*t>BHj&iqV|DOz2!1qtVdRX1rm%os-gJtqr z$Psm2t>Vl?D&N3#tvt{-0K7Le!^Vt&zSDIXE4+v(fL)#LDX*#CDhrmIm}o3o?pUtmxs%Umb^~4kixi%GD~zn;z|}z_4XQ zRJfCKA#Up-7s%!#XKC|mzrclbBc3^dIl+SLgpS;l)!$z)Oo^8k1WD^z zOa{4G8ArE_w*PqYql`zFNr*yFgZ+>$fuM0@xU+tm=UanjaezMqVgw@lKaIREJ;5m;uA z9+E`@Eu;;cwq7|D=N zIvm(SqEAK`z6AXJGvQOybHZP3IemKYmVk`VDD)TYnNc zc68%j&hzqS&|239pmS+0tpwh}r@PW#z{MAngW zl(gn+0s3g;d3m6ulluo_Asaove^*jls3XOIe#h&Q{_f(D@}&>Y5@ z{@R3P+%g@b8ScxF=TfzMvr7_#lrAxjzdA>{dQ9+I|BdYZ)lTU(a%Xv&Y%ASHL({&A36Ww)hdVcZZcw3i~2P8MlY)oV|?h#XYksYgl=KCGMinvu$w ze-a}uGS?ds)DlzF`P283BGu7JW5&7~kpS)p{3rRnOVHKANFj)^0YVCv^mZ0&vVs7CPc!|=n;dFBgjUJ!EM9$H?e1-U zzSnu5ITZXfEF{$sPjWo>jFsL4X)ku*Jq~$@V6Eolv7#Xv}i z1))j3!@?K)0ZoH!N}z4zbWRGjiPiAR@&RN-9a^fweZXyPNTOIU5*d6mwq&7*tYxxx zHIn`>v3|?KUen#CqrvZrGXR{?_S&OwS-cdDU)z_sr!$S}!9Bdm*Ok&fJJpc&bQe{t z$l-un?~m5>YCU3tJSG2dGzG}@MAP|T2V^sYEFKw1hU@UgvN8HHX2)KdGEf2*)*r!d zPG4vB=O9yyTT{qOBO3cqH8{Nj3YL$3bYCsEVH~UsU8?yeg!ro8QWA=Jw{St=u&zI=XR=oMnGI}WOzML<@Xcvhf7MVM?tCH?k* z=B7k9o5=c8YapQ9IC=w^5l=JWeKEoVir`iz4?H~UAL7WA%5wvKMglKbwZjU^mDDiS z$rJ*$f~6&jEcs(-gnhw?M~g|jFH~CJo?b0CG@BDIlsUsS@~(g=E@-=EC0kh zfqX*;3s07){a%Of6Sba~HIyKzv7hPbC>SdGae1yVEQ&r_ zex`)fa!(`hQ`9&>&`Y|1mG$LA=a}`Mf)9dfZOfMBeKNj}5KJ~=eEF7C-|8wRzb~6M zr22E_u;fx=%(#PxK2yV;jVo?Qzk|o_0inQ{Z_11dKE=4Do{SNF=QG$awYi@ub0gJPwn_Oh-CYyo##d&j8t6>K-itO&0eudBA5s 
zL4)-PBFjMa_-*9Gw=FcjdCVrrpP35McfT$s7GO<_;QA9dl%c%@zvEw=Jbq_<`r=apcT57*>4q9iW1kGIGgN+2| z58<44mgIBlii7Qq)&cK-Wu#%RJGjZUyJNOzF}8~53S7>`1AD1<_SEN#Y}(EP-Q0WM zD^#mj80Wh(E)2QT_yvM`Gb()Q`s=x7&^<`tm9mOdJFC34v39H*z;;LK8xxXeS5wfk zV3FlNn?#1%p4y#zf@GLYNzKW;ePAhrq|QsdtKo@3;n(!3#(WuU@X~$e+019Qdqf?I ziWD{fXJMPn`N=Q5QCpd-aPEiAvs}xo<3p*}XOux^#QwL7xiDt}CUgPPD0XCW(X>*d44K=MSLT$3fa< zv&-O{VsS*Jf%$ce9=jFN9)v6zNb#bMb11(x@P(SyB)D97~Mc$U)s383;`OUjS z6~Gc z&a2U$?6hZOYtX%*h->1Au&lg}H+(4Zv-c|)ghp|+>Uw9!MYy7E&h{4=!y;4CU$^fz zV%aZJg5c}*h)&-mUI?`g0~|v53Qxd6WDhQpq*=JlS2|JCeX5TUA5^2B$fon zf!}ME%+ILV!+RY*HSnY%xNa48uV%;xD!2c&RwYipZ(ZK13Br;-mmHa^vvfUd5Xeln zfsGd$7GrA_&+>wvEnd#p5&0MO+t%Ka%^g&>d6T4#*q1!wMZ}b)SAA}!n)-cIre#_d zuvXivuZ2wc54D^;PW8o=A?%Y)WpbTfXF2%OpZ^aOIF8+cypb}$cbb->;4)UFH0U$B z(haaaxP3!)jd@uQrHBK+7em>Qxzp9l-1~hr;!~ct%v3@v-Ji)nO}sMxK!*HSE|-n* z4x<77IOSoz{|=Km+oD5IM%dzbdYVG~pw^03{v-uU#jU6*Q$zCiJqtfSDfl9uL#z^3 zv=nx9U&U9+lJ0OI&p=%8!ePN?5u*Q+phxCyFk3ii|NHH4At12cj@JTxrNc zE%F)4wDYxr=C!adKU#h}at5cMklydDf1%smg3?;*v)Okvabv@l*RME#TS&94BlN~r z!6>m#18hnX7ufz3TE={4;!4U6LJ`T}rf*ROC@C=o8L@xIW#3~{@OUg%fu8CHjh5d)8_xIJ z4-mm+!h;@^p(RLOr-Xw)4YXz@#OFAA+V2DV>hETJ#E+0w9{rJQS(mk2Rf}E;6QD9? 
zXwnxb*hY%deN@kSNp zfFeR}2I}_vB0E8aJ7Dy_x2T_su}Pd|43$1EKbl=iy`}4paDlZqd}sTO4&Kp7 zrinB+h23FrtH`^D-uIw5ms$Z5y%8EOlV#|onz0~Z$Q7en&XN`1Lwn+=#VuKd$HMls z+`o#IZv>+t^}N;js9CGMM zQtl_V+#iCes~?M;MVDfNeRMwwo%@?>a$|1bpY@xpW<4oEHOlSWNN$#hVv%e~^(HDc zgpC_lQoKWXv7!pLgx@v@ZXivvhho7lWD*GA_9W_g%VsCpA<^!5hIA{XWZ(pYTV&eR z*>|oY%$_Y@t^I}_2$d| zeLbTDN}KDjB5pD*-tR)A@Rw$>PqDp%4hbQbF87ZoxrP-BahnJn8e9@_e3(T6TuS0y zfEljLfFbrlc~gQb-U@SA+BPy(j!KvEo=gRH#2-836fvit*GP*`q{gDg+UJsTB&+0g1ap<8-RUU7@iiaW7L6>s^YQ8r_dO;x|34Z_!Y~CV{siD8(1_XNk^_c(B%(M zYn6L8+y;)Zqjh+PEv8}v(vL~te<*R8y zn;e-1ASsAXhc1uEj-vxBIsj=sXw=`GEbCfhhNQzn-PVu>hp(y{uST52H z2a_jC{neJS-`JRfFYh=X_SvOa*@Za7> zj`Pf33|kCS?x$2V@ed#5D_FAn$}H=cZV++?Q8WL2%%J*jePq)S-SmV^R0f4f7jlP} zEaZG)2Q={e=mi10r`Bgs6tHKmNG;Qv_aXlcUFxn=kt z2)P~YYh@FI!r5*rz!=MoP9y2RU#^=<#cD$jffOc`&8bg1YvzIQvz@yJkS!@toi%`# zdpW>a9n&WPAp-%evf5o2$dMvvq4*5J^OU)lLGp1-l#m#c==B_zzkkoKn+;U8&=H3W z6BzV@E&gVR8IZs(lC>Ek!pSbvbc(1VDP1MyKm^AxYh^floRwzkkCc5czn~oE^fB`W z;qD$dJP=e4T|XVnjpW40&RSKtAve5x`Bh%&-f}JeCm-OK4^<-0x~u ztjD9BakzW-Z_kgKkU0YfLJeTdlDhBAGc=T>6Emz()S6;S9k4_cG!$EG26lcDWM1sg zkDbKv*Vk!1B-i6RB0t6T*177E$V-@gUxUTGwShmhN8Q0T)^Kx1MhEa*F(69Ev4}jX zbXo>O1xAuo5v+%zm@f20evThyMx;x?TRTfWIQcT10eV%@4yx% zm`KFfPds2-1lUrtLFZn#m+sSiDBEiG`}`wMx*Hz0G3}ddBfNjUCCD@7|7A4$ayC;iCh1VD!(= zt>H2aEm&?b>%yK9f+$;@(&=PvWILV#1#82qscE^sH@?iP>%3~0Z(VCkF35ejM`opW zkYZ|aLof%yl*EUD3E^1dNA+UX$x<-p5QI)OuF8xxNo#TMxOiRa)OtvfEP9>l zNm@|AEFt<4GrHf{e4g5uw}*a4l7L9hn0NtB3ZJ@`=VR73?oAMqg4mRSt$KalU0Nn zlkF#@f+YmTN)@S9{jSdItzZm}L?cNkP!HIfP3C{KMwG?Z?e`An9(%8itFg$dSrMW$ zNcPolr2bQKOlf&zmGlm1==S}#Q6zB5bh~_>M=e1+L`-Z?zEv~sqnw+x0ZjO}qF=&d z6zt(6h79}Zg0(9iCp{iWhrg{ym zI=US)VE8gb(Q(4hMzZTf(}8%KGx0QiNN$r}14e0I{PwS!b`C`_LIFoj2#4FRKx#V% z8Gjo|hM?*3WYj>42xHSw15nMxJV;qGs^)dTlH?wl3t~om+GZ5iB~#39z`RAIw2G_m zop#F6Ch4D_Dkxrmwx@rl9anapJNBRHd+TxSq0tNoIWGK^m~>U~#&xXx^h~!IJ)ehN z6b`ed*Wgtm~;t0{RbZVjB!qE>?)=3&BnI!B!~)>RoYbQ)Tmw`iIN;6Iv2|Lv%ap zBN-z)R&;}PpJQ{C`~x(kh@=_(D1Ta~ijUKpni!vr*SzxhDm@q~^H|{_8+}6{WLotUUW+;m(!QwMuM;xsw 
zB83=NrRjqGD(A5b8r>#~?M`s1?mbJ{*_}Bc=Pb%C&SFg`9 z?O4repg&Hu-hXwlf6>9>;EbDCVC_xou;K2d`NypH-6LM^nVU^lXBhZ-VM|heX!>Bv z6n4@83*orFP^h{V(tXogbJIvfKSc3J^wf!Ej&MEyXX4{#QC;J=1W|w=zY-E{*i$laiFw#;O-$&qeTH>p30xXeyfsZCwqOB>$s*g6! z9>spCFkMFqdk}}K{`r2p)T#`?LKu;K{UQF0p!W>!I*x+{m~^xd=Nc1;jfJ_tiD-R zPU)scd4O%c6R5(!bEs{oyof*02&QY?06lM!r;UR`i*usb@i$0(V`nqdO@72VLgaKL zeuzz{e#%BW5!)7qC4e*l29>sjKf`RHbC-^Sc{WeN7+%BzCi+K9IG_uJH*1;j8tvLyE54eJCBqySmaU0>A*O(1EZ8Ec?jit#&hc|N|5{fKj;w{ z=_hfYTe4y}T5bJ%j11T4z(q{6OkYLFqiE>J=v9XRt{=?XGW5;IrK})Wb#S0PxDz}1 zn_kO`;;8%c$(m3E=KRcTK>W7Lc!Uj~KI~F6$Epw28I5F6R6+TP`P%*qTpV(Eqcr`z zTyTLGP%BIIR>&(KihJrg#tq&dfAVYtW$WX3{C$i^#e07uHS&*F&W_~ z9R9g3p5u{D=qY=AIGA!a0zynmeC`SV`l0WpfnxDt_-A&mnn?;MyRbmah_?6^ZAsql z%$BPy^}00t^XtroHyVS{A;X-wHzbeCt~_@`so6O=gaVXeyGgj;6%91d)=#_y$b`bzdK()Wclkx--Ec-<)%e2y;>xdRh*WdYb+XQ;ob<>0D9~OD%yc7;x zWo^PJ1T?w9`8#F9(n+ace4Xl!cr2V*cUuj$JL==tdAa3j9no&4O1RxRZNL}RY#3`E zDS+fT(P*%#Cj^EMSB^;-WI#fzFGEDYPGak}LpMkwf5ff%)vTDbAjN-0G0x)Ocm8oJ z*1zxNJ0t6EefeeZyJL`(;ZH-isE&$h#Qu(efDAuG#T-n()yKZFz$rH;V2_H;Jx~58 zySal%HH9HPlxyyAxgaW@7ty^Lk&*X)R$~b?bihI}4j73GnFNVYrGOn(JI8H5Wb3wT zk>r*t!UXZsd+f;5D7|5!mmdWaNNjTubD=|z^~%tQ3f7+~fnP|J61UX3xJfL2Wy`2V z<)R}baE7)a!C~C796!iX9XB61)^Gukn7UXhbj2zvA&R1BvU<7us3+5~+9RBJfZAX! 
zt8j01p_7cZEsU)DH?ohBoM4}$ej|^roKUrN9p9;$W&M7!8!g5k?E=jldf0g=lM34( zJH4)l;lS8Y-58lp;}VuRGv0P@nt!!?RdO@L*Nx#41c_d#PxjDSRdqtuX@5CrLO=Di zlx&5&2Qe~kj1zfJHik)AwC?IX4jURJJl_-6ILm^Um@Iyn1EQcE#9_`s`|_URHs7>U zRJW#hQeb~49$PiQk>a-s2(eWWP0e_$P1z|*Krx?L7cN8Kg$6~VBY6L4EDf(2I+WRj zilPRGKKd$nHZn=H}llqvNg5uoRJ+tdvTmWPQdwF6wskK~AvP z6}s+l&^d6DZ)z=TXIGR#1};PKOvgpR9*(~JaO>(-GSs>CB_PRjBe*sCPg~U1RyFMt z3crj_bS+XsdUPU$fY>;bo}L@#WzWr&l1p*Jx#y)i{?|%#NNa)k&4&1|mauwvB0)H~ z8V0R9@%XzbyN`*J&gX@wjm1kc93mLr)*k-h(fXixuk9fBDG!2Buvk4l{bk{`q@{&?-1nS#*Th|Li>Wm<=NnHJx{j(5krEhQ7H&ns;^eo0DMpEf&b^6 zQfnIegVqrh?(~(aBUrDijXRH>g(cIIrL4qhiu3P1XBw@gD2OF{7xQbj8LP9W&7)sN za7h;}mxoo?5vPj#1Xx1|stJPy6%>V$xgC1>_#B+(l&H)d1Ah*oGEQl5Xm7E=XsZ0f zA6YP5#D#Nu4O`yz3#4XTizNjs`r^{Zsud|pYq^n5AK1n^cyL*L43w1CZ**kD2vs#h zq|wMdhH0^`TqzDy%8%7E-7Mo9%ZMLlP5f6jv1CCOgHEbW4~eu6zpEkQg+L78pE2j$ zaT<3VvSIIv5_vg+M*cWj;K~$Y!e1CE3{|Kvp*Cj2sk6~dkuXi~K z>Z2`~*A~O3<$5(NML8a13y*_Kx+Q?dNCduiwm^)@8k2U}QP~seLe3}Neiz3!Xl4vuz8f?}qM6$N)i9nF|74&a}@4u7% z>hq&m=b1PFkO)8T7&eN5l;mv?k0+=*2@ar^vQbyl)Hcs1$bCesdA3@B0HNOBYVPj} zQi5&6OK48m_@PZcv1}URW@Nb*pu(5g%qKf&FTpItVoZQi$PC(GFKe>bXdBOyhWvO1 zHQ8#9>Xh~*Eaj*-bcx(#?8q5fiCO0~55I7*SEu4LNhgnp;qYfO6}B@;!+T=iKWeAG z;~jk5@iu=eBu31I19k09`BQ`YVC6;9C7yAVT#<3?03=W=sa6Avs>aImijTPnV z=d4}**r~Qt#p}+J)Zc{PCBmadyx58))ph81SR)5VtAzzr$5K-P{6po1ZG8F)DP}0k za-J^^Avh;uz&`t;nB3TFNeoAx!zwi`kng|?VT;KNrmGBsGTUb%?Dl*!d zF^QO2Mh`_}X>JIYRZtiL`_w*#3dR>PK$VUq`GeMs6?!-kZ^9W&nv<9&=YbpwKu+~m z^i+^)ac3E^fgft+mwbWR7zdI$y7B9FF7Qdec!TAtjewZJ>dl`Bsc6f3DYA}h|3UIO zyS$tNXc;_#9a%X2I=@&K%fTN%nE`B$&xbQkXCJvqpg$s6GUFiXSAgZ{+PNlm66sI&T0Ew*ttoEN_0ne`WvQ9Z3#f7?0u zEUHzUq{D(oXdX(qXn9m|zuxwePnyn@Nm1MCuMm6QNjbhX#ONEDdNOT$s$MDA=sTI9 z<))~S+`&>(=a-mQqHAw1yB@99!d)00EK=#}CF!Ej*Df9*Jmo?l_lWEam}8N#i>B~tTu!ec&hs@={V%G{Bjfp$IvS+p3Y5lH4@Ar zT#N+eg5}y+2Tl+yZ2b9LK`qMvxR}TFeiB;V=n&y%-P+MyGygFcm%ILWzy86Z)bKI~ zNX=-&#Sdn0iu`Z)ZK_X_DxE1o{IX~Knoc!;P`3&A-Ps)waSoOwn8A#iOa}`UuP=Bu 
z$MFl5Wos#R+2$V8^LH7Pj6Wb72L!1)Cd{ytLm<#0KGHGl&4{W6GWyOuJ!7X6eUMJ>hylp8kZy7!oDCEKOVmtYd%qi&@A84*fO?2((LB!`c{ zu!P(5CPxTRI0AFv@MR&j>$7~dr$o@rf%Z{@jh z3b;1rnph$|P2coIy492mNe(g0_aJaJ1k>baigUSjIo6Yst~Pip{Cy6GUw>T4)lxlR zwC#H7NCDx-bi1?JC4LL!LIGS7mIcsx5$V;1b{mp6Hdw(+Td7K{RI;~0$~9r&b9jc+ z^()G+9RS!~D_l=lDSM;My}*c=x3InBO^>hKi1iWvl)gd=juI|g(d>ZuVqOoOf0QQ@dyf&Vc?}qSoYcQQ+MQs!X%AXHFnIb;9x7 z3)kUof%$5|$e3juGBi;~Xfh9CET>FQ!=j!9nk$kEk0MUX^mp)Bb{j$w=9hsp{n^w) zvW~E1ijt|{TV*Dk23m~}<%U!6LbvP?WIAXpA!`T0S5R31aq2)}%@O?&^`;Aq3UEz# zh2#JFqM-jzDSf@~U1UVH0gRveMe!psy4xGRFWe%q9gsV8QC?lq>+v{GQw_3^VpfZN z(NZy#z|B9$g+L%oTMW|8DbQr1FU}8BxzpaQD2iO!QKa9>UZ=E!9+ph1(d+a^JLPAQ z@#szhL*3i#?~!ka zcI9;tWVL|bTMZ3YAY0cOUz1=^fbF(aP57ZOpZMOPem=}SRMrHcTc;(xxjSo|d8#U0 z+{v?vFCoKh6Hq{6FKgN2j+w@Zj=%KS$M~8Nj0Lvz&4IocbP}WdctttiRp>;7d@kX}+|JO4 zZDL%=W~V6l`14~m@UWrMpO_fMsA4F8*uB*>AmW(ilE7FJwcC}@9YE({gbtI;jSnY8 zwF=)0I^6K7((~9ATYaN2SC1UT0!XK4ddB+7DF~(zyHqfU;7!-r>+Jq1Xwe1-YGZZ% z^I}46Vt9~q;wFE|OvqUVO&d=Mw?o+$N`3#R!1G_mlc^n962m2Eykz&qY&6`Tc>lRi z6l+GXi7O)HdFQk4#<@15s{r(g_2x`dOcbdWpx`{Nhxl$bGgC~O<^GC`q!W$CLn^T zL@MYhe{WIp@D)sKGLS>cZ2F3V=os~^+b6wlYTv28WRib1|*3m#fKRe()*A5nQ=ef-Oi(6gf}Vt%mZ-glENMVr^sV( zOpay_=Ba@BwSAW%K3{-KZ>wm}`;A(`7BGYl-HlVx4$zTKXOwx`wl79bY)-2)Ler7_ zUzBveG^F>oC2<@lDa8|R%`xNJGL9I_MhsQRgiy>1`&V&!T+$lCoWiw56CWg|J3M9( z@3zU&e5iRRPcJgmq!KI9RGJd|7a%=w*weTt=dO%L>bx37j-UOrSn_jikl>&J^HNcv zDr@=Ad0F_V?@~aZqU!4C_vS;ionE3Pt_P7ZQ z51a$b$kQRHc>s1d9;-vP&1)sB8V(Lfnw)dm208(Gt|x`R=ux)sDI>h4-Hhi3juY66 zlU?xvj=C``U`L`q|NM#fCWFp)-J6*$78QlVTar=WJ!!HEmTi$JXepNV!l)C%4F6`JPJpB&4{ zEIzoS0Ian-gxX}r7p`c!0om${!iUaC#9tH)xui9vy+WByhzbMp(>u2Lr5EL9F)Yx= zGC&5UaB$wCn5co^rN0Q&6&o@3)%UlD(bS@>dac`x)NgWqJM`QbD`HwJy4F6un)(VxCOBk%;8kR} zXM^yYFWC`2MsLiksJZTZ%3>L?CAE8+(Yn``Y`9I?qImhBC|CRypsu+0eaDnBJv0@zfMzgW&;p+`%r9h8){601kRoBS8lEQ zl|dLdqZ+oi=~h{nE>4P5RT)ZmwSeW{@9LY+M=%?$yio4Lwq#4ok>Kh!Eq(Wnp|s{b zBQ`}8TR2I%f97wvJFD8=V8@*B&VT0AmaegK^9?SOO5KWv3fX6V8<9(@aWVUXq8csP z7xizXPjG&%hStV$xZG`BAyd+aU>?;xGdyEtvoFus-&A{5Zsj+bTDkW`%h~dj#GPPI 
zNTzxCA2V-7qx-y z1ysoRUpi!s>`6H<1rDQxeBV3Un}IGnJZPU?Ck$|&5GA!nEpJH?95EVaZb(>4sz6;h zdZkf>C5*fFn+9g!*_t2VSzK2Ql_|e!XUEL+5LC0~t~VoV(=j2Svr=^x%Q=nIDR}Ws z=uo0j<>wq;z{loCQhzd`cC-vAJGWUvw5pyhIrqz#V`lwbmC}Xm6vE)_Y%aq~2kc&S zYse8X_h6hmNbV)b{7&TSg47J7FCPHHc6xENZGsF9`vZEd>~QMNBsN?_d&G@ zW7;C|0v#DaETzR$2s%q7*F`Q6$AUO;WLm8qjDC_<@B3(Ow#nH5+Gl#!Tm8umvDh=Q z6Qx4op|XvraQw8c6K`Y;(cNthY%Mik2VoUvke_U{O5sCExQEOHJTZ{ovdqKlnY}#J zY9&PFZ{pcNoF*60kN{&cEE-cu5NXmoomQ+mohi-JVwXuJoL$-0u8U_C8NW`kX^_8v zY4XDWQV2*HHJ+n#SwwC4w>_24Kz*Bp0lXsJ2>a0tYEEi!(@4^k6~Lem==HnQQdRma z`|&>OS3dui=4%i-&6WrY+WjPMY5}=_-He(I9nX z)L#w>v)BTc3}@nRM^D!gsE*_Nx%B!qNDayYO(kn33F%!RvyuVr-gH9>Hu9By#BaDm zJb9|$WZA){4ggB#`@Lpf@wZ+UjP)EuvG4v>cuYOrY$)iDK7G5{(Oci%DqIeDj5mu7 z5u&$7U1lzH+G{Pr!fble)Ycz#`-Ma>7CstO;D*UTz%b0oaiTKVPe%S_E*=}b;ya_Y z7Z(wXUWPCpOWKrFqU!*Wps->xqempPhTa1AB3yB$;}a^W9zSd9aTJVovZ}*E9f=$= z*l-cg@?8agggE@~Xzp>2!B=x(M0dfDyRQP=8o%0@Kt9ZJ0b91gIqPOQ7n}^kIW&RB z7Q60YuYnUoS}A12USqE*A^wbJp|K}e5QlgkqCTVMJ`{n3P;0_~M{;HmhN9ZD?c$<3 zryC??l$B5?3G|^HhJJ-g;wss;I}9E!N*2;?t;8|EoM=8H?;P*C8=H1g(MgI39FWn% z3G4ow{Pr4s!{T~H!{LA(Kw?|PIv$#!mdsrMjLK0Q@@}@UYNIFk0de;^ATW;b$!y2E zGz^F8QC$QtnHllJ7UAgCSibOJ#0500+;CK=F49Up7&qqfc~h%-8sbg zxhsFtNWR1G-JkL-;nMf{b!~26ai#u41KUU_xV(o1s6J)a7Zsf+2X8O`1u_P%2d_o87bhWdSUNj_cD6M!w0qx3n$`0PUFKqK zcP4Pi)B@}xyfqfp-6VrQ@-U133%Y0eCeZ}I_r3}-#kBh}$#Sf7$r5#bWeI9^QnMH> zT4Z6ozjmSn{wpf|D&!C-DNuEQgBLXTMd+0rW{++|rrp+IQa*E2&!}{r=yy|q_(Dqo zK9{LOFKBnx;D()Vz2;X<4X@Vf-6FBtqLvWOloTT)Lu~~PG?Ovu$DDFW3kBc-9|m$s zYhffC=FXs)Sg1kFVtFi2;Yb*Iee5GZuxQVnwhyk2q1w3ON$MzN@|ckxTBhca*^nUD z?)b}pKI3bB{sZDk`m`#C!Ab`9AF_w#S_I_^fj+|}?$@8G4>+V{f2wEiwMZnUdtcCx ziYd1{apfdA{b2mSKfPKa6o7b$rl5CqOP4Pa-#?BaZZw+2A7nTw6l!Q5uBNc~?g5he z=11x2u|n_Jwh~&t(%MoAB9#NLNKbS;I9bM|=QkwXtdlw>OR*B%bY5}-52;JjRHfBe z{^GKi)2*>sdrI+s?R^nGpDVv{f+joZ+;J?94f9Y!Vawjh;iFTY|GHr*ZSxe8mkl#1 z0Pe)|d+C^Nh_E>^QF_5n5}u z#ak+CB5A=~wI{QT!J0mX`ab=+6-1<%cmMjz<}=!dQ(L4=d*o(p)~zzk*+5fL`6w#V zU9zEpwvqHAZuWM9Oe;BM_3{c9*OB6964<{stEkbv2nQ;lxg(Y8??nCJH?M!K2JT!3 
z9u)h_&pB@1(!F=aCAoP;0GO=OTp%WslL9beSP?J1OUa(}3|`THw6_>B%s0AzHae_s zL5i#X?Poa52@Hn7j=6eV8ihEa$1EMB2ld8VNU-&~27K5uDCG2xu2< zO54V*DwkNSy<#{-K?Atm)+?#~U6anp;*$0Sp2u;#L>T>4@29t7c%8{@G*bNz>zOjcz!5PZUwd{8Db?$L53jJZ4jBa!$jCTd@-pOD zyNRS30+VxATh<+(xB55Zo=dSe(_j?Rlpp%{*wig8mK{)^Fw%T`J6Tsa`(|6tkyXE@ zfvtsFb<&06nS6eSsPG4wsh6_M&o=vc2;9~!#4BZj6^E?pm-Z@NO_}1q+t7~-U&1B% zqw>~EvI|j|`&S-q7W;A_LmmOMZBu?VQ#+kFOv}y!25jFVv0*lgq>*Fwdv&ScU2n%g zz;cXGbD;NkZW&B3i5zCC98hoK=XDO-?}@tJlD*p91C2qH-;b^kYQG(eMKfz&4*tUp z|K(UpW3XWASA&+k9$e6q`OV!OH1{ztF2pNQ+igtN2XBKs zG&XCYZ7}lR8l=mKNKm$MkPgP=Hw&T2DQ}O5BOZ0Dv*j!BJ^(0M*=oXe zm~Xm|YokTrrAW=4J7V``zgiCY1Q2&45S6!}xlHlRNGdP6wEr7_*^~xq7ml^u3B#Lz zTgh^W_m_Xw?4>7IR{&HIGCi=rK8_!62@MN^;M$k=v2t;LxTe7!fJT!vZLJa)Iisn| z65Bw{sRDlLjQ#Wy4KSzWj@4L;dy_x+`|AN*`t(RVFw5iBF{9PBDA$mJQGg~r1}%tW zOC3*tn@G{U03z_0wYT9yP0_UL|D$810=%Vq#l%(3Yvb_}dt^E2(ipS`=J?H;pX2W} zd7WgZx~qNPqF4Al%p)w(#?Hp+m&+OF?p?hJ?+e=D)F4yNeo9!-=UPR2A9y*#%>2~R z90r}OgnhVO?Q`#!d>3b|&t6h|b@l>0hMvcEB>Ft^Hwnbef~H9Uk^ACvYp(tX5RCM0 zs#jYk1YsxUwL&@bHvM06Qkg0#6j17V6pqo?puc682F^&|uAa#ueq&2c!S6h6Pm8s8 zV#-=sZQv4ikWWSS+@bAgw1333&@eQe%h}3GOvJbMA7@j8l*=9#wK60n@n8 z#sQ~UAgNT*DC{FF#xU*fY_j#n4EHE~czcqsDVQUEs<}=GN8208nW3ehFJx%#XoRy* z-txoG*ecsdME43NS^(>9aLU?#X6B^Fwp3+y!lRQQ(=yVhzEH~0-l(nrX$>i{cS%loO%%{|cYJIHg^wDzpB0R2tH~k5W*{h8D*cpbVORx&ebTnl5-l zcI8O1mLZ8%$d61(7nPaZ^W~Z8LWdnJ3f}H1Fw5R0p^itY3ZF7%jdD*Azfor;Xs1B5 zm5%`K#~Y@%GGHbBu_ma9PVUlEvol=KI1OyMcgewsiYbs0j#%RE`Fb)fO;TT&sNUCk z)$IXcGcdNgukAl+o}6-rmX`YU4C7ij0WN~&tD6Cnx13btq)z=epwR}_(fzd$+rbK7&DyZg8)Gk&Qa8s6 zc$!*AjEPJ{%PC{q$~Fq#@Me1_%Kw_(?1accA8F-j3j1C*qi%yybjFmVcGLysSWu+v zyP!aL0DW2r!3!i|MEcRr*VH?K<0`TPYxQRre&XoQ#rdvjyXH0}6MB$op)z2512zqW z6e0(a?K}+&7DI~OB2}D9OlK4Eofd6qnMh$cl`A*eVau@UlltmKHCZ zBOK=bwV(K<+SPU{FhUBwn)+BT>ysIC2Az230TrTtyLTkGeZ1{bB-f+^wB(GWc(}9Z^8u3@#& zNz#wF^qimVwbdG1(kx*tIJ5DHoHiOa+4tcPL;|c9JLoQucL&BQ^~Y51yU;)fe4~TH z^#{2UxL4o(yN{`|DFYzt{n52B?8cMT;VXLxx7m}Rf9WAJE*sY$C&_PS9-_p!uvH1G z?NZe)bL26N+t)?m`v@~Y;q1KP`IZPGS1h+aX2Vxv(CD%(6Z61I>PkaUgpO+!Z-myv 
zm?p7TNwe2RE%GNAf8r1lRR%2XEOPj-Q@EbU!J*IL?3Br=He2Fg8+jb}BP-FNeOSO~ zSm&1}E&>FR6b^)oB1J3f8E4%2B+JsXRRD}A=KM-&;Ae>i{iAv`S5Qn0^pMov|K^Am zd&WbI730t@b=;~K#2c%-yC&{-?5CpBf5yMWE3PO2bTjQ4BmM6g(oDX4r8G#`bLHq@k_=_f5qedgX0i%tk)2^sXW@Sya?O83Y6k+-W; zfn=j2ZHwytLc!5KE$};JqCkfeoaN1x;NDg8UoyGII*XAetK{^3FZXdT&$XCZyJA>S zFPOKSmkmOXHTazm2MPqDUlh2#OA6zqbDhO2Mf*1W*=d)e)b)>^p=3ulSz#x|vG)3> zshb2Pv=J!!M;A&u!Jsvsb79VEg&G^@!mLXYbpU!#0FP5XWO44o%#>D&8Vz;0D~VB0 zCy*hJe;s27#TA4{occO4P4_wha|@mD!;s9hHRZNtAt4(I&05D)Gk(n4x9 zPEgw%^DxWqQLfK2FPNBd)5^q&IP~(24&~sqqO0QdD6*YiPYZlK_|WuV7s`luc6?0; zec}6BT~7@SP!>z2%-RZk`Z^-VPKS|a0Frb>U1vTeUfFj?wn6~^{%-8yg-(+_zzcml zT7|jwPn25A+sLoz*d<5uV#3C^IwHy6o3P@t{x^hD=>p5yAc6NH=rklXz5@tf2BF&h zMaBU)yV+Wwv8c_XL|y;ZT5i$d%cRZ){*)n?r-Q3pT!suI0h1j@CRlJlXMpfv1TqDf z&-pT;_xm{~xWo4@{BTCTQQOtVP>Q;8Gmf5pqrj7NeG-jhQ)?^*S@w=&VL?OpIWi9@ zT|P)eKEQ2gUA%+$FM_e^;^S)OuFq^*{8kb2N~S|`uxb}S$w(xV{lll7a+WjRkXaN~ z$0>S^(e6m-u6iD}coIg%YjGf%zSrpMD9`6*A~Ry~lI^L7@QH2P06gS^rSK(SPi}G^ zIC_}yK#pYs2I9th7%U`-%Su9gJ8 zV@Vez(0_wv3UuCj!btE)_KK;k*Z1-aTgitRE2rCK$*rtJZT4TPLeMlvjh!>eWe05| z?N{b&R}0Ap_-Q&~n0c`joU7XaD*kE*!>fM1mcFzjI=6Uo#zF02ELgJHmeQ^h>mMw+ zENuDj?BA%Xj0|x(v=%4f0Y6P!zIoHqOObK#{cF*7k^A<|zKMnBm_pSc=XfN4s>~9s z(b4;}nI@xKZnv%>I{K)&AX*}Lx@N+Bq_gYNZ1F}gSS6&t|3WmEd=?x zv4JWc2V#CMmLC^A&hF`Z_zL12I5?`;FMd?=anc3-XB$rv%V)OGx`FBxHy%bz5%eA3 z?t89y)>7|@4UD|R85-8Ip4YFCR1smePL-e$I0xW;wIvViZPq(^cr3#%#U}b3NRzHQ zM2w5v8TTqdvfWVyt3M9&3+hG4oHiIJPBQT*nuExsS*AHCnljRLtop{9+r5K@7Y7ku zP3D~qbYhO@1p%fm6|AcT2Ig7V7tb4_aDGjYuWS*Ma%xv|2rRTWr0a01RF{P=5`BL$ ztvN=0%KM!575z7v@DjYLRXF|HF(ET7UvW;@_#S>S?cnA(B-yD9kG&zMv(~yn+S=IK zfTaJ71{xk%t;7BIe+H}&EQE!8j_^x!0E~Evw+#LsMWe7t;94>YyHH=tz$z!LDybD# zR9s7tS&NH#p_HMa#ow{pRb(&HQG38Xk$KTaU7;gkbG$)Wts=qV^~H(icnT(t}-o^aaVrs#V9a=Fwe$0c99`~b0MI+D{D#9)W%`s@4p%)24}~V`F|sc`BG}&9 zPWzG6;W}{lUM^L-($lyvdG+!+SBJ8fHUldHFl-oI`v1f% z<)nefN&21M1R_>or7;rd&rQd)2ADIb1#iX*?ipBACy^?@T=YSuc&O zI0~)DQE%*dl+*Gn(6uK>^A3NLEm@c5PLCmulV@QThQgx`#nU>HQ|^oD3D?=$wd8o-L6D4 
z!zb^m5Q9!;GmX!+=u&I90Raa|_t%`QO-`bgQ%dx$jIt2ZPMN*3k}>+4z^q4i*+zNX zrusbVib7q%;J6d%nIR}PLYHW0LsYiL-2v>O2P?e{&=>}hbtFg&Jp9ty9uNVagb*}A z-4K{jM0O5|RlB*h`UV(*8kWEvPFyTUg4?Ie08JeC_IoeDfVEBk7QeQDLLA^1_pTfU?*H{5%Nx%kE63QLF9SqX;a}w0VJkRhaQm66w+6Nf_z=Y z7L1_qYCGu7Dkr?rG6$%_2{fk)#{O$5RVj=Yhd8~&hqTk&vhmx-JJ}^)VZWRVDV2Io z)KmT#A$OZWuhB;GY4=KfG|UeeS>ev@*$c;(jDPL&y>IU`w!6DJR+<-TlQQ*bbRXDV zg<I2p7kB5Y)oH%4Axm6<#52r;u!1di<`Xe5H#~<>!Vz{rh2E zS6)o8o;D~Ux0nD?;zx*5=h%M>X(Ih=NpaZX-5aSI{KOSVM7+I3(~R!pr8#b%Uo%%F zdsXx`Vc=#hT}DHj?&ehN3Kp%SE|*ERjZTXBnK<8&0>U?dR~dQ-JnSSW5UUXXma2@;6K$n**mvKmNlN18QYZa0@)=CtECU#2w2r6F%=@*qDj~g$i z;X#?)QO3+(^o~?&r2fAp33sc?OqwHY5<`Z2e&IDw=IdFMu{bhcsp|-O)%0NNSZ^2@ zaAjIY9W2QWKvCZlSOkQ*r7CpLMN1%Hk*Sv*6fn+~einbRh;`&ABk-((ldYXsVO zJIJ)3n4d_4EiP(py*Te1pxz~1hy=OVNH8Wn-_LT_K@e-xSqeV+ilT?i>Y=-*bo(le z-hS^l4%}C0cnYIlypt=e%pAg&pDfYleeCU~bbrX?rRDg;AvDB%v(|ur|0obwbr0HD zIg{ax@38dwLch7!n`eucS4HE=t5N1u+?Fw9q;RSc_eK{Nd!md2;fFmxZ#ldT zgF^uGupseH}=0X3;w(eUF)4b$OP#v?!xN z3(bE}QYnuh^0aiM@t^)-TMPBvwmaaraJL=?Z=TRJ@~@k)p7$U;LK`y38})0|do^CE zGRKO~?8o1m3yH8)$sYm2^gGuOGt}4Kqmn!B+J9r7=$soYTe`8Ln+ZM zar?b`EbhkCD`il^{dE+*K7}1PitI~ghv6|RG{80?cV5XwX@~zd1@5Y{G;X`}$tIk= z3z(vWuU7I)y_%qEE-p)-tj<%nhUYSzL6|ZaCOl#dBzYFzyF`@}l(cwjPC$eOBO6Eh z)8#fKWd=eQBiST?IFd%R7;WQdIX@%PZpTgm6D;)#^=CRhLh*RD0e$egkN7jY`p@mP zINrgN!eq9dw?PE<yLNY7& zH?d~O*{0gj!|umXIN9|i+CC+L){0MQ*YiwPABhuRqY(Lht{tD)?9APBT5Q1d<6_>*^o6&3-8?}oDj5cQ7A6|^^%T*Q#(&n)iQSQv^xl#WQ zu3I?*G)N_;oSPL~1<#Z=;r$JL4e;OAt@(8m9rc@n%H+l4X*Q3sfl`tO)t;|;Rk32;Z)vl$?FDD!Yg>G z$X-t7hDTtdaQltkSOL2tQ%OGpNXE$-eT?yP(V&)rdk?>ik6FT*r^J@##sC+{Fy#Ju z>vmxQl{IMjRqfKyWej3TK`)Z}hNWACFym5vbtyvZp&XS3uD$kJ@W@2D^2LF=oy~>- zQUvox8r{*d;_HMgtL86Ac3qoqK`2L>CSWmN-N} zG6KjN`ZeTvOJ-0?3*CunOOhWZa>|(Vzyx_plZDhOQ=fF8d4HnwYjK5z^ey>jjikT? 
z7#_}OY1CTvyqe|3k|y?SXzB3F?<~6jL=*p#3KG2U#n2Ho&%6v3NZvP`B!_Ud$lATe8f6c{_vyC%{C=i$#@!Hn~k!7#aX z@}co(PSNyuS)eteYi^drx-ACn?wEDad`E$5_uazpcjk(7C)0!KFUol4p=zN-|*ez#f0c~#6 zko4&f5b2M^4K{Db<+l2622mDjAtVD7Kq#EKUl zxq55Iq?wX~I6`P4YC~z{XQtw;Dx-ac!BU+@uU)o$_pZ!=7&9gKyg?#TZi-zTgvxjw zsB82U{pAKy`%Kf@i|i`AcjwGAf$8t*fhrd^^ntV#qrK@%OvLMmZt;(&Sj z-DNdNM~N+yv}a?Kao`Ty-yT~=e#2#T0BNIgLV66cG5D` zZYDkI&*Emf@nP`|GkhnGHHn;ZCFqjB)bybl>|!{s+8SV&5-Pa`RcJ8yOF04tO>Hsk zHNdWglzkT3Kuyk$C_*wA8;^Vo&zBs$xS4dD@|w$kF-&+GYuf{r6yL-MnW4`J2I=d* z@g^Ofqw&r9347rFEvL%_+JoEz4(6*?*zU3zCK8DA zMNUf!cdxI|G!_K)N2K%8}$zWp8O|-RjxUY%J|K+FAexVD@p!J@O@y=6P>c5@@sR zjLN!WvAdxO3Mc>$>=jP@o=IqnpS#Z5f;|3S-j z!QabupIg5Okyr*Z^eJ9bLsOxXc3YY;1&OWvoQh;Qw@5;nH8h7XHyyd)=irr_?&!6w zpg8_zMVfCFuET5x^vry$=fxfm%W+&;qPa;V^AZiS)wh{`N!(PM#BqUhv9jzMWa+ah z%BA2g+oi*cP)j15wYf4pGyP%;aDq!(@93!S%r3wRUgPL5)@q}q8OTa_PvyP$f3N3e zhpD(nS%yFI{L{W7iWfc6&;n-2S-tS5IMNCUE3j}KMp=TjDxlR+E3v0W2`KG1sOu+v zNgoZ$xDgMBdo{;QW1-tIm58P09GxO}wHY zME6iLf@HJ}bN`~K)Q7`x&a>s*i;bP$L8rdC$2GzzF+uu~qMxMa1MjAit1KX4m~xq! 
z*2R^#T56AE+3}zgC<^CG`<#GAyU+4{wE|!Hh`X;bpLl?>ap?2VbNMf~$U+FK zrHN_@>O&qJ22_|kep|pDL2q=hiZ$62sIbz(pE|U2`Lt92P^L#)T4bvnPqT~IiM_+`UAl;k#9O4IljBeLMGWcv=ihxPc8Cvn zMB6a60Q%BE?GXRKbd*est-x&D(|6~{as|#Ffu9m*Ey-z zQR)Y8g%yXVyF0S__y!Ebr5zcO{hcQhaDeLjFGSJ?@S%ji2iap4X!+g2t7_aTCS+8( z9~arP!u03x&tW$z-DL$q`=k6@EGRU$k6-}+Ft72fn`|#V1=mJ zr-=U??$z>KjQC(`wXKFv#W0SsPpa`;7)yd$4QQqw7yBb;8OrJN-Jpiz{eUOb-v~M{ z_x6hYm!kusQkYL8QbJO>iXJxlN1M~HBMx4eB(8e|E_Nm$aV5`o#zvq;QDTp}Ep17_ z(m&{2Bih-BL~DMd97^@v8!|{_PQWw(`i)+}f23-+WxF!(jmrI(Oh+Y_p#G zCFOAu3g}{$d>09y0LDIH7Y0sIG_e>5j@dW-WLH$n1#gmnNg(8FXvY3^pYs2;Q5oU`--HTH4;_sGov zu=hp0WBJ|apNF!0CzZR^*W&3F3pZMBwC0zfbIAm@Xg%z4sH#-2K*~TA0C?M=I-4jJ z-?#O6oYn_@VLf%$gZmTdsYxs?jycsp_eE_x3-@K$q3_nPM|0KEqt{IXRcet0W%2g$ zZ~KO&;6A%JZWC^;l?QHjXRwln+Dn@Ul?{rZQNziPFkI_fa*NcD=~|-D%@1QK1o6)~Dy8}bZ}&?KPWqm+jzJj$y~pRFZk_D^=W1OP}m$#34O;#-bAruo`S z66Drf{Y=q1W)U7|3^ZiSd{c?g+h-Otw%kVGJ8q?>)7exKc^bR8E)CPSQFflL_QJqY za0+m$&7!3s1@Qm(Yj~70-%?Xzn9!lllwRULK9}iuu2VM2X{tolbO1*{xWAgRJr3JU z+2U)?q;6fhpJ<^KZ@^~aZU%-(7(vR(H4T04{T*Y!xDp9m(#b^%P?(ls$CT9-4!A@& zh?6sy7^0Y$@o&6=1>#*@w+RrW!F>0Bt7!qDIA`ER*eo|C-f=`PF|8Qk+!R~oHg{eG zr=WEUo3re$q4r*ik9MipsyKc{#NcxiV-};94IFB~2-}`Gw=0P%g_e#wPL5J$YlAxe z`58aPEn%mH5LKasi=KkB*l;0!uT#G+QaHrb!#paA;cUk4oYxCggaa8b^$D)93!aVp zVZ2YJS`c*foYBG4D;A?4*OM0kWz71kIgdF=i4@5tuP-Pi9U+vAV-~owK}7`KW{=o- zhm4h#!K7=-)+=g=2%IGR?tTKc{)(MX6G&5)Nt?M`nT!%SpDqdH%#LqUf3goD&B6@GE0}!Vn&aVubuhrn0M0MZm8|{ zfSY^9W-d5zcs4S+9z6zAnTC#-rjP5IQ}&AH1x6Hs@OM;~(9mr&4jvhJYCg_daeqVT z8C6g5ui2HI`7+Z>nb#pHiCogn{0xM{nC}QXCUHy-{vgcO0bSH5F z`+Lv6%f@>=v0q1Y-Q_R#NiTkehm~s@%+A)19Cn`ozH*9&TdB}u=BqsYwHn^cw_k8Y zsq1@Y547_9@a-Qvzel!eTdA6#UmKs>SZ!so?+Mqv23l6q_aky+Pe2Xk6=F1H@#AwhGV|w*sijX=F!oJ*m&y1xbjXKS71URD#*5@B&Dec_ z06N>z=vm{YP`7be^N~(VYX0yjTpJ->oN>h-!6u1VdCYoU?k7zeVyn_v?F!|?1}OoTi^8@!-o z-khFHDnr5TDVxC27Wa-z%huRPr+vM<0rZW^=RF(O5gW4C_@cm9o+Z{OFB?Tw_3(8> z<5#?4-r`F(w)Y0?fa$>dMYmHa6@aRNrj(53uQ=59$JX_v-YD3@4zsE`XIm#)9@10#eF#QsoI ztsH(}dZUD@*!^7)+9Fhox2geU`*>Utec*??epC4E^f$H)8TbGM3t>x{P_7r@&iPjw 
z{p(mYzii@}9LXz$1aBLnYe;gwsr*@)EW*If$)@au``+&f-hoZ56=&!|Hpi6Ul5};3 zhna8ylD`Q2s}#?5+@^>ofD*&%`^Bk36c=MZtr#hXTm7TM+3d;);tFf59^qC zB`_WrKzL2-yo+*`ecrooseg;o{`&MP^5QKdR}$w8sE#uiI+OJ8bUh8bkSJ{TftO~G zU&eVWJo6=9ODL48q-O2!s-!g*ikw}+>^Paqb~U5Ft5&Sx6$si!ZI+}pBI1C63)g>x84Me|l#M3m=jT%1D~jdX6A%3yN<-!ErU@>6Ar zJ;LaWw~WpE)(ppujaTWqgIS@D9toT!02gT%j00!zFI_L1v$tUZr*p!+nC5ebMy=18 zvDRudCc})MBDZeRrPf_9C{6~<_n_yiM&67Wqv6lrG|fL)^ZRq}D#70H1@9Qbh(llZ zrURl8?!WCvH*)(y=Z>D&mIXX5+ZLfU!uO?C#>tUjf!H}QD=uj>4J){~#heItQ9vt$ z1~MFvVQfzK8kd1>;hVn?RBfzin+PATUg>Wo?=A&}o()+~yQ$B<-*Opts!G->-1#Eo zl*8!$6KpVe{V53RlKjupL2y&6p6!V!?x_V75=BV$3?cE`C0jdAc~-%-YA)oidIIQc zrgufAI=>HUXB=j@;1b_(3iPh#Gz2k6EYrQf#8;UQT-;BU+)ANkOVQ_C@CnpcPIIG% zR021lMg4ka0Zye5gH~Gv!%CFEQ%Ra)9o1JO{$+Aev=h5orL9li&^lxNEY{eWt!4~^^@e0N74d|u3yzRJ5%V@Te|izhSdZcvz({IT`# z*c6-Olklv2(FRRFaos5ve|ZaMnymABOe`)M#@3)$8{}`N znr+ApUHUxSq$3F`d_aXoT+8}%jBQ(vY2$(XIk-!ss$Ie?))%gya2M7oA_Sd=P+Iy> z>H8Wi5;>%Kc|fA<*JXbnepqfr99si*mN9E&M*FMYz52SYLX{ewb2TO~bm>vOg?N_}C?R(~n!sz~B)Eq@W+9E1gkh z)8~}K=o1xT?!*gY2zNIrxqa7D58O*ekcnUz+EQy+ zvD#P42XokSxTZ&ddVIhn74w}+iGDGB;Dt)c+oHIsLzsRpA8aB;rDK|DmCDN#$UFn* zv?0zisoLs+DGitw?Vdg2{*nZ!(O9J=%sTvru4ZtNZJPul4Oz?=%BFkt*yO^gS{iIQ@*eZ+D4PQ-i z&|@ffH02;|PVSTfOZLw|kTPKrbN94qZgy7^f`?J$PYZEq?=9;| zt)jVeqQ|odX6ULIRrZ5tmiA%8e z4ID}H>*+9Y-I~>PxW$g>D&T25Gtpc5KDQ|n9!!wvKseZKB$c+)OH**!F$PEBqA78x z;dI12z)!R^zaCPM3hXt1?I6UIDwXixV1mduZ%1$S+vf($bepWy;eiw^0pHI=(Z}?G zWZZsCgyq{lhkV3$u3WVurt2V0B;791qN%eT5KBYcqtqEGAOMLAXmB>wBL;2YGE?}dkC;YdxOc)i$i z@iRm~5C_uQ>gIdS1cl!hF>~Lr;wL+u^erS>0ZVL$VB-|=x~rerN&2V>3AtcgX*F*I z-YAgD_ohta;&<4ddh~4|aP3`&(*!Wqjg=WRHFpk=(6% z8BNvS(%dvf|p3usulBL()geu!F?&^eDxeMAM>W5!KSb$BdS-g=?$j zf!0UlQ3y2XICT<9TO^J6n+UHDfu_0ySp5QDKa-k8*xzy;;owO;p1LiY!--|0{+$+m zyPRX%97Bs#2Gyvg+stu$vTqln~8e3t>9RP;8<5{SU|7wPYh zhFiWzES+Kg0=&DXbp|NDLN)&zZh2+OJ*KnpUP`yFZ_pedUO^AvHUgWNIgeBfU;(q@ z>eL|&_Vfyc>2RR91PnH$>IYx|B_QQ%-OZ4#QKyhxmes_YuMa9|H;MJdV&jx7S+F8l 
zpt8Km@3k%{V_h>s;6ouUNSpN9-S)|xDj*2@Ib>6zgg3k68TzdH;Q%)HtxL)3AA^7ZP_%^V#v|+;SO6a}NrkD={c~9Ayq%<=5IC*krx(+K_9_N@?0)5ULky307jvVikzRYf2lA)k{ z?Gl~?uu6#Fk{^^I5uwd5kcA6U$R%qWiQlL0pU%jAMjUGI-pXr7%8Pn#h|Yl>{y)qO zl})tAr-FIWqMAX&RuXQN15QJSPLO12ns^o-^p z?gsPF#dsX5?!I#us^GzzE#ZxlmGpFfAXxGAN0D=|8y%VbCtvSE&LfugTz#(PG^`=zMbdgHjnTZWPH_mb1iCp&E^9b$sZKr#BR%8v@1&FL%K1p_B}S^Kc=s zSu1fa(+Oo^B8%dGZ4D|i-m4!R=~U8KnYPDjtg<|_RwHnQ8cMLJKW zI5>_4o?8er>WP+GMvLi&*l(7jX+un*uFD8{onJ82YJJ)z7Q|!nKbzTHlcMFT$yD9e zq^mvk>+M$4QjV62S;Ftgu)}Z^45MVtY48mUriMFc;A zlrVdoB2E32sBjIV@FkQq=umEAxwvsk{({+9;Sbg6{MR%wFUXZbG2c&Eh88%N*A*+-=RlLfZa`Z&v|v zdRAs2Xb*3!RA!BAflfXK^=k*D?dA%jA=k>OyTJ8Y5pTaVvuZb|JJ=n-rry?=XgR4+wK4h+HoW zcL?8o=NEmKim!ZO8e};#=_3rqwv^zyy!aFnuR5a@`*(|NK{srJmJ75%v?N-p&p|t7 zPij`;Cndn|18V0`3ua}b*vs&;wummO;GhMUo@qmF|31c%A*e=BV;s9zqZl? zP>8O^JF5bRASh9pCwWoLpwodbh15Ga2m*$DY`56wmp>kMkpES7roa16Ean8N5@xOI zHKKicO?16EB!(JjP-z0^TG~CJehZ1l+3gJ?{@ZB<3hc7p zDGY5X8MmTfW}7-l92H$;w+$t?vLRk!x@jUs?@K=7i!s~%eu&!(0LZnb;RpkcHJJ+4 z_LznUt_yFklw5EcTv)y9I$z~4G8(t(Yqx;0W|R@-QET}W>qO6%))>RJCUNjIlsuD_-}@Rd?z%IV0q(8q~xT}8@s^>wovi7(ZEnKH%S zDsMS0jtjEK_6t;DBQ>TX(2&vuZ8UB<42!kEzthT?Bj}4^ja=YSk^+6{AW`>Ih_bH; zU%_(r001EZRq_ZTK)gOWsYR7@uz+!Dw}ctGlwmjxB*8dgc9mg`JKp@D`dX-C61=KI z9XnXxPzOGH=4Ng5x%&Qogx$mx;|Dm=gwSwZ+qJ0Iw)F4lX;~0%R^_MFa0ll9TPoi* z_?2H41BP0Em*JlTv^Y=EaC@%OCjpN;X_v*=F>VF z_;5@iL&AgnE*oXxlb7up+kE#D=lM%`e4{ljVB2j$)Tv47nbmn|UkXw%ya_BxRy{&$ z+$b~uB-5mzj8&Xcts@NTP6U7k0*$iVryIL-ck9<9nF3|2c1p#?G1=S+ZvTRr{6hOu zygX-5byrvj7cnTHet5a6y9BA0y7RF35?v3bDpV1I4Agma9d}~EM%riia%A*ecxg33Ot?k7i<0B$ z$2D|OY3vc37K_%YQvz>OIrldax8{r6A+?Glzixqxe`Q;0_k^6_uUXBct3smEZ+KtpDkc zo?TDoIxL3zm!zkf^pQ`UHXqYG>JhY zj#q`on;kXS6}^w`M2dO^1%kY7G?ovT1m)AU)7+KqW{7RyWsi-bUwp;H;^vpre~C1* z&D8JsNA1D3N`C}6D4~bD9GjnY^%)Jf&`P!)P5?GZr{W)xnRSAtX3xC2Y%KO`1U}Zw{8c{8zqe zxDMdL7oYK%_v22%<@NvfTc;ltD#2N4<$bQRn1e`Hw^X@1a-7nJdNOHhOMpaq4xV6C zH>f}Nw07N@@IC-^oA(KkJH&*))tQ_3SnnQWIW(mCBl8~)6vBr4zc`gUGWE|>cCtQ7 z8liuk4M?BGSa@K&nFdg&*ZX~VJ&FIE17E~qAjl@X<*9TEkEVvM!cydN7gZk? 
zI?x$+xR{)V2@&!e{mBum!*AjQc#UQOgH%yZiFo>dSoHT=9bDW@4DJDav}&>WK{&6c zavPqlY8dlmM@;;X7jnZ1(YKFgla=?0jOa`mqgW15$WshGR!Z*#Xm|y{@o;8Ar>7^0 zsYw*wBNQNk%i6&2J*>#XW`(kc$BO+LkT;kdf(vo`FomaoKQ9_WDQ!58xE6v%r=r^- z^`F5WOZiRgdj$W$XvMy`1WAN8Yj9p!Qg32;wNiB7Fi-}N!-z9Q;w{D`Zg?g0Sjj+M z9i+8{L1)a|m(w>XkRXp3zAg287C%%ysyXZ0N^c?S5f#CUnu1PSyjHCb_MMbX=nOO} z)Wt~NPFs7Bw)#D!g}g&ObV zj`&ME28m1yG0bD$095X}G|Q}e$Xk<`x%B!B=bMO%t_`k1tlLIJmM`$!;{nQVm!fvM zZF3tlWwL=0jzA-oMA$F#rmPNGglFZLYPPhvDT{SQCCwZ=@_XF*Sv0m*3|p8A(b&l9 zlN>b#je^Lo{>kV9%HD5wyYv7n&=vnsv5$-o9;)V1!iup7mlmyWvMu_MOtXtF>mp9w z*PR-D9pzO1_PCH#;+8)@EN9-OMv-`;8g2yr16?HhTM2n2a5L+&!mfDwoTjd|bV~w+ zL#dWVC3s`5;8yyWl$7|`9U9O0dn@9Y7=8tFzfXy`Tc5bGb29(<&^*y=v)^S1uGN=W zdx{H8ugBKDFxY`2kTxtUGe6^QbzC@*mIVs(qDNC@|JGQ;+a|w$`H3mW&mim z67LCr#d}hm+M{=-U*)69i_WTXe34kgme=<|j;FKN3g4CDR{BMEL(=j-BD1gLZSB37 zR`)u9mPP39gAolf@SmK2Pr!7Mh%+v!pBbU!Cll16>KlO*q!ddK5m)mu#lKF17cZ2o zcqkUGtO%ak%RSFcX(xv zR9QC^tli?ws{*lxInMDFLyF12J;n#7_;aAV2a9J?xjdpI@Dma)e12#!9p%!(k>3^V zuclFsQIQ-tJtoBlD&n_Sx`Nxk#437}l4> zy5)5gW0Ej>2@02B`<uZEuapNO zS*?e~=K6whjUuPgD!-$p3HISgY8f;Ux`Io*kX}Uh2YrBiHW7D8L-lQ`#o|gmT*9r` zC+JQ;d3Fk9ug2gn-LK27J|=JYf0qSytBOn5Hw|CTuxrpBErWABd_pn{fA&}zcs;`4=H;45t=`;+<)CCyXWD8z z`j+?hvIB~#V~^??cgge^8H6RkO*rzCOvfpBwl3-RUW>SvC=qnPbkv`Twhqi{{<-2xgsH{+*l;>JnwBe|oeRAn zl%ct!H7-rh#33}xM2IdpaoT{i#=E_^);T8KsQ>G$P^{Kn|2`E~TlF}%J8tz6X!9<@ ziV0fSgRgk-n#`f>CPW->>$SUSL~+-w34gvoQ}w%z;FD#Dk?rkZ*UVp{QBesoO1 z6{$afJ5qy0heukv@z6aSb|K=Vn}80zzp_Yyd!~;Me6i6Co_d$qK2twmK))Q39n|Gr zlBgw>nt{NH3IWLBw@N-g|1f_0i3w#$K(BrOmbPQ3z%qQ)i#&22x(>}fie0Dmf!vbq zR_WA;LfusB1i|uCogpBF&S|s5zFh9D1K7p^_5LTdW4hV}Z-?07eBeR(j z?p{=dp7f!DMdis`;j_R4n&V{B3iqkU7K`M+l-g6+ar#?|O{+&6wH_!aKQBrw@cC0e zPsRxLwT*8Q5aI4>lJM#0S2ecpoJbw(&wz;LLAcMFhZBjb@?_P(J2VT_Fuj&eMPk&u zLu8`|sT?OX1c=dC5mV!SXFl}vU-h$RJV?3`>N%j^;}ljRaDf5UJfOR?W3$5C+85^s zZQ)x@u{+9OVHQfu=<;;@{lE?7Js-BeIsgNShTb>QQkif8xd?GIx1e+;Rq)DjKE~b| z3IWMZE1MIrI6>Nj-gkITfpbIkPuMM;tB+|)HIhod{~pLt&?40x^uYXf#qdYocYN>5 
zg6YaJ(=4N$GRb?T_8Wu1t|xw9oh1N{+_%@dM~l+AqJKOVQp-fe6bs@maXI@CynHY`oRTY|e1QQs{WLSRmm<3u%sR0z!f z8W^M_uE-)3^U&oeoRv#+2Jj{RKvmRAi%fv^pT#gOU{0k%-k4;Gyci^MdM5Zz`NDi; zrm$#p-$}D5?NAwD`Z0^=d9prFvi2}7M_+cQ+O+pxZS>jBb1n=7b@lQ%6_7b=Qku{ z9qRZcjrs%vx4Va$F2KW1rojSI6gyJ};LswN`v*xIQ^$18<(Ty;kd^IQqD#XGx>V>T zHPKJum+?is*J}r`a%)CrO!jCF%-On_0A0y_1c#<{qQ#es1jy>o%1YNwt z>)Kdb573gIjj*r2jbM;Yg~aZiIbKtteUbv zZUO^#AH>vf;DL}~AOB%)@!lz_qHI0Ih7V;K=X-5yKUn5bgv9kt3T z$0SZk**fgswU-p!$~dr zN+mO?rZ;E>=Fl^Fag>9HXw_5q)TA`=lLK2Gwxkq~4DpnZV{&PZTuI-c(GU$5jxVb1 z_BJJ@@Ie-$+(}MCyrXS@|MDdEYs8z1#gia{^+b?|`6ijx%)%0@8~2FI>}M&(e$~t8*;JNhu~+mC+K^u#@Ie zT!OG!2RQ{sQYYfB*gD;A zvx;y|LEhZX_oe%)cnM#((fV)G3*jZd5IfY-QYitv3^Z?~?$yP&bN7U+HDjKM#L1Z6 z%L!u9Ww1U43JQG9q(7cSnY+pSHMn3DU3rPDok|Rdkt6T|(Vad$mcOlNdbS7k#aizP z0Gp^1sZMn4CX#5s{qsD5PMdQ4e2Zmv3-_6(a+K#nS_6mmdfr)sC{h*#R_$wsd4;Db z=ktwqHZlgWw9R>vt0iUnY)1!hLGRNlbYUMJj1w*F?7drOb`riw*Qm~0*o0Xzh;SM) zemCg&A7cU366htNppB*N%Kd~|h`yMaAb3&l2kw>|`1@|P)nZ5!5b8DVf-SpC-fL!x z?7yhLo=(`N5fRKJQVOV~9`6`vwQxzEiK!R!y#^7ShYKz^Q?3_=ilG7i9T1#&gcdCB zQ~uO@b_q80H{|FF05F|K+iy{C*pb~7Z%ToA4W4eM*&;}YHnYM>cp^`wxc2_+a#frZ z#k>{4AixYOh`i^RbwM%R=M)bptOxhDLO5G^OqB+>0$Jf542NiTrkB9GJGL)&#UTV< zMRaJr#b18tWt86Ywl0<%U=z?_teJroM>~3q2;{OWkf(Bu@grBQ2!9a~xXlF&9~2g{ zP^7Y}@>6z-Dl%zO@$+Z_We|MhU~H8*>@TAZi|oZTS1KXKvxbw9aReT3ay@RqZp^Jcu z-CU?$Ne9vk`7A-6?iE@v;k?)^*{M-hoWIhetb@xOuVzqZ_=H7rTErp1a_VQjaK1aAx z87h0Snm{{l#v{7gS{WSvkF7TF!>3nVpKjQ3-`t)qKk*nhI>peXqY`Eb%C0!w)nB2I z5XX;l(jF2kXUUK0g^ZeM0OiEW+>uSOKu(eIHT#@@I}IIpVx?povZX!Rf_#KN6wlr8 zn%1;`dfqXFj3M=GEDx@CZXpI*fT?pY<%y z=Mdnnq@tF*Qxg7)rmBJjE;Brvve+CmSde(oPE#aAJu$Fiks#f9!9y7caa|<|Ta5W@ zxR4zW^sH4BD#dL8?Yd2L`5RtEzZ1gxYHoDGTDgm?6rpY8%E?aq)fgMM5U~(D z8R`}Ewysd&>RYP3P^+diE}>sovW-KGb>En#_zUW|UOA|5qn7n%QH=U;ACammaps6_ zQMZVTrip*o6{+doaq(Uk0ymMDtN8~elhkeFG;TE#f_rABu^h-579LQiqZ|D#Gm|Ge zRPn=DuwnSo9Gs%2bk|(=i??OwZg+r;YhT8OuI=!WP1Jc?J>xEVj7MoiuX=F5w`7U!-uO{2`5cNx8E0 z$R)WKe^g>=B(EeW$SXg%jao}Ms=OT?r5<}&d6u?v1H=Y*dR&Itez#n(fJoZV;?(W$ zZhA@#2%K?X*$VKD`LZMgB*#85)N#4m>km#?H3QBi5@L 
zmb@DM!B?!C{}UT7luY9?uA7P~Szkr2a6~MTwa+?j{#y<0tOmy-PSW1j3en(t>VzJM zOM$^QcWjJ(DjMZ5*{v`+KK6yE(`VdX8lZb<#ScCj%!{XW-TA#8VyF@V1~FW1oS z2-I}BM=Nywq2rr??_2=Ff<-D#+d)MW&Z1ZE>0zUncn`FqJ$Ulb$=bX<(f6FxfBd!S ztb!89j$!Jh(}fd2Km!Qis9uE$Q&@xmga<*^1%q$wtGVj+{OuImj!gbU#(VwE@4PG_ zcB__C{SiWjAQPWZ6(*F17!u>VN~(JXT0lAo3UlX4S59^SBbWE;s*SsPt*tJf=6AH+ z*m2j)FOT@W*E)QxELQo(u#{qf8u<{s?*Un@5t<>Az6x;Vcd5{no;nh65e`jOv}f;9 z6*I!$U>e;mHcPBM80zgJNeNzh-$QVo|0-dBQM zNfWr#pz%6Z${cOC+L{}`5!TdK7{0vf(-%~db9E+vJ6uPZpYb+1faDEN*0P zxq4KS^8+gtmdTR}ucZI5FTlMAsQB!Fr8@!P$0@0%C>1 z2hA^W=FOo%Xhm=|*Q3bPXPuE4Q9ISdyVy&rK=HHmj6Aez0dG`yHrT>^3R$ck5uqq$ z&}?wa_-eKdg7yG+k$b0_?p@OONE+dIcv!k&D=e9w$WA05%;|ELxkmIr*h}h;u@W$| z&M+Ldi}zBXuvFqzTw$+6SBT97>pMRIC83`!Z7Z-C{N>P?F>RF8meS^BWS--9o2NG^ zL@UUW@EefnSR^upqsdu!kQWi^^Z9KVRob10y(p5!^oP{Z9X+QO+^T$WpJBn8MQrAT zl}$BEy{#@g+KJ?YAAbLvvwV>!t%97iH^UWGoc`juy8yHh&1lp77CURw3& z+c!!mKs7c7BAle{CW#$Dn?H)Dfn>2Du$xIoD<#sxQaYF>I~8ukUSLBWu1t{&{>%s; z&rN1`g(-NX>+akA%Hf*p@b(3U(8fq1Ad_)p3O-3*9JBKT)FGh}oyyp6g+^1&vZWda z#)3`>1VmQxxD=VKrF`HQ?v)qMRrKfv?b)2#K%}u{BSM#%WaF3lvLaAa@=jG~B-5eaWBHInqohW6$&~gI$f**9PSi`r4}tf?5b_ZVl6Yaz+0DJ^P1m=HICPN zn@F)_NUb!iVdnIH_bCNS6W*KM7l#`+- zNHuz~bjbLB-Q`1^^W8p0gJDwJj*+Q)l=*^EUuG9k;3xt2ED68U#OzlW1sG8TO|)wJtr;s58=KD~IQ z6SV*z`wIT`$wFvMFW(uX+aC7$x7WITHDau~u3?>3fYB=nhI3#i7>8|>OrJRc=0o`( z$A9iT`-172T|-A`q!Y_tv=AL=zRuwkOK6M$Gv{ zs%O|_a1{UJ;+X|p-7s_LsZA37<`O#h7m2#KiVOO+ z1f{%xmhtkpZUliW)6M7jCn)7Ae#&ii@Y&Dk{_Sv9yAU$ZD2v3nTU(CdkqsnCqQa12 zCD1oHI7W}WA_};GlPYn-ni$ZWV=d1kJB#GvwS`4ITn;ho*!h>@`v{d#q76-FoMEqu z@$~-q02KbLVXOw7#e5`%`G)DCfbO7Mmw$jQK7B=0*1Jk*5IE|@`s#pA0r-n0>Bwys zcMS;xRH9&klhAJDXR^i#GQ4!aoF%4~ZBbgD^jk!A4=>!wvPKha*Jv;*e4FP^GJ*3g zQ>}VYW^$DRNWzUci}=+y)%{oNs0QdW^n4o9X$x#}Xci)2wToguu@NN|+W4M>#;|!b zDF=m2#714}Si_k-RU6QC)^mbNnix5k;nG?B-Ifcvk~m0Uk=2}UEqLbccj&;sC@HK{ zEiRc6&F*XR>|hMRx8JHA(r@Ge))O2eF-sr#5}MPr>9~tyo;H%AX{v(FY$Z0BKBMfo zM5}xn5_jK%&vM{Wjab+IR7QTA-9eWtkx#Y^6^FTQtr~AWJaJJbRuI$o$}c|?O*cgN zBv9J}6i|u6!P9L>xpAa&=$XRf^;kvBZJ%Lb52C-_L%~X2KWi!L=Z<89ql-RRi_I&u 
zbzlO_3l5cZ?kBS!6psb>Cr2}UaM$~0^T8?NVF}hkcME9ZzkN|K*0lV;+eL2^hDI?% znXW^fWt-SZ<+BpyQE;dUCnGqi^#Z25+B=J_QWjeq*OI8n$I5&oqz6kPp%!jxon`A| z4u?*c2Z+Kd5G(i3SN9IS$KOF+rW6G2w}ctQ%^(o$D)%Drm9RpQV0gBx^xo^82eNxDrI}y#}F+R`<^dnyd*J{2> zH0+=tIA{6zgQdy;LO*fGLI@<;@Ro$%GE86HE7xOAv2o~-vZrLz7}a2mbkWDPvX{co zM&w$aXu$332OG?*LVHcoR^b(zqfo#aHu!5{g{mu)6_W_e7D_1cxp>R{2((K1gZ5s~ zf+s8WBtK_wRg7j`@Hm#Pa)G)GxFg{!g~Jkpu5sRuOj;4`XA-E_% zJ5m^5M*nbNE#$%Dt=JKkCa6bX+2bhyhO-(Lw?bqR>)liOLb9i4qDQL!_ql*MU3RE- zR3<1`DHqt1GneM*CwEEC0+7yCFp1*!bz^}AM zAvAP!XqO-U4hvrcplDH%?=u^UMk<=@U3Kz#tiTL|Eps1adP!Ry+tM4(w0@{IZtPf$ zlqvuBnuiziq@_|$|J>FrN$eM;*#56`4$C*+&Abc0XMgVlb0^C3GH7`!t8NQO_CD6#o(dW=M| zn^uSM={PUO85|>#NhW-pL=)q1zb;F}fl2<*e!F>4!mJM++?^Wo39W1^V!lrSHC-!a zsMGOn3&L=pI$lU!qtniu7hb=o`0!^E!XIJ6`uv(9kP@fvx}p*pEPA-AWe$js@3QLG zo`Y?-Y~QNuVXdS+#3s#UU^c(RxWMu)bW%>60e}qfc9;y_~d^b2#%D$ z9f8EuTqSCO6h^lhR_O1LAln2r@AjF-${&(?`82!7nyu1PEx_sQMIXF5KU7u__3r#F zieJwr=>7>klJAG#i|>A@6`W&W@fYyWI$wr%$by9nIE_3z-mGgbwyarIm)xyS)vo$j zikmxfZvO0ucawAvYJ8{tvqQ>hF*T}1zMDB+vNxa^h%A7eQpwEUzv8wcH%aOnHLskJ z_>K`w6S*cj~E5euoZpupK7?n-4Yj1S_mt75N!340bQv!K@%G z1hX3X=GoV2=RYE-XVawPC677JT~E_}(Ro1!STeE%{rmpt^g9tNR6}&t)^N!=(M+~7 zb5PE--<&9Im(LNkj2^3N)V%KMehqsX+y19uuLm{@e1MRwElLPj5~GxaGQLQ+#a(0+ zUCXm&)U(*&^vAuAnFTovjs8B0vzFJ}dH3iXweOo!q9LxZ%qrJ;aVKL+<^=03g*+<8 z9@=2X^Vsa&%e*TEENks|SBO7iCdCqfP zyks!3qH{uAMZfrI8aOG6H9#X?9m^hdI$r{L)I_$aH)`C0Y06x|m#U?bU8kz>bOjaN zqU6=^M!I)GaL9)M2~IMwT&EywjhkuNYO6G*q2$J5`8&P!5(tmjSZW~x3N>*cMhLW$a2)_3%;l?)%+$#SN*6MmbA z1V!lh)pZQ#@Dn>OnhRDv`GmGtSEKBo$a5j{1BMB0N+qZLpyGvKY7D-D(eXdM8)?3e z$U`t-Sx7HoR2k!H3C`qnWLcaS{#EQsp)KuDpGDF8V@#=qTv9<@Yd*+`lpknqdOX$lt3A?Glz56ZOOTK60f z;Iio1FH1TMheEe1yu|DNu8qUxDtdsZ_%3f$0A&f{EQvwg2=SX8&g{UoPb2fxSB zzjND-BE&M0#_q$oaKY^RB?*qGVJ)mzWh*2O`5o$fT%?P_VItDss2Tzy*NAyMIIA`N zFduJ`$Ac*Mpd**514dlTH7c3|H?RLkD~f>JvFFxK=j}{9HTnA8`P)B*gIFGrE3F0& z5i-VsASY%$14~7?O-R8O`XH7&FT4p{aCyYiYw)lH;m19+C(9cRlfBxmm4-JumI### z7RugKDQOU&6pRudy$Q z7-a{HB5iWcqbFq9SZ%oVQox-efiZQD->HQgMsYfjTCqy?oniBPb-fvr5YVQ64juyU 
z6buOG@OQ0w=U0a^?9_j!Bg0ql%pt797vg^o0|yN#Zc2PksAB~tvwD;sHs_{1QT|#> z#u6g(Uq#Oj_@8#)N?yaFS26y-zA-#fjg5>iJ43w$tifShD2eW4llLwZ1hmxGBn0Fj z|C^1J*3V?oxS!Ir;c+sE{AYR2LYphb11C<{xwH#6U!4RIG|h!rB^;~tBzZEDf=t`I ziWc2uZ^n_Xfs7afl$upUY#>uF;UK6c?>C}~^t)DV*-xxVzo4ORJJ;5k1gKz&B}M*BE97mGpEibVAz*DrrhI9S~z!ejA(@ zXjwdsI&8L`Su0K`j z@lE_4Xf@&#!5PY7j3y-=GX(5obnNH|Y)j>03#qT|EO41vN>d^+&TWFyvtF;JfqE~g zQ}T5MW%RKfPk;#*YTY#)jm*>E;<}UuX%ED3ckr zrccJ1U2dCBpQC{hpm?2RMEfB5*nK?R*hZoLu7BkSB`WDDKL6Z|_@uV~gV3(*%>$4v zavm`%WWXSJ%v=skr5)UqQ!&MX2r$W;-hU z5|I23MYRcdcrn>PmLR{BD2-ex0f25B2!RtTw;;*DBqvPA+P+=%u52wWXNT@3>00L5 z-T?ZMBH;$aq29q*XBiuA)A+e+FKBv{Qb4nTlI}rVI`H8mEV`umxS3MWtJTF#YkyJk zpnIuxhGstDErc<$oMat$IQT*tQ@^s76{+ccdS(?9olv~LM{AtMd&0~!XP`7jw@wvv zJU5--$r4}Oe8FeOZygj%s9rG4TYR06@_sILb)&{BP@>jlvfK4E3&2UbTLr{PjOd+M z9!VF#nhowsaN3NL7X#2pMGvYPuy5#AM1#JxniWbYI?W<5BSZ}}}kM{!I zP|)uphi4@K28&V&An=|qt zwbA-r@G^MlI?0s7qs<)ko}Lav@HD5+CG??Kf5a#5Aw3Nd5#B{ha+cD-e+o!WHY(oz zC9f}mDJwYpkRNDvp{1c^zzXBQS;Ej+VdqX7tWzOXO-5pOn{u7XQ+%H%(BmNl>27W69$aJ#F#r6)zhh~#EY^4Z^`F>Oy%%}Z1 zidhIq1(RH0sv3G|YlU_P1<3DpoFK=2^^X>ZDG@jzc6dK*?Zv|L2{MgRE1w>82uu_M zw}+i+k&04veAG6W;#u+XMG6`}2K;PklXX&6aiM?R{4{9p+t%7*b4_+N`}A}gA|Gg@9Nt@uJSi<0| z6~E1U2}3sI#WxqXRlaA^u61=pFK=&gj8$;yR?J31q5Lqkv@ad=cJAWHrgI^RG!66P zO`ChAPh9S_(@l=~@zoVrjSKxrT;U+LG;UC8`^ z?{kaG%sCmSkkxU@{kGXQu<6u*CgGi({tj10}9AoR|preH**njT-=1v~%>;y8S4-uWO+wnKHJzZ34VC z!`^=nbNhG&IOb@Y${osx3_X#mOC><{Y>^vFWdUxBU(1!%^W|Z`gd>B<0ta4vXDzNT za1rUAo{6I3;4PpwcilVxi1d^u+|eZ{qIdIUGoRx(Q$HF_;r2IbLVw>^f=o~)%wF?y zn1@rI?7{pdl|8ONNoeCY!|ffBy(;8f^3J7ICeE4Et8kyp?0aD#`ViCv(4Gw%Xc%JN zL1MLTqxP&F9Hs!rq}r6&$rnpUjQy9aRo{zK!Het-Yr2z3yD%vA&qM@N5Dj4V2{ceq z%^Ffe%Lo|6Ygk;6x$;};M@t=;?p1h4`9qvYf0Rm$I-5XIXit8!7-Wie=X07Kv39S% z7Pw+~q6$igY|us44$oE|u?m6s%xi#($^l@8sD73R<%@NKILEGC1olNoAGBx}9D8i0 z^tbV=IM}VAs>I*zIXv)!S=8q0TO{+=B{*spm`G_X67Uo*naW9D84j~R7grAB!{)w7 zm4Qpp!yASFZkGsQTC&?oC&K8DiiXflaI(M6Du+_BiXoSfq(o|dL`w031ySW>e8jr} 
z{Vu%^vHf2WT)5TdL{`^18%w0)S87T?lubt-&00}q%TcsjWQX3SDJM*U1@4FP4O{3Yn8ljLi>FCkTqGL%)KN4XC({S=G2h(C z{fhYLEzuNx!X=IER*aGK*^=o53{b7JxoXNeRQB|_=+VgmuV62V5S=)$!$-!OLJ}FD z_1L_v-9#_K2ihsjFw_Nv!jMIHV_x)c7yj>Zq%ZeD-BrU66&xdKF>Hf1b8LR^LP6NE z&7eG%7%eNwhC~%~O{-Fie4^h? z(=tW!T1C4hQ0iV(jk|%JbhExlD0DSEM1-Q(eH7@ErUS=>J|pJSY`bsUttmAQ&~3WOPAnejyz_} z7bN%ai@yMm+bNxegae3HgAGf{OS+$Uxrum{qTO_^usNPIZ4H{BVSxrIyRc$N4vFA^ z@SA{Lg?}~yMr!Q-wC>ePWK`&ObE|2}v~|1)CNPE>{PeyQxk0Y()|grLlm~Ro+Zf}z zplJ+6GPbA?!!FtuEls$QLsEM+w zIe;K$sdEEe!vjpuSgh&cXI}D`!o3O8Lz0GMgjMROA!PU~ZI(waT9o8sK{ILRRF@4( zmKx}Mc1x^_vNg>^HP zBRw+F-npaCyA$qCj_oFQlBah#3RGR^gMf2lUG$2xYkJ5EZa^r)*yLniK=LllP|*tA z{`-s1-%3thML}`Lh&$pFBpV4*f3#mE@cyQdQl=4$>aDbJ@A)eu@ydLcbDh=&4;rX6 z$hTfen`PhOqrMBDFXi0ZhjJt;@aAE(H^;J5C!v5h{oTVePioYO8cBaLzTIez!1{fi z_I^X*RjCrxvy8a`KGUI5BB+cUo|+J*W-j$wUnP<(B;rATn&I80SOC_JkbU&(I~Hpf zZejjgZJc^1v$%AP`HkIMk|DQ1%jUzL!+{p)Itm_1xq80BBP9}KN$w>xabQCm% zA*>0bQa6)(P8|MmNlNyWKSfcP_w-%qLIg{z9I^)3-@||f`bRwWqNz19C67x&a^v#y`^=8=2980jjNkKgkq zwLc{l6bTh*J4Y6uUSbKD;Gc?Ntyzz%L{kH35^j|)%b>m3(Y{rwm^ZqgdfZ{P;E}ZO zRFQdHuY1tJ2CY`ivgp*kr7^}KN#MzLp71Yd@lA+;xuw}TCtxU6?UbYH;O^~`2c!?K z%9E#b$OFBHeD0-a$^J=wJ;{bg_?qi70-#p}fi+u(z2 z4$7ulU-CEVZVbGx27!7@AnXsRDsrKqWv%Z~7>d;*8uUMNGh4?CE`_z0t6YCWae3+P z(b+$ZY{? 
zVI;}TUY*_?5bJ|h1T#WVNK;>(yy{;M`ZDqAtmHw!sdhbl9E9N%#Vs9slCng9j}6Fl z4P&i##k$igtn_azslu#|_Qf>Ba!S~b&EtJX?r~b58_lyu$i}x}u(fE=z46h{1+|IF z*#rW@iyo=rJ&Qd$na~m34`#(L4$>36;t0{(60Jfw=a9t~%ka9Y9M&WycrG^V;Pk+r z@LIeF@kQW*P0r4bNd-~~3;Gcss~ZwGt?op$(d7Fc?W0JI)V?K!4>(;;2b9}$+brSb z;-K=?M}%1d0W7g-(y*eXyu~$4)r=2wy~8jW642PB0~I?=uyRFEWg>&)hV^_`@iw*< z_mmJ-k$)G2Q$O%9Qw(IibXD|9f92=02rTW$N7*Oa8p5q^1m(LiTfzJ#GEA`=soO3vc_KTXXEStfmm{y+*gn|toJ^mV+ z22%lkeb^}L-j~N77PNzmd1hq9G!gbgB;q+{v|)IjS)GBSi5xLha@NNIj)3*h{(z*` znpI}>T)kSFpTRbL9x26mGT;JTH4sr=O7*m(>}Y5-yrKjHH;hKi!I!nLpGx9oyL_G= zeMX^*1PH2U2)3;@SpvX(CBZV8+8iDf_f zJNSe~ssk=#Nvl-3&pm7pOG6ADsG|JN%Pf{5tG^MHG37JXTetuvjDvd8`RR+od&^E+-vgT4XeUs|Jh z{&m4?GZX)&nrXyIh!)_cjBEM%T^2)KevYzk4rr+z`?wemPcc zdwfcvta!mG87UqBp{4oZi;&i<9O`{FxW5<>(8p3<6M*nCZgF zPFqOPX28&V9o~*|4gc7zFcJUlCQj-o`+&@#<}uE*91Ni_OBCZf$eND^T9}{=DWqYC z>eQkDaUj;_^e>j{5ZhG%UZ-IjEyE>@zT?ee-@kT2&_>8eYM{R>KK)0T1f%wULZ7aJ zLq%FAuNFa#QgjA1xKta;5)GEU=QZ3gdy9x0?M@#*5cK|v?4g)cUoqLr_7dT*h%0O~ z3GkW?Xep~BjV7Q2RK_I4Rgm3qbwXaXToO5Gs@Rgj;_5aaS@9ym^>xNMk=1oC?i^b9 zPW!*clH{;f??ZXDlina|kOXRDZ5w+4dnF`>oC zvi6EHfWJy@z$v-q$?G@zYdSw5cJ(5mNXP_he{s=~YLDB84nSsWB_OmI671*@*sDi- z%zkvd)amNCyWj0kS0{Qp5U}ej)71M_Knagq-}92xDptko8XmGGR<5r_GiAW)a;^X< zzZ<9bgCBg#T8`JpOc*2pVvZm2{l(QKB()*iNu=p9^J*0iugz%tqUg)V*68>fK);CN zjt(mMMuF4K{h)!hq^8s20~S%0CL~W4`VCJzy#|_z90sE0KpV&{`I`L!XB8ta6&4sO zZt}cpz7+pBGM5nd2KSks1nr^cO(UC(CQh#9>K`Vi?Qa_?NjE1e9FY5Bi3a*_dxR?| zbTHjYS1?3xWg1NqaB2yb+i+CO#dXTPC^-TjR6c2D^ZqitlF`u^^SH(WLg?`y{bZ91 zFJEH*W*;FQEhHN!u;(9fH%tTyo;=tMugH2pN8=*zs$DjdoHSN)u(RvWou5%!$$#gt zUx{qaY7S12n_FRSevBPF#ET8EJpS%?b~zmvw!!glppIjwkw|3DQ9IVh zojSf9#(oNw90=JuXV3z>v(W+Ni32)F&XDACHg0p|ZIseZ^!Z zcOj}0OcA;altUn+(j5z$W$nuR&06qV>zy}>bs;4CE2T8K{g^SsY!e3CZ|v@aryj$Q z`)FAlF-|6~+Ao)K9wm4z=u(}`y9STBQxxp}ue0M;UX=gBOc;l*af0bRe3ZRObitZb zf%eq&*q_K8VkrM6Oh6QhCAHwt%I;e@9a25NSE7pU8IIIvzYFTEPiss`gTwGUe={V$ z;mL`Uo152$VhF{*oe&JYYplu|%QAsqq7k)OVF5rSe#{L|1VPLrGh<3drHrg{wldGt zXbSVLNGkD)=6xh!$e}^e1H8B@;HKth=Q_F-WkaGaF>SkZ*j@+PP(nSnKk9x2O51Mw 
zd8P8gB#XukZ`&oK+VwA6<<9t|wBFgCXfG)JytT@S)Tp$JdihUGnE7gT9h%!ox*{HY z;>ksu%v4$QW5`jt5u463K%?R8eya?-b7XkyY3@f)w9p0PJP=#x8vUmD zjR27^f#pyE3^4-FZjQao)%+wfbQ0Nb_@!=wPO{Lh`>=DA(*os#okl~0%{6qcQjQZI z*mHz?CLuT5cFwX{VN}$<8Y%hG7NC?a&Q_tqFu*UrpRv`4Eck7)%L)w7lj$LOVYZVg zl|cwM-c_v7fOM+1p7Oe|eoT@fyGRBMl*O9mG$&wsECwoFshz0SOq?)QUsf=nFLC=6 zBXVs`I{c#fe0N6q=&6wEI zY8r57oK7~0PiBR|5!fup+0Fp#Ia=ivp->(_ZG3P1A>>&R+wUGprkPxn30ZR}t9n-d zm+Y7iG+V_>F!vnO_K2S>e;VxRueL>?{ZR`?-Sw05CU9gmOG(^+DbpFRe2JZax<;vy zj%-lir}z@%PkwGXV=1hLBWmF_K5f6;os}bVw2F zWph&@taL*W)t)ZI@Yn8P#q(yM+>ZW4Jrs7nO3;>_Ej=;Vu0ss0Kzx0e%4)ie@am1} zu)K5EbPnT~{e{N`2a%VvrWLY+=DJ^C5N3JOS?JAQLapp@$ysBdUvsXyw0Mv>(+B6; zHwpswTocC)lo3A^w`amyr6%f3vJxY53#F@<)%G@1fTwlNm;y%fcM?w>a5qgf< zMb7f_KIH(4j?j5ojStEn2iB@S2a<1K#h%*@-3(8($KBYQ>S6X*#sOSZH#Z}Ps? zYDq1TTlG-W0nl9Z@`=!MR4->RWH_lk1SO*_c}juV4h15IoR^+0D{l>PS=7qD+_7-}r^-R0EF z_~bf3aE0=XqN>A{-n!{9R04T7Y(rhKx3Ms*=Jl;|Ai2d$b6thg1n7@|Qt zJle7Xa9re;!=`k@i|cga6N^_gD%ih!&mdJ7+78DQZ#R`}!ym;r1cf7DIM#r{L^7g{ zOAQfZ8aFaf2F$#*-e6NxRR~t_V$Zmnxf-P)3 z6RyT9-E6PB^f|F}_!LQ`nDHzlwUY=@?)ckA-D-AmCzh1GbDV{`ftHuE!@WPWB;7xe z07!>_rgc5!kZ9hC!aqx4fQ&HvYjUC<{8WxGxF;h2D&=wB4-^;vU!W-%xDD}xao#kT ztwbe`rfGvw<_s)oM&DlC&TUd_bTOODNQq|dV++69^NLdDz~eZkk;NEObHUQtt`xKc zjL0Kb0={K+RiA{C1WHu3JGD+32;JA)K{h&}KnYfj>ux*pb=+j$wk@VNxpW)? z!It_$d$raPorNFEgTpmTN%y9f4q-rFQEy_~HlHHR893jm4-*kJmnv27y_j3__gn74 z&4%<}R-5q)Qv2HeVo|b6<`52S zAGzFIhY(W*`8fCD7yeBFQ6C0nIL8v-jboFw#Wm6cVt2gugrf>HEKYjq4giEv35pxW z3aB(!ru@$&eV#wVX7hlcLRWmP6w*2BOeoQf-a)K78VmWkLu5~f)8@8~4!46Efa4Cb z4Q;VFZ%GP%D?&v(@dc<``o~?~2ld%58>`!urM3+EGWsp8S;0y9TG3mPpn9AM4<{hG zhA^ralY$(?ZgtpoHA)Eg*ZKiCePe_*nMjVI4$_5wmQ!>BPZb*0rG$#)`Q}_zailR6 zCg(dMtaX1&^n10N?^@sC)&{7;;3|{&*q=MH>^U!LpmA5O7*SQ(md@fS&+ql02YP;M z{BJY+R6CE5{?)+23PA20scyZeHkEIoak+avpFHr0YHxF7d5|I7Oa*8@ejLJgf34oW?>VqRA3 z*$Gb+`;>;J(9kDfn<{gjG?;TJ_r+SxhU8o9m(0sr$O~NceC7*7e4poW<~)1j*PnsP z>n-#XuY!&iIhqaw1qO_rRo(~ zd^0_}ft)tRn? 
zagQQYJ*1Sk);&qhp)Fv_nV~_e;{J2E$>LGpZA#7-6#5O(m?zpE9%7@_HIQ)`=8@uw zB@V`N9BPVMQ?*HqZZf&1AdfO~$0Ce?Tb_bAa5wndXGHM@Gn=(8oPWz`7#C&(*UR`; zUvI(!fkqd7=TYe!ylo^ZK(up(+5^@|S0iUbIDcWKKB0X0g71UugEvP?m9ML+^E|>M zS95$^nR%*S-_u2B@I%}i@hqoNFopr6yo<7^cf3&iy*^FLB_Li1L?sQd@!wie(vZ2v zlyLIVa@n5^#{%XegGs8g^%3>a&7TYlcu}$3F<{l_1Bqu$OMF;MfKWj~Kk6)~G^ks_ zo9OxVWan5RD8k%kEdL+*;a+BWxvEfnUdqgkBnhaA1QbgfkaFl`BR3avX z$yQ)KjD>0hbQ9)|2n*Xt3qK23fL4?zx%^0h@aPJ)|N1BFaS4@%?|z=gXuD7@DOwac z@-#+s!Z?MGJ?RvT2{4Wgj=OKve&4_%px#A|-U|S%k`I z=oxe4mFzQVtrzC;wRux>Yx(#X!|FGZ;D3P??W;?Qy=Q^8sxK;2A(q16P0dHB(g1*9 zjIpNu;@|@oYaM$5%Ud)~_?^f*&ZYM)C|5-iH`zn&D(n6W!GqJvEEMMh8o(6snE9_X zH${==x_Q-#oABO}*^buWT_}xp$^H(yzVi}iLQ{D@DG#;fSr<`9tw=6AHB~B%>NJyZ zY&vQ=H`XY|W^G7Fcb{p>+3|-(*G0#G(9?uy$%cFmzX{Urq|e6^ ztWKCFbLweiQNR7*@H&WX&3YzXLIhP!WQTz14)1(w(}Q7gajSc}5=t>JjQ~%@T~^3( zI90Z&_)Ed;X@6B>LX!+_*%$mG{%lJ4(W@&9^v>=2^WyAA zS*CN$3cl6Si(Ry@^PM|4VUD06p4K;R9JT`lIK^;P%|%{-mt=De|f!nKf9p)8j?KtJgSp>6{~TAEDIC?p4$?Wv>#Iv3)7%{zw;Q^ z6`M+~kFG|;a5X*-UKg3+O5YA-s0>K^KPv2L3KzQ~lbURiz-4jolusgm;6$_YI=?aB z<+LsdQcrJX!ri%8c^rGhs|O(EZXVBB>Bt-VcK9wz=G#5qYhylf_6#kzmdXXh$nHoo zpG7llk)T@TdF5&*{CL1r1U!`C%vqr(Fface}54x7eYa0T+OlDj-wqAP<$?O#B*NmgWm zF@1>A{ZxJO%0<6+IcaP=b7~(4n%c#8KffjtDvd)E^Eg;00%zvK6$=VxZ%lAN@xo)w4xs+N@aoDU(-5kjdTyuf>T>%&lg2` zMein5l9dck24RScOri3dg=3UzK2yd+(fG~w45eY6U8dh#KP*Ve$LnP)?1Y8kVZh&P zgW5XsDdU2g$Q43h|LZQM!IxWE8_J_o?V49ja_p5$#`PJ4Ywa;xsjUIAs0AzlR^K1W zWQI|=Hs%G6i!~_QfA*RQ(W*MsWdN{k$?>b`ZdUh`%**FLq+v{i=cCYbg(WeuYa%bJ zq)KIE*5mI*I~^c2K9ml$4_ak#UbXGPFFfd@zxjPjCrj>vWrAtJaR*4sf}1t)7JZ=* zy}nKEU86La`%8sFpw$iXrHoIF@TC4%Y$89D2KS9ZSZs}H{{JMxP0+ z4U_O;e4=qe9UgXz(>|wuSx(rxH=LL_1;*E;e{`|JgwpnyD-?`B0V^&WOI(g=jIBvn}UzF@El!cvhx7KwuXojwqRgEFj zLC{-0o-zV`%d>r43)o25$JlJcao>&WBZQ;q&Q(*YOzo+UIj)akf*_~_SQsdfJ=M>A zAzor(LvI=xjvB6YU(*jBF1eo=NeVzvH;9u8TNA4JhSrGgi%3k#ly%Z)ax$ca2nu0@ zHMTPs9T6$I+z-BcaJBA;OCgiyY}SO+H`(3=;|>B*u_nXNqj1rA9+B;Yw^#SQLmWBh zcY7CKGGO3DG9=c`4QHtx;vvDu)En704`ZD|kc~$Ax*vB*jjhWh 
zHeZ)rv^;k*L;qnBV-NZAlao4)fTBsl(##;ph|*FtRh*G^gnNP0>}&p!D>Rp@CWW>6 zb3LPz71WF88?LEwtSr37euQlo5oe(lqqvuyHFcr`99Y2@`2An7@R_$g!j0tqc+oAP z7oF$}oR05ZJngck45d_5YoyJ9CA}wiuwzmd)R-k?NC519aJGAD4FYom=MXU*IggXn z1FU>mc5qucJK2b&$4*`gxz*#eDPnRXN+Xb;F*e*p1yNv|gpvpUT0t%}dCohSvL<6= ztlp}iIav==;JgO~{6wC1?!$&A&Es0Iw?0bFvY3w zpFEYwgu`t6uaj)&&-*fit9Q^YVOyga!@H~s26RgzPW^0_x(u_q2() z^m~m%nS2Du7Gd=}M@pH}j+>y|)a={)-?Gt*?QIiM>XI$fZz=s|%i7KHa6qX2pZu^} z6RqP2pOIwds(9O|PNPt@9mQg0o;eZvAL@&q3e)WHy|+F+uOu|x#1^|!|0SZ#@blo= znoGUfoayMa2F2XhjOy!_WfBGWk%XX!eZKnoN4~8W^AlBZ`eUKSJb%~zI^@L^ISXgq z?dV7rsx@5Q?(S?y7{ugxm3f>2aX-5ftfEbs`R2epR?jI4%0I`%2gKJ-5|HL~08D?t zZ)R++EH-Hf=)>d)Eimv%n%S;ZvDf*nXKnu+yVsbkW&q6aV|(3{)xX?C;Ljl8pPt$# zo+%EOY`;doHcd@H6C7va?2X1F7th# zxw|TasU4iR@~(0i(JvIZDP%h@eL?EoyZC-Dnj~pipxt9YM~Rcvm&>t+tVXs%wqk$D zw@x8=(M`#*o@^Y=L>PDT%+zq8(Px*jk7F@(K|FalrJ$lxyX$ZIzu`73uj4|K&cb%$ z@!t4Pws7cf8&q&&^W-R`b|pCAWp?23F>C^>AfBdv#g|9Mr&63t0s-D?aML%-yPh1W zij{nN{s2uSl^*`oXM|}Hhp2pIxe_Xz**qwm%$k>=ZZ;aZ1cEtek zH`ml!cVzsG?IHl7*&zu${)5{~^4{Par>E<>qI>k&#^80Qh+{K!q4Wo)*Y;&jf}Mf# z>9qb?_qVgpctPoMEhpRft6{X7PiCtA*EvP7LYy}Q`U39};T*8huw?sR3YB=U8*QQ? 
z>E@z?3jcw8n(|()*B@3T9Zh#p zRuXRp{kIsv3qSnbZnxp`$z#jR!Zh^1ggX;T^v@)79YTKRrxg~bQ%2TX^?^ShK4V+C zfvS%o9qRs@fk7>e1lugQAV z-qB~q#43e*3~vlh3}XOolb=TuwI{6yMyt}cF|;0LNF3_7W`jS7E6aW6_&|)ciEAGa zfo34KBwv3SSITln9vv*jl@8?L`!kXU112zAfhCkvI-EN} zWe~8!em4tUP9w1a~v?&W0?Mu6S6uBt7Px!7mw9qE_LRYuKkx=bQ>m1DJBl0S-^K#c9F98-walx(66N z`&w!_b>AixCm#B4V0ja#ZBEQNl^%KM?N^nk%`RaxG$hZbqZU8rLaUqoh zA{LGtG|pONBH6d~0HHVdl`qe?rsRz7N->4sJDxDIy}pB~HWk&Km|^|#X{W=&Ye*{* zMAK9qIeAEAZ-JxU zhO4)-z|>dn87-8B2Q?6hHd&n5Z7Vy6-2n@2QXw<#AC+y#XB!>S?J;~7KF$^dX}&EM zIVdq^BoCPB@$RSMqLL3m4qCaXrw$K zoDtd>-!rUA=f%{*<_HAboe@m5@axsy!|10f39zO{%H#LqLhy9)W(`k(h(#QXe?R(H zScfmLJN#7ljDbNnACC6|5F(naoLWxeoK;nR%Nnc6wNC9rvqq*xoP1^E%okz$P8xe$ zq)xm3IZYJir`9ZS)^~!?ptMfPd{hq~^l24@^II>lo=+ralkW*;GQXb2Q1cE|m5^DP z%dy{3KAQ>8H;?j6rhC(eQX4$6s_2)!wn@(cTEOpP0hZX6Z86ukW%m;OMpN~oqmgX0J@$;+rvlSVIy5WZ#I^%G2M<0v zZ-j>q+x?!xr|bT+z39Ug|2I|BiKi4=9^=m_f)7Mgqhq>OgDpbjIv$@j9;(+-td7L$HJ zHD&RO$G&~$*5c~fySt>3r_1y$;6=5OVxbY*a*_Lj96QKAo9px9kUiz+4cHhLY6P-N zpHW}%uwW=9tfF&8=V!ft^JVtD*AkqgU(SPxiTQAD-mv4!+t-dd!_GDC$)5nF8<->n z4)$}6w7ihw+@74WG6iNggz}^u*1P!$+^*Tb&E0PB)oWnrJXIAQMyy^WF@n6qILIL= z^an0d{b{fU!yhZ%ZJPqp7qz^lFm|ygTrS%@V z&hkrcF$MK0kyjMWN)JKmuMUZrlqZSJN??)w-%_*u_sllR`Hh@67r{YI`fe+U9SYW$ zY=tGJXnrKobNER=q!`wH*mv+Da#4ZP<~-BfH3;9;dmNB)r;lNKY0AVNcLAy5m#wg+ z8MGuC1X92(t4Ew`7<9vjh-B7D7In9$jbdBmlkh}`S30}L_jPPk&o$5?Em#HvJt0~% z*JqEU{Ctg~_uH7d0~HkBxcrb3_knrfq?LL#Tvw-Yl@PD*su#+7=gM94oE07zLnli? 
zkw-gK?XLf30$4?#G$thSWh6A9F%>HW$3_1^JZD{^MQ=A4T#|2jYnWm4TMBqa{wxDq%YMm_l0Ww z(q01(^@7Dh>0vQoI4q??Xstluq+-tI)2$bI`YX06J8(#Vo&gG z7u`S174$IvetUB4b%x}j`WU>ke)VAYe|-yQb4nW@KM}c|=C{p+gD_U1XBx#RUsA1P%|^OIN;oi0p1zGE*O^oyS4gHbww58DYBrj?rXxZZuR^5E zn6HRH+6;ufyD7j>b7|Zctaz?AB^=lko^_{te0Fq{^@xe5z-3vf5@X^)bxKD)Mk;DH zly0Vb)^nm|We7k;v@2Wc-!xW69t0!8IhUSOEon4S3q1ga;SJGD!}LQ5Bb|&&?_FII z)FT*=O5{R5)J{12)b2TVD%1$lV+w;Qz!{#L3shv#3nG2zByl|b%93|1Xhux>02^Q`xG{1%bz)-%AX8qjbr}k6qHJ`I&TJ{@Q)=eL=MahTbR~+)p z-_Z8|o*^50R>RaqRz_U|j=`HrISZSe!B6;|b@+>m1BVS|sk6p7+Mxjxc-dS2YlBsm zfT@zxG+8}PDSiZAH%URxIQUxH`(kdG3Wy}=^!YLJqxA|}^g1w4c6bk5oTJ+>myE@8 zDSMh8I{L}1HB?$JOcJ_5KIhv0$O>bFQWE=Stn^W5G``6vTI(SBaf2Em9pDd&=$i1b zvd5|(7Z=PY|6%|@1M>ThNt5PN#^8-a@IqyA0&#v41fjKT6RHqGANcwXg4!J_<$*v| zvz_VDIb|-@vRWmMxf}=Cd=6=EzBsz?NFezSUx8yUed^(WC(`+k~p1bW= z2BOjrX&dUhqZqMx&ZAwrkJPJKTFoLy6L0!@Q!aOCO*MOtR!{w$`FIIqi5T`T4sZm? z5R?2ke~SD1A#iAQp-3v2D9>X+*AHOg!@j=zc*<+yDy>+#IYJK1R04X450t?{?Og^x z48!sPGZ}Tj%m9K`1cgiV@Nr+}z?baQl-JWP{Cy1?#4OcjyH|xE%}oGPfFy&8NzG7u z{vdvJ*BxzKMn=O<@@JCRu;e(H>^~WDmzYy;Q%L15ERh?dtzeFw>M7cWim_lBlB?Nd zmAH?;1`ORl$^8iZVU!|ZOYSRAR+_HEWs>OTH#*&@Zh)C|0$iP;f=|@!Al)VPkN?f1 ze~AwI3Cmh^Blic7A775(`>kH@QHsq5#=@e9Z1Yz*Z8<39ai3~70w{fpNJPxQPDJ_P zq1{sTya+Q#DbffqpwA)ZHmPr0LMQAf3_`wv_2D@u`E{0$<})a1Yaw z)t#Pt@G0neNwHE#ncy}mIFkPZfCca$8NCRL&jdws2Um;YN9eZ^2)37UJ@YWrdxk40c&qq$;mE+0Zd83;7(gGvk9k6R2i z-23uX%S942xPgPBIO8I;RJEA8i*bzn0wU2M@Q95wIV**y#tfwB4GxN!g_5+NCsu{p z!&0egZ}n2Bbn-(l^G|6EU3>0G5U;F#=JR)OZ87+n1G8}Ef*Ux{x%_k+%37s0D5ycK zXg$gkr7VMce&RYp`Kl~H--3!lLLH72JCU=DPC`e;@Junq>0Lopm%UQg!7?ErE@f7x z9sX%buC>?{dG{?ln{`pFcxG3DK{cv{%ePYrZz3OXJHMX&XCKkEi1r3d#zB66 zsug$Y*HDj0@5y{yrmFe_Xg9tM!ptf#Qb&wp1gY*hh^bH39`QlzFjWqnqjeA7`vT;T zgK@px>PMH4#4STc+d|U)S|sFm_S4;LzKxCCIId;_#9p=+>vHSYS+OSq5y-({qY;DG z9c-Delu#dCy1Q2v#zBF7jJp!(I1L>{tJ+)j%Pcu2Dv#t?#0Gos6x9SWzdB@YD!!0?DN(`;QzcB?NLF zj8CtJ1@j-zX=xm2dhrhu&0VD_?Y`YD zUAFde6Bq*GeIL|>xAX@`ryX2e?<{}clinEQM46}r;@KJMwrL%Bvs&ZI9y31KR$YP6 
zVu3y|HWuGC{~ey>cF&^T+Z>TT|Kq5YNwvUVpFfW07ucd;n(4EjiRgKiy#G^AkzoEg z%RVR6nrK~)>n2P22@0WnDmQn>~MF|{I3 z9SqOfjkp!X(48RUu~vRMoA{Orx>E2rsH(-V4AdB_Pk$jbg+WG74Z$E}5M)^UI?HJ} z9v76#8<#QS^JKaFgmjfLHA%Hvu}A#)GK(ieQOR{cYu}AG>Q@=<4`f@Y2qWK{&nCw3 z77mBj8k?7paPE*P?s$=<=UqqLFvm+x>s0U$tBZ3h1B5DESGF8ZzleMw(=oY_kvg<- z-{`8)P zZ3|CAUF?G}=E@lZm%uPAObT-jcp3|(ahfn?>sICiw1f<h8LywYvJ5nA6L`|GLpDSGlgBOI^q|i_~qDiSMZw zMkYZ*&4rysuQw{`QiG5vL?gTU)IsEpC4?N|Ol1(LDYJXod5&Jv8))zoGvDI~Lq$+# zGp|DgLbM%7XuJ7ICxO482e+@a#pvE^P|#XNp%b146{&Q3Z+4f&aTRG_Zl~n>+SMkhI+X{@buS!7*fx2|!nwQS{fTuxfJb*TJ0!&Awr^O-{Tv)ufn7HdB(>MCL z`ocebpl~X$uVNHh)EBQ~1D#{Mmt2KG+7@BRyB?MWhJ1TK49NsGnsR9RL}KHC2#6px z(>)=bJpH1H4r23>%MR36n{HM)nJghC^o#v?GvL27{b4akTiyc?CweG2e%3BEjf>`) zGUuG99Q>%T04h~3qQH}C-HiOaYeo?s1nH(7UB~LD`6U1r#EL6kHB%4#CHa=UL&R7C zk0~ze)qbNgRf|(NKdv-B`Q|a26Z|NA&_Z2}#=lBV#|JS{D#2Nj@ z9t5EAq878jgqYuKu)4@qm1KpgJ6>#Ol-w9#SAs=3>l1owNhU7fVSvI;7@hh=4rk$D zlv0Yom^fX={!?fIQl@~MKwS)PHl<0roE=W~;?z;^pyZ{PiMrAd$YSp9A0eE6<0p}Bjiu|U|!aJhfA>7)8(Vvtqy0wqFmRBZXgSZTzU5;=y zNqi??%UmqYEk#)kj9*w`c%rXhqa`t^Y+&yA)GM?8D{C{Bwc4X|0sdDinZmr) zsY;?McfhvdG}4@U#^CT<p5`j}$ecq- z41cQJ>zkYuQV_M0(;st*nwHf{-gn|~ZTD8EWbk{4&p3fFMa%0GA{Htg*G)5jbD>=_ z$N6iNojGpwMD?&?lr6%VokBy;tHLM8O~jcX!`3sxlS#qW%>5Z*?y_gd& zZl+Bhjr?Z>woI_F&|hBMTi$$eA4p}kiqp?Ac3M$#9)5OH4JMOD(3C!xd|h6NFVP_0 z9HI{;K$nxcEjEo?`aY3S=?cy?OEsX23e7n!bwvWDIdWBeZ&4F=lh_!V2I8GQ%M73- z1MHLL0fZZJU1mk-H0U9R=oI3Y_gA5E90r6ZF#BR?q~4)T-*0>CHJ}+SlQ3o;!4+1v zDG4mAN+HSn|MWp&1KdMZM%1WynfGB=K6KvfpJiqnrK>)hvGoGJrhn@}U$-f@?GV@O zZQUI)r`2uNLfu=LjwZZVou{Nw((2I}3?TQ0ODF!1j(M)fjSckNh%9@_BvG?C`p_9g zY=;_x3j-yF-R6oL-pmUWc=XDUv&YD$ZV2_dZNH0p4Y?u)Un{$c=WhR@p2%D+;7r_@ZwBOX^-3Sqnu-1^y~#Cz#4GUT-*dWI?KI_zxUZwyp|5-q)C!0T!E*M= z#)E?(KTH9&T=lv9ONxWQWt1E1<|K*@Oe3thqvR!QeDRZNvBXPkuI+uR^?=4nhsUNZ zRtw^x`E}D9phyyT&`p0?_ZSbLNUuEkr`{OMU1BlYKKB)H4)Jh1@G#6~+6CIE0Blii zxCP{=&guFZT?f8B7B@NBHx5e%r_`_*K<}-jWBQKF zJE?{GI(6-hByey~gA?##{$*Iz8+{$Z%yJP28UpZ)?Fq(IQ`C!wQCA{5I0_jo{!6C* zp}4TUi^DF*WaV-qf8=3#Oq&iV+0ul>TjPnrS>y@NTk`5OSqRCHrNbd><^Q@=sGs0t 
zJbWb2H0)xiIc#o!+@i;g`7Izr(tU&#uo&K3C^Q+l1E!MS%^Lu0rXg23RiTeUI4xUy zew>iL_zoT$4F`V&Q?z_G5a++$EVP}lCQ85#iJT((*_}IQ71QRw{pDVsp;P9dC0H_0 zZhlV6|KFIcP7jfA<%BD>c@-f%007`eRsRpuV<71;YSD~aPH|^7J2`RfQ|-AOCov`v zH}+3%od_|Rqut?K~Cj`H(oMe(aL`Ple$fgQa^{AXl!??cD&=cOfA z${u4(^@rc96AZfE8W%K9X_1lI5+4gjhKtxsD*=F@%u{FDbgX~*J`pqTtc6?G4ta=G z6iP*_5*@{Auakfhj2aTguUXl=5|^9->>QZ@F9AKWc`A&84!g)#<`qh+ks)G|bCoEx zpUXphu0y}}gv2_oR?e7Wj*qzWk%HF!O!05Zxdx1y*oYFX=MU4${L#}P+MHCwhyVoS z+%zH`^+hkd)AT%jH=xzcyX$*?yY2VCuCyYsovcE+0;s*DW%anB;1>1=lve3E$A$-- zg@q{ZJz{P^B7YwzvV(e$<1lB&GufV{_wXxN%joDArKK}{fX!zEhIf8p_s>-k+getZ z9zkiV=Y#l7Hx+a_mI#3iQ2h=S$z(sfT0N~Nni_p@l2Z6&JcyP3)f&rj14PR2O-aDs zOF+!J0j*G~)zGpXm;|Ro4;|0PXAnQHna;Qsa=zJ7S)U|Pg+5}AH|r<`Xsy}*orm8r zdH8lZ0UaPR^*T5g_F%;M-&MWPx@B6jNx~DpA zsFfFN&B(BjQ%88tc`wzpA~U3Z_0QW#r$39w7R`^dDU!k~W|^TR{Nwe;t1YCzCIG00 z0=swEy3fRpJn4Ccx5cIAZK_U)l7N8{*%X|4n#-XUB?z!>8|iSC)f4CwX>Skah`v&L zu^;q;I%WQ)GE4JuM`K{F6gbO+Ik&Tamu;wO1-9|rc_m~86fQEg>&s+{A9u_K4q(zx z^3Or}Y;@|(y_BGQSr_;r{#%*l1s0fYQz>5{p`lx(iSc>i3dkB)UxHmN#;)WPzMr->F+`OaHN>)gc%>0_o=kwgJ^7xf%@kJc2rOmA~5v z{r?>YrA->^MedCPq7dV)P;B^PtaqcuoMJA?#5@ibMGrp$%-~>X=jz_ej0#pPo{c0a z$9P^obsb2R#MJ5(Ov7Bg zth5|A{F>{89@jP=Chqe4DDU4FJ8b&zN9O&+?IlG>rEH2~$QaQW%^_Bv<|%L0;_vKT z<3xf$x-IV(oF|s}lI?l9WREfr9>z%-7F-D(b|~WgIOt_y>Po1aD7t*6i(L!&u(fbg zz1%(=`e-R$B+{RAM3}&-h#Air=9cGUN~4arP!Mdvw~jztsYqplAW3&s8wg?YoMLd7 z=2mPb_1e-`*>DaMW!=2CS7iS`vrPG%ACX8wL7W@ypw-)8AsS}Sb`7YnJ7e%YOskqW zVld;GUC#z%l8_ZWRUmVa0$Y?l{xHa;q)B1Bp$qkv0pp5q@j?Sq69PQu_i9=i@L)Rd z7F;2Zc4(2ntwcTmII!@SVAiN^Wv>J??=l(&;@^_s#piJP4DB#KebDEF6o5q z!Frd}F+DzokGdo#e}T4@|Ch%-{OWROUShmGmo*Co!ry;YZL4x@8iFH}%e_dw|F(IT zB1iAm2rhNcfdK7VuO4Kd0$?XoBw8YQ$KRcy8U8-uyL01v@2E^aHKXepmB{0*S*g38 zjwN!(Bz6xek<_()=Dl5Drwy5u`6{KG)s?;yL~13C1Vy zV!pGN_G`rv3fO+U`j=#8m)~?w%2C}zvB_0J>Y9=yxKn0QTfNez-yLCPSt18Y)Rs>v zQP?FB;BVzE7J?>lMOg%=F%k>f49#z>?|12s!3u3uOKH;JE>&^s*rkwN_V6D79$*>5 zigX=>X?P)NhE;3}6w)Q!h9h{~p&$ZhJrTD4SLtzJ@vF(!Oip&TGYDmZID|+f%R_nk z=sh87a;T2v3kVt-+|6yMQjZ$&oV@Z+CdKieF=v2p;qh_Q#$qYTg@r 
zvO-KIl!&kLhK-Ie@BA%nlV#tj>-sN`&6uL|;Q~$Sb+=(HD^ec;uW%1~ki#JQnlYIL z|8kJH61`aa=DZMuK%=@hngBsTM&0>(>b3p}cc)meXh`b1EP5S?Ih`fTE?PWASrpbKs9F8viKB%Oieb{q0H-mkRU3Znq*Xcri6+5%gu=j&f$ z)kGrPSc~{_8+`lUdaKT!KpdI<3xJ0f<)%#`LD&6@ypUm;jM#yHs*Ta>8#weYG`(rc zQDIh8ybSlUJ=ZWpe{dt|Ni-2?ni^AmubcaZ{dEHOR0$SdoUy*?=D)FHe`JIPBSM0s~B7XS;8f&LZcsxF2;) zTB)ZQdSqRVxP+7o`6XXc8>H)E9|V29+!W;JQLt&-PHj@=~vQ&p1?kK zi=IWUhRm=h9vQex*z3TQ#sJ8aZ-(v3e+48Tyl|{)tu+ULQP4MVV!27g9JXrymOZ?l z_^Jm4uzW9or;%!{ZB3n{Uv<`9uE&|Zip|{D(%rDs3NVH3IMIsTI!OdO(nrT+qxbWE zQ90;~a>EiA>L#@EwGemMWD6z<516MVX&FC8nF9UW=`n)QNcwwqzjE>uloEx)&+q@2 z(dlowk-{Nagv);CDcJ~vjir!kkg!UerIV3YlYDoo17-;T!pJ{`*dp-r^b6{{2>~=I zUX%tPp3)TkG0MdURMh0c&j`W9WeSmQzgDQ3snt%ANKH#_U+H1*v_e!4TP=DEiS}#8h#AE??f8-?DwvkB z#06PyUZ9?Thl-G(2hwpa;^cjj3Y=Yy3)VKh@#w1`^TEC3^y7NzSnNOkh$S)`hGa%Y z8Uv>&r^{l9YT%8Y-f^u)6^%LnX_Pu0TwVm`q#8DjqGbf{8kIdMfbxEKUZ5ZscWTN_^I*RN zx4k4H&rWSk`-u)|_;Qz7Fs<2E4a(nh)%|cIf`;;yp%dVPkJr9L+C?dZ6J5Y~@tv_o zhiUcf-mp#5-b%}LmwCwLkrT>U@05JVp;};gJoho=)T<3Q9rLB$iR)&NdbI7DmD}ds z(^yyY>@7=!Pt}&@80MhsYmmpDZtKqjE$mM0ZN}nZ5mOO+R2oa~ry-8CRe*Ps69Fy+ z6G(69h*QP@-(D4o@-@}O)g+ftMcDRCTpO;ebh_DWm)=dtS5M>E1)ERAKMxEiKqupVHrIazr5Y!YI1i zxMd2n0}+}XiI%zp?`Oid+CFZ2UYaz)KcqSJqDC6QBWLsPRy*b!le^Ig7~Faaf@vFr zxj=m#Fi3P?*Q9$xeEHyrbJAd*SW@Ss$t$U1Dlc+5DwYW2>ejyw$A?7jKaPwa{PLHu z2z)AMS=mzso)I;uF+v#S@cf;e5>nUE4fta_o{DBSm%=cvYoUq}gSl_DQJlQ}cBd}? 
zRQjE^rCC{Ebr)w7s{Ige5o&o)a*P>S;vs)=J7&f9Txa8*Bzg!EM=6_YQdhJrZ{N-wQBI)FQ`Rkl_TO$@XG5Z4+E%{LyDnV7l~ZV^?*BT zUOQx$W6YK={&1ZdX&t^eOmB#zkkY#<$d<;YOL%Y;=Xde6W>dpLYrOewV`05=`n`JaDg!VjpmlD!tRR7Mv zQis{GVZ~CI{Ea6%6ugZUYvWeN)ueQ_3A=tK71->i{TtMA5o_(%5}Psh&jDuJg}wQ4 zs0iw<5+kRkM=OIPp@BX5=o|$9^?&met_3K>i$1ciz zr7^3bzRov^kJD2|Tev-@7cv=KG;Y)gVNJa7Dh!jaU()@fw@U?_!Ymi^53#p< z{^gySv|*Y%qS0n}>f)@dXVvU}#T z=4R~7O2Vw)a8Mkv{97zAO*91}kL08R;c}2F{meGc`L*|`l9yu@NxYb`xVY?kJoLhS zb;Kh9!s_eSGJ)cw8cerGWQ3l!(D2#PO!G-7h86Az3AtG{k@e=MtBm5v+Vq!e0HhYU zRt&6#>QJ@Mp~F!fli8YJ^MbW*p7shv4H5WQTiz$({x!1w4AVj^rS~hro z@244G0%&;ISX^_%n{OUnCY)3sHgqnB?;;6D-1q$@WvTL398&wFTXN{*qeD!~r`OuO z;aNv}d6ID7UNHP&Urt@}eaAu6p|Hcmi%a9I)jpsF9Y8}1VRzk$;u}RPb%%si>->bLQRYL`z0$BxE&@W5^sBoTGqhp*Zm5GI>_LS#ym z$w&3x6qU#RGwNm_{f{osh%Ng0+_HW;+|qSK{L#T;Lo%?b%8&w&N_VTANV*_^_sjGZ zYEicr$h!eSAA4BD{HT12jTJ}vGq(b-K_ZXdYR!5j=*ombxCtW-BqD- z#{262qEZm%$={GFHqeB84C+xuqxEWQ{OIq!ZLURJQ`&0b!ue1qi26paFh!81#P>Jy z16$6~k$wWXV^eqc$peiRi z2XeeKcnip9PB=%d6F(Pe=fAt5m48lS0cLx`*dTTef1~|5Ia-;|H0OEv5$@Pu*N}SP zZic0ZBv~>Mj|Gk`!Z!wEl$ z_E9nrh#S}2p~$U_%UpCRV9!u|RBGCQcUzg?DMa=bJIXMvz11(kMFnLknM*#YSL(3z zxx+P!cCwf)f%vgp(w9*IE5~b!pO7|B1+nHU9Po!flmcCNl%+QdJOMsUR0viA`8|D* zekD{WjOB*J%WV*maB;}cIoWK6k6=xmQtA8rTI4a`a`wS*tu*Vd9&$g=@T}b8K!(U5 z(Dwiy*S6N8>WOOSJ24)5uvsDn?Y(CmAbqW+4H&%C8xanB^`n^gT}i{02w0TAj$tzA zWWzEOkK_GW2{Dw?&6Uh4BBF#1z$&Mt`O+20>FpM)=GAal-}~$mr9SQ}wDHGsi))?h z-6SSC8;u#f%{6cw8tLr#C3H$BwsB~JJbUm63!^-JP%F;Ct~NVS;V(sS&e9Rf1r{2j!z+aa&JrsQSG7h{Qr7+HQq;S z8t2V*W6~j>2iXdwW@(Gw%ZhPPkb(|b5TNHKlD^c^xn04hf~*&yO|AfVhyW>-!5x1UTvlsN`E8bI}9gft7X=5>&GG5h-QEf+@}qM^m*?qPKLa6NT!J zK#n(?^7$4+{86x$NfkHR(kw-|mvxlAE2QX?%TU$y86Rr_pkhreET$n=JpB#Eg@4v2dx-wM2J=43_{hT0r)22B zXO7>Z5zyCH%Ve3JbI^i=!W6^}F}|4h(DKb=xFeyteDbkkh0;8zigB%IP(&?^G~JmHW_vQpI!-33MDsO)%D)f zLpnGU8lZ6G_U9(LbUai!a)7#Q`42WSsE>l0H8)m56hCyh7n0VNsF4Y{I8WWp(EjRF zI&#`G1O{q~9oDbDkmIH`$eSYIsU*W_27dWjvbD)KO#%m@WI(eKC`@_L;z}n}si*}s z+zP4|u5vAg3=r684R~>m+XT>u=n=PFtMY}sR-~{Bu#{RuSMl@m6DS>~Sw=V9mF99W6HM5Z!dT44~U3 
zD|eS+SPuls?Yr_=6n5bEJyb5X=R1xx@4N3@`A%1c6D!wBvY|QTCP8Pf1lfNAT}b#% z)Mr}n>0)x7AE(fsWNbv}pOrfbZS? zeyWrW@3RHQe-Cz<+q15(D zY)d+WzPC12ZxV~0gwftul*!M}L||~VUN8GnqQnW-09iP1NTVO>X|%Hsg8w)VrJm~_ zl@ja=+Jz*KyyLQ~2(_Flm~Rj8spMsltYI==_Uo(!*y=&;Y%G++e~>PQ0SE@nVh(mT zj{0@5tXyAsQpB0jHm=ehbg#Z_+Dvd6w%?AUTV?#{bar*#b&x?1%o}jK_FPX>HRxEg zPzqc^id*{3&b%G%$gy0&IP|FFZmyrc+qENEkUDL03`v-28ZuNGmUKZ7{)6*G|Bf3$ zKXpZ+YMp1)H=sO|PO9L38S&WL!roZ{A1pJM1NQ%Y%YWkbIIh7{qI4?_ z=oO){C`&cl+z%32M>=yNcSw69^C&tXt5(6i&TX|$vP>BYih*yqz$Bo&Y>BS@JD%HG z_y`%l+kghqiFXZdTEwRMAZVBN*~4Pbh<&pnr^JFUl3G=&Gg&SQdz~&w7cpX}=l@KX-1)qk>EnXq&D70wJs%3_}!#vPvgcb#(x30(O3r27Oq5-es)+vj(qYyiVTA$ zKI!fcaWE-`e|5(a--_={FAXpT>XFfbg<{#$@Q9$f``UWwY4=~|g}xLBuSQ}7jHV8K z+5mNlrRsX;K7HZQ%i!5oq=H_K@T6`p@MOi z_p(pi40G714$!)4I&7d$!`NQs71yM-g{B&-GasheWa zU)`+nKz6S0ZUGqDf*0bkr|GbAVHR>^*EWfR)rK5`ZgVqJl6t@QnkYlU(^3m>lB1}= z>hNz#Ye*Ui?JF7R?h2H3BAl7NtJ?k^i%#q)gN%0??T#1^FUtBsDe|Q-VMTR5v{hsqiZmdDbcjR=`YjT;G-Lg6sbQxq?vc5sr`8I=PL)`mMk9oxv4}!uoASg?OqtSQGW2=y5v?# zn}l&ewr4Y)NOj0*y`W)gW&Mtl<;Ay4Ww)zx4$Hv)@D-xq0jm9P4+9EEtj zR>Wl1%_d#-$I4pXP?U zf1-e1;~B*kv6w}4X3!J~MX4E3=KPvkXrk9hiOG5C01g?msQo)vPnbjI>po{v-)npI z%{JA82PXDGLVS(r9$Rx*wqamMZo&ahxU_C5I)!N-PJ|rFIhL$~2Kf}y3R}mI$`6}; z3NwcaG>6Jkx6B5;6KFX04{1KJ0%vs7g^H;u`!YolC z@O;QC@)BJILv3Z)Zrju+TYjA_6Kzsk69<7+aHpEUjXgVTBIhaUvyg3G^YY6VgO)%; zjwB9(DQy_)kqDKX(xpl;pl1bhRkL}6|MO=dJ899`W>+z3F1SLp&pueI`_YU}roX;!|YH4LuUkT7*Wn-ST@ zn2`2CD%XJB160oKehoH&bDa_$B44r?qRWOUS4fjqKe7p7zXooR)#0DvKEPRn`t{8I z{=B3(-rk;GQC5Yz!FaCA1l7T8E`UfuztGy@aRV=6e3Vy%4$y<(XAeMsTjd|d5{`c) zG+)ntb7Oc71HJ7{CL=y9AS=8xQs4w0K*{CWu@ukZgbOYl#n}(5S9aX0BJx-_rt_ud z(M!@(TChHw16uq|aIQ4IZs-v>INHJ#n`ADz8tuIpC_F-i zi(bmWZ}w0CwD394X`O%s_>kfnUs9{L<08Qpo#P}lYp*g89Z<4*G&*C^%LQY)YjEeN$Dxs&@d1p4(?zM|!;IQQi1=L!b9@a?UXR};`;-v- z(Ra|emWfBPhgf;H(gLVIpl(=aUw#90Rs(ze!54Mr9v;^lCYT|UIWxx(l|Fkf*B;jyUJbq=duk7axAXl}m?OFG5y)CuU8)}6OX#vA6_4Uc_$PpRlG;1C6u|>os zt*!A@9{B{+dN8&u_) z(h*G*V&35m5gqJZf<42j&%dz8-R+mfm2nda#`6yd?A=vlt| zfoK9Su%kC_5HcIo4Z%i&BCk)AdaAT(gZnB&;eP|L*UWq?sfy 
zcvuOc$w`O=h$CKZKiu^XeTV}dLL9TF6|OrWku$N9dd=pXS}$u7D~PrMPP^VB`QI5F zl?LovrDBX54Q6vya?JB`^;on{9r5$JX0cgW&lxAN8kPKX;EVS7qE7|H6kv~n$ZRa2 zE6V<)6EgJk2{KFcKEgZP&wr9?V!W%(>@KARBG!>(?1zOD}2Ra-?KjCnVx4UP0E#^Uw-*u?c2HoAtnnM)IoIG>l}K-8bMY<#J`{{Sb}#5vsO(>3c9azN{v1ZE;Y{ z-R>?-?_Z*T7;7RSy8seXs?0ofJruG0T`s(*P3w_#BAE&g;y?+q2sL7TSq^=^nbX9e zG}?~HJu&XUqxv!|+OGd0^#gHg!-{POvNNMku>K8tC-6T=3PRDaQAiFA12vu?YIT%Z?+e*GQk*ndtd~7koZ4TG6SALSrrrgPMAlU%PL0pE=mt+m(lFpJzc4K)FrVyyp-e6L*TPX3qbSOp8)|`l_n+ z;9-5Qj`#uEA|1@hO`_R&t5ISouPJj3mG&MK6~q^n4p;VDYGIEWcCU;`0yaddvf0t7 zM}kGS?Qj{G+U><%1~$6&t9=ei!APZ#= zYLO(igdF##Ev?!n*Msj}8vhWPaTrHrfOfkrE4ZpXTb20a3AyKMR~4Rj&~3Gh>!3u# znMvGINxwA?V99%Q0-ef1iV9O<7^Oj5dC-&-z%N0HbmpTHxTcy(gV_p=nkjTMqI3b^ zOyjHc2Z7jmDh&LJIPi`WIKDpxt_ z>m6`H1#DPTz=<8pumJ=ovTfo?u22I zb?-Zx2O(98xs;`*A9%+s?OvMPzr2-Vf5X<4~t%>O1 zra;uC|+|1jnb?_cadK@Te3jx9L}1=9BELWLbWKj&dT zt_<}&FZ*H5T(L)Y&woyps;_HYG||==b0?g7606S${;ZNT8t$!=%WJJui+>rzH;0jr zd<6>JHQdPuUgx^8;y-yK?HPN#0oRboB!i9f4%tGA8b6Hqp|$=#C8jmwvHNgL2KLn* z4AhRKbV0bXPSyE@r7)JGvw(p&8ab6PFuuN;Ggb!Mi%vocBKpI{GNOR;s>&_r);N-%t$FFJH-Aw}PEfV?HVWXf^f8~l5 zV9m>C=}Bpn6&DqtO{>ct@Q9@H7;@A$dJW%4F3jrtS*%X_IFf(rIAyl8HH756JRKzUmT7udRcQ z`CB((oZm@3utZ}AyuNKmTI#3PNtEmc_K(5*I9FXL(SV?KG5=SyKm4rAW$TRqe9Q2} z2CPJ7ZZT^N9!&*U<~lYz9rhJllk}HO=OLjB0>OW6W|$%*J39yelS-8G_fzC5v{B-P zwUjXt759BjkYEa2&6n~`7!v>C?iI*LU!+!(Ja$&F-ubuDW)TiVzPfKSKQ|>`VU`l2 zQ302q%;~Xlq3RGB30Z#A-yr7)1G^(Mrj!UMfA%q^WVB!1ViyH{U!9}Vs#PiGxl(^Y zuM3}+-D3GS;ED38xN5ak`k`>g8fm9x7f*^%Y9ekdcklLC70CYp9?hNAtN9y0pVaFY z;kPG7V>y0}*r?3C8pSinb6r3(p>U~9%+k3YG)l3SH&;r($glLhber^JO;n}h7HDg=q8kv5|S@&yF zQV7~F(MBoSjg5M%2X+vMGzj6L!V!)H3H=nWwSto$X9bUSl+vx5uhwCw=&$3Wi1n3G z7Rr!8detV>GI=En`dUN*=bH3kTSoEgujU+C8F41R>_Q2gbxr%etl4NbuvM*%YvQ}^ z05kT_*`<+_xy-u2Q)V8xQKyTVo;J0C-n%+kIgaf&wm81HOgh8RyJ!U89;o6332=q;ygfP2Wm(pM!vC(hnM;lmu!2!h1T=H@KoBs zW&7kI32QU`5ld3Plo-Zv0-;y}Njck`_CAdZNq=;+kB7fX+K+-9WBde58oViN5Q4r= z``C9YA4n_9-6yk=*Pbv8S$mdp9z32*HIRWi6e0V^z$Li05R)pBWVb&%Npf_2j2#k` 
z>`XAfIx6;$$MLtjEbP#+=6U?ZQ3MWW{9*&b8w9QP4rkhQ_4$0|otnyW|AP-)p~99D zAWTB&KFGq?Co^-xL%^-X5?j3O{1+&;Ih$vBQl}tUZkyJ@B=I&aHl)63CkQV;)=No7 z2B{VaB#f`3U`d645*k#f>g%5%C6BgOSig@kY$&4#UD{H_gv_0nLX^zyA3jzLvxZgb zt0^FN^1iWT{eCq!rhOtf$B!c(P6*wR!pS!pXo7v^)3{tSx!cjj8#tQ8UX(|o93Dg? zVm6zjOOLcEhnD<$08nJ@A?|+bzFqDg+Lai9ZX|z- zXz>V9G}gv&I1wXSdZASE;o=>w2Mx8XO-FX|3zcMU(c#hZj7-PVmf&oNkB}iaK_#Ay z2GWCDJX{0pU)5!zthI@m&|+ql8cG%>pe-G|B|Wg__tyIi^sKS#zPBz~*YO^Q5PtTjwP*Hx?6Q9`wy)zK&JW~@U2yQlF16H%P}9te$n zOb#-MyfNfvu;ab_k||76SG{bZM>?JbL`yA&xv#iLwK!47o}Do@$K6oa>GW^v1xTb} zo9s0aa4kh~1YWEGDs)F#*M+sZi#OS7>ctyR+d_bG3s1#mf=7pJ1dFaX{mfl0PD8mwMnDsYPRS=+5H zzI#zjhYMbzl8TdRRL_y!Thvf~$RK@z=K-IT=0id|`f~@guacQ|>J+4p8S@QW->jL0 ziZ5oJ{?KKHYs$M$NLO&+$J{Q-I@*)7eC=rT^F*~eslKqv3~ILCEE5VKSyo2Df-I8* ze$Yv4|hB~P6v&vRNOrlxVnv9i0_bO0|}Nv3wXf#eI=vAi1G zST@}*G9ut7SNmzf|H^e@#mW_?c6k$j*y5$@Xo7U=jFoSD)QqQ69|rbXI zrgLT9B3@QceD%N8hJQ3^TltPSBg2Mncf>;@C+>W|Z;eA3FpyYwHdoauNG!;`fKgj@ z8rs9-?e=C}8Zs!3cmQSiI~+O6pcetm?pr2G{Dz{{k_M!e%3v!wD*9UlE=I9O$IpRe z7m?Hh*!J|1&>LZtkW>3{T+c4r@BpBuyZxP~s@{Vw1U|h1GN8QEYno^dnZ|V5OF!%Q zpr&&d@h(T(SWV~hm@|-3mH9ur5?XIon2`+iiNaiJ@{5dJ%BBzmS>bvfvvzi6HX=n) z05Ii7HywUoAyQJ7@qPadr!t4{={sRD-%z6>8>YTyeF|y9rA8&JC$QoNj^r&hc4)o> znpJ4U&a-FddR#<%lj|7f6Fp~O4cf;zSX!d`BW0b z0kZIm+$R^1V|t7cKIg(e;RzEHRnj=gln+ybBX^UyPsW7+u0 zh;WB5c_#PG)RU}lTR@cU3@N?5Ty`Ex!+X`k+rJDqJ?R zAto=mt?^`(Pe|o1t6bcSwYD6}&6pzUSe1-bwm+IFEc%ch`NJp{yqq&HVZza$+^*$-#)y~;Z^xe!gFT4a(ig;cW$Aj$y{8Ls~XxCV&F6Z8MODt=>z3DAN^#U z1=f5~x3}g2Ci+xDyTT35(5#W-K4hZ3grNs>h-Zb%_rQ(Rorqql`7_%eV6_S-_7`b$ zYl2;&G``FhuX1?4bcjm(H8?OwP3Pzfh=N_umf(DG{SshwW9y&ayh!?oyB(|7Y7g78no$_BVElj2^#;IUDAkxrnNW)a>|s!A*M?<>?!SuM?A+QB)9!!2$t#hxTuHB# zcc>(ll_8fo0j$~Dm-yf|eB^aZ`-XO^(abx+&zld9*3b|}n)F7>M4=BR6r?B7OIkO0 z(_@+NGJ3?x=GJLtol679=bD^{ovH^}cTkorrD=6^WS6>gHy;!rFn#21H>rropT z&>$;9Sh-`u9jqD_3*Y;jKTSyhcbFnmKfcFZjNc_im3nkvnR8qNEmhh17$uE*ZE*uy zI5(L${?`JT@dhJ2FEp$HB)0c;H9a1xqhTBRX~T>rh*@n9 z1UI#PflA?*FWox5wslk8LYCY|4usJ|3d5{+IE9QN3z@;suRm zjf>828`~=eg~NpM08U28!Xg^1a#B;1k! 
z>)`S_I6H|cKA7H1CT5tjjb_cJAGf*IC>Ma{a+(~5e%&&8T7{^>WaArXIXrDF><%^8$72qPsCfwMU_T{NYP`X|^!-RI2dR22^s^NV#F!PDQJEsL?tMC>1dmF6uw zxCgVDYBhfGP}GPWfHGSFa&9IL<`|1^bqoLS$4gUg=+s*5*xRs+RPIe!0+dS;ksb~w zzaCVedqg8%HI6C-ff_Wd*lsD8zMHxD%Q9@?!0ukDJS?usfVps!w^CNd#7NiZU!OQ4 z^>=9`WLwop*g~C0FM!BYY-=kX{*~WrMDsL)TZDljwL7!L!ii)gt3)1_wIU*=e^nT<`+e2#v0r7gH?7sq_4iIcKyteWP}L z@R{yx-%W)sMX41xPZdiu&i34S*33B*3aFfNwSc*~dWGk1pF+FI&HjJ?-o-&A8-Zzt z$NZ-xU)y0fuoHO2@LRmfF5HuhI})qcOgc0~W_VUK7BcaD-op>X;Di{ug}M%c33v8C z?cGbXQP0;UrXA2Yy+p3QYCi00&Cjd0`SDSbE3!N9f)_?P=RGVA>**cCwBY?f3s4x! zii>d_F_gPtentB8;^4KV{&m+a65fC0vgiUB^ALvHns8&Fz8+X{d%XbFY=o;tPTBQ{2QKZL&hTQdx47#Vy}!WUUn1wv(QjC7 zS8XcvKXP6&m*YWWw9uZ;ys5du(dZYf#LML?@g=sG)_+A|#cgjzTDgGQeq1V#gcuVi zwUwN2R(&vZj50uSEUAq&iaiKVIdo?jXFM%vpjM~Y5|83QjaQheQDDeUprI09&hhwlCxM(0tq~4_ zq)!u1v9N^hYlBIMmCbPNa-b;R7rRu`Qz9}#9>%u<(n@P}2>+9x4XQ7zlJ zlSn2A;WRy@^}oJ@tQa)zt4~bOOky_dw>3IVqltDYJLR6&>Ro-974n5K$#(d0Yfd(zCi*+yx`#4|rN|;B`nZdTqt93(=n?@fqkdie5;T zE?O=vP!UuN7Hph>m640mO&ClJmsCOl6qRVh9PnDPFvLQJM;hK}8MvCBG#!v%juqu` z5j+0b7c3dOsF3d^5$jOVPXvoII^^@-x}?nZ1PL?-3hAHq#b|CC0V2PM$aIWJ%)bzd zin2E?M<5?_U?R4kTwqw|lY0#T)pWWsAd3bY} z?IglXi&$YSbp)K$?x$Bk-q-vH>Ev(T%P;%cZLf>R!c)dz_C-To0;?QlGsl1Ug{2&! zd~2==wB&$WT!_ELc-0Y`;Py(Hl%57X#|o+huLD!alj%xVq`j;W#ODEd>{jevwd5C$ zTZ0YVWN<;MI()2HH1;x∈b9N;uT4os3a&9ZXNl?8Iyj+imhRdjZ!-JE%qVBVE(` z?(;(-)|CHE=Jg3m~4d~T!+s*sp2_0 z`&7L1w5W5{0n}ROUDeIfj$_@AgHG&6QVk1eK2!FusVg3evmT~~(*s_^5}m?^x&5t1 z1gf{JXmp{coC}w&-Kst@w?ZFpyjQz`@F+F3N`%7ZSxgBLe5mVwl`yFCjAgTwc`MfqSI>>sJLe! 
z)QMO8O3Xj{A$b#<#GvSzB5FOf~`duVO3U8Z5$>*-;LP;L60)UN;aq#qWn~IhLJ!& z0+faWAKCPz#LIY;fHFZ>nFk{;0e(VD1TJ=E0prTt+vYt>2iD?fnvDT|y%d&kyZqUH zZ4o|$Q9AzdV~?ZY0i3y-^z~4~CA*Y#eZ!6Lmj$T=3QKRvr@8se*YE@j>LGn0TKs&) ztQA?YeaZ%ntPNRS<)7BJEb#I={DbQ>w}^^dMr@UuiR4%G^1e&mvuhv9m!>94O2+hR z3PH#yDkS>!K&eMScfQmBN8-e8q!fmbWFegpVLuUncPo%b8!pPt#8#aNB;Qil(Ix|E z0*1!+x;VrLhUt?4n8(OWE{)_E7xt=6c71Gx&&57FJE(o}WSWfQYQaX@!<=zk76qz9 zLF-Tq+j)OI%Re~5t5?PEO4sL<0RgR6uh5Cv3=nR~8v6QBSaB8W8crB1h=ng~*MSi} z9Th@AJ^EY=@1-%i&~B!%Ty>Y(!uxu%=z`5Ep&SB={xGZsxVjQu6bbxv9bH-)-hU5a z>NtENn~ztzEi!7-qo=!{!lCz~V>!gQ{{@wbC$dW-4WXaLvx#XbV^=TYpDECGqPkZ- zhi(3`bHpN+`OK1f=19c?2lVVXILF3a$0#_Iy}8eBnlD+(60 zRNINYf-V}DtkIAKmF3Q)0?vB8@ni6b&&eV^?D1KrxIanxHjF>Dh|s_N zqN`^VPAW;lR9qq4^Ghl7Nj*pQClQ^%Z*tk}|& zvS{{Q2v!w7`vDbCc4<#mM+Pi)f9L9-*tp4qGzNe~rXtki=ga+}j1RXKp97Fhs6<_* ztXvh!epZA}jgblTfY7N^m1}N$35w}(Fw+3kCwtoPsN0xH4GCqCMy(V21;UljT0OWu z)!~~VbEKK7R<9@$w;4;NIA2AJ2HrJWckXL#J~4Xg!krq&KX%q>aJo5`g#knBckq|{ z_bfK|tU#)SIV)@>H9b!m{)A=0h};`Xq@LDwtAF-QnSS$?W+NK^%u+B!P848BvMKEH ziq&!&5$jS7X*JlnG#-n#Nx8DyX=U^^tk?XIJ&wbHk_5uT3)qBv(F~TCd3FykiK_ni zb`gjq;XQ6T?!wR8a#kiVTgR(#G&c!c&t1DU$IJ$kkID;yd!6FWX-bR^J-Z|$Pu|nP z{*A;x2P{>CfjM#^yF5#$OQp5G`9+ig2F!U*NMB7b*J@J$Z++LQ zDHYrzhS`%03_wcgREa2cr%9oIEM|3tkJl4Gryiuc!8*X>XmWZiIBY@(6=d5^LETv< z+gE(h z&m>o75Wbx<=x4M5Q<^6ot-V~!0sK`14Pm&?0qE{IKmz)qCwf`_Ul1sG7G{!cEKk{f zWw`p&&Qn;f2F@`5czeK5poaA`WL|6W)ZOf-&r71R4sg?l2q)6hA*%S1;3)r_r9sT5 zS*98WO$q<;X0YG7D}L|@r#MG>v>_3>X4(*|-BAIJfBac!Rn0p8C*f^y%ROacH}>>x z`cm>ZlwhE8_)`-Vu2DNLB!{xu{YX*Mw4m-ObgpyeT$Gb63Lw<=EXM}a%~!&C0X99a zL>h0g0Um_&a(Den-z>Qrz78b>v%Rf0L45^W-gGBEam1oQCE+XUoZ*AD%DnZUo~XO> z5jp&aZ8>m+87CC|P`=n3iFfqe<2h&2I{0gZhh)Asxil>}#~ak^ai`LC%!OnNg170m()9sY3T0Pn!c==&!@&4Giu8Kx4jr=`ZgoV zyz?y%SfumTZi?wf)NTK}+;JsyQL7)d)eLglywj93#UmP~k)1&*v+u2_fP^bay>!)r}uF}N=MvkfPc_jJ~LitU^m!N)e4 zbgkVVaU2rKiYA)ZngrQIi&Hj*Lvm3PJ6*69iE%3YG_&!8+767D`&|jp+Wz5%6jzXL zkg-+)T+8GXYyChN{GFob5>;w^#-EDiX@EC8dn*7j;k;{uUGOIkDtIgX9q`mVTl#Xo 
z?GvZ*CvceClc$(K{0}!kG9QW}E?iPhbBxD$toz`AxI#qD6nR_hS4(<>-|GMlxswV! za8Qqs!C8Kqs2?tv#u$gX%4saP!=1|F;m&S{jJduU@T3rEva<}j`@usu9pSvkV@5+y zXHt05g{-df>h=!rU=^q#2OkF)U2(huT9U;85~n0ab#JkrEIr}|1R2l;vs2dxIhwyi zJ}dI}m34UX=C0sF(Z_l5V+r)hJpmF!p|(y2yS>JD=imoPeY**^#qS?hIgB&w+2x}! zrS%3q8Lm~|CvrC^=V)8;z%X>E06hdv;k*hIf@3V!8J5xV*~&1^W+{(Txb|+)F+wY; zaQiQ4@VHxCq(EH6MPUz6muVOF|D{6KuCbNtO*!L0H-cpzW@!N-ZqzCz&C3l}=N>u7 zDmALgxmmeCBXACCT^x+V`|#;4X!eIHdmISjqdo&K02>to*P3JvufhHP#lXn1KT2En zGz(uet$$CQP}iL0Y~N}@28ruoi`;&u`G{q^Q&*!{;tc!ryx~+=OUJy}B^Z+CRfns! zU;1iSjk!A|8k9--G(!Sk<9383G?Nv?AfU6*2t&?>)%Zc*3w5@S4V&!nSJYZo zMxN^peHA-WnjfCMs<$ihvKJ35@h36g_*rVAyYj9%Ve1jnbuxiwPRKt+B5!k6aQZ$# z#4cnWZ3`45s+g9iab;Y;rC5D`m8sn*=?I-q7765fJVjDyB!&>4=z2h5^A0<#+qtMOi zdjBM8TyZTZMdfQv6$>;l~LVQwG=3&Iz^q>MU5{X$$zsXjz|9v-7?m zvJ^a^&J~~SenvP_jDXxPa37y1cyUc!_Mjt@eMEO)N4Flj%oHrtjcQ{f(xyA;d~o8F zQk(s#cT{H7bagQ))0k zp#m$Gmad=Uhfc@Iq{#z2WBRB@s7~70;@-W!k$aG9U2|QrkZ4AeS@_sogVRXm!;|)xTYqRpR4mn8YIV{_4rtM$Kkx^2#v27g{#J{?v89g;TxGe-~ zCC^x^RVMqzqRfPJ75rq9yO-JhmfY@6f27U(_n)GY+-_@-ZraaVHqJ4TNk*`k0T zR0h;Y;0i3TS^v%h7Ym9YSD~2|I8BT{Y4#I26|f>y9+V~HAY9?@dag2Yh;QLAyKcod z32W#Ed6nPl_w$>m-FoGM=BEJqz^j^x8ydFy72|!T>gr3fs~%~JefIZ<6N!`#ie5<+ zZOeCF(O&9b-sRO3b@aXRE;850J#~G_vl4cZhNo|~nQ1s5a_r$uGzzPjA>$Gr%%R^{ zhl1AkPOhrH=vde{bsl;Pyq$kRP>PZ>$>gWd09eK3#}_XalR(kLLL+3`F?1sWXa-^~ zA)N>P@S`qi2HL%klk{(0?__v;%)9?BCAI-o!KG*V_Ljma5Hcn25@)?KREfvbx))TAooSFb^@SwTA#bq~jpRUhH2q*GOsmJcdKqb)iRQ%!8CLz*Y$71m0T|nUWG{~M|?aVln+2UxF{4QVT`}h`c}0bj+eIC5SyMtVBR|JMbYe@h(rfk6ry6r(1NNS@2-$IzywG@GYin!Gc#- zr|D7uYB6cP{TyHx&xeM|6Q0t$u?fZ!=pa-QOVE6#`_f3{lw&+vOT5CdU7uBwfy;OMp-w=S05kFK{ zJ}2jKtK%Nb1C`(y^3KQXp=xI~Z&T%S)pgO2;Hg9n?oL?QtU2{WZAfy0kGWM5Nt5oU z*B$5UL35in5s&LyP|_)+b?rcCbR)4yMDrI^+K#?^`|UerD;eT+?J)AUabrfR18hA2~_%#<*j+_zY(z?4MHaXv^N+K zHUnDS?P=}}aZ={}BAV=hs06M>EkaRSW2t1b z*t3$SRb^(H`YFZkmL+rVYP#+LG#EDYiztp6`bBH$Ym^T+7um6 zzby|hVC6rrYj4w(!sfh7`vt`d33Q^6$dTx^1BXm3p3DqpsNDHZWWo)zAO1$z*_W?Cg{kx7SO!{N|IlE&t1_k;dh1d^V 
zg&p-Ss?;76NurqMh$k2z`LqblUdp3X+XpaL*cCm+O2Uob#b1AS+fcHXB7Ljg?Aa*L z&qDsYd7)CGq{limWYe3*nmAjTG?DQQ7z{FATTyHO>}>T2!_s9W4cyZd6@4;K;pIfL zevh0!Q1GH{ZuCY|TslYh-ID^x(l(4&zTEhl+2ee647Y(@(6+bIFZi_>mVda~Hac-l z;(g&q14Mvx_z07Wc+f_qx#=e-+%K2ffOEk=HbdDbWGbM=ITImF z6H0Nj@zw^!|cx4_1hR zMWMWRS=n86k#M_)JwM-$Y4eO+4gUZb>5HIKVzNWvcSPDJ^fBsD*5$h1rY+%vzz+m7U0y8 z55;sWy!Xojykr9_)Ey_1uwE3%NC^XD+`A>CXt>GJ>@Xd7h$5RZF7v6N4%YvbbZ9lW z%L_;a!VymZ(kGb&n=n74|E~#-!{M4$Ut(1SypJ#cf)EGc=NPqj(wblgS)~Nsms^#^ zU#IoyaPy^KyGlbs_nLuWUUzL_`f8xt8y#@;a#p*CR~`?)BYCqq zpRUWz>da}~_v-;e+~P=#`KE6S?ZUK$b%&Q=Vx)x@J39@R}QSZ(4+Qywk4zx1g{`mBj zK^yNLRZbnF(uk45F{PEr?riB)mCRiu*#&DCctMB4BGEZzRN{W;sz>_0neacV5bJ6>g9uLstfP`@dEb`Q1!Bc(ai$=joBJDg{V1jwT zygObSWlqG$%tZ<+GLZTlOXQaNGsn(Ii>@bo9d2}~;A+_s#K?>U!h>`#DAGr(e~eOl z?0<~(Z5EKWCrGJX(P2r*x9C`cSdlldQl1~UW_$dR8~MCp-sGgNIMv;H+7@2Nh=25# zGteSiN8F*#_Sa9VOc+7erRV9=L#auEw7u0kKvys4hh$w#)?pa}Le;lng)%L{)rKOV zmLXG$hM%U224#+)r)VSRy7mD>&gYM9^6347j}&IPHI7kc5Yxprx{v(aM;N&s4n+sv zh2GMxk>;13q9pqA|I}(27roZzG0KQO>ar%8z>=HS($8)U+&(+9KNP+j#&d-t}vKxzXk1Bxcf? 
zM385~b)$u2>f;Fuk>Kc!)%lwy6Ev6eK`c;X4aK}};*v;7yBKyi^qoBW>tF5I=~oUb z%d3VE`E-FvQXy3(Cz~n$m!8tSgJd#x_?rjeKX-bzw~(SX3^-6mmMrAbM;FvjHSR!5 zgixnucn=p{Y@Z3*r=MgWvVuQG zNc4NJz)s<;tr6z?0UMBNO~^37>F`?6eZ0?f-SY(I`M7X-vZ(ryvqrIodq`Ma2iDLjge zK5W@`$UL0ncca5aW2j`0^om@ByN8r!{KIocG(rL|wE**~D^7cs(Jg8M(Qq|URtS`z z;0DpQ*K>LV$W*X2p_Gqew7F!a55=;M9Fmlajw0|Ytz%EFE({|2$d^TV`;L@F) zqwbFyR709Y-tKm#WQ2x3Mc^}d=B(10_capcGFPj+g;R&W(==M^qK??*04NyuuOj&W zo+P&R8}=d}FtZbfL%JXl{l{w%hDX*|)a{th92zzcU#lHpoRFj35j(o>dr_f92M7A- zw&!_PPF^A|T~Y!Td;=(#oWOK!Bai<|jpyBhOH#-bA5K_zGR6@;ZFPRWIizuSDGI4Z zla0kE=q|};fS}?EitY9;s<36&uQmRy5I_;=b!AvVseyr#i7}4@{HndplXEmntwYUu zrP7yOju#fE9pLdBNT6W|YhLdek8t3I3xT1ve5-ZW*v`I6;b{dt;LrN4t{1c@C-VKZJid%aB6tS5`%@)(m6O#&H76Fu~v(ez|O0gHloNE9}1_HP>-t*}NIje3L7PicX19stAZs9^* zpiin3#KQD}Gq$l2+SGv~7>{lUW69N%uI@YAx>q z4fkEi3U(>pDQQa*vlS(-Km>FfNOw|@Dskaq%G%WYdD%j^@sMhjzEoimXhPA^p<^ykE%Od;u-bXBb(v7N#ENh)D=;v$ zY4Nb*pj~BphGxiTR|y~rzLs|v7rGxL1gT8j(V?brGJ57bLn_Wwbp9$2(CA>hX=ZVi zxfq?V(Tm^q5QnJbz*schBIL)D$Rwf|j}TNFNsx4FFcG&J+%{^sO4mZ{Rz**Ew1Sv~ zp54uqb%Y&9D25&&g z?3Ui(+?nGr>53o0`}8K3;YJQOmW%@g!5%;uwr}r^1Z(v5kZ_Y4?awN&B~5o_z6Pbh zUPmzaLTZ2WWiTy&QKuUGA41cKT6`hb=|6fZfL{7e6_$XN%Z8XQTZgpARvmU6fI$8Rm*- zIWunACyyRIoFVxK2I-2Y3LbN_rl^6E)n~PDP*RJhl(+~NV zp^1E`6xgX##JN{+SLJ-WMQ;AIice%=ZZlf{bG~h|`GBO}# z5JU`8BpYaz7$lf|0Oh`?+IW_*tZuC6LP(wFJ^&&od2&%R3%OCcHjloX-Qom#qup9N?C4zyC3wQ zh_yaR@3=DncIi#ZyK&U_u|lQE6r9{WIbJnFkS~ukF|KC8qP*s@7G0{_rDJE=g?7$) zc`AzR*#Qucg7I%hS{RuP3ldfJT=cTH@c@|C_FTLfU=_LsBzowetW_=~?PGzA$y&~G zA=n&+c+>jWXgZoDG*#wAUX$sNDzr%l#f{01e+&$pj!O54`8ADDfDbrwx(V=?eLXHV zaXmQ|=Zt--)|xK-wrLt@0TI$3_Z^))Z!H(I>aNWO`Mr_` zn4DRYRO@^VsRV)|xFTFaDe;Extw~zCbjrUd%_jr+*A&lXxIfAap^%emtM&TGQi|ON zH>6lkL7yIpD^x#$0f|baQud1KCVEUAweSvq&-d0sO8dbxyMH4XAVJ_vP!VV$s#bPe z|MK9yS2~$dke|9Ve*8BE$QS6+({ak>ng7hBhwRmPfqFt$8Ux&c<dDElKPcShF?q@*l0ppK^0Y4>--Q$K2;;U!_{*94 zb(L5JwuYnMoHZr%Dx?vIG}RCVu+8MPRQ&k{k{I=7-_Wo9=>{ik5Kn@sUmnF|(k>Aq zx|B(%u%fWKjGat`K))1P^b?*guB7x}N;+0GHkCAE2GXlbh6YVWRczpM1N~{A32Se@ 
zkmGa~py*z8OnZd(Y3Y7#6v@-=##h)u#DHB)(Y7nfC3VswUX`nzsMq@1=;;a?fBTC- zmxA~?3o=q4ixH%jPr1fXGGmG_7#0I0 z>kJ`bJPF(k*>|Z&M9ynuhAnn|L*zV@Bd)ypb1A}KKd9;+$uEE_$I0A zpoZWE(2hQiOyilbv`Gk?2i#sa}OlW?$ETUY>0fY-md$Ky^rlh`NNaiA8M!nS^^ zjPoR&#i(;%-Q+X#;!QB!icZr3{a+&P+UUpfS{Rn3*;T-dvLW;zZ|_~Mr8D*X%>^8EBWx z52hI|>z^uJ?HlSm{o}hT%)RFL>8=PubfP0_6ETg647Cwt36pRKX9aPDxDjw*Gg{8?}g zGZpkDv*=GK-tx(KrjdFoqX0SgW%{q7f6JFCanez>*^<-H6QdwG6E&TFCHL(7uuB5` zA9$%bWm2Z+Q6;wvp%kAJzNr@f-vHz6vT5{W$3Dev;h~SlB%V zGy@zTlzH4V)*lG9tdToRem^QDK2`#z7xu8qQ)@21Ywtj}(C45#_&f%^}9O zpY0Y51{;El#x6%#uj0$kdeZ)+`Q=G&=Wh_P7<8d|-V;UPz(1SK<%JDl%_uJ}8Swb~ zA|k!Iq^Km83(#^>jLmwSoaqlbH!;3**yo|YKjD`sd!|;rRf|0s8w9}>61&v@t&@>3 z;Jw4~V@ct>2V_?%I7-&$cfLjttoI)>aH(M(6QL2TPiIHI;$>J2seuu;te4!#yosfXK=)|dIQ>xm~0vH4lWjbdo=4po5%$PVO86jZK<6a<92`_{8dw7~3aIT*jDt%tN(=3z_p&X=7lKnd27?>G(`-AVmrk^bTkJ+Wu+&&oU@^C` zA0gB?NW=P<1;{OijVya0N2i+mS7J#EZ1EODDgN2j%xR&rmjqkXR2+nozQ3-6SU3#1 zm%CcL*T~9OthR0k?^wDci^+&XOvR=;-L_4vLXzt6@*q7B4&>U-IRhu2t$m|r1d8~M zugPrD6(HXsdma=T%Y78i8mbhg?zss_o(POz6XvJ4nOs09vuykB1-Vlj?hMi}Y1B+w zby1)8z#}sZlYb4Ac3psPJ?tB<~IPY79tG=a?jAHYA zxSgy|uxgLf$)eRaiO%{d8#%J_gN${*${UKPNXP}#X?ceHpz)VdyOqV386*7M!+Um8 zuK@WhDC~um-{*<|oB_@cNd1W^p87?PKEs7#`UM0o0#$^NDPZia*gy77sb_ zU8bBJj}T#HCS_*0j71vcwpqql*%v=HLGGzSJWz24aBb5fP{bsyq-<&ZoOLAk zT1H%nE<)1>-}Zus!J=ADZz!&%mzfpuG#IpOu0om1%ChJ7^XJ$un#)64L7nUY5*v+l z$9`YHFltp5j%Hb%T(-OpyJK31Pk#!`R=cm4EV5dYv?T;fQzpfUlEa@ga;5!>;N3>9 z!38Bpbdzdzy1nSz494g;vT?hVp3wD-;MQ5Nz9hF!Y^%|)0KC`0yim^7+QcZVy~WsX zA+Zqf5SeG4Ik~o$1(|MO^f@>7nfusH0w1v(t*!I;Oh_%en&F>kR5^zNnK;itR>!!6 zj^WiN*uw4b=S%O4#=S|sY_i1?n$V$Nbk?)VP;kZO^>1oxat7nq^R9mgO8Yz8?h(|Z z0j27`KXtMK4LPSD7PK7^ebd69>$Egl*l z65(EXTjlLJAtjHBCb8&#>8ur0eN1RAu=sS8#xEN9y7QxMYyQ(|;*Vq*^b+&Ps0qV? 
z2*#qucreIga9MNm3zyF`4S5pl*sF2w=TA7D1gz`4Ynbo!QQh@AHE{Wf9B~d^L%+H& z_t>diRI}bG_$yw+ZMKGTr`qB#gx9rbvgp!vLE2eF^1Q{9?~|aN|I9)P5;YuU>YKJ` z-HNIoBSETc;b`a`(HYUP@$-WeIz=$Jm8w;I%xsFKB=Bcv5~gpi=`_2R<0LuwWNUQx z7@{tiTJnl{>n)&l-a`1`vT2@6gw!epiJ8=Ja9kK+_mho+!p;pJ9QMMzlt#FrELleT z%qen1cPS3Q9T^CY6h3`QwRRrkTYfK&5^ixr;WI=Q795q%AxR6Ez)b=I?KG!V$ZhMj zJ7b?s`s6VPxXa$@)$w@vsjew(YlR~Yd@P{d#~XTdNRHdGd`Hwg7X#q*ZW%56yRNGm zM1M-PK#|bRLsLMUP->w@7RGyF+U!(kr7oXi|D+-qa9}UVL?vy!b7A4c<1Hy zGf6*KD&i0l@omnYF()kWJJ$m6RFCS@^N^)siks^gzb)k^AlIH@6kqxYI9ijN7qiHq zSRj6%n>aswn~)}@AldX8BU z<7H+<8LJL`&&-`OWwL@df)scEJsR;fe0Co2S{fd1usHH<AX7T?dB2WHh z7g^KDb1}!GjU6MktuKNtzZ6rGW0uSFHA~KmcKOW<8@N_iWGVWxCk;~;ColLnZ_;EA z`)dJ!j^L6fVmvLAP#2o@*el4E8KG=eL$%BXQrk6-)KA{S6?}Fm#&VPffMiw3BWFhU# zpOtQ64ItC<+L6O+>}+4Tf$7s_#uFt?NR8jz2&_7Us(r|*GN)wDuI~>$ycodQwq#^` zPKX4IZy_3gLHFq`RzARQqqc_Gg#@KGWa#4`~b>t6Bujyknd38Ere`$x|^zt#;()?dFPT#-I>dBbe`f zDkmo;RG{&>AAo&(bk&6Fp_@sVg(ok?(3=MwQbEpueZ9D$EDP@qg)7EwX%F-98zs1i zPgnQ?_+nMLnAB$7vHbnE9OAx}t2+}~V8L6#rxpM#h(<9c#kHx*wKjCH#OS+i5k1s7 zXCvJcL!P2H29yzk(>hga%GdE`IrHCi$6}Ru1@5nELC3{qX&&Mx6LQ2MCGy;glTf2M zOm6>kLugFk2r4lcu;tZ9aXfzKqh_CjV98%S?vA+UJQ>6w16?i`Ex3d&68h}fS#lfYt+2JSzZW%b;#suC^nw$m!8lkUc zj|z*}-Jvp@2TJYm8BTC=eF2mEKwu@Z)y0>7c}Z|15kDl)RZl0)=A71i7Vb4mE0ly8 z4zwjN5im9CDk4sbJ#a0zv8ujXYOb)^_dKjTh5L+RM zcve0~o`I!1JRFF=U_JPQwSd1gx1Zt61L7!g2T#^CD#>ea7|p(zPp&{Og#^(&ARd>G zo9xorb#g?zSTi*2MpTrR(TQv5!6)PhUa?015bROuxJQ^h_mcUgi2^?ydQ<5!Z#rw1 zvaii^2g!QFEJUkp;Y&3CyH=k*kHucU=*W%>$Q6**fx0#cc_cM17&duCSJFB!AL?}& zn=A+IP!5YfI&S+Gv;av!w!hTtClo7%mLYH^SxXvc6^&o)-Jo|mRI^C9Ynk@*sXAkj zg60z%^JU)mDg~~gHm7#6+kzc-Jerns%yJq1Dt&w&=!xVmc|`bl!G8}=1*d|`#ccw5d^v!j41>zhG7zGPjK37m(jnq z6yiCE6W;m^TEu2hBb6rZy@z|_UQK?M#JogA`NF0ueOlZu%Q6P7FZk5L`@u-MO6#u> zRoqv{W23`Xnzj>{9Xr#>xcDN9fX=gKc)Xhebcy;66P4AV0u{rE%>ljMxg1!$ zxLqX92*1njZWE6ztnG*t!Igp-xwG8~+H3>4e4~Q%5%gLURcf{LTFl=A}Ld8r&D znm_^sfE)CiyApBBk(8-auw0N#t~-@45Gi%$Y9v3?hF)PpRX3|>fl7*S7}p{IO06y( z7|?WHs?zjUr42Ec(w;>!GiLsIu_()Auq75a2X&mi^&+~p=>nI6LsY_Z8S|ZblJCvq 
zuC;6M1Q4x-yt_{@ade`uqfc;!j$iaGRG^jTuv)%l=*@SjV1cH{0#^ z5`kK4ikeDS9&EL<>s_j@YYW!Kv$-KMC^}k{u){OC42mZH;&3IEu!$J?O6vss9Pj)r z1t&yVU?@UQK(?k^IQ*w!H>=C~=0FuHpeN`&HePqY#s{?JM$DeNF4a`};=I64M54oR zBmzdn5gE8RKLr@!KIW7$N5LCv4G_!_>l#SoW}5hung^W2!0*#66J>fRymP$#Cyxe# zhgoA2HZ|Dy(bUnU4TVNGNzrjw?e12mm7`O5UQD~!lywV1qlk@)7Tj7slhhkGEQPXG z9<7v_0=s>Z;b57C82S;Hc?FZeL*_vdlNS`gh#XAWnPj=<;Br6c3;L+GLe(aj!-Ksh z@$nNkY=Z^rr@RWq6Ts}VKr6Nf#;UbQJ&Mo1@p}CQUnS?KQGt0~Y*ax`@Z+L_=yF(> z%(h@gJj(jvvqtzVzXWFO*hFH#v00jNX=hhi=uFjrB4#+K#h`13hnT4*TojM&eYL5i z51Hk29Pi!KbD;TWaCiw=%)KZ$*xNW6uX7bu*lXM}qA+lYfW?f}!IJSyj|v`5h;sBIII==n0)Z&U zVgrko6!CVr?%;-eLM+8$@(Aq=YRfF^57!3|82JFYK%`OTdKfgFlV)D0?04S_?BQqn zlp9=@*Pw?y>Smzz;LJRp+urBd)KGW?ZG`@p_xmF^k_9FtN4gK%koU7K#%aeOJ3%lF z`Hdz-qgW|Yq3}fRLug~#rWjTs&kaf3oJ&3fz2025%N0EY-{OGpki0pdvc>6L2JCtx z>JL8bL`kPu8YY#(l#U`UD_;7;muHX?)2J!eVc`AHWH`&b*HFNfIw#a>CUy)YqPsGu z^0teEek&N8AkX$WD8*e{!(~?5on(k_k|yh2oCP(eu2$Wytw%kqHzjpzFq%SP7_neH zWLF7|EgSGyKfC`Lnox_<=dZ1MD5|1(?;Sq(X0BFB>F6Jtk9=Of$-)z|nk;Jqp5p5CEf_Ew#Rflgj13X2sHlAwsPljSYz|q5l373KD&%t~O3@6KLZw za7jlW{dj=wcXI3<^YVq3)Ge36G8G|w-+Uw+q@lYRqc(0~Dz7`XWrc}i#LEVP) zj|XMWb%}w0lLv+XBtmo-H?}ysOPoy|E9ff3z6s*}WbpXv>QM*pE+?wCkP~TS;I8tD zrOT-Dxk#doUcAAIC5f9fNs27xuSkm|@|TPle#Ybv7FSr9M+GpUu6dDiWgw9TcQqPS zf(+YSKZF1xyNV8%*RpfXF#_yES&KI{w;8_C1%{wWtB#EdeEV3!1r6mSYWbS7t;5xI zNY?xI)up5+#V;K->BZ|vC23JLsVP27r4q`%e+(o+uqy%8la@5sRxA71sBT@0nL|!^JG}_+*Jrg-XoyN<8TK=Qwe|}r_)W0WB zLeP}X4^o9z6GR=2DUH#4STNlq?bzHUt_n)V0!3nVvyi@Qe`_;JL3M8y;1<5}tl*ua z;R9i6^lu;q2ftL!;i%b|&Sm`V{F35$X~smDOjsErsJ@f1FI0FBN-jf!AfYrjJS8MY zq%*fct`0#Ma{Mvt;m+*843GN z22yyMgtzZ5eM3Da?Hn@WDDYdWM9PZHYzy^2NO+=pZvAzY%D2#7q~-DIhizj@3b^F7cep^X2h)x2QL*F5h0 zpoEbvM;M zKZ!K9r`>r@&gU2YOZw+oUFYjRbcimdAS7kx<)6C zOqR*CE&PH&Z-7Ie5GQ1Wnfl=%FN^@6=QzH4V|PBq2S}p>{j9%xyLa!tkpmJ@RDWH+ zWd>frmMP8kJ_p?QLVcLXgkXp4J7s0Kok0!bCTu0-8h&2@>EmtF-pM_)oA-PZYe~)Y z_;7;&p`{~L8|lx|;rHFc9+%#s^Ro?dD6t)-#QAvKpE2yI`(Ix~zZkgm`fAdn8M*b- zYbyXnV-eTzh<%VEaq}~;3pB^RIGW+x`xtk<-+GJ-1f_6uF{ei9hi|(=hn9l}HzJqi 
z@i&^$N+e23nHmrd@r51Cg1>%b?%g)Y-kwxtm2ZZ;;-?wR3ZEzpUHngNUae6Ac(;Xj zqcist0MPTR(B-r$lvFH9Yf!=CmvRU3j>&5JAvMum#LHtb99oMB6{9u?dDKoP|()}_)UTE z`+5p~OSHNj%6^R?oTzbVKsXO{7w~2io`Rj@b@qb6xR9b4pjkg{z`&huSun<@UT(#} zJq_6k6!+lB7S^ZJ{L5zx23W`D_@T2_Q}?-TyY_?!F~WnZx0nN{d3RUp8Od^v$-AKi zr{2e$S%S75J#q{nQJ|taN4r#RRUd7EO36Knge#-~RexVn`sYzJ;pS@xYBrd7COAe7 zW-_pdaYj*)MJ$*R%?r!YKu|wlY9Nemvt||)F~nm`Qv&e!0ymcj0|O<6f#YsNU% z_~)WVrb5&UFg;&^$V@FYaj67oZ;Q#PG1nnlEVij?i;PqK^izC{1Jr|9Vr}gKECcPq z!Ov%G9{Cl|AbQ?;l=SsG_%A>mEq7&_2XIEj=^rgA$>*uOCFbAzKCEkr^$Nspcd-ck z!qAjSYRqM=vQXs!OtrAAOF0!0P(|N>4BMprtJqCA7cge{QeRP7fiXz!$7(%5S{EZd zoA#`H@Dx3dCuaiM1FY#APq$j@=e&;|{qeY>o@AnvwGW*jgy{%9A;K~lFK+1@fg$jt zy*F0hW{!XVT$}O2#cm|@56G8guN=xHigxdg2%I2%bN|+UxxKmeX$j$m(Qj11O7si3 zii`81Y%o>up7Hj-dUQ|4n58>W4x#&|Izv<&AudD{k!bPg3pEjlXvtT25+V#-ACD-c zxw)z7k4o&*-Zz`sd3o^!nJj z)x;16&|{wrxlQrpK0Ij+idIs44*Qg=2f9mQw`C+f9znf9k5dL)XG{cv_Q&tNe!irg zr(LSqFlGK07(G-|D73}>emg5j9RqsM7_^?<(0rEap zKy6I0N$KGpu)a%*Sz3~z9|UHb7gOP2wCTx5xpF?&P>r}#I^}q+0`hs5qqxwW9=9U> zjG{m6jO+VfAev-8e{?#_p+06hi@2F2+Dg6r87n9PP(e$rPRzzn=@^Y?jyo1FegymA zQ0Q^g2^tbYF8@aKaboR0D2#p8cNb7|D>0UzGmU)}Fi5dz(08OXx`ZI=(wX)wP1pwU zhb3q^m;I>MgK`d`fic7>u!U{0=tvv#_GX}xJ8Du|hK!90-*?c`&djh!b$@7~)XaOw zYgGw83Uxy;To#-eAhR0vqq#{rw3_1^nA3cA{r|mg?~3m+f=>WyGoW?1FqS96Wpn-; zgF+I}aqXox_{=h0SuHA+#OQKUx!(m8BrNIzthy130io)wcOJl%smXC?Z5UkwY?A$a z0jgo7J^rgi0;7sOyQo{ZLVX>jDBY^?ZJnkJ&*+sJy_JYpKt^wQa(j{nKQpVuyrsbI zbXQX(|8X{-SD$qn4R3%ZAOv^(4x}@!N~5qk^5Re`C#R)lL1b6X56>af1$X(+vW29# zEyLf?B&OM1!2GRsPfPM;${OUmt*Pxkf?uc8s^EugJ(&Dt$J9ZZiuw%_BCLeDgp&oA zq38;P)}T(r{syt&bU`<_PmZ4T*-0_7o7x=J+wM*m+waR&jd+bFhJ2iaNiCk?b{?YkCLJdlZ(u*C>d*# zqm=c2pkb*B?|Z*V7}||rcR5o6MDs^()M3dY;JQR3(9LS^Vx%k2%9BPE(pmMm{%tnz z#b{2jO^2yIn1(7eHk^1$aGm_q4udzatumZ@YY`-KgH>H;e!8OD`s_DDX%XEu;HyYr zaAV;?F`WV$>n_rUVz#m2KM~8d`ytpNO!P;aRc4#O$-@I6a~n3{w}|k}8L;58U;OsY z;F4udiof+eR%XS8k`le2vyp8h+v@&vF$)Nr6O}wzZ_MB}UbL07Ci#m{!)rMP^FjX3 zkJIz@l=2*Yw^=E`Dr^t#U#+jk(E+H^mu<+K*y^HdmlyoZ&&nlMRddyB 
z|Nqj~))AKGLRPG`u~#X*q6yf6}!hgor6EIiVen)1ZPQ@klbYDOA(p=h$& zg^h?RnC=;-{{mm=8*d4}ETtHhKyruFF zMgS#UnP7c$bfb}F4nRRqbn3y}lXlscV4})v$y~W`r{vhDaeD+)DvLKYjA23?{|uLI==Xqn#l=54YEKBEi&U#@JtwC0SFXb6Dgehj`C;vY(Dq8PMg zF0bo#V|9^(f&Q50r0in@lr6$v8TxwJubLzuS)W`-`Uj2#`xY(pcKl+1bk}qByGPb_ zR#ZzgG!n+iP;x5ap;7FeUS9})|IX;E;yjjoaP-LP6(tzG0>eiQB zK3H{!Yc8BRuI-{B4j34^U`F5sj~^|YEaXZU96;?*mIu;sacMe^y_Gs*!GZO7F|FD--(_=nArVzK8~s11P_Xlt|q8}S{mvbVQodgYg19aUvf-K%zQ zAmUsp?gL|o9E3=o zrZk_p%HI!M1gDoF<~pCYwE#>NbdiNuRKJcEC9^jp=$sb7rYLQ-<8E#h;qh^_W315| zH!>*|uSa7GQ291caC13IBi;~?};T!9 zoJ5GB03z}~&;Y)6M!s)UhlJN7&9TTZF-Xq^%L%I`NiE!*l1jBu4$+Rpr0%16u21DO zP;IyCd{}U|Q|)xeRhTUq;?lxB&W9EJBLj>>rB+sZBX`x@b?_fL+5P<>T*q$i~&kBaujN z<)tYGM3g^c9Bk!EtM)t`3njTlG%Rs&t5(wSF*pnjXH=?m)g=#iBL3q}5LuLU z(_lB4y+qa8PlGELMp`oBd-7yOpi2cwntN0=Nh4oT!Cf8E0CP|ye$ORQ`u8!&%K_6J z^-k4@TJHWq@Sl+@B-vHyJYnGnqCv*cCbLeWNsMYpP(fUCh?RB{bq+G$KKU&np-GEs zAj^BRhXi7$pIi82HA&mzeBxlH>dums4(E|9hAjaA0RJb1#;EIqGtY7w70;>l9!sRZ z=;VM=Ip(FGay1}wS@>c$Ay*sI_uO+DTX%jxKAa9Zh(Q+|f7VLi14|J7uK&OtY;x4b zFVM#fab@+vXJuj5C*|rG;6}De3_H-P%XMp9w_(%E$43gcc?zo;;D~sh>N?iQ6{tKW zyNbH>031R4`k4v_l9dnN+cTcknw@Eb!ZN{;pQ1(&B&`oUjTW|D$*>CTwP>v}6UnA@LnmfErG%Roecwv{W@ZSQ63`%KNZiPBn6FX zV>+X!NzsjF3Fy$V9;r_|uov*U!a+io+1phoBleOf^H-Z%7q z!kITv#o<>S#b$fB~=H;Bw`O~P8vB2AVgT=GH%T0jpL z3G-G>(KkE))ALj!wW5W4=A47mLq~Em-lL=V|7}a?OG7$qH0rgy>P*U4$qT^_5O0HS-7rNf ze73z$@alCw=mYtvW5D2?0yj^|fIC-^>D>w&{SSuT1&Tj2j(TR^m4YS=cH0UZP^l=a z1{W(JfvgUQ&~P;U6iONn((TJs=GoWj;r`TN6jT2?K~0ZaI5C@} znxNd3=Bz9g5?4U+kL~iKL%H{Z*sXJYar(T+y1&+-QYX1Z{XzpY(Z5c;5)G8P7I%EuJln3ITeMPimdHD1UN4oxExWg^EXDB2<7wDZUY2-WvW`dV~6?;bFMSU zpA~F6l%5)H)ib=%@wOT;p=EQltsGYw;G1|bB11bx1b+GQdAUH)>6NB3?upB+Y{_d5 zI({01zEWEGCFx9&_jb&4y(qWnB%{k1btNlT(9FYF*ewc$V|FwqUq`F|NEJGFyjbr< z5`yWMvjPp6CITs0CG@$a?18+zuVaS!bFSW4kS)EY@OuP$sU#%J}iiPx<$^sWZqzfk}Al22dx7fFo)HI8f2N)(erCBbI12(vj!c^LG%r#Ifv( zySeh2>3A5Xj_#8FCpzGsqA(un;X@g9Xib#~60tspWJUoU827HKe!-{X{^+r}mU0%9 zlL4uCpM5Z!;|`PDund~vIKqaifpcshoHloVoZEAtnyK?*oQZv##}mhIF3L0UPSAnW 
z>}(Ssl#1MG2C)AJoAW{j7&k<(MC#*ito-rO0);WmTAs8nmekD>trV3O6Tj3thb`F` zi_wn(h@^6unF?ose>1H7J``_6X)*yYRm@uKD7Hz}s~SLv53{B5VKsT8trVBw64(55 zy_Kz5&t9v%N``OZz>?Kv@+9qwZhhi2xL<=oHF?=kxz-mJZ0EVK2k58T#+Kq6KQZp^ z>Tv`=vU#9z+iHij3x4x*XM;q-xZoJ0S_Q-+X5}RR*~PSl$brhJX17^WZn)>iRbw^Xl56jb z-~A>t9$`ocT58cn%5AFVxtv@8hr~3Y9sSJ%uSRTc)+eUQTb7CiZ&3LJ=b`-Pxlbha z4(|mT$b=RklSEk){bFyU$KQ)15mt~l%P_io0p>|yB6eTrB8vjofN63n_T~nJGFczl z!~x{EUd>OK$(kRn`}y#O0*8P+-L#^oUUOdzJjB0L^PkiTSsG~GX#ZEfBiwg}cs@HiV~~Gr*&N`0k>VBJ3z}E4SwC-XHUxCT zT3FkFHlcIhvt+p=6%!%FC5}o_5u8C>iojuNl?iy8n7F4d<14vPb^86_)5_cv`D^|% z=;JyXt9@B@KnO~z>>i6ThODZPS2crE8!C=Eba95;t)-vmDirvrS8A zpvjdD-Od7-ye{1#8Z1pLf11FgDul~83h>>|n%0>gh!@1%){`h0!xNt5T2LK^&!`X> zjHW#s{aga<@ihMBmA#n)XmD2Sa;V`J=I7qJ(^mfR#()Q+O8b4a>@HgB8*rEnL2X* z@?Oq%$8TbCB?o;yL||frvBADkwh=_AU`FvYJ34hzL6Nl=y17 zBh}*mp9yhwv<^Zzo>JX<{k{m3g>auGyOHg-7svDS=NcPaD?h_504U4Y!O|&m990=64^5{b0}mK8}e8w*U^I ziadtY07||&?~wpxF9#g`OF*2jX|2BQO4O@na>n`$sOrp;moE>Tx#gH_U~JMiag4lo zV_!JkeV2(WpI65ymh-zrI*Mf+FaY&~6jF}e0(AA}3WKRhvF;WBrQ`f!9&grcv5oxa zCZ9%PYPHBK39KqBV&NVWN^z1*b!B!<1CwQ?d?G7!cn!a5JVxI9Ch7M1xzPYTQNkfV z`>=ArDfwR4Pb07AfVHwzA+y#4la(clZ+(+8k|ISzOO9;+Ij z>>jLTisn$PW-LS}&-Uk(JnFe3a+4d2=ps^J1dkKu4YkeTKNCH)u9V!n!-g|MC*;R7 z^7t!Q10j59l<=t58C@9~DFsz2tps%4wqM6$?hZI0$lvr8iFDo_m((D=>;LFuxnlH<;OyC?%=RoQU)W;ttZSALgHT8Wt<>=D1%V;KB! 
z!yyuBQLmH=MO!VK$Uy=`Wk5PcFtFro-9NFZX(|9b+yFI2CjuO&>5K^eTmD$94tsMV z-nKszJ*JBh{_3F`TJXNe3%nd-Ba&RCr{xge=LVbYx7}VQz##~Mb_=Qg>+Pg!K#!kM zUl9LHS(|HDm8&WEPqbrwl9vbRw43Z0=w7(FL8YHqW=W2w-Vw|twXo~T;jynbi6_{o zhqax=Bj#iK;<6U-4AGGWCB2@3%i=TxuOT-#uYoTb&Gyi+tjdD-uF*pSn)|*FAE^<{ zh*#aEftnNouPjVpg8U;D0pM9%I;Yb4o-k>&V<#5|aAJ{m87eOlwSY&0gyVso*r*OZ zPqmWZgyRQMFkqjE6h`kKYOMU$u}7x4Ur=VE(6RA0Bi-Rt;lFM^^I|e9 ztT&AZ57~h;ZcL(OgyU~Ik2OxYyT!B9W>MIn;x@-jn6M7vORuW;FuupvJABanPcyH* z6M1jugt?%;0N_wKg;vbd)sFAqi&u&K{o9!8fZ9;6hY2_zKqy?159{_%ZI4mpuly*% zL(ek1@N?*>A+^K@W0EM_iB6;b>lmg%MT)vm( zFeZeg4iNZ>p~*WS>uZ5QpF9AsKw^Ca+o$U5mi;rlEUAOVvvnu$jW42EJ;~WAfSpwz zz2~LQses96CJgeUm4Ea?=ev$66%Zj2z$C{K&A@8;s1*K01GOA8tiZ{EJpRMJ)R#fU z4=W0-#*+?&xwn~Rq51oPfv`R@o?3iVN1)ZgUSs2~mDsYZ`bIE(deB(5O z$qe#|gCNr=h=F7*jRoXjgm-<}u_Z*=95Ip>3u02CsgZRVx-Dx^o7iB^Zx}J9@fy_twxk1Pc}s7wJ%}SycKqD?a3Y{u}LmLcq9t| z#C2p;+L~i%_ceB_jH51%Hx53G>&x1-QGoB7l{UHbi-q!Zu-v>NvJH5*Q*xON8=(qn z<$o3Uny&*|s>##9e1aecvANt@QXARbQzBsB=Hh?zEKd#$Z~58**oom1YzQ3K7gy`z+PgJ-WV%7)< z-#$Sx8>`n zOaVjUR$n#t=yb}+wBf@if>8TsqhBOa!tJpE-`~T`Z~ciXJRJ$kTJS+_sB%V6Hyuh; zmbx?T-0>i3t}_=1u>M)MSeWZ%M=%Yt9k!`Zreey%m2Wn*$(&6~{h%Bs**U$c7?1Xz zG3E_st$@!aN6^AdX;gDS$ffPPN}pz?6X)6F`TsW-Yb%hhV$cUk%_jsc-O#*+#(d&) zqB>KNfM!z0r|T?7n5*DD!^GbBo}}+1n>fisDpVbO9mI4o*#0j2eL=2gbTN`_}on4n-Y?iup`5Ge5arCL0Gf2j$(P7 z-aB?$K)N?%z(++Z$-`=?UMT>sGiyU49?EVjR|6%LW3U2Wn>SYV>t44sVsj{d?g~|9 zVasWA_*6S~ObZOXBCp69!3qs0(hI`Wvkt(kq3}ITt-myvM~`Pl`{1eHQTh_Hs}H^7 z{4SDK36#I-l#tY1&c6?0doa;4qQ)%pVd8?h{0#t-GFni6E!~c?(0#FL3``M{>aaI@ zJ&Qm#0ig6I5eDstkU=SQZjm&dq8?!+6~g8e(D2Sn)`eB485(_!=xCwJkSf~BBb_5z ze3{rDDjOg_y5ChHH=?l$?15TS6Q{H~eV3H#=Ns`e%yiE@tu}rA`90DJ5`dEKdC%UV zLeNFj&m*l9I{OUl7t4K)M<&lR<=B47(EVJVu1x{E^?85if0BTMP*1j!mO3 z)%8Z!e$&(?W)sf`n_Blv4eQpFIG)YE7~>g70Ipvsyt*^~U}eDo6usD^ zcGUiz?>33R0Zr`4MG^0~L9k0-i>(oL1qZO_`_XcH#VSWb<$G7jB%dd5&T6?%bqT*@ zad{ZY!-$yYg`&hT+He9I$*j`FSb;k0hN7pMPGNEK5)KJW5q^MO<8VZR+V+%PXr6`1 zHu($iJ}g2Y;Qg2+G0`2&X0-`QEOp}NJ-88%4oWew+o!x40vQeEJYRg}*B?*_*~j*> 
zl{=ik7bCKU8?q9W%jign6$Hsmry~x{`1}tYX9%`NQ-2$v3;O}yXNSs(Oy(Ky*2nYI z{izCZEYHXr6OjhBk<09SuGfX=x^evt z^#J+@@wW1?F?X*}Nh0?&DZmU(uuYL@R%&fYjnL+Y0$w2e9!^VGLp50$9GfzW;84H% zWUpMOZ;`NuNU0-Xkta^-0{Rx(bh8S2^mt3UIO1tN5bGmS0(gy#p2ZNE68DGoO-G33 zIpl`0q~WvnM)po+K;}@U1pDZUZHhdvqi|^a@K$Z`T7v^DWpb_{#Fj;2dH+TxqM8%d$@I z?7QRcFfeq1SbLs-yQ52wii1_pjYOm_!QXbc46!4^&}xh}?ta$hhoKW~(s7fy9UKcs zU4LSz7PT!=xx00zSlg~q78RiktZql^F*aLIOTC-#rQ2mu2;?6*QH3qcENm6_!a<@3 zd;gm5H+FX4S>6TwAB6&Zc3nv^^=G>$L3+Q%A!LjS8RyGeQ=Qati{B1Vp+z2$BCL+MAf=w!91=Ic|)noxIZ9Qa5+xMLr z80Qi1N;+bsDDTy&+(a7*3#%g(H{;otYzcawqv*gcgr<_4JUbf8a9aK5j#yAAAzMH& z(~03U4S9n=z{3DPE~X?kv4G9bkyxYMS-!aEGaGlH zj&PoPOX7>6j$o*i`-gM!Fx@ZaCnYgEKd;{9IpN6)~@ zC3YN;a~-$u;JxA?rCkP63GV+flLeBsaTG@yC^d%9U90g8WjOfKC-C&Gaige(_YCS- z&yF@9<7XdU)}6+yHWI`7YxwNY^(+aa5amP7Y{4QNOD+J;bkQmP7@GWwDM@x;;I0|} z9~okzg_95dBT@1cd#Yau67+?d&+fNdFn)Q)ZshI0s3ZL`%Uwy}!7AIwr|1uV>TXQy zD1I+7VFq#r8cC2oWHW46tSvmWtEJhc67aJ~Z~5H+m;sh4^vlr1>qEtNnVn*Ee7DlP zF_%ArrHUSKQ!17qZIGp6okC;bU3bZ6-(d@Njz7z{Cm_xM!V`mOtzU7$tfdMG_y5*8 zRPkQ9&e)?veyQRUUf=ml!lj^!zWFo$1_-)3RswyFXp&Qg7ENvX@ck zGnwG1GW<-9-wGiw*;K&+C62UoOestT=@KiSSy=WBo2O+WN`~nhyXt~gOPwzMuGIr0 z;@z7GOxnI6(5*9u6cdv%@1D1wq?x#3r}n!{<1O)oRMIQH3|!W5$H__!xBEdi3H=Ta z9c6w=BspPK9&$XQyst4r=+z{X$Vb-OPO7S`wSz+DAK_;2Q{kD?`hZbYji^xw$^K6? 
z*n39Sj-&c{kF}qhFII!vNJ8%hJ5rfP_JK{NS)t>$G!6};n~-6z=O9Bw;k6|;BzC`s z4@mij;{V_bR#YJE(&%R)NZ&uV;6EQ54pe49To-}k;#{Ze6^Fr`uvA%+|5bAMP6X~n zHVCE9DcyIDn4hpe(>x1yL3<0p9-{}0=8PM;;cf9)C_Jp$w%5To5HkZ1qZ+@%N&L6d z_$?-q0U7C?!oI94u2>e9k1@b$y40}hp!Vp@%vW#awjN*m6&l!_?j&_mhSy?C11U2Y zJWUuQeLV6CFdWfpPRDeQ_#1(BSR;FuUS4F#l%2?GE!E^a~wo6S0-kO=eR ztQ6uvPYqZh0)qS^;dL>!I+EZt?EGE^#6P0h6pl_KDXgOID>-F`q|#ulav&rooP=rp zD8BZuwcC3TYkm!LGuZew(?W>=ELo5moUxRn8+<*GB~klowemSoy;U0~#q4>RDDP5P z`e@M4F5D6WN7wZ4L3~c(-Hm_X2{}X7J!w?1H~V`lDdUyAfGj-O-!%y3U@YZGotV6v zZ*ow;=aONuQKbRXFkN*JgbWg&KHt2zlhc=mns-utYP^V;W`|%4 zHVR6~aea`-gn&SMPb&?53=YZ^`r8)TQ-f)9)!nN6s|RT`mGVXwLeK&lHr6dZuM3M< zy-X2X^l*0V6dm%W)#W!Vj%8&&ZJB(8Wp~LWZnFs+KfoZQdlt~A#wA+;$BXM zs`oS7GK+%woyDA6{&k;{FD{j(So91#xvH(|x&+i67cH(UK)L252&Yz+`(KfF%GyXQHe!27c#hq8yHR`( z5M=oxvKzv7NO7F8o_0W588sl_yQk;1vSqWf+-+#LDn^8tWpBi;7t-X!#mcGyPJ&;B zF`QFreME3gXb+j&3FU`Z@~HKxaB;}!!y@qKyRl?h4dR84^BWo<{&z+PI0(DoLQ@Z= zjkOfN(`%(#REUwbt3b^rU-)F!j?}C9E5Wb2XAq@mp}xyk&c?fkcE8MZ<&azWJC%S* z?l~5?3LBIXyXx~q<0%yFbaq&oJ=|c=2sIO%Ww(FPk6U?YrxH!FF{#Xp%ZQS@dv1fp zJm(A&*hjF`2>Gl7Yaz6Vu&YI3D5YnV5Y+>JqgNmHG`f|iPiK%|VR))I{Ky}v(+LaD zDIuJefOi1{LoqS1*|yKIj0`zR8{`16`!1MBwOOl~Oi#@c9sVcSWsDs#M*3bw_--A? 
z(89DtA?uq`-nmWbg_vZD>AaJUng5@6E_hWeuwuOV^%xEt+^`h-qw`8HPz0eOap{`N*zBH$ zYG;gXrbNjA77nqz+Y;)1a+|WIwQwdW)-$@s$nhZvdWW!dB&SqgDoGhZQ{_fq{~TC7 zI``1}Kw_41i2^tz?5IIgVRl)|MpJxjR^-<4>|4jHnB>lax3)9E5aHD)5cE~5qx1cU z!;SXmdt{?qjHQMyI*p76+P|EuLYTW{eNec(#c3)B+L)$-lu zGuy4%QthLXQ9Mv4aS>&615I7Jv;$)`V$U)Jd#fw^S0OPAsLH-%_89r z@ARH`XjKpOeK|x)kxMo+e2KGJcIEYvFVD%dtW|Im{c_= z@K%QsEvRN5-y!Eoa1aqwJ>nRSlRAqLgcmRnEWsFRIR+(PY~udToKr_9=!<@%sL?M zLcFs*wO_LcW}G!dxJo7wGVeS(d2#phCO#HLx%(tu+(EII`hO(vT;0&X+mrg#miXb^ z{pNMh#lRMKNo`MRc-Clln2#2-%S_#yp*9whLqe{=8b@ID%;3^FXso90j%7#$D}y@+ z;Vlrb2-z-P5rtS?q?h$9e2dCcbH?ZvUvO|vLc%Ux;6&R1W z*n>(0yYOS(WO|V zR|7C{A_k4+Yf7@ot0v6l!U@YvPqZs+JjJ{Y{}h(gARA>b%4t-LkBuNuFz_G6!~#ml z+6+{p*uykkNq^>6v)!nW!JHacv_8DSliEB?ruRzy$P)hv&9YH0X`xTt{OK;GTti#5u+}KaCY}>M9LlV)Ka?jXJ%*2(Hk(^MAM5m#gjEd{D=up=(@&vthL8@0@CYO* zuQ26u%0QHd-V?*Tn)Z0cy_SU#Dfd$D^HE$;dV#jW+u5hGV2OPO^=rIKO*MS_e)4)c?<%`A6!4W7QM=}G$C?+JeO z@TL#xp1BfAl#OSqVzv6x8bSc&tlt!^+sRv8n1v8L1eaB3qS#GsXrmrb4Bs{2n)vMF)PMiEvCE^TRwn|QPG<2>Gd{pVYZYponqDoX zwKJFp%N_dtlq#_KNyaL}PDu=r5O6=EJ!p8fG6j{43pYxKJaj>h?=>!wao zQHQwChdt0Cr90;_3|w1G)CV0`UDA^o+@de7NDuJGsb{Y`oPc zrX6>U(h4L)(F8owS#QsGdjm_)dVn^w0$h=oL0B~F#@$=xcyLGRXN9zPdk%5T>QESH za0aa<=ERG2c#Q&i&ru9~6Y2j8ykTeb%)dAD>&30{K+Jl-FE;Psf-@Q!hi77XzA46L9sA8R3Z+}J2hN51%T0;N>7 zsM{3<=fWNkN68R>rj({LZwBLPE7brJBG{rbmHm!!xI0ZTmzzsnO%N~>$f%~X7sUho z!s?3C+5+MZAMAQbI-bXCMs=Ljx`iAXr0q(2GVdDD`jW5BuzFUMxII2$mgd+g;X7=i zC@(=g&2KL4xMu=YW&gHFG0)$t4_<4Y~E6W@?E0-ta<*|0+^@g{MeePBORY0o0+>u0q zdto=~64}4*1|ZD#NT|H#Q%Li&9d62Fd(F!I3L~Q==1HK~^sN*MCBtySD35OB!3YDB zoswh?meg)V4#DK(e5eop2juakH0X?4{s%hjUgkUjAP|RB<&r@lKF|DQ-i$_oe{bB~ z7F}Mn1_}?4nS$mhh2q0&AiEiKqtRz&>?nJ2!MSYj318~MvC>W3gRzFZ>C@i($=YAT zR%bdy_W8)(j-Afm7WN({_W-$0ZawkwOR9SqNr0&4TBw+!B&N)_32#m1B0L)2P$i_Y$m%4r8WM!d^(orQOQwDNSiqoIIz&A;N`31BTEQKTLW!ni&1pdT;Zd8`oTJpBKBnCNj}h*|N5 z2hH2BE&{KLI{^N%mJmWw;1RYVuKc47VX%s6GQNXp$F}RzLw~P#{e`PL{Z8hGrXPQI zqtMt3D$Y)Qctu!ze3j|nC2tO4VLv1&QD+bm$^*j_KMcnA-r%TpF){wPEiS)kWCme2 
z5gry}tFO7jy*sjWf<--W&hi08#+fiLKyCUWsHm>QqKp9`_F_veCg!*Ge~L8d@J^X> z1RQX9?dBssw3ZnajqcL+LmYZ+r_&zb;X1IkITA8i4m`hw9EmdzJ`H(~%%W(Kw>_lP z)GW#|{g0NjHEji=WWWYdE=<|F>1r*RS&kDw(I5W%ZC!=pagK+K@_*UU2-YhMn-^nh zWa{c^|1sBZd{^NXZVEL9Iwaqp#ZAa*kX(_1V+W(Uyp)!Ma~tA7wipnap2K4Op(N3E zH^ZeDA%WPVpH&QWvnE>FbFz~jL7(ZYJT1!2`IF&3H7$bq?tfwOLO-Bk3!CP5X^4vN zk+U?I+4$P?whO5XbIm@6qhagwP@&rb{)nNVM^j{R2=T3g`05|_rENPmm6Yv0Twv1( zoR^;P*D{$r#J$FAiR#bLG@1KWu$Crn`1-fe_PaXQoXCiQO}=iz}Syml4>2G z(Hh66NxF(M#xJ$=_QvE(WRWjp5y@w9RpKLz6%W@iMWPjClq@KhGI;+Y8LM^r^h5Xn zJGzeuvQ<%Lm;N2LSA?!puz$BVXqPaU$wgSIZ7G-07uWTR6AZCG+ra=cT1m^5{UJ$p zjKG(piS|Bi{HOSCW=x-ek0L*$&39ap$db$ss!9dg z;iRV(Wbg}%xW4l_`K*pa2S`CYA0yU@G$`H$Ffi6Hz0ztsdNA=$Ms{QmPzX941BeG< z3CX*@%^l!85(Zm{=fr#_LQ~1sR`T_zzQ;K$?ZN@4D0LSV~;IFO;QSwt0DYpTa9L7Y5;Ghg*KMWk*Nuicvpd0oqMGI+4;DTqN0t#tl=z zUTOWNd^B_X%&1rAO}ElyIv94ub?*3uG{^~Kg7)rL^M{WhQ<&w=XHamGI_k`%QglqN zmySwsad&aEDi3#j1W`x=y0n4#L+VrEu*jgdfZrOZ(jRo zd~=>HFnZ@J)ee6f)WO}3!Ys9IT5{W7g?oo7t>$zs73+C)(tOTJ=?Fj`Ba(zEhVJgC9?LJDUi8XjZtZE?XN{@8}p;3|6+7sj1!Oib< z98&Ow7SxolSEm#2*v4;AWJ$5+g>^NM<&a#Y!BOM8IQtG-dYjXA`^K&MKB4{-q>^-Z z8wl^F9AKb@c<=26#8 zyQ=};9WUSE4qWmt@4hEkw}{VC*SyXsKi=jZ1Zi4u?s0Q6YQGWOhg%AtTg@FAfZi`* zQV6&H-PoW$NxOsUKRZmW3@&_pBDQM@V_d`6pj_CaUjqC~k@Dsu{}MI<48Ep*C#Hq)RTiYk`jd7q z-1h2|Ur7HvJ^&mUNLqame`)=twnVS_*XK=0UNlkue78mjD5IgozG*-vifgF*nUhG7 zmC$*YNASFO`nTN)uN|F{6m{s_$2jIp1Bh>HKT@yC>%VBLJ+8nv<|)btj5c;{%@^k+ zt82<#>MfJLflxW92NE9ewDMDBw6OJ^Zr;Dm5doekYKtF$^$Woe8v?bK@FD+s7>2mp z!-8-6bu8`+ZN{vBvJpLe^Pc*tF+xhR`(Le1-$t~GNy2348zc(J9JQlVRW;<_hEdd_KEz&ZXo7vr#BB=bspiF`sb|MW7B zBe&*Tf-mAiUpkzyIM8O;BmCIOqyeg+<`MEDvns}go7y-CQ=+!$0B!&3SD8Y%6>1Iw z9BJtx3`TWvJ-?4MbQTp|m|cZ|MexWr0l-O2&N&T88^dWb@fQ0HEk&w9-ph4BgykQ% z<&C^6pt2xL{3)UXvPK!OtmABgVR%#kMTb~6N(jg48dYPC2R@NL^{)^EqNk6Svc#03 zd7M>kIlJZ=TIvGyVY%sC;)hC_F$Jjmy`3Lw z1AJ7~%_m9;^airW3iJ}$YD%18^aBDNKf{=8UWPwDwM9Z%IU99EWi)FWE%+@w^3IWR z0KRtrc{h~`j@_zm#&Z$HvALj5)mQt!nCI_CmRx#}^~^o@&qLn=hHAEliTzZ$=V*#J zEkd2B!nS9UM^1DyV;Xq+HX8MA+MdX3p>s8wk~Lo~(L!RfFO@VH+)TPR_JbsDgj$2m 
zavWWcdNBr1fXo09??z!ExW>w$<=v8vN$Vta z#nCXs(dSPGk8z^+?6av~Ads?Tf+@W}i)$KZD!lMppoWG%C*=LZ)*^$pt4g^tGx=k= zIl%Lq6Yw}?%;??HfJatlEo(PClJ1UQr3aHivgk7~SvMizlj6~cmX>tsZ#+-Jx4fC$kR?CDB2X|W`e!r@X+8O;;)+Hf*XhOm7i}x`L{~i6a^YrIZZsb* zh|5A>AqZa>CmHXQn6u4yC_248HR^WIh`77Dt+Ni9VyEt0iqGH@)ss~I?KgZp!#86y zcH;S+RB_g5O{Hse3sy4l#c}LdTB>KEmU!b)Jbw0eP}(AZu22UV_;pHwM6^F>iK2^yt11KaKLx@)k!+fE_Z3% zl7*#R*r_nRU-pigRFYOe6~s*!LndtOb1`ZDvdF+b+aEDxQwS;&f!(QkGTo&@^7^@I zDN&hh8M*1FKr#!LHc-DbVDjt?nmd(ceDb=Mj3x`W4ptP2hQ&&m9)i+O(h@`A9&q9S z4Xw}ayEE`7{!<$nCR2&uJ2kGGr&8?~XhoB1uGn}RL<_26h9=EY)gqhAX1FPSh-0Sc z!A4lRzqf^TpoMuQ+5puWsV7+#XI#aEw;{2w92DV7GLKsQv+tqKpjfd_sdeyX;||qf z3Xy?jfYSn<2G&`Nq%ZSJrPyb=hKh6#s&?B#FiOk~KYu@<4)lEA?E}cl{wAt=fu+C; zryJXV9C5n}_rPtt2~12R?@~RnTZhbo!Kx60%VEHaiNR$fq`rilIdbSMmJGnM*@cQO zXb-O+PguTkVK+XO0O2x7<|u$M&Y*bdpP%cs>0PQu*O4N65k}KY{}DO?Gf^wG+IY;d z1zBImmd1atYzc@1jrhNAzPC1?RWmWXWp=eqb2i&?k@-M#b=Iu|!F$a=>XL_h~k*XxkBT@M#~$rlcKWvQmw> z=s0H5C_khQ;n8}p%KDxSPe=_4`-EA6j-zM{C+7GcGO#+n0%L?IHUp3!?r;381`iWz zMS{Fz-{eDu)|ck{*jjuUzuyY$#N^cY^f{9sr@RIj(~jD(^VZ)UAOBIG9F<0t^5X0! 
z1t+FquEcMqQ6$|Ok$@YQAAJL;*CXVp-=>-VOy82HBsfk8ag?a)A>|K%Y znhjmWf?oNGK66r8AiQbv5Pr@&#uE2o>Ob>Bb~I138A|3O|Z$?go2QaG9{yOS2aC7)owyo zbnp78jbIA6tTVit_u5`W1P;ed9XH~5u=$qlV0N{dUSoY-wINF+c-Y|oXbF0J*ZDcd zru8^I4T1QS${biVeIh)RT>y^a!=u$jSK|<-ei;^KJ?`{qDnL*)DYjZ$f2x5-%;q1~ zYgPjTAoJ!aaZwhq1@ZWfF4SopuUfbaNO{JrV#(T<`ak>T>+nZ6Sti2RpsH1kvho=C zeF7#Bo%gz@Ejh=ic7C?S_7S5!j4{KBm*8ujBRKhQvEdr`80tv$HJ}P0UK(K{^N^OH zPugrtKb?8KiyVXWKUnvbW4^2m$*%|NCZZ~Ren_g5p;#c+bk4YYaf)ae92VdPBg?)u z}wULAlhxYpd*r zlNjWEKSYSirZ$}q#t`~RgITpf6?~5W>r|+3Q8BigwZ}C(bBZ7WbzMU8fHBZ9zKT5( z_Ns9fish2(n;3X>e*#+n+P~HLIkU^SI-7pEgmXX0d?VT~4XH4oCq5O>Wa|DFETKA+i#%%3>^=L&7E<&q4MxOqL*s zC!6qWLe=$f=5EAi__G7FpU@HeA`N#-A>%X)cc37X-&l)=!KH2WY`eA9{k|m|K0=dx zg(PMfBcw`Rpy~KUO)-a`Cw%hxVjGWVSfWm1z8K6k45SXb@^WGvS`So$!*7VpH-+@U zPnj8MmH)wkR_LII9l|-NBFwF{8rGjv#tftA8iFf0Bnswoi=-y=vEjyYZp~!?Hs7gh z*gvOv`o*J&Ti9mQyl7(*+{PV!rMd$+4=m;Rr=zku*uY@qV{LfG0iBoFdhxXeX;UO` z!YjocJ8x;t->KoZCM^5>2!F3{PF8#fA}hTfcixaxe?oYy`a-Dl1FGg&yx_7<^|=1( zqqQMxue|yYu}f9~8I3sOvrhrV*1Kz@a3f4Yd2cAb;H9w1Ar1XLNSH#nROLP{r_P(E zw>T5%NqqW?aJQC1B9|a@-EP9BQNw*bc(PE7Ft0RWY}#miAR z3-na;yVh@)EN`Y#9P4hOk9Xk}?jDoi_U$})9FxWw%wnJ4EEMg#SLk=5%6fDalJqQ1 z={!m8V08V?nfH2`sf>oXOpbJWiY2`TrtuYfcqZj2W_d7=7_ol17ak#CZr%7k^Sa3c zfU&~FendxaUV&9}5mGWRT=t-v5W8e-16`NM_}7TNT;iv-wrdg8Y7SYI$*xWttH|N} z8ZPt1Ojf`ltbTTf`Ii?js(pFz<;ySuVa}pe4tpIi^^2n|@8f7-tMDl6m(rXvmLG8~ zPVg2^{_$ddvuhGCVMdW{^XWjx6&OYJqeN+sF>t=`3h?0tbX70?fa)7K`uV6 z-l-Tht0Ahnh|^3$tRg0HWsLcrxkE=QYXujqx)@CZF%b}8*Q;4ls&;g(oobW2&>8Es zHK6pHV(rSM3jY3YhDi{c5yhFgGN$uF&Y6t>a$MV>ESJrZ9CiNb;j~DscLgU?S@@|6 zfH8xQt+CC>V97JBB3?r7e1CqN$FJCaT}H~uayh4Yv(O;>6g}i?s5OZgfSQUK!Z_F3 zc@0C*Sp_m@<^)wHA-VcdJxAF;n-cVrCBXf{ys-v3Va))hjZmQv3Vns7-(&X1pE%rl zUzk2-7_q9E2XCr1{Tswoxm3gLrOGrOjZd<|$~Gc{)r;E6j+u?zr3eR`(od$uo}mp& zbnexaWFXkc{^-xqOebJ*LHouZ$LB|!!kJ7ysqP~vDAx|=wnb%az{K9XIW|tJ?=*s= z+UmRVl|35sKkN}*14Yky8VRDeEybD5YWq|Nt}joFcP zd4~;wj(>z4)kT0aOsk|tSZyGfJ3WZ}!8u<$lh6Ng5ZU_Vam}sO{356>8D-h|DX}{s zx%fFe{}I4cAV`P>MGqr#u)x%dz)G7Klv?9kUu6>El;5})pS|T^4Oq3anWE?lLT5>6 
z-LGJ7T_LEYE@bq&Hl}fARtwVd+jM?smFR!*aoHL89H|84eo4B16+khlWBPsaF@du- zlva&tG5FbZp|&$sO8lOTqKxMaQB>TULe&GBYmC6~CF5ho9GkL+Z$;c+4BTF4T<}n& znWf}Fdh#Y*^gam{^+PXbnAmUz+F_jxcYKazcAP$XJRDYl{v%5&bXJiuJI89tt@qD^ zfJw?X2iVE|WWnWF0FK$&?5=2ZjYg7$&ur1K62!QF&Aa;YQXY6FT1~~Xa5*uo?UCL7 zk6QIPw?=521{vy~31*8#l;(!bHbELcfZh|9J0!w^=DF~4#lboZw%i{JZhN^5S08Gt?Z!s`(2>np(e>67f%Gb4=M+`d)@A@j%jh|Xt#BPtCG zX!DjR?3pz;*~xJ86SwZBNj)(tpzXi_tCM8WX0(Pe zrcs#*Lh7*ld$gyZ+;NZs>dU@TJ5_};-!IiNYkKH=3=3PgSG-?|MtDi9b8eM60%GTz z^)&TS%x5YS6n>8Y%PW)671*mTYcvbO4B5VMnVc^PlTC%#6$_;!}ncLh<}9mKEV#rH?q(foK~+F_v| z2^l>691sHKS%La7A*29GG?HQA3)n^WbHYx-%690M)pyKZ$W^@!!pew5{T8<^`z|d= z0KXG3)JBL}!9N<}m}6Y2*P?;m)l(lqPyqN{3n0duNuZS#WSokE&0|Y~ucfS+xeZoX ztxs~SpV8mbO)F2Kcz&7p%|=B#82%A*2%V9tR9~b0%5CoXF6ecWytg%>6sZX@3$)8} z$QwbgJpiNvKsA2th*8!Gke|ur(XoDD$ZuX=lVF~olrS}~EIzy~L~>JxI7b=6(*K}q z>*k>roZ#loqD61tm)==d94FpVnZQ0T$XCXUD zWGEb`2Ud=hf%1a&OcjhTLJny6r)*8a^-QEV=R@7YMtoJz!3Orzsj zxI?1&#H_OA1jjU{>kbQ}EEnC@-{T0*Bb3vsUHN*u_pH062Jx_^x=Da1q@m-EKOrkL z#SMDYJJZxJ*awIUO?a6jg2fGM`)VEMY-!Z)2L6r>2$Hwlcxd=db`OZj0yq|RH`1sr zmC)T9)8!zD5Q0>!#c+xVlwgk<%)s^R;Z)@`QeU?;RbhBIwVVl(NN_##y4(bjtqkDS zLyln7+weJl0PHoe${}s^*Oz~=xhsv9VyX%JQi2CBTjljiT0m>` zJVazERIuGg;#mSFw0`qCVw;A9hi3wGhh%buf7hLtD~fb=$7DgV&A}S@f8cxF^1JB1 zx#n%uLhzyRK3K;JylVR?3Vw3_S=G13O$zj<=#-Mg?%v)+9iKAS!clT``42l4thZtp&pp{ZkzjD5rl+{adR+Ld#_0Circq*IDk-n1I;0ABn z_w|mj71-@Op6FHu{rASAACR)S#7P=;`Gmg#;N`iFW^sC82BAY7;23?7)0WVg`0%+; zQ?(YZ+ZeWy8SGrSF36vZePx?|O_kWJ%Zj37vn2@dD``5gTg-*+>ahd#yToA%_e30% zR67E`-D%=cbyD5*<7>}c8d7Bg`Qrgw4DQQ*Dh1k z_N7POsjCR zBg}mUP?Fw{9^5rG4n!`%xIX9K3_t`*nxc2#z06364*>W=3^giSkan!5)*qwz@|y^y zMM1dY$TMrL8vfEzl&vNXOzgCR6O2!llU{Rs{(kKu#G*saM-_V6%6vhQq+1aw_?4^$ z=Y^5D@*+=ZzwfS_5wDE#YFEZKaoe|-OTFq zK;9)T1u{$UF~|2buJY)NtZmgt+sMPyt-)>^a6roCedp?}yK7b<JnYE0rgm%2s!zJ z5Sv!c)JvAFEs}#((C~|&uyBQxsAX0KRAG!{*JiADgSm)%t(SOqQyVCh(r8 zAkdj5tQCPp+9y}3Cp!yZD$wwrJ>&kciiLKQROKn$s@puuv#ew7Qa{`E_FD@Pib`fx z|Nm(`(&(Eep6>AU{(ApR|3R#NYywRE2no6`d1VL+e$?EmasT+r5j;*~PUiP}T)z@Y 
zo#^QNvuiZSE^Dnh<>qxMomATYN#blTv*@oO5kAwpQ@?GN>)-YT$}gLxj}?AsaLwHZ zfK_;`U}LfQNjJ4_!#zE7%FL5zuD?>c0=D4lDtz(CB66Gxfi)s%20dt zuJBQ8#}tUhAtbrX5vGj=Y4nLw*$yl+H$aejN6$HYPiF|=;40zNQ4oaz1S`%HiL~t1 z#wi@1!yh$=gbsgF>pXm^o~WW6|56=YmZ2NMD{qoe)!QH9UUMMxy0!;e8& z#y?CgWCyHk8o`2$%*X!<>N?}&SY_l3l6-U=nUs-+BLs%W$JEiA_LAL?0z&_ONEx_^ zt6l(MTS1!Vl917bF#Bo9KsQ1bXh`hFh*StrefHCT+y~mk10`&RIq3*yz*k=f*)}~9 z)}bI{ayi)tIZA4ry-}$sA*93`%e>uxKZXS`^#FE8xWcG9=66!bOJq&%e{864t~Ta0 zaUw+;#GxbXG^i3d)Fb2jscu23Qu@zkZtnln-IjvB8bZ{Te2#TRQ)ZQ-QWY9oh^@vy z^gQ8qQr@f9p->K;-8}fSN41V_lONU zE~`fO<@FII%25LS1>Vl%i9ay#2;etaSyPp7uoYrUmJb0W{L%PpOdiycou&SGCzo}% zSR6A>bL^gyU!ioh@3&_5+^XldN5rG)45qxZRUAhcpQ`eD+q1C{Gi3~`sve@|J_nqA zu)T1TcuGOcFX}x*Sz8rE#OxX<_|}BsxcEQq1lDv-fXZm%IbQ*gapd7iC&*Dw30)yl z?Q?|MIgi?a3RK8gb@0NE^D_!$>xs7%^*g!mrt#yZv?5>)MUJVegn)(}9Ej652Etm_ zQ@v+nyl~|H)#}+9a3P~pkhMx>iC^Dp1DW@1XM*YEmJL()yZunqsjJnRr);eIpWtP!uH&QAcxtZHXc_kx7g_})Qg3ot1K zr@y8Q9=7PMqhaeP7?Q1^uCTlwdQWx_Rv$e(Nn`wb=c)S-sq?7yfx51hmRr4MFrm0x za|?=CNtU+$c&%hWWN$!kmY(*y2gaPhpx}A>V%7?%tDy(n9;3z*JgR)tRcNqr8;EXJk2Y3ChORt7J4%#r8+HStK z{oA?4fA(XgUxvBL-G`bhzUFle_xuQZ@BU71n9)~3lPMP~zBevb_sI2Rq$;9^Kn2_L z__4kK(>~ULsPqkuFL*bzZ*Ogew;mNP2c*}_FjPrc7VRVoa1wK$aI4a6uU-0~Cv8UB9Vr)3hs`}e&}jRf zrM|kVFfnR~99q7-KDjYxfAyro4sSc{)zpKd)icfquQSK+qfl}71HHb(Yg-Y`o7KVE z!#R&OgUr-huEw+Ji?$64@<7Bdgs#3&uy=ua!nFqgaq~oVq>*gadlg4K3e`8YO$Xar z5t3FFdq0H?OIextzp{6ja+hXc)9tfwW`RcAK+G+>*{tv1oi6%h3t)>Odry5=U7tg> zpn8WR-si6Q_^Hc&EZ!dy!{JbVZ-Hf*n*uUoEkEo)XshuiMrgV~QWzH&ZYeRvzch@MR1a|ha;1SC1Jv%o#8 z=#l86tW4Wu?A3F`Em9vhKkw^~A9K@y0Z`pupWT4{G?H}dk9iV1!s+#%lNN8X1p;!! 
z)vk-7}}g+^@_vel_gt;3`?2YrH6i2LCQ6*tX0?CFV&Sht$>4 zTdU_K;#T&Ed$oH+BPOGmP6(9U1%K@&(65gRfj^q)B>Zg38kzfI*1|uL$S_& zKljVqzQRIVMqiw(QE=~^uWD!(40p-hGyJi3qz(;AS2>{W`n1vZ% z?SdIvBWnls*XQ;$3Sz2kq>&__x#@Z6*T>~B^Nyw6)T`KSdbsqcCpMzi?4l>`MWz@b`OWol0Z5s4uiBg_K1-x z#PhBbe5S=kFqUp+w`#p=+~J=EqtT!jR`tj7{gP1v9aGK*3@6{vNkX6d($jLJ#!6ik z=!=H)QkT7k-qS~&MQcc8oVH%I0nfW(F{DL;gCi`nG&>oQ%-XbcX<6}h`vSIRiVX-4 z?5GqO&?UmHQaS>=rjuw1QMZ@D0_jVq-b7_D?%wxy8XU)rCIG>w7zQFPRyELPnXDAk zbw&*WU2HpxzmmUmLyVz`%-rKq_VB6u#~e|pbpDn>$s_>N55zCI#Wv`LN3QG1lt!Dq z$-~`+oe?`g8iSC^;A18>(1I#_f z%#;wHHywR>ULp^aedt~z1*F#zmVAA^i&>!Ho9Pu~N=;$T$k@&*{k@-O-wCug$YjG` z^(!z?7E%-4e4O5N4Xa5ohL2k<)vXlS+!o$}q-Yp6CADrI9TB*8#PF?P-z!=)z=W|l z36_aR7glosbdfm}gr`ndJS!7**X3yB0JQqCyQ#q-x8E#}D<=oWowx!rx0u^E+yeNN zaXVooEu}|dT*5@k&lp{$dcG?ZBTU#I<2Ed3*~8Shu`REAeh6WP-=u2VbT?w3J2ku= z*W+;{V?japc;Jbc?u!|sY2hbwU8OaD;{B_u(~T>m2e#qOyNVY)$7&A$8qw_N9>6tP z4oSc=uJJ-x{u#eyQp#cykCs(5g&b(4@ zqz1C^vin11tJ-#8dR>-iQ^ZMtGCjCV#we4%R%tU02Z7NLEXtX+OnXkoi?6H7!8^4I za?w3zO=T&GXoU?yr)gREi-tRhDwH#}@A1JGe8~M9v?-c3L~Fvvp$|;QZVVqGRn0cpr zY(S<6@fbGjs{90UF<7|5c&K?pz^?2jcFck{8Cn6eBj>~n=)!t4TUXmDVGS*VWP3*N z0A#wpQ-6^ql>pLE`cY0wl&jjcEk@^Q)567Rp8wIE6A-h%ECsn}Fjbvfgo-Z^zT@t= zp^?J6d{OI=x+sR0E;Kypo{t`m@^OUMsEp)DleMb55E%S|PWeLG8Y!*_&j&0|PDOGq z#9+oR4C@i_Y@_@Ie#?awIJ%pqO+$0Y!+Z#!UzGVyLwxqIQq6T@=TU8UEUAhEffg5( zsm$Pk?EPzy;&71Hsjb2R)z!mlL^!?t)%=?p@&zABePyI}n{~rfV&s5!z?RK_=7=t= zQms#M59F)wVNjmkQAb)t2~1{R5pM`zxJNrbFB~&z)^qmI7BgnH%GB}^Mx!NpJBf9g zBF>uQrSG`NscXMP9W(~sQHn0r<*$`IefJp@BEbZvr)Eq95g1baplM9#Q(DFtB8V zWwG&9-_gs6w`HaW$Xeh>;AQPKEtJ>MNQ1P8aJ1Qo@fssRob?JdO@gfE>q;jVI5JX6ly>!rUz^FBkp@Ml!#-|+tuTfV2m+XHMW4g@zlGR(ebH-tyV^b6h95To=23a-E^xSk~sw zVE&*n=0NXRcI5a+dh?I@762(EjZRljlVYXk)sn?=P zGpW?^By^00UesX=e(b;)Ym^O!Z07o?S^lq0IT!c5u+ z8N#W%ZR(I*fa?F37E_#{C*Jja75)?1vjby(2U^V{E$E|a+ z8x^>wO9Lq%*(~L5!#7lP<`F{wdE~jQFM4AaN$k=$x!k1kf=HDOY1`Q&s0orO;fm+B zYjim`TMKs3pAy#XTCTL_lVb9ndo-1u3zh>V=Q=jN1RTpnqivUV(Zy|71nkKARjihN zm`OV}-kU4M2T1Bw^s 
zXmGO}eS~kI0Sx#4fUUqJ@-1CW2s`BY3`^R+!>t=a@#@kv1PMJ`CB_Cc+He3aNLKmP zEy~_4i@6XnsORRQmv_%ys3#F+__zbah>9Rs4V`)>n zHxK12qMZ2Jnsep;J8++?pX7=)JEJNrzvUmA!MBx}5#dERi=bGiaM;nIX%4wWo7VGc?n4Y)oQ zOVJ+0mBc-?MLu?A1(q&=0coFTcJ8Li?oqQAAexWBpjnHdn?0o+l+YK@9>+1u;L0JR z8P%X%D^%7*5hs(YuC>Ih3XJ8$#DUYM*43*gALun!cz@+IbB)_ zk>o!{0evEykQ$+8iirUb%NV!-E;KRo|Dz<4>S{4n=o$$_p(_}U>@O~le>g6*;%hS? zrCQruvr9%@Sdg$L=}UcYf4*;^|0)yz2BLGvb#SkLHxRInKdq2e<-0Xoeh8*Cy1q0d z`;sAhM%z^o*M8^d;4tpko9{2uEg8E+^tgfk{67r0JBx#)?=(ncSATwH(@c6z8O@o? zj$!uV*W8DWT2~a9axkn_tx4!-rptS-npJVAJu~Dkra&m!m7&cOt-1>TYUT8zpCzEr zMa^~?d2h+a{eAK$^!FxotV9p2_lN3mF8)foNSxczSY2%HA^TM0V zWt3A8cQ^rS1Ck+})i{$~oqZLd;Bm4rEbD{a?Z3gEGd!~Xz&HHkWQR(|Dhv29P%q|o zt@-Qp;UnXWwIqE*(Sm82hLoITfHFN;ChGM?iuE`BB*Go^5|t^uFJ~unOY}-mQSQ5&f&9V0RyG!|O&moV ztGQ@-+sS%$$ymNc7xeZwEhOTcI-7ea0bZTle|c{cAq&Woi_mq4bVkuAJ$$EPKi+{T zI;r6gJwSMTES~jG>8m3PEM%))uUaSb$y4ftw>zAn_($1zD!5cjSNU7kP=Y^_S(`Q6 zG9-E5g6&1>RrJO10aRC_(W~Ke5MTwi|52ky6IUu51;K99&H42_a%uKrG-}oRDxJZu zy1~lQAH(1=O8(aV$*Jl;=7iMOwWI*1X`+FYe5V|CmV97D_MEf|8N~8*lJ}X4f0Y%c z$m#eMz$9pVK})rw;a_J>NTx|i&&s~>vWydQ3HKD{l6J1GgG+s#DAATKPZ9K zfkU;`wLbhhCICAx6t$E4GfB$wS)d}&Ukp5QhQtwJvqx3t6Ot$n>zfVs zU4NZ2F5n^DX?o#>-fGVXcp-IeIB8Ds`7XgBu*H_WlPq)KZVE%Cqcrz8WzPtx!yuN) zCZ4_XF|0WMAO9?*{*$4_Ykvc5-aWG2 z6FZWxj8hG(zGPX}z6hWdlWsOn-Hv#yHq|Z3B#%FYm~g!+_*r_5GIjQuI?D!Q-|g6N z24nz)!{&LmCPRlWB-B1mOLiSO^QaqVm)>$}&&ujIw$FYuZ#ZTW-@|9_?Zg$I5wT z3~*Z{sS<*uK^@eu5v3Rd3r^i8iBt4@>k|Uk0=jS=&Th}QI|Oy*>CGE4Kwk`~Uy{Rk z?Ni*jeBU^g5UZhvt+wH!)?mj5+4nrVH^V!O2U&P<_V+M)krg{!%-_{LE0u$ZereJF z%eGjrobWEXj8$+nMI-ujX_ zrYZ&F78!}C;^7b5L~wBohAJ5%PEu)!c>ggzL4p){T9`<1IoyDM1s(> z;cx0w!Fy^J=SyZ^&6RWdyvUx(=g4I+GQl_nJ{g(bSP!flHq@wsQF8neusI(8V^&vPkveZ0ucelsUGfMu; zlKu3ac<#~=fD){Rqag6?A7RiV_hP&M+!ETK=7U5JLL9m>#?ZXe;`PDB-r_>#3@mh#pE|krb zOA~ixlr^!MBdR1hNrh`uxvN94jFo=uQ6ojWSoZL&;|FBgOR7l2Ba|imuj3$W?=_@? 
zP#g%hl!_B^@b*D0x_DMWAdv?TV}FhJaDhE;0@=$WQEgks|E@;Bme$iPQ}{zSjo(aO zE|nk9j}S!?O{H!ZDjwQQ9FIbAN=A5$tY4&eJ_X<-jcxS2PKq|~X)|;2XuAS69`+mk zoZ1uT9g-t2Yt7R~#7m03DIWciSW^cU(}pDRU2j=P>f(m7Sq+}MX9t6vU@I&gF+~di z_=%)R()<*bZZgvRbBkQz#MfgSe?lrz&ug}*S_i)uV_8pOJw4xD_y0hE&PrHX z;_raAUsHKzMr=H0bGn!sJo}`F_XZ<(^!6P{L*dyEu4OfJ*q8z8!*~{q%jUp&?hTmI zrqN>D%Lu_~H8CS(k}S9lgd{ z%k|jo4dd)JT(mTj*XR(>0W z{2Uoyu00FCvLm}!vCp{u#;K2}$^6+>DUfha_))*lrA%2?OF6)2p7~k76pVhfTP|y? z?{d2UL&G&5&Ai57Wb3hIbz-uN=qL)%q8UM*5xeusOtouj)fJS_t#tzNv@IcKB_6K^5N9j3`Dd|OPyMmb;fq>DeVUbk^GS&a z1}&x;07EM_&N43KiM4TUWIBJZ!!i#k(%b2ai7yhz-T*s5#J?l1%)yvi4AOc5u3UD! z$0;WpxXIZ>(;af4QLs2!dGt6O+ z^>A0ggnCcdRr~s)ys_#9I6O>?#c2SBEA?r5s==y!&(YWG$6(&(#DS;@3Z_$;G5!Eh~QGU|v+5SD)US-Eb!S|u_1J)CHNVEL;2TOeR%1_kqQxvymW(T-u z@}QU-4~5EiFQSV(!L&=Bg4yGaf=3Mwkl698Ks${49;=6^8{q1JfCR=U!?_v#pfaOc}s#4@8Rsh%Y#@iH;RkQ43%+eDxeHU>g zmP4Jv9A_)7K9r0?IT4)|xF}CikMK)7b*r(i(HD7=;J}X)Viwn5uxC@jC#fjAH2r#1 z$RCuY_hCzLwSC(r8u?;*fr+xBQShNCorx68g7taRoh8b^E9o2n%V()Gi!G-vN3S}n zOV|R~erpJBOn>db0|$xqE}>1AZL!ztMsA&O;^u_Yy3dHjBJx7O;jOewdFd~Mqq6^| zVxv5n0T_`F&XE%UgdPsO_SU0m$1coNJx1I}v$96zxzWm7FORR83(lVmp4u%`&$2 zS#{kP=NveJ-|IS;x{aaU3S5W`#Em<&X0OO#?bqT6T7&AI4>&t`W2iM~63TJanV`FI zs;V)}Iw`R@RM?aV2r@i2q5{s+AP}e=SJI-JJ@dv*8*pB*QXXXGhXW*w6D?ogjyf`y z(dki|0Tp#;hZg!^5l8Hp)QseWp6E&)>>UUxa>5J(z%P3;AH%LZsr+7X2*Y{ILB|RL zR<8Kyo-?iF4NLXJ3HYGl%er_F-KW~3irEz@qdVuTw+gHhHHgu=d)}GR zDjh26#6=wsj4TLu$naMdr9NlwD~qU3HWz;L{CnG<7F{IUh_lQziOV$xg2W>q0NoRG2b#fJX5_)=q02N8M8oYj zXdE?jOjLM;*W|wDp^!8ei*vS9^@Q~5g|HzPl8;or*X*|Br=TaZAv}YDCfY?2mmP)C z4VppZC`#>{Ghy;akpB<0*-vomO9L^v0CL2uY7c*P34*aatJ1q%vWtMgN;~l;%8)ll zteS=iXo%yrhC(nS;e0&}PSOeoloL6>gK=)fft}&n*f_~={HN0*?*`UyT3z1G{o~m8 zhA^_2V8({`867m}VOC10v(6Bv{^uBuJ;`MzlAy?)#pzV1g)j~_AlwPj((Mid5;Ynw z(=qs-7$jPkVPIu=RhA=n0-_>U!+OyD$(Z~^}hJZo7Hq}oxg87 z542oS>L&B&DRKqqgtnijzXXx`sB2eL$>OaqP(HB>5K9%SNcRH--T(1}xerF zE_IQkTnL8;c*K|M!1~xZq4ZwI0gW7}deoY_oXPoukQm*XxpGcGaQllHXa>T*HVi^R zZswMuS?MW=ZOv=1$`o3t9X)?9r5;xW0RljC65NB2hPM={fjdFaNXFBmklapGs~VyC 
zjT4+uj**{MskL^IN!%zC|F9*ETOSN<)v+KzCI*I~v8^jy%oa-l(P(oO=vpZD! z^uYbzExiTV9DAa@?S59j*|zvQl{G~WhfzUg9 zLK$Vgd+b)*F8q943MnTadfYO$ahiNnVkF2Nth^n=<#RQCXhr^@%xN#e#dj7SLWWom zW+aTLoDo}ABsnf}-nn*{^s2Ws-ID1CYq^c<4yoEEWDQ-x=Z)Z2>8N~@JG|oz0Dmf& z&Lc9^JA^&rgm6fDUI-4*T(z)f$zVz~6;;fx)`9AeenMA3+lYl?PyW(*Uja2Ut`e|I z&cn`a+5Eodw~h<~hPnG2u+H_*jUFo$IBrzejJqg_boGPZA2bAMC+=9Is8Z<1S@=Q-|{PFPTNN7 z>U4XHYf^H%DS%p_+I41sj~IuWPNs|2j2W8nFW%x8u#KI)0-js!;upE z{X#}I^VOk>#UHze_7zlw)40310jg*Ke8%w&k^3RiYI~-{n8h)}WC!m0JX$BdMw}-L z)>-*e+C{o8$~|1O}7Scz)7g2KMsUP(#s zy_hpN)aXu%_}BzAm7N6zSRcEQoym4~4yC09&n=BJmwTXcdkESV;1!jCgf=DRytsrv zj<-_tA-yUuLrA;IO3<7d{gaAMMWnHZH1+b>fWJr=sZOQeDXdVv-mBA>TFN!EnG5JC zEj9V+2FkuH%Tlg{|Dz%ME4=VfL(FiR(HSgizdk;7*B|L~RJ_?e7kr##Fpfy_^i0A#t}|t9h?>ITTWTt=r!I} zdDqY1oX=&GiF>HXF3s%;=mZJj>gDdp975AAVwq8nw~Z_y+}RHHth?Ny^G&{vVNdqs zEBtJf=?21D{5LW1DH#x;pv)`|_7gQP2u#$@gG9^{qHbckA(%JT7PR?viR^gVJs$3q zBj@cH$2^RjG4$5AYaUIzwpD)zl~wpc8Nj6+)cTQSRyXguflBk_3!0@O-9ZU!p1dRH zpXUF~@2dU=mvG28VsT(IFOoWJVw(K=u+?SPw>o_n)~UvDI3df{OYBXX(r&+N;nh0~ zg13bP>I57bMJ7*O@~~jJtMK~{$73LMOkgKM7Www!{&18ZBf*9<1hJ4MobRY14F#k-b`jzHEvRkEsZ;8=UhrZGYBTCiU^;mN?DQLt-eRcSbGRZbx5sYYXc zAy*@KLo-MnoERAPja_5miSNP2m@crbxR8XQzy&fFlz4qg-rr zS-m&iJ*-&R)9|u`XVk`eMIjwFdgMKxxO4XW`F*st>C?K8sya>j@ZvRg^a($!T$rR1K&FKaIBFf z$EdCv?_$Qe?{$@GynTxbhUK}`C%^Sj-?5X=jI%z6xp|r*gj~dTepVu%?%3CcQ!j6d z^1S{CvRyho&h{)jHx02u<7kE|AVO8pomF85*$jJKL$j6qdXjG@B8RAlD&}b6CzYe> zs6}e9XD(`8{4-<1de8WSceKY*`Ctc_na!rR?0?@xX^^!2N#&JHClM*{B<2iS?d587 z%~J75KG7DLhqG~gR32FA8v8kt9(yV0HW+lW!Ar>o6>E1EpKQK&&iur4psbq(jUqtu ztJqXgC<+lKU1Zoi>T77|r$I`tJi@~wwzWr!eQqV53f5XZ7bWhMT9_GXB#4PMM6zB> z*pjyC6fv7}!-lBtKRGcI5Gv4tQ?*m0s9F9lED=V^5ZT6fAaE$Yy=coH{aS7Vu2LWO=P@;*p4>T!I{+ zKc*d=6obADe0T~J_l`Ws2x`(Fe}iL6D({*ix@O{SP_DWLF+#r^gvZh~wAjgh z@_qLkL?QgQv>E2(e3`9jHs<)cAuq~ERCL1q2Z^kJuu>(;UT-%=N1S6{F zCxENF=P_`MhC~L2f|R{sX@x(;qk@;vt8cRtI#2)= zNu<@2Kc{%u4)5`^)u0*F1-$NWRU_FDeS_eqCgor%w8_V&oU>eu>H>Ncg>8#sErED*mE~hUt)z*sl|#A_1GZcNgvb5H2&9B`KE8 zY4S^AK49i3rQ#J=kUhFH+_L?;ol&k1zZSBT>PG&6=*S 
zE8H*_L_MP@u2DW-UX87%Hn#aS*XE`(KgqU72Say*TXsV**8xto^Dkmzwi97KZo zYTExCZPVTIyu4mqLb4n^?h@bY{+F35!eW_46BJrrVc?~hJaHk_P&=FbJvxZp%QR97 zWtaQXK-5_aI+x$5VgLGK4TJ!E@Clm2{G9EG+Pit*!BF|h<oTkBHl56@{Ih>W<{g_`42m`m zsgxR@Bay!#Sf#tch_YP{&Vv5F=1nb7CkTL;wj3}rP+7e*lhi&#w z+rf~^6ZPRnHnH!+*FVGKXB53aI$gL}JgC~`u8B$XlXtp9Tl|-sQMy3eIORE-PJDTD zueK9@2se#u8vK~8X!NT z^bYln$CF_z7d89aWoDj|wNd;@MP&R@pqgHaYvQ4nD#$(n;I?wc))e1nPO>YDdyQn)R?1j56Q2H?^c zYiJQk_sNI|SHWQ1Lo*KCKx`rM-l(ICXx)yN&vupq4li)nIOwR&HOF_6S5bP=wB}Bk zzTY^)#fIS-I`BE*DqhVL!#1N=#8bQ$F%dHsW4+_(C;US$_O24L2al3Oj+bWngJunW zLig2bBO;NsQp&b{4N$l)8lN5&Bk@l(!FPz6syUMUuVke#CfEzq99~~N*%UWD-ZknmiW==LC3K1lmbjGHRsbnXVk*K8L*PFm>9WF(9y!8(4U#fM%7{)NW;2^SSP^E2 zQ_#xAc}Q!i+si|8vHa~^_7=&+&gFvH+o7iqMFPp#OglSGZu|trCWvYNMBHv_MLtGC zN1?fGPVws3pj>3w0&6CAz@P5<5d@w^e1nw#GCau!yM~aefzAJY5-lXkB#`^rwfL&0 zP)#vI19S1pTW=&%+QUyYxp=r@>rmrWB)#VmyP`_FpoLYDZybm_o9oPp4>36)=^M8dO@W~*< zSaDgwyWwtmEGD`j+k_JO~~l_gYZNO7p0eMvjeQQlpO8LQ<7 zz6*b{U45Q$<+j`eyOyI6k8%!pK~6SoL-ZL;2AxqY&I0TJ52kh zoovN$)e~-LWKn4QQITl_ju~t0WkaP4mu~+I-(C^B?Y^xp@|d%cGoxmK&bxKn%@Hgi z;SWJs$GX^N6Jd!3K&%du<2n%bX4$yG-6K5P`6NHhG4R^nd{kyQzfFG98el*nt>X8S z05-b_R)Bn?ef@3kBD>U3(k~W|)7Ul`Tjr;H;u|d%EUeA?G|`9r*TcHO-;S?djPOr= z%xJ$T0HUpM*Z{y{&sW2C?%Epws2aBr9d$X(Hc0UD%&N!qR%o zt|ih-aYMV%p+yI`-#oU_uopyj>rJq4UzoFF7}^4`HZ@MOYtjst+e4dFXmMqs-1dU1 zjsxLgbz0Tkz>FeP-0ZVbzXdLn2qy>rkp4eXjNWnx|}*zmedyDC?>N_&_YGg&(%rdIG~4#ZnY z=q}=#=Z*keULkU%oX9RSh)Oleh<$;}ve5)DFzLj`n!&m`sco|4)pPg_>rI*;oP&-{a9T zGQufKBR{)}1s?%r%D8G=bOGDFY4?@@-QlQL9|P$xRMbI9*X!lLlmU%qm;u2c?xmUD z2ouRVD=2P3mt<-IHPR6FcOZp{8yhKSX)kEoActsrY6>So3kDspeWWNbmW(jQ@yn^r z9}mOr>0!|Pzmgyj`|y`4s?X|i1J(<7^7+u+{-Wwt-kMrkM>*=F~;XX8rBkCtyA9e9~RjIq4vRHVKL|JKVx&H z>Rc;f$3Ck*T{=+g?B(7hrBiWq<#Y34e7xDajuU>jV{CL#D19s8YnrBKrPOqMQo~2< z@&3SeY02aYW$>^`yP1iAagF>aD3A$C&5XRLM_p*k%aX-ptNrI*m>_$4>RhV~OY{oF z#~;FA^mVA+3_QaKh~<4L(mL5y-y1l^F#9fugW-n?^I2H>LUuqRWySmOFj^~!{j)O0ZVzvk%dS0Rx zY2*ZsG5}i5na`sVgrI%fCw?z z7R64&sxC9Eqf{y_S|4KwF<)0y3`omq(Vc<)Tap0<%?3TRvUkYT;Gf@*D}xBp%E$mrH98G^Gqa 
zBMt1*@2BDGugJ1)*Keh8&zGqgSBUl%5xb0`qbBpUG9SHN^n&b4IvpNdY+YF>rTRae z{PX9U7uUcv8$U>h)cicSu}FJNt6$zZYMH3CVq^TM=2p2rHzGzI!MHB_)s=^DA)t0$ ze)9316s<6QE)3DPL!Y93L;Zk`0$z*qFyds@5jm2Z`KUCDlXv%4R4_ZLGv;Z=4niAb7eWSNn4N@FgDi5-?}y zwNWEIiga^EL_77or}3-YPTlGD3Op7jKz-_v<>g~)&Lm(SdDwrf1asaT1+`TFc|xc( zMsVGDJN*3(iQ-{j*NX0fk&R+@T|M%aUil!3DeD%aJT4<%N9W>w+i@Zx5`RT{$I))= z;4;?%xJnarsE)f{7UxLc3m>a*FMS21Z2;y4GQ4Y|E$=X$(J~oII*bVd5nYSS35za>F37K30 zphufZOkbt^m@iicY}~dyjIHCJwMZM68Ew+zCVLVN9Hl;_*F=GNX!ks7U>SKd7ztcl zw!KVPECey$jKS3%9Z2iOoe^b6ENV8s3$Qi-^PS5b9<^9sS>|}s6Qb;=Asn%`ock^|=3M@QZA>n36P!evh2&V;|B$2E=&@g$&rsnf(RgxBRhbALS>Z zM&51_->1&@@L(6NU8?9XKlS3|v!EWEjZ)Ep-80O7_%Qh=R%a^RJ>uIrh?2=f~>J>|s#_qxw9IJoXA`A7NS%c`^(-@3z~MOiToB z1Bkl~t$Pik4 zmUD{*qGnqwq9geHNFvMcrs?xNI zqDqc2#2^kCCoY4+k$8Mm5dcT5_?4xWZ=jO6#EK1jy?rXMI!xwQb-6~Ukbwc$4(ea3 ziU^Vor1-)kj#gEjAqNB$PRkj^8SpkfPyGh%pSr5qDq`@p>KzL~iNeVrWT=OTeeH=t z(3+5%(p0^t^3;UWt9x_47EdWLz}GuhCl=YJc71TNF=q&m(pKY^Ut>?TJ@s?xHOO%- z!&id%UpR8RdKWQtWYVETa^vg3Y?IPlh=v(4qVRF83_|YO z+_Awhg(h&=9zYq2rG#i9F~`+Tkw z@J2|oc~u~S)rdY~P3>qEaCd$u$MHVOpWhgoVFXYeUnTope!Up}Nx<&l@uD*1DY)** zeMfNr2YnKnJ%u6AHy{Eh@(Px+UVq*P176wvtu%3{7Kfl0wft5As(!)4o*k&rp9=9F zAvfa-Gfw3P?oz8YxdwFzCLLQ{v^9HOpE0IXb0T%R{w@Jpp@!pi)JSX)CIdV<~YC?g%xu^?8==-RWV_-_^xWU~=@$WQ+ zV{v$J2VzpeJ2~UNls4fHK7rTuHUWCu+zL;3lm-)f4g6Y2FPlDwspeNHeT!Ct;LZtf z7e7y8_MDUa65A{^CTFpj+?d~@LQzXks!?81w9S^q{i0;etoKoq2tjntxgrA<+P$Sm z?`JN?^8mM{JwixCL}oXG1LN!DMogLj+wgmV z(ZKDl!?4cEVidz*T%7>{gg(Q+FqJpe^6YDl9EX0hSDK-;2C<$qpo{f z>}7MR`bU+CAQl|Qq9dq8Eit&7ZNSz9r8>pq8G7<8<34g4-La5M{J+O=wOQ1?aW3V6`DIT`We0NO-GDX$P)3VxtLqIYqAGlk3Ka9wuy5bXR0{G6-q*7hi7>d{Q`;HU3u&%xk6k<>Hb50AyS*Z$= zU7_LERQ=@f;#v@u;AeiYcn$LlBm-5SsMuarMwxzZe?^vAtvhA1%FHdgQn?fi&|6DO zw*PKgOhGPto{y?Y+n^Sk(Io;rs6ati)tgQ4#8QOnO*6TG*E2 zSq`Ihy%vSA^FOyt3c&lQ=a$5k7nU!+ECx03y_`!+7TJ+Lk3+)1wq1D5GRH7t={3m> zw5F57-{ncR;gs@M^K3ORF1#3{(*xeR9bt=P*3$=w(ms}OM$TWV`V8M9Y9ZOikpMF- z940xi$JwQRS>)gjjcSCo)x_yvTyChxB)m`jhsDy>pW17z1m>x2DdABTEYXzzoFwth z7SzzLesYTniK+IK5ejay|Jbqa8+bwOZV&MS9 
zassiMom7$R*Er%iNt3_}OQWemsUVbnnYEB$i+S-zPnz-3R3t66D?z#h{2qcRPVfSzL@BSrA~;al7uzlXIA zL$63gr?Tw)auH~Y&d<6Nk611Pid5NNvS+Ol-DNBhp~#PB zSh9kxtQbLxE8=F*;Bg0dg2pMwZedU)=#xMu-N?u9QD81{UO&KmXGv~wVZH_u3jWB*_8g2Zo z*==CnrfB>ro@Tgx&0LR4E}6H2uUNh~_2p$5-{82rz&^;}xlfVG>X}Rj*1c6Qp))#x zwR!%5uPEri0^>@(t^B->l&_KA2ke&>3$|f6kTiI!rZxpfKu0i;X3)n6PmWpH0LGm0 zp<8*$;plbw#;`EXYMvK&_H|B8egJnjB#_2^T!%Nwn1%{UygX%C(KpJrpZT4!cH-iT zcu#YoG4741c1XafWMwi5+xIq%^aFWc<3x6at1_{yD_*DJCRQY9Bp{83qxlk%XeogbX{g=w`7BJ_;ZUnu}-}T&1ztGckL~}P!h?z zIy63HEwsino0e1$XtDk$J4Z}b;&grg0FwsbVnA>RYYhsIG0v~%`Z8)L)4Cb8dEbhaDat&gFy^wF-)wcI*~M zA`~r;r(hRkxV0&-*2|emAeE_J5y5w=_`UfSc3U((Z4fcke3%s{1-fP!ywyJHaX@+D zL$@_xor9O^X94b9f$T!Yn#9eAe}P$7T3_xXe-EWK;#2yz+R!)n0V}@*%nFvzl6Oz6 za`IupUfu>hm2_%YT)28J@r>-sjMary!BmP+$sUjgzxl%N@i|!g$@*iBz-Oovk7(Wo zi-jMZYz_YjQ?_(=<71g)_KH2YH1GhDa`=;4tard*K#F@g*(8JGIZvk(hU}?b&P5#s zD<#sJg^Vk6dZb+3^%FK(=U<7@23daPN$d=TPc@8ZTDvtGx8bx*`44H7^UEG`wNG_# zyG#Djr|nUgfTbiv81#WT-5j9?syx{C@apk>&Y95vz#Xg(y<~IBj~vWX>m+;rN#vB6 zt#8DCa|htO`_swi)g^@cNOF96sn&OnZ-fiWZu2zi1UFGoSS*{hgSOARQssWepn$g4 zXBzL<_%;vj{1r|JpWou0(=(zQ*T?qr2fI6p21G0EJY9n7NP1Wz9IUu*XU7|J1K{#( z-JAcQB>T@i`-JGX>R`)4UH4R3+kpNn4v33CpU8(LQoazOUE#$|sv9Sl!~uf_``Hx> zjG2*KoYaz>aFCrc{laA{P%7kmG6$}|S!QPp1cgJ!vV57iEVBfOPbu9Qee{|EoLIHd z^qJU%>d^u1!vq7-K6VPK4Nw_$eZ!8i-1$M#y6|ex_QbIW)!ui*7BeY zJ;f2UEmd0zSt5<}thaoVm}L~sy9ili+Su(ytjm>~{v z-qKMuY^~ZEM_twOnk?makkKQ1TtK7*?Qv&7N;oQPD0UR79H*Qc|o!EL(FD&0LQTzc7eY29g zPTsYTjis#`6pfp8&^N^)j>PfOVRs)VBDIR;+O@e&iecATxWLZ%DqlXKkZN8TH{gG{ z!oOM9zK(3z8puFj)J^JdU~Fj3_V}m)IenJ6S8%TRHUEps2>+TYQ}*3rAKuJEfbY4= zc(KCD7FV4?_q2hk8Ad79!XIW}(mr_r8b#t-uv0f@w&Q_=JixQCVOTSsbu&oVwfBHv zh*|hKIKr>dbVHD%d>QjL#HE-5YqtJ*3)+k3{0XGE)7Rcz>PJg@5aK%?$d4jhKN1uV zUw-t`T7NEGH_6m%pV!c&JC;j*S|0I5pxG<;c^^{pu;|l_680|tw4$yz_zkMI6 z#+S80jcQnW&aefAo>XWt4zIUCrkVaaA`3a##=L@afp7(R=h47?6(eCqZ z!_`{0RKd0TZ0V!jY}=k!i+7mcDPZaIAj?V4(_Q4EQtQ7YuIun6RI>t>dTc;+Y%1p$hH8+xdqhI5H`?=_!_O`m**sESQKrd&8jRcEblh+(f%3jB`O 
zq!iQQi_LSk#|u2!YNBR(v`pH*L10QB#u@L9g-w4@5TxRhw`;Q4>4JbtE7Jw$`ZS)t zA<&jX%Cj4B-sCWPjO)q8z|z7qLW%R^0CFv@M1bcBdee608Tp-0Fl9^KPAdG;`54NtcsJu&FVb^IxpJ{FYLby~D5wKtGL@Ex{Cht-K&CquwG@N|C223f;ETm~B% zKATc&AreyMgZGyo@5$YcXTdk+yG1oD6^Y<;r5bL-2I<7@$uY##E!4&NTW zZQrM{{!}vtHX+12duy7C_P3QMh;g`FV{z789Z(P@)WW}w)d0UAx5Qh*r^OWjglEcE zN%l7TCreI@e%9&b5uazdtPQmF_;ql^7WK8ATh-XLhPm6`jKgJ$vNECI+IF!AUR+dm zv(#VIhH>RFmas6W{xn`!!-y^Drz%6m)Q{<(M6Eynfnox&S_fDf-Zb<90{G&l7gt);-ng zMZ;ljC?m@S9%oxhiFaYox@2DdlvR_P$Cijp1cF^y4z%IF=(_0~rUuWkX1;T}rTckt z-q%*}FvX^lntlMB3IjXUm7?YVdO}!^h2b;^Bh9A>2P_y=7mw4eeZ0&G`pEz(~7kQ@k{ zT`n|7VI3hBm(Xm(W?;t;jr)FN0STzyh_Izz7(RN$aurp|(-p>-f9#1e0F(#K3r=vA zour#og;l?>c*6ZzRi{x<3j92GH0R|MfOX#Vsn}A<&gCc0dI3azRu&(;?l0X~Lbk#V z68K=YzJ}8ecgqK#a5Rf>e}mNtxW6DC$SEaIn1!+O6-2kygmDk_Ei?s-4}VO$qixy0 z;ki~**g@`Uj}bpse^^@w+zZ0)u(zj%e%yy?TcHWNhq@N|pDiR+J zYoM42xAU$t;pmyT?hMX(QlH$tKO~H$pi&G?7?PB%^r7@IA6wSg$aPSSMAibA(W`cg zHe$|36Y^(e=3yC5RPDn}7o1x5$OH7r)c;}1Pj!5490+!j)2$vX^l|ZKTLC#&619Id zP=LZRZ3J=@k(?EA<*YJGb}@NseJuf35sq!r#R<5Y^+z0=MMEXdI|`duTY!PD&hFvi zI~BJherq7#mY`0DA(O3MM^S0}?*3mTgE1KNbG?1}JcxcPY7SUFE)1NfdP=4MYxUa zC0UImK5$s;#j~+wv-le$PzxLwCjsa;&Og%RVQ>}~7p)k*X51FU;S#}ng%L4`Y-np>{-1mo{ZJ4M}b8CDHcsJRZ&& z47+v&>1GmhBocMqpAWPDQQ#=&MCsnhKL5C2(wdcU_u$s$06=9hBk>UX)Q?UU9rvMJ z_UF_h+^)vzuCkztd<5pdIL-bLdiTRq*yHoAu%SZR`8j$J3$qDQdV7L#P=kfLBY{cyx)(VNZ^e*9iq^A*#H^P7bx{IRWh4b4#PZ^4S9|P=X%LrUL3IL)^*Th- z+*gcD`QwSve)p4=5*bK+;wS44=_nht6g0g5Pj*}?-!emNWtMPs@*4|rd;(7>Erh7f zPBrPYJlHqHCW(Oun`pJ*)!P?Lws#!P9I7`G<&@IO&W+rvNRKd$UaL-NnFQ7pOm0$A zZCn@E85@_9pe5gtx3)PAj0>1Xs*3FkDYG;f(<8=qL4aLxbp((t_nz!I%)GMV%E*U6 z)(ofPZw#(w=6}9@UTIJ^S;&}&duvV%owGk|0Z~p1oQ0n?*?IwKRw_n#;BVSuYkP?S zF#pACy_!Y5%t2(7Ftg#=$r@eW#x5~XB-Sh~*xUey$W7Xuz%+M0G)0^g#<52%jscla z9jiJ4&2&cRZW!b6K;F#b*hCnWqaUe&ksOJ0j{l$5L$7EZ64}m6?U2tLVyx8Y)C*0l9Cn>(f|ocnqzOcCXR@cFd;|PmLbsNTH4@s#Wbj0%;pqX&qo1H(;Yx|3h&# zl2y|kT$`_Rnu=&iL@vZ5A8n2dlX#X`YmM6O>B;5=FIksWg3sb+xv0J0wHMV1oPaiw zv}K|izk0FG!NG~qqhcECmTsU=iYQa-y0)WKx6o2)l2uU3w|$lf9u%ec9_u?&{OWQo 
z-{uC461HCm)?F9yZXiBR5l=K3*qAYPf?Cbbdr?=-T_lODPQ5z?u1xb=hmXiNXmo)lIXK;DV(9a@`5ULjh&_egWy8srB0IR#RB0pyu z?(-FbUkTtu6+7n|ZU1yzGLCvvnn0449c)UwwZ^KDX9u4hl$E8%Vza4#8T%jrH(!91 z#S?$7VSi-uz9f!Mkyf*2U{=PR>9g?XmJ)p;OiF`QMi$Fci154>F|r6fC0gynNV=ck znc}MwBbXOX0zsy9pg0J7ieKJuny_p(tFpp8CAr#?s-wJPYPm4)*=`f>43nMgQF1uw z+%HAwc64WO)!QjZXB5i)e5DZ4F7B2roqZ}771ryrf$_C^UDNILQotGgMaziz$)9hl zVrIGRnHfv@N>5ty@UQeH>xEp_N)R^be^f66#WGI<0U0_2&nCaXP_Zd)M4aD526iWtq|m(zBiO9?dVuiz36?~(t3fL*9q z)C3EprZ3iMLtb?mt0I9fZTe&!cTwhzNfOlFeug?NaCHrn$I19!9erDqTKDR`BS@$=742u60Z8ersg)v7t_aSqaOoF^Wr?{fAf0Q#m&TT%6v&68mqZt!SO2C5d(R|nVy@HQ-VA|9`?Q%>C&{mq z7_g+1P~-XyWPw7;ZBNE6QvWr+2b>+AZt5_Zs%3MIPZI9SE0G+3v&MT}$+p_8?ra1o zlktpvk_IJavSDKX4YYe&VEO^UO^Ywj-Uwxg6B)!rW9X=(ydbaKC(!IJeXu1|4@h7H9MSV4s^n-i%uw zs$4OR?ickfY_?!>@Wl20PJg_dq?p)S=vB-~9G%DHWL@UlhDI;$C`!*@27wNoshsg% z&%b&Rgw~~)N~;jg?|coD-5PiYPtJ5&$#_&|M=+p&tY{Ram)XlSqq!^dX0D)=GD2g~x!aS>a>SrS^rj7u+KMu~8!-RdC= z%C^g0gI3p4{eJ|tOM|HSHD4@M4(xk)m;-X4hk5H9s^ z(IeXu5IPh@wfoH#)W>fL;j=JKgIYvtBqalbX1P+@XvSmtvNsQE>MKf!b~Lz5&5Y>g zt~VZc-*E2hHj&-?sq?H2WOY}nqs3!a5WDp7Y`3=A(Q7O*sOuU+NF+2^PEOy0hpOjS zH9kY=)5d2beExoa+XE^+#(HIVTNPn{kqZgI+;M2Z&t9aGC_`T2gZ9NrrL=uZBcZ$u zMQ{8d+{9PMy`@JAmb|ZgUzZhw9E0>^PZrtSj=h~bwOf2(VuQA0>y{+mue-G91pw@f zj5zSZdY940>U%l=2nmHl;c;O5Cn)%O^!S(h zup0)!y;>tso+r_ zV+~aXt0DvJz>9+3jD4kb3Bsq!@euN)`IN zxWp)^*I7k&mo|ctWj-EyT2g$ej-dQ|(wcOplr|9ol_*OAy`2nQ& zm{2Yk$E?*k0-U>F`Lis_FfosrrTqb>v7&=}Z1AtSlteWYZ1yER>vVhyERbMCDrPmu zaSBqKPl#qEDOt=B?E=cQeivRk3{tKzf%bVILO)RjyXQxW@|wlukN^{rr)nK4#jC{= z2>ZJqTOk@)4n^YX2*y?hxPvCT$(GuZ5%Of^q+ab6lc)m1-_@>{d~R)gNAEX#>=j_rx4svB zJKb~jeBA2u!ON@g0leT9oZ3!XDPBqkcEZ;VYnKfgL=ArL`HfqG)p$! 
zaJ*FAl2JO7P>q|V)RZpc}(objBnb0z$&`lmHQ!780&)=HdqBrXU{aAc+Tpn-l%dNn^xJf_cbOusjCBi?cNP55Rg>J)wHV>9 z>3fZ1WNrGC6C4L7{#$9{9vQppM*P6U<>}A?#Q1vL`Nhlp3%*Gndv}3a4b{^5D*CjW zT=!?}y2yTeo%zoip(`vnkZjgp(4242dS$l{7uMIxMBaY_Ufw}NK+b`Ir)Rn1shViL zBUH=3h?jCz5pH9~AqCdMt18*DDw7`p8to_&tkq?GZ5pQU62N)JYwR-7YP(>ek@Yrz zz_yZ*HOZu#Z(pfA2p60x_hZed>8{St8L-omoVp&6~ zL`u%BZ&WYmGDQyP-SV9wDWQKzENIZuyE^-8gcJQUL)UUs7!F7?=h79`&~7eKSEpLQ z+4_Ckdi?Snd{SkAZb; zednC7Kp+`fHY^hDB1P7!M(s7Rta95I%!pb@%LC2pmKJ63X^=U!wNwJ(Ml(~URLG*D zOvJwn420vi#?g;5UnW)z;N?NJvw-_oaQe`gHMH{sVJe@}R$1iatby<3#m|8t+(yQ3 zhV1Uk$Y`1ov`c{z-Hg#RhJ>|-09^?(kr$Gnc(A+g?uC4GJk$y9ztb>b0Nn>U{!N)! zkw9%J^(B_v+T9Vo0@lS*j*#M^s+9! z?w2VdZhs{N-U~`(LTWzz0Z1WV*kJ^}WV@?^;>KLdzzK|06A*+n$Fb^&$dtNe>!Gmaugc{p#Nvy- z-Leb-D)kw}CvJ>di-zeE>&rH=_OJ4XjkrU=jc(zv6e7O*AbJ6Y6tG2bE1ka%74jjzbq>g<-}HS2#Nw7v@?PDiG8Dpp4OW_?2YW>yz1 zXynL`c0nR6A z*3Vjj9g!!icvVmBmRTyq0jqsjQyjx4_p%uv=*4T>aux^!_cyX*RR8NQTLFH=9U!Sz zQO*>(3^a;rcLu|<>2rF3Q6|jF`56RMF_X*nG*tq}92JVA0#5hCtw`)mJUB(?hW>K) zRNq}U%D{+tX_Pf01pAiM>DPV&E(sm5Ew>l>P2+Ah;tO*;xKr6d#(!EE63IzGp>h7N ze{jf4!K?`rsE#UyCH_D7hgutz{yaRdt^}Q9ZPr-a<-fLaLnwL-NyV5qRn+nnicq;n zR%oz8GZ>t`4yC2hHcp^sN6fTe-w#YUqXk;p5{cOz9T0|m4s$;$jJ^G&AjSX&G>go> zn^|W@9C)8DzQU`7%!sok$Rk+Pjf&GD5vUm51XHDky&8eUs6Swp(-3UQ5_#8{652iy zGxsnNhD9`VU&1PD93;lE2k(4{!omQ29SIMZ(16M*6qHlO>2E$`3hXu!<^iqkvL%os zUA9he_O$eTHk_m1_yAUi(E!Jou^D~VjP8{jTO#GF@21OMM&i#kdciDhNLCr9e6{>K%fp%}JMWyrF! 
z<1B&8+k;jp>8&zwX6g=utNIe0ojQSUu1T{J&D!v4R?~G5q9Fm8Cnk3Nxi^ zs|Me2JQz-3HG$8p+$D3Tmo(BS-)wr;j-S)lKbFfSm}O!1-{GB@Gr4=PEI4#;F_RzG zV0?=|iwNSKMRy^ewyG0$#C^xMmOAw%@X4uFDrBXrcJ;*@*&=G!{{U0GnA`e-CH||l z%V|F)hh|Q~UyO3#(RL`n1G4hy%*7P6S&EVMUvovp*I_OA3pb zE?LSrIyho&qR9bm*I=(%e0GtNzX0PMjWf16F2N~6;K*h#Igxc1qU~sxL@vTpkG#p% zc=_2ab*!offha%WpT3L_`F|hk0DJGzmrK}RaAK)(_f*N}?l`)P=WdjFu7~)Ap|e>K zo!`{`2?&-c(@T~gf6eJj^raxd#ghY)=5-lg!on+XPRl$#v`7FPx;PI0Uli!e z;b${fc@-{mhNc;$6fV8Wt)-VOm5g+MIs$s4S-cTuSf#~UkJR=#oXNNjFIxwUdPGpU zeY!SP^Rpawn~NFBm9tB3X<|CruU(PL)MilL`t2wcYgg!wSNOHc~&NF?#6#Bu7D&y@RRHZt0^d#6njf zKdCy#XgwN$_6}ex=mRUhIHx1mHdRS{Oq?(W;WkWJ9H~=>Kq8mtEWnA z9)RiUV7__F3=^Ng!3GXLu%mt!4?L=NwwhSZw8lyOLoPa;ziB6esLR_KA>QpAZ>YH% zX50R{rUkSq9B*MTeAO7pL(TeNgb z=-pBDV1$kRJG28zqm!mNPA+Z-1Up8~|&H!sYChm=c%u;mJ zf@&P`68{Aer;;<*lIBOeB;PJvt;g_Yiuo|Ow%>0>FI<1Ol_mvXF=9+S&6BD>rqN#Q zBN!*Ud9`L3&P`j$BJHR)XOJfS+0H9VdbFALPVo`sfPok>U7r@ zt6&sW?A$=b8*s_pJa$K+%aH)0Qb#`QJxE0Mnz{?x8T&aOZOkfJgLlJEBD1HDJa`jX zb2P(bA}1H5mq{yOWbed4Jt#E(W~U$8gYLiea0+yOPVy51bzM9TW^vCEXgWsw|s5z<- zTbbDJb*q5W93_1|wVsI!SoQ?26ubzKO@GwVv@al_4^_U&0!H zMZ5o?b%Rf*x8L;=NNL(>=>{pFVM#b~%0bTL@S&%8FZhEniarE7IUn=Zso(1>N6FDx zn0X%jgN@cSv}(5%H+|wy=BeMR0`F!)R$|ENRim9J&raX9+J>$CXwZevXWgDG{F_Cd z=_gjH65OUB#p~V?&5%auoh@`<`{F_}X_wl0#GCE8UH?6(_%@>c;EgwY@pA#!mbH%0 zC+4CD*`p4IPqpTsFhh_b2phV=MIJAT=}gG>#0UNk85c zc6_daiM3WxQ(e-0UWzj)SX6}%> zMk%4lI@zOt49(@gdFkNB1})iua4O?{OZsmO$~h(~GNYhZOc%gv@Sec~5Tnj}@q0)( z6%R=qMkIYeX_d9prn-&RNr-vxY;KUzXy0t7EA$*r@$D5NS%VQLnScOV>F!-t7Wgu0 z0a?tZe|NNIx7HPPd1m3crD}Y|c2QO9Z8hk5f)R_MdGx^8A+Em$D8%b;gNviRAp@5W#qP0z@3vpHCj@X$Ig9;*BfcW z40`0?cP7npU{>@g&ILSSD_;eBwN?RaNMQsM6ZQ)^{HfCG|JK}^&mrO7WhlnNk{n%k z73~eRd3cu&3GErewf@-Xkbs`5Kn?iy2nUo_BoE&k7KF4?!!grIY`=O6P zx(M{DL3lt`aCds%IcLzemmY`}-`W=nlfpYj{dps!yn>7H-E+N0^$MD6TYx-IuVK>VIwG)k>LJZUU@ z$7078lVHlDE?meYcNgsbC~r`4``eAA5CHd|@G*mMcWn>iCe_4RV^k32G>~^U&N1*fp%V<;&D|DK z*1{qqtJxqdvT%Bj>2Tp80Ph{+{?&we1=a<(oRi2q!|!WZlrs?%1K&?xwqQ>&GV9#k 
zz%x_;J*ghm)M=0(r$5a31c!{x-3j0xg(kwJhSP+wx9fV4n`;~D6g)qfPcXK3lvORO zUJd}ZQZ{-Q=6Oy4<=<$$#5`V@YoWNI%0|vra7z#|rrA=c>%%=|@dN~$?LM?{$oaBZ z_mn@pR$lB2XCQaf%;`+Uw~me!;~P_`B(H!|VzCqksNc_Q_zkp<=W1WlI1~1b%^bigW*$gM zEJJsPY)FCDcl>DV8Q;vR+5jziynt^IxblTX@#>(8mWDvJmMtQl=!jZ7s|s`OP!&&Q z@z0H`)Mn0k!#m`ST;5fao);9Y-HwnmCNpHNf0p&!nmU-r_?6%6a}Yh>wco`5&3BfwI28;2gUew8Z*q95w{7eI@0AMuBX(^Xd$Gpq`t8KWVP$C|L~kJx*5f zEM$i6Uj%W-tV8!XmYi1g4?%hrD0eEeha89`81F>*D|(HNV;vitEY#Ngadc9P0vF}` z(?Q9!In!rCd`+*s{{9Xd4#$%6Cg12&p)Nra$^0P!3P>gDvDsCSw%8xvB^mv4W;T2~ zh4a<(SalN$kyChoE3LA%*d%H-aREF|zzHsgG0&}&faZJ_b25@U#!CKTU!SvgW=EMq z%ty(CwudrKM~m6Prse-dF`GiBk}q#21KI8%Rkk7OHfzT@9Qa`D|3mXTaXhkm=XApY3_?V9>ys+J5{DmJ2bV%9d&@`aaQF+yDDLMc!N~#N@LN3uz*|I zL=BIwOkrW8?YX`qkq{pf2uOhQWU#-t0k{O$V=)z-8Kt9S#y0j^$-axf#4ubrM7?@v z0B3cSP1miHT!}xtq#+6GZM#jGAFb)>(iDUgge$N`BCvnV) zZ>Sa8Jq+UqwDC#Fw=1{5X{t7k@m%?;lpUMiYD{D&R$|D`m zwr%7QUna3qs2OIHGEI}~WAR$5Rl5@UFc#*xKc#J*FAO+_ zbDA*^XbQ(27^RSj#SKK_W5=rt6tPZ4!DeoM;d&u?7#yzv1~f2A^Yr42hF$YAVT(Cv zVJvdOOzceooJc&KgBNh9T}0e3mthwl&%?iDayuSXK)Dy|sqM2G<}R+Qw?qhT>`K%e zt_slr6;kqvEa{J6Ddt_u|AhZ=kitl4vTSIJx>0v1W&x)*=oi*k`YP@HpG|?$=00y| zOJt1f!DvmW>SoT{acBA`KPKYnt*3Qm)Jw@_>X)Q^;Sbybbh>nZ$t`V^ToQ{oQ(2L} zTZg)%?k5;_p69+(!BP2PiubdHvSJ>8I^hYyzmXD<1?3=GRX7w)qW#*>jJ79V^J z3SstXb>b-6(CC?`%espiyr&yn>_q`X_jw|2Yo`s^$n!XF>@@=n4F<90+Rv-kc)125i+r@t_-n$*D*YPjNEa6FwVL?T zJAK_byughXUvijRTy`DS#vMa}b35V??8cnKxV$0V!7}F!pL!EF-K%J~WWSEm`9LU` zR(4+io6Jr1Bg$T*QM;Ncj}JFX2@Ipi(kiPgILgX+lZl2((&lo~EgcgRO~huYBxFMA z2;7cJ)0Zu9At1b2|K$(JG*4Ejon;>D_>g)H!~zJ@h~RIx`a5?!xdN~ZVY(A%wVrg; z4Vg@%zM@e=93QM780gU-c392%*cNFmiRgm;}>GSbj z(0lyys>rGZFo8V=1j+0JjSGq_dy9s#_3*QKhq=VM+I)N#AhAAfE;@~ash zIA^Amm${0CV?WA7eF|onex2ek{v7{EZS!(kn^KV2opr+od3$W} zD{!MIBRiZRETTacG~sGY%rDgG$^V)w3imsCp7cU)A_lrETcVVymYB*kXQmA%C#axV z#0YgO)_5H(03v}mU()n0CqD2hnkrH;HD$wO>J?JVC;hurS zL5Z+mHF(9T_LSvgeVj`dh+3_c+==evlz#T1+NxD^niIXnnZz~%5}$iCFu5dEKFiLB z@$Bfgn$60$f+NpMK61!u-iCd|BdUWu4Z5=1S)gMN+|GfnD+Vge`G0`!&@c79@Vj@n 
zP6}g3X{)N>8t+}NcDwJ&9#P#Z)DXhWLYdJ-KTS~GCA)YS~z=h1# zeV$yMKfWE+Kg=8@1;4lx{+tB2)V_QX{=kEaWLq6RC=(!xPa~489B@C6E{p_XL!}BAL-vb#=lnfc43XieUGDJ{;FOtkAu^m+P*7q+-y=k6IUOk0Od7rWG=|iRPiq>JBmbq1Z;D{xUpjNu{aG`-PcJ$I3B?LEnxv#V zSEpCVZC-TXezy9qy-0Tvrsw|JQx`iQFcyhj^l$Ua<|T=^;;U$%!ofNOI}@vAEoI_rGfzxj^1+V;G^Lq8)0xg57LlQr zV&DMGz|>$wzeu<+xaITF4Wd{NTL1waC;Bm>MT}0H#7qcNPJf*qU`s^o>}ZDVru`6e zIgvVH-{YmmqrU$#`5bI)-JpVw1v;+~yc96Lb3^<`eA{9(5vFr%+17fZOcCF4M>b$U4=X5oKUcsuPD=$$_OZ}N!9BC{rBYS@c0cOsL?-Z(? zB){BBqgDhAl$@$Jr&$IjV9&3Bkb*L7^eln+@*uMogtIA(Ay2o(WgwY{&`4pYU#KY` zWhxA!jBr(V)eISQ=##VaFM0(L80i~u=&PHo%h{Mp_mr_(r-%Qs`9Sbk1|^?8lx=t9 zq8JBz51hH-)TyldbI*KJ;aN+j5SIM&K}%Pl=S2`s7qTr zAkS(}05F0~GWFGVkxrVxC!WWw4no;ncgRBo-7*`$0^B{3Nm$-ovi{iENM^o8S|kt( zgRd>#_6^rJ?0#ZC%scz|Bm`ZZb6 z1A^sq^D-Ooo8t5tr!6IYtNa7DgLBBp_>-V~W~i6UhfVxPa+mFt2dwr7uqhErTH!4H zGNVn9x*U67lAnt6ODB<9G2ROl4{om4e83mafmI=edzzZ$|7?v_9F78LAp^XLm$w;v z)fnx=^mY^Z1gqUVeFb;0jICP4QMUMjY(}^}Sd&n2u*K>diwpef zTvRF_A8b~xznb)Ra}xm9RucoYz0hmDWr)zHTbk|pl1**rdyqsxfk4T=pW6JL#S+DM z2S+W7=xn-|o3B$Pto!69r5Ier&%-o)2dy58i!WPi2JWVnV(NAt3SzYpF?2%DZxcHvoSnVCbO% zxP5RoH_-q!N7UG6$hE3`f!QoG%to4F#dC*kQZ$NvBS*A(%pTI3s1yhF;|#fiRs*Ak zyKXdb;tz4dNs{6cNm@dI05Gp(29k=nKcs!QxdazakORWC5<-`~tPfBXGW4v8$~b~1 zQ$TN%80^>mNCNryxOfl#=`M7+UD4i1F^WiW17bKvSFSm6pkrA? 
zr16067fqVSm#~!(4zrmU&jTj=YsgGmJcKGYnRM*R+?eeWG0LVE|NmkE%Z(ag^$@K^ z=nmD~q$RSGBVsKh!^$&u#)szwT@d?hW=fB)YRN*0#-`%V!Vql>7!&fH(LOx^S)-b5 zD5>bz5@>ZX>~l;vgCH z@8ncWyq~$W&Q47TuVE`oLO)*JsaYBZXV_ka;cP|hbYRy?fD~XKPzQ`n#%yJYZ87Yk z!aGZq%1-6vKs3`~2w{^>iQ&pE-an0@&CNgha@82GfTAin6heP8FMQq{G%7@8*cShc zXTHROuBylV4(oAi8zbRmH(EMJVVM45VQ_4th}t1s)r&{L-lvOddV(WF3UdGuNZu}f z+_x&vz+xdiJtTBA$rP24+#rXK&(F=@bh=jUa9dibtfGJ&V0Zz0hD|J04exsO-f%Q1 zR-Ug6>PuS%3p35)%F2f(ly@&<9t;S9qIc1RKVc95CNWYC^K>#59s6%ne@cr&Xj5Tk zLD33ImMv*>dDq6OBh9;*czQD*L>WdSowWg2>yI4k|Ea!JCkS@M3cs61HN-HokMqjy zAVDH0is!B2D-nn4y4tIH4`y(018o(BegN&*gip!%#V*RdhL*}eTa}slH?WIZ8MUG) zf?DabES;%}`P5w8M~@|p5AE}Kn=lk^(;H3Abd$f%te|#kMHi6i?LS9`IK8o_i~dS0 z!YYkYMtk28Pumt{6^$>a8M;UJTnUR@=#4cNX{ADSNR$kM6C)(aIDeWFDdBOejS7y? zw^GiypD<~-MaieBnV=nkfvlAEmVLk7Z+J5X9Eylu7jeDy#Z%+rx@?4R*YJ3V#PBKM zc9}UklkrI={bHNDKuMA);wQT<$gw*r_)rCY0IEVPV5jJPH-lo-3cci4(8(^2UQni- zH2WCDUJpS->&hilA8%TjU=;9Q#pfgo&Er}xA$A|I1g7_8jwBx_K_2Kon2|SNZj>%c zUq;qV*A4f2wGkq9t94F!c=ZcB^~fEk|KxHztogx=NYzli(5j7rEu?cW{Bx5dLcQpD ziSksIDKAd)n!C=sP?apzB$27jXNe9`Rqd*aQBV2q5J&=6fVq>PTbt)`)wkbfM!x>A z6dDl#;{!oFgum`JAuy4A;|e zX_T`Js6<{yhodh*d=B|}2DH$TW7qPg>)6r-?S_#xaMdZjmCa4^qb&8+C)^zCqc-kX z2SN;yp$s6)AYkG40Q)* zQnm0XRn@MAju8}!_o8Un%c0*(AX|*;U7}*{Sx^E-;Q3_ZNCnvP>6ZGNX!cVvi|S%) zmAgjvC3a=X_O~ z>S3`69{|4`R}3_c6%o4b$TYA;0I(U?t&0}K-*JVGva7cY7_{I-cF)&*uuiDucS2yb z=#)!3NZMPJ>H9tAho;zBI+5mTP%s2l90ri3uXZ75ho8yJdQo1x9}96buN zGOgXzkXmbTe0Tzv5DXG2ZVyS=WyZnM4PR?-g{#bJ5jk@Brd%`=yynQG74l1tkE4SO z^D<3q>c0teA$^ni+!<+BO@1{vN#=6W!n8C%<>!>jn&)l>;Qd`uF#JY;^Q> zA8|3f7S3(WO{^Ue9SW)JA*Sn-4d{H=5?h1^CqJ&9=xV{Xjk?GknLbVFcFDqBj`^7d~r;8WJ>s@f}09zhs?_tYj3LS?I6xaMl zKI$ugoff~MV?MdulR3#w89NjXgh|fMU-HG>J}5`fj?jt$G4E07n|uVYr|!|wR5MC*vMII(`AoPg^Gj0u^4wdE9&!w2T8Ogc6pAR96 z8_VvQe7djxH?NCaSX~E*+^)14<49UWk+|-BWoXh7)BWmae{>^L<>s9q8a!dU!7$X{7;d%E?g@xWHN^6fe4hDY1= z|Lu75eL5ji{YgLb6C1DJRy%+R$X#ma-9tI+N5ogKu&Po0AR3QBn4+twfnucAJz0}V z$I&gKf{zwC0X7Rb@sC4k%Cf#ZtUquivchM2pJJ`;F%0S6eXn|v164p`mvXl8A3F?- 
zn^&HPVF&NPD4d#}h#d#yre*6JH!V0;{tb>m3w2s!RU%T_Zee|Ozqx2$LNKqo$>9+ zw02)VAH_DyuLBZA@|@(;sdJOE799tVrYO>69y7e%|tYC zj!Rw*{=wmvtl$3J!*9Blq@CVs?=`#ba`EfGlhgG> z&hxQ~(dHj2sw?@!br{R}ncSJbs|-bnlmzAweTezU*BJotH9Ch{h? zIW?HUp(jE`;Uhy*3Lx0J3Hmx|7u!MiOpZ-Y5~|hoFlwK)B1|ur?SI@`aBaC8Aihv6 zx4ocU^q_SU+4C|sO->@9fg_i_%A34>tfkS=PTP=eRrh#1m>L0zR5MR6 zs3f>dHWm|swQQhilElTAY%nssm`UR54ps&G5`UvKD4YWNBoFw(IlB5ta7+h2R?|o$ z0FstpLCrqj5EpJW$!tLO!OC)6IB|mAPnvnbPRFG`H-9sg1*5oD(HOBIOT>Qz3lV&a zVC(CT54jJgQ+$z0axeZ#Ijis<5*vyjzhIGz8+NumN*Uful%$4Clo10}hiI=H#x}}e zAYOSyVam_0CxnaL##8=TP_nd?B%_G|7&Vwzv2?`ct5qEApPA~()z>5sg(Z$NYE{e= zfi^pQ)Oai_GI#v5zos&9HxaqE$m+jLe|dZJre*D0sIg;p8J`y?ZN8UX+i_6d@*ezH zoE1u!cklgM{5gfpil7P&LcLa(`jJQqQeklpDV+d_XqCT`X3YI6qP!|JIm^>JWU}kJTZjJy4 zA}c%^H=i9YYY3p(oY-iN{(%@5%%mHo$Rl0!j00+^;e5H0RY)(l%g<>OIFv(YQRFapyFh^K~laZ?(q@RL#i z1pYXlgv2|{ctjU_6xD?nH3Vq@#hkQhrghna@~}}+kWodnn2b7W#|Du@kPBp-%>s1m zDN#CTO3(NY>$lp+^Tq1JkI{aq$^JRN3UJt)5j?i=JY^?87u{;flq-J^EXh#IAfOEV zgUcZ+7<*mX_FVq10P2g4ovW*|X0;Taekqbc9}RkNFS>1UMEob4e~{BREzu4_KEo|_ z^jek8wu7zi=p4G};f;Srd>Toigb*{<;oWep8&CpV;01ftB%iy$PdT$<)#_HCQOCt3 zdW`YpCco?1693Ye8D0q75~(8HUx-pmUC;pchS6J7i{9c?6PB)Iv*E5!W*SL%;d+gz zrRT-*Ii`nJzw}Z;q<;-Ne6+}W`n@WB+5+T4hlmvnL$a--8Soly4_KwXk8P^v4G#M) zu@bK5yhj%|!9~wD-bMZK(P<%znp$k47i;ZoSpGD4jObc11jXjj+2X9qh;QIYZ@3hs z>~NA-r3=teTB_c|5LT)6>&xv7f={xyuA&*Ji=+IX>5q&YRs)Op^_6!n1nIdk#p+(8 z%LpNyzS^h*@hHW%tv}g-%8Z}rK=c;u*n_jh>F460MOoD0Ztq@uT8IYC(7=Gi4>)Hz zOLk+{@-@RcCgU0kMI~@lYWNb`E+dV%?nxXN1Mao3fT>l__5SWSEF@HSZv2O zxP(@XYf*$<=20S9ZtwlnDYU2i=)CkKsi2Z*kQms$PPts1O!qg};UYX`g;c=b4_9c@HB47*~cPA9K=aAysQVES| zOjGez8DpIGN|@+Oeh}AF(EgG$RBBh6z;R5@whZ~#D5Sr`kim0xEVY7u-+>Xi$m+bDtl@MI4_w>J&?DHTfCr2MJ3@1+y*?nS4Mn* z!zKAEjEZ?~YeGuP10-Z9VX&U`J>kn{V&LK=KCQ0uvKWRKmW=<=LNVR~+Z6`4p%YuD zsE0!0S(C`Cr&KS!lvo++o=lR|u*|Xy0?3O@*(-x9QU*yX)ug@y!&52k0w8XK{2Lin zfrj?m)i&Rd2z=3e(FALb4(=5BjgJsrqQnZ}S_e?Q5iP3A4{lVS#4Y+qfvp?;wcvCO z2Or1rc9}R4Ed4eBD->#^TAzNOnsqf`l*!xms0=K6l0&xgX1G;#HbQd8_xc-cLwBMU 
zVbyYx>SkT6y+dgq(YtNvU3WdANeO9J0&Z!Ut!}X}X%-p1&?N}}TBx`4*;Dm9jWZC- zIRMO8Ya5Px{louHkvO)xqOTY@Z$3w%QFeW=cr6TiUFP;_O2JaJmW%9WMY`*YkX_6-4{ZT509UP;$kH!+aJLR9(*8%r~|Mg{4<4HXi+_6D4nOK9n zm~*qwBlfQ0l$DQ$9vF~ko1)?f>(`>x-;CVegx(_KVdFn!O($RoMufgjy0-YkAljJr zc6-pl(BJTv8qi&~bF3vg*ee=yLfOG*7*rZ4lNi0KMJsLQ6MmF!Hp%UFCI{OMk-h8m zza+GrxoD_+#mEB!60>nfK0YT}m#jT`>`Flqc?_4_d{qDkQ?mFBs3!11SOA*$RM>p*wDk|I$U3uE2ProZ_Lq*-=W^lrj13c9S(C$O#MnS zS^4~%A5F0_*R~EUgu|GPdT-N>=M!H_ixnbU%OkvuwlpB6U8nQQrO!1cv4kPcWPKK~x-e#~fPRvE+@`RIj7U1*atzY+@6sb-8=nm*AKAjR(i>&oUn{Ppk5& zc_pCU{x9K4`S(h{z4CYsp?*ZsO_NAW=e7LPhxNz6aB4457@gc_{!Ratya+Jexe|`h zW*}VU+*I~SGY-N8Fp5r7!YK3uq21B|l(UcDgdZ7oX7*W64M}svr;{}Biu+_Tx!f9% z9*ZuCPFzta#23UA^-Z2SF#kn)wJ%!^so`~)PF*Fa1&CML$j||?_eube&L8_dT`B^( zmZ012FCXA+=egQv@f;6q^}}*t{*!@fFUvrQ<~KVeT5AzxtZ1sJ@07|uaG9R55RONg zxT3oPA925A-&cjNezpvS3#-758Od;I$wK*+Esl6s@1bkBunt5!sYc0|4jRC^jTok7 z&bol^F;O`HGeFG0-FGQBP~I-U+)7i`{bq&a(g4?5u7W=Z6zCI)G$DP4Uess9e-C)N z;9BEvIYO!aN!G3IS@(-tI3Xfv%A||C&CGWw|G{L#h#h%Tp3l$GJ`_M+bB--$Neqi# z><{le&M2a6%7Fieaq4C~eK)*lQBf+jWGzQNpAg^jHU`#sEj@&GEMf+|6}aq(DDdeK zP$LYR^Dlqb9Wl563 z1Br)Cw9iPg^JLyGgu*1gG$C~!1N{l4%L!5L?)0~k+I74pFQ|$TxtAZ-VSx5%3^X!B z({#NBfa8LyeObf)txQu62K|qWmt|yZ-ZMf=6Y1i)!gIc6xVENCCE$`@(;+AoEazbk zq^7KH4^Spdpf{J4cL_T=jqO%XRzzmD(gc~j3N0Qhhtm}t$52*hk6EhjNdD-_~;uYa_F+%Z6I)1_=fn(*(W>8MyuWT9GQ%v9Z-}61I9;9cvyE zA5-`m3O}JD8;nXOeZyQD=n0}tU}A;jm`cb=rilu-Ja^;&UB71oI2ri9IOJQeG=5W})E(ro`R3*>bfST36N z(iJj$D|i$Ue!(t<8X|aub@n8Nd*Xzmi`x! 
zMr>l@Cn$f*{cYRW#HtTb*CvyZIZl4@qJqEnSLqNo^$;+aUpwCr0oIz!hd3=QSPPx$`<=y(Ryc{;_29s3kqY2xuohQ1b>BLN-Y6)rk_ShkBX#DVk{JHd$9E!`>Jh=}&GzV>NM zz@{?On?df^LylAWizRgQ>ol%R&5d&m`D>VsIEcPsOh0iQP9S^v)3BAEi7r!P6avSI zNF07LGbP$DsbQbH(DJTKv{t3cxwLs5_|<~S7&Uk`z%&>zz&`0IMoUm}5!ua`fCjF; zUHs6LqG38-q7N5y=xO?!*?o(3Y^1@XB8sde+6tE=PL|rlG)IN`2_Dy}*BJ@2M)bZ< zmwUX#XBR9z!pKn{68NDh5I_@*}*+>TA-H{T}gYF=Ivb&8garWH|B9z@qHo}>5t+Ml-Wud1mKAaJ3bZmgeaguDCWqhR1ZMnAl^db zFU#g<*^;q5m4#ll?;Br6i~pwAAWMDi%IT5Zx9e3Uo~GPySgjrMc4Vugh!VLuu=!UY zIBUb?P5GSg0o8@6FF6-UKkx3{;~{ke z39<8it%$!&D5wj4CICr27W)O^yNQ+_!k|fd#mBGj5~sDScrw2n=SVnST$$OeRoq8}83m|7LYSD>EDX@Fx*twO^yJXY%0rbzX}J)iXYwHL%O53n;v@!$0pMH0$AG@aTSyDeQ8^ z^ziife@p;`_tNA*TxnR1qwwq~0=>kEd?y*~wXbwYJq5EHGs81>KWi&u2B8Lqknrm` z07=6(D{6dKG84{6UYi0}wLDqhy>;o9kz#_|A(LFh#5+jzLVfSaN83Xyo#X>*p0ChT zmkfG}U@WD{=b$#9w+e$IQw|k4O^=u2GQIh&UKc!Z!U!$5!M&3e*e2norK5C`VBr^# znwSL#beLG8GWTlFWX0UB+{d;ef3uJApMNoGua0Ib$310_Q#W$@zQyAnd+#>Tdl ze}NR6H{Mx;Tgq9>s_f6W?y{@5h&V8c9Di$@u3g82u($nxmpk$#%fHQO!IbG_C61J)b!?oS}HzlQ6`YL)W^Nt{?DYqDsvsd%+J`$FLJ zX+Kqz5=7m)mw0a1WU!J@ZfHn2AQSb3*4^Hk{xS&%yJfm8UDTgu*mQM7%h{CqCp0K$5G^Jg(;C2iXl4atI6=?yLxDnN4y{bdj zQ@F_(NGG?L7r_C*DFkNo7bzeEfH3 z^r~PVGE`J>edKA0vLq@^1bJ9>BV2f4Snw>-KEw_RX`qdkEPENRi#-jz1t<Rg9~S9TlHiuwAY9T3smN&2>Xqu z+PI?n7hXwqqL*JE>+Utm_qVp8FZ0xhAmQ5j*0m<*dhlqC#ctL};%cxfFR9viMu zMC*3v=&3r1G+_Wsk7>8w+||< zp^z`9EuKqUM6w_HF3_Dy;{&h0eLVrn&4i+huZ9%-K*)3t|6!bX6`Tl@e+y9zk3zF|HChP~ zP5fj2dhR9OG*@UdN}atthMm(h{JslA59YG_{J2c!zhlAz{hs|McPd^dqaMNWgm>|n zht?I2@gVM$JSpOQ3xag#+PgmJpRBymVB?_iB*1rs$FyGPO*9bQuW9N{Na7qJs@WIF zI0*RL8bdA(fBkQKvR$-RBa51)s$n@c!fuIu$A3plsIE-Dyo1gO1_E8N`fi%*F2(&G zKXD${&%qe0y0Aq6wjPx3Yu3*ZCb{=Hd*>#DvwVLRdLE7r@9%1{Oeu+^P$M zrUJ;xL(*cGc40CZV48SPfzf0nUB%2<%7b#%^q(-QW=DFY@=!q%=4FbFs&y7p+)6JB zMdu~pHgEpGUyY;JFo&}kAJbkjk5WDNJ3p>fY#{ila3*AaA)0Q3QGv2q!LnQ;!u>M{ z?EZT5sV$}Uz^PO}l=)`~A~g4fWZT~kpX4@(;)|1R?eDyDtZ~)z^mxLkEONxhOHu%i z2CbwK9&1#@_Z$;773ET-J@s9&UIUQq7&qc9AZ|R5iv?&uORG~s9T5B2WQ~$WARmfW 
z^-eDW9Jl%Ah$p0p--D?TeG*uap&OJ=2e^ofrJD=@FWB>1;X}@WkIVW1r)ezo(}o(*8K=F~EcMYQAMeD$Cl&%l`Lv zN3b0N8Gv^h{bZzv#U2_QYrM!Q=|rtEUz!S+#T?4Purb>q;Wp5*Noe>58Qp#iu)SLk zHqDI#1M8!HcHdRun&a1$L-{*3>lfgFOsE)w8~x`8!Mlp$qu3tylTkZq6BcVV1?tJR zf+Zv$?F_bB(ja1Z_e~(*SoH8}+C-w6+;(j>JmP#YG%$smmTUEwA;Tkb@4|A$_Ke&R z`Ci*SgNs6TpMW0kq%lLr$b?Ak|5->EATO#J1E)T)25UIhR7V#JD1P^#r8LdHeysd* zMDG!w6~r7<;3;zWLT>3e_-%24Y&^t)VyJQKVe6V#s?2N_TA09)E5s`Q3;$j>OO(YB zI(r2NDvv#FQZVuKjv7sN0n*-Jt>IRv5yf?4Njjm;CW##`#$@cuTT(sx(l&7LvgkE< zwq;{kC0+Go8rmcnjwH?m{T{EK<`e{4qg&P6RD`f#1Sl54Cd3J8ZilddE+uQNXKeG6 zIhc{6Ui0vr*RlR*CGsYsv&~x z*=fehqlHU5HL7K^#l|hjKUN?=;&{F@QX^1aM*%nq6+{e%JLcTxK@GtC3}t!{^+PgK z0YV92s*-ttHSO~=a$B8&x~7d-c{%>oxWXpvs6S}~&*ccxJFDBMZCtTa8jXsu$oW=I zw%OUoL^P=AY>n6TgzsdHQ_4mT%jD}Fh^o3PH+Cei=o(G+3qXzr1M@uKguBlwDE(di zvPXl_?z}&md3~|=XeSJ5qHci_JUV{7$^LpmwN|J0rQQ9LPmF7u4ae1 zdrH7_JgX_=BXYm!dUxGHD{IjvXEG5;v4xdllRO@D2}r%pW{73$QY0&nG^Gp5Xf-iH zlwndyICsLf6~<5IQ%m15xenT*2IbR327wd{ddvA<3)|nP_H>o+#;(?j)kQq%#@P{B zT8})gjSfP#W41g>&G6cHYd1SYdF$I)4@YjnMJq+1xt{}kpD`KdGTw_1af`m>YQ%kx zoy_2Gv*eyepn!2Vah#AX?OxbA9a9pTU(Du%iHcT%t+xX0zmv(klG8`AZB(As`;hhw z>_c!$xcj& zBAj>6n8-I^J0H!!fzH~?TZeTQB!7>l2(b1O5iLrjF;GYNT@IkRbwm|TKT-Wx`SmS zmyK8*^i!O0I^rM`HB8$pmYf+yS^+@rCrvw;nb6b6dkLHVKD&nX48YQJLKx!>9+4s* zY9Gm#O=|U~7z5U74q2EhvEA(36(l*DF3hVAX)SpOt+iZRKm@SU#^2ls>S$!?fVW#g zq(=xw<}PP|zveyaip{-P{XFZeEEp6vM=-C(Xu^)N(Kusetrc_;oiG&NqVj$ms|)Kx z%&!!SGtvLHf1w!sSc@1@YFDf0$Jr121T>UXvXdKJ)=K3fBiAz97QT}E7Z`^Z+AXnD zUj%&epBRV!J*xxjg}WsUrS&b6B+$_bM-$}ht$ccg;>Y~b&hml_Pc)rtyQg(q@_BLO z{~s@pGIOeoG@MbuFn$TXzf(W5aw0wtp0}Ky zI=^w<+&)~)Bns}Vq8_!I`9>vDv*c=gOO6dzED-|c>s$FtBd6wB5Ysk22q_W8CZ0oA zkxJ@SRP~WfF$JF!2nWDNBx-k2V9Y__t}SyReMm2j&Z<lAvX7Z%{LibV$UZe8-qub_ z^B+mxwJ;#kUK=kEw;e%ZtY@nug>ZzTot6J)0E(?4BW=HkDTgP5ZpH`UpquWh{L*!H zD=Ni%|^fGgkucS;w znNKzXb_W^wr9XJwQKY{&81tec%D3M)*8#~kFK$r+2PKom|MG&MBK6z`k91;Cb3A*z z0V;XoaQ`cGJ>tnW?RM2H5c)SIa036;+S#aLpLxUb^OXSItFcr7#|>gYET$M4pEW>v z1y)P{Dx?UPR>;-EAJ#)yb@+f@r8Zq+pTDRyz>mXeIZExFNddA4slBQGIG-U_V!))Q 
zwc*pw_~~D3`_tCP1#F7c=%o&GMR>L7j9)4Mo>I{O>l?IJb|0i*qDKtX=HmFw6ijWs zu`ppgEbBp;zhBI#0@d8X$QNb9%$>h9WON2h{BK432{d4e+H_NeW~_S4&$~sNbMP*gwwzpX7?; z-_+R|w#N3*;Kn}s^4u6`6@ivI#@LrAGo~HQzJh5eWx;lIF-wb)Rf=}upr%v%CT8m% zPEZ^NJh9~vsVNa%To#ouWu!OB_66lTVYP>iQX!^TmP^_crEfQXWqao=c^HL?5F?g? zFCr*NYT%`YDOkOTg1N^LJAjJzY`9~39l63tF#UWliz6s>B9W@Z$*@%oyLCa#?m$7v(b-GfHZU9M6P~tvp37Jk;X!q1?(V(K;I1F{RdS+a zF~~T33qP)_=L&W!kU6ggfczT5KT5P*jZ#s*6A2%n{8CBPlH3cTlgfbOcac4&{up*y zSl=Y(6x{8(?(di0>(wtk@Y-{v@OUqE%FyKe6so}clg3CKEn>j7?oza)GNSmRV*j=y zq|#{Lu&*3L_wl4dFPuvxjS>)z8NIch#Ee2G*g=^T^qf|}UTAE6Mtr{fu5fdeSJM?w z?5eTFnBH&NV z4e+U4;BDt)*~kmOBxZ>YYJvgQ<}%n_g7%B|711#`T?6+#pvmhch(Nl7JWwpEewDMJ z-g)KP7);vg;qOfcm(yY)ZcS~WmfKFIcT^xT$>4Z@9gtb$9ULQ6?g2xX^Rnz|?h89H zC=v++D?ekyE2tmE z2k8e6V0CJBjGkL_Wnx<|cHVy@#wM7_@NBbHl#Tp?$JDmFKuM|+>qAf)1GV6O=kc!T zX3MGJQ9T{>!4=^CqS>sGtDx>JG;KJ7)qTc0i!^Pc-x3@AE7fWOE4EP8V^Mf22v5^T z0)VyolO_w(cj9P`s5~E#y{~1}HtYg^9AY1X1)DNEU?!kmTq0-$%9e{sO~WlFGU#ks z>HZKFemC4O2ZzqvuPiKqoWV1iM_~(}V7M?@HTY2+FWFLJ6^Y*^MmYUK$gr6pmaB?! zwb2oX@^QAt27>zzHLM2w1HEw{|{^`!U2%Vu4PwtUAmh6!sVrk!EXWOA&r+ z^h>!pzRyLhkKL6CeWBn9cn~a?DVE=<_r_gG7iEzqdkhTw-{s-2@8r6f`@npVi?@oA9`-Hn9v91Bi zAo>bK=!u_|0Tz>pPEU*ng9t!E68(xh_jhLt`|RpZ1`MSi%0BdG#VKmpy6*$4_q3m> zZoju9aL-I|A(NRED?@@ps%$(&6xe;~lCXdWbi5NrO+n#CRDrMi@qnI5h{^l9Pax|!^!Ne>d7G-m=pHoLgG zg#Y?#p8=s@{7_w0rF&37X2m<{=a$PSRhySBqsM#QW%IoBZ+1kv_=~IgvAp-W7Q)$Z zfX~zIG}09WX>`3b)PL}?zFg7pfr9Z{>d|$Wquv(b?h9R(AJ4#?U_eiV&UYm;%tiaU z@#*jV-FE_FU43ASUE4;GHi`=tV7FQ;#}|q8(+An%w0d@;KU~KFW{Sq+8>q4!=q3I$%^{ZfRMVdiTG+3(EJ~XLqlSjQgs3kJq8zl$t ztxM3!>EFk%PF*WcPGsd+SJJ|_m_RRDEasNad4W?PlTxGTh~VvBZTcQH9BuAHK-q>! 
zVo+9W8+M%nN%u9(3JSdY!2Hm5uGL+TZE#8N8}vGRgM z2@qrc1-MEfH{J^NX_nFdO?Uu`FTOMSPt@7Z&MgfE&q#Esh;2sMufh)r} z@%R)<-Ds~c-g{FD&;%zR|K?bZJH4KS=+Ay0X4BlrEsk!!zLKS4LupH)# zrO?{HG;jv5ou{VvZ-Kp=KY=7a4A#i5mR|<%k}Ip;DzAy%WnA!KTHhUs*0HH+T*k#N z_osf(2rMbpX%`)F`^e0|_uw|;kiz9HxjxtBwVjj8W_;#@FuK(_G8_0q!K;oA;8?j8a+} zaQYIcnsR(^ugyN|nR)Hq;HJiHFzM{*x%&*f7SIuopi(cQQ_5a5E9D1wVGRR#ZO+K$ zjO^)#4k_=RWxrY8yst_}NlJ+oNRmQJ;&g_q_>@ma2rB}!XsMJc+rQ0hj3~RZ!n`gb zM6#Uus-C2oqeG_AZxPmztd`!OwO=}k#nr#Lxzy-8Wh??!jtL3M1^b4W82tQtQYr;3 z-HvVLcoQ_#-s<1aT54-U4uAopjTD=@*r9RA*+gfL&b(dMza89r)imoG9Jf8KkhyHp zC}N+<^0E!rqHFt_gHHgR43vZMq=wB{D1e{tYjb7>CtJ#BRc9`XT{dn@^beglKM4WkE!G# zA16(}nUD1_Cd!CJgRi^!@nnW0zNv*ge!mN`FTzJa>_?>g?JC3A&!w;^>a(8XKRawI zBf(;Ip%6jZgF^vhgGX093cn1-psiCF_+3)UASiP4RT~{EFHFAq)ervTw&RO zFQtlgZApDZzIM4=(GCu0DlAvqWiQ>gSjTj$xUX)$PV&RAk*B?yu@xS3FZsKcK0bFa z3jP`Z$kH9A)zOO&2l@=&N?z52ZoMf_5o(8C4Bl*dH$Jpb*Td$%iWkM;i4SWv?c7V? zX!OEN)XMH&OTL=+WD(cMM!N@wcNh6q_esS8{!(!-l8g~Ud}n(N)QEG-ea?G$H=^z_ z)x~L1`fF5+1npq>Olo7;|CCJLK>4xnqb=Ad&P~c&V?bKXBfv-y67LA-D0+VrYod6g zRuJ&p6nd@9lPtZrA_SES50^N5%MWE?QvV$JZZj$2s{zANv-3%E317YUJ^V8E30wc~|{-mXP3PWo(lG6|n;G}zN;l_EAjxhpjvRk9i3 zURZLuissA9Lzy2K;(E{xY9nZe>@8lCtVYeVc_!MM;ffpCmgeG44G1Lr0v%mHS|_zK zN|E(3`QHpg85dF=zJxYdUiNo`KpA=D2xF0t>5W-Yai~#GmFnQO*`77^wgxUTj|xuRgKMS503!On9gN$f%({r?afH?-537esFRu)u1A(wD&vSe}9;8+~}gTsIwRyj%1t zORYH0{6uD_k#2Qap=U|OH{;Yd9sZ6te|fHD_J(X~phc5&fNGKxINOq~2Oc8z?pQL| zhP@3<9sy5-l>o2{0t>lZ)YU}IQNjK~YvC|pR=IbcRFmW4I7L-|8V-FoDu=85WyT;F z6lI3RzRKY}(e5nUi4wPNDApv7fW@*yL&q27HZ%K>nO=(S4ILzHV;8P0;-Re=&^*+R zqLG(QvO|LRvi$~~@|!{{h{$0IvbHN|7lvS=8UGadUv2IO$2&X8n=HC!5D+;=UxJVa z+sfPPW-d1@;^E%2=nX!Ftvh2)jF)eQ{KQuVO&UR#ZwwukkpiHY0}tl9P6mDh(zBQ( z9zuIVT2=3&)-m75pGQR7@2K;tn#)Rkv9&n$MpQGIfgN`&k=Z&t6aK-`FCH2O!lDx? 
z!0w4t`2CWl9r;^<{YFNC&=^S-Znu}533yGCnlUq@K?>NDlO!{gN0c8KqCeB=*qJq| zSpB*QSM5oFumB6|#<)Q9ZL;mG1K)USqLZ*O&l)Llu1j zJl?QlxRcVRjts8lwHIWww0(KS=QTbOmUme=htnVxk;jJ!9H zo46YXpqzPyTk6X{d#7XdYRqS5L)0qT130isCtbk}mC3*EaPb6j=d9^Uu zeFp(i$D*>dKLc!_mE^IIA9+C!@`{{FQ?)5Dg+ovtX)C<~Stk+-&9nw03{|CSbJqL+ zbRgyOYq$Opc!DqcvQ9T@C$Bg6XE#AUgLysv5Wc*wM^ULrMaq_;DFBg6gbHuI9XdnA zo&=XX-luNydH(>q#elS7r0yhK7?sUurS`db3V-3&d-o7Sxov8mTcju||JQ{CRt_01 z7Urx7HZYdx?e*vS9V$M}Z*ZqKgoXigZKG$-aI_-(WwO7OO9u>z)`75Py)6vXn7EYX zX5y-k$jEm%((9CKYN>)~WF27w)tk0JmXbBNAU%|W2rsY_ZF#{C3U`vxnW1U# zH7tQifv37jf?EAGlD-&~R}o`7_;35`TvPa62;`^rC`E$vyGsIKb!U$fe2aTaU;TQA zJudggF_!EuuUtT$vj+`KJS5o*4g$rI-{@}9*R1{vY9F`xPQ1b;UevJ}Ch=c%eJ&3= zBW`#*X=dg~CmU7*T{Se6P!s-tVzjL~QLAe1iTYEhox|w+rhJg-)p`CxeXhLly~-nM zFpt=7!9MA|`}n^s$gO3WO*rk;gB7aiwraeh?|L!5*6DW*>v=k8lG5$ny3u;iNg|_! z-B0ElFFp7*S~w{vM!Q)gHPCW?8?X%9N=eNtQk!TOu4 zzQ4l4yK1-cGmD? z@kOOJx<$LD$j>7wC-z1vAfz$5b>k(~%z0 zlU(@{d?JPAT6e_EsvlWd;)O~C7d(zN5kK1WDb?W&aeR{=* zUM^Sfc1y+1{qo)xm}~%>l~&yTU1DOUs?maLRA7+gY;o~b@PxNMA4nU`a+NrLCJ@jj zoyIBq%dkbI$Xa=vVfm^jd#b7tpPkktHc zWxj|1=M=Oydoc^d6*UX?vIO6SAZpW%CmKe_D!V9%|H-pEr&Fl-m2!q8;S2+`EMyo^2rpAVKYpqnZ^+8sEZQ|zXZ}DYSNCX3e>|#JU zVOyYuJC9T)a$#W~m{!ThEJWUPQz`pYM(%^5!UKs2Z5mXWq0mL$j=**ekVU=mJ3LmW zgUjVNU`szt3$tQx*jTcC0x%_8{sHEavhBkFF_LD6tl@;AH61xsk@?cvUI#Tel0WYX z7yGG#eVM3etbGftnLD zfRZbczT-x?dwp4IazfPRFh8qDktif_p^PRj`E*s}pfdQBzh~KF;Kvy-ids#`{g{J6 z@64iCZo2R?ycfO)D0Ip{NkAsZW=Cep5A)|oK3y?j@Dugakf0_7cBZDt2oVPsj}3mk za=P27W!Ol1(y8=c=3N&RfAz1Z8X!`?pq{|}>(RF`-gNFYH%xA~SgSJ3vPa^p`tp~W zUe=>gXd4H{7)p-;^22~}Z{giChWJo!?S&0kycGlaIUUjn(VWd2M5vz*95LGO$8b9t z6l4=-g*0U14zCIVMwZ9b@OPln6T$7?SVB^~s~(U`+3Dof32cI#IO+|Rot?*qX$1aH z&}juycQg${;iot69=|ADvGU7=^aGkGv{yj8ddyR%A3=L?28%X9qr!3b`D3A}RNj7I7_C6&XOH&QeK7$BQ0C`zumGD&3#!{!anCh{FIQ zq6Vgag^4UWWa0J{zW2(1n%8~y*S2Xm(n46p7lRoJQYEFp9Q$d#g`9SVv! 
zmN*e*DOdZQumitd(`7}x|H3x%Cb@yR;(&HC+^9_F*ec>k8b(&u@PH#1$1k2@Pv)ds z)0Yt3w^xFx;mE-Vw%CQJg8ZSmY+wSVmTB^{;S=Na1Mw3;83vKU!>#ovzZzifsrBDd zMCdNaV%3NrR>-T+$?%V>wZ$if(a^Q38$FfP-To@+Wh{Kq&6$)ca}Tn` zK?h&wbRE#(m|i4?5O{SZK(azuox*FKbidskF#Qw39q(up-mPK3ElN-Z2=%{IBX8=! z%~~gao5FLoKeVr#OJZmt4`N1=QGzN{ab=W#|B3j)5>oWT$LM$)I;((rnzg#7jGjfo zC|GYwFVqb-E%dgwyY7hjZKYWz>MK{J0;S2-yIZPzE6Z~QQkHPbg&JW?bwJ|bhOOF_QN_#e-Sn-6Z^IDw^dEmCKDO zklMH^Tv{ZHXRI2iC|D{orX!N5>Bjf_W2KB@PK|LNEi*4x+RWiN(Bn575>`W(&ef~Uygtg8YJDbQoAt^3Tw@#hg6l$ zOSl~r1u``;-+mDV88rLuVbWeVKf3M90?z(@x|My20qx8)biXO+@2u3*sx0=_Z{J0= z_Fa#V%25$EGq-n20fz%CXUKV>sC7t_<{BW@*e4F!Psoy_UOQOq8DavoB zWupv%6#J^LF*{ugshs*b^j^#>URU$9!4Y0uNd2u1xMy{&yAhCMk-715m8gxFkC-pY z{X<9%uUEhVoK9ynjZjlUCQg0g&S{#ZW`G|UgvQLU&EgMq9%%eyQH{nzQoZFxRZ4L{ z8%+F^siLPZIH!KqfwpF=K;9$1QW=myqE}O>o7uxTRg0B?=6lP0z|1PC4)DR7RyhQA zC$jDVySwpGPyUl$A?3uy6Q}-#?&dMFmbg-3)l3nios`5XiFz!-In2e$aLajc zbw6W(vuP65i9foRtTBm~Gu&8>A(=avdOS-aA!tF%KH=<@>5=a9q+Rt{qI=HMf9$r?Ia5^`S94}Cq&@$(ymxik`a&|jCd82Jbz8lJW9e% ze)FbI-yPx;@gPsx4!=)NmIgR9V~`be4te!?qX?(mfs%CI2MTg@E`2O(C_ICFFa0_; z5{~Nb-ZDRCC}?o$d2?8NN^nSvlfwn(W2#Hi^?6CA&Tz`}sD=R4%M47$4MOs@=4^1U9?BaCsAbc{aC5+hcRtx6x0UYhM~d(u&3kF2`^@cMoSmmaC+ZA- zGi^>n?HtxhDrtLBeSv3`+50*y`!C3(g<h;O>uJFYUYr5|~cP@Lf!H4|wp zuit|Nq^iA)GW@fYZ?4D<$BbAj$cl&DwKh}efY79aEzKUN?6x+M@uq&MYvm%M)6-L_ zyFOzjSf@^HR@4h&S1)IxkRKrsR>wIa3*KI}fBj{Vk+ zKtXsp!Rra#rEex|a#Nz+dg0FHzE_sU z@7Yl%hV?jOCf1~j06mh~z&?HcxrHoIm{B36!FhUjmfEY`StRK)vdH{Z6{#Dy}2bwRToqsL`Rk4An5}HkRhO|9q2_E^g{~ zS?d)|h9^Wjbs?zetT-B*8MYv5jQ>VwX941_8jqw#i{AsiyHqvhko9#H?fnhMV)%P_ zPJilJq^;$IM3y1ud|_S9G+p1sfI8Zji7|O<^cqVc6D#-(^r;yQZm^*F(~g*opg@uZ z8MgQnrBcvxGNc~T_hGLjG4DT8$a+kGg}?g>Y@!na&XGey)2iG= zDWPRCR3uRTc~p|0mWF;dk0O3IdlME%Be2h99&1pC68kA96dNmHZ~TxK4|8ls6AZMm zE2YR4iX9E4RO;Xk1PhFtTvs7<_8rH6g8n$KLx)llx1U>WGZUQm4TlVc9D)-B=h5rC z>Oi^<-AIy*odr5oR=qd~Kj(y0g7`8{!R29@c-2e&tmuH-rQ1t}vZ(kZY4eqB) zE%Q0+*;S?^1S&?*y<^ms^=1Nv@ccbzHmKu0AI3+wFoG9E)edrd%RCf;VM)z$ zNie$&aLHamWkJl9yr*_A4iiRwM{cv#HK8fPl~z*WQU7B_(^I)-Bh}uNLfyV3#^rKN 
z15Yd4eCF8CpPQ`?@9RXoLoi&sS>IZ5j~|m);|2Y%!U}aY*8JL3G|hnf)kksc=p7ir^k~QcIt{mruIs6;ii9`wx3g5K z-}53%q%lM<6COEJbRZ#FLUjAcMNXzjB++qE4}CQ>$Zz5RMSWZe$%lB4vQ|23V%@Pq zxQt_vOxY&Vzw;lK=md%cJ|Gmt7xDkS))tn{mj~`L%kq3Y3sJOdR*Fg0XyT@>%rz@P z$?Yn~jHbCZLs>ew1ntW9h)wzIY^ZLHrz%zSZ|4AF+pK8b$F#6zn0+VwPms`i7qBLO zJU`VSew-g-QN_kwSl8WGgzPTUos(4VfpI<-S=rpb!GR{6*GOL~lui`Vx67n=vw8O` zf>PqB>gJyfUI_?y=m(nCu|R-3i~~MSe}Ol(&BkNk;;}q#pMA0MsZz{~MV#rdFwMqt z_`9Ee6q6}vujxRSrYMy+$}BfCkB^c%H?j%hmxhJ^0PyD~61`dsR7gR;5j5o-2eRb^ zmXhOXDP%`sibv2WWI&d+L2o4t(JJ3YP0}a=&FtFcp@3;_41N5qcKLz~cNtsJoiE#% zjo314vYi}PJVcY7Z0m>P)+ngNa(9*3sM5|fc2{Jb5kgqVPX4=ZV5M;~W#piYk$C!b zhZiLkLt4V%&gkIe-}a0ulfRYMY&m1i24ix>GD4#LUGEn9972WjJlmmo#EepIS9bW) z<01)7vse$`=>DyYMdOF?;F1kcI-#48m+A<45d&*Cwny(!ChC zS9N(Hivy}C6*CbjA|G$*o}oPPnVkmC)PKBju_mzk4!c+XE;AN2Es9s(Y)gB%w!)n- zfhQliZ>mcSMmcxyf)*NP$8Wo9iw46EMkiv40|dJ6h<|zKo1Tz_=Lzrh5@=ghFck4e z2QsC4V-a#kN5Tj~e~D%ToiocR-Zz7R7tj0N%MDoW5_0BN+W&)!D@i>i0jzb20$8~H zNyll>uOwrX7Lst!X>tL;&1c5;qM7XRi|Ru<4xE8AK~@r@wa?zg9<6Yh zWLk*57>qk%4&JQ^3rHBv#i{_=V$VtwUtSCSi2&I3DN~IwVTB=qVIL)8s0f&Bw-5ky ztd=&E5OrO;fIZj^2Dsq2y69{_h<JPapY^X^Diev)`c$9%aIloGYO67}Tv1A6Dym#L zkt_h#JT`>08Bw0(&dHU6Z}fyT|k~ znSuToL(-Q(!Zrxb93CxxhsQ6aIM%LO;?E)R6X25MR|4iWQ7MXnF@JXT*vS0uA8HHq zm`?e_(xWwVu%;~Gq$GD4IQT4HJ*{-PU%7%B(lgSj)tyWT2_j;y^A^#HQk7A(QRB5H zKCHIt+CO6$ARD25)PoBK*KQCMm3?X_-05b@XwJ#+eXD9I=V30&ze$eRbW^s@iAvn! 
z)1Z;SL5~jb*qE?UQL+Fj7~&sxKt)wc?~<~JGnYXg^5z`e8LH@4Qa$k}nLLNWQjE5y zDrKRKN=DJI-G;nCSCU_9l_U>t`p6^#R4%cy3-2|j;hOV|MURow**JR4fw0gA+#D?~ zd`GNeL7zArlpdq)P!wOhp3)@-l022cPFR4WUaHpa!Urmu?vP8RR&_tSl#F|9Q4?qi z_9mP_vg#D+9+kc6NVO{fK|sF0Nb1GT4u^lR9sSxY!Q2y4N3)SsAo)IsgT75=t+Sk>!=PzDOG%a zM6UKC(p7`5ddOXUhZ&l2aq?q%eQmxnJZbV*&xL5ud~hK7a4C&c4Ht?a>jRz94V=?9 z@d5zW@5JV4Vl3YEUycD6_@DttWDjg*QIN4{hNShuov-f*6 z$oSX}(fc46*;9SbsVG*v^4Lz(H zi=jgOD!s+!qR@)Jzma9nOnxQSF zR*L0@xzVLHfnlAg=o0)sd0z*k2Ymi&(gxbBtXasW%0nV=E!*$Z{Rq;JLx=fnD^hM@Qo3ka5ARYx9psbF_BRH1$Vc-o@3!rQ7P#HCyZ7sp$@l|2cXaqM9U zRP1<|QU+G_7 zrX0pxTbGoX_WP&cQ+{hSL zEj1sHX`M{6*>BbSMEkdIGNxOHjqt1n-Ws`MVX#O0;a`DH5mxdwxySMiV?0uG_1M}r zY>~-5i5Rd?j4Mu>xqXQOKD{x@;GXjzs-eN@+$=qe`1lSQ*vH)arzi{XWx4bB zMmrW?kRCNDfKeI~NJxHP?!uzMZf+`f;WZ++lLET&t;(aJ72W*V!sVKVtjV)ps;dH{ z7BYy}5ubiq`+k*umnmg_rQAvp#>oDN)1%54g2N2x02bPNLCcVJUZUPaP#o`qWHhbf|$vq!Q`3O|MqIgLj_$RVKj%BIqypKmjI z_~3wyJ-A|d#!R}6w23V;CT>Q;qWcjlG`m?u=av^l6E{^UzWULCD$aU)XA(j|c%XYQ zDRJ2S&sA7~UmcM!G7g{PSJ_83RoJ-Qn$xi+ac9!N7=jaH2Idm zr!mVQ#z((TZAa?CtyDbtF-!ZS5W@h6HXa4b&ZPlWI>-#!jQIo#_;~D-HWa<`oWj89 z{`0ToF+=L7Li2#fMwN!-NMmd_NARkc*KAQCVG=J3cUh)EHonHrz=XB+LCFQy%j46HW*%|KSG(d)O))1U9*$oH8@((8H3yi9WvULA{`a% z8i6=<*e3%)4zZ_bpGW&M6%f1%jD{ux7>daaX9JYBWzE{#dmRZ5G#J8X+;nNxHm86Q zUlS&jz=CPF|Sg~ zG(vWKQ@ysEx5X#dhojzKM~ou(f8wybJuJPoc@Emd=4Dk#N>!)2>x;>-yIq0nXusX1 zjap1nlDR)7{{bSX>SORBO_mk%4{A#d};IHi0d~Nl@9cV%E-vol>MrA z&Jf`b(KdMJ0b4{v^)HiyJr}TQ`YMGPQ~L4aUWbb7-qu@eh~p1Zb59jI$+3}}HAR?w zO+S32ipadyM3jko(O6_2(#hYnxOmfE! 
zNj`M%NYIWxco}W(<#3C_y2ZpCMLRRkP#NWV;@?&Fj-k)6nyyxfDWgXX${+Yu52m(< zuBylm3#6Ye>l0j=#CKpspxP)4Zaed$^Cbi^A&_c2D9%UY*}r@9j1=<5i zyw)hRZSlc3&G?@9;mGmCgWN<0kSu-eJqJr+u1N(zdJ6OOACGQw|;R+EJp^##eK zf%zH~6kB`~xU^Ns{B{9-ImCpd-((Aj%zlR*RIlhbO{U^e@OE^!5tm$kY^Rank_g~2 zfF#5n?U6^7)L*^`%p7`Fldz1#Pt1}?2#p&g?52xM&$hQH;>BZLF5cs^*i*4jEIUl$ zdmQ4kl6w_jbt9-P)N(l$Wm0p_OfHmg>ELg)+spT{TB*>o)B}}fumtD}HMQMl8l39d z$bOYgh;hoY3}A$*EBDhYqJ}|XybT>lMTt5z0tuQw;T`GQ6oM*aIpbMRe?H$n;t4)_)yTiz$xv(3l`sr*q-MF!+p04{pRdMqrXGggT@cF`~D zoTNB?VM7O!gaK4KL}0zYU~moJeNUt>v4o{uM2(Qx%xANp&S&<#C+9X`3cMn(70&(Pr#7hDy%nETkyX2&Nt^okIFsMS1q_$OG`q#26n zf;OH%b^ESxW21|d=qLT(?Xw#?!TwZiB4&w90?#RyofG>n z$j=so5d$-m5TQA_KI5!FT&d@@P_khYOr`TR!p+GT6R*J+A{9L8L(%OPj&)i~ZcF$F z)$k$~Z2~rH5B||Z*CC~kB9$ITZT@k8#Y=?NN~gwl)WJg|jH{_1-q6`Z0IEK1(8?*; zEwP%{exgt!eqbPHI9hHM-GKsk?EYw+BrJa$3>O7}e(gykCv2qYT85#1uW<~eJ1CD% z$g=D%Nd2->BQ+LhTA=Al5KHZG-54EZUFz+!AKBmIhqt%P0UO#Xxl+a?a5^Ioh{BZb zIv#Pam4?t=5CAhtRWjeB=p0D(aKgBXQuy^F$K5GhMenF+MF=-~N_`r*AAQd~T7V{N zd7poGW#(O+kgb+7*46}NigsP#<<6_~sQ+u!DY(c+x4q-0a4-X2tD5PY=IG)`d1bN)0+FUc%6( z?}|Kv25SO<@oS&&<PIxlrk*-CnJ)ra& zqcp`{>LG`xs*f>lxMUf-An!h=7~{L8?Zg*~6XM#L>Gl!}T? zVoqC7>(s>$qNiL3zO*+Dna93WFQp%Wvx=(Ktx-z97lRt;P)7D(_)XU7YVbnohhRVU|ySu0f$2iWo?Zy?*oJWnH;<+y(v?Po{uFr^NR? 
zV{>XX=AIAjIDys?r__?(s14RUl|@8cm^m*8z+H64APXr<6TbY%PRu@AF6?f46QY`$ z?YgKQE)JcoZU-#d4G{61t4GS}5fARk2I(!V%C++z!rItnVhS4tAx~IFpJPoP`qIzK z*RY7*%-+*fUV=Q1fvrqxR>&Jst0$ay$D!P*+Y$FE%ht@ZM; z;x=gEIsN=iASS4r#eYu>;v_Q^;wI4nJ*~Dk)F_>?;+jjCs%1#ZeCi#VF@E|-U{M~4 z!QIZhWM-s{N5pyfi1)ZFGq+-W+i92h2{+Havv1de2^8-H%#Slez`TRFaU>Bs#?BL{ z@1d<%M%A+|mgd>m?Dp?;2eV(KgC4yf=HG|pC6)K&TQ8DlXPCbB+y<=KidVxjmD3}KWDNacoiT^|oO-?_7saTj<$pE<--EXLaSeYb5DpWhkdxgif9n$m- zn_y23Gbpqo=<6gW-|i*3rp3hY+yCVF6Ltk8kmEb|46J>8ADd4v7RLqET?`J-0&ou_=_)#>U}K10Cc?eu z{frNyoc-@Y>#V=V^jk~60bt701C*tGmcWE1Ees`82_M(d{cJM@fz3EPYIL&%7s*}4 z`0C?n=8+5Ac*zvrhibglgnGf0EyYnbTWJy^4S)`0u3uaqJZ|c1VOWf}xmmUUxZQxA z2gsN+n5+$be(BBlyRR=N4xE`4kXAF)IGTL|vlD4B|8QL9$0>)8U~KJoiX}n3Ko^#kHAx`U+W2;dDE{I=MqJj7Bo3Xaw zEer)ZwdqLMdsSb%Gowi{v0~?^_+7S)s$YTU#99gMz4-YRNU`8~V|a(zDWf9(H^x0U z$q(nxRe6wBXkZi|OmhA@sx#^Tf^9^9)!kzTJc6(N9nmAM5GlaVA&La%i7}fx)yGmqh1cYWU#dQ}Dc3?#LHl5;C(`E0*KE~m zS*nRMBxK!LfYelgZMXMlxKfXZex;F)(D4%k;fZV*0eLR9APbPh`RLJXrr0?eYD74?VVn-H|bY^Vjg*nCvT(n@OW zD2~RmYWfJ0*(pq9KsJEwjS+|5x0h`yDzqV!dA`86VYG6GFV%LS2qNo3TcCQO`(`@9 z!;ChOAe@J_*>o8iXp{ns#QDCsn&#G(tSiomI)9fEg#Yx_e&J@4O~J>L2+-Nbp6%TN zsDUQjwxHzo&Hj5?+y<6upDhjJk}GT39zu-VQ3vZxp_OU^*cTt&{BguiMQyeMIdCU& z-}-NS6-~Z#6bPio5-zt#oRpD>4KU{+W*n2SjEbY+;CgJQCGG5rS**$P&Hg(VwQgmk z-kxKx6HXQRx((-wWy9u}!4M*@rv@)!XY*8O;CuTWB>ne-+mSf?*e6JH zB2p>=;7Fp2WFNY(T-q7E`1xd)(rb?fZmZa&s-aIk0^J+aE6ezA?pcDIv*Hc!P(y}980b4;@HHq#OVumNov<<(Hm-$4-{$#N#X@)T90I|2S$|XD{ z4dwh`>RhuL>n_yFlg9wqZCqiTsTc9CSp^lb&UFHh(3*m={p8%Du$DhMvSm~`u-`;( z#Grfi^i2lkM*&N9EJgbHMNZp^fWGV#*tY!hu-gIeFq6tK9*n)O-S=CrOUVvwYg_bT z1H0`7vuGVXP!lw309-|wddb?jkt?i;V?D?YsE>$M=b!6zrp0`=Z+~9N!tbr_gLU$v zfIH9F4rHNrpbnJ5i{FD`aM1F#O(aPY%;Ls$kJ*beo#xc6&l6@rBnA~=>zzL1YfOH# zgFIAQ^1TZzoh|0ZgJC123fGZcYOs+;4(KK?qv8ldTi38)k$Jii2RWqnAWz+idYajj zyC6u?s}4*FmfVQCB%^DqvBDFP9e-6Y44Njt3!SwI33Z4%kKMI`EUvSzGRnP1`j_ zbCdFLMnL`^PP2MRx2E`qVRF@N6S=tY#!3*I)+{wj7^E<>HdA{OkH4p4RF%84V`ePd z2FEkG+W@NjFJnj{eK0W-bwp7P_QBp$5mlWqk6kx!09r&)aCX=>sDI=uc7^35WT-tJ 
z6&>V6=-;P>VtH)REZ89;?BJsQYZ_&C-Lu?`Z+Xn-9~fEUcK>IP&r;Jb6A}NOWWqLs z1wOO;xNT=O0~;0Ttc9`@e?F}3co8ocMmA&1v_2=TLP_D_WnL->m@7sjE4^)g_bMikp@11A>^kPcWNISY}DbYE=`RfPBrO z%vJ19_|Ju45bqO|ALX;VqmBam=Cf5pVscb}5%ydS&GI6ymPx{Yt2#SrQnaUqp;!9y zdeOWY0I2MSdIe4zk_E;s!UZK+%z{v>ckJ=4%Fo`q`)QN>HVLlnTs#G(>>$=m?RvtT zGN2~+fe@h+w?Dh9CzKy4a?19&RyX{ReXGA#7NoyV3_*xnLz{agXqq-1fLeW50j=61 zgbGFaqn3KrQA+(1e{7!S48!HtPtZ@i0pAZIEoW3|%ALys`;E7)A*IwFoFknRyq8Np z#{|=}x{3M3hF7TR^^{2Bp_}S{3^=r^{4gKgLHuTw92TL)$rng+l=3FY{Oc(Q+q-fg zt<5oyPbjyEX&i%!@e|D6^MV+OQ+Mea5$mlG2NwZ_ibdy7`>Z=BcZ9!nHi zqUg;v;}?BndZvO=8vOvPD}DIj40kD&Zg8q}6U6bn0;uAlc(H5Fl@cboJGWkPoGiVe zQZ!5}zc|NpYzR=bwZ=|z8eTGa5h_dhP;Xhc5;)lA2Ty6vTesmr>iA)d!f*q1XzN!! z`a_Q==u_HH-Vk6kFlnx3T!0p0eesVO$-g(?YvWEP;F|2izd_{)+>&A0mn>XZ!k!uE z)%Cy#w02%88~Wq>!Z}oyeCwqenhu8QkhOuFW8NOk%4JdO+2jvN4Y0!Gx!7mIuYY!Fl+_OJz*G`n?h z2t@mw$8e;|nNDHR|0y|@L1si=O)Uv)V8 zCQfYFp-BG?5)ZmRwshk|f6_>N<-z^{9A&nNWY+P7Zg^OQjl2XlpRNdVd!x{B^0Rzo9UM{8>AckI{I+0f{zNjj1vu% z>aCCcRJ7ssZ?r@qPU^zbfK5t0IjUf9RAoVmP%yBV=3`(fW}GFA)_3^vU9E zxJdW^U;Sp2y+#t-^#a?y-kQ%xrN zWB?~_vvl1fKeARZCv20l;Z*#lk zAw64^ac4gF08Yms`iz$U6;A@87p!h(J;1?{1!fOz0E;rC zaT?8a&-GWarLFOrK3X7eKdfokVYbcd)j?V4BAdIbRqy=ez1A9_&OG%e;S z@HfA@OyZx8oNtc_@kK;VUOKXAk;(HT5T>!2Ql+!>wUN6WFhZExHjD&zuADUV_McVx zdwiHDM4PFmFa&ia=oImP`P|d|nDVMIQ4YEjNYiQ1 zf;d;BN|a*8B{eg1Vu_^!Asc7*O^Ac9EdjH(!(uC*Q_?UOVb$(rF%xiOzBbOsXt(h# zY;%J7wWxMU;H(3}36&s^S&gAgECR(j7a#EUGV4tXaQ-piPc~w>UZjMC53O1#7Cvs< z7Nr1bu1VsRw|^J(U-wZ8V?aM5D(LNqen2T{#mC_Dd~x;5{m3!~RLub2jgKW5TTjK- z{!FkgxOnSrA9&C3o+Eed0)`Bm4mi$T=MDPq8W9e40t)mn!N(JY+{EYG{SZNF7dIBw zLn+9{eN#uOef&}lYFh1+E=DXmbBDi4QSx8b%#hdP`ZzM#LN2pye`yEbaoKUBBCR{n z3)^D5X?}Ez^l;tmC2tckClpZslfJtNf8W@5fV;CFRR-4|)^=~ix+r-MiyFEC#z1@y zTsu5XAXqz!rThvwyuOznm-Ab&rHn;uM5}&eCD*Y)Kc&JM%8LGeJLMm}9YIpfgPdv7 z<;!W%_R$D*+SX;xOUn2b`+ign_1$&AZHP?*Sh3-VZQomTJn|y=fr+DJe^7B#NITI4 zNL<|rRjFfcg9aQZ_1s`=ZiLziTU(23IT1h{7W%pz-`zkF0aG56kF<4rZF_H{?*-+= zVaTcM&~eU0Xhv6wO#ew2n@?&RWk!0Y<`Dh9qMR)e74^FpG*dv2gxV@dUF$DxF;@qm 
zn#o=oO#%kMa}c4-8AfzgL^0z?4%(KB95G4y;E;?^1D;^%W5KvA-6?@zeslA$0>zD#*P?u#0x1cIU0VT;h4AD!#_|0G$j0WR5EHb#%SY_KJ z_4;&s#x!T*u|f#%vY-UlaoSoQH|gC-UC;T$WqxN${q%>6E#YE* z3cc(L9V*9~SmOI!Q*5OSS@L?J;Fr0$f|E?ZrBGJ|^Kg=kxI6SR2w!W0gA4YNPI@vs zX26|u12c(=-3Rpti}h*Z*%(wF{YROBD>pTEoVsx;9vq)C(;tb2v1(KSX=C?}q7%3^ zB@;a!CmTo1{u1pYmCEUFRW=X0kI!wA^OqOMKc>`(0y_i>-hc|X*t~|GeKBmGd5Ab` z1pTH~rm8ED1AdHGK4c(S#ov(CELh^wNX`|(D|k+~ZfS%Bm9AqnReG~oFE3yi5fa&% z_hUb+8`BQhlxk#)Fx2VP-y6dhd|ZFAq==#!ZvC(rpR~UVD%t45pNpUB6(}fMMvFN0 zPQe|U<%Tq)`xNq^?Igg*ubdwHwfV(huYL^Oahi4y9pxh<9wn-&fn?u!1S!_gM> zD?+kPs?>21D;zl}sJNCll{)@csq<=OYme^N?H2!X9g7A>Uz-8syM^X|+~spN^T!Q| z)4oeL*G=Oa=9(fE9L5S9u}|0eI2oKH4eN-78@J;o2zH)cK+kUxxaR#KV$ot2+E#_6 z?-WqEA}1>B#a;g|w)_mHC#>A!-^D;u9D2Yzh;ggLGCwx-CgnUBrGiXb=FVzn#Juh< zdZWZD)BT;NX6|HDvO4^C#u$G>aKk)YdF5+P5L$Q4gM3PldJ7;ZNR+a+9{8!O(et0H zJgmOAW@oVOkxrKkcbL#M@NN9M_*eSd5r#g1JHLD#y33E&zLF^tS}~US{H8yKD091G z_T{tXvf`%TDKN6q(t1&>2>EwiMCo70zyRNBMz>hV0~sdO7pf%N>`3V%@juXj?D^=>ZM z{vcXm>>~_OqbS#=CFVG;A1J;#ENR$a!of_5;D)`M>DB5Zi*F(2QxZ|E#8;d;c6HiJq%KsdWOkRQ<1%8vhvkC{3!x}ujK z#J^t0$bPfUH4pVuI54j1us&E3P73Pa=%JoSKa|yz>nnMI+VpS|Yy+UXnXFM>>QR7u zN`}z7)P3G2-)fDaIbmlFw=533&PJt6YA;7TP(WSo6k17TTE47Z6-hDyb^d`kL_?;& zLd2~D;e}A8Hlo&_4WEtQ7Xq{Sf3c>76Qk8@tq=DMnV+jqtVV;?ZU-iFa+37caiSqY zkgYzE9x3lVujTkBtI7s#^F1@se%*_BiZc;`umAZ9?zMaCVHz=mRz@a>5C6xgjECYg zZ%@OrBE<&l7jBNP>+L!9P8$o^3$J@Pw*qO4Yiac;9L1ZS^2o@W6Gm(2w|1Q&#?o1E zdkHAN)QbBcTzh}6d1jL(Qkw&YQgFt_M3aC)WJko95QMA~o!rT1&8I$>#=UJ(;&{(1 z&e9$WLSlPC3c9=%w&$MpxqpXt(VV!dF)sdwB`;p=cbUmOWueeJV2E84>nY*iA9$E! 
z@L5?h8FShcWb_-`ThE;x9bu0buS^w5KBc|~O&@2imizl2Kb9l$$Gg{TY=XBNjKjjd z>4CGPLlE?IwbI8#jBZ3=)PCKDJ)KYUb*bFE?xK2$T&cqqM_1Y}qOLsj^pfBHJyu5z z<~x8(F~BaGGdG12S3c+CsxKLQq^;wQVOsy=%HDpLT5X~sDR|_DDL?#bDJq?{s{2z8 zJo5s2^N|IA#uy|!Ry-7LFi*H$v18K-Njt#}#Z>X_4a(?btgaUR;pwc~^-ard-e{#t z%;)94eumxy5Puk8?hQ%2bYGq35$I7B*2hPGsfQ_Eg?c_laL?-{&lBxo&`a&-0}lCr z%fC3|meA+6K3-XgRl3@9&H_xOb9fg1`tL-F+F*N{jjEHzLR+w)=QQSUx?>9!ad{X4 z!c+@Ws5#XkhPM zNTse8mdwEd^-;rILwuKK=E4i+^(g>JmNMpv7f3FHKfD;B%)z75e;X1ZMz}}Za3&R| zR0wAtrDcC(k1k1GKelWRQdd7K|k$BFl7D)oWOT_ zit&aUp^OK`Vxf539{GEk5#%yz_%-9-OCYMU;QT==lAdQl0TPzS@-_pX#}X(u>16e{ zjSYfS4yVw8;#A~jU`)(n&s;U< zJGTTE46LFyL5gIAveMiAPP*Mo*6iCZFA%r+3f<_YQ0$D1Qf90~RLxueO5DsqdH*UD ze(4ZT+tuj%^rUjavX+Hu|SQU!wp?_g@%H>k$k;L^Xo-1VRHR6i~4dv z7wG@RZ1bkeV7!IMXoooRTZe=@Om&-q0T~yWi)PIHY5`ffWWEQRmQQcCJ{fDB?4_hgJ{&T=-a)UL0oQ z;8*v!n$RDIiw=7MK{tjlcXSBZtSk=Ulw@6ryl}lWfbE1Ix1SJK!mBWN+YY@YaVQ?@ z6OA^sNfjeCa+b;(P)31NvS)eNW3Uzg{FMfVr#KgsQqv*wx6sz1eB0$K;T0#8jA(KS zu11Dpb~U)O51fuVe>KN_B#xcpkl0&@%j+NJZgKiP#SP^QOul=87bB72OscG_O{#DH z%aD22>i(9C7STkVqEB>jP`fZ?5**5HMteIUK69DG0{e3{lnx?IDDP`V-t;~EH9K39 zc?s*U#SD;d!*xxfIDng`+&|qxnc_Vnu!N6NJwNLM7Y9oC5X5kYezJ z0SG4uR}w!!NEOLTM2uyWq>Uqlc1}^BN`YljVkoPc3q+(A&7GzDa_j!EWxxFNp_H?) zxS~F?wuimet?u3+X)izj=uXB5=qM43P@p-Ag>kUWbo}9I2Jv7;-_K5j4c$jJfc*?o z&8x?dkh5VGPBKtCJrJvHP1gAK?SUv5+Khd2Spy1)WnBQtHBi`7kV^NgeiBCfGTP~> z1QumXgRZs&HSfGiD9F~}5aKxe{-tK3iF=yIMu9+o8DNA?_q0WZn$#Vjj;w^WJ7w%o zq3o+(ufoCz6YsvXu01ixM2F~GoUvjlhs6}!_vDM%EJH^)N;L6z zs8h?yCRDQ0ZMYZeh1I-^dK-XpVc^a*D_$}BZ`;(K(dm*O44B>NI?u$HoScSNhrGQm zw0;)IgqEJiBDZ6b<9QgHLY#l%K9)C;8#kAa*xGWl9;ND2)5@E5`W456Y%;SWgX$FP zJ=^acMue9OLO7uK!~Glhn7FC7HCP|_;+I1{i~lO765dnwa594f*EfswEZrmzr)7ak^EEYr)@^JX~Yo?yi(q^UHvN2c%c0Z${TS;K(+p z#NINeHy!HC=7k2ZmIe3wk-Y}X8SDwoVyzt{>ZP`HCc7LDeC;6B;j^29ou(0KPu!>? 
z!E27!)nr780L9`!&XJ1R>F0l=oiXZWQHL+Qo3qpo{gku2EJQjNsg~3m(OR<%8#aJw zTm*%WfGCDdHHhv_@YF*(-OsM2t!MfMT+WpC(<7i{Gi27I`j9 zh}ydK3Vt)PlaP85=ZPAGBTFl}m8<8#h1p)zqnF@W^i%d5ZL>zxRw$->hklw$^K6-4v2A@n=+GG!Rtv1Yi{$`%wFY%Fkac0d|r7& z?|x0Pb04bDUhvu8MWXD@ObF0?{v7aX7|H&yuh|_uxKC+9otBnN#3Xp!g$F>6aSKXx zj{QW_j^dfBAo?GK)0`#sIjS(Waf7(pqZ8y2iEuxpkEpJ@3zoFI{Ol{q1oR2%;YwA; z+mr(fMQSg4{fHp2yLgVgwo?$dMsAa6fH^70b} z7OR6R?Aihtd|}_TalQ(mgXEu`y4y@-lAA7y{p^NKSv;0LxwQbO(%=t`^uCT^WS_nb zQ*}!^ULKiHtauxb+mTx|D9h%}rAjS`f(LZ3=MCyl#HZ2PM$b`>_O~TY#av%9Z$>a z7Hh6-K=xueTOpF5RgX)?7$p?=Kf~avUs=Z8X$~*ELWGGF~2>Fgic&9D7RPOhwgblV7lm!sTJtHP4|6R@zY64kplq%9NhT=bns2+s= zI6x{)SGnpCTI@FE(`H5**c(q#m5LNuJB5w1#)lsso+t0VGlIIX7)cVS^X*(9nzEi%@{_kc8KRzVV*flcWh-%DQy;^*OE_by+M*Sf7^9~R13j9`^*;CD=^OxthGHr@Ng0VGdS zgJFCaV5Pxc1Z)dr=snBgbB6k$4kGHx-b!)|X$6P+$9U8$<9rP}weFFM;d=ckv zSERiTH{klqMoTj+l5@X?3Wzd1i7brF?J{lH~1@<vAz9BAF>9kqH2|w zix_>)&gmGvr^92~kRMrT>AtRE4HmGt6ih8jxw(*svV9;QdFZ$8W!wnuS|YdrSe?S- zyEKY)XvMgJyUhdcGu$W={yOgf2f_O=4#HmYlr8cc_pHl)^Q*VCFEuil*&4cj`lSIL zt-;*-Iay|(&i!Eqc)zzmWrYoJ?P!VWren_4a;Ry2IQvG$Wp01ta=y=dGHcE3dAKKL!JvU9dy z=>F@7Qu7mKFI~Ao*g&;9YtCpZ2fqcnuJ*U}4oq}fh)D0h{yILf-1+Md;q;q0`m>A12Q36M_y@!s)H_?M11ANwz+Nqxq5 zII0P4w8%#dd@XW=YdqOm5W1dP*=(Of51nx3ybWV)@;c&QhT%ylmgLdPBnr&qBG#3T z-fN!A3dLO)SYc3Z$_9`zT6)qiadpG>o75fz(T&6Ac+`!Cn-*2UgEG)HlwtuMdYg!- z17;m*sHiYCr&vYGo@w^=cnXz6BoV7Wq>f=Z(g2D!1RLTKw>;;x8YYU}s z1x17My57VZNA_Z;%?`$bhU7j}E23Nl_kdEK_cqPPS{g9FnDAKHU&O2LymMXMKNF*e z%~3;R=zaS+izOluDVt2P-UsTIjAiv__iagoB6MHllSTOn*Ui=%^d3z)p_;`YCaeEA zDGmE*Tik4WGcmf*0|=_P03NMb+5se7k|A7L82K@H?khuUIhlhofN zMs7>)J~D4bCT>==&<uK$k0)_(|zHe5KC_4Rz8X7|a=+h-Dz z>TkqRxG_2Jg-BZQTJ0o(+cOkpG)sh+hS&&^)a?A@>pCfYf>@{oBS;XenuMY9NDW@8lOm_t!#Has)iV#BUJJ5iWG=f*@j6sh&WHO9TfciMMt-H}9%H%-)NC4VQrd_s|**JnGv z;CL9Gd0P#mwV#_*SKqr>N)f@C^4k=Zf;L6+G#%{^pSg@=c%>8DKrpg~_~C zS+B>yM+)54(Q9*Vu5-_cBDJx~R_PloAT$6CxLJ_~Ts)!}V5qm^t12ZOT~>BZOH4Ls z7l7Gsl$)Ot3eFvO+H4YX(6%Uf{>~pP#Qo38y|Kb*!OYCH*ca>BhSYk0*{$%z@ORb40Z4-4e9TJiL2 
zo1Igd%pO$_2-~a6*0X}v*3%Z<(@B6_G9WcxSXCT>&QXreB`SrgGzz17vRfQmi4NU{ z+ZLBm9QJ3d==Zm?zAF5MCA6@sO>3#irUt6k1KQL{ca^5Mx;(Ki!r8(-Q}4ToUY9l1 zT`@UtZOy9|Op&M#B5gb=ghk5f0)gptsB3n%N+Ne_r2p(iTsBJVk(=T%Fti)xvyuZD z%}mnUNW{khSM=(-9$lVTED-#dZ%LJ%&$k!!O88{F z#kC*r^J+VgfYPzX@DdEWo)GqM`ZV6LNxPWnpiik-EJyQbKCflL)N2=_~&)G){J~d^N;E3T<@BMn+1NUmqu(&tax8GR%B+N<{c$qW4?sB_~69@KjI{8J5UYWIRR`mL7ft<2f=Ax8% zY5t+7<`K3d&wYljLSTNrna!Mv7u>*vpU6E?z+O559DDvcmWFwKEiwRC5M;>R zj>Pb_V%ttu2DE;*GndCptI??3LMvpV)+$&F=e|NaR8&3SoU{i)EvPi%2f|5~YqhLD zyz81{U`^jQfTIR0y%4V;c_JW!d;K86V5(9o|y4inD)z7}`IqYx5VQM>s z2DWrnI{<9nv&&Y&qCGWk#P=kCW?&%usKj9k64tVHh8rtejwvp=ZK0Y9c>PHGw*miY zM!FrPmU*prpJL427~ndF5!v6H$!inE#TA39du3A))=4kR{e{I9e7713)xGhT!olj@ zA!MK1r&!>C_5%Al54#4>v2Tfs6v`eFQ3&q`BkSn7dUfw|sZ=DTr9~flyWjg7w<6|K zI9|zDMU0A`XxLl|89~39%abKtOXTLys-#wN4b@Nq1(ksute7ku$%Z`5=oMgkAYx7< z{D=rtc=@cLHW`tVhg$@Mz@qFkX9$zTs2JvzMBGrShAjkqj4hx<^F^{7UT5Zk(a zb{f3&^+T*+6Bk@r$mR@MR*3hK{y9k7BI3{g(piLPBGWW%a1e({fJC3uFOBF9o9i}+ z z0`P}56~v=lH9Q!yNvD^UZ#<#923ZG;*ggcaG;lu$WoY9vbx!4vQaoA$TO*1NPO8fp zQK0mw==xpP!0;%={Si&!!(|{LRuX8*{{=C$v-M?(q$Rb*b2>?T@D9#m6qN-^)u~G8 zimJmuH!7*)$fzUBi=QCZTZnCm9HcNvLH^nwbS>^p zJi*N7d3*XWbw1RJpe1zW`Xu+Z^5|VbECg9R0kXf6^q4@avmm0%kwZf)Cu$aU+B?-3s9{lW3hwb*dQOP-J zzt(7X?@H{Brs?%*_;VPJ8=3%yqM)gz45xFdBMA}fi9LhaFHK0>9Vk^b27^}<)M%f# z$g;*BEW>L^*FtKX*a_de@IGG7Q6*`5OLU%yP+!%$YFZ-Jc4Vw<;Q$2My|`Wdqnjg^ z3ejexNKDhXB;xmXY_BqY<_SfNlJV?NA&w!e!pCIJgEg}omD7+64JGUF_%uwLE zO{NE^Ms>OF8j1`}sJB8BgB^pyiX1;DoLFqikY5*?po*0E89bD@X&t(qu)=7BbqHv$ z%^oi>XWt9%2=86Hb8@X6B1-y4iOX1RVxo82@90&B_h`(5T4a$qFr}}0zyB&e6iZ<$ z4j?81#lg@~kd@Nrq)c&u*)47vDU3;ZfDzuX7uH1@9NWXyU?>9)6l; z*pK-!^yR)-H%MeV(f2fTggCi=kg<7LZ>HYts~Az3Mb@HHSETC6wZcWbi`(pRtxgW* z#T(Wq6`#UR2hpt__sjPCRN@@qPVdRhYrM{rxokdN3Tx9T0DC;Q;he(c6(_~NQ)#D1gCsIL-87-x zfy*QCC}U0HI;7iOMx%P^_&v|WhsdalUD7^U8Ez+DQByp8bK>Od63nx%j!6LEsYyeh zWZYG_BA1g@B=gD4e0>Y^X8m>*S>v(075V%V(q6F7Dv0@X{)1WDPLIbQ1LrXjJq>xQ z(k@1TE19Dyn}CDGLiwTegO>E;q?^Xc9Fn&!$LiI@7J-!MSV3j*8;Mm>(F 
z8I=b&XMALs;dWuLs=wbo`=qf8OpgR}Tll^iwyx%iTrR3m{Kl2V6S0G6&W|KaBc2YS zi-KB;k(AqSzDwdWkJnRs6puhP4(s=u44b6O({c6}pG5xI)F8fS>Lv%r&rjL+JE%94 zUD%VKiobsL?+}|zRLhe}*?On+(AY}o5zLu-E0a zFHCd1L*a)8e10x#TAQF8QkV4PsGrD_*T7~k#_r(V3+6l@fl_?dRTo@BRxP!53gVyz ze(e;Hgy7_Zdssk@olswMUxAtc8y=65LWMl43hyN2P1|1`(W{%GZundM*UqE`%|RE5 zNTBLqUV={kqdIttGh9K!@j{`q<@u2!!jigl;5XdJ*qWQH@c+M0U7Y?jfY8Z$jNufE zpJb#@4#=I$$k`oiU9Ym3!IaYRgHlyU`dy;($Ux`+B2g)H_Dr29Ve^KAf0EYF9WlB0 z4CS||w$!mi?l{*N*J z2cHFq#rD6t*|hvA&27!zV!aZk-tted{c}h7_8HEwHzBm=@qc~!$+7mPEZ{v<;RGT< ze2X6yP*sakuExZ_=1cLbx#jHQKN#FB`>BH+HS-tp_l-2H0-l$f^eamP?KZ&R?vk7P z7e4`HuksnE(QX{OuhPg^7euKVO;PR?&DcN1=%X&9R~y>5i_iTJh49LS4vrp@F=2=pX{XJB?zKkZ~sxA&7J!C8EBXJPJ0s7`xA#3X!)ql9()Xg*WM~T*< znn1E^b3ze@CRD?9Nsf&JR{WY$pb88tb}mbAF*c)`nr7#?_o7wruqoQwWzDqi4<&X9X}4U@^H!_CXDqi zNfB&|XVNFuGks4f39xX|M87Su6vXBEwfSt|%4E22do%U7xdEoP?Up#8@8qgN0QlW! zUPBp#D{|FtW=0Fn#Ne*hy0{oZ7zZ6ifP*pHq(bk z-wXsDRaAZdvglN z_?7_HZjv04sIi0v(|fIuJv2^X@7+OZYARlRHBpplm^lX~+K%?@$k^(%(a_PY#Va>Q zB&gULRhMi-GoD;m9U5$!bB49AJz<#^^Np99S+TbX8BIm-VC~UEBD1+Fdy|&7V>dcJ zb%+VW4#FqL|IzA!=2AV+)q6RHwQ`J#sBfBI_Te@mcX*VLM1kR+#R)`E$u~nORmb7y z{j-Sz{C*psC&$qpo5%`nzO9Q>nVZ0?EjiLhrLg0xTq!<|O(sM}870eA>#X9t%?KsphV^8_ zGJ8N%KoT`=wR!S*$p%GV%1spYFkA8n(ch#!FilkdFmc6wWAwP zO+N6`Xjn+2es#%;zaPtH23)kg_mmgSajy+JlvM;C#ff&R{ITvqrP*douo0G8AGau3 z?_F59Cn93FOAQVvajL~OsUT`qZgLnMF3KHW8M7PQEUGVOF};J?PBr$I%01f#OklA7 zOw;RqMLLRH1p}X$@-h6zdR+zVoskAOe1KE&?-mld0v(W9zp^3rGA7eYmR+QoK@^Mc zTEt+w%(4K*=b3Ep@a2}(p`r)KJP)~czPq9x+qtmYiz8k=yw!~iv0M(Bx#U{0$Ithk z4Z^ZZ3+en35aQ`A>5*j3hFBTP75W$703W>CVenTB!V%8Ma1%1p*?auYbKtNjfQ%|{ zwkwDdtI|sAEg`TUirj10!<$@lB}Vd@{zq&eZIPVl=Wh#7_$J|+>hSX07f4-~Vp*zkY1JALCGTcJpu0rG`Q}^0||&8FL+L z9CJ*N^sL}JtBnL44bKKWpK0JC^l zUIF)D;@J2^8QCDde04v4HzMkTRGdcYpkrCw1^FII+VAsW#XvEi zww6$LPG+aHR;mSf&f?H16Wl=jW2Q->*1j~CdcZEt>cLyKA0a0>yrhO(^QL_F9?DMX z|1>-hO|7AjG7vhId^n8_h8ha-v8E=*Z7DZft&T6-!am<%VC7J%KRC1KO=e{x}Hqtxt^AIT~B-C*9FyJ@t}=I}_t zfV(SYS*`(!LUrH!19ig}!$jUc08)#+{(jvETa*Swdg3>G0u{P`#<--g!j|MS!XvGNz;x6rn}OAY 
zP&M;bbD7p4JxCR)TGZyw35_|w6dKZ)6 zgjfq><*x=vydcjPY^PiX_p$-!)uobiZO_%$DD$cXljt&<)%{PL6_;-kN8YSzAo(iD z7OV>1#+UXL+Akl-cFBZ;b;2?FVr0@jHGeFRBWt!V$l>qi-BZ&&2RXv!ccz?uSt#^g z$S-BF+RUj(YiIFpdGP;~+MT5$?)390qU^?x5^POE*=VyC>Cx}TTA^v z?FTCDS`4!2Tfl9{F^90YeN#8~3fgRPiRU^C<&X0;L8$HoPKrw0;B@0|u|r;}h?=D^yzC%5bJ}T@Cf5r(aTcI=zSal2 zO5pvt@SYr-_Z{gs0n!iQ8A%ksLemNujo>@)JR{yzb9H^W1GqK4U@>l5)ya&X8a{x@5IknnQ1FCwwi z%&T^RMg%@khwO3kW1hi-MOdSJYtOUCf7})dkW(oNokfGIyI1#c3S!F7I*H ze>G?Q_^+MCAolT}6fts!AA-k#to@qEAbIrsw<=MH<5FD);n%WPPG^S8At3-Ld&-X2}qz)0fG z|1WY7m~LU)_cbPf^1EblMqoBM$CK+wxKcLBv7yuFN?@g%`A6r3RoOJv%fG%Ny6cl) z5vd1jL@HQCH<&6_Vm>vI707q~d9mZ|Uln}$lM@cb4RqF_SvgnF@1ab+uikTvi7gEB zbd}{_#Tg7PtzNMtiDXCaXp}hIaHtR0WFOeZ*+$Tf+~0M2lMJdM-9j)Zy&?CSJatrJKkHh)kA49yax2Tk z_^9hfyuxX9jw31KjfE(Gcvm@O@@`J?g}3Vz3VpA#cWf{eHnH7#3*OTk!FDkTRe`k| z@@LVBTKd2R*Hmf{ba!kZR1`_adB0c_nldjAhT*@7$dkYinx?ut@YtfabHk=e3J|nG zIPIY_55Dgw%CUd-mElO?w_Jv)ehys5Qda9Gl_taRc#yuI_KT5EsQbb1^A3~3{Uas1_~B+nL{|$pswQ~_d7B| zh?ip4xP$e#P+iVVmIm##`+K4SGYr=VXyhkkdA%Gd(8eT=CYnm9S4&9aLnGPk#DZ^3sNgw8-u(VfAEtW z`gPGCAdHn0Tfx9jCPy)p(NR1h--^5_prH_B^@YtWbaevy`o4z**_xM0lG`@aQYc6* zv~oRfxTt=9d@>%^^po4MMK+ld$~Q+u@^hBNYda9lJfL_1Hck__JT5*L)MIg#Kn%rLP{Cdo@p|vLEY&1Iy#uwtet0nNdI^-Ex{All*>{M>0XKN@ z`vjnG)oP??3H(~nt*{r!+|}JiEWKQ3vm)NKOz(l%y3OJLCZ0CC0<}|)auh6B{)f?>1ZIPE6YBz8u{J%! 
z&d+g+P{kGr0|BtsoQpW7r|s%w%TpIqNo?yoo7oD?c#y!Oqo4>maL1VfwyoKSvs<;q zOW62&xE(^|+S_d@_Tya(%asELQs(q-xQeCM@2g*#vrfTSRX6Cx0g+>Or)8;}fYFuxT zSvGUp1s_L;OLm;KS&Pu7Fp)o z6Z4LUHVk)&0^mIRk)EaW-osah1n#)^2j_P;5$ zXRf;u7QGHdR6T@_g{d}@S@Lr_CJK(l?HLL0cmXfbd!SnANQF>t)%25W)rCDBQ{`U+ zOzw_dANC~tv>(h<4YCvEPfkkHq6K6!b|UX*-0Yl^qAP`r5&g+gVNpFSjxE1d7LgMNrGky^q6nCWc8c!GjncrjS=e)+==QS-QA!ku0eLC=`@LWHBuQH+6`q#)s#A-IsyH^Xm5GzypCP!H} zMIoPq`U>^s}uN_tpn6o2qNZWK^Bd z_n)xCE3ZWo6HZn4zw_Z|38y6l4Ic_Xz&@6NXAKBzK zPHTXgtIhJz=kug*tKZSAGX3OzEb6H%-gijw0tO{U0yy1E-nW`gB~2kl$-KVsTFBBN z^kxZT!c@F7gaOUr&R%VD0fx?dW0qLn$zoz5xmcAtGXqhZE_#TG`)0``e?WL@PD}69 z`d7VP4J(>p@|t-pYm=?Y4bnr0{X~EL%rst!>i5!ZXuv?+Me~jW;3$@0?47vmlC52Q z2WZ!|*T9k4%n)_^#|*=&Q_zGOL_nt0t{)IDdvf+XnN?-~>52t~VYD{SvzwUU+6|b( zDML8N!2$=kuVP(V8aZKcro+L=%l^2J3D+SLhA>oa*)tPX9iBesgH6`|SKPFEIVU<6 zOwji#imAQo;q)V8&>gWTg_B1 z(lU{9i+KUjaFNG48q$f+&3CFhKj0@Z)wN>M$I#GKQmTx@t*?E<==uUrjmvaM%_5oP zBN*@T3SXVo7B2~)z;fqsMaCf>`sPza-PoiFKpF+5HtaDuyPLR85vQ!V@nVk};{*Z* z0ek8_#j6uCyOWgsDr0HWK&`c^4bu!#Ihs;?Nr@&r7yhKX0`qhah=!7W60nbr`McUS zPgwaaZkGcqSeWTDY*^(hzpl_sIHe+oL5)9&3&etruJG)>1a?en@JY7sonH~U%VW(- z>W%%WHgfCWSb89o7F&aSYbGX1VP#zJ?=tD)dxgEH_cx*;^FxS|i)#{fDI(cI+m%kF zJcQ|mFb%o{hzVrVym*}6xM7JXos_-xQ|HCZSf>W&HW_PjP95>(h=dA)20pm4h5Lzm z3nmAs$MZ2sNA9cO0)CyN4JweEaHO9Zi97BTE)+&x9*o-$62hxm>!_WY*eQRAp%tTB zYwihWhV34U1RWYOr%u9lqK%PCe>Jbj1_eN1yk&D4aqIFgf^>-_qaRkzjcW3WX9(pK zgIL@(KS5xCt-jBmssO>eT$1xH18t^X-rzc%6SerWe_oat54`X}6A5YN@QCp|KkJ_Q4J&yRGuvha{cIPJm{TWsy5in|~iOnb{%$ zij6X3oE0DNZZD)+(U5PUepUSHLK->{cFSLPqT7w(i}F(X@j5IxV0I%l?csh34}P^n z+&8^%Qz7iowx~^$qQcGIK6S4|DZ`1(_5>r8O;l$jB}oKRHR7hvOH&Gdq%zsY?}H);`G^4c-1BHAtjlvh{KFsMNfs+R4?O( zmca1K+_@#=TAh|+-7qlwe}&HZC&A;mxnRW|6(jAk5L3LjGPP+N~asqk;Y$@xswlg!! 
zhzAzI-qLnuI$ouUBIrT9yeu5*O7-smWcz$V&Ygp%;6Hn05-HZ> zz0Q$^!O9w_$}gZyuQ%|M7#3aPRx_5I+y1HIZQQDoRX8ksWNAN|;M91Y{WtfiPv=bg z%=C?kH}tB)AU8Vhg$?$03Un{8*24jTyZE1^2LO_VkFLgGs-r}2zI}P9nTZ(iK!Bdx zxSb>k@(ayBRRy{qq_tkwN0rNGFTe0vNm=MJ->+ncVz3!dV34D?zBL zv^J~xt;@feBAZ{Op)1<7kCoGXFq&JrDR0Rf|2D#EJ3)4it{ndOrrF0gLoEG}N z^d5tVrGZp-G|xmGl)<0qV3Fb#D36R$uUCXHAO=%D3P`EvHc5twf6j?5kzS?b)GjUx zG~|_Kkulg!{3KN_d<7Cr7r3z(3Rsp}24BEYvY=b>*GLK`vP4w{a5;@ycw4cHOIgeu zeHQ@RPCH*Rafh2+$z)bD`|O$1Vyvdo%RAw!t~3Q$(>`>-NG{lSxl>zp2_T#q74z>Ko@ z0N5$R6a1M?+V@=}g2v_7x3zz+;QV2!vIg;Wnp6PECHtYz4cN|KVSz97KKdn=s!Eg! z4FeVgcYhHK3M8_I_LTmHBo)fu=O0X8&24M9;akd2!1lW#<(*iK$H7%S&Zz#W{GW1^ zfVCs;x3@P66Im;-Sz^Wj+1#Ory*w8s$qtYuZ^FAm=rzv@(#>iuWlni+m8Rru;r`Dc z{p;35p8X`KClZ@cp6SLTle{eqadRPW!l;lVDkMX3@|#5cUyE$isKN$QvMA83-@w@= z(8I2Z$!CG}Z2)}(&MIF5rOCtxPg!HHZO*)!*yBWMN*jq^&SKW5&NSzAt`i581H|!%kFLo%SpQc@l`x7z zdL?sE=l{TKP6#MWhF+_HHm%wE`&!oNQ-9vh3yl(IV z_LAm-nbG|KCMKFYcPvHALWqNi>C45t@#9dby=eRUPDnlIL&kuN-8k-(s;Ha!GsU@5 zP7OyY%`1?#fK)=gE1&Hy?p=s`2==S7-7*nLNLO|Euc)j+023M)HOdT#^)P1&x@-#7 zJUvQSVU|ss8b@s@-boIE^QLR-ql` z0s)Rfc^5&2zSu!;3{}>vpO9{vsJaVx}<0QrNnRGv5-yTy2LYK@ntA_Ma;?A5G|KQ;WtP=ZhARM$w{$46IoV(#sEVZE zD?15^QE=u!w<%QFSK}N~P1!q7`4~-mD1~l6lefc_#A-s6V(uLlM8DRKA5L0UybjdP zPE_U-?u}u~0;a71Wxf^dpos5ZyCn-cLcUwtD;I-={$a{D2TW1>+-o1tou(!T zDUTIzH06xf@+{^@ue>GMCU4bY^_e|y()bmB#W(rFnMn)GOSAQ!C-}b4#mGooZe+tqz7%RF~{kC7L)X1y-|Et`W=~GcjDG_;dvD*G!wQ#&A2U?07 zO6V6kpo`KYyuO3#7|iu7xrhx1u|}jLw*vNL421QhAizukI~S>*KUKH#R})3(dyj5@ zO@nCzL-c{3m2MU|$==CVfAsL7-*3lTj@rdG!OC~U@MQ?qgc0H+yxMyYO1EJNj8m)Rt9Y`qV>;t;5b1^t03BFwBuFzDo+s#n zTHw?)&`4-*ACnn)vsH0PEpQXkRBsP0U-INbACg9ck1tn(X0M^gfEVp;1K_;Q*Xipn z6c#S)(WlhjV8*T$x^j=Zv-_+=3Vt~Tt0of<9G6GoWQN0JWY}^{VGVBfx z4=9aV2l{2(2bWFYO_a69@SB-msC7z$4G3lfx-BzyakSRhOwFk{oqXWAa3*HhCsWFn zQ?6y2-U>|rlwly+WY%t(z1(^u@?pX&l#mkUqK({UVw@tvPCWWTfT^Q9Tn1H{xXc<# z*{7uP>^cQa_hnoLg}Qggw5v;j;y0p5y@t<@J9#eEbCHd^_%PNKHmpk;O`Wx&j*jFE zfthHb1Y9&_YIhQSH;0q-EWFNx)iz2t?1=jy(JlY2l_FHxDeJbJ{BWtin$6?J3Bh&J 
zzhO2?PcpT9U=)-*cxC7mGP%k@1u}j{eYHaa`#+-*lR<<65& z_B70m+R?tmmX_Bm{e*-{ceE)eFVhOj_n(L#LuNVEp~e}6VPIJqOT}TJVh`$A=={Af zrD`A|DFK!`NuGf(Tmp$VYX9jMTMyl8a%pJiv@bSBsR=MQruSN3${x_HL@`8Qg|qGH z6Q$Lajkd7BnXZNzXgY;b(Nd6eq>4>EKQJXgB?n;vLQOLSG@7dLMw-SOn6^X-tnrUB zB6>uKSeRB^kZ+rjOoZ1P27w+nsGuwg!!e|Z>>HFbcrV|_2&$}{L``4U|1{1N-YEkr z(^!dtO*+LU^UQB_+GrUaq!1EY)q?U{KvfElW00%E7-?T0rCxf@<%Ots-Ru098nJKA z7pGNGWiCMYFE+ztA*@?*2>EzMl|yz<*?N6dSaWo-Al5>Lj*U!{VJ3B*7g5Na~P2MfPY@7X{ja zg~D1^iYFn22pu5IwK&>p7c}8=YWNOrj6ROWL>0y4Ni)*{hewJ1pqTnA$-{^Y8~!`u zBxo0mv+K!AnOi^dS384&W!d7;tK`wr{^O*{wt=cf+2X+98%9l=eLZAESqIaOKL@8l z8*4W|mPL-xM*x)xP4T7r%aS!?Oqa` zp^3HB7sn}O_j%|c zw;b@T-6)CORTXXhW@InN@<8F+z=;Ap2?gsjv7^;l9Ru`f)ou=ea!-&gPrJPAE#>jlo`=}H%H%&+d2r;G`NLvUkal3OS9VL| z8Jbu#3y;Of4dO7qYq$>>9pQ!fjy@@jS!i6gg+P5MJN2V4iwjKZsvL+HlcVR?XGfBC z@lsUUam?DHG7dt_We0ip~$_yF{-$m*}SogAacWp zx`E(U1L7oK)FdITzXTp!(3#XrBF^9H;5hCed51VzL3Rg8P+M7d@i|Zq1yB0ojP>K=|*L*^XiQ zxE)q~4wbu>vo{|<5hIVJ;t{SpHNCY>Npo#!!)n&^nVX;b=Oy;p5(d=Cx=Gd@X-c!m z;3mcUX9zT#CsET#VJoF}gOt3z8q_H-=@jI7b@VxpORln4be>2=g(_%SeQjJ= zMJoyr_$~(kNFfREQp!mmvL()o3177D9r8JD3p)vZ)Dh%L>L`{Rr?C%Glvnb*;Yp`g zpV3a`b{g{r^qBMv7>gW0<^D8@j)wzAXlI3o&?RtQdXfD`Zpti`1G;CsT?}sjIDZ>GBrMX}%E$im?NWiPKvm zH14*q%F1CFTSx6{qqodpgaUlJ0R$VCsK3kox=b`j22R-~%N0vyep)JV>wj`Z4F)20E zH-RwvcpF6)tFD%`lo&-uogMQA$-m?&lORqSy=Q#AY#{fA4FTOOQ8EGZmK1cJ-d30s zHZBTxod#xD%_>+!G1*9SAip`AK*zO~z3`HLtFJp|&S-}{7Jujln}uE`*}cKN%mIBK z9sJX7cyqd}bmy!c?(ucWQ43t5*E0aSXpX!9F4VHBG8{?Llo-UL#XFI+*g!bwNcd~P zrQAp9`xpi3wz+J9)L_#7Od&s_()RF!|4T?fXYM+4fkKb)h zZ{h2z4*egB1u~X{7i!6_?K6TjT|toSBg^rto2PNhp~)R| zm5E_YTW^QMl;p&V+cyKBD1g>CQ0}t&k@Z1y$x@U~OjH?U(l*%by3JTV2WHS?qtT)u z^5rC_)@HkAE)@AeP<1|r9-qzfsOWuzm2~qfB;C*VuX9{xFma7*dvd>}(-!TKC`v^H z1h9AtXrU2ea*sh#MFEoB8P`^<5@B~XDcG-mhGGNSm&)`1`%(UnU@Q3RFl}Gagb{a8 z^O^FZI6~o&@sisRjAyxf4wB)>*vCxfXp9BBc-{JS>ayql*HOl~aBiK=Pv~CBv*g3_ zFt2wjO$#}!JSG?vm}Vu~f4if-%O_WADt?!MEj$O48V5x?I=ivV7aDy^R*5y~!h}iq zK?WuWR}BHqbhv;29)$UjA?lyR3fHBvstpsMNv^%A^A1rI;5_3kx#Cw|n|&+MkEF-g 
z{OV0B$e`C)~{`_&5%GUc!226780e?}H9;Xx6k=HbjtCT<0`;Y`of~sK7Ui_Z?^1;2 znk(S#Z>rmEJ7t5LpdY;Y0Gr&Q^i99`$YRdUDS2*OkFa8SbAT@j%rz_Z+I8@#m41Oy zT3TAJ@g?Q^(o+FQr zf$J%uDHy#y8oQ$#h__-ZaJ{!%CP0sC)v_tMAOq1&s3j59O_MLiyl7%1&@tE7OXStJUbv&BF*~cO}JFEMye&OTrFA*g{^xrYpq~%SSXw(YLa*n4htO}K79K! zV+Z4;^>^9uxnQQ*vmN6?2CI6a@0IY^Xw=$92}q%EUSywR$-I5wPOEGQ>z`fmC)*a2 z0a-%L7YsWgos#EqzBYSLeSmmMEtyk9DCXM9rba%^TsR#cgEgPH-v5rUyyqoQ= z;K*~P1CtSMdv}%T%|bx@?%D>xb_sqM9=jmOPIz$tN%x1W10-P<^^vHxg|k0yPD~r( zl`J>cNn3|Ua!4No29}@JyOa3Ic<{>PV91ked$M{0T#jlTzp!JdoEoF8aip*A6KS)$ zQ&DN8VN-U1#~jPR&K;(BD}omaFi$e$J|4Z|;l{vKG^^zqZg-4-dcM1rA&Y2t{1q?U=HhPwdW?&7OT9CzMS%-u9I+>?0~uT&s}lJt%>-0 zy}!m2c43X|nlrqNtyQED9CPt8ntMw6RCZ^8n*tN+ZF6-T&1IiX$200*I01)^_shS{ z@I59%btOxUIwbt64Q&3r^EiULK4t2I&$JV-krLCz-U z*kNG(*39`H9pV2_|X9gge)uX1n)z z@b4AU-~#WuMf8H04hb6IFm`3)bRaIPoV(c4MdqF1j$pP8K$(geSekegq|vu~Aop+Z zRuu!!z-cxmDI9fQSD*^6QVgJ%>r08+s2K*z1YNOBcCCtdT<1*IV`c9Pu0TpU4Fnrt0Jv->U=l6J*r?5GLr9I?gjQ)$o4*H zg9em5m76@4c=Nj%$4bz+$&Oq$!f%wld42@Sjv3Tb13& z_e_rDL^9IMtCL|g0dnz;+}NfU<>w{HvcU1#CZf>W&UHlr05K|vMGi574F#EguEs)x z@5SQ}CdDy?K=`H!!i8pwE$@x1tsT(Bddp3I)rbwjIQAm09dN^~5%A~C&N15FK@M$QGu!9d=pAf@IHx>^MV&L9Ax zOK0lr*ky~+eI%8DIh9eh%wEUq zfgqyu5m6B^?)r{d5{Cm)wYSdLSj}SE!QE^4yG#4c@S$QX$y>#TR9oVMmOO2;k^TS_ zknWPRzQ-~&fc0#Z@Fi^O6NTR4z6wjDUhK%=01eh)BKIP zcBUaoYL&^~gRI0XvA0+kf@WfNXoWCgq(~O4T5Ym&E38JO#q*bV<&S@S%yo({*efXVWwv+RGf{@vE}S z!>}w7_>Wu}_cLKtAujca&+Xa1(WB{!m5o5AO=g>~q+y?F1x$%rr@C-6ao+x{)shqh z(e(Ks#-n>{PPWUCm$Ijo%?%$lhEj}Jf z4w0Wy-s0^Li^SZzc809L_>0kmk(1X9LQxFepq=V0 z<_}`+-&x+dx|;SgJBvhLWPS%pFVlQfNu~oK7)!EzZCq()BfjCDtQgAooTJgU4Xt@1 zm7)C;k`ALeRlJ~Y=z7K9A>BKroM@aD2!*(%)J+s?(varNURBc-8>mR|cUuo3-tm+$ zanpq@1m+73RsCcLs5P8el=y(m2_}m4AgrIPc9zHExa9f%3c1L5O2)q(L_bJDzPF9# zamXf0NstU!M_!h`Hh@sWJf2P3FbIM|XKb+GV_ojQ3E^?@y{@!*tZy*uFaYMm?}P;( zX|uIyM&#q&;4Jj%aM!g}4_^k2>eDU2*On4yh3xR(Sk6^eD+sFD%h#BIdPjR$OOdmW zet0#Nh4hiH(S29+I~91(kD z&=p;E9Udz|ki>A!C{i^u)-O_{hLc>CXc|;Dh;*#`Fw$_%(H3rK~9d2o4@IS||+6cGZLjkC#a)P0FS9PjucYj6Sj+_`pG1^5@|s-ZMW=;bYTRxV35xT2E;jF0|Bw?E?5 
z>tfSOX3Acao8iPWNIqCL!GX&>CCouOCUVPm`q3|Izh7U465BJ-qIuAazAZuGHP7KwtYV`P5Jj3u0UjYy&H_@9nU3&?9y*! zq@}|&I6~~UJYPM5sb_q*VOlz#ZmA+xvV~D%Mc3Kn4cd;UHNxR+Q>{H+-U~goQf(uV zn9%n6gKdGb@riL{hFNpZ+0}xdc+E<{Y{Yn{NShGXvqwujx3E0hBl7A^x0}vd6}?24L0jVu(@4HQh%+^D%@Wi@aMlvK+bMY%O@sU+ zNCPg{WD)@%%s1NouMg$Su=7nOm)(J(p3C$Zyq(3ws$Z)1%(`zxsQLm59guemfS(Fu2~;s#V~A~3KuN{C39Y2aFTsYwj}HTqlp=vY81aowZP zWtQvZgWJB#dvdC~(WcPPU%P;Z{CIZgs#t@`@P))|~mIW{ngP$zz7cBMdVU(ta)wNTD>VHl3PkOm!8(R>mY4rvrbIWWU0&S zXjwd_PT!EOs*?D+bfv^nbh=@IG*`wEE;i92(Ma=hgq9u2VMAX7J~Wb zrd7>vPmGG8);l%cKDuL9G4BX@%&7_?fbQ5T_Ppq|q!Po|OaTIXCAhasB)lk0`k_20 zdVm`IN~`Z-vtO$jQ-C@v_lpWJ?Tb?r^1l7U#*8pkf5AE=_r{E%5}?S&4W0O;g8Wo& zmJB)XEC&jVt}B}+FIAb6MHMqK{%IXd!i@%0##uqhIKjVihfDYHy8!a%1)-L=17%)q z$Rltp@9Zp}0JX(c>e23PmM04N9Yb~fcQr>K_D)e?`qKavQIdn2A6V}NJDvSAvne6m zTVzj#t#Y-wr3x3P78 zR10X)_g-+rbQ^EOMK9k89CousxR(w`4keKnP(oiFb{G7nEAAKFsz`<`yK1``gl)D! z{V&D5{2;>7%~*3WyirXpGC18XUe%j$5#o!ZX?T+2iyj|FQ9r5*D@G7e)l_e39gXHo zkB8wWlvS%5l^kw;SmJomEvl#g`_-MfoKuhBLZ zvGcvXgUMn^y6&fPnvBDKJ)vPcRNr3yT=+T(zg7*Q>uxEXiI#wKlxRVv=L2w2_zVXP zaQnj2mA+t$j#-Li;Rc2)VkPyaAnm5>owycRhv^WrPX93QtXBxiHudE@-rdCDd`0fl z7GUjzRM#}!s0IV1NY)kM)|P(@9V&1u2Wt$!^ETZ=`#qfP60MxoA}S&;@j&wj$l?m+ z#p1T?@WFfN%5>^+D8Z=o( z6XEL``NQO{hqQ3Sec47aM397vDSq}wpE|NvIteh4I9B{h3}GI^%TCFCLq;)XYSrup zV?uY8>ZE(W5sh?xgx`!N-25csa>2jaw0Hwq2}|r{9VspL-)CQE=3lWr+DrLkxeAJH zunCi)f}G)Tpdq!!pblVKvX)16oxJ-J5P|<_f1sfM!G5$8mW*>khV941sp_1}>z}bA z!WGc-4uFsVoOtp8fwlx9T1~_yBL(iS^)_iH<53Pm!)92S3u$WmQ&dqtV{j+1}iPi!xAV44TNnB^8mG{>N zx_!AG0Xuxj^*eT+RxzV?O}>&jt5@2R$mGydFtm-=_Hy`u`9~x$sxk zWu04Mq9;}zYABdNuAivmI3s-s*F@fO9`(paN4T0mT()so7M9)8@5pf7%Hu1j_7ai`upheh^hJB*^dg22yYy^qCD1x0(wd>iX7B~_-WVe%-ev25?HTzb=T|ydxtF2n1l=* zLpC>r)S|%8W();yw;jPO?wF`aj9m~bjz_Zs@B!*`T)L{2qx-IJm){a-(R@o@#{c60 z+`^CLCJgqC?!Dh+B9apQax01R={T~jN~c*7{8c>h9_g2wE5vV?1tEah0Io(qX-?~! 
zcJLRZ5aODDvgMJPf$l~)`gqZoFlau{U@?udlU49+*tfSNN?)wVG##P%zL1yU0$(oEv5w|SpX zs5}Dmxe<3X7#+8o%F4oru>=}>?cxHQby)hp>xmc!`{S#bvp#aNfA(tUn|Waeid+1% zUn640Ic0ue7q>`{1hAgwA1Tvvk!Sq&cNziw_wqH6jAf?Rf#qLKz!96ruG{14+Ir|= zhl(NiCTvX~WPxcVg17>o3CCqQ$%V3i%QZ-wHy0s%3DgJzL$0wDmq6)0r*=g3*+bwH z6|R>8*P<-(BeLgBVC@^Ou>V|J7Eq#zoO9S!@KVC`HV$W7JO(PEg1_+PSexFuT9!rG z=Y{>m3=U;u$<_eT@ADQ>hQM*D)w;#N>R&W*<=_NrKK@ZW_$$yDTFbwVaM6QddDe5 z4~CVU3E%o1wmwYzLK?ees_*N=GsC}JAZL8EvOPGmxe57EL~6^xOVr9D%M`>$409HW z+-49<#yNbUV?}Cs1)4%QAZZ~$BrA7c`JIpqqBW;KC#}{}`L2r#?kQXaYEc|8(u*ac zQr2d$@3B%SsBr!+ohngS#P2_H-W5{%&T;gofL}Qm4+ZT}MCgJpd7(jQAFOv((R@3ACoX^HdgoJfeToI`U51nX7b9Ku+OGd5hNY-Pckjf{^vv84zm0e8RnC>=ika)YeS#AhZ= zc6ZHvaIoe!hVCK{vp7~>;x65!0$*Yl_gypTx%1X0WkJL`*o(GNk%@6S`z}m6Elul{ ziGoWP6t{&EeEofr7yPPmbQ^_NcZ?Hx?I;__5%{}n2tb_U_N^r$2BXxI3*RFDgxbat z$dn!ZZ_z32oV)>qLK2GCU&edK^<+@gw|{O91sp^+GO43;BcF35gE@ssp-@ryR7;<| zU;i9qLrF#NV;bjwc*;?jZx*w_Q&vN~-v=^hE_J%Oe1_LEjXyBiS%bl8QU%c0{r_3}w|MrrmY%7wZke&_&l*cZkW8C1WOnTa`Bt?*<2V&?Ok1 z3BNhd<7KCejIj~0)2&47fo{I3K2ymNrey*UgN8go!W;Ht zXKr4#_9!TRVwR})zh{G)UyPiFzy9r|ULTDgDZx$1o{vMm+nB*8(J@^m@&4%?1ES$I zesH5`fw@=Kn+R2)N#9s7BgDSO%XPGkp~$pgHEFcrO?4r7Scip4Ktw3_u6NDW}NnzX$`>K3; zb+5KZl!_qGW8uwWJS4LYD*J;}w`?^7ems_l4x{QpCO`+rCR;EgAfj_Jx4R7`tX&G6ov{MVv@YoEBuC2^pv@W?r zlIkOJ)!9wooH4}B?Wn<)+p==2SF-LG=mRAA zdiUb*IiiiX!)Z8PBeVzyPY>4C*%xHYbF~Kc@&4Ds^`er zEKVR|k`;S{is_#~u}|VIcw@MEdyBizMDkK!Ep_=YyWhJl)Y%d_SrEW6YLdVzFuhbC?SaWWZWmE_8rI|A3E{6GNNJ-(k+RglzEM7nMv|9L1ldtJ;RKEL zoE>2)rxm75L$knw$SMjttq6V`*~2o2K9k!pkaj*p+^wf`P^uZItMW>c(G`Z;&lZc> zhHxC&>)GoQId#0vb$zBAgO=$6@78hVd7rf?4FOSeuakj;p8zXfkKHhdjaZ)#RK zEQso_{xx4qBfI$swYihdJSw^Z9G%_{x%@=nGKXQ z-=QnHMF2^{g=5LW31f}z^njJK%`r#OA4lsj9K2(6%!I=T{F5Ad#H!T_bCC0!s51W; zc;RNQSy`F=6&a?;|5WybpM~&YWE<}zTRdKmR8NQLy-NJ+9Tl#k`5t7d0}Kitnr{0e zL7lW+@X73v?k5sb$~rg+a5E7?uldS%|@6|?Ck_e!_FrtI3WF6uBB zH4A-usR>QJpOAt{whch9<%}3%0r|qm)CZlqogU{ws5Pf==wmtzW-Eth$8HIjt1HW* z(a#n;Nw%h;Ym*>=s6n8BX3@ujQVjdma$Emq{sazkQbljsA8REsy_liW3kJT0&WJME zWJyoO(?QAZw?Ct1gsv@P(Z*ohP+Dh0<~-HLrT)FN 
z?TtsK-?_6r&?(Ri%MN%K;rmEnpA6Pz8WoM+asGi${*Q%5#8c8tx#rd%uU3{E8if z?y6|Ih`VJ71WaZlC;?u5VitA`hY53kBDpw;?zAWC{70hxMV-q*4 zfBaaQhQn~m&l@h6Mf>~tp&tns`U6!s2mR0%V3z@Qw7tx1pG25u=2?TI{YRD>(Rp=4 z2_LY0ry!Gw^IB;)8n@zzE{=un%*L@JonBWDe^> z9~Zr5?hpO;=F}oQ6@nF9N=yeNORu*Q2I|OuoijG3Lgqw|{gV(UiRpbQ&m3+KQE@_T{wphrHp94!sTpk z?*dF`o^?siqo#K#l%^y4xo6m{9cGc{;=&-KQN14pjm^GM3D|1M=GZ~Ez+Za)X>A2s z48T!{aAk$tRi%C9hH{(6=h5-t{0_xMCE6u|NFh4pxWxF(d^X^tQ;&2&TU#+s!?PEc zdn8`|`V3;0Kmw98YnUy;dCp3)@lA|vnT6@fsn)-h3ZrO~X8O@^(@JAe{6W>iE^pQK zX=SA@nDAtEBAD!HL;WeoFq&F_{Mp{8={s;(uqT^20m&<*{yO`0m0V6bUHT6G7Y&k= z6jk_;hy6rcfDFa@{jz6y(uB$OpXKVHila*JEpkAjl?NrP(VHCr^`7;JmQy{dl9eoy zM4R+4YgYy1vs5n)%OYg;jk~8eXUG5m?_miyW}jV{MXp}?6ZJuQmN`wKZs)yp??@fj z?er^qoAW>kP0oE2_m{^V#B1W0dM%+YdtZ?Uq?Jb=f`zJ4gW_HvwKpusyE&=XAfT{= zW`$kAQ~dxtVK`;DhF_6!?RJ{s48r0<0oigvwYuj6crr~3+aNV~gcg{Boh8sw0M;TpHD;&zFiY|r$M#le&Q zBsH7N$wFV}B~mLs+^G!NKM5Fz<6H?aa%&2B$1I@36BirgNyELLy3mP_OG?7S!WvJ! zsS6s%D{5L@IOiYvv1ls9jwp|NOL@%`(t715sYkV}+~gVu{8PBzXkcd3u#ny2AN@3% zi_~y0J_q%&g)+49F>)NuP9n%VZ?O{V68d67odO4|5*r~1bn40VUI$q=t97D1a}bpL zHTf#p!G#?}q_Y%)D+t~dQ50BYS<~HUSKL3HS0v7iZ3=c7@JB}O&4o~^^VSQamW=FP zw=kn(9KwR+Mt|&plP!HkKr0?YyYrPQG$Bf~7k?VVbi*kcPu8{hEA_Rx{8Ix4m70Vl;E=qO zypOdIM^slJ3dq~g1Y`ASbtD(XKHDwf9YY>{`t{=D29;gFA*8X_o|LaNcQXPqr5dlh zdG=P}Vbk*jHrgc=<@d>N1vN;~azG}jEBR^ZK>Q=%k%#ri2 zW2Bkay@Z%;aU{Nfi4}{PHf7&hd7Z)F6?oc7pAf_t_RvZi&o`=BnbIJlH)I~v$|>_^ zv6wc+;<&*@s!z^gV5Nw-%W%p6yN1A~d>^Q>Sp|M}99?H#nuItOb!D$g1iwO(+-3<` zU-YXh*pEoV5D@F6&{#wQJ}MqYIJ1dX)^?%2+!odI{3RABvOIi}%$4ZFe9^YP8+C36Jz->x@_J0yM#DaSp9wEm_G z5n%}@CmQg5%V=sgk!AO-GkJ*bkawBJfnUBweLNraOTpH}Y2D4mzPi5Bx<`A&(2r3& zb+&q$$3msYT&n+%Npf?WuMm+o{I0k7Hx-W=DT&pyl zOZ$(pg{0sBHf!QyG!dj_^;d{fHXI(MS{xgRKE~cT#n_?mQ_T*dTl?|MBL?57iXu=1 zBH?B)-Rf6qh*rUGjj5enaJ3RANapuCD^Y?|J#+sYcG(x>adHxAcBL8xI)OiVX@WS_ z5GcDm=_~|`Xag%1SEc#0lDX>rqo}c?%<=%L5eVtJIw=fI9oqRtZiWDIc?7MMhl}U1 zmpU?McNlN?D_MD98gbh#n5&kzzom~Is6HykL~A03AUGob_odbaY62M)44_yDDS-Uj z^)K=As}#RE?x?cBH}gN?PTMu})sRfQjC&9W<{_TMZbp#tM=(_4<((J8Pgva;V0O7q 
zN(lyJ<1}S21h-Pm3hq(xV@{eLhEfeUd5LT@p7^J4Xis@%Kr0Or$|T+{N=vw52JE#G z+?vPu`_AiG$1#x%594FHbnLG;K!;Oebl&YQ(F-iGLX?uth=FQyKeS4ilhPk%=l<;) zJblqsMr4E>;Jt{v1>ebfP?a(Bfj#$(JiE@KzlTQC9uaiK!aav1{Y{dBD~^-12&5KL z%=wxBg+&;PmEv7kceS4LY6G_u4h{7*t94+mCmWd?ar6cxcy&O1B>RpV@Hq>1@1RHo z>;gl-17uSVW-sSQ7wX!q;7$+XpPH zcWW91+7{r@EJ4^}%{_J&4wdQCR=#}{xyazH(eEt`6_NuIqa_`9%A*E*`jEPxy)eO;~&wAhj$;Ym(F;DXa8yF*%?l_~b@23^F@5|3+#U3b!4%-@Y(#rM^+d(m_B>Fi{kL z7Job>z$`EJhDR0KK8-1TZC;3S2rz#+l7L{#@$Ln!GV1|~dinOUjymaGsC9@%7sKDu z$f0)S00NOf%wz4>RPDbQ_6xXSU`lHBuWu%rv=p zz-nsqy{Mnxadb$qu+@kTgBjm&6^jh9@4A@u1yE4KVmof6$nn$5e5wWVZZ&yd$ql`- zRY+qsUAY4S531QABZN+MXX8bYMHBRDg&9J*KsYM+lAUcC*f~cJ?$Eo@(B#kQ=LGoX z$1H6^RQRw%c1gn5fZ>mY7c`tMoHnC%SR$ICN?Fo8KtwTSFe}C1i36FW4&+hqR7ujZ zXUR&Ci1_I-UJm&jcZQZcuP9aPZ@i_Lo&RQY(W*=Vhal8<`Jml6L6d=c3VbYxP82^Y z^yWgVB(d{}k#=9R!uOR{u^vJXOB|}(J1UML(%vP!Yak7w-YHT?tyHGVG|smY=GN9P z(lFrHJ7#eCBht6BQKCqp>GXJr%=%&Wb>F9Ka(g=b2leVZKQ?7|xmYCHo%DH;h88LE zT~1=x&go%Xz5|D+m%6K4xD8>mj{(kf)rVPuWftaKQb?bkNxFVR52A+G3V)il8hEoTUkN%3D z39C@@z2k51x0^a5cmUx5mO|}HD)W~AuMrYwPHZ+hS+oem`ptJDFO1d3yAN80at8cN z5kPYl6;VQ~(FdSQWdO;<_#6hv%gP&`9nV9cw~X~jaOo)+K=Q7ripxxyB!MwX3L z|1*I`CDr&v4Wjf!5?I@O*RX>IZeBeAdGoT8ZirbS{+~!ZXX5i}`DmKw)(jPsujS*F z5{KuZ_JPeZB4*cxNYh4g51thDy%-Dy!&ND5-5%nIq{I$)xTi(t6u?N5kQJT-#-fA} z(C*mzdg9%nSYT8HgHPdv#Ll{BST5lF(0O#Owk4)In$ftm>lOXh$)%38Y(x%0Ga5hl z=p47A4%{dJZ7=CExfyA(mn@mC(d3=8pDU1zYBGA9<*rhF8WpAw?EGk&yIcBF+Fhy# zfRH8xegp#IOPrNDv`kG3$LyDZg^9w|djt1GF+H*kh;tiNh)(_$62rq#W-aAz4)2TY z#VLaK&QXYa485@q>4>sHG5CUc?t*T?#&K>=9*8zUvcyPV@Fa#e#e)b`60~OCdZRrR zZ4&=Oy1+18;yGM|hj3gaKT}W)aZ0mlc^R(}3y{-$OCZyq#08`<^C?t}Cxp$RSU{d9 zQZb|=UX{t95g$=eW#BkwoV;N=t{@ml_9sL1Obxio@~kKse6T?;I|Luo*8(O+OJ)U| z-(z?(7+fqjQPooeW`|$7w;$^3dous30Yq@~0R5=t2psB;H1+`>gbCedup4bqqil{W zd>J6DWX>_u>^U}6EN=TroY95B9m;2MRhaqZ*C0@oZhUF({5~mW#Vs>S(ug`jAS1KV z=gkhWvaVOl)Jyx!>7wSZ1Po1Hryu+8mK>hUUbe)vY<;YtZMQs4=3S9_GG}2hqKZ8f z)m19U(jFR?4~uOcheuy)_C-QX&j5Hyje)%{x^5}dc(30VQ{&D~oTfcn@N)9&2lhE# zNAN_|+~GPjsku-(a?KAdr94 
zwtCr)ydHbnvZgK%`Q6nsf)k~M3%2LaieIx^E6Im}Eg=Xv#X4@6F?U2o*MklMi-YeB z>iWn3k_iM{Yap-b9nfeOp{xpHD5=`ET*z$m-~Kxd#7I#Eh% z@wm@k61H8wM~UHHs(XqhjuwSTL{gjjol5|oH+MEKzhDOdqXP~y&)oJlMqOFO-Fk*3 z%mF8!9Mr+bwZDeYQn8;(bBThB2UB@xTx7F9@jJo2@eyOaIcVd~LPvZMRwm?>y$?}S z_xc@ph`&O(Ac59;{m9-xPT>&>rnE@F)@cznfF6S3>TbpM^y)QwCM&JLaRL&JN25mD z{zt4sv0b7ctqeB-|MZZL1LsU*te50^28d47R)g-{*rwy7!GT1VNEXZ)^Q*ypa)XLr zSK}{cVlq7`4$jCa9$G0`5be!@qqG{9%FBmZ$I@Zb`HqJ4cYHvPLmP>DO{4mmf@f=m zwFtbT$e5g$s^eliZ1pdY(>39ftUf-6DxgUx(Cye?X51a|(c>)M;HczaxJ?aMb85v* zhS=D5$~1T=Pj$z<3j@`V*FL`R2?DbOS>4avlJK-L1)=g zo48Yyq(YpWsLeeNP_MoMT+ZQiKQN6?-w^}>8mIs^$x9g3;E#MBm1SUfP;W^nE0>XT zhGi0KDg(5WJ(`05%mzgSm+2wQQo`Qa&=me}$K2L~7LBlEIjySP+Uel>;$W;wDn*b% zb;TgWhBuoQ1x3qY!~W<#RpHC(KSl9r;pF#kXtPCvOO*~$ZS36v5tP@J9e+0Dcy0Ab zd8Ps{)3@THKP#?L07(`u@R!kP1IgH{Zd*%YtrPe9zWWHIliq!`3OmB)<%N~hXbAJ+ zn~O!T&R4;BY=8ADG_#WgKyjNA860282Jyw{#&`Op9~E9Qwpi6X90f^=MV7HL@{p7E zZQ;j%EPScllGw5Ig-{C~RA5f$n_u-4)6{}*K$7-0$Iy^`8k~b;(HK*8O_jiSrs^X96$2UQW8qknoMoAF7d+p$poYM(UuB%J80ZAsSwbl?30V>Fi;0^{AU9Pv#k zs>FriL0eZUtO3evZDlQ5r+Kft^hQFa8aDfe!iC*4PQL)D!}Y)zh|^bLYAdXqwt@$v zL6S6Ng5SpqV9;kLB$gemWrUWoxRx>??%+J`ieAt_eL3;Fi<>6oF15I~Y%o334*xeY z>sIQjUlVqrqu77}04AyGq+%h-U9y7$-g~pYqiEoYCZ1F$Pe)sKC51s%HMX`zhEgu8 z-EP!4(&xdUqFYhX!6h+f;J{ zx6!3D%KXQr8l)fbf2{z*NFtfGE;Hkgk~L9l&y7rQ`ZSnmk9qFQRIW_bS0?Y-2AlV| zy1dnd?G>Put6SEMSg!U;6_+#5G|XXt=M6Y)Mt5a1m6NqTUKDzH*jXX?*S>wIFQmk7 zXD5U1T2-Ck-84-z_M-R1__*r;9l7?~t7)ewkdHi}v-Zqw569yOcbc4}3+(|l#<{R| zxx-PeF{nfFu5?gN_p5;a){3+-)#8bQ9a7J|XL4cJ-^S?gr5!wHgz;JeN=~d_Yyn<8 z1b(5`I?j;Z*@cQb6L@bi!;o6IO--!ih=(`Nrf*SY6eQAMB@(sPja`lZW*_d+-g=ze zr={<=(!5Tymcck`G$bbPrVb3S+k-3bzpZ^}m}c-R(saFVH5-j+!~#IeB_qp1`2Q== zZtMXhdH?q%Kjj-R&<&TrX1BSEfTZi$Lto0A;X81cJ0QH~bD8xuBWNh!=MVZAiwoO8 zMW0W~04U)ZHCjUibb|G;P_azS!h~lRdkpb2Ww8*|UoTRd(emX>xZD58B_wKO8cN98 zzYc!EI5$u8Hm$USN^PV?FInGS%K#MSg`Vqd>7!l=; zjzoQfx^Iz_*ryd1XYc`|8rs_rm zYnbm_d=R#5A&h7Qny&S&eeL1iX%Cm7M^C*HvCJHvDnND$ex0)7sDg_cwE(=mLG?wr z2xysG0ZkMwQ>438aLU?QIO{LpJO4H3gMI8qlmF4Yhhvq(rW*+`iG_~@$2Y0#CSrwE 
z#$|j7GO^c#Va=IT(*Dd1G#9fS;{H~)Z!sWD)BqnbdVBUF=8TW^Lxs<~>pL;XYMizX zuV{0~4#t@QkczKhu4vj;2&yKih`!leFPq9V|5)NTN)Uk&2QCvI^NwV>)ou)fVcWpJ zbqI^MqWhs<0Wh^4=HFLt2>yn`cvcm`4eEhyKVlkii)Uy zTSZYWp)rdyAZTA^Z^a5#MDB}X_7+?-{f9htC%mJCfa=3#>1q(j$FJRxv-wW-W2tPM z{zivI=^ii|^%V?c#Mb(7M{IFDNKA%?`_zM{->{>z3WKr+yx$pFO< znR`sEUdkw51A&VT@+{Z2t$FaQi2@5ycI=8m!!Q9i$xBzJIv9iVdTwrfKY20}TN?CG z5EzA)Z~HZIK<1j$e^W!I)G>RN_53m#l#dbpx_0KNJh3OS#lbc{;}Wx!)0_P3HH$wf zo?luMNvN_|M8d3ERo<7`aQAZNuEdek|oD3H@ynnRus%zzE@MLTIrPG11`U$ zsw*#Gt#`>rBMME#aEOvZ=DK$m#f4R)>IR5l3T5GuxNqU|iFYL|F zp1!^v+n(1n@fzAVUxZR=JD>=qQLzXEsOt990(vXo=s6jz?U3W8hXD~8uD623CpF}c zHQiu zPcQrxy;kt7r#k?a_!iejXyDnY%*Q2?{=!anUv~9y{1%(NxB&qEvIL>Wz3APzt-qWW z#~LX~d#Bn6A7I(gHA6gELJu`(WwfS9dlA0ArRw|1T}P5%aCSwu>2{9BS8Of4e($olj#V#h@~^@BVp6 z?KPiT2=jHSp{EKL`$TcY;ITIJO+L^WBA=5 zhedAQyNwWYmsC~?)Ib3x7StP2drv*c?N;4oLAS$}n;BR@FL^e7vNw?!7fcHK1y~}F z+>E3*2c__2u&OfPkic)d=fL)zot~aUz9q1sOW|MWl{C5zbRap2h|$al1+a%#-A)rl zVubNN?O}Q1^iLShN=Zsw1z<{~F%^HRfLVF<-r&g3Wxc8ZzgS4B5OpOiyHth!#Wv?KQtgP*xVA**@CBY2S@dRTacg}4$AO568Wv2{*Cx6 z5yw2!ERG$M6PkUX#L$yNCV$j2nf!cpL*yOaE_KvPihE%|3nEm_a}5Y{Oi*?~WodVQ zxyF(dO2&^za6V<8#xfO1Z&Ek|Dh9rb^9U*OZrgdp{Z)~}=uGxRk_}xosA{bmIxf-@ zF>_S22L>TQot)W}#Gs&Xpiy5UT=Q`qsfpu>TMxKD31*Xu0p7gfE3~KeGstiU$vk=J z&abvT0X7MKBbVlmI&~m3{Z9Onm9#8QZ`fmCtcnuvUo5)IPqG%@`1xJrVn} z&9+lexDN4!O(}7KAp3F* zV?`w=$or90Grbx8gj{CIbs3ljj|@?YC_#NkrqmN~-It`B<|-C|k43=7D5b}@) zVrzX?^PB&NeMzlfsi`3CxXa^90~dEeo~fXy8UQ!6L<|ja8cnB5Dvtt1pAY>%Upd4? 
z4nzY>^gFmp`c`ax68IXOoUYPpB$y#{6fvQWt6@ptJo2Uh2*Q2dlwDBNB84-i^kAc~ zXIUV1B9J!2nJJ!`42(b@QW})mG4$5PNK8?V{rZ|qy_tF9(J%W=c6waUjwrr@rr3OW zha+j%OY2SErr}WcZdR`Yi_-)nR}mS_9g;mpGjg^cUlK_uc68`HYYQZTSRajvDRk7O z^}8W<37fgNt_P|!D=?Q_42O*t>c#*$K*qnfOYN6+{sgogzn&2)r0~-o+Pg20WMS}E zCxHq9;a2;h8cM9R0DF{r zc3M3llf*(fG-ChvucIYszUeL`hZZ&KOVj?d>Bb=kq%n; z-ZgLS35j$GtgtGMP6FM6RL@`QypsdQpCXFuHphSco@UY|!BGt3<5umE$nV(HW7ON2 zQd)vaF{w@G|A^A$Ff8!rFSx-1!g~m2rE>nTNwtV#>*uRO<1}q&hnZRv4;C3x z?f&O!**HF=IR~sr>qR;vAmul#x;(>=JeL*zQ_qb1NnUFa_2xO*mQ0P?h$MVLUpDh8 z-))$&I#(@1B0o`Tda(bD+9)nMtX>suuIC098`Pd0+hZKt#=6#?xc0VI`y*i3CGcl9 z2)TWLS1JNhP!%UmpfryclOx3-QjmeRsK0WHhAr_cZDUkwAZ#)Bo+)U~L*C{<39R-#=1d1s#|nO=n~0kPgleB{=jYBNst`3%#$4_#Iq_(L4Kpg zGl`aL7E$Gv{U@fTS_YYNB3~E%rjzX!Jxqi(=Y1&AwEe<$DFDX!s)f3w?RU++8MrCo zW~LvXn}09L%&@M1|J3qaL?lQdnIBRh*hwkIf5saxQ$}kJ-6Gkk(flm>);pybg>ZTG zZwN?7Q~$?NRcK2Elz%I@Y2$w(N}o@h^H@rqq!B?5O(wug{v^8IT8L9=uqI_!5A3d? z@mH$QxT{n8wJNPdCp@=qzTmn=D4@r}OXlm$^74+iVxS%Nd+?G|(7-7$+7Cf=Zugij zGHG~Fkm!WczlR9IoAu#`AHnm^O=omycb4Znq<3EXT3?0vC(LD%YrvC9w`P>(ExA;z z1j*sc^bgtj0ZA=KC=6#jZ|GlATVP|2(<`wM0Rj!{BC+?7e!2u0^J|eJlS?3~|5$ZJ zS^`m{W>ojdUdZ_WJ3=IP71zDB<40fb&OrHx3kDIXjL486U{fk8!aKSubCTy!Ca9Gs zn!sTaqhZu0T5!Hsw@-3%u>=>wt&{SojWm2bS*c)Rap?}sW`2GDegx1`ePVMYA9SB0 zkXF(Z63V4u%}tCJYkYYHzlIl^_-Kb#kE@F6Vgb$rvy(sFBl+T1)O0-N&8b(Q38 zLSOiVGyKYx3=2U%a^nszz)b`ApRih8hEdX^sY?4=Jin$sMT`Ln*FgKjl>Km zlI+fr1)BciDkki~5a$82I{B(`wJuP}K8(YNptBa_KGVoh@fQ&3P_?Up>9D@D`~BsV z8k=%tn*>vN*L6X54+n!h9c8A{MBAjuSShcUH)H7dqQRl*KjEBg6GKo>r3&|jO3(D zrrJ$rWLl~;{65twu*hJD+ znqd0&(9hXfobWsw;@ArUtwz+&s`ZmnuZOY!E*y}91`KY#)>E^t5u_irW^J50)fm!| za09HQ&8J349oj2@U$_+2X-q6ylO?USv?N&XW9V9D_>3~G@#d7xFTSjNza z?0Ei4N2bp3y98dQuoX68r4et$iwRl1~Fh(sN!|0v9pHKEdVayzCf6Z)WJnD*|`>UMYp zk}L9-jMHZ2xC_GZ%hgACXo{Y&zc$HQ8bHC3R!qN%ug3rhuRH0U)WC8FIIoz)uXSi( z*T_X%VwZeO5!8=tOgTXV)IG^9p#R(d#K9tJr~cyM<6g1mf~2PyzMYH!Fb2B*Zj|nF ztUABg?M9cNbP=Qz@veyOhgyLzmauLEa@)?n1|$3u4{$W7p4s_44F;pF_<0V3eavD5HZwLZZFoH!rkDO3^U~p_iKtuWi)A 
zpUIjLj5kiLDKz2@=mqNaqQ3KD{-yOXIy2emQxyuk&%mL02&`8C;RYTS?<5|at)^=6 zd2z(~(`nXOU1eGmDkeQBV{z^FX7YjehTlRJ6+Jqvsu@9E{HLU!)&8P;tPAVN=k_=| zxpaz#@62-aCc#7?E3Ie|uFQayXCy>$ zSGk$b^HP;YbZIm+9_STr(UP1oPZp)>OiFGDix_5*2x-9GiiDLt@|*V?PkJ83wh_bB zh!P+{6t9A)cOmQ z@KFJ6soUZ*COJuGvr$FE_8z0mcf|qiPLU6w*X;l9hJo0QX;*6KMSjbI-r5GyTVKA* z9^dpTP!k^)%t2c^I$Pl~c(+-ev$}@)`hO2|iatbmEi=!ijjcUZI8z)1Baldf`P5jX z1s@n=S^6;11gngV{NfA-j5?&sc>`u&o`ThBH;sC><)Cs78f;CHeQZ&U#*PO-ghBSc zFO4b!xwysFB*&U|G1jPDZ~p5b?GjB5vDujqdzIz&m%D~nb#PQ+rgm+%(b`+@=Bv?$ zRo2lRKparuAGKSL;iTVnPf+Insfu}aq?bI_eW@tW#9yGn>S-5PAap|T8{X6``{tmP z#L?W#Tw8dk;CW+pjf=~I1Rjd!IgZ~oM~GU+!w=h*36l)+g4AS>@NRde5zJ$d6HzGL zTe({`@~4B;0J7!Loy=_cf7k#ox%>6Y7C0n&B+9dK=KecRH`f2#P>_y3h@}WbU}_OM zQTQWY#E%}jf(frS|8(o0ksQIO*(Ba6Y_~#jFW(xvxp^P$+S`k$w?$Y-;8;Esx@faQ zsAym<8T#`KO|6Qf?(}_zRQ<)Zks0xn%$G;w)`pDdo))zUSH(K3q7!ZTTZDC4$*Zkx?xO>O2zlJVFVt zN?((`FrUzm@<~Khr_-_hpi1#zQX$XSx5Ouc)#TdyQ;2mAW@Y?_vPxM<4A@~7iWF7x zQK(!fkGchGcTKqsZ%6?iUZR&fPHL?P$A`<`V-WYjkLjwaD@}f@Uy%=FkWAUc)NzW`&3{E?j{FjR``B}omcV^BkbF}TS<3&t_KJqSPw;YMZ>Sx>~*|3iKq^k%~K|S zPp8C>KIV(E+~iv49p(;3vj{-is2U`#FrqWFI^V5Z;fc|(O0_p%IQ{zv8`T@4naAYB z5=_@LgBD+$ve|gaZ{Jz61eHw{PNkFvq7hG(jaGOm>U^1l^|;^%m=fhQ_c`g^Qtq{eom* zr4tGSKfL{E2K5>ZcNHAqT0PjjJAltA#-#!Oy56n}!5TjmWc*7=VeTxzt&H4pOd7cbe(=@XV3)ABYKO zQ-@s&2AfKq7l~jDB6-NxE5j6SAX};doeW#RH@|U@cOg+Sc20M42N5>yI^5QG1G>mt zbecx^-CS+@K#FEp7toaeCzfE`OQB{DB)rDJHRHT|_Z1u5+F4Z^;e0ZEtKZdXM%7Sl zTI&$fG&j>Am8`{F&FNnPw7DkEK}P$|Zs}x2=UjSVlTRWG1xTl(g*1dTx7D!0f77*% zuYxQT@Ihot(vCSfGG)kp##H=k5rRkz!NwPuIHJZAr|qy2BMuwHTPy`NT~r;TWZ789bP*L-6VrcnimPB|R9zO5VooI_8ZE+NA$`_EzX zeWB;WMu{IT?VG_caiE0bLV!vz34^X!*NDGBR*q}L$^@jWQCV~~jqDNuf*&o7oZsqt zgfZ&+@{jAj%HP;uF#Tn0``41`3>Bgrajn+3TQQ?{qwqhO8mhX&ZMJHz6tdBUy1!}DH^=&1HyXU{!2~d<#zF$vfK$gxi4ZJZH>(?J3vh>44WR*x))su+ z)Y>a)5dXJB7V8>v0v+Bao1G+#{!-ylec75NKz0IY*Xwq!WRe1(>9Cp?r!PX+Q0$l{{=)lcgsNg?hRAp44fD1O*cH@a)wDIGdOb*! 
zm>KB>SGR*U-GZ{YFq|17II;<|c{cw$7dKEzj8*POV$aQN-*J#uaDOdTVjD3zn-p!w zGnE-(8LsUhW;v>dlxtUD lBC|vJ398uq1{bb(=qD4+wQEx_gKzIP)TO}o!_aE( z)25_=Q6E@u)$tili!+~XScEbvxt-qu}f|7fc660HvR-me(22-9n zBnFkiWbwU3`t^qMxZ|PAk6r4;4i!*!wXa+vVuM_jL;n$`D&pOc)i;_QTxZ&H|K1X* zY1u0dwI2(#l$zmpnDMZ=()yvSe_8L>domeL)aoLGak49E(*p4d)+ziuRU?FJw;`Ve zJ!L8o>Qj?~2EE^^cU=bN#YW}N376JZU9*B$uCA7|N*zWEh#P-^Z*Bw_Nm0c29iF9e zJka~5j?=O>(L2;wCoErC7y4bQ#$;cblfDcK?}|DP2qkuUA)tJ;Xh^R!nXB;~@oL{A zLHerLaA6bB#->L`0mV-OwdFT=eW*cxwJZVW%7G^ebd}IuGdL3pe0X#Z&!K1?Kztl6^SaON2xJsW|R%T9)N5t{0-&lG@IsUK9mbV=9e$+g-f((pj5Kn9W`5 z#dcb$zEPc31sU?(?Nvk(YQ-Fc9WdB1#Uv3Hqi-wa)|m1OvE!+}vKIQx7hIlXJOV-j ze0hhIo+;y*K!>uG?Fn-i9A44}HVheQg{;}2Eh%VS5M6DE{||jDV`Sb6R!IN@ zqPyiDr+{9P4|)`otr2L$q^Ls%%h!ht;HIL|Ee{eqtk(T;LrS)g|J!(l`ssdB`x<7h z2KhJ3G}%1>-HT)ZXxQi06!(8x@v{f;UI7vJCs5ZDsAM&=o-#aY47Ia!c6S^i0jE6! znhd;B@<(hk$41zUvr$(BqBJ<+yA@440F!y=3x_)enO@VoN;f zjAw|3kVQ6Uo^2vVOqg?9akyE;PF)5wFc>{YR=h*UokM1y*@+*CObu6Yg}%!?oF~bu z@_3ssU{qCj2ksEzDO;!sG1GzPna_7X1(dFGO#!Pe^O@Bor#C-@G^c@H{dPzy6~PM0 zrr{7G0b41{rsFFHL*ea^hJ#`Hl>D;tO&LRvOoUyF_Tq>Vh_WM04s+g7dwUuih_|DW zU=TH_FCNtB7n*+}g`f~%S=$KHU={2j9$|LU{wN2O;L9rIt>5Bl-a0gM^t;R3YP!L! 
zug@?_BovCxr~&HllIlXbbt|)a_@4D8;CL%zmYQfoW?JPCn&82hR29M8P(|Sic@btm z2S>9J+6!r{=q5J}nWl(01u-9$X&NQt-5AuHmm%tfGblVCr_?r&b``HJ-m^ohyr~37 z@zK@nBG5CzHqI2sJh9nuX=#u#`i~w|YWD;w)hgfa9HWzBQ&9capmbTM0O;{7+0sE$ z7}HE*vVnmr%L?oa?{@@AK)Q{v-T!@bA8_VW=o?F|6O znXQ3iH$}r=VI3!EhZVn8--3F&eq$G%>MC8bYqY2nU-iyJT|IyrteLI=yJRH}k4iCtR;8M_ zNvl{TsZPiw-0c`^kwj28lkViL%yI9nM~!PDKZZ0;;>TyP z!0hYSysO4SbXD^zOwal}Lo7Q#_;xlRd#e&X>5ku6Q;>FtcSFxC4_f zY7{>(mC10B-M$*CJW$|T?Tlx2!`VT6w}SQJ<7n(qsC1ov{gJB$CMVqRUW^Cd(h4ps zoE>YC)5CcZ)Q+um(a8bFvtnIa;bKpm{ZJh%dle7ZI>5Ax2@I+_7??1ad1oG!i$5%I ztF^Qi$=$K{W^vJ3K~j7#>K4-=e-SnM@U?~k^Gbs*-cCvjzne=kHCD1XP zj0XMk=gJPx03x@Zjbd#T@%{V32TQ8Wl=NWxLT;cimPg{{FK(A<8a{_p`rflQhh51i z)o_$STQ6SoY7>o-frbc1fQRLRWs;`I#8%U_Kq++Pq*j{pNDQlb;vPgkRp%a&>K9wHaLIB!#19@4jdW2*^nE z+1CP*)Bmn2w61{s%HYl^I*wBkg{`}ILk!Z@HV(zS7wY+%a@>lxYB4c&1*V)2??29Z zbT)1)9EwfHE!p#?=)>jU<}CwNK8uV&!MrHD ziEnn-jvBAl7qM4r^ZjO>djw<{wa(B#6&4ooxiQ<)H{qxlKc-70wYaUvOBeF4_uG?H-it7i)P1UiF zCb|n%7x})cV#@z7^HX`ZNk*mJWGh_OrkR2f>P!meIyF%&3?jsR>AI)|b;#`C$)OnuwR={0!uIl4aKB~4o@}BaqJ=u5Aw`yF z41zP^Oj7DeGXOEa-`HvvFl`JeRODv}Cd)lTW_IXrx+(jdoR*4BNInsoA2T@+} z&s0)M7~0ZOU3T}PexSMKieeNd2E51EqhixVx!!V}P*=K4G zn^U1Gg0zpNGw6GC>1d{*!?66%=bROl>yvU)KysrG`&bGP_R(6G&wMkU^07BsiUklU zYMLkhJq$nYLOo@{MQnJGUX`lxR%HNcXm+DHTNlVXVS1!tFbmS^l513w7jjvJIj8*e zS{~8`n!T0BX$6xjbSYoU$QNHy!oXkZ5N=cU9323(8`R-Lr=G@hb_;-v*eMO&!NE;(5QS02dKhqNDP-WtdVBc$TqDO?kshSDaU^IsL8GWVq zYLn1h&3aku)eYPPy&UT%bDsVHmt=pp;RRO;Oc~A6!0IH-4i2Oz?wIPFy#*OWNnnP1 zf23ap6B&EQBTk>YKnX+A_#PKz`qmQvdgJ-q>=VK4lTuR~iXOQ2liUQK_%2(s!A49f zNFjizl32O!fRuZBjBriu1--g~TcApR6g`t2k6qYic^S z_@TO{xX6?>>Wv2T$9xS4$U1+^%Q*Dtf?kLepsV8p7kf(RI3J$8FY^ErrZy5UDolwm z>u>76tNL8g>1eH2MeKQXdYRD$ZDR4f7&Xv-&xs&b({@YL=a`_RO6Hc zQUz$PfX`4WjoF`Ozf@XjB?53)|oYVaeL2;sT5T)s>>(H$vE*r zT1?Zhet2Q{rOY_IdJSusM$Nl@Lj9f`OI1aI#pD}PfryHU#)bR%8rSCB+?ltXV2*ED5tByjce09#zx`d24lZa`1;L>I{V2!ql}A(9j-hy=^&I;4=I)qoNIQC_$eQXe zT*G<$AS%4*%T-q}Ua|WgguI*aGjeYiDKfUgt|2G;TqAtTD1Cs-U=k{kmuC-`KU*OT 
z{`vgsQ(@8WbNM-{EnCw&H81uDJI|0IN8W$JA|HZN{~P^dCy{u@tCCn}*nnd@l+CZRU?v?36ouqcT3Zkn72ABg&VBw(`T9o&8%PZd0vx6x(4&qzd z1GRJH*Y@vb_vZGjK)l!oE+BnbopN?@#-as>cyjh5JSj)2ZoEKarICN-H8GU;sv!#Q zWdb~rJ^>yz@M!IX@6vD%go7vQmieS#r`f}^M7rmzx@OKqkEhQRO_#9SJUib_1ir*sX$%@p-=tGi!KH-|)$ zA*?>;9<|g0NR1L}iiBWlS(1WNJuA~TcQjbaV58qW7>8`$3s$D`p9^?E0gC4*TJ-d! z1@Kk%*$8gxTnVA2t<_Qj7=m8|?2xoEvywFJOfhe3t~)mv^w((QK+5_<&_ zH4alhAzwmhR7D3#z|#Z!zKcfQn<6%cZZw=BAUL(1CKvLv4Qr%(z)Vd@z&xD)>U&Rz zH70iw*vruD!XZc;k7^^S;85;U=E@#V)$PYIn4LGI=q_mgpx@d;nzy*012MQ(8S@{S zjr^c$2q!)31fNtT>7+AZ0wV|jb*s295q?XfG;kQCeVi8Tj{Wy|qVGQYVw1IB{Z~}= z>K-KFLvUD%+?lqMhOnnijYrc2ERSA>ZN1EX$m^!v8zZ@2cQmLSrEHuCII48A>uAxo zgHXs@z@Br4q}jpn^VoM|_DI!Op0!`U@>Wg5NEwq~8t+-lT3(*n!6JCetE?1YG7!9? zlX(};@d;>ZQLsIWMq$+hf2;|Ysr;9GJ(CnhLG6$?UGU7I*o)dx_B4kO))3%^ z2PFBH;hK?M^dcx^9|bK|lr$^z(YK*>b%&Tw56f>b3<@wf_BFpf`h8v03nKB~z%8_) zi<|oq45U8Aw6c1 z92AV;2^~eVJI)HlM+utlS5QM*4ao{Wlm~6WIbj8E?)IIUH#4__4R^X5^ppIURv2v2 z`3!?n2~0}@u!MTq$(+_uZ2z_+n88ez5AI3<3e{>M^fPqgb4!rx=d{v^{oqnqysIo$ zVn{eRzwAOA0!sMX8d>C(C-dDPZ^}I6?!%cbobp6os1v@d`#d6s2zBV4y!LOT`@As8 z&I{+43{*+{Qxtv=1(`AinmC&4&{i1v;-4q@RP-|6!h4XND<`0*^CmL%_t}>s!)OV^ zB1HIi^X|26EKsLj;hs&336uz)B)cT_4*5U73n>;_&`Lya&q#oABB&QyoPV4-)r z1t>!9l*qXFBm=)hFKRjzTplh{pVKN4Pdt?)&1dg#F~Rl2!*IZQ)i)2p8ViC>e=LsNR=*$uNz+tMsA#AA*Yq@m;!fbAY1{Z|j$7 zm_ZVF57-699CEv%GtWng?pXilQ1=zeUKQ3TI)@A0m?I@=;VA7#e8uX1p%_7#!N2EJqso0o{v~XWj@8soki?Yq)BP43;TH)H3cOX7uss3WGhF4)aByIaU|L5Ha~% zZ(6G|MUIt~9@Hhy?rAM@>>mXspbxiqbIVb+e`PhIIg_#rNWP&UUcSD5Tc# zEYrGRqcp#HJ>M3%o`RKIwjn7ke2F*tlYOcD8i0-3jzxB5*imQ{|0zBWkiwfw;Z+$1 zd#Q7xoWac{io##KQamYH3^0OaxfLx_ODJg2;$0iXXNF(p(bLElghPov%7MI8nL$6W z(zH!wdmvYHS^PpRrkYjE+(d{tXKdhgjPQubYZN!39&7TlIQeO?dsMxM0bG_ZEbwU2QO!heGuOaSD-lXOGO%toGlGp3Qi)vocI~T&F(c2DG=Qtrap&$Z`x} zjN8iBC|qhTiDTtt#PYT5gJr7!<&QjvKU`AM&tKx_7 zx3epYt9Nh&C5Ag*I?RbsutiM%bXWQ%lbf1mo^Ne**lkNFBzIsYz$ulz`;F{1D-9=Q zm5F>{xCQqQ+zMs28OMdpfnh5+^$-0)y`&(m!Y={DPz@eH|>+lhwZnDzgVv2n0ihHVAEO!PvJ@Y3b+a+6EZ zLmb%EHtv^J 
z2hgA<)7uOTON<*8P2D`hf-=TP^7EqW2)XQRjDh_g#}WP6jV8do&K68eDgHwn1?#d#TpAy zqm$6>;U>zC)YneXz4cK8ILSElmyXO?xk*R!CH&r0Z#txR<`5@i|07FOD)@DyH8+#| z;ug9T$96QA-qvW?`TAgkP}&?QqSW+sLw7I0)TF)cXyS5NfRC?l+mD1Px^i1Em+?|t z8f#e%B1OWtP|LZ|9R4ZdQDW5mh-5c9zF;V4%SsPH0z{~g z?~yQVA4a`ajQ=u7O5y0Yq;rlV(8vPHgylyCym!vnRhjd^F=yRErr@ondI%Br7s32siBizkFbdYN zv_r#+{b$rqb+1CA_(fyrRL3Siqlgk7Yr=q=SmuJx%uvhdyPORZG{S*re_Cfl?&yMO zzeJ77-M2fkiV>(vPcd^-W=t1kCkfp3qE85vV|iiVX~*F)jdT9iQ#2P#$wQ zc@yppiyflE(udtf#K&LQ1gyuzg9$yyO!;(W-boRETocp&x-8OyU1KPda%jXZXGApe(D7m2 z_to+JRD3i;ur57{78&OTH9J(1If`FNl`GNqUW377yhzf_d@EVtFXiGxD>@iWTrIqn zozSFgb*N`4t3Bi_Y}w7tMjMI>;d5bBs2d(z5`>yC$1KI{qh?mr!!R}R0JYaqZAu0~ zqq4LrZcxKniVPMeqqAi|6W}V~#I{DTK5tX=M9uBUeOQeZcbk1vqWwy@84gZOPnJE5 zASHe6YXj{6{LmD=9!;rAB~ZwCW7a;$Y>t*MAqJzak6@@*+Mua)uYOn2=!}`mWdkSw z&BQ55=eB{|Y`os}EoLpfrBQE%rc}x0e{A00^~=c}-=pEmH(qcgAIt~xpkbteU)v@W zlcHF7`$Won*P-X3^S~B~g<^$9;7euNP1i&n$6S9Iq_v4XN@&mXl=Mw~k~Gns}5pMU@?FzJazz;JTV%uEmq#c6_=+&7=rImym2@fWvbF@P z**pnH+bN@nB%5898F0Z`TN}WccwU*ROfS-H|6J+N=g>2i4+`TYvd)l} z^=dPFqh9;0S2l>pv0FO*8kG?UGBB~HD+8NCOs%>w%u@{U=-Y_uSUowhM{DD=(B%pp zy@+JRxr|Ss!?|CndszEkPDm9#3?{Gi_WxhMALnsqqMdU@?nXFUi|8hw-H$f!aD`=u z(995)*l^CdERcZQNB9Tbt$!|}Yg6WpljNEH}cYZGWXG&P*h*%1^ z7F~B18SIwHS!eIPK;~LqVlhw%Oz@A9inqMAw=v#vWR`;@a(}#_MWG}Rk8!$y@A`sQ z2Lfy*&E||^FyIm>iGEnBVm0gHbg;sz%zpwH{X53Iu7_!2DPC8M@>*|XkNk!Z7!Mt} z51`^#Kf+i|<8x&68dZ4#750Dx-d^t-L0uZp`0@bl9Sp1vq!A)!1qrBs^?FJYwYCd>qdbrp zDE3Zt8wdJ8q)*PsJ>dG48PeRY4S)xeTY(+`WTqn4Pc%yakLlawlxNL(RhLrO&NK~Z*XDWnnA&`&`JLOFhVeKkJ8 zZ#CwvH5*=%n6p-$wd=jyfeYX9=rLcLaHg#f-KXab3xaRRX$xBeL5+CZz zFF>D!p>Ii(sc}0Ny?H`blGBMQd{ZncFtwnm@$Km9 zmcLHi6LeJT&!3{Tw+5>9Y}0{YwofwJ-4eZ~d{3eG)_b0;crRLD-7#rc|G#5r;6vjy zc)W;4jdcD2x2|r0{;J(=|+*79-&mep)eMe z7&KfKYD3*lS%=r8whgD5rhknB6x^@iJs{BsGkO%+$6&KEM!n3eLqYLkZ|vg``nwn+ zq7#8UgnZdaa@HH(a{b&^S#?tAvf;nd>rHS_J-`ikV)8b#-*>IbDh za#|hH8EYAXi2GF6H|)CY#B+3ILAmy977eu&lbk%0 z)sO~IXw7r^CWzVQ{SWE^i@Yl}ZpEf*=Hshw6&DLR8!L`A;HQD{34m~a>sSzr5GRFc zjudAQ>YQnXAq3$k)f*x#C}UeJ^arEoM8b=jy?)o 
z2B9ug=y|)s9ow=2ruBZ?^ui7TY22qiY9~?iB{Kh%6$0n*b5|cq!-ytgNzPuT;wD$i zFbMJ~`tRw$C&!)Z+#Tzq4vuG?qr*PnwnX%s$*U0m+B0Mg5WV(pO)k;<}hr zzPcxD$q{q4`c3wHx1JEFx!#jdnBy-*pF{O}zsR``6TCnm3-?3*NkNGpXs1L>D%^4d zCr7;7isH?M8~F7yl;>#rUGSa3o0JAtS`gg$|EW;oR<-BL|7DdZD!t-ROIM13K1uF5 zKd@+$L1ln?2GoXfs1w}Z|DF2yTXdJ*v9${Wg9sZ#R;v6JQfSQbMLd4AY3&+!<%XIoONAb^bqQC3re!w-?Am z7}kvKsr{}c25tz>Bt z%ckk#Gju-mclzG#^Ij@klg9GH1<}_V1=wV6Ey<0EDq1s?T?a}+z=srh!)bw_>6dEe za;6OV&duFg575T0Y*fRTE0zC@e6Rw2yWg7X zzexxXj|kQOJgy?DqZib@!!T=G#wgYfquE8o-$<+C$o5HYg}^+d{|6&uEk1W1yWW(# z9|eP!bNOtx!x10!0zl%d&1E+N^Tu4i1#{tKVJ8Ryv+=^hbF%u2Q-Y zrjf>2sl$=~7zk9H2p&NZejhj+Ypbtq$>bC0KLUk!yn}o28jA^IAc4Yi4N(*hRXz@$zMLw_T9v04p2*Ue~$&t zX<9Z3J9@%@9n-Sh&?uZVxFn2Y;1eDw#FJp>7sep@2^(1~mHTE+oWHapE9aHwvlhHt z0-^0zI}Jsr8QW)9v$)f>&TyZ-zCOkwUinjE(7p~wZ1gLS6y#&6yO&_ zcmJKl+ROMr5NWBOre;mDcfL)*izcEGiR-ceY>?{qC!XS!EQ= zxFpIpiuJubvqkJG2B)!t$s>reFh3S3!>!S?5MdjH!z5F(RE&)_ZPwH>$~FvDJt)65 z++v)04We`1Bnd21DQITmQ~xeJmQqAnYUEvvp?h}cq2;Gxh}xvG7_bs!s)7h&&nW>t z`GTK{9YPK%MCLoGlSHsYzy(@)T6`qsK;W&%zJGybZF3K(Zr$3 z&xqrFUb;#=lO;__Da0>YR+0Uqz!j|C4)Sr=5J(mmS@WmxRB*tI(g(4oE+h!BIdu;} z#{LPq+XZfHNpmbp4>s^M96tu-&~ebYV+OG5>A;?vIo42H>FpFx0aQa@+h&*dGt!g# zZz^w8El^g~RD^Dz@W%E^*!bZ@9J=Euu=KIb9R=id+z0wM7$*uvCM5;JW)KB=Lnukr*K7DxojbQ6E?93Q>2p!YX&8y)(4bU3;$1ZRY109?$9ty<0_f1 z3OC#CwJp- zl6FDQJ(9fkgVG3le)?nbvV0sDJeSe==8T5ddcfdPN68u&UiDq{Ke20kL+$gGa)bZB z&EP@$rAXuVd^E&zZDZ@H(!04qS$ztEDuUKjEGfa|c6 zCw~TKRZB^WYcJ)>8hD`0Q+D1@x6Im+;o0xfr2tv4JJ<7ECz&d}B`rLX>&K<#cO|L{ z4Po3`q;-XRTzBPb)isYpghQpeOCTwUkVvK)GznXPRZpVX`WuJcx zK}2nOS9X3#?SIs?2zwDHy>@c(k%m_<30kEJ;6h1A-SrfR^gY`N^wfX`58=8G^Y=CH zERB1*?LBe*(R634HH_90W{5L@nrQvK22dX-pp_Tt4O$#edRm*d%icJEY}L?%3a6=R z!?jeykiN8>9!gz1Tz;L~aE6!+?R>PnFnv%kf14cm?5rhN23Wtmrv8_Kf&3%xIQ0{R z51f2Q0GN^!Zi5_xEo4uySVhzq$J}P_f#jH*u;uTaJt#g&~ zAp5*BHI8Dmo?1^jf*!`Tz<iiC!3!%eF{EjAeM zpG9zFd%nalGw=r6X_2oqkw5Yrs_AokpTVJWO_w*$bMDjV>RJMvmVcD ztjr;Jvf-ci_|W>Td2GzBh8-``4$$G*SQU90V9hY9X}##V==lINOZ1#2atZ(pzAldkArYr=Z%^NY5b< 
zgXNMzuc11R&-JDsOY;92;w&4$qj$|=urXF7RFO1jxqC9-h%O^AA<@ut^+V zFF+E?!J1>wF*1Uxk*1%gi4DP-SUMGB1(P}zg4%MFs{&+Q2I-e`!}uj?BRYEV@*SGq zq+BTVDzC*YU`|a|!0!K!LPDXSbxze^6steV8Au{zK$GuI*={pdBx)0MtIn!4I_s~n zeEMlthRNW6Z~7A#E9e$2|FN~x4@!FrI$*R$0Sb>UnR4hwDVWC)jkD${R{Kf9xO_E< zHQb>xeqJ0U6q7+$lV^Op}7xIpT)du*z(YD6Re4+gFHG#izeUQQD zdCs7bmPO7is@x7D4Ec{_?7-(r2{`IUs8S{2l0w3VFs2I!WoIUf=b0g&liKGy@>+$w zsCV5!<5=lLAnclF8iw&@?uh<{np|RV7E83wYU%Gdy#?R7$;u-Qe&TEU|G9(==z&dY zTL2@DWb(J9GxGWj)7F^Cyl5M=0wlq{hu`Vlh ziU{|Gb%nB3Pg(qaXs|A2D2nJ<`hx?{jCdya3rFpFKM!;C*hEtS zH|Ge9@imF-B3n=`la{!HuVJk&-1*PUML z`^II!SB`xfVIZ8+mL}m$%v0%nyIhSAvc)^MuYv9Msq- z8s5|*jyP)yx>rN|*VB$-w^Y(qryEh6G8OYmk+$scgV;OU6L4gKv4m-nd+pdC_G^#G z0)l}5j5qIjL?C$tYg2jun6pnt@KB>8vA-ciJk(PqCL`?tS36GIfa*Pv3S=fHbl8VO z97)%)-ds)17OOmP%~Re-WqGJvWOKy^6B8LsnfcIUOb`g&Ogtg_?t#SBt<1x=hTp>b zFe=`ugVCPLLjW`#+Snt!O9A^SSFLX2uJBk!Ph!ToHq|pCGGAGGomIE(O{e zr2&fTr+|wA_JEZK^@E*A_dwR3F|TGeWyS$U+10bt!Syev zeSh9!455uXW%Hi(xisqeq)5~Cnp1}1e?=%D%)&+gRQAeuVsnv@A}i5yCku?F>$xv^ zA~_8z`TgU^d2Mc^t)8L7X>yk^wnNiyffTuPxPmkjlg!;3=%9UNkIELcy)MLwYRNlz zBL2ePW}c|zLvIfDI9m_k!G_z&^fICBEFG~^PcRAZN4xNq3jVZ7_?aOfyB~aE9^=R3 z?sJ3iT-CBj>{OK#Wgf46YI@eg(vi?x5P&fZp-(kyXeI&EHvl*_OKV)D0}Z>KaR2wD z4FeHeqbT3pq@|~vA1SIo9NR@|L7{hNG%HsE#gm+(Sdd<16{Ii^AX|{8!@z{vVtjy< zxr}0Li>WAg(A*ag2n9rr%SI}9t2RpI5YeP`rhTAE6A0soJkI?99Cu*ZZNB^R_XeSmGv@fPZiDvu6C(P8& zTKD(!;LM7V>rV)qgv~J3J;P*0IfI{+t4mz#paHw5f4PZ-uUCN*N49$3$18iLUI|P( zVoI{sS7S$%aG&w%lwcgnL#rj_LQ?&L3Bb*O(6x=0w17o42eU0q5A30+#gIy74zOaf z6%2Oa8if> zq-HEKGP>xhyHc1SzHK}w`)c)5W1uPrXD(zEjdvN51%pOO*Mgbt5UbBrQAVqLULC0g zT{N5nJ0;5lR@59rIR_g%C-YBv(Fa4f(bl#gg6Otz&J#1x=R*y#ww0GWkBb^fD{xh6 z%`h}p@#HdPZ>oW99jtUF(@In1<(k0wQt(e2-ipTYf#fYTec@f=tVEa_7wCX#&a*__Vq_3{yh~vO{su5MkMB|T3}lwR?qaFWt4d4AjhhZ#Fy;FA)L)0( zB3JxR=fI&tT6frw75fi}v~Ejb6Nx8;L=&`q`I@nxXqSq7OsJeb1(1?kM32SrhKs;v z6eh`;Fn9~p3nNLt#yBT^aFRR=(F|OI_RBxDPQ&+Y+DeN-&hbmsoyaobN79uHXGehZ zPqdW?%0{AOI97&8EZpULt>v$NM`&#b*?K!8HZx5JLjxTpw1RhtPv@IQLh7_??HOKy 
zmjSHjM~p8|3VtmF2z~@9zQ4yRZYD2a%q7O|#?Uq$)XQ%{S2o@#Qz(7Vxt2b|=8LQ? zQZy5B(o7p6#z2TUNDweR55m%j;hxq-s9vTu=K=^jBnZo(xBhPm8@yo>phSnMW&yM# zxFbxnUCzjJrR6%qI878lM303LQ^#y1J;SAB*Zt1rPUM0Cyck01HTz{Xf}Yw^FGgh6 ze4@|swkLL(C}f+c7gcmq$`gbvO`O|UVK<_l=~6V2nD}X{6_HAmqN%{qU%ehJDu##~ z!8nQF$99i@gx9dvFA5JI>&QMi$?%G!+X?uj8Fh4G$r;aJZiwWb0m=```RbxnuKz({ zNKaDHt>py0ed=ZpV@(VeJFc)}QzXQcKTGZGql#{II0gC-ysWB_KY-i#Ul zPTqZ~7*p}>I}=vf^i11PGLcU``zPPMejl3OS^g8+iuTJ@lyPEn3&l-v&_^Q|Bo%o~ zJ=+8({=h2uxDk{*0DDjUfQ7ulFWWLU+HdVQ{DQheg~pw(@}*YOMrpeFFc5E4_a&E~ zShpNusm)A})ARc_S5UV(-JlBFF6ev4Avgue|Cm45>l^FMsyqcGSjPZTZG@uVSv7W7 z8oO|BJ61q($AXYMvm{D8W$BNhn&yIM=qOm6l_hY>A?Um?t9y}D{FGeQHpQPvIcIur zrPaJNI0%Om{iEBV62%$IXc~~|S39HziL5Xqi4bEhs{kZRD~I=@^nO=axw^w~tJL5g z){z%#F}A9U*JO(9K&3@_GRf;_oLp!y*{k5$M~zDpl)U zc!C#AnX9^?4{{BRy4=$F3Wc!+ztdMQ2Y3t@6q!LvGnmwGVzhQQG=(Wq6^tJ>7U8m` zq~)EYE9XTao}!F)Fv3_N2V<924tdc!ssB{(T}44Z$mB)PN33RlO2OoySx-AFJyEw->4rQw4N{^ z{E=5kvy{9tt~`cdKa~?`L#u8gL)SBM@4zZ?aqh#yg)U|KWM^-tqzYH@P$F z2!1e>9O^AYboCFO;?@#=iEW^-DsQt6?tzXcZYGav-EXJyfyd_C4P3Jd(%TRR--B<; z7=*x>%Oa+mTL#okWu(AzE+PEEhs2@Hyr?@9afV0hx5ZIeuMQ?&AxnP;h-IWL8lBm` zhOI(W;_}=g=94MazUY;pQ)j{-`< z=Y+x4%*?|2>T~u803D=LT{Lb6j-O`2_CgSa9#0rPi9#8!gd->Vx&o7XI0vf8`=D-4 zq!qPC9sl2oyFA>QTep9{1oh8340KG44C{*Tmq&_z;mMkFjM0&WM4di;CiowKhwQ`! z&@@3W$tXKAbno(Wv0b^59SkutWJ&dJHO^%e%L!+zf`zTDhia;0mR=B4s>{pBK8|C1 zx{{!VKGOR7!zN~Hh(_=%unY>aVa=NbIibXA1Dw@8rR(wVf7zEi4U`)nJcggQU6i(ggZR*wLrpxOpDX-Y*W? zSC5k}Z)d;Cjk(6H0Ft7tZqdq61^uwhtzr4I4L!A{QM3&75Q>CqMK8M_3i;qB*KuJC zWxmu|-^PIsZVS9n10Y*Vg|3|BOKAC zP-X0%n7wob`=cC;UnXC5;00L}S#S>QM%v}|7pDj~764*sRU^w~&$b2g;JEw7xDjRY zG~|ZgRsdJM*njR+n&fp~#P=Y)I`c%LYa0uV-yb{$K^6lsd4dsC%MFcC$gx=aLz@A? 
z@u{~!pW}2p^vd43Lg*5N;6&y^<;(CbX7t*+!VgMA9eNa)Uc-@<4%J`*FVha-p41!u zwZGgYxQf}U3AuUXfSQ}f(|RTxvQRV-YVZD&=J_1$^h=Ijp6=h%>E?`7TYG_&H%1I2 zuKrA9i_5ynj7G%|os94_3_U31n^QB)#NV{m?$kO#3zCYE`g>43MixDdb}z;$ecsZ{#U|i6^^gx+AyzSrCGc z7o?BJcLPp4)mMQsXgWZNqFy#=(Y9Gbnn=p&*skvxVbcjd(z;A*mhP!CrHBB3;w@SF z+8VxKOG_*-IAvodXK2xsFao#e5z5iy3?`itNJ|E)RrEea6{z!ujT|yDii68Nw`)$| zc5vwrN!nB-t{Ug4Nwm!MdPTVPw<{3SHx3ordURLEFz6Z?3+~82&^KhG>C)WOiF7CAe|rp+Roc`l^e% zA~$=IC@U@~sZ-EY+3p#;Bt(QfbK9|DudLiHK2fU=o|~PD6+4% zB~M02g&N{~P66n@fnwoL1If-c?o0GRUfy|m3)$*MO1)8rJG-x6 z<9f&1TLtK*pikQ*{91)&8NCc%|NANwVXpR}jq_LKd8VnrRBp#TydyHZ{l!Y~H|%VE zu2Em-PqKNe4o4ZS`1Yz+u*DFo_RgZ)$!->#WCm%VhuJnLM!|5Q^cyd935CN48}mPM z+rWq5uY(@poZd(X-gqS+zk(0$Rd2W3pDocus}F#1$M(g?QC4}vqmBg@(kGUYU!zBu zxL)@c?Eki#t1 zBw54}e;I;;dA!!5(b_q{wJwi!eMGux^u2Pw@b(Pp8EJt7u{mI91l?fX7xs1gFjiP$ zGqzaYpIRNkroxefukDFvSv3!P&L?r7X<`Y~ApZ@6HA#f%YQ#gd0SNN1iEeD@G;pxh zJYzk#+4&?JS=01p=##2OxO6k$-#Co`4PvJI^&y|HPB=!=49yR4pC?-`6pP38!zM4l zJ<#?9)jg?pQz#}kGej4#P1ri?Wy3$W-Cyj1o3J>}^4@ZOK|%b_BbXg`0VDd)bQh@V z>_l66kUuANP$=4>@K{Au(wS}x+iS$}h-?ru8&;X2b(hiEtZVR8?92@-K@Hr%A2Qo(KVUKRB_(;Sc1qu8;pH+# z(WKxc95`FRPaqtjx?dqBzJTkhFH=FzRu&2U z;|6(OOIDlxC&x;dVPX25Cys%JR!(1im|Y8(NH$glFJ0Y9roB~T!zBIoHBw2=Sfr=N zm4c>}r~Ak5N(8j=a71YXgR%8#IUU0?QD{d+6FokokB)Gyw@IO@_GTEqZw!X<=|)PY zuRV@;zjQ)Aw1V3W#t~uR)VxeO4@z^=1q8!H;#OPD@|N^c0Qv4i24nVfiwpQ(s-zt( z`4E?6$-Hv9u>M?KCJh=d91r2z&IvTEQ~yT!3uT!;IWE)U|B#48hcl7~2ANHM@yJ1& z!a`o~+v^a?b$G8!WF92Q)F0X;{uGry-pe9X&=! 
zFYbtYt4vj?tVkt^5W6WM!Ml4)B^vD5PT(M|r~ea6cKs}s$1lO10bXDrGDRBf_0k18}E2sgiHQEysj+0uE`QA##xyz;r#Wb`ksj-TB+82`-0{3!QT(s*~G6!8` zBE)Fpuv}X<2qOgd^VS1|PnbA&|3Z%fZ1HIX;7-@R*G~xKNvFwuyIl_6@_J;3v1SOF z&gEs_HDz|c^Hx>5OhzE1&K^6c&bzy!GqDiyjpI{#Mm_KG>L{<{Fbaxg2vm}+TEMrG zDqzbKD#EOTRfRDS0^4&ARxY`wZ&!E2Fjkr0A7hrxE5T3Za@)rzd@GKs*m`0t;Dc2x z2N5IE=m+A|Da>T??q%dbjE$UnXOSsG$;|4u^KJf|4r;_1o_zgk`SSj+#+|%#l^~!~ z=Gw$|=idn%94z3w&dB0~pvt2UkTHRDl6rhB5O*Nb%&WCDrA{%wa^g_NQ05PS_Dv* zQ1FAX{~t-HiEOlmA6(Q%V7(7&%ti-Ad8*1!x>)PZp-yBgaoL&dT!N-W(Q|g2w*5VB}STI=(FhOdxWinw zS3c;6M#SD)w3P$RlzO1J#_(HREacj4728c~`sh<3a}zb^AbC=}7GvwNW&fUUOns`z zX#R?FWO{LHc5`6MLgHG0M7Wnsot{R?`6vh$Jl-FDK8v|@h{J=X2j@kj0v~UKr5!7i zYvWu7CVeh@iN=GxSxU5efk-1sG zC8}QZ7;|qF`k#aC>ksD0ORQBnC@?ney_x>4Q>I7%E3o+pEuzTnfT+x>!$!cU#)s79 zV<4CGbZ<6)bi%+?SA@bk7T%kvni~KXBUv@M8BpU+w+3->9-*fO5Qx1?)#VQIPCIK!~Wo=VxOV`bE|l)qFUkf^%VAHt$kv4$N`XAm*{{-O~ z%nP?f;s2*yYkE@MuGYe*Xcv7yBb2z_pU9!r0HnTV7)BT_b5(R-B*cBBcp{IwpN6(r>?c%!-SYH` zXJ#c?WXw`X6bC(sGy}J?vQos3Z7x%J?PhxL@t9D6VCwh2o+Ci3&qk$~NiHMkaG}kM zE@m7#602$h4?+s4m!IByKAc3(E zq$4YT1!|%DR*d2Ld}507VxH`a#>N!d$SR>;!iK%aO*Rb;#fN#*x&=aC4$rq<2q&0< zlqE^s4pFw+$wE2M&l;>nRDCVmB^lAcgtW~0b3#^|?mIFl?Jxdw(4Gl+%yE$OBXJ6g z`n+3vREStwguQ2n>z(40=!94#ez^*V5LxsSBiPjeH_M?h-?!9c)}-$fHi3*Uj{OUs z808E=vD9n(lSMbH=VebH{IRnzu=GvZ%J)!?%*BKz%{J*$zF>e0Oxe(pZ9^;3F>Es3 zN!sotmoj#_gxoI9!4TfCeEndXFsS6#BfM>f%zs>8lBV49NX5F0>Bfd{sshhkWO`II z43?~+_80-$#qaF}f6qG_H0x-$Le#QqA#^Mu><}VSDkxL?`8xbC#NWpG*@5ud#)-CQ&70a*d_W-i?>O^f4!Dt@gi%^I>8U`BHyjqXyL&|B@ewROrZkO$aE3+z4}XDxux(;lt3T@HTcb`cysFMm zc8Kx_e99T=DH0o*RqELHnQWLcP~P$c4~@u1S;qXuxb}*Njd=s+Q+Y6%BT$fW%T|<6 z5}W>)mF@WN0n(6S7FOV1U{PuyXtCedu36x}{8xGcGOV*5`10|efE49`=+1-9a4`^i z>vQH|yeH{A3_0?uSB)=eqr+xxU&^3V%wF!`)e}$4MgSMT#!_L>2(^<&Av!2qo*62v z%cC11(vp;2I0L+AZc5tImVwGg6H(4@~HIhNQ>7xIm<@W3|y#ts3Pnul%*3soH zcG*fbK5^-JUjitm$EpxWC%|%lgm05x{bgF5{W!a1tQ18LSIT8`40F`F+wnJ~iWJfn zUS+5TM(a1&7EOC6N5lEEWB~A8TmEwqo7jU4EwvKqWu*m2Yd*+pl8jux_pEal_=G@- zBXzl7@t13DXe+8=I+DSmzOqLqH-yU1qO=+AWo#6wb&19A>!nrH(pv*(;roPT>(N+7 
zJsDA-s_MNxeSHY~7Xj{TB?$dUfYXm5_O-jkGaD9sY`V6AhMI9O`z{ZUaCfU&HP{u5>L`?jBuuwXfmBdu5r!WRH5Uk=4T_RQE|$P-Xn-%H*M zp;v^YQTn&8Ya69KPA4sBuz@h|PA2>=-ap5k5Z|%a&r54+lU&+lR~FOTCz=wW<>#ti z8t%P&TUEvpGa8<0RSw?^#Kn zni_9;m)W%Z#vF*DY{$&bUwC_=1Z)D`sjK>RfV>=5V|RijC7egXx{N`qO--d6SPY|f z-l&=_7n0EH-adN@05U%GM5d|2F}dbv$KL{VXi zndcf!>TOR~yA2eLCXn~G_PO|Fid&pYg`HsV4qdQX62iM6P{d911)={%IRi4~?WGcm zS^NM#O3=ql4PHoIUAcN=)z)P)Yy@EFTvri+YkLD}N+kgm!hP0&`~c>rS)u5Eh|Uie zKj0ozoEPEqzeeDZTi+8mxMZMM0OdAo9l4LXZjt$&YwIa$2dsIjo8rK=A5}|wjj+Ze zEw>h0s-D`IB!EoFZWS!O%hhkR)z83Gbe1A>U6X0sFxD!l8(@DRP7I2u6P)I!yoj1ALnU?MaAg_6rG4cn# zLEZbLBI0DRIiwTy*W)6xicr{{xV}wO?U6Ji&naIdY1vP6OqlMw(g&gSg7_{yjK>V% z8EFQCkf^titw7=sMDC3Jldom*VI9h_9aqrwWA~N_9e)e|(#+zc;a(1rC6KU`Y%r?b zVH$q?o>pc!*upKgsoL|!+H)3485j%h`~fI{InwGw{XKE3RK6_wv!UQ9tjzil{7Ts0aCkA^fn787|{C?)Mk0 zCEc`EgYKvosx}jg>eLdF%>YO%_Cc~P{n7}?4w}eXCqkF3^VQ_JU*g*8=_R5?8&GNO zuI#nrDur3G=HCbpZn$HoJY}Ew9FEKsXU9d@FvG3AG25&=jkAJnd^6L4+RA(|!TSic zv2oELAfbF>K!AZFF-qQ&3?~|i<1%~byGuHpN);9~yZ8*i3X2=-HXkkgvanB&?CK<% z;eXM8_2^{OX5bepYD{N2J{^dF3@%c{1%EXP{Hm{D@O)D|CXLD=Cr5==2Lh31NfuGh zVWW+*EDd>fvRc@@FtQFe*rnDS#!x9$H}8~L68_>5NI*K9jUVWZ-=WCQ{wm*B^o$%`|5W!0G4 zq!j`R1AGF$awu5jJ3xW;5UcC6>)OCNF#t7fSlW95(_p1cpxCqM0z|WJhvQ+12|jGO z>gDgrRLIx^MwMlktyd7+n>AH8_X4q|CQ`Fbo~&aUZowAraNGFU0I;EK-W$L$(uVHT z=rqCSG)SWX6p3V-F-j@xKp|?tJ$*MvVMNCZk%D{}zK=3F04};GIQGMKO9nfX@zw9J z8!eY={CbA(fR}KQ4NTmk81U;6XF`WZy21Qf2{RhJlgs^(_{TkksLQ9Ig)wRVDwul# z>fK4%rckgteJkcneYE~TPE4s#vSn$To9DraSp6_?xERv4fh6jv7Xk=;Q=SYy$Ye;(q|82>~*OHnnCA!_|FL@wsRoQyI zBwOHnK|}_r4O1Jgf%c$c=xWenZEkN)4q69oC?bB6&A#BIj82A=i|`)Oo-b%Q*}aA9 z2^ded>ymBfJeh&()QRB275mkhL(;o|=$LsLl zQ8=0mGr{Lx#YwP8{;9)HlAW_fx49?T!eyNO_p~?Rg>3qA8+Uxm)D_ivxzJSqB%H-Y z3D0bp5Wq>zBc8{Aeyw_Ks`92bBftx$dcMdln#AYq6X(|uhS<5OdU=F5h~ z@UBPHVD)pPGr=l{7abytQS9uxQ$BAx|A-68v-~;Y$+r%do=EH}?d0SUQq=&(FxWEr z-pp~aJ`L5^>8bHIhe*9&4vXvw0Kv>}T3+WVh((#($v*@9MqF5tauq>D^Q?*q7@)VL z>nZsi00CFvC9F=P5EX7mMZg)cGg_AFfQNjo)7}y1j`Z-KMdtiO1$b=6#+@_WK0?G- 
zhXrwvk`A1Bg{cfjnr0OoCC&n0#MP|3?$9LTRG)8=8oFSKgI^6dNrS&!Yb7gRh!?nU zKxC4!Shk|V>bNB3IeIPN28)=*!O9gu7En*fq{Lf2=l@Z)C$BKgy-G}xU)dt@_)A&* ze)6w%Pf?o)X4xGjUFZHfDR;P6TFVD^euUyYE?&=BYhS+fP~2wL6DVfnEG9E|+vmrC zk^-T!>p1tyZ(rI73BmX?w z^)k>X6ZtbQi9~-5wP5LONmknDr}16Ct0Gky*eV<-ch4p1zaa?b<9ft;!5NUict~-T zMS^n!)DeB~qZ(FUC{~u`IsmRGPV)pTEaUx=l9Zv4xQ62NTp|>$q!&ZTBxO-+gQ|vj znMp@cen!^t6zg6NdB&!qbD@YB>j8r|usaCHl9CSR4k-3+KaqA8OCy=MxHL~*zOVP< zB5sYC#t@-HJq})i-F5&_&o^S?D{;)nXUcwi>#7Oknk>bjn_Z|c?=#sw#TVN_H2#i; zOcb`Tp~W%#ktJ7?1$*bQv0t@vk2CZ4n2!Qb?mS2B8-kXGbrk;9EUcZ$+(kD@Fe;DK zNgVeWA-@+A!r={7-D-D(};lw|)8{zEq zCfF&gE(T!kXPI`BX%p3nSRL?zoVbz+BrG5mW+*_AJ9{uR)cLR;Ow`ic2eMht@3mY0 z@oHCYMj(d)=1Ye@S>{*n;-Ix&Y|=Hy=CuKG(@`d{%|V{gBf{>pa;`T4Op}8kT59Z0 z0Q`|3HIRsq9u+N>B-e2lM~2HEu>tJ9nlyS+GT;|!WY@6Juz#GBI$f1qfTG-WA#2&x zpUj>95YvSK6*XQx{=_g%C!uG^={V(7Y>++oSOqc02SUo1Iqy5Lk4F@c51x3I(YS7Wg89zaHG^KJoWA)~?B zeAu6OLY%Az^nFeg2;W#!)IejF=pa|1L}FV@-|h2`zpG2$5K6PVwyFK0@-kx+6|Ius zKP&NDf&Vn%L0~8biojJ|lzBplji=u}Beks({zo=3S`to_y6;^O7SeiEf%X?M+y{T0 zY6w%9OR_l2Q6k~_D)y^)alxUa8!>abEsDJ?o_jU-$ePAdzqUvRx01i~@qjCeOP|0S zW!r!`m6&S5^$RT3?CCI&FIl&{c$(~qa(~IKUNYhSyBU)08jZXrOotMyW$6b8Pn%lt z=mew6?)JtO&%9#7DlQt+S3`B8oRM$vh(IKXcfno5*Y6g$cE|rC#->(}Bu{}i&or+L zCAd{rm9j#u(mkFHU!Uqb7h_zILsHxII^WDWzwEk05>cHWnZF^#gi%q62uLu6WJMrr z*dNH#++Q5E<_YfQf?N9y0n4FRL6-~i08;JouSV2)%uZ|ah{{7cqHErJV`~PjO(KJt zq{u2j6V}p+aVrL&G;|p>?+Ar9hi0_axsZ;POF*QJ!aLLiBrD&t__@e;Wj{vmQqsLd zolOxv6H|tRM?F0;hI!oipA-hB_8aY(BE!J9>B?dJ zd`23A*-c}N@tKKT^*mSh3?$)Y0P7J5QirRf!kQg|BN~7S>NT+Vz}fU%k&mL~Ys#v` z==}VVG!ZXRp})#jd_htg&vZWFuAdNR=!lA{(`)a`5=of|kD37XDGRa8Eh@_TtaHoY4{ z4P2s(0y=E8zd)<308l+^no1heL~?f~^f)3HDcT|z(kTu>d8``FEgFdwsF9eVJ`SSn zqDTIk0*6ipn%vLI5u>OD|1}^6j8R=kEHh$w!vRR z9kaYwq13U0{xv9l+Bt%YG|w9AeH`vc^&9h!P@`cC?8uO_Yn1F z6_KPV+1|p`Ic*n}iAi?na42m8XH*Qt^ZT`Ra(YZ!b41CH@YJfCS`-~r!f93CsB^E+ zqVoR1gy1#aDNJI&AiW|)f9Cg_e$uB?H@E?crPa*-%YC>+t`h#ZNAU( z$8?f4I8fQcsC|%3EVa=7dB$o;uz2kV)m;wzo%{B`VGt>EucPD={M#>#0>m992aheL 
zQ-(lKhKXR?Lf$5=hS2L9j9pIF;aE)6NMHsKKj`xf%};FF&)MuILJ7|vw1NgP*#ViV zKvu>3I%@|df=`=S>KNcQWD!iVaVV77h%Fz+TGTf_Pf5*pig2G}(#E6mZFV!9q0i07 zwdOe(!t8PkOhC)67+ndWOK7a~ap~WxgrRB@`hz5k2XBaJ54E+O{*o7w`}W!pY?4tq zBiUBW29A>;fnXBI96zS1)S!`-vkvv&(M_+8$6BRmGV5q98N6bFA(R`Pe@=C~P%t~p z0K0}2gGj*z`S?AK7PjWKu|?!Kk7eI`re!xJ2aF zpd*d%MrR_87kA<6$HNEVLLbb8Fzr^AZCT{~bBW564`%iJOL#E-FuICL$GB5=?E#XR zcweWp%=m)sgC!b{tWDZs&I^^w?#@(oVhhc+1#J*<_Awi6sVVbF)E^r+djyP=Z;kCNK4P=f-Cvu=}X*%fiM9(k>xBlcsxQDPI&VAHd~ozH9SRoBlI~*wKpm` zhV|wjijpt_VIFe~3AmUFIRCGWv2;#F>xLMetm1$`5eSf2k<)OaDwhQo$T@OqTQ3l{ zh@6KJrlB+hdXrUuoih3WC_i{*jtgTMZ#aCh6s_6TzvXhGY9+=dqdYqj-9yV9d|X6m*rgn~ z@|le;DA=&O&lIUJ~Xery?i$a^{g?A0;@{0{|jz3gAQV5lqC%G8tApiDYF7%i_tbR=C<3lb#c-A{U%Y8u8lv#NT{{rFKGFi*7&-(RjA{4 zUfHGcMJ3G^{gpm8sc#~jb3Pj48&d*(yg}c5w-}~XLfSOd=S_Yi+n?r|D#P$*)*VF_ zjR9K<)+javMNnVl@96W+J59AF!1o7yZD>>YP{ zXdhOV`QDsv)jhP$sl^aqAhJ&pPWBE(8hbozy%H%dOdK$@IWb3|jF9Qs#t-3n@k<~b z(i%H*5D@eErs;l89OwaNZl-zLRBj#T88 zBr&ohm3?K}ka6VRwm#7aDo(4`xc)hzuqfW;Z$EoQ-sMifSvQ9hOx2vxQ6KZPjVBg22Can~twf1vI8iHGxp37>aar5nAu|bpR0eGPl-zs$Brz?1WID&6= z2{Srp7h!ow|Hgi8@n-Vn87iYZRbFTC4SMl&QtNB)gLqec2Q1Q`|5+6otOxHwIFn@# z-!mYq%Slq9a8WIN9BEt1c2o#fDY!8 z|4kkvHLE1jDV#*UWM{!@v?J;5sMK!n$B>o90vr%?@u@l%z(oFXE*;*hpuQ-bzIt|D zCHd+q-Y;9E6<^Q{jM2=yEvu?;BzM1x_QOvIX!iz(rm{}=z7IE$=~-OPP0=6D@@#Mu z&;~fIIbSpy;8HvtSx9Etg4FySa~g%p8t^?Nq<|LuhVr+cdZ4~v+Z?Z#9Y1shYeh>P zZ&RPPfNqmXArE$7J3p3YkjG!*7M(&?YVNGtd^ovYH2I7(Qvn_e40JuqrcYbls10k z3^$exgHTzwPS;+#hsj6)+TO8ca_wCOo6N$U28*hFQ1!w1p`h%RD;(^E#%WtAuuLi` zg!*+K*^qFzj=QkJEZ}O92T)%*@ftc-0NFn=aG315V1fBn0A4_$zks415HK|eZ)_=8 z<^RbcVD0fYX}=#H08lbSfvp67d5^fg9*_6wkXK`HTISADPG6OI?o{wZ@0(_2XHx?L@Y&NrefJ)X(s zPLy9v)lH^s3EUeH(5+d^!Z)m&!?MKW{ArOO+k>KX#W8?s$tF35wA@~x*>Sl`M2l*jC9{u7RS#fD5CJt!*iS2>gMpl$U??xwdZ0p#Fh5twEuH_d0(ZxuV20wW z!WI6j*%?gsRzgOTWamXnCA3Jx9S8GffGJSlj zJ`n*`_QP9Wc|bf{1Z+*=(%-0#0y5o6`ZVO^E``}Uev+Fsfxm9vJ5!vpBp2rD9%@HY z)$OSaGq+m8-gYtbW&quC&=_A6amrJ#l`19UP5H*dK8I$I=#K!(WUT+hA*f6vIdmBX 
z3p3#hn)-JLl6Q6Gux_>AnM{d67?rQY%8rff-T|rE-@v#Erd#&z82C8sHyRVO^K6Oj z>e%gegd9K%Y>Fw6&YK1AwyHQjRhuavT*8g8d)oPNcf0Nf(?Nxb8|#9N&Zr)n6eIme zG)tFHugw|4j*yU&k+&EUto|HyC9WcB7@JYFqw$+p+^{_?BcE6WD8u`TTkrf9j|}b> z28knuC@nSGz~zjflNf1MTmFpMiNH6krw0<#O0u&`E+E#3VY` z$J)9kJq}{E1AUD!9q^&Q4tHFKOo-cd9!HTx1)-4MvKPF>$5P}4{$R*x<)=R1r^5|Q z$g_(-{zC`#eIBo2?5=(WG~D>SlU9iAz;lsz39+0(ltdQ)jD5y~OYex_ux5NVDL1Rj z%Tc*;DxZIgv5@P_fo{#Gn0|V<7Ibl>&8RzB_FGNS2GMpxi=@@;{yNgS1HE2CnAD!C zz4WwK;1#2SVX*dFH8Xk2s;@9Iu z8FJYXL%M)YO7J%vf6XxqF}4O-A#5W5X(3 z*TQDV1NUq*Zbk5+OoQj88h_-lD+p#{v6Z1-kiLSa*XEBBZn`?eqHi~WttUQT1< z-+D05TLczvRSu%B!br+y^VK$AMi0aTfb!O~2iN~XTb{>Vw!pS&qso8Xh1s$~Gbh%G z7XzKpwk%I+En;SV4qsFwfNiid9?iUakLG@+K$L*s0Cp-@0r#efaZEcvWC715g^t0+ zccW@bM_gG-OZl$v95@NyXqiQsyBK|QCq&Z(9fd-x+c&{v zteTrBXAlMF>d3FtsoL%BGAwHYq8KiyZ_LrgwgMC6cBC@S{Two@6P1Wwl>b{om{oYL z%)Y{tZ1cIAK0h(%w8~usrylPX`oHiEkIxfuagHrviewnL;+|p&W0!5zWTC0bqE?!D zWn#1p&9wxa-$^O25zk#uvZW#kyymiFQX8o5B}vU2^R)XXunX!en#C#_Zoa~lL|GX9 zGm*T1uTiJ2ldkwo?)HR=`y7g}MyO{KpSrn_k4I?Q8*xkPn4cu0%gAr zx^Y51BgMlOTqRieY^r3dCRt`!yg*ff)#;}v`jnCLRGJaMjj28{xM2b;;3pTPFY`I6 z{%28o8^LEe9-JJ=lVqdOn4}(j8a1C=Jt?8C(E|4dTIclaNSCC`K?M^VL^_yzgGLOC z)V+_%$M!w)AGv26q!n$eaJPs7|57^@B7+tJkR~dN_G~hpSSut_TCuB$o%ePx&Fl-z zy$g$hf_bMy;5uRXkJ}_Tk~XGIrn7e{4d*qZ8eY!p;ISLtgWz!>P_p>J(mqW6 zn#MVXWIfu>0)0051is%f4LMjbv~QE4v++Ql&&j@UO8*|)(CRRNbtX9L{?(R5I1UJQ=}Z0`;kZ%I#?W*K1tOnPJM_N-!(0k=*uq1Rfce#SFJjt-~Pv&X4c0ajlv1XZLpb?y_|U)&=nH_wM<*QN;+1s(dhq zA}>H(3vM#wLIV;Fryr>jmaurJ(lQ0#l{R0jr3^^w$M4s^aT)t((+#Wh-0KlC9QF12 z1cw}&gOyQIwqR5zI;9+v+P(|Z zX(l4ArvK+wpZxklnlm!8_2CqrFc6j++q$4O*@b0ts_CS)L_rmVuKKgOW)X}Bb84LU zs%)cKj|b7~Ua))`mxq_Jk9}>l%#ZLjT->zDEt$h_=xoS6MM@<-Zz?AJeV8KOdE0pQ z{O2ngd~N)*QwY7p?F}(1zglZVS1?7QY4f+lxztcnAkA|CL9W5rPIBsJY+(;@X80f< zZ#16EqKUAHwKw^GxN4*znK9`Rn3MJ7M&30C<3?S;v@qiAE~j3%{Wu~Qd26l#z3g&RH>zJ**M9Eh_~|b(lHZYaoua$FEdH>Zz!>)sRpccBAQ}UlCnKOq9=I; z{pDia`zC8)POFq5U$FqOha=vC3x3Zx&cDsRper{BxkUDf#nY;SnoA{j20``VvW&QY zNcZ}F7Ap=rS*^h%bBkvhx_0Zccw6VdSXVHO%Ipgha=5?3Nq2La#BM0O 
zpacf&4>~5k`hjt6p{=mG%cRjl15ob@Gx|1FQ?;}3#m;hVveUBrvlSwQ>YgH;-gCxb(WgYCxGH({{q9VFsS zJBcFj*YA;l=1VeQ-7xUlm+uz(){&I`iU=H2!5TIatQPw&(b#uO4R6IayJqTQNYTOb)m^G=Dc*<^b&r= z3+^UyM&4!$6CxQ5+p2zUEO|xVqB?3tH38&BAVUxV%6Vd^hhQ zG!{PtmuMJ&pW12_3E9JNUJM$zPD(!^;}?FQ!Hb86_b(=*d(Up7LR2pES6FJ_`Ovj* z9X8YI*Bu~#>3F^qQ{*1nvLW=2o_BWo8%)WHt)Hjy)fZ{NTs+EMukN_Yl^dUPI*=^b z+b7Ybz}U`btW{7Pa?cg6Ma2JuZa)ec9B%o?+++FN z619OuNFnAFbIsrVMYY%gw~zL=Uit_ZjKz~tvYaDGTyqM%P0?i}-RcjZU<}k|mG}@% zZ9{$+t!lS}jg?~7sg|MAs~fZCDbd(->u0xs@GZN#jMmy2_3{tn8se8oh&cDjY67}? zL@6VjYYHqyvo?}n*{Dj=yNKMm4A0C*I*;qJaP1cEazJ%rvp%ls#mO+^Pd9Ji?j;T4 zB>`@9UE5*XA9~frAf)Y?GUpzB+Vb;4pXm1jAPd7w;4{c@(f|e5+|tTR^lQVbO4h9h z29O9c4lC7lOlG?uy_R(uosNlbBfzq&c^H%e{88<89ErO%Z;I~{H- zHe|hi(3F|@z;A@YDc(9d$S#BIXP~5`d&+r+=hnU~-bFxK{JS5cv2+8g4Oqv=m0NTT znt{ZXz#0Pv8WIudu~+2do!>nT4&OTRo%xm`1G4dpVX=~2fvTF~xEY9^V;TCjbmS?u z9%lZ!M$!1jgx~^bA%%zk#fM??MO#(%R-BK7|KP13h&yfjve*@`veRAH79f1xk^1lK zf;Vr&5eZ|eLUYHXGvnXo7>rEqaxV-z(NX?i%n$GL1s8(odFKsIV#^l z1aW^f&lTlZOeC;+L=e38_znjhU|KoN5hbo8BST2N zBEjLRII$zDX2LWC_LV(JYi%-{|6G5{c=Uz1Om`DQ=v;*s*Sa{PtS9~bQOzEhhj@^S9E=&)zsw$gypE+ zV@zoTWhNQM{Z^_#4rDIp=l4{iwqf4iBdCSdr>`_>CY4?&tcK`eNQH0#$vu^Eru`m_ zkOn6KZHeK%phO-{B0m{;1_nD}5A#_u@!b~Y5)>|~kf+)pZd|$|q$3m#Wn?5O z6Tnlc0hnl4D?*XyHnro1jOtJ(gppn$Y7t}rW!Y((Xqe*YJ3i5o)f`O-n8$ONY(gM z9u{F$tUf#oshU!mxAxxZa1*LNSz97QlR zDu>_O$SL*60rTa-KZ^HT7=c|sRQ62z|(G0ZB-3B!zkmsIJ?Fz%A^?M@qjCjFG^b@ z2?F!ZsKbaxA;9&o-^fXNUs-Jaj13p(()#ObHXzU<`ROviH5C(eGk^>Vu4w})HnA_( z-d&da>kIp>*BlT4@6)yRm5D@3M)u1L0S!25_rf%U@%lrYQ#RBo8D^apa0?pSv<;K3 zY~_#Vl8X7>(vh|m=!*~aj~Sz1v>PzsS+tz?^If&+E{7KjYFi4(JoZ+Iu!t_@{3J`7 zYBcy!wXTlc?E~v|+|1@+a|4Ss585KV#HcXPhPV075*mK_j>T5H%`DnE3*oEdq0Mpl znHP(Y5;~0ye!f>W3LIm>PVLgIt zwluALPv$mQoHlR7M*nyf;crrU9!>*Ev*#^_+100$yiQw$%s?{FHS$O%O$SFflW0~$ z(~b9NFk{JRxv+l5j@tO9spY-!^e`LTeSMpS7cghxpbAgy~|4)>k zAGHNgNXueVW*x}Hy$wex8`+?-pWrBSYrwdKM2oCXU{g`aUT@}%kO2Z(0aD0jI&M`a zuj2qejgsVu65bBmaCl^CyM4>6@)RMS-p+9u_fbmLHHM&kQW8AOie7oK@+Rqt?Ov+9 
zqpCBT!eU^*Aj^=zlQ*U5*yk3hLH;0=ns2MO~In8@nO)r~TJ=)slWpS0Pbf_^Z zB|%Yz5fU^TmviXq+sTckdZsFM*qAsx3FcsXSafNZy%-bteDz2>`+&pA@$UwUjA^UhjHX)ox1}+QgP}EgGiUL%IlqAnWx=cg~#70BmqrQ@* z2ag}dba!4zc09aO93PzsHB`1%7yDrc0X^?|%K`u{)0xZq;*Aj<#)iy&knruKs5`7q z2s@1nE*Hf&h~#{8JdS^YRHG%RLYa_}FXVR+P)gB|R`0b1h0nHqII9;`pkP$rnc@7;({v@^j6LuJGYsM z^}8;*BBN^ibq1#@6#zp=aLVJc_++wFFV1tV;b@!d+ZFIXoxm>wO!J%E+5r6Y6NjyE z)8~+o2-%BNFuWNIYP(JV4+N>On&D*y$<=u-(tny+yDAs-gkUzqG~3aS*^hA^vi#%^ z-Tc+^fs4j7UtNN+BEV2Tq*{Bf&gFZ*57sO?WLkXHnu~nCRU~rxnF3_jSs-i~J~4V% zfi*z*ffnMbUX0HK;j9*yM3L%$=z$f>fuv>cY7_f1;3QOJN`pN=LdTm7Ghg+r3DniC zNBVC6Aa!=!u=77kk(?yFSh7vPRH{87<_MS~uejxwK2Gc9wpB#c3}KE=hfsT#Iyh8U zh7?$wy-gSw-Bd024yg=7u|5U@Zg{iv8}FHxjREI&V~!6g^-d^gJS_OQGnYW#L^tI8 zGEwLbVP$~Lw1SG;Y_^ntnc_}k6`saIfBV0LuewrI9P308r2)u~&04d%p&Uzm0>DIJ z>4D~xi>8icru!cGFEzh`a)Q~I8NQXe>I@2B(1uL~?CC@8(C%Tu3b)>uY-`)vNYjXQElFga$eEUKb>4qzg1t}ukS>NP) zvlzmtjE*d)i3Ytpeak*qHaq+*cY>eqc`Wd@yesvLj(>r93h-m@&YFzl$nOz4>dL1c z;*bZIX^}HsD8@9{wgx9Iod{6&4!2(cMuP@S5oPS#iny3k=1-v6GMy~y=knrR+$iM zlJ;L|-f$LE`F7GAUOgcMg|(=%S}f;+h-Md5Sa)wwe%f@;M+rqBz9?IqaBseVN2oBM z@m&#S4&32)Si<^(27#S}$x^(0aEJ6U&`J>)sFw_|M$TU#$qm42C(X$FE!dXs(K@#D z>$EUxY2cN_%gN5cwn{rUB7M5Ml2TNnzW4+9vm!@s;1|S_p7porc>JGU{|80moL|gT zfBRyYUyZA9-JKAqf?(jDhNze<#nMBTvh}04vmjekR`TxTwJI|aN=6IX>-XHBCZ=8V z#-NU^xXk8h>hrz5LA+6qar@|7b%)`9$7rg%83d3F!)J}PWI!zGM9C&rh)tL;rA#d= zz5#wez7OV5AuCYjVzBz!E}Sbc&;J&zV2k#7&w3Wj)@4q?=uY=lTHD7Nc|i2oQ@#KR zA^cmu>(bsc7;S;46oU`^VL9>Oq}JB+H~lgnmL4S#dJU`D36RznZ^S(9S|9}VG52;F z%^ze-CTE82pT3;>M{q3%*fsLeW7=@vndzAq_&x5Qrwc<`s9&?83FvmQC_<&`^ZXq9 z1f{p~uY|EY&TjN#&-_w6EYe?if>6hLWWTppM6V?LAoH67Q&FlTu6NU#^AzDL?`gdt zouqlQ-8_x(&06>$O87zCVNmGWC_KsveJzGh-HT}2@dRFcdFUh-!@5>fezQ>f(*??& zfaCaK*={wjwmuR;@f|KknZ#pwE72Bh<#mk8-HUwKz{y$-3;m)CvUFgDs1#yhgb85| zp;2zk3;gnt2?9eTM#J}Yv7Fm!n#&nNqN`g*sz}u1kR`D?B zXP_XyV-{OF&vDsd;6c$ z6hn*Lx)h&r=qTh7mB|;exQrOF5)7KexZ5L<%{6_|{a z{~C5v76`~3NW3i9kKk(A@>Q4st$0_iN5rV)2~UxR+oEUeIW$le7efF-opFOO@u?fX z|BL2_!pI9zU*r?{Dy94Fd3~V01%v5ncNGxUL3s0DGt!4oyWc5rDgL!GFb5a+b(Obs 
z2wl2-qI?jow>1yv@w3HP9;-2#R4|kV&bByk{TX4b!GBiP7z(XE&wIG@@6ADdLBH6^ zV;vsY$g{5NL!Ypo)MF-bUGG2>{_@G#c%T;mbh|D+K$l3PV15l`T*CV>%Niexr_di# zf5ppiR7PJXoUL|;adKTe8-ozv;oG-7_n8e|fs0tAoZG&8%-2pD9_7fcOLM+hyyI{r zz(8@M4Uc)dI02>*RnqH~v*K_=H1)vy6A18x+O(ma8#LowY8!1mUX$V+KDSwThbW}l zb1d-7e|3-rp7h~^UkLOe?vE>&bTZ|k5~RFau6O*g5XUO-L>S!ki)Wr`vR}P6!#Ilg z{S0M5Mo+lgxHMKK_b+pp-38p$vNsJ3lYH*8=7yMd?F6&~V(O{%U;#W<5qNTf>m*pd zTeL;R|5&WbUHaC4GOw2Kz{~% z?qp$eK9tHxT#>-@Ju(Br#r`vmYb>>WQyQc2{L9;lWlPh_c&Ga-BfG7v9-$uF2lr}0 zOnDH(uH+yj@Yzg}wl$6MUpY!10|;rbe+gf*`^f18V3|JbI}jU_ht@htZX5RK6D)kq ztxP>b4_`y2g>5J(@JKNn_Mo;-9k!P(%^*T!6Ly}@OdD4eTl(GG&2|nMtrA%!>e=yf zb}g{>v=Dah9p8};&sp2l@f^VX!`gwTsD6LD#vFM$gezGdYhYhWgzmlV<=%0K90Cymfcg9F)STG%JKb=c^!&e7$mF5-GXK#=M4&bEZ^! z9HS7Uec65&Mx+5|Hs%?-HvA?+Ndw^RC>JMva8!){>-n|aR*bN^XmO@xE4Cw zvKYFcXkUj>$zrKKEi%s_Sd3!>!87{Ss<;AY@#d1TPTC`-XiWiV%g3JKt4FO;lrCm0 zvNjoh9^>_}Tp(cL(D(3D4l1|`iu*YFl^82y3b4|`e`Ws{z@>+US5OP9X1fuyS6V}A zc3h#h{vYWDx00r;O`cET-PO*EB@GkDi1146Q{<$4V?2+X9}~uiJ9AR%l?b%@bz%DZPYEVOAUD3Ph)H zGAmHE-=+B}2nP)N!HsDqKGG}sM@u0I#oo7t=DEykGXjJv7^=`H89ebJv51S)?ewNM z7_KSi3P$e%?Q}y3+)+EFPDAHx9z57H$2$?X7)X4E8R-iEUp=Ypa8}0H zZrPJ~rkR?kCBlHdKnOA)APn+$1E=SykNz#-6X6688X)${u;}L0E}UNQQFfN)X0>5s zV^7q^@#^WPY`K*~y>X-<5oc~VCLKp+N%vS8Xh(Tnf1s<%OE1{Q{y%7R2OBcr(RI`R zM<_i;2X1}(lF&XRKsUo3#oE>4j;K23wbEWL#CJ#Ybb1z6Vr9bJvfeW;0Yo&=R}~uf zpz&~@?pKgB4^A~2iA%4>A#A!|7xyo)bPMHho{rY9T~AVi33q4<)Q!z#s2QcTlx?o+ zTD#z=!oVuyaUhm~cZR(=_$|Gv1h0yUDa!|2W-r_@-|SSOZ>qpZOoW-V_e|^}TWh^% zumt9-Fjju|xzBt-L}j6iG8@WHY;;|J71om7-{k-Ma&&*&2B>ivE69(02v~5Dt6=ay zpl|v-re>B|!=V_|&w$U*=EjAIs<9%Y<=}R`W&tsN^uJpJdP6jy5*SvAu)3{?h@h+G z+GmySOjcLL@7eRgaGBr_CCpW#k>z)hTbc7JGww9SR!pRgDr~kR>~63)B$OEz+MQy& zO9)f+?*o&wV=R&Yr7deC^-|@z6*9IXH9l>}f80=(jCXoY_^+KN*aN`7+^>`ODfJNy4=E~?5C9g$p9A7oX?FVjLd6J|+7GLXOE_2n8nxue~-=5O63Z_vW3 z3h?Y@g$l{_Qg23s@4@Yz>*^jjb|9L3ho0iU^&QAVq@OxMq*+?(8zt#UTv^8?fj~Kg zv57P4R&d?sC<*^GrUTwkhpYLKk2D-nH&7rg*S$Wq9P_GVa%SljTal_;|zQq+9atbSgt;VxqA!u(rP86+)oaf`rWmZPqWI*S( 
zkz^NulG+MlwlG(T{H;l2+7lC97F(Ow;z_g|#7`b=-u%zJQSPdA2};szk6O>T4C4Y& zEQ+8u$v>V*CGMVQ5$UP`-i#6JXrEKdh%QKodi)RIP0c}6Tbc6_P+?CStZQ1Pug={%K)lO{awQWpJK zD3#{Xdf+|%wvWYCJt789uHs$vIUiU*}b|27Olg0dSAMQYJ zxCtK#Jg4j#Y!d;7#0RjxpLwKkCRlwMPW-DMWdjNh?^^|$V0lpjQKOk-9Z8pr6h6?V zNzh4Piv_wr`^>Xw2=kU{0Tx{*C!!vMzZ(1nWgiPCGh@Xc5~UAYz7Yn6?=V^_vo4DM z;HiG5vBjZq2({YWz>GDeE_YU31_Edno0yc2ztT}Qlnh3R35gMYjeTa2s;Q2Xj1d5oWpN`amdGcHw zVTsW5m3Xa~G{s%GYRY6mO4pn=+iWQCV_%jVFE$|`zJ^ouxPj1UOrSYFEZ{lSDw8*% zRVG06VBe25;*n_nq(1rNc5jHu1~lqBGwH*FCcyEW{(s4sMP8HG@|%XbH#(W_=2Tv~ z3T~PqJO#ma91IR2pv(XzbYY(u+_2Vbt}g@mP}Q8#VmFbG~yta6G%eorm-)| zGpv7B${uB=Q7QLn2}|i%AfXQfc#D^|A*ITGdyb=IX!NJAn8i^$(NJRxSk%j^j2tfn zp)66S705ItROc(ghLQjkqdCA@Pt-9dD2!fB*&p#h?f*fDvV>;Ii~sJDv6r zLu9jT3z6TnC-;HUZYUjA%iI0fgZUzjiy%WgQmIR6p=jT;>y>?37mQJfkCQsLm7R@X zpB{}$LMGwrO`~Ccs~9i^Noi{}U8KPFsG0ru=8L7Z8|%u0+mvv{w37>SD%0I}Vl-_l zF;@cmU6!G#Dd_~l{;XoaO{^)j_@;r%nM};pyM>go9eRVq=+7oa^T(XZj!1QWa2+YO z3b`_i);M7h%FOMfR8pjaDW2DUhUCF83bs2?*G^=fTi&K*NB8ZpZWVhb+W2)mr14eK zMXN9-_W^+){17>OXHG|Ut3W(4uqP03Xqr|LK>w`e_>oz#u7GStcFnY1+yB0dvHy@` zwId-No4>tXt8d@@WK@II^_Ivh)AhRJa3{L5vuN$@TmW{$1Ol!9WlK{F{;6!|uwy`+!asvmDwtf~#|3b*N?{iX>23@p2- zdaaC5vqMS)mBja?U{Z5OA66deb#5Rv$lw89c0M`Nvk<9R^Njbvt9X+7cXG?|me8-7C%Nc3M{!nL6PY@WyxJ z04YA__sASXR>D!qg$aDjmy&Q+l=Oa6JDrKMrIp7Kx7zZ{?{@#j9j)OAzJM6W|t>DyQ5%R z)!a(;7`)%70Ndol1G@dTn44jK)x%x5akc2LKi!0#1H6$Uj6ELDeGei=b|~(vnQ){Z zLg9uHevyM>(=TTtS4O}`DcdPpe31dhpcg_($2}F!Ib>TQHh-l1y1e{NrvUCRXs3d{ zM@piw-@kBXPCA;F8kp(w>z)v*H!YvQm9!q9C==ixx>kqo!mB}c4n3@ZA!afT7yLuY zme_|Gs^S|fq0l}%S$f;U5&ADcT$!8sfgheOfe(l*&lZT{&uv8Dj~3NE|8XX!if{cL z%mZ!xJdK!&L+q-OND*s7^7-_nwokA|#9jIj`1hj{5^G@0sf(pS)w!LcY(n+z1>>}l zdRbdLaWJ@Js3aes-#5S&V{QUO)8m|e@YdoyCr;> z7<>Gv(Iobu@HXY-do?Xv0420owmbl2I|xkol_A*}oTtw&r<)6^j|(jqMYJ199l5BM za3kT|dY5g<*Z&I7UCl_;X8*Kg8VSj(59uF1{ctWRm#`CifE<)3g0A0CwY)0c9 z1w(vhS~ks*c;?&HBrjJQCD09WOQSZ}HaV)XFQyu%9=z2x#y}pBCL)(eX8mi?2A@)# zIC0Wp4Wut1iyL9l%r&-XSs_#9v3*P_3+-_^&DpDvRDNx(>T@R|5d8!Sz{Y$rM+11| 
zOKj@9B&k*(GWJrkZ_#{B#|eJr`7zYActU7JaX|9QV8=w~ld_0F9SHCls#2QoNiJzU zg_7zTmo`G;KoG9-1c*_njg`6-fIY|YLAwbir+NEB*0e>=+$m38aHaqZS^X3u0P1z%LW9d=MO>uXJ-e7grsgHJ7e>#4_x%IpP;xO;YdH7s;l$s4 zCjM?0u;L02V3syT^@Xp_o0?cff9Dg@D^P9(!>M872dIu2*SF9V^7MqdJnLTNh$vG= zYnlE(>znLga|TPdG*muK_m=nM$wX6%V_23y@riqzRT_3}sZT9C%5b3Tsmz7wtr70> zF>ZY)Ms(bg1+8!Di?CrC-O!__X}cO(qiErl)2))CvfL@8)_nFq@^6<&?!TXu>*)8k+1Vu^7Rr{p3rOkg{CZWoAd&BX6J7cJ+A&wS5x{DPjh0sR!50ZIYos8nCiD>+4=W0|tOw4K>5C3ZCr-oa zsSNk`>?fQFh`tkUJWkf-AU&z&wzAuW$ixga3BCy&;Wh#p{TVIij{z_3^I%k)wJ0j; zrQS66F>x;^8)g8>wfBt%OTFj@55iTogYA-NaB+a(lpc2i0t@Ji>IKUv0cJP+(@szK zKqK1Yh<|fK-^aJA<{vede~JV^)Lk43_AIj;ww073A&8%FM3i}hm!+#BP9Z|!F`#@& zN)99A2hi+X%T|p^GL?v4fW%Vk5P_*LvH85z>eA~K8B5p`WxQx~ryaLI7h&+YnB!q= zPLMdrWb`r)6c};%Yj-f_y>YNa9Ohy0u+~Nih+-ipGu&C3m$A~%te6>djly@Hg2N)dGZdNM#0Ck(PO^PkYz3Ya^OCMC0tL~lhn2~>ySi~-VWiI*OfKV1M(*>5 zYn2=?uSjmsV8@jBB)i@&JWmLdKpyzWHiXB-uVCgB5=%hxd727yOQHUn5Z3mPP_+>D zb}K!4qJiCf`2b1NbSHMGWp9lZ$8xM8zlT64Qj8De#8Uo9C?+kpyi)9Y) zVTmkMiM2MxP<~ezKu0t1@^0(5%=C#=^vQpdiacVq0fvwYAas-07y+Q0sBcKq(O{{=={fU^QB$LfdmMV*`jF9(cT0;UY;t2To_pfN|8VMtdkrtj;%B!nt-?I{LNRSkh8?@LQS*3SSUfbN3MZ1m`&$$#9iWB!gQIh~r}sJ?%b>q$ zNofZuGxTA#WV6hZLHbEZi-&*4{E(c|S&5Rq{&1yM_2B?fCDz6Rf| zETwWU*mFpB-mp5%ivbeFS5!z>O<~@{^^m1)J1Ba|0je%5akY4yN&8V$7Jk$t^rgva zzKCDS$FDg@&f7b-HF3+~(&kj5+p7*z5Q(gJyn%U@UJwg;OJiigd@;^#z?vSVv-405 zP$!NOc?`Ne0>X<-Vx?ClW>J-If|Dhw-Jy|nYX`*0FljtSRlwt@Gt{?V+x(*xmQe`u8g!jv+rp2;%&|C4DwSeLv`bw^7 zcFn23NoqFVavydt1(1_AzN0UBzF-)Nv8tRXzd_GAu?;r1P77Zy(h&J7?iP(6P5S>5QfMDyzFTWF>47I!-n;CCH-V;})8{lRU4veaLuLz~MF z!mbl1OTM~Hc^-#X(F{=89|4p&+q$)ahG;p52>+~lt1M74WcD$nX0){4ITf#^gNBDe z29h%f_>iFEg)&5Xaty1bydHEY(D2eG1=mS%xRV5dcWQGP`A9j`R!tu>a5-!&Y6|IO z5?*l@zzhN*M=vV!F2Vu`n31e><)6}m(Vn)1He{37 zUUhzRi|$1tRyMXODSb$fQ|S*oIlnH=5v*UNnFo~yh%HYvAp4FA7dDm-VPgHI!5CjY&wiLF)Qvbyf9L+%}`RAT$`JH{#Jey5a4rWCVI zmHDa%L`TMlN0M}l!-SFZM)d@mc6vfz)zvKF7z;a{2A~#6Ll$QZoOG(FQJy6E7N7&L z5E$1Rwo5T?8{}?0R@a0hZbz>#j`scn&zcT5u`1V{T}NRu6~ru&C8X>`-_uy(uaJPDl-I)yNT 
zr5PJJii;G}+3M4&+zUY;ZvkS}!)U@i{9Rf4s_Q4hW10$$-%Y69UjP`!q7n}<$bFzV zBU-*aR^2@;G8riC!w^P70kc0LScP`OPyCwcPAM`hOYWZt+s!ut5PPdd*D3;$%*-zo zK7QbyAM}M(boa89G;tYDGq`j@InxGvNV+ zQmHzse9bOu*N9BU>ZY|nFBpskx3;)3c7qz+Q8>oy`yJ?=u)4cU4DGby_&lT|oKC0@XP7zB0R{f2^1PPE4$*hQgV<)D_Vz`jM9Qc80~GDJmlndItPXgtcl0Qqi#Pyr6Wp{9f1Vg=mtz%jDxnix~8e!jz-5>rIHs6uA9h- zmv$VQ-^4J9h2!cpx+{Pw(hLjz#9^{uTk!6RY41EKjpnZv9o~yui@I2su_f z`=e%pgu>N!d7bGm)|A#ff-Y1XJtqG|uF{GkM$sQ@#z8N&R)Dx^dOhCJ6eSQR>Xbp* zH?l9MeSK+_Wgz7Ob?Tx3NG>pl!D2YFk4A-zLs}9wtOhd24K`DjynDjQ zuRmk^m;H=%@k{X3dVLo*_uWwn4zE>jx7+g(+nLNppl$GMRq33{1VeyKDIddE|4=nFh>YxG%Kjbrk@`|cg*vIA!CML0ZD1MK2!C_xri z$uRXDd`gq`n&~d4h}v9`j7?Z}VPhz;&*^P{5$!QdO`G+~`2TBYaBx*z3YE(#kW$~< z>7Vvx2t`hl-4x@jx8hms5P~nFUrnpA~hPB_p0-f2&7S&*a?+WvXIjCw}}t0PqF9U1=1bIfWCNWAQd=3PY%o zmhUgsbMKSxqwy6X88w+S9^nHOmZus?&_bqkbNO52)oZ8_z%NN{)+YQ&aPomISOH2Y zKa=|I)#U^+NgjKtl`%`MjEHpamzF+otJ2xtz<3;St}>%RaJf6%D;FcTt5UN+rqiI$ zja!kaREk3a3?^{U0tx$KjS4-|O_;=ha)-rO9+@}n*G=P6q5ACQb~w)jrc$(_S5UE> zmCb?mmk>71#Oma7?}PZYCaA%Z9amfx$-mhk(j)=S=$cmw%kI07#2+DyR9zJ!S^s;k zjvjtKace8sHx~P#=oN`nEbi(z?J>^&HqXIqskX+`sZ&4692l5i!sA)4BY>km)4}#? 
z6hpyXP=5!Zs=qFPPbl}5)xb9sR*H2B*C5-rEP|Qmz+1j$k>Yrq5%#nKwAfzzhz@BI ziQ*KB&9{Xwq)~M!CPu*XX^{i-N1Fm))pAw;!QU+%8HQfoM>Ch&TuAr8fsEU37;BOC z3wFxx-?X(`Vuo(TBUlcg=;Ew4`Y4wEqe9+DmgeSGO*Rsvyl?)Jemr>A+A*9demjLW zzFYj_-^)s!)@;OkpBH!SMQh`E;~KZUu)rj8rnSmiVX&C;IWNQTBw@P}c>OlQx~}6B zR}504q(3qrr`Yx(qG=lMe}%Iar}I>TUZq5`iD9?*(YFIsUGz=*{i8`tYMO5k<3~d@ z`^{@)xX?xZ`hS&0ApcM6X2p?2$M+Z2fzauw4J2p2d0#=sUy`n%*mF|J{K%{$0WM0d-qzTJy*Fg9_`7se!S1JCpw zcOC{R{Iq^7vJ6Z&O@+vjOp8D0G^82CNf#(>N2l1=cUuA(2R+ck;R=r;WIn^WvJ!z;WEHuv6%)zreTS$I4*jfG@jvH~h)hb9A=~XrSHdL>Am+jNZojKb9#K_hn`2G@k zytbx&pbu9XY1wd7hBC?3wH!z4_mGj5c~ir#v*C>)lybmtxfAP$mAPNT$%uQB{sC;; zwkUxtBoG5lOsHpwOHRHNaatUrF~C$lG0n^I*fFoNb+T)0@RUlZ51u>iQP+5T$z~v= z<;tq|(?j}KMG|!z`Lz?7<5DEt5Ob4~glFtpRZu5!5DUzz2XvUMne$@&DIoNm#NL;8 zLPL{rHWzYiSD*s)>H`Yx=xC8|xpzVw5tnIWb5E6axv($Hcw2Pu0xVVe@oo_&6$Z3i zOens}0d`=Yt`7JI3X3~B)IN2L^P+5wB1tQvg+oqknmH+u@np*Z`_(}7GfnPgGS*So zktb-XyE3S^OT8Iuz>ICsVya1Rb90d%VR%^7TE=j%@pjsC(SFpwyA!rOyVNtM;2r{p zjN)S*l}SiL+0bvHJew){3-Wy;Nh2M6vWc!{{Oo6TRJtHIe1#wlVNgS?cxX zA0M#-E(2dd=DYqV-yy+dM{73p9Cocmq|B*|V-Yi2yAB+rHz42-U7MkLlD!9%Z;T4= zv;oPfaco^-S<~Lsh@@=M1RX6WiYAwU)HAc^#1tLY2Sla1L~}FB!hf}iz0L9MP&JTI zUgBepYDm;QxptgIwg=5a{<4jc>SARJa1Fx;LdBMkgv)p&;TKT*pY;(-<8afp|E=zx zsQzaBeODb^gv|Q$-wnX)kEmkjVUhCmQ5h)zISvSv*7xRyWP7HG`SW+u`WJtx`@==dnr8y3Ts?(L!3=&;SjT0E4b} zVG>kw8qWRecXqej&(V()98E1)zm!71ei5A+5@js!8R#?s2A-%F?SRuwAR+|F;;g#vv}hLnL=zph zL^2r@&xN$;VDkZzI>9Bvx$4)yweDi>Mc`#l$kcwR_|DEfg!~utLBqPyQWBZg_eb=bh#9VuZ>Rz^Agr;2;xL1 z6O>lggvV1S)rO%%<=XlE6pmn>+t9%D(!Vj74<|s_Vj*SlK&~ULW$m&k>kA+m8QE5O zf~HGC)M5j~mAeYO*-(9heBlo)~{N(8g7})gm0;t6mqX)dk@qDO{xx z9Pkg%ZeAojSv)3vJmK7cb0+Um?B2T!nvNJ4w&t8db8Z^nwuMZ0{ojOcIGS$sC}6Jx zZBrvXH3Kx@MGS>=pW_P6#{9|c*?3e8T1?XbA0+TqH<{HRO5LinP;4>rSrN`ukf1Fq zm%%FZ{}ti0$xrW*k$}K9jfr!%ILTqM>pV}fGK9}yR?}B&<$bmd zFc{s=R7dco{C5x9CbM|_-*{{_cjf#=e~vr7DOFJq(q)Dp>yllgOscfHb@V!-TUv{4 z5rLLU9GFhV3o?ACCjSV)Em;su8p!C)F+tCZRG4DU$>aV}&pZQDM3Uwd ziG{0=4w%%wDeQJo$lK%`W;-4)!S6s zpC>k+xP4x(bPPob|ImlIM6tRy|M9L551lg-E6g^i%No7DGqdG4mZeOPsL?MemZC}| 
zAsySTc_xn;%Y&d=r5~RcM+Ns|q=-%*j$H~YVCMRmDx2Wz*%5&T$K7_oV;cNZAp%Ncw>T{3&6*-PR{AZx zq0&D=eMwLu3pznAjMBUx24L<;r3ZHrz=WJDb~{ozyPw;pBS0U*aHgjdjk-DLhGAoj zMMX)OS}uV3tR$+?=m&G{_7?laKrGn)U}S^_O0=aE*TwX8RY6Pjz$Fz+{MIXmM@;c> zW}ZwLZA&@!sd}&Xf0g+n6aJU9$^``)7K{RkIR3x>!ZfZqdD;jH2vkc&b{@-S-3f9? zv^}L@m@xw2NUy%DyRbD&=D?{q^^L1`xxl3O!JDivi*`zdk4~US*C)YPmY|8o>41P7 zU^-GJkuRp$jS3&F1CAd>X0N-LAYfq5%TwPOHr)?E8;{RWBO~-*r^mX@2ozKYfJ9$8 z#OM6~*@Rm?P>rFl``(gtzs@2g|B@59hg5XH8 zA{%~a_oP`yzN9~R^{+4`V_xC!SA%;5P1Z7+!JIT6kLJ)~R}(z$b}-)~ca|k~+pEST z{#aR_pm`f0alV8iZy@Ln-_6cXBJ+4BqgRc#vAn+))({{!j4{I9_#1t}7AX|87fP0I`Ct3-MP5cENh(qc;bdC9yeR4i#5fHhJ)ptjM@Na#U6P zl<5KM9{ZFYxD}+eUDS_GHS198Stxdq0K{3Y2t0F6;R*&kuT4S|fjOZe)jKuVIB5xn z1Ztyfa&YQdq2?8Pp;psU&QqS}sXb~RhE=@9kAZccGwn)uXPU`BV(4+2Bqe} zC+#KhSACdPd4GyJ{y(jBY6h-5!?66GtW0{q&!W*^Lf_%sEyGE+&K-ISydW|pWdoI? zna|;O@P|VR0@sJvt}6H*UC~FTy!dRTHu2$$8dh4&3@ycT{%Px_e?GJJmZDveC=nO$T|~*W4_l z^U0_Bft~B8TC}&$Rw3|f2wk0?87lA zqwFq}2C{i*I3Mu&@w73N*Ib;mSz32c$WVLs?95O&0h1Fqk$$*meC(yDGn&bRnop!x$@qck^dway$Z$FF@3K)?`##LL36vH~#qa5>t1qbw?Z z+cN9&MPibr!k+Gj1#K%0r*xUsI8djd13vwH^q`5Fg#R-Q)gC@!I2`a36shw94b0Ro zl2t{Di%RYWWfrj_6jK5QIvqHPH8-3aUz3blO8Ran%UcL8xu}7PFE{)lYtH__rtQe@ zlS}woxG5_5zLdObsEAg%OVdn|oUU2^JA1H^mCLhSF@BVPVy$(LeTmGPdy#qKcSLp5 zp*K_I^7$4@@=RrxItC5OudtR9rkXcd|70Qo39$<@qD@Hb;OBxtr9nBtSNEZ^~Uo?vMn zC8)7F4`ut#Cju&fE#e>UB$VGoXLvS+f#$;?2eX}NY^^0ng(xS#Z0PisxD$j)zynYT zfF-u9axA$hqT0rDWWW@Fyw_;_Z6McvgrCf9VtHT$Vw31c_IcG=Z~h&TB7iaX{{)Tb znpB)%O*kmhUrWU(dchIV*vyGT0uGFB14Uc|Zq#mG?09Jo_J{etQ3CoSIP@1`Z(tuX zdP?Su$1Yt6+azR)-Df-jR|48l)64Py6lpaNE&G%XwL9dYk=;kgD)Hhio^M9cw-pI& zh0b5ESmtqhh0{8e^}_FH&F#fbQpFX~K}pNzCK(ET6+Cfcb`(EfZnGtkt~=*b5;c0Q z-T=wqdEYW#vEOcN$-I8KQH%1qt!CezpMxU>VRzK=pIo06a**kk;oz-~MY+iGy)FWV zL%{!dAS*%jye<7X3w_LN9B4+sp*Ibxp0_T_^QNPxRWf<8*__>|E7U z=R)M?=rO>~(px!-<;ldo&+ZQObN+CxfF$Zf&XDJSilGpZCqZ_oRHYh)rOzJa-ZHp|u0hERfNH zyZPUoXUP)7L)#E_J*_=}c#i52q`%`7P9lSYelBDVCQ1VS#o$Z6JDhG{XPmk8c!F4+ zroi6&70OLTMvg@lb^=cFsz17zLMzkTnoSOPJ8$8 
z^c?n1VB#sP4%Wzn(VA2_*3-Z2&iaYEftaJzP9URztL|%J2NXUI0Iobv?CaaXw}u*f zyb2=ul03}FK2=`rGDq0rFYz(Ll(6d`zB|sAyy6_k1}DU!%yH1mM|RJvPfi=86Zn$G zR9NN<9s)a7ZZ->BIG5)OgEtnvAZB_V;6TuCASr{xPgZlTK)8a*HF{kTYIJhAXAdFU z;?$+>-N!24GR!qZvquZ!BRI;+v`vVn32C%!%@_v}2dCkCu!Yfij-!`pL>OHnXpodI zMOP-3O#kF_6TJ}`#3VcmW{@(TXHh#5wH{&c43PvqgV@WkfJ-m1pj~Cql6|Bg+)}*P zUG~TG8x`Lzp7dvexYU*GSd5AqV1fM!>}!8FdO;Mq)GgnQ5ZIM#YXTCi_Tv}6=(xvF z$D()K`Mn3Qm8i2_;H|BT^a$I4_8+cZZ%`8mQ^8NcdBpNEwgQw=(B##_PPNQQHH^vN z+(%ib^B8TYs{UQnSn=Z=yL#2}bcnAJO6m`}gvrvz_;2MhYDzwxmFuX{hX86_(XfrX zw%2S_mq%R)2(u*MESB8q46(6rJ2+d2;N6Kyf3oP*JQolk1CW~|>@}3)$H%4dB!N*N z{v3aPd*gGty%dR7%1@*eg}df^6^?`jqj|v@IG7~ z6L(V7ch*SkA$6eHW7+vLCTc|$|6Va!!{jFOIee&X^#}=9GU$=0wi9BN45Oj z&gY%9I^GM~0bnWWS5l1%MnuY&R$lpKVf%2TJx!qwIzWD8yxw_>gE{59ZxZUSmsCIk z>STs+?l)6EZDyRbzZ8@~~74gTp&~mweRt&H2hf{>HGMBz&L}$g&9Yjl<>5(|X6I*k>ET4aEY{I(El%$V%esC^%0yN2$)Iyb3=A&A zf?t@%WeFzzqU>vJh9HTX!}=F0VgSq{l69|0Zv3(!rw#1RwJD>)l!2z++2?Di7T1Ik z^aNpnlXf)lTHTMPI!ncPGrQ%QS5}tcTJ1=x2?a8ic%UnztZF+y^|EHR@&BWaA@g-w z@#(srZht4GEs5t%$kIh?i0MW(MRGzbv?Vg3|lR4Vo)Axu#ONs8{{8>rS-=&>(I-YCzOgE%6 zRooGXC-1ilInKM6MH?t~IE+zcJvi$ zVX5A8D#?cyKEgd$J0C>wFuDGICdpYAN}*l*;F5mW%qTjlc6LOs-K5Um06*2r!9VT2 z_Lf7JPsb^VYm7FmL}ZH+bR*Kg(^p3s4l=;CodGtP(fm?!04mYbKO+>>?H{Oik@YA8+j3u$7(u;}spp;iw(Nd!b@_mB!5 zs!fvYDAoIP1u3dtoxobYY(Oekg%=&k1qLmQu)zyUXF=!6uF+}K>?rq*dAkIOs)qWv zWwMu(Fv;g4sBXkV9bfia*pCmJ)>dm=AdgT=0i)q6$5Z9BsZ^Qe>P9SmWV3G>#Hp|w zc3}(&c$^^wjhcMQdP1>;^(wHCDed@dA2x+p^TxAia@B2a;rMJ#B`EO9H>IC~ z_bVk#u2AJ#Lk5CE7ReCC<`Y+>)I+2Fy<=(t{4F(lmjhtOpl7-v?-DE8b}BYp7b)W- zP5Q00h$xRs<>Xxz(Fdv%Xh|l2Sv9o67s-c`EDq|^Nf9iZR;)jG*KRutv=#|&eJ_})y5@MtxKXN#)y$femgBZ{hM}V3Sa~RC_PCC9fKc%|s5Nh7 z(aX3LjX8kuMTiFZ!!-$n+h6QqbW)_d=#5n$`8OeLvXzncPpHVw zu`Z^+`MFV7F#8qeoTP-2Qflf z8xe&TJHpqTx)}j01rHsX_noPc-&pEbZI3>r6DG_+CrgO#rqp*5i^9?TE0+>Ew4Fyx zsfP2yCN8O~gfoehO~C}ZhA#6XC6@*qhwd!ZeFX1z-+z{&SjNbAD$9;oI6SXulUTBb zsUs&6fYv%U0@N;lLld>mD`H{5WqbHq39jQxKf*yJQfbV)Sc4&fi;#6zoemTwhTuIg z_VOStE6fSf^YHgSk_jx?NBVZBLEkjh2BEzAnZCqcgb*%ZNuH34dQOg;m^H+d;yT`p 
zYL)L6y-FvE4-PmrESbB3+RTusYU(ZWQJFl@-6$Ae-#7E_AsC|mb_RQE7FloqdP2Gt zsx~2jX#B+_4Ogq@_SseRrOaT#DUmT_Z^GR7Dm_)3ZUeNb7ITIrQ-1ItT9b4<1bbSp z;`r+r1icWYAIDg89MD#c?htI#OQV*vO)(c!4eNjjl4<=i0C>r90psE5u<#-Fmj0yS3eTL9EerH?{hO9&#|F|Bqg($M3&y1=gWLXu!89ljlMFyHRR(ZMoUOd*zi z&Awd4Oa$~YQJgfJQQr#^E*U|M>aVL&zpz;mor`XmDKRI5Hk;fS$?GAMs#0|FwI3-m z-w2Z(6AA^JtK*c$E}L;6xQE=5Jt)KejuxhDJZ4*cVLQ1Yo(ga)l|XIgS6XHd;8Qv- zLYMTmMU;;GW0_t=hZbb=;!uLE7P0*OyWFQUua}GIEG;|AMgrRjdvwSA;V54~_H!(c z(2N(6n)mu>>{?9B4Rse<+nTK4ba*nN>Hr&)eP=y8Yx@|W6{#;y2Xh@DC;RSf5d>vh z$4nYnS#+nG+h^*99S~s>Ty{h;YF*d?TM>Kj5wC%H0pWBW;J-$p0+n)~iI)QwncNSW zz%%e_a~b_e`M8KI<#B^MoI)|3FQPBi>Nt4A4rEm|py#;G@d>KO!v3gA4Mw>a6UTa2 zs&Z-_0~>!jp*cB}V^WO%xV1`>mulKE`sK)^e_*dh>ivo?eVfYFif<)|#1!Ycy1QOp zedoVGmpLOIQro*giu0*u$6kRQ>U0PvL{V(z{SE2E0Z_;TJxP{BcdLW0wB-KLYk~4y zIYDy?-)FSe`M)w<6=Jzf`V$`S+v9A0Du^Q^v<3_X_F|naY|y`SM?}DwH3_!1!-fF z?`_Ppd12SX1hhJ_#-;*LI*6M%AdZP+i-0Wav>bbiYYbET#&kUpbD!6uyLWsK5`4jZ z&MR?Y<7g-NH0uxNIj=>!QU-~|N{hvSw&$o61RdEpTmHO2n(`T5O-0;?kML*f-IeQ544yQP|}oo&9pBOG!)vJ5h|1CxOTi=M3yBvQsCip-I0)5 zmJ{ad34~pdA14uH(Zse_n6|)~_&UzItXn7S)4VgHgMS45M5o>}7%Dy-gurNyDv8@C z(Dq?mMJzt9U2wi*^KRi$z_}Cf*NuVA`>{3b@|nU(;SAA?9jCY$X)x8OY}+Un-xkeq zcIr?=b(CO}_ENjUt#yItom4Xz0sDuix7cAIi60Hz%Tm+!yNwr#MEzY-fG53rh@qv;#ZRs0eV*l8%oiZ8lP0o z6lqO47cr_81MrQpv%Oo}7w475FnfG{7-WwxOLGyN`}p84s~I zCJNX5TZ-lz(h*bHTz|2Ej`NKquqcyNd-DjwI%jo{>QWdBAUENv*%f`HO~buA-3`hn zfsBX!cY=Xvf+XQ;oW$=5XQp6R65>jltL_N5HO%BJoKFs3${LfNwRkACG!h7j)U?w# zt#cHfLG4nAdC4-hKeWwEwUIte62YOFx|L>_=6>UW`K^B`J8&G`#|tFvMssz5MZ42O zb%rGC(t6~qhPt8q=CHhxHKY+mbYO)CoV8O%ALdgktDp>WiIZ2rjBr;CX(@e@_i=4-+4uk=NR>yDz{ukWFXiXf#OgF;?3FBm0FmGYpR zoGHWMfv1pH`x_h@71|_I-VkEYZa7V&42{2+&iL{9?FB)GEWZfjkWC_IB}5)o^U`mG zkI*h;+#l8S==g{NDkCirnVu&e2Y)~y73O}(ZfT?ZiEU{e*W9WM=@a{x#UMQa77e^q zt5AFpj}1@tp$&$9Rg8JOxez(L^cpnugQ#2esC=oP?!X*q%%&GJXGVc&t33T? 
zwC96l(IDZ&B87pE3ncKbm?Ri_BV&`?So_&%*Zo>5BP2nM|kWcD^_YFv7?A^NJ^W^989;-)0PtjEz$*{r6Ku%8wQ4EZ6O=Z9R!fE zNnbzN=y){;?>YjoW#;!-QOSLess~g;!!%s_FzK|;24TF;lPkF`?D3J|1l9nCDZ_nK zhXDsc3jG$d8#>kohBK-JDJjGy4^N`Fc5^=MGuF^-Y+ptNS$o5l6$KHp8;V-cEBssl z|5!vfUQWxFz9@f1X=$1!?4+x%Wi1a1J@B3D7;V8fdC+YX8xE%muPxW@h0IB@@HFIy zQLUQMNILp$vEO}BW=PDK{DunM{hHcU{yb2zL2wd4X>zIJ91C3!vl*{}>#;mHPHk)9%SnAD7yFqvnT#(w&w9$TB zE*f)EPtqz$o4q^74w^Fl=}b#I-6CYd6;J8}@GXz^ElzeDCk|?bz=W^<1+fFdUfktp zg+qP|wXr?_=OtNgE=HNZBNo@&;>LSeb7s6`ODLukM<;LU&}(LO&}yBUD?re{7et>& z1PX<}$0e9g_1Uz)e?%t-)~@ciuwn{#Dd8FjIA~yHR@$1i$vs^;S<11#zz^=6yvd>$ZbB78a%RYUWaN3r}?oi zW5L}K0{y`2O-diIOY7sQ{FPb9tJ4clk#;&+sJNufmg;I#1)rYEtWreJ90DS8q`(Xj zpy`5;wT^&{!e&F2rcxPTfsmwK_l_G`jyp1rQ^>XGtCB6PXWE^19uWve53ec8=)&<7 z9IhG>&M5(TT=F>jv0LdAC2aVo^DRE*hSf^n5@@B7)}YJ%cpKh6*drP&*$tGm#TKP} z$x0#aa5anh-Xdr8Or|6dkL;TXj=Ijz5)lh{K$aQ%!YRm$f82~ikL6#ryC&c=>4PNL zdzk8;4h-9%cKK*+`Y{mC=g+0sv$Xx~X1g-E90Lk|;K;>lW z(=N%4!A9gH%5nC0__)ePFxUT$1@j@>be}SG21)~mU<{#Fb3HzRdEqLOws;vw{;vRN z<<{lk4ec2r6mK4XZf_Us7If3{nSB%_$c`lYhU#@zNt&UFC&5XEBphuC!>c4lVJ$F< z+YZiW!oe4Q$Q_$0`NuZX&W*%XDoxIO5DH)&YTlYyx$Am6%sR(qsmi?ufkY4cbWnKI zY1?0pKv7q?VEGj4gF|l)O!^0t#$KynkjMMW`empYX2tH_i|qS);|>I`wbFw<)d8+~ zN#qU+FaP~`Rg1m*QTl|vRFH#U|BSG=S^TQKL%#O;wlC0ux8$#k?}8*)PzheUPQ~a% zeolJ5G*+-PR6jJAl*B|LDgy;@;P!ESXIN%Xo$qgc)v|hMv=dBWGkiC%O^w5g#XONq zzj$MZ-27GC?_iZ=(6++jO51H0wxm5{zriosl-i46UL?L;PVM~U=hEMn%Hcuj z&w7*_I{?TU&3Z=^)iegI_+^wJpN$dl+aE zpyEq8u`G+@6+6nP8pQlX?~60c;N1kI&F%bJ z<(o0W-KK29BO;j}qojy6#3LGzjMVtmr5LnkUE)*xHK;*_vOfES@;f`SaTrKT-CAPu zF)w2fqFf!s@daf-&YbG|VgrMPCzhR1m_=iZ&}oSRfMLo7L@?r2AvTii*81esElug# z*WAjRkrH=Z_>lC7d}6?C(&e@>AuVL^1hIOZVUJ&ffUhc~mM?QLxrJSjLJ4qmI(Gs{ zmbba8ciNZWnA!VjF>wgZHyhTXa)o%C(A2H^Kn7y!j27(3^?)q9my(#@it}I50irw~ zpkLMyJ{{VlES;Z{cM#kfgg@7}?ivc;?3oeVblmuz$wNvlK--GgrHPOF@&;#l7eq|K z%Y`t8RQ4pK3;r%pqyfk?#;@9`wf_-iixBPRF_^?JkT=pq6T{d^x8=;IT0U0rJv6gu zYNHIybISTe2|aUjIsU+GFN7Zg*O2kgGHMD;x%{a8JMZW;%j-MtY{!Wj2{YvU%8??g zr+m{Yv$O&&TNl3)*Nj3Owb>Z-kdU;z)Tx_CNIVLxwh 
zfzuOI+Zgaz6K%kuJL8os#aCL=ZC6T z%mK@sWSYn6K3lfSI}7_Pgepe8_#5|Y`-w^9y*INgy0x1tyEFeWGU!!lMImG3M-hS7 zqM|Xe3SgkyQR6HIsEQDVaL{p4Diw+k#*QV~CS{$hwcCjpgz6w#k|f|yyiUNn5#4ik zpf9Y97b5F>JI1x^Mt@Pv0QdrhuNJ=sH9Jyw&T7a>@g@s~FZ}>v|D_+xZ}bT~{qwpR z+wRATaHUt=&vUKN{%ranbjq9q_n?Ie_VbrPG8TH;u&!O-F+IjWe5}?U;R!dD)UAFnk3A zC)9jJSWPT$s_a&H&S7h@P$a_2TOb7;l+If;XYO{Yk3%AP%RQn%1}ydgLIl7UZQ;`k zxjLr?&R?*J^7se5b*t!$KJw-gfr7*?Dg@h*4nxO<8fx%WoExfZjTuc2B{_?h8#UN_ z`@o7ec6i&$gdlsaF@d29x|8lANP2~6G`0!wc|>&BYD|h+;JM!;K;Y+Xo}S&Lmx0@e zMz%75J0SwVZ|hM%tD;2Y_D3lIw9DdXk{HUB*koDvY3!1;83`W0w;r=pj;O)=TKKI{ zSK8Bi)5`55S8V4Bl3CTi5Rl95DQZcT7meBKR?a)%w+Nt)7#8488~bNVH>koj(gEwq zEWju2kv(HY@dM0>Fa}eJxJRe?uYJ)i0{vcZ%cB=m;&Qb2;j{hPvzuHPF5Vh2RXKc% zTJjmL@A>MJ-}HJ;>v*QVQd_45O3XzRAh`b{;w>4GCXD&;SOo~^$Z7k(Ar36II9%QN zl6OIltObrX`NpuZ+UO@=ezj&zQ@=dm(T4L8i%IK-PaprUOKU_@chT+wxk=C1I$*se5`3z#+bUtZ>39o&^jlFqPr@+5onOVc`S75tsX%7COsyP=O|8Y zx+4+dx+4MrIW>Y;QS6>`nW90x1Sw@w)7x8;6PTZQY`g>RR;>h9DRZ!Qoaw1KpR@Hv z=a6ZTzP}IO$ZzbSmI>;V>98E@p6ip4c$kdyW4*b`C;zfQ3wXUD*k03?k|s(FMqvvXy%P!oVuKAeXyMMRY zrB?^o>(e|OmJ2^ljox!$AFb^$F%AxRK@bca0v4o$2vN4X)yDVFTi|WL?v4=9hB&_t zl8305NspJ?#vmGhX1hf^D7cGRwjI<^p-zdi06dk-R+jOBs2*}fFB}Ow#_d@$NYkgy ztY%nrvRC%{2ISg_iccue$;6WhMnx)_6qt&$^j2ytl*b+LK7GznD0ecuD&b1zULva+z+0NPcw<2PAoXdG)8(A@y&{e4V z0u#)})_9=qz;)6cH<0;moU(|4f+PM!yTdbdgovu`$r{JT0umuVf^)Yg_**NAV8Zzx z%_ex>6o(5FL@R1I%H>oX$un$;kVyXzY}ZR)m#7rMj*K6oqUf@E2MzjH2!31>1`fe3 zR&b4lCTMazJF9%07pbZ}#j$$ffboKYO{fO$dN^2!=jLi2{+fI_c6C2okzPMv(;iJ# zl;5mn?Fu)3VuQg0QuDPiK8!yrH9>0&vL*5FXr~%m>I3u#It4KEV*S_hv^xo&sShAW z-y$DUbfL4z8AukpX=NBr{;l>oTT93|YI#?798yJChSGe;g^D~&qnM8PP!-tRuqiX$c`NilD>_TB0~GagPE08bVX&tup%weX|_t*J-mRy7Av{-`yDe@->W)qUBU!{911E2(8 zpJ36ArBFW&Ud^-O318Yun5o+Nq;i=0@53~>krl;c~%m^bOvQ0xaqth^UBtz+^q!DYG`Ju*~u1O>Uv#RjGtUD9D z?ejJ2s}pvyLq{9T%4lJgJW41kLWXi_DM5qz;&007FfF;T+koxnD->C0E>|~7Nn3uArm*YAwCsvofhRHVg+4g7`lGXBL3n0oz<40W2b-= z-DdJ-)V1Oaq^NuJKlN^n8v2b`CG4fmSomG|F6x*Wes+-^&S_Q?qcB@buD}^#T27T{ z8Yu7Z!6XgbSn1um5Mg%6V^O}aqKd> 
zkL1Lla{6Cq1$O>iO8#ZwVjn@m^c7Spzx$%_9@0#W{*_N}YhL7xLG`2NdtPr;J6h>0 z4>}Zjkec9kS8qr&DR5o1uJv>TJG6lRac-_*j83$~=^=i^fz{v9eQY(-tZ=*w0J^3{ z=H>8OxfBT^>woJBx`asx@|ZK&7NUmEf2~shv7=zosBYZ@Fe3|W%)RFRfh-s&CbJl4 zhs`fTULo5H!3c0j9vNUurbXA!jm8XFyoXIwl`Gl)iug&|KboWtNB^qBO`YFo%glk` z+)3KNz}1G=)QEPLE^@sk(1r^OpvZ@g97qfvr{#ypsre#79*7!h6U{omi4&FE;JTMZ z!F4|q!Aqog$!IU`0IlscPiNDvoSvHPC^GXxekES)PWY1 z2OhKUp(gP$n=?|}s{V=He`p8pwJ9nl_#{{@?)#EZA74NEv0Bvcvd4PtRE?SY3x(Tj zc_`lA^NjX=&BH>UpaHx}zK$y;lNUVgu0GOhid`aUASc%MfnWzDV^<>Xh1yF}(N8>m zS+>_=>+mnMx{LB}q1k_UE5ylOhX=tt^-XBh@_(8;A*2s#Pjm~AFj8sQWwTAJTGJ;C zh2Hj3^qXy-h+D!s@wQI<$rUKKxH&B}o#@s=0K0h~SdFzpuDx}%25kq`kS^!0WKiQp zs&G?GvB9(p#4Dc6;-cbX01HkFpLDQGF;}E-0VmzB*fezLIcbsY32N`#&pPw%OoOgi z0@$TCzz7;y97PJ9P44rqF8L{!dSC%Hgw%ZYWVa%Hd4#P1jy)(bPB+oSU{a3YM?}6- zS{L4;)^M`yj3e}9B8(%*LGvH*(f3F~jy=^eMVqf5_|PL^rVA+^i#(+(Lu{ebF2$cy zCC-6j&zlpOnt-P8bJS~6X~7~j$^m4RQXKywn*sxX{9O7fPr#B2_>Y7Om9@8S0Z(dd zqG^aDY3mTOobmpceI@i4*+2uRFBm;KeOUjUWso3CmzLd<)7HUbu;6V5k?W3d!Jn1D zOTj&gEKa5n>~c&D(6RvLNMS<*f1vkIsy3;S%8%T|`d|RoSW{(%FR*q!lVLXA>!&AQ zT#`Vm%I)Cwq!mV8mWL5=vMP@C{JzuADY?8!0L!K#XMAaBAXE%$a&O=oY8j>sPsS|4 z88Tv>zHosMF_-n(ZzJ^?9jH)U9V3E_-MD2B60i9=pX3QN8Oy1!()&2udIzgUBaH5* zcFv>ruhzf@3v&H}ox3`U8o>Wi{-BB?>ZcLb3?A!+z*57|K@)^S3!|f8{kA>6%^@se zjGU*{M1czk#OnI&hIheB_-aW9JG@3){V60fX;TR6@`(5(s_u)w2Yh}YskhpN_43AJ z*d!*pTN4Kfn^JZ-Q$c%7ec`Vb z>1Ph0@mP{uPNy7IR%Z$lCqG#53J{wxDTduV@q7bUfARSc=#lmVfUCRoH6h{bH)$k3 zh`$Wj%ieZsdRA)Dqs7%_Zuu#dNmXxORj2w0rSSxZSH;f1PpyL_<8H)=QQ~>t_7;Clv>$uD%|#t+R*JkSbjC-?(3@~LoggQy$N$)0aB_Jy z{4a?h6F6lFj~C5YvYQl&ny?O9rAM%n_|EmsZNB^D5QZ#7Jtwu7HD{=WQ_lJ<>rKVK zWFC;UfA%LuLYSM;9Oy?YZqQ9~`*tNho+>{o)XCx?`8wtCp$XjL`$3kPzGA%}9uz|^A!496j+c#`;}U~i8C|J&zL|A_)I znZ*wG-#E;1-%ga`T5N3YSutqVT#%QXcyNqGpdrXN(sFA@xhcj!#fHTZ+O4OOZus}j zAuH4fEpmTvtcKCOhFlNm03|gqYA6xb%Z0(p$obZ%Df$+tonjeAsaKLdwFirSoxhSv z!^dLy8vZ?~u(YPLuql&^MQBQ$4DN;#meL@^`a>-VW{u=xm>!IuC2^MZvEGi0bN@Ir zbo4ycK~G{w-FU5Rhn!6!MKSXy+p2h*SCL`2M3hka3;C3CP!AwTyG6pR!zyhQk!wal 
z3=mk!e4%}??W+SwC$nLV2-?v$5vl+?K*YammgvjZEh9}O*iH}a`k~78uaR$B5JGu` z5Xj&RwwVwxVWq+0u*p!3poJ>=7{zDoJ>?>#p-@^>Zb!#tLP2gmKHiK~-|0o=LE^fK z7l3R_Vf4HO<@BVToNj%X|iVcYL3c){*XiS zO+rymGcmm9%#RJR_S&Ap^nRA3{GDbY8hJlug}`O zaweH)KQsh<=x@B>l+pII`QKO6?Zuzwc)1z{AQ3$|7Mn>)?V+pJu}=+d4I@gFyJ zDiAQIK8G_dmd(-=jV&n0jvoOru!A9cxCe*b;Rp-nDxY=xhOWR92cJ0(+DSFKs@8te z9RaF+z=WA-T>|1w8!xMV_hqxGfs+UBQyTQ>}5om(UXlnhXQ40^~;L$zlZ|Cv};L*Xq z2Fc@W{f&*_oEM6N*sI`J7S*hLs4$w@T3RI2&c}H`r^CjdhmfbleJ@MNM!6Bh2b%K= zB}5k1F9{K%Xu1*9ol4vk>9d;}H-TdCh z$!^>{MV#?ocW_w<@7@3}E_rA5&{GB}j67U!q?N->fdp!0lHME;{t*TZt5efHb^Oj` zldU|2_DdE$j|oJN&&SG6=~Jlh$0<^k&bf+;;8JK#48KRw^6QD+lG8Q z8*`Kllr4+%=iBhA<50zWtH_BT<609Q_3Bu|yMX=yrs8TI`et*q9r?5AJ7MQ$i!-Sc z{askWUv?I$)yFty-0lPb=BI%~Rn!9MrYYi-1M)bC*XsmHiK)JXP|4p`!A%WKT;_k| zv1Gj|T5cLPlY*w=kFbvL>brJ5=>7DJ^sFdZ9F;d6a#PlGF52!Jj)?HiBK*b>z8qtr zZ$kUDZ!t8kM!^~WN{Mhg+8e?T&0DTu3?^y#qd>Us*a`j&u>mJbnf#)r6@mLk^UCpIh^#=5+Sl;YY_5!4CfK+6g-*-R(9Yv z+j^ybJ}m?6FspeDnb75~yzInd{CGCK7F0-7V-JeMyuJCCdo&|G4FyVIZUj+8SL{k^ zv3xpWMCfSh<*tb!A+}K5*G@Bt$%j_=E!IyF0tEec#2^tsgO*d0%Q#qGd@4S-vvq9< zxWb_-B||lCnO5lFu@PGUq!3v^`=Gp#F8F%`mzQ3Rm-M~@f9D!NVdVLAPt5rbs}yOAs_!gu8KS=BE5^>+N_h4)}caF7Ft1 zyj83MP4{uV&_v?5{(XEr=-$?>ToTa;ps)z8%Swq`_`aPl4sIRpG(VoF3cxUsdq50N z8EYA-gQQ`d@5Awix`w|$;7u)h!RL1zbSnMee|IvAJvl;*rSq$54S@e}P8jTj0P(Ob z5|WNF^ZYegrhy%IkY!1ML^GP~qQY%J=m*YRJ$c`a#szt*su7r|zEZ@CVu#@9&uT!; zR>X@>B~D4~E!Z0@p^veP?4H*$n(-}A{^8zWCgqLNITAMlc`1Zk$E$kfHkj(t)>Zs4 zlMOOumOD{IZ|KHIOG)#NSgsMn8GCE)cSjm#_6&;A>0Cc7-_dJ6*Ffyw zLpT-}qC@RZA?r3dgR7LelHmj*AIItu4Wtl46apMkk4!;~X99@`-@))T`p% ztn%b=Og)JfI;o-bDzeb~X052=JrbiPwE*S>3m0$!O5^RIeH>#5opWJ2dMUs^xrK@* z1OK3!PrkcMR)h;8IiRUW#V>tHNuWf@v1qG|Kc-8CjYt616AnOMb~#nCns^6*ct0CII#H>uC|geP`TX zj4@K$12;CJgNH-+tSG8A^I~JsRm0%Tq!Eazq+@U7qG`%znRp)E|4}`jz5PeRzT40m z1PThsQH3?}uZ;a47++OGUPGLFLx$1=hCvUe$;Il)lT_cAUI7#oLM$zEQyzw1s03tt zK2+C!I0r%yb&n|Np)`xbf4<~gE{Hf5SYNPcyRrVnU~h^)ePLf$z674L1VhjslEU8V z9|ugf8^I;mSK3v>kYk84s!T>5UX7kA>4gy#j*->)(-fsyBh(|iJB9$`uiGM^x}J1J ze87v5X(7y&uMtYc1YGcZzAp~dDT?CQjkZRg} 
zke(dz-m(O)roJ*hw_2%m;NA18Kn%{CeYc!b+b1Kwj?Q0wbBZ(84nbQ;Th4RhkZN)b zu{cVD(13F%7O~QQn{=<}`YQldsU4gDv4uddL%PCowH6Fkgxjt177V{#ZriQq9*XB> zX&hsdoJ-P5K*Zx*?_kM=XIW$(ehxpKUOtf3*jbRc(p?CdvUs^T31YHQC}VC75-5Mf zCduYo+{QIty8UwIGb)OaSTNG=peq#{TkUlF&x6CS+RKs&cCEbZLtA|e@XHHRn1LIrbmgl=t_drT&vqj7&Ei=}rk7N#t<9SV%tg{IzqMso z!3u0}2`12YsXfm2sJ~BeT zHW#rwq`njF#P}YI3E2BcVPx*u0nOrw9-~O{d5O(4>0BIa4d9$pse%aCd=BhpWY)Ja z9uAUeG$|bbb|G^*E9;@GypKA7$C@sF#0v`#x8aPV(wr3A%Cb1)!<^lR56-KRY(J{i zn>J~L-xH)L9t}v8mCt97*2dxqR#&|%y{2#WUwNZ=bvJ zzjfO(q&To}WmAN7ABE;@w#KFdsc3u#7o7idyj6#vxAcTaA?NY`6S0nVKAu3rIN!`B zZFKQh`PCZ;I6brv%jCQ&0iH_E)TZ$F*#*aVTs=D7iD5EB11(QX%<0F~{eP-j;uXM} zl>Jf1gZh2*4Wg|20@j9VvUw-ePW}Sw-WvH@TWyv`e4t5;4{E>&uUtPWJ_(zt^o2N77*%#VuEmRI`z&f>#HO#GfA5PH~z4 zfxHQCBY#X-qn zW51R%n@+heE4OOn5=-@8MRZ?=B@_Pp!x$_*&E{Hn3uAJ5%LK_U&X~_60}GnzwUjqqPR#BmZGjTMvb`!epssB4$^(5$A-FG4G5ZWaT}47SN2~KQ1meDRi_X zW?QCl7(b3BRa4fyFm$anazO5O(g~mP*vKaHHy@9jGGHs#Ng4N^;gt(Q##u5flrfp7UG(( z9R}^Vl`sRl28aZJ%>CqEf$JyD!AmHCd*%LY&llVIpA;O^+;v&SR2gr|Hd!QuYBl&P zy3Os;-D^FtGM4M_tP0_{;1Plgkz#nL++9l{zr}nEq@&=cx2L5Nc9~prpgMNstnSyU_4-fuWFP-5>X;J&sWVz(Z-|W*B6L$aJ zxl4?_@G6f>Cvs48l&t$rKK9AZ9<9I0n!U}fuUcD~4c|&@rBkL ztuem~i^3u@UE-U~7Pn(`4H!HcpNb4_0L)-1hnl-7gFfutV3Su(0VLty6yMyj4ZiR1z{$7|h?wgLc^|lD+~-U~l7SZj%>(f$0ucd-bhwJ_$(0q$%7i7oPQVzS$vG>M(sU0M z?BltsxbIkz0r7)AUgGIS(SdmlTUoOW>r!9ItEh5qNn2u4DwJylk{Es;x_$=qMq&LstY$ zd|BgNAo29t+^$p?7Odmee()@IPdL25QAOW%G!)Ibb1Z{Zfu@DZc-mC^STlt?L`ytV zD~|m`oFWX`5`Lz~;hLoCD2`OuYGM9#jU)kDC*= z_GIYEkBDE|y6zcoO}+1C48FSP)%HBUssun+rm2o4kb3cRlEhV};NGLGkw~j{oV2ph z;40w&C@CL{NCz|#VD#V9%Y4*FXcPzbQjjfTSjoHc_q{}AnDI+!rOIB}s*+5cXxOVC z-)d;fBI>P|`@J%LOF9fowk=G+M3DJ_9-9)3RCz+JL$hq?nBR?Pc&Y5~ zp3v@e$jKG;p6pX=#Ij(pE{sk#H;k5k{<#L$pW{fHqYB|ot)9b+V@(fNUjZcLf89Fk zTeY_lI&gIl!#-8Jq`1eq&=LcW$imaEh#`mXUQjCaE-3+dW<>&r{7HrDOGpw=1F2RCr79)OR>0x*nrjD zCa3c(rk;@hx%{?jAkQ2x7?0&WK2Y_v<__pN)?ExH3UB3sVoFFzxnp@MkUkB3m56Le zo0p%~vOh(awO1)z?~q@y>YB~YN1cS)*UBWQ0c)gA@WYC8&koKa1hA4~IvhLy-dvLU 
zx}E|46UG1N8=aO(vfFGc1?=J5%c{wj3L&7njwH&Y*{PFK!i-=F59SAF8Az{Run+q2 zdbchR)oSzcBt%*E((!N!M;U2(i`UHMqG5f)azP4zD%dI_dxL#fm~Z9Ej1iS5*rpej zrGRCl>PT&K>@k1iI@-+n$4-sJoDtw0@&QV$&WxNXoJx z6d-cEq-O0V9Kd)pwy2{qH5$t+8PQc?S7ouCuqWTO}WBS@C>)us!5~i{oil_`Q+Nmg~iEY9m@p z51qD)dIjlium1sf(?2 z)|Fl=pT%gA>Uj+S{?RPffiALX@5RgUJq0>cAaTaj0*zk*(n52>@O7Jj?xy+Vq46er z*+Ya8%53bJixhBpvpN28aw-Epy6|{;DM|yE3s$l&OG^8`2u?Rd2U@#dI|Y_bDtJ4;<_wS>0Qp}w2* z;;zI^0+Ov<$@tFqZb0$rOt*3JxCYl7*xu8Cf36sdxD@L{Axeqa1*V}wd}aJ~lg)?f zOsG5C*Qj(OzbxGw;6guIRISGWEtu^QWR#wA;$H2sCM`Y>*$-@a)@2&l$%bM&CZZta z28F$}yD4`<2?0T2KP&XI122F@K~5{keH2LEGGt6YJ_xIzaEes0m?>}6Be|KLk*_xP zA=;z2TJ$!j;h7Mnv;rbGe^Wwv+t95T-&Ul=d#Ox#_4Wb;OQQ>@Sb7|92nfH zoFz+?A!G_sKIViK0a%CZhP7b`E7hFUs~T85s6T&*V|Cac3-;A`WQY)yF*#Kl*Rba_ z5egy4iFWA+49=(aik>YlX%ez8 zlP#Wv)IqCRyiKWPjY%wWh^HeoWno^uiS38p-;_$mySip|SEMSnqnH`OYFr0Ea=N9$ z?|#WK&*1v!y3Kkg1LCUF9K?@K~_)nOmgVmL5Fq6w?lnI zQLm}`g$)lfO_5f^P)~}l(F;1X9qwVCyj}L{MdzQHOgCXU2Z^UNMjfWR93+hL{KfXH z2*@U4O$$CK79{Ov@w|5#?op^mV+L&iN2Y?8&;;#(@bi#^5dQ1@OMe|AFhI#)t#bsc z9j{0NTO}K(iKkZ*Hf(kNEAt5NWvl$;=u6NB-6HB;|3YIys*Wh@<3w_!CkyPu%E~jj zY7|Csvt$_%R20c(y!sOK2{crl%N-0LIas~jUJX1F?lAJ}{PB)PbQqS;dkg0+`S`h( zb(Zc;2qidLraZAn*S2_z2PXFyrVc5xhdiY%aFF9>Q>C0(LZ#!@xigJYTE(h! 
z5yX$)M11*t@QsC_an!6x<3fAnHA8|c%daWr5-#0;H6{e6fro85J8l+HlC?c`+m$&O zO2$3TL1Yq@Qxj>yuLf#M&<4j_)IzDpEijoDakL73^~E)B={95>blBx61yrJoO7f-E z&fP-?9d@7*%lsps3GB=2(EDPVqN?ZoT!6vYjt$Iz4>YbAfh|#Tl~Pf7wMHHpiMP5= zkP0#+_D8Fnw8w+JFQbptJ+o+|sqX|(SZ+rZVW&^lQhx6Q0*WF!g0;QP&yz_Jwo2R9 zP8X!02I9+y0F^ooc&uu%w2RWSkS-wfF*kt3BsE?a%Xx=8*-ri+znVJidazU?DVJR< zcPK?5_ONfxVFqK$HzGm*Brn)yjk5Ii^D}vY;$EAL*E64^G-gC7-!HwmH*d99Z&m&I zzxbWSG)^tUH|^{LYSr=sgPzL1>#v1g^*g_VeIXKzV4}_6mD+CQIe_r-L%L33eX`Wi zi$4wzazD`dkEul#+h3!|2oi0mFXe=4zLXm}?Og`TOhEx4lf7+L3UyVGJK7fG7ll}A zL@c4DyQEOvB$W#>qIt=hUV=4Bq!*sKkMu6&eK#X#1)GN;pIc?_Y>zQ@X(a8z5qyM| zPz}W_jpr7}b!=elaic5-@~Qd)97f(0@LdMV@*8FGT~z_vX%L zfr5DoFR(^sm#V-BnO-^Lj5RQq7DBvz&EDmpB#Lt%o@;=R=>1~>8o zQo2>EvS%ElTj1EY5j(twHv$x#owTm@uonCdzG7BDA(FC|TfVfhCC9z*ougZ@SKS2z zBUWXe3ts!2YYyf4ikOvui6ts|PGWY}_P$sQx=Xg2YI4csR& zvW&IB;EtzE1@7S=R|A|kddpIEn=H098(;G=SyTVsB!w=|on7jlY(4kXHz&Z}CoYRZ ziLsW2)_3RNk!!}Eba+f zlWt+vs5nscfUn_(Ysl&`u7~0*(|lxiu@V&T{L06$3$K@0Tz<;7SOuL4h?L?WauD1T0JS~QaP4CWzN0M1Xy z-&W}03As40`|b{r%&>CBi%yfVLt@m2nB2kmIhHiIL(FOIIm9DwMxS?VCIKp3b6d=^ zJDr7mf?!81_&{}Kx>Zg9M}>>WDkN*9>u6z@vSfwc$$@>PM(k_YeD`(+DG{;IvMLqr z&;^JVGU1RK|F3$JyO*0d*r~Z%8nQ|0&ORQJ?=Yo=0)ba-3FR*Z)YOht z(GhR8hD8aEU}GkNexhs0^$!p3oOOx88}Py#Qbx~jzax6Q&uI**Cl(~0Th#7nd1ep9 z#Gtoah^?8HgY7#Sl(xfKC$IU80%gaAQ0iqy0a~H#HVUgAYaKHo&vqv{+X@M#5R*U3 zZG*vI*)j~CNK=$3OHbs93#IM~eAm5{y?ffpdKG!Cd$a>iRtVUD*CWX*il})@_D-S zKH3cLJ7<=Kis7nhrd(iBT_)=2L@kwM1g;LP;_po-WGW&|zS*iGQf7%rP)4PE@C;qc zj?%CZM5pSFS?Qyil!66&Iuw+Id&rgEq)imP2$H2v@o41AVe|127{nMQoyP~Tm+G1FA2qY#9_mjXNW=Ah)v zDHzcDL-EEBx3^wRu%&iQZ%^SCA6JF@_|(Vz@g&dqVXw6}4q|mc3`OzmvNjO%jO~V& zzRs=Elhp!lD#{*`=w<5w@IF2a+Mx;BWtd69#ME#Do;f;_0^_7PUBzI(;`$Qd>zd$cB-u)UBKuIYgHlf z5%jaE&FPW-A7y2a$@PLZ#?3MfjL(+uSBiDY->8~D+J&OtZ>m?5i_i}dL>^aYKCwid zBNSvpUc*vCbjBTa$|kf!QlrNkcAkN}z=f+gGYyL_Wc(Y(AYg#3SQH+`vzSI5X%rQk z=_n_JlBj{%7a0#{I_?t93hpBDBOAkEf4m4MK8?;E_wYqK)R!}N-lvp41-FXCg?JKs z@gsQ7WyS>6pIVxVO7U+>sn_^(x~daJ20^VzIfO_i4*k!JzozTKo7;6V#}*INe|&U2!MZ!Q3z&YpzYuHSokm87#gQtl 
z#u&J$TYa1GGTF(!4+~{f!We-kC@%twaFmpR(1#E1Qpa_u0+L&f{Edm_N3PJ3;PS<+ z4+4dj7A!c%=o%oZ%Gzp3mb_6fz4I%YrW{1V!vmn+!eZV}Lf(?qT9l>XZQdJfLx}q% zpq1_%*Xr>3=yn3-dP3-D`x^$hDty!^O-U`s0oQ&z2zxy&bHaC5?U?N2k-%pv_Ooy& zf=+?I_n(&S7Tg`0k|J>YBfGE2M4 z>pSXt@XEn*cHqllmNMPK9eBR~I1F6@# zeUbl();6+TPLJ1lU=mQaq&yV~IK~I@zrf?FcX2N9CB-{{-~`HbVB)!x11dzm%w=W9 zlg)y*i4DmXQV8bUVC7Vr*3i|Ua-gVH4Rr36d?eix3us+eLxuLzIEA2gaRE7Q>Ui2PSk+5%<1eNkC4LD0X_JmG>x6lyXS+~BeX8TBkE3p$nA zQkEf%V3l^$(|6NPb;$8aQHtap%0PAbPhX^nBrzoiJLP|-!1m>S^j6n*@u8pls6CMhlre36Uso?GE!kKbp zG^&J49h-yDvko9h>lUS-UQ(zN&XHvbkUZmC@dO0vPUKe_kj|RcM45t^K3a*BI!=sm z@LrapMS8m6wO|;b(6S*H8y3>vbUxL)H zKm4~Jj$xB{u7xgV20S-)Jqo!CwfB7fva)$RJ;T;8dvRlX+}UC4AYEvVi?0z;yN_$sjcl4&^T!Sihr@)oat$U})d_Mi>$djj{KU zW91(VafmX#Y^j7_K_rv+a$i^~Wk_9DmHOPV4k?0M?VY!BpqC&_ITmTRR7ahvMcAgQ3ZXC{!m4i&Xy z!@ciJ-P}CkRA4pnRlBm$BvCTaHh#O6YdGk*6}AT}1jsPbWwt_;UI>G~bFO2obHA9q z+%;%fMWmcv1 zna0hXHRu;WVDW1IvTF=1O;^&+48(4V?$z# zwh+A|r+`5bH6ZHZHP$#6yd$5AY>`9a)=>8OZr%eV?CH8x_G+@a*qyuM8k6G#xuj@6 zq}U%N&D%{@iAbYY9LDua9ONNUapkej)nCU#ZSp_Kb;N)a%9Ms19a}Jv`DDgao|op; zgBQ)dCs0pq6AS5=3EJUUM&W^nT?RB3v?0`SnR27m3u#7P_QCCWL>UJWahZl6gKZAI z7GHRMB+xkZO{$34`8Fv`JZm<1cd+nw*4l(+ zWWIvKY~$7?%V&>T`s0Vnazqp)>39+tqJLBh4%p#$Um)T4kqHpK41PI9%Ub^v0{J>yr}mlg^16=m`=eF91qTq zso5f2a}<_$R7FEb>OHt3cJx^vM~2~sRv<0!p6mn;myiM2KqHx_5`fY`M2FW}mY$j@ zwXR1a_?re&=cd_bK{c>hJm2C|Te1zuv!FrGNz+*WQrlSQN!{S39Y(-wrfdHX)_xwg zyhqFfp?`2!SvLt2P1zM?+{h4K|2;Jo@u46`p)vT&wW31`4pc|UZHY_fJgpwgR+%yl zZNRsG5w{WlUnsRFK0aQnmI_vgfkyCk@lV-AgH!Pb{_0)y>cse0LPu&u)Fh|&q0mC-pzXpHmxXA;c3Cr68=Ux+^eC@r`z@s9j3%oUN$ADHa2*5%wG;B`&1m{ zv12(zWKv2apT8Re*J|yL>D2YALl~RehR|(&oqN(#z7b!#ZGZ0z>;edP8+D#yaeh4~ zUU8t9$6daY<;;d8?Z-sL3Ns>U9mxr(KUQm`U$^2b+8^MXP$Oc4&E-pSBP3bNY#g{2(1sW5pMMDC;x(4D`GM; z5TF?pvQyCth0#<=+dB>=f)h5!pl%lVsfjEOTho{IRVFW}yL0)p+yyE>8LJZO@Lkv! 
zENYXYi6Cn!Cd00=I$!g&3}$H$8iDJZL)YK^cR?Ng`+v#nlZ_sl2hWWeR(S-vxLx0h z^vV3HfA?OpL2a_-rLeZTmIxt!NX`dJ^wLGj(j`S1VUr0mf*x#}E^Jwu9j9}p!%8Es zJ77U2!LqijvXkE%D42s0r-L_loP|)^b!{z7UY`eu0hQhXQR8sXXus*3jXiONBVqnO z608%o5N5DcglSF^j8++wxq`sM5@!$GoV89j{HQ4Wp~!Ysfhh6oGaO6)an=tuB;CuQ zp*t+9oHrR;7kO9QKv8kS;KUJ;kkya%8xU*7rWX?)-G^Ico6ERL4wlop#c#bJWlL6H z%sCLut2d8~%T%@hWjX)3^$REz3&(=`0o5?9Qc7T3!Lw4Mc8|JbJ1l;>;L}u#a0Dt@ zB0^LIw-w6P2IDz^^jbc$=37H%QN-BZNXQv2fTTN49|kyoUQO&^#N=IA^8Ln!m7=8- z!Ndq%X?DEt`<+!`r|V$uA-_|ZZ{neCXMpQU0h5^C|0j%F{o57jk^N;Sj{L9(oY-a6 zP79^qBb)Z=igE2|1b1P8UIp+)scQ&veR|$&&*ZKtEyIuWfvq=U#nvI~PF?m#pwkOOB;l61H4S!QwA0 zGWWOJqA=N2i<4$qnJkARRZjDODGgs`!3u0mN z%jWPj;!s;FWo~56t%L#t2t`X?O@C?`>Aork_Cnsib2pa!T|EEmbKjo;DSQ~NROr3N z7XqY9d)7-NCRH>dkEL36?gggHMPKV};A-$}rF*dsgczrAl1hPx@!Q26t9)>SBBH=g zLh^vO7dC4U<==j)q$Zws{psYRgC5#yna#pu`fW)9yJ5?bm+x6y-*KT4TpM|X-uQ(% zz8uUW7!@8#bO;uBmV56)S`6O~kch+*eaH=cR6T?N=MY4mm8W@v(GTX=S}1$qEH*0A z8F#2PSh=Q82cY#ggA|d5H&Cs!l3hK4q3yR-O)XI#6SC)UXAVZ&0V;Yh8n07Em-VUK ziFQA`IwzBA0#O0lPu3*SEFm5!3DL1<;|*u>nQ^^XgLRdhax6AKdfeHtD4*a%i-H+; z)`8Nu#n{*_z@gEQlc`tQL5N-e>ffy`_W?dbEF)$YSVw^YqTSY-QwV?A3rQS*M+O1| zBp8eL)1|<}nUnyWJGnSZC{;IOvBRz5U(_P55t7vt@-BG{G$$r5XU2*b zLb%G4ShSsd&l<_GbIWj!g80QWBYUj*!8vJ~$g34^MuyR_Kg20r^;kpKWyqLlx0kDK z09N$I4AIgbN&~Yb1X*hB`oK@U&uBi%IMkE)p#bAC|EF*8kqe`3@(L32r61A^U8-*P zN9~`O7b%T5J=uLqAA%%Z>3Uo49OBEwRZJxq0*9OSk?ZLa2iMtXeQYlojL6a>`*b4W zPk-8FBHBXA1avjFJ%_k6m=RSHq4ROCva&afdIBAdWw@si5rpq7ln!GEH+kCy5T1(Z z9uGlV4l1&5qqR^1SEo>f;HDxJA}D>5kZ5>0)-rtCcQkKRcT5B3(SM2GyqmRtiq`>@oh!>w(@<(w?8EFKX$| zs6mix{*9|os+LlE0EYNFctn}8=nV)vj!2fdk1ueJ1p&uK9^2p&T(!gS|iwpghDb4%A#NdST`EW;eSZZ>6>ifl$-%NO7#lRr|F(Ei1_4wIi=rZ=D1mxwptS-> z{!#_^A1e&) z#3wscLc04dbzEFzM*r-m!&QpgaBH^51gWH-Aicg(6uIXj^mmig{=ZL*r#|2< zNe}=Wjs=utysHuXft|p|uhYAX7s|3r!nWP894Q~h$#+%|yrIJmC4**3WtLN5|A^ew zf>RZLUA0-*DC0wp~d&nmBvm6=b7a>UT0= zgy(qgI0MgWOBiEW<=bsgump=s$h{UZ-eFj%?d(!g~y_%UrV4xtsPn_fo9o z?B|%r(MAx~t0siuYvuN;jBJiH_}hSN3PX27YaX$ZzQ>Y$qlhniYBdC;*U@LxrAw`g 
z#CPowW-Ppo&dqC#b@C#kQ+WpiJQB&m*W8pj+oPNxa3;=2+JO~!%%QBD%6Km2ufu^X zM5<1)ttvT+mx8h@F-#jzBiqQ)V~;nQWi9w$t*(g|A#mEp*BqI!NQ>V|?}2N5T;IXd z`>)8fa-o>XiWZTx04BRWgBA>0`V83M zlEDQMglR_##7gD0`lw3UsmLHhOJFH6JIv11g^jSS-Rd%N^9 z?39!-nAF#}n{tIWic>gmC7t6{8D>ulZBa(oTpU@+n2e?8_E8feeD)oIy1Pu4D*%k% z*Pj`MMTx{ljsIJJ8N8qVu26W>iow_-NzPER1hA`IkbCj@b04B@jm(vOo1SO>98T!l zh`3W0$M-yTI1)nLRQ~Rs%chHttqMlHD_e%DW>S^5$I#T*C)zQ5<196Linu78ugNlX z!&xnTa+d0AaFGw?e2f)@Di(I|bPub6)ypz-GV1%BA0YE?qgG{*@5T#J98doKqoi?| zQbGZmKjE1_YiK1ov z!=C4CLQ}9*Mk3fRr>qS#DCj;}=jkNzxrZTqpS%1k&eO@-d)=DKj%%@u=Kay&d53|a zWD6DAdG@oqF*CG_C9Po7;xo8PO>@h8IF|~$wm}0JIa-PiiFq~1#=A9Mv(lQ`z*>S`YFwq{lM7>D^2G)ZmOn#X%+^H%X)SOZP z=m~S273LSt)sL|m*-EN%QclDW8Stl6neLD;MyWlNe;(00O0Df6zO+xg%z#*u|G3D8 z*Fxs_VIKk~Rp*B+QXQJRd2T1AV53oFpz4PimHse};J3UzH`&@Ns>aIKEhB2L<((Li zkRv5D@q$-(m+6sLxA{w&U|oZsl>GXd*?Kp9fY>wm<)8k%l88&Sfn$`wG;<#`*UwIf z8F~AYD2iHlO4|0mwwZAk`i}_M5zZblXgOwOS_7r9imKjhq-vB0E!1kKY}E>y0S7rq z$98v0TFEQ#B=j6dG)6QFy99HO`T?puV|T@pFUw<6$zACaGm{b8KcwP0xFd2Uq5j|f zl&MX{%R1-d1em{Bkjj16T_D&e^O#UpA8~{~1fTIR>t?+m5FJ=Tp{+^bEp zUJ{=C)HDf#EGbAad?F$HtV9(B z7l+XSa*B|#$`uN;V*-t^p6UP(|G5m=daEo{|9J145LzxA`hvu84Vj%#=`NAviE&dJ z+*xd1d-sONT|2Q-C2yh$({Q#NCXfPV8@upJ8A!{HCDgyS?a-kE`}`8o=u0YkjE4~z zyEgPoRs}-82Rf}4ZzC*GuM$b@SG(Cd*O@D?|`IG(m4b0s6}@Yx~t&Q9>p~hfC>li{5Fn31t_y zmoqyK+dtT|R3twtIcVJRS}mdEqz!`KTb;62*u6KK;uM)Uzs7$NX>#yl3%@uc0(V#3 z!!r_!uw1e|je^^k?W9K~&!aJuu8(>$93Ru1WRFu}Wm?y-0uP5{6G4RMmsxaYcN0@y zMZ+yLxZiD&h!)uXE_yMN%$Os;9L|7j3zM3{s%#}uJM|l(ETi`#q4wMTX@$8RC*hjj z@RUM#L%}H|MXTl;@rJ`WUyx=gp#U8w@`D3~+I^bI(MDmWv!h*WdeYtY5WoxK!hgM& zdvQJQb{mDjzN1RV5FARXB^-sjGWxF1BPSIycOb)BzXMHQ32`u&9i6S#hZW}Ybzr=u z`T}Ht!*0w^22)9$;P3|w&+4JH?CzbFP1r{bK_a{sWF26=pUj+^q~VduR1fu+4%s7% zzX!MrWY06|CcX{hmNGfRk4bFzunXz-_)2%ci)iMXf zY#=?j_pPpxjr0e1zoq16+udF}@SSO!k}5c_pCD+kr#k!I@}6Y2rpQ91s{AD_$<9Ui z)6CP(wxT7?b^h^fj8@Nuss?jBc&C&oTgCB~1PbQS+Hl?5W-TXBd6q$_*3=kNETDtK zS_pqDIIDBi8b%Nr;|_gf?~I8HQ3qu0`|1_7OR`Vkj3AyO2j~loJ z!P1NCH{ITN*bzDJrSZZM0n+b^?&x^+o(mz)#zrW>`BZ~cd2F(;%AZP&V!@13JWd1} 
zgSpk&S(Z&Kyb!XH=y~-~Zf=;AA3(rPo$8a^e$J71LJmAYeyRH6ABOU|?YO1s%sS)F zwr~1F_=h`g0&%VGemhUvJhjusbG3DuP6lD+MXC@(>chd(6ZuE?Zj9iq*aJJVY0In^ zXC7Ms`e5|(B~~a{U}*%qd3?q~ZQEAn)yKd}%ddNS{rTL>Xu#OA1Xa-(V~0&||9?2` z1Zk;@$aE?)psFTz+OAq>jW|=dqZB?D3O<5PdQnPCc8+$FPf!E%m8KF}FIqjdEal?H zEPqMwE0KYT^4z#iU)lgnH^vw|HD4nA0p#ef0n8zlZ-XB8M22X+&Upa2?!)?2TJU{k z&83mdT}6BFvPP@m+@==-im?0GW@!S2&;~tk# z>C6XmH`Q+QJg;38dw16VTNdeRa1IzfMRCc4Y(!eh`~@e1v{aHDS3+jjXRg)KXPgF|=gUp7?QZRW%@K1hj)QptRB~8xiC(ID}9>bF`=?W^a zvuR3crxk~TQ0LXWYZ)ZS{zy;)hG5>dyN#@moJm~eB+k2PLMtc`#Y&n$_f(App2{+y zxhN`9i<7MxY53;{i|?16Ub=?TAV54j#+|=r0H^TM7rQI_q-fr*eGaG1W0SN8yzi#u zLtlV-?Cy;1(?%J=2o&$n!<_l`+t*YPnQquJ=(ws701J+{I4 zX>}#k9I-Ic5{-!;90CL_4EbPnYecAZb;_g zvwWPXLH=igq(I8zK~?k`JF~0Uu^oN(60rD zA0eD+#*o<8f>;!KSn;TKq#>rK@niQ3w{$c+eN)FC8t%DMrFCnnz0%sDh|I}t#Df5@ z30{@EDUh*IT%B4jk@jF}I8@LswfpHGiB*H^s>gTvQWq7xS^X&$#2HQ?cdHPRYhN^P zveNktbapb-4aTRGCSlxp;iC~dE4yl(tjVX{Me%=2k_SeA9tTI_%2fyba=Q5z&X%yq zPWeRlXI){d&Q*>aMqC0O8eLl1TP|?bv}`K!&yfzUK)3vu!%rB&rkoUY$LqHix7z!0 zJlAX>iEl#sGj7{FM&I0yUuj#*M42}U)!PhV74k)%w zelIMdqpb#xVR=O#H0GzNY;!|{S_i);3;X^;%O$#vUF>#daL~n_)`9Yp*lz>cFot`Z zd zZuz8udtHa?0R`!Z4Rq6xg~oBE^KaFd7$qZAJJ&cU9oNmjeRogN;<((F$t*DVkI%eb zziT4?BMWC%IWn**%)n|jrxX$mcb>yLlRnSv^()0|8ro^Gwa%Mz5c!#}2eQft!TV5p zy|3pXsOM|tJbPdbY()<7SGa)Ld7%A$1rsyibjB+A+Dfi#^G6BIW41Pwn2SQfqu8+t zdv^gHv1uEXvx;zaQD-tCm!tj&>2%^dn4Nv8B(ou}MLfZB$F^Y#-f;zruh)xkU=&D* zNF^6$za1}R!3iYLfEE1^6YXa*Zq6uoZdi=L5e!HWYnWyrBwDknhWtp>Of?Fb+In1b ztQH!1{h#<}k#1A8z!PQXuH5UnI=-uNg;OgyPV(oLu+?N#e(fTKVoVqqsgx5r&?4*o zj-xp3Yp3(6biWv%hP(vpt`KR#x@K1Hrjp6=K=EfR2kHk6oObBe`FRkdGkB8hk0^@2 zxG6#?OiD-D-T`TXSL~OYwDwLoD3ZrORkOM+|J-61XQy-Br{BO*E)Vzj4~|Pm;r2I~ z*;1D!VFJ>bj~dwmpHUmHc)9e|HTww|o6j-FXmd;nq{u!M5KVlg3@5`AHuYG&a+}U4 z0%Bv{NPmiv&mw~Bl z`=aTynN_A!oa7l?wq$mx$QyC9Iu2-4dB+!zuzU~p$>`7mJ5#6+xp?&6(0EFcF`J!x z5B4(bI+i)oJ{F@O)gzP6lXGr#b7J|y8j6AeeS}^V-jnCkw&Ta88P?IG)zb;BaMc@#?vxb=I7)@mm z98?nxQ{dyvp>L*bmsF{=zxw9b>6@u(gsIr&qYiE}?H5$SYUc;!te+OvIie_E+~X-7 
zW{f9*s;Y>o2>mFM0CIqT0Ov~B^8qYLh0`zMfO!eO>CQl8f>wTksbCcyuHX?tT2|^=&g{)+7m4O{HVM|GW`oYN=69 z=cHn=%jY3#bh#+CYc^A0&|^|=ft}p7jaL4=V!)tB$kq8 zT}C|fcj}IRb3<~n5LSkB^g-NzdG+n3T+T|4#+|!tqwZAZWBSncC<*HvvQ&*irwyj! zx)bYXSeKWnpa8knhXxKI0iO{0aphNV3DNQgKnLcz_t~+J1MH5=V<%Q8P!*M|MQcC^ z{j71-yn9}Uq>pHr8UNm`RDK>65k!?rk1rtfJVwIR$KJb3Xx!6>jt3{yzP%1mH|JN! zO)B=d7AHoHTWLyN&6ChNM3Y5$tCoif9l?vyA<(^4@A`+F=L0lAmPRZjyvYVC?b8gT zq23#|qwJehn{{l9to3cu3^P+eUIgN0YGX}Px?$r@!x^#>-qHE{XWykE+MsvFTr?iJ zfXhXl`?gCzo=2KGksimBLR|AYIak?!U%`vX>xcJ-Ln_Vq(-jpcg}PiIMQf0|+2n)& zK;wTX@y0$nD}q!PP!soNP|m+CN6WQ;`?xvtG0*S@1NTK=tEC1E*V_C8Wkk?1bSY{J zC04j~F8?e!jYD17DzwG{efh!AsKcwt zUuSk-j7dK>v3EP;@ZaMnXi^OQxg^rDNHKgl2g-(gR=W{9Ejk%jE_Xt*c;XP97Mf>P zx2_O6mBbcW%?2*!@xF~U&Zqh=IknD0;vhI>Al@TwVlc(0tJbDyBfg0!NL<0XRol;Q zpE(@@-_BTdi?BNHtZes2Q`g(>cifd!yC@Mo?h!*vc4m4*K?nO+>`>MY z9@sngogtT+@BK@b6yk28b`|NZA}wZ)b=7bGX|CzGl=ssAk096WV_TO{`7Pe744AP4 zr4B~%a;$>Kv6nGG6q5oS5N;DzKKn|>l@^po_r&wi{pJ#eu;7j2o)A3CDpxyZE5Mqa zgj>a`!+%>_7dp@Eq+miWfP8E(;V`!?zki%Q+AbS5DOF3NAZWrcgfYuwKu;#g6b$CC zRBOCcqnGPM;v9hJADO~V6rc?kxjU+_WmD~MSko%bNNeK0nfL#ReSJ-5u;N6gjM%hu z|IYOozzptBse9}X^0J__wgj{tp@W(`?Lz*84@TmD9=6VTb;Y7uRasky<5NYuo`HX!Z+vJaO*tAG zR0D$ig5lSk4|}f-DZ&ju5NKGB;ek$sr*?uTFN9%}<|ipq)?Wnze5tD=(QuZWvDFO||9`t30=FPT{!q z$_bb^>WwJ^m_EGWnX{hAPTPb9!M*e`K-XT#1P)7)Yq{#C)Aj@5?o3P$_yl+No$erW zT(dL{6lV7mE@A$O^DB1W$@lbE*rOM8aOOEYUuG&5?btN9h1`;D|M0SmPQx0^j+iZ_ z`N^8Oo4=hdEV{JFH|U%j_r6-~WrtYk@rov`%WbX3IfNV}J$oF*KVv{dJL*+sCjZsY zK0rWmaI(k|nD1l14DF7x%y-^=p_HuwMiZz}eSqSLG zHFxXHkC;L*;`qE@+}{&e&%Ycb9C^b>>{J?B1^yn=;55)7;zRq*Z=! 
zh+vk?)P4T9FfYBP`L8lB?*OF@IcedWbDAwn;|;s4z?+W!WgOsNB{nTVgn)*9Xm&Vp)Ymgmps?@t|Q#+P%|wZ`aD}Rq~m60D0|>QkvHrQYW?Z{ z4}d6x*-8^j{Y93%A^&ZPL4j!lxIqXAhiN!fUfCd1N-qw26WCf=&>Cptt(@%vZwV_`vq=LjcynclX@4R`|&#W|!zMYIR%0B3|$^z?QYJ0FF zzAP2n5)hyY8PcClQ+_y4D2E?z#C2+BnGbNflO&*X3so1RmRNeO{el?xpPFGz$(uOi zTS8w*!LLFCpyhYibf=tEU}^nVC+6M6}kv(dShWLh^lty8B@68^U3k>I|SCJsDbb@@M<>~QtBX; z#d}*QnqNN0%d$T@l18D@7*73WuH-uaK!Gu6uUvpljERWk=tka?@?>Jb+4$JfNp_IjQ&ZhaFw}Q-9B}M1_Ze(T@Su3 zEda2vMNm~~pFVjn-02%QgJnegAq^X_+vl}#_I*5YuTvk3O_^pNKv24$#YU(c>42yA z%E1dD<^R-Po8)pK0SJGI%8%>m|0KN8QhV!_KbVJh)MMCVvs@0>769^pkMR3-7d}`MlIkNOaSN&F+4ei=z;&?PT^ZM z-G*@ZnE?f*Mih(YaR0n68!~TUs56H0#n8nY zarup^?8B7$^5#V}sgM)cz5H=Avn6I_Y<&C?i6&W(O;3s03YgORRD@U*{gQMbKRheQ zUy^4`k|;fJ#)5FnjkV@k206_oDR$p3%u;CD2iwt2`jIfY=RHQ^TVgZO6O4yPs~Z{y zZw8*|vpIl3z+-6Yu_Mv@g)DK&^6SdSLJb-~%OEc7!OPzC^GFA-yapoo`A8q|yNkkq zsZkSyJl3V99zb=UG<+%MMcoKJB*P5s$kNUF?J9&nUamuMxLUUyqGFGHk%Gg0A33Yt zgM?eYB2&%ynD1&;xW7d1o}*jh#%`*rOJg&i(|_3v{Qy?j-NHt9HaG@meOPCavPy z2x`}EbKky4gBs?|afq#3T(u=scY7`u*dpeBd~k3f94X46oH5Iz6`!1Y8LI!Mx2J?X?@YLCI6a^#yw=)qxu#n-6H7BL~*L1 z3?~P1pp!4aZ{vnLC(=jPVGA_1nv$SHA`Ixw7^QXZOrZX)kc-O2w0&U{z9 zIsv;V*(Y!e2h4LXHkz)M5+r!dk?PhIqqu~4__`5ExtNtXbaTQgu8w!2ufG~X8)@;W z1d1w9E2f1Zqxcg|yD4^^1$82yVoe?bax`S)o<3}#QXH>Gl#b}2|P2QRz1hz$$u_;5 zUrl{xBJV0V<50E`_3kEcj?``BF`Bi+KZ)U=B z$=GVdn7JpJ4T}`}dY?Y=lQ&i(Z{)d<yM9g}hjJeZlSnBJ_ zdYnDd4kr}|6Xgac3I6N?k>P&=s@73x8iMNxwo0i(? 
z0Wzlir!DmoBHx^w0Yh{1_Ny?W3cvT3t3)x{g$Do8U&&SmmNenKR&iJmI8OmaL!iU( zW;otKIuB4|{2PrM;LAKX?QLDc$#hGYXXjpALhG01LJw;u>J-a{<9@$8_*yCvGgDjgQ(ciU=>Z+qSxO`v=6i@xAIAPlEI=u=y^G$Ie66WU9Px0WL^vX zdw?@)A=^FNk+7S;tEPX#2A1gO+^L=^0`TnyS9rx)=1l$(jyE6-JX{iq-4#Q(FZDIe zbtE9s;t6Ol?KXuN>O$CGr)@LNcHFCfmK99wtv1(UL-^yuD=jaC&+r}v*ZH2BJ>w6- z!Dd#Od7(SaynBMTUnAA>UKKNXKCl0Vy``s|cCxR@CKaIEp-D-K@?ywSZD*#mB&<6p zua$!mXVBc!`JPq^%(#$Ag85Jt%&Can=BpW1|MPgtc83{}l8toHZFP`S3%!T5)?6_w zTP9lu4W}%;w}#XOCX19rp8vP?gny`37&Oo*BV>M}!A)*YEE9z5VYnIQp^7bkDMJ<4 zr8}nFegy5<)&rPQ2r)$29Yhz4d(4!~X2GGSw<3vZZ?isVhN+jOQ92D5c+K*Xg!hJl5 zA=h#|C5$0(E!No!7af=C1wR!J-)3%I7M=7b-A3JI2fvaQr*@vc0&bo~JI$}X_-_D`EvtUi-1FppjQlV$SER8?KdxOF$0wl-C2K@1h_&HcJ8>|+6lv&;#qT*tiTm)MY;_fMiaAePA5XlZ~#@|{p1>b>&6v%xX51Tsz3kMf>X)Dyn zfz2?muRUtZOLAAkhcAJJ^u0Qht>fqHb$YJv!i1Nc8jlnH(*Lz=c~E z%R!b1{gyd?%h-=($^a1oQD?SAdU|RJGrlDrs|fGRBYjO_*JD$r0b+-6WsWkL3M(~O z&K8|e*NoIYaU0UY=)yrwv#=`e6c9rf^7*5rjEfV0)0h1W;QWe8jubTIkHv_hP37iV z0Y*f=(uG~!=`gb92UG7+#58QSCf{HH+g^w*aRFDSq`7{KQa?Ido~&VBVNTvW)i&K3 zcrY{~=0xIj{4Z+!(*?}(E&6*FaR0*ki6LVI5hhQQ%t9#lF1wmE5A8-4TL?4C;iTib zZDh{JD!aZ8hO%8;QPYx<-PFW!Uq=RmxvZan?U^HOLX;+Gb79PK{fw?%kLkpScB1SI z-*Hn5p#7=obk~tQL*XIR6SLQ%+XCY{AZ+&j;E} zgKz|n{fUmWS3Q;lG}J(NV@ZUqAN47@`n6`(+f@MIJiW&}693b$aaPe4xKfIjfK@w& zf-NkLM@o}NBDdH9J+;So4FqLlb<{k`BohIsE6-Jh8F^k0kU90pYmAJ#eb3keiW7=_ z@7EEn@ndN-Nb4I@VN>VCSGeZW51shvM}kXLI{qCOMA?->XRsbE(7}Iya5-5i=oG8c zEw*h^-za8o7ZmtP&I1mIy`@~Hnz#Z^9(Z^TDIshoQ%mZNY(ru{O0N~X$TYERD$#60 zxCA$Jv>r2NuvbO(r_%R=8G##mLL6M`qeg}4#|z|FwJL5-6L#565B!hvX&)5zJuXwJ zw(zQ4E7GRMrEIjtUFv^(-II{j{ukK>Ik`S@#Ay0gFsmn5{zxxm zM*or)w8qZh&}&o0s-g;pEam=^J>SiluB8fYevzc2)<07Igu+sC>RzE(6*aAmpr9%k zNt`EUGWci?yI^5I$Fc8gPYVHQlZ&X{K6He6_syI=rXLNYVIZiAmQUZWvgCTLTP7NW;weFT@*9;-qZ<8N8#O#i$rP;XE^3550$~ai-V2q3rb@jX`^5siXxFSKQO~ zZM`zU#0+I|JaFAQluX$fT7CW4vih%}!OpU3w{}vM)ETm8&7onGhJ@iAV?fYd%Ivn< z-}*yy4wC-%S^K)i9Y|(-nQ}|H8)#B47?M1I{ZydJzj(IV)jn7|qH8;g$s7M0+Ev6? 
zq5Qsfrxja!RekvdTJHv>l8dHW84wq*EyiU1lOd+3kVq-Qx<4|%Uv0msi*HLs5#zX5|^(nOOKI3Yp!j^-&Im^s!AL!5tLQ zZZ-7C@c?D=(Nan$b*`FzSxg5kWj|d9*z!nur5rVQb{5NHV=p!dGwRx+2j&k&7tY6Y z73XX&*zNHCxa>`|9nrC0<2M>975?~pWxD_I;{svURKQ}6s-25=KR8iGoh5}fHLqge zw4PoU?(oPR?fx6JigsXJZX&fttEB4Ptg+odG|9X(pn)Q`VM?I6)NY7C9Gkr-#VN-U z`W!S5vw>?p5{T#|LFg3;Q>pqRf%|bi9I3$&^R{{m0;xEIOcDd&-(#jrg=GIUhP2Zg zFQbwEs`m2c;-1kyIAro9(p^(T}Hp z=MX%RaK{{>ZJwPk#u_X-!YIQ!#M$FyPdyO}2=zHvX!Tt6Shn9Isa-how&xvyz>LIZw!db*yce#K4SjAWXuE znLuGH+f-{iD%lbw#tY8Yy2_SrZef<2Gx?(7WIadJ&cXC(E?*$!vWeF1kv>%g7iOnhso!wtI0nJ8tTAKG79r#|#-e=sYf@TK@$NM)ccMH;efkPoF8R?1SE5kK||OR!hu5k`FS36qedL1qAfp055oybd%0;5jmtB2i5=&z77SKU z&9`u2Fy$D7!$BC<Pi0q2^9Zvt5u~nrU>0 zbxk;&NkJTk`jI45v4a2gvnnZ)bO2{E;v#zI7me)o`UfD1w%iU9i;|oV!ugsoU7M%&x#Q?cGV2d!iu_DT+PBnG_mQNY1S;r$QKB zI~NnnARk>UZ{DD|hX(-%(s=&w1MnTcbYScK0g;QV#OhpQk5%spEfH0^bYjav%%)r~ zLn&KI`BYcGGhZv+Q}pwks-1Xzi1oqpnqo>3m8vmYHC$+$j}dA*{xH_f<#%U)ZZDQE zE#Ib2XK1jO2}LQ1F3_m=j93hrGd+G-AM*?&`^%zc6ag(&wHh-fVbU?V22Gz8&v5b< zBK-UBVNNe|T#TGOqQi77qEbO}B<43@48&2cGm>HuD!l*m84x zE`Rbo#hc@t{aqeqIT2(Peq1JGUfPFa8)7LLtqu9&gVtMFng>#9T32a}p=FyTjse7b zOAa1W_l_W9amnuQV?5zvwhMc93cLvhar(lx#srG2NPU)cw+CSh=kNHDX?ZDSggI}6 zg-&sg1sLM}%xar$KSHPGrfjONsc(-P!+4$d&|ET&l?NvJ34qKSTeN!HjXL91;`@5gW*?C~5OV<2vO5MT!;ItE?u^XBnhOZc_ z2%|@w8r4YwVbTr?B{#u0UMYK@ksq?MUOz72A%8(CVYQv7k6%=DlfRt}r3rm{T4KRn zGk6*4@RUBsgxni;76)CLRDf~5^r!nB6Iz2x@jo3pn1A$iU!-@AOlUku+s zy2FVqZAWKIhv^vd;~^xi?ZDOMW3Am=IN|cfDNFUX)I|gOTFQGEQ<}LG!RiGg|^I;vH9w|~IMS_|4I;Hwun=~+5cUFMgX!jH?*6;nSNjF3Py{MmU zeXv=yU**z3GnVinaL2@);X*Yh6(memgzc^aVfvu*mBQV|{mXoUB-8pp|J*!*a;6=7 za(Gx+WCSmAHqPHT_Wg{l>iwS;#+^% z2MDykb`~vIu7<%0J|tVweQ!c$o2GcYW~@wT!c@0E<-fdiN{fS}8@XgXIC5G@Sl z*oyZlZhAIQ=Fs&wk!8q72u&Xs=p)f69vi*9$uSpz=^%1to?~$a%|}hJtCJ)Hat{i^ z4)p!BxXubwym^MTxO7f%@?)r^+Zjrr)*yU}_PR>BCscCuBfhN9SRP7K!I<|pMY9kN zXJzO0sa)QdQGZ+lFU*Z(0$EKow%AO(&iCIvDMm#@sykA_ZGB3{-xPh>!bcAhPN}OFP`3$Y}xwZI*AarC9^GdJ{9}u)KHkCaQ63Md;Ul(j{p<6xwX>FcfD?A zAHx{wf@h#`yGzpE209~l;h1GRK;$MQpme;5LN0I?;W31)-3{LqUe|9#Ei|%zl3eQF 
zSTiL$o%2Zzsu(Vf+PwVKwsx4*xl!F{b>jAG!aerpM3KI(3`!5`!_jF*sp4vJH{Hk^ z!xkk5RlLQ!{N77#AxKF`{Z7bhXRZ+E=a;{dcZ4+MblM{=Y`0lYg;ee3hWsR9nId%Q z6kN0v20b3V{W3#%s%7{_BpcjD^myhI!75?vb6)sW5K7P&CJrYqnVV8l5Oq5s1l>G+ zMJ@E3cA!$++%rm_Iqhd(mEa&2^scN~CX@n9?m_33A$kwuRy3i7yh~AKQL`8#4%~|* zy(Wp5ftz*Vo(AokOtgK!-h>HYKAKsnTt^3UeBeG3w!+x^@poamzrW|TUA7sVHd#c8 z(<dcla%A~BQBX}b07KigRy$wwMSzI%swNa!Si6*)4sM;pS1Sw6ra04UJ(YEy3 zd;0KF{`5+XnDcgc0FfX3zA{5QVJk0_z>2~L#%Iku=dQ+HDoipf`7`#Cru z+!n?0-xzqc%iK)N?a7fK@JsU&wjTq4njgLmoU;yJ*XL#ba$;R}^AzfLDuyP(I5umr zMI~yy6Gc|PT51^>&g}FBW0J0o?tLDiHH=jh`2E#oM_w7ReW~m$44oT@`_ZU9ks)2C zhuy-+2grwp??T61l(W%r(?f^4rGkM_VV0(2;}?S0pD*5Nn^3B?B4s_v*1z*d061z+ znWF#&TybY`A|s>N!0)|f)o0X#n9`R&MYnpQqF!UFtusCQFW3z!={b}B!ej~N0giE) zWazcW3&Fso_r%V_!1yYRvB5U|TB?{|KAC;su9Kh>?aD^Qes=F|7-%viYS2E(Ieaxg zSad$QPgaR?MBw#fB)iu2vSkfB(a7cUcXRY^TC=7xUSPU-jE!|D?0qRWa?b#-`A8Sk zZ~1PJk4r(@VHy~aI>ZFS09|P%@a0Zao~?hVb3}9zoYKN&@+#>-OPR*fWSIS~$~~`~ z-tps#su}U(%$R5$r3Un90iV#-b1B(Y9(?ZxuhHH%l-Dz=?l`*HIR|sHF{vzGq?N2H zY}qNb7maH!DeE%(x-y8)(@A;B<@cOxwI?NXPmp05B(ur8u6~kX%!{v0)p+;+Azq7H z8{4u^eA+l%JRr|OG4`Y_+-ls{;eC3B69pSlyn4WoM@fEzXgKtnorE1!2{d=|&?Ep$o zNkW1eg7pHxrqDLihsol^)-GFa*zKLbv9S1WgxxonR4N>DPa4aJSO#ejrAi$JaW*Pwi=y;vo4u?SyvyatQ+cp z+?qRK3?rMgIg(C6nZW!>*^g?HX(l-O#(+iT2TRU!3ZrPDH*bxTe(6gg;8@|9TGh`S zuviwhVnCXU-L z!(|!SM3>u2CM@hQDtd7e8iZ-48yl4YAxcAix)e;bY>aLfm*{Qv*7VS*6oj6^>`LmK zftuo{<D*Q(ce|-#Emb3|e|J7IV)hOYQ8OtAUYkDzGxqsmDiuFms`KWWLDUzW}w7 z(71*T%e!3REE-PI%OK}31@rSo68iz8(pAEYB@f-J%m1HpDJ2H(YGzUvCC;rx*O=GL z3~+zcx8gf>_|Ll?J);NAfOG}m>;>AasOx5=uDsx{bx$Kk|c zlJ?&RmeVWCWH`)v8ZP841guN7xxV&W5I+HP68k&Gx>ET;Fqg!wp6}wZ#Cw5zP^*0! 
zl@6%X0EG5d(YU#3`~|?rHKY4&=$;nJ+(nm_zJ()ZejzcdswF|Jpl#dcHh8qYy17QI zuMaZ!_}=%r?O~Wplv3*3)D#^LVo%L_!!ML$k=WVe)~Er!28)sIr+j@jv^ zoHt5fLbx-?N|Y%ShLj8)n|th-LTcS?lWfyELJTx2J3;DU8}f2gEUAJ z?M_8YRc_F<6aVU&zHuc|Ed=pJPPLSE&qsdb)MT)!Xu`^$?Hsbbg?J+d0O#B5g=Q0f zLIcD&y3l(L@a*5NPC+sJr-ft?Q8T~Qk&boeY?5DYQ88v^{gLGzprp;2nRU0nw%$bl z0Rk-#L+_?6Ta%()jBs5Y+RXnIphs`5WhOfu*0wQ?PG4$t)V0&ff{7n`qfYdnf3_eamgx3lw@ZZST-73`Ph9M?;q z9gw!cXhK&F%4(_M&@Fi*4j|9?+rR9c)8Ck@0`aSq@aqU?kq%sSTjkCJhg|LFa&y@G z2UEjdqZsXpsjCXuz7_E0BodMT#9Ey7@#z(I@v%6L8L*6;sd(VT-5f4yuevYo-&PNkb|5C)xSpxcanN~K)o65D4^hpZ32w2pFs|~3tl@BME za2ifBTg;nz)eAJb*ofA&*&FPIJXlGlen28iSkU5$FP~R|dxAG8t&!#YBpRzCH}p_? zBcfUt(5SQ;sZbyVDXNM&FjYBMP&XGZ_gP~bP@?0ePWvl6#wx%I$ll9?r|W9Ba;3zIT)W? zAo*Dzs>GgRpQ!on+M!21y^a&Y=AEltdQc}Z+cXsak>coS#4G~EaDF|j|NW*9@E)OhKxB0X&5b5qlCaYU%%A8DX=Hr zYd?M?%|4%|LDs&Upa>l0f&P?0KmU`cA=TB;GLhkKtNP$Z4i!5pc!w3t#Uz+kqwhQ_ z8|R*DtJVScY0q=7^-%DpC)L(#!fMC3>c`^zJ$@`hRkl*ZTy>i`6hy$O6J^(QGx9Ia z7}034ALfDN^8~up9E?Z1* zNr-42Q=BsPQ-Z!3mXe%3?yxofw86sk8C@msQJHQSjw}pZ+>wf=oE~Hk8CcLN6gM}0 zB~H7C#dHJqcS5DQ6sC%4g+O6lCXZs4U9#H2jr-dgV({`nafFs?A4@nnG!OXNp)Pv`Y5@s`}88UP3C1?$2X|XDO+zJMMbC>^_Nt~PEQDsQ6fR)jr*XBi-D!W)C2vLICz}1}jgqK> zOlFJ4`~t3oUy0Ay7JQ4`jdOzBP`EROWyJoHhkYP{hTfAv$wWDq?dTXIEnkQbIa~zT!N}s4t;%0kyTow|1X!d!EJ+)<($ZhJF zi`(JxM_|>4DWiIKKwGg+DEHH2SR~1$tlPv(sRd+iKvCD zx)Y?zb^qgNVe5d*EP7P|+TB&z$pCNAPpmj%k) zQ>@^+q||!J$9Xi~%9{vj#W+V$lioaM1CeU!E8$FS&vlU&;`(^y4}h>EEgclfT^Ew( zeq$JQaCo*7X_1}fEXNO23Gs%?zhXfWEf_iFeV~gJPG;|GTpXrpLuGt@;>z}B4!^)_ zkjEw*A_hlSKhc%Q28ox(*rWLDOB{w;<-;#xBGT8s-yejAKhjB7)lMpM&$D(}GcS&6 z$J8KmK+HB?*5oy8`Pb;@1rUY>TA>&G@)L?NOx7>*L~Zs9{}4rUoa=z_8NGS<%8yhc zsfZ9Iyd!`tLIe>2wL>V3;N9o+$`fgBAt&QBnvrL`felEzUC~b4kQp(T*?>FNmF*Yp zs9;!@AE5-5KunUbom76P`fM%_jtpb5;nI|$>iW&KV0)i&~ro8avv&S*bkpsF?R2A8hIrS{!gxG4`!F5O4UWB zpOaWh#EB`~dsB4%#83h@^}`CrHsncw#M;ISh=>&f$@;mS9i*+0Pf3!h+p(m-WMHg^ z%YJUUO4o+!!yQxeJ#aJ0mSh-E6?t~;Ql6y~H3p|B+m1L&i~dta9>dKt_MdHPecCAh zM_h7I2!Uv;GIO-Nb}ARIIOsl?5z;bIt3>v)F*=o6-KgMy9Ogm_mZub}4iLM`h`utF 
zoRRHxDIlf9inV(8-CEwwNtcpQr85C)zK27bh$|{{2>-rJx@=bbKz@JVEF8_WG3|ly zE8y>Fkb}0O-3mKjiwExAo01dVL6#6Fv>-j9xsGX?sX0d{5LLW4wX+{KLLgNoE$zap84E+lXf=QASGUR4Id=mCx;fWiG*v5SQZL(ysecA9ay`P7G6J*MT*-qN`5dRKc)lj{wfBrkA=Y7w_J;&m!)iX8{Yf z)LnoJc0bv9qUx#ze4YA;zIns9Q7KO7qkidsysmgswfd7Xf5SCrzCnjAMA$kqC}!d9 zK2u|>51428Raw0Ba#v4+3CIZgLtFU}p&+BmL$_B}%!(9YmgpkqTs;wFgp<4Mh`1$oh#rS}6|+Z9xYT=BGn(FW`IDF>Ioe4yV0{J-ePemqL2us+7JZ3fX!Ms)h4OH7y2lg2M6A^Fj%b<=`0m}P^A@6*; zS!8G)2m(+@5|>XIvQqh(Ppj`9FZHu8=|gmX^k70WoPqzV_L|#wg!8U5QFNlO&|)`s z09}h@u{kuvjnGuFRU3UVQck0^&v=?UMu6RPpvA9|D9kfH_M7^=Wf$qQpZwl7_iX)V#FC98X;eaT^^^C6Pmb5%~b zH>54tGwu1@w9}8YLiAqr@Zw`U!<0FTg{sADRle3sSj70m|M+t^v7hm&I;~>RJ@^v+rP$4ZV*QQ(oR)HPQ)BX=}!LJZK|Z6y?3 zK_LmyXhQse55CSb4;0B(o=BD?y$)9+!TJM2^FP|Xp{Xm%dr%c~PSYsp-&jm7LU}$V zj_{u|$>k1iKDmt0OYIGb7dMeNht=)n zic>{bMcY`TjN4r4kx5k|7_TT)* zYR{X34DJMYy;^ppxLT$|y_kfvmJQa9$6TL`(7Y^!aX>1!BDpe%f3cwLMVv8QAXzP4 zTS+fm_zb(RPk5*SF7YyWFzz4(X8SbdZRtrWD?)2_j?O745YE3A{en3L2%f)FX}K&i zIVtyF6-w_9!;pgv3^8(G#|md*FA4_Z!Y$m+D)qr~7CAg+*2)vb-mg+@{w?{}D_@IW zQOW`@BNUwHZXo5(&k^?0^$m&r_o9M-x0&RXGXLeJ@T!~V%g4{lO2oA@{P$iABZc+a_;CZXmAH~6cke;4A1?fh zi}_OyzMN!(&P0qjsisBuB#Cu?r|41=G^tZsf&>%J`qF_ZGUpsy4S?lPELwcgs!5EM zxPIiKr$E^hNeKbTFup$4Bpby%Z5eSV0s`jRErPH(cYYRo?cRd%4G0nge;Xfd8>Xy| zgik1*485SFTL0(AS;$ecsKV$*nA9(22X3h6;&{G>)giPfR(EeOKHZiMAWNyX$!*11 zy=L~WAi|cO6}ZCTKw$AV6DeFUQ@97aJs?E9#PJ<V;^6>O(_%bZgG!B=3bFLAO zR7a2s_<5oiCtil|w0hFa)*-`hBcxz1o(Y;3}=VEK>nJ4Y*8ND8^8v;9*s7fJ4Va9@D?J;0gP5=Z{q+-|iJ{7!1T2Yx-6SMU+K=hXu}sNm{g%>7Oa0XkMvTB22h^>U?X6x>(Ys9iN3u1zrs+#~UJq1Nfv-+Wdju|2MMuB#X z)!l${JMQ-mDTOKygTU_>X;mbGXE!ozNo^fcqq^>j?DWOV8;rj}W`%rOA8qMxs`*oU8+1G+atTasKY_q!^O3Wx z)AO9k#(uH5&eu^OL&{?}35-9`ivwAsn94`x#1vT#B`v!-Q6ww<{}j9*JFN zQsN-bnfWmLHq7(7aKmN=F00#r)}jput-kDRpVP)Vw*WlX9n2kLS-NHSken;@#U(~^ zuHKQa)fgh$48vSw1-JumhmYuDj(8;-Jr#BlllZ53j8u%+ZjAv1sX|tkUIFsgnY(N? 
zJ>=vzxOH*Bp%F(E9OBlnEGhx|j^nsXA&F#tnQw@yrY=C$3J2ayfFDoZ6+#f<^9ddp%Hn+2LiR!Aq+tJyy3f`8 z0oG))lUmpeNHr66CA$xhP;cMEv1pSN#`>rrAHyU?Y!lxdq-%w532=@K;>*A2QExj+ z?-EOyLQw3PvAFDo5zPq_+wtq=>TkI~*@rRuI=|1bhuKzI$41&zf9t+cgbDi;!mSKn zgH~XFSPQ?q9`RoB=sW#4cdkm67M5lIv1q)|j2t+Pvv4didDa2Eh95PJjLc|A3&Wig zV|PgH@L|pHPwy`RxzRp&4$Howt&I=&Vb^ly|8vC1e z1^i~!XHbh6?&$E_DrSR}rUFsVilvyFAa@5Y1GiWYIfcT5zE#uKvnVMbA(3r~uRs)v zo@%sTW6HJgNnI5-kl>=EqX5-WE_xece0X|}3VwdVqs?&7&|@F;pW}^H6Z4Qj@5tq^ zYQtM3PEzCQP$uQe-%^vpZ_?C2;Nh`L?Zr-r=sBN4n9f+=a!i1Je>9_c{lRv?V9wAS z^QF&;5fC?s16u+XhodtP`m;|XQC7jLJvdD(PmU-WX@XAj|IW^075I82GL7+Bj%CKI zeyz%#n$~oU3xq<;g)daO`2fvSdX)AwjGUB(7GkG`N#es1HDYG3X@_e?_ zIl35}s<9O_N69g(Y%WRYfl%x~OC)?#ld?fmqGyThbgMOY${Xc@WCND`PsW$~YBi;y zkama_KEw(4Z^4n@KSVab_|hF_YHHg@j(`Nr8%61D!RK85A|E*g5$zJ9a<;q+kk$g1 z7e6;8nm~uXz&zY4*CQTS=Qo|$nHW@kSUJ_#!}vTzLs3?Our_4kn0rOl5$DQIm}DbZ zIVt}B>blKfYp?pp|Ci>4&6}sN)4M<((tm4kZiYWz)#M8qQ&*VwX#;oZfPK|@07QZO zymEwn>EJADPF?ckZ=CWQjLkB21F=WYL|2|vsq$|s1wx@>UyU$xD=eoA5R!T0i@NMH z5)AnZDzrJ0LD#}u?#SLP`j5N+oxVxd2|T~C-nz-|1v?k{NtQUPQ=TzYYD*nRvrOpx z|Lv7i!Xg3g^oX;E4bxV#u;t@qR}ZZJjrD839(Mha(ar7{ro=ZUl}WeC>Z(SU^lO*S zXoRHjK$gbLw?fZ-iXyc@(uaTUZq)d}qBMiNePg*+!t!@DAzX~@xmqUG&2pG9 zm{quEOkwlr;+vPy_5rq#ylJG<^A=cj`;s*jts?UwA z&BQ!{{X%TIE8jZ5sAn{y-HzpIU_;bo!_l!nn|*K>36Eme7NII<lA1YC?fV}67Y4zv(3^HaA+q+4fOjMwNr;{MgSFNd%&-o;oe z$DOe;De-(d$Bs8Xb=L_KyIG7abWT}hsLSGp1m96D`iy@DsWvg1Tdj3e#+O4^eVlcK zIV-k!L`;ZY5A`*>Ws}eQ?Pe!hu7#4GXww|gDcAc-xXn;#*%J_C3bt1w_TNgdGl??^G4Fdp4E)-Fz=Dc;^D) zkHylYdkT%`o^+&$&57|crw{-YpAm&+Jz>Lt3ODtZgJ1x`d1;Euwn-%ik_<0Yiv5RZ z=IlX!Ky7ClMh@L)ehvj}h%mW6g*IPR>weNOGH>(^uZ^#l9)f@i0dIq+Y`%zt2g1u* zLrLPVjO&uYnl|Ux8PB)zUu{kXu&q25v$yFi_D?8qqngQPtN#i!@4j%#h^&sB2bLt7 zbi}Hb49xu10%prg4tFzu1yDv`VjCCnJ#vi)DNH0^D~|i$@H1K712r&-@+G_wcn_Di zP{cP`t+-g4KJ}}$L2Mv@d{mCo_Gery>W=;xwSA5mwQUb(R-!RC+P|L;iF-$e1U!@@ zfW*M)XbNv1x}X>TMX2IAy1IXY}^1*`vT zO!JO66!1@OD>iey3H;p=_jA^t@$cjNtA~eLIxZP`AGZ0>ZYL%+MF?nG z797j*z~m>l{VqIzBIg;W2ee{2 
zA$g=bsINs5Q}l(^wW^M!h`$N2w8%l%W@z`(;8H^?5r2qp49-mupA};WsAtJ{Salvp ztFeVyILy^Rw{tQ8v01}5A|IH#a`+LSWH>JVo`894H2i>H`Yty=_zYPq;$l}$qmxxR z++@|Wpyl=dp4Or_`p3+**<(EO=UBmJp5?s(;on;dDnE|TCe6PTIXIvpY9TQimZEh( zS7B0vqiz(tDDxe@dzTElIsA?bq$jzqx1alcv!ZK1Y8$i2?FuZoxdZG*8{($Z6?fs8 z6lt0-sH4jRbC(B(hu&0Q<3r6Z&(%_)J9CrO`6y>Rz|krIfrKDRJ}E^a`(?g7jYjX$ zsj|sF7Wwlv5A_L3FU!TYvs2z*BZBazkT9jyv{y*7IF^zP2<+*Ikn1~0ANukzpa_XM zXppWa!XvHbQyqY~tydA4IJd6HYF5uUN2iCDJ#{s#&JIeb=xa;lJ_9szYJ!@`=vw#a zTEWi}!FAson-{3w=+o+-4Qa5=Y{`jcAzaPG48E=7eN9pfJ~!ubi!VqfT2_jdnEShb zsVaMrIBD=?s2UI4YQNT&Vi@ZHNPdkTDF@rAx3^x5c8$5}msoXZkNUa7Gjr_F*GXxu{ghDABU6mC$0 zM|k3!Wc4TRXnRd0%7n_%SY5g!0;RbZmhj4fA9`&qdXWF-#}>M>rIWDjj~y)mduVF5 zv@Cz3zRCo1DyGlw4IrvHh2sfAq6mI(^vsBP!pH`;54L$t5L_ty5=V#s#KGTgbMt(c zUMLi-t`M#ALW;0rpBUd(r(7v0yA3c~fPbno>=_2nOK(``qRpQGuQj@dmL*@T;)ur< z%2eA?_qZ!BW_txFgy9x2zxT7+y(oBwQmMsTAN@~bQtt%zlm_qWp>LWrFp>hb4lnHL z)$LiL+g1Kf^Un}yE1}kv+#j2{2+253jzm_#rVvf<5M_>Jm0}w|2%z`TUor`fylmL+ zlQZHAwF?xM|30VVf*@DM6LJ4~1YEIb(q`^kJI4e%H(y-cf`YG#PJ}?p_!Lh++Cq9t zZC&X%f_XVk9JpFHIc=*V#Cb!I!gMTv9OrMn7q5Z%O}fn5btGE|Nnmc3DF~2?Xe@eQ z|LTYMF=!f^3VSzgHe%_2q0y|L9y&*229{S7R^(OcG$8q6GSsLI3c&u&DFI%`9}(%7 zf7?REqb2d05e7pgCQLXMXopSAD4&#FJ9#(-H;?p1`@L&f-7~)o;|==*eAuSk$Vgt8 z#2s(nM&*eq?CWEJ68Cq>VN%}*i~gzkP+jwbKaWT@C_4gS zA*3D9o}Xhv{Gvp*hDXi*1#6wQGpbxdvV5LdkM>D2jI$i; z4Z@=|+RH^Ornbsm1c2s&<4JRD0Sgr=c06bWSrIDJE8*}$$7$PVWG|fju|ZW1564eJ zKd?uZhxytWHOyC6 z2Hjqv2Ba&+qABS`VE`_`zU7ev<4r^hA*!hk2wXGWWzVnuE8^yEGz~j583n?I${AZv zI#YAFnNomU`W(GR_6y&UQ8AU<{vyxT5)knl=p#e!0pp?)TlQqwVv2JYQ1v|xB``Ia zgnv;OgPo>`VPtdH*v>@~Y0WlT&AoYauD699)8zHH?y72ml8KJJP}_I?7MwnTuPUTh z`r6FK1>I`P7m2leZoKI(WW#{pKX?l9MtfZnc?4~#Kc2l4-lKf9AHt=_Ci;(AvJf5Y zsl^z9(At2Ce^^oz8w)(p|JF~yN6Wu`(n8Bi>;qqD=70ctg*!t8Bl0!IZ<4DTy}nx9 zz+I)?1)8so4mF*7_~|X13&KjnlS`dcmiY}pm#~d3se`A8+ah)9jJ5UcRJqNCWDgGe zbe2eUz)(gEjc)Z_r*P7BLZrayOkX$MR<8ivD~H^Np^~@~Ix@(L+i6`Lzn4bi=2%0& zPbIDtUuz!`P-)=>l5V^dxWGCO+%*m30T~^>XdU*_L>xDJ(_7|RkxO<<@pa}L3EHb5~H4Qu=*`tmlYeE 
z02dgtIK3WxZ)vxJeR4pfTCqGB+!%tO>PZ*#o#GOBut+xAw*uFD{^qGCwf{An##6MQT|!z-VxiBCpAI_TEZ1{~0JEC66& zaK{Z8>Bx6EjNIEP{+-Dype3v9AFCbk>PGKUigmsd8_UuB9)`of*9@~>4Xo5?Q>E1h zRC7VasvRw&*(I+R9mfqa%*+iLVd@kT_rBsiafbippSHfIiVRT!Ss~}LU7J8fpQ43B@y5LHNAKW4^h@RW>`BHBDT z`rHhR{AFhRcyMl>QDJDUeU(MQ}! zoLS!3#7#%3+A3&NwJk}Bt7fBTsmw!e7R{*I8t*a@zj0f$&qfmZ-3Xaa*grt|Lk@z2 zh?*wQ6>q`zq->7s_g9rf(U!VoRH=!eGV_+#jINdoZLWwUN{gLegd0|Hd9GK1sr)Fd zR2?th5QUv0+3ITb)`NaX&T#og_*&mMQWz(Mn-ndOhUvCXf2b71?R$lqv0$3^Fb}dg z0^zm#KKG3GUKxhOz1eT<%`V9$Ts2vI)J2BzeKAywV)TurG`kUq8yscS^}*cN@vbNV zMvUCeh&UzqahiE8#rk5&Ov<(D;|qQmWyt7Qc^YPiYOF5B{7vQBOjx|)A491ax&0M`^ipvc z=oI1rELIuX{P_;x2*F9rOQ7vj#VvcX3S%p zY3YmxX1Rn0qXrj?XrE13b}_7C8l<4vFEL5REI5B0eac3F8TotuC+uRLw6n_Y2-Ng6 zAOM2JNwe2AaK71Vhx-nmSj|3swXQoYK>;LtW?H^iLrVO7M}aqHB1VpA6ZaOmr7V^@ zmUn(}#Z`kxj$6(Z0D&hx{*qSLmDpNuAk!L2>TA|H#Dl@i%Zj%0{qNVtcX()#&)K>Y z)xM;@91l_QIyzhn-UF|@ImEZalC(hLChO32 zXGS=2<^H{h7paG_z)Et%F-)GltF}58tQ8YXQx<`KxCN?dit-@eMJJ?>Jq80uL4B~= zO%uql3?jwP>u@yzCkXkwpa?lXE53Jx5vtjy63YtJWv4NuJo#N`EKu~-@uO%o3`nOr zL~*WB+-%c_@W5-TKpdklhtmL2j^Y4 z$pMaYUem_0$ zwt1MCzH>BxG+fD7b3pH*ok|PN>*IZloP&24{vU-gX*1-0emHAJ#?cj96#QKu!LB>k zih%gi`waX#`y7E4*O%=-TX7>~-^BGI4XNGV8>ZqO&RLBK z8G2yau1QK66E-@U$_)>|Z6$OKlcqdI(@2xkd#nFMZHMg|rRiO3m{&>HZbK9fmm;yK z<$rc_JsJmPwRW9nHufw-AN3o9#|U*hTx8deG=w}^`d44=5qjK;;`p^8y5XRB7t!yc z1})&+3OW|qLwGSUIzw&Wmp?J28IYjQQd5C?!(Vt}m6}q_c7o#8V**EN$Ass1biv`* zpGt^!jS-2qxttZ_0M4AC@StfFTw6gFNHz%0YzhSeeW1ch5!^py_$(dA>|<*9=thRr zSj>Aw334q~3A;+KmJ?-P3{j30VFVj6J$fnS%n#)ygE6VGBz{4juIO7l;%zY5{R-@t zw{mk-uuBGxNJxtL)+8%rYP33z5btGpsNYV|Nl@p(BTj7*zEuF(cd}MyUSVdvLCbDP z&t8Ve4f?Cm{5C}RV93>nwi>1tv|tupnf@6^c#w{M;hyou$E5?qsV>|>ZCwSGi{1w& zC0UeiM=352;AB{Ar^Xk;))2rD7UFA#gVpam_=LNfh-g@c34(r}cl6>#)^M4pMnk@P z9rq7I?hop5Iq?=;slMbtZI8H9k|LNP|0b^!JV^Dl#Xo$ISQVN2mkeZu-iDhOi&{Ch zi)-ooU+qK4VAnP9`(c-9LR02g_Px7>y%IxtikFDt4?ihdRe>>T(b1Z~*K~<}?lILk zbKt(gcPE_E$H$4-;B7BA=Bm1sNh{lRo!cc84eq5{aL*%~)lhJX5lg&l3 z9uyV#TAQh}we!T}KjjAg=CVH9k|BLPynm5Wi7U3dfY~~&I(F3Jv-dntAlt{S%X6II 
zk7^$Lm)JVn30EqKJPBiFt60Qu-CHP|~cVPi? zt}O#ILW37+yI3S8&lLM++)b-(LSr)Ify>S@8U<=q;uU9M()iOhCak99)wGPax2!rT zwUjSQ`W~@%wdGH>?ao5FteKPKd38pWy-p9!KNfmz%R!$`anTAM z7*(%8O`49Y59BtUYvJmwF7wrpGT)vY^94A}R|s5f3tUxokzN5xj=Q8{Cgaw71xna` z{Cev+x06jrV!=}ehslx;jC2~$@`?d=Iy?0cFTBPLFy3&Ha2&n<5xVHjXPw$zQOE9) zFbIoscy#@Ho;?_)`bO5NSOA`Hwj91Vp)?PiVVJ&W%{AO8QvMkG9ztN$p+nLk$1ns? zD1X`%NmI~7WndZxez4cXX|7jV=%P_{yXux*V(ry1lWspwTgcx2E!391Cs|eTYz?;< z*{WxSEoE3=y>sGsi|TR^aENK4&{!=L43kY^l%?@hHoaEp2Du^jZ+=0#@*Xlh=S6_z zq!mbkeN%aw7xv2+fsH}VlZgqpW^*x77IPx*VGw#wB@D12bk%FKK!@IWvne=a++?a< zJjs19-Yg06;}9n8k`6o)G@r|$ z-73~FnS&9M4Bc?=$5uTfN+*Cmjr4icv}GyAgMNt{oE4f2e!<{c8+YUepEo5t&=|-K zAB_^|u>yNDSxOF#Q&m#2yfUPSM3rMy&zRX;!jIck*J(v z^dzZB^BU`?IC`EqsAvdq_uw9Y{Ay|Mmb5~7jalGE3aSo|dq8cKHwGB4l>yfW7d20? ziX3K%y1BAH*Krx%npiFUS$Xxga)?RwQoCA|+&W=c2`{|vM1<7AjV`^#?K%@Z)Y+|` zKbSrN({MNcUmate5Wv)Hn?JeLq%Nln z;q0HPVHv~o*@h5t^ADzUNuM+kkk`)2#HZHraMmXa* zNA)f1TVnEo$4?H^yaCH%sECVa&6IyN5?{X&Bi5{zCDsx2t%@s5A9Ywl1}-l(Vp#>g ztb1y*b@-j{%46Z8<0tiXW*F6t2qz{Jf8|qU08j?15`qqfLoMsKP9Lz5Esn z!0Pj`3nP!sXVfhd66uAl&qO2a&dqQ=m11a8f=x#MriAji@MVL^@t*U~AaygpJ$|lu z*(yBqdbS&=x;;u7jV*1B_RYL$z*_n**`XaDE~pg)p3NqENB=%KO4<&zQ`tfC2&Tzz z!bP}$FAW*lnfrMXCr5j~2k}MskAffNOh(-QUn&=f-=#rV6{k!(5?U4rTs zVbb6~CekzMFd2DnoIF@$q5EJ z*otsgzqVXfPUA0NcbhKZ$|b!6@Mi2o07gEsWE6(4$!7(&k*cdPL^T!5AkmUmG4*RNg^a$68q^6stKZ{lOe9F!PjdDE8q>bo?u6GOR+ zI;zZ^q&EIS8~}!i{TRu59M1FtU+aJb7-T88CtX6XG$EhG7RDpxmDNXiO%43g@wt{n zPPZ&hDaeFB4M!WkzSmUScmrfrs}606qzASOXI8~+$NgY^^--HmLgUv>x{11VxFA{W zRX~|QN~5YF5qQ_Pfv)zzcGu=7XciO5Hy_U$Oh8jd{n!|XWFRwArg}We3n0r2lKTJQ zIx&SlEYJxm(~s29B#^qPmTp^CqmzN{*+{$=Q^SW+L2?btQrTLQ7YKeArEH~pW+HVY zo~b>iu$N`z6!O*(2ZD~WgYH(m6vsZB6>3Orqd~ASO=Gi|HD(&SbasCwGHwqd-SYQ$ z%eW8LYNC-wPWM34L8Zf_&-Q&B)SF11NEv9SS|*UNN;zhfQ`{|*)zfTzHLc@~s%lf_ zb=smYaB_}NIqK^uXOM^oKQ2)6$m&M`FUx0|HrxF0)2Taemo_BfP25@Q=tC8yCEHGX z8ekoYlcE2l3iBFW?fX@}R2Xmw3U8(Ckz5$;w!(EiNSfYT+gMv0COWXw3h> ze+cs@gzC;?1e)$W37_8K$djMn<{jOCs-QO!U162KhG&kzQ2mxZ|8~D=;m~PDJ9jKt 
z+T`S6D|3D0vb_eP1TVLt5gm}q{bAs5U38w=Hu2WxG(F^@gDsWZf3k~0a2;f_8OSsp z=3-iT19bzo)ugS!<{xgxN){gDhqFpb)}H=A5bVHiyt)6<<7=G_Dc=X%1RArt|8FP5OABrbr%{d{^MbZIOF^o|LjZ1i_n~Kc2NVOMw@9_@0rmC z<;v}~dKim72uF)lfvT4rmQ=*?+1WJM{Zw$gg)Hqv z5p@yxAnA9p+(aynP@zZp5QubJRb$N)&mI{jsL#0DSw=z&n4kNt+!7R&tfq)t40f#j zx03Fzr~N1a&rt~tj8HTQUpruMjK_XMPB~{OmAx4hn2AF$%aAz(^eDNi?#oHTDI0jmuLq95yfm z&NBH>Lfr)*au+;{MO9Gmx&nvyy9AZszkmfEpav@X*|kTGPs^oiFOoVPxq34=dSiGR zt~p4_oEVY9*yew0?Lx7T4Z@)ATpY+K%6KP4P0ge{ju16;aUlOB@l16KLPFi$_7NnH&}p=JiGURQNU(gWqU(4BQn@Zne$7SfZifZX@lt@d zewyc6)I$mJ3j^Fi(8_66%GioM;@1!ZB*zN@dPEBKFT^Yd10F99uRTC}@B|#B92R zTUu{TwJ;F$(iUOTBdUo8x+WYK<1@pBrRCin69_GvUnX$lg5Nd2b%{*T!2^UG@Ow|p z^Y7Uwmg%R^Q}W=->BVyWIi-Wz@Li!<=kmm%VZbezxL~JnO-W6eIAv^tsewg02@RM&cYW4pu4i`INB-y#3 zThFrf3__M-zK+a(=J0TKlFRcGU(urI;<=$;G&1stl~YX|%naPE{S9>7D1b*Dro_%z z=B7c4EXos8p*Jmyn?&i7qf@Gd??o0;UBrEe7N5(=p+&vXAIDynOsTOuSUR13Q)2~nk$KLb>dzwm?5X#tI1u0+$*`X zZJq_#P#42=K^-$I1(%b&o1vtIt)U5bv}`Qp={;@Vmj2oNCa@r0AL{LU?tIr2HK6A@ zhBOAtl#V0h>fMV3uT+!=y_c5ifktJ+w2g>3lo+r`Rx(&(_YP;6jXZNgUg*r3=+IW3 z3jMi2&qEdl1?x}JEJCe({4x)(F#TOc(;{Wd!S7GR!4f@Lc^Hzk_h6Hu?4cDKJ5Zgq zz^82tpL1-i-b+Omn~M<|1XKd#pIf-kYP?x84M~u3QJDi7q%pY#tLE0M8+`>}kNOtp zO}Q3i@1COL8akeoy41u1hxc~Vn*BMsg$%Upw)mnFGN~k(gPtk|j@9aLM?vvNxdYgj zghEVEfL!0>>9#B)n2cc%uq%G(&2L4;63B8x8f`PP9}14}UDFsRzeXI`sL0p4;NX{y zgzw4}{DwWZ>YT`Yf_Vas{c*G05?3jPQ*9hd@V8?+)}<~mKzZ}lmSH?2exFq@#Yw$x zExLLw#zVl5YV*c2IgX~>O43b(ecVQ|8U_fn8100n`t%JODgkQ!xnyjjnhNRf`FP~> z{5kMYbjcIHIt_MxZyz{Bfs3+9Xiv%+aHAFw92mNv2$8D61H7hT?W)48?e8*s(WP!} z#+!<)Zr9(?vICOxwk9{gs{vt;Ib`l3pXMcT;KqSEY#NusrF%3|Irav9dhDsEL1_007ZqK7{=X8L3gl|b-047GRb-5r z4sdoHco<{{Y7-cD&v+tNycxsgXq!h>j60x_tV>bc+g{W!`@Zvn1c8u~-CRezF%>~|Mhf~DxY$1;Byn~9I_ zMe|MgTH8|_BdWigu}kHM1n<9zPwH}5P!EJWf|kILYE*4vF`6k6Bm6el=1d8EP4*P5 z$pxQVfc2+KRu`F#=`Jx`-h)T%9|~x$fcuA~dMG2R>n^VJA?9b((CFYMWA+{$!&rgMJLnzVDT&isCY8 zag;{KaDOBVuAVs6JXOhl$D(AccMU`B&TL_|oAUoVm2}Eh%`G^G)WC9eOx!?{AI< ztoa~V_uT#k;;-Yx&|PZoifI2`5i=gspYGg$A8q%DMA~nnnOpl%D|53%X(GMA4@u*P 
za4_|F+f2C#|9<9lw_AVp?#ERwY_2h#fZ1t$FdT2yif|Vc<@Ux*qViF#jX#VIqsY-m z^Rybl{#W<`;yl>m4-o1{LuwHB0SSwGU4#%oO_{lsc!8E&0?R7A@- zx7tSPD4%3`Mz^$wV{ez|FBJ0aRD$_DpCw@K=9KFQkD#z{;r6Vx@tsFS58?u<*0xMQ zQr_e>;0a3uKXsC_%&hmqzb6JbXgvG3JF_gjBDNPWN2|55ynB$>s;PE+3YB?d>p<(T zuM^i6;w}0t+>hXhUSVn=_C^LnmM@OK#el4(I?2)E%V>c!)Su3dfn@p{$n71eD_Lx$ zI;R+bg&0s`fK>7mzW%-S;5JNGcz41wG;;RWVgq{MTr8&}PXVpFPHPS=a_3#jFel0M z4HPWH<>gBgB6$CO*%7vpR+**Vh>!^kX_dfD6)UGYI)>9o0OUStwVIjLYmif6G0Sw9 zg6eF13m{rhCI0>P(ZxekJ#LY4T01E1_A{W^L0=q7why>Z8CN{tita?WouUJ#O#map zIRiXK!G49rqS*SZzElF?Uc2}E}#@(SNdhh%^MI+G2vFpsOq z^1Uc}8EDjA2dtW}r^}<8R97E0I)yR3VdzXltzwD;C69MY8~qH4HGx9f$2JpTJB}!n;ZRBe**);EDh@K*+x~T{|pi zTl_{XYbnwHA(9=i8S8|Vxj9aKfh(gPF)p$Z!cHmkG-7(} z=i~I--h_o4Cdk!*uF=%eb);h+J15RvDP@E4+y*b`vI3Xjm4@h}{SU}!STSPpdRBgq z{#>!Z6_>Gi0hB)+KBQk8_+K;-&v{2AWmdeA?8wY+(o*JEHP`Oson>;jTjFp6 z7<-Q**-G}reZ}4P=i9>?{P+s&z6j}=QFu9QW>#;_hrEZu(uoRJ)!p$o z7G~ID4yS`LZlWE$qPEma)2lF7KkIS2jG2T;x8Z6emR)Mu+)jEo$@dT5jRUO;yIWIy$T0qL5T#L}#!q}kBQ z6NcP77WbcM0TcPUsDLi_gDKz)^(Xse*6lsH;CCINn8@YVWIE+-pNxU-@8Ir3qqHG= z_`}ufi+5xxy5)$8_E;j{npTrYLvn0rr44oV{`nRwRidcW^Z88d*1WU21Nirsy$+~N zA=x*ES8PUp0&Ob5&s{WBuTAWr3Z`<0zp`Yl72Y`SP{;~=Ya)Gyv(D;hlJe`O!=vYK zA_qfyXRpSj2bgFhH9K$yQn%@h&}HpAy9xEo#=Yrr_Q=ghg^v}xTiAaaWydpDg5*>T zx>}#S8VA2!y87NpuJY2CJ*DG)Ax(6<0#`ZaBBn#So{ zV~}Z#DW}#+Fv^A;u>0T8K#)US_yY6lEzkY1WN2CswC#S`b18CxI^<0SFypZPRh%se zgN$Neoqk_hswL#daCT_JeFj_>9>F3Mwe7h{9{yU$J5Y=$iXA-l$`{rZ0ELvfd+mTv zKNjl3X-=!J#T7iP${dUz>OVMO%4bXm>kShxSpUv2eG7>sX34)iaf~pu+A2%q`E<`a zwIGGp$>P>rI($~eMhh;#lV`agfcx$}H<&EJ!Z0@DFq3;+g|b?M^J4+^Hfn9p`-@PK zyRN#qGDx2>wiTHQMsk$1kkUEe4K^(u`uGRTDqY1k-=rvl*ug425-7~w$@Z_q*110^ zQg4KWfStZMK+*?TP-82hNfF!5sQb?3X;4%HNksN=9-QNEAfy621MvMoA#{yq%MN+O z-f@9ep`aQsNV@R++?P7Df4hLC51-1n{*J=+!|pgc$}y3w8;#C@iPG{&{{Ul(IoakA zjnuV2rpZ_m-6d>Vg0(WL1&x^U>;M})tb(}xDmc992Pm!ed{PEdG`pi@WthcE&bfrE zLG&Yyxp#*^yY#W-m@H%2ae4MXb4Wb4E z6edq~Wayv7`m>97 zp1$)mV7&^D{E82jM-g2uOQo=JLq{Ss?oE5&Hw+bNHqCK(Uaq%sUf4fi$9&>aig9^W 
z0vR(`o4-X9LMvDBb){c=pykh8jb^_iTH_5dUnbh&HvtfCJ1XcNe^10oDnM; zAH=z|pl)B@lIMQbE>&q?l!`Yhd6<=qw{r9gG+_sK%x^BXycT+1paAJViIoWRrTYyM z+5n-4FD?BodQ+(|;0Fr7C$pk{5ZH=%u2ljdX#i$9RP>z7&ZmZs8BF9%R=Hn-i%HD# zSdIb#b|~4|E8E0*?Qq=dYCiFQEEHwK^gSZ=_JEckGjrj^@|#tq&aGoUhiYHt-E$XA zS53x5DLxH21>@tYM5IM}j~4jP=u5GFuqQi;-aHqAXiCWDteu>*jvb)t%DDOCB;;1d zdPrjon@5uZ0)kn{%wR(L++WS|c|}3XtH-qWEm&px=1>P9=DbT~37N19!`B{ZcK-*( z7h_E|2A5#elbhA59be&lSRoW26lm$-lr{*{s7rw@QJ~_21JM~yTXxZH#0KqT^`j6} z>*ukrdTgQ4kBlov8mC!(d|!8m2*tylmU|nHNTl8cIxIqI?lT9LFi5`6ONvt*tZ?U+ zD~U`cBNG_^P!ovjt|biZWbckiSKwB^2K7=5DE{Hty+_)y;2cHT8VE%lz|$WkePy`4 z;hjH9GK?QC!3@_@GOsRYOIr*_sk1LmnmPY6)hv#}MkMwh2#)@ zj*>8PszeeyaOP-OsGvkaso1Q_9ks{6hyzHDd*7)!O`(YU*&c;=owmj^T8s}3E=u%A zo;wxT%W&$Z{DZ)dJuV6c7fEM}QZy}Q@04cz^g3TL>tUM_i?1+fGv9j|^)~e@feH2G z;%N104zB4p3huJOBaWoollgjR8UI*wxf5`>o;rH!M0-zyY%v4f#ra-EEV=b(kr|hV zj*ZLX5Q4D!Ju#TibH{zCKO%2p_MM_2pb|y^9tkFdqVim}uxoMZfC!(IV2;sbJ548V zsWp-D>#IWjJFwd`3KK1WYsUD~u!Z66{tmwqLE^;G;@d7o>k3g*IKUqO`}3+i))ByxgmJL&=udo)`L-FocMf6Q;3j>1}krecT<+OdUyES`8yT@SwS)f(n^XV=;nm7aO(=0}rP z+>-jwaBGM6{LU3b=L#Kf4u8jU+~OL~hj4g1H=3E`3L$dJ74Ls(lkR(%!Im(JfePGa zfFOJMcq>N$^Gm>CH8DBHmr`0D0AAniXMqm!Kkd0KzB6R%rFl~6r+#)vEgEBxTx#+! 
zuf1g=K#h_19ag~wl9QqV%ugU50C0DK=a;}u9etG5`-*)5%|fXc=UdECy>YdgcVfd0 zNz*SrMdLcC%|zIr!R!hL6`?eVrU^Hsq$b36;w9C)x`&G=TR*$$2mnf=gDjBIX0CF|ZXM-0VB@UT=1PI4zLX(%mGTcXpa6}4AWDkRrJKq`IifUXM^|uz z0k7}lxWIu2X(Ea&w2}sHk7XDZSlcj5e?4p*V7f3qf{QZo5UIyt$b)Bs5uT5yoW|DT zK3Xd#exzi2RAr{F@=K~IUld*Cow!ZZbgnw#uEl_w?dy4Q zo%7{)Q1sd(UG=i{mW2`P+Si-XXTSRW?%&WaYbeh$`h2R26S|@lp7*O`@eM2cYm)U9 zP^Ssq_~?jnB&xVICV=`G+lJU5HHR~B6qgL)v(>}cGr8eYa?Fw{F){G;KXyEE5)mNe zy4B#xbePFu+o!!x=imWto~EY`^sVg!mOn9`TAR+Hwcj0H*%;s1129gCbf67hbkEal zY7y`Z=iD+hXDS9j*&CV%!lRRg$IT1FUSp5;GSVrE$6`05?htnU=BjeoDh;t%1_hka zJWZsX1}v?EO*{gW0eg9O*)q|rGQ z?z*)5_jFlg=Jg(2?@S7zv_Z!FlOs&{%%XvoQmGCV!&nO3Cdul-ECisdtq*wmcTT;8 zLiiWTZo4j2^1~85<_VaWK`nagR|L?Hf}u*iDu*5e>x$CLKtFD4nw8IJ@ngiKcHp>4 z)Y2_ID2GSQaB(SXqz+MBHqBQ7zYkEsHD{e4+@?lK&ebm%U6rD^_@VnKY-Gga5@>Fy ziMNWxfollB=_v5YD&L&X-#GuFDxL`)6DU;Cqq+=qIGw>4N@|z!jN#7Durd9N$Mgi+ zY0Q4=t!%Mb7B(F(9DI?C9)yH+p$!yB&pB=4D#r<~wlNE9AJ%Ezxjoud#GkV?pm1(O zaF01#xHTGd|IRK?EpK@0a3yU=8mGErGroi{hB!jGS;jsl_^`9-yY=4d4vQ#MCdk8j z11BiR)zv8ZvFh!{jCA#8F;eotaf6E_$j?=$l`jdsb;q_bopn(nhq^e?2p3+NmBJ#_ z)&dBBn*#s^fPiCx;4e|czQoA90M9tLOSv4eak~=J3^nI+zBiScbxffLR<`lj#+lpE z`DW7w$xt)l=TDTyZjW@TNZ$p9E-pv3Eiqx0$V2Ma;6d7^V&JH51GBOwxZIe>LPn70 zlT@P0A0FA5@17Z>WW41qwa!OpHkX5Xdm;Q$n3+?7vI9hmo)3mY2M~T`nhwGH_iGVz z{wT2f(I-#-B8dN0OPBCqcv+66M+9Id86DtlUjSJxh~I*WQ%JgZz=t7!5GWc09bto= z+-xlR$cf^!M&h3#F4E0x6lM%Co)_i6;^7tX1Km&keY;Q`vxmXSW(P3d=>JeYTCMK3 zRa5`PhHMz-bnvfflA)w>fw8%ar9#P<<_6c|kk*z}?wF`4yo!}ZZ`)OMSH??5Ti(2= z^kNYbiD%w_lr>eIT3UPh`FgeY9C94W>{LJ?yOD>o8tq_PPsWtO%GG4Y2SY!W-KzWT@p2FJO8st2Ga7qLA@Pfw% zqvx`x_L-C$&$9}j@NlDW&0}d-Q!fF;`E&i~TE8FFoB8Nj#^q%NEx!zB+MJF6r;;g+ z7;v(4>h~y6Tx%k+<@p8TdEd8$d%q)c2Nw#jWk*q`!Ab2zEbRGN=QGpNY)IIzKDTNM8m5mT}rY4 z7vq{f$y&w95V&|tP==j=RwomnMSB7SvkcTYgmc|J4i*zxqNhBMGP8vkbuMUw{p2ePirg0`aNp$)HZdh7Z!$Qm$&vbo<3y$b~v&*$=VkR<^ott$G?x& zX#zW+o9-p>eGdt4YYF-LU)YB0`n}^q=5^L>0ye$*OtnKh`dcj8nh@D zbf#jok0@OvVUKjZUqBU^FHmcUNX5l>>FtSyDMzYJ$`)e%9#;Rr1Z7=EA`B)-r?VBx zNuSNG;Oonwp4a;tP^yFCXMI)N$3fn8G2jF$NV6O=i(VltYC~fC8Qf}) 
zI6Zlu9P376mhRbI`Gx2NBFF(M`$$dlh*x+d8(G5Y)=xe3W@QdsZMmGCgv2xDTVB{v z+dz}=@P`$|b0SAPEAEG>NA#V%xum>7tgmgvBa+CyfN>9%yiNq)P)X@Za$j+lJc^xW zz#N!2gHb|28Bn3%S&F;5-|_7E@V;zsx^d4;KGukt^-)5@Spex3!GbrS9{E{K z)vs$CwMq#?Va$=4O?o4X44Dj)qA{-CnAvJ|H71gWj$5&C(chb(@o-L!Z&1fZU#pXa z?8mC30gZ#)-;)kIItmK%$+EgF{cE5y;~;NG5hE3elMI^2unliSTff0xVG>hiMbv$) z@n(pDCV0T$9X!gseTAqkZ$d)5Lw3NegBYMVO^zlX^abGDY9;jp zlEP_vl}gs+r|%LII*k?-?R%+Lp$ToYP_ap+V)SOH6#lE@{~eb(LcEY(dlThn?#u4) zd!Av4RJ1DN0WpEIiE92k;xZM`<8T$_c@H=}%i^|{26ilVog$7(^0PM(JKWU~6sm-~ zwjB!!iF^)4eC-$?dg`qbJ&2yaq{`kfIRvSlIwvS0UHahYYa;C-6iChln-`W)FT&G{ z48<4gKHJfR(s2t_#P-5}3dE-yn|!NV{MwZwZCP!1Q=Ju<~~t$X;0_P0r%y`A=$w;9_tht$X+%w&VR4$ zO_E&_FhduqAYodIBiJZvVqqQ#!WqHIb{%t$K`037pZb4qnbMWs!59_1k%;#1HO$X# zV5TZF(36}J&xmLES#awpSh>)U%Rfh|0|YqjCxcjZSeuFY&`X{LHiHj1upMI{oz0ef z`#+)}Uw<#lTdwL$F!GKnFb?_Vp%_qF?A&=7`DmF?8`7$Wn8OIYeWf|7 zb%|jxd4|M(SqJe*Ak(jH1w3i{-lpVZdq^~%cu309sAnh$WVJ*0 z;JZ+6m>3WdtAa~O;4Wmitozq7Q6>cuJRYF9s;3B0_;DK|Rd&i^HzV`*{ekj%0&rYt z-4dp{wLda?cvxoydHPXq6G37&F?9JYG7l5GEGD%%OQ3V*%V2F4Ls079yYGAgFob2m z4J9{d8F+}MEO$*(qAj`Ehijs<)J^i?iW+i5lMsri!SyR8XbQ|z*vIM20+>CGHllmQ z;j!z$GgjXJ21OGa?4^*b0+exlo=@8iWF_D7-Q*1LmBq#n32X5ZEX_@mw9VjL2at=R zG5;o#*4|qVck3Y{I3k87-+`l+qFzwZ0Z3VvMS|;1SZcnOpENit5aXLbt1DWON4b23 z?DhZCu2qnZgv8180MyCNWblTnNq2_7!w@Cb?Z{4x1!iDlYwPDqq^hS&8m2$iVT~+T zGSWPBZa)v6FS($*bv^EybY^0B2s{R!&^x6Z&`xz=oA+2VDeLqr+esSg3G3C|xTRwH zDzs1b%blb%nexrxLqH&Y<-2f+aWr z2lgt!eGl;79BT;VN~A6xE}m)zW4+e-^;8`f)rKG@JUfpUT+iXHw4VHiW#w1c8N~)9 z$tD6xA2G@)^4SQMOW~&u6&J!^yu4Ax!B3MA{Y3o`fBRh(I7Rm=1-QH0m1R4GJxIa= z0;gvT)IEpLow_Tn-1inwgOb(HAeUMM0d_B26A#k0g8B~Flu<$Zz9x-Ij1K;uPwW7z za`WPAOk)aN4J*Bl=XNXM={~wLQQ8st3J-VlcaaPx4kkj67@~_XM&s?0Y5!st;vKXQ zGR|6<;pkyJ>=)&HAC`Z?G@QsrvWe_%?0=97tY8pY-TU-F%&M(0K~6+2{c&>AfT7|> zRc&6V;_RzoS5}aV2aBE~qCWJA?w18)M7?l&484j9e#r!HolIWe{!7{FzPTrH(oQf! 
ztyoySXZm9#d@)s-%!Ld+hc1Ry-xZ6Zz8a;AmHwx_jYwT3a7GBC={jP~_CFputm*2c z$l79<&9(ncRo9MwiI)t6C+5sRCgg0Tvc0Q+`)uB<69e_K<;7ULN4rcBtK>spGMf>_ z9I1WeZoTCX34l@Q`QmDYp;?_`juH85ms|PGc}z@TTNsaNh7m+4-$=2?X=4}3dA`2b z{jN3(un|lW!x)ymGw_b!Qvm;u6?KC~U@`$TLoigkj&owH;)J<%#4%*Yvh$O9)Hii_ zzAnRvD)6d)4;app!`d&ujp6>zM~chi5WJLZo`_!zy4(Ci z+v6mL=S8+zb;}oi^%&aX4TW<`W4c}!%O~)EQfN?yFb=}tj(>cV?+6d}-1DoO0#@RsH$BG%R- zqB(;8i&HhQ)R8u1D69M4zLmsJa$W7J{xt zhK>65w(@>}5bioIOG;*dakBZ>kU!Zb9X`Z_7t0hr;1}Ka%YZq&$X0+>Wp!L$RF>dl zh}N}7wA0vkE&6Ch+5Am2Y;Lshc|8s~zJkH0;$<*_>aev6VwOYqsKJiT?K*sEC@Ase z;qU57$9jk|5-^p=%I8}sMnlonD!oXzuRu(L1z3N%b4sB4#%n_74G`sbE5-x{-%}&x zR~)+}r!$*|Ed?#Pu-sFBM_^*x0O`mX*U*dTJVvCkHJV{pk9<>;&%3$^<15Eca)iHr z@kiusQ=v@Or?N`tV0(lWdi(Ip4bKZjFTXac2|RYVhb6%6w)6z;FMR7Aanh)0r?J#_ zl=N{SJf1$3YoX0*Jdt2JyO&>v_X3-c+ zQci!7Sl~TQ%He#RoOS~|ZOw)p=10(ne+}Vd4fN`&A@l?pfRZ}YVPZ|x^X2h5eWY=v z=8!21efJSc{@Ov-_tJZ)aq7IS)uj=EV|OlA^w?9f4(i-@PcvDf+%sZ=9&#|#;8K9>F^{|^=NTmm!G4oTBq|br zY)?b9J#bf{LXTqbM_P5ESZ0BLF1C<04o~4-pc!8DV6bP956iB>HyYzuwlu6@x4%ad(l~$GjCxe9C!tErUa`>tCBQK~7`i7%qbqYH zU}Od9OFiRrWP20rt`LKt+(9}Smra5i6e6IN~2AAiM-1wY;a(;35F4kW&L zrC^=fyhpC^ck0j!YyU()lQ>m|d1k!>I&$>|NjYLnc;?cwxsw^!(9{+hZqk<2)r88` z&Nl!*qnV&dQmLmsh=Y=aEs=%*AAYu^P@|P++#xWXsY5UVE3dF;oe)-J$_qYoRnVpOh636|vU zaSXwiyk=MN2z8R#M@nMqLbU`i}^?-2PVER-uvb6xT^GvNm5)0rW$EkR2Evh_L z&M#j$K-v8+m1d(+Xx5wcPzi-0EW0-6rsMh*WR_O_Z37d6R>*W2PJXPcXO6n?4uc&d zBpWFb_;-<9-ssBz5T@j0!Wf^3m21U!{?!Io)+!6icq#z1j{u}1K<7R`)Ccg6* z%xq>NLJ)`9fuDak3Ss!r>Y+HTh%3T#+LM~g>MlGm%T@+&tJ|TJm@wR5o2|2gZL{En zg+-5{s`5v#b-eFWVozjA= zjnRbRFgH?08yn7DUbhCSrRVMT!)<#FhBz0*XN&CN@p!3wGs#tt5q_O`Bfx!$gHfhf z5ilVjR(4wVACBLTF6?XO4T^T$kzM3O&jRViyN*E?r+Ov$R69IB3lWx8!M?O>$BxUz606G%H6%pG=mIMFZJA>KFEVy_#j z2=K<}&?k(QsIADKPxHJ=!#E#N{gT>XGY2N@(gx^BIp2oy)Ws{QCk1y`2zeGpE z#iD_?W{}7wN!AAsEskMW9ly8nZ=)_T{QdD%)+^j3m_3T&U5Z%^{+}j}i-M;B&6SJ* zj8#%0bWpo#pL0$f`r~)+1AqIHW~uYge4nLxRq%#EpWK&1>S%2Y&+l%EPzaQ$3%LAF zn)#XIVuGaAt3ed5?G1h&Of&_X5$?lwJ7|A&3jJ=K^f;%bLbTv#!Y&Pj3>EjXuOMTI7F=3cs6w6UN`yO6x0bg+ijNKxSdOvrh(Yr 
zE^JcEpi`a<8I(&uiIkx*ceY5PRC`)hS(; zvq3_ces`f*g$Z+VMS`m>ipYP z@EJdD5?b6sK3!%$N2!rHd3Oi4vOQWG?SloH5h*zA`R|b7BR+xXAB00HN(J_=%i9Wa z^d+ORza)W=R6;NnC3!?w8Lt&O_8ylm`7hAtj91S#9>)08OWh*>z44UXQ!NG1!Jd^H z#?O1zj&-?@Q<(z&bNcpQfp7~vj1_$Zin`j&DF7z!!Lx8qN+L$qB$BKn@0MvB*6UB*l|2JELyMHTdL2F zlJF*7K$S=MVOE0~SK^_bxYC-+t3Zah_~FbCsXCaZu?F$Lt^O#Fcmr~Q?jqkxxDg?V z4+Y~f%j;kFL1N4qXiim@Q!L=f_WLb)7sD1CUbFp>D@9Ppb9YJgt;JQK+N zUu{x~PV@PG%w??OA z#fhiUYn>r#9OvGJ`nPyF(wcT%nuiHwiFg&sO7B@=+0 z)yc343OK1D!43@hM1Vh@M}67{ZCg|(E5k5=)Zsjya1mgIOX!!|w<& z`pUvLsWxCWX;y!)+sVUmB_(hRDowNS;U`3p=hJvxb9ouPkB=i0CDNv)x^q9&uF^orYDeA zI8uBkxjOp%DfK4k&QtLlbg*CImx)@G`A~H^uDUspT?MB7{VZ3;cI~v^o#KFecb;_i z$F07u_~$NY#KBH)yOoN_tn?m8S($69)q3|9&-ua+MMKH3M)(93{q6y^o<$=FSGdrE ze=M9XK@{12@E4iO|2`>q%-Vvq?+Hq3n6?X$R`0(TDL;@Ir<-muABQF8R5i6<8>~Bg zHXM_7;W3qq2}vdaSc0O!t`N{MY*3VE<75?M0IPJWvqIqws~R|U1>z{yU?h#wll8F+ z+X@inhbxkN8#0Pr)O<}9pp_Jp*jp=!D82{3)$b{-;@!#}&_z|=vs%l}xZj1} zUD;V&C7l(!5dEJ1XyFwzd_%2vycv#@;)tHyi#Nyz(iqATTy0)O$PW$DrdM{+M3+%@ zG7cg{O2v7d|2Qw$G?pt*7X#N0!!3$%5PB^^;Hx%!qLA*}H z%P9qbC~c@gsb6yYkewb$3Qr)l1lmCZf)?xzE-98^j+!9pQL=heV#lmx$^`)8d zo0-4y=mGr^ftwP5$q{V!eKq)4)XP%g1yVl0qEZ)$kfg-*+Ajf6v}w}F5L70#R(%`5 ztw0>mM&9d{!#!^i9`Wu80aKx_tQgtC zWe^ua!^G-uK7n|{?LQYvh>c;_@5DLQF#yKLi^HfaaErqcbAY7=>_V8Gc`6&hJXahFMS_)g+NfP^k7#V128Z!RWVY zk&S+&?>?B?Zp)>4Q2|k~8=A^<*b~&d;9AGwyO;UH`v!1ZYC=zzi?-Q?1AYBpEBbyW z9Ym6Txx>ShcDEnb(&2qdkoHE|OEc#oIZwD_Txhky7}KV=bn&96e!3vkmI1OM|Fh0D zw58hVTyRBd3Sc)40baiUJI2$>f)dj>LbK*M z-7w)5N=em8KT`DRu`0}Ip7$xP=Mf#AYAGS~o>J!b ztY^&&J;oFcJJku38RWv!!U^G8Ay=1Kv{bOy8X98lYy3P(T)`~Z;u>=i2a1G-XHGJd zaE#Vf&~sMl6+QDHavUffsCae(-3fOFAuJl}%_lgepGo$?|BaTLN73Jr=4QudOn~#Y78c1@b}hy|DA(5j8Tm{Y zaUAbTnj0TO@84v*WwM@4Gl7qZfS*g!$60f4`QCT!8m{&*kf|K{_JwDb$P3tYIEu}e zX~US&(H(<9sIW>9Xm_Btyl%XTWtEcpH?#@6fc-ab5#NJW79?JCCEHO(J60}GY9hGF z&O$QOFrAtm6UV@+c-#Mo^h%$X4@hK+LrlNnhPY8)L~WtYt*<6{ia}sr8>Y=*SEXDa zf*`qt-l&k|-nf@qc41`;IWXHrBqt5d=mu__eqV{;Tru1uvm5XP9zvSYss-n{K*A;` z$`|}w6YHJWIrC+IF1G$fg+f>!K-MPWj65Ua9<^M&IARq=G=LDteizFs+#90v2ck6o 
zY6$1^@EXq7$GWaCvN-_5T}G2M)Yei? zk}u8KXujw46`)T7ILs{Ox zh1BVt+|^2q^dgHyzk0b-Rk5kuOp`(c@BREH_p5;r@!dTLqf2$WE<4B=)xed3uaL=- zuK{xMk>#!5*dvrkpnFr|Jg~0`JMOL#$|^1{XM1;M;_hJYYD;&$o|fVYrhs=^ZL>uF z;J;VEQ(-4(wtjbAtSx1-8e@0yIq_?y45MmFvedw{Hh6_2m!O*oBo7B59)(_b8pS^f zJX6K)RI;?M_T07My~HLnr$QQf$8F3h>Pv=)MDZ`|m9}@|vzN~0>+2FnS|(yzUMtYo ztHnajhMNQkD=ljUtjSCUOhbSWb2N_ITZPZW=qo50AXh-obg5hylEt~cfDM|F*;@`8 zs=s7*H)eAI%s?Rj1X1Kvh<%Zx=V*Sdde2);>OJa=9S&|_^U9YFHCszjrMkK=DMblf z$!aq}LALv73nmthRg>cO;)7wn<%4aD9V`gI6)Pj8G>*cu+AJSE&MpEEM7;bG*TYPv z_gP;~$Nvp}dwI{uDVw-wSwCf7jOBZwgI zgYGWC>FF)~rg z8?ME4+m+@E8}H%Fm5H7(F6yATS#K9(n{7adr{N=B97_3ikWHBz%hfpxHH>ik0_wwP-Rz6C&e=Xe^jcRSJ+m?MsiZA_0Ptj{j4H;vP&8l*!-|CW?bk39cd=NqV z&`tS7gNV8gMq>xY0pKfLIwv4%09=$*Fk)5)Si1WY;Qpr-)oM?1Wya7eC;M~ z=7sCMSNTCeKzqn_mwDFZ29dJ*jxEdAcs~VtGapCl8V!g~v%T4JEdkeKq8TuYc_yLj zIxOJ1xf4Yg^TrJ!G~HT>@P;u+xIfH|R%bL`4tf9#eO#~YEzgyoc))&U^R+L;zNeb< z^jR|dU{hjJ-sTB;#sH+(E^S*O@sZH@!DX4CIB=k6-whWeDzuH-Y(l}2wgjN_5m=T{ zOL+lfh&np@xxpH4MxvUy>3^+x-+{EQ&^|%D#HNqn*-sm0478i?dXpflC{jxlYUl}( zsvL33#8Q(=_xUl5AH(yMx0tlL#GU>ldtk_^jc~k~AA%=p~xw*AI|G`Y; zUoNlR4E0I4e~jl%RN#nxS>%;usjix^+;{UxQ(KyFoZIVtDT6wM3r@C`pEZEkF*oHK zEayA1l>Pyf0RUtp@piCz^$Jtfst{RP_XH89_8S))c7H^*pE>9&f~A8B>64==g$Ls% zj81W5Vdo+4H}E%R4kstSe_1{#2c03H<}vq* zE++)Myljs?ZdNV@IIphw5l3SR(;7U;=_>v2Ezx~1@Rz_4>wBr9nCMU`Dmd>~?paquZ=eyK*!~rv?w$;moxyVJ$`YNiJEi{wL*8Kn;jARDsTVT$*RoM7BTi{i z9QXkYddo@Jo*`Drpbc92oDsi4d0p&=j(QXq!Euf3Jg(fH*hAwQrNk{5p z!$Ac9{yg2tZ_HqG6MpAk%&Yc?E<}z8h8XipEfick*vn`h+SqjTK+;NafWL_cM-B3w zP+~NHviy1GNRpdcZzP*Y7RZJ3Hvvan`W=VHj_U&;BEIW(m+5TYUyiG;HtzvkBth|i zhyP|y8`y-Eb+T7nj(a$=xLf z_k}i{{hy+QO7RLwC9XnlO)RM^T)z46=G!M9N=6!Ckg?Q)Y#%tm6f;n<&cep8+b-U~ zY^_B&i$%5}cY-+s*QmL3*}wuV^ye2SZAc;{6?a4&Xj$~*Y@7BbX%|V6`Ayj+2~yP? 
zE3wRlmL1DOB9aRLJ{_8UwABYb$=k%-o`o(TZ~Yp=p(yaTjc-g&(6a>A3rSMkvRiR8 zOP5km?C<6f?yA%zcS&9rEaUJ*iMVYQ*6q<$GE!|5jOTX>)}YFsh&*5=-``%r>O8N` zYb>ggDm;1r9g^LfJiwTc(o{f<8B)7US4TGXj}U71qo1f>J>=k-Cdz>JUd8AhN6zn- z`y#X$aa}v&1%q!5C+GN`7&ubxbf#ub(Z%kPUl*^yAk+GQ>W}W=ss59qTXW8K%u5Ci zdjcXN^(SY6U6gg_IJe;5uijKv8Kd@NJ4P{d!^2-g?frr zn0&+uy{tj^a_F`9<8ItDYR{uavMoUf0Ymz2^+(x!?QndQ+dE(cWX%&_jCmal$eKd7 z)<{P@<>-MZ=T`h8sU3~ChetK1tO)X%@J82H0D!~%IHv2aH<-B zLK8lBzU-y_&FSyWZv7%5(RwlS^`p06vg(N7VKO| z-w-og_YzH`=^)#X6Np96zvE3>+J-1uqfyRZfgF1b+|;;KtAEu0x7Y_dCc-|9e<$2B zo&jxH=-IfYL+&EYrO&fJ&+#Nb4o;&1f9^Bj5<=fvKS*xdW?yX4#xA?bwmFnn4*$H3 z1DzvKUTfOozIn!3C0tT**F zCQJyiBtyM~Ae5iU1%Q0Kaq2ijT;A#uCW=Pb9$c9s=A7*~Nu_?_U-#MY(qZWuKL35e z{<7IfV)crd{N-lE`+ehRl0$OKY*yoB1o=S4p)8sn-`FEF!=%l7|YFVg$Y z+L?`Q0nIgKp;Tqceb_6jwYQ+%N>_~lfLWVfI*PwB?|4NE5+p3OM*pk~2h-UT=z^1Y zG981142|rkG1n>=l{4A|k{_9oitCq4s(|>dfQWUs_ecNH^}_#!H-&?5)RlJB_|$>O z=>hFWhB(-Zls_h^VRIi&GzkyT9~4>zXDD_R3L~_v2dvKZTDVHQ>2&b;UfK;|bts?j z#IUH+NCz>7s7?Y?lBiMa zs1RTY$klwX=vN`Z8Yz zcrz^iteIkBH4jYS`34M?XbS52tNOt{=qDeWl>`rq1q{+YI~$x#mVN+CGvPz8r}Mq= zNM>4naqbp{dB)5!0KwbrGtF8}ZJlB)`gIBh<8Sn#Mju0-u~p1X1Q*lKvCm9$23CC9 znQwxs11*F6;}4+;(ktrVempwe0Sri1%9462DVP9^^BM@(oS2$~($H%4b;ip4bRujU8 zDJ~1x7qc_O+*gGpymLmm?`O+uHHe=%l`T}g6r=d|pJ_R=mqCZMWMVi(ov|WzwHSNv zNLW#!kI_QuUv;oJUXO)RI+G%<-xUSYPgl!`Fi!3M(&WIzSDyK?vE?( zl$o)a9?Ww)Ujo0TrK8}6t+rcdN1|JT%ngL>lptW`V#%imHEcXB2f8>!F6sLl^7~LY zG{dIxJ}%7=NQb9Bq1i;+{Mk`KdM;p%X%6ZXE0%MD2FuO42-va@LJg`R=!C6zx$5 zA#X(F-5Rf!xV@S~fl+ssC^TsZae*V(l3EsyvO@)c%jNymsgedTBc;-`JWA51cOXOH zgh`g5OKeet10dscxEXkxQ=#pZEjZ-MuWy@3h;9~<03`c?PF;ySK!Pj-!Cm|!(Kb`n zGp({qWy|o~(FyyM2~u>E9`{W47qC^T4B-h0P~oSO`CZ!JcVYlc{;g^CBT`+R2G)Mjj(`=b32=b(?6tgWLLPaG1i*LK36i9x!~y zMwb5|(^5P+X9iqeDoJZ*}pTLEr{F$2LE~}kk z>tvsfn7tVpDhIxxcI69^n*>|=(iC`RGFe7#AVdYvr}N9W12jh7F3<&+0^}_hjtEF9 zP^6F6>^NLLcdYs50`B>O8d8pCy?n|74&Y-=D=yv_{M&v4Wc19-*av5V*B_M=>#O}d zdf{x-*^I~w)D99z&Ee@LaP}T0g~E-t6-X=ZOHX2eF`}ItZ>lKREWq~4^dGeKHg2Q! 
zjn}rfQ?|a5VA#NftCAKfM9#Ox%|-dd%)-I-lQp^BtZegI?B&O~>z8*^0C65-2#K+0 zw%#X4-SO&>^vV4yALH0Rp1eL@2maYM&Zfj#qh9R^gi$8VMXY`L`ZmNOnM^Ysncu@p zOHyFC1@U+`ERgM%+w-Q*ybAjAKSN~DIkudFbEm<>nxVci@ovadGe*v8NiIjGrDR}^ z<RQAG!&MEsjG<4lgL8iX=b@_)2ZuZ;`Ml#;XmA6A-?|T0|gGienGYxSu zhZ0Ob5)+d22XfzD8MHo}OEIKl0rU8N#klh^l=ta^cA1tsFYx4GgQ$F-vW zNhpR!n;YuUSCcgPLO2b>he*P*h*hUmK$t9O{t-Q=0wWb`P_XrokFN=F@aTPitRIwr z^Y3@`fH4LF?}F1sR*d!^BBFEFQZXXgOEqLy8?3wEe7&?2t0C$hs`H=?+`}4U@?KmD ztiCP5&7G)9%&z7_#`DRU*yD2nw^S@%krn-i8v$j!P_Z+a)`hklM z>swM+lFl%YXOeEsN)la^KUSpK!;-bNx0_b;e@es9N}?VKRbCe<~WaAM9pKV zuYQo~;{7ov`FJ?OiX$l8DDrEFI*d+W{ID&^%?;33D1FznJcC7yXe>YJ9bM=vJgfu||GNcfP_n)Uq?eV{wMzuR!cLPufsOvKYF+&-1 z&8Vk}9f)bA7#sd~cZ&UucecB|x-|8GeGEh04y#Y*u+4Y)%>IsI`Z@R@uIraoE9eA+82dgPKLkC%`sUpAJx=1mVY zVoO1b>XfG#s+vFV$!n479>>WkCLgk*ZAZnr1HvU)E+l9$nV>)AJ$ZTcSfF3@q^z4B zgXRuGnV%h^&g>QEbmqXN0$@>nhvV$5roBqc=+G5cCI;0=6(yJYgPj&oW08Py-@5Tc zS{;|4`eK~hRZ+VB*1o}R5O|N=$yfU7%$rCQJ=p9m-qHLCZ#GV|iMymR;~06Q&=Fy0 zDinJIBdpS18Hb*HWQ-pL!ngn0D%(>Hvd9OtA)`bqcZCAUR`}+^Wbc{+9Vg9J`E=qK z5{5ia!ERAnr5-(XLURS)iB+%4(LHuMK~V3C`>?yL!7CW>NV5m$ri&a#u_V8$A@0nh z#TQ1^5CIKwe#T$)Vvw{KS{0VoO~)ffATWs>kVR+=Nrc<|skUQ-ZCsR}X{Hcv3OT|*l9OlPd;^<1;4=56<7C)oN_i~ z14Q?(K*84h(Z)epGOeEMJ?hCleU;N6+oC%8y+S6J4BI}!_Vo2~at#$)nk}Jdxe86O zsQBh?aoE$IhuuDopsGw)Zrt`r^jFJpfO&+i0MS6R^>pqO^w3cqAy?~_&xX11;7a-3 z6a>6cEq3kqQzf0XoQs((yEtN=ir6QeY&D?yzm&|j*khmYx18sF;#tc3x;6Mj?Ts|3 z=UY-c-5=%m)$};OS?Aq3>!@@eQRKWa4lCj|JX{PhUnI_a5n~gWx9_L!Aqt|?SHD<9 z85LFnpCBxTW=LTRj7+@!emE31yJk1aTi4Wd<1IHLi&CA{JHep)`G%f}js%^UW zm(Z_0t@Opw5Gg4w&^>h@?G|QDf;Uk!fL|>R$MsnoROjV07}}fJ1q|N}I4w8!x#lgC z25od;e|x-6#C^p1$XoRj7XLT*Uei)XXR z)Y3H5bJQi}<^~}2O=1nxODB++9iPIS9ZS;R_bqIt z262~X>N0NQZ;6=>$7&gntwlqbfTE(9Q4YnWFfd8Nc3pcda%SLsT-#jmnGjn_25d^4 zh~uDS&;AwLZOF1eKi8LO?&}^#WyU^lDc=1~s zO$0We$ClsT!o>o~8Zq<6O`sNX1s3=>d#e-_>v^&|JTWep;7UzgM9z?r+T93rXK=?gnA-MLSb3J`ni$7&yZBWv_b2FfSw;SN=OYV?( z@4(O;;6rPVKM+vaYA~+f-&qg|y#-SZSiY?W*a}sJtORJvZb*SCQ$+Yyb@L z-G;mAY{SGMm1Vayy2>nN$K!C8*ybd9hVUwpswSD&{$r;lNq=3cgkY3^i%O^$ny$Zz 
zk98UMVnJejnKO58%iuOwBL*Btg~k$iQl;)YND}t@XAnn-NwC8R9N`gA=q0nO6j1b( zl6V8jVxgtIQYJHoJbVYcn5_+RLKG^bw84)p@k1x~9%5+j41Ff33kJ3Iu8S0G9rzMw zLV;0!Oe6Bw==hh=9xUN!Y7ts@WkF~GH)5$d6>XoZ)PI;Qq=BRN(VvSAZtI+Oc~DL}dxqY=tzG5{;QKvaqFbHXS;;Y4nqA7?+vtJCO~D+n9`m3cya40G$_Z+065LNo!=3BZQ8xsXne%+FeSeSMS2#f% zxOqeGpt3JaoLIz#+aXV|)<$3F?+k^=gfmA3J0S^PDg2)Fn^FZR=0FW(3e z*;*{D^m=M+Yvj@qMC$6Nkp))S5y9R!6hxVgU+Ws1f&&0R2;^1Xk;yV6<>vfF3oObi zh%f&bd-RvD;oS^?$~KQd>8YQruXsmDj=NGVNK=6`An+1W1L(yQOs4&`ys}xYSGrqb zjUIXs=eLQ!p=xK1TiIXPBRo9-clUIlUDK5tp$Qh$mY zeOi72zHpUPEUjJsl-ZR#GhJgL(DI?)8GXwOIjD%@7`hOhp`=6wZ5AJ~sB=wa*&eU; z8)-_4xS!}irHLAYC3@!C$@rO%7+tiw((pE>Fl90u_>;!3Z-wLny8~e3`5NbhN+A0M->c9xHwOutA~?b z9?uzi;?wT(rsbayTlmpungf5nC+uSJ$O#uE3ZSaN&g0jz{ul$;lS@IPt@abqv)F>>b=Ez+>nk9H&`-p=UjrG9 zskM_qfV_@GR_n0R^+s|r9mGrcr5nf7_kIUNhlfL{+=eLrML=b_NBL#|dQOjt+>)G| zxFF|_?Z8R_v`W0rrv~ZtL%o|+Ja_GAV8JeOE>S0HR@ zrN2EUZ#vl~XP^=`2L z+#|ZaOgMTm=>}ewFDJBbj%f;97TcU!XKxi#MI{nCbp*XBq-PtTd_+uyJ&36w))O<0 z#p3t?hF@0PBPQ&_^dlfDX^P7yPHN!Mj~E@_#q@kJ@`ZGh485X65I%iXScs4rXMeX# zNDMw(0VZ&6LUtD~s=Co{u8bb5=!O;$?2CB^hy!>=JTU3zMj4UiOf&(SCr+xKw;^iU zU<+fx^)7C9t!d^z{?PJy(CR3a`#D~;gna0p;Mz}Tn4RV^%_r~|s~R`sO~RX2TQSYd z_|@Mhtgk$IMbDPf8Goi>>cOn3`4Q>h0efb`;F3eG4TEmOy}pDU>UYuHgk{Oj96D^8 zg(K4D?EfY)NP3vFi|56a5aOdNJdVDCKMY%(v6!W%C8#Osv4_^Zm{Zko-yKn$pEJEV zmO)+jWl*Q!ql$m9)bftvzT4!84Ca#QxK5)YyTAN>+H4TyOEQFE;Px2~( zPc$vBu5>;Zt%W7zjRDnX;i;F4SA(b(%!Cz`vbK^;;W#d(K43Q1^lY?S(KUe5Pc#eV z)>3QBj2Gi6U#eyI^4J1Z(=RE3bHSJE&rC#?1VTi9s;qU7%wicQd<=I=2fb#qVsnUa76AokNloJs;!I@OLnLz{I$+dGW!O|N=UU_-aU+$! 
zqK*j(gbK@So$6hN=uFr^bSer0o;?aat<`_;Xv3htwxu?-8)=*`lxMDbdd)|gfupQO z8ImAngUeg5i`)d-N}kX|6(c1IjoNNEj*!FPj^%B{7h0wxh0!oj4(+73W7II*{R4$v zexbNNBGCu5J2Qm zML=q)r5Ffq6&#cymzG6e`Atobts+XbjcL+0Zm+2x0+K}^q@2%)UXh00s3!V0xacaw z@>8QD_O%>4MbQ9ea&;dCP$s)ve#J2yn`;baZk&CEgyt)*?;GsI|L5grz9rqD}m1@~lN1br7_Lbeaeb%r)ZMiB+p@a~G;}>WU zdt#8qw<)|h0ioDaUy_sPnoB8b+A-*4%*NmmSyXW_HDETA1sDQU8FxL*;MeL8@F?n- z?ff=Lc`W$wNUPqgFQ^^=TL9yNN!3wf^BjZp+tDB~YS=~n2?+}y6dy> zL}yPJtJ79psRI;uimgFbX5`gx<_lss%HzsnfJ-{?9Wd+yi7Aa%mb)$+{gdSCPeswy z7DEX+1Q<@p2=m=u$s}1?z&_19SAN+l+($Y#{@)Wjd1cwzCbRu<+r>_z-q2FmIf*I| zca?saYTjMF-WbU!-=gGWN`N#CKwM#wFE&dH^^Sws-_~iifA*jd>sZiy@jCzk)aMD*fMs z$HMlnu^)77fvc}5S?O|lVA)rl)Q8x^WdSQUdLN82N^gZ%hkoAw)I;!bstLm5{3;9s=KUFBbl*$uV(Qn;cu6}sOzaLI~;pv-X z)G4I9sf;u6<5X{-6g9`i;y4G&&5XD#anWPzs?-uzy2I#eC0MOQJM4K{4%xDaScsab z;n0^yaCk}BDMv;QK^f#xOkALeio=fJ6J8ycAK8;3IcRn2ETQ_X4%Hg-rD_=U^#L7VQLa6Bp&pptTe}hl)u(|8`$Lo7-~#*M9UV*|6d{!A zuLP=&RU%Y((w9oVdBk35d8{I*7_n_mvRhk&(RRr?CMQ?X{PMHd?;8t9w>xN9zvQTB z{_N8JmRsEU$I+%-%@CPUZH#M%_({0@r6iYj@&d!`&0#z_RaiTr7P=80bgz4-q895j zcp-4KG}c*i#AOD>4Y`kN)L;A*QOa*VIGTs{sPEI;7LatS@rMO}|H$33vr7=W9}m+* z8rR~kvNC*{PGZT_kt(izMdNx*T749V?7oWZiOklS8<4?2t;PAj3TiN%{sZSf>bpYkOm~Uoo&rzySDFNss3Uz-0|gg)S*{SyPahvG z!}LmDT~u$0c2B=-I89{8oO|4F;46A zHbLYS{lj*CK%qlWu{cfn?QFB0o)w^hZ-)GIr?KpR&>%fjqXN7HYeqR%p9{o(=GY5jOK`nj+N$g zAqV}ZXBmM#Fhv%u`leZo-u=VhMHHZ!_%f}y+o#gXG?NZ4Dm@yqnsyJ37frh6jr*&S z(FZOJXJf83Ysq1Wb4z$5WCES4+SI-8*x;ryHKw}#RuK_`R}I!x8T#7g1@G#bN3i#ALd*!U;I1Zla`RXiHGS$Fz39J6==hHHlEK zLy$+}exkf2CIc=q4WlxAe@S;S$9Z`X>nYx55z>G-xQJxkTf*kXk1L$RhD5WME7oc2 z5+CTv7d7`)!&MVhgY|iYKU5*TW3i%}+_j6Rbw)ZmX_O|JpPI!b%8>#&7)U^n$}U*f zwX29=eNF9QsZig28rbXZ&*0^xj{EY`uV{mmb^F{$?qh|$48(8v4kH$Tv5U5 zNwY}0xwWwr$yn^JbC`~{Q!U~N#FFJ3Zk;IaRxNuRS2_!%twCyki)@^I{`GWScHF} z&141S!Dy@+YKPB`QtB}n74fxF)|~fd$!LKHZUb8EhmsTV0?xr>n`4s+^D{b8&>NX% z&n?R^ZJ!!g=cBA=E#n5i)<6|TFjBU13GQ;1EiX7yY4aRe$K!)zv}k+yw=3HaEPHy8 z1yeSCc338UEQBk#Ygzp|#;y4Cyt0zV*1o&l5@Kqyw5LoKyr1K4yaYBcJ;IG$E}=<( 
zgNM{pz;C9wok(oS8h8W44d#6&%853&d}E0eFQ5@D?<&uU!nfkCVN!ITZkT5NuxWXO}O~h1Z4QNgFH8tC|BKT)#q>Kj?Js&bKt-ARW`#A8J$i@06=YUgaNNkMHv04Bi^h~&0=te1pUaIi` z>}c$wOdcvP8yTm5bEnwQl45>Ry4fDCf0VC+n{~-Ok5hZ#f9ZphFVFW@|P-*TUEBH~U-|Hjl)I$kHtZo!SgUu503~81@$IB#!JYR}COUM;< z2T2=0dR#sQHQfsVlu;ssSN#*hVGrRG#+3+4>Z>jVi9*q-S(pN1MWhCpdi>N= zHeSY3yG8VHwAC}}kePiq`J!yo7bh(4U^R+g@w+Il@8o=o<=jd`PGI$eWf#$_C(fSO zj{rOlG1&2iyOncoh-;r-X)93_o4 zdx3yvdWGDGPpKl@U0d5*Wefwamqjs?pUl)X(WfgtPy(l$(71FWkO9u&1}DedUWSlwo^f$r<-U^r*VhZZoTKEHlz>d@yEQquR2H!t3TAh7eY`nql(? z3Xkna6{XRPEj=^H0MO-Q`yRgUXKa87!J157-SgJUK+0t8LLW!{2qj&FxE>z0D)PL0 zqXVFw4&{Ybt^nl|KSX&D7{ zybvInG|dwfVT90m)@Gg(u*y@P^f`If3)iv4rP9>l6ju-9p&xb|_@lZ0gT22qn9;VM z#b$qlmr0IY98YhLuy7THi7#SM(qV>|x6;3$ zmk@79B@R`=5UuYuN^bkrDSVGc$p1n=wy$Z{cZHo=0aZVe-YU@CI4zjlKr97gX0UEL zLwMJi&M=U5q<%?@50o_ZFKPDvtg{ZAXJ-D2ss(Vy7GoEPeW%2(nOZlw;|?}n znX6ZE3_KJSgVxU7bTQ4DcFx!KjWrTU%h1B&s93>yCPn}y9GjV!atXeljh-J}$`a|y zNH@M@nGK~odNFM(6;-z_F{ganG(Uudc5=Kdv1J&QZuT(mhn$QL^<5o6o0N(;=kY8e zl@65%vO4W1QF)3L8P~iXB*>+d8*S#-B17om=z~SSIQ0NuVJJLJu4TP^(+4KQ*lZLY zOqT~JD8A)yQ42oJGs@f~*Ic~&j92BhuW17s93%;QZzpr0?bkP=Ld^T3Ch$lJD+26% zuNaZ^vQc=xy30~50>WJjS35QUBpJEDTtbh(A^PtcIE~C#m45D=UU=wsdC#C^l032R z%j;K_UeTCu!MUBl+&J@e_6Bv>OqrpqF+xU-qAM-rNdHbuHaGdi=6~Cz+b5bq+bnL3 z#@V7VN~>cViVMw_jJbE*>83rt46*UezWmS$rv6MAqaN`-mg+@_13 zWAZ3PXrDP!jMzN+>9Ye*-n8w5f%quOD{!TJ_v7d8HE>fkh-JD?68A)sdDO_4fhhqf zxCb3-$z2-w`n_7H@lC{dU(UyP`JZu#vzAa-rw;qg;lDL4Q|W6eOTtuCot&U65KuR> zVu>yomXo$K48lZRA-gZTtaAMElxH>Utg|O zmvZ*SZI*e`_WvT~Tz}q${oYw)YOf$KphmU*cI)XjojBQg;`55V?ZVn!AA$$2aIr>! 
z1xxGaQpY7G;e1;Q7VJkOremWCQ=4_C)WQ!PKL?D&q-ZcwP4Orj&ScziXU|46JL3lo z4m>Ps)k71Hw+QFi@0Mq+yY$wyJBT~)zXU)9d)wh#|9`ed;@{dXPJ0L z4N}pD0X?eG0}T_~CXCr|;Qw*-cg)wLBz1DUf^VJTW1CH^t}kZ(9IFPIdoE#&%I;2+Z!vg^KP|zqm~z<5#s(dn)q<08fHk?_%nLAp&e}9BU2=kKKmlWhui!n87>nP7HbPmOg zQE3rk+rB%3c_k^OF;QA0jUz0j?SkhszBpB>3T)lUDQPT%1P6E6*<#pb2#`3M&8weL z;UAvPf!(8{XpwgfNML-&B-bB>tW(R2s)8#n|GV2>%#7y=%UuB`WYlm->dPFc6mfO- zj2fhkQxhcrvUQ`awHQFWcyj#4*7zaUa} zixK26B1#U$C5Ygp@s!)+{Lu&o)Vgli0GVt)o{hyUL05S~lj(DNF!S{%1D^rYYOo!N zx`rpwq6;sH-q0tQ!n9(V8<f2hX+k%AP_w@oVXC zXbt&#Y9t{?J)$eLaCJBvi^tEl6C3u-%;D+SDL|;Cmc*NDl({kl(`g)KqydIiWhSjE zn=)HW7A>xcJ4hY;`i32=+0BR2l*{9)|B)!mTa$7ElYI$eX--sT3#rM9OCM?-!BJ)*ESjeY!%5cp%2>0a#w(CJN|)7TROC=r|L%0ncD5dm*$iw(g%O znDHQf6spemQ*2)P`K^)rx3QE(r;3QEznyR3qb(^oYryCwx#gohX&hvgab_>hkhpmT zSz3V&X?MPMI_^^@LnV(n#A`y*4p!TWCi6F0ZlO@?vn1XSKH9uOCM*Mc$Uad^*7XS_ zcBl3OfA;2eKxGJ-ooI3%M}GS1um7y!Q(wa9q3eQpmn?J)E!&Yu^F{%{BWc}W%rXR6 z`X6+5`W3GqGyc<6;*SDx^JVdD?6UT0{_DN!fnpnO=PG@8xBd|rD{>U|a;)8yXKZX{ zL6>VtDGGQGSH+#t<795Q5sA6Z^|dWezco&*h62H5)It#rJ|%+P zvUn3{SMaJR6ea;JLx|iiHy`-iIt*@67u%&<@8}cfr=?Q;SS=YIUxb@#?5zH(rV}Lw!D@9gGw`#n{I{%hiFt0Pxsv3-Q!a;z z>AM-LWKCQ_Xe&4dR*$`Bd1@i2*C*j~Jy-D)FJ-fn;_OOz*9b|kTWbqlKxa@Y)qO|l z4BxWC+NNy{TSyTDc%>t1L`0BqAaYA=8-p^{2gg3K9m$3QIftGxnmkhzSqJ2rL*?a= z?bgVelt!jjbC(8`318-}A0aTkre4mVP3$9** zh(~aDON{L-H5)D)pH01&c=?!QSC7J=eU8k%)?HM9x^Usu{`GD)NE+u=}Qx#1D=(Lx9&k z%~gi9-S1Er6d~O=ndMW2L!jz}m}!UF>jZe1--VQ4v09@pytU(1m^d2R)INURy9x56 zmQti+&a27TcDFc=jDCeaLM6zKy_D-{zjB94953yEIw+_xdR5AkF%hW zwwch0!81%;K*7Eh<7?o2WG^R&O7~VVS3>F|X-PY}By{*rBKpfNu z8>iM|6^2)*dGwZ;9e39;i$a=tXOOM`1dSnT#@OE&(LgV82iVkxzj?aoKvYUb)}8MD zfm)x6`PN>DW$fXg6@A9tM`kIqZWOxi*2TZDLfVul>EM@lprm&&dP^bFm>JW30sTUi=||gH zx*^2jr_`F6P#;X>pYHvp$t;h`UjPhTxxK35D2H(+n*f;nAH>?vo)D6d(6Cn$|C`Lc zw-EWd6=*2wD!6dTttUvIRRM+sQok%uZF`FeWtiiPy0mSJ2HxhjobaXQpULB?XSlrm z707G&y6W(% zAVuOuU0hZ3r%G}~VW>9o;~Xr&o?G@V|LhEjG(~CuL5^L9#HKj$_nABmekavNeoF@xm#E##St)EDb2S@f7MIwL8Zf{8L|wjg)X&2~ 
zb=;5$q(7(>%D=ntE^v^#06nbf?XVzDiwe*>>1g+fWh<$cq!Pi`@5IDfbkEMy%mZ_l zBiU2Myh41MZ*41K$TEuq$NCoSeAW6X>&4oOFnAhcLjZFrD~GnjdB@SOK=MHsBE3FG&_C1jZAvDm(>%T#NH$@h=AS#dBavlZx z2g33sO>};%IcRWNz`Cvy9J+Xpx}$syWg{Gr^RWfjIF|rlL9BXqL6%0&)Vua(dwjmP zrglGMMdrRKSM9inW0@f-xlYk=Kmko8T&_2&Ew&VP@=YMC>sZst`q#B*g=&QXkXDCGj@UQ;`v3ML;Mk63FgbB){L1vl!L6=RegJ$C{@xq z`;PiG%!$G-)o4?)0-Treb?`sj#gvJ!$)=fPeyDcsDc59s?3UL}$!cdSF#6cI=7}da z7WQ;z1xjlr?_RJh@@0rCI2;Av8S)ZZEna`l&ftCZfj-wVWBqlaDzRN^x;r?^ajk&L zghw7E;~6=p%A8jY;kv4{j#bAxred^bl&GNcCAD24V6t&KAF@tK#Lm}^T26`okb^rL zCQvF1 zyS!Gvry#o5!OxSzqgw8xDS^=?OI$&Yao+y4U>u={y_yFs>-BOB8gYT?r}{OU3H3## zeg(i^d&o`5A_jM*Eeiw*Qo^@04#f)D+gRpzsAH;X*Zc{%+NiN3sH*7{`P{pl^(;t7 zJrS0<`sRZp;np9&TMW%H0ZT8c5R{8#+x6f7oJxUw8e=WRWl+8JB#lDRiz>P84vCwo z{hcdQn}c%Jo4}puh)NlEQCKZa=GoF2djMOg}@C4 zz3Lpn7PMB(SuDsClyc{NBPYyT$C*CVP3L>3J@6}g*`Pd^){JkzKr{@Nd-UKbnSuMmbGTG&InW9tnI2`WUq z3<6*vaqP2jX=K%*^jyXxyaLxh?&N}Y`(Z>-#+)70m137$w)$_wT@|+W0-?nQCF)NH zQUuN?f9WL;sA8(jMgRYwEB8l`=uP zevNZJBKG<7b<1;UH_m=9etP{?kDu%``RHs!+;YTe{cv5tqeE_3t!!*Na9CFCC}Jf@2NBqMt>h;{xlTA&{G5BMEoqR z^GFZX4uh4x_x%;R+q)EA)LVe@o+vxlN*mVo-(&+Rqbp~onO+0J>0xXEA6n)VKBW%m zdpZxx3^$ANtf1bK`Ro_4Q*Fd{SiT56ygIn>G{UwzRvN8n8=RVA(-0L%g4qW%LN&nA z;h2!UG13pwf117O>7+d0KCYFw(_x}VsK2@5GlZ`H0p(e~itjf`jmY9TQ!8KIohS5@ zpb7nd^+*+Q7%ZOp?7ry)N_|yMcS|Y?erfc5VB16sj~ZV0=i10Y+Ql$nD(6jh&;#Lc%K7G~{kA>( zrr&oLi}0BNYZAoDPteJ6M9>G|%0PwHMJKK-OI0bF3Lg1ObFZ1o-?b>cJ{BPQFmz-r zuUs%@TB6cqL2R}a=qkW4)_wkzH#`6o7{`__2DALu@%-zp?vwG*-86`E{}R;{R`4XU zcF**3TO3Z345MC-aNi{BJ`zGgj4ihVw2c%6@`2YDk4a!52Y6D_MP9=>*7^btCMK0Bp3-6x`fL7zBq1<``n@G z1rLKgkxN0h$-lS3%Aht(~Kdsh4GGe+0g@DtP+Jj$5Jk>#X}T*@CW29o`RvaX7`koPV9gryb$obqd-e-!_H-ERGCoal}X_8x4K&y#PC%FH`-F?HRDzc2Q(p-v-2w({?gDS&0Ai>7O}=`)riRI zEeqzYQM|SVn@n!E?vM8H;+MA(cG=Wbr;m<)^8^wb4Wkj0ZEMp3ObAu$;n%sn2dnAY zNz5LT0(4YGe6!By9?hgneqw@m#I?k2gXwh7*3oxI8#vKGg?*9u7LA3OI&RDqGi^Vl zC~*~&2e1$s0EKRQXCuVED)5t8k}FZCjXMyH1!tlCJCv9a#efEOkC`^0E^Q8 zi(~Vjbl4t=ozxr1eK*IEtZMc=8sv2~1{#1oGb1>fER#&o0e5*#>mTQxISK?NAoPe9@>i3T|@TgJx{MY+a&1S{$!!6-7h@`& 
z%+{ssMCqy)!bXhiI(=gKdj&Z}6iZjpu}CnE)fPf7CksxEw%%3L>j9aw7aRRB;hg6fRPOrpvCuv8XXk!}%RKs$*nC8SA)KsfRj6Nu8vH;%sQbjkfOa7bq@ zl`(yN;3)VSb|ebjp#_K&u(9eWflDL;uqWzwJy^WlJsa>3x^y<{khoM5gWyPRK2-g`Dv*8P6{Z>9o3r+uvs+BK?HK*Cwb zkVQmZr{Yul`~BL%ONvNpuc8-jb{UlXQHk!sW+;ywA#K8txFA-HQwKz=|3t#ldT6rGr9qETl!rx${3;XwieAi7C-4H=B-)2zwpv{4SnXw>sg88gbVSH*CbUF~_oY$_fHLQzg)(zCd26)nshWYKFAM z|D+yw#nWNS*VtP?$)+OlEMEkYX+6u?6E8T@0QE|$6YX{;>xy}Q!vxIb(PzjKQQ$8s0lC(^xC*VpSTL`MN zxr3G#h9q6N$;xJnX!nJ*G(0w4pRE~K2Ek%WWH>5?7*Ugmv--^R&g0UU@yt-C&Vjxk z(oGZ$YCRCd!4XuEpWt&AWc>g+Z$XZ`+JI&`aJzluc?+}4J%$~2>IX8sjzRblSvo?{ zfrPM-zF0Ep*Va@9lyMi7EvVVv--=FW4$>`{h*jV=d8}ddS|SYPQ#76wvn(eaDr4la zU}W(2Hb{Z%E16(|_8pDr4pHXIs}OD_aWB;ryKBA@pdq=s)Epj_pFa%; zDcKcl*+vTcl%GHwD>@LyNO&xyV<%~OI?&e7934Yz0%rV|8EHFpV8eX@;hv6~3s+4i z6KJFXaGNm+Z#&<2T+|rEK6$O)J+?!js%My+K|906&>L8p!ulqCH!%eyql{EttGh0m%(7mx=8RzsB`XH!4k=LS+}Id{+97}Jg{yP$ z5pHdp<72qQsuJ3+YKJgHEJyi~~}bIJ!}{08M;(;vuq zw2@ng@dbJ|#J;CDRG84Fr*=FyuVVzrw4|~o(RF2Vq(Ma2<8Cds+LB(hBhL?GH9xmq z1;^D*Abs19PQDPJO6ehf&v zVy}(}-mN-by;VszMIOblyii zQ53Lj6kH?|FZK5XW2u3!X*V&;%v{8<#R~ui-U0 ziQU7JSI1RYuJ@vsliDnX3NVO`=Bad=EtgxW%i1grL`L^A1In#KN$e}wVUBJf<#35ew;X-DH=Z>bgZ+S@)*h4W$o9VUsnyxJ0M+f zDJJ|CV_9L$@x_->YHnX1;kR1LFsa#g<_7Bn|Tp#LVm#%Gr>7N=5nboo`By%E01&mNcDT!eZFBm z$MKkU=7FdM1bqy?6DYw;6$+mPf=g%&1rH@m1Lv1?U;B6U*)vET*V#U;gSGA;GcUw) zA6&b8h>UU9TU!Dg=9ga=vCMJd!htTR4ZDd?Aq5%JBP3M=K>KV>!g0cT*CEI2R@QE2 zzT&W+EfQ|c10l9nLN55hLR;6iU?U$;zp~Nm^qu%hma-K=bhp(ljGjuNx@$P7DFvwd z|F{O4!e8EcK!Vw2C~Lrw8MjS~hN&sR*?yH%dMU#0^!`iTt9yPG*jw!i#pQTUtEi81 zw;gm}AjA)ggbk7vB!37PoCbUELq<`^al7SJ^1r ztOPVh4m*+sda;`(OonO$AJtj=D`CyIUGxJZ`J<2f_@oOHs>qqzC1=H}LinL{UT6$~ zfGn>|H+|?DIBpBpBA+g(dskRjJbu>0oFR2(Qjf)vmJWN%^uXIK-+rRDT<(dY%K7*P z5VS-AR_m`72ZP9{sd_&XBVx0=dn$Lm+aPDHn9`b9mcD@E$$51N5ymE5w)qU=uXTQC zMm&q=J%%MVgaLdt=x-6~`CUn>?W(G^1RDq86NLUFQ!)}-Gn%ABR6c|cN$_+>=OXD* zE7W?MlGWiMe?uex16kpVLw|2)~xPrqF8-)z;55DR! 
zYm1m9e$00FY0i4TN-<+rVPgH}?^vZZLq(FGk9z~?*2#qs6w(0e0MPX62n5FIBDIdK zzym-N)crJt03xV2ZW7SEL8q-asJSb_$IOQ zKN_OIjDO4$Bnown_?^*Rl)j>)g%ZRKkOa0De*Q_tDrB;eeJR_BxOy`p-*9=;EOPle zgbr>kR+TV_$h4Fu!LVbK>pib?`T%%$x7GQQHidxbR@ypV?61GZ zNP^^=9LNznKz(#*{aNeD52^Z@Obai8cZoTEti|M!K1^J98{S&(#J*9u5EttF?{Q8- zv?h8zQez^LkCKjinc^w2{d27_PYczJxpr`0pFEE{ccI-F(jFwYyvScMFxDR~?K%H^ z!yWUeQ0#-;^TP8}15yL`1rgrfPWE($tfXDzS3+@RJQ8?Wtu@Hzn@Gg@)C?`%(kCd~ zS|KgXsBHYJj-Z6GP>mMx+Nny=NcchpF?zC}cYqKu8I=ke2|!xO1S_B;Y8 z-B^@j-YA4uXc;=SXXBd+o#d zFw!s7!Dzvk=6zjHv-yyy0oS_Pz)vA|g1bam+|r}tr>_dCw|@Va=A_k;BsVARx5I6t zGw3*CF&AdXG^`lvbb^WdEA<(DkR$%l z)9T9+^c`j*X9sZ2$fB^?%IJ>8?D1CJtadWF*QdI0SI$LwGo8v=#MhT4iKn$k5Z&LX zl{w_$la0q?OaOKHTsEdH9W?+ISuj%s!0V{`$P(Kg>0{+ZI81IAjo8?UGP~;9 zH?fLeIM0Ybq&R)Vv{~c0qUhXHaUDUhwjuhULcEB1l%+(QW4Xnw`(8j-)w<%eYpcAJ=UsakY1V z=@5Ga^^cQ_Wk=5mT76;3fWHU2wrbSPjwQQF+b?Jv_GS=?BFeP>5w0SG=uJhTcBH1U zt-Q&9JFRxl)Jd>5-m??)Y}lu@3kazQNKxfM-x<_5h_`Jyosms+ROKqj{p+PPI#M;A z{PCux6Tmm>$TaV}ba|~NMcG!{iZfs)-r)*dtFQ)*<2E{%5y&OnT4Spq^>0&zK7Q|D z`&kpokblQAw%R`!En?R`J{A>yw0VFhhd?lHjfNg9!KRXlB5($g0rDn)Ba-rym=#7R zX>qK9*z7@1Pgtcdyb@Q3&rKW65QBTcQvn0@PMy|xKlZ{;`Vg=-FSbIcP4>ePI%>m9 z`FI!Q_3AFy=}ZB!f-LaGPY8{_oaLTbLDO?wKJvQb+KYb2-`!ac;n_^%wUH(u2K~V@ z-2|=_+P$PAchQ;mksa6et|{emu6^CYn7gE~tWs7$Fo*`$+-mG0I5wCNKUGnexiE6w z7*2y%6r%WO(H+1PSo#(Cm+yaVeRyM!WYWFN)kZP!I7z@-d#s$I2(96*9pgx0$ zxu+x;D#0pr_qFPtTN&=dy!cw$^RHG+!6HrT?>~Wpg@XEPsdaw?H0W3n8Y6AD-3&9R zQk zsA}`6!7av9;ryZ{B{Sc8v5B&_WQgxp9L3)g>F2=bmd19J!ADl<$DSmD*w3K4zwg!^k1#CbC2ra{s&f{LA5+WnU=Kc*^rj1(DeP$-FKv)^ zM)kX^o^#iQpH@p#h?rx91g=@Iu|6s3C6Kx~1-%J8;-5q*m>*N^H@NtnTDKFBiR2i7 z6yU`O%h81nxSfFEOb&7AR5|SATWfgvMvISbd?~OXq=+a1+B{vxnnX03I2KV$MJqf{ z_%tM_Xw1PsLKl*b?O!~@J4#<3z7x@?m?l9H_TsU)&QKopvGm8G3QpT-BL8j?o@?V4h{n77=@H@1R$Ud zEdc)7JXj%|4cVUKMhRBO{j{bVG4+cOM@WC&QW!uvAYrX7wco73WA|e# zg1p8fsW%?e4HxpWRY=_P*hEV7m}J7`TPYGA2iVv@4i%&KzhI17p+Tn`Z zxN7crgRwz#+_5jJ-@T7{c9%UIPQ<-(nY-7>>Sb9jHLSRXeT@&<&O3JL6IfZ4Ts3w-$=VAjH`Ltw;Y=vJrwF6&u3Z 
zpvAjDAb7aRDAHNWlsmsccCQjaFZw$`WcXkzU^DnqO9`y;T1lm+3lQ1c?K8o5Q9-C? z2!X4nihJBB7>;Q4+6I4$j?U*T32HSTAYW22HtEr{n?Wv^VL4 zT68ne>=9oi(klId$=k7 z3t<~ly3oOs6iUp!FY+&6gIE(0HdzQOQU3va?-{_QL4EU{IdfHG>gv5UZ~2gzFlgE$ zr5MMtSnvV5&qbvBiYI)r{*WdR1Ft4boYTBmZ)r=1KAiw4wn9cpa+{Q-M#8H{eBPO# zLdqZY@p~17G^k5v`5nCMCsX^pO5L7l8-fMeD0JJ9&#XjqeF`=@Ex{DYPc5n`1->9D z$kiC`*WA=;Q?ae{SKmj)F~`O6WK{c0jTR$`<*0n*@ReniWnJBrHZACL+~a>FDr|2=--C& zXIZ+Y7`7*`rzm|Jk{l0B*&KF*{c5xkf}|zT60?i>{bds|UUE0?WA)@3atpK*96guI zX#@5Aq^TrykXN1borE1N3El5z<%b^!hMJys@Dj7s8^4eGa2JiWPQzUcSnqu9?S=|I zPB7-;FJ>TqFBpG!sL+%7d3pr8Dk9Xoqsv&=VwY{M5hH=sr^+!d)mrr{oX*xh;K{tzi-?J+clAt+!esuZl` zl6VZOVfvdk>_em@jkZM!h@xm|TSVpRxyqu;FASFF5c}O;Njabcb@F(rjS=~4<5r*t ztV4-Tk|FVySkDLFti2PG+2_Dd0cG@P%R61D(KqPWYmYk3agNI;M+&Q@>X`h`{brK; zmQV#pPa%R=#7M;;<}Y&{f<+)o8<+_9xlVmEK8>ntrR+f)it~L?40L1uMT)+=B;QT9 zMo8fONDYkuqfZz%o>_bP?O2nJ$k08fH3_mWkHQXLK=LJB3`Ej=W__%@TP7$g%&7FBY;Z>smCA7^+FSY0Hng_z z)+0tVmZVaN?ZB0iKBnvbrFE!<%Ns57^dLW11gX*V5ij-8{NCzaT*&3S{*TFJ-+leT z3osv@d3@#*R4E$Cyox|oHz*SJ|SM4Vt+rJ_BH zXGtQJKXd?pFn)w`s&&J=N&Qtt4-_&8^`quA%1Ka zt@h5Or00rUK`Ab#r;{PFWGGBnRtHs_rB&TwKKxUWGtxd(VdGI2jy0*9Z zF%!Y2p)vnY3Mn2%)tqzGRLoI<)-`hf&G|(p?C+)B%(ftCP!JX6EG7$Kb0DYeDfa*G>RR=Fb%H)DQIzY9YPp7J`Q^9p^q z(A?ESnNHH{{Qfmyz6-<1a`Hsj)ph(0-H|mmdm-j$4Pu9SRVgK*ypev1)g0&wDT>y^;;-uh&^J`89u@@F0%Pr^If>bKDGRsdpX<0T3>ct*AacTxB}t|D4~VCNwKE3#H78npG=HvpT`q<=Yz13<1}P{o4a6bY9GeQOJA< zm#IZ^IjiQGPjHHRsH;>6E6An925VT6>c&SqiK6-HzPUm?m8h0s8$LOVQHX%a3Bz59 zBewx7QL~70WUCOZcA&iPXc%&XF+{1yGRr_=MSd~GMR3Okw-@W;A0+*6E{hhi%#mR6 zP}x`C4m$aTawjb1dP-;p*tIxOEs$glMmm|MwU3mu9YAzz8Rz(Y9c6F#_ZRfzBM{gZ zSq{U%bxD}ipi;Ur=C{ z{OlnHM8d#v$_WCThy+1%r(5x3gF6fufV=|gY{WITH%X4XJ9~^_`op&i>i}JmvWUe5 zU;Pun;0T3>ztsf=)JtSOb%Z!8&yZ-qga9poUU4|%{xDZMHqG2J^#r0Ny|Ff*n;si2 zP?eyJaUf<8vwEhe-jEr&E(A=L*mAUG@$L z>pko8TiBc(dPtV)Lf>huzZ42Ypcch3k@3W&cKtiVS078Ih&P9>tmGD6y{O{|pdvF# z?8;}e@9#Xn)gjFqb`hJVg?l!(tt)K;73ORtlFXy z2%_Uu`@?gTZ`=#11N`;Ak->Zm3eStiTGxOP1nu3~F?2&6JNm&kD z6&8+x9eoC(euU{MQynWK>y(t?8y>#eLLr@-8BNcg%!*;t*x#bH?KajGnQ;WqW=B4> 
z#VtVhSF=9+;A@F7wMtybyH`pK^4|01N2x&39L_Y+4jS2>N@{#1p{3P`< zfnNSDDko!*@0Xz9?%r=n+h8E&Fkx%E!s?<#vA*2xF5(2VXY|aST^FnJtg&*EAyR)$i+PdNAlU7WtZtuU=EEa@z($drb)#Sh>h>S#erNO{RI55%)#)} zNt^k(`OSkn;&vI5xP1NRn_#HTW}t3>sgYKJZ>?`L*mn+b7(}q9`t1tn_lJ0Dni528 z7$R(`EqD-TcLB>Z2IaXyZi~-XuL06|mW7ET8SCd=X#*Oz3S~r}Gf6M`A=Tm%%{KfF zL+!j5Px2C{@9s^D8g9!2$^teBM=v90(b-tQqjpr{?%5iAMgwOUQk<|w`=j2n3f@P{ z0QANa$yP;mnNkFJnrfXuR8PFdn;y2Sf8A>?`lYtSFNxy%69%&IzVUtlmpH4C2A*EJ zB7iUAJ{y$l>KA(Sn?Okrh@&k{oXZHRpS2T4GTJ9xNBg0!SpdBEamEFvc8X+I?wSW; zaS$F?cZEWT2Ti#T?;fn6o|?CZ=QZ5c#bGlQQM^}|)5YsH-AXsqzBKS84EDEJ-Nd+5 za^r=PiZRy~Hb1a-eVK=p(iQX0+wa|>A5wx6wxrGm&sw2byI>3dMvY(&5ba8DEL)G>zA8lcGn{@M8kr;h*zRN7s_B@k$IMVJ}}LVfdWGQ~2_Pc`+hfiW10eD4#g*v+gB zuc3Jv%zSvu$B+r#K;sJh7?gFFH&Yn!5_Bw8g>No&(B{05Oc?{yyejGNC_74-_;2z%DMmvDSEacTq^ia_sEnA3-VDnKP+4AO;6aH)nxJaHIm{e#Pev85JDd zsIB*Urp}f4pBTxEh4r1Q&MQC=&;M1<=?7+6SsT8C7tD`x4vnU1(uk>7+jd`wC_Fyx z)e8)%SSgU6Xgi;SSw(cXW-6RPh1WetRsHgFsNFFf+|aHJz~&j`{0IP-=_dV_0jDAT zp#5n5&c#z(Hy!qh3WBDHmuUBZ5w}Er)}~!A09$bYyEA*=Xt6!|lD@6q5q6Dce{}zt zuc7Sexa%e-Xd_$OtM$&-2FD=fD*f$K31w?w8TYV@T-i|?Mv^9diWZV@W6?7lr?y|X zR2gltw3**if5i@PbI*R#q_mK9prcz9Yku%JGEcc>2qF8EQeb%V zCF>ZlzF9^#siz?@46E3Cm^)_#PUFDLrIidxf?Va%*Tp@o--d+}SuX<&vdy?$NPznn z_;aN~DoY9|O#HM=!B1I6ZY!J-wpFVh-?JLn+^+xuVa?fNAo_undtAj_d$&qsb8_w- zS={IcuxcJ(E5stA@@kWQi~Xt!i)!Q&lxY&DvhmI`7?5GY^C%U11v zOnP$MQU?=x1)_sFtz!tl`Ie5`bd7D0=mF)kKcPpa zbOp+kn%pqwM7b4>_Wk%Q*{I?o14i*)owr7M!SL^7TiwK`m~dikJHm1n@Zv$u>Z5s5k@nS_APZv=mFp6&Iku~eb3 zF;(s~<{8%mB}qwH&bm_nKb4c728e@odM>i4aTN$&72Nu}n*}|8t$nH`yCtLJn)PcO zzyqnN(s;VJW-pNtLFz;Dz?Wn?R}=Aw-d?#>i_Q&vsqCE+GEwOwONAMI?3v`z;;?_%NlYycsPc?a}RPdXZ2r>w)P#)A?LnN6>;Rz70iY^Kc_34DE++!+suq>8^NES z1wjt0=dw>9wG9B|~~oT-cLQXh7oI?GyfLVLt`xtk5Q z-&NpB^5@DqRgXjH%{`3yTXFc<14mE8t6hIPad;zuVk`ni$wB&RR6I(nhjFC^@5&uV zqcyac1|6Q2XphLjcWF zngh@kvF(~-;bh+KLn48YaO0B z+WMpxi@u5SnhO1D7AroX7Ab@t+@Am_x;cw?JprY+nY-Av{q@XEJ&Nze;9w_P)!#b7 zDZsWn`FmSYG7_|yWJS?iQ;4G>T{qxSuRBOu5-_1&$K;LJ1)RHrnR*&4)>mMfj>`v4 
zI~Zi~-(-xwG66mUa8}WOxA<1J%pr&39JPB_+OEq+C{!Y@fSmcD5ByJ;JQ%xxh2Y|r zbx?pBkb>1I=rrVnHu%{aGb8^S{rV zS0P;BacaZe`9cwS4b(mI(z9k3({s`Y38)u3ja;S9Y&zm>rm#KR_SS91V4nG%U-?l#Q{cXr)= z`TPn6S)$@GDD?**418EBmegT4^)6+?7yquRKC{Y5w?ZJtxgZp@kj@aJ3us$%-lB7& z6{d;Hmq+ed^f>c$E52ySTaj^fzpSPv9l^J*q&&=rG05No%eWC4Oo`L&_0Wr(5Fl1qkefW^=%) zfe!33yWV}W7%Ph;kfM_9?DD|DMY&)F9Z&w^Va6NQs|Ar3k~xQEOkX~G(7t11cK?@j zvi@VouICeM-ziz1E~X@`p~`5gCK`pZ4(w|iMAuUL zqvQxyiG57Uva^6hF76{~kj&~v6R?a>*2341c^MiMNN~PMp3Nd7ex3Monn8mf$N`IZ z?F{JiSh#@4ro2&^m~8vKFxkt(sm924xEZ#UKDftsjiM$zh|BTiOJGBQ}pT_1s((-XrBD~D6v}(n}X|LQ1 z!K1=+wb4#K1R?>H*`(UlDd$S0)y^UaQYC?xPqEeE0;g z9pWP#Hj{K-Z3aQ!uaGVTP8t_wV~AyDmlR)o^9P+)Y*s-$sS0W{FdW-Uw$eSb)G7C3 z;h#*EeZ7PtGxus{XzFrUPVrhpu;VmGtQoMx*Q}_?5Ic0t39P^Dg-HK6v326hU*u%s z&$F`qY6a+H48Tf;PoY*UcBW3DF>SqPmT6qsi}UGtU_F1Tu@Ibdle z5>|Cgwv`nq%ukz=7yv&*K{jl(gq9rA8DH4&dW#A#oym#%t(AiAr6 z>{^trv?Z7sGyZTKeX7c(K3;n18ahcQ1oOBAQ6#m}7qP{{UcZD7c!mbW`9d-5`uK4k~p zj>YHHQP6#iJ?H1TbdW#xNVYZQ^aI8al_5yT?PTpHSZPKr&k2hwb1fz$!c;^Rqqrm z`SYqf`sqj*&zt3+!|MKQg+4T4X}`3C(e`ZzPr$%TYzEC2 zyRySMJ|<}cd_l)k40o;W7QH8{Uv^V`yqcGPOOBgLu6MIP5t{-YaoR!5B(Dv0rMt+l z-XXU{UPgSRkLtJWrhTQA!*1Prx(GUjHHKIXj3e2Q#_Ma0zi!~l9$t@zB2y-(33#wJ z-c9t@=Q0o2?r#@s5GNs zs2O`&Wh9hCf(G|!cY-5Uk>GNWCS*ecio^4nXwoV&<2g(Lm>06vQbgM_c~!d(dhM}F zmh@Ib)_2gMF#6iC+r)%DHX^=xEu1~#h!Bc22<0DpAm5wr;|MAsMi8Y~X)9N>(0$ie zFE<_fwsX71?VOoP+Fy_RD3)3-_nk5v_T_n8xz%He4aZFA!l5{CO=L5wc3XW4oLh@J zBB@}J1pouk`d$ndIS)M&6Q((bUF+eJYKW$rXK%k1-0!bBD!uJ`%%Q4zc))qLBc#e>ptbl#eN7Zo~w=08CAh*LyU^ zn@9$&+S#0H?lv?6*07wfFE~KtU114%4F+1mWM;D`TsayxGmrnf^1pUBkOJmVu$^L` zI#6jjKa8N$e%b|xgAJyfm1TUOIfU_^>(S{89|;o11QhHG(BSu{?khbPx$d-sTS~i1 zAi-v~Udv8xqm{IE$UlQ_AezgxZPl?2N7>~39A&T7PRFG}t~IR%l8FOZcgIh9Yeizg zeg@o24_I%6=2B2S=H4a%nLxnn?ZzVH(O8y$ifNJ81MOBv>sm8a#wZ>?IL33Bp-$i` zg?H1qWV{5x(?UC#~M;dm>(t)=riMW9xlt?HmHY-Q=QJo)$63Z^EHx!#6*BZ`OmVT z6sT;-%PpB!ragL4vR@5Umk%G&XjJ?@LD*C{Y@rOgb+;<3Fb4dnQcV@Mx^ zui-ertSyG}*kI`dFu>dUHDV)?5_f(YK1PH92<`$w^&twAe`)>$fC1nQzAic1O42c6=b2ggAmx<20(L?j 
zS!HnM3C@t#W>uvg>$NN|ZZyQ?78&Ep#uwm;3e^Diwcc&198^(wG0&zshG zrBUgfN#nDP3X?~WZhU^9u0`3Bzs$#03`viQeGtD*X064UJO42ce4ioYUb!%NtPz2V z!9=;)ijtB4Qz;Po)cPvGx8-E66Ui1ItK;`l85Z7bjx>uR^z09)ZZW)4k0%dgk8r}q ztzGw_DN`D9Iyl?e~ zr1$Xgk(29c3#rlwdp+nh<*wn|6LJe|t`L}xyraitojU|wCe11;r-@D#mISj5meK|d zju~?@>x4pp`(3VQ;caR_Ig1V#gtjw^Blgy+U5oeWoVwP7$I@}4im5J2+vhVVnRC+% z#P5mM=IIXgt(YTlWyH!Sm*#k98f%T(hv~08!>thNx9lXXF#nz2!+iAehQ8z$91D@N z`fTx7Vy{p9Inpuzh&i}VzgLs02RH5k3Q$ z?4m565W#B{|6Z0;!d`ul*1tEo>elH^PQL%}b%Js711qh6^#c+yByOmS?b2sThOPQi z21>0Jx!zDx!g`;4?W9kZu>G6TtOk{QE?r-HRIg%`l@hv1RP@my#IwBO)SbP`DVR}=;p z<4eU}i&R=w1|lm4@Nb&<;?XrHm_N;XQdx{IWoo)`5vdD18Zthe0Tcg`wAo<=cTgCG z-`;+seo726$13leXu9=AV_)8{lb0rhKs?8I_=dO@QZ$e=DrddWCJffrBx5Q0_L!BAk_6U~&}=wZu{c+ztC_f9^XzQyU*MR(jefV>|I&0|gommd=gXogy7&m=m`<;RX8| zU^{Dr)k!Z_*{40m$YGblZhrNeW$ESTI*h)l0b-#K-CH__7M;`lBP__8hKS)0Sz?Z(f&G^$j4$7Pt zB#wm>2#esf_`pSmkp-F(uBYaY3qyn50KIiAD;U&$g5xj3Fyzg%Xo?8`QaCHfUcruM z;|vVp3a1iF7PE=p?8gMH*4N?5*)u1BJA;bIlB~{Yifoy7*G{*H(t@0{0VooRG=1`h z-^7lv(pDK)N9}~?n^GtH09%E03GmC3c)^7-7 zU{K_Cn(mg{kg74mq!(UCAXwFPcWOmjK^w zwF48_2X~nvqZM85O|Hx`|6g z>dyJ*e;@b)?kz^&8|~c&HdD_snj_$h`jpzrMZB<4c(j}5X4qqvte{qbkM-0mIJuT* zxfrX)?QHAnMJ>ECeD!8$Qud!+?>UD0&0hTlz^;;uB}kWF2-Nxd?08Uq<0Ax`j&0dN z88~==lCk-YZ{;eMn=lMBUIt!${iSer=m-|6T4Q%o)lhQn&=$>B+iHsjPPyW;7SKC~ z4qHQW$N`v&nol$gZr8y|z8_;pI0&MX8fhplC1jF`C+|dg4gMw1enY#AHh!3P_?Pte zZS+nCxv*JY*=b}pysPAYlSe(j`jg4^Och<5(Xlp-Z<`x4pgq7`rN;IY--XbMni=D; zSvCCHDp|UtQ@xBQGuFl+kFnlWhT$1AowXIlMbZJd&4n~Norg));+0Te}|B1d{`hzlSPibTVc! 
zEv!GIy}+R9&uySK>c^_1RvS|0ZLCQZltHOb2a{T&rq*aG+1i1d8#M)%^30|9-G>iY z-+#n`Vkr)M7Et0m68?}AUVqAanzqY0rm`c*o3L_!gaX1R66eRS%mszYQ^|z$JCT}Q zypz2tvy0lV21ej!-D0;7JZS-TxErsPF#+Y>E-;J=78JwrMVWJk4Hv0}Bp>bY5p=iG zkRzp`<0tpBru>1`xz$j}p2f2V6!gowdAfS?Zz2vIp}^n}meb5n&m>Yy zsuSE5x&$w|w&k9(Fvnc~TU}Xvx2yp*npg^-T_Hp7p??r%-DEdw^HgTLA}!$0cFgl} z%hik?Bo>C|y!`Oz739WNMu zs{KmcOfa|0ln&~?XUb>LiA2Str0NY(d?smhES^tDBC6&vhev?BTZf|B>azSVX-VnW zYINDa@AzOP#zUU+iUT5DeYtw}( zQ;t1GORGxzh}x!c6nrHmut^qmCtjgp^=TjZUwqZvGk{C_ z1oYM2{Ayi*$+jiX=BR3x@`gBDoECudfPH1-oG!N%wGNECbUAsv03et@Lr)U75kKjy zNAF(kbK+fo1$pC|{t+A48?GX`4SZXBenGD>Yp&8=L<`Nuu%1Yf+xoYMJ^es3Gdi1M z9Wb8Dl;UnNGAt5Sr-oMZL{%t!W{}+z#MC~40Db*26K_8hYymLMFcx30=V0HrrcLhN z5gz=FQi}$_qwF!YFX2NtMnQ){Bw9>QFfA)&^oQEBjvgQMaj<*9%M9H($) z#;duv(X3o0V4}<%5`88R!A`}KfTl;ZFbQghtt3ZH7Ip38WN0JCR-AuWqYW`ceZEGssBz^MdhuYkN%)~@h~Wdoe?-{h>&HPFj6 z;$oB&9*^JqiX6MZEO5J&_t}v{3^`uqWOtfKer8AT-q3|j?5H(H&h-9CN8H74DHqsW zKZhr%1Fo;9^p|`i)XL#o>u#t|qe^ucI~ARWu`3^Djq9;isN6|D`HfOHciMD)`)Px_ zZ-}_{5)W+d5GVD>cNUiE8H=+HBx+U`_dTFsp&*P%r)2g7GQ2~Wa9b_Wn2En(M2cf? 
zwsn>TGNS#|ZVXKCnynxcACb0QM;e~VJcNF3em@%)y&aJ<&GFKZzHcdG4m~&OXsbfA z(*-D;*UZVSB#%`u@HH}h8}3Cez9yd7@H)HyO_K%F{b)nD_4Qf-Cnanz8TxbB3%bvF zWfSzRW>2e}5z1hYiGnhS@_C0Pltc!Zxo^Mr$Kvyn#EWg|bCyx1O1i zGO6Vt4O{c~khRq>*3HcQ6i5_YDB|1JRH)P4$S5w{`(j`lrcB!yqFNM`=)i{f52%8F z){1FR%?`|DhZZhqhxjveY_V6zZTz{-%|`B&BHPU%rLP5X_zz8&fOn9LE36IgjS`Dq zP`VM?(3^sWAU8s*NDac*^TB&vW4}iAYb~K6IsOI@P;bxcohzYSjmCCKG~`4b={5Yz zmM;2`a~y7>`e98XF)~}(KG!4P@|0y=pjbUt^1y#8V=c~lqziE7^x z`!uU}UR_6aTj#lUDM=spxjAJ2U%3k+yrU%a<-Y!Yqa_Ho(nD2ckdIa83G=sVH_K~iS7bxjCflQ!MZp!C1Pb*^~BbH$|wM+x1daER;)8UO9# zyOG+s*i)|ZtLYg;l*}hO!Vi#ptYB|zaAfWaNz=EX2iyqb)4%H<7qVJi@0bsxVX;OD zZgDkQB5y}?WumCqGI%|>#6xESByeQ<1`&MMRZFUvjIA=2rmn$ccz3#W34$wkTUXy5 zN<`)T4Lej2^Kc#-_z+8kMW+!Kfdh)Hw(jtR<^+8HYm81v$2Ev+S+Q^jS)hISHm5SP zMaY~+xj9E3O3*6q^B=Bj!UDVyAEvy}l7mwgaraF=(#W}^@CT2M?P(y1hJqA1@)0&O z>LQe$49v7P<}s~Og6kYAW(#|}b1ovbhBHYJZW$zBz!_ApG#Zv9Q(33HyUhlhX6e%B zmV3av|K1qcgQUmI4NZ|~mTdM+bXI5@hGzP>EUl`AMm@+yS(&d^0llHFlH0CsJDvpj z@`iFh6Q(qo<6x}S`ay-yg-j0py`M_9wYi6@w~Ty_sIZd1%7`+vWEOIv5X-bsRp03E zj|V(cJiLin9*N;s;~p$YuEquY8e5pAEU2aG1UFXLqmBQebU%X)|Jaq=G#b9~pKGFz z+3eLcZj~mK7N1q-GYPa-AB)s)Lihba#l3P7>9b0imB#CvhC3RcUXrz$^3hSh@6Ar{_zK^bcp=m;C*j$GC82 z9xZdJ_DHz|*@AK;k?ju8@*EZRQZ>I92|f#1Yt`uox{cQ>MN48|El0F+okLBk?k%UQ zqIwkQZCs$ZJYbjvm}+SR7UGGgxwXg&flYbF@L_8{Eju?c4&;@ZdB^;*?g1u2UsC7}HelKWiHaz^Z^&0m@>O?ZxPhnxq{;z-w1+JP;v zylmzJ0^(Y>t(LBw?H>)5z^4n0njD{qm@F(wBM9~J89eJ?d6q0{9@xV1?rY-1J3*2& zb)XcW&0T8Mwh)q}p}c~yGprmbdZBK@S+1EaNoWq@b;5jjCrXnWELOA)MjlWy=rNCu zBx$+SF;Ye!!#s;k0|uO6U1voY*nhnY2&hKY19MpK%V!Ff?{V_o zsHSuzY?bHG)(XK}_j7cE@*>uk#Ixn5s0~VD-GZ~hGIhADMPG`~+Z+3iR(>HSByy-I}R1MKA~4~hq$c%BqlYN2YQ z@9-9Ada3y%!Pg>uQfvk88LR31)#6&Ix3IiB*zU+jlcdMYGaPAJ^1^2~XNpY>m?l8K zxNXi1mTj=g?Y>IV4%+Chq&T|$L5~R&dm=(CVy~jIt_Y7N*1I;wYoBa1ca$lNp`K`8 zYeRl0W4e%ZDuib}9GD~qOTyUb0`!rgBGsJd`L&9~MnXnviDtqZD#DxQI4>C9Zz4h8 z#(-URvD^lZ9;thE`J(pOQM!EO0KC0GSgL9By}ftVbF2Nvr*%C^HBI`+>SW+=?bnAh#D=g1Yls9T4?N|*S1x;D-_NIB(?++wT8y6BCHqlXiGHm1!^oGEJq9=cCg*VsmacFZyn@V$gB+U%a2*W;|sAQYZ;00;4 
z9giwlZOTB^p|CvwbICF2`DpAXdGC1N>Egv8oCpwX{=O#+9~uo=jW;h?hCM0XGP2co z7(zNN7E5o71s^z=pV0!eq9n-PT9TPTz#v(xx#sQZGMH1mgZcK;tAbQ*|CYJ5J9Ik^ z>h~OlsQGR6^2E^|F;8X&eMT^;F7z|kJ}g2hXQXdLs^9-Re9aSzivZ!~ur)ldFtRuG zLz3Y2#6(AYwnUCiRgH5}Pb-SBGNv9a6HF%w?~?hnZFv9~{|a|Eg_hGtag>oG!RlZI zD2BtGbF<=i9X^aCOWNSuj9XGgvr{UY($~9xy8}lBGV_e)wqvDCDE}Ep3a|W^v$d9H zml{#jTqnz%_hP&n_J;9SP5LD%wFVuuI^vNc+at>bZQ*Vgp|^>TmR!aF&s+vRH9Dk`81x*%EdCFy0yj9S`~MM}RGiQ95hn z?z2ew+ymaEEMMn`M2h-^L9OLEzp!~>ouOqhD&*-`qkl&x%6ua&0Cm{CC~~U3hN()V zRWT(!I_F$Eo~(LDb&ur5Lesj%iMKgP*t<9?_ou6YnMDF|S_l`0JB;qqX^vlT2h#xqQs7TxB zZL#6Lr<0VUHuKQ^Av2MjY09&btp4C%CNiW;K5EtysN3BBI*AI(bCaePH!$b79x+AT zlt?>o(SetA??`3K9KBUVSCV4i!08_tSIQ{$O6)BRB>8<`plmF-@s-;uE!+8d+oE5K z&t)B}DIymKS)wWe8(F6#KlP>SW~uHg4)p4L(5@P%jiEih+|o9ER3%?eYS<3cGuArg z{d6TIck!JxH=8!9h=O%_9O*`EzbBSKN}j+q2OQH3CJs?zG3}W0kl{ zkIg34-s9WZF=tL8Qam?P*j)SsqFgt;;oVY|*VxifUH)sP4%9lBJZ-qmvm|D+eRB>5 zn6sxcmTB|=kwpPml>Wn0(2zw0A!NukqiG4fcAyw!$;-H@S2z%*;5rBPYhO6EU=!i& zE(+x|>vE1PUp9*HES-gtfV|&=sC`a<;i9M zjjrqeTF*0T$C4AR);cG}+Pgr&_WKpL+mEv#tgc@@lyfsSDNP*kb8jFSk&cM25(%v; ziKPmWm^35H8xp-3Gu0Vm2JoAWpHEGO+WRI<1_chI0X4MlXri6;DN)p5vIyq8btR`E zZ6J#|ScJEo4%WmL1R59qAS(N0Q>J50dhFEF6>8VvUFHy|bjJ{l$@#vaL zDnri3y*<8cujGfM{^Ph8%Mnoj>pfbPtXbNT_{gNtvy)g)hX zO76y)% zXQ+_qW~A`js6@UQ?D=}qHZ)?UWt60j#*r-2f%m|@1sEQe@^O)H_T0D@$yoC*PH?t* zyg(+>1Og${cO6-=gaH)P+v4BEqw%kZ+l|C-p8bOnKt%w=@k^8NPrUfpg9eevL?2UZ zknz{cke>E-J+d`=nn!Uz08iqNPw#c+`PT*2{QD|@suzJzZ8bDgR2{RNhJHRfl~Ty{ zJi{EJeDTq_m?3&Wh_=sy#v#$l8jm_LJlJ1*naV$@Gc-cx`rp%hWP7?YqHzqN8K*P>*znkZLxFkH>o^1L-!3iOoJQa-k zM{pD;teB2b^&w@{BbME-Yz*An$41j%}*h&(K?5?{3__PA|e1ZZJ5zJ`Oh-knC8?$|A9=|8KiB zJ$kmdIky%cf&YEu&Mx2~RlHIYc;caW3Jy)eJtbd1>ad4GV3@xEJpds$$255V+L z(K`UFGSfh+AW*xERFaC`dLDN-E)~c?dq_SF zw_})-JivY*D4A4W1557jsp#ml7wLRJV0{i-dos;RFS*{7p(L(Y9XouzfL3yz7mAIO zUyJq4=T|enu_C1Q%Ly+@Y+6l)IgpjxZN9$=3vfj7q&n-P!x61Y@b9Mjmxl6pCMYt! 
zJP29N^%`I(mnpMB-m_7V9gBurtpk)w>YGI7%t+$mJ%+TL;T*21u!OS5JY^f>HDOts zqSp{ObPwcO)_Pk{M$iLT{N^0bkp8$?#$Sk)7dzjNQnJ3U&VNO`#&YdN8?_ig`MYl!4ILcwB5b0)X1Y z;$!pFkTbis>G@e;Js>*fzJsNkC~%uuVbcRP z48mcKcId~TAq}k~4vd)IMdMpg#~oSUu2JHgRk%zx!nMEz{7Rrz9b+}H)q#wg*Sr&b zcd~BpX@cM+1uIKw!FOR<%#`B$Lk2Ts=coxR#YV)KrO&<(M$nphYJ5-f7tHTnA2ZHE z*%&%;e+mb{c(Y%=X0ycdxlt1P`e=~Gw?IrAxvUJ4cY(f)l}(U5$=Lz69FU7-2>hhZ zAk#>3hjY3I65 z`pKSSV4d+y+SlN2wt8TdEcP}wo||OZ`9x*}G*<;muh zedzGax*+*$WQO>i)*GtI1y)k9X%af21`91hqN5bXa@AvjS?Ti(yxn@T?*AU1c#OA> zb~Y<(txv_S^R~#qaWX0axF~QOVny?emxr$219;_g?#Q)c)bEbVvQM)fk!sCxTSSswWcB&}^_g0vc9=2hC} z#w7c+p$`OAC-vW_jPty3wt(iQjc8cPU+enzlL8l>Yc&c)e}_^GR9 z!xr*-h*i4|bc})#{_l)H>NxRl&_}6HXl>Utx(a_NX^|`hSdIHBi=x(!BGEA4IGCls zxIC16ZK4qxeuh(N3|Xjb!ieWwJjmlMaT_g`J^1?}Wx#MP{IHj`d*G@BPHch6DZ?DF zutjBJ%QAfq(wjg0wsAIXEouj zR0_U+XC~2khz7V44P>c6z~>KVrP*z)@!(nbWx}0gg1#itjPkV>uUieCjVdpTTy zCV2>YoDz+P&Dj*jXj6|*K-ja+G5NPj%+DXg?Yw;~WgR^TwyHFFWG+e8M@Ka|o}*o( zg^f-v!o9A|^U0YJTjyX(6^59Vqd92v` zZyncaF2b2s+XZ?Si-t;;5gVuH)b?@OW5xYrz+I}2>n2C5EnzZg zQ8tQstNjOPxL$v+5)6<{NTpysvyUcrOXKf6eqIc9_=y})v&8v0%>Z9w-uisrmHK0d z0ra)O4-_rP4a(_RQ3|(+??9P@tS4}@FT|HoXAq`?`h;`?(^^2DmM|*<76uU0L-X!c zVc^@@gaVRFeB5FDBxYXCmb0{#0Q&5F{B4{mv&0T_B2gk5shWP#$n_$+&o17ur$arD zK3P;|^IzQ;WoVbU&F$LKLqsI|G~>(**fOlmj_iVPhJG~3{JSSfyk~1)U1{y&QwwZj z9zt7Nj7g2bvJi-Q<|i^vxDnQe9NIq2cx8}>q__AO~ zb4qOyi;nFOQ`J~5oxbB>}*{eDJ(8WsXVfyp!QNwuI%s#l(uBHk@auzzEz$MTI> zk0g1eUdac^y7QACw=TYkkWVff@Czs(Q2NlDvsrqMJlkL;JNxe8Pt@EmN)D8Qz}gsi z#Byka`&v|F_-qwncm?C!&6!P=VNA;#F8S0#W=-1IKmy9jbKEK6ritm|zk9btE4Z~L zje{0r*2phbLQEOC+e zxFQ`5TWt_K^zI}UNxyLR@jl_kI^C#_41?mUzz_K+NMJ}~=TUf%8SJ-ELLZ!`^rK4d zkIKE;d5!?si)DM|_5IH2cMT4p#Zs+KC0I-C<~p|>#0r;vqHXC}P@e>}anf2vP_r-Q zl&UB_9B;KDLlg2)tz97MWl=^7BehQ^P5InX{RW&e3($xf;#y!cx5ySet+WOcv2fr2 zZ6w3rS^@7Pl42x0cE3d2)CC9&@Dfs6pA&hR^2nf!CY{CR4XME3YCNG;quIsoZz?~m zXw(i{(q}+Mh*dGk!Iu@;`CiW}MJJ%MaQ(Q&PKqkXiS#5=k7^X9Xn9u^3)hni!|e1C zS`%hKGg|7@uX_0XD``K>a>GfFNh!k2(2J;;X{-WamKkk!j^rLRfIs%`fLZFHn8%p^ 
zqGiW1P?~V4vQcUSc$Q}o{K-9zq!kVcGt^6VpauY<69T_6Q9LE|b9RC+)$(=e^4O)$ zi3(ecR=xdvPWlI{P{;3f*EPSuqe&VrlixHzUtxsEVKNyBrZU@(|9S}TJ3qoBXM046;hf4iNokjxg-%vEaiAOLOZoRBX+#vn1_WWgHOKFSUJ@2COM8&#gb zs*F$B0&UGmFfLgI66QyH6R6IysXBjBwrIzx1C;fsu@h zxzXby6}ljeil8_c=x=Ua&tmI`{(B%R6s->Teizg6%93g7Z1;A583lUlIK=o+g+h8B zrUZ;Vu(u9pt0)h0%|=aD27~d~>uYWK0W|YZwZ3sYowQ5F1}izV=nvHJ^#>mDccMuF zQK2(BmXbSr!@siax6{=b`iD^3N<8n}mwW^I76I21bZ_hYfDkWp@4fITT;@3L*SKRB2OU8e&L*dB~3g^HDx7JE2Z5HINSS~`mZSgx; zZ48-pgDc7F*Yeh_cvQCl-*S)S2P7?MlZ#*sOTEanZlgEdH$;-x-sZBOJ(5-p!Qh+_{LNU?5Mn5! z&P8+5e^%~-VgywbDro2-a*itiG*e4!Ah@@oN@UE{8}131bKoH6q|~asiihQt(MCK&X4ktF)<;RC$Tu|r`L{RPDQ3DyZI?I(&eHLNLdyD~%}BIcZU6MeN!DUd4$6&cna8&@+Da9E{z|VgKTS$|UPs4@#i|?Y4js+%E0fBsVAup~7K@{`kaA1?T%$hjg{i5dd-z-_-Q+W(H zHHhe4csqa~wm#xI(4dPdTj|5}%tlvVnuwP5)y=iY2)eK5cs4+h85k+PzpEoYp)ZAL zwHR#fF^I$PrHe}Gh#E2$@uaaOsWIBK9hjf)Wmi+CM@$m%O~AJBa!~7z&OkP$c??bc zeqX?ZJ-E@B=+ui%H>9CF2SLd&?ZL8HMoZ=0V)}Jh3RWlQi;l9C?;Qu8#hz9vp81_O zLmUsht!F*)((^X%Uot}Ae7UfBjx<2fxfGPB^}If4Ts;)-M{7DHP{G3c=NUedTXwDG zNF=Eb+dl)C(v|!cwat+t9#okTe~T&2aYX2!)d5U?4p<;KlLZn8XQ5HL#H&Ubha7YU z+?-~-`rDnCR%64_Nqq-gtyzv4L6NE5I&KKY{>~V=jYXnz@?$->XM3>(-hgpNqv>oe zjatP^@`-|r)7O}XO9?{*+m&ib8`iPL-=+S!{lOv>V(BX~fL4EL;j-k9;wre+J#K3b zex@gn23>?nJWRuh#z)^v%2>N4IKBh%Sl1XlMf z>*n}=yQ;;88n$3zP5-;0?&gbVh5(x4B2PsgNQJPKF;1gz8Ww^quLEZ^(tOG!7&mVo z%Z{e&Ocq>gWTrip1S*IvzqWjy`@FobBCQMw4e zbt9!NcWu-qWs9%>T`cKm%mJLCz|cSHgB=?aMO4&D?-6^wxuw zzR`j+90}0@mLcC+ph`Zrfx{hSB5R(Kf3$u+%^uy!fbiEU590~PphY5z@gcv0p2@B; zxgT;$eseT4QNFQGdP5voSoM-C5) zA`o|DTa^hPPgQb{^DhXO2YEB^9Eoli8n?@z&Bhkz>=fc&G^+*Zde_d`E~^>hp{~=; z+aGS3qF+g)%4$-GKZ~Q@O{rsBb4ztax|Hc|o$37^^G6HJrb)ciVqqN+ghra92s)Jb zOLtq4swmzKcG?EuZ8Melf7Unz?T`2oEoH!T`bA}8R0Pv|?0X`8YOY;}TRoytu%aU} z0+0*5eo10QXKE8Lb)$@EU>OSow7Qx$u=!f(S^A~RpZ`4;@57~B^Bnk*8xusZRU zmLGmd_RNY6lX8Rjr@Ib8M96MQ9BJy;qno4zUj9Faek{TQwHM7Ti|-DmSsqv49nf>< z%%^%RG^V6mCx)+-Gy8da`3#IK`#`-dT10(?L&WBei&wkWvX#vaa&iU*KYDXrq}=8a 
zL`Lk3=}gh^C>|GLpCJj`K%&WGK7P80I=JoN(IDYw-qlJ)YcK6(!`3TQL>T+O4qPK0>1hrto}l%sf&R+7;p+jmdc*t$#h z)bygi*ilmz&2reWLMjNr)C|YEur~a7mk%E0e#+yMIf3Ak9#J4ePh-R6x**}OZjJ=Z z^N>cK_$nvJGH#awk3@ICNp+!u>w{}da~QpMEp2x1#ulKcJ3^ZDDpwKU%fMB3s3B+h z?1A8KCHX7gY_OZe_@_L0q|bx~+pSoXa*Bbn!$-~w27LlWHZg-j7rKXeyiMpMgxu2E zonRh6C`|pDcz7Hn;g7w0rS?H{OF{1-CEwR1pgGw8~nonx0Qk8OU9 znqQ4uas3)eblCVCN;a?TAr~Ct+E+hfcTa=DN2`@I-k?hsXA(7GsMB>@@2^#7v#Sbt zQJk|wA1dS_dRPI;L&Q9(>zITO;<`Bo;^TA3`Wp_Lpd8{s?FjZgE)(Za(X1FMa;tnB zC;FC`Vys|0w;Lv2NoFV};`+Sep+@1((&#ld%ScvEpYwF3hD+W+r@Dm_?TH6#gz-ps z@0oev8OOwWcPemh#n--8@q(Y18+$wEz|Y}H;mWew0N!2(d6bO5hGfWnN)}8N%CLC> zQLtNdUXmY5=6aZ|c$RMFc(~kM_23YsAZfH z)Eohsr8Q!t!HZ-msyqYvKftSl=c)AJB#C|*IBk?Zl}xR&WapXPZ3w`pp9U{Tazv)#4^YxJHngm-8@{0UT7f^Va;;=O7IX8k+7vmGj~Dso0Tw-Wl-Ae$|_l3s~ErG)5n9QpQ2 zgEKGyQk9-(Ot}<^!Wy7&b<2`RkFRwIbp01DG`Es4eEYY9;`)V({l;$CN&tI{<9Pm2 zX0N-`z9)-Res40BCgTjpVjYZoMF&a9L7cvGf{RPLxk1UzK;y-_ZZQmvli0Cl^t33( z?1BqkW%80CX&2G*BzN598a{@9SJDg=`@A%0>Q{fxVua5&Hfm^1P@_;*Um0l}`^-$W zEvRl9cJ^bby(u>mm;;cXr>3v3);TYXzFQGH}Lth zKZ9VQi%>$}*hwTZe4J!ZO=!oUc=so^F zM*`VIV*y+iWa0J<&81*QimP$~n+IX1Id}&RcTWi$C4U4b3_)D^5%&>Igu z%KfB3pfU>U`A#qoqoVSIcqidju~nlP>15I)%(6YoMgNH$rtFwADGnnKQ4U!$BPiUc zIk<~-T8dUYU*{f25oXGy8P?s4kFG7FGL*>M3!ea^)|8NpbQHlHmRr+7@7(?CL$w)9 zOHde#iswSc!Fw^l=6i5_)jcl+6v6c~+v~Jdmfe?ChO=0Odj$fF(fe8&Did0CYk(C6 zg%mi7TI8^vGwd(p$s(-lERsM-}&zW4>{kU% zUMJ$EV4*=AW>O)XFb2!exn3E?a_#NpO(kN1V3H?7%r(M9?Y-?_qlB2m+RAazg#-DH zK0w=WIsRBWKfk-wD&NrhM26%0akwJwRVfLBw37T?n3)8NSy2k^WwRmU$;6ub`FD%{ zApIPL9qtXF?bT*(dk`}okB*_McoIkC&0I<6 z^~Bzy+W|3S&C9|Og8TV&;y6eVZ1v1YlMx8$ysTBgZXl2;_r#KV-q29Q%#SI37}Eii z6F7PR_m&RH_(-N9bpfJbYS4ThrOq;(qzHQi=Znwis==%_rI0aUwa9{&DKAdS?8p*G zSL5zQ2Hsrn8VTWSdA)YDRBXH2o*QV~Oq_Cby0I4@d54k5(q}KnGeSnJ<`o@mxtVo< z3^%DedJ*|*Yv?s-^1|}*EUw}0JoBeq)V%dJeg?ZBJC+kd-c?P&Zp2$l4c@sVA}c=% zxQIg;kW6R29K!2mEfg(Vudox=VIh8am|IWw1JM&x#;|9zQ~9!y?vJFb+4dgzuwHd? 
z7-^}O3Cn>r{cyTVu9%*Dt$7Ev{(|VyeA1txJy%7gTcJB$2hgdAczu(7)mi;OSD{Nb zR={(XfY%x~EZ_E~D;4hyT|sr+(0@FE?rQrY)Lu%P3=U43E~VTswQ^gThV4?oY4PJ0 zEN*7jgz~2q+xKG5m0Fq{##G(6WJD%jBcl2N7(2P+dNXgGsCX7U{l-+j4~+yXQ#JY( z6DzXkNC&dv_on@yK7UhT>-=<0KB2mu@Mun!ozAY~L8H)4frnL~#xz7%$+UlvIio;U z!K~MEaTWL9xg0|%_!(ZBt_9#@r_+ZCTw~Ka>Aqb0j#na;%wF7Vkag#&@Ec#SR^;I{ z!`!azB+V5LLA&<)ze$>)_sGsrD0I>??A;hZ10&=F+$cA)sWou_NX{zNe(dU}fxfwo z7LW^133~uWwbTk8*6|&e5$N~9SkDMqN&U0`^KidKYFF+|9O$SmG$~1R!wde5&Qg__ z$M-cyh_VF>$N*DB6vecE1JQdaKgx|Rg_D&&|0gk_+a`W4n zAT962CAOFlu0p_t---j8=K3JTI74UM_Iql16reCq`g2)!lu*Bw_Ygn{ zVST1kAxo`2*AkU7wQ~ud<2BS=bqPFTuYjXi3+Vm*_B=vDTDE7Op|&mO-6y~yMj)}l zJmZsBKt!aVs8urV6_`AEFvdhJ&NRBe+*E{!zk(6!Haox)sOkb15ytjRgg??m>qhW^ z+l2E6Db#!GzZ~HvLr1sS*eTFP#4Z~MK5jdmfXIN*CiuuW&Re4)7t{()M7|p?=Cu#tk-sYJ5~gN zXib56(+<=_$6QsK$lt7^Wv$)Sy)rNn!9@T7uck2$`II1rT8R_JjRlsH*r3VGD$*B? zcrU{?9B;Rd4b+n%Do3C*z9vcAniog7o1Hl!d^6SEj-!>z(dudI(uBCVadl^r|CJ5| zh&%IP=nI;EHkA?C{zS9|KHmo2M~@1Y1-^FP}{DmU5jqzIx8$?n3A1KhPS-O6`yBY!k znQzfqmmi#UTqO~{+pAGM=l*qN4Y)pNzu*e6Ag&qVgCQFo9|Cmyr`(_U77@^{XhGJ! 
zrJVENF=&~#73);33@|&ok24+Qih8~<8AMX85L`#z*jzm3Ej#SCX}#vWhw<&@))pQ$ zB=IsT-onT17VD;2bN`@z7cCeO96j-Z ziOTN1c4n8{8K0J^6s&rYyzYkLOCsP$#{9Hq>6i-`dsJ|9Bm8f}dMi-WY4&MrvW8SG zw+=RD>8JO4u`u(2lYK6~I|psRe#2-W(hS|tl1Z~-L*8L*9tw$k1oaU~IL<)q$f_@^ z7?iH`r52Oj$tdS%eULu18^NJ5eet`9u^;CH%kc*283;`idNY>yVnsU#})^==nBZXK|EyJuHw&*s`RBxJkfj(Ly-pXYT=0chFTYR&RqaC&^D;Jp0y)s_09UD;;X zH@!xE0xdYBpb$8D@OmQx^L)dp=M@Cfe3~x`?bGll0Rf_{hILy*LfHk+gn|WIarm#{ z6}|8wxpV+%{w=o6AP{%&*miq;Ab@5L4>V2sl+>8@BM%Zp5Pg3KoO< zd@;b3R^kzbCI-_gmB6VH{xjZJgJ=jWJ5uV`gxT+YP@Q=3PJQk!Zf@&uAR2#Sj-Gbc zH!F7(DU?|R_c{Pg7F5~zNVmd)MX%@;e3%R%(}+psVi-mxC1l6BB4z$wb@CjX8d9GN4 z;B3349xeJNlw2KUi)q6KOvv)srYfL)1p?CT-p=c*67~icmhu{2Jg>P^Hl&GtCX9#u z(rJ#h%s!wTTMbCYaB)(UVykdju9KYiaZN^GcvQwxM()_DXHiO6QvCxjXY%63QS};n z8!b;i&6h-f_dlctM)iP#n5ZlfVHgLnkZg~BYJm@eI!iVfpPAgy49j5>S^Vg6a$hW| z$Ost&CZ4Wf2*nh2Fd@wPzGvhA%v=POxAa$(gdPhERc5*|GBff3>wL&Zx9j3KFzvDn zd@bB;1A|L4M;hGfxd|W2W}3)~b>b6Hl)c`|5gucfYXFWYiA^o8=)bwppx%wG%NWs> zB`1}d8I0IPrGOwPwncuqdiOvXY#O4qC_}KdFfjl5P0nybYV;2Lb>|3YpOG3`U(_?x z*SIJPgy3=3+#(fpfF|4L)jLjx=|iN25jz!^C{PdH|=Vl@PSz2YO4Sw4H#L_0VAVN@Xx_lrPC_2@7}GJ+)p_>+gz#jtI$$B_mYR0 z9LdQ=p$*n&D-!t}R;BTb{b@+P-yZa&dak1%e1u>UOp=x*6lnC$c+n5ZVvL>!-^M?V zO31nX)g{aGX}9X8vggpm4$OCyba_S%4l6wuS%vHHGVIwnsJ2&LJjm z0bmhNVTEFws-!WFB^Z;xC|l_erC-|{p5~>%3V$yPBL8BWlGn^JB4kmJR9!`#v~{L) zHx|jFh4ry|-=dH!btY^kwfHPhj=1eX4NAvBpa%?<*}T&XHNX~~CQ?#=(8dqT(sAalg)gp?A^zrIo|b(B{yJ1LWY(cG3G!lNq& zf!h`AE7X{{<1ICXnmyBD1&h!acCf=yuShgR+@mPrXbVf^sAd46{GLq>^bO>c2sIlv z(-V-<7sEZ`pj6yH1XLZ5@$a~vi!L14VzphNWS={yhw=?odwI;|DINf*!VL+Z@KvH1 ze0{5=_qT3gW}AKh7&N#NM#mF)BGYtiE^`%Cti7-f{Ua(0MrmHd3Tu9ipa74JRQFja zK`QpC2tRg%p%}HP>u&?~39ZcBX;5vqROietx8`)dRF+EWxp-t}x4rO-;T_RJI8q=~^1iY2z@*6!d zpU1HHpfNO7;hUiD<3JI)8Cj!NEsA@EZ5WQ$dwSH(T?ftWUGQ)B(^yCQU zx#%aA8)(s;Y{0E{W*XinYl8b_KjaCj*0mOnv(YGr=I9K4L#>mubkd_m;5`BPgwr{v{-CGUa^k9zj;VVHP^2gt4`L zdh9fZ{A;a|QFC4+&}s<|oUv?J=1@&m6ADn^TMUeGaxWJW@SSqTIP0CO;xg^`4J!%K zL|m9Dp5Aow8q76B=j^eeQuZGHKcyi1v!Zk099YvZ1Gy~~mhE#9D1WSLwOGXUflQ?q z3<{H*G=TM?tTDe`L+cnP5 
zkOU(!QihrFzLrM9*VU%Id7BfyfwD?+ZjrLAhOG-gyzI2Di^kt+y2wHmXSys9XC!QXP~(n`8w(7m5|_1qFP+FN6|UCiN8d(# zJ83s2^TrKgL5=U&T_%%%xi(fJ6T{`o@c{a`O@%jcoDos6)8(~Hic&qIt?UHIP&C27w;p6Kl)|I?wL~PpieiEU_w7ur@nlQ?m}~9-=Ij=G0Kk499UPnS z-yVc!G|%%WChkdN=M&CS(`A<^)T0#0EvUZAxJ{3A;Fp5GX{$2W}CJMf!5?n$E8e${i(o717wZ7B5xZK9R{JtUk9-fEDmGq6eq< zxm_K$#0+ulmA-aAP>ikE7&J|^Fik>S7hjLGEb-V5D&haQ5uX_{AKj1b$9VtrXcru9 zx$?$q8^NDseZ9Y==4cdejP%BM8XJ^TzEqT+V46!5)M`{W8Of@^uGuXs17q{7{=hha>)^yXpfffUIi{T`N0%BUg)i zC+>ofU7UurH=^r#A-D|?pX%QHn>yTAKm#HuJUDRaH$9sBG6ZHGV^8wk!13&)9xB~A zd5+)g5-U$yd0uz_F#C2-R}4$EmS%O|mQ& zf)(U;Tt1&;A5X!&VK)R7Mj{Z|tRX&8(lo`F53fD0;2z6{E=XisSi?1iJ&?5L&5d&Lq$wv_fZfS~JWL5_I!9`)TVBt<3OFy?Mnr_sopdwr z_WPJ(ZL5W?&o{vcS>ZJskwcxi8=Z^Soh?}7#eXuuLwV|*dg%KcPD{l4roAH z(bNev9OT$Duu4?dQuy#baQ1A71M-H%(hEFdxRosMwu$yb5j-NxFbw~W%@}}wHbV3m z{I^!$PDNr7r*(T99b_QL0<0}G)1g@R^Y`|-ioPp#p%nE0GZ*#D*$^YTkmGS^_qThB z6u?AUUtjE3PN|3KNdxObV#l^*vJ$}--{DFu^7B#-n!@V+Pu?LjwIDnyi7GF5y8Uj; zA~3q|<{Ra8d}X0`^zME)>z4~Znkke+=()pA%zhK}g@)^D2>KQ?_^h{3f5bMLtM`l} z@-8s=oOTB+SeB{&QLOT90~9atfY(Ouazw-P^3)4qs+<=PMxOnZ`{V4nM)Fa*kPQr1P3LVBYD9oeduZRm>5_zwT2{biNF#%Nz*_yyCe($7#KwWgdJ~4T@y= zhhqn=6k92^F$`B&dg~t=rg#S zrRE8B4h>~vR!P07Q_(5KA7Yl(jEhZhkaQ5qNEar zoZ=4`P~Nah?BmFs6VCN{41fd;m5$5s5$mH!Y9-$Gr>|4c%B@)8VSpSa)Zmx6hu-fZ zp9S#^7as;?I%na=1=00mdnB)4CnKvBa-|zERO-QtHM>`#zD2pJrt_xypH(3O>oYxL zFOd6uS=^g)1lABxrdE+jdQ+t}Y6Q45e6e*MgBV01tX*5Bz}raTsYj;J<5pBi#Mg+J zjtBTF*T(>;W`6GLk?*5O`s|6G3&V^fUFc9HTrF9wkF1lVWiQS6+n;j$AACbdrMqtXKsa$bU-hp1FAIK?@VcmfMQ$q$9Us{*7 zDTBDYB_V_w3M@t%Y5H2D_=)lq3d5W^P(TCze<#k?n5$f1A!L=J{Emu8k(>0&gg zlmp<{8F#E=34p*?>GxHv)Bf8*HpWlJ$~3B}EWR4}ewn($+)qML^&R!YO)pEJ5$T5N zhVydHvl$KK%>p$_Wyjf1J(JLbyoyU0(TsST=n0rYH!yLFChHEuL0yCL=&D|ZR{Uaq z7y>T*r&|3dpmO`eYVr~_R!t~TgbqSp23Rj9F7id}ry40W0d?fe?&l)UVZey?mnbg? 
z)Dig#z|ZwgD%SX|EDsz{$lWyIX~j4^&pD0-!#o(3ARlxwf0dyZ?sTG2qd6_P z^iH36u3R2X(yo5+NAv3M_l7R1X#!!>x=OBi(5s@B4Al{IouKB#04HA17O>F!iLzQ8 z3WQMc?aRGnuBLq_4Cv%?;dWA2plYZ=q>#M{da~{vP=$<#gdm5~;(`@gsyDQx=U_H`q zZ&xGWj$TWSci@kHbY>j4(dR`E8oWHIBOpUxv~H{OoC<} zeLqgu)3xqZ&ee~pz(5eBzQu7=psO<7tO%P;Js(?IC(-@SoN4WK@o*Yr+jd2HqFj82 z5fXjT3~(yT8lV@hc{PFVLqP~s*!yEIkk0`&!y>EUqSQzgR4c+OKO$RDSZnE~^j0=u zQz)(#u1!HbZ}bTF^)?Ko>};ABf7}(dpc2<4`IM#CNpLR>V&*X8e>}`Uqrcw zydL3q6ZmzUb{tm|vYQL5sF8lT1(YoMAIh6Nx3t8dp}MfmXclKJy~>ZxEU1`%Sg(1` z#sGpR^5$1bTsY*xSGRHZ>pnx0DpC781Ygc}F&*RwI`-P3IEdSUz8=tp<<53>H=&an zZ0DnO1V^gLqkt&gahP?&Fy5O^$z7J z@W^4lhm3^@I*e6sk@#}bBex;*!25CDt61{7QgII&TN1x7V>#=$zC|(ju5|g;dy;-W z?DA;zSAr8Snaa1xeL53;~mCcGZ=jc9svVS1c5 zBb0K2z3nUAQrypciuc#745`#^YK_AVOF`{n{=C)YfC!vjUaPFUpZbqU?V)Lwk{9E@Tl!Q4Hy#D?9JjDdW3lz!ZZ)~+jTG8#xmZ~I zpXDnq8|^Y<2$LTphO&!8{qlzSvuWqIm;ygWALdHm#R*Y2i-IzCev#DL6_sMziI;H4 z(OyV3{&STWt?Wc1NI+eEU{kkyzYu#OlnkN*pk=*?b%PCNz#BOi>>9|CO|p{IH|2RX z-}DE;<0hf<(UNyaFT1V9GWWbi8lR7)N9ok!dCkkZ>xZa-drX3vdPYWBqJUzN1kIvA zDKr{DWny{yQNDNt$Gu&r6D&#PbRx>sSv3O3hC|(~D~ge}Y-bx8(}zdA&OwspBf5Rq zguKiKX-4KSU$S%#;LF)OE2cWD5ib(#+71O5qaCBm#RVvCzKcvIf$F&-6FJ)gXjQp= zf6>QOs^2SHK+cUq4yZ`mqiTMjSrjTJb@17tG6SzoVJ0^m)Z^$Xe5$ET7Vm@5sN zV~|)l*t|O~*WlZi+4q@m$&}@L2LRcc#cZI5=Bs70VHM63SUqt@r60u=FGBm!4I@(n zMv;_ax4m*}9JEI(dMGu-D25h{Bc9giI>c*(anE_fSYm9?foxox$_6U%*P5CJ3^u2S z494K6yR0blQt$apI)u|0ykrzsNa++Trvz(>guOri?c;~XIwAf`6G`%9%g2UXxI8(aEe!t24#cm4qnOX~bhrZO& zy4%nb>_1Ypc!!(szv8A`XbUE}9k=5~&2W0zvZqkQ?+OQ8f~VU*Y!!n((i=9nw5}3+ zmm0GJ_liY)#CZbIHr4>N+DX~LFy>4MG^KO0Im0Cbl+`WNR|94EVqSXl6R`*T8-d(v z)lo{Y&a2N-7Lq~%Q~o?*sViO9|BkitxZn)b z&LqlmVzcyQ#8SQ0K)iQT_BdmesmAt|ySsC{xg(RXoRKO4-64K~M(j-F(jcay%z>LG zq_5HkE;P2LKsl-)4Hz?dt+0ZELhmA5ufM$*I0lhekU z1)#J*U5X+fnZGf1W^%5uNLEm`z)0Hbl^L7PeXTkdH5wTouKg?o^`dgc(3gSdPnG^4gD7I0R+YFUP8CDE*WrKwDWGKp=?H$|F&1f1P`j zMkq?CfQ5y^W7QNVsW-!igc}R}HLo4gQM`#1v-nn#OBN&lvg@R})|R+y3t%adV9MuX zl73#63wT$=`*06ye}dbS6vNb*c2Fjs_p!$DlJ~Uf5%T}dw~X{Ul;FN*hfVB=gqD7s 
z^-k?USiqiHv1Ty@yCzwTgAUw>&Y1+3k-rzJ#XfBOc^C7-1ip0l?VE`%uDYsW^Irva zx%SRnN>;C!sPvO_`ZbHBGM9+NndO-?5Y_0{6}Go#IQ2q;Ntbx858O`q2Ng7VF9t4L z*FoIWT$)RCZr6kIJS{o|@(5a+AKA6NmY^*9p>L=Thg*;L8Eq!6O9MbRSzNu^+AxME ztCBqZiyuC`Zc)?!FLY|)okQw`;;c{`nSNwC<53@Q=zwGN9+GSNFKIyeFF*(6@!UO^ zMFK?sK_O%fx&zr9F=H045p}`~*YH$$Ygg=8c7I6+6o@M2J7cr`E?z$?kR!*;DOl2b z88N?At<00^Gz*qGy3z=#I5Bdq!o=Nquqo$Ak034{t-FS`c ztc_ma{Jd!HFAyXz#FuU7xGQq!#efZ>Z2eoS@(mr=Qo@JA*im44?@=c%&%kj}VSdQ} zD=4fif4p=4V7&Vx^G)0V533Jn%J}$nSl`pEiwa6~L>;Rt7F$9b-C-CqRRueN<8MK= zZ(%huFgMho=*M$ba);g4qO$6=X!D6^;Kd$NqfyBl$&>}-=y>_xhN3eT?!v##CuQC+ zgp5sm*UVCD7=iqk{#T6Ng8J?%xeG1DzjJvV22d+La+sGqMu`g>C62}grkog;HQ`Gw zceHkRUWdG7QZ!}AR=y-?$QM-MJOL5TjJ;>1^vgqAs*en%%{olDgLhKce2;z zNmD!(&To>`;lJ5})! zZ#HspC-|J2Nb9>W?On27%^I3x` zx*WO!fT!**Q0yr#-s-nZXev1+>(mf~qC-#1g~dt7yyN?#vTJo6V} zpopa9&e$W!$dP^KF%y{mF2Hl|KG1(DU$vNimx>_0Qd)KnJ%d{Z6N(_|-i~ND$08t#e`;nSF zV|y19OKu@^F15_w?Y!8{gJ_=jh5W4}rEXfx9WK?o#(ELyj49yBgprg-b?AVc3aXit z^P;zB&h&2F6IE6K;1L&cs_#Ksh}3iRXJxLu?Y_ZtltRZO!>SsXx&yT?I+SN)&<^n$ zXg&%aFT?+7MzQZzljCchO7pG&)v2I`9z?m){z~lM%%=)fH>mahs{;yRdSC?_Vs9~x!F%Ey*VM@2Ms0#IcMpi=t*ROsP zV|U-MoFZrk6GB0#f;P&xkgbZ1UbFf0x!2qPRBq<^y{Jhih;vqak*n0YU{Q3!ihYqTI z1hBi?MFyJb!i(s5^XjLcb<+t$(?jX>Pcg9?iKyhwBKN~g0Qjuihf;w}k%ER2Qfdpd z1ZSzi$Ht2kRexU@ry5rLCtIFv%wm^G#QJbzL)?^Tlr?QP%_UoV^w9-@#q4EwT16tOuFVIr5>@Fpd z+r=Mee*Z4{o24{(9Y_J>>}}OwjENz9jbUEbVB2iR#t%jBvh0wd!xf5MC0>!{&u+$l zSj7ZVqodgzpjVi`Ic%~G0ueF)-7}na|HM`=(0B*&742^T3-e146LSG;e99!btAyme zJjbFWDUDL!OsPyLP^PL&{jmWL#v=CI6HL@mv71+JtUfUw}bc6(khL;fl$>QkrrSSl#M^D-uN~Llor0 z*a;v0Wt+NM_m|^u2zbt$Hr2FO>F!$nnd~r4I~f)MKvD14YRg%--xnId`g(b?M5^&_ zV2s-7t|DZ4Yi%Fif#NT>iS{sSd;E9O)0qMua(xMqC#a#bb_FR&X5dV6_dGusOTj5k zdWQ026$lm-A4LZ3WRJGlKzl)h&F2K0umswdTI$oWRJ#_TwLP6~?-VH@*! 
zfTe(wfmX8+Eq}Q+a#fkAcoZW8-qMt~zT?4B_HhyQ5&|%;JA)u!ij6>UTJx@i#AWYr zLe%oE5XaP=m0=pZZNwsPCF^bV^u%~jktANqH9d@xfuOQJ5P_a^u7b)bbA$qx(LP_< z`1*W(TcI86X$ogo)QCFG5#wH?O!-D}`cK^qBFFjn-m{s9BQ)c0WU>mQ8*)_YN;QP- z-@AK zi>K`Ieew?wwEGdyP261tSyuX0-;gURLh7-F5vz{J`72?YA10+Vio#^I#oPuA3ye^r zIHb1we8R4iK#KneZA#W6d%@;2vBt_BD|TdM92*~n{S4n2-1RwiH$sX*$^LSD%;4Kh{D{dduf`ILySeU)DW#d zKOcK!_#cwf6(}|@;%-O|=IU42lieepH}FLLNQtA!W8f$YUu2?aFbI=VGHb8YLpIFn zxzwv-{M{@F+m(xqgl@|MMLn*9NA9SLNzZJ{KySGby}1vLG{z5tIGT)_eFx9PpRA7t zh)SZ$EFc&xaA3!5+j-8*UOmo#E-Xo{VGRvI0H-XJ;IoNx7e!$dt>O{a(;&YCI0R;5 z-4EAW0Q6`d64oopG%|}Q@LaPP+q%+A8*5)5_xcrpU&p52N_AIOOLX(tG?c@ixaQO&_z&9n? zN>K8?67Fh1_iVo6dKnR+gOe!eCj8U|ii9Y?^9&M~6Vq*_Iqcz6C{tO0CL}yp=Vt^h z4?VI?eC;%i#;Z;0S|-n@tZQeMvem!Sqf_2xNYU4f2<3>?ARj4AxKoXN25u(*Rg|iH z0B(9Zec!4m4$nmO)4}3&o%x20K^op(M?uXrE#D8bCR&S`YED=|bpbu;516S%jIm){ z4IVn>kR0+n5q0}7zmU&(HAo6ip+y%!-Skhdn24x*eY2$qAf^dUTpTa|1q~^F$uSQ@ zrxLr!2Kon^%|oP#&402C>c&`(KfCjU78g6u%FyO!cxBXT_uT;|o^F5`c%?FiTOE@E z*ntG=A2zY?zc|qRT|E(Jt4I_L#WL^FJK79z<+XIIJmZb`VCYJ~D-maAorJqOl*-x? 
z35GnTku$JGDL@Gc#3R3XuNnu!{Zrwz%ZW3#9@5+z+F_%%r{`rYI0cvM2wkFi#6pXC z!x$B{rHI=H|M@0YWbQ9we+kjYwAqUHy!EVZ=DPCSMq*dOOYRUK3|=ih-#>;KEkF6Y zaKpvvjUD)}+-)F52T2eCmu(i2__DiJtV595jOTlz94pWsa0aE%B;w@_&$nAV360j!>3Nk!N$lm z3W|{GVnKndwAeub7J;{aj)Anjq7%)4Up<2s(XF)Oj4h!*9Ntv6yxx?DPt~G9@Xk6v z5et&{y$^~cy=vMABwcjr;CN%OE&Tw*enl~gn?fc7b9B;xnyP{gQ!n{Z1C3+xV7Xy< zYl#vQ3eI?U-2jPtMAwtBi}Ox3^;TW|1h2vI;y@|k;jWnKVZL_EF$ynJrG5=8`Lbnf zYAb>jD-;h~42PAR)~A`3KB-R4eSrk)CJI-DqLlbA+PrqPEQ7vGt{T6pb&`Dj#gCYJ6Yyo$Maj%C3bFym>v>j55O!diBdjai^8r>M(9&5n5kCmnSV^8RMaL;arpsyK4!uzjGrBKLutrCXTo$j2rWC)cQkcT z6)LPa3l;EyimQ#fnP=5Wa9g4;whKFwVC_akzAXa@@NgDl^W*sDsKVP;*v`@pQIsRexQkO{DG2%I9~g)(vyn3`<_lhFKG^?{`q+ zo7XEHk1VrqyfM%!zV(v83?EWM0DOO?73k;uYpu7{R0%NiyjmKmSpyx~do&M`#!{rv zB@E*pScM0tM7IMDdivG0#GVAP1gPo!U!*DF*j6-gTPDx$g6aj6N{x@Q1I=HNLr45jL#cv&iVeyZ8#5$6Qs>$%q;?<`2+cLn z%NZqrlQ}ut>uzweqfumZAn$hzA?~ z9v-Z}|3`d1W5l(wus0L;*yg5aJ*^u_|Bs5Wz9`-k=$&2Cf?-zzx_Nu|ZRFlvImV;} z@Jiru17pB&y~}<(<)X+F+Cg=9kEE@`G9%m?u`PD^gxVsWyz9;$q31>K`acrN8_I+D zP%L#!oE$&kho!%eRE4(+fGSq*a#q%-U%tum5fT38seY$dW!3_ptnQclD72b%hzCmX zbd-kqR?Y0)Q%ufvrr74z-e0AuD7Qj`zS@7PBvfh%#|?57_pJbz=_E^EAR<*y&cl`{ zVxY*pQUW`CO+vW;;>E6*X3^d-UJkzlY~o{_gfQG-y+NlYMYY29a)e2zYfF7vbiL^O z9fwn~4(Ywx#21$!We6z8N8fu^cT+@wWl~nuj4VPE3EVyG zX*cftsqc@;EHDa|b}-e3-RmgHLeopUlITvC)r1y^%R5~V@qev|#CuaUN5EcNR=YMG zvvaAI>oIA-q4)Ht8G_3?)5VlsKA;81fEIMpit|AkPWjYVk;f#z73}BChpHRJ^&^Op zCfME+ZQ>Txyq7htw%S@Ct#hdhpw&lGxj8`9`>)++Yp?Qk8o}wWu}}hjwmBB8Q2Q8x zd3^9ukc?V1p+VUVxz0ic=i~+JiTYYl(#e~EBa?*xmRs)bzM*BA|cA@YxX#yxDeM;8aIO{&fdLnV@Fv+?^wO1qZz(xirbxr;3)ZtOA?&8+hr>JUsdkIOqX%+vAB z5{f6U03^(|2POAsc-kCR@_m~J{@++RZ(wByX;81`nO_y;#e)>7;5-$@8RfjRujE2c zH|owZG44N~zQj$obrH=N#W#^|Y4Rt+%90?7q_9|o4q&dYk>a!0M0D#L?_W>{7o+cY zQ`F@}!whJi&9(atY{;k-V;+a%gSsm82Mf|`K^bxY!Qe1DdT8r}dK3wcdEKujwmhTx z91ZiCsqsFyI{jU=L;6gMmu;E*G6QFCu$+1+T&uJNb$R5p?3a*s zqo~FirG)4Vb`)BUNLN3~7MPy^-4D?Ix$3xEQv8@X9cyCDvdRu= zgpO;I*T-MF%#FHwdm(V$^>Pcl%TYa|IYO5YgC*e$(WkBN>^#3 zXd7=$Jrb52qhbYfN9X0+Y~kG}ek|ZMnlF!JAZ)T;@1tsXF=kTjIM+uAPbu2sv%X7d 
zc|s&RJ2|rrgE9Lyu%?5geQc@*kMQoMoZ(3Oa*W06Ui<5>z`|#z@ z5f&F=@{Iy(vxDLlb(@V9OgZ;wd?R*K58%M_VTjj{So{nzQtRpBSnf?!_>KJV1#tDk z`F&IEpu|!V%?}DItW&*tj$FfFPZbZ2)*@PgSpx-HK%)+mHWXmq6&Bn;14{)g7h)DJA z8r*ori6?2!Ec*sKJDKNQ8xna5ZB9O~B*mznR_|e0={ryGO6u}GVPAUDzl>*fiAuwD z&g_89oyefXf6I71nyym+O7{~mAUvlH)NJsVT{0C6EbEE=x`R@INiDvX=R7wNI%q4A zHj-44M058r*a>G$wrdftBveJlq>|?Hei0>?HQrqH27}d^miM_viz28%#&O_$D~asp zB4q8Pt6~!~PGI}_tZ_y%m3J2`oBlh3>82|TFsN*+@^;dpl`!r!DQdd}HU|Qw^iT&jpQN2+h~h{T1`k|16W8E^!M8XvE3SAeb?N=wgHkuvr(Ly& z5P~a@b~YUv4uSDsCV=ET7t{;zEOGCij!t4y3Sd85Um7RQsk!w~a;I#=m1+{?e@f{;^Le8|V3tmedN4F#PT!25Pq7s`r+(vBZ}Bbsizvxn1Ii zsxy(g1?~e2M(6+Dn|<{U&Y%3;v~~U`$F*&EvoY1v8D@CW+56eN^NVfD#JFPjDG>`9 zVFuGQG->irfQncZ5-h|u1tK=m#X#sO2sC02Q+7|HCcAPHlS7?v2#GssGFZ5n^DOcP z+Bj1n9;qw<2ju(;JGJgsy0W3cgvQA{1%LLhe99XpgGk5iXi@a(7oBJ%jW8{JX<^+# zi!p)}ZxfWY#SI5gM{j=!r2K!^L+iiYy z4pZdUK^RhY%YuS)8#ho`!`7B_X#hRYv_oTttRGm~ceV(Z`OY{#bW~t^Fm%(-stfD{ z*9!{`6%!-%@9pu(R&ZtNMZEt2&9OinCCOd2XNa ztoUVr4EAkObmWd8@F{xMq>s5ZH+V>7&Q*%-au&t__389F?t@>ojV^J=tZBGlIC|yj zwQ9LEuK}^QYk%Ng9gQjmH9zMdF9fFv2Z6U_oGP^UG2BXvFu#lK(x^!dP7|pfKC&zM zlEk{P!muqZNlY3+%jhjZLY~%jwE9hlx|+N5*O?~j{<~#tBALm5l3k>RjR-+yC@W zm@8QsE0woLo#98ESEwvOz-Ho?f(|T&d#PgLTrOUxVIYl1kPKiZ(l70&{TKx)zUyF{ z-J?m0{?na-Gu7*EgP=M#8sxFF2E4^nA%%xxqc|jR&BMvP)&ohrZr=0#Lw+P(-Omo$ zI<1r;-u7sDOFQ2CuzI1u*L`0WoApE}$;c0*U@C;vDM~gVE~ww&o6m4F0QgME-DHJe zZ}&b$Xlz&(=foKywhGZ^M1Ybnoqv1Fgb^svdq&3lq$Ff94Dq+YPssSzwoiQZCgp@& z(pJLnd~(27M2DK8{vCXhrJq_-$k5-^holrWl`^W8!D=Z^VRf~cF<^bjE?3x@(&A)dqQx~Q$%zI<2;UWo-)+g6;4Bb1Xo@?qUctfS5 zw;`R%5G$@q=NCl)Zf`cJ*gSH2<(g?q{9EPc0$)xYz=gg<|)d3P(*(^!9qIxj9M zhS^K_{*pzMV#YGbfq@NYB5yXk&kvr!l@pobQYe}b=3de{ zBt9Lquyw5F(KE7gHMK@IFrB{fo9mI8tKX8J1&T_Z6sW(P#8521;Ey$8U?!}b;bh-P zpWvN2tb+;A;|<_P=oDRy_NsBVT3AsTY)zq+k z=_TJ`DxTyi^!6^#wbe1(IxDdIg1ZY ztmN^nOGK4cS~GZ1-E)rQ#xs)kN3{PN7223_8QF~;=6b*?M!)DsGCN|Eulng-mI%@Y z_!$TH<<*#Rt0O#srCVuYvtynHN=w5FQfq7*@H^TO2ZkigbI3(-Cw80<|KaUakcX)= zY0JBpk*B9S%XW5lBg+P<^en~GXwDt< 
zm%kM|>*xKV?=|{T@)+HsgFREgC3wbCCpF)L?l=gq`Igbqk!w)nc+`jP(6qPPP9sk$ zdtJ^R`N9;n2YR!&EK68hM+^`c?Qcg=@z{ZKegibi=-o7%$gtTw9bv z5;r^x7JV%sK%zGTM(EB?7QB;{GeN;qlSwZk7yK3?Z}PaXp(%w*gt(5hW7xMi0CI1i z$Nmn{cIkJ_O$!J_j1-eoFdnV!Hc1eAvkdRLlJ_B{jCNabSMPRxFT>#^uP4&8%K$k4v^f2NO1krI|7 z^S=JhN9r+G$h3u6Mhj-EgRKYKpCNz~H%0!Y2Y=Vm3R62VKM}$stSp8TOJ%QfK;+2e z3%Y~RJ?oYN?21oF-Lp>f&@^jt>fsUNbFv zTUy*U1*~}?_oZ!-DX(r~QOP4p1r(mz2k@^5zGl%OVh+PVihTNEDK$T%}An z=^9Dq=o=Xz2%eFTXlVMF6O>lhBMo`YPCUjJxWPG0H!yEG2#HtlgYZ zSzXag4!Mw>a~K2T!J|nK_4P0{MtmT%dyoqmoeX;lf-cUa;iLM1JsQMc$YL`2f3u-m zTRb@2JRZ<}u66)socUps)uk^{`mgvbx4`NdUJ;q;Yap&g_3>xJ(tOg!8D7HA0iPt= zgim0rLkuN*Gq)d{+!y+MN*kTyJ#2q{-7ykb{u&s+?Z0IkzXAd2%(BsY$`OAG6pJ1;YvEIB#8MGHt++-l9M-TS5l`_4oB27!&H1F>KZFhV7hRjif zi9O3qDM~wR!Y9k>)y9HsqXPS6li(97#1`aa=p-O}zHu&CW^sep5#JgOBLyp`^bS~r z(CqQ3DxrrGX4iwA22d+xm?{P`epgGT%$%yyfQ2o^>2Yv5TYU!7+Qtvc z2qp!rnLXkpn#d+8XR6U^rdi|?{6I(!vl&=P05s(!Ev3+3bhH&mEnE1OcV|RS;}V4zK0JIAZT?g;auY=KixbzKI;1|w@G-R?mCeQ zQ14RM5W9E;xVe_7VVwmYznzc}Xr$Orag6J94{jsn)@t1H2?`j5WH^C@R8z|tL|Woe zs)3=a76WI4&ja*Hm9WzG!4nj;g$t`NB2@bxz|k+XjM6()$uQdnqm;B^LpbiEfYn$r zMlH*p)p3c!2S~#H)Ihz%bbT$5Hw_T*S4!;aWmlc+c>Zo)2b_Lbry2iR(YA~OdMhph zr^xv^9ok2Wgq~cms@H|q(q6pS{?+Lo4YePjz3Fc{D4RpD1T;V_RrAvbyic(DF}5)r zZ&m}+-`gu^yQa;XFCvEzlho3k9gl(w0pMZ21tf0=w^Pc}mvPZ33E6P2B{wKY#lPFs~NAYUIGK|$&H_(Ul&1pP)H*qrHkxeV29vlx#u;xEcG1 zTWq<$`c(?!P_4hmbr!4UzXj;(MIZZGvIl9-a(!1AF*^|1#QA*p;B{s2R91W$C~1na zFcCsKab64a>pOANR;QLZ1=1nLiSY^0xe5kcich5o^Q7k=`P64-?THMf1d^Jqz(W*P zN5d&U2XcaESFdQBmT2Fxzjr_|YrEM1M2^Irds#@6Cz+or?52^U%B3Dt(@RCrlPITb-2@fg#5XbZZ zv)1ZL1c2X7Najs;O*mhR9sSeJKzmH4^?vU=r zW6M_m0=K}#)qoeR`CoO}t}b2EVsKw-(DDLbu65Sv(Jdz{!u$vzwUXi}_92sB8gp5| zG8<#^Z0s)&v|$!tsxI5iH%S(;B|RR`jy7|yUZX@h92K>;$u+;$&G>6me1d()MaL)6 zT&J4#{F7ugc!04H9os~dc_jh@i9h5l(aKW`xT|TFdve`$dce6g(TQD6M_M(ZKIu*$ z#U)1_!)+uoKv;du7aEnS{m&V=Y|JqL;g;ogl(EgvT}jbaE5NZ0oQCyy43IW0#EYYE z&z7_J^F})?h^?KDimh+XNI!o(9Sj`h*SPdiV7Iaf4qgDqmDpnBg@uIf>1OU;A!p=- z3ujTndCeg^QtHdl`jdbxt*AI}-J7?aJa#``_Ca 
z-?=E))xtyy@-bWAM*u~o0XJs={j}bJP|$3z&#*aK9S1WjR*uZr#h0M>2AWjuggi{c zciR~&+oy&FR+T$K_W;5`%^6Q}YmYi+6X$!idw(jEV|C&}?Fdk=2*!diTVDSK*c2dY z?b%r9ptBeBEYOPAYN+0V9e-2A$)IYa4GIdi=XKp?UcaS@9r~d;@LYm7JgddCe|_gz z!CRqCZ?oH#sbUPb^CNhw0#sxp-Wk+v9W!WQMOHOwjfABG9#|S_9}AF9YG8?fg z7{q#~hcqudOL_$hm}BLkJMhJD>6&jiw_ANSOK8D&ojxp0)x7w7jLPQtF9J<@}|=Y+$_=PL7EtTP-5^CjtdO2;|hdCFck;1rbM#u0Wo@Le_ zR4a(E4>m&QC3!($pWu?qD=Hpjw4K%fN@CGrI>ll0uLt$=S*?Wc??iZr$i}SGX z;Y9^ZFBH40oo-;ms;+um7r&NM)zGzTTs(jFk;v-(RY0?)6ENN{rNhB%8^drwoJQ5l zTwNOwkecuEl*Jm|sqE*tRR%@J z{O#)WunGc2NmeFSY|i0iz;#7fzt5-j%*<#ZVwXh4`H@ZqGptA&pwBPq#YA-43xoII z!0iYvM)fG;q2S}P5@1~0fZQ<@Eh@M0puI1MSdJFopJN0Ii6Oo|gk^ca?0wf|=a^b9 zQRB0gH5iPE#-EB4$pjLvTToJ79`ldLvG)Wc<;kGGGw6-Gux{3Hlr?wc+GY0VYAFhq zM*9$6RO;kD6ltUV|H|LtcUJxEdg03Gd#deB0UWrzz6p3Z*DhzpikMs~k$UA2g%ofT zD;^wWF(>1x{g^!Wu;xDXQKt$>?>AxMgr8Rd#ofm2tfi2k?Gp#DOTEbQTV#5wJnz2U z3MVA=ygA^J(BkAE(N9YYRh1WHnaj9U?p3Qr z`D5hh`oTci@N`WNp5<8s;t{mNHlsYK2}X&B;mpzU;MpE|(dxi}8&Ro=cH+CVakO&W zq391*Cp+G=IJo#EqgyY;$KNvdP&26H`Li-7i_{e?S@co&%EwWa3=BwOS%u{rv-jqU!n%@%W$=Yy)=Xymt_YTb)f} z{}3(7C^tw%*jseg-;EDHXw**&;Bsn7?%jL)z*Ee=!nNjsz~1o0t6fx;;9&;3P87b^ zc0>k^rsUd6yiA0JkG2#krjCA!j+-9F$})|-fO+J zV#W*dt+i<6*&YKw40=wo!;hH}{ZC=5M9&O%K&o+KfQpvo;Ucq4^t4gT1s|%ny$w80 z(W|!-Wniw}sq&*Qtf0Pu(u073bkw5(5F#IBU@6j6 zJ)3fbk8Tvuxrw<`IdQ>>5bCaA=ua#MsTw%kbV9`Xf@Jm4unR?GZ?MPe8&AleV3t&M zdvR;-Z))3gHaJ^%#lF$heDK%=rAqNQhe7OGQki_-?oNLY`37P}FwrhHR8J*be)A%5 zy-(0Fe==q*et;pIyL^duKET^`jgr&T9J=E-9FN>yb_^k&eNK_apft#^uaSD##;zT` z7_H(feEbx|Pc9Meq{Ak!TkH7nFBQ~T$s#0*KMQefavny>DfS_OIm~x*Biak>Ys-Dx zwk?hV*oa|S*ouThyrg`Ed|ZWO$#=G6o?Dm4UWIQC zMHmr2b}M9dQgK6|Y7n(1?Kk!~Y~;@Z&juT6t+$bo#}O}egm}(m-+E3Xga|}8evgqS zk2j%UNDpss5BkZQ;Ii0R;6I^sX_8s;maKio!sP57C29eXZ_h1t30_p?u(CtaFY8u5 z?H#@S-LJ`UtOK*a29V=nuqoFCJk22I z07Wdpva-NvZRIoO5!RhwdTq(Alqw2=NeNmMoja$Rim%p4fp1m$LrPL0UHnhkjCYF9 z1aec>JF9o7SgXr1KABr#09D=#7td-+*Xbinx`Bu#vDcOtT>lK!_pPCqejwt+q3+l= znSZx52nhdChb!*ZBc_060z(A>NC*>ZF4N1l24AYosZonDAYl(v0J~1izshvjpnSny 
z=+^F$YfT0;JV;6>fFdo?8&5`|GqC}TxksyHU-K4f5*wLJfMnor++87dRYzYyPUT*&8Lyc0+!fN&8u!oA8{gI|o4{*8OdYs$7yuVoUaC1Kv>rN#Ul5nR$gr`?b^xQt&z ztZ6YuNAnMUb=R92y&<>lWd5zldQ!;-?L$V_jx+Bf2G07sFPEONwSJ~Kz`25jC8N{1 z-j>K(petAdtT^vN=y($DC;Yw5VEYpMtB*xA!NIcYU^PQ;2A>)OKAHr5j$=_srT{}g zyuXuODdh%PxKj2voxdUhvB)7#rKtMM?U6R3S{r!j6)VHmc*PL{0-JRpbpy-N$jX~PtNOb(%i8nKY zM3VZA`o=#?{y%V!111hpdZ94@3zGS~BBYOBy3h@uXz(aYqgBh=sXu~F&`aMTWvgN5 z1~PcEm_BM%H{g*jG0`Rx{==>%)>l`|53B3f#clZyZY7vCcy+U6>UE;YtyCQUB+VkC z{X7w`+N`4NQj?I$=7PTqIPjW?5J@^>sx6zN*b)5Z~3<=NsZl@P`= zGMbXJ0kHZ^jptov-cT#{gikILbUYA%3RHXUWI<6+Ew;u0^;ZKEiPY8iWs zi>j=K78cYRXYR8GpGjn;c#?c^0ZT4PW0D!{p;jVMv-8W0aNn5BjItK=UMVjmKZ6n1 zoe7ISdtxJm{O5QO%VATieb+RZ`mf|qH9G1#WA&{aMM|`(Xp9gJyA;r*IR+^F)q|#I zPj}tzB|Hj2cj+MqI$O(tt*S%u#wOk7-ai#D5$$B(PDRemY{r8tfzOuG^2lo5I;T%7 zK((MKnfDLoCfhKIgh9FVjQtemCKqt`kPGt_)Hn2Loj9JDrKK}U zCTeo@JA4*ER;Ok50zQfEjPjuoREndXDQ4zF^JtrQ%P3r6KWsnf%jH^keJx8!k6RY2HY{9`0X! zqu8m46fG~U{zW8SC-F72K|SXdqxAHUDTs^5nU7+&!gbogQ%XI@G(Gp#3E3TYsFZ); z2T0kj;~8FyqGuCt;%%F<7j$6u{~+RJ{2qo-Y7-s>4}q_Fn=9Qq>#-%wDTwl$J!)Sx zGl}5mW^=K{k`5t`Ne*9Oz9JUumL&>K`W8HNE9w&ev+B`?pvV&_dKjnOkd}iRDUsb8 zQ*@mTev<4^H$9oC`ytw1JlMeX5AJb(el5LZk|0W&4EU@+Ov4Bc+rscYe*WQZyd>UG z(dP3i+)gWoYQIQ~*HZi4(!z;QH08h7_h;A?!g&V*M z(!0yxeA8I~{N=XgAx>$+@}!l?KWvQAFR*84@KQZB&=$pdX2lNg(zz8InbLeZ1a2hZ zhWE;4FZ!^{Z1E7Pr+^d+i0)i0m8JKES8z97G%aF*EnKeIOJGZE6+(mHscUo?y>YP> z68B5t|3nSG!c&(&kB)sy(K@~;qwg0|A{X52{2!mfdrWUN5zX2MQjDs>szgv5?I=cq zvWapc;P}4iXd=O)v(>{w9!NR_V7Jy5MOCNX#=-v6*&Gn^+D%m$4)LBa$_EbBfDQbm z?m{B^vs>JXP~ak`BkswT#n5Qb@*(wZ7^O=-N77xm#8~8(`V~KZam&Xo&-V=6K6enX zr7W_|Otaw)5S>DPgK6W+2{N3okm_G2S;wCtnQs8l!mJaRrWZ;^Gcs+{h)Jqt6hYGq z+F&2L%!+e_6u@+$opQX^R!SuC=vI7Qe2bqC9XSkFoAvc!jabVYqs1|DzRWfjjnfaw z9pNH`tkozvmFJfd|FBUe+W(^BnhCfRf&R0DMLEC(s6yv8;USMT~@!L$BmgY ziV{f_lmB4aAdTx`;U2MX;!4&F+b*pUz%Cq))zl zw~kf~FH}5*I+2hdq#|;WUz&EkA_Mt3&{S0ju7)Z4L2D3T`LXAu=%Sc^6O8G6T+QGL z*#KIzlpH@Mw^0LiSVG4*TD;{@Y}JLp@%QITsC_ZJqHg{{FoNv$O(}; zORR;N6>*?8r7s*y5Ff1Ci2leBA8a0i;7pf3swv~=Np?&00+plcl|^t3>cm3a>pNM> 
zK!F?Hjsv;K+F^@)OEm(T)VJ94THbrw{1BHL3A<8n&l#u4TWn?i+u9E^x%0xc!l(x` zVrFn*1wi6{Y@NOz4jim9jyXPPQP~)O&PO2#PU2Z$vmOj-sYSvXiszC&9PlNbN029o zYM$bVP4JVxAK5U8LtSg{4?>7Yb|c||Dw+Q*Zgf)!> z2i_4zz;{7&jkKJa7;?{YXI8R|CeM~~CoQr*;H6pZ_Pn;-zpz!sL8vSIXJD8`Ifhp* zf&tz*N1q;&#RHP)Td$*ByYsWQ0qshDq?roGm2CxCPAh8l-F=iFlEMH!6+)r7y*r6U z)1WMswqKO4lvn*&{WzL0M6P~5UXvs~?F(pO_fiU3E??%#iu~rsB&`- zy-Vjmd+n#I_cOLaO`~)_vg|N#Zva$y2C2i#|Cx*tK?d7enW`&bglVf;ebTi>B*51*aOvKu@8ZC3lt^PZ$Y)?uz#(JN{c6f^{igV@ld!KD#cW`yUmSoJm8N34wf>5xo#u zr1Be^W96Yun&kD{oHgB(>Suz^bYP3}yTcLy`H)PdIf1-Ppprfoe#rc{RA>9sr0B1y zIzr?`XDQ#H(x6ldm`|b`d6Dv0DQi!xW7C~e$39#T;i78xoOk4RvzI z^;dw;^Ecp=i;UTmL_;a|V4>fKmTh_aZP^raYfl91#r>PxNtFFBg*hT-- z5j}^%QdB>9T$b1MaPPkBN+wfCpl`S39K-5hcUw>u*XVc>A_WgE3p5lF(I?}(@X^S! z@wPIeg`uO4YTf_=MZq42cdN-n7A8GwTBZ~9;4W{alAa_Xw$+S)VS&OeG!x4Z30p%% z2!xxMNZN0$8Z2@92HtJ^@jqdWs%Y-1HhUaI ze=WGGjI(HOM2Z+NT}=7qF9$u|@~Bc3euu~ILKvmhLS8pxB~H9jb^W(zCNVEvZwPuX zJv~v^+M_H4>v}CLo?*XJodbIW%679ehghp60h zGt{8|SX`K2%Qf`#6)fB&p3#3AV4(8t%RpOE4FMF$Y4#-g5a~91PswsV)`I2fLrLeh zDZ&$y;DAS{X=2;mg9ngh$0EwdI#T5=7;8eX2KT;P~6nkoH_0LFh5GSo`n_cI%w^Eiq+P&o)F6A-nd-Fym6QIIe z+X&;f9=V48g8e`wRbJmb7P2b#7t&6nv~G+%ZoW$7+yyMmId6ZrHQ z#7Ir2k;p`RvI!ZHYRA#GrFhDqAmV7^@%x`R!p_eRrkrntNkPbK8v1oy&z=ADvD^lm zEmQd7-)gp%DGa&eISJ~`S4?bUuwI2GYzz`p`>KZK$TZ?oq>QBBi!wqEL6IWHvND`x zaH8wZ&e3(!S7Modh0+@ZCzvVAr;ONr9Czj0>P(=?4kC3WNfU(97tx0(nZ$^0bsU}= z3DCXc%5Jh&?3(Bc!O(AGT=u*a6jrm=1+U{+?ssL)u73e3#R3#CA zV2TE^9p~K}idtuNG#Xty5HkMCo8hwNa5n6TNg4ymW#w%ofDIshv_DV>#&Jl_9xJ#a zCr0Fc*C0aX`c%9Y)V{P0e`>smP0{I%__`}dvn`-HBn&-(6o03lz7LZ)`Q)H^G2 zt7Q5Wd^)Oon?NuLrUT(*1Yo5B0oA?9`>~W&J~G+B%PUBiikL5zP;=bC@mAnI#S*o2 zl`3!5rZ5`IrBpmFb}bg`Tf|W&{dEud}J!2yL0hbeq`|bHgs8m8D@lh zH%UWMEAf+hUmPi2!YM?hJh*vv( zr#vIITz6)}h*tvv>5qP}8o1z~bL6oPfa79+eKPz^8xF&j@ITh6KV(rpL;xS!B8Cgk zcbf{rixV|?73M}(Tes!g%;>+{;x}Oql5)#{#D`t>CM?x}0{{x$3SE;}9{b{r8bl5` z$1v?bIr}geNVT+%iBB&t;bqs$-r~`PqzUksk3D67c_T0OKb1xZ(D9U1s0Gf?%d@Ms zY(rY)Uuc^!3z&!tPc;>0A35+#9?$yhPJG;-Iv~v3k+2dq$V->j`WE~+Xo&@L?`W@) 
z!(W!2NN$zli~RNK%&mE!BHx|Up&3yK(!zwqcWAHDaXJ_>8FlSiwX{n9AQEi55M@yi z%iHg%4vx!)>CtN=Oe1-blt+cF@WR>-sw7oP`}N^T>Isb+BGNhh)VnA#`F z-=iIfy#m|2r3)}299avk9OOqOBl-i2(K(~0q3b7nq3bbofqvIVlV|0gHc!a!90cIB zk^!aVppmwNUo;xJdp4F8e56lGO{akGk?3v%h7MoZK0$W!QEO)I*A&zHx9 z*g~h1Z|TrFJ2Vojs(!9<%i!wV!#mZv7c*|@s?@XD>;=Qg%2qF3dziD6K|Qq3d`Y(M zt?Iy?Z0>tJdghR4fjGXTy5**Hv5jFWhe3GRexGe&9mFgFL?&F^+VbL*2uu~ugE^k|Ii`Ku}hzxhG5i(^sGzc2m0apCM zuYnEMr+sz2bJ_a_Mo&+oZ*MgJMX`cbWMPV;u}C@CbDF*h$>%c)mb*JQHed1Nxp--k zBK&*){P^V13{pCgm3kVV;uJ1S|4bxI%wZ>8xu->0>99>59`bnOd{O1Z5)lzI>r-C6QRrJ$q&10u{ z_VjgF>Q#-v)0-X}hVCDzIxRSK9Ael)N{5bCImvu#QzjjB6&d5^G;ne}*!qlT)G9Hb zDl=P?8!`=I#y5rv9<#__(5jOlN>M+o@SJK;`zL`9m)$=g%86Ox41pm0>1Z7!GAx!V zmJwCi^sl3T$h~KuwsvwsbElO@QLq%rF+#$KenlS7kydzyhh{7q<=Gle8GuG}_1v+F zmQL8_{fwuv?HT}spO_9`{p`66dwOAamc_5J{7#ahl#@6@ev*j$zj`gp*_N+?a^`gj zBBTx*>|jth-S)Mfh+gjtU(4ZZW4=uRO_rYj6)ip$*@%Geg&bBMLQo#a#g9UIr=%gK z1q!BPRt(Nn!B8Tvx1YuT-v5O|b;BqUpM^WUR3lh%444qk;T)NGO#oc+E^iSg%0ir| z9^ElQ;*`VD&=3Kg_^_fMj`tqIWeoLnW;Wz7a~1Ouhp3(#tvWVv>yJ!NEpejx0uZ9zt<@<5%$YV02lr?4Sd zIoHTkaHOm*Ml)?nFWfx+18LeDEe_4fGNm(knP>lvL<>izSeQJ?low;a(o>a=up$dS z)deal3jHF>aWs_QGvEayl$E4vEfIzms}1+w52VL;Tfw(K;T@b>b#Yf;f`g0lPI3Ag zeV`D;eiVNE1w^~dY#oDfDaJu|`APJ=xhIn+f(4JAs;&CFvGTypo}X1QwTkj(02#=L zFoPg7t5XVBfLD3|v>hV9BFMlt26Wh2^uq=(G%Y7+JO^zjjK&XL=x9twB~*NvXfr}M zS(1Xgs>eU*Oxq(A7ulh1vxMEJ7D3LUuMsCIc>hBM#gtC3bh9(dVMEDmIkbYvC0!@pKp**yL# z2PnoJ4c1s8--NnT!~SFXS-w=aaZGegb0JoGn6yOyxuWa3ZD=)NqDgQr>bAx zFnbPl>gW`ByGDGmB+dt*e{qj~dX}5?dZ$mQDU9i$Z-C1^5m6PB55`|!c+p7Kf>3tIuO7S?PTp2?J22G4J*v{2 z##+z_2#8T6I^|%fUwI|j79j9Y!?f=+`l49h@|z*e|Emute{H}f%$VJ1ey$Y33i_=a zxq;adYn={eFmf`U!;-1AY5iKMV9J9l)`ql9jh58+?gbNl(fqvOUs?59WSbtiG#hf2@XiZJG-DTWzpX{4Ot(3aUIE)Y&4{{^dr; zaT6@RKVhg1E}&;km`jnpIV;SdigB|hp%Q<=c1_yxXKffL8b!0 z?#iB$d1+m-=YBvE3(tyn;Gh<>&Fh_33{KponL<qK%KIn zk52LeEJ%ebjJOx1D@eL6*#|L6=9K>p2|HL+|UGO zfNYJI?AwR3&@BVuvCD9xZV2Zv4hxbH+w#7Lr zjt38RJlilLVb%e6by})bpYKx*lrfvkODZaEh4=a~p>Of%?*->cq4}?hiAIntP442x 
zaVwTwYeFg!u)&;Z+t1bpX~Jfj2MKJNf)PRlnf@V{6f?hd*_3>&+SZugT}7|CaEii< zh)BL@zx&!+03~>`paoT@4`3|SMY+H+aEv`YVu+LvJe3a2byv3CxgxBzUgken@CT|8 zUF=5Bhm7|!y`1=Wk!f}c_J}gLsZSds3=GqXOy*5m-h{DFPd`QIV1yqWDm5bYa5Z3O z)beHy(IdipxoqXvPo0E0R_h2@3p9io0KeaSELo=DA&oE1Gu zdZ;6cnn2&uEbEclQqECn7p(6TsE;~%8e@m%F={*1q0yY&!unC&!>j(1I8>p}_hWVM zB`BckZmDoKx%i;Mu)R5eF?6RLTM|(Gkf=e%!ng0u^h>UIFtKhJiW(!!3OwR^8z7y6 zyjYYI*rQs;XR34p2-D4E4#7}>29Ac9m8Ep}px0;}gSEsPxrXj*NZhdQ7 z&>eg&##ALzvIYR4|I+B2!7;6KAjBwM-LaX>!@A{2TTs#a@Z{BEbxqn)rx4-OIVN^J z&-XbC^&L+Xx>%3+A-6@Y0qv^XTT1@Z>DRZ-(8iOTL#KFS7R3hBl)YBU?{h)LP-7Z?Lx;(S+r5K&%;a?g|5wc7b2 z17&ft{bxBHEcTBFL5789p`n{qvoks-h!BgKp+PsUP`B)|r%_;fLpf3z!B$7>Nh~-M zKBI2WDoZKs+n#&szu~uunbN;cKp_%&;UHu($e&Sk2_tWpzGzzDpS@9b zZRpb-k@W5f=JzB!O$XtdaSB0wN@6I?f^EBb#)Og!PTB_`P*EFHEq-<{PhwWKhQ4o= zeAFxozy_NmICVo}Wf|>r0KrQi?(I*^?D%{i76tML6#R~PuKZs~w)Y#jIupt^bmO+g za!;m@g&{PA!zhRsQr&-j}~S>gYB8#$v1h zAF^X`=CHo)&P19+&)VcxsE87afOaIxLM;pFVv`fgnyM3<$s^vSKn8%C{CsN=1Kw3V zPauG)jhQXEq|}Q#G0&+b#z>5Ws%%Sn^@`2L!TaTdJ=J@8DryZd_9OvodPQh0(?&_V zQqhy+)dsTz%V+ng?+E-{R4@BnI$Vu~Yv2B`BB1kou_`faL71c?whCmAELVJTNDSGd zOYMO5Lk@TK2Ls_MbbR;3*N_^#ir1wu_x@NDV~l4N$4@WBsyiq%Ag>#gK z@*Ej+M^OGmwPTWdo6BP)O*0ytXy%*Syz&OUP>fjzfWNkCu?ujYyCr?~Esw!mM6Xy3mZ+Nb92MTZXaUl=L)1Sk;vgK-Tt7c90O z2hnxhpUb17ew%usT5`uH*OiNpj`fUH>J1J{Y>xe*+C^ z?8L4LO&A!PiZ|4{X*w#l%gdmP`=oItGT_>>#ka!aZ_lV2{_|C?BE6xrF1<1X?E?w3 zVuEc)Q;67XK4sR^b?{4?kr21ncJgUH1HSikuGYlCtqy?fVPM8}8|ZMn=FL4ma+q@6 zOEEog881YYkz>ARpcEx$Z(cYtNkY7hf~;3HfVlz|WaoB!(Gb%z8!_a!DhM3Wr`OIC zUqF-zb+GPi*A5lfVW$JRo~o2ziUt)IT?Jx#kcEw?>+ZHAQf&qyA!k|*91B%~1Y$G& zLvDy!D}AePPITN-fo$z_@!)1LqjNnYs|fm0u-|npQIBPOWnB2x!IsjkiU*6Z9^#QB z`vO|eFlDVGf?22XGCP=#g*?ppcoQd`dITPEj#WHI+;Nuhmj#*JH2S2b!p^ z)%-hZl1(n4%L%49Ch;Jwez3f9BMjveJ_t2D;J%#UqJ;QS7RSBkW~?-(E*e4QLIYe@ z-ZqNJ7APx;81cUiY{ZtK_|KKM^VAD=*FWSW$4~j(VMYUM`Y$@|n>5dK?It;CgrDql zZKY!gDirb8Lk7ag_u{N3!78W7l&BlN1(u1h1$&<;Vuo+KA{LI{)SBd8GknN(nT;or>QVL_N-g|k>M`apbM6W z(BW}5nXbnKR|I+r3*Po2X(r0azy#1dWsRR4S9D$t=&d5~)cK|P;wkVyr 
zLCwg_5j4;X4yvr;rEDTyyah~ffzjub9Nr2Qo9r=42Ib5f|HgnrmS^sD#yMZD2ya0 z8;-vdwh@Ro9+#oy)o3g&*V5BN(Z|7gHrc!%@c8cqAinHpi*{1l)c%tX3Xzz?n~_CU zxZzb(%|_kbRcJ;fr<}tUd&Hq(fcS3HGqG=_K>37TvYAXjX_%QsnRJC~w{2%_&4NeP z@Z;6MbAccm);FA6}N7adriyn9`u?Y~mjv|~j^FXp^;$HZ?oxwXLrVZsW;n+y_jBZk{Mb_iG6G;bW@?Yks4fL3 z8@Fk?*EGzp{Z&j~Jjoy%`)Jl21~6>w@<{gCyECacGoKJZ3MsM245J3zwgLW}Np#Ua zSLqew|EVg!i-VUXvT>cPHav;Fxg1WjaQP-KMkx)b4Qd zLr#6*kMqOg3ejA%yj*NN$_8PXa$7F2J?FOGLa`cfA}OROsRGjN)sDB5-jd9LIe0kD zjMsqZAkCtuJmjBQx9gFUxI~i2Gjzn{q?*#J8#SfLcueFD#MAagzTT?gY_Q}9vdlLPc!A+)o~f`HesJr$ zPtCY536WDjrTxboOsblX^4PgL58Q*g)c$H4BHhPL^Y4 zis;K9?vwpN{7fmYEF2FLV*}o=n*#A zHu>Bscz81~pz#FqR6y2oK*=l)5e80n?A}e9XW6T7uv;3`f@o(%b@{JS)L?*TKPL!tGxhQ0yg$Xe-M$ zdQG~(i9~-@6CL63`+AYp5f>J^3tKvDBE;n%B2S*C5X&U*aRKgSidBnTr&D?dc&;n_uiOso#%K#X+n9P?TqXZjL0gkO}B-g<@m@B12IEKAp7kiN zVK4NlxhFsbB$N+c#csZlcX|t{dU{@G3NiU?O^)DsWpq+u6+XEBzhLiNPS>@VCADBA z>VGxn(2bvmF4MP_w3Yc#99?`l!VT#eiUAlhd|*eIF1(NBK5LPPpD@ttsamMG-ei2p zI45DtA1``sn%k-I&J^pVnSYK2ahOgkV=8LW~|f@JKFxKS#szODFvmx}~R z$S0@-9Qo#OHGtYBMXRkqI~WVdU%W>2gJ*+p>Mq&!p==v0mS3^j&0&oaU??UBE9p%?n`{Hj9{+va$b6ULc!&9SOv9DdcsB6f45}AxFlcNO@-lpDK>%0(INY>`H%H)!%<-KzS2$9=kEHdgDUIJA_ zeIWS-4Vwnpwt4y@qp}+v9dy4X9x0+6mm$|=r7tH+U z(JA)&yQ?~qZA}Kiezc+idvGz3+vaq$G}X2dytJVO`Vw?jt32*NQV85?la3JL7rp+b zn`;c}v_i$^^(fZ4s{zJrKuc9Wf$=@k3JM^m#MAozw)$$028p3*b_hx-G9tc`$E2al zXul+>E;-<;A|D|kE!YFGmN#X$sqDykrxT#(OODcElTP7A4H^I{zC|*ynX?z=IsF$D zG9|dB+;G-~8H58LGA3wY!(*0-X!r1yo2+U3%VGVA124}?MZdAeDjDaVDu%G7xQJoEe$m-a0clM}ZF^vUgu&w;J;ksOY5H-u5 zNc<#`^689V?~(H3q;0i}0}?y#f>QfFQC z;mjh4DIxE}$Mqgyv@0yxuk0cUtqlFvNk|m&iyB1PF?_I77ot(HC#!!8h8ESrgvwZD zL{mr9&PzVw4E;E*u|zI6df34Xn{wg;jq*@3a#C%;!rqnMHQa>EV{fD>a?)Bs=blHa z2usS+a70Jk`wVZokaz0RuTTP>RQ0GU&AE@dmkvtQ3cWv`a4ctXit?w1dPEBM3D}=xEIMZ!o2V{iNs-e`zTli>{cY(QSCwA zcu!xvyth5&clY5<}zjTU$LrFdQ(7y_cYm9-s3Kc<2LsH?HPMB3V5$Q(>J0lRiDk|yNx z3T~wrb?i&x-zOg5&3ByPWxBP?o&Hq(wrjcBKkc#^!jJem2AW7$j`)F~{cLH9AC=13 za2Qn#8L40jGjXEvbOjteeq!6oab7*7e6#;a0WP|!BiX2_I_n@^IX)vCRgWzAP%I0* 
zy8mVo+OUshsTT+ZxB1SRTyg`GL4A=kIw4N&hX?r8BK_|pbC=JFT)VR&H6CPK!I7KY zD0q83ZZp=oVWQrm9y8*(392W>qj@)m%5VJ~yzwjbYa6VpfwQ$gz!fOE+zprt^HBhM@@=Hiw( z-t;xc>*S=-HpGjS*C)LEOfqg$-0bKnYKR&R(nhjYAE%9pr`NL$Wb^*p8LPAW6ENYk zbVFS73=3Ri%9j7|5j1u%nKD&%eqiqC?F@V8+w}e!c#&jRw785UjUfK$}$8TSTybROGX)?Aaq!gEcCnQ&@W6GD8J$-SHuEijy^qtWmi;E_O6`&?{D{(MC)AFjy}I-N*r7!j2r%_wmosV@4{7-!;a|N(=en z7T~UFh!N0u5pqL{LXM}%=MBK$T!lu88^Rmj;R9d@Ij9>~oZ}9Q2o2y`g*20v1;p?E@c@J;LA{=!7gJMV7D*jqT%ux_7u3q1cSlX=e{PP@K6RFLq*lW#56+ILq@Qy4;s z#L_D);MCxwKUh_ANC{ehFfnvUYji+BLCKzz<+0a#Hbz=|kdj@1uT6wH8oG_q9oQWr zr1~9ClI!XhHcY;bf!aI*uHNsS)byAg?*%4?JIrDQW+vJwt6SehY2c0B{Ode^ zv42ROiN6LcaArW?TPwhwjOJ$DB(eCKYOy5Ar?LECsfqq6?}dt|Ag;GW8vyA&AMj8W zghCFUkU7 zcHouPk`WZjmInLnD`4GmSx8pzMpii`S}+D#h4E;B3Bft_i=KoV0Nb_j(j;3#d*A_3 z0!f!2r*C^=3c5t3oqOYHCg2A|Vk)mJm``K=B-t z`$mk%S}lHE97K&q+n}_D{USgNnaP-F~;jg$DdSs(fH^lnLD2Of&39ATc23e4_3 z<9Z`F1u*6@epmkgS4|JnxeyA~{nqd)RuRk-K`?#6q`TAJ{J`zRh!^6oAc|4XQp?u) z!5KgeiuvO7W76@5qk4hJ@Z)Uf{YkY#LiqAF01x7Z z78GqMAHWiX$meT){evlO+p4{q{CyX8IsEr^z6w(ipp=rk76k2 zh|=#KJhD1Z{fG!PU4cjuUXUuT@YNm`Q}5HAO%}L-1Tsm|(?o7(S9}$0YfczYAUDrp3cd$xb2VzAZF)lmBK4Wx1w2S7y*GE)Yp#I} ztJMD$;~Iu@bAknf2!M%A7ZUAtv^h%S?xv*EMlb-yvt9L=doCgeZzgJjHpC!3ttr1* zzzbM$i5n(7cC#!)f2j-Bw`6mGO^mMYk zSJ!FU1JSV3I;&I^!RZEno$F?;QvuVYdJt?9&;7sufF@P=VR3*C1 z&8ynw^>%NjaSog>5F4fY#U49ja5I`)EW6j^Cr)qv!)Ked4yB)OL#r^eB&II8zT0UnD!0EmKbKxsaE&r9zSZG2Ji1WU`X8{neRvQrv&>_JFd`4sZQ_RY5v zrLqAgM|4=cSO-3}Q`#dR<*~i&PREap1C$q8iH`amr53WrD1Y(rsH%!@jQD4@u(c(~^DIScSJmPBDpc6|a%@jF zl5!i7+0!+7=(Hzemwg#l;#jAmb<`76p7zF6(UANfsW2lxnw0Jz_RNE$t$kyxJAoJ( z#jU}_V2FGx*bf-N*4G8 z1Y2a;A5zC^lt*eKbh<1M%QK>o!=V2RJlvyLO19i>23e77RI!cVRgc22go;}L@H+t( zbZ9_hTvwe#u5TdSYyBM>DZ#rMrNKHg-cN|JfTL#)wZ-_J7%dL{6G}4XZ{3wh|4enLc3$(P?b?ofKC>-Z4jJyGdHm^T+M-Lj@cc!cDiWpo9X>#GvUi=3^0Y6 z_9FB?C}ADrWe$mM2IXpj#Th+ zRlc&%25>9#B*Em-*m*5dhv$)*3CtkRQhh&(2?@g&SF+ZLHKJv8vz_VWG6TiRTiUXx z!&hjDp5uIoFrw)i{mgkKqNE(;fg9#*<2#&BxOFZrRvc3RNlofeCfr02w9e*bik~O9 
zz0M`4aQvlXM=j90bGpOfczD}Yxy!z#*VVWbR5rvJO?PmJ_N5i>2z{QwNIKK){NwZaZd470y+w=(*dHIi39H6hy z8Xzb&B+M~N=YwN}hQ7zOVDW>?!gTTVgqFC*HbXF%5IQs|iXd05Of8OBkc5xAGTHPi z9pen9FEg->6KWKsQ_FNrik&Jzn$~}n<(Xsv2L6*=P6T=_E{^(FZ^@bA8R`VY4?{4O zud|UyV|T!o&aj3R*0@zpQAZv8qtnsQJe_;{l%Md!&i<{Mb27=d&18taLWBxol(urn zB{T8c;=l^U`^5n-z{5w_3%c7Mer7H|N)nE^lNx8l-rtm3Kj(ne(Aug#S7lj{ zXB^hCT+S2U)u6i~*U}x(A1Jy;7jXI-sUfP-d!CuEw(9w(wo-f#%(aD|EMEQoyxJcw zsuFniBD@DXBtc|11`KQM*mW7qtgCur>sszgBdZjke`*AyaRP<0J%|F#_M)gAaz(A{@Y|Y_6oAZ%4q*CcL&e zbSBPq>MVGG2c_qz4Ky$d_|`Cy^C_jx~|oj=xfOr>XBK@jQoqd=g?$NQ2x7qrz- zco9&c8;E{!?APMR&T?C4&C|)}>`eiwGDCj*=LCu9YKHwgRmDNLl6VQrUD)_lpek%b z>g{AI5TYt z7-X^8nDFM?TNIArC(Td@{NW^_^<%LrV?=}^x9H!PNZX?`lq2LZ>`zlc+0E!7(RF5- zj6RCBvxjz5Y{MgSHT9TGDjajz{PVP>4{%VGTw+i`fkq4pQSE>Yt&qhjE8q;{G@JIc z0*#!RiR7k2HsJRObK>#t>5cn6oMkI)E{l0DV{Bo;Ynwmxsd9jAM?KffnY*<~?;ZK3 zpt%>EVK8g`YIonse>I)gqx?j74UP3*j$hMq)i6IrbGjh1grkyq2ZI97T@6fQ9p0w$ z*@Br8!?+CsUHB(DH|`TgaHiw}(wJ zi>8PU5)X%+wv6!YqA;-PEoGz!{fBDVu3;|2jXlYVAtY<##9rMHhENp=pD$}F7p|63 z;zlzBJFj1{;lmJk!ez^T#mpwoh?Ib>a$z}s^#iNlrgx{?@MowL{wy!TK%7Q7W9?;2 zh{wrJ#fFjEb>O_-7%|dHzVMD+BoWYDWxj3J8~+;!onKbpu2~gn)y|$BZ&0cZ$@_bA zUI!<3_igD18N0r9#k4@U+Vh-c+9ilqa5b-qD zUKW`-f6xR(fz|T6uAcE73aWql3tIKd)ow>gS}+45DK)`u4saG|+oQmm9F%KtH#xCo zaXQNbAY+GsJ{u!vaF(;4T!vs{59V$)8v}ljW)SZ6;Wmf z_P#71F)(J4*rdp>>7^5r_^&vI1JIdeH;~wn6uGxyMw+BfTb*6Y&Q4V?8|(7uBPLg7 z7~lgYB*y2USJr(W?Pq9C0mL_2Qf#-idumEcp73q$VNghBmwhRew!L)t&^1A`f}`hN z?)((2C{C)me47DhX^*~~9|E_7o1&zZ_`l>vDVE6k2@d~cJe)m9(r*|Vy9|#(cBGVm z>|?`BSAVlvF__z+;Orx9dk3vjWLnJ)<^07bhjgRrO@jw?Jni$y9PrSVK zCezTMb;|B>OKFIJi$B!k&GG$c$6M-xDBx;R@Q)M@ zm8iiRuNG+4m8gL>*){DXko%|kP;W`m<*qrgnR;?AS8|`{0b`^XXHq0=S?m$B+GwBj z=Ni2xU+^{o-M2g&Hd5YamIWg$SB{bWOWk+)oQ2mh+I>bnw0MXSEu%WH76>&44XPc~ zL~@;2h``%{cs^uT-wQyw456|ed=me(Q{d-9r*P`>XaekK-|e*XoCcWhKI72}$9(#1 z_y1TjbLj=A*qPb~Q*rLyAKBR+?Z1I_c@TAQ58Qhyc7FS5s=4&`=nNYJERmbd^z-ZP zquhTue&V=dbCj3-9|HfBu zz=jIIrk<1lna}Rb8GH+oDE8K}nrmZHU*T(on9S&|Pz+jWg2wbr$N 
z@1)dKBe-G2GY)9X=0AK5A)*gr0(FD2Ju$ASr*d*cc;CLOHw8w@WiKosfniHyIxW|5 zMDYfBAR`#@3AY`=nPfzj0)TSfiSNMkVD=1OmRngRfZ*+hyI?Nz|75^IEA4MgH; zSG*Md^fNHM@;wT^N1`T~Bh4*5M!+rxgipWS-_p&ZXa*wQ?E#Xe6U`y8N&;nXMd~_0 z7p5YXjShOHb5q?QZ)jFQfJmz$JBBEEpMJc8=}?#Sn2n0ThwL)H?*TyJt0eFI zJ{bj&eW>>jQun(fyy%DF-jrSR_{^D3RA`BvJ=)jBV4Jmni26i3lt7G+F%AO zMx9-mw<8=l5?l(gF0#uWaU4bRY1Wqvyizvp5qfO81ch17(MLsz^n_5(6{2ZTL7?bJ zl3D?81yJ2(?Jp*m?@z3#So};W*S!7YhRF6B>E)|&kt8UpgzCwFY2Ta0UAf=I^}zT= zQ@?T5TpZ~L|9Z3}l^jzjol_bemNFRCw7qVpq!^Tq_TCxQlQN8BRF=~?o+;AgIQ|L`c_a~l z`^Y}u8*1X_<|i$;0)CZi$F|7-Uf?#1>WOZq4uliY%$V8A^Q}nfwCz%N`x@c8X`e^J zToEE0lC$)Y{5;rU6I|+PrOJ02Zr@9BjlYl3r?fn8JW+MJcD*=2xwh?h6!2r8FL)K6 zCoEHZ#;}djBqQlZ|7aw_=P>f;kv3ts@zY$LTkxPSpPfe#{rL38FJ&B+&82v(%b~dc zg)I&7%bbhyQ}A~)gSn_;Ar!^#JS?~@8qDfUTd>i&IH9U>kXA%sZ2@vW?dTB`{1u9~ z+{waNbl}(ivOkuiAHMtI4>!@v&t!o+xp_V@y4FucB^ups-ckIoenqY>YR@1q?Joqy zM8D^ENQa*LqNPLI?@)aO;EV&kIw4GnwNudQSq?;t(TM?OGG!)LvjGj+0@FSjQW{TS zD@ThWa$UmpwzQ&?VYmrqPZrX9W~>Q06my5-6t|c@zvN(w{@?S|qcNcWUU_#($=J#} zNYnX$JyH(;*5Kg(9AwmeSbu15BL3Jl3kuqr`#u;v>|FKh>fubQ(DeSoP@&@brTXzo zZ(12j`^0lHcXR`n$D|U!Zt0SDGQ6HH{}tqFC*bt~OwP=bLaXm4Z5lIN@Min0jw6h_ zXL%U2yX#8k;0Owre*#PS*MNP(`D_Uoh(bM?88_SnQ#qN^wi(t}U0XCZh=KWKMk-V$NQQcAa9j?p>*`Je@fSSC@sO zz7}MFXVKKm_dOL~Gqgrsr@7C|6^+kKe6qBOYX6kX$=X;;YTY8G$(6sbM@f5!SKDEp z$hwU&M;8RO9ZO>Os7RJsTl(|Kmns2&%^n0YHY_rZo%RF|O(z6X*=&b?Gsg)3vmz^8 z=$HncxD8_{64HM07}Hv@M1~AB_Mq@8qJv8Y76pD30M(@h?`2m%!`wgy?v`a=ZC1K1 z74~LGsNDNB!CesyRBG@-E>*(cFU)ZoU+Fvy9_yLFN>Kg@Z(UpyX(D}Q$}_Trr6MsH zliU4Bg-q$cpzjvd!%)HlYv$9?_^x#gPYX{gHn?W{x`8ZiQ?-O6bDeW~45y-cajqD@ zPhlVBX7KrX{k@3jh#DJ0BU7-oVy|3UJNf{qXwVMhpKya^_@6^rumi(gh9 z{$pI91jMOp?@KaG8%C(cX!bLZO#|;~U{PpPTLQ)Ip zt2-#2SqQo+;!?xlb7?xz@OlJ%@s`^kwQ9u$N$K#I4qp$BjP_o+iVdK2NjngdyBGS% zPLK8!URJIUa`a53kd`99`+cZv~+-7Tio!31U zP}z_X696xOTG6C^T{RUiP#e9Oa8`p#b4$-{usX1Pc=-+?s!iK`7h(GfkI_S>Q9LYy z8lITX?&o2q_$N4J~5SQWVN#t`kgDk=yBK&E9}q zw<$u{A(x)ri~PR^rdtv7#V-*NDIioV^(}sYk5tqtsK31A>WZ_wcI?=)Q$?;d$@QY0 
z%x*H5)E49O86HkICzW33pDhSFd{&!1N1@oSe@M2Pu{T{hj3p1$roW+Bo}r7r=f{}Mc$)Adg?Ju`IBXS9XyI3rql9W<1^v_hYz78=O6vl^{R7UpM`*4&qKO40D?9Gr#2 zY#V;J(__JqR;V2#YP}sz20GYLyu_nHogj1LQniYClm$zdob`9JB-TJJzjO$Vw4>wU z!!rk{csv@i@ZLV49;1$BafPrD!zC@{gLZ-`809an(W$m!{-PgW+r`30n4b?E<)a%e z^3xuj1|cK?y78F`uaC}k>@8={W>GZ#V&+WaW@STl$^g6miy2zneI2CfE4Y>k(V8!~ zJ4?r*o*+6O)Ipk5tt3CL)6ePC(uz?@tV`0WP(u>THj6oaIT>JU-Z}?7f=@dKc;__% zuw3P_`hE3NO_0x#F3uhz>cDlmZ+`TcgA~VH56dxV4G|pKrHF*PBn=%-YjqB@F&P}U za(t07XH6~TwwObLUT1_Iu|$1hJh_x&VWlh$1`{jwuX**5f9i5rTAl@q$(!&5@bCfb zoVt6t=P%I0ARynOZBEnVarBmB{6mLJ2`B;rCLH_EsWqd+$G((Z=p zC0?-uj`xe@{j~FV3}RvA2D%Coo5cKpm79O7m+F4}mlK3IZofw?S6w56*FXe7-%;)u z<8)47VKHs;ci&DEHuvHA-Y9lQa^AZ>pn0fOmp{x!`(_v-#3R={QB}2g4-ZY?UJ7Bh z+oq-kxe&kSi5Ocr5(pR1-o7-tQ91P;DMK%iEXVhZ-4HCQt&BW&(c$Wc#!UqkbGtX@pr4ho3$JEWC9ppry4+uMC*+FVw zgg+GfT0X#Boa@>VsgNtzH~>}n_C^7dQebnr%&E!oWe${O{l_ne6mbMH3N!b9-D51- z>v3p3K7gqC%Lb71A?T}{{KeXE(9Gn8LC%ODY4qD@eeVYJrB)|=A2z)W;1CcYhg^af ziMt6NjN|)o_lS?yDkpX_;L)j=I3a^TepRmrsm_gn%+Oq zX54x#YHkm;KFYF?@PTAzNVH4(PBTj8*Shm4^X!)2C&#CiNwQaW*Rz9=1@xvRf_!l0 zlSG0tl9N=hd*{JYG}37)+Rq)8UDG3*u({#m6V9f0d$3I6M0EnTUZ?!<@iAO7ihWD6 zU)qSjA?AVDv2dKMKuXqwhKJ;2%w{*X*D|SpU9VY|gYy`mD2>2f^C<-F3s0}7TG^`t znC9j9qtWN?Jpy|H7X>LzF*REG$A)pEVo9rqg90Rk1Xuo?4IlkirP3hqt^(EtC1Q0X zez7|dbV8O|CSYnh-uC^!2aG=wI0IDlW`Oixa;0G*jQ<5v8|)~{iJ~w>XXO&N1s467 zB4g`&T}G?)PB!R({zF>YN`%U1w$Iuy_chxh$#6(HbpYwcl~GQ<;HMlFt!j2I*AUaQ zX}qYce<$Sv#Nks-b%B7{f67IQzxR(le1!gn55C*fD-jy9j$+QRo&S%p- ztK9@(CYxoDCPbPQR|FySwX~(4qZ28wo4^i7=i!FvH-aI4LBf3^Oa`v~sDk`qfA z!ZewfvsJ}rj2^6A!t+$z1Hyy~<}M!X6pyr$(n__^eQ4y2;b^NHpv=YG2 zk3lk%7OaCKpfZy}Xx!jh56JtQ7ic7YMZDy6xUJs6iRM8(HBfamzhcC;G}e z#>DD8;$Zz6dL3QEDDVspWv$#AI%Ps38y;wMMEiGLA=IeysH^MehrcLlv+)ge2@%eE z9k2jkg2^Gn_lVVhrvIT-M;c|i!u67RJ=kq35b|m=Ec`%l(@1dNiYV{?>ZruDPq{Ad ztQ5E?9fl*RT=%jC9->pJzcGOSs)jF`1(cAK6u8@0#CwJpL>N*{CB}l2b?V?pRT}jk zI4?ydBnohL9wb9`d>KI0KI$y%@~TLc{utf< z&~6fUn=2?1uKgqKm|%`7SZd-UYM6c<5ym1h`W+?=Eper<63g0A@hv4l#y(P#8y-BA 
zHo25I@bqTX2=*9}))v4gy5AR!kdQJway?hmbdMI?;gZ&K+-<%cWd%w46_jePeJj^I z2CoCAhgZe3oswXA;5)EHKuti+l)2CKRx7pP(jxg+nrwf47q|X`JqcvI$g9!>hKI3{Q@vI+u9Hfg)}KDaZ{+FhyjW-q*_1xnS4~N^G;@RLYI4;0&w<{1 z$JUO|APTJi9;bLYeJ*Zb?l5`%x)GDwyX>3rTWSNoiPvOe zDtyCFyfZYD!r$SX9a}zG!ot_jb9c%;*UzJq)~ZPpuP|wGZ7M;B+yEHd3jWjoEUNp* z|CR2tvW0l;kDu~hgwLI6fQ66OqF zzGFtOqI*-rQlIeZJCSJU8lOZ6LD+1gPwBpujRS>)OgZq&z0w|>TSb%oiG0EmBaPPa(SX>Kt!oIQ@~@k|;@pMuqMXD7}8SFSxcQ{;&= zOB^er;u!9R4iyR%eo5QnjGMH5vkVF*=}2|DG5O-Rvs$0jAa`hnlTKw(9JzrG(ps#y zFbjY`5(T-LqBB($GgDwBsRqFnGcM_5r??3(#%Jf&Oe=?{+F-&Pz0Tuc`A;6nSY2hy zn3seBAsd)k`-#&#OMb}+%eYBMM5b3{ONnY%nm^ZQ0USNGriO;T$Y~!l`==Cph86yw z6dZ^0KOeT3u|U4OvT8XmK@(v`GK+xPha+jDb??!HK{r(7luLDo&+QC{t7CwWZR9y} zw7G1EoULjbwh|)3Z#jB<7FC<}o!NCoe)%sXqS-c(wO-GN%gadT;91eV%1Re;=mmNh zNki4!LnjKOeMWiR+>#6tY4Wd1473$Q2)BAO;k`VZUmZtriS$IH!K7)L;Pc=(Jq8H% zRWA#bE|JoVt=7W73EX|ZOPh#vt_+B1N_t}y*FpB%wMIrS5wb)0BDnVJ;;E1Xbs_Dd zeUnE?rlU%;ExojB-{uO#l}@dhSddZm7M+El>Rz6;R8(Cd{{4t;XOGHc1B(m$pUiJ3 zb5ly~K~ggb9;(UC9{(1LiRi_b#y>I0#qIKnk%<5u?{`4}3UJ`8$$n40#`{JCbbxZV z2iIXVB7rAVU#NJMsjJ?ny%m8O2pG;s@N@ynrtTII7D*Kt77gBrNQ+D%2D%d)9G*sB z{#mZ>2^mAKe_gF{l4pKkmBx{H`aPMZ?5$|+ng!zbgeS4pzB7_V0*;Ef@Ub)PK z9H{)YwUZXbi7*Bp4xW}+sMJkX>sOM?ezlq$-~oGABJfJw{L{ngW!W3@e(dQxy|%oG zY<8H$6)`pd`T6CDw66RfQ@UH#5n}NXJfBUH%U)h7MDpvbSB1vQoWk*I^pZZ$@5|={ zTIP5OgocQ9ysz1OQw&PmlaRd2XN(8S^N-Ad`Jb?FCGmlGY zH7j=zNo*f)XnjY8gfH~{JH_?rHujbMjj?AbM`-3InDDPgjOi2JYM-W_sD~80a^_^9GFF zh{?NzUOh0)Ue}5@RsSdj zyPR9tu*QFYLp#d98^zV4l8Ux_iX_fbHkWez?7Fvpcj>#5AqSuu8zlFH#9YQL^ElQ5mdypa>$m})CZY9MSWroF=k8c5a8tK=T zOlRP&OB88?dd;20V0OTk7JI|xeP||Q*2vbrNEEt~xQx(X42OSNyFs0o=>azwu4-eR zCh{2ML4DVQJ*zrp5(*2`reBJV{AnpW%Z_=}kiHMzu^ep1bL=0Fu=^Yk2RDV@_GEHT z-_)VJ$=n#S0|N7Zn_RQ@ossKTPgU!y|}5X_-x} z6Hsb9)ID~^sJ{&#b}tS1pP(y5@1n2y+LjJWsJKQ6J7H%E!R)?rqarYXKhD^c6_B9G zZVA3{p>be6FDe}Hcm(7W#{)OB;kcm8axS_HzIinl7k!#BqxNV}i-ALU1wRNn>ch+u zS?%YwXz~7XwpD($^aa`aYO-+An&u~9btuO&CV(LNtRuZCmE_2=`1YDM0*!A%>dOz>Ujq{qw{u<56mx08-nXGH zj=O14Y3y5f*FES<^{Z6q!XL26$xGM2bd+`!euN1&Jv3uetRVZZxE2BDrI=Sh&qcGp 
z2iRmn6UkfvE8(kvU%d$exs&JSI`6J$5g@uPK}zCwp;2jrea~E6Q=< zq|iLivYkFD9$mL;ukizmkHR@v4AcM?$Qp3ZFEwy-v~L1zIbaMC>vI0d!v>U__2bTH zkQvm_&m{(!EpGbg?M1af?v_z6wJ2?x)i+bYhSb_iX9wY%nlhafS#;!lcoy4!P*bg5 zlAp3@t2+9a(nS$ildgg0V;ZHr`90P_L-8TMThwqeCwsZbRC(`TI0QiyV~gm?l=T`m zuJC14$Z;gP7&|=u)ZQ@{^|_JhDn*0z`@!D02v{m+xjKY47G(e9R5eHZ$U{JcS?D=aXA8Sa zjM&=V38@T?Dc1;t)DFz+3u}8;*-E97_@(Hk*!w$<&F+4e0P}iKs9edR+Cmi8Y7DRZ zk7aT~Gf3uR!DY2Bn|Ze8kTvQT@hYu+fCIO-kr6!0OF#Z+dpl#e4Hl0>r<(?0j4nnr zA(snfxAnm=kTBA)De2-|droRp(J>0zn`+hM{Z=Ys%Ll2mGnlZ2c{3>8MBf>{xvAwn z!|`M9jWEhgAl))K25;pdKQ1WZo#oq|2`rYoP!}sxaqOSjq`D>p&SPbq0Pfe+`uKi| zfC8Z27RBaZnhyL3vfs!W&non4r>bOTok5)5U4PXI<(cCcOmj zdijD|?qy%sD9Z}iOrviR3|h_)N1m}lyi?O6RDAWAz0%&F9e01`VeGaDx{-6l?bsB{ z?)9hJNI#yNl0Ad0)^Zwp#L)AV+UKoX=%|M);14u=XDO-3DgsvC*yB?qXbLh&cKQDt zFTjKPjI5?qT`TWu z5m{9@iQnI9E3Z_@{W`fXm!Es7G&S}Q?YTEXuzF=bqOIL#IRr_S=)4nuY`gqX^LM+m96uj zfhlLng}k}zq8JpyOn~#Dgvr-iPXXE7`Ez%`KuVhq@vgU&K*rX)M;K9{WhwxGZj{r^ z)|gR$Pm9kHMfj>%eFe~+$X;Q-@*5O3uQ+$lbM{iJch5LkT{uLv_QZBhZTTZ#Sz@>l zGZfmogBs5l69Ar1rvBiq%yvMK8LY@HrNzm%6c}|8K0;X&CWNDy3?f6+`Hn zy+lGI{t9|{rIn!edt`R_o%$G)W^d$(4BEkFXl!cPas%K%Ps|uWc%Gxwcl*=DE^+)C zD^xU1V7eD1NE8n^E5?I&gEpzq;ny3-%nuL|2KzVma8Irs&ywGjweV(9+fkfZQCu#)tK2o_I6NhQek%TQQTW#%740X5D76|yH=~c~_l5Vz>yDVC4!o039be<_ z>>0}t1#nt~uLpd7Ck{Kh!d(5gB^`f!K?K zbLqUck_oWTUjZxEptP<0wjJX{wq?%09;2{ExDnTYOypJPZ=s9M&J!pqM?XVdY<@~) z^+(MO8~m5}#tF0Wh3? 
zoMO$@_4?Pa>dk#~s=@oKOY3j(9Aadhn7{Mo>J;C_?CmI?D9mY-@uqHes=dHi?qZi4(3S6Xk~1Hr@$= zxaU?U?>$wt#z-KWZ|(*gDEaRzvS1*oW3@p)a~|c5Vghzf3W}KcGu{b6@)EZs-fFEq z?%mE;$F0LU{1ri{W0E;-c()LqVD|UaSKqf${v2TlaHyYzG#-at#$Eg#oMGKx*h#D> zSr6Rz=J^2U_HsPmv^Y1r^R+lxFSFPJ`q(%10atjma5}DG%cq?>9-wAZAsuRDDk5tm z5mWc3Hro9S!3yP``iE|LiPi5+82&nsOoZXwZqv)(>s6y2LI^7HsgHRSy2x3XuJGDa zG9L;O7-(6t_#vO|J_fR-igeKI4Dn~7T`FE_$N{|HFuXy-~?~B#66|}Wo zxml<)dxDMfl+=f@^C9!cTIO0*OkB}21{#?8Q6F!1lF-GV&UjGB0un~r@J5x+mknAj zSzr)chjuMt>6D>#WDp-BTvPbrm6B097WYBGh00+=#X%e~z-bv+OYh%paM5<)CvtK` z6?mD)4%S7FG;P}+GhVMF^zCBvI-i!Mv*gJ|fnk^vR~#)&r&S{9BZ(y56u>zWB2w%+ z4}S`E;`0Bo3}|YOc~lKVpnPc3(_DxUQ}6=t`|MLa;N47# zeZG5yh@5X70gX&53bD$ywrANzaSQKHUz|mX3lCoh^rLy?W5BBfwIIdCquAz2BA$nN z0|pmy&MYMCR^$=!E=4wHH|0_GyVa0UMk=g_w)<#ph~@+BiwG4ro4>8l4%3!hqJ9!Q zOp(1Vb;T;9f1Nd2e4}YcK}g##&>33$C%{5;lTPI~=_?@jDAjky-Ep`6ic?c-X-*qi zqvLEdHo*q9g0~MaxBhWaf^r^g&=gT#VOw$3ZPy7zGmDc3zwN+h&cZ__VTMMF;R5OR zFRzG>kqyz>PclfWuFL%_YHE>`vA3NL(JcJh73T75?h+J(0t9uj9OW0BdY5;Wi5ehx za8yNj5_-L55%$GtK-|5Ywt6XM_1|~QpaGOQv}IBH#d`Xg%$#wEM-ZSUAFf-S+S}j{ z@VbGJ+%)2%sP3KT)jmrT1Rhg+d7n`DwgU#9*SsU-W@g3YNOJ z3&sXenw(LJ&E{+FB-bWl!I;c=W_(qs{#=Dh&{yXVJg3dRJh^aO7&3+3^t7Ib&L5L; z$wE+%a9vS876!U?{#>$s31=n>6 z_=`zd;v1Ows0V=fu)fEBnoE$bIMrByXwuq}peURQxh z`?5{3>QXP0U$I||zvP&8P@! zWYoQW=!^tgx;P4u%N5{=+OKQ0iEZPBujXVyXZNwc@}5##p}Ym zbkjff@i@)_D8lr@hfAI9y-|DZy4H`>9GEUR-Lc_gSCF@xi7}ppW2X zo9469s+$cwOkLM!Y|h8s0DsSB8u}jj0${Qy4xP}M;qZwS^>}l_PA*=VfjC?v`pHfp zzE$i1j6Q~YM6u_G3k(0vf$oG z;l>rSVO|D9K?FrwxaeQqWFFRBWUSrcG_LIHcr+L!u;gQinkEKE;tBbr>;Vth1Nno7>{~dvN`>={?wXCRD>EX04<+prc>3&KI^!{)Z3AI z?V)>m2Z@5PHp#S|fP%PZ27A<3HrtoPy0aU`fH~nJLmykS37;o-tM4$}pEyRmGf})* z2RTC#3AVsgZM!f0=#g1rCInq(Q(de_tGXqDY+sWveTN$f+e)fa-qH#0z%%p>-sP1p z^Hl|^jr?U!kUr=RDF1(8bx?)eS_+4OQ@w|H#uuZ9$z?nzf^dA_ifpPvy+&O zU%Aui&4gCabnJ+En_)!4!&u$~)Ta0TN9UGT_Q%CCe-$SXky2Ux8mnsf(?|u0w*>rK zwUfEH!quVX?*G}XCsp}{IWsYF|KaK=JpL;G;tZhj%4&V=Ty9Rgv2za^(!9`pPJd8? 
z(u-TvsE~Zas-gqoUgzSy*bOMk<#oS{(a%twQ%3v|q%|ReJV=mF>XFnsP^!>L9)zW8{*GHtX(%{VRL zL%#T26%-KAC_?NgRtUlLp7({L6+Jx2H{f!%V8Zx^4eUR1gpo(DnROF;3s7k(tiPq! zvWm8wzM`u79#UoYS;Q?>0hWCs+U zwb1@E_=yy>kzO!ej7Dyxf5k7ElU6VZ-Loj|w(H$i#K_0@16-55SKjnv7$blH%lv)Dpk6PUk1`3FJcOE0Q7yzOFA zxM~zR6EKVN{jB`Muo6#R{u;Sm6)z^%)>`RaDUkt&cRuV$#0gB}49!AK3Vky<-%L58 zGzC0}Lq1}-MlHx|s&!0qB?g-H&DBsy?b*v&C$N6pSYJ|c0b~V*rj(rC1=pgLFu@bq zvP0BfwDg;5atV-@uj>;_BinGn0TfO-IB`Q3HZ~|`ouw)iiu1h<$eI(K0|qY{!&Y)t zP*K>P^_wO_u!S@{;VSqE!T!N79b)-r0kJhQK`Bz-!_pN@%#0uI&3arzX4bsf4klQ| zl|nb}SaF<0X7d4uGgNpE6|6io-qK>D1-1Yx^;{$y(TqxgnkMvH@7svkdDRaRU9H4n z=_BXE-<0T~adJ)y&*inI3iosFxZy+sNr@^irtQr~m%=ixrG}vfK2qs@jD87|AZQ3> zI@PHM-sE{!YPkD@qz7^hvj+i#Rc)|l_s4e2f0hqDtSYdI zw$DcD>R&haZ}Y3Xv)31j*1eiH>ymbJde8D2rgB^)#Xy^u9guXld0XO8_2ufez2-~q zk9nRWSkrGCZ5&YjmxuRqJb-LPN#eecuqJ(#Az4aX3gx(N?ss`T zc?PAdyAE?QR@yc(z>n4{@~Y9rLL(&bm|=y>Z4ahx!5>E;$LNrf0}Jy;BN?1~4vu_7 zrnLK2;k6W5iKoOyGsthDN3~)K8a}}`FWt?4#R|ROl4HgVB=J5H(>4fON30WCsrxfn zEOIGm^28gBW00%Cksc273rHD&IC%5v4p)?~6EIbIrH%R4GPqKR>0_(E4l ze%;~RJ4P~$)uTCez{#kEGLx61M;(bfCHO$B^o)s!8;wl)gVyEqERKmN0kwVD;Oj4C ztF!=NQ>C^-1%{YN8L?V3eXjyst)q9ilVLh=duYoET_fCQjO(S{AENH1_CXI&T1i?k z=+n3dJV(v)YevVNx{mAh)bpm23M4!WwbH^;QRx*i4Vxf}Yt<@59kO!S+feIgRT_L} zqsJ!0maI~0g?piaFqMxVZefZ3$YFI?vSTo0=3D$WZLN+)bX@blgTTL`5%H)+z{~mc zk$s;*QqB=Gd+mcNergkf2TH0DZpnKGqSHpey6U7FVF;w}4NUW4LR%h4IvVUDO;)WsP#g=JKY?I*eTE@<nID27dnhU~(rc{L=cj>yv8W`onC}-pc~vA1eE$$Orlfw-)U`2EX-PF;eR$Sw z&E8HppQ;3^2ScbP0~@u{T6Qf%D6g#icF{^txwpXpf44!MvDlaVpVZ2b4GT_AlVSx2 zOOr(%GE%|KU$yX1f~TiU+87~%-!P+8s0%|emL<;+C_7y zC%n#o788Xwf6EElPRiT3gK#)jzFEvz#;cZF$R?4;6<%4(ag`T+6~+M{N+EHyTsX#R z4IS#u@O8Osd@62OIYPwna1TfBSi3>^SF8@CI^zD1U?;lxbc@^j+^hlXSE%I&U4cVQ z@z@bk+wpZnXB9A+E8h4Kh|GKCGfNSxxmBRA#|B5apRt~Z7HAxT6_}LAduP9Su+x8# z1zs7Y#$}j>QO0{<&3F2GKX3aVuldiLw@NQkmM8x7hPJaonEAr6(3UYy9W(g^Y(+Ui z*I(fbj(+ZZHbw6#$cNb%!@1!`txHcC$AEoZsl zWAc-X5646e0BS1=U!Ob*YqCNd2FE;S(Zq7U`sgZd>oV;7DUcTpW270N8E-2G%X=!O zpCqiCwk*3#RD}~5KfNC;vO(b5n>4v$uAEI_nQQZYy@SrjhEYWQ)y%owkpW;_Q2$hl 
znokRBYD(=H-dVEfk(FA!?pUQ^{Nn}Bd)v^hJn#{j7HQ*ccw<~UV=Etf8qiDorlW57 zS@Vv!Nj&e5dO+RnM;Os4vh72P`)d8s#EP}6kJSd4f*C*~+AAYGbIidbO))b8{v%1b zVC4Mo^}X}IheZFSIYnzO*RD>6$p;oMsM>>)R z^rX(DbBYJ$YPu`El^9Do{7;Q9{K0!~Om}Kxw zaLfENC|_6mAC{FtGB`ZKyk@Ei`ngyU+bf7BHi+8s?9_p_)+xI%n_09!RiquQ>V73J zv@^N3bL&_A)-lDFyrfiKw}tGJt9ovr1sJT*i6 zd0s()s6W$ML=qmsj)(mszBAI>wvqym=rQ+DB9?V7NLQSS zhvo`AHpWf8c~~-p%cEun%{ymTU0Zv1NP;-7obY)!>T{deD3+w}L`4>m|8j(XFK=KB z@I)*tYm@V|j-Mci-uszX2aer0mUt2uMe29f{kVTDk8DS#M{jMFqVQm)B#p8I*e!RU zktlX}F%;&28yvRPAM^Nev-1#_AV`#^7j5#lICTEdLEf8Z_Rn-Oi0yfWIg5oclt57B z9N&LQl+R$qA}>g%9_ExmSdn_&JJyi=fNWPBN($`UQyF0Zm$nnrP60p_C(kN z#cO3g&`W!TV$|GV3{ZmwGBe@_zEgG2&b5yGrx}%^=^rt;vUQLW)=kBl)2~caPGKR# zV$ei#^_FiMy?@uIAAq{ z4rCycpVQvwU%R+W#klc09{BlvwgW_aa$&)j#Fy+yXjGKUB~Xwy))?1S0}RlGO!@*$k(f!- zr>>BNw*+|s)+lIjBP0~gXTztREAMz_kUUGtm7uoI3&A9IzYBiRpVfZaqhDbs-Ug| zvsa=zAv9f2Wg@rL#0*ADqbqzoQxTFt7;ACQD@9N7{n4WkpXf?`G@`RYT980DHhcHK zdQ>!_#9<+|!j2;!(#F^GX*R36;r<7)T~OwSD>|64O^|k}odHWe#+19Jev@yLxJ}Lg z&ph3HOoKyodzp%d^>flfP%Tww>TUE)LSQn<;Zn}sY?t5=viV#ZzSqMo2{W+^!7W0I z_huGiNRc`0QF@Xn%B&h*lEIHx&^xITs>VVNuO1y#u&`PAWVqaore)#uVHY`fZnBa%IgAWajl;tq2w*i7H5a#YNMVG@jCDg4>n^ zc4GxFR9Z<`HmMzGG~aw_$MgN1%wJHnnF1@+l4gvFPx0nT0_D#y;qONqC~II;A!(nE z-TF8d6H%5@E_B7u9g`SC`7&nVB+a_q7&MG7q1Ur1#{H{Qxz9GSPJIDvDtG>rQXE_E z0={YwKU@Ca#AvWr!J7#m=fjFA`?Qj#%ucePHTT(O2x|&8n)7Zr-eq zk}~s6E5k$Zsa~ngjVjyybODV_t}Ixi$E?{Rqa-m`N=)-f(b?S2SR}M_t{+&U&aD}3 zQDER@+E0al>LE>qEK!0%H+L-@Sr9Fw86Z5qkS z-3cFze+;se=9Uv~XO`j(nWh|~l%+e@X1RlQ3x>zQrz8+XZJxDckQ`90|FDBhD12-&_c+mMWK!FL`wam@ z=Th!^o7QSo*?f-ke~XI8ji;v+*(0^=s2ZDz;$do{UrQLR!3nWa5f)c_s@>_?#}xEa zX6ZbT&zy(lvAfyZXt{_q#%tlPcKGPrU(Sm?`Th_n)xhse|Tdw{w-<}Ts(`^wc zVn-dvBbL$@hYOHCh14BM0tyNE+s!#2^`Ca90@pC7E)fE_UmWudewGhsRITW6cL#2jHJul#wdJS=zYC{H=qyA?ZNp4 z7-Ifjv?^s`W6u5SU8_fNhl1A4ZVLqb81pO$^f@Y07n0T~rfesi!W;M+f6*k2S)N5} z>nxTHT>(bH{#8QYG~Co1~>gbErvm>2rd~uC>Xz&R8HPbU?V4n30hb zi>m0ZHXIgwowfhmwtRFtlx#MQ9~C|N1k8V1><+Br7BuNTiqw*kpG_MXSVVF6%Y6YX 
zTLkrWZ0@OQoVt&W2_40Ok~Xiv*`)5$`6QvgTZoTt41k6aKXlQYDk$~jy^Rk$fAD0L z&OuVQ79xB{P3aC4Dj|W1ma{<~WD=FJuL%`w6lFkbBl@7rEzkQM_lizH8Q{5`3x=f2 zu0=4rtDy)C4G=QusiW~PfVNx5xFjdq#8j8gPkCUDz;(Rc%H$u6#_kX2S7w)X?}{5# zE8P-`9#;2P2|2x#ucRk4<`ZR{m(Kvn6hMPry&&JkT?=F5VluyT?8t(RF?%<%K4XyE zB!0bI+(~ub)0vs*TKG1jK}6BCRPnFQOmmo-2+$r`kOQFKP6h@Uyy2iyE}AnqM3Zs) zlG}$e&h8LtvE0=Hdq7uKS65X!J-7FAP+Xv~KwnjKUn9n}du|Y4_MK?p3!c~QLfql( zJ~qAD;}zj?PF^WdhhU>ud7|6sNA*LWPZwsgg|csBeP}i*X6(^sjGhsgbQV~v`+#(x z0QjXy@*RrIt}g7G?#0s{DS7aX(REy@!|paoVFb5bv307iLOOwg zkLJybocm)(@lg%(cjsLmVozH;L#8auz5e%Z0DSg@IW}}x0@vrqitbkr_r>PP7>i8v z-bOy@x&Y3e1oTFATZp+pas?cVXOys1PaM_w7cN$HF9W9Y_=}wZ2uZ`1sN%G545d~R zZ^4X8mFb$DE7pe9p3z2E;>AekXBvVX&>F05Xl3kj*FpttcUxW#@c`B`!tM9gn_P`X zTyLvXKW=$?JuNlG=@kB3%-GPc#r*eC1;ac_p$oUWg|D3Je^z3nGIi;z@RgkH=&Kd~_ zgrKlybFIP#X1Rc_%jRVTB)wUzsTb1%$hHqlxwDQf|MCHwepDx{VW z!ChZ%%&wihmT^@nhtj^oXa^bGQxSqa7E|2KzNjS(rXlQ-sSuGpsjjiO71#JvJT(O) zi@Ck9+aXqX(a`?xOh|)e<|`Rkb=xwfV9F;yT8WT|q5*;JWIMNqxfpENu)=H2Q$Cd%AbCRB1Zd&x6URvwN9DZ!|Q_*Hi>!yyP`Xrb-b|D45 zk;9&e4pBiYBeJueELJPQ2v+v?FSWkS1N>r2!MY|qRV_7H;N#{3kUA(i^YC`0`PMNY zDi4m_1<-~P@AhhFi*`x=A=2R`aP(#MjD*<}hQ3hihz1cwpIB56T+|Yo&%dpGrgwrT z^$f5!shCfI_i~FdrhnjtWCLEve$mOB;mjt?*IaB4J1O;iR@$2f3DX$qNa6EO-zXTO z8M>$>Lljlra)POv27UH?iDNE_XIHvqL^ z`Hnil`~GWV!BqsCmHJjYhjsh7D(nQQ!Z?6C`-c=_5OKA&k28dVX4QZ&y7}=hCZ|Mo zdJRzwijn`qeJ^M_K%t8xMWL)y@Oe>GYt-(^M^O}0L!ajYAgC(+G{FQmZAR}IZO!S0 z`|yMTy#1lJqBVOg8Wi`SxrhUucCH^TbsFX*}vw+#8PZSJFqr#n|;QL72CBfoc@)Cyk3{Gy9&?<&pc)5c?T5zts|0fOUaHNf;{ zhwX<56$KTnt~9jDVm6w5Zf}r^NR^&aX9}EAC-#p!t|jFGqQuk9v9`tC9F7Xg=w|uG zFlrs~PBftlX~cxMq<9=WP?*}OoWj|W(7-m?|Iui`{K*SM2lKLRBgbRyF%3DRa?N^A zj8H0Vk&GdY{m2WLTg1ECZPztip^#$yE)dEM5j`Vc9`KkPkb~B)W>g{VNxfS@LiwUCo&}t`y=)kPS|A9tHX0iBH`^ zBWHEplBpt|K6y(z?%oZQKL?Y0E%7}UY!;&UGBpYb6)gnySG=bM@7V2+6C<|kEHkzxSMl88;S7}Cu8ODq1!*8--0?!;x_ zDIXy%*epu|2pH<5DxY@e^l_*MOF}p`kiZe-fYoWu{2!xpW*^(@hsp6XL0O(Si@%F~ z9A1>?=Ifmp8)ktrt{kWe`~!g0mb`$0PzCfhA=z2LN$jzni@c0_fx^%rN4|t4v6MWA zKep#z-4d4J_h|Z&NfY61FoYH}l1yaCxC8~oO%#IU>)xx-@ 
z75oo5V-s?I*#uxG;b^5gK2^LKN(LItf;a|VOR~JZ7vFfG!bmUi-$9=sN?bW(=-Ehr z2A*6F;zPG93=}dZNTUw|@C11FCCWMoXZeCA{-bqrYCN}jf@;2hkObt*4|v$7OgD4n zVNqH;WK^c;z>zFzp1->c>z03oMfhbQ!_~h3*n2Ecz0A9| zx9y&2>o`^QTeryNw`4gDt)eP$fl%G|RQDPLb%07@f3;#^AJix!1sAf_?y45Vd#JfOY#xT16tW*l z1fHZJ0Mh1l%gkjP!jP?P!0)B1;Z=yt#}D>kFUU-6wcj3Ra*@;=!=U-DLY{craTwVo z40UL(71vtv+=fO2fB0tOaz^_v8j0pXT-u2#mm2RD?!k^%t3UAru*CR99o(=_9TiOmihZUj;?}Sc zCucOhu+%x_KkJYUKUNXaH_ke}bsXj367s9e1HFXQkKfx#kUN!0qRS95gg?eKYt!-+ zob#LufmEu!C~e__XS;r1ddNcjIPn^(46|F)EO%fp%5XTFL(s=Nkk}wK>5a5>!0zP+8)l#Rkr(fC2RMsFxL)zd}Y zNe-clihA1gGk@i0!^`qd6ysv`Ak5ha#IhM+yNIb3Of`G291=+xv&|h+6WaBAec2-N zaZtQ{HJHZOVR<;J)z2D@P(w2UnMgrXuZH#Myq@FYWwxaJdH#BXOmVn6lyZDSz-THu zrXW}-4rtA;qTC$9mR3a`D;^Y4$gj32j|aXz3bH9wbE!N;UVQBkFw5X?RQ97jEnOI* zRM~O%YA~1vs*t4cvdB*C8UVnqFasd{&qmXkV}K8=nTG(HH$~blhdefcRD`$FsE5C6mfXoPgHfiAM3PTjLKf6tCgL9=6crL_8lZnUKWv z0#FADo+-3Aoky7#4ff8R=_VE!((tuz!sP^DV5mm7_@9lF;}Zh}N| zd8{H4nwUqpBvaV*0icu+OTxkAlPeu4$=^{34z}{%WYd4u+L>`fzS+j7&@$D0CdLA* zz+m1^j`>HsM?a}ANzcT`}Tf~SfMFoE@Ad3m;D?c2%EM=n621c@Qdoh$kk zZENv7@4dg1n%(Fix=;Azt5_`o!)d5U6uy`Eg}iPAhu`!KhpnTt=@{VKPMW1cUiNa^6$pm`FA;&l!=*RbD5w(SU zz3&BN61@+FV(>i%#*Mk0T%CK^kcUXvL&3T#m;cR>8u1R9yaSaB_4Q;iiZWMP4qFFw zIjrk7rYs;(1LW;uBCYGye<0sk2XEq=A{R{&YF`^X_gPkL1=#YB;zS4$2LTS)%2yn{ige}2H?2V(f#bU72U1dmCmPLoi zvNXUeII3d#yz+-`ME)O}?;_mD+8N=>o}y#{h&_tK=mIrQY?Tl~DysR?+d_|DMy+Oa zXuj^4Q}&FNMPYD$XvBp<$j>fX5t^Pa9}ezZ69u~4))rbS6rB5Z2z&j>I(uptyiJyN zehXsGAijA;2QYy@rj)2sX<4$<&}Pkh;-gH{G7yQP>C_N_v{#vQN>BDL$tJp_dTmTE2@;7CuKn?7(jI zc+U_~}DOnv#NesB!eU!|HS0IVakDyD<_1bS;cZUoXjEz|Q7vQrJPX8N3~#N4TYb|V3d z(RuY)N-n(761+KiozJO^v50l8o^wqw3fDF#(dx!Z14e7K-kZmSmOGxQz#=%A_V0F9WAv>S!0;Ah)uV9e$wBqB8`zt#Sq+I9O;n2lI(D+rtGJOp zZ@4YJJ&K8kRA#BRpG*uLQ^);QYm~`sWfUl0@SZ#nueU^EtKD(WgaO6otJPxeYL$ND zyF1d_@4#3nVJerkDVPT52dZ*)RxZ(M&*H(4u{!iyAXrs&YZZ1^=f2#yDq)1ST(8&e$q8taPe}4+hC^HSO z%gTQ&)=GugVfbU1PiOMJU*@p6z+k^%v(@bHBZ%5domFVr$Rx9dLoAf4xo=q)j7)7p zgH-No3Lkj^U6r-i9T@uh{uuT;GAebH%N3_YeAHvt8kopQX8q2ntFkDrcf5?Cbzc~B 
zRx%G*-ddmacr}HIGTps|Pq32#N==V|0>>N=hz8CbyYGPLh;VBQUD+^oEuVXt{s&RS z%NHpKqK{S=J&8Y`zb~(K$RQ z3vqu#bGYk>JM+uBf9$!#CgKw?Ehf!*WQ0Ww;?;p=6;+iq+n5H^eAjk4IR);Gri#_0e<#!!LQ88L zaQdVRB}qA|E{`#d+d#BduarMtmAGbC0N&>9fqe3iE$U){GHMg>%tKxlSx3h{bAD;9 z3k-(1luZgpV@oNrouFXx-ZCRZfKQT9Sx^1B7N>dw6GVg{W2x=rV9bqx8moD^=G9<8 zS|lvIdNp1|P*E_u(I=l+8$XUTML*=T!Vx#E)kM-jMmeYM7|5nAOQ@6%oYAlH3?ZGS zM=bgqKY03R{jhHfe(-gdf3QGPnPBLhs?`_pVML*{rY!jyKa8%Q&OI=iIEav|To`j) z;YkhVLtJobU?DHg=1BBR;eN zz<^pn#Id&Z_)aDCbiL@x=m<0Agmu@wV!$Q2!sq`7xRZ%c>XDdjTj1)=A4yMnj=_{0 z3eS8>sh!>gjQleYPaQnrG?(93Z^enDiwT_=aH}c-QseyS7Cxp;vy02=n?_LCiWN zeJMi~U*6@?C9YwFCC^t<0AU^MBVT!!Q$Qj;3Y{9ZG&M2j;DqLDVQlOeHS(`GoTymf zu1*0RudKlzyr{^Bl&Vngoo{h_da;VPb75g&km;aeMLGxNG!!^A!pE<;q-2DPqQ9Cg zHr!?X!%C`6N??xC2xUQ8qiVZ}Haxkr0nri;+}6L*Pjf42)e0f$Y3k}?INh*5A}Z)N z1Q7Q5BeDDPH=J>Fp}m8YI@pAmNYNlnQdWB;A=K-!?uX1ieKAHP4==LKd3C^hS{wsP zrcduO@TsTFHP+E%u)l>G@I)5~l3ib+iE0>c-f?hp_y>L%1uFybc8k zBs3qte4f*E971Ni-A2Gy%e*A*4#3VuN50j$B_|Xp6XVK2l|Wfb^JJw(Nk8NL_|wP# z=@|QDX4!bY$r(=dEk3FFdw;@;W7+zt8j1S{+SkESU=%+4=w;fw$eMxK6|WZymg#|b z5j>j^9#Ki%-f-jbSp1b40Im=fr3S*tS0I#Yaqw~QR!Bf2c`n?S9R1x`w~&Dt8vYCD z*lW`YdDFQ5yxwc={_>j-yK$h=(Rul&C~6VMo_kPp<%t*SbT+D`cHY`Ok?&c=^>JOP zUO5(Dax3Bw!SP=}jrRz3+OV62$NF3IpS%@05KPf69;X$na=3R%RGQ?3f0?hE=i})_ z*4N==-Xp-a6{-GoA2K54;#~tGI8iYy71neG40}>7VvSZt#<0y!_`x%(n{Q=IcgnYk z;KvJurnF86uez$4R?!iW+uI*9kF+Uw9)^*naVL&J#K=sWg3+K6V#S|57nxMbUej@# zs)YF~o|9%DBdL~%ENIk@gmw=JI}Xsw73^PxGR8-qw(^%i&LRBnYk=*zFBcH6#l=PE z#Ajffv@aF0Gve)CVffKH(3HSpc_5y;AQzkd+pxcFfnV89k^76D3`@F*laN+6Mb+Lg z1mbR^$fkRplor;5BY@ii9)qdX2_aT4Wfdooz+|}w(vE1J@HU3(f9(qPbvI=av{V?v zEC?p!j6XfHf9l;YT+gwT7(Ef_{JvEi7GSw9uoT>EOZVByXoL~5_|t2Z5ZjrhhSBJW+YLmN60dY($CvQH3nExIcr3cWePd68AscePa<6=f4PFJuHIKK z0VB)Siv_GXLdm(4P16fo>z|JR?3_EVaka*Kgsh`m6cmv`KVl~onxRvMXvx^DttP<_ zPFF%@=p1C#J~6eHCSHFyLCPG6czR_^NVG<%+#1bY>XQt-(kodb{!!Iw?7UwIN}y`v z2|1V3@%9`u)x7v5V_2(_Qo>~waQTTQrTkxuR6Jfhuj*5={5Z?p4<$`OSg{6^5zV)U zI#hBHuW&B)myzF?Uc7X91p1a4_lX8et4GcB_RD&Qh1LOEcS8Y-3e`k?MYZFcJdBSP 
zHl2^LO($e$qdNc07FQC7x)}k%_zMpqMTrIWWHGpDJY!MlMojc#kiA#DS3ft8T61}Z>l46);0BR@;cA{SE#PL|$oMkZR=c0n0BA%iN8v7&bw;^oc&uE0hf^x^L@eAc{FWc}^HEoK_gP&RxHH;54oqvao#nv8vOXPM94lImf*&U*GCJZetqhOGubz!_OBam= z_thw2M&$ssH2Pc1{?3(|3$hHiBv_r~B%IW76KvBSsU3G-)| z3b~uQ&zK5K(d+_>=vFPMH|L^7(ZHHKLQ~$8GcyXlLV5b-x!m4fyp(!PD{snMmL8}f zQ6k4m9$t5Qx^@|!;Y3GoTu1L(2}6iHMNJXYh=ctUis@`1v9HHkWn!d7d)#a0CtLnb zE>0H?nTiF`O{DWTc?1*q*eAWLfRy#8pF_cE;VP^wgs$a0(O!lps`U)PZf(nU$#V|> zo>I-4Sa5Du$(-*a_{Xx!AJ!&$p_T%Qh%`7dcS@gb9ITBp;Y;IS?=W5t2I`Yms+$@~t6 zfVrE?>dfdd&Ubmhx6FZu2OXiq0iFI9Y|}0q>NN!(*H~?9p_GH+vP+RZ4IT|S0A}Ge zA!A|Ux`kdAp3Fy?aa!qLUKZtZn!pgY48I6Y#q=A5Q_%f%_KL6^Hf%%Y1o+IeG_Km| zfNq;%QmfYcd=OEou){^6U3F;FX2?A)eJZgN^Hmua8ZrarHJl+!w6&ln8<{mhvny7= zMFxYnF>T{0`N9V#_BoDzzh7*@Qym|%C>Ijr^l)Iu|60)#u1vJYxcO1>t$Huv3$}q% zzm#hz=F*QETL3eNVL&IGXnN3ctE359%Pj+~=P=kl?+plQ?x-x>yqavc#mSIuQ9h`QLrZ+P1_F&;fPaD{>ph#)Bxk6aUT3R{h<^_yzCB6g?0O#nL9cTA@h=bCRJ3&(Pjam2 zCpGq`sK^ST4$ag%k-Zgd+{UVJBNxzeAcA((3H8=YkYT;Hgm_nZrP1vqThvLGo(DaN z$34UkWrT+%Q8kws_K2%60AA^2ZZXMHDytSuIp8({w@rjooUKs+@G>ZHIn*uEC>WMM z9@rX|b8_UmZ9405|CV5>16Y(&4d`<_30W9beMBQ6O7`O4GVb3h$d{A0N49aJbA->( zgF`VjWZsP#S&uv^7Re?Evw?OLcxj4uRlW35=yc-wbW@!M#H@N#Va3onT4|qskZADo z4Z2twxx3(}>N+vxtwOZJEFVM$1|qYvm{1$+9e2Wjgz)US`-4ph%+qNml~%pYw@wc(Gu-Rr<)N(mFXZxr`pH!vi-#B~e?N1Ol>E0_1AgO7^Ydi_gNk z6R4i)uedRLht#B-yhlwZRmQkeNOGp_&|&Dp-a8@mKzuHJvvTT8(KpU5K+YQjsfLZj z7jJQ(0*|>DZ0th@kHm0h-{Uri=PyaY(LS6Cyv>V5P#Hx}f9Cb``+5V;8vbqV!To zMjJvKV<0it`dzxlnYM=F-BDd(w-YOn%0U2HkG#7c(Dsl-Vx~HoZw%5HTKONXk=7GX zm+_G@@Ul@MAJ<1cp`7-jCP9EV7sK(YK@v65Ayde?sB4GbWL;FSRTz1g#gulw0)2*0 zRopq#|L5@CeSrVW4vm#I6c|0o?9bu;Nm=Esn|98m%z0zWGPB-&ZFe)(7_Evo{y?ef zKfYpy`Z|iH0YgCsv04C>Dar(4IlxyAV(sbeQ|{MPmE}3j_3?d6`3pAzL^3~b=_u~mo_QXGGv=%+^W&sDU?-DM^Hs>Z zcfva`5Ux5ft$GA2|NXTa$F-RbMN?N5XZ)cYp6+~;U^8LcKJIgbt9yKT-grA=ad?S6 zySa8GYbD4N4IYud{c+38oY~#jK^0@k*WNeoz(1=cWj@!YyghIjC0}=q>b-nzt#8Jc zo3Q+*R;*2p?%7h#b8A_?nIsQiX~eVPp4HzPVah>KcoT}XLNHJe@8c><45%7@W4+Uf zU}rAZ{Tc{?%`+<`m;qH^;iPRZD9=N@xCXKdQ@WkM 
zKx(vM_o+RVMH9wse1{LMoyRyxaw8^3X!}nv10*@Lybs^#u2-ZI_+0ptB zK5x?OD6geN+p6JvaK)&FBcR@aDL}W%axDDibDO#2&yvAYBzI#|>D6D$KxU&uEx8Uj z^LXa|w+MRtv%t|m@%^mc1XN+HJd2l8;p~bFh`G19jezi0tizlf{AZvk1RvQ#y2U@( zSYi8T;F2B6eQtF62p6(&4OKBfxJ9qk>uyiN5=T+OdEQ|wg@|?v3k26o8E9}qCH6Jv zey)XlV>xV*MK>hh1r8%Hf&^@642*i)Ku^s+u%91SEo~}fXtEcC;piFD-}gleML@qG zip<*bG^sljY%SoW7tFGlks;Q!5FlQ=KFb^{o*2XnggWVqn{)W^%PJb(xuFn)`EGxguA>w3 zsCxEpl}gldnSY|i?yL^LchpVPHz)G#`PT1XQ-o1R)Dg5HeTrLwR;WZe|4NiiU%nvO zRX@6bhX{1OT5}#0E?V*2`0YdNdCPu}ba&6L*g0Kdwsfyt%0*{dY zwmEj#VwE`8#;*Avx68m5oUU#HtqgzGQ-a^=q>(h6bu*yV;(c1(H2cidx90TS%FB*Z zAdOL}x}b&NwNEoOd0nZ4D(f>IRFKZHK&l>WjR79m0^oY+;rALs_<1Mf$Cg@JJaMw# zm~j7)zMp6hff!S>Fk3U2;=!Ra(eWnRXzA*zP{c4CPe4GD;p!>Xf~LSot(V($st`O~vJLEQEv2(B1XxB1BG+v{zUdl*9yj z2G3~`OQ4+v%1O4VO)d%ZuA*WLpV40wtD}y3d}El2z-nt<8gYRyK=2{ijif_{PGoBF zVffoLa8+ff5+aCau`TH0!>ialAv4Ucu!e>PUkEH0_@*bwk2dYZ4A>nVXSpX}ykWwk zi!IPDD=dWR&t6Fh_Hl0t1s_t4z7#M>6|PA4UL{KXMT(5e)%l%cvIF(4juR*LN*r69 z4>jX={fHQRPrDJO z=T90RS}0k0&KC8O;7Q(PU9`o}*5@i=aJ53w>36crKn7ZRlg_n$yHtP4E9^urZZzbZ z^+3@7CAhDp=7^A1WGPYF?_|-A{zw&qXkayia1>*;k{8wgHCk2L9}t%^!zg6|n`%A^ zVpW{2D^8ZggDW)AB-_JGQ&FUp4)$;DvEsfz`!whU)>%?LdMMQA!;x3?+{F&)Wf-kV z#;N;k=C1rnkXOh~LN5Euh=$TDeZ0|qo_Z1M)YaPJhMqqxUFKHHD~eZa$O*Bn%0gh? 
zMuN-<*nZ7xoe_1qeCEic@6K(aF9rbeWn{FK$YUUpb^M$J(lu1y80#(?6`b(mKn=e9 z)V{Ajf!U;U$vC4q@Z&{)NyO2)dX9-i*XG^GbkbD2r6j$#Ok9Hak>C~JNcAZ)S8$ko zI|YG7G;3#@{M-NceAB-iK5%C)E48qZ2JZ=i1r8m{*KD8J}y3_=T=7 zhOIoqT&oN$ic>>cbR#bCP;%T`R;sev;>P(u8_GGMCCfAR*J|TC&8#*=6EVj2aeFw0 zsZ!b8`eERevds~{8KN|)?>HIekD!jKb8W`400PdSlC${J@>lzHiB5_Cn|CX5{2T^2 zVN&)8#4c!znlF^E|8Om3sbRL5JfmDe7mg46LRGZm`;|UFz3>mi?rz#zx!M3KO2x;U zW^Vkx^|kOdA8krSda<)=GVQ3jQuC?J$O4BvjpVU?jTwE7n14wJ|JkI0B$eO@CYJbI z9S%ry#V+;pEwiUOyUn*?GbYipqoBEXPA8}{A%7P4>{MWX(F(_EidEW6F=@qD-G9^o z#YVY&RL>-xg~HOVov==4Y&ef7US}vx#%@RDVo_D0sueP(o8u}0<|*XIgDkCq8eZz9 z@T;L-9c#Ach(0?sZ7>6feW=Wd4vj)Jf?>&;z{Sb~X0WmTbkvA1Ekt;y?yp%>C?x}O zl3|u&X>8lPp&TW{bT`ete*_e%ADmpu6`MxW2X=9r!B=qw8BUc4_{)wE9k^EZw#S!)lyS~kV9(DChemn4v0 z=Ic@rQZW~dw`>W~bGXy#V<%Kn>G!S2a2Pn@a-4D}WhrElE`$&$RDuTWEc8DZ9s?6_ z&^~qUK8v6^_%<3E`QdCyR6)Dm7AyW{@xPjU*&xGji2_$9v~^RWJWaKMwtV@grZ?bE=W{iba**?HTz$`{4Zs%TM&=T;?cLsk7&$c+|1McUY^wTK&O7 zU*zvCIvS{75%;s9FME27fjmvtUhrlZXE?{BH9xD)jrmJrmEp#&EFU2z65mg2Z^7uew~5#q5qvk}ZV41YtnMy|l+##CHH}Tp`!1?CM9?sw8hq@azA7n?c^tF$@^{5= zOx>VyN-Evdq@iAB%CO%-mLoO@m5SQI$4f03*;d7;m<=aW{sl~63562ZTBQjTBVW&}P@V;BZ)3rbi8^&n?)-dyvFS)E&T z31zXL$%&@L{8ZlMW+*eyjOeaQ_a>;q>~AtL4gXkDI@-H z?+||CyuL+y@USW;&lNnL;WcR`MJ%Ud9S5-3MB&YM))m^ANn=5$At;dZXyVW;MQhaa zy4fqjX&S|~4sA9L?|Kgu4iDLrfCaui zMb0kpc9+nG~1bkg9C+=fN)H z#w{u5OodQLqxut+wu-C~t5X*C3!Kq)=mPsy=AHP}dK|xVI^eL@I^gL3f^dZe{aQzq zt2fvwTeSa5iu+;(0zDa-$~o4{qCmVVv(xi4Ob#8`O;p;~0%ci*> zp!ZTJ35N#hj2c)0#mPkHhp)4lyf))wy*8~Sh>hbn4|olLdv@Y zVi6tPWrG^|uJBl+ySqr{YxBZ=XTz|0r=ZGSRu4RE!E+&kR$8AkmGn_(U?q;+TNX4< z2R@viV52t0xx1pCwCv$Xn#GS0RF3F#aY-0~5$yv&+U;b6)`23kB-(n}QL?ghL`6ZC zh9mlURVXbNi?p-LxMIY{hh1?PqulIz6b~W3mZUw)tZlir^6M_GR0=0>Caxjx?(FR5 z)F|?*H-KbD2X2AqcIO{Ts14JcF2lr_w0l!!?K04+w^sLqdJlebhUli{i9q>3kJd-v z7*MznWLovSE&o{6p4KoQ@SD(>j$su>9o`RgffUsi>hFF|HA5oKzIz+T}#=F==G}?i^2lX}Sv+L4a~hs^@$5*%cf( zh6JhuF_n8tL8^LCL+}3qLe$@PqWLHs;>S4iYEBj#O)GfnaZ63qKEc+O1tAH$=XtXQ zHl>z~+l?V=T!`QElwJcDcNLHJh6_01Bas%sy?hJtni1uwmFo`UD&lvw;^P9R9`Zxd 
z>AZd_Y_w@fcVVpGeC=3+cw2&u2CK}YlY{<1k8h&OI5`L1lDa(Lfy8XdHkUc{ihnM{ zZ@0@m@YVRXf+Lx*90z$%Wz2H zB+cTycAhcFr79;CX0VXZ3h3Dr{Ko52RyQfLrvP9mYBhHv(HF?}B?-4a4frZ1b6tVs zmo?>H;*e%ObbcVE%Kxh{Id1;*AL8xu53NLA4Ff<)etnTNP#8Z!u!(}F=n`}eO!@5v zUXH`GNAgcKOb0P>J=P{nZFb-wF1nEVr z8O+w9Of6Lb=~4U^^&MLR>o${-iGo%m$e2A&-1iX?l{@-KWJ#lohwF5B4&SpnwPz-^ zlMYRmcCy*C@CGIEK8OteU6)I2;~0$6LB}0!$66{M#Hs zx!kt9mVA12pPC52PAP0S_ihIVPN;5uG%#)2^b&BpBhMkvp{bcHQu-#@-_^5? z(-iX%Za9W!51@dJDomVy2%J+21xse64?!wA36uIBUgPKw+z?@_W&*4MDGUMyUZ~MR zf~kjxtxf?f4oqBorC}~dWQ?KsI;W%qpJjRiKP1~=!ML2YTVP_14mF(_r7&B_-MgXE z2iXVO1cT^E)c)-0aQwOL#d+%lDii4i3W{BC39==Gg9%yau)?s{cNw`4NVBP_c5VY? z&z-3A@B+_zCV4(0*=S~GqR~78$P2_I%;9HxoIZXwBpocw zJIl+W@CjLhV%BnLZu!P7JHzw4t37Y`CzbUh5B{EpuRUboh{nDfpHgS`Z~Y9Scv1Uc zDRCkFT+@476E$;VIP1DFmF1*$1=t)|)p&;~iBqe-13g_n9A<>HN@ECV-vqscL)#6L ztc6=qRLLmgOHtCminL$*j(ij@VDKuU&$D^ay94H}MHdo%;tT0hKA~v&kx1Suq{8*b z$KEo#TDgItP*x{MY9xAcqTv~WlELu5h%pg|)J`n<4Ik!b)?2CBl&@kdI7ykwEz5=jL1Q|<-k)Gynj0v^ZfViHOZ$RgcK(ZtWtJcUn3O|Ta(+%0$Ng(X z;?E$#6Fy217mJxPNhXX!*soege_h1}*w9k&8oKmOk34;*-N|<+Ob_Y}`w$LH@(Vr} zqa((I3L5D`>{<3*mP(@xsO4o2H!}NgbGQq~!Up~heB7&RcKMvc;59n)8I5+ZZ33eV zrZC`eC|40u4)Yp?Qcw?ZY4N`NhDKkEn-R@!XX~JIsf(b(J!zV z|E)mvEyWj-k=3%>@{4PvY!y2lN}i$9SPRAOdRNgtbRdPB<6b~$OSg>eZbHeelTeAf z^peeB3v8Vs|0owa1axQ72G76^)zi|F60~I&LKj;p%@s=FQzVdf!5?TNpi$}i+V`i! 
zkbCAKz&cJ>2ZrD0&FyUNDusUHKS@lsYOnIZ%WFGJaF>}q4pauLr+tzD2hw21>Y!$dy^!U+ZhvYW)A^#Xeg?3K`!CMe>bP%q=U|`gpW`#3@96(ioew< zS1gl%w}F=b;Mr~0{$mJ3>_PEYOJ(ZTztCns4GZUiShs9aq$evP?at1tB6u>WAlI@< z&VJtVLcGA|8dvhv@#}s@_qLzJ7S3vXmOrybp1vP7S)Q}n*?JJ?XF~^--1)gT`s=Z2 zIPEk46k%He33*9eIJ|pPH>7NRrMF$u$JC&lhI>r147L4hYP8089uD5^`l?+Zeq{hv zel)~^k6<@H!M}WsES6A%z7Obv-Sl3U*l*YzcYhrqdiFEe@O<0~e1^7Pkx;>7TG?T5 ze0_I`)h(zWh#JU2p#j@7km_y$E&XB$> zNz0jg>?A_=n^8D|2OI>n{&5EO)Ya_Vf_ty<+btjpM@ub-h|@q#n^zEXdQ;YWJ}LfP zipW@V-n{s5by6E`F)cg0?|m#PFu7WM#d%^cq%^UzF2jF*^SY?a+t1iZJflKP7aNSh z{M0PaZriS9TFA^mplCjBt-Q!I)nHFx;SfL~HWe3%r|`8}mah;O3_@`d1DB}NWYeBo zQdjGogv;+;n~N?)utJ%zv*g$QtFS1N?mS&eXKcP>cDy!E1R2%Kz_WaS6D?uD{`eq& z{GhL)FIcTdZl(1u&CAa83rVY)4h%l{G^l^U+MG;pOg?AU4U{0OYmQtRy-l)5UTw9s zOonmqVYY3C+3al5S%%_|WccQHDF7~juOmC>_BUc}=a(YI%y)EGCz{cJA&S8^Y9iuh zhCPc+Sn)tUdxejpiT~=sWE{iogO5*+F0NlBVFY~BRQ%?t4AyPhH#Xd&3NQ8{pa9ZN zU-jbm?))98k{jH13A{~=7(um;uIRQ+v#f;vxIKhnGA?vq8t=LgxWq^sS!J0`;%Nw8 zOGKY`Ka@m4cOLOomwQCoN9}SchNac&pqled6E+-9G$j`9kNqEos(6^0)AEGgAif6l z_uts2ZnV~=q4q660-_;tWL8T_&h38<3_BOC6%;I0Feg`4O>ckNCwk?ohXlt$=DFvOUoqn)^Q?J3CqK@rRLiLn_}X00ZsLT)l5D9>R0M=N4w<( z?u7FJnbH812jz{oO^xwq_M6)N)}bGYG$d)U;81Mo0M!&0Zs6K3OkjNQzJ;+}W`ldI z;pZ<*WogZIX-~Aad8D>?c-r_C!K<-$B`(XZa|=pc)xZcCrpK@a5@p2vtHpO3&pG6b zie>6GHMl1>ogV^S1Xkd@!}GOf__Ts#a%g<=8M`+BmOFR{>b z+qHIvA+rfml+D9-PKCLW_(SSOWSc`+;X1PsU0+ZOr3M{*H%if{GrF?3pNfffz#yUQ z$BGU=avr4T|M&DYeYep^Xgpi4aX}NQ>5REou|82fzGFli_uUE@rRk|V#(g>GxIqnr z#qxTEc4SuNWlc7~rnhz(aA$6tha|IDv*?r#1|M_- z@`6!<>hkTKf$5TyuE&qrKdLaZ5WE5njd$vWZ)s$_d_2k2Fa1qwu*s7Nk10jSrLu4NiC=#`>* zT!oUf-sy@Q$b>RB=+LHk8k%YWu_Z|%VozE{Nl1oqMcgcac#plmy%KMHb)l->zo8gS zE1<_WJaJNwk44gd{lM@Hqb2}~+^a;Mw#=)*q^FgQk}6!k<0JuWbJKkt6Eu$?-Y zp^b2VTq%HW#gXa~zbj9~dQ14O;rUIjd(2+OP4y55LhJ^j$HqwObksqtL>;&kpSe3^ zrHr?yK*9u&%DE!Yf#cB9C9V9k0}VM1xS(lW6G|qNx~pNfHC|x_!M7l~vT-Y6P37;OqXw!eN_M|B7gE?YF#GD^u~i*LcK8akmU zBU@GAGDj$;#g-JS(@v->P?t_5Ubo_lx3&>iic8lKOI9}PA1B@BGL8|PH64&V$Cg`z z?0m^=dC4ezHPIY-z5`%<7rhH9x@YmwrbSknCe@Jh@KhwujLLCU|6#CUahTmQy%Qt} 
ziZaQCiuuMfbBAi=RW21U}+$3r`?P4{}o~ zBiFvv2yf5&o4CtPvO=)CzRKS}PFzR~)45G%a1{Ra(XsE?$Cb7l&q?=8bCbTwpJa+f zk`vsd)l@^erLB;=q`3L<54_(ABBI#0Ogdk$r7hkKI50${}5x z6icmW>1D4B?U7fF>P=3-ZO-4^dV5VOz6nJIfSRTvrRK(jCqdUon&Chsl^M_ke^dXiNj&nF4kJ3aa2F!!3oRm-arb#J?5s_iDSv z=3@lG{r4}if5}E2Gl@t*RKq}7L-UjYLDpueHxcMAN3j~GrXD8D{nRARuOWImchsFT zDq(^1&jNoZk%oG0;TP3{ZlIbropfAp&&E+8Tq`kRbhHHKFDKWdp8h&_S-0$$O!v#y zIcJr&1ra0(v0uw&R2TRy6Yf$+$@iC@Iph`>+PTwYUt1{+9!=mL1J3b2Gt?qMzP^NA zrGyPZ&-Z*F`wTDli)ID+?&}W44KqXf23c!l!qizCHFmz+P3Jx*(7+=1pZc=Q&M8vT zu)a@mKoUE4ix^%evJ=4-s4A-mt>*Mi5ldSOrk<9Wf`)2$Crp|;tH_(UV*5mkIGrS< zeM=?D$pT>Yw=d7_g2bdNsY>o!k8oS$N<8Wn2G7$7dhr`JVW6%h?4^Tz_^vjcR~h2& zugDY(??8Wz7fbHwT*h;dscp&#o5^qL?ceO21x>x}?d4gr+zv+|At4Q*x0)X^JErvM zPT5lCSSlY7RO5%rjR5+0ZNameCaULpyUsFFzq(*vSk#{rl5<(I?>5UmnL`t|ps2)p z@>#;8^4BYg_sme5f+%sgVCLldX8^14->KnuyL+-1_2z8;5&I##N6)(>8abt$ymF^x#VN9wMTiQn?Qo@FB>7V+(|S-oH`v6K0X_(;iyyRK zv+?tq7<#NG$QE0M4Q*E-=6lFOB4{n_WgV6Ve_kTZyhg1u1-KDd&M9^veLjYh=jg={ zS8+T_!v{6{WA-@eOY#mIEJ`nGtqM31e>lgBEotfeb6}bidQW z%$KI;{XNc`b&4^;eWE%FI>t2YL89E3b{INq%X8EiT#d0QM$UJKG4@jZw>6?=sRpQ+r@Ym;Pa6~$qMQ1b<4qVO<9I7= z(NMz+BoFtuv4#l6$~O0rVqlMGpoaFWWLYPH>sWvPOHfLCMsR6O7<7-btZxvdNUs536@O zH4&5bO-Z+jy|3=$&BH%8ACg$OFVsQhIlzd7+{w2G&p&=JHs3?Z)mL;cTfU_3iCnLJ zQ@;#g&#t?S!7{A#_vaIiMGpZ$PLAT&U=oUZ9GUxo8alKPGaX=uBew{ns(?3isuihv zT5m7K+%y2aBtUN{HY?e^G6PyCu6i7ng+46b+ZE6T?|~+&3hp!i9iwC7N8fFJrg=f9 zyMoK$Qxq7K>rs3Zd+u4e zF^dDPb_H+gEj)5-VYDW6Q>X;d@<>Tdi_NaZ0$fK5vFmIlA+iJ4(SZLbW2OSE=XF^k}(tM*vfxqv$Mb#hNyEx^|lSn{^;6B!l8>k6Xq2owB;FTAlAI zv>+}3T3REheyi@_5A8I6M~+d%9_J89BQehs;D{X@hj2xEMj+71UpnpNZRG2)eL7~# zOeze`59Zw{ix9~6jnY}4r@1&MzK3Pw`YYN#Mjn-=B2my>V|Z&|LZ?1Xa`so6i%bZ_ z0zM7I#VFI`3!dU^dn4350{uHLP1xlYNEUGe)IhWP7SW^QL<^QXrHECd#nRJGA{v;D z5ZQJrS@wIcw$BdA>*!UgfAIMBX8?)CgF#VtLn7e1s3qw|Jm%Q(r~GYdhg{VXY$@- z*vQIvI;NDHWv!1e3sFEpyd1_QET=U00o(TDyiD=nw4+yG2rjBq!{XxcOR59}Lv>WUZuJr0gfdtk z^%*_;+ZqZq18jPZL#zNv!H$aLxe&ZZ_cn6Al6c(K)r(%Kq~;bepw9r-`o?#{^vE6c zsB~rIUG@W|s7#`ZkfgY{PFJMbcv(d159LfTzOTy3TIMZbC3#G)<8g>nEOka!4Zn1q 
zBSL*nFMHK@snX{8g4r;sc2<+r3-$K~(D1#PpYQX-ns?3eVDjo{{7_+|(R?5+ zT}hEaZd^QL`#m95=hdG>6C=C3N=-I1|HwI@hS+bSNG7Gr+LA0iI%)SJ(3OEEu^o`E zz+~bN5?(>3BAneL&VCHQFw<#(_I-fiRuf`rMiTC5N5SFbLzcLEQHC3yi_}z6iNW^X zv=FyL(<{MTO(t##WBohW>o8tpD`Vc}MSz7XJr91ycM37-E#-MM~xCk*iw_(!y1 zL|eng;YZ;Dsx?6D_u2ED!lVQLC_21gZ?s5N@IZ=oTYf>oP3%;`z5SP{d6I)o7*$hK zM722yo?=%m|BDf7s>zUem@-b)P(nc~q7>)dG5bB6?Kd)}MK>brl_$b7>n;AU0SdI{ z27Utqa}8&TdwTo>PIi2AmgzEK4`V+lfDb@*7B|}_Mpl$=GcYqa(3fxmszdHYRXS10vW*v*@w)PTikBwAtDr6qektYo}`jV zCbpp&_5JxVDkco^CTd%GD>SgnO_BC=MN=ds+^~sWhj`&5<+Gn&<2}9l79%q_>vVH! z^T+#?zbw z;rrF0;OfA{&#{5+oa{V)La8u4)Y_Mq&DbZ8o%Lv&JcfrsNy*@ni=OE(dzNzi2;U|U zRB_z~WIode!!ah=NanE>WcoFD$(^w_@Ms%0W-5lh&saywsDRxH_Snx^cM#^4i1^4y+ z@3ZiKDpu%d={9+rOP2*QQ|a9f-E4Y0LMgeHsIJ?IkW(7phn4ocsG%1#XG{&BF?_rn z8s{K`IJGm3ApkXtnPV5SwXElsfm} zPdbbDnl1Nmi?@M>l--PzH&q7;H+O!@=~L`yrV8RUWYM?0&0|<-ez?Q1G--S?4%Qbx zGwi9@yuc1dK#TGC*J@KRT3xe?Lyq>NVyF@ z5ko?hVJals~NtpVQm)ux)I{Wqo23(wyT|< z9mj52#aJ7}n6vzRS5Vs?J>AdTlbwsc)wo93<)6Do5Sakvs%7d3QBh?F6lQKx!*8upj)187;obHQa%~S@`mh4b zCEVr|HONP}2Nc2{E|U!vOdK!JPI>ke(0$L#bV_uZ@kb^O{8bsoIPmh!8`t08U-=T< zfH8io=FvpB`FljXpeQZoZ2lSc18J9ChAR2_o$zaVIF=c|$a*L%5n;@|tTin2s`zqX z;2YW(njs~-*h?A+5XpoJ4s9Lr*6!q@oZ7Kb{RK{>eF~Z!y82FK12+NOqiRn;u-)Ezh6aArZ;^fT-nzhh;Z`u7JwWIj6Ep z)7SK6`r81Pz4V*k;W~bt5#hBA^ea&F@NdX!2q3i~#rwJx=`=30L675^nQp z65BTa_7vr@0)PB;@lWRxtCLdLZo&#CCLXHGzeYaHI?ORq!n%cf7x=hrb`0-ouF&0ALJUicbzW|2RT9_)^p{u}o&A-!Wc^;1BCP#T;F#&`Ud^K)OK$JgI%l=z; z>F;vP5NmUs|11n0tou@Uhb5y}6Bu4>JaRZVe}J8O!x#EC)T~g(jpq|kC{dwALdMIc zTLNRzkOq&PGdmo9j*(fE0UM;+)ZNM~1P(*%(DU}WwD%vuv?!&{9kY{C%9O#E_Y6ZP zBJk0EH#W?I!lTSTMPtd9ahTxQb=JKgNqAv!Osn+7u+EmmC83FZTO$j3=2Hx2$}1*b zD`~MzGqE(j)m$|fuLzHJ%>{7K4r6^$%R@T$Yx<=jg_W2_9ZHaNZOW+r?eogr!Zle>tBHHc0E(tw-k{XZ%A2!bo*jMyMAQXd0aBpVBlsiMbBHk$)Wx)cg;uHVz6y{ zJr_{F@(c}Z`>Ayq3VwRmtzp!l?VGsM{WHw%H;Qz~@3)spkEc92!tTK!E9E8mQ*SE( z+|7-lUVOs8Kp!=6G_5L9<}gPFC7Ry<*_fyo4$_*(UUcjsD)s6U<$U>~xlrqVy3!LO zC?(rL?~TXB)T*&4eRzzHbuMKmC&p*%*3ttnkQzj&WGnW{0IE+sxY%ZnUKJuAJj 
zsv_L<1u~$sM?M&;O?Ivf&Dtgv*Neg+_2t|ZANosyY^#x+o2xTs3Vv1Z43zFw-4A_Z z1gF{_V7&exRf?2M@|HfKD`=?MLGRZMWo zo?E>{oPg+#sGTUzQH9V78G6uh){=FSuZnLj)Se5f7j1DMD>x@mimf&d>OwALIP+ZF zeE#zb<|>sE5V^Sul76=16tCamQh(OA2QyKT+Kr7!`nw!40MUpfsNWEdLm z;i42ZJWRX+lzx;&N2E$t3IM$zoVoY8U&Gx72pP-LA@eX9S(l4hbpIMSCO9*?@3s+S zTKZ8Zs$11~ZnpS)vce=&bTRyMbY!fYpTCkPkRujS2xoUEp$TsO!4vDf2cLyx6yHX^ zxt;IzO|*#X$gb#1pr&$4O8l~G!v3*gdG3|}IW7~h+*g=tc!JsNfr2#?E1w-;Ejk$& zE!+IN?2de2ABeGTJr4hYi7BE{-I$euOX&@|Ei6#Ih#THJo>1$xc+(VKYlG1O8e zXi@ZN@x1a*SfY;Vx3Yzwv~2D%iRiM)XG>*7Ej7T7<; zPQ@n{`jH3<8^M{GTu(+>Rbagou}>CF0^=6J5f(MMsu55n2z+N*+BO4J zKBj1u+CeHUk6TUtuGte8dq?>MjNk4`{~=M?_+vVHOu`OadVuH8><21N!%=rGXi(mh zJ^sjFw;h(a@K7`>A=9z!%eS2RUnv)aw4aBqsUmTxgr+@B`LCs2>Mq`#)BY{*po(L$ zv*a@q=R>L&mgo=7-;1VtmSpl95kC(xOM9`la+eJrzrXdMaw%rD1dhf59}vAqSF%|< zm`9&<)B?zBlJuni+VBn^mIerH{yY|gcnuOdzG_~-J<^7EAc4$qVfrHw>BlE80-mr@ zs_i{nFdX~F@Qtk~mgK<*X_o#KIhP6p83vlM=HfRWxeOIhlq*I9h6aGtO#6BS1s=s~ zpPCpF;yFSpf=S;ERgG?m&vA1;Py3HD7dE#ZjFqg&9m{|o^a5^M=p2DOCvtwijPNy+ zs)ie+3+DT)f+L;moEPtj4NsKJ0vJ^@c~T0cxcHHqUl<*hu-p?42g2FKew3gPg7mRH zTxgM!G3Y9}+g9?4k^zl_`B~X9Jch(SK9IEJvic&#MpQrdhl*v#v05{+&`&aa2OJhD z8h`-Rqfu0#GerjAGcH0t!j=4Q^b&vKQxk9QT{qp8SD@$F--epir9=l(S~~oCadt+dBs8s?Q=z zfj}oCDT{X@D5XSXO1rxsRADhz^-~!v2vTfaaA* zp3GG)BU(pla~JQzB}~!okXrZLcqbXKf+Y*yE%@I{&^B4d2K_zFF@BaMRu0m2EuxPm z?zqr)vZbv3PG~%PIHdCK^9tya94V{$k@Q+~Q|FqD&RiY77&I$bs_6pEz_4b2JWOlV+mqJ--Z3P95ZZD9RiIy3BTKJX;2)X z9bR4JJW`{=V5Kx5e);*46VV|;w3$9CMqDmO6E)Wp?ZPL4C>eRmkE-x^&2797#YtM2 zF4^mYA2OcbBV4a4%^@JRjvR7ju8Tv49Wb8?m0B;xp-b{SNK*$p;~}TA^1Jb{lUS z8VxmU;esTk(f2+hCZ8gw{M3;yG4Ev%QpjTvDf(>xLW70W{e78w=|>~d$o`MIf3Fy! 
zia_ZqrzIH}9~6Aoia+3^8cnHwI~GOi%#%k{IjNC(;mdUbnOZH^V}M)K%S=az9qgbn z2AXDb`u|=N-o@W`g97%y%H9gUAkiDUx^W(VOujZv2w!tYWB~`6{2BS4q2*D?R*1eo z;o%W9eyh2O;NHg{j*)F6j;>nC)^NiH*L)e(A66;E0QYMrPCv-GM}D{+QrZs>crH1o zE%j+}9t&Ink>qNMwF4fOCPvgx=AeOCx23fGYzbWP9DIXr6fS5_yLss` zS?a)za}c?BS6`Kghj?u=Y!e78Jdtf6cpXEDJ!C=iA1jwK!3dXqK#=T%F`(R=z%}Bi zaJ}@$e4rIp%`iLi9ieDk;X83-dy4CcQ|8W;JFM=G&Lck?QUj7L(lbkk(m286&)GbRe>=b z%J8}(MxByW<-EeZRBB`{?D66tcAf9vJS$AYpfmZh0|eOpv_$@;J)o4-)js2^xa#a< zpAe=k0NUjmP|vio!VtMok!O0S@;u|#2pbCiNT3V0MwSxtU90`sU*uG7?BWr`g_9ak za71upEzl(8(B{&w!~Sb5{>JjZ5=5HxM&AY0cBoNm(cw}%KWb*f4UP=)OH5LGYzJRJbnuEd!aQA z-gdn^){{3YWjO8XPPhuQ3D-11VnZAAztt8Q#RaPr6xgPE#yV0* z*-0NC<7rC_4%4E24_awPEY)<#ZQ;`2MDfa2J((L^B_c666Rd)8_1489(ZAEWZ(mOF z#);bac0bzkp7hRiw?$a(DKRpCQUueN8?g?A9T4W;ODdL}M>xUM(vZ>s2vBK@XXcPT zEY6?8wL{&-F;&!!4e%+Aue_!3Z(gmzG1HF9M}i$Tf2gs1iqz%=Tl}JYyF~IBC)?bs zL7>mgAl;DSv=e=wI+y z0DvlkY6DTdX-EL=?yR9meqt(ASrCWfe)Y~hc7@$>bE{Jv{0};r0~ZsDZ0NF9agOhZ zQXsJK`A7wHH55nVYma$CS%ph)D3DM={QP#6LwL*Ql~o^7`eP!r3C3jZ&o3X{Zg2hT zLx5nx&~Eem(Qh%lNMKDBZzf0eVwPoLEMjrZ-I*uTd*GxM^j?CDa3^g?c$6P~PD1R@J3sLX|Ek2gW0VlRo#=*` zE$?f+6cj*681L7e?=-CVT|F&v+y=yxW{oSCDt2m*4Tex&)-Jmy{;BZ~(k99-=oepK zyA-@8D%e+s=&2o|g-4HRbNMvJ@QVE+AJy8NTJeaZ*}O zBk+b8s8(qnJXH4P7<46J*lpp6YN}>P3d|4@^UOlgl&lEFe*aoMafF*6u zrp6ya3B6iM{SnlgR5fb9N`M2JzEZi+tbg#gw!GIQGcOvpI_I09<7AP&K$sGvoaa*i zBGVC53{RDb&7zb*I=NcQ4-TY&{2YrFYSrv zV(%Kj8@Q4YzkwTphsD=pt_KEE3z`zrj&_M}c>m;o!!vApyg)=|HJ2;U_ENbDdLQ8^ z=QrHP-AFN|E+Uw}oSVH(12O%yz%yPXXb5xy4uYsx&e-6oS5R^7OaTZQwTI`w$y8Qi zX#8A76+qt-^i_=MOp)PJUigTzzz=A?DnxGJA+$NVD@E?odz+LNa9{ZLbso(mrITRM z%%q}Nlb#yU(*N*49MZlAqYu}@V{t)Y7IH}pc1k*qQ)ljmqm@40fx|>`Zn)W^U&erz zyPa)XVnIS7RSjB*#A58Vh#k?@Aw-xq5VzX!{F$aPFMX!pWDbBB~vL6E|R# zPX4*sgOhM>@wV0)M%n;Y2uf>i%W%6B_dZ@lES`jc8sJ81LNWN=eJCdCJ{+g8_!tokw~NYThNbILHm@>NNpI9-fCGzwAY@X47F9nDgfwK%P61opVg{ zl`Vp1#|mjf$d_8=d1M!`0m-J9d<-5%;qj?V`fsSKQj?34YTxNxX&z4im;#~Ohl-HY z%(Qoi4?Km{U>!+W?yi#M*@Q(Q;7KlCy&bIHP|n40-_+lo90P+bS`++C)bg;9M62IFX&Rj%)M$eD@i9 
z^DF@@s3$d~j|1n;IPk=ktO_}E-Nc=6dJ;b2?Tt$ksq>+r!T-m(CUbN&sn2_zre05R zzNVfoj*zjH=~T7?xYQ&?PbAc>LV!(bP-(`im!Lb3J=s(ORC5wAPBPHaym5Qgt2R!7 zilQz(STo%u$_D;O7$(HufFbbP5~E%auzRiF?(zR%QMm=1su!ssmhEOpG>5sBteaJd zMssiGLmrIXhW<9$2A%d^At*Iych5y6Jr>+3!51=8n|2!BNgh~?ip-jrwv0iRkXm$v zX6zAnEla5^L1Y_l%;Y_fgBTP7(5T+4G_7s0)kc_Rqps_(WumbDbt_6Huq({cPq7ks zMsc&f4P`=r-S}7g%e2cK_>C&yXBCa4Uby-Q;hSM2Gan!^`YodWU@#$k)J|IaV6NH0 zhcXZ_nnga^bpVwUjL{v!l0Z15AaOu%}tahkl^K@hOTGD ziVf9UvDrJ+K2Y{+kZgbVn8%%?PB;CYmszG0D=>(#m*c3k%0N#ww0Eq1Rw$JuVeSXN z5yN*<5H`eDGbpJJ$41kw#L6sJcW3X`ihxLN#DcOsD9NAHY*nO`eD?y3&YrapROnxp z!mvlF)S$rm^UCgj}qk%fSC~LNCGt%5PAPGwjzDaQ`As8H5mlXFUv_26QK8 z5Yc^lx#IEPn>18A;FrO=+0q4tI$eiHuzm^zBP9G)Na}rIZe3iC4nwo#FVvyXd{M+S zZ=o!*N;b(m*X+A@IREnY8edC%c6N66s-hU%-8zWTU&!UAy~-CjlOeVuhdit>;;OYA z21vd6D>VqH9qwJ7Wa<}h8A~`%NE7H)zf^B-_2~IBglo$e3haA`!fzU-!x_DtBs~_F za%I%>UuZek#hJ1-^eY(n9sp!5(65cy_(PGVmeNVM(PF*!%h$*ONf}4Ailli97R3zZ(oBT$c-ki%HWYE{bfE_Ca zV>jFtZJX6|-{yzi>}{PZ8hZC{v&3kt_gu#ujaid+&7f`_NXmQvCeyt46zwwhGOVZt zCA?Ce&M+Mj)Vbw_QV|bK{(s_6{`W}W2ymAQD$wcSv5im=P0&vmt!gW6?KY(Y-R1YP z$yf(2ba9qlnG@y%&ji%P!2bU(Fv_Bo%i8nN%akld*ftkfh9aEXI`My*gD9TpV&6jE z<;jBowsK;be#Mdm=AkN>?R-9-Vk7iFO9&97JPK~>QB7YCv>2fizyn-#UwX3P{RR8w zp`?7l)k3tbV!mB;R;II=LL1}M+AG~(#(&_@RxYJB4v9*w(VaPRWb?2ArRW~G#~GjD zA|?uT#vS@rV&hjd7$#=TTn5Lb;&E{1dfU0)VJHbaj>Ht_e$?i)qT>&MoP6kif>B(- zQTqR|wj}!MV;uB4+8U))5q?gG9}@!qVem@+^EzA)%Sh{##AGpA+WSz3?$xG&*OYAo z0@R^B_y11C+5Gn@zvOO`FA$HdO!TcJ!+a2oO35Z)%vcTfrmdXC6>Nar1181^W)o4y z1i!ZqCw+Q*?!tQz)tFpn*LUlJbIO?DbI%~0wFf0OcVJo!q5sY)G9lJe@Nfrf$zU?? 
zC(cqiC)b)-UERj#oJ5#)Jy6BisKQWE^hECi$h6W`)iw^P4I$*pWOmC`Tkf~4*O4uG znps9=egH+|=a-MD3vd!;Ht%N~Yx;9B_xtw+jYBIohFQ9M@Tn^e+X3Ru=ORh!+);Mu zDG?5z<`iy%2|rb&Ygi$%ww82^!&*RT7^X_pvJ5FZ%|Z-yOB$8iJE;Vmt3F|l0hcsI z-03CH(4>by$(hgm78=$mORQ#_0hm!5R;0bzM7+})*yCQ~9X+L{l2s9**$0J@XgESt zF_=U{mc4MlMmiO(zd&O_4Nn(SGmS(=u&^TiedQBC!)y*DF0EQ|2}ZvoO`F*&pk`kn zd#_*7sZ&h4RF`pxrg4Dk0RDSzi2ju!i_y zQ~xa1yWV2WkZxn871i4`4=uA)32ijf(6W+mB!6U3|N6v+#_a*YdS?{8y)S`;g2-uu z(xX`A%2)*mOY^A_2@<6m5LImy zT1Qvl$+e79LuA)I2Tl7Zl}dLqn*TX6d{X39#A+AGn!f?giJXz_QfMlJAvb_<2Z|Lx z??IAV4JBiD(Ke0PObwz(ODvxFxQpL1)V2^s(sNEaRM^^CETzF?IT{-q+EO+YlsF~Y z>m7z;VPJ<>6&|Ht=u?AqGD#byYdgQ5aKu-FfBdkTPIFE-QFiyt9%@b+b-Xf$&cfkK z9e;1IUg-0j2n5#{ZVC`|1xzEnQBQ{e?4&8PlF_rh8)SUW1fAGvgIdEHlQpbrti6GO zpCtLt_-@0xzd#B%P#OIRjzMr}bGbS9i*0SU_`V0!P%<9&Yp&Lo*&eAvP>acVH=^8X zO&AU3>jFZH01UzaI5y`V`v;!iYT1re5B8`3qx64O;+%#zBsmIbY)h5RT&W5fq^f=} z1pNPVin!JFTSDgyjBFj7udw>T{55hPM8NoF>y2Cac1}^{oD!^SWEBl5WQjoVi*9N2 zu)H(Qir_EFzitW5=Ixa|8aMu(1CCwiCrG-wfC$=sBWOsv8iHHA;^M6J3qV75^a9mW zZ_Mx_UQDM!C3Q)+l#HoV1Hngc1Ww_M!TKk zRb(|^yL^-1(}^{W);qdlcTeja7o|Lo{R5@0_5QKb9GPDA*CJz?ewKySO4GhOj!10w z(1FXaD5he#*X8Qf*kN!y+{BAj>r$S9A7(@aIa!;Ps(rD9PJP4j0^`nF=qma(mAr)fsFR!nc58dvdI^T@nRmL zaItVCA?Ve1#IURmAkZ>4{D|(t(~Cbs;1oE+^?gdtNM9Kt$&I*~wtye#E&ki$mf(zO zbX(8laVFV$vofYiCoF(#5DAE2pBZl^1_jj}CDdb2}y0k4l$$~vdIvmE%Pe4oCsJ{`@`A0N@gaO>ohqdb0OE4-UVt~s3 z!Gk8w;Z#w!E7w12({)&LfU3C2rf$H5G4FyO6(@No^brzuAZDR3uO*|ec7Oxc*SDg# z$N^bW!=jz?(lVh0UKe%3V&hU835Xbg%McE*?5s#$g~DI+x4*J}$*x{~gr8Af3qf=9 zWQeUl>Dh;~lk5g1>O+2U6U52Du9f2^(~2k!i)k3%mc@3I>!-Oa1ts zEe;V1W00l7pCU&jQR><&=7R*^yv2;XERiSU-(;o;!ED_63Tg!a!vZ4cbK8j^D`{uP z`5577fmGenX~N<3c zN9#%T{3pfT3yjNR3T!=`F1cBnW@_(CZ5o-oHxt-l^ldH@mKv;Mt(>X=XEgV-s?Y|$ zOL*km{QZaGjS8jsv}tafJu3zj4fgEH!0zNZBIXcg3>g-Ch*1@olFHSi_OeU;#aecM z%JQ*Qiy0gZ8BV{a`9ABKQ}Pt+6>Wh-AxYB?qvmwTaFLrDXu0Adya0R)F%C32T^k((%=xv&v(X54F6#!Q!t#P z0I<(U6MH2_?#o^dLqZ{Q1k`4gL3Na_aJWc(EJseGiCJD(f-4VJyUE-H8Mg?@-R&ZO z$p>MJ8zI+QGN>A+eY=I~&M7r2{(q2n2SQ(79dm&;jq&+2X6?|wAp+v 
zE7gQ*#HP(L^?nWdJ%ujMb^y-*5#zxA&#x>#ky2G|#GQt$0jE`FTtREatc zL%e@RrKYTOoKdAK-cp)CwO4q-jVEru_uQ{OvmGclyP)j;*d88VE2Gi@Nx^j6rThyuh|Cxm73%-T@zqMXf0re2++^A^+l<7x!IB= z7vEc_*xyJAkjF}|G%m#e2~?FE&6kZEei80!FONsN2*4VCx4CVJpiGrsjwtJfA>((FX_+i`kfoUiee}Wic@MKV zU>$roKi}X4tI9aY@>1ZN78D738q&W(axeL;ik;+sbh0J`XFpPkxhKoUY|#`d6ei$h z1vnm!T{^`O<}-Q@<+@WqEZj8L!hgW>2l>>Y8_t(xynlH-l$fO~c|xOwt-au~e$)Op z@d%=ZcPoZtWh3+#oE1@KK^Ci%HwayT+dD)`86hJ_&|q@2-SWg}5<$jRO^oT7Yk6^J zC~5%dt0JpB<~&M7|E)(mqMeCOQbK|CK?gEVQLNsdu)H5-V2KF2M~8ye_kvcSM6LDC z@FNuE=?kZ$?_26RU8<8qjp5rFl+Fjrz^nbgu#yU-wh*q2r#gI9;bqF}okw_0?h_JY z(%d;B97-fU4e0V+QAUGJ`v;p-WrjE}0~;-QGq2E{f2Yyf){Jmn6+TD!a%`Gb#41te zcfbBiqY54X9IFal7ggS>bf|Z3mOecuKR>;ef!4GLUg;&jsD(DmZg49Xu^ z?(?Y!<0?C@6QG?I@8^LbKCcurgRkKL?Kq3*>|3b*n_xQ zsU&yv7(9E*HhdYw;0NMD%J11`%ZTY8u^AC}J7I(^dj6gokeu<)Xt}NUv$U5v={2Y5 zs!@(0I#)veDg=+!_v}dGXvncG{t5jO+VO1sg4d1(x}1V2Z+_o7TJof1DcAtVY_-uL zgL&zoRC`59>qUD~c8MEXX&`dy=y=l$WSQv#@T8E<25f;qvf|V9IF?QncT(-2y`^1C zLgzROrrtZdkwaNVwXRDE2->NKO?|OaD!!Bwt4T#mM?{JrGxN+=LW{to_dBP`XDK-J(h8@&}GU&;)|vSUwiM#kz*l(LC_2O9P+2<@iY z4+Wtn^~tGt(7uO%97&a%zKtqeL5Ka0OJXem#^)JwBr>Z2p~$-GxnvaSD3E-?dn)W&(Z}-su0u-438s zH&=geT5LQYKc6V<<}SIUp8mu292sKDW^6Zv{@1V1E?Xzx zN~<^?^|T1>G`JyW@K^}#-JcnfmQZ};t()-GJBU@Iuh4`z!7?P)1A0Ed7X#LTh(pqk zLX}ARE>v+@!!ok0V}7_G)6XocdP0vI61>c*#cxdawZ>r@hl&bqz{(QP$C6!{OSP@J z7bX<{1}x}uhsTsu<+}Eq9i5p-#lPyXFuqiPUNc?~!js^Sv0)TiqL=0QHooc&o4|an z9AxuYJY>#C*k}_|X@rqd+Gp##i9e<-+3|UC(?;Z;x3KeGpNLXTlFWfRgOZQ2ESNGh z)xad&zQ%t`>Roh^gLRv^kTbGU6rL!(y=$pPVaP-}6tKwlj258T2nS*{SMRqD_SPWz z<=`JTm}VbR960GB7&?8^tfee@li5iOJMv}Cw3|0a2cT=w5rwe`B>&L3_RdG@e0t`btHp$m;@y|xku5ic5HTg^hJv{W*9eU(5=^C$Tqhxog-<2%m$#IW4uF!uG*ca8C#bu zR}6Y8kHD_C7yg*$se&zXYt#i{hy46mY~K?9@8nc%pNrR!sXy}zSLlajBwXAptSUQ1 zRW+qs4#eNFT?2`k6Wus196_p{oMC+++C2M3WkO?_th+tlmIs{FmX%5!$|L1A zg;Vn%WW~VBiN&3-v1ei$pYZHhpjnp`rPc@r7GG7jR>Z#%PDC{4`*T&%YV0B|GB1AG zHjS1nyWe__y}7d~Z{=C+s8GyWy9He>1jVYjv)R+;;d_C|E|XSo%BCtozbj#ck~W3m zVMAom#y zfZX+v_?1mJe(Ck(;{L3>c@tgDk0>!luh>TB%$wLC!P3_EsFD4II^{vJC9hg9odviI 
znoE0FtFGW5PiAW35Y!_ND76&l71)!i=6@moeili`;>(70R(?$Ru--J&dS0MVj7kUR z@zM6dx6yGcjo_g<9isq5POZ4AXc{*+qrWeR-q3S14q??1)y2Mr3?OXKJo~oRURwg* zkzIb*n!X+Pq3KP%ZEY&M&hKxC?=zi9u4ntG30W$F&6T0CL9vDSql2#RIwFHLL5Z6uXu7rC`(} zS$fg9%bj_S-%%;!jfBKTjV^7h4XrmmD6it@7Rjc+76xdzQjpPIUmv zBBMYFDD}AQqCeCZN_)J_m%efX{5!Z6;dNlCjKKY(T11lc+!cnmI`LpQ_g0br+4aYd z1N_bA>nbR-l@4IxLNdJu>B#VcH@e9S+JGI4WnTr+> z1w~j$GQkUK4V}+4KFR3RlV6WKzdw~^n4WuEidgbkO23h_6;~>?pq`^o1n%s{mLW1Z z{Mk{)u=B8#MzG+ut9I#sI5oaJG})XXn?+x#vb4o$CAeI1Zp#B9rj~B+zV4)s1#FIB zWA+3hJTEG0lm_9E>UWRw8F&$GF`Sj1YOG_dq0yv8CS(OKv^q3CpqP!HwA}?pU$S(J zTOcmRVP?0F%znwjVw`J+abP{)L(|Nd5B2GdryNc0T^H&FH9yWxf~0XG&&xd+aemeR zbd^bmRF<}DeJSU#Fxq5Fv1L#(k?>reVXDbPx6m6YMoxOj&SJ5jh+@!KyutDHX7gqY zE~9OPII1P7%yYA>4ZdmOUz~hTv#_Zz9|Pf_xYXRlX%G6N;r5RsJP6Vit|LnRR}YoS zb{ANE`aNVd$c6FvmhVIT#Kr+>3|JDY-IIX)xZ_EwJ`zn*w|uChky=`}7TSNm;7mD@ z4lBJL1+1TEy=Q#oJ4m^Mq3}jW9bVV6{aTd+6Tlp6skoXGR4+{n+(G3k-gSJ=Ap0Kg z2J&;TQwt!f7o1c4OAtGQt)?$=qdIEac&*ci-p1k>qAX;$MC!ju52Exy?&;pmI`|=a z%&bhyhnP2}H<-*&x;j}OtAszo=H_AmMk?)(&jwBArDsd_-(c zxL%XyN=t=ZtEks8OXjOm?_xg^RFez4lsm4sSxMhBcwyghylLU+OzdEXKb<&a6H3;?%Q%-U*7ms0klXo|h zwK{gv58jbVI~|_pH%k20HFkclZgDkeInHYXqYq$60~fkFT)xIucdOAhuZ#K3lZ!V6)Shq`%0f?k@^v*%C57ba1g3gfsM$Ty z5zcPWPGY16RqAZi9gkso24^i9j4MznLr=c>gkJ{xV4nv2)P60H(J%Qs84t^JL;zAj zt0P3^eMOD_qpN}Tup_W9zGj#qs?mMd*<$RXZL2op6=t#bc7F*T#pWrMV^apN4 z#V|ZlEv)IDuz1VMv{Yc{?I7|_|BnYz=F?*h4~-_>4pI&fGvY}*ULe|?kP1RefJ89U z6(2=4DeG%mN!*SjQtrKCMNhN}*S7H9wQ0cIkQj~2T~o*I4cO;McJA_mlDA8uOyC7D zk0twGFJ^$hDv!x9Pr}9iCeBrCXl#?|!);6c9v+Xd5?nhsS7^VcSSTImCBib#@l1Pt zikZkGkrJH28-kz<{}~?hhmEGb53dwIPOQTLI(!V#q7fx|lNhE&%l(VhhZ5%uqd!Pv zrM+(3go)JUPMU_Fb zux(C?u2$L#z<*}Y23Ggpa1x=sYXKPo8-axi%&@mi>unh_50R?X3=W48M`@%=Xcc>H z@mIO7R7-9x(a$~*$Qk6AJjBl5XUnf$dI!6*+&ZDQG)Y^Ov}@kOB7Flr&v!{JgbNL% zMw|vEr(85lkh@TpE{EHGzEI@R<_f&($8wDVzKt%>E&XtqLpJPJHjy)DENu=*oXT%B zCp*17-y2;t&oPbzjelq`7ehOTV~m?r*Hyk+WJG539{H*9KQp)|*tm!U0x;RE1l}-N zj_1*uplD=@T?5dH%bVnr)_3sZiWIM_1eQ{#dl6QJ6AfiHC=OB9&n?Q;=lwZnOGVa= 
z?M=>lI3|_~7QGHjtV(viHLh#NB^eZ4O4-8)TvQ21PIJ*nl<(&=s3NL89u@?+c1AP>ik z!uqsISKO#tMYZNS2tog3=AzG)bgHJz7djqhiFKqwG0&+(^_qJ1K@*abZ(mKYWOnF& zKr$B>Q|B`szCqB>5Tyw?m%MLFDA`kB7ZY8$?QH@Rj3xm8xRLJ;XI(e{L&Ji}RTLPg z2c1@D-nRfhiFz+lKfFWgr3!!r%wXfop;R1Hk(6TCR9Xw~&zKsexr@b#nyjw%7*_n`u~Qlh=zw$_+xMNjl$F%^Zx%&Ceq}a9(wAoC0g%+z>f6;T zeHddCp1;b$?F#h-*R_QK5xQne9o9Ke8IKxEBFE9Vz^HszvofQurIFuS2)iI7`9p4k( zh2DU0xiuOEm`hEoTzCg-p~pkYhIC={AQP zVsGKclE*g{Rw6UL-qiMHm_qC={N)YWNgD=Ktw&!b*8o%8R6b^AF#Ka<85UW)Y|u!j zN?9O$Vg`M7!QyBS!VKr$ff^B7+lKxBf~mnX!b<4f{=+@w+rbHTJ1kNOLzmmd ze2%jFRN&#Pw#~4RMQmneZT}q@M?lcsxg)Q&U192Q*gwmH<08BaHgyON5qU7q#_gO# zdc%D@zSTl7Ff2|~;yHKzNufj%A9HHAHQ1|!mim3jvkU}trj0cfipT}sPK(S)BkzbH z2G=1DUL0Dol;gms%R*&CL-Y%hmbZj5ND3}RuYa$Vw?N)3%&S}hd|WA<6u@SJZ5 zS{02u`ypRl`k4_u_Nz1xW-!3~ejqhi23@QMp%_lZx zwcYWrS{`HBX#0mL^d$%c&{^d`xk%a!pVUr~kYv+m7q&}l+6wi-%3;frXg+@DBK+5M zLiMX`1LP2f&(TQ)Cm#_#+MFuIVF(PjX=_*FUdn8qt~pvFJDt_@cMRC291p^LTm^ellX^?CTDdCi&`2HWs0|nelTe& z(D%*T+;Pq32-(~N7)da2)0^LlUE32`tOm$Z0`BzXs`wTf> zoT5O57B~ENh@UC0_D{g$+46p*ZRUG*^!l8K6tIBE%1(7)oyLXquQ?{HOQ-Oumu+PU zjg|4XCd!c;nssB>9$@6B*M?Xzs@O43G?r#N_RRnT)#TfHELNp)=rMK5aShBPl!RN#j z{1DAJe17t7^_f1LG7x321Bn{dUe5VqsoInWU)U(%FZg1VR+rj2LZG7H`FaM)oV20-E`jO%$%*9gT2f{;pz5-@jKYy);}bFJc{ncM(2KB4I1ai&ev) zr)7+tM^r)#NYKlIfnaKOfSh=sZ|d5tqI zz2v-g;9{@Z(%q4|rKJ;am@34Y+^0OVJtcR^W8w$#4*IR3b^&A= zDIb5~_4k=GfCu9%Yes!NP~ntW;8q##-;0iIVP!g#MbCS^C|bToi$q9U|J#Mgd~Z>d zYzsKk1(5=2Xe`nj%}?f>%D@%%B2LYm4ZiLJ1;+646Q(CQmZ-OvS5)0zax9Lo^y&S) zT3c}*J)FizqW)x|Jm3f?D6It|2z{N-aBnK$FFy|($upO@Gzt9r;&Aj?@H--rBxi?5 zR zwN=$`H&4-fw&5( z3phWRT?%mx))ge599#cm)3goUuelX)7G3`5gIrx!_wTZgH-;K{d6trTsgBauolAdRXYrE{F6QFi?oJmF3)q-Q22FOMQ21$+EdIT*_J5} zR+tSY+Lu@FrK;8!Krpv|DYPxOp=hn{_q>*<#u^gzi+2*0!A<+QpoX)WkSz|Wn@sp5 zXmM@r>K`v20In#DnBj}b9wEKlxpWpAwGv&)t?dkYA?=JA2_&nc9^AR|T_^#sp+$>& zjvQwT1kPh?fCtOLmQo9IU?c@iYMWzG(G1gr)|GoF(2-|%cf~eWH}-r4 zYI}Wclqi-MO-D{FWq>~S`rwr6jHB=b>XZAM?yeSd9$x2Fk7n+!|6qigEnKd7hxB{U2qXyPziu;pu`w6or+m@@Sv6n#xkHSyw{7w_)~KDej+)rmIJ?J+OZue 
z%V`Z?iU^c;%MJT0(Bzkymjzju@p{2{&xkpaw7mA04(`bK+pYcM#3^6 z%VuI8VRiY?_BRFCN3ClYXIRB;@1n(sxpyOYZKY9~_JR&K9e`|9R9+^ee2-V$ zA@&M$r~?=1<)6V(&jzGW=4;`^o;3RB>u?y5dmsxbAVyYpo89s9Y9>*Az8Zy2xz@fA zu97|N{3D1s>V|B^l$bGyuP(I+dhc3y;(RF3>&n)pVfuTG2x)nERk#P_x{T>1==`3G zA-0yjIK>Kp`nOFWug7DV9i)Fov^=uG=XJeSk^<-MEK7u|WV}^FUif{TYv(lbC{PGQ zd{@@w1Ju{T2(<3^&|dUpL%1|yPCV$mzZF*hK+7&d`l52wbfBn^aO?ZyF@+eb|9Kg{=K8>mcu=5Tt#(0X?xfM@bBD8@?t}tuka;d%6+~5&k4zh@c_U z;R|1PDLOteY3rsm%Ml^Qg{gnJyOt1^fK^|6W%}o++En; zAZHg!;|b=tkUAI^I{yrkf)QueehkHkrkFyqFsUcIdeMczAww)y$Z_Mk8rf77(yo2W z_5olNqK>3X#xscK6J48Wc}s9U zJwN5;@>1%TAJ_+&QwF-Xm?8KFE#034fOsS=W3i1`lvzaVb>D{_hlO*-+4n->LhmIV z@{(9zpW$VBF1<5uO0ER?!p%Xrn%aP$O&7TmhBsB6i0h?N9Gd?}gu;$piP2B%$H( zPeStmE%qy1T99kAMpejM_`vP?6RGX{&Y{ESy~9AdAEch(g2(gsBa-!A8Y@^Ii^Vm$bc7 z!fBSh5?t}d7LGyWmwaw>sa)E>UQU~3GWU#DfOia)ky>ECM@S`V2E=s+Cf`SQ@0-9n z+5Pi$&M=D_&znY`f~!(J9Iq>2H*mR`*p)w0uoY(Hx6!+kX6x`4y$YIYe%6F~j0syn zZ(jpxL|U6U@i}U`HDTwl@W0kDz7?Y^)lVA%5X8l%x+Qg2&QebdP`4$6px`td@6(FS zGb(~wyw{WUKA9*ibi@A;?2qD}aLM2X?;wly8vhbKI`?yrq8gj_zFN+h~#k8oulGMD@G#rK)#l3`qJ=yBc(oZM@K?ZnnLSKZ@{=dDvW92$K$?Tt$+^>Q?GFhxEF79~2rkkZjH^W=fZ*Zp%4^=P}Hk`_~cL+qe zJelx(UR;8e%Q)o$(z}2^oYk(AeslLd4P2Mb5sI$n_2UyyNQt2Aonax@a4$i68;TmC zTCh9GqOxuQ#k7M!iT?}ks4({vHNy5CQ6J6T8XdBSa14t@r%J4q8lNWV-Qi7eBvaDs zlLu^=x*m6f<&fs60{qZ@bjC+rgkkuVn+-p3(pz=cqg)&A*VMRo=0o*IgT3t*}96)yC1Gs7%Q*BCELFDxg! z^+EtkjZK<)zfp<}YhSB`Vg}!GPR_yBSh#sCM!LCi^UjXQU|wd&lzOw`I3MA7Y^v!g zOtft#fhqFU)NL~bmlztH(BmWnpcviM8{eHd*8#wr(U2uRVQ5A+YmoCNN3KdA1w^Na zhUJJf*O&{Ui@E;Ntd}ze^V>RTob8tnCAExflzlUjSvM8y2k>XeT%@bAk|2AqdPFQK z2|w$ZO?eL?44)1Sg==|qfk^%Xb8-v#Lgjr0I@|G3Q3{9}OMIP<;YPc?mYpmnrv@)F zMJTqHGxJwy{0inVt-ZT@<6bERs&FAJwoYQCxK>^?tiKB>Q}O3mB{h=QV2Tjf;ju$V z+$x9E28rS2kg~6(tg(gevXUq9chKK;-Ac?WK8nHz6BSp1H#Q>54*|$>h3zV?1tBF* ziBZuEN)h(EytStV@2HWMS_8Iwa9!ec2yE~0ARZ)1aSzmL+U4fhd~3#Xi~>L4zDgzHUI2IWW99Hvl+pD4M+tQ z@22{caqa$$IQjtpU{r#+Og&_XA$aSM+O7iODM|?ZvOw(T0_O8t$bY_Rymf9~mIyM? 
z<9zdWyK1_^>m6e5@&rvFXq&fXNeaKBx`ESWKch<_7dRL-Om!@mEHH1Y9ME^TCrlf4VZXY-qn-qoV3K~7}txwQOvOjqHx$*$lttZ*r`Mu2$ zdIO9xlx#hy%+fA=7m=BQaEk}5pV&BiA7E>9I;rnXO)Ph*!k@N90naYx!-T*bjaO=J zwIOk#%McMW0m^M6(+RfFyeCDNjkr2{)>W3>5?_t=PbUX&_$oQk)?xE z)EE9zd2V5JZu{J*%Xpwz{P|T9q6e717u3k=L%2($w``T>j4;xAFu`l=P!`x%rAED% zPjRr#e?U5`kj^pF(K)CzF*UmU<*TC`Q-Ig{1}9SGa8})e{#t4=&Z7W{tN~Fz> zePZSu6-krLCRH6jBqG3xTY{-JFp|r1rIwV2o|8~9l8x^PX`2UwC6qLi;B+lZYt@d( z`<7kmmveT0wGA(l1naDpGPfYVdnFK@T{n_2X-ON%1&B8MF9dS+C5g@$WucDs$d(wh zwa-k|hQWS$I{3CS7rB1*&^f4Qlj7WXbg9Ys<}~lftWgx)Y++(&6a*SJb&BOE zN)q3#ijq^6{15a0-Jw$%S&;&7>3JLlWRlvB$E5=5!=SFyRY5)){(s+HZDNXYu>sxV zhjl*C_Ss4Ix5dp?3<*3u@_)n^Q|EDNnZB0+2xr~K$RA=&V%^<)_(o$mvWQS*CQX1c zJ;slq1k*P+K}YUS=mw7Mr|#Jmp-3MLggCblZZq4u=cH945&PY&2&*e2rb@rAlx74Q{k>=nEGLcceyq8kAZMrhamN+$}^StP?M6 z2G@}(`;_N6&8y-N55ASi?dl@vGEY7y*zX_TDv-p~+^Phh5*>kttRg@An__SsmwPtB z@J2eenQBoCbO#MQ=MFPuy7sltX`4F`5JQV~ega6YS_0#ia7_lM%ykl_6ao{CFAHL! z#%5Pl3o52n-rJ3GzhC0yDxLIf?+yd>iI)2fFaAbypQq^as*Z|9r?ANI=bz=ZATDB& z7xl5XVq?p7E|?|Yl^fC+cJ4Ww60XHRMH3C_utw|wo3vZ0_Arn~!xa|~is)O*&CNbB z^u~W^FmiHFR^i0wt0Y9=z$hX`UGmudm$tNBw?Q1@2f7T@HS&3bzCiHdP%vXcG};847xUSHk|!w&e3*E_+LL5G^9t=S&PimC=yhBc z13yZQIu5~~YqS6wuoM}(&aC`gf{l+U?bOZAj!Bpt$`S@VPV0ut(#6s~3etbNFRH;j zc>t4kupppjLGlnG9OF-(4%}?xB`}J-yHFn+R1==$hg_q|e~^qVN+5~fwXh;4g5j%E zJ>C{bb=SII0fiARQ!JUkK!qNDP(PRjr>X}FF8P>qXG^B3MOsy-)h#T=4qA+f7GmZ7 z!Yu>-MiHUP(62BD?4HL5h46XiFn6dT64Lif9)9%86PvL*NT*{oxQf;${3O|MlY|2% z>Hp12gy8FQ8da=PyJhrI?2W$Oj72S_pn@ju`G>ffdd=b#&Ste}yYCuf-GpnFFNlK% z#Uz=d(I=?_x&3;Qof6mWg5ZLM*j6K*D$2+l7LUGg1G3XrGd@D5y|S*pb%(~wtbG(u z1^q5>0sMIfUybZ4J_(QOK7qPKo!iJIsAVUj1pIHAdv9w6gJe}U#XCnY#)E%r_}h#t z!_TrQH3-I>)Yb!H9a65(o{4ALD}#Sxl$c*6<-DpXa6{Isyh6}>ow#A{IOP#@)UVBN zbF%+GP*0VdsG_zQE7|hdd;=0(xa&M~A&@GrhU-r??mM-;M3!1?tTfX50!hPO@;LX= zopH8@c8ngtCFT{qQh+M(Bs7)fsHLxj89;E@&CI(_^nRFpg)K+7c$!DW9u=v9AJRF# zq7f58m+!dxlKrgWRiV05zY?1?xlYA`8VZS_kL*rGQ;f9M6NF2{E3FFy~Tp2)%l^ za~9LHgJpp>EFxa85bqo6H#H|^p)d-%ixlTNxKSsajW39ft*OkmPEB&=((QUc%V<2( 
zl@u023>YYHenx)U`oSaNXN)|KX>b=*y(8$JnWk@06;ua#7)64Wfm}ET&zI{E%9+_e zE3IiH{K19jFNxOI*Db}Rxv9t^38xGVI1q&;H`mkv%`hZq>*9P?znW$Dg=gie6uu}}$rC$TkJ*&Y z^?m0eDsSaps6O)bmL6Aekd%ic>a{<(v$6PWgnuE`wGELDxN~oPFrm`b%re2MyvoY^ zQg7ryJuhx}c6iw+934Q2jY+v3BntFM;%V%F@J_J0wI+i_DN)dg1osEG$Emol@i-lF z5v|updYfI;Lhgts$RBg|nfhNlc`q0(0zH5@u|1BrF+HgWPdm$v^ZZ5@Z{O~yHo_J6 z+2Q%&l&M5K8zmzJU(xo9c50_yFVaVS=>onZPuBtNCzsw&L&tNa_KtDR*+90P=wdR< z8@I;>ntb?@>mlt!BQ)K}^QTCz_W@_1V0omdOCUPzr8d)W&)2la&*8e&p)RYWWA3U! z8iUjsPba1!*iT2x<^Z1UU~Kps@`2wLN0I+B&HqJQb@|dEVY(owF_Z3qneXnPkG@3q z_K!}Pq`lI;Cv#KS3F=ktE_{ooOi=1yl3#pRqh6v9x=&{2syKb4=VE}pKo)kdnE-MV zGM=fWr0)ksL3$d=(L)`%gKtCi|L4Bx&n+H&=*+&N?2p|XrNKhFF*jmuNVn|#_`EDC zy?hf&;K0>((287iym#F~W&sq!Er`U&g;1avK_Y}Kpp)fqdy+im>&xn<+wHN?*{PNG zsA>H)xJ<>5AG|fK1~Ahok!{A*#4pxio*nF$pU^&+jg5mhma(=j%&o;Pl{}xT8Hhe? z+no8z%mJ z_Fc3~xEc@Y&omt$u-U$==~Nz}_?O0<$!S{j_|ryLLI58#9cU9l`?@OT{jcuhw*(Hy z2wj9c~K;5Q3ZQq|6dv-kY#ePrad%&9o=EQ*D$1eCb(mg6H zca4WCwnp)|d1%u~-u^akclR{;?}(dxT*Y=i=V(VgOb~oH_9QMe&f|-H@HYX;_lUpR z2rxYE@(a%J&$G}GAeQ~jg8q0?$^R-0{T+)X#H~kOsBJVG)Qv^!RXt=I5_()&IWtE2 z4{Q(!1~Bo|f2k}YYjJ3^VdDApxQ+dcn-BSd!7=QRW=>B#Tq_i$oN;Tc5JIn@5vcf9 zI#ED^Z-j04Hf|G5y_~xpA}m)m&Q(`OAy9i*Us!P%Ih9ov4b02ey&Y{XT^{aqhLU~c zQNoDj`T1ZU2^E*!{QDFqMy3x{V1Re9=PBs}Vg}L7+lP76k0ptTF%c@Cp-Jn|OwpS3 zU3_FGhdgUsL>4j@@V5z7!p2AqAuNo@2FV^1t^X}wTT=SCCAZ>Jx-CT--xIFsK$Z=L zh>X%P@0=mN9ayeK>C(HMyihL-U z6%a?54iGeM`yE3`TUk*yO_WmT1=H^ynmX#|Vhq~S`liHssRd>QXq#8%pEk3?3cb`} z7j{`fAu0aUCQJ9^I`KffQM(~YZyFu1%MB%I`ve|hZ zmKwDrNta%d6|%8e9K;32@(F{)JPB7lHPly&x3XZ2)?f19^iX52k^4vpk9<*p+vYW? 
zRkh)b!gua;K-1e}Q6+=C(k9ZPrPMM&z)!BZgVwLne780reqp88aMUE+1hFetH0eDpCtzOE-cr^JCESIuR=*be*nLYv{F+z zbIO#r5t9n#zS*3Uo#mzT^lk!*Ryr4#kt&L-|4?O3($0iqm>;#Lg!}!~rnaJ)TFaS6 zPsrgoviv6I318)IbWXs|U_n~cxmyNXJwox@i11J}w;%lq?Mtzn`Ocmt6^~(8_0u^R zam0-D!iM{B3QArW4zagXP`2R}kiOgMz2HTz;hN>Nw>QJ`k_bp7qB`KunKtWvXG}JZhFK|Ei8IB%Hr14lcvIxuMi0E(PkzCvLH#tBtLZ% zNxsl`UO9xlCjb{+cDmiZ=glYGyV7SF5qhKyt1gf zk*vPP&A*Eb=)B+?pqaM%K7=F2;NWbM#CUD61>{j z1xR^^5h16hr0K3bU%KsyK^-GY^P|&Wr<}!OZ%^xe)?0g{#0a0LQc?gPFA|Q6)|U76 znU3UQbsFDvTtqij{V-+&KT5%7xtBOpE z(!X~dltaJ4>PlxLw_p7TOdgCT92yQ&FYAQ(kum9l>;VRte#WyiS!}=F!zfm~tr@b7 z{v~g>up{oMzQ!-PVdOTpS!IcZYgx>&S}>3YmB>qYeq4mdkxu7Z*a~!#JO+K<@t2(H)U@&gja308$tgWV>l*BJGDdG?ID@Ifc=AA5Vtpn&}|f?Vq;f zt^9&1K(zkER$2UBf|9B}qJ=(57EocZ%P)I-n~!;dHlmC;5K+v9hYcddQz&K!KH)cK zVQtP_o6%hmIfE(2;CUzC1#&J?xVgayE}UJW$#XopSP>w})G(Yr^Snz0<6Pv~0fz$1 zvgWR+ti>+>bh<{yMsIGXZM-GXw}YWUad8K7Vt5=60iDS>b%nlk?G%8c zd@S|BiLpEHpj=1s83a_-X!~v8LHpDIE?% zkLlm){7v&H+)H2*oZCu=0k=3y)bRE~6+B`v66x)^E0ZFbjQCk1w|nv+VwTqD_PoiupEMn(*$ zF>g6qj+;1G=kL}3566Y!>V0g%L$W*pQ$E6mpQe2#X)r9f%Nrs7Tk_g$P1;)4ub0@# z+Dl`8GB^fL*|NAma;}!&n#~o?TW)1h*w3_iB|1FI zXiu2{P~;;i`eu!9R|7odDzABgL0IG^axo(i?vELp*lPb_T!wRDy$!#im7p+U`{-SE zz`BYLYIBVq!4f-aP4brP1KNqCJQ&0?m_v}#0l*anF{9cZ;PCh8_*9^)(p6NV2}55Y zU%5G`^;UK!;<_9F{WdtM7gHuwdQIU0!x!cnGsCd0Hh3E zYNSE4vMVK^qE>J*4e^n>)`?T9R4C&E?^Ae)Al2B$xX9YR_0(gDz{+*3+hI7Tf06`t zQDF!{!9BxXvYda+eX4mx7SqK6CduiFvo8_@`k(PDL0z?uBF5CqpFN~kX}==6}R z%>qw{cz%wH6*WTPl=1r;G$Mn2)(!9>^2I+22lu8YKwH7i67AkX3UQ*_!u5|?trPsk za1++Q5-pXoK7)iEj~k&lFf0j2L_u<8Y7}4nHsxLO`{1`Mg#oeB zO=k85cwyI-0$wZJ>k6MDV%&VESz1L9gG40_T~mc{Y5Uy|)ip?JD(mt$tUgkJV6U+; zoC_}UAmxEHJIOe%*t>G@Es#wulM;MxXbogV@};bJnhV$MHb`6?EGD`yu6a9o&lc#on0UH_8w*fad-59&{gPK)%`qol_FQ7aclFbye1#FByMzq7+u zaQJZAZip8_7>u~(Xu;9c$fK_^=S#%P3brJCX=lrt$DL-YGwhMb6V@)IR7BkZ!L1)0 z1d(qfGo{eCWSiHpz#bjSTP2{}5#}Bc9Upea*&(~UNUoMPE<$&W9NA=e|9#^&CK9Kc z3_}VXa$W`#FACm5-y3KVh~}FZtrNdEf~FCmJV2kuc^|I?qc~4K9mnkzY^iGPZ%y-1mdki*KjnS=o27 zWS$+Xv7#4+HGA6$lZ;$;+r!oNC>_~1z83&w%G;qLZmv5ajQI`IAr=A5}O-* 
zOTf>_Yw=>K<^Rw;G>d2fXlcnLqyI%WUy<|V4WKeWP;|v735w)2>p)^q7CLJ8~FX&xlJzf%D!#*+UrqVXb`kF6q2VkU1 z$O4jmGm|p+ssMOWt5140bo+pjg$`gPPK-H>?X2z2L=&4j%#qn6Ao}|Y z;1k8Up=>XvTBQm75~&WDvp;Fy&QOI*&$-KMZjNm8vElA8PU4tgWro#dFhP^B)Q0!q zksCdbjd|(lpu{)JITlMXyWL9BrU-gRdLJ5%B^#FKmy2SSqg|rZ_^`jBP6;d;^&+2vXg zA&r6dgSEyHJT71Y2L;1z5z(o?piMoGEpSZeWEnPqpo>!s)%OZ69um@R&##=cQ)%1> zJezRSQ1ZwUY2K_N_uyiDvA38S#(SWRd(Xgr)R&a|JTF588h^XuF8IYjX0LYdO&yYu$6$gWfG zzR;i=)^ZD)yNf7c8HhYlhi%yzQz{C;1qTztYH|dseL?obR}Z3lcW--~5>Co8x1=pv z)H$4##o!dqUr8@XhJEAMl6U!c%3Y!3u5PZkJ>Uw(M5)$9rDFeg`qLaPKr$7SYVx2! ztG1AKm$>CuFt;sV$59UwehO zbfnjLz4z=`<&dF98{O)Vx^8h^AVeK(ZJ#F&Rk%e5N0zLJsSyzi1^t?t<&pQyL;FGS z3MZB?hHDHS(A@(pOPT+KFk$AXTxD1zs)UBzr&`NdcU1i#0DS^I1@k*6`fW~2l5?Bk z6quBx&U1wVP|oaR8`y*1iY-%Xvz}?Ka7?X3zrJNF96`OFMY};Nt~+UBIOi8c^?CZr9K%X2#!ofea!~jOT-q9 zy@vy>nbS6Ep<(4N=ew$*F@valmd=)5&W3~X8&&So2(E&skvigdykdz? zL?xyI++uqbD!J6s%AP~rfgOkY53T7qI0Fy&p)kKQMMxT{;DHvo+1RK~+OXX7_XYQ# z+KoPzJZ{O^QiOE*`Jj!0gqi&R2hJil1e#umCma8pIUrcK3WIHOxIJc6W3E*>B3f@< z2=|x_YX*{`5J0guh1tN_NY-}%D~d+RXU)@AMI>g-tD%!YhfGTY@XKO&i!aZL?tAb| z;%+j1u+F5OGCW-C+2xAkMSfBIigXWPD9%1O{YhxaGaBkWZxxZbr>)SmZg!rgr0hg* zDfGc^U(rEqg(N@Fqo4O1o)pmS$y7|z^^Gg4Cmn6zTT@= zcol+?pySE|;G?nk*pKAf^LS={Z`E!+Zy3?MdKL0f>^zw7b`G9(vf8~J{V`&E0tM(r zcr53mrbHo!5e)KXt)2>qD2e2JC<3)(F#ij6H8lNM{?GRVMcagUUZ)xvBi! 
zsNe5A?5t)WC>CM^koeY(bD>|?S|{MeOOP`>tsN^Y4E4~gO)_7b0Qzx1!}N8E^Rew1 zN+pAV|0?iXc`9=X>NfYACn-NcLc`WywV(l48mLJ7FcaeEaZjxyK?EVzhi6oc{n}&6 za;(*cP9w`+D49Uf;&j8gq5#-VOLSU5MSh8dJzJPggHHRW!%*`OjeR)n zu!>DqteE3XOMMfwKLCtl?NyriciU7hAXj-(49LOGJ8h4;F*DbscS-^VJ4k55!;m63 z;y`uPHXb%q9+5$VyhL}>aM3Zke)WJX*iG`+@3xM6ritwP_ql@-{ey<)34coV(3Vs3 zUuU`Oz)1b*EM8@31I67kSbW;HP9DAbZ<-@m1N;YAVw@a4(EmEFyqo5q9vQHy+$jx8JtwUbCFW_}i2cffR`twYEqR8GD-p zwnPz-qVNce;Vf@P?fKH9Z$EF%3QyN9qT&64)yg+KM6)y*p$sDj(+4nAr9vyaW1x?0*ik53`9e@RIaiGBNiXTN^yw~gSfj)UUNpb-tS~#Tc;XqV_3!vS-(bWJRqVDoEh&v4rlxL>a2n>;UKb( zy|#ZF?57T9nR)=kaZnIG(Ks5!03}lNt0IB7)s-ta4Y;m_poby#3yGwZXE%qMoJ3!# z2JMcQWQ%XgC0CFHPRF;oqKchL7A3iMfxV4%n&%pkHGwi8DHb6W7gx`-`JY9ff6n+3 zrWKOlG#3`8{rWFHJpRwkm6(rRn>8*E1!7M#6R_Sxt$mE{a1@utkAV>X;|-01V9Prj$GmpROa~XmK|<#r zn#Vf9;JU8PJj90D$-# zsO*nbS%eshTk@l%AT)>>a79>V_zaM3-2O?VL5%e!r8vZZD6w)T7B`9pA@pT$4&|V` zD3T+bTOxT{I&(?R{C)oCZwT^c1t6euJHI2h}q?z;)2^?m>b z^x^6DL1P=y)snzq-RBZ0fUaV+OD^8Rq18h_2i=44mD9=l3-2jTT6VM zmQ>t=g%*kW1DK#6K00ZDu*}3!#044f0K-Y3HlbUNG8x(d4C7YcTc_A{l`$b#_RKtw zhbq~TI_!bL&FI8pJrV3AUuSd8b>;RPETdY_qVcL}Wy}W%KuN|RVU-ec<90ZNMEv-U zSZ*1jG`wV&k+L7#K3e^73gqM#p;K$$(}X#;Yu1XT`5O@SvSieQ84`DHJ#Jp*pP_R9tTq=zhEerB^l_dW*1Jj25QxHFL z3e-@xuX{m8&@x}sZ-`#~?aTE(6hsf}V*76JHEl^iam$&A+16*W$*ibj#Vf{obcqg! z8US9!#AH*n4Nwqb=pJIE@75z0V?%UjoUA4`H1m6`Ucn}CZPUbbg0rM}n?-x3$s&uM zW%wl5i}olOJ{*7=)no)F`0+Ly60ei6pZm;LFn?_kXWd6-olfO~M>=Wpg)=DbeS@uWz&X9G-}&48Lz?c`bexSd|vr(Uv9lQIP;D7gbH##CEpB)vFq! 
z%mm9V03|#)^&vNuyA19aO1!3von4Z)ah0M1cDq2VdvgxV%lP&zRLC{;9=RX5hn{#C z1dar5aumC2gE_%D;EEVW{US|L#(Fl@D4dwLcIkWALMNx}Zl77VJd3Fs2KyD1px3)) zQ6^G~ST9D^d;|2EzUlY4;w;!*$d(m4(HqY!hXWR#E*6Fx)g6>SS%f+`mk(SV1YEIn z|6QEA5Er;SU>Qz$>{V*0V%y`ymzV@ZTim0;zp^MRQKFyGdCUvFWLV!nHNs3nrL(#y zdF|b1Sy*2)boYF%@ggQlwV5xL&?R3#xWx{bkYF4O`2VtG#W{E&Y}ZL`+c8ZNoB@pt zgdflKd>R#_WH-yEfVEGFgSt1j{BM{q3=R53X$A{b?^~Qjk z5|8g4BCkGmnb0~x8v*sF0DY(01pq3pafx)Vr=+o&cy9>FGxeWz(Ppq00jo;Jk~JT9 z9;THNWk9T%JCDuu9-zpp`7Cq`-ZKcFPZO=hKCK;cg*toE+Vy1eNCr4Un7sI425a&zIU9r5EZwJssJIOcBCg37x82>KsDNA0tK)F!YX z1_t;4eaB9~=C*O+^D!c4kRfm{3r6&KQ=nuoK4M#seE_jB?`~GReEAtth&@ad%1iLF zwt*GE5`U}QV;+HoTH$2#fbn=C%e1o*ETQifR8D2{aj4n+>+l|L!g*I+ca`_1*;tj? z($D7s%>$EhkClfNxsDUE2xth8(gFBx3UTGzAVkm3`l%wek3@^2K|C{78p8 z&ScP$akY#Bq7dRbQi_A%RABaC$^>Cf%TJEnRlSrGa{6r0HGRt8@!SEW54~DwLYpm| zQ6_Q81A)aNCa|pKe9p&L9~l^1XB1G_#A^(L9RkNCn&iK5N3x2U*4FB~XA_5w+c z@Ky#owt~h-L%1yf7V=eu413Oe=Jp@#UN64pll>R++zQG#@rRWL+l6rdd$=k+#-ucR zGtH>^HX7WA#NV_16&XGm!Z&j$})RXM(I}Ym#uR>4ri^ZCPGLm$gvuCiNmo zTboSf#a_p8b6p`fYwQe>D=c1OA-KEQF|(K06I@|(U}Fr>MvLZ(zR}VUHz(PusK3Mc zmcJ;Y-*U4N#_wGh`>)ps;5$m6Mbo2C<*T9z+U+k?m}lFdJ8AIL*VM$%bh;FXPtnS3 zbR)FF9N4`y=rzX*Sn(>Kj_OLoU`57XN!ml?VUBEsKvvlxS7tGom*o$3&4JAkwp z1XLmZTSBu){ga%w?=H9eDGK1bvuetYahcR4!f);uWuXMkpx_vAWjkdUA5K1QGZaEG zpZR2&iIa`NZjObGq=ivUD(PED@o!dm1hQ@;TW0XCEWLMYJ!(V^Q&s}jwbyf+{P06c zxcWdZqU@B`C23{|p4MUK4&@NPq)zmjZGgfpKh!2_BBu5+d`HPp)@|tO@cB31c8iM_ z;Dn`W2BpFxDQA~aL;L3U;4f;&9KYx{WeKTtNaVJ&NDhjf0gEfvg{CKN)|aRB54dC2GNAURg_nhy5AZKm-#LaVixaW-xiX!sY# z30J|NlrQ#W3W0ku8nOB@z@HN=S!QEgIEi^eTG4|iA&=@qOjjyCu!}CiFNA%-J3Up& zA-cdgpzk zVW#aXby4l{#D>hylCM4tT*0$wjRs{p*$N#PMtC1^78H3 zNqfjzK(bP6G}Nn1XIr>;1CQ^_m+NR15RWMfGO@a`Wq2o=QBiJbJie1E&8cp*7GP-4 zrA7{36SGONiAOkt8v5t%G#w?eAcl|GS3O5&A$0W8N$U(AYRHG42fg6+h7b_b!heAH zI<~GKwlxRUYvYd;Qe`&BctDr%7xls$Od(>Ck{z*u`QQ)MJc5-lG%Fu^R3D{S^2os(ad zrZ-YxFCno`<~4!&p7I5N-+VplW=~kT zKo^ypy*u(!QUFyLix!cpLnNu&&E$kT>INx3 z%lvmIt$-i-Ak*XQQj5;~O?j)Umf05`Z;nToKi@QRIx){xh%Rdj_LquqK)i`U6^QL| zTYjkWi>_mCZ 
zeR>fW0=z!$4!@*9kBZmFvD(m$Ex3)7Vg?LMBHcln&OTopg0$6?lM%A6$`lPgY)%0Y zbL*eP?0vQSuky<>~?_^X{$%k0XE1NMk3IEPO4p(lY!lm60+o~Y0 zwdrtj(7W2Bj*ZYI;FEwKmGRpxuj}PVC=aJfhD3KBXQ>uJD!*k-B`dOlhDpOOCBAv- z&+`At1SmUxi$W@&VtYEyoPVM&WG0rFtyRdaRG`0G@^#6JHgFb#??yqud0FPkd5>g04m z0E6V#LUrtS1-__y6|~qaR99A1zg%)9`l4P zEIT0a?A3Cc4M~lGJmmC)4ytk6WcYx4phd_~P7)StxXB$EGn8w)rDRHlq?fNzM?pgIJ$A&d%!k&2Cjx$^T={kky2OdY#W#d;l=tFuxrPqn4A#z z&03a{pSs4zbFHS#xxcY%N=+jiA!_@z#vjU?=itItxO;~;yTwehRMC_)A1+<-xm#OH zc=Od_Uf|4t?Q59tH+rvy0ddDq*2k8Y!P(-Bg?HV=r6kzf_{+j6@&ZZE8Xvhn ztJO&dD~61hNi@F}NK*`QaZi{Nq122uuby4heaC&FAfw^0E!LSImqC%l3#{Ou_19g> zhNhg>NfcO(DYzMewjK7Ra$H54wRwABLdPlty}jP1YO)%72MT5xehl^BC=U(owL2}D zk-`-XDm;^c>Qv8b+H)5J1ALxm^(!2xgPb@sx;#yRkVF)xoefXQcH}Q9JchDyc`l_sloS)ZaT>NrUX|AsO@P7Iu zKrw}vH}z--|ML#5oI;43AkxCKK8t zsgC-%JZL2wI}tfr8lt#s;J&8?hJc+_kzzlvMI%5YX9x^8nZCD_X;DRfX0U zK|+D*_n1_ny+e91=*uI+=+PdSUiBEo>#g5AXUG0m-lGFi(+F;OdpE$oQt2_(N_{>^G*RlJyF z4yGT$Adwlte7c@E6w$THGO$_?s@c~+=DH|l{@d>E=AT)bS}uTDo@SX&g@#Tn7oRbt zVu`vn&)?c+XtTnh%db*&cp7g5FaU5wK!~|tZ2L`Ks!EGoqxgc>G}*NM zFfDUp8FutWCVTm8Wn}k(BlV6{PxP-A4uqgb1>-A{M^G(%)+s}8FN9RsAcTcJ0Rgf< zOJA-#eq2+O)vZB7Pi7<)HhQZ1vL7YNpVsz@|6)c1FziR@qVoQE)vlU>LK29%UvCG7 zSGbHUIpdtCX33RuXFU>QA3SqJd@v*fgrkU!O*|@(TEDD!UmLE!RaX-X7$+J9|0S9u z{KE<+l!&L5TMk(%o(34Fk(vYpj)Q)U(U%3*rXQ+bUGA0&|uo->FcKjgB`cpsBw z(l~OyWzBLcPp=_nThKadp0};S{+;l6YYRAUvqylc%@+L|2hOEYv1PvQ5+TNKsYWot za?xcpNBL7)l>>MPFLs$k(jH{b5nB+M-U4PxMaac$7QQ(AvB}g8dP}^n?Y6#~h=H7o z;$W{9z|2)dlL{5gJqz)1J)DvWN>)_HpJA&gM9RU#D>{m`4kA3|bB}p&HSclZ#7$o7 z?vp@^3+Q%UAT0pNNK&B#fjeRwV6B$A-QPHtd` z>OzV^{e`Y4SH^8t$q8L@eFzDHkvwpN=}#*yyRBk^Y1#}aOp}{)PlC?mZ*6>qQnqD9 zWZn0!(Fa$pS%GV+Wb5tk{WK-&ZIv?*Zj2Lq(x%!4M;#uE<6T!oJM9_G79)U!fQ^S9 zkbBq8s^>sO^W56Ar)FYD%wY?l+~Kc(d}aAc3y%6l9=vtTMNi=T91>Ap5@sf_z&N)a zf#*W6%UDuH?)Euk;+2%@@RkC7fe(Yqk5FtCK1qqw*W1H4dd30m{hAIY*zA0L(mTH@ zHaeYhV|X8W>pTafY-WuH(p+#Vl@x&H0bTK;)bB$AiC>(YK2XyFriKB)lWDqfUv{c> z%9NEMxwdcyhJ2>`_yrmb%J9d6h;D-p6TefKqJKkPMb0<%$$I06-_3g};04HyPOPVH 
zuPE$X5QYx=V%TE_I7hZB%_T!m4L+i~^I0wp_r*%~*wn8b;oIawYv?8iYIX6LPo2Oh zX;)!zg)wNiSO5Z?j%_Yvn3zcNK>_0=7@ARjXqLTM1tE-pb3T`_axCg==81XyM~(ls zRnF#Tb39k;;ObApL#UQ);H~jm0hfOi7kEC8AyhWomIebiIwcJ@=d93&-6tnK@(df9 zoTatCPSQ~N{>aITvveywrC&0;pt^EY$M-kyQ30G4IGsN6QAOxz)Dkxz;zZPG`|FsK z*Xb3ylqg}r6PLa)YXgTvO@x3&` z05}~FdTU+Y?sPQFY($hJczSc!d;1aO{Bk8pU__8*nKEQB>nvS2|K9=GT>u9orO&i? zN?b(+CpZ!ZMihRk|Y478CLP@VORB!&dfwW zmuOlKgsBVfu@;*|aP1n)v-M4YTZJ>mN$fhJLia76y<_<&z6-cQQyBmPx~+P1JSp4Q ze#GSwHI&7dZ)vT$=>3Kb^z#g;JPRyBgfI)7a>r&E991Cse&_NkMj_McYeyNqCT)L6 z9W$GAm!gdYh$tnf1C*VU+>MV70pVdAH$v7u7EX2qM;-g=xvXK^S?r=6`zz+xs)pd0 z-f>4e(SSQu%unt)vLImQn6uh2E3>l{KSUo4TmO@fZQUl;)xa1za10L${%C94%K$$1 ztgvhJ@lAyl93`_G@0|=Fdk~Xm8GP2*ILqLad=5{v;ER`)TQ>+hvFz5*Mm000ZN8zz%1z|&i zPJv4DTwR;m6*)$%^@~wIVmQA^sp<{tUosPZ1!jNSrw0g#UYc!qvJOjR`gSXLcmodK52JWKV>SsMYS&J_ zr)x2h+hkj2G3^BFo6$<4Ve~`-SD(uSj>#n_kxvcUz>U0*M4Frwr>L$Xo{XZA>}R}n zlS^g@pDh9rJ~Y(f-zqB(6}nx=S1((y@Tp}2CX+iVl9)$yCbaMtmy6YM)6GJ3VU&C^ z3iTdi@U|EB$N=lB_^M7v;dwV&$9T6WBk^eXybpkg5U$<84|+0L;|- zK5+7eZo*PNElSEjJGB&c#|> z;1UA=)5GCT@7{h3$}}#T5O9zzNibxTUsg}zSfm=;!6>x9+-WaX}m+M`b^hBAkZ8w61>22-xr8-Tk)iP|e; zF|$YI-_%UVzZ0R*ugS!b9K3~l-pgZ6zY=Shw}^1^vE3(8yecNm zRIh|(Px&Qcw`iYK5u#Yb&+w*t4nMRm&&-MK!9(XOx5SdrOW_=^Wt?g=B)u zgsZ+4>n;o4T|*4)K%fuX_mEsWZMFZxuykxbkBnXMNL+fEc9q;XiKUY`KB5mu4FwqV z2uEJetovkl!pu(4<{P8qCUs?Dv?H;{A;*IbWF#?(bZF@T{;KSHFl2VTSHn>w{wpDu zpteBdm6opMcvTXfo5WjvGNR>tiRv*=t@UtSbzl%V0-5Q)Cjr zMx_q6h443^CQxt}McJDI+nrS|HGO}9=T`pJ)ZO)=Z?p1UiZeMk&CEcnmg|&wJRY~o ze!?cmC5+B4@?pzKThFJF<~?_(WF25>Jk;v^!G)N$B`PAVjVU<(WC}%V0QXPFLj#fj zrw?XeVZ4h`q0((^$rU3ptoj7J1$jeS*!5{egd@1V^qCG7!@*0?p5|PUoY|u4(;gd0 z{?z#`%M6HYQ%22x8m@OQ1`TcMuQMJV9J0OsQhst}2XDV1pW#ZjIo^F&8Qym7VRPig zu`rp-wb~IGPl_4T)Ms|8?^ll{!?v-|wZ;WLJI|conyj?e*7Rz{&oF1M#i)a^XUzTz zPXY=JLsQpp(31Sys=8MU-+S-pZe0Gx z_XHgoX>O~0z6h&<`);pWJzsqJNyi&;+E_qmL4Ka*RSeb74%$xE&}{+aH)c)` z-voUx>8w5wE<-y%X~Zn+_T=SS>_F;nc|@RXS6eXKFpmaMl1Spnq+t!qo(^pbK^5&& zfb$}s8V|Lk3(>jK5?~M6z1&%@0UWlAG9J%s)|4zkZrxfuZD%!tl0CiBr#rYl{*z8+ 
zygyN6J(g_yKT6JaqRI)`K`J zC-}bRBaH)DvUHWM-it8Z^EIgs)RkImlG9WfwX`7OSV0?32%Zc5^Q9z)rIcyp6;M2( z*(z6(ol{d!IqIYSQ?gc_Z{pQdDyteVanr{BYH$N(cz+2k>4~tlOS{Ag0VOl?KFoA8 zdgpg84=5nXj8x^x=01khsVTGOF(3$T=MgPSpwqvd{Lt!<2-_XOXyz#P*83yEo?b~6 z8I1y3-Uvqi`qe48l}F4LRQAix3-NwR_Ys)v?rN<DTTE(~s{ zl6wg#4!2^=Z(0zKC+9Be!(K?lMawzsDeH`funN85`~|NLpj==TE##0n4>xPpL3hf~ z#HwS*K_31hD%u=moi39iHy=r7{W6zeI{@7BfVM_CKkXb*q36tkUOnnk$19xRq)DD3 zEo0J=69SAuK?|aEl;b6@5ki1aQ^yIwM?{zpWBg4w7bOeZ8jC%jTZ=(bns&Y92JFsa z$WG2?S`talX?)&9zH41?*GZIy1mX0^g|;QphqMWHF6(Z{R;7UmL2otRto)QN8BPqm zSVvyxS_a^w?084Cb7vOP{gfCo$c#jks}&oGBKEjAs7xN_jfZYf#N8XoCxB~3<-sw8 zHre#c0?9i?>sEmEzYh#2NH9cpCCF{+aC>Yjsnw)m#WMoWS5xD}ODSY+TqOw@bVU8N ze7j$5YYJM2ToIcVvL7&v3_Z{FOn@r;V*rjzaa6Wt*zV*C)0AUP_*`c>j!wEg^?VCA zbO>nskYfsKd=C`}TQ)w#IwrVy*@FD=81ZctMNn1rNPh6oh3R>4iulp!Mql1n5cUGk z#FJj}3wYw6_4Fnyx*)*ZMQ%P(tVr(Qu@kDf5*7qbPKGeFApgV-2HJX+(xomXp8*o0 zxUeF8rF}s6&MA!F@Ymck@C&>~sWD5^%~%c#WzRW({O`E0q$@?{w zM9lbyWlye5ZZs1~4}9B=K|aqr`x=Mc@@|DOvlK$rX%$$gnx@hSR=u%sh02D&vk=KUo7ptN$ETe!Vc?4j~bdO=EvDcP8h`a~cb z^oE3S#9%~6_m@@`3;zB&Yo(kHvedZ5Z>%>NR~T+Fm16 zrx9o$&A$N_Q8?mal=^|RS1AS`6%euqebw4JFyt`?ajvw1?uQ3-OL{sa$n{R}w8_)3 zSOh+nUtyy7E`)U_-8tzK0!{}BZ&QRGKha|i^5;CvT~KMuo|HrraGAV+ z@`BgN8G6LxoVQ`lmQlKH98Z!*NU6$<&>nSd3G|rOmjo=m`kM22B3Uq|@0vAC+Sd0W z2Xr?{xJ&u_1y|Uo9*Z}TCH-`QTp1Ep!ajjfry7=W27eB zVsymqPf5^}uEP`C!=$RGycEPk_{HI5rde$8oH^1&1GdqKvf5zxoKg z>&p(FvRl`V-#4R6%sSz2X}CI`ETKSRnEenr7;8{hhgQ+ZbjEFWU1oD)I$UNwseG#j zziE$2imV2-N}YCe=S;(PX&L&XO2a|ME#4?Rm(q`%ze$%J#)mz~zql;F@c(=5neWt) zfe5?&D9IeGgUTIv4cWDxw571&+cM*kmV(tTVmhOz{{I@bjhprSYmr3to3ybOO5Hj0 zKk~FsBq?R3^zBJWCv|!bzWxvxue5ea?(O#C(ZAz`0k3EaOUsgEHc$wCNczquIipuh z%pgOGd}?4fGlJ7K3d2Wt3~s{nYpmg4tM~!)9?+>SXP3C)Yu%6GYFjY0ebaM=vsW3* z;AG$+KIGqnX>Ax|*`&>L3T6u~-{FW5doAJEg?m89w%twwwwI!Z{tI{gL(SXR1psX^PFRQ3=z80rGsy@lPh_@{xr24T{k6Am4#G zLkEe+x)_yF0r$W7GkNPe)ZhsRq}6~j|2M|py4#I%h?zk7X|xg9y4B_T(Tv5Do|Ya! z)p?a}hgQggQni;RX-#mNIP%aR&oE8T@PuJ=ax+0AyiA-91k!R6?2U#5KE9EJ8!B3? 
z#{O}oEX95U^;8fabaQGQv0;^Ff4wt7sYsDA^4(WQ*p*zp+xOx=Q*RxYeP@u$RCLn1 zmGdkJ$}julG)xc}{mDk(7py_Le^(L^pq#`)xX0an3Z1g#XjJm7{Qe3DUIdx5A+WQj zG8j5N;T&i#YPc$FZjugQ7Gie8xs|%>7r)18>i_Boxu zI`8hCTDtJnYt1WKmm^p>M;!g3UMtu1RC=tha^zup<%anr%;dX)<$7Bw938t%)6 zr!~3WCT`sbVEdHW8fCkWoRAFK>;SYtR(4~%kT(^9A~TALBs;eP^|_0F0Wu4)d5u$v z;qaNKfO&_o3L)k9`jor-7_AS+!Tk#7s>P?<<}L7_Vj>X}~Y5O>$+yaf~yj z{U9MeB4lOaVoW802hCmounzIy7xJy31H;2N`tx_o2{dO6lD=& znp2MpayslFeSVV6_U&n3x{vlyDn6ByH<3%EU1rhgg!7%m7zSI;bHg>v@$Dt8Lqx@e zZ^1ZRzWdYR??}TJtf)u>Po+n*Gbr3=2*{#JY@T2lpiFwhyFmpUGkV`E03IMnr6FFP z{1Q?|<7d8-kZ>*6W2XZMuUzW2=~5K-$8ft<|JKN0AV9r zyN1$HU4fgGoyMmS1#}dn*rZv&Vhm%ekk1pkK6azAwBeM$uspwpTPe60nrH5D54nL{ z2x9rKA}7I)ZQ)iYBK2D%;8pXRt=;WM;dodY#(vbC}3(JtY z#pZW3{J>k+7MQKdH_x$9-aRcss;5%0ea&~#NNiN$i_gib;LT^d*d zkd*;j#>0CNTbO~Ve3!UK5+w`Qv=egk_&hFZsAOe36dHwm?qf#``9sX`M960BmHX~E z4}eX4A`WATl@Mt5GDVeP5qTDs0Uo*2)JG*}c83L3E;&`thhxL_l#IXF)XL|}a(i`yb)3|`l3L~r^R=4Fa zZ_surk2;C1M#J2$H{tBSutzfe5u`RL_v030NOl_x%gkby1mLKcl0&Hv+|rdOH|%Z> zgyp1R!q#oPBroxVnog>vuOb}P`gU<~C#VBp!xfC86%1eb>LQf0o>kxO)G zEpqH!6bMI9$WLX!>ZgDI_K)*Ij*|*Pz4h!rSrMYgov|tH^L(~=x<|9MGR6q2<-Xkk zSTfq0XQ`ha8wmEdteqk`eUS`#*m{RY-&GLz?Lk?aGoWv4){llt+?ZXmQTzy$qt7i= zvSTRStCPABnW!<^u3Li)F1KRt3c;K}9n|&WO?y)v69wqccagtC)OD~lW`hIudHiPU zY(Gix#^}G+o!7)3R1?Ojas6lk&3yZ8u?(dni9u#w+Wyjg?B zXvyX=}(&8th`oI6?TUtyWQ zexhUs|BR;O)tE3DspF%`cCky}a? 
z7P{I&L{TX*u|gV}rB=XNBhnS>E$!`UUn!TXmm$kJ#ze!ObbgFsV^XZIVMC52aod)~ zdy0T5%y8ivx_z0CcHq=uk|O4$MYD(7K|Tl@nsYk5iv_+r!L)`;$A()={P~ivD3!)+<28XL{;p@hkh4(levO}EK@nE6b7{_1N zj+y=w>aDwBgTsg58WUw#7HaD%Uw}mnL@?s307eTxC)D0Y?X$1gJJ*`;yu7^K`bovE z+ckF%RPoNa$`D{YxJS4~H63bVx)bh)onpX`^VU3s&X7HZ&geNx?=fJD*~(~4RAW6q=9hT4|7u+AGQS&O>oGV|UP z#5p^l4AY78kAHkl#cEciu#Z6!`R-PbN%x`kdj`_rtVB2}k^|ARP@*a@QOsoSH54|Y zvCJFQ;;L5^GLv|Tt(&5NGAt6TzK+3G#3ny{ld`~O&o3_DDPu?Iti6G}DWHS3D&)Kf z0gB`!3&jw;YubQ+(h|CTZNpd2GeStpRUh^6~pq>F>6u#aW7&huq?22M7@6c=&koSN9RxOP*EWZp+bT${^9+=}{4u*Zal!k|V8m7|o zSS-V1^ih^rG#k+jW8y8<+8TEn=(Q2cmYIWJ~K&Rr|w4j%fN#R1l5A_!a}E*Emw69p1YZ^Qjm) zUiv@>?u$l@JsN?sounfi)amB%pU=fB`A9T&K&?^bPr&Y3&xX`(&vt3Ox1R-hx}HG_ z!zh}LH5Cn|R?ke3$(&-{waQFR6nG&hW(RayLc6=7Yeyj`C151Fc`g?JD%tcJJSfBP zQS%UzfsniHlC(yRg;0k0i%E=Z`LU`h`qu0)+Mp)8kp-BWCbfP#peYU`tKYr<)F5Bdq2MFMe zfDet-V?e^RMF2$rrc~rWW_uux&HFHNDzi9b)ZT_iBB`=K!&v#pq};~pT59~-_oFk6 z=TX0e-y5l~2wmW3dHvn|p`?5cf?4YEHANV}wq9*8s`_8h=`CQWu1UW}{9kZfjQ}`O zCM5}uXU0r8kbuqxY12E}H0sO{g>D{;|kk47a}@-K9Zf0$>gekAbFY zWAfehFp(rG=Wli~M&U;WdnL&3cyI3hwlI3mjNA3h0my(1)6p+_l9e}YE)N|b7zmPT zxVZB41K&UZ7gnAOdZ{mdVLLLZ_D);kH-OKk5?Z!|NsALn){F@AkS}69_bc;-;Ge$S z)#!9;To1g05Ss11pc%sNmgT6`Gd~%|>mKV1jY`FHo7t~n7pUyU&xYdmLZzGCXaw&1 z_UNdU&#iwwB(U|y)OjJsYGK&z_~HhwTq1-;gAJpITg!a1F&UC+DX*a(kQ^b%gw=8f zECB)(;`=zPC_1_p4Qy%&{BD&|&u+s>K<6Al0ldMjnwwl&$Y;4P@Hq#MW_-FX2%M0H*?8L>zRPP*?ML9^_n=j>#O#0l3A$8 z`+fpvL*y>PLc8vcQ6Q@u>qfBFH+kr*a9Ja}(V5@g%K=YpmKb^UOrp0stEy+q|JsF2 zsL_yumRI-cz;f}Z+YiD}&_9LJ&AW+|!LI+s@mRd}6O%RhaLv{}AdWQIunu0F=N9_F zRkMHWNc_R)STXr$w)>Sw*~s4mX-Ba>hOpzZPh;NNUi+Y56iKmk3-0FcmCYixc^ASS zHuhHz0YGJ#7!H{AQyvMrS~fF~7i;%=V8W@njV?JjpuePNq>^6K;~880R`8p5!)RJU z<1q)y|J?xx7laN9%B!w2-bioTuAM< zD_iCSv9n$d8yqoJD*$%~c1P~cj+oeoQ1XlD#LVUs=oq(mg_@Aa$vA36rVD|vu2pH32!aOF7HuDQm zs?SDt^PFB-X$pU2YQm!<4Z`aTRunucsb5@RS{Q@{lUA(ZaUhn&nWuIBm~Qz1nF4-v z2mkq%k{ivFK>htthI!Ew7QwRZD<^WUpFWX0W?RQsnh*YzTozANQv-vWsU&zGo#pD+ zP?1RBj7s)YiIFYkHm0_Adpx)Qa$T7yq}K#`#&Vv3aqtHS#~ud~Ap6V0)t+SoZULiU 
zF6w3T_#iyBMSG1G^Q?15o~^H4HKgBnk=IfMasdp)&OK6wiWCs0;!(_`ZM4oQDjsFP zQbsC?3+`bc%{D?rZ^%fsS?#-tod|p5*g6T_2q^$VK)k=(Q=3e3h!e?roU`xQs}6@X zrED0_J!9JSlz$quZ%o(Y-F4r|t zAKwyAtaU%iLp`8$ z0Lsh(ysXQ~(*9;53=*x-c@vfpc(SF=D6sU&un2 zs$-s&=f#{shi-!CYtM*G__G@bj&@7=*m@mOFcCF}?GMLS@+@qIXbgaTyC-;I+FJsv ztT_lX7o@ywr%4fYIQsfq2rya8N0p$0_Rc=A0lawB4~g5f2a`WdaR1&O`U*tAEF4&= z0XAe8n1($`W(;fU&^%_ZA4XJk@eEr>w}FT2iGG9RW6#prB*YtlR`%y)l=Mx#q?MrN z$mF;6+8tjP{!zc^h=zs%YK)3oD$^eOCHJ$!Gs#Ev-JKkN7v28{WCVc(HvZcUZA?=a z3Pim0#k|nsYMkRW3d;)WX$R_!je_?jQ5wZ28>Lm}b}#9f;3v6S3>=-uAWZpc^zfr) zV#bMxO_rjo_o>FjNBq9p;&BNpkcMmjyCBM8tD;*mQu;6zA(yl|F*s*DizAw1vgU0H*6)CKK z>IYpXrPrq@{g!O7CWy<)S0jDKrKZC62GzY~GGO+HH24?FP#pRJ3P6x2!U)0o`ygWR ztLE85L}wt-8A0}(pX7WlaR7NcDSNwQ7MnBv6T)f{{sr7l9@K{D`zq+jO7n-_i;{`@ zrzglSDs?QVGa|fK*W{j>2rdttd5}N{glQ z>>QeR7iX=G2cZK{;uV@VuA^6$kvilNdo;6->$+qI+{UF{)poaB6}>cGZ+L0=$_Cp> z{{vRCIEeh?vmc{PJ^XYVZ#`l+>-vGy28E#l%e%wm-Wv$nZ}TKfdeo`*8s;H$1Wt&J zq@!L0frG2E=J^-+TfS}~^qQ-D7!|!1QH!}BWG?=EGB!Xvy#rY6M{#9jKB zq6~D7=0~#j%8O38`kTcieluk7LccVouRL0z@Li{6ua(*+!Le0#QDGvyBdvxZ>UG>i znvN&q*UjFk><|WQoO5F3bT8!&}9_RPTQEvM(WnG2lphsPl*K22S{2nnTu87*q&ujFQ;}{`1Yz$ z(;Wv{e3#!1xwaY!Lw48R^P7HP9J(eKYRNJkk06(T49j#3m=VO;*QI;Rssxh`y?IUdx;Y5Lms=I7BM@{W&~^jrUX-^4 zQiF!l&S!7&T;KKUbcf#MkUHR|wVc1kF?2G=8i=k*m>yXq zvk*_U#K56-Y&-C?FU%<_;-)fNMX|b(P;B&YR=#9N6#2`!pbTTAR(LV;Ge5`0?Fn7BXkAGqOsMx!t8`jBi!0rlvTw0lVF9vT#@xy3-c(d**;Z$u$35BHHFmz=F-$k{LxMv|tl|XH z?cPEwe?BihW!tBurzt+ImnoR*L((jIrPrrh5DL5ZQkApl`mJ2{Mt^!$rISh-M3%9G zN*InCvB-v}m(wj;Ag@vpc2%?L%%P;iSl87Ojg1$2SvajZ2ADT7C$wL4Q6Dt*KsclB z;>WhAbjrip^K=>tyt4u50vQV39)ZIbLNReTqTtb0=_!|yfds@n)m%QZnpL-+9!&Bl zEyd4Z1fOO?Sc*6kqQoGQ8*gU*RL%&&LJ)lHdea(J?WrguSvHO0or2>^Br6csG$ewq z=JV5Dy$v1nB6-M#aN{NUoc%cE;)Gz)>%E;90H(NAQEYDd*2!i}7@5E$xn9~q1{XOt z>z&JDk^yu7ZwB*w!E!SLbSop$m+Z_)!#o&_;#?5vY=LUCpbTf!R%YB1r9@&zD%Q>0 z^Jky_(Sn{;>E;|0^$HF@2!CT|!%loL5V@m&+RTd2A-yVAygb;+k6$o9OoJIJuWDMs=od`*{lDC;D9#9)|e9JyhmD1}tL|la} z#i0!?Y-B*)hNgMf)x?F(cUz&w?dl-W2dN#AA+G~Kf*kSjscXEs4?XPFQD%A3O&!7L 
zyTCu&fIKC`N3Xlrqd8Q$(vu?#p6Ir){YrnKFH1xN?HYu_+!k?dM)+}GffX_s5y1Yr z`nE~a36I@1y@ZqdYPac#w36MtA-2S&c2+Lv7iW)eO^OLO(XqHpI6JSxWp(w*E2?9E z<3X52@uOU`Vf7tW^(3y}uCliS*^q5sT#A_uecH?DhNRIFkXzqBH?G+U-lSrZ}{HxTidiO*N}pJ&#pI~X-s zYevz0&-tnSuJ}og>1hZL<(j)EyyMn{Rcdor-FJE$B~9ZKL8VDOX7lNhg~$#zU&qCu zT@IXwZ%rp;g;5|kelBR7hV;%qQGLM3`9=PWTs`;}vDv*Buv?_`edM?l)3Op*UvaN% z_=Qj3P1BAkPMZivSqX)ua8c_jPuaq4#jN#5Go~!+jg1{EiQdWG&3mQd+i<$cEN$iu zhJ_m*!{HjC-bePRZXLSJ>{%OC97lQy?eIK(J`J0Kc>y5obPf>roXdG1 zDPt*Y7NH$?ZJzgkb`qY{u%v;qU9E==p$zT-V2dl;74Co|&8gkwj5`PkwP)30Z#+l$ zNyox`+#JdZvKCl*BnxlThsts?9i~=98 z+BZ9?X2LvGqU~sFA2U2(a}p0FSF>6V^*;sM9ZCAxz7Ysz%E{j*ltRF*I}@XJ&<_}S z^7>dO-7B;vyl!dW)A0UQcyhv9U1MaOqNF5 z4ma>%XSLUffkcQ5UXBS^y|3Sieav}0p_kZ`W1;!$X;c9em@kxWuwOcbN5nM1?rU%sXI19rn3%8#0 zNtMNkYCd36K2P0~zgH^nY5m-k?DDYCSzhMWrb=WwtB-^qma4wf``sE=DM2*g;MBkk z+6f0&!ar|(h69=KM9x)*IA|*DhBq_iB?~shb=pydb4|RF(zwkFht*}yJ(kk9pyVgS! zTWAHuy+T7v9L}3ASh`nogW?Rd6&d#)W#PJGNQCtm+^frw8wsa5qEHUd4)0H-^GaRK zT*GDXC7JmqeauH?tcYyMTpD2J+8}k`&89ps;gi|M8loPxAlawFQvzZmfbeNzYf@g> z>2~rwPK6?WT**Y~@_G}0#6Mj9 z1aW1U4I2{K{yW^r4rQVA&RSRz=fxxBoLyyBgA;$?0Xo=ls!mOQe&&}bjjCaQK#@6V z=SvkoO@REbw!BvAyZpw}j4?`p4cU_cF8WvT`{Whz+7C7B{AQJu%ZkgRNxQda@CB+Mxa|KChtU5=>Gc{bo5qjS z-v9t6itPL5qmRU7{6^uSy72855S?p2A?Jo1wngU0HaU&&1%sB%@fdCR3!6%#6nJr~ z*?7Z3l)(j~X5TMfsQfL%xbd7eSs*3&FLN@6o#F>R(>w0HbxK=9e)DD#=nqd0ZHDV3 z+&WRXQ_B+}j(!D$*YS#N4scFwmRY!Fx)To|BqJFX_T*xj>NaVqH#xNb?czH3gyI1G zIDU+Rf$IH0yz=~OW$%phkMV=p2BbAyq`7(6*chBbBOKm%SKk-^I4JE!JxQ7VRYqu% zUMTjINHE^Wa3yD>Rk}w>5EPuR+Jm6ow(uWYAA5q5uLbsek;k#7o|1Sr~^H+ z6Yb2MYERS`x=OIWa`ClmL@g;~f^aK3C63}(;O!f`$?o`&Ddz$t4bqS_dkR=4@5+yR z+1N0m!jgE~YC@Q%<;^xN)^w6YtbH%Z0Pq)4A+d>D(3|irt$|L*S_#q(Ud8|c`;4X18YNy+ zw&~-G7VBFjVUylb77^lACd%oft0HST@D^5-47Nb3Je+sKp2D$juB@!f=&tJOsZXp> zJiDlreyN)OmBtyWQZyBMnjg1g8G7~qb)Zo}D{Sk$3h-@!^x{PHD6!e+k^y+MnQ%{v zm`FCk#;6|NS6I_2+gzVw#*~J)+QK_$pbczvC!MIf9AG5ExvaWN!)2Y%-mnZdVixTL z>X@(949_6BJF2(2uNVw^H{GZj+B)=fBy5TB05%a3ucXj~u}`(?ivbT9uH+xZqmurG 
zZPO{RqdKslMKHJpn578|NQlYkC%%r^D%-_COv6lWH;km*dLZC-o)n~7NeX`n+Z25o z$rfF%L{NptbZ}IH{N%uAFHz-4T>3mZGLq^#Tvm1T{OP#jVCzmVhSw70bJzAjYbV25fntVtrJ=E7T# zm;4wV$m1rk`J7NMfrJE8W=iyRSoy0B`&i-z`K%HBHB?Zyo|h@x2c+r4HafE?Mb18) z&E__pPzoV$03fzai&Oe7GfcJ5AsB7559ijvrMkc>ZgnqD9kOSXb-CN;?VLE{K!x@B z8E<;Dc!Q2i9%@0EvPy&}Gq zb#xT@6%%*zyd1he;i`d{df*(X)OB3vzeRVb{$#X_H0r5PvKZ;+1LWNYLLf3l6z}KV z1{A>n7%e%^o6huDXD$o(05Ila_BE#OmsF&l5;Bwzeb$8^D5*Gv-Wf|Y?}VTf6+1=a z2!!X+ZqaDNh?>1=I_2Atsq4;H_+f2m)Pw2+C{?XU*;@3gU2Ky~{>T;ll(sSj%@ zZ0_r(01D^((p3)l5$1b}-r}J2I(Qa?Wj}xidG%?eBwuR(#bEg5%(jH;W&<;JW^OCS zZg%hV*Jwb%ir|9W>}u4pdwj)y>ANU|^D7);3<;nL$)Yu`Z*?bJA2(X-GpGLiE`j># zsjc0zO=|$FOWm}9Kz9>KG$ld?RcK$bVLVVwEg&jyjLQ7IQZg;8BkPf-?P3VXJ)hYra5Z{H+Shs22^FXxPizG`jU7`S9uBuHmn z`Tb!j%J6>OK0tSJ1jMw!$2lNq#Ezt&4%(W|sc+UNcgIRTcU^o6kTZ91Km3S+kYFy{Ipa7;mYwU^xV6gIF+Zoj~6+JH#*DCFNy z`Mq%`v*1}Cnkq@18mLxWsk_`ZiBTs@qe&vKo&@=eZ1I2iCA2^@-I1_Fbt6(SD(g&a z2dT!WmL-I0zNvtgF}*P;C|x)8dZ}6WpyJhl_V9(Ww~U0tD%WEFFV=N45gHS?@hJ$* zv{&(7PtFIkPXu$#B+cHWIBoq=`!~g;GbQGR&mE4<=eYi0nVcW;HaLU100X$a}dSO%5zIibFHR#2y#9Hs|E2bDlW@Wmup{8$U@|36a@G1ZGAG zLyhd+j4{>O5@ZOx_WLU{PX*j%V#7`JO^|4`>z2TRNL7}{yXf& zE8zmkeTSs@Km5Sp32lO2ZiZ#l|BNV`;kdxwpd=^22Q_NyWLnU*VtTj!?;#TBS$f)c z)fmN|xM%3a$HrG1)&JSk0+;#}wy<%9)GDjMrP+igNw4beaLh6MatiI7BAh7iM?U1j zfX2Zkra;M#yC)y*K{3%K73yO)W&NS6!qs9p0lax<^}?lt?!`wh0;51n>i}Bt%L3V4 z+O1rJB^6@P-jR&|uDJMrj7~RDqBJzYyjH-%dU0(3;v9@eWg%VIVs=q*7eSmd2is(Q z%Za(Bc$&x0RiIXEC5;@BE3g@A@LugK;>4t z!ugDCoQ=b@@J$uS<+finS;H|iU?$)RC6FneNm`x@>YDD$HDR>SACJ%l3 z$R8S~JfDHq#gzAK$}Y(exR7l|S5pPsrJs_CxZ_0LSgkAbBX||~%TRgMOo{gTul`Jx zqBo*-8a@J@4EP@6PZ<;(1J&(7dvtpT*)NYAsF>QimajY^3$@whb$IL3GM9xjb=vmY z5DOYDyPKQpuI2P}9b-$N-18Am`v}Sdh^N&-s#~Ydp#Z6iln!NZ8SXZZgLk#gOXXI9 z$*A54Ko_MH#aTd8J|@x~xxfp~j-fOR(K>$-@doz91SJLeA)!irX;7-auHsQ$q?ihmy;sDV|z~-AlUpEc8+8{_HCdtkBkkaTwQ>?BrsE-Z{7c5({<)OXxSD;vWtcKhoC~&T29c7}x zPx*N7PU51Js3qt|>uHdM3)i3TdTp1@&x;8+;HmAhiko8VD9xvJ{ln6Rot8Jg;3-)} z#j-}>Vyhs+*=w}^H#NLa2Bcj+Fe>r3n-DKKQ%Y91P^j(KRe_0ot>l}M57SO)+2^=C 
z%NrG%z>h{zeCBVD75%P{Nagy48{m*jj$fw7!p2{0k{atZl2R7MJ2wEvbqstGCBGo_ z>@p$hipdLL=zfp2hXVZi5qbRe&NNBm;L6s5;*_ETrfPp*`2ecPN44F(s7gO_5P|@! z!YU@W=spU-@`-&^+l&qzf}H^n&NHqUSByH9ieqwMllDM-M8|f!_3GBWIEmpLJR1;fOH7eY0_ioDv#c) zFgF*e;XtZ6${Rswi~^T!u-#;qPOKRnn0}+YH%IHb=30Ffmvdcm`;=8=GoVOjI`uY9 zDo9*}9{CVrobCdGFouCKgo4#Pfbp8TR<-K*H9Ues5uK2p{rUHvWBb+(lF~D*9~R-b zANkEWt)eYx4;L(8Y`<}m_Tn^)iZ|JX8;vJ~fj!wDTgf69&{y$W%Z`pq&UK*DVW71< z;#Mu_AV?h`da38pJ`Dt-oms)3@z}rnsExLz>{KJ*h@CtHn>qK0Inm%Er@aipaQ-z1 z-WEy;mNGw+k#{pDh6YKY(f-2Ik+O}F%?7bboj62YwJB`S%}6x;c<Y8Rs`W8S3lK!%me~dM#@m( zV_Pd2Mj%h2B9X75S-)MqmH4Uu0eV=GPBNFFFH3DK2tmZdVS3m$v_JmEkTfF=rbNF^ zAW-1x0-4DZiLPdy)Yf4~qKgj2{cQVcWsTvBazf&L^P_n+p*Q z=%g*I1s`p1n63l=w1-Lrci^@l*pp~m$3*i~@$`NJclG*iU>Hf&!Hq1y2z0>EeBb2eHMrLj$ zee1xO-cf?z_ZUQ8)n}HYVq`Uiw*eXC3-A}ptqqYYYS8HZJud>~N@x_MkBHd2{SHLp zG3jl4(cF#gIkz(EhZyt)f0$)6VNW_w?R8Xx7+cJr$gcW;Vr(p7q7ysha9dn`*YXp!@q^B6eSl#YL9%uHpEf~>6-q6~ZKFz112SiyQ zFDm(?fHOS@17e znN)t!_g;C3C1wYY4(}#Sddn6`HLxky9v{y7dtb=iZy$OtPel9=+O5=d!mkD02ro54 z483RQE2gs8kA4isohl&jbol)>&j)XZ+q4yL8S(Kkt2Uwp$^xWjumJS zGXHY~PTvf6kwU+ojK7>0SZ+7s^i`v)h!f?_-F)B!h~XM46U<}Sg^d?ZWZ=%>a=Jl4tE?!2#;wD=GkiYevv`dtuusKd=XC+3M?ywzac zS0Z?234<2Af@WVzQiE8_?l_UWJb6cX2#vD`Gf>;!momg?uz%`yMpXDgV}t6XjlOV% zM5LhI+y7jA&<=?)i-58pzwjDvO&WFRfdv(XqYim>-&UIQlID!DBW|WR%Eq?h(aPnW zqXIT6F1TFP>DSr8l@wAm=Dv;CV`xj?bUhuqPcpRFDcsTBAaBIkinqv1mE@zgbbho#^;|RyFF|5@|2KNcOiNt$|NentSqvKJswai?Ys_LDXfwwph(`z96aVs>5%ZH5cTZ?LP=MhBPQroIKvEcaa$7ue{RaQQU3J6C<%_wCe{i zIuWaCzD93?FnZR3Lj80ZtBLD)MgL0>(mNw#g2A1!HLr1I=B+1aOi;Af2J4Cyu9&b#W$c!6r@E!P(}LTp7)8M zWxmLLV)X7&kg?GeSQKh$bEeExZszE|JnN(dc_~P_gcF2Dsxw2-NKU(B(lF>d+mLPd zD>X8I)e1p(Q0draX&8`1k0ZZ7WBUvrDuwI|186DeyHJf!;RSJ^^2~r=c2)Vgprfp~ z(JquPC=ygp9W`_4zwfFUia}zEu^&vAd#$O}j52Fqym1$eyIv;Gu_A)KSeKuNs^2eO zK_LdQ;r+9FX@0`%$`dP1*BxFg8k5+MosT}_4IwM70t_CSYG$mys;Xr(UzYPlYf`X` zfdfOWNp}=dzR+$*QWfaE^pWFnai4*3Eu}98ocVXVah@yx$Z$~dQCa}nJRRUltu;{E zZa*91fzyAaNDZ~bShLo&>gn}_K=2Kmz z4u}v(B#@IPJ~{VxoK|XpsTbrwnz;kS$aLFEH$j!%?qN{@6;`3ERDHi()^v%N_FCIg 
ziSI{LgiL<0wMg&%q#B4j1!!thb~)-D`ljww>O@rBWkOW`8T@?7*YNcWjl#IR>`jr5 z@QB<`1u0qHUMj|L^b(h0#fc&a>!EvvL*0)z?lEVbvc~M)W(4{0qn>K5Dufq$DpvMm zD~X~{3|0&$nIZ5j!YIBjCc68Fde8+vK14<5~FKz??e=F?a;egOU`ganWp5O)W|= z-pht?Y84$nwIN!eRD;BL9q5cckvj!f)*nj=cS)+X56J%X7R#L*TErP*H*;3)4fHo; zp$nd)#V?sk$`r_JcBPSwh0kG9OK{}PBBCE-$zA+|5k(XODqAn-F;Y=R{H^Sg;mK$| zlI0mNC((f;4>f^o7erYwkd0y~pn4$rADz&2wv2-R^}g{eEaTi*+6&kF(Bv+-Q@a6E zXFr`7#2*of01Rw`vx0Y7Q!NCou?bIv8vm>%lpC#rNA^uz~eW-P+8Wv-(6fi#Wy?W5^ZyH*n%DUBY9m zo9bY{ctWji6DYzR3V<{5EWr7)`XLtl;@~GhpU3YTb|C)9nS{Pci&q0W2KVr`Jn#XX z$g#0{)MVY58*R@_Ii{xRf@@c&0YvOq#4$ETTNd6R<$#~iYQzL)t(ghrMhk~-tSnw7I|42 z>GI2RU*Q@HV3sO8!(L4?V`pP+1UYl*s-~N4^q}Y5TN>4vTwTe&WZtMy?jXuTJ2piM7^xz;EgLqh(omIUCB(MmWZA1tl4IA?>Zz^3`8LQp5J_ zPv7pP0~!n&h);ei<4AgThjFB1n_O7Z!V?J11s5iEB;9^yN+4_M82|CoZ-MctTj;ys zACpn@ha2PY5-tLyS%{@@N8XM zO7YgtW4kz>aLaH@847R~(^iM}F<)7^($$;F%gW05T_(;~?e%(IXg;Z(0)v9%A8pd50C?^4I~gh6+bE6mwY#=Whw6VIiZK)R{NlgQ`RqUXYXh8$ms)i) zfnvS8*)}!YX0HY`?y?)tWcT)2Sy|-o>5_=C9xJsibxfWpb5|Y4tm0`( zJO`_~lfoYB4|bi$UZb?;j!BYyc^hDb*YfP#6`RP}0j%I7DI`~ob^im#Cl@tyg@-7w z+7a z%fG$Xck%}g$Rgui>qq~U#zS!g6}cumP1I0`fckGb$zsB@TO|ukw{1Hg(^^R4kb|5J z5v$)4n1YF;p?{6dv#FDbg-bc1I3rA_eFkXri*5M*U12C5g0LIeoM@{!;B9Ov#=l)e zN&D6AUO1GI8kQF{hmdelfprGdFkl7GcofDEnm#Pd5VW&mi#oqb8;QRKPjMp+JK_<02HiWO%HE0wgOmJr}q$fpz~*j@~63oeR&m zt71(Q-CP;0Sc{Pj$YS_SOc7BZIW~Hy5S+sNEE<1 zyJYHo+b@feigiE6k!n1|{Q#eOky=1Oh?e;uy5C$>?7HFOoVjFA>9S(6!$sYlmL^=A}` ziTS|qyNdYIo;ZHV+Lm;r)gjyF?8a7#48x-{Ucv zLCLMM-}8uUtGaRpWzK;A`05x-RpMX!${LN43smW@XLn z^?RU@zKTWTUC#VcbdEw{VagfaEv&= z*Xr_P`Jv5=oG6v>0-||lX%hnzFlx0l8h04A-VzD*^5Oi`V<;}+x`?qNKjA__xZY0q*_pGfmbOjth(@jv= zbY8kpuvU_}X$^|?obw`auX&e`ZfO@q()0m#v;PnVcu<39JWf*QbSmU`(}z0{Nn zprmMqPz{xb+R){s9A1%C%T6DFV zM;n@qKeTSqV8QNVmAG$JQsyVQLblHTJ+puNF?8t#>C)vHEPrfzqCs`&f^!^wt@q^v zY||x>O`1-NG;^Tr*y(x0(pD(adaM@!LFRqRmI6*Xj^>yZtX0dBG2lfnp@|Tf`Bkpbv$oqU`s0*%3y*dsA@r_P+#`*+oILy}D>Ddfe}uws)I9k#Hr+u; ziO-3$xTY3;K?C=p3D}%>y0Zet+9vph%mz^9`*|Jz;zTaYvPp1+9f~Y`j38$GpMiRV 
z+j?zd*8}FURxyId=J>zFs?Wr5LVl@G`h5ATpj1V9{+c6a*UR;;W4j=q9)L8A`+7=CjP`pQ$=4DF$0(HFfS;m#QVw$BWtSkTkn@PV3 zv~u;P{Jabm)zWmM@Dl%aOVsiW!ZUn8aOKw{yhr<-ra(%MfEM_jx`5=6B?32^Es3Csnyk1&7kT zlYi<0T?8=-naF9U@+JJDlLxl`)+<%|oPrwpGtFowYdD`0NkW(Pn2#|RcM;wKSr)nd z(bMN>D)437={MKQUqd{)hUr;QaKL3p`96Y+q!^$a6KYkLP&VjU+`J57xMjCA)FXRu zy=aXPZlG@LZQ3@`mb$p~?WJi?0snNgv;>cohE2ABoH1Xk-9#v`$o%XAg*wXpt;0tz zx+`wI^w?Z!5!z)RBegVTS`8Mde7T1N6LKzjI+5nW28YIQ!=knX=f9rqmdxJI24th4 ztevv~q-m;xs^c$!^rIM@@57?FRuaOz8k%lKlU{Sx?M4=9)5OeK_cU4qj~@B+v_juP z!sM5_LsRYi zm{#PCXBUwCOgi$>dlG9!!~t8cwcU2G*IU{NYcgcl3v#`^r%GeY2i%BXenNIeb9?1M z_dQ3u;4`)hJcp4Q)*8hKRj!?So{p&5RTnXo(oD*?!Fz1h{pw%jb zk(~3hg~fMpD%HHi40K%>F|$kP)@?KAGft370h~%##>9&vGHAVoBV(_vhzWtMs$u zE9WDRvd=V2QGf`p9jYv&XjP_J+?>xwH2VElv5e0ZmN`vd$70(wa#L@ii3N=G=pBn7&iSkbXL>G3+y6^T`&RGngf)_z64A zts62NPaKJU48H1hIj4~`lx5}RX6h9u9(bruw%T@%gNGP(OdeeoVzg5=-=JcP2J#b= zBvoa@1q%Jwb34c}NL%u>=%guCM&+ z44sNp(z7ik!GFP5^8;#R@x_rktfN(eEn3BN^2N_)AY8a9SQc7tCrW{!#Y!{iw}_JH z9>G0mM+hc&?YJ`huv#{jOF<0UY9bP1A+^2ZCfvPhGv-KM9#7#<@8W=O7lrU@6P=uO zd$)f3uiHnGOG8i*`pC%15!iS9U%l#wqN7Tj#Zu3SKS%C+Xicv}2u}m7l5ccQye3&Y zC7>0(%=cLfk*dx4bH~xL+D&1@DKzvDx<2(YF8kk}_>^RyGBB=i^rNy}f(bawoxyZ( z#g!58s|5p&0Vds*$}RxkVlsfKp776xf|iZ21uB(nhH%+qf^_pNEFf;q#vGDU30Y`r zLCet8#*1wgcTSDQ_b&>XTg?!e5*(agTPK z5$d0+3Y$|Y(9b64SJT9g&)fCB;7p!UUFJDv4pZNe|Ii^3BLa)Qkf>?-8R{7;0N?7C znYX;fOHE89V(lA!@Ra9!*y{^nXB@w7GsmL`;o;lP(mL7FZVVne$_^QExY|di48ORJ6Jd4pNUduT zz2SjBNRbT6gSL>%&Cr%v15UPCfBpL*DNMR?#pa2@pbUKIjM^zD&3z+QFYLiW${X2)|v#IEY2&yRISV?B#w&$*`)G3i+87~_K(Vrd%FTH#4S-Wc!nupzyR$W zPsi10hL7h#Y}s&L^IE2y1f4~htDj@{Jz0rTdXjsJiHce-xnc+G)4{;&z4AUD-;G=& zyHs(nW8Nw)1(e3xUFFnh|9In~mRZH=-*`Gp8f$ayHdxLz8LP_MH`(St`sI$-9o8Oa zskx0}j)&0equ&#nA=sn@7N1U3t_Nzoa)$5u>S8TJObkPO%WS5R+ut}?#!Ma|L5aYA zhswF4PtouQnAQP6vCmBvlqWf=_`G<|&l2?}XU(@@qOZJyQ=u*Te4hCP-S?*V{|Ss4 zPZfpAQR>q@+sdHI9-y8A63W5z`adpO!*uc2YyBo(K%EebDe*9A&hQ?x{VwG_S)}AN zN0}cG=PL)@`TNn{qe6;+j}h1*nOGzWA$(kvjQisi9zPDdq-Cv0 z%)^@QbQSJu&T>vgd<(^~D&qoJS16 
z1CI~MqN_#C6GkXer0pD1URdl0s8`*gT6goR$zapz)m{vNAN@qG!}{*s=IvllG{iv2AWDxSIsCaz1=T;F~jG6Fo$&XaKuy7Nf-_-a*CM{bA6tj~ewT_B(2)aT{() za3X%@wzER8vH=qm)!x)Gd7(#t%7M`NNF25;mJ|H@G<>zZ!`>uG9v}aX2jNl|W;ia- z)9-%nQH7qF{S_LX1Q*{}C>U*gLc*C5C-#1Rn?kYaGk6`Pv z^CmZr1h2)e-PnaiH!NnWVm> z1>V7JmFMc?P23Y$L4W%r+C#LlVsA&%9JJ@lX!W!#|>QaJCp1 z=YixvNra9%F_xqeOvV_g_PEoO7RRG0_|qLNfH-m$9sBFTEsLH?Eg1n?to?%gC;5K3 zR!q%~hBCQb++Z zfIZZGcRQfDyQft3sXRX(#9U98+}dB@&BIlGn)A8jZokE0YQ%G!@&UDcr<*O17I`-H z_-a6$rlEcK_iAdx{@_Ccfjn2$>WC7N?jZ{PjU*vS|$&#^iU3ATo zfHoZ3CRc044kbyGp`*KIoHZr`WBN1ly-`bYDnB(C0Xg?B<@}R2=zjS+n&C_()=SqY zPRcHZ0xBHU4EUSQ?=X>Ptc`%6q3{~y0Qpzk1SOo_Xbs9Y3UMpD-@YmD<10&QPvvGWT)buCW?Ec3V#UGZ#wq~SuI-Hmul%?L(6%#c2cd zh&|>7b)%MrNv~VwvamV`sWV&o)06n*pZ4JEQ)&X}r#{=v=2#S7@ z4GJOb?xrbItrG5UpBDQX8OxiScX=YxBolbqHMn~WTYfj+cdLNL9lym*Ny5-W;NCO4PRvL|?xgcgNCVlsz4IW25&d;-;9CN;#hkr6C3%aaJR-vcGmj|z^yf0)O9D+-Q*N3zLopYQ-qLDU zy_dzdDox!9NlR7!gJpA-iGdEnPG;w}-&9}=3N$8RB+)-yA$+v`B|{Df7@M=*y@3T? z!>9;Xun*SN?b7}fB=p&_CKhxI%n+RmSl^vQp@L0_FUg&J$>*#RrQZ-zMET<+!2!0X z4fv3<3;qAlagsD@gm&axS6%2oYGP*X07}MQ`+0(&%cgoZIO=fA=i8tYtEJ7(6o<@X z8FxIZlA^M~H=xz{50szY`>rBw?HpH+!Bat7eD_IotRIC`Y0spVIIomP2B-rT2Jh2m zWnXsuAFb*>U{*vwS7A)>4oY+^35+#9hY*PwPb*c8B;yWDsEIVZ-qvyPA2KahbRqYnkYRA4^iLelv`P%nHCO?|?RN~E zkT-qx5VNB$iYa13OG~e@LU40dfE{O($)UW*FluW36 z1GXD-(CI60_T2p-zho`&H&x8Y;4-R`+1soC9MK-&>FxSnKcI>GPa#Us@gvNGos_du zgt(m142hZaJuFfsy&X2uYR<5h%+Ywh=h>jO4<}-W9?M%XQ7BTi2bH}e&LtAl?D`qb z;~a*wKi=vx=;P%U%F74w!3BUpeEN?z6IJiR$IZwT4wIt+MX2#}J|E2^u%}rA9U&6u zde>3tjZdHI?k8&-8_)+fCk3k+6Ac1gEo_Et{OqKqy5X8OLArGH2?vB0w6O@^pCa2B zb@ViPi8=EHS-LBg{Qj(>1#sC$Nq7OjmI`nMlA2hu zqM}}f07y2j)83De6elHSXHN1Y#pS;Crmd%b$?kK$VtMjepeLfeIx2E%>2a%XgL{{|FvTv_`+O4 z0zAXCXggZr`eiz0wO(qUrlO=69MThSuT@-Q1vTuFn6dA`yvd1MM*=~Ft9k06O2c_p z9YiHBDN_Uz-}l_ad8f<-Z>r+m7)j6>k&eSQYrGgiyp?rSi@=v?#CLp8CVUD6tmQDD z;G8|wa|@lCkItte- zY=MksgPR8O7>E|oL?V`EI_Zm%nE8wrpd-<95NM%Y?Nt~G?D?F$n(k+s<_NK^!wOJ} zn?aj$^mWBw$&=x)=DFhT35%z`36x9A0_0|s-zPwq++DZ0y& 
z>@*r6qh7rHZ)A|@OKEmE8sxHlh)w;@`rH!YyR2gmp(kyZXabyBEC@NDb&7T7coO+V z2u%r}H%lePOxZ!h0D(p$``*tNM_5sR6J1$vW-Do^vmOUi9nN4K-PH_I$9A6dL{aOR zLgD?d{E3U%@wQ(eaF`I{dGZAFK+k5;AXitELWV0Qb?f3UWxIM( zOTHVBr;B6#(e4_Ii%Q_2GC#p0nsMs)h7)6SGwACq<^jgDIY@eDc3qkxQ z`dGKx(<2F#jsxIFwickXfnMue6)Y`4u9l1Ec+pkcj1UQuA?~h`%#;W5dV8H!{^zNz zR-h2hDt0S9m?9g!tp&TK4Goc!uv(9SBs|43_%-N}D5!+%06J)@!*Kbkc}hcd!1qV+ zRTlgF5s#_uu=qq`If|79fX}7UrRL8+VC4te`A~8I?e{wot%%xik$U;cES7NZwLbOn zQglmhN4uLVh|ct2hD3EiPJ>wY^ff^%?FAFcbQmSOTY;{4tjx*r?5^m;g_%R=AugdY z=KcH}Ga!LNsL{7?sW}7$mHd@~x@HPRO@5_Q!7Hdmcl6a>g^@&zN{2`r_jnmhls-}< znICq4^0v>ldoF-{C~^3-WTt-vw+J>e9q&%mWLUm}*@LWCeO7DE!H*Ff2F&t?e`M(H zzD{kB=tE%xX!}tjaQ&jB#GyA1I&Z=F7?#Czv#Uud|W=d*ALS2NNI)#oF5g)0R)oDMIR{@xjaPQ5G2V z_aV>P{Mp9Y;F+_thD<~Z@5V!rNM}}T$?Fcf6adxn`YRKp3IPWC(01&Pk2W8re7^sS z(uk~&%VH1_$_vEc#KO5+IG_UUze&K-d&k|A%V|AU07)Kke}mYl@%1mnu?x!0e8p_s zyb;TZ$l4PGg?&X~&Tij&qet4b$65T!8T%k`{16rf{9u{yew3-DZ`2}lug>;ED^Dpj z>bDhQ6#;O}xuXYlWif=HeSgOE=nmB;4!F%&iP>YKd+- zQyG@Dav>{=<|WA7URsWXBpxB17Juu(N_ZxUe5&7jZVFkq33{IB*6b@~19*0Xy zXwW0mF4SrI`=JjzMoeu~0l_%EO!tw<=%6G~}D`L=L9Wl%igXnp{rL=;t*heg=)v zto1pJ&5EZO(OnEflK%oHa|#@naN^*`bMcJf@njAzR?0+9)ENW{`bN1uPYOKfGojB6ml8c{>CS5PTuUty0cPQ1`?bqD5G|0w4?mvvYJI;X?r?h|0oMIA4~3W9&|1- zyH{=2SvH_?{IMrn%0s)fn2cUOdGK1^2WSf2v;;~&bjv7OEN=!g+R0^30kCS^kUH6s z7Vo&q{g2M^E5p3^XXobs8eoShg(_vQiwoUW{Vq&-E4V0y5?)-LpcDue3~lEb%4@+e zv#$&6iU>aOvUl|ScS6gqzF_RT1L=uyB2J?nB*To7Y#hdXtUUV)A8YFtT;td`|&_WZ|2b{b5iRuGxF+Xd)73@BCH~a=@nkp8Y((dqhoA zHrNH7YLv^wO0$fLrs0LiPAF@C`0x*;JDvIfB#@hGj3~8`9d!I#F_Ol=GaFdFcXM!3 z+0I!;$YEm4n#!NK7b5(Ssb2zWs|V=4QcugysAD7qbJu{~ zE7eoj%_@1Ae*n&qKpn9;1rDvq0n=&TB>7Y8!*8h3-y}ZOv=F`bPo-N@fI5wfvFiS1 zD!1EMYjm{K*NxN=SN*XZ-rI%sP_}tncNbpiYsWg+a6+ahN#<%mTj(Ic(M$V-iE*se zWe5~>K7Zg;7RzZ5Q~aM4R*BO1aYUJDmbZ#59NJ&`8f0>3E;8xK1r;ige-LietL82L zpR{8kZ>-*i^t3oTYjE`Li8tmWpb7I^8VTuzNw$GCtzcSa08#jXRASUo-R>j~9Dhr5 z0101}!3YLr!dN{%GXjna7LM*WK4i{4G@*Sf*qCSEV}@;T5!VZ`!T1bx z52vM^CyDoH3BsG_4z>mrJbXV{!k(Drpj 
zs@Cx2xIz@F?d{%M*~WEYPt`FOW_nNbMQNo*pG2k(|qK-uJm2pEQ5`M;9k^^sh>ef^Vy( zTs^!gx-ktQMsL|JMZm5e(v#9!cg92E5Dpn~1JvL|g$R2WcNY3+)-kON zK>V%~vsqcv8Vgd_2f5_x(f{EIwd8uIb7_qY*rP~b@3Cv8pSB+SsR9cRj~{3IvF>voLy$qCb_MQ0 zq!7$jYBa`HO+QK+;pAVl*lQS50g+qjFe)5hZ^fpiVH$ola9)IR=C3114oi;VG13dI`t5hEdV9?8DDl;(Uxy6B)HY5a_rfrg-n) z(uj!Y65q9!hId6we4lQ0H!25*nH4q2bW>82QKS+LVe;W)JLX0L$M(sb(`dtSc|z-Z zaA-xnQ@?b+j3PzhWH;)Hz6v=|;{?B>Oquixkl`2(xt5&ytt1e8a)@@Nu;@3OemJsk z37eQ}sG)GV%4jXC2{$LroABJ4`n}y-jvDe~VpxnudrG0fnG3;Kz5pm;Udv=C8vnD;CBT=g7 z4$uyafDsk(EO14=qss(*0b0vwgUb6chTJJm0|lv5(~UG>-qAn+T=6vuG02wZBEX)> zhCyUR)Z7N6=g00R#_{SEm!q998p;fjl<)w)ML{bgc#q+BpbLznRa2%ZuZa0wixz6Z#M zs|SYvKlu?R@5w$4@acC7XlbqX)vya+f>Qo$#hQS3Z?U7D)-ei6g~}2>Uj`+IkrF*s zTG6rk{3d+t`lMl1NXDE#M%ptrUt@3!p3MJyEO*+oTq_-&0H~p`v>i|3X}$KDYUe<= z-UjvdVWwLzCIP>K$*%YusQx{jIfH^h6py0e)n*YzHzU+lHYPk-OmT^`UCbA{#M6GLD6J~LB*PI&~? zM@~u6hW*R;k5m*Ld9hQ{p{N^RZ3a1A+g+Uz-iR^#m?==I#lPjQ3&bbCFF}Be-+Kdsf(TT!4H+$$-GWct00^gyfG>^ zbNu7*PQPNBy<1IAhr3;udXJ7$6EhhG9s~Tn-4@-e<@@TasH~;|toj^oZUD*f!Udcy zqSzuo5Qfs7pj*YISl&Od9Vl#*{jLQ+FL#cJ$3NSmOdX5 z7S2NzG6^R*Mzj&Os^u-rnOm|Y=OpuvPM_PNy2^+9?C8O(Z1daDgOP+KNr+rnMl zjQT3_+O@}%E`@%4EEUPYBZ6P1fZ?K6pUyE)zR%FND@UUJ5(xM-zMC|~ z#ErVDSkzJsZ~;)OE9m9nrq=^hv8Z}DKcqvK)er}#O_k9Ik;R$ne8Wa6_xI8&A{UP4 zm*5i|6M}2Bkh?cv`KVo%7u`?cJ{Z^SVNXFw(HV5Tf*}uWfwc4wS>;;6jOH;;nLHCk z$gQV;2qnA3KqKOIM|s}YneP;0=s<^Az7}D79`df01}^o~d5~pW`7q8l0^@oT2U)eL zD6n0JB~HNc#6YD^bWLy=gb7{44-1n0!?U!`h#~_ZcJ@d|BoVuBZQ3eP8 z5P*gmzOa=uG@nZsyQ>NYm#(4O-qqD}-_!iA?Pt{1@RS;C5Lz4;id%b~r?#c>u#)eh zWGo;%s}nlwwB|+3%XKFvke&5Cl8R7*SvSilEeifxcQJ9pbl<`KA2DL16|T}TkA7ei zow5+x+@0ZbM-jcL=<8LP1Y@$X+xQ(|C#-C$Wto|;sq4MOA(!XY{Z6$Gbh*1KSvf5$ z%&#}3m9VleF*XjacMbpjE|7R_d)%{)Lk{b1@Ip~5Y;EKEvluRMEpKDg{XuVFrl9H|$M3cZIYvMu=*wz4QJFwx_J2dA8wQ#f&_( zbmnKeIPAC&T_q1cj|`|?c$Q2y20&lATIWiD3-5O8NdLWHAh2@LJMXKJm~9bl4uj<` zKn1Ndhpk{yp*RC=t|a!4PDeb)BYOK6l+GdT)!lK23P`F$Qi$1eP$4e^>ptRhc7h(p zM;!{r3YDO_i*tO$#(QICdb2&nZpn87OZY5R>#cTDzN$d%J>)pN=pg6arE5HJ+I;!L 
zMOj#Vr3iZxX-duB{3!(+J9j9RdVt+d;hJ3EyS#gEv$G9lQo=?wX(m#gQVHODWg#?U zQ)5WPd&s{1>9H7W}F!L-_EniaR>P$fwn-)<-Hhova38|>%OfLeiyR@ zxQh(hX+HGWa16@@$}C7~eOu9l!Dt~2LhlHxuyP7nRP<~0s%2rO$Ug?9;5L}QbwiaS zMlM26YX6veHc2P`0dcX_!Bf8%`6|4DVtXtLS4lN(+~*S|u^Wo2&F64`7zm?og~I(J zgMBF*a>4EI(~KhQ6S^T9UW_EA$Q4NvxjboF*S&?6OA_f{z4)^5E}Pas#Di;H=ZO&c zomg6KG!|hsSCbbO292Qr(8641+B0`7J_t@W(F=1;tc>^bxO*z5mVB9;gTOjB;%kf~ zEUF%9;VmyL$SY(^AjxTZm90osm$lDbn%t>=&K6TjJz7^yrz zJ_CaMr^@M35edqyR}f4?-~txhusdcEwgI0@|6r?Yj!f_+FZ!BLK`cg#)snOw2FcD9 zl3$MlopD8`E1goXU0xmwA0`y#XT_U?^xHcmOo2o-l9kvlyzPg+we%&U+LbMLbg2E` zwdk$P!wU|=HX&K6SR5Y6h3`%>pH(n(p!lg@uEemUgJpoJ=L~AG-o=NLntDpTc4YB{ zS<66Qw>m7#Vy+Z&akVD%oOR%Ngq>5_&YjsMuI%VZ(kn{1Bp~vw-=9w}u@xfaR8}AX zgm?eEcW03rU3cHMjC zhNON`-WqOGK-i#dHN>gj1wB=oUl7^ZFJ5ky2VPhtReK>c=~>FH z6;5io;e6YKUu6OmwrZhfsDPLi9 zD=PpX}zV=B5H$gCmIk#2|6_Xatk96X`2`*|djqkBJ??5AMBdO3}DyK$JS-nVd z307X16glpjQ_angiy3%FkN21Plj!Gc!?mL4Es|wR=J+Nykr*VkQHUB=+(6V3FaE$` zB~IQUZ}HV$mIrOXXMw4c@gImO&ov zNJ)-Er_)p2VN!hsPd@HrTM~Kgc@{Ga!(8xmqc-fHa?0k2b>i?xvg$tZM;ZIvYK#7a z$T4&=6@q_C;@&zcW!{p7S7z>cA~8CT*g?dqVH61$IB3^FRkU*RW2I!%uM>=EhINDl zorGbQ_T^dBz18T>)FSfy2T?Dh^ZOQj&1T=kwOb?Xj|FFhcuv+qVPYY7B4d8N~x+G@F+u1tfHlNaPy+sG5!?Eg&;BGWOWaHJe# z6f{n*LP+W0DV1lPX{eN(3hz{+_9#huSjBneN<_S{4c4%n_}eS8D2 zlRr9MJTyW%M&dU0TAgt#!9{7=JHGlvb4)K?o;TUczS*b& zz~!iT)L+2)vLNMt~{#t~P*t z2zYUKlP^}aBUI5@d5NXt`zV_{N2L8~0InizxJe4rKCCG5?UP6CbbCE+xM}0O+C}|_ zn?y8Ydk1#p4eI^cO8pwmVYBvV;I7BaXOFU$%Vm*sOM^K2cf@w(9b?~#eqtlKPZnAy zPL-zYd}#dZRNon=Sc#9u#UNFjJrGKK&pk~9YxqaC3Th;^-AjEA$cm9YN;fg|(_Hi} zz`p4|N+}&a8ux%)mt>soW!dR^qes-Te+@F13|TP`0~z#8faPkmENdYU_JuOR#&KDGZ%}Q2ZZ%9~Qv#?ta)zI+t?IT!0ccfP#_YZJD*0S@ut6p1YReHO!R}>$lJAA|n zHi2BSwn@f`&IIs&1C-L%bv)yCbcvpGXQt|7gtO*)OJxdFRX_t_m(iF}iaoBvK}atC zm`#7~`f2-GX8r2@??_PC_A5N!+o6vY7-?K z)gYsA9$zF(x1XV!uNV#QFhjWkL6dC{Q!>*{VAEBc5FZu0ok>- z0)|znEh@3M(`_}MVgT%XkijcYi`HORf)ZL*wq;#+!61JLkgsObPuDq+^!hz@f5U}9 zu4O7KCHy5D&{_a)55C<&f#dApTUh_JoX=y3)#TRm;2p_Z^;F}NiAn?X*2{r59Q*W~ 
z*QDfDWp<8MnVd%Jgbj`H^npw*09aQLRSVtGPAzQN@H!kkIdk@Z#f|#x0~c_~J&!?~ zuE(5yP168cn7s#UFl_rt@lm*ei_K?pUTO_Kn-rA3?9eMHo}KSya+~G|g^#!29%6i;PRKbc*iH|U8eZDH zyj(p-t~1b34t9WwlHo4JQjlW)*8i$P{HlFrP=IP7)&Z8qoU#b&k}Q$eN3NLnAW1-p z``hQdvinH{wQB#43|_O5N@yvchj?tMZPNQr5r#zvK;S0(8QK$}XaqGVEd$Ev`UkAH zAD{t)Z#_@n0Q>STCHGD&H@J6jRfsl&LPLCN2NF{X+KuO*-lL1$i!-2Y5e5P^wVtp+ zk33?RX?;K|!R6TPsOVIf5Ux#>Et};b=gD$AKt{A^VG(+rD^N9IV_zDkTeHO|vT7v< zQ5+QXS3t0!3k2(WFmH^OZMJQ|c_>ALdV8F~dlgxOSVx)XZ-O73 zl+uiQMXS#m?~_iSZpSk2Je_2x?&Q@$5&P)^?f9A3M=n6T09L~;ZEi@quE&KxZw>a$iK(d}t()Y;D3$*Z(Yppi?{us*2D5d7zt#~_o zbF?(iR*743qCwO0U^V}FM310=#s&DFZ6eW|%)p{ryG&YFe;_;IQN&ukMA&Karfr!X z*SocDyp>@zxWK03o_IDC+Nk*>Q@$<%t2$bhxZd?%Cxe0jmh778zXvIh9(Nwgp;Z|D zb9KuZjI+XmI?H?6%SXB!^)`L&4i98Zm7e1>8;0`56$Sq zB&JSyqiMB1wxmgMGc7Py*Mn<3LcULAt^Uq9q~A`5WJkj5a{kMBnf6!UxCwMZ9xRND zVXJ^BSJYDMGOxbve-W^SybiHsR{Q^F;<#$1$$Gbh~}TY-x9{D{}t zNVC&!2Q&ZZJ}4RvWFmr-oK+J)8^*cZ{6Y9_Lo6`Uzrh9+siYQB zgUN4gN1so!o(e|x*;f&MwLNV(OcrTu=Xq{PHQd&`w`GP=R!o#!@8CF$R3XG>JZQ(Z45Xs1Qa6M=*x0e+8MXeQJf-VHxmL#>Xn_$P5Hr`-YbA zVD`f%m;EF!d)ZDbZO=u?1Z5Jp&eNjd=2|{FEGe+~Zm3O%r(Qy2^}*Qec>aF*$2wS+ zbl)M3Q{g~7zUl)}DOJ71fxRN8BCf&>`n721+eCBm(Hex0hrct$9R{TezJ|jiLArMe z7_xK49TbzEcv)b2cM*UN&@Mf~id={s2)9pj@kOQ~%xKvkroM46Pvp%j4H(_WIyvmmJ55;!gBcg3Q^OB?&=pFV?0t>AT=o%1ae1&zT zOh8w%AB(MH;NC(av2Q>inwKRSYZwo(9El6`v=CyQEy(Y(JM#BkaFTB$$%|Wf+3(Ed zU|TvfFs7B3f4HyDe5Cd1e~sm*pU0`#sqE*GEs`3=Lw!Bcj=X}EU18n0GAanM8f*Kna;Z&H>4mfGCkE;`0yL~6R5p& z7BxY-+Xt_}Yk>d!39M7jE^f!d0xglvdhtd0)%^CH1%{<$jR8Y2^FE0@ioYu}EcqWN2iF;Tn<6}!P1!C7uTbyFzZ;mky@ z3!QpMEYE(!!TVXvnrR;4>EhuZzDUK@_wz9v~w5xuT*L2-QJ^eHz5vAi474bIo zU-*IIF{mG&qh<^;-fQe9M=bQ74WJk3V`&flXNH5&ag*ZR(1R?Tv<(4vcZljY9 z1L&~;O;dXWlR)fzS`kU^w5nS~?_Vuv>Fzsa60`y$0+!E|2*<7;RZ2TeGe}c{I3gOu zM^7K4yGa*c7Zvi~KFNB+Lbc`X{5Wj&W}KX zJS@iwUwVGq6n4d4=K@9l?Z-?N+$KG(HLpy_(8|&Zd8)H*cF)7p6vHwxOktW> z9=svusUDHbKZMno!_6q+zpn)?S1Y^FnIy6VJyM_>s9%x1e01<`Pw6$@Wy!=wn^AI{ zTWkUI08veZY>kcVnGgAkCXi)=6)mqB%ur6__PI{usS4Dl^YVQ;=lI)U^xJ}C;Y6+pqE6bMgg6C%{WO=RvsWBVY3+Q 
zWVRe0Z)LlOdz$tr;d&ajfU7T3n-?$7@vXtvTkJB*K!RabwT6XOlP@DOK_PD218JbP zgT_!zhDnlCE31g^Zbp_KIPN#OM6CnF%-{eS;Mlg0+h)WIS;}= zXQq&<2;^CczFdR75IRHcYH&*x<#t|K=hx;QQR-e z;=M}F1ulg7d-_*K##5Snu$}@+{ohKj?xZ0xZ=nrUNXtxSNxPrB7kAOX?PE3US|S&+ z+U3mxa+}{^4N{5P?E(Gz2QxGE|3)%}4vE2wZ#CCViCTMO{z>z#+di(v?)KB`Wjp02 z?;^94vPbmj*;Mh^dpuB*g_``mK!qB?m~EFAw$>#yPq1BQr|7V@RSMLGOw~%Vr9oxO zJ}2gB2l4tvu-q~OFTIG$-s$R{=|*QYv6FPeBjj;)a9Q;z%Jg5h?rQ~j*w9ywO@Gw9 zR>QM?JnA{NNjMD~J%PsT ze(5qI8DqB5Qr9?^!F{{6p6gx8_~)*l`uH>m;6kIiABiOVK2JCi3MFRt(pGkteBSd}xOT}gyuA4u$2180kf#Zs0dm5%D~)o$dNrMEFOD!g;& zB-``bcaeAUrp_G4B4sfihZg(&HVa;ui0UnBj2L+$rVDYH}Bn~?Go4b}4o)*R8m z+_;FPHl_wj^+xMMGLaM~9XKELH9zL`HP(5ZxR1$0Q9PxKWKtD_tqJ8-rLW+8!OZEN z{M(D1(vxpsENGHte5dmD>xjb2Bb!JpT9HqN_5IxIrYlBuu?V@);SB+O;1XC%AcR}PxyttbC8uAC+vyW4`%*xyTdd}Ll(;$ zNGvxqJ1^?ti_qB?VcU{2%iSlUdr>7gQg46;YgDi}ENXT~Sz+Jz0jNwqjOP&GFadsM z?4Tu}$iQtzB=SMQ;fi_ujvogJr8f{JwMml| z>Xz84Dt^@lECE=O2q)_r%AWJhZ7^2scHl*_a(Ov_ELU5N8JCu0wR6yn$8-UGOZ8h9 zRmG^fKJum5!BDTWri6;1~>DL6?%yE6JomoM%QLD-?-Pwh#ox< zPKVndSyEsolooK0J=|SDjh>NBLIC4BZJXS!uTQ$?`Qkv0pJ<%H0+nnqXV>)tT9!QN zjPT7)qoA_Ab+dLvT!@ugoX?;o0nbXUle5#7rP)##6VO@-6!Gf*w%<%o9w^h^L7Qfa z^%v;8r60QQ%^Me+U|uncEsI5}QsH~KM}$g%Fg_DHd| zRlBBBsKz;|k53QS#|$T#Iy9n>Cs1?haMW4R4bOC!L!Py_AkR}GWV&VN>sZsfiByHb zr6P_5NWA0JY-pUo+IW>HS8sW1;eaY_>x=&v0G&swmTU9;FR(!m4B$ z+8N*__aGR&z01?iAmVi=hIAOE02Y5@$}UevevDOCIW68X(#$FEn6suJtLEH0FXik_ zk-izknSNTx(9%6Ic72ud1VdH1ODGLZa9_9MQWO@Mp9jQx6dIURabO?Db#$sNQRz18 zkTL>kvCLCB$9HzSyQGnBxfSGJg<2!UV z3r#KI^Z^;m>Gv@9g`d(6Q+D{eShI=)? 
z(G}!$D)Jkh7`iX+Knf^{uuY^PqJCuaM6`1wHzOav(OIYfcIjsMUF6e0E@LjxCyOjN z*ApC5vG6Z;e)wkfn{XZHVJ8yC=c~lMswBnXg`@nd_U7olJXKGHy6<{re%++KMn`V{ zcxF*6PhO~vD^2j*`?~+Mh+wJ<*o_SLFwQrD5!ER`Ipzs<4vS*@<*b^CKY_q#!P3mejeFNxe;emE=Da#0 zLHji5V12-Nti9B=%yVODz34kW2x79m4zKp}q%0LEad<~VOxFlR5>pw_ry`9Ew|#m2 zf&3cYNp{+>hs@M~sH;R&DK zLLPA$s+CabFXXI5;H8x8=&hLNDgEw~(AmElGGyf>B>tx9GOKlC%)80%<}ofJipS_3 z{t)FwFy2Yg)-|@v|A!yq{;Rn>I5Zk-03F{&mxrA$vZU<=8e@nzbRsDo%V5KWCCYAepB(duDAXM zgNE&>(orCC58EE(9LK@+@PJeU?I=uWLLn7Yk*|hWPKMg?!n6I5Jgbp)YCW5_lKjXn zV*43eI)!GiJIvr;RK=E-?9Vbhg2cu_me&0QgG}pvqh+!(+2}^kV&qF9}81(NpZ&r3>wpd+&mjBCJtXCeDyHmC6A$Vtix+K(O z-CU~GoOLhWVPLH<1gb^}e;iMEB$BbjKLix_?)IsXgZQF4RrbF&C_gh>rH1BqGFNr+Y&xlftP*|kQTr%X zhrX#}P6>Fd#8sm({Wz4SZ-)tXY^B?4>?AXsgkSdzA)?WWKd{oSe*f#Oe? zGoUMZS5hxjqgW}qIeSMtBU{VGXF6zyfg3{j?>=8bveVmuE%ZeOV`2Z5eHPw=m2U7Q z7m6|YH>CGF5I1FH_~r*?pT`a_6~$r&I@1YiJ-}jsO*tzlq2fr1{0a6e{F;=TDcJ#aB;@XI3#L{VgWcRQ zv?fs6j{}16BONU$F*$QV#EO;S+)tR9m%;%k-|!t+l`0UnLPh;1RuEAJw#GiuG8I$! zNcvP|j~zy5_m=1hgl; z1&VJY?0D*Gos;USGY;!?wCzTMTYBk%QGu`1Glcd+>X%$w@gi|0BnAr;VCRRc`$akz zZlqz0q3+RZ$QL#loT$K?vX6t2dmvp^Zq^`b`UKbRO5fB)HPN~PPzGHM&+>M-C+qlZ z!l;`~MhqOv&S!AMKS$5K)Ouy%)qp^C!cc{KU}r-Q3u`Q)M27w-wC^#_9^MqqS`NyFle*v9`a^IB4Ln;2kzlB-LHkYQ%+Wo!{$ z`#l2YL%goiQFeWr747=xT=gfIzS}vPM(unPcn>BUVy2pku}kOt38FL9O7m6 z55ig>12|-h+9`yy<04DqV5f=OOMM543c4OdF*|5$IGs$4*lbTF939VRu^yX=@uNj_ zS&6EVyU-O#8XsDp!5Df*)xHVzS7W~|uAkTIAC*&%_VW_2^19InRtF3H!!M?pq*Svr znou3}%6;P;`6(!c%*(0H1;UDv*)g0t?*R>8U-i0yuhS!tX z0GLLYvdox8qZxx^RYfxq8_oEig{?@Wdyh0{%{Yb3R@TD+P8M6=t z#@cP)(iUdpc^xST#YC^Sj-dV!gj{MSXw3%3AQH_(k`V?ZY0`G4+(P)^AEXSc^?1X0 zwE%!=Z$@?m5TL_mPvv&AJ&R7eDp)i^WQqsGz+2I%PF0SQk)5<6Tyd2q{{q)PbLHmGahP<8X~o8U@Lz zqPi?)!i%)=3%N%g+K7hGtEG|}F@upR`K2S+C_2W5ldr%q9?M=y%&leItGJ0;>Aa+M za<6zFDFJL9Xxquq^T3KL$X2o7MoCSB9khQz8_4f#l~5GqHlYU{UP0jS|HvdtfurPPLzQKXk4F%%#{8>05*K~>T z#e8D5$70e-G0ITnk37KJDW2tzZJCyn&aT+U8juS& z`*s#S=DO~Jv@*k(NXe}bV!2{bx#*=q@dDWxi3>2HzZmEkC4uyBuVdRf)%{8Z0LWxv zExCOX(A7=k;MMu50wUNi;Fap&#PXt64Qp%jx6>EEr_OT*n$|~9G!Kv3YBD&pB^xDc 
z-h-Sw7bgT|X;~}`y*L3guF{nWO_{8;W5?^;Gq}#2uB_rCGC1#Y(cileQH@>yNvAf_}Z8-C8Ro3$w1=3`I0*`LipmH$K@{ zi6gyCc`J8R_7sAaIR~U8*V+Ze9;U2Kf~rb4WDZ3{rDYVloZqV7J6YxT!ksbl4+qs; zI1@SJc5ung;Xv@`&OY(f^67^6FC~h^z_p6xc~>Z0Iq3_LD=|bY72N&%X#FUJt6cyk zf!!<gh17nH8$GGM5{rCQ-rD2`mij0QLyRs-5xy!Z#T4}wN8hI8Z8PeW2m zv_O9*``PRI8D2HttT;3jxl$=QS@N(W6tT>-s)ut)wC!jWgS24d4Gf&HO=p8nKF=Ec z8W-}xk6RK-3^q`a>g+4O&>oNl@kK05R{z$mEq->}ZFVl4`=*8{p%&KHybH!)JH&k; zKNSnq>!oXug=HO%z;q*Yy^q5wm}q0$Z&i;XJN2VH_(&H==TW1q z0aJ+9vM^+o6(Cj*^R$g>*vtuH-fr%q#U|n=K1gEc(#2=3&BdiU^&3AMo|8yJyR2^; zM?lV!W#tizO+8o5zHdD)P2imqCeLibF4F`t-JWZa*hZg`_S8q`vl;BS#By9ORvGr& z4_x(l(+gyh;oz;V(#7<5&#O3jn6_UH;A7< zzqq`Vb6%)%M(|GzYKxfE2HW!(_q-NQZbM7BydP~VLpagAbWXSLf({~}PtjWqU;2An zu%iwGp=~BG6YsPqgCxl^y4+v5BLkM{nRWl1=RR1$qY-+aDa{%NCx_gUS^6C-2q;p6 zo5DA&4JBzTZHh_Nm-B>isVNEU!B~-;Cj^h4SJacf>(nOT+0Bv!WbT=qJ_q||cC3Nk zA7;WrF`I=6mCDqa_KNAk%%hjvmjs8?zw}-p2q-A4Qnj9i66CKwNQrKl8ULoU9XOiP z6{bfU-tU4uJY)cEqA5o~>}0R!^jMM(mF~H~PO1aYj#ZYcZGxvGVsZiGhynNTw`Iq< ziL&3k6FG+PHNozYVKClIlbh3Nyo`l`R<4DU_Q?t8<4H!_cG;LT{I~6(gl~daBC1>) zkzV}#EL~+}wDS3ExCO2k=>pcd1D|XKp5^`KD=ctJZ0%QtOi8a8bTfYny@BX{!E0|H z)T0>k@%LUwV17A=fvy0`t|`l(Q@%ydA7V%y{C_3Dy-{|^)nt=Z>u0$2`uKgkWlnkp!oz9oCzHE?Cw{ zCxrqk8sHEPa;vp;q-!zESlg@D%xSog&I!&hv{d+jV^X&&iuTi^P+S@1PBR6vq|Yf* zt^a+#&2d~165v@Lvo#P=vj;+A2nUC6Ptrn~1RRRxDyGw9)%jc)nZl881N$-=BD4OT zjK@l)cn)v+FTo7zp&METNxax;8;`IvzqKz&LGX{;{@qJ?Z~++D`SxX4|BEGV2jPzS!mo08 zb%nQq6{;W(bW>r((BYOyqX2IuDpxc#kibD-VwM?CYSRPjNteS<)>h-IDPf)_OGQ<} zKTS;?QG@YSX~3)AIucp^-K+i#JRncgbchNrAsZANKvoYOh2@&YrEcXFs|9ct2cbxy zMoEPq{&}g$c)~MItB#%H13Y*45jSSeIyoqA81p87(sb7L(eNJ&Jkd_$I+kf1XN(ie zDp8IP4KD`&kP2P-&$3H<;D{m3tG7WBFRUF8Zu+S zdH}Ayl}{yLAwcv+Gb5!yUHeh-llBI}vbh}~I}M4Qs6`UqtC;JSp|bkRWpvH{BL*oS z;Bqa5`RohZG6f|RwpX0)5yq6o54%v|`#W4c>>m6y(q=jSpm;jph{&*Wc}2 zabpi9AxwzAkj?KC2=FgVnr795;?+0+SF}_WCQW7yFrUJJT9y>d1tl-PAW)N>w#ajF z32J(rD-X_X_;Q#+XW`>$F0b+gQVD&$rg-26Ah=&$+4Tg)B_>o#$fwhjjqF%HZ7OaC 
z)^8vG?Y{N1FUL(l5<6&e-5o;*TBkdzHtH_9AEBA`Nmzzz)&zf3vN2Zk2BKLGwZIh%$zEY@+`-d~)^!dAWm3ip>^4-j%|E6*crfTv;ua`xx zOLNR;m@yS(XCk>y>mNU<4t8XZ#sNkc;G5-;l%YGc8o1Nm}R?;h>-;qQ2L$Y1Lt)<%S7kS#%jG(KMV) zd*fJ-Hl*OAX}hJU4Jvt)ah75%EKFsrBk@SpLBB+-15~NLWk`C(^u9SN;b2uS`-ZnV z(QqIxeXDFmTMJ^Tcle~afV(Xz9Gy3A5IoWF{h$!91(>DdUH(XGfokLh~ftSX_;Yp>!(|RUjEuLizm0V?M2M0 z7pcYNK{o*EpqaV+qN>@LMJa)GwgaYonHgIQmtB=Z7F*}Q)O50IT?jPvx%qB>qUJl0lJnRuul6|$h|Vx{me$l2a?FA` zS!v`yA{J{CgMr7OF>@2vh1LGa)hgv4_?Il=Bv}mBWCAxR510#?iY12z2aiG`!~Kw) z&ADVQgvLjT>WJ?ub?`ecJnP5o$z?vTqTUv0R0$s@$ZrU%o(QB!QGu=jHeHbxR1@Xb zHlx#MdbEtCuiiG`X~N^KH6x_VkNS(fc{#)HoA;O_lObg%BTr>DXWO1_a5G+?_7$!oe36 zq|)^fyuI^M<6$Qc;t^rvO*xAmm6k}%F{cGL2`9UZe%-3F8x6!O=DE+rL0a))onT}AAiPX5k$AdmGoyiBZ~bU z1DOe~OHS#nGh9E88qBCuPzO`RL_oPf9!@X#sfh|3uP{nGWQ^d^mKig(1u3b>Ocd%H zHaFPA;K+~s9`FRIQ6%5$fo$%>CxR|34Ffv#rDm&?pYP;%SBG1h{NuBw6^UfwO20?n zCMNT=39Yu=jhk1uMQ;gVKWCEq5mLuP&xjR+pINHrAIole`!8*((0&bIjQe1we*&T6 zEKJVRUa;kWmohyx@o<-U0FY)7`RFNo z*4PiU>ypcS^yNxrA+UcCofC^0Gznkarm)E{#hky!eOJ^a`FiY{FBqU_P@a}ihJ@5aum+UZq zs*}!QV{%3KD5CDtXB1(SD*{xTJXx*%V(HVEfmPF9HlZh+eUCk|-fNjn|Dz+e8oeX9u;%09-SUe>m(kHix&^u~T~2&Ov8 zQ}59A*m01C(n9?5q+tk<$@^v(4r&Vkj*Q&-;EuJ{a6iM_Zqdl?MK9V-$RUiAz3!$B zKqI^8YI|6(ij}~ko+tH@ z%dXnTQQcQ1m-$RfTC!Z_jx^xHG=z<}AR<)y{g%$zRH`RA%+LF#g3w}q&V9inf@G2+PTv@*Da#!tss=^87NkoR z!Bj8{^7_gHo04G*n*@Hw-_&8O3vZTf92&QF*vkDWF{@!MG>0!!6ci4YY7vR0hko2a zLY@jzy~6I-LsTo!l{eY&qfnAqqz;$%VgCaQkP+`$?Yriapbl_xm{%2NdzXzvD+SWzpiatFmtm)JqeN%vX(GP#)GqkzU-ZAF#TqjK<{oZ2UKN(0C z|Bln?Rzf743>N)Q)|f(C6*6Aj zxLrj!+-Xef$$wy{@e?yT&fnQRKE&R+uLnODruX6U*f=AhNtz7o_BqpywpL!kWeAHl zSs$@RIQ}((FbA@lXO;seH1XY^s>Inl2%|5jVoZ^a1ZkhNjZf z7gu9!+L;3{i7+dzfv08j)fXMXa5EyCDufX4V7k%*s^5kbM=ad|EAceo3l%_2ocwcd zh!A4qHON6#h!fAwUMK^iAW*BQIUJUL|3QxxL&H;KMq+xIe52og>W6q2B4vW(5}hnd zP|G!JdF12sI4lyxziWv5u4yKHaV)*Eq<>tRzpMhKmk1MkEe#)-!0Sf7b+cEu+=n!% zcdpqZ4THz}he$MxDQqpbR}IdfNshgR<^MeKC#1bMcv>#$F7IurMI;b+x4M)GhM-^1 zxszY$@l!nxM%L5gi~JPNJwu3RpCnr(enrkJYcp$xMecs&`bPKLv0r0vQ7!Dcro_ua 
z822xfI67*qg%Y9?YfYV;elOMcOs%#+!0XddSrdh)XDDq=zUi#M+e=lSyifxJk54>y z@6`2cLGRQ|XBbj)jQgrW-%#C@f)pD8R~CbC=+D6k)(*FmGXe|G6<}Gz_~R15rNAUU zaj5j#xbqV$Yhz@WS4f+FLl+O-L=Ju;yI2Wy1H#HDtAdy<8r|AI?%-pw`@w6kN0fbz zu}Au1DbXHa>Ypd5m^|#%ux(>zJkRcAtIa0*>{%>tMLc$7h&2RAzj{?qxVjm?)tCR; zFcwkaSe3V2t0Z)hbGTz$#)nIKX&R=&dY-HBv}Yt#SyDbO>y!)geW#t91;g9`SH1X6 zOoVV2Z;xlX>Bp!;{~tFYdNgn{d{_O!d<(p`!mF6}Zq^9Y;V8aaEKv!z|5}^ojJk#+ zOvcglhp(Ar(y{kGF0s`p7h*qBbrl6FPDjBaaMmgZC3UK(1rtp%j#`L&TFZw`gmca* zdo->#z^0}^?RND1hwmA=1}q~46%NB1jET^KzjL1a2LO!l#&hRhYz;&hW{mV|vdu?W zP|6f-ag?ey)+2y<*!T2?Vkq;IMZ}i}$*Qvem1WTJEJdt6>>qzJ$*_n>jLJZ7mu&}$ z+O!jkG7O`};Hsf;Q;tlMOSYOrsKCjTJWhN6X+&rzT}BspG9U}ze7I5{4;S_ReXbV8k-k)vIkL3t7kchp}htGmYUZmNYOSbT!JAcs_$W&Mt`(I-yWTejVm1?Y#V zPWdz#{qd_n`6`PPA)$M@C3vTKMyptaZSIq}%^9PA&%;?!9myO*6=xX8QkUxa;vfSH8kE`?vytf#B_fpM zoE;YVEM4y#PHx&@Ul1|f-;kEeGRICk=s=x}-wYh{(laYHf$_|RFz39hxq7*yUkfmm zi^#Io&e{>|z+*f?Qs<2;pz?@X2JF;WEX!=BwB=lWL=mUAq-zSpTq4~&+4W1K zSPE0XSdJ$K#=l6dWCU#=yNyxS3;_K!=fwWw514*8f5}d&lAI>B>7eoCB3)ltwY`8# zqjZ*63?JE(kVEz@KoKpQw!)1cXPfP74j2)MWu-1r7&4O!GWMNv!jmPXs>7oZFvOzr zFks{dhbu_*4NooV6{HmoeIjVTB^q4Qc1XZWbO2eQ13VzNPK$#Ft*K>mWjc@ru9cGx zTjIV$o?#MvwCQd;x4WI+Y)eKUZwj+L`9z$sDOr8lQn!Lo z)uC_s&`@F;qE&~lV4$F?MaF4)s)G1g#9~rgYnAB$IZO%5+)Pj&sF<3gz3oqlxYLu2 zO$en@s1-GC{sylNY=(9dmekJqc1qC_=v18763C*M0bR2&r?w)-7vdnH>5MlvcRTar z1ez5BHW2XVy!B8h?tFbluFtnT#ETs!U# zf~km?YQK10DwFTo2iUc+p3TbdaKUkkTp8^!96%PId%)ga=`NOH5hdQ89OkyvI7Bm# zaQ>dbH~l(L(nmn)d{13Q`B%qqHfY^S;?!<-V;K0?4Tdj#A{{2{^F@%#PS{PifosAK@e1Fx!3c*b`>#v!dLU0LeF zU2(m>Ree@`g7xisf6;0AIva*NDJgN0cR+Ea@~<<=gVNHt&L zA7(ck;(^Dv61Eg^Ma`H3Gb$<7@I?cLp61=7SIiupGvc?4Pqj4i-y% zk2-c9Qb;mWMk0a)59A+I)xeWyHHM-S`1TvbEu#25pSh1L((=m4zeh=qnZbexe_w1( zg5*aCm}4beENvyEDn(^hRP%x2_RFa4JrsI3bQ49+HLAM1afg&|&25`eA*EweTr9JU z-HKMXJ6X$v>F($mC|7QPi>c(zUJ@0EFILN!u_oI`U%2qAQv!b%yAX!og@P%`Hsi>$ zNaiidb(nXTvOsDa;X0j5Y$SECON8UMT-~izYRNdYXxcKEurA$+=>4 zwm*n_)Zacn%iOq((gbAbpg`n6jiwHdLWvK_Y8~TqpY9hQpo65V==Zvv;r+EaUgAWE z)gt8&F!(r2&vewS`9~O6`)YfW<*jk-^2nX(`OIh1Wt~|Z;MSp$%W}^cR1#S!_ 
zh!Ra}Z1|7_u#x*$x}OvaK2duok2D}i?c|1@UECXT`2rc#%Uh@AgS?vmHir!Mk8 zhanD*3(a3%#4`VV@&a0lSRCPfkrc+3t z`;tTuzF#n@|PbE6Y-V&awR!HqGGQZg5ivBP2ex(Yb`CX5VxI$~Dt~&)UI(bBs zwVg<_r4`=)h+p3Af5?2P8ko+3gAwr*-9~@*xv8lTxtl)VU03|$evbrMWPK;*zFZWd ztHKbZtTfeLmgh#_wazo+iJMHroazGe44AK9ADK5@l?P<@ikdVift^j4NhPC}N<&;D z4^Rccz#W(qg?2yuujopyX*^l)XCy)YoNKA(^%)i1Jrqqu0;4u<0wALcUQy>^+w?6B z>TJ8~4h1AtbFm(4VPe0Un+2=I2J7udfi|H_gGdVi><97DSzakD*1mO7X!r)gk58=?HP;nhPA411`bndMUZfaOU6G7ObKM8yF+^i3?K|zb&6NH23!Sg9=$)#;A72TepNh4uuZ5d0bH#2{f+r0{_mMQ9 zeGFz{gmu-{Ge*SGI>L%#g+K{M`ObqzFIq0Zk7db?D`_H=BvdFlu*CZ<0Gt}XovOdL zi0r1Wh#)a4M-@iSd^V=gp!Ys87|)k9(pE*2KyD-Dr4j``!*dD)BL4B3eJ!!d_DLV6 zY>uC}RdU=i+n(v!@?j`xFlP-c>^ct`-u$+tqBmoR?xH7>ECcnELnCN#=O3*tE-XsZn?{5}00C#7y-jdjQwN{sDa zfslJgMkQA%+t%lu+2gnCB2)MGFq3-HnhsvEY_R@*_1T`+FvaB8=F>t@zR-uMz1;Tp zmGv*}7O1T^#-_b0*lDQ$Mc}(#ZXgj{Q7RP4hPCsLxd-U~s<6R_7p)LK6&QC^D2Qa; zA(@DDe{&ny6V>xG{Qr#Srl-?@ zGta&4Q^#cyi)1*|L$Sb}z)?{RjC_DeKM}mbX4Sb)s84hc=KogeT@Kkp}1 zR>t$TPjjt70#wW^(k$sB<~~F}gqi+=FVL(YA$rkPI6So^Efg~4(u=jaJt5t3HO^xI ztLM?Y$Z&^#$BiI|f_C$kcEt733|?&c<3mLb^E&EN7SXCuYxrBdjk6v_YA!d&El2us z*pl;1d)L{7D|w5IqReoy%cCoOXgNe!Kqrp141VCNKzVwE-Cop_AJ>o*D3YR{y@mUV z9DE3$PMQo;NF#Wyncqw)QtZ=h&X--0s4y_Dgz)%=f>w{iaN9ORQGrVQUgLU6MR`on zvW~li9(VQ~YF^J+EAcBrPl&m8wvZc5Okpy`wdQCGq$kw$nhK=&$f#emIq=tn$Qb3 z0on+>U$=+&pF!~FTX2hRgh9X5r8$Jd&18#st=TOm?sE-7@_k~S2y9(6^{j$FCTz-B z5I2#>RYdOkD-KJ=6w3MMV@Z1l-yzF6+fU6Q;!IIM57ZU6;zjJOj%RZDA($X5vzpx9 z@xyuVYgg+AcFh>yyb88Iv_S`stPLn(q;ox_1 zl)iu>O94QcKd9H#V9;+!x@+W(^?Sp1m$xY!l>uQ+D*Lmr^eg~b=CVt%4`E<^BlKK$ z{ONlc`w5Ga*dzo!Mv19p9llhJYKo^qFA`@pSF87+r0Y>Zf0?!Fp=^P|t_^F3xcgvb zpU(_&LOJQ^qE%p?oN;8A(5v3Y{OquuE`2{#VEF@?M$BduxUi4Il|{_qRmZae zmr>&A86bNcaCH;Jbh!H=rj}&S3Qyl38yYaSAJ0c;$vnLe7aD*iP)eghoXbz$Nvb-( zV&nKnal*|0@WlL~Evx!C`|Jsf6+VB!tek>S%TjD~l!yf8M0TB3O@cLxP5~A0(H7!e zp{}VUWH_4DKYTKtSBDLxrxWvqPqaRh3G;EPhLL}?a1Ybw-r#}^+s78sPnI*r^l(YN zB3%vaZx$MG#_&$9{C+M-vnR&Jk>{z0o@_&Mz^k?uNWzg40_S$e#S%s9cxctpXzLQq zs)H;$vxQCr)2FZ+IXGoH>>EYQu>Jskp}r3_mvIWzndoc8l~!MTm~-?VVk@yf1DfSb 
zr9~X9f~}eLb{T77o!fy?&7h=}yvZ~@3E~`f4bL(CjJ+;X%)UYBy=eu12$Fz?sbFzq z_D_)95#OSqkRs=%=Wf;DtS*EOUcA>*!?iVcVwTLTUCstPEmA`49HCd5fnJxHy`b59 z+hPPdcRd(esw+PFRXRx-XPB~D>zMMrWIfPi9-Fzryiw8m%FM*?Ip#v}V_Zc>6@D@# z?SGF$a7WcTY_mNMaeO`mX--d@)WaYJRRUQE_lZuQX_eGH!H$Z8_KAXqs55`&9zfm4 zFP2a$UW)B)?ae~tWSynlr~pIT#LT2hK$=`3gLCS)tcP&YMXBC4pd6JcMxP-Ij-oyt z{Q?4F@&)O{EzIhAv_G)Or$bJo-K(8=Q|;b%U1&n%iJ)i3JX z>%Pdf0rc!(<`6@;4Hal&Hcv$dsV+T>7{oW%%uYoFUS3JH4j%N|+=4^VB!z3b!#D7T z>f$lIfIXZ%y%m#}%0F7K%(GM&T52jL>uhF{S$q_fUB4Q8EQ~bJfwvvU@2dvnE4%Fv zJw+hh{)(PW40slG+CEkNTe}k5zOk+1cW`bv(j|8!WlbOUJx*+;@)ZNAKar?ha zH?5FgLWnd}(~_-JqD8U&O|*B58exWBi5|{wo=47V!y0DcbWuYKWpGX>bEVafE6Ad} zgEKPKCor4i?k=a9NVdmmAty24{H6iu&N zKc!0+4T!?XNAlZwg5c(}TLGU>6i0zl8-?&t`nu2hkmr{F#n6IZ&GGt(MAshL+_2OW ztzbv^B>DS?m~qXFBjD+pN#^Z~en5dDO!y5$W)*Wv<-1HTpq<;y)%wO;ByDA6?t_r^ z!%3BG-z2On>$o6-{e!1xle+q=3m~?Gc_sedlc`kWg?@*dCf`td@vq<<*+gBS#{JPW ziR9{{lM(Z|CC!*ZV?F(U{0l*|YJRy}KbfjE$6gne*dSVpYCG_E_QMan$4YG*RIUQb5Z$)lN{%K5EYtxiI+7FF<$CSvUw#{YYP z&1V3HXPE=~Xi&Ka3*0Wpq@KXwoWd86xlMYL7JA2MKUdhFZZFXi<~p6cT^378G$wQ? 
zKbqtUPjJl|9&qFCh)~D01ddpB&8_-kBwI0}0Wn|j1)nISa?McvVYc&jRJ{J}MaP5cBBhCn7;NK3WqNxr7XLCnZ#B?71QZM3w~b=Ul!vod#uM#!KZZ zRSjoy>_pfbb#TlX)~*0o(Bh;DqN6BkqN+qw8{>w-w#EJgiou$rASXvHUEYd3 zuz^;Ia`Q4s;gZyOag{2YcRz7VS_O&qvQ>II-6J(A5Mu72>Rkc>qygWB$;BX4I$Wn7 zZ9d|v_+i!@c=v0)U8`vhr-`u{Ja4C1B?Bz&G$sFld>6aYe?zE=hvkW^mLez}TM@0X ztB&ISmWqY+HR$J4`0GJM@Rp8a;3$$-<@>?{+YXT`<`Bd#pOU}=uGYiW_EtR7W8f|MxbVYi3cqUv-zPkq{Z=DiiG z-Ow#(YqNek5eHip6BI7Y_hBe(CQ>1F^iyc=j~2o238`PPXTW)`I7`Tzk6$P|P7Z{G zl&OV_uD5a+OOUMlMQaRcw0vbY=Xx{RiG}8sD=eRYk~~{rB-8@0PGMuHpUhPxQoe@K zs7{<3b*cg>(cJAG@GEHv5UWMtd9~+;7mGW+FTp}=rq0+ypFrw)(%If|b40guV@wmK zd8JqLW)=hKZbbPHnSpmAQzdE7!OISx)^-xo)5u!muAK4!cI4fOJpc=h;Lq3c%#K0)Aj&`hWN8ggbzk$C8o*)H3sT}`mwnJ&Y$nJ&>>t&+KZrWp2 z=&Vpp9>O&bduNnm zESWjmB9PJ--}H))p)0~%TZ9uM7ahv8b~T!1y{tPsERWu&=T(5#iq@)J0k9wP|Jk(n zRB%fU&xU2~^=(=+xK3jcOdE9u13Z~Qb8WH46{lt6_(SHqM9|n_@RH#idv>F;Ob({x zW3KZT*9x0q)SlQ*2X;GkXs6A5LKGEnTYSa*$^Ur<0f2u=P|xH!HFk_qx{?$HJ1$D3 zK`Ek7n1j4pM4p({CDKfX_VFZ{!;?qLhs3tXXTZUlwA97d-Y~Ju5{dMd-nT30MvV?i z39;d=;?~I1&GYkw-Tfd?40Nf+Kv;%4p0MrDn9YLPv38LI^B468XWjoFmTS2*e+5q5!RZol|ss zedba^>Ze)50^m8^!F>(DBWn4KLlEq`g73grxh9dMm%Km=U$TD1h7?LDibJg>^X*zP zTIe@y`j^1^<^c-ZPnARjJON;E>oWx|#ltW^LYrjE%w9>ah0_qn@`T=n3UAo-{;25j zjOs@w;s~ZUXMwj%oIVfnOq73V@R@yQRDdK_>N`N0vvNM+79-4jEZJai%#-QMU+1W4 zDSiR-Y~RshSW|1H`m$O1eUISrb@mss}$lh$xFaZ7bJQEHgnjWjQ=`@hUG z41b{}Lg~6+L=el1SV@>_tYo;fxTDuyX<&&2-R#BJjKua6G)nP@kQMPjpA3UUo5TT@ z2N!y2s6@QtV6}l*O7s7eQiV2Raxf#Jv6jAF+(l>^>MziANsVgojSnZ=l$rIc4e#+5 zjh`ge^w_r&gRDJCq)m-3PKx2VYjGxnGj{ZBiyGYH$(=&isZ-%z_cs2;J4v6st^7#NU0T7CUVHtlU|oRV{(?^ zB+F}&7&}p|i}IP`E-w=k-F4iJFR+;x8}oHx9Pd(XAOI2Gq-_}tS7*vJ*yg5WN`qeR zmI<hW)fsL~Be?rR?z$!@7Xyf3~V~|Ap*+ zg!;nJKK*lB*F(7ojsbF*<6tf8#&~#t7m`3qDwn_j5jM_BseBQqc$PVT0C-t_L=lgl zSOB;Sft}4_UGJJvE80dZP;ln+&4(>tsB{xFCXJ!0tK&@(wzC%QsdcDZG3#m7gVDb>7jV4* z2`k`sQ#ZZOPVc5jjWl+hS@=m<-_b6;3qG+`&T5l8B~1v6)1aDix7ZCqL?K5`TOr*Z zCS@Qu{32@=hG+oeTxudfEb7U*Ue}-o#O-+L_G0<0jdT>iEYOazG=%I<*ed1O{|6#) zSR=qs#Tj!CsevCPZ&Oz1RbdodTo{Gf>rpP62%qZjKZsOnv&H)oU4I9%Wyjxy)cu2< 
z^TiZDp66q8>(~bhmM`Sy{_8W4W&!@(5YlraajM1&8%_IpbQR0YVuvS$cl{eSyrw=@GF4-8oJ2k8XlJI3#b7T#gK^JQwu=^!Ec$vwYD zpV+}un5I&>{~|@`7cu)eo0A>gW_2}E$~iag>Rv0!DZNEv1f1}1i>JQPAAzyAB zAmZRDqHl;^Z)DD)_j|rt?3p$HqOMWCw`4O(hkwZf{`ILx-5bA^6OsQX*c{rU!dJ?R zvS5;Nks)$EJU-U&n#-dB(sz8(*`MnXt|{f3ijRjcv_4RsVmP+EsexJ4x(|&FdDziP z+xf8*b%DZXJJ++Ux2HQ>dj+4F!N4b2Miy3|oY)|-zC2XBjXfyfbx(lN&a531(=n2C zIX8(L(OAr0D@%5*~cL%dWC%3yFY18DQk&Gl4!Dj!zyBq!AsaD#-`b-SMBhB+Et8uqk&J79_z7xdP z^tBYVxT5mRl(bKQSfpr63)HW~nEGr_uch&>5$Pe7k^{^{H&+RO+b5LwAvFOhq`nC0 zWB@+-NyG*du|&J8lDY3x*CZ6Q%P#0~Wpyoiq{j(U;5RXm#nbw8Q1;H3Yw2WXMy*kR zs#G2|7|fH@mW^)dJ7~*Vh+U2ZDxqW5U{M8p5ymq2b%>~o8T)zpKPKG6hvZuW9tq#+ z9!*sU@k!Oj#uviST6c7^zdu2Z(eLfv5FbKPlX0LdsNI%tDeJKoSIMbE%=w;%jF@MMCqwy9#BNF)e zKAIyi`+^*Av@|r3smAUq=1c$0IFVyfY&8M;6*XcC`jWXYr6R28dB(8$Bwk1^hq4dY z6Dz>2Hqpe!fFfCb(kYb}4ZVcJo)=Vc;At!l?9*~$CW`mZ(F^(0Ugl?4Z80bM$T1AzDK`}+Tv8XiKQTq6TKP)b%47sOu+2F_m z>mUYw=7CJvN*#-Ha3A{y!hQBP&BDT#fxWBtRgTqRX+zJEqPFzUt{*rdpu0Bhy`i#P z>5w!iU2ps;)>VVMNGS;|PikWfi6@AyiJhv59R27uib&~b^A}07YS*a@fU6!K2cg~kg@kMRExss79$!Ot_A^H+@fWKk@=Ld51 z-pV6XQe2gWeMjanBsW2il+A4L$eP(>gDpR}Y?fu}(s|$RZavv-(_0eR6XH-webQ?h znu04t@W9X{9o-q`;`?2zS5hHX^8g)fi2qQb-1a(|D@spLO`H9$ZM0AIK`Zwcg{AQ- zuoNenWWKxba4$$Fqky}ZtPNefRCDP*uqZ5D2sj%z1P|#JBDL#}-wi!JY$#E|Z(eJ6 zM>WfE z>2y|kSdIpP7GPy3aXo8qAW~ZDx;raz(OMxde|CZxKqA3dO?GywrD-E-O4ShOn!G!+ zV?YqCNad&gY}W#y-A(6Cd=et6+pk1W7{Dg{N;;LoI?uEK5)%+-&Uih}XM&CW?ZIn^ znvmDC`@oflzN@n`K|f^A!fj#Rmh+he`BV>~7>Pk>BLeo?(3E`KdZ(ZZC#4thY)J#!hirANhy~gv(V&r}m}0f{ zjcJ~#I8FacE#ugp0z|6Lg=gWnj7d?;xmF9=7W;F>@F8OYt6HHtY$<@HEITBO_QM}B zf&FMi7t-(IK_xXYZxgQI;in=i#ivVS03|@$zsRhfelmXJQrtm9^0^-C*lWM|M)XcK z8I41#PoVT{+Oh?FYcp`L!b0LzF3`Lu!@Ehp1+D~cd3J^I?$eaA=+$UHv!Z~d`VTb( z68Q&6(H!TKRy1ZSu9gDzu7APrtuJj@Xnb=aJEX++c>r60Zag5;>;PM{Zak43pNsZ; zPDWPZjIZESsm55bZ*a1$JA#IBU+)5})Vb*^>Q;fUtx2adsd)6iT+1jWsHsH#iR9rO z1ce)3L#YnZ#kFcvM5qKM$bJLDjmH#|#@z%eb?l)AbY!b2ElBYHY9aZjVAbFNr5-d3 z;2)1YaEF*?rQ}@M6J*}t{M+|{jssU~&22OA`~-wox0WST)NZ$?u!&lR#UPXX_5cIP zCbWFq$0Kh7RH$j8Uj-q2K8g&EBvH={M{};V% 
z^L3@v{acT30LSF-s^_i3c#H^(6IiU>QUK9ExtZB<mCBd9s3FHEF!wnikUsKFGpE2m4=yl{MaM9l8Oj5Q)f1U z7jQM?hc~tj!E+~6yu#)O#em3Ug^9^U*H2ub8iju&W4oHZ5_PIw(2Tc;u+RR_qXBTz zp3(e{r|%6o+Z6IV^aSt#Cn)q#)oPUZ(Lt04P>lW<6s&}jz6=&3XL5qPnR}G1-u|e2 z3?Th9Jp;XFS_S7%71}39e_lZv5z^>m=-6LXtmr9H~pQ&NG)U35LsI35M9UF`)uryQ!8|8qvHJ1@IQ=_Ev`V#}n@# zwEcE9!3Qw0z?CxsWLdT{{uOxrc7iGxZ{usT0ORf+vEr<~GGLSvpbJ%Ry%d9;pHug; zoAIeK(_O{hn|-m}rxa0wt@;0+fn{B5EV^4;V9pe*>^pTnzeQ#T6f2&!T|84 zR9P`KlsyF)0cw8DXawtW+hFlgg zx4f1qRb@t5EKns*(H`*ntX^P4Wlg_6SJRV4f4eT0ooA;Y*e*S!2hq+Ws)b(|xWK6> z`_jy5{a4a~(g4|TA&#+Wch4eyAiO-2f18uhbbE}y5(b;fn?56eT)WZ+_+0@U)Agb0 zo-7AGmN@ER!vGCda~fV7^5)Jttf3)f={1`nns@$w+M^-m|6MdIbo>*u`RrxX40HVSIhoxhFUr(5xIZI^A}+MDXT)t>Ma3zcyo7v|#Sb7txF zSPtk41zi#*V5QeodD{dGDMB2V8CJ>%O>M@(c?SSZL*TrZmQk!!rqQJVQaM>kjH3%sqGd`j#){yhPtChroQH?fA|2v89bUfdcctj3 zd~&6yMT=kFmWOE%?oSQ9n7`4Go0gbI&4qt->%5Gxjr0ptgw@?NGJoFqQF+XU zoPm~g1Z+^_Rb=jtd;3jt>jAvm+Mz7N;hkO_x1aV?x%t>?CKjt8qbL3#=T5mdF>DM^ zz45F1RnwDa4m2yZZkyGDBjZffR+~4nqYK=U#03Li_M%=hoD$v&R{c0N}j?p3qQVqM9{sNv*0x9R+sA+!_&!+4P#~Rd{G# z(X?wK5POIemN!tS#)2I>ERc_`QBt+#&N3;fHf9PbG1i=vL5MSP?3kHbj<^Lwh^FD6~o|0=KnRarRVTEvv+(zxIz@|<{h}4CQCZ0gvkm5Z zf*#>Ao((sGJC69mkV!rK?POU@8jrOafrI;(Ul5UN?;b6la_vp3^*<{w z{~QA5y1Gf1BzB*KnvruLY!~7NJ#0HxT9DU3d(}BH`knlb0X|j?dUpePey9ZDdM!QP zT_o|7;H--&;t3m1JZ_5omE;~S!PSh$X~EaBjP}M&m|N7UMS`W*hx zxhV1JGr9fngKd!Gym7o7O}asrH|^cH4kEDC4v=%t-so>AH{w+m`vx)tF5L}-wRa~# z+h~}}OF7yR2`J(6`M?C!0+i)yWh>%#VX#w&!9Y9>@%P&m39WtNjM7SVXAklWb03M=;9UwM3yw zwIXY(TDp-1Ta=-bPa-i2L)~qM=bOt)m;&1E{xQXidJrUul3P-qvZ9Uu%DWIG{=b*l z*BW6ORv{5r-=qgB&K4Lx{ik^6i_X+U4$Obu@wM(gneDV-t_m}8O8UMIn_@7tqS7Z< zN3To>MI8)@p)+MFwaAd*Vz`g}W)2%$<1DPA0u1q8~ihdIspmpZ6 z)aYEZmQ7*g2+biA_uTRykj}3RKXf9mL{e>c41(V}n1QbT!xtjd zu-W$FfSc9xgH<;OPLb5T3!ULJaS_k04qRBW+gM|HtJ+*;eLpX$e0uY@OV{iMt?}|D zHSS}c2}#r>LLwI{@x5GgHEuu5ZaB8~9vERaZ&3y6Wj#c^cfMORYYJvsW5>mfdU?@5 zu_m|tIBb9M=bW_g94y7Od7t}t5({8s(Oi>OQ`5cnfW|mKg-3)7K59(PR&_l#0uU71 zeOWA~#ZxQX*eRYA-@R~F^M)=m8}K=(zLE`F9$i#Izv6f&nUZ-j9H=rB%sB6u1?EC~ 
z;^M!JAcW^LM$f%_)mv2kMM=G34Ez>yy(HX*qI6a_8rWamBI*Vdsz&Qf>b5^{eqz(~L@-T|kXY=eW zukr>Ixj;m{mfZ}X7jI76 zGy)Ue6s{u5`8e8IObTh0f;PwVM{pvSfLaz82YT6JvAn6PS#;|`H@o9n_qbn&vZ89>v_{7-0~^FkGJ=HBi^~v ze2rbxXirdYUbsS4X#4#g*md98owLc;#M zqplcV(r6(>R3^<7Y?+>SCpj)Fi6ZrQ_!A!B4+p>q zlhQB@-PE{~wTt%nll@OPMqXu>0%4k>P%cxrMu}e8+ zub8P_`gKCB==otaRW_(AtQs9F{BAayj(cCah+@!57-@^3FQrFyQ?%aD2O*7=|1U(B zEe&ZwVh+!L7KbE;DHt8=st5Z9)2P@-k7kl=?#jAZ9XJ~c07^a}*2rJXkXWYg1fl8p zGd;wGH6pS6d}EMH%*$x-{1WS5$=WarNej0(zoKVM;KC)}be7fKnL(s-Hx&QG@3r&t zwTUiS47fWfYk54``$K98rHOoHJPyW(HNMm|4Dv_`uXUGKl+4n+TC8#)1j~2xsr4Ub zIgPfb!=@unq3}kQ zfJLu)f+h8GFG3M<8{gJd(_v6QlM{{ zDTx;M6nv4&CTVAK@3LryaTpv6XfD|bzd^SY;CN6LK`<;4-ko{2#np0p4psokYfu+C zgkv!JuM0TR#eC_l8BsC6hF7T5pJhdoyk7ihLGG!KVHT0z|1o0IWTx>+?8ITbB|t+H zs6(0_9i2T;573d;(^*Uczf-?d_0?|qbo{3H{XathtCi;+d~(2H^AW?86dHAscV7L{ zfbJgC)u5>%_eE3BjgTuoaxaEVl3ifq5!vKghZKOGLU4b)b`wY2$)9SjzVioz?l&Ju z*swn$jyu8pyH1(n#I%hZ5O2p`=c|MB^RXoz(qL>)^*F|oF7ax>6q0ZQgBuKa-%_dG#rB% zSmde>XM}BeIgSkq!VihmDm7g$V|ua;_@>W>Ee{iI_{y zu3M0-2VqM?4TqGqXFTk$n~jlOLmtmL777T`v`Z@Z!!e00{OJHBy~CC28FZH^WKOF4 z2kncHi&vKXSb2}aU7PYXvcR*ljclKQh~*%mbKY)dpf2&%rum?yQUuIIGt-TIf?;-~ zn)hy-P74R7H}r^k3n?KksvM8G9}QoKpj%9}SfU>iXv)i|z*dKw@%nCFdr~!CXO#H=7b$nfc?S$Cl*$QD=wh|yP-`L4$oo9X^4UM~$EcFYf& zIQ%N-zU0U~mJRdBat@krc;SkhuNybU7o?42JR3m^4;j=u2SrE}=8A!i_BFRpl%A%Q zLUZdK#ADDJS7ZTt%jdXQI$vlqn02%znW@j=>^;t%b*#ypF1L20buPVvi3h?Dd|nkb zHgo*VzQ%(alkP1Um16`S9ULlx=)}Ehw=$cO=fGr+D4AyhulwC-Gg}Xa0i^6D1EcX^ zG?GklwvrZ%D)BVaiJ$TN`~q(wKf+IqwKixIMRt!v&IDSOO!iD1Y%D=$I7>IoAnLojQw{2DX&hR{G6BMetD z*T763S73lTQ1J^Y0EIZ1-8_%4n($!n?$cwB7|7Qj*AVF`y!6uJuP|Q<4+-}Gm!~}D zeW;e^U|cYS{qZ7*8SGQ=n-1LRYOMchkX7L2++z1|jPRl^c>>8_#7^bD4%hG^wgzR0 z%Tj#MsVr3sd`hR9`LX*cN+fofMSLV1#z0%SWW6ud{cfc>bJ2xNtiI!Urc1=yGTxz6#oYjcZ@EwxY0tuG?>PPN2;~ti+0hs@_p=pNJ+oFsD)tO5R37 z+M8l;uy42oOhPnEz^W(te^u148SoY@dv;O=iDD^lT8n28VH(pqB~2q;RNHuke^`z5 z3YRGAg*5az9kF`p7{%~4LKl~0f|-fS&A8d0{mh3k)HGzmVttA&CYl{|(m6zq>O(_l zz$tQZ4qP0}TU0wxDoGXOVJ_3ejA}mTdldMTbV{|li5_sP!t%2PV7}4PUd%~}BVaAO 
zaDLoX>WcOGV~AJ~pMl2=*=izgd(C6cnQMry%WrTL9QaBKLL0z0{JEjnWxzAt1wF92 zKEFYt8x6dF0!J?N7)11@H@wNcsq1mUEQ@H^IIF3RzqZv~x2NwkUO_drimksP%#5Z; zbMg>VO@DTerQ=3co}n~iZkO(l&6Qthw!Aie%ti~V1z;J?rh*SQF<*{)`ra4mcwMtK zo)&xFlc=1LN%zBF*qTv3hiAL%hqhjB2IOtmq)i2GlhzIHqe!@m0(Ljbd3$ExiQ^$0XC&K@CdF>-5aaC?z%6Vv55ck@84JVxiJ2 zfE489!lWRSIRI`gdm#}0hymDWEP&~v`Q&%h+l_ReJ042b5al zT^wq226G;xx`&-5SSQ?h_8>cpjvk}S{i&kvu{Xf>ZAvq;sAJ2mEC>{Kjxk}t7F<_? z4GtMUd7gD$&@l0X#;3Rr^C{4iRgPYT3N1a%N8;C2;6CYIF!VVJ?QY%t7m=^$cv8*n zi^ME`uaq@RV5k-CSev4Vy;@W!R*v0`p{eO6TT73#?w5o)fSzwD_+Sns2I?j|W7 zbj|$5A4JVfCA`iGR3O-H(ua3at3f{l9AJ;azw(M!EH!^e?F9VHondwZ9-r_y7Bs15 zuhw%ijrgd)wF{0iF?Zb_ppk*jimJ3p%RJ?LZ&#~|NhGIM!cDpKTRj$%(?imx!qS(7 zv)kJRAt`536D&_&Ujyk4(1u)7*o<;blp=PBX+FB0zb4miNTM{D_ak8E`D|*B(Qp0Uza|-+&-e&Ngg(kxF15KpAm35^)HQ|k%CD~RypvtcH zJ1A`RtIN}GOWmIa(HaS3o~&IKeSk%hePH+n5VQTec+9!l;jR?Qp*8bYL59i-(DI)2 zZI^hv;@Njg)6QR);N*@4VJ?C1n3Hu!jP6kOmF_hi6$wq?RQ~q|H^}$W)?0U7_{FH9 zajsk1A~7!XiGa1_nL>irmaVa4H+s0wo1$?rcJuzwMU<>&t#iF1&jNn76B0R-*`UTt z6h&$F_KQA%zx{rdg5;kCmK!J|@Q#x;`~tj*Xx&!TEwj7;Dz$JTH9TX{gy=Iaay0$E zccWT`u{8Sh&$Jlfrl@GoC!gBjs|N0~D#EZ6&W#r|;Fw;CST;`fyL?iBB!Md2fMf>K zSsfE_wWnI|gXbneoZ|rwO5S$xP~#;pv~7>6m(-GJhD6%Kh3(X_+h23kRT1MrfE<#d z?G=J`_vE}s7_nEbRJ^)a2|kHj3B?rga8cQ!iB2hdLhyTV!eq;!FpQZh2gHAwSBp16 zlQozq0gwfz3mU%3h}>*sK}nXRbLcF6Eb8g3(LhW67w6~=S5j{=UnG?l&d;GMSQ}Rj z^^(r22^;(tr&!d>&cL#v^j-|^*_3+Y30O0aCJj|aLOi;zZ|K3BSh zFBJKQtyMkn!GF@WFbsVUuw3sAv@w9q+Pz%+i0S#|5gO!C=4@n6;ldN*|-orKtf64krF@&+m z6D-c9(@yBk`I7p>5|^4M0)Pe1I79H}CZ*9(VgmbA#H1wSdt+%t!=?I6$aDATF4IJ@ zf8vnPJm-HceVA+amZkn~a<{CW7r+aXNTVIIuIM3-zDm{M6I7;?<2=SHIRmvKbcuF% zCZ(Fb*Zqf>%FKV2cicEk&>UJF5E;7L`QTYa?)yF^a;az-uoemy%nivGyfL#~%4t$p zE?8y_>%YYIqe80AjankjbRe6#5kye}P@^8mE&O_gsps|?Lt8EJQWSEDWBQ3=59R5q z&6q{%24lQOp!~?E$Rs7EV+o2F^Qve4(Z`=w8N0Yg6VSL2p$ih4%5d?v|~8uIEsZI3(7#7}p*1r4N~&;4vRB zliPxR@fbgtVHR1JU2p&P#4VE?&EEm z^ETwU-WiF{31cpXY)}}msH2Vipl$@=G}NG{YmdQfJ|;ys4YmSIZ2_#f=Fj)+G#Qet z{8$WyG6RGPdE#Svp~6Q-3*n>kp+SzG!A$#P+_Q30{S|QL8o&ezG5FGpoii6Bhq2*P 
zcApEn$!){t2OllQZxMWpQei7f-I>9uZvz32G}qlL(jVMi3bh=p{2l@k$_pEewu{jw zg_d^Q-eOC}Y&EejLIU(qH6DSSM1M`bm$2Sr=XKYp1-7L-ssDedYF0qUZPL!i7t20)K88@l;ulOD9zdSZ)SvV5xlts7cS&kxs6uL9 zBOv%N1N6tjXMjs;7$ijur@68neQ;S%f@p)keiVoCT`Z(&8 zcmKk%#sooOV}XmiOW2E-mcCj(f;b8icHaseZ|ne&*YH;G3S&7>N%uK#?xwo3f;_}L z-K-Km^shVT`XuspfcxZIft!pngbVbF|C%JwBv_ge{KQ)R3RNqX%*y zUC=N`zp zk`0%Z%3WKA(P1(aUL3j&QwY0_i-!$U7L(Fu5teqJJ1kTD$l5}kf$GfFzH$ueoD(J9 zV>>Mh07QT{iu83l>yz0r$h}2)#T^TvyEl}hhUd!uahE**{z?3US_=}4b&}~Tnc5-W z&8!$jFO5&gu-vetDtF0LSJ^Xvz`-863VtgoPG}0N#p*D`e)i9C`83mgT5n?g$d%N< zr=lJUj$S&jVJ3P<(goc=M+Mbj3BiSo?xA0s94%nDlt}|4%p^4*yB`qKx{Rt-k|MUl zz;m|d9g!XF{-2aI#lfV}@$6edZStq&r z!Xwli^3W#u$BXVZlwK=1tL3!H8(dthM)q!an|IKVN`51)ojzl0hu}Ao)XLXrF{2=K zbMN0}$0*kOVXa(GyI$-SJAIpMf=NO;CCS%&9|lJFS6m%hp1$UFXlDx9gy+|F-oY)j z2ea}itffEwP+HtuW@GqXL6LWSmn8%mKWVl-@nM}^*pl$0YV)r7e&{6*imr1Y?szm! zO0XT_uh;5B?*R+$ynlk!a#Zw9;(q?F0QQiO^_5O|El?9Dbq@5C32O>`+%{1QES{~I zgyw3?v2NkGM_El-vy;JWYg%4pU|~ddbb`^!q=aKv=(O}kyGX zKuI|xqV!iUoEH?r_4aZ8o_dBP79CTp^3NGLL>`J5izBmDC2i`@Oma9K!EFtzcAB`cuyumPl7bi#L*w*7Ez@qsn zRQ%%IL{gP;H2T`;IH%lSDC0i;2!T=GmteoY#~P-JD9qy{sziHgE5Az}OkS%n`h@-6 zWBeh~s{=;klGUjiR8YWc#JRQPXw%V1H8YbqwU5O^9LXjus}Ogm{g>BO6*0;Coj_|r z&lz-&D%hQOZw)R35N7>~DwM#pK@R58{CqP5YL}aIBZG4TRF(nKa{xE+Y1BooG(OM? 
zf4Bg_GNY5OrI`)ba=GA)^7%HDlI}o3*7av7HiXYB#*+1GgR;@F0#hy7@?U9;;#B>NiD20+g!Ddc(cegZs$dn_B8)Ngu$dq-K+zby8PMzz2 zuzM1NsW;FOaB{?iH~CjIzJ|nLfJ1nb5Bp-YRy`@&%e2i+f}r5!lj2lW&+%X4wcAq1 z*^B;c0)BI@)El}oNb0zPD+#h4rQpn+?jpnS6&vwO~pFEiw;yC?O2wIo-Fb zC^RDZzhHjfx>OTY8$L(o>37V_zpe0d&{K8I7T@K*$W&6cKL~+T;%Z9RAG%%X*(LkI z5bNuEL$4@n>#)h%Z&ZxN?2)J8_IK%2V;EEw_KWlJ<0GxzXxDO(-xX;wp>Ju*E|&dB zei_S{0+$ynS`&3&z1a3kr>|ZW9~J`P4!5)PuZibij;nIYSQOgPz(zI9pb2xxcpV~t zH|Qy67wL*h<05>}EELfaWi0ZN)QnWv;5I-6L+X2xW@jj-f(x>4ijVG{%4DC5IALQ; zDv+wHlTn7@sD%djrs`?L>?)92d4mWFRgp&qu7hcw=z686dz%GHh6~7REUB6CPZ#V6EhSE!9gn(OT z)G+pPGAZbTsbw1#iICm-6!I9vM6Lkfaa9z*2=F58`$vm^ifce zXOrVK-@xZs`xN=rQ_{-|Mtb0Yb^I?2a?!3_rJBXoOUkR|6RyGm4Qq9 zAlmDLsi!A7P-$w}1!GFC0|s~NLw(vWwG(=wErtT>;QL1{rc{zomwRI}3{=#d5*j;? z3ShXS|GdqWErtxrW`Fq!932O*bSG5D8K)y*Ki!TKN^{?UL!&L~hWsKBZ#CP|2!hN~Le`g?RWNxrx)$QFx>jb~S!maYD?US4u~ z!=mdRm}vXaG`)FNYszWh*Fura*D(RU^6UiJt!90j3HfrT(wdcD8@gA|?Ex{&Lmk^3 z%%q}`pe-w_WFjNZK;BGoSi%iFtTD13+A`4byEm1OieS!USKtSwZtDB_mWYoJw7_(M z51wzD@`bi-TsrV9jFUs(59k?px@XP7Hn@jTqrg1-!W4k(@iTGJDVa$$`p;|vMXvzM zrv1jK(XC`T{bb6U{`6^g6Ibrrc1KPj9+}5xOoR;cz;IQ?PS+Yk&NtJ<&aE~DW%cgu z!6j8*ucV1L=Kk$@Sk~K(pQI@}JgW>pbBR)LW5NMfl^-&i8}=5dPUBtBNm^|9(x_#@ z%d)T)85Qa!QW(=D{FdW6q(E_~jeqS~PCNA(99o{bTtJKCZXAIH4DeSq4>;7bcQaFw zH@!1V#8=1NAkhf;dKk#O3joT8zH6MED7Z-#XF{Rr1`UIs3_l(hRix!R=w7<^a2Ka{ zXw1^fAeSQy4YGFLFCX!3l_;>x;j=OthG4YG{!*k*nks=gI=Fsx8xjelEe8z9ZB1cE zXWNyunzuy1<5~^74JShcp0_gHsyngIs*de{RO?)@@LlJ>mCT=lbH0(l1WlC&!v`w0 z8ibuJD#i*6%EW5-{awniKZI}57_e#Usqo5N`R2S#z^9gMzq^#q!jrhJQ<=l3gXP9P zm|2`^8nJ>p`>hh=2p%_9#$ND*@1PHa_`8=7EF;+yAbqrF3M^3$Ia<>cfz+inVr}3h z^g>VfNf&Wn8F3BQ)c(VZY0yiV4B|?Z%=-f)6=ANwLbD#;F1#J`y7BbKP5eKC5gN>q zAx9Vz_qr=i6i!OF!%dYS-{f4Zvsm96?Sn?pyMPk6Rc_41?cOF>Ex&|LmZbQ^_&9^wPZaB4ONYn$&;qf<=|FpSpmcdyp;ZH0^gT-dudI5Uw*m_+(sib-s3}(E;SC5a$oz zZ?SQvY)6+g>aC7FSkT|p5?8o7T@5O|8iW@z3CFA`q6j zF4TfyhqH|6FJvnmke2YgkAaqjg$8Ukg<_21GZql|H)F_n8Q<240O*2g{A@RwNR)D+ zc@Z=xXH459CaSd=Q!?Z%&Ud4*stkmjgabMmD)X72qi=sDFQE|41F~XOGiVjbhYQQl 
z@xu0PjF+Nh5yJU15f*jiv%mZ;XVt9tsxE09?QoJGD9NtjcoLYG{;az}xi3T=@-P<1 z4xVTm7XHFsE{1lYFWC!7Xq0TBCS@+W8frdz>PsoTS;`TLV3NvtPdaIMH5w7$BmA{q z#C4&mZ><2UzesfBT)9VO$bsk8$@5a{{I-PvSzlM}mdp9%C}h!yJCxx%~=*dIf=Y) zZ?-(&oa&d@kTxA2)1?T5jQpJ4Ys4I`3un$XN1GDd<_P<=pes5^R%8?Qkvo?@Rxx=#4kVEL^p~#oTM&Mb7Xm4%>6^Wa2sbifGQ+L6f2IOf6o-YgOF zgz&nP^Vn1JpA1^_Sgd6?1m@~!ph)#`yCD?lYI)LWbgjh$C9!{-Z52GuHq>*BfQk0_ zL8($a`$^c*#A}EpIq-j~=3)|4W={Hw3nn_|t%qmfg4rj^TZsqU*{#uE#Ld4)G&B1E!;3$h1#gTQz#QclrGElJt?};7MuUM>HIhSTfG%iJAT9b2NRR zc}c@UDcF3p$~;0EU_V^CJi{22m%9WgW|A-6`xSs*cJOMKh9|afw`a*4@3et`lGi5EyoM z50#^F4B5g_%^;T6Dh9mU%~eyrWs*sMQn`#;IZ;B;N>iLN>MXBU+yNu0~N* zwwGcS28jzPU9O>m*uZMsHZ-t$Y)Fk@#8p^f1YR;?gTP%bMobx+)7}`Al`|P)@4~}* zmDlHO3Ib8FFsdZ}d_MmeV>_1W8y2RHF!{=nJoR!yo4C!Z*zY7m`exf&Q&GXVdy zEZJUvFj+kjH^6i|H{xQ$aZ^BZ7gF!DtbElK;B;L(^}ZFtT26a@u@eh9^Dgq%w^Zcc zz>A#P>8sMTgeNe=V>m^4&7f%`*ulXAMzz0vivC`Vk9~V-B0Jcxd&l~U<4D%K>3Inj z>%j;9%9xWh>s2X+$fGmmyxUch$hoKy#y24T-vpEp^+=%3b$p+SsH4LropixJ6!88H zBX>Zsn1%~Z@`fjyzV_Dmu{$bj4M48MnquIM;1{#?pZSxI;JGzok+48G$Gx?mF^! 
z_U_SNN9s*I&>7VHKrH|(NsZ^`0`yAHb6r}Pj-p)tq$esT(;4pw=%xN1rfwFI$os0;f`>K#HqSjlO5N!CJ#0r= z(Xov>9BUZ<1I$Aqo*iYvD?Z2_@+k0YbT~1WHfw8W;Fot)&3XqbPHQ|;1RvA^}5l(1f_4|H;Bf`%q z(Gz|1daHFh5};!pd#cI_bo+`!_;9H+NKM5w2=tprz-YuFWTAtrqZ`n1>PzNJFmw$k zIEyi1YDuar9TFl?$c(6 z&N{DI>r=rp0j$JpJ)Jj@3@6%w@`~Fa89nsu%6Sfx6MMz0#I{J|p@bMpl3FB7d)> z_~eyDUyeVnU({%bo-kLz*o(_m-}c{aagdI`8K`mXwnv*T=7CH$cAA9D8<4DF0%CIT z5pCxI1SitZ|A3UXV|&t8l_=X+Ka}4@auZVFRJ+!Qv5zb5QXlO%E9E#24Yy)yu{6Ace0YbAVYOC*)e466cECM+g|`^6%{sM{zOK%&oM>kzDwtSZP_* zx99GaGm9Rt7MT1)Z)7=rO%^38Q*=b{C0Z5 z{s4IKeQ=7X4d_T`^ii_uFkwHgcYx4+0S{~gbJcX|2K~~i?4ONfR}vn*hC52fER=nk zHq1(WFZ%Co1BW#@bYEfieh2-L)Y?{LViWep#iZ8XSJ|^VvsHTWupnX3-Q`Fp+EYj+ zm+Zz{b>$lz1SzOquvOujt0qG!j0@qjd4-02%FgR?T(L)M6IaB|IbQ9Ccmf}X1g?9t z5FeO~3lie*U!PGmJvIXW4t9oAVRi2=9a=kgX8v~^Q#fEe8M|KrbkuH|47f-^{~&pe zCniyvFKTYE)@q4hCp6*(*2U&h7KPu82UQ_u5&CyUh3NGx=EV)KO|L5oOAd0^)zl9MEb_1GhclYqp z(|3?FMjI==R=BNIdoZILV-Pm05WV5oB}?eXbuN^SX?tXbiN1z*DSFli*SnEMUw#Q^ z!9>Cqmbec4Gh;|7BOdDj%N&xViob%E=Gp&gxuIwFB`wV*xJ~fTu!=*)F|Ome(2;sW z>}1^m!7MMlxTQjp{89q;P{v1oE5h{ue7$5i_C}c}CL;>KPt0SB9yAvUNU+G?tnFwC zN&FWnaNdOk$M?o^3mR+KNI|BAW4vGYs_C}N^o5HF-jCfW4B$_3Mqk4wZl(V&MDUhs zM&qW9ogaROC%F$oYrKWGH~?jgea=}?U9VaQ31P=UL#5rXjo zBrkXorJ|K&mU*3@G73{l`IwUIwciFQ>#(zSec1G8mW1HHsIgT>x+qrYDymyWEpWGZ z?*&XmC{HH!?;hs9g*FMTDU_l}LrgJ^i5~wzRBcENAbWR_IGmPQi(prqL6~Cnp{^U5 z&TloN)4#h4Q01V3)!Cg2zIb(w{Ykvy^_5HUy9%GCQLjI-Mbo`_;EM^Rz}3ZQ`dBq7 zIgXozvQn--bIDIdMfoo#n(M^sy8NtiKo=q(&*JuC)pl*e6uSTNC4L1LJi}ndrk~Ca zlS*zp;VDF$FpqAL)9i51i)<3#>(a$M_LVh!wp(~F6z9*12r%O7b!5So4|GFNQ;|A4 zjqW4Cg)v4;^T_1Y7CwFK9p%Pbi6?4@5HBo(QROHlAU>ic!wm)2>X!Gdfb4(K_g6A* z%HObNgq*Qf1;hCbpT4x$oL49i-|ukqFT4gLuUm`yv$|S9jy#|yTd&b4oazn1IHxq9 zFCvcGZ@f0gm5!&XH~(7XWOGShw*j;;9!OS2uSMzyChW_QEPv6_1=x|y2q*8Ik11Wn zUexC;ZC$)3X7LzMqo0FIGs?K+qIg=~aOWuZS-7=@=q0F81fC+!!>Vc&z(6QyqoirY zaCjo+dI`LRHO0-XJCbOA_Ay44r|BX04MiR>zra(pPw*6Q0>jq(;hdTN-M!f@*s5`I zUDFWs#9aTs1pefWGcT1&JFP$L;oZ17hUPT^L2(T*9`>Y)%+)+CI?UX8Ed zE?%NE9chr5^f;O5~YK@cvRQg+ILc3m56Yn|OqN(jyFUSU}0 
zHHGt7C#0Az9rhs$dglJtwuVe?TNva@n^SBEr@NzU7B+#c2P%f>)fWi z7>w22{hb!)!;sktTk=jv^ZV5#Ahy)NkVo1>A zX>bGr9H+d}u)SrqtO1M@y@_qaw&KRF(RZX98L|4c`_-m$0C`909e>}jq>oF7cl>_X z+6yg*sa7$i1E9J8!>_v6S{f5XLWHI_0u~BED;%U8G9laWryt0wN22C^#^CMr)pRXg z={lBHWcGxUOz?T;{G808)5$M7QEB|3ohfl5C>3nC??*HEi-n7^n>1ZlDF1*d6H6cq zMDh!NJ@&Xdg<)$pgTeQ-Xww!>K7F4Z3#50QsYegp^)Xl&q>vKp_^f!0zWf=CKzK zQVS=uP~dg#nvckBhO@VW;S}~h9rbHJqHq`ar)FdPcN#i z(1RBr)HlMqSwGr@-9cbOGS7i*81zb}Z5zppOTS6Ug}?P_=%vk1VaeY8Ese)8nUM&9 ztv!|8IB~MMZZ~h4_7MLaOZQrh0Wu0t7EE~GQ<(9(x5*nvDj6_$@)Y4JT15vPj;iFM zHhO-0GDXGf_|j$7MYQ28^tbXd#^ampP~i2^L|ag)v`>RQOQ00wQ$&9suM6d+W7{a! zxUt()-;pX(Q1qspg1jL1Fr?vBq7B}GV|zVC`%i6YGkNd=b1+`Hb;6Whp5mSJ>h5&E z`qMM2(pv9hROMFyhCOKr%N>INMJV6kd4S`A-AY;v+pce4Mny0gK>B2mYLnMsd6g&3 zYjkQ(pGp}tnatCy- zTw%}zFwQ#2SfyKr(*Xm_$Y)Oq4V*{+p zoEP#*#C_5Rh#}KPwQ7gKkSyRTrZX^5fntOAeI7l@FEv3K`arX*xW`8X^rwwBZh64I z3Q>@0=Ap-8`P+zU;|UgkC~e12c{Fj}>nvQ_)J%v3UVp#drPwoAcu=w3KiwhfkPQw2 zv2aAy-?4qVMC^^NQiKa+NjKV0Id9h}gOi^P3UNxuK8I87I_m%PT6q(T!vu@F(Y&Ep zGQY<`nH*9}5^%55qQ!2@FdivhIOp`hTT5kr2;@6X(D9UizY$a(n6u#O%0TJ;mA{I! 
zl0qL#$_D06w$nl`HDLm=Fz*W<$_vJY1`75XVrS?`S%-p8KrgPutI#Tn*qyZCDRgK#znnINAo--0R1@)+385r4j+z7(T zS>%^F>0q#o*WqU?L&DYH6W~inx zT9p<*Bc0F)7>EZTA1*v;!?W|+@hof7*>@JbTAj*_2#;1Dv!LbI*wknQg$NucNfstrU@)+^WEJFly)BTsOqY; zsJw_J5Rmkm<*`{1@wWq~ucedhx=(dYva%Y(aBB-^%xzA)j;hBN6_JHPgv=P+b9O@l zSH1dJMmlp{bucP=rSU_9Xh#?X-hJ07$ur62nLbeYCkYS^bM|#pH)){cXA7K@h?R3I zUOpq)=FyH5agD<_;4o60;586((!|Ym-15P`mNr}j$|alWi>;Umv-?}`MLg9(4g%mH{dIWv)$<)?V&{79it zNC`z2_vg4w--`r=%jLHxKcw!H@PKgI-ExaMs_1p5>9kFH!)ejvNGn&aE)2{7?@^~% zSq)#vm}@zNTz#VV6<(Ydmpo(E(+W$)D8j4Iriw~gK^;xoRfXYLFieQciC>t|aI|Q$ z?$Cr!!kM7fz-ZplGDx=f5WvC`TMMO!f`2^8DcWE0BA9D%nf@5NW;kEBwquUDvJU2<)$#-9}y#s0J-ibKV)t=KCLr8n6D#K!`+~G1XoTU9!G`2L57V zL#+qvCGp5=Vg;3 zqzz$#%rrv@6|XHyJ9NnU-(9fUKp{N|2(VeK$hp|M1)@WXTf?Ap(3vA?%Wm`pGhA7U zw7B7e&{%mK+Ll0E)^bIeF+HCfygdr1{nR~~yH$8>Urq%~z`(l#N`#m>S;v!DDO%I^ zygki#t~!PM8sA`r9@G4XZTQ)7f}GJfrO{0`K;>^>f`YIjfsUSqI=|>@BNG~h$_wf- zfnjtYY?RU5>`uEhaQ_0FBiRPuSyMK`vt|^Mw#aNX`aa(v4Ue|$yuz651GD-h5Po9t z1q;_ME3s?^e?KK8;)XdfAmWabBN-x>x=OwPYw&?MW_vI1XR!5LBd~0JoOB6&ddUMW z@6OHZ=|6Mf@@$%pz3skdR!Yim?J{t3tfl)#K|qG0?NdmXtwaSUJQHcR7Tv$EwxTC5 z{*ytyURpjxLoJ}O=J%m#q$ar$M4~g<$hln4CQodKASQ%F}C?Pd&uz9eN4=RJ~-LIAb{Ye)I2mXuYNV(f8p9zo0W z{_vuL322=>0^_~F+|RY0N|huFPppGj5OyHZv-0w$ah{M-UNiavs=TMO*Oa-EH`6GI z9KWSfZAx*INXv;3@E1-!KV&}^n<8P71-Tgj~@2Pn>Fg8jX*#8YJ1TnY4ghxs>y z8oEj>NO_vH;jk-J%>nU6Ax@blHUR6@UuD z4Shm#hsRw!q$$Q6t?t-VkfxT+sjOYFv&athiK}7emFnU&zGlW2McO2@=ACbPlxM;HZ$KQv;3!|1Q;DCt95h>=|3yAbsN%H(y(SZ~!rv_czEun1dQTcjZ znm3MWvrkU;tK+jTbXu_0;x6%tj%k9~BCMQnz59s~4VJzUW0IwUG9H1$QZ8uI@RkT8 z2=UG^wY5hFv)(zdTB0J|o1St{h;LSK=)Q*ZL&<+8@@5Gf$BkUBN}bPCU zB)OWInqEitE>pznUJ||Ym<1LNLc;{>paP2*RMqIXkuCx`%y5EfMP-2Y+2r=p6LuE` zbSV#Dw66J>(=ee}YYSK~5QHZvgRRKjLk$wU3%LZ|zT!k^9-)ODcj}cD!zYW)jK;x_ zPVa)pVKN=2oIQ*hEW(L%g?$Uj z3!NMwt)KvB0i2f)V!V+gI-H=}do(_XJN#u?_mv%C?Uvt?#CLQjJ8vqYHJ_Q=5(r4? 
z+%HboUz6pqJEw4Le>Ne^-Tq+0CwQO?3Y=4%Vv0uYOf!VNNCz`8lEmzj|Ggsqea-z7 zsD;90jzZ1CKuJxpV1Xb@v1EkR%P%lvj13G-dQ~2kj59{YaUd=RI&knwb~-bCoz_el zNhe5nRbJPDHGoL!D!3&rJ6YM0HGP^}Kj!UG+K;cJ2n9=I>dkQzNUYJiCj}n86$L^> z{JZ1JjO{sYafPR*(52!eY`{Wo=*CEhtl%>_$S*w9Olzi1B7oTalLDc`1uE)-Bl=NQ z!_-?fa#|j&xd5g@Nz++m{*?mk=tSD)C53~6^|2Mr-scc^4{+MYVJkzETVHuG(S?JK z_hfB;`gIWP*PONp>eypq!s1kpx;l!y07r_&3sJCeE{E26d6ImZU+wV4bUUbm`xkHT zF!j^C+OU1I>Pz8;>qgmQ0_x~6Wx6iSO2tRTo(RhG;=w;Tv>Bes70Is@3u@UdVMTus zFFuKO#szW}Xt)&OfbOFz2cWL$OdcDX3mL-T>l$NOh`Ge0H_39Yriox&w9Jzo$FC;A zmX^d2BkpRgLy{^^Rk~WoDgW|5E&xKpzP{!^g{2rg^Pt4=tVYVH!d+oXwkXw^RZM z`F%+g2Pgn0ou;lDUT*6tZ%(7h%itAcWvtD|lJoRp>cqb^2WkZ8EOu17Y$(+&;c@|K zu{b>#XXUP2S~--eu}d$RueTuuNxPg>i(2iY`PVl8NW11CDWg6@Z*Mi zD0V?3Iy`JG8K~Q??A6S~P2)BA3?Q<%$GR=?;AaV$(REuechVo58RYHbSRi*K#3eUZf48SI z0uO~MbGUs{$IUW#d7Mmo#G2kHGS6P8LJ2LqFR>aL&L}NtO!O#WW}6#}?EtDL*u9hF z*?#~tu-@lti^3umvlyvRZ{#k|a(%&B?Wy=i~lB&7_7->>(5*I6iaEx^OHs2(1z`sL~bOzz`a!sU^^dfG{5u}{Ou4qE;F zF+;}A0KT)HO48y#x(GS(%$>XP$7=ghc3cBkT2`N+=k3x6Cly%)GV^q&_X@1271I20 zo(na`NOV}Ex>`f?GeK9)H89DxL$|-w^;24Guv$WhSuO$Gw+z6uytYZ$uYh#6vfp#% z2t_RBOJLFT8SL|47rU7RKYA4@a25HDZs#R*-=w+G=Usmk0vnK4I?I~S!|A~E=5Ka4 z$x3wdOP+XSJ6siXGa-1nJB0!y>b~x!dnw9=CNYg~E`bM~>5Rx@qAVNTwI!XQa0a+x zZBb1NThDkR)bloqH2B~nYNl257N+)0^fC?p?~OZh%*eMlJ`3C5983ze{5}a zZD;#`Z;ZeFzt>0rt@jFFHX<2sUD}MIe5nX50s#?~aR4$ogK7W#a}VRYt~o>)*l?I# zQvMf`+gJm|gp>Qd#7QLHW;%G_6ejiitCv#< zl*>9b0?3;RWWZpQKWS4Ea2moSac9@cdXWA2Z3p;mwHZ1le29}dBU*h&>#RK; zN+{+t4R!dVM9*#g%{KlanTbAAdUEC=4(I!jL&nw4QsAycA;n-nAeQ2+8VIXS zTiQ5pwZ2CPzpSflG0C4n$)4)+7SsC|+YGNxA&SMaU* zD`H$}zv(wFGIhWD@G(D(_l<|IgR@cY?BOh?*2EMwK_;NXB*(2Kz}1Q~zLz5aXUiMP zY5sM05N>?kypUIJV2ZfPk$Yz^-(l~rC><|Uy(>Qps7lL2uqm^)O3-O9eH_y?-&t?$ zW9#b&4#!UNJ}4lOQB1`ZzI7RA$g$-Q_`om>O8a$VyHlsUU&J)%+Q4@quh-o$SG{&= zKL;eS2HOu#mO`jQv%J;*r3kk9KrxI5I>TQe;{UXbg$pFKF7>v9wdPWo7&C730^f^I z2Ye|GdmMK|eC}+PjT*kEm?6)vQ8PqZ-t_MWRBRptbY+DJ;T1R%esc!KdAkbzb;eua z58M6JH_tDIEpYA+&}bqjm9@o!=)E!CEPdc3>G6?{5j^vlXtB`}<+9`sg!c9jnBr)^ 
z0ND6asvf7g?hBnf-eCTdb&9{^X0#BwqpfR?ii(#1~HoORjDkHR4^^E5poG=1X)^*{W9CuW70M-P4aBS~0e$vnY|C=V^~2wyM34U5*U2YRb?<<%j0_u`NCc3RL=x5Nkn?P| z(|>j4LUl?(#&{3E=_{=!os%nIu-1U_GD}`Ex)V#|TycN(dYf(+!P(W&_0g&sWUfv8 z#|#Gi{)4ji78r-KEGuGotGBEHB@~|40I>XFp+(IL#!nnjYJ&Fm>39IBQX-_uZZU4H z0!!DkZ7=*zhfr0~OMC3x4-yS1z_u8z6!nDN3^Ma?*B}Xdcxd7~hNy0tA}(FlHOIPJ zBt`5@GAW;TsvbdAT;=*Ja&cs)Fw&wJM}Latn69Wqcb7|%G`&bp$!d^iW*Iy^Gcseu zViPM-qtgVlpSj=scKqGNnQ8a6DHhztLOQW7Lrk$|O5@^VZ2IkBxRkCCi<9wbJ9ebX zIV!SwBwGj*;a*R{ z9RyFKvIga9EMXwp?u)^7iWMlq-vF66h z6v}kUP+3ZtmD#2Slr`b9^#_W?EC)>5?D4w+7_1r6H%j z;S>+3jt}+q%!z;n@qqcYl5%u==d}CItmd$#6lUnJKB2Bw}(zNt?Wq8hM$tdI1tr@wv9j0j9FKkT?lZlA@jC zg^CSIxRK+1g=vlEw_UbXuQWK{a^91Eu(`Aml4}2W+_f(6^T?TB);UF(7|a1PfwQ7r zX}xf!_m7$X-O}nBX%z|qF2`c#o7o9DnEPeqw}6k{$e|7%M2PrfE}DS@PAe_~!T<%j zQML<~OrS5ulaS$VensesfUYdj#6PVwMX8l#XiIY(=s5`;MZLk<&2#0#LQ=z$ zesNWmG8>M=69j0Y3dvqa9mIu2Jjz7F?NS3#S?lrgex6Lq}^<7ZJn*|?hZ!31s2g#SFcM{^}-tTM@CH4C+E+~F+XVa6b> zO=YAM<6!bAB-vXS0j}YoPgPs7o$U$E11uTf*Ntqw-;ZtL&Jl!6KcGHaubgZx|BR6g zYiGw`{SvUPwyO48F$S6pOUdywc-TDy2}7+2z6=7!0N;?QCG~P`UD-la*Q)$T4C2O` z2)Bsh6*p74WT@_`s4!YlcJb#`R)p4`mFO&9M`ZilMrgfASFMxnqPPgP>zJ*lP^!`z z`fx*p;pt5*3iWdTtPx{BauWK;UHP+}dm*#@JB1CLlHuXjBspenU}xg3MhFSnHElFK zunD~Yy`~7jHBj+|=hbb>FpU~GtBh#&7X?;x7~00oP|xg%B;Xi+F{U>UbzR1An?9+i zQ~Z)6pvLL9PnD-zi6b-@>EotPbejhRRak(zKjzokF&=tUJ;~@@_hB!b{@jI9D6WUN zmsvBn^o`C)MK5@1z0E0Vso>fl`bbz4Y-$F1(k_vZULQi&>%w2J`=A%c>BZhLvKy&L z)M&j<0U%3>hgEeKp63gmic){(fGQ+fF_0>zjMc`sg-N^iVf59?chxASD*z|~x<960 zo5IJcLt`Cq_N4u?j=hpqEEGJJe6((=0e&tpXDI7TE@m9Ufe^C(*JRYv*mf6fZPV4I z0}$ipQyrIo{7=LC~8G*PJmfO*2D*1HC+ zHQo1MB$7Fs=P_wG;##zQzU&>`Og*hdK{_ejs6j|q&z+_X63T8>%J@qI=x`q;#6;%l zo+)AXFR>o#Xj}58=n-i|w5MzIe#a41@~K_X8w43+T3@-PJg(hftb^@u1*IF-2ikBP z+45@E7!3u6Q%%ogL61v_Si=Yg$_hyP+M!QOj)%!Wll_Wk^x};#saFkNvxqKrS(AQ4 z(rQxAs&D*uzPR_ygOAZf1>}=;HrZTq{OLzgbJ_Cx2!(DatGm60<`;4BUqRa>x9V_p z{|F8F4&TzjD5?VF#r(tlA*h*3CJv7tXlxR@4I-L}Xnp$tcXS%d==@wf6pSije%%8O 
zQ6^6CdG3&*%oei-4|#k@0mFr?j?G~zvQrA%L9jhA9*a0aE((W<2UFy{63d{CHjdxMPh0F=pTK8i1B}#iRqu)(@`26(02#y z>K)5Wst9q@Kz8Lp1Z2k;B0HoT747CQ!v)0*g# z8vAWEmAh1DMjS2&)_@&(!tLPfySnw-zL$C3iM}j;)6bg>4}vE|(v_EXz9W`GUhXNRwI-L4^`q zCfc4D?ko|C>h19cu}R7ztXEgl>{UObv;R2zQ+3P#TE_oV`3BJdxvv8Yki3Nc$T(%` zEk170oBCw#^cFXCBJr&2N_Q!xg;yFqQZp0wdpR;25aQ7rS1z~yq2mmrc#i3w5Sg=4 z4!~p9Br$$y=WcVl;~Qi^ACZ$BzbXTwN_NG=N_6Joh2RWNe*5+&uTB>;a8bZqDye>d z2A7fKyuggEY`YzDhmkb`4-~;9nr&@y63{aEwP={3_GjAdPbZVTZ%DcGF{P{C9KB2x z=UU{MDj?GAj+6KUB7A%xw@0(*fdsWuvw1*lBvjn^9D=!=f|;a;AO}zAYBpw~)FghLHQHOnxK0OQvW%)K2(6c9Z#(nP=qH8nbnp6@|QbKBgyE zc$7~7;iUHQcM)4~HP>V#mnDuLtR{__2#BV|>F@q7CZNXRL-?}vaPwUVR|RA~A(`M9 z7YM3S2e$WF{V@oK6D>)U5Q5*HbD6HW$b>-yBWR;f4&@dd_H^2e09p4x>elrsyU#A4 zxk06VTJeg4b@VlrjktI&7b~8dr&FC7qHfOej~7Q%Is}%?KbOM<>Q@&GBx15MUB(iR zF_QRZqm10^BA4@Bt>IdUA*=3xXqmB3JCXH3yqlEB1-Bxn3@<3NyV&AVW4T{K2PqJ- z*perg;HWARmYgq!0xMYczp{Msf;+>Iw_odmGO{?6i{tQekBi4-1r{71s9R4sU}=L? ztR60$&zIkUyr|&hW1IUQJLYCH>lo~89;D}753?c2F68HIv{Rz@t3{_=HaNTuUoZW6 z15ap^F~WW;u!C_3h8MMew2y1&L~e+taF8|T=h^+>wy4G5dsI5{)OSZH)SuNJgFIQ# z#1%D_$HF3^jF>~#hLKX$OSW-`u+p9j!LP~TvF-<(8cEHX zjj8VM%D^)$&$y~!%Oom(CeEm$6JKEtfK3S}&i$Ko!Rj@k$AZ1@o^m`11!xtG#EEoF zpelzgNZ)ULw+vm;9X6if?}@gg5V-`QBUlL+H+*75h)&d(V+UC05lP4sTfJlVh4TaFwT+CfO3quY&uN?-Y9ao676t({Adxo`u*qoAsubP zwzUcaK5J&QSW$$W3PN0grjWaJm;dU=z1G!(ijkL2&5BGY9d3H3hP))Jg3wcVu2VOiYSxX&>;%X?WE)t~!59Tjvr@X2V&-vx|;j0u8Qw;`Ol}^xxd>W}} z7P@==y^&`nN#Pbtvn*U-WhJOLRTftu3rKFk6{zfu5<>bEedsw)L+{6N_q(ZS@^Bzl zad8s=V_WfcBc>0b^oTidGDqTk>sa(X~yo5wrg73(S!hZ_B{$|AeIpZ%k5nVlG2guC`@^>_J~0aWuPIO4r*;Y(r2}xRZf4 zY8hyqM*7#|Z>T?2W(_JWYomS2$w|MLa0d(@-te9qG%p#`*}>L!UD)fPn1`d`I$%^cGaa zHgDsBDHD=_T+g>fzN7tYTCG1SZY{*16~eeR)sy~iH^4#EVQio-VzcUIh47QvN`^2- z*D6&AszYpMT3oBi(MS&dVJQpk4H3t>LtPsrFW>7=Vz-wKYB(t#06NnK2qBd`;+1zH zdP6zIVw=TGCr3AWa9d{?u>q7Q|LyG3J;1Ue$lgMkXv)4Is7S{bNC@W7TCKEYNAD(Z z;H!x{m;ja}mZWy#J2v*C1o!>ckpz?@w-I1Z z^6Bqf6FXh9IZUN12Rs5p$xX|rc2$ht0rI@rQKOP=3Qb7%2M?5-|ITk@{Z^e-0Z>|L zM!DFXToGF!euQC_x;P=Z^9($>xh|-v<|p8*RA$!zNhu~!bgP;6v~wPMY}u;BHRNIS 
z6risd5iqwkFtE%QtYA!$5zs|Qb43J25G^|leH(Ug1>)?ec@95A^kc;Yrv}XojTp0J zpBaJ9n`#H+^{TfOI$ILTbfXvpkpWZ>fB>EfU@m@TXc5uuU>yt&23$=^tgy;V^8=Xb z0I<`Y#BxrX%>9GMD^jc|i6&}RmkInevkPO-Hc=K`mXu_;J~Joo&v8Ns*oGZh1fjys zm_k$bv!3d?4}N5N=O!?h?eytY{(Ocd#`9b(?F!43j!48*L2d+~Np9Xy49Fp}R)%X2 zw{6ZGyDcxkLA}cbv(W3FV%$}K?@0`goi!Q$(W%yFp1a*4d8e2ZK0{uMquHH2+=5Vf zFzg_1U_XC_6fo|X=VWI0^2sE+L_t8S>VWLSnA{>xSzR`=n{L56ClUY-RmP+~bEQcV zUX?WOt8|0lQ@rdq4xc`XF0@>P2fq;VCCUG_h#C;T zNyQ3u0Ryjj9bg2dZ26=AuXh^0w@c2GohQooRO){1bo3HmyHRE+(G^;CI1Eu zxG517E8=n4_cum+#6k(%P}TY}xI9oFD-TP+Dd18wn~n){f0d&m3g|6gM~%grI3wFo z1k+S|*zxEbm$0xGvC~*TN#%_Z7=}f{6~sMS*-V}oD25fQd%dNJwwlMB|gfoLH`Dt4DBD% zmT;L%EMTE$dow}be32sn^8N-YVUN4MecJ_Drd*w`>?BFy!P{)yp=kB-{OU5W+y$r& z=Jas~wc5^5T1UYjm67sSNI^apk}8P#n##eFsa1WTvyjR6vqCK94IF`5r`20)_u6Qh z$C7)zY?#!^c zJn^7E2LG~e^x_Qr;diAk+n_{;mP{QD-8^+~!+EJz`OBIOP?ILkcRR6g90xprzrZ0) zh$tfO)01M!i9@SBG|Mjl}S6j}|sH!@=F= z;n~y@7{^p5qKnFulcyuy<}qzDT_P48G26;zb&1i~QHLRYhrGApRudVelCPoY5?O!K zxp2w9!nWW&{O++By75B`hNV_2d5hdi2yu#mIpR|r3H=}7y<)r2+j`|^VgTx)Y?Wec z0##ZqlYh5SEM5Z{o0Sf{(PB*`>V{7Lrgi8ChUpcgfaV#rqw-8Xs%fNscFW7m@h#5@ zNIiml23v+6m#@k~NMD(?d|Daa6%ToU;?aoPsT&no84O!OW1Ou=Y%ZqzpJ6l>F&0pl z6!G&o!~vN#(yDZPnsO;T16NVstkH7kXnx^=oN=m?%6&vA7$SXQ0(f=7xhh~SIQSFU z#Bu9Fehkn?AQ+fo0O7G5EdV3n+3(ylT(B)Z9kWQJUeL~2k^!m5?6^VRu_(&=we*k(6hjiepnFcXJz(Ia+!{-N}}8(Yab zFs+UZ!e-Id$xX=qW`F%BGuMs~PVGl-IFo!y*w(&%o%}A~77!lEL9wZXT6Y(Jqs@v$ z&A97BWgpZp>WBUEk4s@`P#q8=xWvz$L&FomeEp^iOnI;55bJyQJn!DQv$ymQ7OgIZ zyl0CD=}!Nl3lp>L;qj#voP1ejp&91M)GolJn`7U5#Dl| zQ^@3_fx;uxWOC95Y2+TkqrfNifR=g4ExnEOJt%wckq>(XJF84RA5cs$gWRVQ0d<1s%s0exKQzMQ4`dHhl;+IcUWQ+V(~u@U?gk z|3g_|$QeA!mGYEM+TFLkhhg;(3NC9@~2>z12WPyUxcaJ6Ui{K=+dkq0qcX2cPt-- z3*FI)=Xl;K{qCnA)Z_BPUAc9}hvR|$=|epSB+eh#5=|+Bvmf)D>jty>D~n7W_`o#1 zwpslsThpMUJJtp+&d?5rx^E9X#@gF~FNe}y_a6>qnqnZiPXH$1e@|Z&|Ai}BeZ*sl zu=n@deth`U0Yj`C$f1;MuT(d=NiV*3dd7jOTX)|wyoIx)YehTL2vXiVqC`wx@V~1m zjj2C@Ie=id`b~I=V|rdf?(qR?8Dp{bP=eDodLz1=%eJHvMKNmX9 zzCjNu_((CftSUX%lia?o)lB@ohZcpFi}oG+s;f|Nw7TAkrh6Lv0JA>1MFA%LmMT-3 
z4YGHryLp}gD>V!O+RY=`{xb$yGXK2P*UV6g&>&@iSGQvULN?AV-5Alzraj+x-A(>u zBl%|y^`p@5C=@XY(2S`gY9W@$WZ8TfF7hpz?b)n?Ap%@pPgM+`W#0tJ=EnLOl)MeQ zw>JJseNEm1b~gb}XRqnbFH6%D6!v0;)w45a_!9*&%kazU0~6r_b_ppi-Lzph-7D%z z>G-C=(|L5s8Fva|G0=l{o)_r<|Hk|cFzZs^ zVa?wdTbuHq(NlLF?LIknh_Ec#=RbW?=bj4(IGXQfsj6yQ+b?&n7nFXkLzbQWD#^i)83j{Iex)!hBC4oj7xg56>q38vPlX~{<nlb;w}_h^+7mEDakL@yXhV>S(@a`^UzA@3Oe4j;U(3eoDQI{quI+Q`irTwdU-Bgr zK4IFQ(8%B-DV5Q;PIKj@?l#K+%Vyl>*l+ZO*#o6n{<41k@Fy8yhgAR=9K4(t^%ZR? zCL~DMg!IN?(_2X;@OQYI^yjt49cp=XWyH!H)7_B6mV7eVYNmMmV7QDd3)&~70{|io zV5SSatJ8D9Dyok5%7%}0MBgz@Z@Fe<-ArkUoh2C3x`JcxaCR-1Gq}*TDwbh|OR`@i z&efi#K&RebY3L#alDj-dBcBA#v6>pT=&(9R>*GG_#q$H@>?*vi)L2tGGl9J^b(qKagJ zXU)`6lIfHy3jn+V5c-R|hJZaUG%eunEey3H*2*KKUJsy07s>JOHLK$Xf;3AZFP}6a zOf*b6?B*keGjUWw+!v$)Swy3}j!ycCN!V=o!IbkhmI7ER!)IDCrfa*zR-!6ukV?nv z`*)K5Il}J~c`C%8ejTdXzj;3L>2ek$k~K1?t^sIw%m+IjZ(8sGct^K;rW{5&_)x1C z3)KRNVQ*^$kVs{C!cEe4;*4r!h&_6uyPAA;@ARZwg91+lyU?(A?c;Xk-pO3zkn z=0=XrT42q_^Ys*#nFu0s;@WXA&uVs?%}T(0;v*{{qeFKO!JV9?EYiZ7$(-0Gi>FHWDy7$C53 zk+L+jh=9u;`TYe>0O%K+L8yZR(#W~#LK=a18vh6_FP-A;B2w}RV}n(k|OD!QTg^{w(4sfw>1gcv`zJUeh&3srd4&tHZBMUN0 zq<4bd$v%$=#t6%b*m*_YY1h?hD3|We)bbyr@XV_MJ!i{|Cw_c@^8FvgvCUSMoWXoc zHpn6kiL(jUAM6+TUC2#wt+=@BTUB=}cqttQn~DZ6P+!b+M7{%abfjPf$4J=IE&X{Man-wM2Hd^m0W}!8L}rq9P{0Z|WmWDWlzTXv-h`eQN8c zxA-o^8aeIXu|u9&1*{Z$}lKgGkU|MOYPaLf;JO4rxyJ4nKNv>?e2C1dd5Jw={OXXIEj7 zirP+t!+(`h5pxHEc9mH#%%ree(E3}q4&AyAX-$H*2)%4H@%OE5ktD>ChR47m+eY!i zrv<@}ewTFR#fRy`@KAcWDI`AC_e066HpwtY9hfpAT)z(j{M@x!g2$%{I{~0JOb7V3 z2OP^|I~7^AXD!GrRmNvAL3h1OZsk%hoWF22Df;>qpL-iW^sL9`g8=bW|9P}@nQ!&f zN~}mC%ACi0r6Y@NGP8}@t0z2^u-3_K^ZhR(3j3S61t!Y#Gx&8_~tEwxoZIN#@sZ7&FkM_ct1&dM3|*56c>o$cQl|M<7A9ue=e;njDt9Zg%%e|DjSM5WSh=UMCL# z+#Odz5^sMtjwcI~gif&IHqZtEVjwok?+ywC=wgc-zB_n*#8s&N_Q1W@mA;bZ%Fqtw*C3Aox7Dc0;8CFa*;cw=~e96 zm&#=r1N&NtSR$1if*W+6_9Uwso#x(zZ;mg}BacQITpgP1nBlCVVGZBl4t)7n9G$Pk zs?j{K>*1+%%!y^l+k3!dfQSYacnG+U2L5&PmDjU{;DK+7;pzdlH#gSUs22p2yp-3J zmzhkV7_F-8Yqs9>)!*n^3pIn5hELr!%tRm1Y74(-5E= 
zg-OD|>YPMjBpzF)`l2n)>olZH82sLf;n7Y(SZiaCNHfY2?l9GKVJHMZ-p7i7lr)gI zsET!oYB%2)O#%qGy31b1*c{-xUoYzK_noX6M$WAy+pFfizCfq8QN1T$AWMc4X~d{_ zQ`K2{pM{aki^kD^RO8(a!=DmZg?)q zyb%K`Y`1X~wdJ+(7Ef6o64$(Tg3jlf{@u1;fRjF-YURZgGNzfD{0W1}B{_7QPwOd` zySGQ}DQPQ@0TGiTrxu1WiA3ycJtBH|OCEH^Xc-WdX{Y0}o!INidkUE9c3~n8vO^OJ z|Grt6J>%_a}6&9@JZQUT%mXbrOGUc`ENPTNk{rHVq>i8XYjRJz?JJI4Ti&_=@I&+reL zaP)xd=Qxe>*ER@4bv6VYjXb5UFpI2~!~CfT7;Fi(d#GrXw28bNE$dGOzr6^VL8Voa zN?>W^Na`o+kA&H4EmO$N&2MM}e~E!aJg3$M^AN^!7j|kIK7>rlVbP4J5#!fu^*YPa zJW&(C-z2D^6CwA1zGhUm=&g@QBc7tJ{Mj>D&uS33MD#bsW2rkC+^e@h*$wHdk066b ztAVbbuVlmp@lVpI5I#D}FOE|_a&~Qa=3&(D@QBH-=gAGjMVJ=;P z%|z8T6gIa_kJS}vg(Ja7F?NNPcvM+DPT-%|F*zI249Vc5^VqKr;mB_fAif8+P3dV| zndOIg>zL*)fV2IAzuC+Z#fhk_e1RPBJ$g5j+AF1xt5>Dg4rva+s$6kzM!5S!K2YxUq(dq)5#phK(r}!NiP0 zp^^|&&*(!(w8@x0$sV6k;#1H|+5Nz+MpMlz;1P5oAf}lYoppH^=pzN*ElV$0kN6yBM_= zhpDhzq29PN^g?R4!kk!Ji-Ga$;j=)ceE-A5S4R)|uQOrl&@r4@55cGht!3RS&Fh>w zC(1O_X$sz<8uQ$(GjRxyI&Ok z_N(_s)LHs)kDnGY6`Ux%&*mn`n1NbNV?tHAe(`a7_&Q%uSzs!ac15HPG_7wmF(-+) zNtP&eP^#jQ13>YeEu1>rJ=bGyypd8<+)Np^(jbUdXu2?A5$gZ1eVCCpR0iOfbCma4 z#2WjzMAeuBkSxFBx0*F`YuL$C*t78?{~Z3-Y=J{QC@zvtzIhxxQTrZI*{*)M(QGZ6 z%A|o1DA%|s*20$$=zzM22k=>t$`)y!1QI0WDaOq^YyXn zxI5lFqNIC>jb@6Gy_dT`3pLw@!ObzpCvkU%H}PJmlNv`D$~WN%1)N!0Y1GopQYlTn z8Y(F22^SSGE%Z^G2;%*sP@;9zQ`3PYbJtx{pQ|$sq@ayy@mvqGBC*>LOTAHs)-Qap zvv_GosRVX1y1|iw!9(|GRFtvvlf$v1_i)&@h1PsC7<=)Wi+9%Ta@#Akgrfq&o4WXK7?__lLnHE)yI61rZ+k zw&*(-6*9 zJD!mB4Is9Z+iX9+840P`EWbVXhtoQFmC)XulWOE8wn5VqX6|yVfRiWKuWc-4yn65y z($>#tV-{dNLKy*YhZzXhHI;KdO_6u=tYXS1qj=yLdC*edaci$njWlUzk+x8kWmPXH z2eQXOU)#I^OgWj!Iq3Gc$0=sY<|D8eCl6xv5jDY1Nx2Zm2@!vluB`)9nxHPIznIQE z?_@ya0j?QPdB*P+cT0eO*e|ZNsn8raA6$Kh;kh<=XN6wmcr3+Q_b(ghzUKKw9NC?EGF_Lb;k9M!|?3@RyX;i*?v@?1$elBM+=%$WN@<5w!XCEkKz*|^HJnLeE zrz9noSr}>*j*%tmlYeFT-MYsS_$NG3ppAU}a@ytKc_PB5)Zf5>bbN{=0h1 z44^X8w@sySs?jHxkpa7-S*BqCa~!)$c;FL>Nsh_D&m z=5OJ8j)0ialEgkX_{uN_1^kSx@J1&*;*bcir07p>Y87k<3L{Z+mJjvMyM!T{i6qXChmRl(q 
zM$o438QI0)5`A07e)2`4gzIQ=RgJ71TbU?nJcUTU0na;htL~y>1NimGVCmZZo6Idc>vOHf(VUDV}uh`3YoHEmd76cLViSXB7jyA_OumX zo%%FRWsn$Zu23J^-I>Wl)vgz(onNy0ykJcMv3ygmZ?d-!Wn}yYuCq_^UTKqvw-P%@ zoo>TENI8)NLKnC){cvk5;=0~(vJm*`55b#K%*9HcKnoB6?|H^6()Yuc`yKS|{%EHC zG|HBYb=fb6oj*5y%o_3WUurvO^Ru72O=0fw%&k;pA5Cg@b%cu*Npi;g>eF6+uX}vk-zFZjB@FrpmJP+qAHg@_L+MMIMRzI;g`KGE=SwlM zhO)B;A68o);WvHVcks|CoP9gSFXNVH`w=dPV-6rn+!RA>4KTv&jV!ng`c0IS%z#g7 z5Nd&<983!hDAT*r7BT>1tYaH%x=uI(Du`{aeVM!CzP37Ylb}$+CAGx5apm{NOfl$b zWaO{GF@ATG-d&K77}yhE_>RyE6In(Dihhwrkfvcvu~L582Y5gfD7@mwBs8Iz(U;D? zVcb;@ov5`$XQxSvgpMPP^h(cYfej-1u|(*Q4sx{Bu2BLX0Q&xvI?IR@*5UmmoVa9C7;MF?hPS_+BFUtqrW!wy3we5sHd$aCfJ$878GR>SAvCV#$U!CJ(FYYXuTP$T_j1%q)mZ%PXm_+4&bSTw=fK>P& ztceh@jJpc3mW!Fe~@G`g!LRSrgVF4tWFe^V> zc|m>y4mCzyM6>o^N7jQ@iN_WVEFQmVttY)Z#}^g_E^PTESw(8Q>srZ=V68lhea;VP z6*4bcffQBecMFy+o$)c+++X}eU0Na0RWJ$v>N36f2(9n@JfEUP?}J0&#GLVNd||F0 z(aH+D5MBjQ3Re6@O`aw20W_XHyth@_7C;TTKAMAwgP$fnqBQH?nov5JhW?TzcL8ii z!*O95bugsQQA-nV({sVDcjrybNx!3PeY{f`t>;2KE<42%Jq*WjkQ0S;3D!ZEuCH-wAlp4HGE^?I_CMRnIJ1b3Ji$aqa1E3M`XfbVi7sW0 zb3|^~V$sog#_GeIuUIivPO@6;YAN{=MmRGL56CIU+D=yUM6fJt@XCROO88Wv^6wbd z(|96EN?egCp2BMV!TZ8t0zDC(hVT5Kr*z0pHS(Xd z-Fi^udxE0YltM%erAtk22Vge#lqr3JAY132PzQ2eRHdRN} zys03x{LR9wLVhM1DN`Fg2^VYkd5w(0)u6*-Ml0=FF+(*+M~yi(O*v-ktT?l4qe=@O z=}=oP9ADmB31DUxkj&}j9n|%E-8QK}C!rLa>{q`0AkJ{$=msn3uiY!`(@9t_Duh$6(G&6;K||e1&=*(wC<8 zW9NCFA1>Bqt{8W!w1jBw0oCkcmg$In5=ndO<8;2;T!l-=jjpqqiD{kv*>oEC9gH0P zt3+UWUi*#Jvg?65S)5HSIiX-vAyU{ZON*qQFN4ljSimnBCD3V+Al~Pm=VPNhChNX? 
zEXdnfYcnp=SMR1S94g|OY&8jJH_tLdYdJ;e=x29B#ZoGdUjiQtx37nPPe=+ zRXb+Oj-$qujXHW|-U?ao0s6c$S!b#b5^C~HNHt^t%Q=W!A(h~124%jl|DJxM@5$6u z`ia!-kXE=Pv3X2a)_R$wf&I>F8B;9tsrnFf>`z^OVGjmEWt+>&-MqMr8E4sq1lb|- z`e{3-UEoBOMngAY9L@E{bXPy-G6aoXnxEQ1A&hVmE^CAM)L-O;RyBi}gjaA&aS{-$ znmWJr+uaPl1_E@t(5~W1<8+gGG;yytUm?2;QYc&Jv=+NE_oksja=@3iI_>~7&h>TmIfhOi?p)j zxkZyBIqjlthrPIrsvqgqc)y1f_8p?B3^}zn8{jK>#yYk~$je5Y_S;}ueq`Qv9ZTZO zEuQV1qUsm_H)~O+kv_$5XrTt@50*{< zOTd5-JRrO7LfhNZXSc|<@Zlxq_^1=I4z!UPT8Q)KoClz99Ds}LPqsK;ZcNUNo+MDph_L--WVgTQ&6qtKcUcT)-FuUW4+EfztB&7Wjs5cgtfVk;l4)1^WZu7ZAo1BSf-}M+D#G_oN0r5wYrN z|1dXIR#9i6E{p?=_NCWx3*OezMffLk?0+llzSzm+!0^90aEQz&6P1S z+ZiMW3BzdCCVJqo7_WO>$M2Y$Aq4a@m}_#Hk=Y+BOh9<|bk^P#3FfO|Fs`Ff){u(b z{gx5dB=B#YQO!h5TD4@?H-zZ$5rUCASds(k$RNZ4PeLuWFQU_fXF$U{pPMl}{ZMdk&ZcH8B0H9ylmps;(iNn8_yHTAJ23r`Xq|H! zO&^}p6fksZl#h%7?rG8C{;9*%qb1w?G|@DksMN!S$a_z9WCv`apgHGhbQ-bJUmTOZ z*`9YNL!zET{MNz1%Lh4f8C!hzJs~q`K5~Z|A7Va?Km@BQ1Sm1WX(*-2cqW}~;6Atm zpPYfWY7OYo8iOO5mo$IITMusVD!Ppnd};9))EYV9sXOeXgY&=kKVYZj1Pd}lr`HkD z(8$^44{SRC9MSv$N<#2lN83YZPj|4%6Q>VxH?45|2xt)DVh5BKxWC$NW2T$J8`k^G zl&XPQ7Z1^BUCGIAVgLByG7kAbMl7z7oI+-73r$sEv7=~S=C;XGH7mL9^=u7euDOp4 z+`ENm`|-WisAOFabZNZ3mzn!}M5YQFbHJJ~Gy=X+y-0)h9spwW`ShyM&Z&BguTabs zz7)tdv1;@?R*aygrVCLk<;I_uHU@!E`#DU>MGk=EHmQ=SfK5cR3@4s#R0Jx*S5Bts zur}Sdl6N|I9xC-m47_jrc&MwGf$ND5XcYYS4vBI0XO}Yo%-a?S63dn0GFD)hlr2fG zqYh`qtjar z*PtT~J%2!|J3qsoT!WLX$UA(b;CLOSPS?I(wQJ5N*XR&viG`*Iyy{137f7z=o^u;3GfXEPpG-9zLkbCsMEzsplv;p<;{8sg34I{=Dhay2Yip zd=duAo0Vr3LDF*Mr8>__=l5Lfb-Cd(41Ye9p-~lLtofFg;a!~!`gM%g8v>-n?4Iny zXt_bdFw?#qR?~Fvy$?65qc~}aSx@o*ED9G2+Ipt~WKsF^x6@`0pMkX}0h}5P%J?9& zG-7EO$U&ASytF~wEOCo#7DM#V(p&1-n#%r=?+QQSTJy~U=in=KJU~(EKiT?^5wG@! 
z1W)1FcaeVmLk2dnbzg!wq=_YF&u@M}6u90B5%h{yVl6T@37~c0R>JCu zE5L4syHC$|_`NREhxVMkfp;da{Vmh$OI(O)inwYGg!L3uN?1<9B@&mC?1DO8#%An4QXx)};YV^PC}fH1&o1ojt#xJ6T8nW>Y87Y49q{|VCLU7V5FIhR zMK-oKv7j%$mQAry02<}SuLUQ|;HwHaXv)N~+Ya21L#d1-)jF5S=BY5yKB~S8IE*d| z_GR0dD}B!A+j=}JBZeHa#dOG>O3%Get-_n`g&GYzJYq0ES#;9K%oM(M@ zo4K2pfTSMrgAm&!1d^zph%kwo$(RYgaeQ#n+T0?fI|8&OHUV5H)CbgC)W~Es0Kk-M z-QuhRu-zsEG)xwz?l$NG-GXO^(_zNK!>#G57(3A#NVdmVRk3wh+gS96@Gfnh-(psN zSPhtb8|ps;^nC02d;DHEZ;-4k3=XS$Y(LSJKugKBq9ZEcFadKr|Z2JuK&owSj zwJLE+VjU0M7^=glvo#baIwTq-Q$f0#sZ0%VGu*`r9C`q{;&<2oJ77}Bue_@^=H$7n z8w&h%Z`Pm|5T2+!Jpw@~`A{y@!eT+&A*1+$bw-w^{FLD8m zpY~n=APIzg>$7z^6IvO{$$Gn~4>|3ODoi@r5cIt+)%~w5yYBIH2hAWOOSXw!{1+^= zwpV>-2-4Qn&Z=zo^+sbBtLzWkiASPB7|B6&!rk5Z$O(9UfJh|i7aXNrVJa>JQgg8n z2vI9&2ug6&gGS2$R)#UqDA5Dn>-;-E6wnB-3iG=`{h`+3bT&w{6aNt z-A_m3jlRrpT3@16Kf}fVg0~LE8&PNC%nf4hxAX8>2efhAg$?N+JXz|SLIie4?T%|j zGjLIi=wudvTi&>5Z%IvyQR;ei&hLrBX3L<*mKy;mhpQ!)V&wB7_GrUip@S^>x9ar+ z6!mvR=Z_{F!%pcy+>qbHBep1P_E>*qD=Hx3pQjM3CjJwWGvTk(zSmu+e}aJ#q-fOv z!H5jIb`lu8Z$o?>K0O-Vd`yr+2UV>e``6~FsSZ9i(GTgJ@mCum$ns-*T0m(*Xrq-^ z*pvBe+U~%gD{m8E1V&0PQ+(_sF>iVl@-oD_Zlyp4dFEa|6@9}4mwfeD-vf3O_BrkA zw{RwC802}VDF!iyE)C}%=L;Ivx#*3wc_KCMyRXDFxK&OFzx5_C0~^_pOKvIA8(1ys z9~Uyt<8kixuZrw?Lj2CzzTRH(<>}I%4*Y2g_+!&zg4q?IBY-7M1}}kmE?-VhpidG! 
z`a1+QM3cYf2;==rw~6>@<}J29xe)5MXjpKst{Q8oQI=>%)!?2Ue=xJEDTM zQevQTRR|u6%9vGG$8SA=MSTpSybSSSy3KNQ9H#j6Ns@L>SDYsbUuwL28Wic687i2* zmEZVAaX+|-ma1(epkWR)`0psFRRKd$P<2M7*oTl~r_!6VMPkR++0;HPA5)Z?+jE+>envI??Ix+7a(aFv8I^iJE`?zI-f^@fH z8P*u`EU^a-Qaxa6baR0oi}#c`*Qbs!@AA9UDbY41);tzt8U67f!oQ&WfVEwRHb?i0 z;C_p8Naq7O_A}&x2RoEET^$9D1$1^729dBjlr){;+N(uZv7{R@`nM{M+aL8sASEgo zLTQ9+2Xkw-H_4^Jl>(+)^*n~T0@IUDO6NT1oN5_w351_;C)rRvIAr=T5@`6?c$rV* zo^cj#);1v^HiR0?uQs?5V>UD!>`8!~*K6|M3oG12p{xqI4d%1=%B-^g?ttSXN1qOlv`@lP_(zQ z&5RnBtufN&dj<9cyt%S%W;bO0v^9eA-s9i$fa$$?X?qJ>hAjP4sW6CQjm!1hsr_vj zP`Un>3MaVAQQ%~fM}3+=tybnbIuKKXtsi9)?gL1Jd!#6@eRNS@bTa|T%=Q})FtsjI z7Bk*IW_s%C!lm0ynBf+pRO-Vx1c$~34vM$71XSTT)%9&i#^^}`ie{Y7SFi4c%S7?fM_gRK)K__Z67+I%UP->qq$|SmRit zkm4S$op7)3ljw*pwzZ;|YL75S0S8-+DAGteUHD@OqJ2Dar9r5MWvF3M-IgjfrD3|r zvJjz&hmnQLCv}mVg?@}Vc`x!P=c(aXr3tT*rxTR?J(^dM+_UQxyxd>!7RJn93h}#j zZ;{~%FG%8j;`1aF;FVqLC;DB+MBZ!)O7cP9Jy3?WS09Y4xlLk}(QPrvxJnLzY2Y{y zll3r>CDy+^1_MBmY;L{52(mHRn923ltfW%^vJ13|MNCQsw0@^8u~1AT)JEg_@JwGNq%$x4BTbY-VL8DmW@bta(P@N z*(^+S-ooL!1!AAO@O%q(;c@)VzQhR(TmRk8a^{(TM24IkGXEh;`>M`+ZsR-}g3^-# ze2r!lBR?44FvNpr0Ed#;MCN9l%Oh^-uZ>OzN>k+E5Gdh`0}-|R)0yu(PS1~XS)<> znYY<27FsKTjvi??aw&XBy&9A8?k0Sp0$d6tz1 zbgq>4z_#EU3!^BK9)Gb3;0LlcqjgDjNlGC<`onvo@{3SnTO9t; zeIhC?l~gWY-yOFFX7uA=EJ3zBoVP4`zY9f{I`0e2zD;+cbsP8oy zX2Lba_OES<4xl!`N%kmmtMV>r^6Epc)(lUg*GJ_!>APARzk~XNR>ZzowjkCt^ubfL z!ObRneunPqV9T<`y8PFUn+>xk_^J{vE9>~RH5#oJ^e=cDl-`^;ln^3{68D|DNl2LI z4J*ACyZBI(jySYM9h=gNguD5?ae9;IhldC>{qL+I&tgow}uUr}vTs!u3J(tF8AzvCdNO5{qZJ5ut1O z4#q%y#RF6FXim9NQWbI*$W=jmC&ga)bTAa-E`D48H4>j(BI!5{eo{->s##jK)#a>l_JWL|k;JmcJ= z@9UJt&#zrlJ}ITgNS%B_3@Gz+)8dg~*9g6hj+N}*LRS+L6X4AjPftNeA^OnA;x<$h z?`h5qSu*a%@zBJnJ;sFbifAt^Tiqmtph-X7H}qcHcdD z2Pn7_L`rNB@(_>uC2VM9*s5rQuTDEvxTW|AJ>_RuxiC-|A={NcnW^pIcPd#QFQ|$D=>Bb}z^)%r5s<&`j@KbShTp>nF5H7V<+ z)_%F)3YHEVmez0PC2gaS3Ct(I5$r^aO2vK436o0svi}cPK~mFBh7}8FDN~R%&l!?w z0xOm2IV_b$8A1{QjCM38T})55_}Mx8;oD>OG)VmK0pa(CX(k;My^S?Q0|?x+A8FTu 
z%G_`$@|g!+$)6C=MIr&JeyzMWE<5;YqxAUM{;$@{ad{;raL$y_9=}T-ePsUN1~N2p zt)Nqm;{}ENp)Ow;$VFew{oFdR8SEKv@7nD?_88L4du1NcC6+>JNtQ3~U@}!W1v26) z_k>u$hUA|J7)A$(Taun*CXJ?Nn$Ks0y(td7g|#`vHK^3~4P8n>_Bm1lj;52&l=2aG zi+hhK9n=F|)#8I=4MS41y{A^mLu7DZePr|(l|U$xqro`37_8YiTJ38Tw(j*yQ{WKo zoJce$3NNa--|WYgy70eD3z(V^%^hx5`CO`wZ>htzFFW>Ij1jNt0w7p!Ce~WTFQ`De zMF1au+1@R#gd9mWC=q3T`eeWP>9G#1#Q-oO0pwrCXb5mG8+d}Q;{ask*C9m|aofwv z=|<6%@p8G?tqZK6y@Jgjp*3QG;t&?-OeoZ&yBn}37?j;2H`_j`zo`J%Q^Y7$R3&md zcuDBkFX>0fofRFDT84)xq^sz~_qoD25I>ZN0Dn%EsCBN+s|E5-lywWKjBb3*JOHt& zJ*`B(hp@=-RKCa_#)#?V*tmV3xSBA6t{JPL!pBSi&81iSFz-kDJp%dGtnK0igqQV$ zgA(zge8LcJ1ST$wa>!62Fz&VJae`sBpjy?yHm4mb(#UO|_)F-%mH?F9=feimm;20J&q%)5aS69FZ?>s{sY$$ikuyjL# za8CdS6@&q&`g;ONFfA1Ih_*z1X0MC;Ym81=l{1`c)Rn?iGKc)rRT&O+Ir{596M{qf zFvc3e!~~p)HK-ED-vNv12S=5ng;3K| zz(YX4p3NbxLM!McWB=DYTV^3@FO(ET(t1W%fgtKnJn>SjVSI0_-{b4Ri+M^1a9Bnk zFKHwzF8k~whsY};K$z%ptSZ~y_H>F&s8qUHFh1uf*~=i)%_e?kkV_>Nx5$6;kl0&B zd}t`U-q?fMK$7!F-~&l@con0CtaVMRPIHvs)W^tIX!FgR2{t{NZY)9hLY-g1M3A^qU%W?u-2VTu!}Wez!v* zzpEpSOTvi}CK0Wa0_Na}sYZz1JHo^FuO2(DBnT#DQrCyqmIRtH5(!S--e>uBZ8Nzz zFJ|Ta=PTYUhuiEP>qkN>UkUT1tX9i1bLJUGB*j7TPVP4A@rv(iC2guV9WU{gB+eCN zKNO7Eo3n`l5tawRnflQE5LY_Y8Hj&AtCJN&^nxQ+uiUXZVO~%@0Pb0jX6c}2FYnjF_VVNZ{8hqlk@}?+F-V@e;>sJf*x%X8 zS<}XsY#@iFb!rf+VhoWT?x8DA@P>iDUKvPa)FSE@85pG~k1GOka{r#kH}hB3IR3^q zw2Io&QV2Ox*CNqUfclY1W@wJXpSj!c=(&b!!gDOuT(e@2KGN2P^01muE=xx}VGVB2 zZvvM3uIcKSfzFU*_WSnCmjVlvk_XZ6AB@4Ba!3iZ1YBlX9B1KF=sg7^A2wMtRC=Xe z0@^)jF{j%iC`=_e#M@t*m7K3IA-1bj0sDKvbSb2_AAiVi(CPKyzA@Q*AlZD#v6n5CSU&ytwwHboku zLIy>V(A&Wvt1mQ7;d`lb0cfnkIYs`>oeYxrc2fWdaBec`N}61erQ3>Va8I^W=p^UO znOJH32zT_uNXe>LFD%@1Nx&&*3mY9?yOv!|Rz43=Ycg*b1vTN)rkP0FFfutrb3+R# z`+&iIItxK@8mDavx>@Y}yu@x3-d4bJ(n2kWTo~FDGv`(6aq-^y^l!KSD?TCDMm>kF z)b~lGz)!39-MAG2%v#se0|AQ5=5!x#p20?xAXP#ikt;qsov3f&OjOW8GrIsw){T50u4ZMUb}2 z8G&s{H^&I{#2s*x=>!INh?_n=IdP|}Pge2695FEY?XVC4NYo(m9eG2(pO?J3M?7J*e`)WSCC z$A}OV9A`B2S?-xK$go(-Hj?gEsC|i(fHFv~+-+~2t`uz*`}8Lqg9xKqIIi`ileRlH!F@KrYG8)gu>vXlSb_aF%|Q7VRVD 
zy72+9HXl%PdaYMG|HP|MGPK?xD^b$dcz~~C3~tY)zo3^-Ef|=!)1w2s5pO80>Mmho z4+6Pq9x6DtaO`Fa-LiaNEnFiS94JkPO&7%oR|9$83~iOX<}NS-;NeB)J^0;T^T~dV z3P(ofwK5wAWG`NAbBO+L9lv=0EFd{L;$})!Ic61(gzMDBC$0vx5-Farg4lDk`EEfo zP=8_R4GRNL=f%za8qny7v^hs^xy0-Pf{C{Z^Ojr7B;h>-iR!l-s<+KdTcDP*@9F@* zCewP1lU`^4365O|dHwuF&S-c-BIU5C=)annUlWf7-)yiOXH<_;WQUaCRv9Da2Ep?A^-zZLsG zianYb@a>fT(HS;qYWr!>I9zYCM&rfvUq`O)KB2p(Qrioua|3R06+#>XM+}sv4Fb($ zrAhdwcc-|iKc>GhU5^!y@W5O&?*5zu$HJ>qvIyWj(^vrs;$$(*OT;m6gY2fb_64?0 zrkQl7to7fu`(^hw#)Di0jSH-dj*sao%q@W09P?bPQj?ZCz8F6?PJB zf%iR_BCA)d?Fjkwx*k54^}Jnemxi;?NQp1>CE2_VZQc3NG`j{VDBY}g<%7e-i$;xE z7^T(vLv2u)dt-+bOzVf;C2eCmwKPS#+C1vT#cOEaOJerO2mX!5YHh8=RhKLI`{th> z<5HvJU>hI@gNxG;`>4q2^uN|7-<;1#hJj>45%9_1%u4+Ez7lBb-G)Nnnw`n5&p`w{ z4|qRDgIitRx8C-X!t+D0tj{aqSL;YwTJWvtx3+T6>y6J-3$=A(wTU$UkFFf1S{MNU z79!;&Fy-r#t#u~=Y$m8RB=Dv7S;#ETwn@pW(+7^9wQ{-0r>i3+%Hy>_VKb9y2Iqom zatfwS_7-jD8$iCM=Z8x9Y~PJ}&44U+66|b(wP2*|g(EDh%WKk+Jv#}BBo0~H>6!f| zSqPl+-7q@|s_HSjo6;b|Tc0fe4g5QicA`=QlaiSnV}6o+hC0ogFv#Xo3Qq1)?v;N^ zYy25q%?o@x-8Xg^Q50AmKcqY$(UQWAV5O^}u33)YkJz=a4et?aZXK*4H|a?aTIQ3g6h-$p4W8 z83nhFy1P@oofD`oc-3#2vmyNj6cQ0GVu+Ij169*Le;@0_1J+_&|j_7T=4ONKa zraa(AO?rB}&jwO72;8%H|N9UI5s!=E^z=lJ!JxTXMzc-E;{ zqKU$t*Zl-rfx{r9k1a~2;U<$ywJNpzbvTg2YY`S==>jTR<#_H@x5}otGLPXF1VoBa zLtUbG)Ws;CZ9(sEtiHL80`I?RI6af=g#F6pR(ZcChs7R?U$TZEP*3P~+BOSw)l?G{ zgdiCO$+z>?`a($afyn6RpDisxWr57N}YwXu%A-^3~gLdurFdq5}5i;X7DmZQnQ)4O`kXm)KVGve& zMuw{@FXkS-26N0YRC<+^lKTwhZfgVXs)gMbRv-_D?WQ8()>|uJEBnzNdM_e`V4-X1PC%zw?I8>czJmUE$4)yzkg;IO z8;+l*Bce-tVpz7cwI(>M|H<^UP9UMF{a9Bl^WgNaL7~mSp#U{X)sd)W%Sywb6^=s3 zy{mis?Dnb3OCN`UWCn5+S$GTold0dmcRby*m9`dTo-Of}CDGKYS8M;a2_^x>*RMyN zf{OT(PY;4m-{v!3K`whI( z3n=qFu=f*Xrx#KUHOCgyB&0k)NC^^Ke1Y%;DJntpP$g6SUZg<<*T!E|8O9Gb3nGJ7 z(S3>n6_~&-2M#-Z)?d63a~XAiVmD`js=h(Cak=;hr|Kcon^EV+XRtmjN6*JZAJ?V5JF|M0WIiL2&KZmSw^#@v`_lLcb`H*kgRj;yF43c~w zi=n{&Xrp0u5|9_pT}xOF*)sIDE8!@iKEV2k?Vh@=3XJAdM_fBpCP;v3c%gj>SEGK~ zoyPO2cpkr}sn>{e)oH`fWdp~3_MngBVZSJyL+~<6Am9*0nLXPmos+6hzzvfR-K?Zj 
z+W_PJHV{X-^&w=rq&-sfS${VuD4*bC^*|$3D;dC+Oy6!F{Spm`^&&ERFY&5^x}UXO zb$cs3Ab?GkQB&5;1ZWNzs6@?Gohlc3OvyV?r+oxKO8&-5j;& z(-u=KG?KIzv$=A5f~{Rg9)yQd6oWbNN;9CriQqUb9GKqMzPG^SrEuG+(6ogM^I&sW zlw#6Q-DBYZ@$-$+fka;F&xVO+#G|aFsE{9(t(DFxZQq%?#f|w})xYzx;|M2=R+>9( z)(n*KwJxYlLrYRyC1hlxtD_`Aj4qG`ulvEDXp=t@T47RPOQ!^u<*XQqY?q9FqlMtJ zcKP3^dyYZ6Z3yMB3_k(G5d&5PpN(7K6$hCZZbJLwvWuKxfvV-aWl1UB#%O#jmB?+N zF5DC7p8k?_^(jm%4FdduqoF}&9-5w2WPTZ~ifn?V)LXOGceKKS7jVWq{r%9yZ2O7y zhX;X_KE8t=<>l&#r>0l2PbLZO-@Z<+(_&X=u2n#eF-SjZeNjARH)_%>clU;QE_Mc~%q!UA$2fF%FI@n0wC~ zMn$-y+t{a^t1Jy>Tj8&%z+9g(cGA+({i+KRjsE;mF{MeHLiuf>u6J57uIsl?UgT*! z+?}O#a^|Ek%#;qTFS|9im=tHn?*4-QccCH@^SraEl)!b9|J5j4R6NugX{`bCwE?$E zeWFd>8mIix#F0G^;ba+aMxnRE>kD$RDQJ|+pD$hV1ZXVra+i5m*jJar|HL^2l#;C! zN%UWFFPpTn5-~EBG)oJ3I4AtBCtvE6bg;5zFOW72f=Q?l3A(}7p*4T8doiq7%I_u8)r8^)A68I5inpmv4lC?(ZCS!v;^$?LXu)<^%FF1$t3l z6D!CgjN>@|a(OT62HX6z_C{@|K)v_1Nsc@;L*`K(9wlwUC-~~$&QV@;5Be(#2R}T% z45=MfG!ohUrvti+ClbuyZaItoT#niAUV^4iV=*lRs*@0O2NsSL9_%>EmeWlAElQlt zvpr#c&@T6}Yl@n6)wbZ20a)2rt9IXTRt{NYT^X4VdgB);eUI7 zR*WHX3{-9s(v+uM)czG4+K@L;V<805`i5-h9#65E{TU`9D>Ovuru}Z!JE-1b&=K@e zNGcmNhfsHg=Z5ox;|*TA*YxnFOiij~vo4lWs0tSiT3}Jr{qmXGy}kk;3hVMMN+XNh zAx;GPfVP2Ix+UD*Z{{e2yoaX=uK;v}#m!f$cD zW~PA)Wp=OjTDcuj9adg6`W+``ICXOy^--%P**68u_emevx!|Vcrd#c}BF=h?#(OEH zND7AVGG-PO%ZMBzvr$^qH-&l{c0#}0h`abE*B@YA4qt=p!Y`&%$&wizKgtBrbfr{4 z0S+z&OQ~meZSXv+D83U^&213~-zQMyDWRRdwjO!kw zjkG$P3$mC-x*q)Rp|~QFZDLH{(L1Zu>LNa(+eI+J{bYG=aRG7)>iaE(`yPC^d3KeHih??Lp|4kp;krH3WbX$B*{k|$*fPl=ZEkhqq?Ek1b2 z=Wsh*O8P_Mjpjt`(3lJ87Bj0CP8J-3|9``%7u3WNZzE_wnT_(PE}3!Ly)MUW*Z9ZNw}M6#0+ZUDSaw0~s0I&3;-Xy@UBnN|zZ2V#1tTFvT{C z=$3WTmcHzol+;*aw-35Y?*aUJR{!Eb7Fi%U6< zfE-r%DEabSeRiZf8=1!p`v6FpV11|-20^-T9Rv^S>~g%tIr|O}mgGtLc&-sAs9kr9 z1E4MfbU^XuK7l`YXSh$P+yc`8@izi{G_9RjFq6o?jf3b+L%C4+2`AANrO=bWUuR3E zigudIowClxqnWm=xPa^A&;n&5vD){^W$o5u8r}HFMxorwaVA_O<*x5s z0VwpZf6!&G+-t1GpzdRpOVs+aQ`9#ng@;%_k+FSi?VQFRE`^sFxBs+%-H*?L!^hbX zv^)qb@y#mucP1Vy?m*ZCCq!q(JmLIQAh${NxYc{g*lV}3lbJ1GTc4qj;fRr(x5`_8 
zUlY~SKeP-vgET;*t5j2hi6?N476xjaKWlQACapg0di!d`mg8~Xjq}|mU;pKeH4YPx zTFq!!sW_9_EIOW>5lBBwUGcKDMLL#hU8%vnXZ$qlMxzSP@S9@;07=Z*EXc4 zB5m5p%tbC{`ch3@68t&U6 z8q6}27sAx!0}E5g>HQp62M=`hna=dTD_F<1;$B%R67< z-!{JxS0%=}6xe%(jLstxa8;Gu3c0SNODLn@_a)g5=@(T?-={VrB;o-d+LB5uffC4J zpRaHwhZ@B%g66{R|7@IfdDt_^9&@)SxQ@!X-FRN)U3YIer!T|B;Rm2T9}*yL_0_Vd z-vB}^_eX8`Pz+HChux8f{bJ5Z6mI4h(|HJ`Zd}6%#kK`pG(tjO!pd~jwU)L*ftYZU z256LV87o+QKgWsmNl8S}*fdkRA8&_B`4M6~!^R`l>n{OZA(KN1>BS%VK!A&DQsrq) zMYeR8;YxT1$sD?Qqq7JaN9V6Z@JY~GPmBq8h4$**T-K2^x@mgX8$3YG3Z_?$cX1?Z zH6>j8<{3y;K>vc$GTa=@BGX^pY}Y8X2?9^3HGBXQ(SN5mdX-M0AtaX^xow*+ii&Ha z5=Q=B{HXHd%Ag>`{%!!WH04r^g}uB-Kn(huXCWS{5L^L^fJr*iqiffJ1kxQ#Z0Px< zH{Kppinbo_tyX)efpxQ@Y;`f}s(|Sk<~u`e@2@X@CwX%+KCA)CD@!Cy?=Jt2A9~*R zwh#KY^#yHCaI-SFP6UMQP6`8l07*c$zdQ`(aPstf@C%V~Y_-jqmLXH> z!-F*)N_!TCm{~T3G6=l-n3IrE7v8+kSepI%fNSo{`yW612JV~m+-~ES)C@mk_|gCZ zkpa`>#%0B?ej5cWYHErS$ieif_?va8`Xu{HTA2Z^YwJ4sOj|UdDeHB>X}2tDf-IUh z-Bt?qiz?}&YI;@W8CJo5FObGjmh{>so4;Y4jAagjE!mXD_r_N_Yd zW-Y3T*8R`c>!zvMu_otFg{+8JdlhFU-m#(#(LhRrTRP8Fur=QJ&QNd%#yD28W2JtAm-<%~L5U&d&MSpGAK z?jH{|4@ZNwdVyAgxSr=f7)7&WWv*Sq+f8*L%cUYq&=<1^cQP^dpR)yHHMgL_-YIVq zO3oxd!cAGh(qZYfgxa6urf=srii~t+Ya-H4p&;^K(Ti*l!~}$}a^f1^?Fn zVT%5Y338Umz%>*eA4Z@~3a_npnVt-!nTP0k^kN>a-9tdD5p?)~-F<1JTLm72kG3FI zQD9}meOmH?iX78j7iB=)1@%3Zwk(+(G-*kcX=Lg?LdRTv-F+>$fp+%5sSJzt#6f+U zd5WJT#z~J85MCH1GL;%n2*dt}$AuF>HrR3}-c!#f=TODCC^)v0?HZ;dUeJvb@jL7R zL-f+~c3$r7)@U@nj_^L>&|)Xh3~QRL-|*sPOJ6mkX3pgDebGV5Nyxl~<>-`clB+|H z6CCu{S|t;Pe2AiVGLcrm3`=DGAzp?=uN24jTXfQ&9Hx&-*$kLizPn7oB}7wGlGI^G zMnrD4m9Qv9UL7h2wE|m=0u1K=vJ60F5yl>B!EBdop z?O~KU1PG(gk-mfKxTEtbsf1K0SG^lw5}(w}yZeFqBHo&^_a4(PHszNl&=2VCQQ&!G zawsn_Ml~p9Gu9NAJc!8J2thK6u_WfgFhWuWD<;_HFxM(E#a3M-Ai; zt?si;H@yl`r6sORS~(OT0M9F))^L zTW%vy&w4&Ul~Oq+l%t#A&)ZKL*aa4(w?*u%`W7@ENIw}3spPR;;&mXjY2(&lAL{~! z?_~W|SALg@j*>=x9UI+MiH$bvD)BgDA%pM00$y_qg zdg$WkLjK-a9{Aozs!adj?BqZ%t0)P@h>-vWL3G9^<_O?EU9zJ0ou+^;L{cY!A0{E? 
z8v-R!-W^`B{n!|(F}7f$Y{7y$K}nJYr`mxwYJle&Q{W8bg%s(ioQVnXFkPtA7G-Sb4{Y>9VlJUMj32>kd5oVuda`Q}kE5Y8YrjTLI=w$K_4GyO+PYHJFW zmvU{%J@mv!vaYGahxL{gK-$`He=WH;;_B0%a#v}M+`E~bU{?5uSJTlux=;z1u=_^}X*WR*BUOWOTHr7z?vZ>4xEPT9i?aJu4c zdm^k)dAV^1C0y=`?2-^_9;7BL6pDF0$ny>#?gbjEv&zLOEv?Dyz1;;9x9vW=$yw~n zleJm#sBedbs!wNh4<8BZ7eanpdkCPP7N#pKa*798&4 z9D4CtMa5UL`-2DaEZMO6WqwPR_yTaBlo7+2WOR6*UB0$XXFt<e7k^7K4_31Zt zO4%6-Vpj59Hc+yI3il0R41|v_`H&E>#rKr2K<62!_DyQ>Z6OSy=n>2fzZj31?Jq8I zS_pf($!SDu!9yKRHwuisjU4pqckM^$!vpcGX!^ksxTj6-(XS^1-J#&6^L^E{jM5Cz zIz;+xz>@aGc$Wgkfee+8y`Agb^zaSU4Wjf#Ej%EV?(o)ht>7&_xnhe&o*5(O=tcML z<}TbyHzZyjp0Joac5T>@H6h~MaI7SfWXCu3ehgjcL^7|x6pKBGf)1bv$xqKQK$Ee? zVjF!q3Q6T}?P{QrQ?U0$y%>9{<62r_3>(N5_yx1;T9<_=v~{)0rOlcbZV$B?SBi5e z7YE}A!5l5_fY5j6wzM|8D$kHeW3dDkze4i-X0N?%k&Dw)w0&onN5)CiR*>OmhWS8lOcJ3is@hOZgQ`xUoLuo6mUDBZhP4HN_+|-XXCbQ z(q&=L$e?&Z#}H+kjPsG+p^?cYv%>`!NPsmB-adzmyFu9b%&NC@`1;sg7CdWK>MVcN z8IZaTU`mKbYHMPMiQ3;v8kiCH+Ep#N>Zbt<6yGAUzp-&Ehsl_NA#Vwdmr&F@Tp>GB z8i)JCV*w<&NPIGjNU+J*Xtcvo|BuybOrRl}(#5XxqtBQ(wywyArQbQFJbgsxhW*m0 zUFuVcvXFMd--+ge%|GaQ7m*;k>piO&iPq+K(j1#Zb<2 zksz%ali{nSj`YUQ9^hA%6qO(ZaSBN5dG0N!%zx0BZv?lyD5nurX`bcx1ZyPvH;%I~ zFUbLh*TJBrvjW);BrRVVD4T6;gUp-KB~j-M$}(_0%%5nc12N=g25g{a#n#i-3<}bm zAx5lYE-qk&+LPOXlAA$<#km&uw+3YwD>MyHeh>k_VALFj^96U+&|Zskkss74l}-^; z^xNbb;FLG{AQ1=3k{cktf4h*sK|m&0dh&)0VB@(j|E|h~?*XM=&V97MSHa+Q{uZj) zy-^Xfw2Pna4l0>FSJ%qqQJc=P zmYE8>pH5A2-4{tCa6g|sXAWH_{hvaU7;Y55NpnxL0Va3P-lSv3*NEID^1W>IF&e8g zw;+A@R(7K?IgKc3$d(~V$Io8Es+Ul7!W^Mm0UwPn$Q%3NsepNB0Wyq9U~*ksn<6b? zyy%G_&+)eBoPG>PW@`r|^Bc3;37GupRFhL**uqUn%x!sxI@=&FN|)tEIb;D?{R_V@ z!3GBX0zfTSo&rbOF~oZK@_*?V`h|~iv$e19lKWxI109COW_yMCt;ml^PYM-v=?G?h zA*M7uca%y@kUqtupnBI6J+zW``R{8o_C_J7pgIMLl;o>i=t@t~F*#{&1M8>NGXXd? 
zb?<~SI|4>9a-pkMI6Dr!&w48IWxz$ihWsuHpG7>4(>stREDTgu)v-A*+X$8(*ca4tgwpny{)HI~uV=ofq{b~FZ z;w$%f9Gn_3)ziq#Tt#K|^t_u~(V08@OWg#6;wz8;2)cSpl@`w$K0~4bY}bPx$sF)O z&Y(j{@cS)4n3hhdL&(IU${JwZ?13GP_c@MpS?0OLJ5DiYcQIO->5qY@_4vJ{6X8>@ zyb@GQgSAn%Y`NvK-H2}VmRu^CzsB@k|8d%sw#D}-KoLY850Tvm(`+;X{y#du=nmm z&Rac90M%PqL>^=G+=isDtVX$NJN6r+$1Ab#?sk5%JTl~hY;@N|xUs)@ajvoB1tySc zK7$9?K$?ZsBW!8SpRu^9q9Q^!ZgjGVH=;2ZdCuy`<`i?dDhxu>?*br9Nb%Xg_6I=X z?`Qv1#f$bGJbFl&T%=t0#E0! z-S6MGfW&P~$?nadAzKR_0OoW~-sWMLU>t6giwA~Hw2PelOfh0W0xrNqvC!>vWgEO3 z+L+PyjYd{uR@lM8Xn*XMz7EmPgfmR9B_8LHF8VKTOKNi)xu$Lf+WzV(Z2#La<5#p+ zO6-@WOCiv2U&S?CsZw_R4A9&dpv?7#Nh3lgLbj~eMc5@?)6CD9)&Hgx$c=y+h}|R; zdz-d>_Yqtu-l;688D199EEg(307OHeCgHuiqbQzbk(pbnA*W};Z>PB{(*65tcdzYn zv*Z#tSQS?lQ=o*l3&$3J0&K5^uV~O%iWkl(0e08lyE``2jYE_rpSJ>NxY#YiF6vco4)PB5iV!he7px zf{O56GGkE^gC$9W?8O%&ov3oH&?{vL-V{5Gj->y#&c)|nN0y%m7KTYu$3Zk)R!{Wa z&qu6QCFgz~C&Yx`CX%%$-M8BY32B!};uAuh-`+MquOR1#7?C<)7=YkW0%|NYj17OD zbMDn;uVZu(rQknEjzlj7t1S*peR}SQiAF|v<_!_iUzf|kN`lw59VojuL72n6Iwpu- z|1%-@08HCy9Q!Jl&u!`Ucy7BXR#`7pzqUv5f{8xBgtO8tcGJN9rfrxZ{Um49uFOQs zYhMSZ1I#%|1?uOMy=oI1n!SLfuDyfkzztWLPjW)FL~>OntSl(1nRocRh6Q7j5KB4n zOrIUC+aBfib)Nc@s4-4T6z*?rl-3J2#mHj1slnTD=z5et1CDKu32e)erM=`F|l9gIRX*dg*M7&^5c zSGaSWV-Dz=e7IWpG-EI_PB_D&C26+|M0l3)7*ho9em0`Ug3!;}MJ;`0GD!h-@W9&N zcN6&T0cTh1c=v%Qvuk?&z-q64Y3N+|>{8nMb|L>)c_nvhJ+#Fec;jMCt5mVZ&jYlD z5nFbSw*UgJja+qB_$sfC^bfxarIZDQg$_)YpcE@|YnaRh!-wTOH2NdgTA=jJ} zL&jslPf&Jj*^uw&cab;5(NXuA zYB5Myx!d8r-EsQ1Zk8L{APNIO+Qz|q8u)>W#VBkG^z&`u6qWS4HX|1^Jjq~InLF&@ zGu%g`aLqlySyuP81@6LoDLKcB6-f!UuZ=!Ym@Dy$JB-*EtI2UjSTtbZD`7582jneZ zb79Kr(~r_OR-d*gWVVVp$;Q>StjuMpx$K}X`Xj8_?3xrV4H(Or0jg(6 z{|@+FL|-Xq6HNX&LD}WB(gD{q^4vViqFXDl`#iJ)WgMxh^h4{L`bEZ|oZIX)GZx1H z(lk{f(Js2^G$$(S}DdZWdai7AFtjU?1& zYFwCHGuOgJ)a9XbG}B9Y_az!MYd$W$HE)TkFo2qGxL7-t=`slM)$mG{0a0PK-V9r3 z37`5bF54%=O&1kYN-?_In6;0^94KCT^O(Ei4%EyeB{F6iUJqvu3q+*8+V@J(mWCZE zeU(e#Kz}%uA49cyOY$xl%b|UVPJp`CgrK1OPU}sGx1*(x=Y`nb*3s|Ck1KNM$ub1> z2$b9W+jCr>-#kY%_ASAvOr9}C$e*%&k?}}bF1Irr=2(9t$f>h)DLMX`io4QbY08`` 
z0KY70;a+O#D*|yS2`fyVuAbkb>b)C}p&0SiALj348VDOFD-}U$KFUZZ$JLNuZ!@yW z>iIxJq@BpfZIsGH5)KUj5G8V%U3Oz2b@a!}h>~XOG$eQ-$k&37OnCgN{{s}c$B#}) z!5M9Il^@Np*8D))E#46AmFze;qU22EdbuER-wg!#Lmes_w*-r+3DUSvI3au?^Cl$P zR~Dw+%!&Ej{#?+=_(zryv-LKRVU8})Ed8rkJ^p>U=_#Nsc1=z#3$CiI0q#-k_fU-WT7_r)ZjGrwN6!ZdTn=A zH!?;woV;|QT%K&is&Lz)S-5_ooIkVaZ<848yNMGR=&1}9U42~cwx^pI>Hwd*nn3Wu z?$x>XeqF{0q=@Fzic{PGWW`0?T#xGXqnrP3^j%SUnNled@D!E+wp2%>&!U04)GH)}dLDajB|FGM8>MZ7cH4QNSK27G^o?aC)Bz_OxVKLhqvja6-1 zjR&Q+?p+x!k$Khi55GXW?6pf?qU4Tzp~uKV#obM~`|Zd7*2$KSvG|%t+^tW5!#io% z3J$rccqkMnl-FppEFJnG_d(<$9=olnN=KmdcSwGTbabxlG#-Ynv~K&_kLDY{vn_~% zRz|r_U=Ut0h#;>^Sl&OkZw>9(x;JjGztb6HEZ{*(zx*>Ux?hYVz*QuazpsI@r(;m2 zK(NOj*b0x0_xc2<-ertjs}cPVfyo$T6WQ<(QfR%%keUe(G!s^^(EcpOP<*vnBP9iv zW;5W&VdHWO6(f#M3ED4NO#FGwYrG*#N<2dfwwlq#sEp*j7 zsHh~TAI{g{kHqNPP$Y`58iA%-AN8(54LTd0u9R}PGg7S^Zf8c;tu{p-PZEfcsh&8i z&3YQM`Qw`9vN|&QQZJ|ecWy$nI?t@>uaNQjv5E5#T*m6TH~Aup%wZ&R1WL>wYKj^< zTZM7{%`gLL@t-p&Mk1bzMDE6YOto|sdVOP-`44-L7EZO1iO1HXrp9=WTp3<9}>_7h^!$`3fam{+@36nPyWLZ5Ww z3xatC7Fven?WBlFEguq{*LoO61msNO-z$YdR5_QU+yB7N@RC6dKnPAYil zo2?2fNs1dx(8`TP5;iC(R9`ukQqx>BKXJVZts7TP`ehgEBi{5FauTI71ssjabhu)A`_%_H(FFC0jEjFsUD#$oyM(SF)?|Gwq2YSj{7dB{9 ztw2H!q-)}CbacF8+)%6G%bzQqgVMt!hd2|%u4%K_>>dmFtr~L- zjqRGbG&ex#0V%7QNqA)9*O2`Rub1+3oV}}cL@-xm)_$$GLy&8{mx+ozW z9B5tk`z9QBjLQT#gc^Tvf0FQ{!L1U6)orzcEjYWor-*d1dIHwW7FNS7CW!!}njZ5f zH-1`gRoNhoaGFX}4@r^yrOozyyCjTi>E&HaZ4&4XM3aLlEd^Ym;z?V43~(LY_XGaS zD0XWT@$ZFid(Bvz|)MJkSSu{3-k zO~7BFKGda8uYQ~a^na1MewNfiP9rhe=Bz*j{ni=~5cb-sWtDa8ZK8mtxu$-`nbY(d zU^&w%j)l6Cl%P`BV~q6urRP=YA?Vetql+LvEqQ7x;s3AKBd{9Z+T&NuPp3jHvVbA6 zeGB<`V<(0KWqM>9HYoRG+#K5U&2|nxkO^G-Nb`-XSh=j$1~+v4DHeKQems^Zu-;IW zrYz(K{JZB_%8R;j?YO~xdmvtI6zb|E4M{=n6rcUrdTdT)_57`*5IcTHBToaFgFD= z#lb00O6x@8MKC5%iEG{jCD-yPK`IbenLzv8kRnH5u1_=NFW>v>G&iE9xmJqGa3lR# zMceY4u+g;%?`TmY>BgctH~G{>+XwXth-#ifwJ~dLB<>s1U6?dpDe?Qu<$k>Lv{90G zPPcqL<>(SdGILYD{pA1ptIyk?#C9C&3@RiFt*VY>pYtAUaoz28hVCo2ojlF}f5AxP zb@Q{=o02iR!w8s&VyW(|Ku6Hut1_{ERin|Z9>hB9 
zIC=sP+TDvaMPgr{eXJ!r0dmQH!l@#!w3&dYl%KGc>7TRcr{B_IyPBFv(h>oveU+@N z`@AWRSW;R*T;rgnvB46_tLF(Mm zyV;V%?foV zH??}ksP$wW*i}RQish{Qc@1@J(YIAIRPV?lb#A(OBjX@J`tqe>)m$Tc-=enekzPC& z+(spTUq*7cEkdVho5|Xz#cBeEXA+illi5_VIoXjGGNYZ{y`bTWcL*17>%&E0P;j7AOoA)8hYR2~Ga&aBPB=Ytud&kgAqKb%d- z=aBwKlWu`%Chv+@ynSD{QT2B!?Lcu#!_8iPT=cz#O^qkSAG{e~DrK7Z_`>E4)*5yJj7 zOa25N%_>VU&ctVN@J98{zY5tb;+sAg;-cprDpn_Ch;N253K=A zrT%-c!0S9KLoNk)oC4{0<>hAU?q*f&OYy=Q+#n5oRolLb{jg7bm+&vC@7J+Z-%a5u zMipK@sE%aMDIrcGAc!NuLtc6`mFiRdH>~_~KW;mYKI1k&e1PF@On_5%q6~;f9|9ZxAe@=6q*>G3g9ZSJ7wP0 zU!H7(1ZtKzTE~`)*8_b+noM&Kg&Ska52}q%VRFLgn=N+XfglV_U6giB1dPT9W9Jv| z@y<^~I-zq}-AC5w1g-C?jGV!FKPIv9r5475S3am^t2+t0Rb4++ z+?|rX4Vw|v0~du@-WIzuo5Alo$B;`^(Ei;4m*=*gy(;|E=InYXKI2;P+RPGE~ zdH1h1$`esFm^)9pSeJ3Zz$g#X^=v`hB9nTo4U3M-~-#N2}nz8G!y}aDI!s~ z=OH!hX-dR1TxN}QLfOZ5z&Pof-jrV_+;r$vj=hy*;k9OYgvp?lKc`TmyVodNOpC?v zIJVeR!441gVQ;0M(J7OSb4~H~m{eOgM7(STON|Eu_oOQ;+!DBUxvMnL0kP7NM4RaT zna^0{TA>aFp$e(gXriYq{b#UXm3lnwAB#1U>Ds}tr3h&#Oh?c=1hUny0bgfd38j?W z>xXF{l4``yYId9rPE87CgP78ul;{$piS`^(2!w$R5+HvjRMpj zNH?RzsjbmiI^y4)b&==vR~WUDI0FLV z3C$XBf_2*IrA|fZMuCk(*K3iF5sC+oMo|iw8nB?MeLImRUfZ5+o1Ki*%nL0tc)S1#vJ$$z;0Dox zw|qjy2%ySc{Tj7h4s646aD((eU(}J*V;gK$kLKlA3LCW^TE@&PLC-W^2BP~#Pe)B% z5s@R~>TY4|G<41vG+k5jwxk!Sx5@sGpEJ(EASMP#n21Js_|h}%W#9MA0Uar+PYI8z zFQlbNl9NNXpPS(a& zU9%qj=MqHIAsYIm83*nSMa{Dw`4nzJtd@1ilyo6~wTmcGu-r0fNxr$!xn?tIXI%pW|wqQV+Ri|3S4u>ZOP-}yCwf)87SzvZnN%BesM?*6VQs& zZ97O?03h_nR4GR=y#Y}ZI6tsGue;>18Njfa6K1oVJ`Z((-K1jpG$CX}Y`v0vAT-p| zA50{%R+vFRa9gtOcB=BNjL|lf+_(viR-Z&%Ei`!Ek>7ms>%=KVVj_syZg$SW%jNJ- z{y_Isv=4@~x&8aI>6 zBPgc~4n8*%czWj1RLc0d^9!#TQEXl|0JN&(IfTZV@5Ln>g408(-J9I+iytXPR6V8- zhk1Io%5V%>lT+FAv`pcSq}(Rx@v`%$Gi;*o#25BfZ<08MNcK#d8s^32(>?{# zS8MIzE@_U7(S9gciQj9i=D&@mGrjDT*uBIH>l&GQjDdl>!` zuwI^-epv}|q=TNssx$JrM-qb4n1`c=2_nYitBH`)4!}eRG5*@o3bkQ>4euYC0a){C zmMnWIyBCmKk|o9)^UAg9?&*j~H?ha!B_nwm11sWEl_ovmMH@^RZht=I??#M5IyI{o z&#N)zWtan(o_1JfLvw}9jnkylop0K>a^mvemNwtcfXagE^;mqY7qxo#C)c8Uz`(U; 
zkl1=)(0Ou$rX&Dpmx|_i@SE2)SNFW<^y875KXxD1fp|TrPa+w6<@pBkv=U#P@hOWW z=zkBH8Bu>iX^hX{3z2wkwhiwbt#5D?m`Q z|GlD&FR`amUx{k;E-Xa~JLI*fv1}Mt$wH_a+^M!gqxKdoS7Fk){5oAM!JXcSk>XB9 zi>CpTrA))@35|BO(=R}g?rj<^Uh1BvX@Zg9(Q#r0qeN3k8i!5~=wuStZ^bsd^+A zt0ooKfto;@2XP<$xsjK}xc-%!@yJ{p{4%p5)BhG~7wE39Dfw)plH~iLiTiZOju>RC z<%lNY$@$w(=N`xhTD0cT;^&*bk@5%(&AXBSzUq^@nrnrQ!pr6`9v-}O8}moi6{F>* ze5D%mfEg7HX4{YaiVbY;FINs00c)UgHI(q#biC>t1noWbQIZa*wb;o+j=$*+hFGS) zl+UCQ1*g61C_I^|@O1Z_(084jNM>OvqFv;5s^U@`7AuD}NG$KF)gpj-J|9vV$V@#B zy9sqIZF_E~!bd?x>VA{O?7>u{i1NoK<>%^y3(Bosq(fi~wpqu_y+`Mb;X9AOi;XP? z_?m{uLQhDBkn5cInd6|Q9fl5)`G@5|3pqrpSSt5@oTg+7yqC&m5kWlCtMBvun&_eh zewqw$v_`!N3Y{%x^d4LDewFHmyr+S_6sVM11&b7oi{g>re_u6ba zr}M>%ZcU2{u&-;V8GrkOOL1BEZxg(Vuyp>jDNTJ^oK2k6Md$rno!*rmNgE!0oM8)w zfpyB1H#^z+PS@dMG7t>Q;wL_1bNyP8eVss+R)81BLHF4wg9n`n4g(Fv{4(r04pW+R zBc~FB8#ou4`$VJ7zZxMrHsnX!iZoSk@RXEGu?B_$=A)yTo@!fKXSXMY5se3+GW(>VdG}jI)z5DybBh zg{enO1OB)U`MyO#i&d=ONDNlPZV=eNU_Hj8GdbYbJii^BYE|D1FGE@@J8o1q2D9iM zQcx*%(1`2CHx%2y-EIZUI1qI6EJ>6rwXB5k>5!d&QNo1mm8Z<&X&KcTXPZvUX^8&z zKK1blyZyi;1Nq#hcU*27>KQhWLz0OTba}%WUblx}lBPgg(RwhL=BVCpJT9XPEi>X! 
zQf;!VY~7SXIj6-;a(oof8xl!x>7J>Yv>De$*0}2`)i(|#^px-O=IvfT2hbY!x#8c22X5(yP9@7ZSUNMBUXv~KWeuj8y71B$IwAz&{|>KoSp2Jf!u>g9K%v8 z_G$N$=xlUYVAeM8Bxt$nSrcj!jsNO~YVvtv=WYd9#QzLl*{*tH<_LVoX8dv)Bbe_9 zSwEfkesL)E^AU0J(A_H}Tw4*Vms6Bu+_Qo41M(IRow-loDwd#WWAl~(TeFP5A#qj$$VjI3}AH zi*-!g9$US=Dts%u9zyR~S~W*8HFu`=m01P1QhD7D>~W?oE9)IG@3SBzSy`>j?y4E& z+IaO8&s9dmRRwFXpB>JvoWd?|7ZpN*SAv{7tU1Z2VE8n2dy-z&rC8hPNN|NbivTYy ze5%dFHFLd9xe6E@Ji||>=~*!eV_3<*IaxX8{a~=G37smoD{nst)e#(;KfkPmAl_3k z&nCzu;0=n~Io42FT5J-ZnVEWd87MSTJTHHGt+mD;41V0>)CScZ3&Kk1xuT!(EiNS~QS&@2t$9%09X>W;XC%9VCy`HA(M}cVk0+ zLRZ5EMPW<5N9w*W37;;j~*CZ z?&&|(;UzeP;5&HZZ8doGFh^FxD{yWL^TsKIi9x={MbnEB+XwE#!MO}zikcdy--rI&;P@J9L};f^&@d2@I& zARa}p$Wt;ZzyI_6zK3OfbmY1RC^(%W~Nu7=?nF|!QJc;v)5tdsIegoe3?nl2)L?Xh{jOSfC-QlOTZ#KXk zqqs5*E5o_~JCbMdOoVd*-azPgNI@x7EV!&M?xmkdLcvou6|y3LXTp~He{AhpZl>2S zPD#{Zj@{sL7m5*<@*l2?Fg)uk9%m1RsxQ^R%TvKf&2ako?D42L(XVOTu>c2Pyp_9X^>kJEkKN_5cJ)gQsU$9_(|XI{K$T@`lj7Dz+sq6r7dG4BD)a zVki;8lmh=o0}LuW;m6j!DEV|8C!$Y+5=Q>4{N85v&eqAW|IT2NE_L*`BBfE)WT4 z>j+8XKj^7)#-#{f!oCIqVtX|^EZ`>WIK;!vu`%k|f9h8WV=E~)si_=d3D;N}m!_t& z+Ri!);asYFCELK2x-a0^X8oTwQvH&G!^7iYYp6C0iZ}kSuS_QffUXkF>4&G;_kgpS z25?~AJhw+3c}Wj^rL+oyF)t_@)tI)U>%^Hm90_Ua^_vA6*CBNH1qqEdo|PyxeI=;J zSee9by1ems9SZ?)-zA3$ukfo6x-S*W3x5Swn`FNBV4;y*)|OxzFa6s22OL*<4j+Y$ zd6*3-gZX1Rx18I#O$!e_lPWS8kCEWLRU5?aD2zsY-!Od0lxf)AGbLh@At9{%4H@R+ z>}>t$w@}k)G<6ayR$kd0jz_5~QdH$?FHV~?ZpYvbrDBkBGn5cRm2jxO5;i-+N04sC zZ@iJ0hm*N!w4eEHlmvzJ&SvQOH)WLKt0!3a64~tDSsS0}(D>`1+oq58b~`E#u}AaZ zr*w@8C$H>ZI>A@kvl|q6WSb+^dPp9T)}L!|{Ygj4C$Xs& zBpqg6R{S%FLwjQJdW`j`T@Xl1pfRP#rPz*;VmjWCDRiI0znORU z#@jn0RCjw(x)CZeG8R!t<}CvZp;~L6BRIuxWPf7AnNfwr_JeaRTs+lL535}gBUjs! 
zmTOjPZ=C-dvJCAzuV$Fxk&IGfy49~_gc^@{+pqyo*lFm@_uND{t>a7H&ZXtHcfH9dRr-~I2Nf+fA|%^2t%a7G z*`u(z#3x>~Du<3X7$&b|7^bz|(`LlsuMZW72o^C(4@%tB3=m^3oZ(l1hV~r$b#@&G z&$!2P-p>lO5!S2}RYfPwN!nKjLWV){4n$hG@);75_$hf-f@t+U1+E>%HA5;v&Q7wM z9~Hz%cGYE>bWy_h!x|ea%+X8W?Q#kDE@W* z1JSkw6ENTU8Jd4uXC9ACs5r)(@t)m?P)(;odZD5mr0t-V!EMhpQ009#0xQueD$@d2^YC)D&>nITXGijZJoz-KWWge7cTa*AT2WjruF|V z!Pa>B2D4npiY~mFx-jsm+@}ga*KnTOAO%VNv59+TbLcUaCC+Y)4GAOZa$NcFJ&7g$ z>%LV{H=Wo3YF-rqvsEV1|5=0%IDInCOtomk3|YV^WPEzGlHXp3^==}r^CC4IHYf1o z1s29_jE4n+G!^p^p6#4B+j2f2`urKnTZ7sve@uh5kXr z=+Nm1{reIKtlI+UhYqO0$xngKN;5_ssW?wcbJV1O3&{MK^BYgP`X68a+G|*BXihvf zeR6uWipyTbtH+htHi6u}5)aO<|HsrS)JH=;-{avVmu>@?PDc(Xy^0PpiigNpj+t3l zfQ~k{oloIIhact0=5RpAeM4}|0j2SDxT2s;e(JDBiHW2MH%lo*PjQO=M5Uqbv$mV5$~z$;{#T(JJgtOJ ze`QjvKAgR+$)rVoNG9feo&Vy4jQmtK_w1-Zd(^q2?K^(nQuVMG`R?E)^`h9=aU^~` z$^gTI5gfHr^L#S(>Z6ejrwA&iPs)m&942sWKwTt!yy`kq5z8@X z`PS?JbtPxw++jW=t=pe%%%qg8JN0MrrVimT^X)=No@ZQtkJolnlj&IA9{|tbO3j6A zHiRq$Z~IwiLpAU29`%JqejwfTEkPMloQTn-J^|+@XVwxKI#>{teGd;^`f9)_dx>Ew z?~p$9Yuae$DYPg-PRNEx<7^8i5J`9*GQ3wZ$fzo{pT0NOmmaQQ?|D}SIg z#(`1O12V^H=UxF^=gl8(J3Q(D@H%O}%zfMRZ^bw*h8l_B$z)-2$yAU1{esaw%Fs9O zl%PY57n25sR@3?-Cezl7}y;93}1}KHkPC*uM zQZq+Nhq<&47sfOrJ$sZnn<9QRAcp@T`Lt8uQZB2qEA~ipn@wuc|2PTv9;Z2qlS?xz z!{SiOGbT{dWz)Rr8iDsmGa;T%xT`eheFy?kd&x`&`hf09rhGqj{FiMjd?4JMu<=%M zt>tnUu4qmxBmP&MhqmsSldhq+;wVudJqT)CD`ugIa@trF4c41IEfZ1wK+&YdF+vFf zPo9v3lv3KI{_z03UCk-1UD=Gi6J&|b)>GhYW27qRU7tGCO4ES5`}IFO6P2KHr}t=A zwoC5o6~V4Racx3b#us0{lT~7hSyv~t0VpNv>WB{>@s=+2R&H!(5&1G zV|yZ{-X3GkeI5nY5Sf}izJP+{*eWUQ*dyjv1#%z09O{4w-<8GEu-;nq8d0) zD)=)r&yVsYh)H@4tzjgmrfWQBYV0h*yxnFH$0&C=Hz9esHoaeQW$+GdoMa)(eoDtT z-O=Pd%W-(|DB@jT(n8jY=WB533(Ep|-o7PQyPA=GU_e@CJv;5hMQ%ufldhXuj+WyS z+1@J>&uU)`Od&OnP~iTN=WtG}eXOIMf`8^PBg)7nwtl066iAlR+~Id4IJz>juL*M= zNH>)&I*yP~gf8iVSPyDWfI3m&)**(V#L9C-c`Jymup3)siZAz`jpbq|LUy;`H|2dFTMd#GEI+YAn z2!eromLBmKa8al3bP9ILKVPh{)vrIm65S@Z{$>r=Jy^dUjf2oYpT2^vk)hr?a0++Y z>0EzULcwUc_&OR1M9{DtOISR-k8BVZERrs8{_95c?=tS-*k;_!8W%`pqj+h+@xt#H 
z&AUr{OJ^&vlJ~j{^ClV*;m||md~l(6B&>9_C~bc{{4Xao=W&&AJ1jXPTsv@mURd8C zehQmh8Piyw^LK|NcYP4V|K%!gI@768HhmiCJNH%4j$Xa-l*$?t8TNg{q!p{)Dyx2Z zEpO(lU`i#NxNl?pA-0wnz#cF($`)2cRpzT9(mTG8c-*2NcoLCGrG8qel7OM^ev%#x zdP%Mp7@<{qo&(8zvD1S^6Lz4jjpvU{ysWX*M7^Rv9kCTJE{q1(ycepWwB7i>{ao4y z@^OEYx?);v?|?PoVTxb>y;K*6aLMs?Jnom?wDQ;J;+jmWGJ$lX zL{pb3e+gF1%6qr{sZ9RHKC{d_-%C~2;kA@=u5Dqw@Ghf|10yUcE2sgs%6aR_ zE5^wA{lQ4ebmJXe8u|@-sc*s>hocZTN}lrfBmby0ftQgfWvY=v+hVjwildz9+!yOx zw$Va}wUMu1&QPXxI_cbkY2y|D-GDzO?#QgB0ty-n6HR@vRS0y{`Z9BhyFU$~#~FY4 z0_=S@ksO-r>i52=+Y_Nv+6L}DRej#kvknE4fXNnQ4-Z%U`f)N)kOSkmq$MQ9YTeKR~5)%c0i7x z0uhe)Eu(PN{LFi47?uR&c+7lC{U8R)Z*7h{Np$8Qx-pl-wgo@)*JuDy#~W+G5##Bq z!$YIu3&V%M6*P;7@z%so@3MKrh{Wn=jh&%KRsnEEmNR{V(3R$of9wDZG^IQmy#(9-kId6#UKeBd~G&~U7kqZ4nG71Q~Q z->7bsjjV+Trp6aSaqG*waoYu?ATN!35giBV@~xcydT3&aoOCRw4@R<>tzPG&e~$5E zjq7u;8}ov1rJNS|DHzBIh(aIyu3eSdeD%$ci7B?r@;&u;KUwy>)G50k>C-^5<@w;w(l8O2do<#QNR9Zv|`)rDwm<{RrEl@n635vN=fBfbj;JHY(DqEYIqmu^V(`Qw5sF}_JvE- z`~7cYlw8T9_N^w|tWl!xA2M9kMa#i_;HPHXA*=!AK z-D6*kKaM@7K)~wP+|j%EvGrc{r~R5>d#Ey%h{J|}FW`>??_bh*%9-7hyyhNuG5C>PAKG?9jPfuKLCh zC0vJ9?6$Hrm?hb5?b$2k!@{?EB(%aDGqX5ix8{ox?E`L&Z7ngA>_KbAZPx)4q|}+~ zLQLy%nrb3AeouU|na6<{Rc)||N>zT~4!tLDK5gW2d{K1%2Ek-3&7MB5Eh*etKTPRE zuBz@*y_D!gJ0NcRLNX*>5nd+G`Z0#AMZUUofO1%Ow=)MT6UsV!WT-^ygZ?Am`eaRsFu(S zGy%SQFRNf?#I)ra1;L(CrKcpI@p+2o33~<5yhX5r?_tgWrI4wA1Y#?s^L3ifIpudyZGw-25iKJPv#t=pMia8&W4f7`_lyJdTE*ly z6v4-gx5`cQNAIF|ZJW$hQEOe_!@6vLOW#=FewrstNbC6V=Y$A+#=V$x7a%KKz^$Y2 zX9aUwRY*YCacp+2$W||bqS|iFV&NGEBKT)2wEQ5@_c!E2cb=P`Lofuoq);1wfvOQp4taq9^LKF^6V*S^0uOr<#rLpqTmt6olmDLp5nClfupI$1RWoajlOohmuPR$(ErU|?V*ZVw zH9T^k6{cPBHEey5|K8-Nt)qPLz?5WYq&w4&2r=KWk74{7z{_~;#Nr!6t;=Lio~cH@ zft=r=*a`dE($DDwjLQI?;>+jZD~>JRc_6yj)`jq2>me}nfm=bh(4HIh_!qX~%EoNc z!DN&2ZviK_P9g(M-ijD&nhhL=iZBt4b2sH-Y1j@B7#*F5jXi{bGl^)en#~q&{!2%_ zspUwKQwDxF8^uj_6Y?~{?2+AEh?`RY$Yb)QAa-LOkWptd)C@C*zf5IqJMo#PnN@;q zZR#`*l1Rw!6WJD|VlU;wivaB4U00nuHRZ(C$w~~%^!2c%9ay1^Lqu}rHoyxE4A&f( zXrT`lP59WXgYhQG%uBFre?O%%UtpaX7=6|wkS4;ako*HC3D*eAjI$2NeXTvfaB0VbU55NMm 
z7pLX2HqNoQ`uF$O{+FOBbyOXX4VQi+O_fL)AQCIx7_lRR7Yq(;r%o)E;a~%b9C=Ax-x)&C#Jj-Q%Gh|8You4Eru;6F{6`zx#69UXL>=>m!bZ}8 zjz+sQOPnXNwn(qX8-gJpOSfQ{{$>n@(TkI<<@3`e(bX7WLHS2tAZR6fjv^Yt`_nlJ z<~oBXIW=Oy2zt!Ibc*#wYQf%7Z~%9eL6G;aanZ37xyU}2Qt&1VmWEcQoMHz9E!0;! z_2`=A!tUm%J+n_Y$+`)vOJ4JOn$o)IsnPh}iS&Y;dUhPqE?LxL34PAvHy_;MSzEZy z;M|`gEwMZB^#~tHF|xc;oi}~*ijQ#|1J|*N^cICJJF~l;*ZQM;9pi2RUWKuRVv0l| z-o-;vYl0qC0I~btg7FP~_8)qqlK|DHAaA~YB`YUt(qvC|aB4DC+fICI=a7DFcr=eg z5220T@SHIF#Go+LijfN*qL&Se>?X)XgK58z@lXcwoa0(C)^LT?IyR6x9jM?{gkZ|y zCj?gM*hgE3+@K0`nLaqDH`+)s5jaq_mW?OciD?Uwh9$|EjBr?v75?{{EF7TUZ#f>R z7n#rQv!GK;Ab|dIlzHuRb)Xu?g1H+LLAHr`>_}@*OJRBKR@>z39Sv@k|Ahs!FDz=$ zA6O?{eY>sv7ujCd6s{*T{{SADyDq48ONJA>u+>Vy-kUJ;IU8`AF6h)lu9kpNUDC5J z4bT(uu(A^Nr}0ZckDd5=el_{zkArK|S!Bkw1;Sq5A{(*}hd)Ax33>B8oRYK|F8XlWiY#>gw@7@^ac1kS;gIu(cQd@HGWednu?#r}vwzhz_YPb3Y}r z6_Pe)^ojX52+*1~MDp}qXUzG*L8_Pg)-H{Lga}h{k7Vdw|DRs+FL6HCU1VpK8B4~e zMq_Ym`Z|xs`*yPbeJG~i371~TZU&OjYrvjz#`kGBLaZt)^nGH)51012zdu?uzK4u~ z?EW}E(Vzf2oE-oI4SBuL{(9?-HJZ8B57-baWoooOYv)TtbLv@BeJ3aKm-DMdfr&Lk zoc~NxMPV@ckJHsMX~CE!E_z)PA(##gbH+a$EX%6fG;yM9oB93{x?sbpQuT<{T80+~ zEFvKT@IKytA`{HSDEa>QA1sM9drDZq)n&SVK54Q8i4Sol>UHO_K>zN z{Njt@%3&^xgJ3|oQYH_DN$jbBu`L?=TCptF2?HCU!1*^&mod_8S^rMnQqQ((IzT)pS%QGCUG7TOAjMlQD(NVeduFqQJL zjgw_7Cb@fO)nmJNalO!7%!;U?Gv+mR1NeHJe-84;%QaiQ7n)UFUZS$&azLhv=7Zp- z?NEKEUhXI^Ki=oYaP+J@ViQF&G)qE|G*JV?QSi3=Elk;axU*jtx(dC+nFc8Zm>fvmC$;EyC4E-i0sb$m6|#+Ba>>KT?PPRI7b-4gH1&o=E*VMV!?a3BiGnfNEDzr z1%I9|3B5u&q7~s0G@n9e_BMZ$hSVUT1X?}_+HDq9Wxa`_F3&~>Sy|GpL89P;lyP1W zl?|~lU;Qese>z?@|BS457N9sM?|3;a0&`$udb)FI4h6qg3s^%iq?m{w`kk({_GxYO zI5#02(}XVUoyauA#`Pxo%v5TU96WL(GEd8+ttTETsC$!+P?Z$Hi`SgHRM-xJK(-ejnaqBB{ zuzvfX4asopy5r{|A4W72qbivnSPFU=x;itLs=j19iEIFG%hLBRgO!_OEFoBp;B$0F zQ41QT1LEbW~w+!)9fD8v;$_ zEdFHTk>*}}De)G=j11nI!HcZCBT?0QP!kvi-Chc2*ZG01JQ8yA;%eLYdQd91h|9Z= zRgl5kDe01Z%~zhw=dkRPiZETT7`fWGb=&~|=wW^mqIwWUH>HGAfAuAb%MTIBWS1;g zZ47suwg_fS=#rId*o-M`pQT&ms|hb7lTYns$~K!&H7BH&19XrtKR9abU%IhB=A|mW zJV&cz+*1f<8vuc^_Cz+Z=#pqE6f323oBkgaB4k 
zJpif@p*O{wkT1w>fn22;FGn?}*|@&y7iPWXN~lzwS1acWG6PIRdmbM;N-(2Shrs85!*YA)N|CkR&CY9YtUJ>{^(t74`WmRsa$*QNMnDD30tDBf&prpsf~!=vYz5^V50SePxXeDNbD*7RXKynpQ*Jj{Zuap z^>dR2(SJ+}c?*oDZTmeG)C^^n*la~_3BEuG=(BMQbMB7YVS&WRpdN%tt=(LtY@k~4 z?wK!$EhGFBKSLwNntw8XY+|!hN1gP#=={MeVw;>tS%4kcz6Gs4g|UcT`n|3#s@yjT z?FOk)rFEb&V$iUor`v2AVp+guv~|vv6qmm$PLqIzWRh7@*2^VCx~rt29ZRZdn)9#(h}D}E<+QlwRoX?)t`H~Dm=>qJ&Q?U8ck za}`ZbCaF`SNVH)uI8tp!DK)$*5jE;u+Oc2R|2ia@3_t-rRt0IlC^wsfmqcus*ANxA z)JN4mD)Du6NqVCWkH{nVQ7`W!%=`k{nrFn7MaBW|C2hpSYQhs`kM(t~lZ^AvAah5j zA@S?jMf;?}EZ{DKc(dsu17D4f9Ep_HhEqljbo6!6e_GBog6w~99#A>2#_~4bqR`C2 z=D3TOToh69wU$meYFq>!<+Y#|-yU$xPvj7j9cF5yC$sCu&NkO=n~u4M0RTj4?ic#) zFt*{xNWMpS1ijQwE2|K zg?y3`+#0Ix@!D5f5{>srhcamR&|-{Z6SVhT&j%Zw*toFQ8(bU-y70($cJR4L1{upd zW@*|Y?=Jo)BTOIZzPW-~+#f6}6<s~CJl{8N-i_$&UG?Q{(cibdZC`EX z?Vu(HEBlGqLfb5L_Gzu(f)smBHb2cEdC(2IUoN$qL2$^NxGzHD1P`i$$Z&UPG1og; z;~ucB(glrM?#_7=C6P0Qpb;ig(U?gll}cC$Sfq)E^IW$gz#GZ4q385AmX};NJ@LpO zvFV6P)rl8~t%UrkeAUm#fv`br+8vVJrsvw zo$_a$!qL#Am#&`-kv_^AV%1B9M8BpuP?lfyGQYbGWhTDRIGokL$1I)~>@itsu1Dl& z$N?PQOfO6j)nOA;u*lxzJcK$b<_= zj7U9;st@~ere#v_nV)X}0zTQ&x-ZeAj--ROli~u;Jr)ZMec3+c8y+M_O{#xE_;zU-Wg(E_?&rWgfIa z8S(?oL)wbe>KG~^0Q{yRWvLK0Rix^?YZXcHwB@zDeG_$5R-}>MlfIKj$y3cT)!}x# z4Swu(2S;HokeMF@V{N4Ehf&MH7c!!j5u@@LNEaK^ZuzhNCIC$iLSs$(pN`{OuHdSK z6}6<>+*V^_R7vG{eoK{VzNN;81iXH;uO1eP2^y}=%IMJ2Up&%2sxYJHiU}aXi62pP*n4rf z^3YkPh&ZT;MAAN}eRL^ipPoKQ+P`_F{Zjzy$T{>+cNgD|<14jthZ>`|%XdW;_3G3) zJSpBYRHkoRVS=>9)0YMOQfYN6#Cl%rN)S?~UL^R@tE zvYGc2Z|LB0xRk5g;`rb%Y@xV1{t{BZ{*e!$`0tM~{oH%eqhpkUz;j^$_5aVMmQDT*g$#71$o^CeGiNY4K{g-uu{f@6Rwdd6SgCVX`Yv>VMFxvobjN z=|OuSM(sONN!XhxtS(+2XadpOxbU~@DF18?DCT7(5bU#x6~>$iFPc;m{Ll`(AhJQQ zVO&7?iIDh4fi5Xya(5WOdejJ_Wyh=@oTcwq@vrQ5q_o4ctGWS&&Q^#4aPXgmxK2V$ zsh>-lup(UJprBRJL9)NTvt<;((u33vKC7*(1!|w7ZJ*2qXek4UT^-~t0k*%rG2ziu zIf8j6$~;>Ibg|wx0kUMF=DAghVTxk4o~8|m>+g9anM`QwC8jzo&HWXuKCUcIQ6ATK zi$?LUAH;_wF47rr-+BR^%x z9&;*)caS#63{@vvxTmx}A?+;be~S@I1)2PvW9h*Gj27faGouK300O}(I=Ifn*&uZ` 
z6MRULV%~z?lbK^9lP1ri!fgtE<+A8~$>$MF%QwSAbBpZ|&Qax6AS&f&pPrTz=_>(i z757kJlgi0@%rz2Q)gZ42IKI?zh_zk2&4J!3Q2k<`HCQctI*fb+h-!%I7m1O6?~d9k zSCHahnU#|f?4K#m(sLz!@-oeA~#5-b<2yHcU)PMfBQU!I{B6Q4j#7HKDjKc}YT7Q1ENLNpp&CD=6=bQLxi*agz2tKdOC|5+fvO{6uSG>?BU0KV z=`W!cV-POaHaVX2jjK(DXB|QZc|7{}_TKf+A3}L&Txc1MLtL&q_7|^63^3CJ_rCuH z5YwuxG^rni8(z5czc!a7lD|RJLG1_!rBp>H3OqPY8@cBTfW1N3+T@Ip67NFq$Y^j z6fSM(|9g{`FK7t4j-uXbOVlz;uZUPv6>KWnWd8RKddVm|us68iD=l84;1z582@A*2 z^gJqz))~^1P`ENMarr*PdZM}$8&M#FDYkO}058#4HzyiBpPXfuWF?Ps`Ykc%)~11G zQM0w99H(L+rqYrj=Gpm5-vA}uJtVL7!L5h;go`R#t!oE^ad#p2IYjT0lWmJ2H0KD_ zfukrb1i#^KA7`5ys5o?c(skHq-!7#1G#z47B4@{)gH+fGLfL<^m9p2;snzOR`4COM z4xsi^{xJCFT#Zb)pZ%6oUurq{_9oISR?<&jGY47`83~T#U4;u0;h0qjWgarCK9>8B z*Hp*o&C2{N;F+^kGdiD^Z?pE?ljS`T>QuNNwwrne; zMsn^v(6GB~@JJ133W(L>8DGtHQ++&&(u*ws0jn8^a237ur@$EM&l=}M1Sq=6AM3SO zp0-fQHe+f%iud$M&qS8miL9J(PZHk_QC49#PhvGR?}z{u%Z=zF~t5p zuv`$k5EnB)-N17m*;Gz#nmjPqBlZDvdXb3p;stjMWuj%7u`a5uzW$i zlqMUwTAVv&@k4t6N-i4Z7apCPuz?|835cBg)4WR`*g92%Zez5-CFA7(HRgHDRw0=(e>*N6!j zR4XS#`unYT^Cr`8an+-p(GI4FN1nbY9H3>Vw1O&=5QP7e+6=8Dxwsu%dmC&l8ALD0 z;(^h%Z2l%$mIcFc{6Ajo)(^tOV1mz&fnuxxG&qiMQ(>`>vPmY~#iA}*! 
zzAAkp9aejjnv5!9kRdewklyQkH66qX5(F@JY%0qXOQ{j!r)57X6-Ko85sNZ@oHz=^7pwvf<$nCk3tWn*eVu zgoV=mK?Z45_X)zreztrw4oMM+O4N;vC1H+9vhKyiF)Bu z?&e9pYbB5qL|6n$Lnl}%9+64X3NP0frE!i@n^A|=%X6!S3)z3ftSg>b#@1!`!e~1Z#I-AeBNu0-@ei^8OjTSJA6SGX+Y^}BX;8=vX%s`t^uz2^ONmMy++w@3?> z@$8fahgelHJ4%}&)Oh0#TLP7n@!-;RMiBNX`MoWyu;7F*3*u@~qMwIzO530vNv{*M zvUsEr8Xv%RVk|`o;HM3_h z^{MxhLw6bsXlJq~RYKeXQp87TiOs`AB8#~2P;wenWzfzAQVGlCaHD`#r_Ob%h$tj* zYyPJw=CX0PzbU-+OkTl-bl0y*`C|CxOm3W06%A-7O{wT-D5f8vmlGsM=ZXkP4e&Ox zdE0jh1;X2S79FCo!Ih|@by-}^w-2%AKC%75Wm85a*S4;4GT&il%BP8WaC3!H;$_sc zlW6&BDv--m#2OOA*L~Rv<~DGZ_w@o0g*SD|E34lBf0B`Jy+cQ$6b+)Vl*Xd-nR{!j z^*+0vW$5Me8vrO6I}$@g>}pJ-?#_VfQc`@rm4Y#_Kr#wI*YXL~UQ~t@<%)HIf?WqL zF2}tmz0pe(YbIWp&Ue=d2m@{TV0T2CM{ESh-1djf%YbmD>rr}k+$_3lM(vTS*O z@D9$mgCpQk&4V^^=_Vb_^xxzxPhh(Rp}O z*Hr$Aykn_%eupYHZN~8|->;V{X+$DUEiI}Rz14@U9l@C^zP_-~A%4<&1_HMD1|shv z;Z5q3GO$~Yxs$r60DT#(EyZpi{>KUGai>Qscd`$gsy)&=sJKY)Lz|CgJ?r;Kcybr%Tb7ccY!X2ZQFU`<=R|4 zbFLtcs|z)lTRgDNec_4z8Nec)_T(LcG3^Acs?eOVNUW|7qjf|Qn-S6SFkL;2QhLcH zx4uP=k)2$<2Z-&8p#Fx?GJ9L2d=LWV5-L&DuyY78HNfIBu7#**KAqV z9ww=$ykLB1Oo$DTEWLv%1pH|d{#J_h1@FMi*B|{u9fT3!%~H$;qZPQ2d3xq%!zeD= z0!G)zoU@_LpRkWCi^+1RtD;-6Fk^4&o%wu=2wDiQ@FPIhtVu)~vHoWAg^LBR2tB(X z%zUYz*X=5zHWkE0sNXzih@1RjUP1pc{Cs2)^UZ_Hp6BCqd7=NlAED!$IU!1=Pn{-g zL0Q6ze0ZZ;i5RbGlKG~={KxL+n>|9jUS#*%GQPo8rWT1sC0O?X{-iVp@_7hAdWflr zuRqZP7X>UBfX67yR$9V*Z(*CH(Cz8>2s_H;Teo1T6*;D7M1?INEZ%@Nd}*+lwv5mj z?F6^MR@(dAI!C<8XMVLEvs*XnGkbXPZL)fwi!v_H=SRE{g5(iuic#g{BDc=|vy$Xn zROaPzjKx%=Ut3WJ8SUaA!K9kcNz7m3m6Ol5<9zG*PSSe-VM~O(kZubu|K7_q)UG@Z z_!SX{h63C9^`5`ta9OxUapAspAl^RP!7$^u$C}|aK->U1!1Q`wZ;Hn8t|-2yC56ND z@5SyK);7geA0#qcy^5lgSbv3mXI=)D7fmdgay*hj*X>1m?&f2PE+jxpSZg#Sg(^*@ z|3!?DzE=W?2`uB|lJJAiL?@+k9Q#f}9=koO7zh(c?(9O5VYIuJweZoBw>bE%rijjl z&`X}60zTqMZj4CkRuU=3{s5ea878@Bg}VD%3My9Fj?x{^FCEo}Yk#dNr{+7Q4l=K$ z>qTv;kvAJiLWn&>Hg8;n^jVhRDi$4?d62)Je^pG_d$8?DWph6G0*yoBLTU3jiMOz{ zo6dUGDAO~}+X8=c~C>&5~^Nc;;9a!0XlzDD2(Vj1L{y)1{6*3hf*pVK?4gs`; z97}==@wwlZ1cq7yu`?dY(9x#oVEI+ScNzwaIu&h_#Of4<93+HAU?fwH*b{N2b5-*_ 
zTg)UQ_P~_ZvQk~{7{@)wADFDHO<;&TrN-%GsbLxAA}}}j#_^6^kgbvp#$|R8)vATs zs7CMlL+5=k)-NqkId{zHk3|433Q!xM&3bh}P}qeHb6$mDfI-wwn2mvt zsaMcgDo-rQay{Supe=OyJMgDckRrOP%ex?pr6#AsSN~RpZ{)D?5bJCs!$n_(*eM;#oMBKzyPI1wa1(^U~avGHN z42~szXr&|1HmK4Nj$4H1F){vjOwJ-1*h!el#`r?K->J+R*yeR#MaTHaQWP>kpFjs6 ziWj#&Yz(9x3|R<1Wfd{yj%p7}yPh$H)mg{H7g#BU@gp+F@{X>|LRVAkzq(>P;!UwN zF}!2z<}zp!Mc)UsFoI1&<7)B%yFv+MOGU*S_KM-krt?%Zz(?S}C7g$1PoKLyY#>>8 zTCjTVvi_iGK!jo<)h1%^1VUmcR8(-~^xUK>`{Jd|Ebhd`KW3GIiOrCU7`9LeWKe zxunzBjx?Xo=TV2JgEb?>?=X zisCw?7ki@Vex9xa-C4%wp;uUIanltS&;~#3nmtApc8@v8Pv@>fmD9$`+tYZgDb;+J zXVD(pVk$x{&mQ z;L%$&D4?yi>=I{FqnONA|BdTYpb2R_r4+T!Oa1kGDf=!@ZtU*P1S|54>=X|~3XdaFf>)vVa9qcKmYJBGtpBzqf7u-;h1$;t;1;^Eh7gy#`UE1R&!UAsq>Sxan z-s6smQ*XbE&ixK=oaO0N7NL+5mY24~I4H6eZBM72tTUw{IZQRDm|KBg%XEs}85!zG z*1UK}Ey++Sy{a94RdB*+KgS!&Z0LGB6;}Dgp4IzPWnRbdx?c2f`wHsrJp&0^u_2yO z{vX`#LOn6o&y0-cuF4trsnV_B)2q&6{)_z*OrH{bH>y=df!M8mEghh5eFH#p=@PKt zbG?01QF`#UKOc!Nc@VGzbklXvBrvnJ3t#2>ONuBu?=uK_Ed%ri==MDhlyLQIQ#}3^ zoZq`b3dcwBl=W-1_D?`Y2R7Dm-)`qE^Q9r0X?Uj&j1rv>c*SpJy8?3ai-#9QFRx`B zW{fs2_l-|wrmTn@O*LtTLe2@ov?vEb7~{*lvZi6BO>_)dNNd;dG_xpzJQV+uG4#x=Oyqes zEK=qESD8XE_l>o8*MEW5_8E-<|AYht_| zR(Ln4P2n0J4TM%gG)#N~Qi@O=f$@+B`BbG`eb`Fl|0EKEo`a~QzIcu7p-6iY3%OtD zq?fW}=P$1KJiehdIdVb|Ir?PJ;;df?WTR!V*^*Jjd160ka;=(f)vsp&)>yMI1#8(5 zRFDPtIH~|d%KRUyZQZ-qGuoYtU-kJiMPD%s@HVV%xIk~{Tt_d*tX8Dhki3)mo!t8> z&P}mjmeaD?dG%D2P_pqXZqj<}s@2{JQ6Qp-%r|ZmI)R>Rj}TFblvYl+0uYGZ%Vvwp zVGe)!Zl?`)FewZ{hd@})6Na5#fbmyUh9?F=m&CngtfR6l0j2!s0^3+>NewT;x4Ozi z$t_86wkvW*pv&HCY^Rjtjx5Y>inUs$nC%LA_(X}_`gojwT0wQzGPOoF{p|jtcuO>j z?OeJO(xlhPA^A>RLIH8}3l)h>xAr^KZX~Cy?q>k35dQC;zKE^1X{)q2XX^C)dkdFi z!spZXAm*j6{43kK_@PTf5z{n+Xw%{hK?7`rFRL-)&zbp0Vnu z))o8j1je-;c_4mJ`uOTww;~rARa~EG8KgZ&!>8qa!HEpIqWUCdL113OfM`S35abSg znb1*rb&}O!iEdibIslJx>@T^z{j%8g{oLrVU}iSom}%tjHZzAG)&=izRB6|adsVbY>-_lJ7OQ3e#JHM^Qnq3` znUVK^xkM53Yzjm`O>w8<#_{DKrd>lmLzi?NYGfXs2AuTjUNerS>IH8MW+t*z5xB)+ z&_a|uPPjcG(DaB0>!vxee7%H{Vm~#Z?KamP4D(GF;UGJ&=+tJHAl{qxXg(x`78fw&O1FM&zC5=SYlXztF((bB|MKQ| 
zSQ9f;mkB7&&c^`B@KyO=uCMd+MPLEM-)%Pp-hCc28#O|%vt-Rgc+GJ>J!`^bCh?gDTtO!s1^Vy+IA zE*;&-zvy)bfgAK&X-6EtJFz?ivco_mN?(RnQg6I3+vz40q}eAEQgmt34HUry+oEi+ zaSP&W-MD^WVIQ>;U&VibAqN^_iYTx>S#1|Joq>C=0}H;;2D>)vTE?<7zgKfel|bXu zhzcf1TBQ+ip-IBuqy-OC2CjN#&@KnVc71&U`OlzTo~I}H-WG0=I2yYNy4_-b4DDX5S?Jz!s?|rdG9g%vhMA4t(_s*#tKeO`;+mcw`>_BfV@;XIfMkTYN#z zmh`NR6WFyw7{13l&}}w*Z(C9hN94N&KW5OhC)}@G!fAY1^^^;ruiE8g^)P?>B-yF| zDl>+%IHpdl2mu>B4{La5@ZE;mPqJTa{#OtV=hcr+Gq5p_;>sXSMqie#zNuA;xEKmX zQ_rt>)ETqjmOsQZam@)|Iq8HZy*HEa8?mlc0)AIqz~CrAT$o$i~Tvr|0ZLz#yow{^m=wcMKMS0h^>ah@x%Dw1% ztS*0ftO9) zMUM{nfqXJRUC=XMvXSqmnV+*_!I^W@?I_77oev2mUcG8LGP~`*$##GiWMDhPY4HqGNp?<&TxIA0uEL|!qlzObB@sR-Z z6B*z+_A-!L&aASvV}$RNZ(0T#72rENPy>i=EzF(N8)x?DYJhGP$h(l+J9y(2onG)UOg5?bo&4L0(H+Qi*OBxh zAxta^cgkua6b-mBJ{VqEMr`TjzWqvVrQ}4a@*D%Hgu(MrG3B~~%ZKEUM5#--zHTjR zs19T#0ls9Cg}*QN2@b4%B@QQGvSu1L7`iF@nTza%@0S5}T+dE&fhYV6SM7#1{mzrH zM$EZE^eqcVm1z0;)2j$_4L2ZFz_s6sX%62jGxYw;_dGo!-oJn@!v@_LE&@2K@L{mP2UuuYAynoZswFuw(^p>*T-CE)-*;Q%s3soOsh4o=9=K-@jR^n|DZko zfY=Yat2MeL(s2(NnfBB%5J*eHu;pkR5v|CZ8eC^J_DG)dDPj*6@etf~ipNX)XMFFs zReMdFaPzADZ5|4C3fd!3Sm6k*I5g0p2=b9hV$Cz2Y_H&Z`T~MBxDlw*1!ilor23+U zb3%S0s;!fKVPKk|>TC&G`KMM@UeWa?i~gFShvB7YDy=FiFu&1EZv@p?cL?X0T@beX zqfIBGO;?qx6Y=(cx-oS;2kSEZO0gKrB z^P78@YIe&1g8i^PA}MC(@1&4JBmUUcANe4tm%FBPlJ$5GOZY_#Uv)O*ee(2X!Req} z`;^!8!=`s|RUv4`S39dMuvVM)UWDVV$n#h%1-a*~8%cTn9)nDzfYr7=4R4G=N5{2l zO{xBNu_-IhPD^W>I=ZS3!LAQ13io!2&847OXBOo?1bkp}N&-vtSAAABX&M7l#D0I6 zw}@a?GIgm8iPC5|Wi6MzSvbSLVj9(qpg zv)2$yuIOJ_$7@@8EXN}HQ$N7*PyxU3YuVmiWP>~Qo>T^Rxo!K503te(2XplV?W1)D zQBq*C7a!o%2?6s&FJ;@ef>OQu;IvGM-U%SK?wmM&j@a);}{Xmw?!HdAfOG^T8CG#ei7hbfq1)3}E@qay^?LA2&xg8~p@WLJ*AIHO6-8+(jQR(2f>x(g`> z;i+X6(ja<(zYgp%3cHFho7pNaZn_;Pm$!e*m)fIu*$IKV@p$YPDai~Cj#@#Q>+Nvh z;rW;*i#>iDp|9xWYIIluI?q2N?Ggs0tq_?!cI4lwX~UM_a9S_3`q6fJ;3uLQ0zhCK zu1)jK^YnRoqd}#;&p&*o-FT7Upo=IRoQuSUZhty9gTnh8(8uh2;j|C9E4iwns{1^N zu7GnZ|3BCb3R~4m z*Yn;;rTEfuOBVLB>Ox3^O~j-7vJN*X$vFmyH#df2UKnf8v;=m!8WC(8N?=^P_Eo<{ 
zxZ0S>ytIC7@<}-pkdsu>)_BfIP3qvFq8#37+)0w*ctEOzfT9!th6%>e*jS6V3yS4_ zhIRMO`S>QE)fAAZwpW`^>CjR$W6r}vBM!()kSAVJV-s5fNZg8td~_;tc^w*nygbc2 zH?kYIva_n|FWe8Vwmkq*K(4<Pl> zsV62GM9eGO#Ns|{aCGOen2%bt-?Gywr6^Al zE=z6=Y{8^w?PG-glu-B{!|GS(k;#ixLI#Gz3AG_Ec0SLxR??HN(0{@-3tS+MxwK}x z(Pw~Uz#7VBI^bS7LRE)WfI`UdR;U*e2Ukhv=J*L!Pi9>Yy=(S62rhK*A&T;~)X5EZ z>~&=*P;Oml(BywY*9qv`_js5UON{6!Gbr(VWs?yc>XK;L0W`i(4UKCft}+)Fq87be zG59NAjXd=zeBc8;(qdRdpbpx@xkv}TA;ftA5ZT&J)wSyKW(iEhK9}WAN zDqar|6p#@+*eqk$9v3_ulfXx|CU)m}BT5pezyn!IFYy%mqc&{j+9|tXi0NV#FJ2BS zAbJ$Ec9rePeQ0~t(io^L13`IQRAAfuyM^&O=>2(N^erm1++c(aNup_SuH}z#Ks6EV zq_g3G?-}5v?etHhy=xP)_i0Vl_JR4b?2COuR%#W+XOn4`!03XN1nPoJ z=4~)}o+ZaM$|L$Cxi6DroSsgD--z~-(SP9422UM6%srlNcRQz_%`5DVNj;kJzxlX*CeJG43&yG&H7jdTTj_-@k z^Sfk}vR9o*^{nEcOxZL`L}_agSt{LJ)+LwQtefe z9|~bVL=h|)EGIgvy2ZAmgBbHVLZv6GTkpyO>mn~+Fk$3WsPc{0 zlw?W=xmS-eZm)0$M7_Nup_DHrj)%@L_=ECeuF>`)s^=il(ZhCN(^EAr2qghW+eB7gKX8a6m!g6uq>PSi`Nkt~dwyWQVSPN<35Zak!EPD#9%(%G6*P_A^E{Uh+L_jN&=s5E@4@<-{q)5GM<(-AyE0o7l_N)! zKs<@ok+$NmBH-F$cJ+VIp(7N)$F{O=;jOjPHC-dD%?F^fHzNo}5fam?iIu7Ui)u-WA)qSjFU1l|y<3cBUsv^+8YfphFdDPA@oLpJ2IK z>sZevSc)CseP))$ewKZziAlfImb#Ri|8Z+6jfJx{iXa&s*8=fsByM^-tWWE=v5nSN zpyb3c#4{!4AM{iBvSB-3d|=XsSXHnmKw9pvhO>A7uU(X-Tj*J&-3v=E8-a&Q6pcDj zx=;N}G@S8jM0H?meN#zP%#{L^1}>>g3V;m0(xaRCcqK$%UrBJk_wQe5tKYK6I5K~J zil$17rm;b^kTRE{*=O@t4(arRBtxAS&p@Fo&P9cvBOGqh+PJx{2)r-ESG|ry>sz=_ zKKoin2s6B84;AG~toMDq_#u2&FhzT36xqKKvRyxOj5c8?!Y0Y5{8OLu*W?#pK+#Le z-eMm3fATXgbE>nk)^fgfmR0)FSk+nFriqX+!C=wg5_FU*7ip-_^O-YIpza%FC85dc z*LZRX2trc!VFPXx*wUV2-IQJ}tSR5Vp$9v-ZYv5OmgWhW0*fKk7qV{fDj#^xPq~JX zLz_OWnX_jA2bqX9_)dKC$OKBJa)C)~1Q0G#yP42}_qW~%XC7N{TOxL3X;m*iuQN;qRk|OnKUVO})#{*b-`wB#H9*qQ^Kz0XpA^ zRT>spm6m50D0ZLul{6Yjo?|&=wUaA>@(2tXwq1o0no%h-rgJ$#t5avK>iJ}Dtna6u z0eaKH@04YYicDb2@Qq$X3g@CKD)+cyD}I*wq@8WYwdW8YerH1x4Xd|b^PId~njd7v zOnh_-!Eu#w{cPy$-vqcKUr>%!8u-qIjDO%as^%t(b5#5!^(hKhqB*W zS-~K@U-rsp0|u|d4Pvu49ov_8;JVP#e0#ce*%Sv4S_3P1hrii;JN607B2biI7fC&< z8)$o<@J>JEJXMbF{-0?HH#)9f0vkK(u?4Rz=2=~k*IV@Qhm40gfAv4+Xd29e=87rY 
zn%=1ZERpW(o)i7z_x%^6t9OE`vvuAL+f{*9BO2J06Lh7b0wk(m0_Idyz{F7tsvB_2Wt#WuI z%aY}Tcm5J|vL~JDK?fh>gyFd~;eESVL?WJ+5 zVjk>$ITITv3x|k9=B6FY+W1!BEXGkt_430Y2Aony|9ITb5Ay7%CFfTH510?t-9cdV zPmq0_%%g}#Q2QWmG(bac%1*rK3$D6FZcjN};_X7kJYD#1`al9}X{A;w#_7$@RG|xw zgm~$o)i*7DYAa-8xLPsSNYrzQOi+GhSdf2(f@%ms;j=-+b6A+e5`+OhLGy8IxAlg9 zE`mZc`GZGAx2XF7BO`$X8Es?mJg;`hPd_AN9u&@uEN77#dZCc{e+@hdibh zDUyReV-!3Bo}GjiD)V7ao@95JHf_Op-%S~+cm=gSepGp79+?=)pO#7-(4}dG*6SD| z#17xQ&JW*y2%;OUcaa2za9hxyXkx?);1BWSP}&t_2%1>U~q zF;8@yNRBovtSP!i_u}8fD$<*2%`KSyZL)ZImxODQUPd1`g*A~YEG>unu4y3bhmPbk zLRA<8wstR-6}*%vy(j3f!>2vQpf^wnE*pUl2KyCnnO{7Hfp00_ zQSS2770IhmUa}~8*so*EK)7C&+?#<}EMH5ZUDEDuy8{akIT9mPv0i{8yvWqbnWbHcv zbyBn+nEKu9U#%j0rU^mHt43TtC8%0Ri$4mv;x%YGylywhD-}pzx5PM9g2n@>N@i>x zk^J1u_-m9{@FWTan4#+_i;h~}^+{RQr zL@!d77hWP^*jZ{^MH{DDU+JO>LAo34O+)D;zY)(+C3l>nVzdJmtIR&NUggim`A5m~ zBT4&;6J|-%b^fBpi5WM|eg*id=8Ru-duEPkb0aEvNZc*kdlHWPYcHU!Dt9YIt^!B& zPaX`G>4pxi*x+AM)GB_g>swK4cKbfI>#|A&I!L;4+chXD(3bJ_1mGl!&raC?`F>ea zu-tu9-&LzOJPC4-Chm5g0aV;oX$=x4OR*X{IZ*fhkP-9DZ%h^#xynzuPLZXwMB4Zgq zW%YAC|E}o`=hK-@>oFMRax2WkWBdYZ1+%HM`L((XR}Dp5?+$t$??hec=)*G+imfX4 zmR8+p@(4Ig1U3^f8IAVd>%8@3v9Hq}pBaNef0rmrioculO{e zq`G5i8Mpt6p=A0=WWsjl9G}!mRvJrQn?J;&PhmWDQBNr=Hyc7I=>Zzr99LL8k$9sI zBk4(rZw1ZT@?ASWCxrMEnMcY#@e!%Uqhos z%A&D%i1dm5!-(^1OJsujGmj4Wk9)KBchrt0zN_S;9lE4DErelPk;zMip*36uvCK+AFNFe^((TL%<91 zHwzy)$K^r+I#VDymp9Rl;7x0pI>3H}P%%=p%v)*GAllD6^)_-Zdn<&83UV1ohLS|} zRB^X=jhg6{lZ`aq=}j2DXqB0&+B_W&JNJ6s7YZ&p7IS~EBUr6kPTZUB_NAS?SzO-m z`I;LSRH-|bAPRnnU;4;(&1sqt=RREipf4AeS-ph1EImMbSZG>#92Yu6UK5V|-6!ww z!p;f0J{)!=k)=FhhQnMTx=bBq4 z3W?yN+Bvo+eCdOA>DOGou%*TwGh zjaWa8sE7rr;iqaIpgKVIn$8~LACZhwIv=gucEju~HuVGE+$SU+`i06uyqKjC2L8wh zrwi2k%k5H#+bT02yFKTsH(?rcssQMj$4Qo0Ai z<5XUzBRVsIRl!inG2ZRupTD)X35fW4h=;l5OPvA861|#i6_KxxIrq` zoH2>|hn+t=oXKF=+Ebk9?WTDCzXp8VC^gi4%q;bhbB)U~hE@zRvw zTcnrF0}F+>NePpy=ORNL;8<;tI&-Aj{5`3^Ab^&LAz{h+WC+=K#pEg1(I^SAagp z*c%ydI8dH4wEU84cL+Rm=WEJ#P!XSP-^>W#eU;*)((2TPKYEZYL%zfKyw^q6gnhW-ej)!YtE0qa>A}Va|7>r`G&}lh 
zkUP$Ssoi;@LQm3d#3Wa+>ZaP8wkkt{_q+H{XM3SYgG^CFQ+S*(-d57BMfnq2dI>tcO5#YoD=9Img`Fg@|EcT>0Iq(Z_5V& zVsL6y?IK=Jx9xVu?~p2)p~d0o$y9rdBXFs!yD%=S7CA;MsgpN!jQdao9wHD}F;l{+ z=^r-@_1!Ujc(Rj+zsaz+O8)Q;j%{1b1a@u!Ur>2yZG^-30y#FzV!8X;XvjrXaas;C zW3wB4!)XY9ZZI7uIV%}E2D!5l$Rw=Pae-w!F&T~oLPiC;Xh$=cFt}YYoYSB^uL@7N zru=F?2S*A*1kFuVq8a7#C129Mh8o}y$9g!O;3teG%GO;*+{UsYJn)obY6y>l?yeV5 zH;^%m8(Mdb4s2rFKqiXIyPp$R^j5IgQUq|qvZPbCu1jkh91eZ$93 zn7E^c4x0s@w78*Jy)euLH&G|)bdwN=p0WteTVX*b?q8W$m}e^q%ZtsHgSir1>m3X% zoZZ?4s2k{8k~c$BcX?}Gmvd~-B^@2;#(4zfpsV6NR1}L4gp{y-e$RH4NO~R#PPD~% zu{;KTzw$mV76iNJR0aNG?poOb!|qU4r-+|RTKbmj=4y->4h&F|R)J@F6OTYce7qX- z+QPNM>#)~0QuEUrZqNYfc;-~%3P!9!ht@(ENL$aI;Gz*DwaP4AG#Wr-txF6;j#kT+ zXE^DT7IZrmZolsKBQ-nAzTGsgx%aK`21t`U%KnM*kU2J|xP1*lju-EBD@s3j4?u*m zA|2dti}{Vs;Qvc+HT-4j{*)3y(DNUn2sgyM2^uVkdDzc#UB(>0?>}IV%zr>GxcvDi4laJPj_m$VQp|D&w_n>qmUC4?adz!rpiQ)k9 z$3O$w@KBp?V&7b!`41!^WWQyNerejT$ukxB(#!u8l;-SUt%60T1O>rXNoI-;DH-$x1(BgAy#dA^8vs9Zzd4yuDTB{r8KbB+30a)@kUXepm0;1Wl% zC&N#^>iM<_4>7KgA7*i>u;`wZ$r^9*Q6><6ydVDjV$ly`=GZrR*YHX}+1o+_xb<`Q zbFt_QQ(kSu={QaBGqNsCRu971u_eCP&@UHuiUiYjcpEqgf3mto5s@ zGn++D@((;DM)u#^a)v}KDt=>A06T3xlpr;BA@RxXVjPnYz&X9z#_BR``u?3?Y?SRYZEmA?^ovd1?3q71Bi?j~T z^wW;Qo0ot+V(^4mHNn*5^><}KEdRNsSjoyt?+AkY_thyG@(4|p(gT02CjoIvXt^*G zPt2n98t7`$%r#9~ZHHlCm%H)pD$j2ye++$ASAgL**KVyUGBo}Po_figwIn{Epb)aH z$V2IWU3qm4Y7=`*Qx}5%+BYp@yys7_2Ges}&E2*}&NOY5g)Ypvzww@m{(VTDqCm!I z;!g13^Eh~l1{pw&Tr_VOf2>%!v7jLmgkCL;snP0PxaLv^>W3=LxDv7njiTA zP1R_cBiE~|3SO9?WSvf2?0LT1D7+`yW-#MHx*VQ%?uir(oGaOQN-7kW*(G77Gv^T@ zU>(?yu8lPF^J?(zzap~Ri9*1@phl4XsE+|b&OHs&3w#yOu!CI=_vM7(Iw#k+bA}x7 zCsBbJjN0fPjPoV6Tpi*=4B;vTUd5j8M0q-Vb}*gxOnIYp}+ zO`7uutY${U&10``EkmT}@QN)v_!Db3lsC-qZJ?hEErUypt15m&GB;K2A`8!o`JB{?ap@45jIfvP+x95> zSLlJ1B$$o_KQBUJ0P>Wb`RNtV%oLeJa&Wp8~XL} z;8!g}Pd*`Xn38d@#|BfSTw4>rgSEo^$X1-i7-ry@nRZ*aEmG=C81u+`U7LHLTJ$}@ z=QeIDlrekxp(Wxo@UhdTcv5*)a7AX`_)&dnYGj&TmVX}?Lh)!DK+^s~v7^bi zQWFQvu6zo0c=lQ4g?x90IP)}clDlJv>0T6_Mk6d#hsGCHUCpl=b*Z0BmxGqXT6YS* 
zZdfIjuYcoEXKgN2$pl@#WgsbPLmK<&92f6p$4^4f8E)k+p@65|>R+=6drE?mCD&g$ zO`v*IT|9c2kbQ|4DqlNWA1@u`c}#!gXY*~+lw3gX?+Af;^pVkJ1`bSZ)tlWM4<>W= zx_iI@AeHTD&!_`4J((vW-N9##UAW$A{}K<&6;Z-&=t(qmNPt*gqi2;~3m7mwbGtrG zzvh_&omB!2h9Oja<&{hGWwY8`X$b1|B7a}z;?r1K5_PTuurb&2eoEMZ6Bl`Q%{|t@ z;o;=A`=j6;^&2a*$@yz<=14*_+vnaMFlgZl2I&~3{Fiw!k@yA;QMvtMA0@aNtO<{V z`lICe0>*}u!PXZH1BSOTe>&doK{Vka>3seQu*Y5eT9c|+vwtniadS+2qVZmsZibq2 zE1^Th!om=S$9v=Xcp9;9++KbF4HJCM=w^TT9cG*I$IkWvDOKkvK` z2%2RVBy`^~Zf6niwsKwNMx}{|u=S?FLt^*^?8QDx zZn5Hy)3}|_*_|-WGzgUJ+tCd1v47ofn=eXxK_P#rDaU{XkIxc~2)V5j#Mq2G&I>6T zxlEbv)N}HrZXu4i9fzcChmTzhzkccW(aEaAq7SV#_hj$fEs-!B-EAM{EA{p~Gas}* zt{q!CW1$@i$Swh1bXdT+n8-i%UO+EVUjoeA(ru4}TW*Ki2();MbR|@-Y`}5SH9n+k z-@e~&?F!mp)j>E~pM3)c(dDn>PqTnH>w)B-kol&R$t0tkju`Cz*%-Dn?fDMwylVhY zj=&r&J*R?fB|FgEo1>6>*=KmT`LPN&{Y^^pa`oaR_*Ml82X|lRmVnrvN7Y8Q>eZ!KhKIIA_*-!HZK5aT6!eH z4mYjOm$h69>DD8n-_;0ST_B68sUT7+BH&@x8;RAcE@@O7`tFyCa(_8Y<%(NiCZP;D z?|K>T<%6xZQ-<$f0eEzEf$U>$i2yb#=LJNUgaNO4eVe?qP-ZrELEtv*kJ(CX7UteM zKAo@bL+=OQKNT`k)^_4$yTBwHw(Tvb%1W9Iy4Hc6B6U08>&YFM_vNuw z!NY`5a0Vp#Rrw@nlaw%EIs1aU!IR_snWxg5`>s*|Rm`Tk4Icnd07F30(Pftle7$@XNw-M+=$D4{c|iKr%_2rF38Y=+ zzpkdNMV8PfH4Y|3N5p2HgsIYYm<-pyNooYHA87*DV3$^#5u3JA$6vl`8MclHyQivO z(dSIE*5+f|k|7Ap3{K;(ZOe`5^MI~7O>L!<-23x)zM=+sN~6RoINQIbR`OlT(%zcz z8o_K(I1^=QK+yk|E|CptNNQ!u2V$@n!3g475Zb{0U;g~f+Z#El1SFLQCx)d+NPUASu`pBj6SC^yW)XR0api&@b~5Q6ktjoP7mD_Z>j9g z1swDS*0J0~ee*D|#)h`~RQCkpz@;wu>I$%YV3NTR^|DIkVga^38lZR}Fqv1#S>nKW zr3npS?fe|e0bk?lG9J1UJkXd^K*h%&Ee^;~vgadx(n)OVrRn%(A! 
z)j6>k{^C|Oij%*6S*gCFGP2htCAOQs08@izjteRpR$`eU=o)4&Hgqf^S+l?>WZNXe zEjsERo$?o}U+<=WX{9Ij5S+RfOQ`R$iFss9lUf0?=Vt62czT2N1G~>g&8@k#jLH|t zAq;nfN;s7yjmj4U?ev!dxC?%k7{nBh*{S5*z_7r{LphP!?r73K6}J3= zOM$Ol_vovjRJml`3QoYZW4#nlIX|(vTiojnPz%Nhvp#*oi8JE{yfNx5`id=73Z!*d zp{z&?(Dnfv{GL!r@>(g&(f%qg+6|KjRfNCLu9pI=m2GGT_Hy!*F~|OCN7d6ZbMt@6_(LyQf?LTcm?M?6j>v zj3oc#M{AIz83O+o!5>kiab%SqGajR5IsLa@@#)8waWBB{_)Z6dLrI*ur$j$HQ{T*p zAC3!dfSIaFv@x?VI%uVmUt+8yToT&qf$F~Wy?YB096xvCBT{Z&qrJ`&m27@Ko~+#P zGpH*Kv?1)4+O>>0C887i@WO5k^i6-CL&#JWYkyV|o*UkC6KXC3W!Ho5FqQ1{Sc6X6 zKF|E-DU8YM%-1VhUwfJg!=LDdlx(LR$&hcY++e)uF?!O^ANWd!phGo|t0<4O`{6IR zn#f1dDBc{H9UKpA8bd1NsmkliRbu5(HyLrE0lTsKF#}C#G6APR+p%ebGs$yj{i+Kw z^<-l>w|BP7fiwNxUFb2vk`xya1hHqnHO6bi5wYk`K@~?UiTzp{i->e>wp^WSF67qC-#4PN+ZuWjr?es~33GguUM$c1n zeW$3o#3F%0Fi@mAwv0-}!@Qwo{SF1|W1WSuTlcL)p91O^3s%TD&lfp`2?UUgcQCx(<{*k7A^03lL*Js=@u!BL!`#_|(l zOBx-zi6!fFSTz9Fx`SqrI`7G^?B&_-d!ue?)LPvpdWdtqLE-V*HQgf!AWEq|>I`Mq zC}VbIKrTP!w}PMkSJ_-L52#LA{_lDY!qLw#`7nOS^alMG5e4c*TN@cL(T0J}1xr3HjA1bt1Lxi?E0cI+ zMpbQJkBEF?npW2NCD>a259KZLb9A>Xtf~flR55wSqO|IJ86iJ?&)bqUiZ}8rjNcwc zWrp$6o;l$7LmSn*&rmy9kxtE<}X%)*B>P4O08 zo6RDTx@sC?H==p^RD2(|n9??pM%Go|+!YOefO#qVFn=uFBE$+Rd@KU1! 
zAoX;KSM%qV_TLC47hBR=foAKDcavz0TYl6OXUu&iTxv;z1CJ(<-&(6m3yy74(j&r1Pa5e>E# zyN2%6p$^>OgtH_a*(F&a_K)jk#@XPhSS_q%s|IMMOXEwossNwD*j`8m%^Yp1^515H zB2P}@rbK&mVs|xtOMTMz+M;*$@F|XHk=filzXNj2pXP)6m1z-WgeqTPD9XD~N_Njr zXK|7Fz~)fQXg{=H1Zm!6`PN}^kr8mk$t^#}9*glvGa_UzREj#T8ASa>b^u(z>sD5C z6<)qp0L(Cxr--iHO=~OR1g{Too3d9-6e~{1(SO^>HGVVIWDYKfS*Hh(mLT^F)qA=; zM&{RJ25~{ueQ3cat>&lCI8}Z2frl3KtH-wVG(+-M#h-yrrq9pv%~=cZ<=^A4cqh|< z{lvPz(Brp0yG)=tb!%b`WLYU_g^DRCdnFFckvLs+3O-yIc?jcjaYMe$(wNHiEAv2zF)~ zt*Atm8U-KmBANg|oJ{z1g2aq2{M94}dLzEk`beQOE;V5i7kcV(#d9#@(yWl`i8`c2 z>Ng(OgFGAdM^5+J_{|b#>gHVs*&Aqxq(C*zlhjAQqg_e%7m1GAUh^q;dsXWNnkoOI z-JEL9&y_Mm2Tfyg1~@mHgB(v`SAwYK@$n2#RWLJZ)uZAD@_QzMR!h?mkhgF{9Z-RN z*_}*U^Mu^qiR13q8u^`LRWYat_09_mu9#IifXFcfuC|ajSBMgBo=Es}UHkTz84d32 zsCkGBl0d6lE}~t)vFRyfXl4t|9yw~P)|jrdp}rt;+C^M7Erg^N+_pdb>Nml3(vmK- z0U^z958E~RlK8rCf$QdMjPo|*!u9cJ`fb6U);Zt~kZo|eckU)qPpO0B`_Fp+QB>&O zTuSi_FJ*I~8I~kj7^~nEdM)*x&Eu39aSd!L^TJJmQ30|=dt-?adxbf;geuEW?`DJ} zUrP~qw%D5@`ADu-NumfA_?nm&qvIh+s@s!CLL57cJt-jt@H&p8$YvNa*5YsCBD7aj z&2E2uDag*+L{#A14IvpHfC47I4E0Vu%I|TKGBsTFh#KKI zcG=^}pLSkW-gK}DwxuzL40s>l1KQFtH55?#k{v^Pp)*vFoqH>j6p_v|xX~Mp1Jvjc z`T#VW2C z>Q}^WG{KK&I4_Z6Z3d10w8w>*7yhcN9vr3qoAvu)hLh9WZW3%fC?T@85DP-bfCSE= z5~NG(tr$=k9FfdIG%(xQGq8}MTS#yAOY@r(+1-AZCs|J7NartA!c!oZRd9mfFKLv} zb1g>7G*q9teLX)u5;Pmz;rNhCfI8j>DA%Cdal7MiNvQh$fay{(r3Uh1pW;ZZ+?Efy?A>>&^` z-=FI{MMc*1OIr#wJbnbLH}nN2b@F}qU4Gn5EUj8GZ&d3Y!Cbo-6t;Y4@>GH!M4>o?#V*m;U>OG|JcrMHb3+Z+5D^w?q2{fbd(VyaWpg~!?BuEi z4~RW^*?aO(E(N#!e7Gz<1eE>UlC~Hfk7eG;OZ<_KnFq&*)SZzg^j&Lq7ms+XM*hxN z=FM|ha3AV(oG1h_bSPtW@T6&H0@vs?OQ8n;?J|N)Ejg&mlW!NavmS}eSUCdjECRfu zm1k8BrMLq-fS2KbjkL7nP8{43pBK{Vb>O$gwpeHpXt0 zAv+Y|vT?x4V*4%u7@PIP0X80CDPf=Z zZ-?WQ9ni?eC6Lo5AXQh{(!2R9Ddr86nYSu^JWd9boZ@LATQnT|3{6W&O3X&YL)I#8 zXmihrdQbXgrAvv|ia&`x>otC!pZy-&n#4)Xab-GYB$z8G65yu`Ikr{7#B{w2LI~Im zSB%bTRrKz)1}*eJ5F@hk6JRO}j&o(`r(72$2R@2uj4ckB$-vVd^6>=>!WFTg!pisJ z-pzmHCg#KV-N~-VqKmWR6&fj0jSLx_ZCNIgt9D~NMyQIvt|@Wj{n|k&Lp^CWVa|%D 
zp`=yOwBmNnzc5W^WpRP|0rWKNs76(?%kWq*@9;3(>${txQ@9GtvZN!;jM~G|yKppZ znV$bzPof6IiACPe|8RP#*59T`>1jCxB8VmMEQfV}yTU_Fyg*X0vPMB{#Z=84U>W+{ z$?JqLW%@YBeHE7i&|echEiLSpjP;eHC(S<2y9tjHjF`30J`(4_2a7YEu0ES)L6Gh< zEM!pAR^SMjylD$_g@txFsYCu6MJtW~!ua**G1rYM8eHWkN8x#03Qq-|f4~mO$1+~HU$pHA?|$)FJRCGda(;mjb;8jqPqZsSt-&m3{;=pn(LXYPr4 za`ZIt#Q5Cg&kuVa|Cq^5M-sZb{Q3B-FC`2EFX5Z>L#JCMTd09`2W=<7pi=8lSaVlY zGtGT@{D;Aw?Af*MbcowmJ)`oUkVUkQ(l-m-I=rg8iblV7D&i^B=i-Phw>V4~casve zfw;r%811LUd*x&>F8X6F$Ym{Cc^hoo%sr5P1g3?_)J7ib^epvs#)gMn0 zWw$$H02MHi77f~mfV_t}z>%x=2=N~k#kMbGhZ)4nq*Fohy4KQcq01#LiqV0=(Z>Bl zb6E*=m?fS{=F1BGNe<&IdzF6bzdlqv%kqo#G;Nc?8-Q0zO3hi6H;0DSz$oK;?NvrQ zM-zlK1IU`&07Pz9EXqc&0C&)ChBQ)G#V@Fm5m-~?RtOR{W_SRALXq`oa59dGG7yu7Dw zOfLWq4t8uihSR1?si8jc%EE}UFdgufH)?BvJBsZiTd3+e#;=7R3K_#!`SL8b!4^dEgy^Y2eW8T0RFuX)`@hTPS~Yb@%O^16c$ zxCFS3@_9NR?SZfyo4w=ml7|a_MET`q&^%bkHRobDYe0$=I$?D-KE7B5E4eDJNcRG3 zt^>-xA5y%wAadyjA9t}@GtRd$64gP5q2M|f5vqSa<@SkacFXkWq`no))KcZM2+(?Z zKhas)!9yKC7^S6k^Q$2!?MUQj+Ruzrzjm=t`i2idFa`b-ov@}duC&kp7@*iGF}=mV z>k^-DMFkm4lFdbtm1ITjtPP7jqO!Rq0E{{xo`@bjz))2kWslC6mRg|UZ#`Hu?Uv{h1K*WSspdC<~f=rcYM=;E_?Q7R^>*^+6_cfQMzYn0;x} z6tm97V!IU9IjY-rR+wa^r&69TLa_WJc8aK>rsm`+<%8)g!;DEDWF2q0Zx%KHlU9yUW^3t{<&b>e)(RSy z!j4!1V@|+2t5s!vL8^VvK$s+RdTPgA_N+Ti@1+r11KIcBzB8RgmQ; z<)#}~#`+7k5^_ogLkoFlvz!nN``_Ir#kN?ASO7Uf1=Dn0zMmPxDCROF37*aQ^6iIr zoRdNZ$@GyBqJdr#I0yl7mmH`Rf=%NTt+^d1>0X#?c*3S~k#3X84GAHqh#%?!on2zp zM04~{uPs>**J>t1=<}L(VvgP@iQWcyD?E#h&m4-3{K6&#&Q`)>6@&7l;?{ zemP8KZF?hgq0B(K-|>Qpef9@heS8hWb-|ZXGf%-uLpQ@omzc@# z%#%U`B}aJ)=GOJA#N|C>jDxTW{#4j|xyr$`pG>voEEtXIISHLENR4bkB-1-fme>WR zH#sh5{C|%>rKc4k3MvVJ5fOw!B(}q&*gj1ZKh+FvUK0maa+h$Yh40N5EQfgc$-6m) zQ^SIRc8ZO@GJF5eo2SC}BC`#8pxwMQ=&N^8#Zya!oQ4{kkcQvq3bJ(y@cn#*#evrr zt-s`U2TqyF6Fd0dFnco;jV5NC(%4qhU z8#_XWPxTH}3a*~BP!fiIu;|E$pN;V`pr7vAewzx=SJj$L>T z*LbQix3~)&KA0?$orpZ)12*4WZ@!(8DMtcMZIFR~EvcV?i5-X__B|vM`|6$dCk-jU@fRVcxeY3emi80z4bZ?IuM# zkw&3b9n%%ECjPPY(&akHa^i1=iSd1y-LHam71rvaOUMF7gQla7$OHrnAIF^>0#jEC zx4*KW$q0CDP@kPo6c4Hj 
z4vWjy11z&vkYQWvEZfjDBp&mDb;+FxqmmvtduaK8D`d0t6BUQ)t|#it*-`w9ohwu;yk(NlL9>AbCsM3JTV+y?wFm4=O~?l-C1xFnKA48$<7|np_j38MmH~~P{hE-8z;+iNc6Fm9g%xoFkrh22-c7qt<4?BWHt0vO8h+b_eXV{D5oz;Y1X@|z{)tPi#%Zds3ki1!B|vh8GIPS^1^a3))*) z)dQMUi$`qT$)+qaZR7`+wqi)WwnS(5d`3z^D$p}7Ebc#@IwNUnc=i|H>(^Ly@eLlS zlANNEo1)o7{VTHgbN&T?q-tG~`Q1jqEQk!G?$u8nv5K3IOs)c$N+fJdL>_DG(b&+3 zu#B&?R^nt5l{Hl-+m%M?Vs)rfV?QGkC?F1dJ;HlUAzp^J za}FNdEv70E))&>8Aza_=&iHDlJ`mSo#JPVnf^`ZhMt&xTSd{$%SJEhq1!~cpuEoJA z^WJ!4H@p{fCiabB4Xje5HCL26`)E*X=So~z=;DRFv*0#o1;S99e2Lm%M8|(w7%pd4 zFo!ORjtqS4V7`k(aL}zRcIusT2l&2mj_)TQ$^t>TdoGMDva8woMq@j^{+94_8dKvf zkn>MC$-MlZ+FKcPq zXl{xB)}_fjv5U;nLj+Rg5kA8`ims8kUp@%*t_ob&-UW-i9Q2!LcbLW?y&2%}dYPT@ zrz}sp+|ZzyoYak{F7@&~d6mc=qguuC=W+W*T^D*3Y-}&Gw$b2BR07MR3Lf&uCZc!c z(0@%#W?j;2)LJ@)T6sFc#y^LswS38-k0IyCels7tpp`@I9^y<`{&jcx$3{t!*j40CGB}^nW$!j}BnRU2h}FJGVT4?k zjUDssB=1h<({yYgCZ2Bs-xFU&7BIf-Ot!Z#5Rm3m`AxwTMYF7Rp-`W%o59WLX1i!k zB^G~NUIw3SI3CtQju|J|-CDgYX6mEHkq%Lc>CVYiD3~td&1AJc6hA$Kd0Zian)M6| z{XFP9bGge;wG@PERe>4x>qH89q<XT=FB*2bRY?fl?B$23>DlJ9$)%M>$_84v1@>KG{IGPDAR@lhzWE`gh1ESg~DwfU_ z=AMA`vnYrQQ?y_hqoJ*ehj~F!WLA^7PTV?EK18luh27hk85MgiP`#V5ypLaE^iZL& zRD7_H3&*9_?ljB~1p|`C#uRXJ{uBYHyW;%6eAr&@qW%a7ElZ73Z=^pP@QbUs{F$8K z-!8I0dI;1h%jH@X*4}If`-+%)haLi2)#kS^H$=nZJAMvxOURBScVvc%N-0DU9;3W+ z0U=qyELUliQKa#QB6rzX&-uGD{Br^#mv|7Yp`{R%hf(1gBu+>4Uis70a{p)3PwYV@ z=!tfX9yTenN4@f^lU@JOmA^BAvB%T`pf9hO5j$r$!$n*k8Z-NM@S2!>{>}WPc8W)4XAEbQ@uEP=;g)3m zp8m{pV^s!%cy<2l_$CXJK%z1xx{uhl-N-F>@-UTJHJaT30Es~%gvIXi4tkr*maK_B zV?9F-B^8yLD36av-=nr(ND7j-VG*MBq2{P!En(nBrdIa&SI^t)^QZXrHKr13#65zH z8PeTSn@wy#Z8B`eu1TPYt-c0$FQ>!07_Gni(>-&8Erd81qjVw--zIx$A<1&O@{ zHVRyF49ipE3G3 zif%E$6Rx+;WpFj$whgzwPvQhZF*ET_1&9v8uT(mu$Yk%8ix9a-$q~u`gVSKx3Aew! 
zEVu?&ed4=|p!FXSHjo!m9U(G5D7|w;eYylq&hXRDLsPoUH@KVjyQb_L`$s%oLq4`Dl+ zu-E9lY=GhJ2U!rm=cffKeGV1n$mYi7o+wmO%Y~Q*Z}~HKw-&~~fcJ|W^(0OMWOr2n zOIFg#5Cb1XD~}_AzzI<9bdGXoNLFRgTB`4P*-`FPFqpd;CuwK2_JNb|)Cybd)L)O* z1n#)>@BwdT6Qyd}q0dk#PB+I2CcbdS!*u*mP6MTS!Wd6N-^M5<`T?L(7P(IZxiX!d zmxQlIe)2JV1RfU<0Pq)ye-25A4xtbw9Urd=B=?saWe4LuXb8mZl&m4!>5xz+p~i)( z=@pLkbc!ioLMT@ZhWx7d4B8-fN?i0}fHXUd&WLDUa_uotdF{#G-(;$6RTNe2=2scV zr43(=AS-o0PMwgtzxbM&%2|IO5JH&cv4{<9-%X`Q(L&$(Jv0D2__HrH&j# zul{7GQZKL{Xc@h27d_VCeAY7RM)+Hi0ythI3s)RWl&y3uvi2dnrO&Fb*}_=sw`x7k zTQdE&qSB=r(Tfui{c+f7$*L`ew0A{!O{=#*;BsxD8BOqFuf2)iQ3vW8yP2|R>h2;8 zJhvL;?1gFe8)Z6P9dh@ufpxhIAWbx_jVipf`A=2xPhezWGYfSoRFcH&%!~X;p#tZ0 zG#2ddf^1g7$+Z2;4@nQb4VHX$|B$$jh)LlBE^`z1XyLp$bamdwwXiJGUR`H7i&T?Up$B3vjMhk)x3$1bFBtfaZC-}&I0ta{Y#7=W4W&C^APfGEmaO)r z^`lhJu_$y#W!TFt&7&NdmTbaw`!b3VdS5Ff8EW^zXW;pjv!O&pqm;2U+76R^?q&BX z>Vm1m7(!K2{7@|V;R(g;sfXqDCw|dqfmSzc%&s=Ha?RG{Ha2{5OY%ojXSP$&NT$+BBM{FJOJZe?>gqEM0?H`Cb_!d)piHd|yu!K*cNFV|9VT`|A7||0lcoSRx`##o(0O@7oBdN5*U- zqH*W~5>z#aNem()xAj}8j5nodQU^5+bw9n$r=zH%k{e;y{e zoHWLz0JRufh14fq@nIg7XJ14GXpD(Gc@s_!`vDhAdefQloV&>by$6 zCX6>4BweV|8Z**EN|k>zo+faW>?jjeC-|n`ecMA&!1u#M*w{RpasjkbSryW}BtnhE zOSRGcdExUcx5-sL@PUI<(mGnk>eeFabin%&CF#05PJaXoCC1D43a+4;k=reRwSlZ)4!u^Ljo0UL${||Iky1rH^<&JQENE3Kf`=h4ES% zQQmoj<^X~isES#_VPDQZ#%as#0PfFRQryqo;j4LU$z@d`SI?lN3}2S9xlW?0M_SAq zC2bmG4jnNGF{L;C5$yT=39aY$$tU5UR~@1QIwX~c-d#sqOY#yqo|bcGD43wkRi&Vs za@A7)i+Kz2dM(ck5U?JzrJb=Cnyy$fvw3&EeAca$gZdN*3rWz?=OMpw$7fe~dF>~- z^oan(eIH5@0ZeqHe>#X2n--^B&EGJ2@8)YvBL78?ul1n%`cRAO{=UzetsC-;-a*}C zMat_HS%%e?wEph`2jujMnvNSwio9J850%W*^xkgaBH>Ep35ua}d-OK|&&`v_CTQ|c zCXv{QY}^IIx&flqj}71@nh<~w?WfR|gk3*E0Ryd!>>j(22*7tNh26P%Qekkx>}@U; z=i*|1!9oNMox;MlQx)lOQBNJ&m{W=m?N&r1?S560)tIFUygbgvyl9WY8v-UgcmHpl zP*i-AS5lB`{oo-UZ7$)vaNe{Ofmar8xgWR_N=EJH$$QpQ145iU;=$Y-ZGXj_}x0aUspFP z2$p9JqvVtKVK`UA4MaVf{j_p-Z#fTXeQVqLPlu|}lcYP6(Jc1fnal8mjS7_L#1|X4ju7OhC&Dh&<_v`8# z3Ub)LO_rLdjVQ6Mf`j|po>5qTpHSI#wKhri1098wmpXY`yaN_PM7475k)5KPSQI`uW_4|@gak@ 
zlcF4=I@!N)`y%@cPXrxPRv{>X0Dt1OwC1ivqVNmZoB%$odO=n;v#xu{+?JNh<6UQvwYN=j9Z(zpCcs zZYl)_25W^0iD_piigQ}rLDdDCgT}$f!vfG`}t3)06w0_mcd?-eC+yLyi zkPN7NRlsYJ5htV=!2GVrhd0wfwyw0pQZ6GCk+mRi17+E!G9Y~F*o)?k*}MmUd42Ds zEvo_HSmh}uomc={1~nC0A_$dcyXj~u$0q5OC!(1X#(Y9pWA{x>_f<@Qi;IV(r>tFZ z=!oe=Le?5F`;-6szU`EW^1b#ex=3$joamoCpGEF^MJOshll~Qr-5XJ3RZ+s8_WiNy zwKBG!7=x!;H{Fu9rG>SX!dCY4r)f=|_Y4n9B>_dElLN#Pa*8DnSG!dJiJHC8n=IR( z2E9W}z@Z~IG3K1>6HOIdPCu{tC8e9yz<4fN;uxI#haYH~cpMM; z2&8TxAy+t;0!+R%t2+?#R7~MWbIDa0N}y!)<$B~YFtzZ3H+!^bKbBaPT4zv84|PQ= zI>v@SPFB@{^F*?TXp02(OOLzQn0W}?algIdPZ;HIHMsa`FR(J;r89K<h0-2nletkR1 zlw~5|Ixf7hvM&9qbUefPj*5z0?m@Y_l7C`ra8@D|v$bHdLeq`*di&C|rJX;;)uR&g z^WNH0t7uNT%uzsG4C(g1+Yz?9{gmSKZw3_fK%AA$LoOiH+YJWVh0-Z0yLP9%r-13i z>{#|+{O04OQ(Ag3yK)%a$RXp-3*0p-YPmT1URX`QveDt|H^JESDqw~xSov*8UCH!? zoEy0hBFywct?5Vm4Z3~mku9YLNbaMmRSME^OYobNW5*+w7}3aU&6Y+$8HydAe1o?z zB@)PpIBRfqi*+6f7qD?ko{92i^VkQK zvG~jBwTT54eC{j8jxL4aK_a;!$j7I&$nFy#Z!6anEMFHupoYK#ZwQSkgsZ!70}v$8 z9_7vHP!=J^b>^l~78aQ<7eqn2VYjirI({!5Gou?`1_$;>k}dCvW%-PWVmuOV5{y~D z&uMOFGYzlO=LD0|%STENAW@v@RJT=VDkxn2YrzVpN?VVdn2r;KejTD=X1cx`@d8c2 z@Na7u6b3yCv9V?)`Rs>px2|`QJ{M{>74rdaYC#KWbV|0`@OOuiQ3%t_w|!`NGfrbe zi@So@1T+F61U{f@ff#HI)fTDv z7_%2<*hx^9Yy|d|q`<_b#oyNYb>RFzaD3&XRpWEAK^GNw;+TQF?#$%nhGp=ljy`0{t@TV~qC}}Z=(LOD0&C}zl=0h)m2dccq&OY;k<{10~K|{u@=FhJN z4(NU@8V!ECe7o^_Y+Rv6>ug5qb*`YeLMbbW4L8FUlPPi@E03zczc>aeCV`gaqJP19 zh{cd#N5<>$;q4F+u!G1%S_Ck8TtdBR@qqw@ zrzLW)t+84;p3sc%1|3pF_`V;%;$?iNgg&ZdqDuC|M4*`@R(NCHG5e;9ctLYM?|E@D zVL+ilsmd#E2KJUz*~zM9gf8xXT0xHIzws|a%WQNh*~4K3|5tj8P4(A0Wm2%NmJPbW zi}BYWvbut|n&_gMfo*yfXr^W?vtlx3X;Xo0L4mBkv9q1%>_AaO)x+>)Qa!o%*PBN0 zrj89LzH+x;I2CpoJLKuk&yp!9R3TRIan2Gfnu?XBJG&PmrgbTKiSi71 zaUGk4HyyU{Uy8-9f6(54%*H2KXs@1P6_%o!rK8xdVNf(3Ad~BfzBwBHEnYx<_^G0e z!p_^le>rcVdK^o$kNH72R34>7LTR#>m&aeoMZe-~|F|cgqT=mW zSGX0>RU(u*YxRld5e5CK+7>K;N;ao2?laei)O_?#go)uxGQirsnsg>k)zLf7CMwj0m zSxKF=T*uNvgq~jajH@b3wh(ZU(;5&=c)51}qYhQJxf5blc&$l_v~nE_HtgsVX{2a2 zCQeB|I8ZyHQPZDxNT@z|NY&E`#^&5%`~?<_(PEbVr5})L$|D>oai=)p(x$?Wu5Zhe 
z&|hyqa_2%bIj+#}J)qDj7u2pKxvQ5St@@5tLx|pCmI5wDuTc2o;ppzd)=8u?)|htL z!Kd0K$WW4ZS?ut!&~I2fa*BG#+si`Gz0^o#R$PCo5d@&`CkTFCzJQ7K_&oAw@M*wH z9k2cNWD02W2Q47{Y(=2V&uJ5B3>yeqK~30z;^A}coHUJL2C0f2po&(IV2y~fn+qAKtYiQU{U| z49(g8K|E7?wWy2RdEVfA`3H+(VGZ5)ranw8TjW8SId*7Xu3eM{W@C;`281f(wICA6aSRqZM_8B_veXDK(n+e9VS#c^|s=6QZdFMna(|> zIlX9Y`}d0XhnK(u2JfUqNbOUrL<1OH9UYE4_v1n+spJ)dOGYmxoLsS8_TF$~je*## zdy-%_qzh2pXifm6RhL>J+93G?zAg^uQMzV6tShc{3YM)Ggx}sj^-;V|Tnz_aginZV zht=*)gLviYq!KVapo*El>O!6SLAnfValng2vIn_cqCo95r(l!|`t+OQA%CvXb4YYS zv5hOu$${I#?4dWDSkjtbon#k@u8{yu9wZjNY!GHtDZ3(9H_qCWT6T`SAtaa;K0Wo%{^Mgu*(f<&20oamw$SA?t|m^s6Xe z1h*7E<+ZKkO^yRO-0Q9llVbXr{Syl=0cyL49-%Dq)UChFt^2|+-=pc`$F{yUJbPS` zu#RH!tmQm=`X$AzVi097EaG36pvt;gDJW-4tb|IVM4CZGO3on1zD6~<-93P|P>u>( z8^rPE)CtFfe(}$B+ahPk&miPo!I#-tl>vP0sImJUdhdvUs9SJB2b@XGcfZ}c)wt&{ zs;roU>z7c|joRDI_ObfwndcvKucB5ZP7LDJBfuB%$;xxiR|J$03Z1kpzz* zU5uNF32zuI4P&X9YJV-6l&s>?w6MjTwAmyDzmQmply}k+j?PTkQ%lxt48SCAyU;+) z$+hII$NtqYOA~E;3LgdXtQrc%UkH9_*;tSBw6;p`P>jIdlO;-!`5cWv-dSSpx-!9_ z#d>VOHha`>ZsB{C$bBq+UMF9N&w9*5dOnMP_M$j~^r|Vp#w$`06Q!@Y+TAAhxvwYuVpVOZS6U0z zg)b9}1l#iXhQu+&0%y1cN#{d%FlVLFA32c&NBS#lAAF4Ck|eeM>Mn2eePa3hoM~Qf z7L=tOi9u`1O7F3ZnZ>1r+tqR;GDVC6__0&GKbB|So4Zwu*KS!*xazd~eB)cVy>uYQ z_c}b$2cjj|?yrioh|}=hZyBy_dm(H`k&uNHR-6CJcAw^9euRWHNP%JrU;uNe@_k99 z#j;D)&GkTb9T0+2ufII#?2ljSm3k!lzhdW*9or(SVi-UEPL7YbE?k1#iArG?{3~cCAc7=gr1^;3U}y<3?aDcZ#?(IPXB6syW$N=mD%ieSaM>ll0zqAWruU zPkil(_KfSkl|}lX4Vi;nzmJus@X^Xx}y#+*K(j0-uJ zWhz5JL&#w5Su%(m9|%VuX>qqUm%4JtuJ89afq(U6;0S7ufQ$^A*8x4iV=GF%R30}P zQv?61w|kOP&?sx!JtR98p17afWgK3XeX?YOr8?weN7@nH2*B3_`nk%lm1BcH?=&Bq ze_ORTrD-e$7Y?qKL$28%X5rCE7TjlhW<$3pHzv(I705 zCPBL8cnanAXcE@)dp&vkB&0n!CT&R$toNWCLvu(D_yS4Vj{bBS^g=g*{Q7UryKiVM z;`3{`_p3O8Y00wn%E$V6^Pg-bFd{_bHpmjPhtpux3L7j0myzP*#`3W%k3qIH|M%G& z0z>@2uZMvzn1%Y5Xx{3toAu#hy@_5vaTrwe5#6$o`9;yk9&A+*53@=@_!`5oR|lC2 z6?R^D(pU_hZKOIF@Rim8>M5ua$=2+rj9lzazmXB?YexRc@ zcWP=xBUCxYc=$n%?-F(@?-@L_KQ9sB^3ij!u(sH=0BDohJ~dyG9~|c;eNU2u8AG3B 
zWbgGyS&33n;D{E&AB>KNf1K;Oxv7*RN9$svKk$}p7jsY%a!Y`;;|{Wx1tkUXzzPI> zSLCX?TG0S)C$19j@u{k;Jbe^28#mUg_Wd0p=5gaR>>3p^$d$ICOB0PX`@*DLds($Z z_0V7-4*$bkwBCoc%iaUmly~E%$8+fjsiChlSiz=@yr4cDx%4E-(OKL ztCD&yU8{4Z{~Vh!0e{3W!S|;DGQ)pR?)`pnWC*uQ0rnDQID}jM23bB7ajGZ}ojh~z zFUzav5-LDoECNCHWKS;(Y+m<0oT~cSg=^kJ)Nj)#mB$=5N-I+iL;BDI7f7!mdL5!Z zL^e}|(9vP0OZ<$yE*W5)Gx@+l3+efx+V{deyuHb{&6VR{bC_j$$*sdjZWGk~=MwIL zTwpR=Jh7_T4xHUKD_lSg=6Cb2WdBg9py&PQ8qV}~Na?Tl%q!!#3_Bs<3E z>Qn6$=+rcee&e=E&OpEGd4V;r-Da%^-O`%zotdKN{%*f|T1JG9QkO#t)*jY_btoUA z1{Vo~rsX#kIz#Q`LC#EIfJBfp{ij?;Bcd_B{1<$6dH><&WIWV#4{lQrs$R$k(scqb zm+YlqyTe}tw0IdK#&~K$?PmN$a2q6!Q%sx0*(vOX6inTXT7oN}+|j_6RWL|0^M@I0 zdA-1U$QBH4D4QvZi;@m{n%(AWI$UU;Ta7s)18LDx4}c9!?Pw)b3L5XIJQn`20xBxwB z;9mXxxU)GeD)_l)=rrsPBpSPvwS)JMJZUN^uqAGQIF?j&y|k#Cx70vL&;O$y&1bBh zMX0r1W^KSvi4T1Qia0J(;CNAQav$B`La)C)#SC0Xc+fkL;r9+t^H>q5Ry)3z&`~{A z7Cb8^%hVdSu7iqwvo(vk{ho2?Y5{y14K|4e@F;9^&Id6(q!q;s+*hG+&Xn^ZuLPMg z-&N`rz!DggN)yJ07Z$Iq15Xv`*klV6UkpllLTFd(jyaQf92|R*{4l}DkQ-RIEi zw>67v#j)51Rd4_{3s;xL-5dUC2TOnqxsS@jHp?M6TjWC(YmgQ$Kq&9^#*?Z44xGQUTMO4}wWM?nE%F@V+zy)v21H zR{9Fc!MVf%pguS@_tT*r5H#0YDq+&JOk!ocF+Ibb%cMafJC!vv}bkZwK0ksn55xf(m zj#cu~YtCf*vmm--BMEBQXvv%5{|uZD@icZelZXBuW^qX(8V&4OJ(loO7sx61N;-W= zATgzZxZ~h9S1o5OBAooPz(~DY+(M*i97*>n3xDfb3by?;%g1RqEVTg-F2gZ<%y%{d zd?9PzjXY&<>$fY|LJlHpp2+6--i0qFL6kaK7kpimNa)pvBw+ zkv=5%@kE^Y0t{DgT9%MmGC3iCAy$^;Il7z~#u>O$pjiV6#O3w{f~}2Kquma$v;3_ps0aTEK#mQcF19bCcj+%dZYrL4j5n% z-=HLjUv6<||6-N^0^hs>R*Q6$K5(A4$?C@Mm3PvOvF>E`$+5@$QhrV);e(BYOPaBxMAh zTxgi!MY9^ZW8Y;4rpD=t-N9HUbV_6;1lt33YkodVJ0Uy8>oNO2)7D>ulRi*fk#PZI z4c#dc1asVKucS}qpc%@AiX)z^#u`(e>fn*Zi>BoIeA_=>_$k47RFRG@hI!(8bjs3g z(p8=Eii#&A;TGNrz*T9**=&tqVJ25Kj3gIv{ufce)ORN*K1|bY0-l#XvW0Fj5FMZh zJ#h=H{%2-papc1VL_YkDZw=4|^+dChs0P$dM; zmJ@Ft5=b585T|@DPyKnb2di?o* zM^UxqY(o11{#-`(s35At`tPGQg1c4KzaVh48Wl#O^f^e7+hq5aXig&l{rcXU#WOjJ zS;PB```{)^(n#j%7NY@g*BR%Q_YBQ5msQuqLDlZ&+q+3IJ-n6sJ*&hZXgcaz_;19l zOQVxa^~m(52w@5RTK^4OS}UfF=z4HOTD*B(Q6libQuSyjhX 
zuiI@0KlVB|uF;xZ+9*4W-!tTnb#_q75bFzRR*^a1cu|OhY(JpHn7ieOpSU1qna;JIo=Qh5PN2yc9usaqaZmTjfkrC{_PcDz02IpT z91D!1gF>1>Tg(MY2gv?~Or{r5z*gl`qN}Il-G3@M)#3XShs?V#wYP4xK;Z!`yytsb z{dygSpX=rI$)s(B~&1JFBKxi#%h%;S~)Rgw{NY};Va z>HKdM;2qxdB|a@6kZjn}kr`~4{;skIQk}F_AD>jt7*-4iU?qB+SNSQ9wRCOhxR$&rU(s=Ep3p!Agd|bkFUyFQo z9~#1ggBtVUZY|avt8pMMuqu*qrvw7oXI%JhijZ zx?J%4d0;z!Z{pUoyxlg&@8Xl})=_LiI|uveQkGfr*Z!QsqZRIKxMP zrA^Z>>c}QT#yQMUr-eXOa4TXV*&N>qiY0`BJVSEQ1v|F&AsWi+tqRIWy*>QQrSaAc zU?rxBZONs@L!<|S#`;Ta&r38_VR&@TQI||jDY)E9ZDn%blq`o=bp)me14n_w@sIsl zy@0FdvDY`&eOR1}$h{R|hiurQ!`3@yO)`~uKMFej+}JsYhN1HtpQD}>ngP1fRH>#R&^b?C3i@#G<9O8FKF zcRoj(t%cm*TV5H6RLZ9&AaGlR+dha@Y~>&xpp9!XZq9xWjz+R&&uk7B&nI&xJ^Dr3 zo{}y&wel~>WWGMl^5+?bOPH71zV)_!4?CL=2UW|{<7>lDQ3T3*biXlqrlAaJA%7LyY9qDY(}6gfKPDyN`He!do+ofc z$;F)(`50GiE;8+iKksP2w&Z)y%Pu=jnoocjUs=lf4$u;%hMwfQD#~mOIGv&iB!c|9 ztD$+Cicfp0mp=~GXYzS(-NQH+|G=dH!7dd2t#|a?T@X%fR(BdVB03}Myq2U!BQh&s zO60oh`+3+~I)3~hN-}s?qzo8lr~&lEow`ISiO$)3OOug!Rbz48p1=I0J_4YL9!;Vb z8O69r@byhWG_BHWrGeG9pa20tnmfno1W<;X8M+KREWu%g+ewIE7BHyr?2!JwE$QhV zOksWA{?0_7Z?PwOp_V`6AWz#@ccCh3lGprmQOIsE(eCfdM1qG61nBT&2rf@nQ5&7z z4w9y;HWN!G(a=t&L-(n(rgNX2$jwrZu*P9zho&MnE07^RM?{n{Hefq6no1xLgq*M4 zeE$_dKNtFNvFDyEt{8f<)@o~F-)?7Tk{T_lqO3x@p3UEo5QO(3YlsP80&s6~%J{j1$EMR$R5m)dd$& z_j1R~gCob$;83t}h6-Zqk9aIG7a%33q`NiIuA2GEe^rIsOg2sN{h}j0V^e@j`N=n2 z*yT9aOd%)8?)D-^R^{<9g@&G4DmpZD{pdqbI_cNtVw~}s9j%Wm4ut~-fZXMMVo=6& zE8a4lvBTP9-Per-A3Wo_^Hvhq_j607HZAlZ1I6y@B4oORYEJmRc0=mPQG?PZD>$~8_@9V@|@ z+v%Q!>8_D~-!45Ll84&{_N~)^ZgH{)FYE#E#aJ2|?vDLZL?}u*adG~>Cu)X6Sx8=Q zSV1XiHAa`fNCWxkeu`l`XLWt1AN(@c!ew|J=pI)b>+ zg^>)uF)a2&w~(0b2|SExVuGIbZWZ|!3_eBrO52L!1;+PiodSC}p4at4w4KI!>_4Hm zvtp%|^%V7bJ-4isXTZstT6O z^btg0?G`^P!-cX(yYm#_HEP@v<0T%ayPnl6gXlQ-GsArfWbZMy2AnFAazQVb&!AC1 zE`;gN@|~nkKoqCiOuG~Kbqae%IVB55`iwMsZ$4Y&mifr9*{&DIOX79%^iV~&EjA*> zBnt)zd7{;2Xer(TxWq$K@?;*z^0fL^uozLPMtw~<8<7sg(hFOJs!$f06?tZlJv61_D91f>hDu8GD74|Q@0<;LhsmR zxpNnC$3ZbI<~pTmXS+B?1`2GI(=UYkI(r33AmpB-8%+#?Fi~Hrz9vcdoO#OFuUS3f@@BUBsd0^#X<1`C7njSeO)> 
z&p52-Z?6-3y0#w?&M)u-mwv!|DRPSk0d)IPHqBhJE1WgaGI3e0u_h}%<8%tjYP{9i z*O+l#tfb}ov%o~{T5RSr`5)0GBofL4M@=M+YB1+{_k!AzD%d2Bj9>Zi;y&Ycj@Qsj z>#(%N&TbW7fLD-%*vj?Sky8gC`a_t-7}+5SX|4Z~U!0Cc&K1PG!o&?U&UP0UNDSmV z&#+La7w)X|ObqtYSpFkCJg*6Sj!RuNHDmXp!V17#$i%|0Oy)1&XwYxA&O6-DQ`_2^ z0%)v+QW|oz_X;vXOPL9jJ9F<=e<>5b+oxywhbKw_8DRPCv&vw1AgyGs)!Kd5~D7QW*IL9`&QJTk{Vl5YE~4uA_Om9c*MP8Xw68b$Sq z%a_P4pcI#&&HHYEvF$xfPi7XroKt6IK#rOpib>1$k?Lj6c!+ExOy zFE;FWFJtVsUTu#4W_Ln!dP1jxS{&6e_RS(fpjA)1%izZcQTxk6;Z>V_-CZ-wyVT~G zu7Ow*VY`80L8=u99?~;~uR9`tc4eOhfmlB%LmiqZbqQ`1eFW zuOaFrVHD+gjiC)4dD*WVE|Sp$g2>hPUUX?qRnPVV1ui`q9QCDv-R-A?kPsNjXZFWs zCPBXEOY_FPsjwkhO?WA2TAkPs%_@|RqvA|G4Q1G zyEFA)F&E-7HLdEGi^C>vW)PVbOD=mqt+}2|!0CNoBM|^nzC2g(X2GBQf=_Xg$t1lY zSB|HRxV5r)d6scYFpG0~!qcxRxTvm)kAJ;gM+}0SEL#YaX>*zhiQjk7P*cqJ+xO4* zbdqVV;2r)r_*5`#6V@Kog5D5MIHsW3G2S^lv&Mo@J&Y!q@mjP@bYT!RuL`_+HTQJ< z=6}7;@sWH1(KuY}_$D#Y5?m1`*`BEJT_QiW-~&seBn-Lq)2+umc(+BV9t z6id<~rR6w=Xja}Nyvg8Ac5=2?+=5GXTPXXt2U$$(^i`(@vPKx{AR+jN)I;?W{uWqh ze{<8309(g=>VSR@3G*;#A=#QQTFf`PiNvB9GwHbT0OV|efffcMLOfoE(4l6d)TAY@_@e!% z2aNnH=17v6;~V1us0NY?r}FXJ*UA0I7F#p{y1vsL0~+VqqtPz;sl}tUzGnN*5gG;s zu_Dkujx`jD6p-H}N5%b(AGv7O#P~50B}fRcU}f;1x6N$eN~lV1yrbn9=6-GYX>G1p zvM)in@V*&FEMzLKpg(PSSR6ukZu%M^6BsI{+7CzawwUdQjI;2Ijsrngcs6*a5%Zu- zDmUr46(FV9lyYnx9XR(0QswQWEVt-txWcI2po5j>Zu-c7t~GLN4j11~t82*+DF)$mr94U+NGa!2jZie#jKCgpmf0Kju3HPtp{#_-RpT7l1qP>+8iyF@1TQr5*jMGg{KYQ{|@MTrZ?ZWA*gKw3>YWY0T~! 
z1S1DeJl{q$X-(pN2{o2yu$c3^Mt$Wwh_vVnp#DH-Y`DEJ-dCi3>2co{6L!0Q0~Jlt z*fP_HJWoieV zjP%@MVg2cDqqC2WGY7ex>(^^Sb9Tts@FMpp#E`9yVQT-plPuWhOM`FSjP`rix^1kD z{=_L3D+6*q2EnQl1_3R|2mJ}Z{)G4M2J1}urXX%s;~$TlKxtigM*<99($+OSB zW9*K4%sCG~w|ST4Qd2MHagc`{YIF|y-1b3MChZ+hozU{Z&;voBYlVpEe;h9pePNVLxfMo?uaI3eP+|Y#~70|P7Vl6N6|1WfYD8zw%SNm50U0RqS_$7DkG#S z(8H)ZJDOmTAi-90QzJwiJBhBM9el4gF;tRs?MWddECyS=DH38Fdbc)CI&|xQl>~#K zbOH=i6LAUV2<)TQpE5>G$cOSNCXy&-Mq&KDX%gM(#(=LW`39-C2z(Tc6JM?eE}gdy z5m|cT*`*G$bxeHq4!RH@f*e!9-Z~+wewtI}vh=lEo+UK{$CBX50yW}pBxHE9q6(D%Uq zPKH2st98&uODjj-(57X$*Qn%EKr9Fi>@39*itLFy-eDO#& zq}QJ9I=3ZTUDYeTo4ovcf*Z+-rw`=u5?EXj@Ipf#i?Jzs80S##7b)5qgx+M2 zC%ul8@~zw9S`=#=kqt8HlYwKK%G+~(HOgzW+HNLf!s7PMB9(}13~rPIy}yTeDwJDk zDCCt)Nf2KrLM8|6?FLw{7i7o&`S*`pM{vM(Lh_1561_a$OsRwtHDD>`XQ&moXV_rI z%w2FXR5=CNPwM4Qo59&I%6VgL-?hk?&8hUBS_utmk;8elObdw;q^rYAd)i8+&gJaw zb@S&Il-FacMP`0Q+)uCEH6m|X;Ml{uQWsn)0awB3KK9jgCKBHvI) zLfg5sI)Xp8twsDI8wz`iZyG%Y;2^KXoKdy28L@O;`jc#SN{yufo%1BUpr#xXFLT`p zp-N^UEc6%Y%!{4-(Vi%Xo&`bSbv$V7`|%|9G@*l~)NH-4EJ{jMb-OeYRVeHNev@@7 zGW(Nc;(8`J&>^%d2>2&u;CNhAyX|~K7~2^oP0`q)0wYf(Ds)H| zlz**l5Ad;)^H2`>lE*X?`ZHJ?f0ZoZVYZjuCU!9yMLkmqiAdQTr!<71L+d4fwabP z{m}Pb)#-+43Uqhb8)A%XbMoVc{rJPWT60c>`oF^Y>1-qFhe9a2;R5XEyTcTrT&6sa z(Li$q4Lt;5ngVT0v3>BbKBD9>$tk7!3)}yeU#%xpf-@+o_#fR$KE@y4l%X~~(`911 z!3)Kxn|b&#J#7N)+8N1xRYtZR#G3L03B_eFKg2)sp`<|h4yPNkK^<(q^%U=2V`GZ9&^bADC4VYeE+DCD9p7mWGa?A4X!5^ zCK(k%kpB!rbt5fomArg1UK?^+GW@+0rx`5{M522UQ1cvUI!TCp^JE+apGlghu4LTu zJk8@OTLGGP(f$6@s2dxhpA+{LlJ#cI;J9>NeuZG*#fa5{5b_K;t`VyBW=ULxE%*qU zZO30mW)@1LjXe2GogEDyMVXPT2fb#2OL1qGTktT2e8zMBDUTcasA;Tn7hYFLJUu>? 
zXIqo6VBY%mud_#-3v|!dst)$GeU}8C5%X#686H|5v6~$rGsB%qlqO5`ArEGwj(?sl zMu`{CSM{E4bPn5)SSagoRe=XQ2gigslB{0>7O1hi*Blb++ z+qb-$XupF;d7R*`uR#$jP{iv9&62hAQoz5l8?c*mF)6=Jj3ZRxxlfQe8-B&aU$c@Sn~;$+j}t%^wV z{7dHS{zJ{AjeSn(}=ecKup(7V5AVKXzZkI*XOp~)XKqhmzj+lmW-zQYXgHdSQN zZ!;o5i++pB4;&N{&Ts;c()rpNmpM8{us^AuCqjBUY*=z_rrxO}*m}~wcKESPzZ({gmkQPqsoFW_!=Kja=QTBs{+&9z1l)PF+vy*9IG$Qaey9 zlq>DT2p!LT4A{PS5BI2L8iQBBAReUhW>!UDU@~P<3$YgXPWl=FZTG!)p4QvoUA2PF zpJKG2cMLix{oXGPaUtx+ODr z#=dXd-(+Csl9()nr8x{1XxpnEu=KPg6|>HF!s)>W@2@tf?tYuseJz4 zc@)Oc2};K-mgpm?)+Lqp17tBdZr`6)!@)W0iq*|V>f?>t=wzOoXF)cWH-{X?lrr^1 zOT}=Rb#M+x5!<1_8r;6~m%0s}#g)-5s0bfBq;gkR---JrbMs|*R#%FQpLKg$$SE(` zC0Bm6^Fr}#QPQHHgGu0#;h2A;RFz0>3q_4HjKy+$LVYrb3Pmw60{JI{wCV^|lM0;fRfutY9z8!CRizAb-i`m`)LTN+p)Ha+_+mhBdUwmuf-j^K5&b6HTLw1h z^~zq=-Q|Uj=1r(5_BVvQ{m-J_**)>D=aK!Jq_%fQ@kf@&G=r-hwC<-nv7IbeV>9v<7r=bOVH;c zu}n@@Y;t5Ahk_*=Oi>SEjU9KSf5j+|@oZvAhCpgUedT8`kade@# z9)KWDHn7XXxq83ZVJHX_a=cFVVT(S=mKr1?vtfDY4QSqw8_Naj6>e>fzrd$e#F9J5=&0Q66jxTUKOjND4z%C z={)_z%3^e{2Ns&TvPO^AG;~S9yFVnK0)GY)nQ+iwSUf| z_<8Mt;WIo9u3>6fpHe&Ek1gsQ{N2`%RK2wpI_juwq$z)<&Zyw(_>Sq_*NDo4CVmEp zl(426qFpQ_ekRhyx&5_v(v=eCO#dZr^ZB^T@-pwc;~&@af@A}5d%bMYyCo#q(^RmL zY#%(8i#R8#rC~8lHy7ZUCc*S>k)dmklT@?L)!z4>fn86;<3eajJzC2eiy)b!MT)Z$ zU_t=Ow5$Q-ugSgJ5L)lma-yAOy;E9E{vU4#%dKr+PSh6J(sR=?XQ z)X=)M5yEtJs37$d@;bALi_6F^wZT<5i3^zm+&II?JRWl*@ALm@4fxM+N9>g+u?3#92N z)!1%Z;=NxyB{+>`yHGkK$EGau6TuVoYe{qnv^I;A$v>Tp;as_e!SHc^+ZL~yu^Dh= zclQ(&#d`3VNmq#OtofXLv#mjD?WZszPm>r_nb}sF#wQ!mDv8HuY#=*oXoJ$?dNbz) z*ammd$MVE6VVnMN0PZM--OEub<=OqrYsR|E!8Y1v@>0RSuS*Iz#FhroATt9GhF)M^MGQzh6 zc5>~=SI{ck|HY;Fi2M1L6G(e$u+glz7@8;e9U)Pm@qPWjJ-ppwm)r#)V+cYk;dyoi zK;%ND#WkA!4I#N@9ww|`0XHnC zARn726K>f#;GScw8yGVur1{XNK~P9YUu(6c-}Mh6&v1l`usQk6eZMDfy$nhQ^M$(y zW`$+Y)>6l(xHMYpk3!t5l&5xT{{$(4DA|fqA+NDoItRN-3bV9baZ(gXWjx<|FrRWu z4p0bUT;r(y$aIDYf&j6IhmxS+@)-H8@ZTKl`41w2J!MPk|5)xO3%8a0%nHY4i&ylW z{*hVrR$6j=$GZc#Q3T5zw8n`pq3s3Y@Ki)X)Rr%fo|~p*XO)d$DN?hcZhGr^%4f>%d=-UTYi-pJmsH|(x=ASe5b>^>JrU^i 
zfhLI}*mbdN%s~-|6-)W~zkbNoZ$IWFQt)WlxG%A5ERoqo@o(!ajMY~!%GeR z>UWa{qkj*2P##bmt}_{2O2H%n4fO7Pl4geImMF+KO>w^22w&s+alA3k;pr7Vk#)ub zCkycQh!%4^_$cEjRT!t!p*a>*Z8raBD zA=@MVd!ICDd&}&w)56D&35~q0?_mEpCq{X>r_zphe*^!8q7lX6l<0OKxl(o(LFvDY z92{c7-0RmgC3?W9J#;Y7!a?l#$J!SL>E3?T&|`PK69HRep+2a#I*_{=C(hzUA*WOS z{l~>Z1Y6hrZ2@G$0|J#i8O<$&nKiyQ9{%1N`#7^H4|u;!JQ!X@XR1j!Ecq5BIHoLA zdCz9Z363KMe7gcPPCB!SLrMPtZs;8D>xBv>=egrl3}3K65%xo_^;pQOs1)k)MvLL^ zrz_@>vv}QugZ>`E*sUk;igS5P22l$YA8VJ6|3`Lf=HD_#CllKloJ#4Sunb8;3s}IXtx0AR*T2|z8t?q2 zg#7C=W1=n%^;o6o97nnmd8pZ^hR(j7cpq+OI%EE=PD2~CAQs>iBxQEdT$(jw;2-;|;~irF z)SRiM*h`qyRlgflWL+)IVWG6fjvxgtI~nr*ZcP^pdtso+ibJ4QCPCpk57-e8jbL=U?=T9faL zWU5a<{U+UjM0K@67;aK_Oo!+0s!e)b7WnPewjyPcra9@6_V%~|TeK``s!-+Cy+!WQ zs{r8_FYze^I0(rh4kC?kva;o@i!QRZ3FtWxY9y&Bmau^(A<+IIKY;!=w%fL@HJbqI?0*|r$lO_Arz z@hbsk!hf6Yk2O?k>W?q~TP{*f#YL#Jkl{o72Yn_y#hhZ%q# zKAHuqgH6@pJD}$g4hq}7r+BV4#!-nUEoc95)cJJo{Ra#5Cp$9(Plv@Lwgxh)Lr_IEd@VbYNEC0!^+g_WQ*MaI!QBsn5$Gn#N9+zB=1wFuk$ym@i3^ z)yi3Ghn>i1Q&>5>Bi&ADyvi)D5x*kHa!C6)Nz-pYXreRUc3+tN{pLev0UttBZ%H)G z1Dk*qsFC0IG%fhMu zR@a-;>Q$NS%B4B^#vb8DyoO(lI1TpM)IwyrJ-&xS|H-Mw25I5Ugh?5}A7ZDc;Wtb~ zVjRXv;lcl?@Zhfu9`*VBbs9iYrIQHn^fkG0n!MKrazf$a0$}AtX*t`Znhm2WXsR;D z_W7UB6xUR&2#pOi2&kId6c_pxH2djmw_{c3k&g-RiVqKXqY=$33pqU7E2o?Ba>x}? 
zupN^64>K6Cf^8ajgdM5)m?1XZ1+{i0b> zkP-*t6P7yx^a$u4`B47M-93AQxX21EC2QRKw=fbV3L`mZuw7W@*=Kw3Gm50n#fdS% zC)Jq82->}t zgAw&8(WfaJ6y?rX|L9WyTy-y4)d;S~P+?`~eleiS-Hdfb5|@0=L+&jRqMUx_Es&&t zB~rn9V=dQkLP4D4MFM(qa}Djq_+b_1+npt|^G^rt~G zWBT=*+L`YtMh^6Vk+iq&WT}aJptu|kDrjsBn(>?}L|%0fq^{Bd@+PTGCtM245sN-c z9mbKW-sW&}EaHt+a>_h1KBY;u)`*FE56|b#U7-69vQZ3v&B`;rJzvxhm{i+cAlQv- zL5OseJQtb{{-PC(=YFCfy}e~~af^6&w0YsX^NFp66OYX*-FQK@FeJa4Nk}LfvbvvR zavA!z5d7f7HQyU_^^ka2uy{j7d#^)G%iS$(QO5~5>Piy&louJ`mwtdz2}-57&(CQ& zcUtGd)vf9?&U0~X)#8e)etpB4USh5NFY-fxH*}nVUeCdgV^AdLEaed-3=(@M!1_8; z4A0vXtm%^DX=(df*6$AnA^S%ySleuPQHk&!cO1YyI!bkp^sM4}y+M7$>bFfD0(Aw% zvJCk!Vq(K1_;hSdsJm%*Zf zVv(%loZ-{C)GFvDS!y?|Gx9n%RDq0HO(#Mvj$;9NFqof(Pp6S@adN%B*IVvt_-**5 zhion31XGrDpQS2sC~oz@%d%aJVamke!_I>8IM*~GYFzTeYQ2E`5FF6UNKBP!W-gyb z5Nkxds7xI;(WE@ba0v#?doflGF=2Z}`T3Z4I2nryIPMYu7&aRopGhlh;0ankM#=3o z*OC;j@c+WDv!(vG7%`86_Xe{?7B_9@HV?(&D^@b4*MnnwTo8r3=8>=i2IiPwtK6-_zt9^t37bU{$OMDk z2SO+?2z_9sClN@o5T>c}&y=PV}dWVSXH$eq_d{tbMb{zo2$2RFq&73AcMPq)gn|m1g zCPliB<$g{GDwq|peCw6~pHD>QH4r6EWAKX9!&l(}Jz;AwV{zR{=-?tXhjzqS&3+C` zHZsK(A8Rc1^m;|pwiI(3fMuO7ar*sL?(oO#EHDN{NtMbAoK+C_Md?*;nooXn0;<_( z;NRIhG;b#_q(8{Ra{!4UNoaj4RHf;NrH{Iwa#ZSqSD$2QyN=c@F5y!KAd&l-#at+B?GYAB`5r( z_ihN=vY_Y_>4OlHuBRmh@5D!QuKUAZ*;Y+XGK}tRB{d$qMtb$^Kwg1#A>{Ub-mB|i z--!16tEd56Qkmggm7;EWh-tob&Ra51c}_m{U(axcp^c54J|YjDd77H0d?ns~c7649 z>~@lbRi=VmY7F~=DO$P+71BCoSt0v0z>LyVaKC@z+WeX;`UpmtptCY*0YSw9^ zLXR5DdR@cG(=F_)jTomaz>f_KjH1P+?v9nI4zTK(R@fA$QrvMLQ_?-cw9^I-*Cqe-BI9Xt`IoAPY=-f!vKM8@x#yL0Wxorfsv9mbHXo`E> z-sIj5TxKKcNk5`09W$uodq>i@gX&U>ErcLZOwAR`5{^Wfe=`t-Y=-K2%i5QI3RO)Y zx5QcWkyPK;M$(;pDb^Q{?hj)}H%?0Z|Jc7%?4d^YfAteiG*AaCR5(4E=xtE+!UvD~ z@dfE=d0h0lm5l48i>CcF!^v8n2RQTo5U~`Jh{}=V5ljVn464=GTTYe8O_&Zsa2zrC ztnu==66;vUM`AI``UaCfpo1qE^MXRTqARd?qS7I2nV2{rSxjdkQVo&$9m>r|1<#@E z^cV#yuQ?XxI^ZOAK*cd)&T{2b90tbb@JO83UTJdEFg169lMQenHh|`F=o`6LnQM5G z=}yq?Q64#Foj!=sZcs%FO|QvDgA5{I-k3>H6@Zaj;WQi|y(+Pt3_Z0(|)~*Wa zoVSo8F*MHnA!OHB|CQj5W@DzrfLiVNLL{ULQDy$%I#gCTPsXq>yE0R5GAa0BG<7j^ 
z2S#Z@%9!B|ki)CHW%eGcKBzyRvUUhg2Y$sED?-)WG>h5u>J@Ic<)feXhL;FC3rr%6 zHIl%QJsLVo(#YXW?$3tW9V)>oI*7Rs*AT~Vb&3t%A4#U7%SX1)+aly0!chsg5iTC! zi}_Nm*s;1SQ_9syx&khwDC7^-bn#9Xqnk~I1`u9HYb{eI_xZ^hjfs2^Oq^oBBV~>( zA4qGOt2zlhV>Bg0Y>wQGuLrF!8udu)bUn)kIE`YVfiWl|!MJ<8?~S%uV>zR~z|{{M zw_g$|0DvWJZUO_=(uBb~Wx34vjPO5=KkkP+nhZu`en#pc9{wUgk`7MniQk6bcW77! z2^f#ZABE}>`RdYQyPsH$nV%eHmy2Y6qEZ82Xs5$5vp`y9w<7|X6A(wJJM1y(hx$d= zO!01gv=JoCf{Uq9CZVo-Me@{koq|$_M7u+_AsN?gV(jI@(cQ47{xuhfX7z%aKDDHj zCC@s^p659D&2Fn`vperPyX&cEK`wV_Ysw%`IjDGSbwWs!u^c1mrMTFTimufw21wpg z3p`TxSJ7S6$LYT$^*{4)^IV9Z$iTvhR_ETl_4G;c$|hhq@KHP8zD^;$__LZyyJR=1 zzdwmfddZ(}^Pu0;pUzs(-n)RJAgJJao(7SlOp^hSy`M3PofN(%vCQlqZ%!O4Sj%hx zSjxUCjqS}6&fp!XG-q1sf&LgIUA0M zn4$xUq%~Td16_&Hv7kAfq3!E|q@-f9YCEbhLh2UJ*uv?B{v(6mz>SIt!5sq#QsI6} zFAU`LKTscNiz!8CdX5H=*8OYs;U@-SkDR0~=JAF>%8OBNS1d zLjd6;OU>KnV9_w?OZ%*Q8ZHpRZt;3fw<+bPmFX!XP(K}O>3e?=8D32>>S|0j#YgS+ z1dcFKg_CR|=rd7ws12D5-9eF_uK=kJqex>--LB$J`!pWG56k6N4_zqc4c3X>&_G#( zwN6F@oF)Iq_Cs}HV;I}CL`450ZvR^-9wny zmiQIzzMj1SLTjzOsH;t|!^l&vP|pPSC9Vzo=^U}BnNW+YnzlxlQbUHjb{IZY0Gyc> z@al)h^B%|#kii!vxdb9tAfOfvfxBA_NlAeWZ@BwSf}FUo$;7yO?luRJEMNg~_2)HU zg^ZIlj(UEooV%dd#mb`{OmmK0=b9c$w8`M;M-Uf?f}6+n34yO*7Zx5-^#)EetC%DZIM4)1VUCROA-jw9jhQ`zA0YtO)S)!2=IA> z*x!*_f+(pniu%uvLlT|;J?^dl__m$s998c@yrohU$2)WO2Bsl(`eTygzV1M2JXDT+xkl5+=)${vpgH8{TC28| z7Ux&6stJ%8?PIAeu^UHW03f!bpZH}deyWHQH>y)q{oc{9wYu4a8IH8J`=?TG|rks+LiVP$ns<&P6qAgh9oLn+ECE@!%iP0ww)H~7|VO+AC{_wn8UpO zAH?hJB@8YZ>1izE{-1x0cF|BO+j7ab19 za*-mWxoBb^MLwTIq!mvr!+$A6)2F-Ndt2o59!A+*m;R&zt~W*b&);M)UAqd2NV~k3 z^vUwe)gl_=;Wkal{aZP@VIxltfLW_aGQCh2Fib7}23)w7d59f7Y-et=f4r3Vl zJuqCvRQ`xc!DtLB!<}>@+uoa)ArgF3x)l}Oz-Y1ULa-%S<)e8FP~oYj+De>2@u)p8 z$bi`@vk3}6>8grZw;$*ecp5!8WsXNbY1Vmfc+~6{Du5)x*7~K~nK>e*R!{G`Vu;7q zYXr+VIhn!TkDA65zH=@%`X)SnbYPz=YoX~G=DrN@3>B;;wd#mHImk@@at-I>A-TXj zOG#o1WuMKLu&4LXjn)UL#|VHAo&QCw6QH620k1Z zIIx~f1GVEHBDF<8I-s{Ajk65-+pskCds zNcPaqz4F;m+Fq2(7h%EA)q{)Kg!p<)I!2k0(bVpGu$D9O4<(U5es3d7XJwSJAQLeq zDUm%;dxVsCSV(^ca2gqbEzZBN@=RM5)i31LvFLEVqR3GnSmSn!ks9VT@EqGsOA~`p 
zQ-CwwHYj^cpT5R#*uZ0_$oeTtS2G^iWhErIQgt%+}QB!(?K%^VKbibm43$kQpQRJ%6j+@lCP3y?&{3v=BrgWnD@10MInDB z^&K}Kxd?PVmI5?k;Ne;%uFDs8X0{6HD2`-X5$3?D`qLS7cyq4ib&gTwcl21coRPJ8 z2?@SG6HWIjqZuu)L5pV%$mXj%9WemdWF>MmHm<&aFh`*96xm=adxJw;V4vs6=g@C`A?WA|Yf`+H-zXzJO>&NFU{&P^ck4^xyBB%it zQU5Syy29#YZy0M-8wfR16UYuDj?3^8Fb4{rq{N`vgpirQ*ERep-Gjd72m73c8LXgL zn%;+o4;?hJw@bgOcahMGZ*+X#%;iN23<(M~YJ`ROBu77{2@|ln07-I@#1n6JDiIf? zFT#wqn0a**QYnF3#KPTzbF@bInv@DzZQ;mbx{3~qn5w1#0zb?AjXxv- zkGtS)tC8uh~&9Nd62=}Bu_a0tn=09$>qM=MnpJ}3(ldFK6S z$y?`{_r_5Ni4OYg77z~XBopMLrp18opsd^T(QLW{O9GJ;AuqZ*3U($rFlhfR86WOu z0b0QX+xPiO>s>Y+Q}>}0fBDMvS*e*Yo<_bmtgYIzepCVRL5(;mHg8X@KndkJ$Lp>j zRQU~8z>#H@IKiDggM*&`_OH_Mz@WbATI}$YV1;kN|G8a+MyW8bD5LRp(Ge=VYvw~3 z8N6S!J0twmq0lt5QQSVsB^tO)e@(9VMui7~sD(Bnv` z?mR4;MgtnJnr^_G?a_juLr!$IF!8e8(!cUc>qMj7ulx)raMC+fnnbQ*4d*Aft{PK; zZjpEWiBHIf5zoX{%H!*8%tb&R?>fZg!dGV)}pbk1zGybWhy+E8lhxTNo3MoHxlvmU7URAiwn!)%$2^X|4S z%08CedkuCZA<%%^?T?Y@4OR74CUam|Y&xB8x7_y1?!uM`vj>!>sDsyp;PUchc`84ipIF-FhnFF=and?(yjgD9%a5H2gQswhc# zqkr_f3~rF4*s{=s`C8XA+Xn@`f*3b76`@_~;@e*d{8vS=OwZH}mN|dXSSHi9SNGMT zBaHQnN4}WW~}nz;J2J<6V>&W;1@qxzF6bP`!&!O=SxtW1!3AfOS=lS$cC+P&63@sjpwk3 z@kesF<)|+(+}!zfe`?pIa4T_OBs&e{iS1pJDn#iiX?jcvBjM>=ij2H>bEi0$h zn~Qm8wmJ=St;N`{Q~COyR;I$)U#FQgv8Ah$Xf8Y~g|cK1PD2N?6y){B04bQ09cp%l zXw7x=@kO-|k9rD!iL5E0lS3%x3$B#J#ZcgjrJs&*>*is0Qmni z8f52M2l+xC5iJCG1q{d2>2PBs%twT&>MNo2dd0x4$ZzAXz0Ck7zG|X3sYqavmGE&ElCks|Vjk*cQkZP_N4GNPf z=8HM*V&y;@LihZVd335}Wr`ee+oJRQr$t>1TGPTJpv?wKFF>xjMWj(N_n%brDNY#JXNhP{d5 zFvq?gi2O{fHZ~A%ZrvGjrrsgLP8<0TnGo|dy2Ac35Iu|}%AaJCEFpQ6jn(;&0j+hY zo5qq9&8~!a2pIBnINo0{QS_$gNfm|Ko{Z~`1%v&Z3DZtZCM5{nCx{zG1n%xziRCRz-+5(V zLSt@tgP;eiXw_a?aTI;Q+(^2>`aspNosa|{R4~R>dL0NGC@F%s69KqfDpTVjoB9t}%dg0^4 z{3WNLTA>I~o&J+sYk#}0JQQ_pR7VNF9n*oVVr zY<6z68?86aq>cVi-!tXCd+#kL;rg@3>S)Ut(ORO)iMaFrK2NaH-DKA+j|{;2Y5gbZ zTAYM(!l%lz;vW1->NCeDKj(fdK3Yk9BZoKLr_is{Gq`Z=Gl4sog#PfpXN^HyMVY&T ze)_8aQU^~vZ|n*ZZz&cs!`kz`tLLadYKs+>M%$PU6f9IZ4se?*G5{x+^WIKAcMq6r 
zrT*8JQE8RN^11bsRQ&xy)!idfKF5lxzz|J~J=Y#KLoH|)RlVn=+SECD3X?qRWyb1c?D90`aUo4!ydljiIf^?rD2juD?N%~q;0ZiGwz0g9t3=cS0zOdZ`!LL6iI zA5o-i&B9M&jHqA?D5jCh;CKG$;UVdilDT`L_CtX9}BL5@UaE@5Ha) zjzrE6i?uW`Hbe4z-NC-Z2RgNmm8>imOL6YPaj!#fc_7|3E2|7cfyz`Q7axC7q`POd z4GaSXgp$Wyxmjz5p#uiv0Uga{*{xXpD1UX(n`_LA?4d_L@(lRGFx~8Y!A_-f+XqSZR-q zOBD)I6-|uQkZJq8PgsB zY_&C?(aT>G%b5!l9d%%^%J$P8#WU<$m!sd0iAZaAH$o4@oO(NLFSt-s!_mN` zb8a?u;uqSU-UWLox_qQ~lU-rkWt$UEyJhYS;+dAl;yN1|684lIXL041{}Qva6WG9I zZ{;{mMMy_A>foMOb5gSHS6nx zt~I|l!s+0b3=NBrHA@9$7`3J~zSaBbrZ!|n6*}K(8duP>uVVA8Nb;f-RRr__0bc8W zM7G7)In|JigqwlVE+Tf`dQ!Hw!ussXGUt?`L1RO=PG2NrlK6Vr(Pk&DanR;7jTVNl zs5QMw$@-;ILGDT+SIUFV{Sr)q*WBe?(mq;Z0AUm>-INX)qT8C7QYV+<8wOUUOcyqL z$^+NTP+@k8pF>gL+8#v0ylN~Wc(X9CL;^s}=(HB;E0|%~yh6x9WDocc*0%`#!ISFY z>JtJM((gB zjc`ByRhe6{PWn^cpG_h7Xr8NaWN^MF%LM$bKWd6I0<*_i_5Z>A@Ki>?Cqfz@?CAw# z8_x0_u^2N^ovm@>Zy?+2hQ|YG+^>NTDgOV7>D<9qWG5Fi!KtX+%y3>p<6=+-W-pl} z=A4IXOyl#i7^!E$>l=Sa2&O9BA10h^Q9dljuK6OmLY;5OpRDKIwe`dz2%J~P;|Tu0 z1iS>m02&$&w=4u7hOK{2thcCKzf2}7VtHh==Vjui`(w3*n6Jl$k=Z7Q@T8nt9S9mM zU!efFmfBs0wq_<$N?s%*`1!W{?<33RCY?IH-3gc9O0`B=V*T&EQ9;iuveQ11fOujg zQ64=!))|rbpr5I%&PNE87|Ox=3HYk6Q=c@uaSw(brUUj}# z{VmK~e{(5yQpeG^q_74dzyrW0Ub4*P!hxaB?LAgA1G`1(%E&~gMe+XdY`Dg$KU23` zwV3!yVdk0s(OQQTah1l>L& z{l1)=w~LpAZI43P%I%JI9!Vu{m2$~=L-qs7U8)7$xow@tUgEMhWCC@b1NC;@L zMeUvy!+(xYLqoFIq}Zc^Z$cdB5SF;>P&S~8JFQ?tdd1CdJx#-<%hjSmR;q;_Uyz>~Plfp$Z{;+3a?-dV1 zOO$Z8p*)Ygz8~qX9#;dddX|{)o3}9-)<|rD(u^0?O59ogQI3(qA*!U$EMRL(TyveC zPPw0yV_4x?&<^S`2NPsM{sdj@1l7**6JhLi+#n@i?|6ogJjI*&tT^!!z;ehK`zcIU z7O#l(TYioS3=!!y)jnoA>Xk$RkvI;x?fa<*oyHdr>?h5 z(n|U420%iLg8aUnxf*{jHW5Zwczy5FNr2$rpLI9_XYH`MNai;gp%gp5p|;Dt3c~P} zXkXJ*TPC7~Ut;YI#Je@jYQ^VJ+AXzYK`}Cuv?J8rg`%1W8CzZ=M6tR)CocKsfd5lP z+n~EUsc#c1^AFKGjt_?+AT6^FX2lCBb9X!vH%t<>(2fG-=3ic^NHZ=p-+r(dr`W|) zOGMrkl#<9YGXV3a{2Xil4Wc^1&kDveme)!2$0TcBhrelyuk27%mn(HgK!!KN60iyc zodbhGeye`HNYiT%l$0Pnvfof4QRXV6lz#FMpN8TW8E3(RJ17kUZIcYdzh<+mX8I5^ zU+J>8+2E`Ixo#c8@Kp6J(wzc|(oD0y+GgY~%SWoEb+5NDv_@~02Q0klYvlX$&FZsD!(Pow(3EjM5N*w{M#{E^M8 
z3=q~(cA61@tW4j7!0!X&%U$yM%DI`lND=KRdP|c5suDO|3tf1$l6e#1Sld15)K@Z} zpVZ(}b{hy~t>^|}xZT*Tx2D%^%wIx15luljKOmdjWdUz~>pzHoe~TkLYMb)F+};IRI&+I43x`gI*SnX=DBY z6<=+as_HARu(&SHGfdQLLGz1ym)Dq>uV78rrca71>Sr%>xeBRH)VlJ3`DukVimFX@ z*n(+2#?|Obol^*5Edu1s9bd9GN9~w=m;kPT@G5$xF*`>H z>g{Slcppscpx%ynS>&N8sI^LLIW zTxl(Y1phl&Eew6|>+>s;PekfH?ds;SnjFI10ba_37ho@lq@Dc-9m zn2NE+@C4~RDY*MfAdMc8BhCeDee{AUl>9k^rTZ+g_uN(>_Yk`(Yg0YB?EeoCNhQBp z**~hJ2!}EB%Vw7@i%L{PH~yIu659YNw#Yq|>?k>ZJJQ&|6+ zjI2lgK7j2&N}LCyEg15-ahwu`<|#BveEe>wnwsh$PVMrKirRUw#MdQ3z`E9=9ds}a zxl?6hcl?1u5=N!3vgTAkC1Ng@BVoY~1M-T(CCrH>YTRf0hmRMHST>%0F?NsIWtM!p zMVaYXl2%`Za=#r$y?;g3ep$GdOQ1gO!bIGSVy|3|PtV}d@(yS*Fp?XY8CbxvF}hm| z-sWzkTKttwdeS1oVvhIipLF7H@P=i%g{9PKgeqL9Q&6vHU^{m9cpJA~Z&)^pVbT)3 z*XtTrfwdpCC@aZNc6o0S-j0l9{p|PE&j(2@Zb=BS7@yE+HCX4X9Kp^_QfKJ(=_ISt zMy|i>^hJnyZ1G0kBNsN-sR!JKr{PCD9;t(8A5%n1o;7P~-{f=f&}}~|d+#YaF%eAS zMZSulN!Jo=_=}Zty_{QX<$eOKdy$2InMY@`IUAD!?6)=bA zSL&RA+TxyO%|@tzABDacSdlnUyUkw?|1}K5gJ(7_pNV) z_%jp>_gJAsR-8B!B?%{WYYhy*I@&R0Nf7%|{RM9{Q z%Zixs?G^?pketV$hmnB)!9pA~s5(PchzxgA=+i7-_Q5$<3VSSo-sFOTHUT zi<0V0;5+$XfAy`*d7)PM2rqQ`5DUqKfD_XB;o~fk6_EPyC~i>Pa8gmzAJ1;Lr1@$b zNg+Ld$z{yUXaIhj$Q-xnre)BX-j|8Jy7gqRr5jY|c@RhQQhO)EqJwmB)3pi%7=#c% zDbPK!n199702?MvtCk|T)Wer6Jq`9-BN^%i>tUE73KhPn)0SJ(wp7B0TqeXkl2R{J z?d1U*+2M_TmA~hog9SsC*m~qWXvB2sOK<-DRIPHEGD9;Y6r;}Z2;4mOY$5|!<1S9c{!&TC8;5QEqagVYw&5c% ztBT{~Wvl?3LxVzG@%r6Lhr1#;JV8|ePe8E09*12A1v=7O+6PQ_)&=VNQBtjfmqZZ@ zsY9-a`@ie+X*9%L2c-I8aVKb1rGy4WoWRiKkG{Uzc2Qozj7ne{8(0jXL5=gQS4$CZ z+`MKbJ!$JY?geD4cIxJ}MmW_b$4xm*cVq+(%_ELOtWN#9>N$iWUM4IS^I3GrENFRjH|tYxnZBuV*+V; za$0M|A_#6@!Iz)6b=(dJR6K%z_4x=yFI|*&=2#mH`X{LvvIm3HU{L~Snl7t_o zk}-*zXlIO1X5XL~LGCkGvKSs0Bw`cUX)qxddRSey>Bm~u0vqxI88|vwB4Z8?ptbbk5V=dP>G?RKn zV^~v43zY|C`70JP{}ZI1nocxnqy(*OU|EOz;(pPNm@!=skOkGRBU4zvqllzXh&Z0+ zUJ#&PtLEyl4Ge;NS%{j=wv`T)c!Wk#tM19yv^R69;OZ}kk^W?<8zZ6?(A{Ml?$(;} zDhY1d%1*Pr925Q4nc13eSQ0z8Fl2dD;xB;Ch_2|}oq&JVd{w

zPUzXT_SX6avt0UlVqHJ z0=|f$b~x=G>)_}`SD}I*-F;i2eUPOxgajA4lqZ5t><*AFgP=r)%qVw#IWG^;oV_U) z2>bwY7*!o|ATx=>UYb}*5mcKzIMviu7%2Hw^+z4n{%b|>4ILFNgVAM}p0ppH*2TX_ zA?@8WC#zcTT*rOc6|%(gRcJf&K5n>XZ#=eR30i(Hht;(ajS!z}6twl;T-Z@^S z&GBY>3=Ks%7DuaDIk`$p^x^iib(s$@n~W4C8Kdt}tpcg+(V77?ujj<1`&cpa`u;)v zLaHw`;WplY_#Yp0S6Qq!4_w`Jdc%n)s0&+7L%7CTVtHVKWV{BQUu9)1U-eN1lmSS4D&3>U3^rxC1 z8`eYLZI6w8{yJ$-e!D9FM~;-5eOl)O9dTD$r*SO2(K>|JRs1(j)UCr@`3pM|RHDvi z691yX??1u(I5O6a4Xu4dH&UufRz}6omWm1zHbn*mqs6Jxn8WB5Iy)7j*EUW|jrQ6B z_k$43InpX*HLu3=EYaym1goobYLyThF(de3JimoMY6J}*wcVM;n1kSpbs=>HOa({b zOJ<=o9p#BK=1Yb%D{TP|84!Z7tV_&R#O$l*7a4PbtzNj zB%;@`+q&;8#kh8n&S05q>ntX}^kN4{)-Yj8nNx|NZIzf@l}y`Q-(O&!+=KRYTk5t* z1C#N>nvUA5S*4P3FV!SD$UW7cP|Dzx0cmVgIqUAOoj{%1qt?L&aWDco%<`epQdKF^tkNM$ES7LerXy_JIK8i39f zCcL6X>DX}#l&vO*2OBi{IVm)=!@EmNIu^Bl5iS|?Y>Wt^VP z+57X@;kaC;bwB5|w7ey=p{=?0uQuNWn{klC>zmAnKq{Y{>wKVe_V@X;H2k!k0iz|j z;;0^?8|^%BCz-n1b6|Cf9f`;!4MhL9JuA_lz4O9IbrYqR4E%upKrl^RJs&~Thbez| z+X1VcsAgsG8=bN<*KuWEy4`*t3CiT%_n~?o>irj!b4qP=Xb115myhD2eHn(!y5SkA zt6yyp9L=T%>qiD2g&)9-1n~BSnvr*7PXbZOH39|Iym|Rf>9y#zNBLagU>6#vWvlTt)G<+s^Sek(eFm(pljP(P9+vALH$6?w9-TPO^C z44gW-JvAoPc?0N@w{}0tx{gfWBeU?X^5HO?y+}e;9=>z>n64cg8NI8n&;^E{Iq3Gy z+bvjGtc=H71c?_h!IE?N0!1+la4^PY2twWLTaaVaQFMCoWg1Nj=fM|`rg`i>l8l}6 zx9VJ~XYx>LuG;@}CPUv$7=n}M_a!8+88*L;Ri9eV^KTtz7|)4w$>UuN!J}wmnxG{Q z__h3z#jTJqGQt!OfG~i!C3iGP_#hq=$QbQ*&0O=fc}mtka*LvBKUhEp&P|MwBC$Oq zYuy&tmgvZ>z!_uK&R>=_T0umb)X52;;1@-Di(6D1AJ9pgGybTF!}Y2q^=db6w^lac zZKF=2)OPoNqfQ}Y#YeXO7Wx8PnY!2CALFWZ-`2u4Byz5Nfv3m31}Wxr(?wsdvp872 zRNF_g%x;44%z>ZHi|jvBm^poKGDTEy)M5+SgZ#p#eFfl2D*Lb!B-ZeSi5ThU1`HK> zyXcD#hS%O{VyJA-zxYpv4D~3SiE2t)RL|zTcqG$z=1A+5deFjC9Y(KvY9Pgt$Ya|O zhZ!(ei2kiWeBVH9QyhXjIbHs$~kuF4{tMvK8IE&7{=?nV(!fORWgwsACTGOu{Z!k1kxsKxN)|Uhbm^- z1Y^WJiB7u0c);72r&!86v7_k(JP>ULWiXBpmQn*n(ml;uv9iza*OVRnEEu6LuL@l z!HJC^Wr5*;`jKS(%5;iP)2tl8jK*VqirPV0RTSa<^UKCi1x>Z{)w1 zN>D!f7)RqsnVBr<<0_eHFM01bCIgK@NBS@S z79ap%$^c>du!mm~q_hwqtyYeC{!AonBmJ^Na%;7Ghop~JZvKuA*jc=Vqc1LT^KFyx 
zYqkK%{F6M)O&*6u(c+5$CAhJ!$bg`vkDti&&p}TG*TfLeqs3d=!ok40(RZ`Jqb^H3 zyX7eB6jzMw^%P3-oF%(Qi~SFJ#I);A38wDp z(dymg_#rDVh#_&q_gogbID2rc{}j|;$8kan`h*Td8E4N6Hf8yG`isA6j7)=QoiP@U zHbLpV7JeLu-XAms5-h5aj0=O(g7vaGzNphRoR=ZbUqVn63 ztwakzk9BCQs-Tzzv1^u+9?}we7R!hbS=b$!5_H-7{@ffXbk0Zzsz?o}*_SBmB*j6` z;9+{=yZh$0Sk9R?$D9S#PHY`diVM)rz27#%uQ7B@GYK1=VLEPk=aH`4uGir_kyD+w zr}0JAkgO!z#3TnNSxf`HfoO9S{-kI4T=XC2{`oLll)a@v#6}IY6_T{Wiid(?)Fitp z;E1^ciA;E#jXvDV^Q)7br@{C-TfhEn@1$B#D-xW75U6StzEbCmb3+ zQPeaoUg6aIsRi0*+HepZR<^jO#^?@d>8?~UTE$yrN?WY38MKQ+wVC%__+HuQ7l~1E zWOI0F#>+&ojH9CDs&yNG#9srUD|PZL@UU@;)N2{hbm;)s;zR0AaN4yJ_Y<;$Ls}%b zhOb8NfJ=E5TDjaHiP3Mtbwr$ibhI&0zu+|7{q9G3M`26EqYzh@%GC3={*{O(r11>? zzN16Ye)cb)7MvGra$?yihN>N8?FF`=z_kueSySvBL)kh(mP%V6`9O_`oI1u}C6lL}e>k4r%gxIkz@8SqD z(U?fwzuG@dTSLRzOHDQAhRFR@#JcLs)$$m3d`-ML;x750$RD#gCaJNgMwjC@qspfz z<+E)Uu6+j0!N}1Lhl%(dQ21kgEOjb(}Yk^CzxrtJ1&RS zYH@1V@#59952#BrD0ks68v<_v9`2qWgZ>AzW{E2CKgr#VS}Sy*UH4upI36dkwmO|T zzl67Z(n?kH+96Daep&KgnN-y8jn1Sdk8|f`)>r*3?C>(X5@EUz)`7)4tJtjGE65gM zX0EI8-;nHIWA7Y;Bp;fMCpS$vn#33k2x)q^#|b(PK%2%rML^quby}?FWI-2;Js!66 zd)~*PmY?W$7BrYywGKmwSzJiwT+72h7cr>EQSN!DQbv+t(aMU{i%38c&+KaY@9+3l&nA2-3#B{*kZHD z078CmWV%A?=rWsPOeZUh*j6QUodKj-lU7INb+9b{Fo%C-3*4Gq;TW2%W#3uK8(4R56#<6Q_0i{@cr&cq;{VEzsLc5gv#TK1RHXFk10CPDH7)fS&uw zcF^nHnm#Ywm`HHl9vn3`sfL%#>+8#->X1w{f>QO4)e+S}Gpcp~7vJ*d>Ju9=4s!|m z1}@!;1xNA;@%matQ{e~K8-r^22cd6ba+tGnGQ{$w3wM|ukAT-H=MS|h*GzN#ONu5X&@s9IB?yd1-gq9dv; zU@EA3W{hLeZDhk$)V)3dn@)R=$uzs|Cu%G}62_M{c#?6G0i`D*3&?vzm;KBB-oo)y zryFA|Zo5vm|}DGWGG!T!EVut{(ut&g}J2rn40(F0!#`OdM%CF7C^+z(k|jC&5kCU1M3T) zb*!%mRxq~xYmf979!R*9;P-RByGJut*LyArbX(vTL2IDReuRVOiN|01`q^B=RLo(1nD^4IHfbQ@iJooyiT|1ndv z9GMN3pNNQgKO@Z={fXWNUS~gRLMamJHjFNpyP6%r%l?K0_*#T%PjNF*O+$VZj~SDg z^5}B%haz4RTJXN51q@IB0@@A);?*3`_Q9O<4H^ic2Zhe!i<5lS`*9CmvW-hY^nMYv z3y!rN%%c3phHC{t=pCMaJ#2GsUN{JG#l0P9bz1+A2ve1K(6JTim$grjYtMF-@5=;- z3!+xjhC^2mgp*M#Yj!u3hb6C!R*KiC>39bAM~HcnBoq} z9Ct8*%dYATJz>cF8K>0q^3a#XGa)liB+^60QxRQdd9RJ%M*^Eq*yK?6K56Q`PcZ_= 
zN=cUbMBrTbY9+cW17d<{8TG91HAn1D(K`{l=!SJ750)A*8Izi3rlaJtDkSsU=76JF zg%XkjTt_Gf&!7+Eua_KcNKsIRT182bVmTzONHx`G;v1d(0~gfNYLS)&lK}&g2!@lC zFdhFt$pD}tBYVJ_%cud9`X6dBeSRFtuS5P}Hj$*kp+Gmq99=U4cCoJR(fc~<0B@J* z9AOu+0XC0?0zT`J*Z%m6a6EUM>}R^m&o1|~2@+w)`Mjk-DTT!5&aMAA#vB&X0iqw%QnFb`Xyg#39* zOpi=xE4@r3MK8M+wj4_A zp%N^}NX39puk`%k0iSKe6j1H&#aF_o>(fwJ75*a{CWn^>*j?%9Nnr#bzc&ZNYeb{MjGhCp|DEJrIjAu&(f~#25<;IdE zm~2d-Z$c;+evyX~dmDZT!>8Yty486zAqV`;Q9BB^Mn%=e5CB}|&j4Hd-#LkAEl>|V z=KE>F@)>?;MO(!~hS1D5MYs6EOZmsTdVN#qewCmBEX~kL+Y;0Fg7lzsIu~^hY*Hb0 zSWNQmGp|T>#g^)@yTlf`|AAXG;{ZTcSX6&v2t#`-b1JCc+T#{0$zCz*3Glae0lN_E z_wJEb3!9bSFy?H68oxb}8#mq@YAZYnCS#2at z%Ai5Pks*$G4Kmccr1b{8hF-sW`npB9T%KA80vZOqnssucd4`G(rJb{;EL6VrpYERe zuBcKET8L!x#)WqBD;Oa!f@4Ae)UnoF2cC-v;ALqPd<+foBV$SMA(uxVCct|XGLCXQ zteqzO(e($r{aX`Z8)9}J3NH&E8ByLnYPU5h#cszRvaVMAW*!B#arc9(=;3*Jv4|vRPo*vmFR*lZ7 zGTu4nl<6{O{tnJM0m-O!ExFOkzHZ$Aa0_ZvO>>z_C}V5K61~ay_Na9Ms*!Hhb!7IO zy94UO0y=ewe%~33bb}I|UEyPI@Y>TA_}W$8ot53`ef^Fkc&EhIM?iQMj_Qmif6#Z6 z0EVmKFV)K@<(KQ|0Wb}B^z9_J9E&HfVZDY9O)g%|9ZKl-QLXf&+??{TRgV>3+!ZmA ztHE}4Ui*J?PQBT-I3CKkM;0YV`GMJgnRi;!s3$Qv{mda;({q?rE`e1a!aR33#9bG~ z@9LN;=l0v@p{>d$i=nt|{BGpae$N7gl2zdiLUjPQ4rtz5RuS)wZWF!i)Wg-OTw29@i(fT5=Szg1dCIz$kKm&5vocM>$ zcbs{yE36FWbR34&8&l1z&*zX(hfoJza1n4}uPaKt0v9_pfOCYcQQS5Of8E{fHOC0F zXEsJBl^Z@8JF7NH(#g{V7?~V>x8>vQnxy|kMpH5X_5ur*c} z0Gf}a(6Kz}=31=ndIl$*(gTJJ+Vc7|T|;428D0E{&3GPk+vf_S&5#(ila4RFw}{6Mpm= zB4%%6a-ZZZ8i&YdB}X6A;SDulIf3o~hSy|wc3q>enR!X+5Na7$_HheJP~KS#c06|m zNR!{8cW>RAJ8ibS;}^}W{WZdN^xkU7b)>JSepdlH=ACE-nWu4~jei@6x=&^}5UdVB zc>oe=DHbc!zcm}cW=c_4^L znz(yd7oDa@2?Ul%o=TgmyYRcP>yO$dK9LcM#m}uB?oKz4MQ6_KSCx5p2b#GVoN$7P zW!GH_-c=ffj2wYES9n=Z5({QjAMOeDA#@GPHg>F&EiCObz(QW-T6G85YaK(?Lyu!`@m)$Dy&#zBH#ND|oXrj9QtZwB z3LrI*!P?aXAY~FIwSpMOZ63<;1-U8ib<-L3!E*FJa zme{Q>f#5~m>Mw|L8-Nd;{!p3ti`vCIQL@D2wlf(5MUPEOJ!pXRWS?37`YGf(=9<{vGdA;f?bqPfyGZv(QAi&i zk=3CT`thLbfyO{6&58XRT074VoY!4th^ROmrxLBQ3dX4r`NZQvl?U`DmbeKqOWf=g zco2cPcRY(xxP(P-7>ykV;OrunSB-g<;x^9Ep9j&(C6_w-MDLirIFgAG!G#V9ngUT? 
zx>mmHmh9I-ZM(9uvU*8?QzOFgQ)OMn1ZUG-D8)6MwQ>;8*-m3u7*|hx;MZP8JB_t> zKVb`1`1$898G0C}h7|iiYI&yxziZd~72xG835+3|5WQYhXS>(nXEi>aKdAQ0+#TG) z<*>NZi4Q*)6T2mhS$vnswx`*yqQ;(yEjhex>KRY#uU@ub1&Yd7d%w)95V1wW5H1xp zL@pm65iX`sQ_?N^YdPKe#y7((+<`LU`SN*t^Y~o51RAG-K9)|_<9P(AlF~rd(@Xu? zYUo+Q`#mKewJKd&c5(Gll3cql=z(9v2z-|w5hXFn=oNbUc?W>rAqg_THS^iFGgd@k3t?=AZi&Z`B%ReTjR|e z+_R;ch21d4KlZlN?E#H$Me4&?5(g~vtL4c}i{D^(5NbSppvgnA5m*~4v_(Dl$xza9 zA9M-ThOzIU6XQBxl|RFR#1YMWrx8ZE>j6Jyr_zKv4ewW*VQ2O4+M=B<9=w%xfx9xe zgH_25El=u%af$gsUjR|1=O`du8{MG%`VN%c1u#iz&&yYmwmK z)}1|30X*K+qP)JTAi#_Vj~y@e0xE3tC%)E%Rg)eQ-hVa=vbs#J1SQ_md4a0e|vcR-*QuCCFx8Aj(53Ty@Sw zClmBBwAR89W?t!Z>Yc|_b_z};rQHPa)GvPY!6feV8sj2|v!&61-{v>R-tu#T1Mjm@ zC#jxF4gy_-SI$pCaP?USdqAMkIL$=Ar9Beiy!-IcaGlL@MAvz+M@=XZm-Scam~02b zPU09@WZtCF@O#Ky-F?u~C+NbX#_);jr86h5<|#=}^DTpogmVmLGN{1Jgv>J{Z~J1gOKWDZoPUoMy+A?kXh)a*g?x!l+K9VUSJN)gtX zFu_mPXk6L~e)wQbVOmLgHy7=3!QQ`KROVSej$2$#SO4!|nxZ>*5jSg2b~}Hd~(ikF2$C5bqu zP1YBoGtv6*V`FSuf>e{rfD&ikQp=%2XoB2x0F??!qK@1Bumf+O7JG<>l057-1iQi# zB3V;{HPtz=^@r}wSR=Rec$OJ`qQ}VALK?6umZfh+)AuGxI$6it%D<|Pjk+ywh4;N} zp+s<6kbOTIB@G%+7pVGS{B5}(&ONi@9XUyd?LeiT=mDdShReCV8m+dxq*{06z{_$N zKAxXsk&Qi8G`B6}XM1NA1#H_ewEP7jhxR_g!eqxG&W&J6Vn6)&j$(;ofYDSb^J3pr z@IqrjnSI=wPkNNk@Cz2&8y&L6Z{nd*bEBcQp%l(wg>x{ADIT!pV_~Qi&be`PyLt`Q zuNveHpc2mZ+$Q-tt20V{+lsLKY|Ua0I*o(5F)jKh2l%jRjw9IkXp# z2H}x0WValXFb3|W%ZM9bbUh`rEH!7w4{^L~6AzPb(YiLWIV+LYvLA>^>f>*1J+LDC zk7HxedCDU5`>=l?bAHqiQD`SgtWE(9tRCDe0ZuMr?U&zeS7kYF2%g&lplH@>4__PA zh}cd+Vpb$t?)UMmfgCg2o@Tl1F1bJsa{g>YM{pdXGFjRbNS+G;K;D26tS*3qH2Te` zr7xz>mq5;A>EIH6+D>{WLLZUxGj-HPP^(Z$MbQ3B>!557NCeaH$ z{C(q3@c`oLJ&-~~NH^%ld9U+nUSzkLuU_)~SoqVdQHSn!fC3UgVZiHYm`)i=jlaH4_+ zV@y=$Mx8;H`EU)h{cZbRwy>NGU$9R@!iLpU=a8NMZ^5Mto2=<SZ6iXu3;WvVwic0C0^4S|<|l>=KWe_LQzeC#eUa0rDvXV`x5#+FOria#8MpyJ?1+ zJ!ra%Ina24s|MTF*)#4AMu?*E>4z4PgDEDZrY(oPB5Y)tH=WCp@VwvCf4Gc4V&FM4 z$Ca_QQ78xxs?ytjejc=J&Q#W@y$=B`r$Q0RoaRHu^HuhZyRJtLmAvR4;9*o@1R!t@ z36XWTlY;?Xbmsb~Lm(kpVqy^UP8S77gt=_n@NMBX^$ccs5Q+GH#q4#D6gI2BR{2OzB>>`Q^QqEKnA 
z4wGpE8OwhxY&GauVLhgmf6G?W)*ja*Fczy{{a?dhk|XCx5i-QRN>3_K=(}2EbAehufJ&fl2jtgL00Tiyy07hzAa4UIOS=DB zHo6z(8K@C_1ccs4f_XtLTXMZd4cwJE&dWmselYXJN=7(L%<(u^^2JCs(4`EgE!EH2 z{kTk7B;6NIWexq)ks16vLG)p1I%QKwlV52lzMR#lVH(Ji`Q9k*+UfG;$aSQ*sL*CO zbY-5HWCX>;bnlPJH&tC44A)FFpH}`JMB+?3s z=kqoL5kYI)Q|LCNVbr+-2!_t-Z8+8iBP+;Ouh?Q7vqGV=n$f(VTAJuNWo-dsK5WIF z10fx+9WmhMcoZvog&Oc4G6!cj$}Nx6Po1E(DPhHq_NZ-##<$jhs@pjxBtK!eSTpg}nCV$%S0pHv93>sTT9XCA@bh`EveVo;!I4#;E zBa6Y!KxN>`1l~mkqM$C<8kYTZrM^j`CYs@isW-UO1-R3JSdIm_S9A@PbntQAZT*O% zFlBvZK_8~3gH|8SC2|4j%&mWnkVG9Zu(K{UDFhs=t`?)8I&MEX=U+4cR6uY7?uQV- ze*6*ZNKCYi@Nj&8IddKl&S9=yZAPRp4;l1h-c1y0J)iU%5lqxk6y`eMK`c~y-$XHNV4I8GIIu8txh;=cQwoCrxZn2N z6-l3O;i1;!LBLB-Pihlc-0n7D9AKB`SKo~{sa3-NPE}SG`y2b0+DtL|;Xn)M_9-LD zsg=OAvAGxl6rVZl{Y(M+6#QMl@}Gv!FP;P!)TBUwGWb=vl%i!p1NwqH6Gi)#J`vRU z%VO6G5AlQ(wE-+|Ub(pi_ef=NmmIzK97??DqHoy4&qlP*AxfyD9Z`$|&Jx^Ni}s!M zt7BrXrQhzwHJflnC~2S=1?l9hpu$r{kGd%dZDL)l(c%|uP@j z67f~(4;&!sw&YjRJ;ZcHQZqCGzoJmRQ_*Bd;Ki4z54)P+jV)Zqv)eHJK^j*~9Wa z+wNy0HDg!DXWp`JVZA=}lLo2(RKtw9TM$WouTkE}RX1_O@%Dyv(|3)zYoV%mTQp13 z8uRHFVy?#}8rc{Y|LPekOL+sGfHW-KHquVIWzZse1`MrXPFZ3@E@4{vUUs^Ghkz~N zFs)MN#|z+)U=6=Xww(|2J-6HO7r<{G+A@B>7tJ%}qd`hoy2qMAdD7w`q7XvCXU0O$ zq&vxPJ~IoHj39WD+1jRcQ@1&G9hrDU-A;VT@V!`2i(^1&ciI|;y9TvbT2M#FY0Yi0 zjm)wO!i16Uw?U)N$R`GNnLhn^oU*zWB} z3(1nj#G&`l<|z^4!Log=EF+40nyyxrvFtAwsnV7p=s+sGp^lgH@ZB%u@pgicPU^Kp z=2B=~a{Lq5a^EHtlkxe1hrkP+iBM(Ry zSWXys_3SxJrFh@87z8}qh4QG(mV3%88h(0bfV4poTc;36A!!b*v4MwoYL|;maURPs zW_*F&gza5-anj83H_lmb^YpqK4G5+p_0)OY?k!#{kuNRkIJ6!!y3j2U>s z&zMc!p8%;Q42&NLl&WxKY|vxl{*ri|f8ICAI2O_QtAYtTDIl4{mR760bN-xaho0TM zwv@y@Ah2G*$lvbvx+Po^pfoy5H=j+X=DR9@Ld`;%I21z{zRVkW!r1$^Xte=Np^C{; zW08dxM;umUh24JooK(K0;zd)`7{Q5Nq?SK1x!m8l1n@Sdk1Wukb+_~6S^ydo_grdQ zHuXB82PSf`-6gB3{>JvK9Tyh1b+TSz&7}mRd@DL#af5*lwv?V|x*x+(3`*sb3U2n_ zMYQIFm)O)|BF;3i>Pk($Y6KYQ* zcWq~wxsQ~JgHWQeH6?tIHG1AgZy3lsW)ExJrj~G3A$69>@_z&kY3*5WnU%K1mDY%v zj0m${q$@J1B)8-2;7yOekF%Y&b0u-iD6HEa1!1LgLeH_kJf4%=qz|r%2mo@aAcP}vkvZ%nAkn*3u!?2j)cgM 
z_}x-sX9&~&xlMsImh27-dZJZzh;6^NnSKv`>wv^Xj5}(Q5jEATJMY&^R|!-eS~Ps@ zE>j1K9-GPN_*4@(h&!iSf_(W!%&Iz=mULf`D)A$>1K1WhGv0G{Z0xh<+@8W4VfdTg zIyWDL8E*BdJUPAxFB%`WF08bFX%Y7G z(f8H1UOtY+vHkb>3XQSs;Ar=fPGY)0N~N+%pFNJDO*&v*Z^dqU16mX8(m^Pp`pBOE z8DykxJ(g^^j6@}LZdR+%EK__M`hAY{PR?uyeGyLAosNg-mGL0iShelHG@pKZmRNMB zON0iS)vzFMDPuZ&^-k8{&cZr7Go!*pkI%oaQ1Egkou`W!i0c#O(Xp@edUDF<$epdz z9yoKT!odp$@o*nF@fF=o!oFDIuT*M*^UcW}1=!}YL!Sm@Gvijq;0^R(l6M1Tlzb!# zMam?}&hoYO_wE1~if#2#4tW{gXORM-UrS?3DGn&|(#`+8KKnJ!Uwva-B zvD-KNhbQvc%4#p{)r68r*Zan|9oT}>(+kj%1+dWezKmMRzCZyCsQ%Jh${!oMbZtqy z$%5Co7Zb`tT4w=87{ z3zxWY>_U20(tPeqJfQgK0wKPIdMn;MuhO0*>+#o_pgMqb&#*?9;DlHRWCr{U{>Tg+l!W~ zrVe-VuSL!mvqBT_gg<1v*@{Gk5!dh10PcNY=Tgeg&n8c28NEE&*2BIM8&F&!@hMbJi&(%1e2qyrpSo3-I>g(#jlPlGuujD2R`V*>C&Gq0@+jfw4Ro`=+H-`DvqRF2Ct#GFtvP! znuR9RAqcKa^@S+Qctb^yyp1Pgl%+IynTxtPG3NYCT+CiCcfy(AA^NYRX4vDyX%YOR zjv4axP_8R7%*+G*a&hXazV-k=+bX*k|f z`2w&=*I56+oO$k6rTio?9xKs%I8Sl!vz5D4b%e zV#9tGQ2~f7<{Y%c(!lo#b&1DW<)6lvYf?rCc|v-y&6g)wn(|~{^eF+wbRp~$A>ko( z7P4By{pYIwx_`5zp#$|gn4fQgBxhy3o&h3CWiscNAO@NmJ!D(6NM)sTc3mTIE3tcm zai34FO`awD#9)|*m>YwdY1{oXL%TY@!tg~w-mxCEw`3)Ow(QKFKVRU<6sSi=DvNN7?`2)tq53bhLEu#jjJ5XhYE(eDA?b6b20_& zo8Iu|45B-`x%On2?Aqq*7DnVaLqY!BH^O#5xF&vj18mW#pvo2NZ(tITrS8p5^7CUY zP0XlT;1sS&EBgYHc zkFq`L=9e3;L46%#cB~&2AtTe{E5Ak|5P9x=k7E2|dGi-ATQbDFyo0>WH%dUsov#3m z9URcmY#iEbsh=L+fw}2}mF~LlUKS>&0(j}gIV9fTVsA!WB1gkMS6CArW!+4DVZAi2 z^${)y_9@NuGZiz0z&?8tbBth3Dtng(0trGTD{EtX?=b37KtE^rdCv<%B#&GsVLA%b zT;_v%dME_!=BUWWvA2|vR4UOlC1=!Ia8%)QtsKMgVUHgOtZ>&CI(iY%21Jo<`peNb zXx6Uu5-18KMvyFDsb5yGdm~?kIPffIcnX^&N ztcGoy5X-owZ7zG;zMA5q(Uj4cQqNhy&?LOaE<6!R!cd8oHw46}n+BU{`J8&RYZ9#m z;<$&nWy$H1EeVss~&gjtgiB)vRcWIoO)2kZMfFgA1e{ zrr=b%0Ydk)xO2zB?v+Icf`hGc9TyimXI|CJ4Dwv_?o^+YNtj zYac>dOwqxkTMi&}C9%1#$H*bbJJJk@>P0Je^TwYm>YNZ1>krSk>!2kV=tV37jhUGf z>$fwQ6S;5)rb)1rZuE(Wp{2sN_-Wmntn;6w7%1vlEsQ@iXm~ z*5Mn1*_i7e8O!c|URP)0v{oM}q>vdeS39d4Nvr8}#{z>(aJ^a3i{Tm4+LS|>D#t%% zgf~M*$m=Ve?9kS2;(4}P)Eja3{7;?3c1Bgh_!9AfYlQHRzdVD5lDy=*3ajv7-RHy( 
zRY+(wnt#uNm1w2e-c$l!h$owum+WLfYvH)dRrKW%PU0&f8@u#CZ2|8p7Yf|9sgl^W zODuP3C>OM zpfp53Xq@oHGmu`cl#A+$q#4TWPf|9$F2PN^i*`kt{KoK!4K`k(?Z;N3pxvm2-+N=v z2(xHS{e}>yPc=EbOT)6xnqhoyyD9#M{FNP17{}ZY5_OVDU89VLCbm(yjhR*4E86OQ z{`C~!wBb=_X}^z$St+U3F==r3)b&UN*G2kbKtLru-ChzLV6v2aQY0Q`C(2)B?|-Vo zYV)ZHtq=!p=k6ycno+9y7mRg&)bymV*Wug^=9P26m1EQCwFqxet0k3V{PoP$A7o8p z2`lY(PWUp+Gx~^8duEe1e!xWz9VFAQUO}_r6$b5rND#JZbzPgh)}tJ;oI~iAVwG23 zA3sFpSUndFL{PRY322GA!HwO+QDQi;d7Ey*t;4TV!dI|tTI(~Y`zdbjaSlvP+|?dLc1oO z#{Z9ugym;q%D84EV~V!!4ygwluqB;|p%DaitzOoqmcj|Fqsj4xSFLr_@vR=ks~)bI zvkUE^Vg>7S;qkR>;lGWbfC)-Pw}Ha~ugIA5^FGRtuRZgN)n`hb-hQNJ_zChcAbmxl z(`ex`Nb2&LZTEnqCndb3%mT|9zpZCE+*}E!)@IXn`6#BnqnjYIR5qzu}_|e1SXL9X45q0ky-;MwCXhX;uj-?kd6aRzPUi z1z26sh}EipYc&UziC;IfB~q+; zhm)vrweGgU9*F zJ6GXAs6QkD6oAZ2DpH2(o1m7cXR}*jjaOqRyGQhTabq$?Dio5L?D0-`X>MqI~ahQR52)f30`4uJ2K+MbH_TY(69WG$jJ{F4=hA$({-;jP10CNu7 z*&JdwPzCbHAVDLh3la}PH>*1GFR}2+@>uS$PPpNM(XMs?%qfLABg6DW41B@OfIHe% zI2@}dESN?27eaYwV4&1on+|pkB>~QBysN!+mLM+^-Hb`K15VU4xRq+=>lbRYAU`a%JVYc4hRkroxdx`FnavRUBY4Y; zn9$F5dQg^$E~7x*4g^iU4E)YZ2egW^@4-zS2X2w4bfFwknJe^Kuy zs(^#zMIDE@19^eJ5}0Xi=;2CUYnT-vAav^{D7c-re`@{o9XTdeCfG8(Wz_>MUo5Pi z2a&oGYGcFDGGLEN+siyUtsRVpN zV&eTq!+%BT-7c4g#79WGb1uMvzi@GKggk2HZeFErh1Lgf4V0oa&T3M+p}iVP{rYAev)cbAkG za_bwV3+CVRni`FC&15U2%VHXC(e_wOwR7f3*At|11#{ zd?iMElh5y$_1lk{CgLON+ZO29822MYNa)Q0cIyN;?%yvfoehEQ1G6n}{1Zr>krFED zLpc0|0Rc0s0q!6Q)>x}YviD8Sl>60W$+Ji_pD=J_z#H?n;;Jf}+12Y5k|UXH&c9_917qZ%3U&KdOeDf`SjV_O4b3yJ+Paj%5hsLiTfj_$bh z>W0pN$l69tp+kAfNcgeVX(ybN=fX(zl|M)TZ zQX~v0KHBSvl11JM+dqeIr_)L2jB8LhGaA0zt(WARqXFk1%qX9+mJy!)(1lX=G9m~^ ziVWH3)711N8xP$2U_)oxS7@VuG?-9(%u;8q$H=IQkDL&eC;&_NGkcYXG^Cx39(SI~ zeu)$Eyj>fBY(`vDXHjd!GSRYE9I8vW0?0-vwc3wQD&7nQi5}U?<;sifA3`7FIM`ZD z#Ye5vJzDSc7V^uHw+tzXL}fY@_~&v`9$%TisblZlfI^AlFgVCAFXgoI|M@i%8J3^Z zokmpN)myaQz%NR(1MU%Yf<{>koDAd=GJjGeDTk&K#2h|4?W+vbgX(SnngH$)=)jNC zJ(D((VM41_2#>Tmi8$})uhxdwIfRI^p5=HkkwQFZ_tVue<*nJP|F_HYHZyi&5eH!r 
zI=Gp*3WM@{$$G@bQ%gd%V!i_-^IYy;iqg?_M{E*Vz|CMSaRW`)Gg4YwH{uo%Ebv>Vt?iy&)Ig2VxV&!CU6(Ok6PL3crR=Z=55K87Ic`jES>hI~ zcy$5$TJ*|Z`qarWoCroW--3Aiy5+H*qc&QN$f?=X!V-YiVd{hwvP|YgU6odZNNKRw zY8&yB!(&$@nmm?K`t@&#hj7&O{bNXO{%k-F-_=B3_yTIDAng+Ul3;pvwUtU}CC&ZV zmAlyn(}R>&BS}QW_c&Phi^yo2UDjfp>C(7qBG~B?*lfr>C}Y!{Si9Ou8?}CYJW||U zWMg{2SUep7%PTZXUoz{9ZyV1j^vgk_{275WQ#&ot`-`o0@d@SYM$^$PhmPs!$@2{$ zgS2UwA=tt`yXmcv{bNtmg?5={Hsj$T`V<;%Fqmp!Ee%aaA{0~@My4=aB|tADJ{4nA zS*Pls(np>QzEut)e@$ZW76`F$A%Gu{vxWP)G-xnM`^l~VpWDRlG!swRw+u2dZlQ3fo6xDw4G&i z@^>X}`!q(5oFDcJzjKx_PMFsdVfCLjn=S0%;vC<_?-4*6v&Bgvbd z*~QEbA#V7SXOmoMRUfi^F@CS_Ik=Y8-LjOEDhmE&1iP{a_wjs?ORijB9&ae|y`4;I79m907eaX@h&ELKJD1KRIVRALS zma9LJjLOuBNob3)Ug{dkXzki3c;^iB$b!u4W8V7tTBl=igyST$^hw8LQw*LyWRCxH zm<>g*pG(LI{plrYZIV{EhNwk^$R;x{AE%$ZPFrd3I6lPD%HiO} z`zr~fuc1c0_-_UYgm8P&suL_qby~$BLRbA%cXW#SnYQ?|cyQy&dP)$h&1?$D!}BJ# zw3bkyX6a#Aqwz+Aa^f*HVD-H7*? zfVzvKo`~y4_cxwA;%rRv(B{2Y!_hkx)=t_j>PQnqX~!ww3JKGUCoge;_4Jy0sMB@= zL9d#we|5`Gt-V{XBU{oNLTu;85-=~5V7egTv|xl5(h&l=EqY*;J%iq9)WKYtA%8bK z>#y$+e+Ev(RqC5fI7HkMSQ+id3{r@WQO3WXU539kDM%|KBJnvVESo=5=#emw)YS|9mUrA>;ob$G6L7x{&b&;V9tgB}1T60r)IJuRu* z*fVRf+9^gRFj6s09i$E@2&9VQI^m5x9@9eeL-BS;&(fgdj?}cLx-c{_?X(GPqT-R6 zqQ;^3r7w~O=2GK6-06Kv1S^^|@Z%^7tX)rP%GFR|VBMq$F{nLXL9M`NO!ZU)H@;|h zM_s7Do|Z~T-zYo~0fOHCM06Qj?K|>ABEY`42um{zZP^(EH05fVUiE(%aIj{{7gfH( zqzsA-YGs_=*pfe#^)I6+Y1+zO34Zw?R?8ya+oU3EtT#;+}cR zWdr(AW!ub#@f)ByzxQ+9R~qgZ7@M|{X_n%A*K=+>E;(7Dk4nP_ z(el_yV!_c;$BTwEkiqIHu)QVD_)xEYtz-&?ozhMXSLQ|~!fh|7#E`U0>^ZD$PcZP& zB=iq~LRy>twoJ73J$T)zdc!;=4$tOk`7e*Apk9yLy;s0=5jFF4nRUH?#s;Q(iIYM_Y-~*nj>epeFt4-wM zFFmTsJ?0r&90`f55Rh(dqTz^41EbX4Va0N^FZMAAIW>iB|J(IaC>;~mdh(iCY4r2S z#FyTTY0YPMb7U$e*MV}|3IcA%>op_hko{tAD;P^V17|~_R`~)kbAI2%^6dZlfWA&< zg)H8WbNKsgYWYiA8SW!+J<){`|2$G{RbTU?{%P)Q$>p`yh_fwT{g%dhGKH!G$&S0P zIX!1d*>O4)1XykX3*L}3IZWq@q$U$+b=W)5wD8$)u0Au2eQf*}c^O%yD8+75^mfX9 zrZEZ|HECZIfBsU;s|2>pPtXvG&2B)jYUtcOC0xjTG$CPrnfu`nxkV-^bE2p3 z!2T`PAb>@?z9W`RjUnzLHPx7kqBk>C?g7zg8hBA>n@pY&W+3OxlKG8d1Qhv4*( 
z53nU9Z%+{o>W-gwjxL2q59$H0YMnSc-;l~+E?Cl^M4vTrdYQ9N<41q zEYo5=?*v5MU+2J10(=0Fi~zT(rv$Rx`NC-0NisjcpTsli?k>^FP^=J~ef3;?48Q9b zgSCbzA%;XOn`)CRr~j@!NEf1BFYH7*{^807VRM;|ow z9gXOgTZ=RcCP2oon+oNB!o&YOPdRjoy>sFYAmY77xY=vDKBkVd*1SQf-G53B|8={A zAorx>8D81ws3z(}gMjwIo-9BcHB4rc_{*(6DAW(DCpjj;G>4$z&87;l(fuY}h=^~} zG@nk?kP+)nv75lu7C#+HSv8Cnj9^ZWLAAm9q))YryZyoKY__G0LS(xMn8|T!H~1@I8Vwob{mQEcn&EuJiEzm^73b|UoW8NVwU$LsC=3%?{dO_oak zq>#B+h!R`{!2Oh-D&7>=uG*_lsmj(!5m*h$OOWV)ob8Vb1zI$tI!KzyS5DF`s!WM; z>-tvNxp74>1(Lci6chqHPYBXfrUw;aFbwd+h2I|j2X>U9m$gxo6a$^8jcT<*OqSlF zdNj=A<@KnofP$+fbGs;>fMBGVwmv|jYHZWRSFLvM0epngsj$4F45Sm1Sa>wjLKed7 z@kQ%9^eK6GA>&oj3%exuH&yk|qymf*E3QfLI01t}>x=%pkVxKtH$#9aPC$m7R6UyQ zA@WD>xY-a!;DK$PBhndCE^AWj(<4P^rpXT@I3MFF5V^$nAnj$70ntK%5^#A|<8)jy*5!cC&>GV^ zeR*&<;O^FG4Iwc=1;6P6;lx!nP`NT z#hvQJIIMIpo2xMw`YR^esbQhkBQTsmaDITNggY;~2^&FtKv7S$mT-yf-Z97*^-*n_ z-4^JS0jX5FE6uB4e0L7s>8YAn5$r#*r>GzbH`VkHxJ_n2F2= zKJgy1xC@M7NN*KO`8bES{MXyA0j_-nvvuvB$)M!3E)|YSM#%vHU%9kT69ny7KJ`$+ zt{nxBa~c0b9`2W@i(wt)sy%Ts)!I)-`YrIUh?QT{Pa_2o5B}CH%D&-nAq&K^<_N6; zgdXeD&c2mKC{MeHj>K4JTMg8!W<_Tg`QIh(Xg%7Q}HLhTfw{Kq>B=k81A#1ML8SW~#dvKaHnxU%Ss zEG-VPzQg>JYS9Cj@VIMbtjy7O%6}s%`Az3Z->=`g-k7`l-D6fCCm@zc2b~rIqV(>Tsi_3K1Fg=U(oLQb+#wb|ot7NwvtY(*c07IAIcB zvVP$&+so1)%R#{*6pRdbF$DC0YX2xTsPxeYk!Zb!OOM#oWzHL`QVSpl^QN0d^#_17 zY?Ab{R;!2S`O1xBgge!Rvp5oB=}0YK-ylH(Sc(&h=ZP7)nkId@M>DdBeY}IX&=r%s z^d3tjWR=o2YJ`q%wgY?mAsp+CyIC5wmZah$ory9-_g0n3KJ--tQ%6t3q(jnqL5P;L zDg^+W1eJ}yvagunQcHGUZL+*9$L2Y630DzP(rDon@_rcdRr=VW@l$FJs=D3AmTvAAgk<=x~Xc#0brw7(5fSe^r zXqck6qr0y*$M(j{wT$h1J7q&&UO@`f@0y8>znU6Jl0EpuuaV`|CGG%nQCW5x8d~of zS;y%njm^jjF(Y>^+k82Alqt)qfNeQ%K$wD+ByMtKs{ruIQ_(i!$lIg=chi>DAO42% zjzpU5j(O(wN;2ibu@iyYjBX^u^6BBE$T#?G0j+i==z-Q|W&=Cg>Ohy3_To@vQoBg7 z+QR;%V)Y|efqL|uyuaBmJQ{U~GfCL8QcSMq=9C&mWyMP+f-wVd-{VP%7=Mm=%tV6Hd!bF{4r7X+zK@MvaNb9UavY~uf>N4q;V-|Z=)a%>Qt!{NkK za~vQ+7A>EF=K7-0U#ow8u92^|cy=o(+6LT&Sjm)v~N@p_5G zMr!NmX0FiL?&RTdp!JofcV*D+M3#mfJ0wg$7y& zAwr#Ap_IG#gR}5cNr0Vh?$jQK0hJlKD3?<{Hwp}%wx}Nq*G1oKqhOw;M9&x(7e`ST 
zl+c`uDdRN>Kz+Y?r%|1uVUm`p^|9ok_j7ewe>^7exbq6-3zOpW5Y;Rupd|kCF?P0v zu&E>B?E93pZrh3kR?fk*N4J*EucMeBg}ngMbb==dhM+(X&?xy3{w#$wODE9MV@s=g z;ON#35d&N<`oTc#w1IxLjk4k&y@t+%9%IyA?JjRA)~zX>$}|4W%gYr!>zIJbsTxGE zXpFpB8B^}9F8)BvQWq5lSm5efv=vkGaFLgV;|jFX>b}8B@Xv&IL{Og0ic0F#gL!hu zFX{YKILhdUyJ^WJNp0biRm&BKTyqs;c7|srr@{hinCznf!fg>I=KQsPV14_M6L`nv(faWYHkYK|k8^Uxb?j=)* zgqHqvM+*d+z2U7G1dy~GSl!*@w5eBED}6-u#~IKBY*?#t+j8)XyQ##0JpDc4z=1KB%UDT)F3NNh zq-rJ|Eo~yvE`*pOer~e zqKDjpUHC6bR6()7Oxu5_*yl~jwauHj8`Ap8enqeylI9cv%>~i(8p0*L`jMU0X8v+y z+i%0w>mz21{btM$k&`TD$9>w<7Fw_Kr`^7aG$dkx(o8`kB0d(Wo<;Sv_7*H=WHt`y zBcDxxztAlWW!qNS157+bQ3*qcXD~F-W%~V)uvaDh7J6JW{=j2V5eP)X?;EkI+UONq z^M!%(YJD5U(wtlkwpqi7?+8VZrtx=P>~Y~qNmI7SLdpt|_-V+2rJa9tt2PXlSSY&D zqJ*>o4k88lo6&rK=9J6pPBM^%Kg8+?as#urWJ~cNnB^wD6$ydXW6$N(A^(jRY9U7@ zm^6{ynyiisZ%>p^-&^JYZ4T8$vAH2|-xlU`b(~B>1IvhHQ%ECedv<3OI+XOq8I7r1 zOapY@6CwVC#u}E=0#ZyA(}Jue>ukt6QVcapAt74m^YHcnSE*1kN}r=(;FJ>RN8s_6 z;Dm7BIysC6?6YE|z^&gBTTHwvF?!ld$!J==`b+~sFT)6Ke-_3a9ZE)*J^r6EwEvC! zBgbWBsU;TD-p&i3upxE*rEro&hMx@QGeZu`$S4W>zXu&>U82QW0P{XhKBndzh@y-` zY)Ev3x_&Wb7E1}X#Sq}A0}h`9t2jiGV7{o0T=7CCo5hZHlbNjFa9=~U>4%9f#XT5$ z0*RP31mYw7)}#m|2;&?7foc_MVZV|~^pB|7GBK} zD6SYn_Dn!@IZ4P|-04d{zzNeR_hj_4w#W3<;i7D+%xv~5u;Clu82xuF=dJH-(rraz zwuEEDx@C+nS8J1&-5ro`^C6Dk?(+}%?Z?Up*N6giT?+F^2{La=RO^SWYT!@G9e{bU z-ez;dbZOS|n}b--XdXGzDMx0&u|tXiYhsTKcmV5G4p5c$mN3(wTz#+5lek1nrhK5) z8o2+P+i+MC+R1sEmjS>Si9$U2bH-ydl(}!R9YbdPU=}=9Q2HAs*TKb;0~I-u_bkC< zK&N3-RLch;0j2boV|_Y-#A?TnCl(>H0njjhfZeH`#D!GX-%X+oSW^6ur*psJ(D5G{;P#*#7xmwoDLSO7=K8qdXA z>+&Qcl@2zS$$#VU<=7LvcEp-4PR2v)S25M{ryID@Jwb9J7>8MaD3}`<(=@L@NJT7{`O&Iw%S8+IxV{!8d z5Ce|NA59`}iq(i4=Iu8$WT1qK1=jYQ*p*v&Z^6f#bu}vHgovr`3ko%Nklj#eIRq-g z?P&u!4D#`b@~QvG4(+C}!%?r?_LYA7pnnVdMDjmtmjta0GVs}wmLL3K3v6hkH=m{G zR87lR>Ij&P=JrJZYLhVuKbd~gcO6F7m>lpl7HGqyOjj34IaDDkSWK&*+Whx-I$mR) zA=7X-2K2zq7i4yRGoZ5*eCW{YcvE^Sa;N;8C3*R`1to%c=HEg^lv5J()h%o}NaOla zj(8=_SVhEN_avyEYrl969y%sT(PkgTmg@{@8jFF!KIVDO|38r2r$mY&`6X-rC{D4Oz!;xIQ^J4A 
zQL^(Z+v~+UhP~0R@JEb{-2TjPe+@v)+2;WzMm2gpz{ZO8P+mq2U@&-Ud;zL>ixOy6 z^$tlyaVJsq>YwycLzM3+yq7$ExW5ojl+{nzJcyb!%q2&$eMW>S3{oP%J0~aT5`nz* z*6N)${`#<-*Lk;M1;2$IOg*c15)yG`oKF*Mt!Kr%%cWl7h!L}cmFH2Cn!vkzp$VJR z)kfjNc{iG_=0ompwEUJ|VWX8giYVYK!QRWQ33OJ{yJ^ zzf{35oy8*7gd&?g3*gbK895!_@Qy5SUJ05#wSuAKr)-2Z++LpRZrAJ4 zFZ~W9fDUcEGS0XXrvNHZEkbZq!9c%|DVRRpHb(ul1UG>q7scte;UjGvC)mW?G)=uL zW|t4{J$?D;(2s`e(F6@}eh4~eS-Nwc)1qlm(_yLUh|x`LCz9lu%K_$5oq$Z&{4Z8P5qnH;lr{6mA=&m}xO`0xPR7z>co9&u$40`0h5MC64mEO3`qdiI#5guG z4A?c!2*xQkT=L2PDw(HEb8bN;L$OkV3I7v#s{x)s+8w9w5%YV$(qHErY;q!DF3nw5 z_7Uz{g|fs5vapI;Q-~!D@whvG@`6(2$~P95-LgvFq)1B1FHLG+FLMkc)Q!jFO%-o- zlHFr-z6p>`oGmK=WvsReK3m-*D>EDiwZ zw@6hVC32u^VS-~Fnr#za*T)}c_VlZQa=p>ka^OVs;#}$d!QBDe2rq(|%$j_D=XJMb z?wRq!oYJgkpvw7yJ#RWagBG0n#Y0}kaT2!JK~wJiDBcB0YbIdtFqu0&HUV2)UqAZL z*u9^sIx10$8`LIE-27NxYVp!twynx62q2Jfw_14uSREy5KUxn!)Ya2la_wO{sQmJ7 zY@He^^K+sZn&H#IS|s(p*6kk;;gN{o*>1TSzgEY)XA70O1N(Q(xAvy<0ulgD_oHLF z+~SPe6gp6axrbUr85dbkk2<>Bc2igD>y_QdZEGVN@(kXDe!8Z~s`3&rn_}7Oxv{^! z=!Zmzi=gj+T!|M>WIj<1IMmV9l<_cwK_^d%yYmgatY$N6$5#2>$|h|g+QiFU5X`aV z-|8s#R=`JNP-equdt0LZ3+6@Q+Vcr*gb%5FTk?4CK`7k8Jv4c+Y%Li}y`#xjSbO-n*0r6dC<{p&nVyIvKY1L? 
zFxl!qO6FiuH?6H|gtObz&J~)gncql&Jb?yj9@-@=D|MsZ9({(WpPyaL+a_ipyxmTrD zgRV=Ih4Yd{B~;D7Y=#L$ya*4-27TXn^CfJG5DJZ(k9#>#s}nija&(fe@h6pqo#xgJQO}Lq;&oAW~l;lQ!d4Q*P$=y@3D>H%EbJ!OvKsb>sqskY-3_C2-1JjX2E^`pqH~Tn1>(Etj@j zU4928D^SUP>AYQSWYi#dT*p)mQrxHXtV2X^do)(~uj>b3t9rSWn{g0&3O;El^31^v}%L*lf8(G!tk{Q;)c&vD@rjGOmf!?_cyw1 z;knT(3kJ+=kAYN8(RcB6ueO#(skw|w=83jKl6U}H101OdABy|ix?;wu_Txj;fnr-g zp>xA0ErvvaPl?(VfcQ$a&FEDnySj@;^WszjM&ZFP9KgDeS%KV{MS6@dh_5OAnT`jT z3^j%3=X^9uF+n?R$g29lKDt{>47}h#EnB`sxmRho+9)IDJJYD#O_65-wDs577)clew4tU;a*WYp-Ja>n|gZa+~#2+*b4Amz6AiQ~3oWL*S%9m`IDglZ(UD5c&{9aulW- z`|q#>jfNrsddRxwsZ)dtisSYv5IDb3k{;5Wh3E@EQtBsEFrcy#C5uAJ(A_J%b_KYO z9efJHFJEzPvxa+-y?c|sD=^#YFysV;_4R|b95^=E<5QkEqL+R}IF<2FPe zkD`}!ih3W*4)8O6~j{>B-gbke!WH&Ofu#6si{?=P% zF4bdAT()ATS~x_*y@k(JS?+UNL6vMBXtf=ZHi(TR(`os^bFh5Qs`a!i@^14gy~!aE zVgMtVXhr3xnb2|3t03@Hj1A$baiy?K;6N++!t&H@W_`iZMPFDh%GEU4gyVtIex9AL z=XjpHP}jI98tmn#`vht(UL6lDw6orvan{HDzgT)BRV9V)-vokfrZ5xU?~UGoc~Wy> zk8t`lngL6B5WH!@a=}v8etS+@o1un;pZ~1m_(y|mhA$5Cu9I|l*% z1R2Kd>UEWxRA+(7IQ-mME^VtWC~=^YM4P=$lYXf1MTKb0Ql|N}y19u_hm^9)brS&1 zT+-E&cr>U_xJL)ozmS8nln(}nbM

=T8c->GIfre%h3Bu+>x^{i^(Pv9pr)5dSr0 z40u^)UuLQ)Dfnf@Z--ap{15`ayXT6@`2f8%y%V46y>fB}fGrbBDk}8^MK%N}T&?;5 z{dKA(?T~F=sdj4>SbGTk6i{Q&{J52n@`i-M{FB!Y8J_G%$W0mWz_g&Vzu&9LE(aA0 zN-G3K;2AS#460%ko?QI7j=(g>H**e07va$o+PakVe$2$~nx|&28nNRRQi>H~gW(mkuB2T7|5IkofhC%x^B|fF~g@_wCW$d1q$%B#{8)^Fqqi z(^C^np-x64$DuRW@}<)4OlR7uJV9;qAF^Tp_z{_jq*)X!6xEKYrmhy)GmZy;z7m&< zXds!A<{vHZ?MExM2dOlfkfqso*)Ri+DwMyFs^UQ9dJZ9jzk){er=GlCf?&{zT5j$c zIUun&9SUbrV4Xz@2Sz*jh_KlXU**p`bfgQ(!!oPcY*-2AC+xO-W@)2Pns%Z? zI%8jP>`w!Wl>aO+`JwjNV*+$R*tU0FZFU6bA(1H7EMcjDIIOB-nd*Hr|V?j@`)+E?saV^5vu^FDM}q>$Ch_AlisFtX7(6i-lUg6h{z z9lpJ(bPpe#WgLMFrV(C7fH|VRK!N=zhO$N!|1xC>XD2!3(=1lz4@t!oFvXJs@d@fk zNEi*Az?37x$qSq^ws@s}*m!eP;=8Z+yXp7AA44PdM;yRT8l5CQ2$UTItdd>09~3Hh zmX>CXXcgesonfMe=%(wMw^n33R5VLYovcbJj6b0Z7^`2gwln4gyiK8}@s=9}r^5ir zukRyaOBe6F%eP8iz?W@2Mmpn!aA1B0lcZb*kVS#XKi69*hyG=rLn%200z&3mDBizq z-+Nh4aU9O4y6G#1HGv=KJVmug)I~u?CV2x;y#n2_g%s(-ehas1FTUei{j<-3BmN%a zDR!BfIT9%M8GY)w3wi({m|JNujY=36VdqG5se&xOSnPV#Uqp7j@6*r+Ral2rtS3Q!# z2A}tqpo!)7Km)o2^illn0NidzKCUyGTpwFK2AJJjGXC}XY?M|S#Jjf=e4IHTzH_O2 z&Pszc5o4~XJ@zOH2?LXcf_EwZV@1QJVDUlIr{T%SSd^3 z$p%D$EJ71tNO~7Pg!+18y9HFh$^U>gpfGrBb4iX0v=rBHfFFe`{=YK9DUe$~e~`0M z)QT3$g%9ET%BJoA>1BV$Pn($|Xjk zdIwfHw%y@g)nnfk#y)gH)96S`0DuJh2J@5p%WluXr)X{!ApOXSsRJ9WL|3FFSzGks z-TI>X0$Vb}O6HPglpbwf&tD?dAibjeWLLMZPi~-ZdhO(_#&iydhOYK7mfo-&_6xu{cqA!ki5xAg_xAmM{!OE$IuG5V5weHUIKG=_;(5q@v#Jdq>P5;mwAZG{IgUZYjyH6MMVF537g zGKMLmq_4MuD2FQ0SfMPRnO--#^iiRaF{K{y0L;8%nEIcL4Fq((&V>ipd4-FU^W6#0 zqCGR<Ur#LNy-?k8jAfR!|lIX#=fX^k##Hw1*{CPO)ZgB zN_6EYQN))Sf5znp$X%G-3Eh`N_ZXk0n2-6fdW5 zl}=nOaB$mgTlrfUXZH>tghaLibDpk&h7YCg6(?(8;7)lCwGp1w7TR9ALdq^1iA(ha_5gzNXQ zO-W!Ieqs?uyu4yQpF{hxm(3$d?szoBGOcFeF3`p4RbV$%T^bDXMK7J`L(OTA5e&|^ z$gaZG6rAYL`!5*m_Ic<(3#U_kz%c{A+f0FEMApzD0SzIio?<`2F+BDb7O?Tj&j5*THLz>nUHi%?WMca^eWJ9c^FF!$<2t%nq{=Jio zHltsJn-4l>p74gzYa1OY;9HbQjV+!8!=~w|^^zA?J%-;9Zx5X^EbrlG&z4A`QmsAZxeF!mDLY6E51BmV4R z-U7--0*Bc=L$?2HubUn?SuYY_oXb(ypw%N8!v=R?XF^tyi{CniVSji1N-Tx)8xy=Ei@U)qk5S6Lku=Po?%OlZ 
z0f1}y!>Wr0@A%X7rJIeKo0G-fQ~z!^C=ix~bW-G{E>}33sKQ_2FuOaJebpHDrZNOu z7F2u!5;M`W69tl;6E4P0V)OFYG~=$A!`Hb|_n_WFO^J@#+ck)E@vF@$D4v5)56hFY zKr1FPrsO|5dVafeCr?j`pX_oiYJKL26@+gkRBA&k3@ZX5Vp}uz^-cP6@LB`ePWi7R zSY=4UMV=fYbsFXmJCA4lO)V!wxO+GqgL9a>fR@%fns{t2UQK@H&oT<40uR^YZ zUh;NEFG_30yb@iW{Y{uN4~lIKBTm(D_M)l9_3cX+S?ut6^3-trwu~#NRnY3nb-Q~0@xKFY4<9HgMgReA=X_#Pz zN&pu15pZ1v-uiEnk6#ZZ2{b#N*+vYL+hkn1JVwM`jP+!9-x z{-8(BoafOatdVJ}{vLC23BG5F1#eXIkub!|FuBT{VtZr179wm3ebKN=B94BS*{4Yz ztI(E?OjJB`2By0cs6Qe6!+zX{L-u`Th5aINu?<|1nuxq^OxFBhNS*mICR#jTc4?HlF-^sYx=_D&~^} zw5a@H*;<`!J}c^*7u}Wvw@tj10$6D&)9s&IcE$cYJTzwQ1&HD@dD+biv|(h+vo0Ln zD*ggnKhlltyF&GOEv{eVcBuX?YKa>tie7bch|{M?s2~zPF$%6S`}KA=wiZiNJY${nr^K1T;qQ+L9{9HJ~2SQwLy>brV5G|)jii`cwnClT#Wdx zZdP}YEHWx?u(eLQK8J1wwb}ZRmoK6djbZIgwFb%Kyf6;2dTj?j0uZ@tIeH zk!Z@PdWXc2y%`%g4TL!_ikzN&9!mml!;eN)Oq-5^#H24?0D6O6i!mwL*5y#u4~r0^zM ze{xlH3QfNZjKHIQbjw6H0(sVMn1ojs5xe3H+`F@?mb(>Pn;lHX=$KRq5Km|9V`ZI;l8xaJVxY zxKAt|!%Gv0Nxg$|OLE_svW7TjWF*d~vF?yM!5i2;t8wCf&ADHQ)W{y>e#-2JI%y9G zlbBO1LH!btqG3BOC!)GCgdVB;bdo3y;zBl=NU${h|B>+j_65hl9z@5c&dj^_J(KTQ&;&gh*@ zRUEaX#fKQ`sS#`CMxTc|DJ>?`s#Z%q{?r+L3S~wEQI?tXa1}6TuOPd8dKNaI#n^zM z!OEll(RiltZJOUi%V?9Y1(L}V{J$fKo6f@Mo_V)2vMCk?x5Z_%W^#Scg$9I<5<-Gw4|0Y2Z^Hy}c>?FBKD7mVt4 z%Q3?~$mczm&_Lw`SUVP7isvKfE#fjxj`GjH-L(sl#_vk3!sM?S07nS-J3d&k?$Zb_ zX#q&-{&cU^7k#=LG*vxl@M@c-I0)zE**gtFLZYE_uO^yiRdm24q+4c`Z+>T~5+T!$`%qRTUL=P^i*R_xocoY{V56d}_Vghsy&Zc-~_L zq79AEQQ>Rj$J~A*%@Z1-B$qf!Hb#-kvhE0;m&mbNzmsulhDOybUslP5gi=7ei`PP7 zM61PIe8U}M*v(7-Dr;s->*F z?GFi)NRn>vPVYj|6}VMdw2n&)M;@E#7!L`91y{I%I>A3sWkxKUhkN;5!P>G;&+2*{ zTA_5#bRc^;iR$Y1Ho=!ucm(urVy}bDB|axTUAwaz-JB@8R;(dCg$fZh-8AN5m>5OJX zSI?nFX-f<^ZH6AvLfh$0sYtgRgknB}WE53dfOt5I!M3LrN8V?ba6vXfhIvapTNoLk z)JnhgkRIVx^|QD2PLmLfGt&mj;UKF*Ed2cI$QS--UzWCZ_OgX2{Sl$Pia_)8Xub)D z5>CLho_xrYBK3^kwNI9}+?thvPTkGVe(t#I2?>J<4R%%#)&kdh>G1{!gu@3qsXP|W zDTp~Vfbcf3!`+G*a)Z!6`Gw%x!W)=e*Tze_=6*rU^0nZFJRy+f%qO-CXY7>$^TmOLu?+}SNDlm3XOyr!gY4TiF z#*gtKRS%?ZGqg#ZPcd+nQ!|b`=Gml$EX2~+A&iPB@6C~0EjEAOY#9Z0=h^@pbO&BF 
zV<2zr>TC3nJk3p~Dj=Gq>}JXZp(JK)-JbbI?g ztgB6lK~IGFP7%JVL-`L$Ed>8F zy;M&IvYVj{Aw23DQFi#iZP@7nIlWlk=S^?!Bz`@kp$y=>j(jLl&idaGCZk!*08ZOf z8vgkJ`*9y-E3@dL{zrL4mOL`?u$MAE-AA`W^P=w7qv?pE6W_obUBV_C5|n^C8JPi-W$tf4ivm zEoD>+h1gU?7(Ml49!V$tmNL1daPegcUmbR}D`gu(T)`(z z0s|53pLYAk^Cj>)=<(sQKhun)%&$eq$q=%sqoUH1wg&)yS{$_b_Ma6xEIiFRglPhZ zG=qRszDgsG#8M(rQeK1L@MBjaACs;vnG2eYLU<%Oph{jdhONI4`6V@@e8lI@x$Q|H z65eQ`)Kj{%u$D_qE$Fcb08d#%qedzET?(+&Bg@lOE(0gQ;k;in^9*2P+30qZ6W`8h zlm&guv~EMnqvem}!uB00!S5zR@Fq}~=Efj&lpMiB{b|4{hFc7Bd6bV0e0!kd%S_TE zxQlsiwdLgQPR=!kBuwX6HT0ro;k9COoM}Nu7#v@KSW4cOE3FUkK?ilt-=C zsck?6!5!Cf%6Z5UB zI5JyFV%mI-4)zF@h$CX`>%Z4q0!k{h<2aNvmjLc&Y*xHUTeM)HdC<@r2OOuxlx=~} zTf}JY2&QH#MM(7x2cal3W*Cq*UN_=0c@V9~k zF~y|;V@F3Y-@f^<%Y6ieep)!Nxr#rG9yg5lDo29Ud0y(PeoN42pO)F0Q48cA;Q^4@ z@gBOSg&5;3yH5p|JR~-3lUN5ciqQ}wv86i`Qf(F~x2WS=q(dfUXQQl7kV0N8)OU-B z`CLyQ4VLxb-O$-hIosfqLEFbqGNQ}oB*2nyM-D#P;f*j_S;I~ZPDO{8%3l_>V#@3J zuRK;)k!br(1BRK;k^XC|s)adzjJg2q8Z;cls@)8HBSb2tH`&SY#{mk+thfft^J)s* zkZf%AJ83*!OE#h=c^s=u)5dhsyhyf-usA?sEm6@x(lG3{*fnX*oNJ|#UgJ)W47v9$ z^*J9%w%|nrDcUdkWnxrFF!gkvT&Fcrq9rZN>9AFETBJ&u5u*V2?`EXIJgY1Gj|E>B zMOe^I1kR5jLmv?v3REi`n&n}U{dn%cb+ezBj zP8aAMf6iVn6J%QrDAfllp|$U8j_DFZ8jarnmgp^G1r%;kFNS74UQT<$a5e1#jD z(x<`kFR+zeHEjI0f^jl1?Qlk=4YKb8(sVrFb(9%!9YR%6MHpusPaU-DW8X;YyaMhE zK6Q;Bx4s^_bG7H8QjD2E8{q2I2`N?Z{sf=$D^VkIrVTNuwKbWC`K_s%H4>QDMhDN^ z7#RW;ADkhx9xRU`k#+k-TJekJMq}GtfV;ZuYsjl3f=5}53mb?S6y<5n9)}3O7gK#z zeCd?6J*Jgo;x$NBfNP&g8*Vv1XminE`3^q#v-;B)N>-R;VKE9S#0Hjk9@Do&J@Uim!ix~}Rx11qX`X?9=^*{;bWVJ12qEs_ zCoGCahN;RVaMW$wi@T6FH2%RQgcpte*3HlFc30k@oi5EPpS6X+(_9V|vmjo%y+t<( zV8~n#o{Vt0+S{%f_RK$A?q-;NcmPo-33*GkZx%8zAJjn9(!GllMa<^-t@R$};Q`84 zt+|k2b~FYhK)Oo9seSn(i)xB6+k`o+8d-W1TV&L2OWT8HL*ZSWwq??af+9~Z`}|sV zcA9%Af8<0Rx%0}~=G=JA`jg(O4efb$v92lj3QHGX30?NEU|Qy0oIm8ln)jrosev_U z9k6MMAO&~PyG#>q+iTH?<;QKGtbY#82)5h03WH9F7Op{A^lgw@;}xS&EOzKGB^$rt z6qdZ5qMLwfD!j)x{fk&EDR`84Hxj#++`gfb&8wB}!)$ba@uas{>?SZLPHR%29=ips zhDWLa+9tr``tF>5C)jh47uY(5z&wl*;*>~mYXEc-rRZmT!g8g{V0k;5P*( 
zeXz<+ls}QrZ1Ew8nB6^I8SycCYel1=kL)I%QU{vSgD_Sdu4w(*?Rb&DbdAf;q+W6E z28+8m3@W~-8S;E(R!#+Ih99r7^jD1Xv*RoQAXoYHm8{J8bsiCesa3)H`z&cCh+{~E zuD|)N{>X$V{=J66`H0V|ferI$4lD?tm67&`dIosB4s=GVTlih|ru(DIZ=eT9a7+@D^CYcJ~l;vm)sAGh#;;6LuQh9s??XWWRxEz=}L$~GxZOiNF#ggpNA$6lJ z7m7_ZU4UPAO)bgse1Aa~y~{DbjH}~#3u>aBw!FuKkbYcC-2KzQbpyMpEkL<0$Vq`{xP8f z%ygQ_8Dn$jCQn^H5){|EfE5T`OAv4)Dc?)UDO67mONvUeR0BZn(*1Hz*?Xq$gnc{d zDMYrs1b3b~7^=R}08a9V$s}cc)ESCRoghiBI9)`n8D-=L(~hp)WCqqKKcG*Fh#e$f z7`_w_xo95bE${-mWfHiWYU;H+c83_wLtzd4(Z3}cIOUXEQPfUqZ_2N90~^CF=(Hf7 zXzA&YrA5qFS|Sr}%IJZBeq2eFoJz~9*QCoQp5PN4o``|Jbjl_9Q;k7(<93B zJr+!}>@di85(UXFZ3&MiGTKjI@0C`8;Pmc5hnjpL&kFRB77Ksr`IuBUv}xrMY$k2^ z)M*2#z{}G2LWl2oiKt&=75)$=T4iTfKpxF^58KmWEQ3sD220UPIftfksNAj#34;A@ z7Dk{^LVLuDR*lLF=ZcMmgvIh4}J5nN}gBl(RREm~dP{}J&-wrX zucK7=WWNQAl+Enm^==GxetR}^NEqmBvwL}^z%FMv`rZxsosi3vv2+CYDYv()&L0BJ z?D$rey68mcMPJKhWwYiNVG`Hwu%W@!6Y)a)#Lm~CE1FCJh;~13^(Y$=3A{q zc&E{cXO0!qKz|^r0MQ!T?aMr#dmmmG*AhdK}Z-~C49n$TI;sxV791YML z9}dV^Nx8CAKsz1p?AEL|u>)&l44Q$cGSxKFE)%_{fOjkJ0LIO=)@otmwf>w8SgeT| zp=%C=jUXm;gx@ADeRghis=PMqHnxqTIG*z~I4F67iKGS;X{KV}pja^XL^NbRk0)WA zcAnzv-D^zlEb=^DWuv3nZ&?cjSyoEkg_d3eI<%w_$K4C-!5cRHT643m9MqAZk&4eY z8(l&lQv_v44M?IA`YtNqMX681io^wADAX*JkW9bwwXO@TIjYuZsB?Xt)M!(sDQf^ z99*~;+u-dQH5GbtLPwZA$0&tNbgn!<;GKnjU2DF(6kXq0;3kiG`tVb z`);6ZA9*%DT5r6{R=!^4XFgn^m@*A*{b7SJ8;`ss6{1@#B+&>*=cS zum{Afpq~O)(O_C|FIpK(oGrmDFf~jDEg@D3GIM+Pg+5dn=o5su&oLoLqgG*-$w%kC zD1N_sy1ssYNcbdP>G6a!+`zra80jJ?kWf7)Lulmu?p48vqWYjBUs z<6aaVA-`s~T&TZ?xH-}%ljLzMpgHuUn}+jHpCD>ZFm(qlsj%h0bEnPhMdlvT@jR}F z|6?hsYHx#^%oP_qJ#be;GNgUt{<66EYzJSoXCYr=a^p|UOI^b`ElToUkdd#{kK3P3 zzsrbCsxduc5YIEKwzSAvA&K|Z;6$~wzglS8SBfNzf)b;rS^^>(A7Ch57DuSu$yKuk zA#7SxSU$zlqItEVWV5F!5`X5F=b8&{oov5OG`*Bz+1rYMnxbkb10R-&?R9t6v% zRg|9w6YO8VHWi*dQ3$V%|8V{1ka??=jQA>eq%K*lz-X*Oexaqg$ilP|$K@9HAHB&Y`x1W}{*L(d z>}GX(+o5)eQqAy+9)Dz3k*1ChZ-AhwRZMZj7W-%svNo7a8Vb zM3ZVu`yi;wTRhBW>C|H}Xm{R6$tCsv`x%In3D6>>1%cR;g3*U(LjTf;uRu#dPj!hvaJ=(+V4fuzqaMLrmp+yVRw 
zC3dmHoY%wJE|M|_T#d+>zWaYd+nE(gsMLHMtESXx1u_oBy0>q~)&HpUfeu3+0ivqS!>|j3aQX@F{ zxWKRzZExE;jylDa{sX$PSDP>WkzI^0xBb&l=<1H&i?4DFPZ(N;T;P+3}`nd;q)RTfVtvrHBm79R9ldv%TP8e;$K;J+2H^Mq+>CjwU z_}(O!)l(&=1CC?!L>C<|#!`miOhq{KREqrS0vlk&KO?iWiz zN_r5prZ7WFRy(BVEuXX?V`!r-DvIufhlO|u*W5+-ddYdj5&#WbRotbwBGrW4my*3a zW^!!6i;F8Ew>xus^{sAWCGQPlT1VJTWW+1nx zgj(VrnjhL!DS00o$}d{%9A*Y&-sD|e&52Jr&(&A} z8BFp0%yfI(Iw0RXEk8E_aFxcY`N@pYDKVfLZma5_luO2!*1`C)_K^d=1R*YsQccM@ z53UAKzaZ3`(_w6nHbAH1c%Le+ofEWY#?H(K0~5$0;;@hAM0VG*ix1#U1?F#sU(! ztrF|5T3f~1TsNyqKj6ZHOy+SM)cF_S5TJV)BWa67_S6aNDo<><&JstnI~m~QT6m1$ zkoDuV`U;-8;H|dvtY%bd*VbA5dAUx)v16t8zn8tL+^e$y@l^32v9s79TT}q!C+@BKKL^-GOOs7?hvA4Xm>wV>uJXUvVW^`q~EVH`U@RsCyu87)e*b zc{cdSQS>6Pshw_X{gg7W(E#8BdVDoPvUC)Zi$w|=;!#a0cwGktw^l=%XLl|XNpGDO zXj4c00Jaw0-;g9@l2eJRMhG`^`Pxh*IGWl!MF2?~r}MFn3Q;J+MkyvHG2jRj?Sjof zI4K>D2P4B~+uE1kM+v;2qWYlq-*qj|v?cRclE+QLGYI2-)gp)Ijl$<^l1ASsBWQ8* z;@4oq{7u^8FKW{hjnu3@B|pl4bGc_6jln%_ManfPR$Aa&ryzibNTjzS@6+DBnxF#Y zp7DPvZ$vda4{V%dHL1Na^iY=szCt(70Yxpj;(*en$WV)A+qdgu>P2|l2vY1t7Ocog z3H>Eoxv(cH7JFEZnfILJ9#~dAWiuLi%3H>+Sm>iRxQUy}D=(`nkHAcB?CC)fwFf4> zRg2`+Uh#F*do>tThh7itt+jh=Bth18npOO!RFyCq{ZfbolQX&?jDxP5-_#n zPB;)L-azj-AFx9;e}QcBK&fn&>pDXx<06vNKx{}lp>)wfP$)~#Rd2@IEPvZRS= z#2)w)=deFViY6pn;#xIof8!j9S_r^B+GAGU|49~aIbZmTu0_PRcL`?wjRqCznGuY;f#5J z-dv87?z@go24O0*S#{*e&FCo2)au$IEBcg`ao_dKEr#^}xl~G-*)m*J^BBBIv(^Uq zWUH+7f@@~IV$GnvGJydM>ugG{P1HqtI$99YOhDrQZ9k&bY|nw{1we>Wnh#)b0R$Kyg4 zZwe@JutBN`w+`k8AEIu{%u zJvI)hbV!Osa#*mB4I<_F@t{2b)#oabjE`3j%7DZMN9IGWiUZ9iK~+vrk<`%!!{xx5 z+8jz&3w#~q@Oydjlc`50-ZyXV(iXmenA!lhzEMo%z$z;%ob93w<>)oW+g-fqjjAV% zcG1VfUdg>Nd}bz}FO)E8ejJMNm0=0#;rFFG9T+MTD!^{AAqII8Gw=K2@=VpM|UVVJR^Ksn2CS)73$CJVCFm!cfgOK zfmG7X!Z}}J2%z;nZp?~K&Rgo10Is(#u%m|})wnE&=!UoFQLdL>DIM-~^^ty*Tkb*| z*7>2+oueEcu{?}WbZ%AW6{Gwg!;5U=zd|@3B=g4Sb`h}d<>N`Nk$A1`ke&dCJeGdA z@gi4CjJ^|FYX14Yb*0*4y=DS3Sx${g-G5UdLb2kGbq@7Zxn*J}(C$N%e(!hc#GJ|1 zluTcvlniYRYVsJl9BW8tu!gBv1lwV13;N9N2xwiV=M4ENuTj6O_f@>!)`@o$rVlwi 
z@^R6*>hzeIKz{@RlycDzav!)}#{6Ng9)pyDgSkieFCVjZJ3v3Wxh8CQP`XKDxM!e- zR@!&<5FwgoUltaJHw)5rzFX8X;Qghbkmz1yIxxu_7{H=br@dwwrPx}c59L5a=hLNJ zlWwM1HP>Y)!Zs@}P5W=fl|DUhch{9WVh@vbnQciCs& z^O(#-Js%iLU7N+1^)>l7AASKJNeWgoOH}Aa*|^bIj0oIRN1`qYcU-|>m`nG(wfTPd z*GQG=L7DjJH_~yX%54hlSf%iNQ1_?oN%fllT`ZB{IoNl>CUKni0J z2mz3F`uK#RK~EexyWQ&u%teE zkp6wgE}Xlar*iC$IygeWxHG^^Bd-w5ks(oBo~3=eF+{@20}u(6LjJ5z|GWkJ;{j)?s(|Es7Cqtm%7LIpDI?jEBD`^N$BtWDz8-PFK`(i0YI1@eUJ1vl$TCA3@>B#lklAwZEWzslIc_ z3c-K%GiFv%#Ra==j0)w~FqHSz3QXiSv-y6pM%LIMeG~ysxpi$dzdHRzIYk8NUv= zZ&M@ezDoa1400$aIQ53ifmW2&`OKHUTNv0DxK%Dx|g-DsBF8V<#Tl`lxW1_RP3*7%cN z(|g<$dXf!PUA0~ZZoG)k??tpit@&qx&yTwq^zX%Cn$}2G7Nq~%AD!DdMciMT$xnSBDgomdWe*q zG!CrrPNA3(qS;p9Ep!c#CH49fnQ z8Ii4l3vVExkBqEc=Ly!SIwh`*E6eQ&0AFp!RS*F-zOoM@@jGa~rMK|N#0Oe)+F$qQ z9CB_=qojeX0a{Cci*}zh`m?wGyzu69+ayHz1p4dt7#SK&M8SScy1cwgY;inJXeD?i zr}-)?i~4bYV2G#(Zd7J*TvG5jHEKivGn)r$=k<`@fy7Q9@9x1$Q2*>P6SUkH3d`{p zH9+w_v?}=T@&}h{_{oN;21kN!k)`N0eiH0|zwloi1bL`I(-CUL_R&~KXMs3ME;13r zNrQ!uSG&HVJ$NtysC#C9AVk*n+`V|eO`y*F*`iR4-<#8b^cd+EQpksWp;n*l8@eg! 
zTpQ+$3&ldW?ZL>KD5D|_r{#$VTfU+cHTxd-0O8>ucxN!fwMTU?)iElO0PIx#Gm}I$ zFd>eedD2!8rc1XA;HV?Oa$i?qa9T1clo1n2rXa&@oviOx*po@(@MMUJdLFS?$&-rI zJLAOG?_>#Rz71-Hh)QlVxyc}Hrhdlb9fM};FKb0E6Cl0z*&$Y<6K5Ft zF#ec0Rf~1zs5YTE%_QUr@dRqEa2}0BwqIEn@g~^X9QZpXI}n6Sk#Hl_0Fy{tj{LZGgfte|nPuS;PZvnWD$ zE$yE)vv_&HjZnUfQ^b7EAlJRp7l#+kRtRBDPN{tjVtc*N4Gc%hChZAZ;^bTE&XfDw zYj0ns)zRLo7c6q*)37So2du#qkn^l5eY@*&tag68uA!py8yEUJE}XLYdnJf5x`N^& z(dSEd!!{@xhtK9KveXPy%x>G`2G;@5FCkVgXX$9&mo;Rf>YT;?^B!Fv`YI5~2KV@Z zdp~^b@Sq*w0SKy|W(bpgZ)s^SsNL}Bdg9dt5RE8r@aJJzxOTBgf93t^biTOpmXleW zj>y@bPLy*E${Ejd;5w~;r#r;?w;!;=-v)ELD&%c_G>qUU4#No~1EhXt7KsWu5IIU) z&S2zj7#6-rAh_b@t5H8xT841rF`)Jnph>J&`#P%enG&wkr*RMX1;GSL!I-40h1qd<OlK`Z-KpR#VVdal&<6U_P-TVg6hg2q*jrLp zL*etZpR46?hLyRW^=Cz=b|I?N{R43kFN*!ksm@3?wmF~)jsn3XAc;%5tmgTx{0X^xNjytbX%DW{J6Ub{*5NKdDj2()^Rg*T zV1FEgOrjheG-Pf-@h$W_c*}mIL*tOaKDCJU@fI=b;Zja{9Mdk8aWhlj)f(`;+Fd?YoC3ci{jA{PJiEpdMtFf<^E}@q*IWCKqD<|Y@ z6Upy8St()n`fx$q<|)$keE;y#Uu!UriFB#<>;6k*g_E_vfN*SqPq&-BU%O2YT$UOu zHxdk|nP0NyfNKlh&~q@&Cz_g7;~y<@h7Kkny@mngte%&GWcr&) zAr6?xkY(2{Ct)ZJqlu+pcLX{!la3hGgO)HURS3$Ql#%g^|*L zEj3-vz>He*YNbkmdy(gWgG7Kz0;(_TVRb`OiUq4l+v)$w^JEnH3h%d&wQ*fHGLv#a z5r}<*1#xXlNe5_T`T?bqksXX4FNQ!9^PV>E7YiWwl4>|pYkR|j_sOpUhfG>M{1B~JAE@g=W)@G$x$X2S_vj));^4&2Z19?Wee~=Sgi~I>sv>8m2nysfClS& zq>sA&EvvH$-tSYi(?n zvIlBLt@3&|x&`yOfdPk{E?Yerk@(f}xOXK=0A|S`wl*@_$m$w_tvT7VoERw_GLdZ% zFH;1$F?WqM%`i4oJkkP6&aKl!K)h>MA0k$uO$Zln__^_8GI*#_UF5WU0)=+U72ZvR zy(!Og=c}3zA?@hI7{^+I;hz{)1%l>W2kusxtBi_nyOOcb>VA8M-k30N`|xBsD)i}f z#rJQ74}VqslTzI zYlWUWHnGv?*)n1?B)~(#Qq_rD19B2Ux{-=%OSBEDEsjBOF(2jCTE)^CAbEbf6|XL?svDROSt8T_%^q{fjD&6?@0aBo?p7yM}clfODu-l{ULocQ_)Vq|eihAftgF#&OVb#NeQnfhwn?hWsmHdq4UPoa3 zTb>L+YB)3_4r4bqoVJeI7$KUC_N|(cU+ox;?gQ&Q;a{4<$$}ReffedF3nmKRZnDcg ztkMRp1A5_oAEz`5cluO38_N-u34aUV!(xUw<2fEXH?EHkSnR(}p_=;I;NQUz(apo6 zp*Axt#wF^Z^r)HUi22gUYDVKP48!zqg)#r*pmk>!b$prudSgg40UcTR0Tfc_4fv*u z>6De;Ld2()CIjqeh84 ztDcQGW~OvURyMW`FO%qchTvMDNX)j+*>1PW5|0NOxd_{Qk(pDN-pD zA?GL*QbLq6)a3$ZXdZu;T!0o|6XaQ&H^@(I)VMuFZbom?Ie=ugL 
zJjwt)j*Z-j^Y6#Se-WXtsMscSq6z-Xc=jeft#Ktgr@XcMxi}c(3sV_!fTrVPS8O$> z3GxRr%QVE7W}e)E%8VVuT&4AIzd~qE@Qg~*62s;|Jbf{>wSq5&bbx$pc9*C0h#&{D zcE#w%iMT>P>GFl6xSK^xTWAj1b#MDV#{1V;%uzKTTOq&Nia&k&?%TjC1G>;8t|@RnoGzg$INpI!@1R9t@R> zl#<%pWz0ngT%HUjcL zyUM-CKHf&!$A#YFmG1v0aKE!J(YW{XH$HSYR{d2z5c)4byF@Y-(1opL;=!vE&#x(W zPQz{j)5?(lX%2+RFQC$zzQ1cMjGkUg3Xl+WlVND4)86k*j4W;Nv@@&10pm78?+{fG zME&ZpWFq)Nx*BT;rQP;Xu!IhgQhRt=@|;R1Si$^g^|6-*wN_hl<50Sd>>tKo1|PPMBb54ej3*mF~f+!szluAoK( zUN9nS^V;)Rvlc*?DSr+U0d~J7tu}03TSiBLj9!F|(p>Khkt)cMZI462GrV722s=PH z;UHEh#KaG*lXA>dlC#ClkOG0%tu416^QtPJ=jnJ*9!#vU2FFLt8XNxm!|pGC_L0p6 zx@n5DX*V?)ns3Fa`Fl1+2NTB0y0yHLFT7d=da9^Z9CMYm!uUwek}ff-R#XB<$;3^%lI~ogAzR2x)V&0GR2A*D3A%tber*EjzQz+$9!To zW@#7F&Aow{yQ%WY;|{$6sgfu@))FuDDBT4{r|2s3?AbgyI5H z9u2;&&gabdzdvhdm9!Ym<@8c4De^@Ks?UerR(-upQNSOgS9|)#>r<=G1E)3g53!n% zkJc$H$F&3gVD6zpf*uvrm2`D#`inr-M|A}Qnv>3@<1D;-mh#-8c>8Ajv1v`@kzih$ zJ1~_4mQ>|Zu>WMtApBlcVwygpnA7tAB}60MC?Ih9Y4TR{wfmJ0;(2I`XKvFlop~5}G68e|+L96mU|{KO0q`SU zIN3rbCi8~GE>r5lGx+lq5FfdbH@?mz=#Q>}kot*tN_F_eH}O(Gd$7@mr%=*oZPi@j zHj*cL3LZX|L_>Z;yhjteou@&#PJOuzdSd2JfJc&9*Vg7QwLu6En}*aluYilILWTP{p)w`|3<@p z`<|5vIvMI4-m=!=#7Y?&K*peQO@l#l@^m#a63Y7xL*M(4&Y@X*V$;@z^3D0P(Os@N zrBE?=T_P97SJ*$=Jw) z-4$y-eu#xAxpCH9D!>xc_l)l{+)4&EIUlu?rfB21x+ z)Uj~hF%__Nw4YT}5Uk4KZ&tlEC7Y4zJvfJTnz0bsRQ$y{7%VBjqdi`4iOB@#hO(8* zk85Q)ABLg$jyu`{k9_|V=a9%lE&Y<{JR)IpGoSA097IXzmW&#aiZC%}VqD-1%JBg9 z=~11L*?s)|B`(D8P4VSQG8TMWbvL>F%qoT7LR6_jpZEUDrnnCoj~#4WYe5=tc9q$) zb)|r^n4rY3_#C>>go001^BSXi2V%=u%3P6(4-{#vd-cuwc1tn_^j{%zVw>0K^2L2z z@?WeH*(m5c4mqFWvkg^`wN>7y=bVP8L0R=Kgs7NFmkqicNV*_SWA^vDz&*cJMn^N9 z=hbvo*agvmGP$>9p;b(#Q#nWaG>}B_rOW37nV{!Ab^2n#m}Nsl>7X2`Kav<~@fZOO z061w@kb|%c9nI#E(RuKu@`?@Z^#c);bDFUevwaePpV~Y;kqa>uv^FLjx{BDvp>lU; z3lkUpC)MNj(w$kLyJhnCJ+l?(S@$`=-Sye&!s~HC2CH?8!ao(eC_6(J+=XFOnBa)* zU62kG+E^em{P{EID;Sy&@Mksto>{&fv?1bGlZ8>@Vua*%oMEtO9^$_*2V3jJ>rrDc ztBOto((qrob;zD~s8NKcG$eA5x69wWnU5A&H6Ck1As{!{HQ=MH0rvD(L`a6V*F=xU z-qd;Ds*-9#+6IGHU0ha(8mn?sfX0o$*NJyg2w#Blj-zBurdKVZc3{%_bDa3-^8TAE 
z(K|;NM!5$e8W=iyg_6j*_zB^{eb3z=^O8+EkdIyp!&M(rTuTM*PYj1$CRlXf)Iaa& z9NDe{UMD28Yt501x-Ygca&=s+1N3rnNPzj99c*S$pLXu*rXBP(CsBcdd;FvJqC{?9 z<3r>qW<9#yE{T@^WFX%{{2xz+4gQ1QMu9*k%qsu6vQG)Yn8y3`<-KE zhe(cC&Q_Z!BUt}MSBpD(uFT#RTYw8LG9k@qJi16blL(IpDtbgZ>Xwqc#N6`WSw-kC zzf1}TJYOcPi8JeH55uuf`!Y{7hodXS?M`V`FYONZ(iwb4{)fxUQ<$;0swu4jE288k z^|PoOAVeAM_0fEsB=nZ{Mru)XXN2JH@^l)+p=c?M2*&C zG1YvzX*R)h=1j`OBVph^v-%TbD$TCS2=4c2f~8uDqZPT>?13-w&e+lGugdf=XPkM5 zLtgP@Cb6-5KUwiX@joFBrwCFp$|03e`@`tXyGC3XFdfRr) z`!)wM=cci#HT{|C;BGp#ZZZhcU}j|qas}fy0*>(RHOChRnri!Rbka`mwY+=8{%37S z(xYpY(Eq^U!h5Ht463sjS2G#}Hke+H7j{#%i61wbA*dUXT!A;6su`^LDyQNK31e-` zOlT20McjQ^tO{78Lkxvvjv>6&{C`>;>}6zclSr&4vTRMbmf=i)1KsYQ!3eb-B7S@5 zrkBsm4)smocn!v5UNQ04zs$|8)IHH2k-A#ruSQK2%*1~hoq(yKlWPX{7K%ZS7v5e~ zu+9@{#;`k6nrbjIz_FK_vDP_0BP42kKGmWKr2TintZ3%+#P@e}sw=BR}kPR(Gic zYN-p_iKI|G6mwrI_Y8V#@;63aCu82K1+;f8?KbgL_7a!YyRpO=jjNb!&TX?)(vjrw=xIK9KCKWKT+xFKh z!pa8`Z=iD6n}}A=guvWmwus{ z+3tQT_mb}VIc_9UDnPFQvt5e2gs;ulk461j3Cfv*aT+9m5J9;(dcg1mp zK%{9ZA*P(Mm6P#nQ-!?}Go}{N>ik}~rTd%}5EkM@T$Nw%uj1%LMZx&U+n#;4yd$&G zub`SkMX`k?3WuS7v8)cgCeC6}z0r&)1D4$ee&t#0V-S?Q`x~nLUBfxOAbJ)qkiU@b zz+x2L6mmbW)PAQVF0>k<7*fItBW3gYn9c`@f39`DRcb2%{I{rx-1{ls92AdiUv(8e z{D#U~ZW%;HYPByV3Fyzmi5a9|sW~nvJQd!9KAS2EnK!}DpwE-za$+7P%mdXkMa<$w z(X^DqSD_zSI>86eizJNECjTP2hQaPf8wXrCKdL@*xa2wHqyBgQusNu)TTYs%cLNPY zsVKh>#!aVCMdulhgOnDh>!>a#h$9J17(MIEZMDEVo%qR+Bq<$MS0}m2AQ&yjIP<&D z)#bf~A9MDIVcO!4Cf)Li%Z7r&irCO#X+SIg4GL`gwL@J^}CZhYY zaQH?26 z<-Ani5*s%ackZVFG7&&A@m?SF{42k>))p~aHpquZ!m3GOmpY;;GK1w{H=7tg%j+IR zvxoz_{G18<>fGM1W?w#s$KIN@y!OI*MqYqB{_&7XsX>%FrjKR%F{VFOq&E83T#$F| zQD=$0#)@;D8_xXrOOypA%LL#!c%Ow8w`|l9WU1b6d87G3x7TwV8^_;bsYVJAFw_iK zus?LM;&TOBwqrSR@Q#0|n#67Nki;CZz#?mLVZ`5$oQAzQFGycR(HUj9j%{)q#$t7A zXBfxpBE#^%2bB2ngQ?YO&_#JtWho+eUSuLuuu!W1Pje`QzR?$JSD!pRAape4mk3IM z=pqI_^-qnypq&r4>G`4$fKaRdE)Dy|?B!SLFFd=a1}PRTZR(G2L|DtYkcCqx(j{a^ zT}x8iCP7gU_@j$-;t?I%8vz0S4}|{1jYXY0J2n0%X&De%(Lj?sU28=(nLsE z=M@zim!q_V0!4DHzjXklERF_(N!KVAry`*`^ua{He5D-goH@29#3<%o? 
ztzm6n3VPm5DhPFyTu{^iFL{Mc7HKS%xNvdI8AV_Ak`(=FD;gBv047d=e5HfQ}SRkEj;N z7*fRq-hH`q72U(St0uinK`ldtRk2pY) zg{atO!wSJ_s^n{u=Rf3SC2Q5QXpC&%1(LwAzyjDJItEnUnG?|bmn!QXr5YBx!QQ(h zVvQ;IJNRKRN)W223rSbu>jujao7o#ZpiVjJduqJWGNF)%tE(dsj2@e(a+vQT6)Lq310{-lMUwFG9 zkAJBjwWy#beWVnXogaYXiyDJzH1pj5pPJ@X>`=-}E6q!PO`N7Xak z4=pd6OeR!7uu#W1%xLE8;lHP}w3+sSXyZj+iNZ)!_o}~gB_qTap5jZG7@cfk`}f@mw~kEMAeNU>%aT z36zYSXjUlLL~6v=!u@?lOvs>LH%`o-T5P57%)@4dczdthHHr@^A!IU#mtW^D;7eGT zd^3Q$$L&GP7_^uly(=E0Nnq!Pw z2<;wJhr9}>F-!}FVoRqw0GL8+iI6-S;@u@Vzy2aIA9ZV>^gM)I%MgRBqLsmig?tGC zAt2Fru>x|cm(k}kD%xi4;0H#-6y=|J4UlAf-^f5U?Ie_caZ!Gu3@diq2@IOU&Ga5_kT_ ziMnOsO|cr=2dog-2SowsmXFo+>i%I$>{IVl$jqvkvRA)6&nsYox}ikgy^oy+%k=Gn zp$XfHSsV5F!p0Ih0e2>^g~S+YFH|{6x`T{ew&{q}-YEB|d^O{C(Pm5A0yUp`zV3rM?pQw})^Y!N|% zjli*v>E>Hp4)FrPZ-_z$K|h$`aFw%*@`>jXXw^m5ybur6*GOJ?;{O&JM)?WK%reI< zd0-Lv4JMmk{z>#Xt1AUxd2KKG;UE5Sewjr0e;h?M4H#Wq{ujrwtp0kK3*B!T^r}%` z1I|oZVW4?*;iKT<1imYh&8T2urhQD}q-JHIDJcWg%H=XY%meyfo>R~X9hj6&vD{5e zO7OXbfoZk-I@e0Kp9>cri|oeAtpUSRtwY5O(iref+_#F_EUdj_P5&gu2pDM+fOWyd z6@s2gEoIm~q^Kz&=lt5}fj#lwFJpYJ-TX?F+OmvXS1UJJ7(uPa)PKLF)&k!QJ%Nxz zgW{C32JmYz8$)C99L&nQ#DLF0k17hVq}SKm9X?lCB1qVDmW5L&)qJaw8_kFbHzP;9 zu~4&F*yIi`7?2a)>>1`!WBosMx^l<~m?f&P)P&fYxWfAH3Z3lqq&;FkM8sH5=Tof# zbf71UK8A6>^(l(WUqz8AANEJZ5KGW}5Q)Uo%v;ET*48E%n{Kg$D=q(evX&mYD=VMbAdHt zw!eu4!(m86WA#41C%YCESioIqXUP9xippC#dc^Lk4L_PDUij(C<6L$ukWaMR#a z2%R@XNqNz9uOdE%uHDAGky`sj5%gparKa%cVy+3>@;?)Kko3ab2FNz--18raCsck}~T`~W~`RUlLywKmn*!W>T1 zW}1DtTlrPPQ&5MmA3}*FUORKj48lft${~}7p!k5^C&(2=^fTAMH68pDBs{iz(?m4!|IejRhS};Gx*VhS&vi(YtykwJ<$>8^ z)`r~2u+3y10t6FSBgDTONxTIZdIdwS^C8ZAT@DL=_CtFA{32=XxP^JaO1~Z^QW#(L5qL}ku;${CG2;R?Td2IwPTTFz!vNCFj**?% zt+v%;Ve!Ap;nCh11u()Dkt~Cu15np@Dv&Y}rG9iDNDAY64WH9$>GJG-{nW{lJ-j<% z4g1wy-(EP-zOE!H7J2^|V=j1|j-_>EtYY1WW{ z27pIfy|RYHfFDezkP^iE2P8p{c~?1@jR#%e9i5>My}4aRHdcPuj%@}AnRi;=j!+~Y0S@O6L%Q8C@amY%0rUq3Wx#mpCG>9Dm2F3 z|65(E2-8J+-)5oRjq0X@8&CAySbHO0=mwNX4(~nRu{*oW%_F1s{TK+O<|Jgac{;?~ zK2n`KZa!yY->i215}zeupBQJLdzQFyd<$xbBegVF)zCw 
ztURlk)ZpJqsA~TQa_NMXH_E;18bQ<=)DoTIvWZ;OnS6(D+@C8^+w1tW!L zanA#5XpqIiLj9K?4k~W-UlAmshg|ZJP{G)KKkxP=;)9i6ixcSwj)C|?(fYo0x2?= zZRzc~6WG|ksDh}WtV%{Wcl`x=slAHh0h<|3MBw8U$5+_@`qv9Pbvzd|d!mV?sFb(` zBe?CbUkbQ*zl(aupOV^CaYsI&@yY~krd4NN5p|@mDBbc7YAB!YACr9Q!cdjy$TTux zRTmRkw0{Z-qgiH3=E|PIpv&q>r%!=hO&Y6Kz<-w9LPJ^+%9&c~9+phSEiwc5vD8TX zO6j=A=L=xs30(EPxO84oI5fP@#>SKlWSjJK3v! zMrslH#@b;1<0v5c0m}1^P4*hE6F1CpR^4&(Wz?VF;c;=nZx$~z=bZm1fRmbZ0?eXB zzx-+ro}i-XX>FMfi9i^SSTY|s$L7a&IHmByLj3QTFLD2TlsP(h^9s?rL{%DTx6;4^{b+mU3 zC9>TVe)UG$0mv*9)-A4LjEKUH7Pf`x6GSv_WT-ed2ima{NpE_zb$6w ztZw317$XWiudla$1WWaWp4)Vsx*yZQcwDt9T(;~<>Oxl2@!i16w^l_;uH~0= zIN(Ro`bC}iPe{X3`Hm{K#p_tX^dXeM8mZ)C+KCgA#6s|VZW~XerCx)m8g0-iERTi5 zjn;k^Ooz5mVL2VfH8GC2ovuf!oUJrz~<>)+~?blP25MXxj7{rLl*iK>^T6690Ua|8T*|i=l8%ReX{< zhTD6tuvB8Gs) z{=12b^H@osqmrz9MoCiv{~2Pe_*pSahVms9l-i`@CH^X7#E@|=*!O}Gx%aNM`+9H* zBh$^Ki9Q4YGcyaw?#Nb19`HPFr!>?6*B_COL~j}yX`8=MF^1WNjBxv;?71ZHp=Fi} zJpy6x|IEy4taZG@8S!6J2?Q6I7PjIyIWH6*`l0RK*bdxhInYvMxo_DPvp0(RTG8j6 zPX6_VA1bxaxUO#thu-Q7755oAva;T%-OGdmr5VI2x>1kX8C6FQ8iK0}|ILonOn>BJ zx31}%A5E!Krn4%a=|)jDP7w0IuL>S82h*qzJ+HMUHDPfJXY;zttMh1QSPUV!d{~7% zj3ctGkGYgfrI{A?{sU=;)J4u(Rwfj?p|;oivhS1RDS4R@uf%(J zz|?JOFaBl>7~G6rKY**L*3R-bRUlUy|6OZefO-+-t-Z^XUbA)Q8@Rft+0>}_;Jb?Qs8udd zr|#F4N0c7|H8D&s34w;5t=<)Gxe^R3v~+&O?;bo?k9+NK+~A*vnYVMNhk5N^H0(sm zK);uj;j7@&7FrS(_4S{Qlor9wK9AJ`o@Jvuh98gv+Hn`p<5!L_0L*Ycq+PL+*Xr6<2c%5)$`3n(-O#E$h>*;2AC_G~vk_q@RC3s3fO&M3AuhBD zw2qdUs~VA0H?_(3LnNYDH`tzb>s#A)XmIeRBFG(yljt~Y{p29C)Qnhee~rC75d zd=f9;dWDMS%6MzXV7aJ9#r+Q#_dJk1_Spy}=*9KGc6+VariP0tt(&TjBLT7CWQcC4 zNt`G^g7fI0d}aLvf~ryE3(ucFaph`Tkpda?C@8^TR!w)>Q*BnWo$IVeo}~+Tu+7wz z=r1>8G0d)i+4Fc*q@AucGj@=kcuQ!IBT6%(n3zt`ERb6BDP_tr!? 
zJT@Eqj6>@_O`!Q!S1Q_Y zwj^sZR(j}`pGl2T&S~7G{^MqlP^>tmr3|`)AGk6($grC>2Yg_FRg0RUjvD3q zO(xvMDqdUFQSf;JFB_%Bz43A5$gu*Wccu}<5tSB-C%-iL+-(nPRmS8O16@!#=GEn$ zsR1#YBsormOqRpE+V9(N=d9^a;RcQZ&FY^SRoC7;uXx}kj+oV=9*Z5m0w_Ebg5Xj) z?3KKub1%+0nKUnTw}%X34z}S=*G4f`=+6M9u8Ws+gr!dzyF9XA$(#x5Tc`$l8`=fT z{sUsAWM#XfE-8u)oVxH5H}a;@1A>G*JVyFJ$%Tp2hRbzb_HHI&X(YW2S*$F{4xbl`vKP&iMHh90%+#mMJ}Db^G(I)V+{%4V@% zE5h4k9o_8V3a2$j=qni)-s6v=4LAGop?rbz(xV>fhVWv;LQ&4=+Xa1spv^`PX!ZT( z*AG$j$LYeLT=@=Wh)clu$W_#h6{p7sx_MC4HgbyGr8p9IpKknFjxCGf_ZIq93{FN8 zA#l+!M@czOneJ-oZ!y47IZ0VsS$qEH9OV$Ce*4GhEO)y#X5lYYG_Jp&!l81-RjM1q z^z6B&ap@CcIhD_yCW5{9c8ls)8UrS)yIv)Fb4yOt-~{tPz7)iK&dnU2-XeKF<9bfX zzr(5V#SQBc#F_5xM*6)Xk9{~{$8CvC=7-Ff<>i<8{Dl-8TLF5oDY3UT-Y!?-oK~gN<0rm00<)O!o1BW;jwN!4nFXuFmH8Hmjg2Lke_B(IZgxNFcJ(d2$ zjv`^XwCtSa?NSa|9i6GqJ>fqhzQ~ry7-5jyA%uFG=y3oO&;47KCjAklEfu)uFJr7* zR@?-}iVdJGFFH5 zqwff|v1Qm`78%1GxPG+@)(09KSY8%W z;hVB0v$77N$B?~onIf7F@mC2V8do&K$S{7O^oJivvt$P`Oi8z63LsV|z(jJiYzry? zRP#8AXYO2rG;XdxaCE&j8fn6)pr#l40Sp6#zREP|Tp#qmD}Kpc>@+W&au&BthCP}B zNrGRk+dha}>)6C;I>qBrfBp%ZpJcmhc6*2FRkrUB*q&AJlH4}&#XXgxVAi&=*$ z)01u$Su*k;_=ZQ_XT%p6z$ zb#_pGdM+2rdz>}%d>4+r=T`ga1~0W|Z5YBzcBsR<9>OFfQ6jo5_#v}xlRgb89Ba#V zAZ_?Y!08ZSY|T{D*?QHv@@Z?uo6qs(dq>kR3z#kn-AtOW%HCsW5m>(~%;1z6X;e3| z{{`Dod=tue72#IYpGx(N-z4r71_HE055^dqv%1E%VS5VKaY;0{6Y5hh0qcu`>IXVR zWg@KnQI|rt`!B^?8=rC`KOP#E@HYS(uY^J)I+d`ifPqmy&kcpz27SBaCHNF!cxOY) z?W0sJB9KkZRUUg~ei_@DPBc_8nLv+nd>i$*Z{VuZQRKedEhHR1z_^9y0##)T9eWc* z6B0756XCH2nwDL4bf^@WTmEY*7Zq{fyQpv`w(6Pm%x1qou@T5jau%a1S`TwtLCdfn z59uuZpi%N`af?u}{AsBT>D1^-H6OPDaz}=%G#2FQIeX;dIl-P^9$nh!#_}!jW@T_~ z<7VmbGAoN&%SJa_re=46g~rq;HHa(NM9({a8J0xOrFRC?lJl?)HAIxyC4WI+d#q}p zkx4gOI}od4@C-mS0%^4_U!?Am7RK`M^HZ|Lj@txmETcSJK&qF4NKG9n1Yp&?$r1q9 zz1WJ&Rz_&(UFOsk2-65&ELrZf3~frtsmSb-ef9f$+AXhB8F2;qLa!rXc?TgdSay(a zBTLFqj=>_1S=DN1>R3A4B(y}jRP}LON$5_Yr@J}^Ws6vkVqV2qPj{bfMZ0z(%|#5P zG1VI`*=`4=a)9QYnqvZA0IPV#?(A#bRRk7|aV^wgua?!2a4EI(S0ClhkOI=;xfdZJ z%tp0EX+GK2S9voz^SE6U|gd 
zC`&3w*yldX@rI-$rrA3B?}a4(OudE{=#k=Qdp|#8;EXcBhfR?AP+369J9sw|WhWes zsa5-H#1vmg1lLiqMnFRurg?Ir^((FNe7xIlTF(JfBC@)Qk;nUU?Md``2abdKH63j0 zwR)I+rwHx{IPdH<)%}l@hPa5|e*UzqHZ+~spsM@^>Aw0B;!e}@8zo) zbSbHnhT`-Po6+Mv4}|IX2l^Fv1WZ+9l`Li6Yovy%HP_Wd6BAFy^U{(djV;Nb`B9|x z&@&V$(?$PUd=;ekN;n5NuchSls!oJ4xiQvfW5whe0Fc=M!>n-56!l8oE}Pg?C6Mq` zY_Cg#ui|f^Z(%`CoWCiy^n>a94k?*+~NHhH)m@u?@%l zsP1cqDY~9*-wh9c*$!efZTl~UC&n&olG)!zJl((RmwS<&wlKW5xDwCHIfD{+5Atla z-a^*mzyI`4+E_3FP9x`Ce8hb{K6uO&_G2*)$7nW6aS07orLWz#J8axw;mv#%oV5B% zbS`&gb;0Xg&gF=5A?A=p7LvHl=f(lioNO{E<{NSnZS2)TlBeI}!+_{kd2Vx0l*i*Tt0zUd62IImMFOe+XNs$bQWvPW&s45$+p|RgyJE`}`?Ipx6(e zT>Z7a{d=ezp(wdH$+igEnh5xCsUVf7k4xZ#-W3P$ZZ5}BM{SUnM*r$8*!_G(^iHdo z-=Fn7XAUm0nk7ApAj-;{?zfXHcVa53HE7O~&zg>pYHttEIE(2yKHvKx!d?sxGT~P| zyM7>jbV~;QKbzqImaQSMy82^d&06l4X9?%mT)D^c!JbdbIZfnbCw9F%9*>!_}|%$nboS30qXfW2nBUQLbQBwRl-g|Gf^D z&TXL9-EEt!Hz30Up3zkwUoxNZ6Whmwte^}^dt&$t;^x7dx_;>O4H0oM>RqwCMHoO4v{lj@pX8yW?MyP!c52KA`B}}|9>cW_EB74-xkJB z*+?}diu1>tG6z!d0n`(Ip@+Mg_b9LFq_$Xb|Cy5=hs^=)FoE-scMt@X^Hi>!guec_c7&plW@O4k9F?ZIz#S00 zz}HW1ip1!?8yYtv?oq@+n%t)YD{zO*wy$*IYD(1AMDp9LZjUXoo8xs$ET~n=(PbXw zb_|eEP=mPIPzF@cMq%Fjbr;eLuJva}`#hfxTB#5OEbm`8VT)Yr*HU1WkE8|4$yrpn zk`pz6rASm17gX#Zk! 
z5u^=0xQxu$P7t6Lr&It$K4(T2Q@t=(`FaQBKRIzT7vgtl69h5L`d^Xa9dVEr`GOJv zxJ)2abhbl1gy)$Ro^1cc-Ip*rHF_KKD@{3v>G12kCUJYp!6Nis!1PBG#(P7ta?3bj zQP|5pF)b)2e7jVW{N3&?@H7LuqtkjpGY2iN3WyaQxQS{`r9GLH`pw}4pR*R|jTOK> z$9}$kU5N@Q0C^nqrZ?MibD)T`n{U9?@^zwr@I)f?$kjdp&1#D~$WDp!El%3CB!GEHrVq46q`bYvv5z~_(d(hfBhcUHi*MZ>l+^2H&`n_-o)y@nvh_%$bo<7&`#f}%Yh*k7A6o;@q0 zIVkKMDM5wZRN=Xw8`$G9IL>iZ8lMDEhXV3D#$pnGTYQbsND5t{xlJVl-RhEppHI97^yZNVr*4;k8 znN$3p;e>r2{P@h~C*tZYsXyLIT=1O{oNvL*OQ*2r@aD5V^+f&A7;U~i!I$OO4tdy@ z|MOy~w(0?K!m!g+N9FZFF040YK@2kPGGI>yiRr%4&l%_Zg9?ZWLW~Y)nrZdip`@u| zL2_2|!E|<2oBZe4@&bh}{`tKmBrPMg{mN9xYI9`eKUsmjalVb0}*Fy{?e)zT6PUR=wn-mAn4B9eNES0Y&fSM@Fupo!un6 zT-ED1^-NZBkSw5L$w!mi1m1c08%PItzN)HsE^jJ$m?=iU(8k5lBbt%arHvc^v(lZu zWGkN&&2xE5q2ELy0w>9!0MZLS8K~8OwlmT(kE+94(Q6cKi8JzDOnWaxR&dgN#C!hW z#$KhnRv%oNph&y-8|Y7#6I&O}->&kVd5|hbU{!1rtm2h(#aI_<)>ym)zc?svAvjSWR;e7_Z6;X7+9&jfZ?eH2! zYOW-?<)f1-jIW}m4#>G>2q29VZ-}ymjSBH}m^E=K85l+cuI85$S|RxTj_!Rq~%8n8|)nE%92du7Dqas4|sJi@FdkQ}tLrwLOtRSz`{VPv)rtZ1{EtMylVg0COJFxRT3+P&EW5&6{WREkXinA)V3q-XClgZ*E-(*XD#KwU-NIpKQ#bkw2 zq2wtED=IL1_x5Rty+~MuoVZ4f$M4bbs)szGG&tpV;`dG>7$Q?V?cHH$qgn zOaTlVEQ|ax&g4!AYcU{(2UuHv6Jt-lkWjE|sDTtQD>qubR!V{t+{WflIqdwrfyvX} z=hda?u}zBH+K`D&&>Y+})?+Zd)<&XCd;o=`Q}pWIt~E+ll5q23|0^0foPM94$MTDE~z6r1zJGZ98{%}`s7N9 zCV<9s8P=5CNQf_AQ~lT4Wd>|0V`g9c<|GujuzcyA9tmWQ1<`(26-M!WE8vA6e9J8J zcOoXgAj*yP!Q3m%pOHaHm`K!=hZGVVbmGtjp7lHBPxU4HUq0BJ*9DSj8ZyT8x>hT9V4OKrE)v2Mmqdu99sV-nGzP_k5Z_Z2x@Y z6ovjt`K|)Cqc8T{RcO2wW`XL)R|LwWyE?Po_=!kCufn{74nyJpkGFTPzw`-@z=hiD z6t;{HyIPLF`y{>^=Cv%6t2qZEpXxTBj2h&|F(e#vuU#zoHm%3AwIT#+`CZ8h$9Cg} zez!|0fb4>xb;pg@2TN**h%;RNa#L-5=F6rpQQH5yK$i6Q5(0QiDkeVelq_Pd@<4%; z8srozO3p&GMN>|i$kLW^S8{fSyAjk&6@TZsX!kr3SZknMfyu5zzrzI9*rO)Gmho4B z$@|y{5XlwF=7~&A7d=8;F*@*##52bD4gGU+StTQPpu0$2lbB1mv*TPgcecNqVabqN z0LizTQpQF_e&=TdwFO^nq0RWaNI2Z!|b>@`h0!T?e6OW(|inirD}X1d>zwr|d)lDwTj*NKq3V_e()_eifTcdU=`X!J?q@v} zT9o!ypB`8|;y9pvrV#1M$KPr%%+x7HT{}Na9b^9Uszh%M^FL;^q<3#e1i=#7Me=nI z{SHZmYq0ra-V7gqI;_%RpxoDodA5pFW#K`bDP{&M?eF%S`|6=XMgrBCSnv&3?yxy% 
z8}5}xgY+MXZd^Hd1@n$X$|TU&Eoe zNrWwjtsP~M_^Y;ruE0xYkwv5Jc^Cz?;9^C@L|g5IE*(O7obgg0ET248EIDi55G7NH6!P%-|{ zHM$pmpih*w>zIUg-&10GF54@|F1UB&jMZe}(|AXqS*E1QAl(nP%eJeMizcwcK9Kly z1~;L6Lo@Qr>$BlqE;I)Hy7qp@7BjQ$MRD+%Y@wgq*r4FZaC>k{VG#5n9jaS_SJU+-{8CRZMLEYC@F^A(2)ZZs{>`4p*f7!?&%1E^;jQ$ zxDn zmHR0}g?fpR79yvCw&#Cl4sbW#dS_e+1j87c>sW&PODm|jr0^~^cTT!`IC;XJ^gZo@M<>eCn5H;VSdX-6Dv$P z);%6TXTCXxU(Uz4hXA&Gjr*xb{$U4pOL|<+MpW4V-ZC6AF|UkRT0JXZ$WOT! zerd&Lyt6_Ew!=E)a`|#HC$^ni-g>gd<0ufoHK(rp!9-8pk8(pQ@D_Y+bf@NE%lSAF zv0ga@wg7{aqJq#56+R*@s+6O(b8~9Q{zOnaL9g^YZk$VF3rZ%B?yiS!MQm6$g}Ami zRfC>B^Kb@ouSin!jg+8y^S>frZ4<`C>->U|>>9O+`Mu`q{tR%MM6}hmIiB>Ju+f0h-4b{2~&9JC@c;sl~myxo(pN=55jc~j~ zGWv`SE}=8-g(QxgLCsMA&p|5j?)h=}q;NJ&EYw?^7iIi!+YUjeS$nt~B^nKej^in}}oc{HV_GJwV?GIK>AfQQ)F^+7=x2viUldjys zMX^`&iQ$}zp4`U;+P1^En>DLcG)w|z=6SvFROR|dh)c#ixMIXU1HSQ9#%JvS=<#%2 z1Bi|JuksBKBdj*K;edRO@P`07acog_TCARai@gY9ahyW{zRirrRJ0pbh&Xdb>t7IH z@H_05Hy54)Q;&pZqti!4J$uh0`#R*jQ2Om6I9QV`tf+D}u&l|aFNt!+FSW9@@h%8& zkL_i(Rxd?7DtpQL`&haAVGxv%vMBR$$RCWGM>sGFPb8hcLcHLBXN-Kaz<0^l{I^r# znRvZs`t$8JqN?@sOTVkvy7{=IFcT>yt3R<&RS zpM)hPg8&6=q*bK@UaqS08sMvt=y?=W07yW$zvMYCy){qz1F&b=d)ziI;r{`IZGr(Y z%yIme2i_`MCUDU~O3_F+Ra>G{_th^ImhE;w61Xir6$<=p zES$$5_}cja_cAxzZeBm02R2E8*Y#`!HCel|vKWlo1M2>KSTH*glTnCdwwlqjc#byS z!FrW9FvIDeA4X5!rd`;BU&BV$`Zw-{YzR)8hX*+$;;k5nRL4w|N1O6Y0;#=N!cA1Q zNMGZMj1S5ubWRu-3@iNkMNcl6cCn9MM4^MjOZC_}vxM7%$&UO*{3IXvgWEsWtiuy? 
zAG1D{;uU>42alQ|ULYdQ6yQ-(YhtS0;h0Aoi2) zp#pS>uNzkCC#}@aC3_(NrV5f1eQ5tPcbxVsf($mSfc^wfbguqh_N_iLi=znK1b_t< zV}Niu13?iIBr9kdb|_9JcF>FX3^X0kDb1+Ulo}=h2OvlQj>=6lo$ETwMI*dpRN-Yb z^b83vW1kQ&pkz<6o;KyOGd;*8m_@H*)h+F%4b|7eXT$h`H>@+DA!s&5^Nq{q5{SK&u#dS)@3O0`lm1{HrM%BFou00 zH>y|&KI#Z<5^(0oqFq!EwXZ*vXfvFFrZ5IObH*tL74K5)T)W+gz-WirgX*Gy zN9ec&4cB4~%td4jYJ2J!I4PzCWt~* z@Y^Hwu?(;J79(W06mTe#_N5cl7P~nZOQ-6?%E}lZ>SUNmis0+t6CEdM-%xu{X|@1> zb#6U0q6?@AA?z?r3q;JY*WU*fx-`N0lh8Q&ESXso%9pBpCuF97H{rb}?b}_Ogv)t~ zujSn2>X zLIqGK3+~GkIOtzCID2NpxT2ylf~Bxz+*Q*+1v+1doI)QPemv9Sow}>qbahaP&(RpR zN6)dHRqIN&8;cJ8h}W9-TT4WI&cBn#`ql7e1xau;BB?;kkb=gN@g2t!tgTU;+gdGz zjGO0(JnEx=`{S_E8rs|6m(69_g|dpU_kbEqgHwyD8|2mWd$KArdr~SJptblDS4d0lVp-rq`mDv_g zhdG_YaA!>C&63R(%XHx0FETFAffZN)`5ecb#gvxq#&b4pVEDI1G=^S#5rfVwY|SuZ zt4Gdq;hf4-pRgf0``}MlJ#v0Fd+D3{w%qP@PMa_HV>3d3rNoQFE@P#HWF>zwGj{L! zPx2iUA;lqt2L-LTcQGw*B)DWIz_Rw;VUidl zk4*GCS*n(HGI3dtfg?eTmk+$=_P9tmob~&enxY)kZZ^zQXfIsNo3}l(Ueu(WaA`(T z)-o*D1iYSOuKe4ORfQo+SM4NcvhPbjdzRVAP+lo8Tgm=B3G`2^koX>++^t;MvRX!h zMc$w0=IvZ<;?@&T)2cL!hU7qEo_N=~(5z$5Isg^+7pCidDi>88nBvK~62jmtyij6bk!5ug)zqes;k0i;vt{v3^;xScetw-E5!5(!*0UMDDujly3m7Q|I zZ{t4o8Ymv>4-{BgILb76Km}`{K^$#!lD`bcBn@muzLWXM20E;&4!#L69vVVt= zG0}iib<1K~99G2vs1ll`&}(s6?9-sN<$$nS?I#tbhT9YE6Q)x=g}r>K#WX~)o-7nb z4KAZzFv@N98wYOnKp=X|{;ZNJ7c|o@R`&0)Jpa7F63&-bIJLY;I9?%rPljlvxp=Yh zn(8juTXY9H+Pt_MOReK_h$E9p@JaEx5?0^PjY{{!P0J-GpT7IUu7YlSr3NvfqxKz^ z&ZSsgl{d4jU5cjj$i(Y0Lq%41B>COqgUfpUUI!~#4(-eM%@zzqA^v*|yRtB$$f zn6_VSDdqnhG!h>N|7_-TsVstSkaaq~XL21q{ZxGOxTCEOqt6%kV+gXY?DN`20?O)t z3rkKTe4quk$H5=ql`KzOzt_>(6p9Oo!i&*#m)ANoY5nJIREd%T%3&~}=+YU$q<)4f z_z-G=K*eG3&X6DQhwOHjMB)cEMb8hjS!ti|auCoMP!Bp-l4UK9D55htLO>e)TY-GR zcTwdj0kcOUBjz zRH%1S{I2lr!)MG9l-VhV88WcM;Imj5VfL{H_G4?bU4HE&fT;dN>*w>)wY_n zSt`7&&dCI>EINy2@9llicH$(B)(dF2be%{7>er{#4|VJe4KqfnGZn>?Qcf#e*qO=l zk`3NY!UZ=lfdRu+leSfP0dT@XoO)TnM%85{a_cHk4YWc5m!3)p{1z!OQg8s9`LbJR z*M?RYv>gZSpc1~7*Kb4QrL1dGq8&o@s)=Skx|usYMjR=!kCwkYwyy0IA>MT&7GT zuSf2z`dP`q_za58+c` 
zF~%n!I@+ag71TFaso6!)U=`=*s^-Q{go!%n3wy%n4tRjUHAnScC1T?r9}bY}?n+t~w806r?RIEx4mT~0>dUAkN|9`4>I5b1X-ZhQz$Xx8-=E&}5^u`k zmE0UVlIjJKD!s@{;>QZ~Z(J*c z9@(?w3&gW_qL`tTrwK+iLrtXD#Va zCXfI-ZyEQHvF(Mwnmk0#=R!OcxKqO z4O9|q#Cel+N>wUCc6%xwTwKjq33f&Avl`v9{V*ffX$+0l6JrW4j2cYWg7_)RlE=w|G~oJMF@PZv^$dSSTTcD5c$e0g z4QI39A^tnhBsVFkoQ>4rhxGWA&G!L+ng@I|`gc3PYU4Uah;`OwL=bW?yLf(;%-sem~i?9Xz&j zX}rqV@=psQmVvs}OZH$E(h3E*pSYTs77_f)YUrmT_epe+1B1I4Q$=J^W!mzH-^1b( zH~!MFA~C_>QbD+_+L+w}fzT39PVHn_R7f&BA9L!a29)agcEfGa!P>+Ujkb5q{O<-2 zu#&?{nR#5<|AoNcRuQK~uH=ipVnK~`DN@Uw(Pcm|ZQJ&9(4b%ZW|Y!A8Rfft4>2AX zV@RxS1WzDFuD%GFMUZiO8Z&hcgpXIwZ&6xAmrjnKMkmZAE{JY{e;7`uj5zm*b3kI- zybzPr@ZSHk_1v#tFlbU8#7s<*#PpAAL9jwLTG7x~?6TrEllL@Gh`WrVY3_F;=Gnw1 z^UxQ_fhdzeoXJ=t3s`f0h8u~@7yAhaXzKz}IUY}d4XR@tni{ohWc0JXVrZ8Td-$Bn zQ1=e^#tvYWCO`*$dDs?U^;hF-W*%oFNJ^#uRPon9frT#>n`>2!x*$a)?KMP-Q8pVR z6ftBmkL#~{XkbA!X~z5Xr~$=^0avj`*+VrF1skm$*0U>6A^nB0yV@gJ-VnMcI#A(I zagidufE`#%ww-lMvLs%$o2%2Rdlb$lGgoKGhz{7bI$WznkZsTdr8)W@WY?#?$c^M=v+s_3-8n3}zu&ot?f=M_8hfRT%V@oC4(fINBg8GnEBBSd*J}{@>hHd{GY;VU;V7us$6Xr<$88 z`Xhg+_SPMKdhF&k;71;~Jx78{^3Ir}uaJ86ckhi5;Stpl7dE7)I8RT6Ir&Npi4h%yw?CcR z4b65tIo0}R=0Jl&C)6hlHrIKPQAL9YaNn6z6Ut0CzdmJqhbFs}Dqo>= zoaFj5_}Ky`4jA3HR#*mJM}kHt+$mh>Zmfk68|7y4qYiA0w0F-pbq5J9RK!pn%zXE z;%b2m8RBx%i3G&|0!onc6Y0_Mb)`TUD2tf^zY!&>5lP%FJj)Q1hcZf=O61YeZUSPk z3sNyXDQM%3NpKT*-Wl$>ZDO`a**OUgBU@^iv~==bcTVWQcE-p3>JUy?;4JugN*b2~)Pn zc$KVbxq#S|%2=HJQZXD==QGQwR&^2Is2Qg#8C!sq=7lNG+XdVn`9oi1;ym*|Ew5V9M#Lw;MqZ8`#mgMYynLZ_x{%*Nnhpzx zT@NE_@ypS%2%)x6pZI35@7+kPS~n0#VK3F3&lop1xS(P;J0mOl`fI@&#!=P5DK1jH z#a%2W{@Mw`^)%t{YJZpimM?Y7u}-t@cJ+b~S`>4j#(HsDb38a;8fgT@iP5k}xz#KB zxrPgNsy(j>Somn+!9ghR4Lpom_7$;6O6_H(L^@W4=y>NqZ~Cw`jV}iw0V4!RslYjX zNzntO)xs>i*a^fPqT~2TTIYZgi&o9Y4Ub1B=^gxp0oXbhbwg{c&huP@YU+O-D!cB% zKPU0?1Ur^Rdj-vheKQL#+yZS>g1ImS=Ir{1d5Rj^^a;_W^LTex2 z#tcAy0R#*@h6JnVUA3rESo<4cO4Aa$t7Qa`6IOvx+GJ|?iUyxG?yZ6gvcn!%umKqUeM>gL)-y&mP?plz)C^NeetmzWzd|m- zE1aZ0NRqin-lKG6v&G@QnO*k99#H_LaMjr}f^ 
ztEaa;&9U~eKo-I=h7P2wPjir)I-+nED#tRL@xWXSC9@Iq4#2$!L?O23girJLz0juk zjZQ}iHlPv(epk?jdhDC0{&exj_ zxCI*pHA4lAcV$gXpnL_w%u9}ed&eaffXVgCm{darWrG>Lm2P-C=YQcu({DX@vH=n*!$b=G?7xV3 z&O^~ppRp%-BX>1j(o~uJ=HWvTi+vfELKs0IcpT8yu92l7N5oV*%(Pc;stpzk zdw1+nDmx8^a0UPer(Sr{==DxiJ0Z!&Dc;GAz|nSU1;koz6B>GQgC4O#!%UKHJcWm~ zzroL1ttBW+a)8C2kCg%91PQ;+d)v_*Z-@ijOIuMpO2Df`_5Z=`@h$)ZSrV}=OR#A& z#<)7;@UrS8B251lE{asn>b>m*NYUI(+s(sV*42=k@g8-NhR0Jv6r9SXFX`3KzT!iU z!({37-0cV>?~YDQyJRH~6v9U%X{+z72R0q?`1GFhk^bv^B+&lW5D?^?RJjF{S!4X$N5SZQ5}OH(c~Iigr|-oF?3lMu?N1Xq{y!ZAK8 z>L%{&y6V3R-(y*P==i}*0!P}+f#U%DY5Ykr5i(!Q8*j+urUg#b{_Oy+RoEo`Qq-)sEKRVEOe(h~|^@dOR-Q^BzUV zRnA=Hh4&uj0MmI7jw1y#1>kPt>87II)O50e6F32XJ-eX?G4WYx+73vxY=mW3Hv8HV zuwve?D}E3VMV-^f(fQKTg8l&$h6D)OUhQ#E4q8hWUhmJ^-16JA;YlPsJAmSHvXRv| z`o@f_S_Dx(48H>EQoCv5D);t}B26+q;LD!f3jUsag%TZ?Vbw&dYJ{c!^nSMYN!YM21Q)vMt| zfIvchR*(gQSSGGUtSDGKqa;Br&TFrSs=yl#4>iy=Jc5dKqGalAYk#~kLM(woK*Sxf zX|#3~k-E~Ab@y{3R(J|3o#L85I@G{Uu6{&*KiSPo7aq%p3w7k@kyQ9}44K-=B3Gnc zD&2RL^e1K#c2riJDJTm!t%4pX{_-$vo2hosr?m8MRLUHGS0bhpzUc0Zk5vd9leeC|dVs%82^pM-U z6Q!~KU@G$!?uJzOATT#(MbzJ6y`mEmn(ggUao=39wk-bxdkRMR@e|*9Xx@ z=zjH>tgpiaXJ?UAtu+Jwf#fp|RLSUi5460(XVvv3ghYr0)Km;w?NoRTyB3!ab@p@f z)Wh>0m3xTy@&KKYa%<0Mt^O%2*w`TeP}{%w9IQ=oVggaYQE6^j%N1wrx8|BpM=oM_ z%tjjfqVmnUyO7#Ovnwxf84edikI-PkaWqM3KyR_`N!6qq=T;wHo3x%gJ0LiVw<s-IcMQtF&^U4IvSbow6w>hC-TC!B1#qF?$2csZxeav1Eqb!y-E_W`K*)vFKlV9M zY9z*|ehgO!YU}xz4A3&9@J%?HAPxa(I$`}i>dc_g{@XT{N6becBCB@tdnbr*mEq*_ zy`L~#BVBAA4;dVuN+*~wnw?!E1{ipCm=5g|*o~(N=dBZ8tJ-ntUn&p?j@YMDj#$!D|5u$2XE%UXPJe7x2}aI!EHJ37F>A-<&KtnAqqcL z(-qAS{1BeE)dQ{8)5OZdjgQ-=0F|lF`n~pe*lV6#S}?7m;dLsp&w&~fucI;{!_)1O zFPK$5Z2lEK871CoWKevg7AWBuWhvKwdzK$pi`BISeGDzZ+Hf`FU+b8uqp+@CCyol5 z^3*K_!Pddn-oCR$aM|owBD%ev(2U<@r%}2|8;6E1lEr7%yhFg@=uc`l@Y2N76K98#Z#7d>pSv;$_nX@#Digp>7J~y&9Dc_Q zzwP;YURqj#`gR_zK&3`>3g$W8Q+`$RQ42VkPfux>bX8HY!nmoNL0Kxa^%D(oJ- zMe4dim<1_QBEd4N4Td=CQNOv3NuoQn-Cejj#YVoCx54J+9DmO z-ShaUO~LK=2J2N@Wpbd}GpDS$1~B+w*JbK)wMq;+Ivj1@Be1!0((_n`itf2kC)PS*gW-WKIl8q5-j 
zKajAvPgBvPqO1wW=akxn(uL5;&Ui-5biapnFx})iPY+sW@I_E7*KZG@%{jbDJYj;A z{7AdTN#jJ1%v^ND$k2D#I&+hZG^FxgX{G-<13fs;TTeU&RFfVI0l{8+`%2mT!Q&3e z&GlbbRa{-9RU*UXnd^Aqzcgnkd1?`@H%C)6@O;qDE)SaH6!_ z*tw6<^_l0`uAIB>0BlESsF%3r35472k9s&4D{=&XR$!ChnDEhI)FD+i7Z3I||NbJ_ z>dwVPb`&)jXwhSu2%mAbQqu8cvf4O zFKQB#yz3)4%fa_wwt?(xrVbLBWY`IBRt$3%1^Ndqrhok983D|p8_6r|UZ76$EHpG~ zM8Y&zV4^&*cMUVqy2`jWga%QiL2!MVtQ--wy@Be{#hvKnOQnQhLPmRd5C=of7! z;*TiPM4GBZk%sd2BVh|1;nVBif!g3DMsEXB>3z+m6U6T}bQPen`#|MADKqOePZhBh zD~_ZHt^f`hrw%{J`L^8A*Y7tyQJS}#Uhw{o-IkbqYrl!HDi zSG;b>eO_p4jskWIT~N{Gtm<&_8PAgt&)Yvu-pM&`zG_5TV&YZ&#@x-d`>26Gu5xy& z6HO6_DPH-a9~+Cr$OujH40nRblyfRQ0o@cQSpc4FQ;u43S-UX=>9gIdPzdbvTaPj~ z@T?Y~BGZOTO3_FjeIIc;uG^SVMSbAlmVfQ}@gdC0&1QS2c)Cu7H08P%YrZWpma;^D z_Tlkz%Qgw4=8ak?1$|K#qsqecjT0PT!^m{lQM_V5+)dN#bP3x*u&bV@i;)uTZ5NHq z@KEvN#gVQ9!7))PDjQ6=sb!&=?tnw$bad4+wufptE|pG;Tkt01Ti1YpS#y1GL|H=z zb@SeD4G(TnynIbL#uxmQ%)wo`KE6H{0K|y^_GIl{L}SD{x8HtCg4HJ8ibl9Wcj%*n z`-;i7w8w8qhHY9`j_~zkQ^DNrLlHJTPRsqaWd{ zHuF9GP;lyX9NI#z#bjdnTrs+8y8J++#JwNLSfU}&;Nz6pIdi?C~G@o+$V^UjskKVw6EO!Cc| zkLI9}1CtzX<&UYjVmIyy24(O_E@nFcKAj)B05)9q%Oq8#57}g0RMM7XCERldbUJtD z0)%TRtDjS5bYX3;GqUYI;!64~0h9Dx(AeBzDi=n^zIF6QxGec_hW_aDWacn(?=&SU zKuhIrkPlhX!ty&^8mI8RzNUt@P>Q7jssZ=ur)qKa@f(J7kcGCW(k88x!i(6w_H(mH zu5a44rQ^eo7;h_BfJJNnA$=`4@ZIAn+z~$?C_3{rwkbz~LD0^v!X}Vn`&1KsAK06C zMtfJ$jIi00qoiiueD4yoM94K$uqZV|@#+qyr6_r+apU_+ zH}ysA@KsN&gXnQsdQ+U+;p|f2GC-CGAGFR*-e4-WE{q9IgGggq*oeUidA2K3i=D@X zksBBHUOmh_hXTzazPc&yNlsUoi1!Doe~pc4hvA5K3G7)X_H#b#y8? 
z#nLTWE@5n%>Ft}&?T3{0GP@J2iVV1Y3il@zW2aO7w)FZ_sPt2)$kHDpu8j^%JVDvH zVScT+8L&?`I#)MAnVuKx=#4T&r*m|=KKKJs%T1jt z)T>ngp`v0ph7hMk7xZgh3Z1%J3~{*5?idq9#VsH+_j#@f8gc|hD*L{c%r2uzjHO2| zlR_S1@`VtHj~#JX@N-U{Mqxykm~=1^vmpY%b;-COie;u*-~<9H zV6T=Q5e=@8$3!9wZ5Uc7^FNOpg;xnnuul_TI9$ zFvE*Yo*ANG74)4H#|^lbycpv$-ebU;Lpy$5(|@fB`Yw=Kn7iIxlanB9TE#3-%yE6~iM)<0Bwsx^kea=>gtTuvSz(_84W zAWM1(-p_i;ynMS&goh$}WP-@rlv``qaX({q363Mr=Nf{G#7mpoySxSU1RlNBETp?niEF&O{yFyINX@uZ(U;?2={berei&*}Iu8y|i{QXO`=&n4Br0x63GdHO_a+>@7FC2h;c(Bj{;2@T$|6gjEpV9pzs-0twE^fUF{YP~VllX8aMSzCw7 z&tdufxOaOr;R;kkBQ8Wpt`oKUu2yDYp?NXxeri+jC5VbQ|6$QM2e(gOXzj6NEl>1SVoPGfXbBK!FDtT{KUh}~c-N_M( zmUOA(W9nsk=1V>EQ(ZOLe=pz|YJB?rE|t6xzOy9C>Bsvh)E%LTA`tf9J9`5Fz37v! z==mQRd91X~EmXWSCvpjlVGRUfjaT(rg&V@Mk-JmXAa&yJr{S(BY13QIRKvK)ad)1u z<1rzT1jLkG>H$1)0p=31kQwBFUflP&{3iy&WBEWD9i#E?R8i~WO;9~(m+8%Iu?9F- zk{(e9I%kyKzR~*ez#&v#^dntAE&wtOo{u6DS(p)A!03nmQR=kpybdILR!pdMly7;B zsQD?8>_cg*MS_DRz@0GHnN|!6Z*hVj+_tw-=dFF}9)q;UzxEOhfe5ELi~qa09MIc7 zVs9Ncb_%sMj2Fb01egXq>1Y_EL;i?n%1D&l`~EZ*(LRW;W_(e!K-__8L=QFc_cpRn2T|wzeLo_;|RpBJ^>nB2x@cXlq5zQ z?e!9Y2B5_d&Xwt2W3;^r7h~D*cMOL@UGE&7m*X!Tn(-0hzRm1xM zr~b1VM}>3+ymwFWX1m1dzDj+T%|dZ}^2@pW<5*9Rs}ttX*-RwOJc$Zy}e36Qdf!`rRG@KSH6#Lgo|c;U#=6${Up#y%EL*|nT4JCX=b zR-y_H1%QKFxA?b+0Oqphrwye3!5YAY#9in+wLxq$#nkWB*SXAAZg$6E%O&vHWW5_Y+tmPi7_sumsFnj94%yukyjf zJh?pS8Pm#N!81LHs;#X2PMH7M;NX(v@Fb491XmxvJEg8Z)@})Vo5w?a#zU~*tTfpc z^XTMTCRh3NMG#tQ6Ms$x{%$4Tj)OfIoE06dbgvE2eF)1_z)}}}brd#>ZP{r@^{e1M z4*H01s>{C+I!u6;0*UMjv-(7>|d1n zXEo<&kPRaDANyovl(@(<$7qRHQcf37VL|4tDaY1KLA_y_=UMY}Ik453q2QF;*i;;Z z)jg*e9he4BCw0zPB^V;sRxV6#brv0=cz>;ctP0fBt5S9Z7D;078;(c<7zP^R2 z2(1pk9|oVD>V={lp!uxRp&h^tv;7*;%2~eq3?strg)7YcL$~D*soiAG0WTxz(Vyp_ z{&)5ZlRfptN2HL(!nBrjEUu60IIv{ZnM}`{Hq|L@JR$E6HNa{G!5_#4Oq~l%n6A7c z5dCLP6CiR&Zd#9WdLxRU#fUy^M2RRF=w68XK!O5?7zbQFx}o|NP9v9Rk5!5&5^9GJ zZ&2Qb)J|ozn05y1R|r~j$4llUjwu}>@xY0a(_iS-`|=mZDAj6fJ4Jlg9IOzB`xjxx zgkph3Grg4)#M0n7jxg_J@RCaL;dc_l@-4K!bYgH%+Qvm5obv<;Z1`Q-PoQTmjh!kmI!f^@={x^M9m(|dn-QYdV+!^ 
z*V%Yq^Bf(#cA#j#;&>gzUIMef3!Ci|FSeJlnYW`9fj(Kij}o8OHJ>-5WMAq74SI-o z&chiy>|`)Jg=-?e{Zu`p9$+*~V%>>rWJ#~j%#)-aE-OX7B_7Z#;bTzBeRrl1Ax7n_ zMm!bqIjz>BwPA4a#{-bk4uE|Cj|po3jKA&eqn@pXuJ_pT6;yIu=OOUD$LnPSIe~O}uuaFdesO&^<#%-eY+(B2F7uq*^ zC24h1|F$3dti|kv?qsBszg!RYg27eZ?X@xi%s;uLQzvCb8^M3^%t^xlfgT5Voy6I? z%Ru;K6qjX6gdQ$!3`a1={$61&ZhGuoQZ$TWI(IIxADeANmX|$a%%lo z4s5!7Zw+QVn$0?d7Q(4 zD$zhEI%2)BZY75)F)P+xug3D=qQesbq|Dp%ej$l*yoEW}TN&Ag$e<1KkM2%3aPFwG zsp8yby+49(hRQNcn-*-W@&5sq>}wbTwjpx1ex-wuc-F_-sM8r7mTLh1ZogJ|fucll z>=MR4%G$7-&CKxOMxoWjPLcy2=H>r^YZ;UF%lS0hU=pNd5Qq@YB7INFdmgyW>5(e$ zo+gS05=7ImN4e0{mlT0C{BN_xfi?SRmNf=VVa5zO&qX~shn-W*wO@y6ta~zMorKVO z-ehfQDk5;*K88<#td;~(*GH^g9On=su9CYN0-vwl_L5s_*whbYEp?4A5C**#cHxgz z*KuCwpt)^ds&JDGU{I7cz3THq0<)TN{$4_XC;G-MZz8Fv2In(bMge52aj zXN(<>K(?H7ITORdu(^11yih_rj-yV3$mITFg<9(zJ-s<9+QQsTkfo37)@alMiQ1U9-$P1ndxVB3l zQ^K%3Uan)xHr-IFXQmf<{FB(Li$f#uQp!csOxNI>l>3dpw&jLGhptO43;C@_pwgXStH*k$-BMuXMG^{{PWrOG3<2hXa)(J zV)hwFe7G3xaTPj7uvV5q_aX$EH_RItyez5nNdDgLFI4X4y%S zlXaV`IA2YspJ8e}SEr?gmV%97%>9&twBCW!`H#X3g(G(LiKcOY;!pyLy<-5|&$T+_ zOX-g&JLo?f>!F|Yja5Dk8O`U%8P-K7LWRo=jyUEmaOuNR?dqVnuttjww( zxk}VMYDk*aRHl46b!oc5of_6rn~OmBY!ulD9c82u`s9iV34<{eMLiq|nrQPmH!Q)C z^;v{13ER|kC=)ntJZ#P~9A)wIlq!JsTU|aw$vfHJyge?azDPmcN zb*HE_O(wq5eatk@!pBPPPo6M>uZU+28(G;wFfQ7@Mo4@7YUffqo-VNEr`Pbj;~3zJ zG1l1ZKbsl!O?;wG@^y!KOHG7lOZMb|*Meq5XdPY`PEU<+Z;!ps&Ry*xjAHXKPj+yX zlXyQn@>dnTHPwtS9*O{>=obmRkhYoa3BvSsjx_Y~tjL4a4^e#>dHFzW1ubo;G2V02P%@6nejuA9iSXczw#12?7W2e#HIGVA5OH{(U%xDIj;IQ zzOkNraC5&tHRW({jqnsB7K)K!G}L!XJ!-#&hvgCw_}hOw9tem~<+q19-9t;@gY+M- z9hn2^IZ{3bv^us>(_eV8aoGh90YH0vOg(M+9^L|z#$xkc*p&gy$%31!05@iR%9?Mg z?-8!t9fqC!WKYOjz@4B1ILKpp06Q>^5}fKXdb4P^^E(L5!s#r->NAUVQ37E_&=BQHevhwVG{WD^liIJd`jTl+D=d!G1$Ks(y8GP9OVZ9M9bP6XL zn6UtknKLD0Sa++@L}dyy`|c>vP^V*oTVp!RfN~#$@ID|lu^!8myguxq&<45tv)wX< z;#bM=O@A^q`B~!TS18~GMbv@aBa;*oyTY(ksXiE^KboMKbT8*J>y?^gTLwy~CA78P zTvcLDL$@@G&fSc=KtHCvhewbV>`(EH`LMRNcLy%@h&e~*ItOj+#~K=TOq>XL{KUL_NSt1BtsEqy 
z2kY%mg<&CpP&))>DJkDy!%4oqH6l1H7N1B@g%NCVcCzFAVuEAl7zPr-z*bQny#KCb zU1o0NvcWj!7}EPb&yw(WMdSTI<=CYz4|i+8O(~LvVh9uuME*&jN`1{n3)Jy0X4=@; z0&RYT!9UeHRd<;0KXu#3ec&@knOyd03tl5ThCNzyP`ZQlijYnPDGy507whSdTr#x3KY^nX z?ZfhUbF`CFl1mDWS67NiS>6d0Xvx*5M2;=rZ^P}@>M(A};!;e)JLG5<CcfQ~u;`_C&-ReddV#0?o6Z~})|K_4QN;1aYjmB(TdRZ6N z30+c*b`lQ)%VGDAk44Qq*A-lm8>QE;HP;^&e(9YFtFR}ws_C8W)}L_bIyO#6`7eZI z{Drz?hN4%W^7(|}!|vqX9u|`Bcy79TH12f%h}axXWH<~Z^}{d+k^cML8;>PRQgA8b zod7RD(7%@Kj>|tVmn~Qp8r(H}i{ifMTjq;gdUM=hq{%3(7_T|8&P#q;wvw3HIp^U4 z<`S7{lZVax5;<}j?%#{f1grgPql2wKWMnbOO|c`Ak=uEZFTWX);XI#|*<)+h2g%iq z>ULgZWLgC;RRpeJ7T|~zW%-^(l52@(;~8cF_*%h(+?A&reHAR#y6HUJjy`_*+E{T7 zE06kq?Dv05^aD2fLn?%TIBk_r%*l(WFRa+4nr$bb=(AW%n6LspFth(ODa`#9{+3#k%FIw3hE(no= z8L%(1XjFPHTw*J#?-kl2w@e+{@0&JFss6l;``5k;<7k5XiYs0neHiR_#JkHGfVXAh z>$Z}8dEp3ZnCvu!qaenIliin#?f+fYufK`yGNKJba@!y+)&zs8BlLxk1j?B)3G9BLz&<}}nAHp)>5ypXW-RScwC zlK4k5ar<$L$cN2xAxYSD&3WL<#g&1> zE7+C%ptqF~zK3qgmF+Qv?X*$cQuPOXM5vR#c*!!~*cG&B2VaA0TNN!l*@qertYd}( z>q?0=(HXb+BiEVqRgUt1ILUmDG39_f$jhR=je`*tLof7=jDDByr;0~}d^qPym73P# z=J(MB%1mtrcKT|c#e=aWnq>01GLGh^o%_F#<5wv2tS>H0UGBp`5s`PTAg90hhQY8f zrya0Z5i4iTjNKw0(s%NfRc6Smp|Yr<7CDCXJjV$kr}s;be$_fMhxN2HZdux>v?-$? 
z`2~;lbPqz;q;iqP(*SqcD`FRaE0rXy{&X_DUfnc`{dO?H(G zjiw4`H!NJyn&|8osu^ZNd!hCd6zzB{7vA1LD`LW(FV51sh?W zc1QAs`P;eph8-$JZT?Q|ezSgfQ|B@T?k~*E1w92$RyhAE#pw~ZqJvu%Gs@rZ76~H6 zfZtJ$73`ME5A534#On8ghCwHTEvo4tP$E4NpQ^ARj(>A9_ip>%$q09TjY2Z5_Sf&2)j9*tO30Y%M2gs;Dbz=D-J!Yo3+f>6kz$Rjo}`rdGUiE$-G)L{eS#n3MIE;IEf2gx zZBJHlLiGXYI_ylK_1}&g;^VsHGl1AuXJo~ zduHMZ)6J#?ON>0FZN0gpQi`&(cdXC`Na|EePea5vC=e5XZ-NiPF+rU99Cajj#Yz<= z!MXO%mE6JUcv`gx#|3>{sW>#xdhdcZ57+6^#@vMkEuM968AR%m!6ix7jFT}J_{YwT z6-BGkHo(8W^Y0^^)vwwpj}Ag)O`#1`rL3uYrPA8|4fyZq^P@Ofg)E-Z0<8euxd?~q zglRu?H&!f)7eg2!6eW(_zw$Kf)#=|M0y|RptINWiO7D84xO=djcCA4{6Meni9wZ*e z)0z~TiXCzM12o@DfqT{@s_G*{LYH+XsGSales$T0t^3Ka+us_S3bUF*&h@{SrkFA` z0NR8+CG7I-59@x7T34QivL;E(&xkpW{4!ax-ge&QL#2n(c8rSd?JbKQH4Nx>S2*N= zPG)3xp!PrVlRyrc4VX!^(8~}en_AwY{To^EO5$3jl3teFK@T#3Oqg4&AMz2L!ZfCg zD8A(Mh{Ae>olmH-=4l760c~@^nUL87|1p9J3A`;8JkEU6d7@K)m8|3a@2bYO>tdQ! z!Q}Fn@ziA8S^Xct=kh+Ys?>~A=gw_+vM+#U8Ka`Gs&bwJra2y+B1q1ZP-Cu|+_xPw8Gw#*mvsV;j zMFpjL(FPTajgqFU+>lhV)c<6qFPb?=>vc-t=60PJ2)7>pU3&G`O8cEa#U1V3XEGb{M7aEIHyL!XX+%HgL5zG)Avu&d1p}Py@cqiGf*w2NzaRT)9BrTd)k&*H+<9vU$GBmNrd>#KYCM40 zo3G1O^*_2OJ&UolnU-U`S5^;qk;3E2y|6xF)`|b%?8agyeN<4KzC+492d(fB`HUJ9 zED1)~%L1810a#J9jL^r@G(0{726MPh_@}`TzbZtJw;e~Mary9=6S@fNQ)(QaO5D$HPKrYeMg%x>&x_ZxB1htU%Vb(^M~e$t{TH z;IbL0>KGhQq$|>E5}f|y8pEw@3Q)ZfW5bDal*gVvxe3DvJ{IR!^XnmjZEB=9R!d+7 ze;nM8^an&IkEy%djg+wo*!H9Z+*WuWFFd699#I@gW|+R6EHo1~(ZI+`FGcg&xtm^J zr(7v1qrG6bV@nA=jB<7K))U;M;%zbpFQbKh_*}rM{X$NgC zUWM0JdiRERTRuy2)qqr^Rmx>g!$qAbXXO&YZ`AiI$CM<%-*}4_>(}m!7D5pWLSx*C=1vyi1slj&uB#Uo=c=3 znv`HtxqrddoYFkfwDuwHmH`-0yOUE$9XuTG%^QCWZSQu^_crjUN&=L&xn6VFMpa*( zGE-v*Ko~WBV$leIQZHr1fKvBh5pM>$|DBG$$waAlyW-m!i*JwL0CGln`4eM``L0;> zUjK~3O{xc@iFULeD3Qc)Q%pD!!xvwkD2~7DIt0ZU>$DlXRv66TXY5vqz9>&KtfN$kjAP4T8$d5; z>q=QBJ-NO=2AIuMkR2;2vg509hQ?c(SlZ|PlJV2Li=lTN{798E#>gVFg9KAO?axlqo@{BhIqyBLYpVezfvwec`{)I8x8GpJ7mZy zbwueK6C;5}sC(R5{JhdB>HIHK-|z7o7p0?>x}+E)u$y}Qu?xAI(dd=_RR;-PI>d-R zG<8abQ_ci!fa{9`W4WkcLe+QPLX&~_TJpsZoJ3mo8`ZsS(ie 
zn9e!5(*`M7w_^$4LfjOa?02kwCJ}__LI9RvqT)O!;9%x;D3#)ta}bkiO61O{w7AIzrC#;lvEF58RW34Z=KIhddZyi zzj0(>&I$xBG*m2%{dVpJ-OIy_Z!40=%46Q_`48WDkD#oOeK|ai{>=`hw&gu9o<1*m%2IB;w&PtltOs7fApmI( z9N;j_)c2JVjqtQb+69Iv$B5xQG4R@&+XUhEZcT)>d@mBd4PvF!yj(8DE*4#;fj0S= z9xL1|2A1&86`!L(Z2rot^2i!x;|_q-!4zx6r#f{wRO^;dQs%dJan`*Zx$apnOR==L z9F(;Xs|RZGDk~`4Dr)S2XnI%-5Tf%9QDYm@x;%OHhP)iVHio53d&Nvft>W`e0@=sW zS*WLZp{Tz{>zFw1vNBX{PAo~`)eMkIR;^!eaLv%*bf=u&6{8-G$6a6Jja`#^jfMZ# zkmdYL7hEFqNKe-u$5(j^mUdXH4Z9k5t*pKH8s*{Y&H7M|t53$bfGS0Ig+UuNh5pDEd6ZS7Ib2kT!)ub(R7b))fYk%ttL0n5C2F?z!%uv-{cQQ`@h?@lfG zD4b`FD&=1&k~7&C6-Q#CFxi}pCpeZ=hSQOfnOJ;6SsQ9(V=hfg8=I7UfdN4YfM#eF zv7yoL1ri2VRLzb#wwJy7-TgiRj8Jimq#4qMk%$sA@Me*?NqS#VC-wJD3|hwOLq{xw zCuWIYFGNf0K!L{0x?4$41qHBTo$c9{StEZ*Q|CfTTaOEg+%Sm+FLM#5FDsL=5O(A~ zm&xTijq?W!!~vLCKYMU1Prq{2e5*4m+%je^kj^>W0|T>?snPuDD1AUK+qh@5x@Tcf z0}jILmb+bAjM#ei$h%cL$lif|!-=f^Y6MOr%d>MSZRptaLC5>HNcVZ(uuY&nkEWKoFV<6-&%;*$y?D_r~_o-e?k$d*HjT3wQTMrnaM6NKxr!G=2m zG3okJ*KAJ($6)e3c<&I+4gQ6N&V0Lg5zq~%a7teo^S*80oba>m4ve+tc0|D=a~-3? z0Jbx=ErCG>UZ<3N0NP5rfiW>0OO|&+#?=DM59u>fr=1u*i@32HudI(|_c-R9`3Isx z?1pz~KJe~Xe{}bBb3xT(!VNCTfE8_?1`tY)?`6^mNAYGLC9eLO%2=E_v>WraV}si4 z*x13>8Q5YjreJ9qq3Tc{1wj9h53;i{&qFFOLUIOVIRc?t_@c?r_?# zMaJGl*_mDD?spIoHT{jdzrIpTtntV`pOiWFnI-+Kx_c(Y2kMlSF(6&kzvDFV_1pz8 z9gPB0{hy^pU}sFpd1P0)*2uKgDZLwulk|VQUq}GGAkB^}dDF*eMNc%z)RbCUi+UO? 
zC@=#l`i zIu|S4pe)!RzQAw$xd7{u`?=+(Z$0lq12k`I?sF6Cp3bTksJBmPXMpZ0!|d^|52}nXTEVpvpF0hnNzTg} z6+Umvq`^a!?k)@mGdhPa(e|v@pDsNC{9MQ%hwL-@-guaW$a)@;>zz`oHV4e7pvOq^7}eLT7Wqi z0~DvyNKEdp*Z}#48^B{Gg!RJosgs%?{37`F_ph|&vqwSBA{-P?MfKVivQ*D8m=nBY zy+m$rd5F~|RQI?jOcc}8ew74g;Bv3DxrA(}Q^sE7@=qsj*wR#*NACJgJ+aE3fc&!p z4^@LomAba|8!gg~nbbA>>}2|^L1`fJcwu)3xy~n65?s@r%iP%>8()7J8*H>_d(N>N z0S3XPG?CIYL#{hb277xv0hlomi`Ic>ftPYFo91B$pn=NTM38dO>Bry}8@OWn9 zouQBF)!yT*t#B2O{;CU2qBF82rK0wcV+^EMkar>IVPdoY%}ayx!P9HvUt02q0J z)|0fQnyDgDA_6&ut5b}60k(hj*ydpKc*M$E+32|2yqR4T^Qhp_4n&x60r{868PC&6 zn1reCGQlm?-^O@4NnXei_IQU_)~)B0P4Mx5EdfCZ6mCK|@NVHTdkXSL%v~m;F@@5h zNPBbZoE4!#&#kE4gl{F<9FHJ}mQ~!#x}V4J%1=(HE}+4g`&OIV%%EbzwvAyfmUm&2Mp1vus!*>%M98Lnl7%Cw0n?FS8dx^d$cS9!p0?M{EXRc52mX^ zIgYLTFc~3`LfC2s#vM|cF?lWg5cl;3=W`GaRY8Of&`lM{zuqTelAOQlTOXj_IX^p= zc&;%)n5Y!I2@^X3Q(QK92#VWMQOjv`I18%B(lXQ)1@4t08`Ccr%4%4DV7-_B=_u8& zWavb#M7;=&{!QxEKPCHQ#uQvMTEvLwa_SR$bE@8|TqLm^s_5Ka%b^&2fPz0d!ZJD3 z%b`@DU|l;}sg4>`Za;b&bl$q&IR0t5s|#lDs@b?faS7hL&=I^$t%0;Whs6#+2qnhj zcv_szIGglT_e-$Q)!5=?V82RZB5g6YNh~qt{Uzsx64l8TZ4rVr9A{Nw(d1{YV=KJ?56k#YJQYT|7|-Qp#exD3<8YAfDY_W-f+OgN;s*M&_6z2!#N&60+2G35ac_cQ9qoHk+K5871VCv18&%YOb$u2*r>|;^40z z2Z^1V%wJ%oh)Kr84Pcd20PU?59o0s1 zakMBNSkP;^sWHN8lULS6WEV2IsjBUjQ{2o!W-bTm0ubakRB<=A-3=Pto+`kp7ih2r zqm9VM#*)f=11VE_DcER(bDr?>;CkGgj}iAN93v|IeE~&vM;&p&rn|JWxuCMe!aX&3 zc)lemu`D*Vo1kCd|5p4blJYxMEiX$Y#wgBbbn0K|CikiW;{*YxjWFHWWCJW-)E9# zKfIyfaoRvEHgFWm`Gjerqvz(7?{2;EP|gfXdF~Vp?Bal%AJZ1=qeZC&jjCzJ0i ze3GjsW~K*;D}aAk+ey1QT}eH#f2%B`a| z@xV$ISXYVEMVtaUBIRW=k8jTwY1eZm1`^kSh*qH52JEDJfW68X+_HooxFrOaw$^)F z-Hpr{O(PFRfCo(P`zJFtLFuv5q_;V?QdMo?4L{4V+eQ3&ABrwqt%I1Y0~A7xOyGTiS#=>Ci|j@Bg!#5SgYOIDc7fo*0t zwxAKugO&&G#R5bx=F{FqN4im&gx z6fHglKKG;<5s@+KZ0v-2PS&4br=c;+d^u788gIX7Im&k*H$C%KI{8b#W>aAh6V}kQ z4~`Ra{Bwx~q@(_!(aEr)bY(#i;z=g1RdHk}*h1_>p9@&4wge-?v+c5%`l{`nqQ0B1 zmA}%)Xk{IW4`3}}wz>hva0TpdLVaEJmX5TlgvNy#r#`OWIFdq9=a{rpz);~os3t6( z(JN~gVcaj-uyY1T+7r5W3?Yr)!kdVx$q6+^t#tuin&?M-(wBZ*Vrigrl52UamT5DG 
zm`>X^qQOpAr|By@aj35G%RZ!ZqcM9~`V<#kkch8DL3+eY3=!E7NGG&-J!Asuy@%jf z#J=Qb(thd!Os+ z<^+Tv(|P2-8Sq~EV&?E%18Hn$)8U5moQuV#*W16!vJzA zGlUideUXAkv_0rPFq245F+?Q{4Z5J2S;O0uM0G#Iv96ac{ID*ia}@oFP)d@7zxW;_ z?HmJm<9(~TweABx{w0ip{25Y!7J!-gy%$IiFjv$}R~y!qsvk>#1at6(5xXK_&*r?pb zH>=jtA#2@lA-z5pN@$Mi5G>T?|AR+^X%xHK;12ETYap1-=PEJKi6u|QLixS?gv*qMjw}bEl6J%+F^i8r z0)_wDp1FS}KEB#S32b&M9g2AOIK|lqG+rd%YH3bBi!*>_{^7LBOKnHcQo@%e0GS4- z6PN})tM{cpm7u%TaGbf;>k%wF)vc?_eF?R3#{S;WVx}weEJL{lJ9IUxvT|Rx3Bfw= zZOYMQ>^#7tK{R-m!RvC#aK@>qS0w;pc>fp+ssCsN!|!95)WrtygBHXG`=08@0G35V z7?GDp%5d!MBeeNK%yZMxh}rb3zqlnqVfba*6;TQkx+Lu~R~yb`*%-{u9fCTC${0T; zp_+qWY*yyc3KH1Ts^*8MhM;+D3y{&K>sKS*z1|P}5#(|pp^U`=HT+t~K>8#%`jY`F zp3)i+$lP=lG6xeZ0^7jFbY}+n+jut)HVS3&GVJ3j+~rE&C zF1eKq9uK@ryFODarO8s_U+$$)O-%#$Yb8lk%@3l8dFz_DFmF;8kP82}fqc8S!6@a4 zR?>UnwVt8)#b|x3oXfMiE}OCNn~%RkO-pv~X_AtqDHt)PaJd1J&RS3?j8Yu7$N`Ba zcl7SY6E~A-C$Ww**4ToBnGH&GnI0lRKbN=qFTf^f2x~1hDr%EBCYWm+%N$X&(3l*% zw-t?8(X5AifS5XGizX^+s!dofdJ;|nvj%k{MFT39qieDA-PB`!;}^G$T?CP&@#hGF zKkh-L$Bd=RHe@w@Zll8=bvKOG>e@_nv5b_dC!X{`qLE%dZwpea6 ze8#UIi$u`ZT_|J#h*HG7)4R1<`l+Yn(ryv5fOcv~c{+#M$K^cCAv^Aw!Dz7W9F8~p zqZK*RAIeP}G9Fjwz8i*3%Ifv*e&{#9x(y>~y&{(JKW?RC0kpgPmmit~0f*UW+cj z77hcS^B!!gon+oATm%v$4Y6H}oy2;Dq@kU|w_H@GPsl{?VYma*eA?+Z$|_jAcvx|* zEukS%lzsbAfvNLB4>sDAzazp_u8R-D+e5p0N#RRh>!*6Hrt@3ly|Gc#=BOu>Ps99G zl^y<~kzRIC!hi>#U6fU5yDjd_$KICwIXm1k8zw4+`1ALJJjK}7(<1~;sHHzFy2t#(V>0A3~^GyG1H;{FaFvt?H5wS+346y_7!vT%LQ9xNfg|G z28GE~hKb`uqK25{Ck}%ci;D*7$uZ3K{b2Pgcs#ZnAz3JAPW?Vm1(T_8eUHk#fkN25 z4fD3txYAZ7lef!d&wRqo<{Zh5O}67Th+KJYmN=?5WB3g&qEoJXry0CW$3vh1&HUD7 zcuR0s5P?iwS?G^i4V41dZj>vMZB6Dg`IT-&ym+9QVnn?dh_)Uf2T~)yieW%9LCKLh z(L?ttWISDZ5(l5)NA>u~*Xd5UbqR_aDkiCzRXhXp=4Z$rO_H-rSfdUAvO;*3X?R*# zd)dRtxZD6flEo#~1g!etEZKWRH347I`if}it`siZ;i`BHqX1=al6AS0emMIENSA1j z{1qa3dGXFJbQq6*mPqwkH0?YrbPKY-NXt{gj$)q{te@Y@i2jnYGB^8q<~#j1LZZ=H zTRR)^V`4F`o(rE6czDtFXerS(T8jES4>;Kj&K-a3kVU(2gzjf0&Eyj%nCSs+X$H+1 z(VPI$LEqvli6q~FjI)6=yB!K-I=cGROLHfy5CPi7kQMCmW23_P2t1d^HMXU?z) 
zlT%kEG>ThnJG}HAf*3I-n2qk!G0kp;5592YI=;SWyO}fm4)njej>BnBNPZ^lVRKE| z-`v;6I!EF~i-btVqknB>t8$Adt zTd57JmuX?fm4w}u_G=3o0)!`a9o)r(lP}$O%z$=UdsS@6*4;)}3sI(7YziPH;WxRK z7Lg8qppe;Z*nu%%hmd^EFrI?IS8?S)h|tui|9XVZqH3wA&h}U?^`$4VJO5Eb+{#EwL;N+7~lO8wWBK7|j(wBlI9@8l=F6}qM)qW~WSdvx(7+`Ikl(^+IM8a8XG zFIX55SDd%f6pfycdY-dM-O?J$lcByLs%T-(BsvXs*qYpV_`2_8rdz?S=C0hxwVL$UJmGVXPc{*#vcjG60V33ICBHh@k8FtMv6H{TjZLI;E~&p&6}f7D%; zUX8IJm%!K{^@yIsWADO%LQ+^Cc~7~76uFbh3(aI%u2KiCBXu;0keCC=g zd_5cw)GQ0#Ee;hYBs`$2N*{o?$Ke=&{r?$6QHGdJbsnw8Gef2~RPC7cn|54iGq!SM9#SGy6lQLHx^mmr+eY49=09`L?U(MrGmg!#nAyU;fCHmTHQ z5#S2Bt4st0|2=9ShbUUmGNT#|`w}o%=x|=f>ZzT;9Lu;qiP?mZe-psE79a2)F`|CGl-6za%BFw0Sd=?XDcVY-hpGijFypr@teqJ$z@ zdXkd*|0?Ok>5U2^>y~Ymd8=Brjkx-j{l1@UMC72XsrU+3cQ#iy(8@@rUUVD^IuC31 zDs0QanEd@fihY0u>|J^DixwS1R3hcVZ&Npjp_MW;d8vXtEJ?n5ITox_N<+B;`zd}S z@+hJQhY3+c#B`qYN=uAF+dalYSfU6$c9D22nl_Di77jvC2YR(5_jqEu8hA#WF&?w$g+RXKO zJH1I?}V>7@SnBP_|0eF%B`4M_ywD-$UB- zPEkhoau5l=-B`3HVMhY(giVqWrcNm(A%A4bHr3kp_`s3IR$V$kt&8~BS_(4%Ty}qv zu*ggRC?VAHt3HSbe!VWBwmX(46H#OvRZz+oO=NDrRY*oJbdjDliAR-g0`OU#+mcBj z-&uZMWH74NR=uT0(8;nkcK$0QoMD~Iy!9Al_bfZR8cpB)%IC{O=EvU4qE4PUJ|ZXG zW+j7uyRxEg7)9SHg6b@S>9%Z-GUq+jg3&2%R^KMn1zlA2$zebkMO`bQ=(I_=A{D9||NSmNvMEbyP{!yPWOKh;d6gj-DNDv(yb5WLA$6Me>2&kR&4O zW5*I=PSwOp!7pJK=-sM;?Y42jDV-x9#5j-08xTBWPhi%^jJqG*55ieB^mr*}NC5EK zvtYu-r1G5>Mf@+Z^8Lq%2jcWGmXon&MC~d}4k&*bAY!oYoe?6L9SN5clBh!Wu%TdO z3%)}iu3Uk($596zd=-(9P1)-8z*`iJ{GHRAsxzBy#~BV`9*{smk89D(0eY8x7qY6? zwrZuYskI{ziRBBFwwYC81}|~EnW<(}iAalq233Ukh{LvBbY8bFSYgyEpuILeHagYSQ)&K+GpH{h6RRj

85q@~ZC71()BeeeVYk;OJE|4H+NVOMx&uNYRG)KGNsG`px-SaTL z0ge03GbTJ*%rK5~*6U`HI*$a z(VZ-?XHf!0msDZhDvWD#sHK#U01J%*o8H-Lj3bE-`a z(+o<<^{g%ojYZ2lt9$1o07y299_!;F0(3xO;kaz{)8@h98C?UJD;Zc^t zz;r=&^KB^?J7-Fsz!+IE1wTY92`c_C<+t3;$NvsYgQay`Sm}x*Pwu6&aMh>}$BC?> z3~RZSbrFy|kXuhXS^gRtqy5<&wISRZg-X8=4$TEVQfogL>)+-^Sx%Dko@rWfx{Ykg zu#oNRj-C)JQhzMNRT1g!zX2}97L&qekLY|G@J&^LVLOe&ep87}&b#y3BF~+SfNZk4 zUZ=Ai&4yg2BK123Dyw1{gC=ilc;WS0pXo!tOYfV5nw%oJh;a4y_v0$rfbFh#ZG#C} z{YOB3vRHlNL!4SOTDNMAiKvkhF?Vy6w;bBBJIqqYXE(nB4u$KsQm}aA z2*Cw;b^`d@IdXJ%UIwZo^55;E^wbXyF7LzbJ^~uMWBaBZ#Z{c@$MZn-!%3qD+sDIl z!qx9HvGz-bJ_?*SSGf{mV2xd!Lj&6Ylo?CpOS}?yKBxGN403DF3KgroRtcR6HbJLN zX(lhf`tF~Ys%JZb_$BzT!G%`sB94+RSRSd0dqJ%U6vPb}csP?C($gfP*(k&@5ba2@ z>$xl2lLBYkL=+rpxiM`s7~nXF@i-b!Y41}nT6lVm`TQPRnK6WQ(YM*Yw!7)5x{(<$ zwZOl!p4@Sxt~Y_qnvZY25)zr_Tf(+EUtE~32CH<)UP;(EJZ!wZzv0K4HVh*tODvVB z_cGM;358QZL&Kc|!?uI%7 ztnN`73tIT?f&d57U&HM8=Rw=j2*{1wV=6T^MTN9y0Z%O_dbS`M>_IgcbElkv*WZ?0 z_MHslV0O3RB*CZrOrX#ZmBoB8Ai?IwlXYmjjkI58?pJVQLrTX4{y+9AnK;bS){N^B zV`V%ww?$9t4*OGYVnGe`FKx|A{&+Is%eyRFsg>mc$xQ=^8#}B0``q>d>xRf!D%YZD zzMvE9Gg@l=81q4R7E-1ZK7`rx!q874f-m+&^V2cW@_a%lIT%*K(XZqOntkk;;IkT~ z{;sC(Q1WR5WF-kj!f=I|4}IDtE*K&c>qv(LLtSJ)xr@?6)>PL=uTosArJqD9_)>{t z*z|YijFSQgYGl9$!O4p^ok7C8rjvHtyLN3$1YHd9(*kD16YRG`QP12;l2z*56AX;D zkIuYBeVZ#4Cbk0&SkE)RW5*=1iGOLXZ zu{O*+=|rxU+NKxFgC=~F{72Xwj2N(shV{+avPxG;F-@@IAJPouq6&rU-CB3@3y8x> z7dL5{g+aFScGTpMPtBS&cA9i5JM(&ilfTze0R) z6qybqyygbLlZthl2D%ACo1sx(K9&t5g{)kiD4lFOUtJXL{trC;5SvPpqSD7}>{@gl z@EkIJi|d^bifEYi9yvUyrW!+wWK>R~OM8H` z7U;Q~&5tq?IX=QH`a5czA*&th{qHkDr z4C3EC^WcVqn=fRn5Zp;us8ZqEhW*m!VF6tiD07WxL@J72mA-C?-|Kcj&>S}X*8~1g zj(qM95BS{jgKk3#;d=&gkxrk}v`?$- zK@)5?fK-Wo;+H2QE;h4dBcH3*oF)lqQi=&T@(-+ zq|{Fv3IpW!OHqrU2di(|Qc;qCS;XbcY`+w5NUof>k$cwP1jtv-zEGQ>L>WS;b7%-k zs4%5kc0xR+jKtzMRK3mb$2Z*a% z?Da%E3u68DO^Z?H5(w&GfX-l=yS<%S(zLFMrjaAiYwg&h(-9q?E&IQ;^PQ_Uad1;O zClR18SLtb2vvW|pXMlSoFGh@SvA}G<{pe>j33wPbKI*AiO{q)jr^f8Ms{~APB8CPl zP+ zuzxT?9G#N!%Q(`DBFo8b$0Hh*YiJK0I8<^YQ-X5LTM^RnybwLbufw4}3Efbw=kdy8 
zwFQ+&a1#m0DsP@h#s)AD`Pi|x57mqsiH6B-fPsQ{SKm&%f|pFtZ(Qs5;8UB;8l$E$TG)P zcAqr&r%B0nZ!-};f=dLXsmghJp^&4$t7dK0x3 z$Kr(sop;dJ{r2?^(ee}^o&wPQ!lL{Dqa^`DwvF*nXB#zA?^dPa5mUG2jUynYm6 zK8a!Im)c~qW?SL-WEVvU(O}=^aVN88)!16X5&~8I6I?>5)#Z}x-N$7fWqg!Hr@B5{ zpDyng)%p{@fT5pc1>@xi)txfz&hrSW!IS}5L=+oW#Q9$3Y%9{*(bH@^Cjl6Nmy2%- zhf$5IB}G+0`MP>*g-D+ah5wtX%2*ovhs%P$e1O5Yq!G_@Y_h;7){@~DSOB))VhsD3 z@WnqbH<2V+!B=BA zOYK|6n^(?{%c0V&61H<#JvJ)!9<$G8thY1JvjuFC$h%B|ooy10jOcHix&Y; zb>9?wCJ+GbUi1PcUX@fTl0mcWf8}eow-p+>4~^(@B|k~MgJQ}pkKYzo)iVf1B%ldN zWE9VkbO1g;!M|O@5cWpi(F~9P9!!>}FL#AqpwAQGgbmb`Z-kx=5pTFjC7-!ANdZMm zMKbC6j``Wr-nK8nV0=FVA+l~vZG!KvlTCq;y-pOLiO8H$Bijacs?0B2K=0*WY{nUIeiR~>e&tGb;bW-x1hPOz){F@ z*U#&kRy=U9&B@%K7!6e42T|33Gk3tis9TCi{BsX?*P}3fNb^52 zBIy(3V&x6D%!$O%f90ikGVh7wlnghw=fqf3dL+#h5~N*9WZhlEPSSjHuRi$l{kTe4 zXlE#$5ki-b6%w|=>*>$V62S%9);l4GLmejZZfE{U~ zr1r(4$J&PMQ&Z9>WO>g=1PIeX!3%TKkpL^Z4G1W%{Ieb6uL$lfz^t^6><%v+1#kOz zNhjc#g7Fc?ugc#_DkCbfTkMzn8m562R^;b`&Lcz0WG2P*cE$R!6 zpXrVM{Azm|f>j;u@S{?9d@m*aC)bq9C7Z!Zs=dFK5)iUv55^JdMHGbWq{euSifIl0 z#13Z2Rl?X|sW5NEmWJEk@$P4}9Xq!ZQ>P_gA~V22u))#;s_G6+?yx`=q`y`k>zJIJ z0a)OPR#J7Yoq0ZXZ!Q$owAVBS-x|kk`k`)}N5s(@+~ zW0kz;kaLf<>syEI zi~i|`13i2_d0)Bf_>B0ZjNjsx^@y@EX#RUp;yXm3ar;G{cA)$!+lup9K8c!lA!FttA=51uf_ zoJAGyF%xAga4G0Vel$%RvhCqC`d8wRR6G;mN_dXM;i`dsT;k1 zI;)V27Sg^5=tx_N>!RK3?rvhdl1}4F)NV^g;mE~1Uu5`wxTV&DK&P-y-k>_9JtK2F zxdH6#$3?cNhj=C*f*&h}s$ernm*0Epos?`eGo}c>Rf>XLcnr02?=JesfsxC;bhP>0 z?<%9&g#k`7*n)N7aA?Z-oy`rNtH4=l<+EuF$*#YDtSinc=BU_ry@Hu+xEfa&#?v6D zrdu&C(K__em_lhP!}T*onV=E$^_Ji$C+)DxTu?^!jThNb`Jh>>v9(I7KmWy#j@nRFtGk(Pg?(fy_c z#)jTp?88?S_)nmmMk;sPK|DoM3t3>&LbY5W=4<9gM%{x-LDqDlal!dX)P+N5AERTH zxKsLUCJY)kW;QO2XjxWirYxFj$D`s&U>5zoarFX22vLsn;ua({ygfTQlWpO8FWSKJ z2>8tuaj*;z4qe4?H0pyF%VUO*2WGB{Xs5RGG=hznH4i%wgn5B73KS(?cghB+>B+Cm zQ!wkHNkXj2`WV`a&n`P9uX2-8;Z%k4Ku;5$p$!RY)4@XpBbmoR%(pl3Ihl`0kKp-t zHeKretN3C0FApf}4sf^*>NWuut5Jn19Fw@Gz{(B46p}P{)IL;Eij!WXn;ecIP z-{Q>V*TLc!Ke{+%0chBKJ3%<5+}7URKgXKUuR?Ba3v^yeU?umoH`guKsgCYZ+nR}-wr-xx 
zv7!AXb>beQrW6FRvK;Wv0YVk9c-=x1H1WPr)~ni_)sdLLDAGSvxr#8Ai9Daas=AtU zS?*NRUg_|JB+bnGIcVD;gfONr1Ht$1tgVXoQU9hJ5obf^J;h)?;kz*Fy)|1QV1GJ! z6l-vo_GTurU-&X4C$Q<#B=h3F&}dCakqj_*QD~OZ#ViM<_p9;Op03{Mhzr9EONsn! zoO`IO9w!FsHLqXxVPIeSsYBa&0t+A^iDbWpNF*!BY$-KKgj*-gO`dpDi(H4ybIL)X)w~E+?U#Zz#|#yzohN`IL&7yV~}gNT~Ga$ zQx!|1u5BkOw1M5wrXx^aeW7@TD0md^q_CLT#2Q15nM5nA*mgQmeZok$@Rof*KMXp& z)^}{$0A2q6)lFrZK2JAd_@##iXC??{FkSr(Ggz9q;D)v#*iou_csqPAT&{SaUK>Z5 zgfmsJZw9HNU+~L(v#q(em=%ep`V5XMs@MsE95CAi+F$S{Irw1B`aQ}R-^}z^8 zk)WP;q$&lL{V2TxL9=YLc?^b;W4HhF@&>x9Tv?1GRAz>6VY>(h02|bQNf$=FlgKvc zI(FsRp{s?SZ(BK|o0_iMEc{Wu1AmZ6;()2z!2K7LBBSWufUVB8Hfoop;xXf|X=;#` zcX{Qz<(>8*0;}C|L#2WDvCmq5E}Vy%=<8*%ocVB7ZGauAzAgFVx305_j3r?`utbkL@4(JaSbyh-Wr{ocm3H50v3Q3 znl9gYd01X=r=ii56p8Xjjh3g#RV}uD+*#<820JMYeTgO&NPdeMuf%*jNC}7S3Xy(n zZnwZ}cww9$Z^~$Fcf=NoW8MD_3XU0(uz5Ap()x*YyuF^r$+?~8;cThpTM)#A^Zbe| zk#Az)o-e%qE67ZBLG!P=B~ZbsKp8Ea0-UKD+?kIg@-QaziFZ)yvc!@Z#4Q$R4PEx%X( zs`z>oHA)&SM@iZ&+@C0`+IMuOSIMJmhRRMI5UeraC#iUWoAYIa*-Ru@EHmC0bPa?;wME z_EtgG%93M9czdx8i}!V(TIB-FjPXl(Zq&%ChwyBMx4y*)M;a=o_7_++UKeOC<+Pv* z9k=ZT3&&^n%DBOY4=Ps8GHh5tx?9zBnH3vXYnsK4b91L@t-OSUg|cPlXzL-8F-W7GYz0ud?i8>a43 z-c~aDZK8nb>v8|2a*s?+hSe5-t# zh!)6$P&J#^KLpxNU`2SN`h4N*s48qTUo_~EkQLE>bCX(dPuviFZma9x$PjzXFY-4@=K8efmC?20q5Q_Vxb!e&WWlU$IkVc| zK4Oz)6ZIb(E7@o%U`do=ruQuFL7QcFG8mVA;rtQ*NXfr5K2Of8-K&n?j0Xf8EK0{h zhwGlB{2Md-UZFWI$7tb}iD4sGD1Z@_4TkG06MRsYHO_ckD;zc=9MerZa$goBCT@|- zCvO@D+6k6+FbC)1P@?(y@Pun^DgZlFGu9)maK{Cu>V$8_v~YqUhQRR@>uc)IWgRo2 z10$d&G{w^d=I8}!0Ydl7Gc-ki0ZHd-4<>2H>r+Tllw+aCx`+~}bRL{B z*t4LyJT?~&TnreJpR?7Sp)cp^qgLXB4oR?v6Two^vFoKbgWvKoJvvmeU zHnEO@(HyJtg^~K3xl7xLIrQ4Pr8>GH#8Mq)OR3YIx(=2Fi1-huJeMb*hxXPP&2`Pb zn@pel_iVg>{za$UyIBmF3pP*q+J)54~uP{hWjIBr>OFGh_ z?np0>hl1TlbtI%^HMCm|Z?I~;)nIIwoVUr{KmL0Dn-*bKaM8!Ry;o<7i{cm8Qai4j z&lSv$+nDpvxBv)tsiw*wQv{rz>97BO_~8q=pz?|XP~R?vO454R-uD8%Px0a982E%u zo)EGi6;77=j5#mHbp$5LcSO{HiNwJ1tU@ja04AZRZ9Qc_Ee>$|Jb+82$2><$$`)@4Szq(*Fl>^s1Q4g%k1Mt5R_At*j{FZ@}59^HVI9PDD%{r;RE4$a0#i3kxIY2z~ 
zPnhmWG|5dVIm{%gck2TApZ5O9M*!zLSU7Q8Itf|NubG^nX(oaL?0{Sn}^Lb5;A^PGXOEbNbxJe21hg8J4`)F%^tF>`v~sHHNGDkX7qIFhGku z(qPX>mgUkZ4#eTXH5dL}$-nUcbJ#-dPvJHuoMZdg3V;RCMMR|sd&H=5rRl|aSB-aZ zPK(I-9pd=4uB?r!$q2Tz5-b>m8tR0uFwsb59KFvD>xzkgw+pjlOYm9GH~pxbG7x4e zb@Eqa2N}ssnv$6C-G074emDIFd9*nykhFI)D62CIR-~EvGbDGOLTAXhS84LOK4~5M z6JW8xxh&LDqLw4>UyYeK<;4qGzh1-Uv?y{X<$)0|%_5lotu#c`-CC zbjI^nx}Z^m(!vl?Rf+j00+lLKNAU`MS!D)G4(^!s)GCrBh6xZ{@z#Mr_ghX19^%~N z2jb;|yLSFmnFe*_DjIw%_SB&Gh0S4caLR`M7yK+M5wz~|L=R{X3%kSWV4W4*-Xau9 zLF8UKfOHb^@BclW+Dg)PEQ;plY>@5B3U(0Fw5lKU=i=%tR85ZgFZ{bViX88ye`^&kx1voXqgJx^JIom5Wa2d8F ze>OTLMCU;jNKB+^HYiFIkNol{a9;MZsw=#dwiR?uL*$jkk(6{D_8@sS!U2a7kr$?P zCXcfrKZtr6E3IEL*jFb~l7)i%htx1o+bndW!?9gT_7s!MhKq29F>BYp~vL|mc zIV)K+ltHsubF=q=OFZA;kvi68+gZsL40Ti1$g=ql3Fv4CY6>vb6)UKXDPStB<>XCw zPzhGxVM8V#?%j~Vx#)Ic)7W)~3II^Bu{-RmN~ZASWP-l`k%*=z;}PdKn%>c`m) zZpCa*xTY^9R(sS(?5tciDRdlBS%A@KJfKSMikug7 z_Kn*hA#*pMk!8KBxlwuuHoYOcFXr;GeLTE)&L=<1lTM632vmKdH>X+j!X~ckB|^8% zdt5^hn<=B^w!YD7CGr<*#X11uKYoLVjsEXejnPt!Z*osJ3wxyBv6$?6*6+{I4nrmD zLDh}@*}8+BI!ssOYeo99qu^gjmLV{wkqNS$E*?lqt7l4@eS2<2`Y1PnO@`(Mmafe%oRe9L8t`yr_k@fl~&5h?Zm<&Q`cn22CC&Iy&2 zP}{83i^ZhafZ@~TJX|U3q-yffYRqf$#y$7*YcRXZuP)u%2d|(@ z_I^EZcilD!gQ@2e8+WkJ29rKrZOn`@DR-V8iCkT`8cF{l8EH@q2c0k54QaVJeGb!f za6{cH@eD`*J{Nu_OfdM-caOE5LNQg)ov|aozGa|$i0DSvY}GIgB2dlm9te)zap%D9 zST->~?G21S=!Zesb_1Pb=^-4b$sv}d-bcni3O`TKpbD<%d^OEr)eg$OcT!CvnkZcI$<3jP3aCK>2~ zB%=153qSo#NBT~ufm`{jG+5>t9GVxbu8^lJC6y*JOb_$p5tdSdV58S_{-tWK1ywxF zGS6g*ic0Nzm(V7$)EirJXcd{A24uK-!=tVVCif@MSy}&ms%coqsMqR{d?laaO87_b zyq?C+7wg97oa9uXsa98e9NowMz0vh9c_{py1k1lJus9RDk}+n#qH!z4gG%Nm&LqZc zFi0}$shCvm?kVHYB_{82LY(O8cL-cdrZB57LDtUS1XBHH1t}8x+;`DVOKn6_$nzjH(PJS2uoA^yk*M+$J6RYyN`?M>=r2GD6YJ^_wC?BuF&FaEEL=N-qC zm?Wc4a4Tt1clL>Kh~DvRx=suqaEcmZVE<`Xm$Qw~hE>erEE>iWm_5h#S~%xX4MWkG z5T_&!3PA30!4kQ|X}Ib#5fDG0hxvvw!z2hN7Z*uX;q<@WF~8RAELBm4g9D7APPIV& z12GU6A@NG)H^y12$YtVm`3wP4Ia6~osQ<3OmuJ~x)=N;YOzTR#jLW)k%?Dx}r6f%%dPQl$so((nuL6Dz-zL&1(hoSKWIh+0IqU3o5(?JUE zhFJ6R5qlA_r)}S|>dg^VN$d4a* 
zOP8$sK|n4Y)mI&LKHe-j)T^+K6zSstElT!z^wH*TcSW@B1pXvsjJY*9BIu3X9mN19 zb1lXftJ zkt&}jEO@R7&WJlPSo$-tu(O8O1Kg!rGAPS};%HhYWqh5dAk`iDq%ex8@#ue*{uabH z4cy$cghliOtt=;HEfdSzT#Jy~m5nmeQvGO2sWpSkqazuKpy(po;2BGJ2_$;n9iupM zhUuYzsR?3-+^bu7s9VAc`9eB#X|?)%LAuB-f{H8!{18RCftlZzhMWF1zSGaHVQ}Qy zX~hZ9?j%3&Kr{tXe^q>!76^%8W25cPx5U0}>Ke=vrBR$W2XxC^TJKg-m8-UBS2vhh z=7e{}*E$&~X3+#DX!5NsxkQkmJuC)uO`JFk4ptVjfpaS%+SXspTcP%d>M!V{{)w&< zr}rDWeFJzp0u37wyHTa}$BR!e%*om@}veH2n} zRfLJUM{el4Xf{rZ$H%i{M~&hgLd9!CAAupArzg>zoFr*95tEZ=KOS$jRnR-usq3fk zF*Pla8AY4LOiL0#WwP$0U4Bus`Fq|ZRkrR=MoaX&fZ=Lq_t_iE_|6*i3olTk;h1>a-BR+ z>-5JJILlOR?-TqC-q9?MH8Yu$cfRDR5DnW#O=P#4?-Rn_0Sr_hU!wkuyJ*gS?GAoB zGsxmC5FG?6?+zb}TVfo`4^E6J-+43CzgX_5KV4Zq<$Px+FqF>Y2q`d8f^slfO9F47 zf>m3&lnrgTFI(%9wc(}S`}CW7f2bg}N#apWCzM@0?YX>=Agz-kCKj}qg&MDr_@I5b zlMwXppta?|F2rjLpt@xC*sNhdxI++t5>DgU!{@W5GRHzC66ILr{+mpAby{T0wz;=U zKRo|X|8(3^(hL1Z5ysltcRu9uRD41m_m~Q}=@%mkFK1@;$MPnf2?Y}JodjmCfQwXk z)Wr9~`r=tcuGZ=?IolOQ(>=OHqO{i6-}wVa@JA zgORdIYEQlEW6GI?ex&dCZ}#) z+}I@$AR3=5H;u;Glk2CQ(1T@&^R4J#rE00R=Lg1Suv422p5R;0{NxOt^z?HAO*8kR zLgi{N@5XAjaOguY$6zTxPnrMkD3y#3ph{}6_NQpH4b);Sspxf7!$}_UG zGYEODSzC^!Te`r>fR7-fJzrMidnb8>Hh6Bta@OX<^t6Eos&;XW-FSSvVVQ>tHd_8*8E-G zGL5!!C|afE|wnfawl_i+gzt#rRzz)p9mhY}+?4M4bEc?Er~;Q@L~U|V~H?_HT}i^R@x z#U)V_Es=*`U`btBKu6N?^ISfx`f~ea)APD1dAF`faz>X^vuc1)c^c~${USrBil=xl zh9Y)TD(pWx)?VeQniS^xuYWy>-0m{qhU90luM(cpK7Ybb&qOK4Dn)r4G(vpnJ4yEo zY41~)@RrR*gpP{Cz!Pg5QI;xeRd-*Obr!2cXn%EP8GD=S|)cH}~vyR};()?k*T z9*ef}kG&$<>xtb37$c}HsvPeoHu3dwG{oA!yNPWF=+Q@y+;+ci35HJX2R(wA4J~%M zaByucmPNhG;$t0Q8`T^~+ZjP6S9|hV;cCExb%#rXzbE5AUnMD z+(KUOQ@x6h+Qo^Wv5-z^J)`8oAh~;I^>;_MG&t45U3rtw}Gt4jBXI&;j@% zRbT!W$s8p;`S4i>yN_R$(~${Lakl3)5Ot~Kg%vr0cJ_7TTA07hXw783@F@R&xo#6a z_olC(cghA+b3wS{x7d)y8G1T8(LCv-Q7S~}bt)t&xA2JdTPlnHCg9+g6O!#AV&8eFBtnDwUCFz=pV1)9ffNT#tcGZH2>M9*8G*U&|XDON*b_|M^tLgJM$r8M2ffGfa*)92$nvg}HC~euXa;eCqKkm7oI1CVm+XwRz5hivxk$ zR0J)*>PVq(9})7!9!ZKvmuyE^0a?+b)8=NqiqR%EV%aNmzIQPWnBZWsUeo?-?uP3g zyeZiFGK%a2;^gB_>%3z{9v6uqJ2<}~`5;CA==#nB05Cu_i!Cg({1L{XaJfy_ 
z{mbKoX}19qMC}{s7T;*%Ln%b<)>V`Z3k)RZTBD9Gto8;PB=yiqFD~z$5<{FFspDQo zbzlIeHr9Auk{P(5UhO;|*iF4nS^B=HW%v8XZwEehMM$|!_Fv>qeRH-IbxuDd2Yf_Z zB@pd;p;tmb+OY16+d*3uH18>SX>ZWxM5!_sx7NV&!{SjiTRy6u9Hn&3g)2HV^C|Ec zQ_AwTKL$4VB`b#n@cYO3?U<)@zc@SF5h2eEna9dF!;V?jjB zZ~D^6Z7#bzr5%YU2>KsLT+RqoF=pYc9#MZ$o3p2YSzByw* zLVw0YK%3>bPI-hi8#c++5FL7GS6c&q8#}Dp2(TGRJevl+SS`dc80t|ZsO8kpG^nU4PGu(-_-$!*=520Pzj&lUWhG7`FOFcgxGM}PHg*!))R4HFCO^qacyPAc!-=xat|Hx?e!m=pq#j7oi|^5?MDT(h zNXxW=%SnRa_LHx?!z5OCT}KEMb2Rq{_V>Pso(-4N2a|>4Zd=kcJv)sMZ;TFSnORHy z=WVML)UAuui~2(8{M)rJpK}TVb=225rskMw=vVJW!J6!u=x`6g(EX`G_@on)*WSk^ zPDzInmU3_22Cq;S7||<3or7exTt$W2=QP7@c~1a2Adu+(f0Js zV2z{2ZGC5ysxqYPF|CM-ip*HQT~@OU>|?|v zyx)EjX^l79KxHp?PCc6|H`|E6B(92q^<=m=-!K&cZ@;N=I<9J?cCJb#o#1gFyNJ~g z+XVj(Rs9_V)&KesPb~{?w3j+FkX5G8ePR4R^&49gBWm+)b zrI1U2i=lvSJ;DfW=^Yw|elxir4h-yoEU(C0yE3cch`~SO(gM0X``*6V?)j zJ5C{GRVsLXcF}@}mwe2>nH6AzpIXwQqvgf{3ObM!9(Y04I&}L@D#eo|;Au zMQ`6508}`WoK7rS30E!B6>R;Jf@5`Xa5H{n*zgT%!CBpfHK8qA1H_CL{{J<&m-)vr zoU)AK`fsYt)ziDHK*E@?dZ(RqKcbMm)?adQ{>IJUDcj8s&HIbS_F#oC3EC)`}@9)XIk#;C8UT{X- zqQ26E)f3&o8Ws`7EmejoSgU&?o#T%z4ZlsMIfj=R@GjQL3jYDt^(*7q-UN?4@c-4a z^dvvOFQ#@C#WshOfB4n^e!L!8s--M(|F%vEFc7vmu;eAn?-!b6i0cr(q)%GrFOa=7 z;Cu&8_szd}S8e6BfKB9@(g_b|A-T}Mm2S+iTxi;#Q-N7D+}6_~SRxD?23tK>YQGaD zfL0SGt4O(t01g}Ol0{%@s@#!gsPg8 zt)Z3GHv-0h9qY@PJ!W`5;aV7)t~UvkCqVl(L~aU!T&+ZL^v4Fn#Wg$4#%W=S9zv%o zk^tp)mjJEz2a!9k;7UA?AqGhrQ&rV?`#a9Q-f$NJ#^CN-$j;gH84tTN`Hbv)EzxKf z`Hj{+D{XsQG4B|bc{%9-Tj0l2EL8wRnW27ePg0N#2mlB;39v9)Q+>iI_9VK%qw63{ z2^H$2jFO~@?hFjyUJH(SFtGQkux$UXlGjai=Dy2gGDHLN6=kb;45n;GapTJCl|hg5 z^mUt^6+02NL@%Q-L8z=62b*Ne1b?0h0J%M{4PoDSQ}#0513U%DHn8LR z+{5m4){>lD{$Z!$PyGOCIqz|yy^44ZO~3}jvvql0e&on(>H6jt$${JQPEU;mJDiV4 zK0!QEOW-FKjM%7%=t^X3G?aio2wT`Zu7$t-G60Ad<}w~UsCFR;)6k0W*is1u>mtym z8&E#uY5$|eAu8jGP?4gxD<{Ifb9ZYv(C1@Ze7g}4FX8)@^E=b1(p%@L?Eareuv6H? 
zU=lN=KzqA;w`p{l)cT zZ*oSwA8k3s7Y6c%5h4|nhluUENH6`+pWo_$aS#w|KFg^0!Q0z#FEM&MnXcCMc3W(9 z*g6_Nb#@#;g1A!#^@m7Bc^=ZMtbB?Vw81v<3G@mTq??ihuxNowznkuN-E&3~W{`YX ziNMHPumg*3RvVMrY^5-+pd|0h6N}4W@25Nl^_K6T1X|&>!ltjDKMDpZZfsON1)w^PVg=-Wt(z;RJZaMj1CtTSVzPJY*}!K$ z#lt1`3-At@vG*ASy3>Myes1{IH~l5rTZ-FLDNwYco~60!bu^nxT02(^dJPYbFZOI} z$dCufolBd56}xU>LU&~p8kh@7(ye>@r5ImLdrc!Um!&O6bvGS>`ev8mppv=omX}!OX07$cib^2lWmJ(^-YKen4viDAmOZhOO?FaGfNzY~ht1b6vtWLukf*FBMdUUwVY5K#pk zG2>4P#Zz$e*KSZL4~RL(y)W%8#Zuoe=|VrZ;uYp?4Q;3(C+?>a>ZhP-?Pzh(_5}$w70Mh5%_RLXmfqnCOwRTs zv`<-wl{qYp+n}+kKZ47G*yW8B{FVS>-Vuk>lsGg3a#Y%;tAU`~osTXpb5rsZ5??ZB zLEQCmW#o~8whOe zh}t5LYP+?22OVWVm&2T%?Wex$%_4p%i89vQ5h-vM1=Fo7t1=~U2HdQ>w&<+V?<3fA zNr50`T=@k!<=AT8tWG+^7I5>sf=%S{cEWL?g#+#GQT@z>vq#RcEbNLda?THDt?0Xd zFCFmwoG$K~hh(zi>wAo^g0>LOp+Sz7!?u2>vkCN^M{d3}4bbO=STiZeIj2JQL15+f z*zDczj#@b8uSVy7a2A_=y;!2L&x>|#7{**1bBJbAB>lr$kkF>Gm;luhOsiNU48=I; zzB4CHc5l_k{v2{A;FMZ&Qb6qOvhNwswPPV{Za5FFPkof|)^+0{`rfpAjjb`aQRUFf z*h@Cf8CRDQO=(%h4aC=C0BM}vUCQ#*JDx!ZGHM%$XhYQmN5$5D-O$sJgzjy~*3P%* zN%L+FSYnfV7LVZLPS9P-@6#JPlO6wz_F+#~z!74}hcM;$0R5QTBpFFRh+*)2!%)zC@ z>!(1hI%gPI^oxQS$>0Eb-4WWFJ9*qIQrMOXzSAE6g@X*w(KE8`8fVQEx|Y@aS>txP z<_d^$%M@8*&<>g)Y=V7CE% zMeq7f{7Weqti|DL|cMa$m!WhIw=}UA;Bs|M9k9Qa{n1;T8zCe}$yTJgfMe z16&s2uL+B(Gj%%LBwKmuf8Rjd+inb7RtyUO)T{Tg#hK!w+tsm1vnXn8+$)hI3%f{M zYa79oP`Zku{)*IxeTWx}dDjlAF83(11D}5@dg%2TVwy&&*G*9%OAksYQ{(Pa6b7m^ zt)4D(^A3@)I$kFgmAWM2LT$4vh3#G*t^Z#ju`CvJKEZ6mY|^&LyrAXm=QNX;z6%ki zQhu(#Ko7pq1U4{UB60R zl%nrv56QR9*i47%4_WirkQl+C+!|u_(IY}I&H@~*djk-7fV!_p^tOY*Ixss(wFxqVkNPAe;Xn3q9m!i9%==x0qUe~YEf|fakBg4UMx2=y~!!Q46 za?RU>{95CkKK28QI{SE8Dw~P2LejIPw?A*`5?kH4`{a<3`zzHbvztdOz%?_wG&VFR zrc5TF88*$m>iLV~7)p>;YUf@5#bN)ZiiYKZ3$wM`Cbbf;zVCO%UKl;O5M&f}rQK0Y z*)?KBOn(-VP)o5lM6bMJH{h0fZF*%nZiLj`uFDT;epjH2BaKHL&^CU-F&O5IiNIT(tXoy;#FM1rGMKX4 zq0~n2HB%l3ZUZ*6a9*!gokvvdZ*sjO0k+1F=w<6TgrXl^()Lz0p;RxZB%*m`<4r{B zM8s?_79#C_jE)`&82&EJKT(9I@E%MXLKOaAJz=byAOZBiHt1k#L3m+Ls>0=g{9S9INK1_f3 
zXO~FWjlyIEbUc?0JzCf3GydhXn`<#sM+O)btQ514FlWkAAdY!RyA{)|%m`b;Fed~* zXUtsuW%Q7Ss!mv7hzH%)kc7yjg`D_?mxPBtt5xNM4|6AE3eD{+{_>t|ymKVPD}^$v zUtqe+)YfF-ZBg4)Y12>?;Cai!mz<@O{fo&YV8at7BRM6nxm%AjXbZlr7D@x?A`j!T zxo;E0x+l#@4M2gaJ+(0}uAy6m>6lrJ-S~X7h~NTH-(hUCCSmlEjz%RYiWF~Q8Phs| zvtP&{7Ki1+s~W&9Juu4LNqz}Tt;zPHb1(tPi5r@#C|NM-i-wkR#NeD8iV*aAV1JE|Tf zIKpvfwa6LAe)j?nwLTxikp^UNc7_^4I{hkRvVz+Hs-5D6^p8UA4`num>~MaW<9+=! zMy{{2s4p$Jp0*YH8v3evp4Og{Qjv?l0ZMVv0{$P&#Vr3}-z` zuKI7zO`c*D+G9BlHsN-3NN6c6y9%H>C`)m+g_KNwT}~VjIKm z%dp|1x$LIunU*SMniiw%;04qiim*8ner56OA};?^A@`L^K5zj@-!pz{K8G+} zMyF#Mch4hK`?|+!m)1n1fLX3^1B*RW%gcb83C69P{h|zdlY7%{Os!2MZhF?cpSY>z z*}z%4X%9}TPl!r14J>W-$;uD~hCd@#J{aau^~L*99mm2wNKp`jEGHksV;C1f4dC(B zsH9(+oC17NDE_sIjrbNtYA~#Lx{TOfAYk(*0i{C})F!EUXegmY)xTOUa+Nk}0nygo zG>j4^~fux}ddZPo3RCj%K3zF{$i|DTyR6&Sv0S3yrR4uVglVZ`O3t-_9Cj z^psA{)*$I26z&p+G9a~7z*=0v?{<NI)Qm{==As`Th za#?kK0wkWYl#cBBL$?=kWl z<6*7Mq2~@ANBd;-Zcs;!G?s(R0;TKME0!D-cK;$E zy60`ifg<=lo5$@^DCeQ(nfZ+l!`R}30_qYA%lf8B*h4voDzmJI@!DfGO6*=2wFGy>$Yx(~uey9mMh%=h=i!Z6C_Njwpe+2LGCHXp6wdJ~Lk+>2JW7A^gE8r> zrbC9|$at*Dv_EW-%3e}Hy3`VP8*=yBAMzaCy(u+rLQnbbv&U4NA}DnPpfAtf_dhqU zA90X0ORT}wTh9xN3jI!dWMqx5Q3#u6?|p!iMYsH3HTV`IisQ#Zt|zRi?Eej7gL=%OYryP<8ZU znAy=QSTA!J#wK3d!R1(&zw9mt$eR*)^0zb-nMS~bqO3OSfT2_-Gn`sx>$r2~%F)nd zM(mpW##lL_`e1&5X4*IvuWS|_mL9C5ALrzG@bzD8R5jiC&XtOKQ9ohd7e_l6T zy0XhY7jL^7%l}@i7q$usoq_{(NA9)d+yM?}tbDrU7U~Yekp7;cehXY_RDZi?&h>Kr zTEP~~{pMm7U{h^QM0u4gs7HC95jjL zeA5{C%Go)wRRzzwy0%1z??6~)fX)M3Ak3bGC$)39=MXwKq>+mH2ozM7xrcD)$(-w{ zHdzL)&%D_FJGF|SkrvXadko`Vwq0Fpd2FJYTeiE3$;yI`3XEsxxr@J2inIlyw8m__ zxsCGJ`S!Ma6(FjNbZF!|g zN4)*m>PU!sd?o$fBNJ+77wGP5kJT;BHHrBU;>m(agzSl3!SE(=<(=d-XBvOLp5p6; zi#oV+k1(QOPI#EJqO)2yiVUSD!hD7C@cCsM3OaHB-u=ExG#1W~O~oT^1u>Ddz(_Qa zJ;cKgw8w#!%7<1I2AI{P^bZ1aJ`~yfSs3fTXRuqs2H(ONI&CU4+`%pyER+r=)w^m%dX?S=P7;kuvVCKE@v3`7jhQWT`T3C33agsi06&+Y zf_gTv;!N7vIcw=&U9%{G7|EGDjrL1`1q=M6cd|bU06Rd$zr4L~)Rmx(ZKGctC%v*qqjPN=Zj@ z@EKe0%JNKRy-D@v>yyPGaJq+|#<3vKDnk&mlh@&qAAwcy_2y8=z3G%MN}0e!*R}?a 
z!l-NI;~+P%hZD4)M9#@A9*EX}Y8^6q2r#Lr|Qn6grX z!`vmv6ND!S2eaU2bJ$PkqQU6Ko{Bc^UU}RLSf$o>O+0AovWh{crl$cu4*h)We|>(} z$&2JTr8jAss2i|h-D(&%Z)7zXe?cGiiTB`P{qT$1CjYF#S1?6-Ag2F|_n%>-pp8`Pt7)t zx?P!ta9y!yy<+UzD%tEx`@Yn`K-bC}7?a)`=Z{0bTD7OUZ3F0d&k>;qP`Pxii1xgZ z`E^a>lYMR7f0lqQ9xQ-#GBMrw)m-r}HNwkYA2E{g)3L?mXTvA&l?wPdkaOrHIRAQA z7{t7tgdVQYvJSpY1CLs5W}6Le3GQObW4E5d+NUNTwmaAl^(bIT6<0lmOsh5l7<;{) z@(fKXVevZ5f9v%M=$@Czwo7+DI>TAAu-tyQft;yd>4`0>?XIl^?UczWVKSYq6KzY( z1AbyIvzvK@bAn6aLX{w2aqZd&opMm7`Y%v^KG38SQQcf?CjRzPG$@g3N#^Iw5k5R7 zlR_?$Exa^?OZ!Hm-ZznCIkh-*_K4JV;pSWuM}%zvlYW2?ugX1Y$#Moc;_i$QRfCdS z1<5Z8Zn>~WUm!fjjz$k(wt=;8c+RPVftr%?Sxdge@)7#VPIjR68ubgQHbAPJ-z{3x z%_HJBAFGJdCcW+yVt3ShOGFSr;lEUa!T?P{@b(fc@ay-_F2k#%?aa7KDf)6hR= z;K%~5#@!s&;;CiPaUzQ}vQ2%K1${aVDXlA0MmP8APHy1bbzu)1%_;1pC37^#5`B_| zQmA2Kj`W2YvdNIDv3!C9L9V_~K4Hk)k(KAQd1FAyrE->~4Si04-1cb%nL+ZY@G*vD zzCuUv;Dyon4bPuWiN*y#=G^teUp{k3_{2g_qdvq(-P;ReE2YJUt5tFhRvLTKJhH+R zJHiEj&j0uuLxNS#3RiKhyP_087j72`%VqH1NN^JHd$0@}){B%Yt*JvP0HXu`sm&Id zo`U?$s{6BP4;j+ZZ)HrFQ?VSlFp0(&pB>t~|_``?x z1*mTMjHu`E_veO#wNBpx(gX>0P=9PM6b`=5PZ+^*=HauIbByRkwizzAE@hS0AFQnN zxe}#?JU--p=@_8jB@~?0no5pF9K^pjQMCcDc<8U9YtFdTJ1tl>r)pcBcutWS$iX|* z9%k3|Q*JbeJT(OXX#I3oT*#Ph@>!m;Xd%7=Bk1S0t=ohI_QHY9`Ox@sWLq_Q+Bv-c zDb3=<$0dz1uQ6j(O;1vdd2e8vN6T{|U@T1x>_jN)E9Z^h_`f{SG*2GKY@{R=2&VyS z*de!GTEJwR*MLQ833(`G!E%(jGLL2WtgN)W2RH;oLc9?PFfWBTTcG*03bVcQd%jba z6G%BpyuY!n3#XKyd;lj*XC5_@>|9-uma*>n?!iDvZh^`(73-!W=p#c&n(HG(ZdrJFsg(-dE{+i$ZObc>B^h2zGw9xK(kFqFkDGDB%NpidR}-1N?OV%laX)AQ zo}%Byi1vqi9y5+_wNK7D&wp@Ib&nM4P{OlogCtn7>Sk&kDn+Vkd*{XHz`n4UFxHc+y0w z6nm{RR-`#oLGDi3V$|O(Hwn`*U5>o)U)pO2Cj#EZdZBewT#!+{?l7!)1!93Xt-(=m z{}N1wzrECaIWa3g4~SmDUyIMDw-rMz;X~{ESunUJjZM!AuoK?GwB_6utE1PMM_{XsF}I3}>eQjtG8rZR!#zYO6S zv`iD*Z=)wR-)XU$2u}}JU&+283C>Qi#3*OOiiS4$C?%w^E>&`r{(0l*-=#8EBPoUU zO~4Q4{bCnKJ7cU_GP!c&s$MAxRw*Hm>t)a%_7R|906?l|6~42}J}X(8%!#%pGES?- z|9hV#pMd+Ak@rw3!%JB54-3xhs$I(~(1yZcuBz>)W!qqdpoYkm!y7tA+Ar6EF~k=q zfWI1{uX9rvy_xCf3aG^A>e5k&LO3apBZX0*uR$>Clf@^d!N1lmGO*YwGE+P;Ywwm` 
zbB^Rkm5P2UBC}|FY5r40fgUC+{3BM9U;tgvNFO!7tI>A^;NK87K&uLWM_s=&|M##> z$ZBCvI8x`8Ev?}LeW=fuhEHRm4e;ww8P`e!Kq=8tZs_(wa45`<_ex%vW+KRg4L*!=U9dk)vR?Hd?CGGs2;*!@@t0_CIq%Ra%}AX z|4uEwX07liiX)3#ZaRp&(KYOo%gAdMPLpfnp-Z4>zjYcd^#Az#Et}NO;47x=eEVoT zfLSy);}&nzK@EjJgCm>yAX>)KJ@92O^qe*#?SE&c;$!D#K;uf@k0iPv7L-f9kgdxh zG?xC5^8`Ug-%aMirrup2J2bdG5a>b}Hbfaf!ZeyQU6cY!SNT!F-zy>Hyk4%Hk;`_clu9~9|`wfKN^#s?hX(!(Zr15&?}`Ff=Ngc~lj%&8ZvSXLVr z7qv|H1D0^EfNVuT>0riobzQinaO(t_%FL)8@NtQIrx0OI1;~$M3H5y!)$Pk{;-NPB zopz-wu(B*S?o+-G;>noU|9Eh$%~hOwZtTR|06RNQJC5ElYc-uxn^$zUH*PcwRr76X zFo`Tk*(<%n?oc5>t=1q+8q&g;N?O9xVVN4&d?pQVU<@uc-mx3Ai|!=n&2Pr zhNF)Yc(!Qt=)4Oy$tvSVQ6chH4lPjtxy4l;M|x$~n3sktK?*slEn`-@&Mt!W-dA>c z?aAO+QIA=&VM(lRS2LY4q__n}=SV23)|N4gLtjAe1MBL}wk_GRu!l+Gw&-Js~uG07f0IpefUzr z)!Ih>nlh$bLX(Nd(|z;q9+wgXq58|g{l4~75c>hSVNBTs{yV80rFUhHPiyt6HFq^@Fp9*I=;7(paYpHe%m-u3iE3UfJ? z(p-r7=1?A7SdKzGjdUrQ`;3Ago66q{u(|NYzUXH$bj+*50a_5pBggj<5dR-)%UVc% z@EZCDeSAqRUss)PClJTM$!C|Ke7pP8)6M&^nTGphpXoqa(xgR&5#wjeDu?R-I9{P- z!>0i?9wsxdfS8lnTshA@&~5G|A`*|8Q!uDZwVi zIk4t}&a@hu^A<(S^A={Fv{PO0=h0dma*)KT_l&~T<{90t&RvyYf(WZOmz5X^0+xVw ziF+t#R>=@O?{A`JQ*Y+lIJeMNzWr_ACL@L1ce|)z3rtLE@OD@POjmIdWklmAu0GL9 z#wxdr4oN^VL^H-S$3a>KBYf$?U&{$+i&}Z&KVB|5>Ry`Rl)zl4H6_3qkv&T!>D>e} zR1WI;AW#6a&tCK91&m2(rg(8;A3}^e3-H!W4^{YLTKweObD+UQzp2;PG1(IY^WuGR zYoQVng0Sx|da|T>W|52ur*8EIwk@M}YPAS4-O*Vr*oXJH>C#!ku#(CQ1YU5UO$uCv z?RUC8{+p=GsJ&9CSa_k{n$sWE6f#vLW0r9XL}=vGbksgz&AGZxDUBNPKO(e@y^;ob z>W6T;A(h%_DV&Eu*?%*HIY1HM8PThU7-Hl>;pI-UVQT2@V}w69-%%uu1=<J`VXe?T0zbpCtRJy$ z_Ri4EobNngD@T1t3r|c0j)G-zVzaC)!!>dmFo-d-=k;!) 
z051qDH{#9x6JUgMVxqW->gH;XUh}^%S$w>9Z7K8SNZG?(5T1( zirz#GZ`jhiCRUK@Y@R3|$XE}stm>$*+FgEG#4e`9VDejQ{mk&8XMS0b9xn9~)|bV1 znHN-`HI*|GT6p&Nu*->hLcJpVXl_U@kQk7xXKaHkX)h~Dox3(@ncYV3hhpDTqtZ4o%OzU9d7J#^c51QX@5k|J`)>0d%CVzmJW zyOS?EZ`sUT?6FuyY${q4EyLJe83BcpQQX=M&kR!tI}A5U4-RkJ^x&;7J__%he@XGYHt6Wdp) z)`fNSaE$HINK1qa?b%C9bRJR=*1*91zg;j0sb8N)WCEViwh%1!Agi+#?~->HR=DZ2aDYdCTFCIO|pM=%|<+_-tW?t7sQO08Rk2!Z_`_L;V&1?>fUl&c?*ip zPkhy$OP?E^#o+Bi{zmgZEcY6ERtS@xFKxa6e0LlSI=)Dp5u9Pqx4y6eLDbNs>N75a z@DF%K={9cN;2R)L#NY@({Z}rJl}f{ySCU*?of;KMNSPZdMgg+gE`;x#onk#;%a5j$ z1fT7uOyBNT*%Lp;1=j^C#NGP*q*P%LTPFJTRs4?*bAm;wt99LG5?cToMJSfWT@st_ zm8$NrJ>5$s<@o~LgfUZskz0OG|C8xRQ%1`&k2IG;V4--(l$C4FD!73eSt6Db)e|%V zlT7Xc!KGaH5g3)QV!wUkxQg!l@316C64383u(w=%|CO&3vUUgVICQ)bBG9&)rLtQ~ zB{ve|C#LW4wkt+jycw{~4{0z$f&jyM^AjaO8B~k?To6~sg3Rm;h8H&sE@D*(AL)ua zf*8ppbeQiHfv(nKph*6!!stpveIvVT$hS<9z8oPh$xwSkM>13D+}DcOK-FyJW2r_q zpGUo3U0z$fOI}rFjHj4T&AMcem0)J3nd2(*OJ*$<)#{Lfk{#yh)|Z}sfh9|sJsTLQ zMYf?o;1|VuGih<4vx{~uLUEd)ToFBi4?ixBB9SE zPrkAn4{Qdo%XD84bM%rVaIyVsO|DY+lJ(~J6S$sG+_u$1|8v=3NO+Q!ft7M_ zq+{2c&E;UdjaD7jjc5D1RzgU++q!Jp+mM7oz)3FsgLPA|-~)p9RT#9WRRYrBeb+xf zOt+Qk_Wi#-U?2TMC?Hc86HD1rzfonv2E>Hey`#kF3huDW;A*+!Lq(Q08*qR$ue!}L z{p^4HQqtoxc;zPT4k{rFi?OxhRG?k_ivfVL^5j8kbyndzmqMUO@e1C)C!PhHN6@A93y`!v=>*ddRa~sjphIrb;L@qJ$oX@2!ATtIi|6#p|8j?LY{5Y!mj+`-w z6M8qozkb@4FC2eSjO@ZWhM=wzUP-v~JEF!5gEnITog5ja{6uJghbLM666~xGc5rm= z?H?-2!9#}~x6Tpxxqq{7(;PiQH!h1d@4Mkn^|zD~!gLdU@=xTsyi3Tq6=6e@IXpaC zGcvcnlk?gIVK^rMI{!9BY&3m@H-v@kWz!&zcup0$dpX{Zy;SP{blz8lBgcIfEB+<~ zzqcc=tjbux(oBsLB5s8MAf|^GNq!GlEC9%zRS9BwClL9=7BAxR`zkCbQAA?3LVfmy zb`HBir^;508dQj=ns6!M34n(=3U?f zLqOS(l8@qz(Uu$vzM)(I1Jv<3%ieF#X*?_o$ZUE2Q4-%1`^s=cH9$`)>mZQRNllLCS@=<5ktfLEX_G74nF7H#4rpkB5wg0p%4ggH*FYTH* z*gDrziEf5e_i;)yhro>H1V#ew6K3`EoI1&@Rv|$M$#AOk7Xb)3Ln6;aw1K2eb7ke} z>#QYlMdY5C_7mPRC zmf@=)0yWx{rJA@Yq70bWBu&lS>YKQPHbKjsoy!9WLtI*0to#}jUm14qL8S>@!FEO&L&Fp+gExaS-D374spCSiS3r zrV$rv55V_=ag;Q0*GXyTMFYDc@{QvA#3I2PIU)fsNWN7(0_I5JONZLr+i@JsiaJJA 
z0?3vbE*qxo{@h7Uk_9Lj^ep6Gy4u%jvIQVg2KABu+^sBsC_D}3KV0H&K8#A$Nd(wk zRpyx}@hfI1`cvi!OG98yGjTF95bo)69kU@)#U8!b=f2I8D;&T z6d*oV#yT*G|3Gc9OXTbqpncKQ8uQ{h5;N%@L^lmpO=s==^inR^)CB#NcRQev?F&tp@ZzB5qWBPRQCqRp04#F{DW8fgXIa4I^iy9S;vLy(7LgrXMXTcB`DSz{PaXqp&_{SH<+$j>ctQ#JWZ z0ozwB=w~q^gGG^f`MQM-5$i-C?G-RIIZd3jb)}CcrX@*z4D4nBX!ZrTeceC}Ng!Wn z@Y!ALsv`56%Y3ZewQADTa0dV-ghIIX4|uj*4UTG6MkY1Q$6eFu`O7xzNhbmm3T&N1 zA3Ef%AZOe$Y?h5Y5m%RvH3SS3G!`ql49u+PcB4_D@F+G6LWiwifT4Y2KKa@5kaOJy zUx^I}4lAKly7aoqMeIxIA&N|`SFMtgnaFw)^14?8#Y{nF8!i18=2)bR=Cd7At?oVNt{s^rb{j5Npp2nDOu6MaUH>@bqeB;VD-tX#tboUPkuAcfsVN%TETEqrxJ4(;Z|r>K)A`T zn+@BRIEeGZcqcrm=V!*^WS&j4nvdvCfwRhRq}(o-wS+TsvLfoy&aB#i)yXb;kA?7| zY84a=C5;q>zJeZ{_@(q?Gg2@?vwP+3wP$#^giqie>y0d*ZzoM84rLQdjy;Gk)XGLY zRj=*uFeX;L6kGsqq5!dLc~$fb1q(AyXqjJk-$~=S38Tq8pFD0z(2o$ZWu&Q%j2yOq z);Tvxd!L4u$KT$|ct&LYH+0NA=Bqq)`+(`g@Y3tTl4Il0Yk$E1w%392u|zo@KdM$2 zP>6Wr_U z*$?R~7IXn%u4Q7-?U3DOFq*Kv`3?4;KknQm-U^6vLcK}&1^xzuBsRfK8!z zEG_fQ8A|OZnA?G*-SCbuN8NZW)3nhueW%}MLt3SgX%9ve%6;Xe6`x}dMC)zEnTX7|t(Y!oaYZ@IhGeu+Dz+0PynTdBS=$YY>>`An*Sh||{sU10TH{c_o2c;ov zK}Zp~)g@@r`uVaq+N*3LW^LPes%kQ$A}CF`X4WJ^cXm&sH~QK#Qpx_XYXO&wbOAb= z%BjKD7H*fU*syCO)NjCKTgd8clc*-)Yl1i-k-oCgs>|v9;okx~5+f#9Q zwLIwML`+B5_j%w?cf+Iki{}@!c(Zm+P-49s3j!5$jn+3&x@MD~=<@+{fkQ*uqnxPp zJG}q`#hR4jjx5XdV{F2qi&8!$RCvmTu>;Kv9^-dzSs*yi)-{VD(XkM`Ckzi1fuc@P zU4x*)@_SIi%!k9Pv|XD*H1f+<#vW6nXbxVa^SO(AlqL|>fOgzkxI>$_cdt2a_}>*!`rOo6)3$9QZvr`aeyjPI-N* z3L$uAhK+P5M^@d5c&9r4qIvRrAwHoUGC2c^yAZXG7Kt3UggzDyTzovn8??Vu1Wy9g zh60GofLk+6it21Ppigt|ii3G%W8)sA0^0$8das3R4dDk90Z}$73d)3X5CkU7LD9&8 z)tzyogTW!P>gAGs$9s)}imyI0_E_G* zHTt0jnP;nuE31lvHju>FhE2lr#ktbCx0nWxHI!-4K_EbhAR8HiIR;T2@Cr=&>Se2_ zmFoL@C+AK9VH{SMLb*W|Pdm74G(Wn(XSKbEs39k^4~ANk1A$lhL5_h9BB_wtPEhsf*fNpLkjVCb&oHB)0RQfq19O zJ0fTSy@h|}71UApZ0qm^AoMoS9@N#qEEM zLD%IY0F8DAwy(L{-4r7HziHw}RUOxGE6;N}IhG1Nx@r9ViV235VwYk_yAgC!ErEGl z3ph6<|8@5;89dD(rF$TA7bJ4E<4okex{kFPTOka%9Ii5fG1vVR5mtO9%0i<*@Blgq z4#RlARC;Q0zundNFcky$NuEJKCS9bB(7y9`>vbtw<^!o&HFGVuZH!ekfP?k5KCAXp 
zzxv{_AM5Xxhw_C%WrM%uIr$3COz8&(hv`o`%dZ=E-<3I5{kyF$A7rR-|Ix zOq!0)Nm;q|ysKeH2Z>Gph_)soMq-|lAmefbDp-0eZvia?`2FXN4})@y*yNX|x1QOb zeLt7#`}?hB=C#UXF*eu*TuBFt&zTd#?-IH^Ov_D+f7`7uX+jA7P=Tgdxei;R3z180 zmru~-EY>#MUkpFEoQ$hHVSy=fnoZT^nZfE5KB6PLDS|;lfLmWl`Zt1@yr9txa zn#F4iI;H|w<|X&->5AS+I@Ba<$S%x|>RP^EUt zu!BUaYpWZ41L(>#R!40s8u9sqjoeEdzs&Q12+zsqI+a)E+tfOA57DnX$@?~i0*Sd_ ztn)q8I4A6%xSDh{9^&}G?}krBwuE!LEe}(~mTi826>2(1SA(v!=$pKCTzVbm0(F&p zIdrHffPtnc(XeUDx}1ksrTABgEpZdBV5f>RvqZA!PI+6Y{y?|g zb*g4~no#3;1qj_Lovk|a)x@|6^S|My;im*aLETtoL3$H{3!4)V#1Sp3XNXIoKWNlB zhpy?i$-kEKOQJX^uXyPqnYidVv8Y7ikv2bATVn#)2_M~(kX^V-1R%sB*#-OVpC|{1 zfSZWkX$K{efIXM!pI;u3npToL|C}_C{+U=Wgk+2)2C?~?EeQnK(QO9^ppT``{BWF*^K}QF1>uUG&6`3bPhD~~ za=9Cf#;y|h@bRZ_^oTPP$onyosalmxe`X6dD&x399dBJiR1nHUV-BtpIvc7*XD?h- zEZ~e7B+m_Cqub66Ib>0VEAhU!G3+jytAP<@yT;D@hGZ5z<3%2Y0&1a$K%m*x$t4b-rOQu>2r#zaLtXYfVLDq@!*n7uW#l(7U83c#3 zgEOKw?pyQ=mtgvV4h=A~N2HW@i?9t>aa;k0EbQ^4wdNCewp8=w;(l6qfksNWlg1`> za1SWcCQHPUS#95{Z&%DG`MCY^u&$L2Rh88%5lQo9#d|ZQzCclGk70~-pRWiidLq8s z$TY;kdK!I^b(w$=dU@sZBPM9MXOm-3A|P(1jH|Klix`}dw{a_^!oc+Q;CuQd$`l4u zxdd}H&~%?#tk?z0=aGbB+*CZV`^5I9o^5ZYB}m(MrCl|Kq-VfF($M$nyDAP(_n1j_ z*i=~b4b7Hg1^6^%{EZ}vTm8Oul=|N)%na*vULUWG{$-y~1weS$&|Ey)o0IJ=1%&kZ z+SK9RbuB0X;UWa(NvCC&?$m@Dgy*C;y#$Up9=>ndE48yKguvU_7=D< z|Jii&FR37Kp6}u!vnsih^g0qP_^bg9@%GhE@)0N84VRq?SrhDt!gZTO9&2{4W z6&6S{RX66}iKJ@o^~pq7yP0huOcfz;jGOu{syJ^R9rRzlO_q5oj|tHM5&h-4w4p3B z2*IrY^w+1kJh(JaHA~R|YK99WA3~5qL?B;|wm^9a3_BKEM_AZjco+lxJ}X4}G`#RGDJY@Jw?ACs8TeqxAjOyzrQ+JJFD} zD@AYs(z9@Q|M-92wlNIyxIfb4Ox6+j25=;IZ7Rbsr#$Mw>H+gb zY%P$`(>3i!+3(5ce^uAU(~KALw7W@-#~sqo6&t)Cu7W<}&JH`kbwlvanR9!z5T{&T z>H(0k;{yq`VjVKRGO0NI7WDVTP*!|!%X#!uq8-0&zFsw~@s0n>1%+K#5iy5496TUc~>?bc+m$^((*1`6OSN+yodTf1|!8P@*T%r;qsZ*N%OD-vfCh-k8Ed^%1I2geaw2C=b%Qa(ml zMyO0TGR72W>A5wn<$HoPJX$`?i*?GJDTrCUz15BF%nr-n9C|pmCFLVdl4Ep>FMLJ# z|9Z{Q-mea-Y*wliEJxwju%a2lB8R4>QG5!;6V8r7kCp9FK>7h}Y>i3IIu9XAft=fo zV}sRr(%W*~<_pr|S#hd36pt=+O>nN_NfBK>Lv633B}RH7TiaO&bJSK;51S30#{%WNqk zjN^Jt-xYU+)A0l$r2l 
z?@5Pc#q-XXz&L|$llPqWluO!%?Jz0P^?`YVPYd-5@jln<+I7YvK_rW^*Pn$W+gvzG*moo=1G_BW@l%oz zV#Lk4e7t~0%+C0CZ)wk3!IVWA(od&IyeENZM*@YAs#jkV(#)qG+wpw~eyqHi?9m-u z4R}5i9EAN;I~V`SNoUO$3cQH*?Iu0o(VocrS4=Csx}5g93VD*@X^$yq&z~|u zq|s^)0n?+wZX}-wyCM8dUMOZrrbM$#r$jNdP=`)GfAAuP;vBl>_%%%C*@`AdlG_QV zIO>~V9Uf=AqV93$)?kRs|8;9YZGca3_|0)09_k@9J03YxpUBDi&+i(A)h z?bq*`Vk^q1$7ik=TyZc`zlfwgF#2I%;IzDby;=jk!#>(4PhFcE@jq(m{RyM7ha%wv zwwXKgc8|KPZcOWKWARfRmI(r3?TAe9k9T6%5exR3c>o3~0_L&6qwTGvRQ-kvwS^jf zAAc1tOiN?_E#jPFR#5X}>nR(rSVF%4+o+t9@m3*=@OXRr>e$}5gR+yrP)bb$3L_wl zYHB9k!4*=Yuh9+G!4PrY)%kc5N@Fagul=u(TbPK>HIyNSQk^)|g*pharM&+?*>5co zstrK~K7cO7X2M7oNj{(~?(2*=z@v>2TYCWV{%^PJYjz_iQyWOpZ**?L^73b98`4qj z;`LAlfHA7VfIG$a3tXzw8hP2%qq@jTHOd}o6Pea59?1!ICLO0CafMMduXneVN0dvV z_x;7Asxe3Z#D*W8Mh}+%V)BGjBCoeNPJ$7p(Veem zv(P47oAH(E67W?!gU|+=^f3N`Deki zj%Q*KpBnpgbI(RB8Ik&dX__3--@>efr6`Y=y98TNVWd~|bVS6?mvnLSF7nh@BTFt` zO7GOny4`Gci(|NMbM(t~ItC3S)=oKUP`OpIR5x^Q*8B%?(26;h*iAz-y90kZCtn>M zJ_(2XwP||+h`J(E??{(-?OCL+%y&$Q3I?uCIU9=g8dovQbPBv?4>t`REAK>eYY0ru zx@}O$z5em9x-G|TFDyBj7s~?Toj#zIX)awG!8_tH&b&IT25Jv>84Ro8Z=mk}wX0j8 z1!^#S&AVQXpw;bvOO~H<@{N(LyNt<@lD;L#Kwo7R-$5&5jK1DW41m=`A1syF0ecK6 z8Ls4cpr7dum6c9FZ`Yp+YdhvMu126cwuW(KDNM)3Q$74KU9rRL)X%kcF_hCsjf-u? 
zl-POG4XDE!E`;Ss&57+Y+HI`5c==!7kSPiwvnkk}-vKas#3>`R?x?;{>20W}r=V;f z-m^`cd747wo}p(1t4L|gS#oZ%O83jpI}Y_DfMI$Py}7?Evy=fT=06rqLAj)%1KdwE zGP~5~=YC&mr5|HFCRIoBh&>p@aYV`r;eL*SYEvMj5=P$vmNUp6uSIn$ zq-f0G{OB+3oc$3^%t#ib-lJWGH1pjaoi;>_IuBS9EaVc>jR1<8!6bdv$oGp)93wvq z=|sdXy~Oq&aquw6)uR{K z3}NymEay9cSezTJGMl|u)#dus`#2y}%%z@%*o@Ez-^VH6MW2iCfwK3mXBS9oiI6f8jsTB%ng>EHy#;XYu6Y9v=xjqbbiE7_S zap}OB{mOe(k}R0s%4PZWgSjLMD9uJVc}tY}GM{WeHNp3rt&4j5FSLN?QR)sBYV zMb#gpqK6={Hi07ZzgegVZe2(BJ}KICOkd}ZJwUV@tep8^qEq-KYRJW|l8h(!K?ddL z&ij!Z`l?n#ppkR+%mzmzUq@vdWFn1rV=LmoH}Fk2Tg^=qa?s?rd-`Xz;O8`)RzqzAGXQzu+YKe`6S9td1!gUd!jJ7wp1kBRpG5xhyyex!3Q zFu7aBxy|zM%t@Q^$R6)dlN7Z-AUcW4FkLx=a5hJJlrA;NaDa(!bN8!7?F2US%=tWN z?=%kfFp!j6lWu>O0w7(Q=q2A0i$}{SjziomCPlV)`Q3|OA3vDi>@C^^cojp4?%cpf*X zAYmFA1AsfBHSX=D;;9m-s?8;`2SU-|)uLtKdO!i-*xYN{^+d~mr&R;#y@jDqdZfjw z`Gld~k2phN8e1my?F`HXJvXyv0;UVHKIbD(02XG}dsgRDJB>=b0BK}QE)M*#xttL~ z@Ls1=dG4R56!c*P^-p=0C3wlO?e}JxLB4}9>vNW51S94guaw$M?V_g(2Vhs;lx2%R z(qL5i_zn@eYHz%uUj6^kX(u5pxYFU(UeRXsYgBD%1Cn6v%{=zLu{X3zhSgRLFolrE)Qh(3XT zQE;zDyh{}`b34EDVd@Pr6Iupe&BLmvA6Fgp6jSzEGJmyq%@=(dxy&3#?J9w(Aah79 zHzBc&#Rw*4E+8HtMPQ)468u-nYC`&xcz#_%l>(Z>7q$7Uph$p@VOj#N*c}Yf) zfZCstmkvhsPlB#HbS5&=tR7v**B9gRImSuPFbh$3+cjDdG8zMx+=-nAd4rNBCrE=^ z?0>ju_N-UX#CO9(FgIU6%OIeU8prBRdE9xmG$W_zi1mtGeeoKF!!yxj##+@uBvV=j zr}9B0lxpu9GohQH&)T{=tpmZ?1{%)h^@fKY2RM=8$)Q1&v}MN=0{>DWmob&=pPG{) zk%0e=Bw8;H;?NWN5W3ebnI(ouJFeFT$gEGq@SJ{Zq;OD3p|f#XHMub|>{k)pS!Ypt z6iemesrbcs3u_%kQKwd*X^xELv?~&VeWV(2n{jH1N(5I~if*5%?)gv3o9p340daK8 z+ib?@Ib&o5-c;O~0F?VoPyH49@8kEo+(+h;!_;pn{hzMhK5ic-37W~79Ja~_@09if zTY22~bP5acebPY+<+Gb%K8C{HM!Nf2s5T1}yw9k}MPdCr*ip!Rxd{eEk~@P0 z%JLqomv<%2i-c}9*3L206Rd$zdSFmQ{$7Q zStq62F9!hXXPUj4mV)RR)x80N(G?f4Z82v?>9YfVe_Was4ceDxu^r+s^JAMzQx)2l z;Skb95A|;SpSb|Dmp&Oe=+X@PC7aQ+Gv~btmK^fa*=?O~iB<*}Y?{XR950x)Nnv7v z3Ey!=KOPV#IAmsn#w^+@psKHt%=C_m)!CN>PY1v>x^c?-`&ch)M&92DGea=%RBjZW zveC2^Q08!EhYZ*-q%z1=V&E=Fdqhj_6oyxlsHd;s^C5!iy|k$n=*i$ zMBhFqYjO;9-JGa7#qq`=R~3Yte4zSPlUAi&HQMq(1bv7){CK;I2ksdcH%mksJ-W7B 
zT(%yhX;+gy-pnH4yo=IXd5ObxtuA*>2@blYcAE##Bp~p?%8fH(5(A z5-fa#-b^a!u&s=G8t!Nx-;#g@yc)jX#bFb?aP>GF0|NuB@?dR8R6-sZ$k2YX6sifN z*+y~qZJfS+IBs{kb#QkU>bos}B{{Hp`=uE4;$Wx7?2|&hk_$goq3{mo%4DNhS!c*t ze2RadmG}I}gUOoM#<9-gExLr>eAnGHE6J~y0l=Gkm`_Pz6QU=Y4|dYmRy80QmBA{X z;_2I@txI*xNM**uE*?O4Xl&1mlIqU}1qI#psg(ghmHe2lbEUM$En88lunws>^{KHb z@}3O%8r7bsZhs=OV}&g;g^~lpxCdT0m?T#BTcrU1N1`S!QIG#o-;CHfP zrS5U+K-6IMf2T{xe^ zmLMw*CIk1>+#n3=KJ^{FNhM|q@dbfpWbsdh(442veK z$ctYp6~t1{nWie;s$yVlJyFN+UUi}gYu|ZFT=IIPB_kVQ^KIyZC)Co-dM*G)7268C z!!}G(=vBToN(geNEQPm*NB?-xXhg`0qScN(-!e3T=fC0Z*2#IEuCq8FVJo5ac6t65 zV>(iQZo;n<$AVQD)A7G;qsg#-hS?&u45%n~G?>O_GaenSZDN2E8D`ciT_8(vY&24> z$bgWE7@Ts&nK<4qyzb<+(-HtdNStIJ0`^$!UAGDVMi`#6j+B(p@49*6hp2CFJC>bV zGZ17e9B_srM22YLE#By(P%$PyeJ@gtB7&G&hUl=y8nDbYvvQXn4RMTVQkb-;zsw&B zZ(iI4zSjtu*DIV`)uP9J60+~zng}rrqv$(|&hMML$);Ak)5mw*YW+sy)i9K>&^DG0 z+(B?=EYM-qn|{jYkm8XqOA4t`P*d#J$0iuPQe5`=(t zE7x;DDP=JAj)L&k`QC49i>%&Tt_g-S1P@fJsa6$-Y3JR!&txo?fGm>ux9ZHRhVc|; zrXdV3hW~h$rz!Obq{S!QY`90xX<}|5RdC$$|UY4yMWkIUbu#W6s7_ z$YO*@?RcGPqmaOthfik=F`@JZEEhSYLi;qLL{+ylW<|f9&lzV>u6Lk#eYUrd{0Xtgl zQUi%LosZCMAog?m5O5RYGm0jFRZIiJmO3!^7G@1X@BL#pYas|HYqUUf5Q-=`afu4cvDSfto^3eCNS~VK*XGK$a~OWnDO>gQYEHuNE`W2gU37 zNfq^Zm`I5j1RDV+wE=lj0`{%2q|%JrZQP$_v+sPIODPK~p|gA((;uK`qz169oZ!w8 z7dljTi5U#Ls3T+*pJm#M_D``-d635k;--(cT9NZK;K{aOt|_Ou94*MDT=zN%Ieep`)BUpqO+BRnGQ3nOn3P{) zZB?EI*WYR{2%rb*YleN^?onXk7P1s^gQkXG*q#l@nAn=*-JHtvlY{)MHM*6nKXyDu zGkikhiy)iwr4xZ5!xVPV#bvljo+fL8JU`iERq@bXac^a zb?UyxEw}~R^0^;=>GTK8s5S-+_fa?Q(#2*YPDEf29D40{2hZ-*+Q+?Mc^jWMi%d>3 z{Iaeive5HrQQ?fPSP)`v2am+rNHH=mlh<{(CNG!)^(_)<%*n(z+=YYqI}ZnFxG^yM zhjr@8e}0hLTRTD`Ns72D{rI6h%Z>cPt~v9B&kX+c|DAe&LP+XPIqBP>Lt(G6R@`d9 zkMc13cqkl3iVF)*JWV635IE?y4pRi-v0Hvji9#*&p+02{ZGQ$DE2G0L-Hc@RJ_Dc~ z^R=-%?M_G?sn5Je-59O@=N3MYG0lo+5S#tVMXr|t z!yCTrKDp~>4GQf}v%aeuTaW48Zbv5)jV9%xtZG8fBK?23=0Q0g!ZA8NM2u8m?4YEaBwM;3x zt~BL#x1Oi(oH4-*WeAOy;u^Rxa{VtF4`}|oIH$h#*$O?&4Lgd@oYPs?g|yfOB(Hg! 
zl=w)n1j=SkpX|5wbIUPX%EE}IkLi@3q*#?(kNK8}E+b*#!i>g3)jTh-9AspjixQa&45^7#{KiNW3!mgpEx2i z+nHcwJ%y?J-@WybE7r@4e(!z^$(sh%O)#H-c=^kBh@j&BN!d325iJHH?=WK9xJ>QG@wJq9!Pyh{-F! zUFd&3){6<9fdIQ##`WX-mjl$IM}~k5k$vs94IPVNr(w)kiw?6)s*+IT_T&Z%qfne!?ID?S_^ z?EU%aI{1Ww!M=kvR5J8NW{Y08CKN^r>7X=Q#0blH+(|a>zC||S^u8MqB{iC6{^OsZ z-(>jLx9|UVG@(WoD`~`?0RDHr|_%`IT1FAMVuVV|6&~)+>dJb zTYpPpqL1H!3QUyt%-vDPa^kt3&bivSVE1U27|PdnV`GKr;5OC4f3?H;2qyj_5LT!9 z)yyfOrIP`wCrD{PbsdvwSZL-v4EP=y_2I8N_(p}nBHf{vIpaf3G%A`mOoN2#l9kD* zZ)@${tq`ljW$bR2 zW>kaLV+()9Lpf(2A5UcDR%WNT5(Ye_FPdqjb0-XNRuTLsT6p zM74uo7~ZDHq1sX$EceI_Q$32>62o-vAwG=cOhHiM*$pe9j-3yD)QUrD(a6yufSheR5wp-)BB=kZM8@`auR}H&wtZF9m8o9bXR;w zkRrl%y7x5h!FDwdx^dFxuU1JK!XpAU&-jtqMNyCP#@q=WB zZ*3y*L|Y4ick+FvO-iy?X9|8J#%wW>0fZf=vC~lQwxfISK^cP#fu;lN7WfPP5HCdL z1VO+8JCX0eK4Z|MqL1*Y&%zZ=hH;@FX-0YocpsL6k7?<2<9M{ND#Okm)n7@62w^Tx z8TMe4pP-rb7UoAD%uW5p#PO2(-j&WVq~4#&upxfy|-L z;F0DQU-0MkR{rUe(F+oDv=w_Xx5Vr2mnex=fBSJ|A@2)-jb*tP%+3^M&RKoBV(XS` zQEwO=rT2d}XOMPVjf8RH|B~iUTw`SDuZ18zs?PM3WCawl*6-St<$|z0Sh(3r=bO6F z1xF9<3NUOR*FlS!oZK zkf|87-8MPmpmlQq>wJZHQQDG78Dx(Ak;dYji5NCp{4RcJ3)(=x?ZgHRN4>auktQYP zzL5XfNN0%kAAi1d=iRa& zU(%>M@Y{nd&g}^sfEeM&K%&RsjeZeP{ z@aMWLu8v2WikjeA@(~I*MClOU|HhtMnTbtE9w=*yO=RTkL4|2vCbylEC_t#Xp#S8Q zut$|DNnVpBK>mDTFIEeVEgtp1f3mqqC<`-L;gYByL)Y2{&$^=H@KNB@?ei#@s*Srj=Z!7k6A4EAj5Asxl%b$&sk&+wy!#h{iR zbG4%#VC|kmpg=egMD@EK5WWp5Ij$)U`xZ!PakXI}n6WEXQcj69e}YCeMKE94G=y^p zlWgR~jFuruq%_j&-xqk3%4E#JG})@RX!OU+w{A4-LRyV0&1TBHEI^M+uUl3wx62RE zxUtf?!Yy2v&sLH)0OhEKiA~01nGluqNnZyHo|qHsUt~0phtz4H3N!50u31XTve2Yz z1*|rx9NlkXB~bV_L#Ef8uo(Td<0e^VBfm!Q6+Kza^r_~vEsS)kWCwb>I<}Grg02~w z)9;4gQqY85mwrs=_A5b|z$9)Yq*GAbtDY=vEIw{$Bft3tBL-ky2p1FPx{|{VboobK zKlS#2o~bmmq!%z)5ah%+F+6s;!w?&cKx!fw+a35b_Yfy8$a!XY(yY1g=DO9@&WznS zf{mIl*=ayDhaA|3AN^qeyp?XZ)%@odRx753luSuaKfthRb@)_YIWqFK(=yTMTGbgi z9VKIirGwb6!@8e3(uqf3P4d2GY~QwTq(rzAEujrlegna>vEf8 zOH}s_r@(x3KeSW@kKhNvRxAP|F_xs8bpH=hPWU+IA;)sw86aUQkL(sup#rl zaCkAauUwg@uqq`+du-&#Z78TRxn*_W<2gv(4_%=w1$lR=mA)^$i_7bSENt-QvL{4` zdBFt=0#VU+6T6pFi1E> 
zT=t{{i(~yeUjp%+bAh={@X09<<{G5$0c*lsez(3H9SPERIJz}t%My+%AqHiy9JJdt zXwIHOQnUjgp`g8}MAU$3uR#?xl*L5UjCs!c1`gx}G@6bDu&W1}|H`~&&T){X5g*{0fWu;0 zb4{||LO5M}^SxAx2$sI5d)C`6yV%Y(a7mgag%Z$bN`9#w6Nwp|bGh!C3%OYG=qZ@4EvyBEu^8lt1`|ZyTsgim zkwn8Udm#}QexLXB z*Jl{D9xhILDcEn_7YNMC2prza-eo62yBI!)x-7G^hJ3;~g)6#0rBHMS*P_xx%Gzn+ z=v+T?K-T&N@5jIWB)nIeMv_F_VV)(8@gHg!M*F;L7>JWq<&&}G9c`_QToC<8tR{D6iV>hC7nu7UX}kh8Hb|wW zMR0)|8l3N6{DdR^fgSo(-Ow%u=xshsn3H>9YIjx>YgYn1q+3hvB0wmTbp4x5!LC?k z8J@k@GBtyTA&vBfE)bI{8osZAghAt{MXT4E_b;EkAMp9inJWoz*Tl?~OrvCEX>lgw+Zx7dmb(IwGCA=F1m}Fr$}WVC61)JPyZV4yrsygLnk*YzuSO=#pwP=SZl_xE;8q?ZVh+Zu zR2&++9^H>ZD~a$m6?4Kw`S^*HZ_N!{<3}&;jLf5SmNyby=#3!Y_YQkBuqB-zp&2@uqlh+iU(y9MEiO}|B z!kZwz-&Mlr85(TsNCdsCi#9(1tg8ERv9uE#C}=dA%k=LC7%_%-vzt_SYAIIE>r$~& zkoLG3psoxnL8rg4<cj)*7$4VDIPb)K2l0h`F?rwfujQSa0K0z#$6G+0LjyaR`tB}#Z z>3ZdoBTqGFa}qB(1#oYD&zQxA9)l_eXp+TQtw04{oWF7r6ox$S3&bG-8Hk)6$rdCM z%SxeUPu(eziQKv}4AGoM{#|KET-JJ7VJYt5_Nb4S8}gG;9SgxWT&iU+3hwkr>{TCY zt=qQs&_@L(?@5~BOd=N=JxVC@oHFaNa7s^dG^X>Qh$QMiJf4Bp1)E0`4}|0A*V+oA zEiKP_@C1Z~_>%ERRr**bIM>~u$WrV8>)4^}XJ?yQoKbUIRBul=8T;LGs{XwdE4#1% z{A$_SK+caBVwfI~_m->#hW*pXh^B?jba__n8jQfJxSL%uc$pf8;5XEP-A#KFQMr#U z=;n7;APj33-ULm zWivyq`0GF+xb)qY1B}X0@CzwH3v;`xN#E5HU}0N=KnanU`%1|!uv)lEZk``w=CB#Jkb6E$1Jhx@ zzYD(3n&D8Dk#q%|40fhIq6>>$67j#0;iD)fq`K@s7h;Q~o9eHPPKwfTs(h;&)c>jY zM+B_SOD*j8xGM11h{lDg>jI*;Tm24)A5P|F_zqH~5%c#FwY^ww{kYU4n)~VG_5m-_ zT=AVs_9d?UMuz{Y*h|Kh&T{lFsWiExDC+GbWdd*a-hW6RR;(P54q7O8xhhLfG*lIY z3;-x7IQk+BIEt7=7YNH(0;WD8R%9Pe3qtDtpPla!?Ca~^Rv+hEiV;_X!sK7DUh+(i z74$8q;MyGZ5F=}vn}=8|9qj?*zvGrg44jN7;yIwLQDOV|y+i$c0`W^{rM>7ni!+mO z@Mz94UFe!bPW2PlgT6>Pi(qfr@P**cmoJ}Avr(I`r7*ndlFFLYskkl;-E`#Vb*`=} z!@7TKhya=`NK29YSz|I&0bi%V#hNfUSJUt#OSsttqi^f#uu))=FKmllCNi^nLXqVb z2kmAkiboT*4|d{^a*SE&cSoYH{uGf|4$=`MA9mrn(!>3=3BzA$J*$GbiTmrC*~$X? 
z?lu_mTMZewHSd*~l_P-^0#Km8OJ_gagqd+CDT_FR5NaPDZ1R^}Cv~AR>YZYrI(-qg{tJl4v)pe`Y!*v54 zIEd{ALDI0HiMG$tF(*@v)&JT@Ngmj7CTVJqcW354YPWIXC}iXSWHwy&2<&jKA|3z3 zn!D5*uzTOwXdx*f&yZ$%ws2)|8rkC#!s2N<`MhmeXCkDo<5n`Pg$$TO3_2xuh4uHb zxQ?m7*uhuL5~SX&w9mS%?6{U+J-O=+L7xVPyz#XI#Ji!`dE?e1{$-lrE$fdwfO(6} zUzc?Mqnm#?yU-9I@7w42F5xl=;dEfw9tz-95XlEaW#^Hk;s?x+y2|$<4Y4H@dNltW%^!4l;zL7>-eTyJ!63{JQq5!kQ#Hb#-NizUdlC zTS?c0pm|$e^!A-Hw6~=3orO~Ru{2b?@r;*GhQMIa{$&q%vu} zUc|0GNFi+$JF%qjFb>-jGh{EYilzs2Gp!`L&V1pzNEI# ziyIp4&vAEiEp~0pQ<^7g5h7(UV5zwAD0#<*3TDVf-@eJ+e zbr%p?O-+*;<45W#476REpO^va7_Qw3_@G&VLAa3678WCL0OK->8u# z4k_VUaITIPb_Yy29q}*93jOZKU|{)R$HMd?HTr}ZRvk`?xuU2b@bLixLRQ-~>^{HY zxyi+?Fo2S+x@_TmgEHd(qNEXLRnFIMU-unw=e{=gJECBWSD;!r5Bg?=5QTpabla0h zELV){@>NbaVbZng|Bw z)Zk-|;nq%;427D!;N^u$<2F>Ndgg1unbXCEqO4>5Gp+^jc}ch%DNWaGVNd&f`F@pS zvt4ZNbfWYP5XkdiA)Sdb;FZsseK~(D4*y?C*GP;b7yn4rK^J@1RI3VLS$Imr#>t#y zulnYB-ay}UL#sB&JkUrXRkQHO8TdxhFCNmCwVQ(ttS56E8P4|sI0uudbV4MEFeq&k z*wah5v=d6eZArhUC@32gTq_8Myzh__WSm{?`AKDsEpRp1+q`H6gg2D#N9%!1CJ-zH zALmx-Lpf3xwq3>}<1%@;^FlbF!vX?@TcOH$8xY3n9bs)o8BCO-X;4olaBvG1a>!ZZ z*NZ_N<2OeRxrgS6T2(nR@sFQ~nNa!ZB(f~e z($2#bP4fu;rT4@@4o!iZ7R<0JvDFBra0TFc((A)FLxG;^#L+ruOgfGF%U^m-&t1}2 z0RkQT#vPun1X%S_SRh$LpF0D$rA4=BJDR#mmn$Jo{? 
zrC23D?SE|rwRR?Q(&VAl#ZBA8G1nr{;0iI8xI9bqcBVBi|0lSB66-ga0!>14wBHJO z{1W-jHB?{XPS;I&Dl+^mu5s3#lv}Vxpoz|s$G+m&GST*9tY`YG$JR1Cp1y9+Q5M5O z32fw{VYDu+h0Gd{^mRvBJeYq+cPiDsDRrme8vjAD`=8Fm?NkByIZY+|yP`)ZcNLPj zmwU?ImL+)X4NQ@Si)oWorh#kcmWB=~1=<6KE`DkSY={|u)bB6H+!gEXyq|DU4)TKb zsjDIoZK2t_BJsYC;fxvrP!xZ*QKSojn;~swYO;7c{B&EvWeE{87IdWQR$L1&g>3=) za?7LzpP_V2GjvdJd4rN|Fj<^N0!*OY{~#+r6+Ynouzkre&sHJCl*Rv{BQ$Z-$({Zz z(7O00D#rzXb~QHjlT*L@x?-N%H`uZ~O#Zpcu2*$ijwBGK(KdU%B0}+2>#yGw&oYgcrQS6Mk z5V+1&v4_hkm=pjms(a)&U!kuu8@wu{>>uXFJ&Op(QZ=xK-Q$aUMOMkNH;;uO@%-Aa zJz1Spjw)!9p#=tm|XX>dimNxYQ3fYvk_L4 zqh6T`ZP;NJy$kr_wjw7;K z`X6^0bg4|sVpS*PT467U8|m6?5wu{A+NI_Rg^A2|=y&J;Vmc6MR4U(jqFr-5cZ7DO zNJU6Szv({C!$L*ah_L+Pc`zp6*l`!<)aUDcGOCgQ5+^RAE}=ho{6VG?v$r28>sqDclT=H{u1GO6)BqhgI`i4?^A_43OlOG<5ug?5&zT>Sj@ zh(sawLCk&!L(YZ#7J7P*HZ^Xrec|7mcpd}05=Y^uW|!VX^Ub;`bHw3%#Bp>A`v-F- zjZ1i^WQBes$inpe?B11roHm^i}|0)577)@kKVx89g(i*y9~guL6x%O{Jb* zJ1y~e8WT*VNkQQFCah%qSS-HUu~T8=_HVbx8h4}ie=ngkTz-nkfk ziO_Y5yOgpmRpp$3=ja2350*2Bcxsl>8qev39Z3Zwa{uLu7`K4UA|OyI<3JIz0guo~ zhLHpxI;3#xsxA)<-*^q@0(fg!7G`8u8Efs?zBhx7c)gwA`o>UnMwBA-5JO69GAgf# zBzG|&Ev*I4ww5h(tAdmw+AZxoDE1M7LvTjcA%Ulqi27&%i+!_A!GgZ8XRb(&uPBfO zJaS(xWw6}5W-hEW@0shc8IzV>d7pcxIAR%(7@p!VjH)rr8(!kBa% zk~52(_qp@~mPeJY2ed;>Le>@D_`1A;x#-ZZqD#_`LocU9V~`z~v0C=Z;1W)(`jrHu~gS&X3c+ zJp(jO{la1r=V7op+A@ld`MVUqKPR!NJC-VXe#NuS+pF=svA(b*D>Ei_UcM#+a8+Ux zdJ}#XQi~F2nCFusuHs%VK+WAd3h$H)0X#~5a3%^VW1v&WEZb@(t4WMQp9IExVE4nK zcqa*qe{&NSK$Z7#-D4*m$!k6=*wv)Ut zOPtEKQVd3Mz`={h4b{R)ZdhK=-dYH1(e5&Z{)MA-^`bkjU}<+!k1tXu{XY-w)u}RF zHSJRIlVIdy+Q4%=9v0GSnG@&*;R`^n2jFdz-bAoeQbs=qP)QpAg%fzrycY@PZKYJf z!7Y3#t)%I*@=}DA5FY8+*UB1C_~?MmlX3Cz*M=oI@9vT$$rf6yU}p_7j^e$YE-??H ziv-Ept*@3lviaO=ElZ^#jKmJXZ*A|8+D$&Nvs1#QcY?0#CAoGQfFJp1i=hQWJ)99~ zB2F?L6*&H2aOH=)dH*^Dsmte=aWi<4L254&K1J&Jr3Z{5XlV!9V)#$Rbk#potJbfl&9123yeN*Q zbsZ&cTRLxXav7XJ#0)fe`r`~&CaithVUPRwx)`F}^iLe$Q1Ya}nhu=EE~|!Bn1u4z5Qny1 z0zWDH`pIhod%*2Kme^DWKTN$B5VWhc2&G>(CdP?A&_G9 zf>kHP9Vb^K9vzgz5^?Pl5=S2t+{(qA4qp}O#+;`xS1Wsl^U4;ZZ>)TZloxN0tt<)J 
zaZMMvkBYXv5p7(7bz^Ji^=BVQF?*#jO*;tp@P#~@lg+;h6T|jt6j+e~Px}-wtcaOL z48W-Mr{`)^g1*g+F~RMpqhZrBaJDqq>7(FMC)Z17>-CP(B+6neS)%4uiUrV(4Q!1{ zv}e{;jL-`M1{f@>b7i4BGa7^0{-@c^(B4ht(^EvcN&vEi9yvx-zFs9jbvxlCWRzO- z>9b4?jR=LUf+sMu({f>RvK$=w)GiyJftXj7{V9Jw`B2f8w59eIw3yPCE^=)H+HP@2 zcVHj7n5>KSu<)>1r}4Ujp~}AvCI&@2T!e8CD^$CVw>Za`F&6#@8Uba*=XvivxanaJ zuVf%_8slCQI(*4oG$NsIDj`ve%(wF;p@CH6l{8mD@~huc7gl~wx9ZKFHwcf#*MJh1 znASu0ar@7ik9vLvB!_8Q(NU@{SA?A;75{Xy@S^hvmLJYsPu6p9O-4 zC_eL*17Qst4K}6#ZfWIY-c2A|Z97c8M3#0V+dld14210B%oG?0*~Jf0KEBQY;T6zP zgqqvJi7DgeFB%tH5q}YEc<_j$8b(chCmgB%H)LJ zeFhxMrE|dbw#ub-r+HwLvJZcK5da2>{E;E+=KH(j@MKA6j5|N~KKtMJ1*b?2;DOP* zWCqQA|B}xWQPmH-55j~)FZc&{&xk%2AP$68NfZiYDgS=ym&|ARY%8T{>?G(zA_d*& zd0Ok+$hTYZ4tdHw%c+qNLz=5&kPQl?on17LH%Z4YbgUdIj}br> z8Ay(+Ys3_%%S6kZV0c7Y4zB$@7E1;AXvEda!$oykCIu-O3&nHUNR7nuek#xWXu%uc z6a)XwLY6|O?nvW?&v15HPbyDHJ0taohZA}|_1R+qDU?Pd1zXm2gUssxsXZeNv~{dL za?_;n4R=K^8w9Zx<>6dADpFMQQ`*IEIrZL2@NJ2IEG3Ut?^P9!6{cB#8q^uc;pgIF zAh_z64K=nGnEOAjdNSz1Q3XrXJjUIufkjnh+!kmEiYH%lI`~Ptp8fV2XQ~9~>sWT& zoDC&f+dUP>-%>k)lQ$$&({+YfypT0M(8ER7L2Y@1po?pK0tA@-;(^*s!gPi&g|HQN z(i|TP*yf5C_dbSl*|^~c%%@S#+=zEOw&{FZJr+@rSOTzvY+s}tdVvL_`e$a)f(Eo+96SNnag^M=mp}x{FJKYHA0}ts{Jp0U;0-AXUNE6FR($=LHx*BX zU4U5aTt1&OeB#Vv0@iCt`g8CvN7Z1uq0S$(V|R`U*W!VEWV*DXZuX9RV?Hu}5wxGf zBp^M9ADv8%AM4KqQ^JcMRPN(HzD~hVN90#xoS&ku*UII#r21UPZ6|_)CW1bbsA=#Sd7xx*>psHUc;WUZ2F> zn9z{4j&2Gd!h?U1IcE#XM6|@se8Qh_n@wQI1tne_@zU|5?kMS21UL3JqVP)2_in+d z+xB49Fd!K~4M7^6C1ajuc4*rh%5BL-%xTW61Cr5}W3)m0aT{{Z4yj4Q99R)PQR7&T zlcnEnEgK2PQf@tZaD+KrsC*?BiKk+GJGYBYjU^YP z1kKq`womjd3iNbiLby;#1bRfWYZJj)N+;AO{+cdoLe(&2_#i1`eZGXFY*W+GvU zpA%1i2dfh5KN?G&EL1o1s#&%MR za;mEu8z*0hMD+{*s&Az4S@XSoyU3?V`aq$rNRLWXU7mPk3>lFN96~2Hx^_^~z}Cgn zxz^CB17jcBhOt4ks<)yH_CVu6VXU0Z@XqXV$#U z@`Z5+c2@|J)tu_L#Lwz892-9;-aAIBEcAFT<%+MY^K5@a1(OhU#?R7S%+9AVh*;*h z1hb;IKgZgF-`cE8>^;L+Po^!23dvy)Xz1QvOh%@RqxYyk$;bpgxQ>?WB8G5|3QywU zDqO8QQj<dJY2iay`z`C0rj)~n$p0hVfB|lFqlPYo;ncV{hQtYy@Y191c)TW> zT(H$S4Q8NFDZ_OPW2zbBvc7gEbhHoQi!qkMPHLEp>GQ;4ovRIdHZ;(bZ2wy)!z~;dAg*n8 
zz>mKd2Fd%Of_7A^A4gGH^lk}`zgufu*R4SL++<}7=BeaX4 zoDa|-3Ry7Yk5@G)qZmK2$-^Kp`7Yr(TH~Yrv8)|fa?g!~B#crkjy+NQ^lvxBLefcK zlUk;2E|io1+GaP^_Q-Q+~-tKYicV!g!|DIivj$0~JO3T?v}xnnto zMGWwsxap8dQrdjicvFCU7oMtWWdC+WfJU)zAS^LQr8JpV%_mme+3GN1O;}k-AY@YqY zm=WDg^vQw@6FUt)o=^Lfrdl#*YK-MjWNUS=MjUm;{JudMI6&e^&$ABB;K&2L3ovrE5;=~#i#bIP!gdlYVp;eHksXXiDw*h^F|8mgQN7k zaE{|$Z29~c%tC$13;xtCy842snj!|nsKupufMq6-OZ@lJrMA&24qcD9*FcQymNYJl0CIqRwBQjc+=4Y z972chm%jyFFqRs?sal%GKkFb!fH40FIp)D;RHYDSE|I^eTRAiRZ5=rLBJ{BPi1}ZP zvWbK1Lw>fg{+|hpg|m_voAGM~wB#F-Tz%>q0XJwzd-|F7~R@dIgTXVUm~@}41=6}X(Ntv7Vp5Mo^N zS8dbVao2JFv6F9ZqdR@_pq?x*;H3YBsCDr*{(i{v8AxEhFZFNG%9)x`8=y zH7yacZA~)#->JfwfZxX~DTDKZnh*iKzI;4-ZG{cF-=SH0W?zi4>Igf7U`+yPX52_N zj<(k%D^v6F5V((7oCvwzhE2xF_d%GE)qY)Zdeak-J=ZHK$uv$9Y?`1ldEw_*lq2F9 z6@ETU9z{>A^@4Uhr2ZVm-H;Vg{ymJEn5}vr65bGgIz5Cg^_GrmUB2g*PJeOir7eB-x!^?Gf`HFj4VV07~}Y7 z^?ZJOB)-h8scztqEudx0*zC$`z;?Y6ik}hUB8R3HaI;AOTztiTE<;aF=_8+4jnA z1s-~EZq*a3h|9YPz{hJ)64gZ>`(lH*b~a(siawl>88fl!erc#n?y!fwJt-)tD=KjV zv8-~Uo5A@wbU+|UFDvhi6@k*IE&4D^+s1oM4Y;RZ%$VM7!46NnaBtVGk3EeNIh>QK zSjpD6dnd=$MyxBlnZSIOOoGb?F%>wyB7?l0IA|1cCHw~i!6)*WM|yU{L>m*%!S}_t z(TjLPkQtQsYbl)Fm%5gnxOmkhk1@Zuo^*K!aP8bdqFtYvgT!;Fllm?m-Iy$jgmxB4 zA)zOq%)q1@pp1uoOi;=`H)TFZ>I5)gK#{AvL8ibS z1?{hiC{E(-6pK6=lHItkMznY7=F)}j_g9~AJUhuSQ&-FVPp?_%f>ASH$QI6G+52>i zFV{CSXTp-uu5z&1!VAVa61!lQT8?Iwo+TyVi9MG=#ykOhAeWVm9Edp&pjn-~P?Jy| zo8B>?aG{hzoU%u8+HVtu3Q8J!CDMA*R95vxh-i@Z?0Kc=3t$U=15@y7LC7^QMi;UM zc-8pGV_!@etWUW#AyI^3$t!xQKAd*Z;SZ3EA;tf2wu*ihfZc?v6!Oxvn!HKyL(^s& zoe0OM&cOKRP)L53({14Fr_Pm2%EC4!#-K<|UgXoQ!lJlY_AuR3`s!p-J+d6t z@_BR4a(oe-@WG8OcdCeMbwTJe0@j_AGUgYQ$gL7Nta0EL>kQ-<;qOaUgV))%` z-RvK0gZ$kvDlt6_*?6f0PNVy9m5rd9AHmc?WdVI=275M_Q}AJPCkq^3-CJ8J1=s<; z{A|}xOodCqD4WVCwATe*K@zZDLWfC-aX`GoW~w}m0KRFNqvuP`q@PNRLG!8xRsHAl zC!%|FBTd~4$%k8hUOXuEZfm+)#^Ji7J-kSMmbGqZ!bxUcL-cl52|xuLL(44%%C(s3 zLyas&=h!!91YdO%8S=ap)ylGY^0 z10M6=JEmz#dBxYks+9GV&4>84Q%Ah!5~sWBn+{@k6tvy{kVW}+w=MDaIFSD~{B47W zA9<|1VGYfb>wc|jV_&THw)O;vje`9qRmHA?sD$nE_Qa;oG1i`-1n9$$;kzD!ya6OFd?cy=rVG)~ 
z8tSd+?9!&U-j-^Jt4d<0yili1^K95xT;QwH8>c8G1kQ#TI@_WS6lbF)mYabw!T~~7 zWQE|{0Ce)iZZbXCK!$@u6t(Z5A$g{w}UwIE1WYNKY@Ye_9PM7CE%WSt%7o%-? zlwqjLO;F`umdmWI#DdbCg9PpCL5Y$Uu=c=Q`){bb3zmffDfTg-@~2IL>N2HRGI<6t#vUr{`aB6pwSC| zf6Fe+k4P-{Nw^L&#<1_W_d938OS`>=0l~63^9f6P z&z&M`(plOWNOcLLtm~ac>6~xJCqa>$a9<47EQYuNV82X!+S#0*VNQ9~vKiPGBWzAr znS)f03x@l1O$6=1y$3?=o*-)w0MKRG$GqrPhxW0*IALepYQAt&-6&C9VYnBo*Xnly z0X|pA9(%`6o~7(pQ!R;GpJjsJuop7de+hUGq&HaOd9Hs%<)1p2?Hxd4{JU43uJj+D}hf>9p8$lLw#&W zk6zkyuag^}CwX&s2xRZF8Vu;oT$)YG%9?WQW}5Dcm*cjvkjX}ON!c~B4%bJx@Yszv zm7eSL5e}#oAuc$Kis;}au00`8#c+g!Wt+Y8Qn~z%4Y&39_raioAu(1Op{OS%_aO?h zw>@$t8EB7nQMJNnGOB;m^5-q=ou4k08fxLZbC>%uuSwR=M9|-lGAb}^ZtmyyC{r^J z^Bol!4&4i`hSLGMxskZ()Ub{s2%frfZo(!jgX@zZW9vPUGrqxQ#4re6DRr{lHvJ)Z z#SjNoW231{x8N(9KjBu*Qzy0+?k8Q>m0L;q5J>ff${Sl#ZZ^mh6Cwq_$2^&czMz%I|11%zvex?k-ea^C?+;#IY9Yco&3QY z`W+S_3=kwaU6;#Q)(@@(X%dpiV>Lq3@0ve2e{_jIKfW=d&>OLVnPygzTHTK-k@fV7 zYWTRs+C(vN(CfMA4U8EV-V8HMVGu9;-?4WR3l*n|fBw}*1#HKR>Oioqt!;zO!*@%B z!KW!%$-Lz$%pPUCL@#I$w$c)EMWqXIs-{B5`)?bZWLoIYsJx%hyx!jb_>eC7%B13nI728v_= z3SM{2C3>zGmE;hFCSl_Yz~QbD0P{Q*W5gY=$e~9P*=NODfwPTBnq>o&Ku7oQI1HRAwy}KBPb1_(;*Sh_{#0)tR%zzNK zwHJg7)2R90Zd%p3hJ9x%I*KO*#Ml;;ZUAC#4{5L{SWGlv`a_ng$$=1k}EvJ2BV;{X4h z+u%*x(5@6`KywQ#zs++x+!WsMrzTJ@8T*mF#dk-?8Z9NTA@qB#LifruxHy00mzZ45 zAP@DmkRHL+99N>rEJP5Ir@|TMnK7-eTpf1dd#oia8hNG*H*g>WGNzF0RR9KbC+$7d ze{?kOVMbVUr{Gy|GIg%9o&{Mpq87FCp~6{P76G}AZcweJM7OE&7p+@SO&_@6QB>9< zlLj(rzd=o(?fqP(C(8lh$OvubTWBBkx?`n*33v&@E0_9c9Yf~>X$b@xP_-(79@iL2 z=I_QU=RL1no6DAT^O|~((3!)8AZmS4V+5rII3r^+V87ct(a(V3l#6GcbC4&sI-9YC z6O$=>a&*Vym#HInXJivIzdyYJ7~=4i1K!@#-SEnsa^D`zmSD(T2cAjlXYV!-eDwDF z-LGpfqLX-&mGcCr--iT;MMj)nc3}B>nFcNsr_NXD`Odz>bM7hv=rqR^nV|~b>aNN) zSC@r-w(*vnWn}EeoEohV$UzfU9GP}OnC0O2{7xwO1R;`QNkI*QSB{*6NC0<#^Z6I4 znsXSvAVcKP5uTtIJoCbOh)c(ZG?D&}{PXyjZD9lgHh*4_2-HCZTs(2P?uABGKK=Hn zGLLi#b&A0)OH5*~nw{XhlrnO(3BfA(A4Q;dtfE?QYg^E#3sf6^6BN1-*})!Aq=~=_ zib0=nWqau_>Pdj8@$y7J%{j3^7WAR@2N@n+Cs5KO;ZEf#yI(?&YU`BAlM)~2ecpp> 
z*MRkwpsHz0U5p5scU{J#3>$-O_afCRVAo;w`ZwT6%x6nsSx<=(_VzN~;`&lHOMBX3 zTNQJ)tI0OVNpw&ptH8|<5@kufk#ctc@b*b%g(|6kTz>7h^Od!a7{JJZrVKM8EJ7sZ zG9Az8o^!>tfY3r7%F)Oy(-j+hueqers`Xjp!zVaG2E&;bufeW|zALk3P4 zCZ{!cc>X@A8R76*mjDk+4y^ zh=3Z4`iIUOt3GR$LNkDM)LSE&fa5O{wQu#RdJ{o$$*Gs62850J(p9E?RnsOIVc+|IW4&*ZcpD0 z%tM05`QPyN1oJ5^9sij(iqmd{Nc0;mRej!^$71!u-mj$j6^oV*iRpDk{_YJrl!BX^ z;9AAlCslEL08}rb{$@^XTAv~QJYf4?e%3B$Z=?rB$64Zp>deu=8X68Xxzt9NprGLN zV4PlnUuIcQCJGE(OzPRBgvx`B&0*vsQpTjJkeA2L!xH2x`(h=7bAg%=4YKFY`k@ll z98)L$w>&MF!mq%NLOqx|1+J+gr2iUB$J7*{-qo<~gz>U)>*aI5G>pN+zzfaK8Li1n z`0z0tINCSkCylx6|IrFiiziMXJL>KjK30po21JH23uN|`z?C-;u8^0e)6~T80enRA zUa1fb0T|xmPJ7(q28iIA+I5ii?>=&>Q%6K;lAOAU0F=^VROphYw^hP2v$UT}^eGgJ z>`ww9H)XoI)OAp?z*6FJwthzO;^;k^d_VV|r}=egh%cS6k+Cc9F4)sS1rTy!(Jhp9 z<07d95oo0NZW|KY0Ou)@wKCxaQ&+fk3+> z)sG94mjbn|Thdxxvp?@4XrUu#yd-l9VW05YHsGRBDVG1uWJn`onVZ{e1W$HLPrF@a`ei&IY|HV!nnj>aRnDFw zqWF8%F$5QYCBto+4|Df>f;(5lS>IuqS7gUj`-~Pgu~ev5K!`Q1z~obe3VxV=Ld^r+pWkyRX=sp@8fyAzVaLs{$Y=c{vH1>d%fQX9{PiA=Fv z!gtFIQq@`#OcS%p3&a|CtymA8{hhGVmANGj@}AyKl+)fydtCh!fnBy zgyy~Rm&oOVF5O+_VYD!`RgBc;-&l*JH#J5Dy-drk9a`#qc?~qh_=Z4XoU}*>@$RQJ zncKfCTNt`wg5bvC60pYx%CtL2SPt!27bysW?LHIErIi#yo4p}mQ_@2iz+X@6>_R;~ zvS8hc?Pe4*4>xo}%=bKfn}ll&liKu!%hi~;*&LOs0oEKuxQbLXsqau5UH?nSBGOQ3 z)-kHrY*)S*&RbaXp~*t_+xNr}FgIeOir6R2iO-e?%PrH-D@52}%RFXHmR)(2yjATg z4L~=a&IV1BF!GQ7#ZV`7^>WIWA7n)H&=n0VUXu?r;%xbg^)vw`_Czq*t!RivHU*wa zidGA$`$NvT`5`r&;#wP7R-1moaHOExG-^g@jK}s^^~9-+U{H{#%{bnpNE8M3Yv5)u$3(Q<7c+1P6bMzct*jmvU>%-PC+yAux9Q&#=>)D~a zH|UVNf3a=qWYio8NzPf&3yw1FxyLFC7hMc}AeptAQ(q?U$`zS9R_n|M%sA&cD^!Jv{`ywc6sK47rb64}9T(&Vrad(wwApz)M_sgU> z)i7zU95r*BTse8=6@ypDjunmaH@<^>Vlb@wI>&R|8lrJR?nHwoLYC<`{;yD-sUXtg zJ+c87tn5tdiFsg^#hlA>5)|1Zt5 zNiTzVG|MeT^zN2^6i3Ap0SR`tOhumkUhjMl08CMd$bNg;HSGpjZp&o7t`uzJ zZ)ko$xy>u?EidQw=gE7goeZ^>%})pfQAMV-Lgu%lFnD8xc7NJkv1(^&?KLy|0**YvmA zR%(`Q-CbVv7Hb2#^=P@Zq+IE0ry@IiZ!5&Ia3}-3>v}Q0E!9uz=qQtDR!o)J*mv}M z3QaOIFB<-`n5%>CXGYF@Hv5Op4gP_Dj?yM0mFn8~G>EaDNB)iz*PT*5q@@85eHx?P 
z@Fi{w%d(_h^xGV+(Tcg=lrJ+lZFBc;tVG-B(H5n9M*Rp68m7~fSur2{fTV00>>_(k z=4)?!E&gAI*Sm|}HKAwJf~v&sTb|xLRQvA;*uwm-npc$Djf~+NMohh(r9sBuJeQY2 zU5xTbj58-P4_B@fzuiOL4U9(h$*HIh;X#%YG!M)`E4on)WK2$3WUdSDo~Yi){pZoI zP@#CVunc1_zuG&< zZ4hx|4_y{3axUo2|FOYteqX`w4EWnQFF|XORX}-&l1<(%tBZN%h5Qb-8}Cs5O7yW` z7)-l$H8(WQ@`cthwO=~1(v-vV=GLngO@N7n4j~Z`Al4CfxwMw4W=s zFui#^;;V}g8T8$SKXsv-@AMyOoKT!P=xON;Bw~tDL69%fpfh9V-sf3j&2GT4J7vtk zfWjHzAwE-i!G7zvX^cd#MbG-YkvEF8_Bw5PsW{QUmCDh6c2BTk-|8h``=a?6tSlj)yTv&5ADG;x5-PYGk#L z0yY#(?p92+_AX}M6wOzGQ7{x=HjxZvOewAKEDLEC4tRL+>Sa|Yz2DpFZ$=rPh)sRC z;g+&-Dz5;j0LIk^_YF z#)HDy20!0TdP&N2cF6I%5Ms}UNZev&-(ehE6`Z9VEsj!{LOG=B&wiW(KB!3|58Qt* zGySaUAzk{UB7`^HqpU^ec35}Tz2vvbKVBv`82U^(9d_w2;%9f|KmW)}M!sN?V_~bd zWT5VSJdT`9$&@73C(DN89RECo_4vy6>#LUj)3d+G$bON zj`z$j<^%MKZ5Cy{{+9#SIky6L!OIbL^$U&R)JCFeiW*rb1Ccsn58SB!3MaMqeG|KV zBCFcNpJ=@f%e4uCLA`)xOR+ePIizGuiPJt@N8vvQ(s}nkG>lv}wMeiCfu)-NJx(BgL9Y^=`uI*$hxI=NxcO!5NeyV&iO))Rfufgv|OOk{bsK#p#Cw| zHnLP9dg_Ow)`3YiiVodOw*vwm$WK=gOC$Z_4cU?rImLY(q#zS(0m=z86Kh8AiDPt~ z{1Of3T0_=W?4_0b?>GF!k-?{z@0T6^MkM{<-40l)W%Ly$xR$|TNxgntH4H=CZh>9M zyWOs3^$wpkM)i5#OzM~VBGnA~|3*zR7{SRo;308au#fJDMzArd_lp};L!b{zbxFCI zbz{kOk9OT#(DV4WBp^0HucXXa#Bua zl}GgvU{y($jnsn1p}xJWIN|q!7U>SA|8hPxDai(3FkS&4;?(PKFJe58d!6rhHzVzk zTg*_bIZqMp7Zhy7%_5aUy`ITq_J&fkt$r$EgU0;^+jazF4OQin!u(kq_IJTq2dlc7 zS&5p|dYt~cB(G~w7iH;J9x*j%xi+BrEH}M1ywO_M3uLWP+%M^IRw`~1kO%{ij%qsA zbyDCW=%A=W-^u<{Rg^`uxR-nHNW>A2$c|_uGc2c4mV;rrgo|lAhNY2>EE>K1!%*1w z?@NubKv7v*H-rzfBCNd8GK?=REbixptcZiKEvct76nz1)ldkKJ69p}t+Bpu5tYHU{H(4^(%?+7SbWQU zwA9iqo&Ey8sN^Z-I?bZaiYO~Lmc>574ofmTK5kqN+AnF@-jUK1lL8DFgUAsCJ$)xI zQIzNImJuIO%f){=hZ+kCW61FMRT!j`xg|bDIW7OdM=ZTXNP0uea)3w9g3^&_y#}Ck zjV{_b@KZH#)7coF#=r4eUB$uag(HU5(AE&iOP~{Oji`WU-Z_T8$DeY`0&P@ef%8>2Vo5^uY8#}S}_WVeKm&`W^ zEi#CA6FWeM8?U`BrRt9X75p%m2zJ3=iMO-c?z^gY$%l(u zD|Mo9bW>bpei9U}uu9)fPMf!ZdO$d?3gSgy1Vv*VUNqI9S->N;8eQRbqXa6X+gm9v z&0sJyc()sQsrxC3+>kQnTS*_c`d(29a_2#Set)r!U*~%IwL7Fnj7H{SsnjrbOf6^$gG(LpvYw45OibU92sv4UD4bDUs^<{8q#fdtzjc@t 
zs>f~zx_&m@JeuTEawEfVdaO;b+Y4TtJ7Y|hyZUC zC+IkzXwl18F`ywrGYy@mJ%XhtQ0$`8_YXN;#988l1AA1hH-htQ{LS5AfQ9n z%{)h-7{8B`Gezk-xw=H56fcfBcqD zi%c#cPrX|izNx2^@xa^wT5Y;CqEuz+>u5U_BbSVJ4h`=-HC&=-&J+RY7%fJ0IAI|X zLSd{o=R`H;fkyTx9599C(Vxow%2H;IQhd?ABFEgpc6u|cV7OHAC%or}9ooz1r(vU* zmLo>+`X1zJb9cnLX*98-{n&#TO4-&9Yu<2ovFaAUBSR}wGefaL)9QV&_$(dhDJbgC z-HJ}VOgM!ct!=o8U;~yW30`iwCP74CTP7Ahz=%zV|3~7O^=$_GzLk|`G4xOLmiOMv zx-h7mM($xIAx2vzA25j-ApS6%S9AK2tllnX)*`3Lse;bVo<>p!3*XsyId4Y+YBkfB z$3k;PHWu(_zxq)BpOSo>V!C?Tt0|CpnS)n6mo5CDP7ncR4u_bBR_aI*kJ;F#^Qxb) zy7}v#viq&Fzq8Ph{;#o)kQI*FA@hU5;D6}As;^1xj!flLBxAG^+UCfj8nzPOC`lIG z9K?>tbt=6w672^MY2yR2%3NwJVS4~OD&eLoYzdcPnQPF>z8&BBrhx$M-nck6vSAd< z-s*gy!&04;gp$PxRi<|R5;^m%eQ|}~dlOx8sBm`+^vi;-?=S{yfJ$`~y!V$kLOLUP zZD&qxWSk|i^(*WX8oCsTB*zZoGlwiP{Ba|MdnGSzO#YpXeHBSl3RQMhUSatG;tmsf zEI~%)I~#EGd7(AW`+mn99ES@Je!JM+5!ck3cp;%}616}Ix3^JZq*mE!Ag5X1ncU=C z0tEbVX=&7BM%8;)|Hr&WKD=dRHK}SR;cmQYe)R#HJw<8`xXz(YJp&b zcYh{XnbeT_tZL;^)ZZGqU9F988Ww>^jGRng1!R9~R3s$59}#urKBuC12+H~UXRrG_ zgfYQj{oc&|h%og>>UsRHyIq=0IH@snbO=7dqEPr&X+q{AW5d5dM0i^bvRaOg%}PKC z6s`o@08Z=mpbAdav_W5bi;`J_{lg*UF$d9V68fd*=J| z{zz=Pr$de1jnR7W76KKcuxz3_0O|71kwEtPhf`=0Nenp1`P_QBz`-`?$ zd~-a;D3}1*pkt5UGvzxMs9(p`5pD@_uNs31tA{Sa_eU5&!;_!shdEgrwWrc*bPI8t z!{9~@Y04H274|I{G|lMj&H7Unz}-&Q74EzUM;Vo$6_WlJ0Sedne38_U3_#}SzUG{H zw_Mxhk#6{U4i3iqVT}W#&(Z6`BU_m+3BJqJ2-Hbnd>rVxwitzy5?tzS%h)e|JwqAo z``J}o!mQlVq9bxT#ASa(rAh0yOIWbSaVujmk4=`ux?E+V#*6m~JhSia>946}S?bvI zS7M=~DUIuwNUTFujjexZ0Nj2VQsH@+Xu{pWg8tT}bfRrny25!mpkTp}aGGE|FhFH} zY`>y*=?OY!uqhNu)t%w%Kqsy~VnlfHAn7R1VWE*hnOQlt|MzaNhZBi+OLw}`xpn*q zk|r7_MzB@?;cNvCnA5O)WGm}Qxc##ulq=>I_KN4NL{4SL>q#y)M0 zinC9YZ^~;#39QhIR~c##BtAS+y_}~>E}KC?A}-_b=!aqPI2r?WrYS}Ao5{Uw|LeGI>)P%SF)<=*G(}}+FI_+w{zt(wJ+^`2Wl(6p!7VK z`ASVYZ#F*bGqQy@X}!$J z-1r1l_CaAmuc^{*Fc>Lab#P%mdYoqk%o*1wsZ@i>Ab8vm?x{nij~Ln!0~Vi8A#YGt}H)CzwmNvL_-!YNzu*Rx09H@U(qvsB#!y z+P>{9S_P8J6c05T*cJM7^oB6<$3OI5Pn|1&R&ckqfb87#Irisd6(=i3^~ZqPx(at9 zK_-`)%8uN<+7fl6Sb&=AI0iZWp3L2!6s>889Z^;5RYEDN87hhbvWM<;EtH|N 
z(Rzb*N%w{wB~+h@O)0Cy3_zCl+>PUOLE7z%zx7&)uMv8F5dIKdybFfKiUGBW+$m@5 z{i@ePmJc&-3i)0z`4-tbt}L?-FF88V2OjD`BxA6Zjc$tsIyUD4%Ar!jtyar(Cwkhi zc3Hf{a0WYgB=bq8NXep$i_`&kM6pAS=658qQg#`ti2)uovBs1%`)}OWSgd;Evp?uk zd17kB6))cT;)x}$+MgXps&3lEK+JsA)oV35{6PK^j>a)2msuWy=$le;N-q7ljx;z$ zWg`)+=9TZM`K0`zixM9bZTGKGPgvWfdJBuDZhfoB2e_?kw1qzcYITT2p0MRN4(Usm zgy;4QM{F_M2}nD9`81i+Wwc-Dx2)#b&zA&Ws~Ge0gKYwW2d}y?n8STWVh&HL0X>Qyl^3Boz^Jb%g+^N8%~#5E(TL4*tbO zwDLlhuB$#I=dtNucYLFukih*ndk8*xL30fobC_dPSGzJY(|X0qN87-b_{wyXb(MQg z0gcJ4w8>RFog=VMO_T?_N^#8=`!n#MNT+~y|9Yj~92HZ^@rHc~B#A5}Dsf0;!njBT z$YfK^ohf4sN#8poebO>GKt9RD^TY9R2F!NWUK_MWF`*T=!G$cyB5=S(P6?)0KMy2? z?t~BpbK=L*vW>1SEA;W)yn1%J5&)rt0<0qaB_5Yf_b*tikq|`RzFHQQ!jl?V7s{K& zm}zqDI=Ybk;&#;gVVK*y=ut=4Spc>u7aOqTpqpB zfA5X~Y`Oy1%#<|s<4_PHsi;0NdqHk>V_$7!+wQF_wZ}~c zZFn|2`TDB*%L4>9VW{NKa!L*y`y2LwOQH=QK0h~Uz!sVe5+T1w0YS$0BA4%>U!q58{vt1r3NXvgCZ7JrIyo3 zJis2#buSsQq&Dm(thXuqbxq?66YPq0f}Z`7V}g@ul;*-ITo*&^$Jo|x8UhT=`5WO^ zv&b}0HmFP1F^5heqJojd^#Gn-3im@g6&qWBkujM^pggwBv2yaW%WS$oinPp^aYxtq ze1uH9g!(xfOv^<8?goHn%fu6c4eOGfJOr>|Ps9#NS)j`t@A^o$IM!$XC%RFCGiA%I zGgRu4@veV>N!ZMYz5Aw3J7jm!d|OI4gpziFvMep>`Ct|VwK*z4d&g~G3f@6169Y)k z?fNj+3bPGfIQ`8XtKyGcV1QA%yE<6ZpzFF*PMEX)Xf`cSkdPvl2& zYe$u~qE4IX!NZJzKrn-^7*nIE?NP4 z;Gl>-Q~kVRd2-|9#2CGL>x*G*a^~Wxh(y{TU6JOF+zsy|4tNYSvBxD$s18QiJUB6< z_>>3wz23AABAW&0i&Y1=DVb|>el5GJz?K1e{sH49Uqh8g1@&ey!~PN#!9KuK29L0! z%)cbd*;_d#+&T{-xZsCKzrV3FGdr(;8>Q)j)~$1IP(fIr9?EX1ygyTj@9+uzw5B#IRRj}I@&vFC8WkCsHulo;sHM-7M9j9cHP|g|s4OF!O@FyX(9zQ&+0B-`|z<*qG zyQ-9}Mgoh8LYYPy8w`f^z__MEfw@wm)b{iIX{q_MqIhyZsnyc%oR@Ni_#u$<=?otT z*ctHxSMu0T4vrw0pOSZ?nnq0P=C`KmN6o3TW&jghiSgTshHvP${es^PcG=}+s_j^! 
zz%(HsatFJLNG0hGd&Cc}x9uD&UX|`+*O+r{=tBs2r*NB9&$F1;L}Rl}o02EO8GJaXjm1yJ@>8YKV#u zSg__awnPFi@m%In>>sm+U;vJXVQTBMtNHj`N8Wpc#mZr~m+XBWN7`s-YO zs?%nRBus*)|3lSIrdXU7nTcvxC-q3lu3a*kQhCI0M;y^(EX=?e@EzzY3(trTd!{jP zfCN~zi=Exa8R%4neq{%WhB)fX$x&zt4IIAS)FK|MNL24b#2na0-v9FV$*DamcEiSMUt8VKQD8u-a2OTWncDB-t-q*sbZW5g1_(YNB!Mmk zHy-0)ht}OX4*dPa=^I_|xm$g<8&LK2tL@52A|HnC>JXoL_KsWD;%#PkZ0ZK;t4?Og z1bFG^DekqI43!w}b`RUweB$^k|EAQg>3xX6=ii;vxxMLeC%91v*ulz7RCfVg4lQoT zRXfREqdNVoW%aTw_(V0iC26t5k?>MI+=p6md@HfJanyZ&^NY>wx}~Dy51mvHLPt2H ze5y8O=!=3J;EXOhL;1QR#<#|qoW%+3O>ZtfGD$Br^9()5ofws839$<fyz_;wAzqN`x!ZN+&ly_+0Hm{21BND302~U%$aK+tA!%9g ztD9*f4Icc()iS9==;xNWM)EQ?%rdP6Q_CVn3HZZhz-v}zI*>=@iD7-ta>v}@-WWD6 zHX|f>?PSkE`?tH@ZbXB9kTHF>t4yqFwBM18??Pws!IdwZN8WeUzy<2jrl4OgLtjv> znt+4pcDch2(n3lLzKfV^*^-3tY3gXJ zzDu2d2ck_9>{dVwAeJW$T6E72B9#y{5G!{Q^a00kf8#m*Pv}e}7NtgLON&cFYj&3P z8d-@cI%sa&DmxzQj?j%fTq(YCC#(2~poacDrR)(;u6Z1JVGisl^cD1SLlYg8{yU=<6X zWNq=8f}w4~))Q_9%@U<=g72mD9m{GZTFA^DIr2)t19(mU01Pzfcv%(|5i)>TufNN2 za2JD_0?a z%z|JQt#u1W%w(eoAXR~U=IP47#3@2q!0hN~R@zh`fJ&dv|B6H=@T zQV7a-7MtT}S_WddPWVFM;rqQ?2YC`)tg`I!$oV=4*t+jeu%Aup$h4%m1-35Li!RX@ zl~4oO^=-;yukoN@%v#`-3G*;Qt@xUb3v5WXz{3!+L53qiqs zyx08?BBm<!L1l&y02vgUXhjki|?FQ;ef) zP>Ap3^de8T?lz7*?E!ld#@z1RDc`ljKt@XXAE2S;j~d%!vPpieJijANJXx@hN;y$? zoy9_M(krB{*HxBIl&7rHB^O_PUO=E~yl98!mAkO!PAW*r*lUr7rO(#qDAc%=tleoc z+0+?bCD*fQq`ep$rLg~LUgP?@hS=ARX}Lk?uk$*%QGN`15$mJ?wQf`(hGft_c&?mZ82n4@fy@m z_Q8wltW63GvqkjHxLWfPizii>2hn^6j~4O7i-5gd^uuZUv-LCp};rro7opcpWwyFV3*T%r`o96O&WtgHE`B9^P6 z!*vB$8N^P;3(4B4HVeXmGV(TD4X}^~E$1ft(qbI=*q3R8788VD|H%=y-K6 zUrv-0<^Cx#om1itj?p}(D1AnBfu0Dz@jazB^o0{^hzqG5`_ErHlt20bYOuqOxlDL! 
zF#xT>M${pvSn@p>KX-+KC?Bg?9t+?>qX$$$c^7=A4X8PYUFg>)t+(Q#0^-5*DnVwN zp&m>oA8Z{yfZr}bb(IU3KK&#>M}@0qC_X>IMqw8?u;K>pToP#U%9*9kiJ>fP)+)3u z941p6bLIZwEL`^=$n}wvjvP8Gl`?g%-`(AZ64L4%tmzzykLr7N6o$oap@s#M518G$ z?}?b{u%tYe2zSp}aocj<*#`upp~DvrZ3hil*GYGI^e`+VQ8L-;uH|l>w`=^x6ehU{ zlsOnHaf*AXm*c*^|5ju`u1}1oE*kZ`ReDwd3P(h-k8b3;7XY!xwl7l4Ca;}g2D}!; znG)K1L|oSQGkKm+o(wLLaj7Y;oyG9}$10Vj-1YiSFn||*lxlXmD!PBHObX$1oZ`eo!AC^|Aa1Gj4<9F|%#N{d*FS42?QdWuBb^ep1;R#w0qbeG~5pz2DOSj3@-Jxi5+YFb*>R?`iGg)obSIEfLEpD-t~hc?T8oHa0$E+vN+8C zajz7cU<=R@x7WT82xGvsh!EG4nMV)pmO1?J%7`CiBEY86LJ5P?C30*Q$q3L5NC=0a zU^rzdpC<(;4&O#Dw_5B+yzg;-jRbBd+xa7nY451Q0eu7NTYNDa1YE;KEInRt8;DR@ zmIxGF%05k|19!b2H-jxXRs!olzE*iLKq>m3nzM4?pHD3KX{H|?E{Ye8o}>+^4Jz?S2fVD;>{5=KJ1d58&QsRnpi@Qv+HSwi+c%w-`RHEf5!)v+ z@Z}B)MAO`CvWY%_29EJ;b%Tp1VCaDb6c}j=GtFj)sYV7Hr55c?l_Z&9h_wZ4_wjL! zQ2)^gV+r*uYSK96BevGSh}OqnmOYU?P9S|>6aErHTi7?Mb^W)SAi?9q?6f`6-$mmg zDiZe+ZIrr0jFuN{!1akse=ACg9xFf8TFy_I%>u)b08_QKC|$+4{=c1!P0*!LT8OTjrHa6KTcnyIDWfdY`hMU9t1 zaWJ_s4wR98!=~8vhRlZVgfM}*q9WNJ49uT%x-JMxU)lf~2M7||tfA`j(XU#yd!alP z4BnqkSD7P0^c};Lo{g2P%xvd-9;FQcIOR@Va_aXP@$)``IiD(G~{2 zT8-KpA-*H8Bg{LfZzYiTl1-Fu+MCBcPYEPl|8}4Z2weq^)vK{hJij?Q!=`rs5G;{{ zftTdI#+=L;n#QlJEH9rC9|9rgn@2JlVvKsIk|a#hWvk<54Ztd?~1F!`-|H85;jQvx6)c$7JVi~Sw>n|cm?W}lY_w< z^RY~s@A2o1OHx6;2kbGW;_9G!%83-HFj7kOp+skobS()6hQMqjmYVqn1)Ncf)I_tW z<3>#|AO<0>8@etY9Ej}4=g0lpwzk-n=g+@5LUR?mH- zcw^}4$9ifcp+B6kln>HcRCbrbPeOs_SV?+mYFi%z0tyndj!rlk*{2$WJu>Yj!g|jJ zSBvL1;sTL<006l~ADOL*{U6K*K7Qz^C;g};Tvev8z;&+`Fj;vFY00fOLY+Wc^wvZaXfm7|~yxv{YRpna{F(WrbwpC}s4N zH#h`~7waytAla3!ef#MOTn$oPjJHIw!UbsGn0EgON1Arv&D4a*C9DF#IRyV|n_7n& z5|#*m^HRwXf=?K!+Kgx8gb6%|h>AGhw1juQ2L41n?&S8ck@oF-3Ufu)lvw7ioE+tn z9{-YvUMS0M+6xMV3fjB&mbOui8^C=PL}E)@p2{KW8L}VmQf*OaD}JE=Xu-?6o$nXs z%2#$>fqX;!-EYtk=^txYyUU~~W$0p@AbqWamXW@$vpXQcJHc($DOHQSu8$Q+R&)N+ zOYVT;_1QcG&%kC(u_0Zq|EV1LUe5jxT&3=iXJsuSlI&u{x;@NdglGn7?>-BRGG|Dv zR|Rx^@NtDt>#Ut>l0&-1zL@mF-MMx3$CMx*nqgf49DvJ>cjeSbxO{im%beG&ghtJp za)!^HkbbpWQD?P@PMl2yvWlf#Xcu`T`iFL?J*#_ozPPsKA= 
z`SjJsdv06vRf?V6&0+P554K-QFjJ{Zw7Hs868hWtKtfud?Z+BJ(g~xb+8Tmo`2Qc6 zW$7oSN{SJ}(+A;NuBDud234F#oE&&2uLN+KHZ)rj+e4F98nskowO4P;C`tA;efD0X zW7zyiZ~*m28F!4ItImFF5=L=Q0639EtgZfL4Lck^*0`?&M@r@yzz#}1%g`TL%8Mg6 zg$<%AMz@^90**P}TTICFAA2LnUssUw?o@c(06jp$zm24uffwbu$QFdXkF6pXG1P0~ ze+Mi&PiR}$E`6^&@jt&cUh}ucc9HOZ?S&sB+ilD=&JSjH9h=M}_C44u09=2yFH7rE4 z&&=djnXDuX4}8ZuiuF?J6Qp(yn0gkXi>}O~Cd)HjTMyW8Mc*mhT{iKkwEgiZWPKHp zwl*78wnEG5--6~tNNV1C9O9%jdzr9dh;HqUyrVs-wxXsa^nGmb%f^jESB*wsEc25WtlaBDscS z@N`->!P$SYr}H_o&FH0yYql9W8cX4}^GGb+KmNB=912t+Uz!xN3GI`^IQt1F8VaXt zpi4@nlA}U+tf6tKJLQ$bl!PkLp=B^Fw;q1@KLo=xR!g+dz^;$GSeyrFd+CT$W<`^h z-`2@G8XWC(B}zD_-aS+AFvJ4o^lo?sUdXY|mMfC^aV=DUfqAy0!5WqhV_%6rmY5=; zL#?53UBui{YaibEw>~wo=+kbu4qhbM*lMExKXNzy3GlYVjN>A`onttmJe*b}Dc4rR zDQc4BiN=`oBHa@B=iuOeQ1-Z`Vnt6{ zCg*=rnUQ1nj@CWq&059j_7W1Y1oNN+!pbopptFR7^EF=ho?DRG0(40 zGo4j3M|+RXU_)2iQn~V%`^-y};zGHv3}Fons5|b_1T;`p;485r(qV72{tN6p#EJT3 zgc@z|qbSnVC%^d)bcI|t;hK=F~;LX<||To)2FW|2Qh zcgg)#VEN|csFgLlTF9OoyUvS7T7AZrHtO?SVGtn|`GGGrBlfjN3av1$UW$d2dG~mi z2Kes!0%sp}1&M=~b0ERknQ8dM7(+X(Oc^(;WnklJmx&JnlQc5C*FZvmDw#Px$*lPg z|FnL>yh5$%FLMv1rWyb05Wq!bH#_av0lR{;;Q8XYXVhuq3QR;%he(ZMF`e2FnNKYU zXO!5(#-_mmzxfF@kySm>uN&XQd-UDzu8F2&;Bgv|%-K@Y`=%Xai$jr(KUdP*UF!3Aw#am?Hkzk zGFN}}FjNm`JH3T9T1q`aR|aY%O~rT}7Yi33q33Cv@(iT89V{~V&$QLkS}xPq1#58U zu(YUc-eKJ&Jn+R-RO&-crGzCa({WUp@TJ4(I6oRTcY*3YV>jEpP7b@@?ty-A>xD6a zMMV=nUk`jZo%HPe)sNiL{{`x9sA${8wsop+CR6>h4VdsUZLHo?x}qLhA-)M;q^xk| zn#0)LX-b2CYG}XN`a*JYfH*`;ujc6z!S(HBlGU9kJMx5&dk60 zNFBce5Ydw7s*rvfLYQaYA*Fnb;Y2Sorv`U*9=s`^i_tN~SQI(#fZ=tMb<}G2Y=p)r zP%{_f*5&z!x}n|}zJ^#D&-xh~kuxGt1q7l$jAMfs4+ z{kpuuC>JwO&WLZ?6fgK@kT|$F$+=TK`Kahfa43$V?>(G5LyRt=`iK57zq(H7kHiHH zL0ADqW`4@7=r&u=f&v#YQ zp#cOc8^DM}0E#9rF*JjCY*U*%=v_csq%E3b?2K>Xs^$tIeKvc0&ydA9h(g|S)xL&% z+ZyUa!K#JrB_&vN8bpOkDq;FH*dS|Hm0J@rmx0Vjk?%fcT=>5QYi-WqDPQcK}#8mIbit14A$J`FmEe+Dg&jM)TFPP&z~HQa!TaaCx-)z`5MusF`hf-f&_g5}3SlPP%>WUpTz#htO*H26 z52|B&{YrvD!o=mF8of5558ZX)2L1s&CS=y4drmsF3wwAb->MzJ-sBny8*q-EMg{zD zzXj|OsU}mHbKu8wiZ7L)L=eJdz_kJ)P- 
z8D_qd*WTna`$}|-HhxMl)%x2hGXDREOVZ#Nb)Q~vkdM;q3YgXSv?u7$XpE)DmdCqr3?a(6dw zYOYHPYPrT|KWR3QoJ8Zn-16;MbHg)XKX(cbq;6@?MmDmBmi&kkHBU8$P1t}#GXbf&IecG=m?Qqn_9X% zPTnWj)i2nJ9`h!D`(}T~H1Xnn_cNj6*L8v%Zxd5B1u3rSsK;MeDYh{-6b zcRfTJJwpLtt-Klwpz(UqxRv8Z!fR<&&(S#{cIDq5(SZT@}gm z=DZjxh6(d_K>^aOD0CqAGqLO>=t8BEWv!Scj1r-EHlV*Pkdr~&pEh5_EIQ#W;X}Nu zDWwXd7KX&waL(#3Us`sDwen`5a1eyDTH)z5TV+M5So_D{ zXWbHjNi=k9@C~|+GMW)JH=VE5aUB7`;)yWJME?-d0mXg3zv*q~7o~ypc!_9gc8mpb z0r1IDzZ`QdcPtT^{D51xO)2(wRGi&J^F%&aAjVa<`kl}a`z-&Wbw6+Glj!*OEwLO; zm5m-{6NO!R-5U+D*IJ)muS>fjDO3RzEb_1mOh+niss|!$e{1}9&V|HUGQJZzNSR0C z_WC42iwQU&)KM;$@SX?II5OTuPDE0pkEh{o-*>~Z|3`SQ;^5(SGu)zlS1oFkwn70F zy91Q6sDQELC&@uvj(eA=C0FfNQD_lzq18-bJWaQpb*=aNv&IKuF8oeunnmv0mcedc zRjR~O@*)t@G*GA65vAwhgq!tWP>d*58K~D>=Wxq=-&jY>=%Wdcwlyu@RdrYNUJO_I z%nuI8fp0oWO0??3ge>~GUK{w}l4YqIUrGR^P~-jE8PTuMNq$LrX}?7QN@-ardY+a- z6Lx22W%Fq_Z0jx8i%W`Lb!%y9HVNhXrAbJ^XBu!h*3CVs$eU05=l+`OQvGa2GzVl zo!nLBLwhj2z8@a=hEwUj47f&A)EEO9CpO4F$s_w`(L8U)DG6}UJ@DG|DpYm%y`#cS zj%RFz**m=qvhnnqBeo|>K^UQlRe+(0&C7{YM@Vhx>OrI%)rzNr+u)YGmLrQ0X&R`^X6T-VKtR zPHJ#NTe|tk+az}Br~Q}7LxiMjp4GP$6U7z3^L$$@)?-WG7#R+*Y8O;1b4juNFh90g zX(#@8vRp5fq&n4J$x0GKvbeA+cJl#;fQzBY6c~=P#sb2xWYR3W>39n;LH1%OHf>6G zj*R1VC3p8Gx@Lilt09-ihhyL0jPyYL=+H%qDR`2m%@-v)9!!bt+64iF_kV>SUYnuQ zj%wU=q)7!q2{9ULz+3^I2^SlOVOC~Fg*1s5HER5|qzcjpU2fwfz!0{sh z@@D#-W@Dz0;3NtPL2qjl!(3)>aBzF1G?#xg4C<&^0xFts#CrV6=3Q8tDzPk=Q=C|2 zinEY{>s4sZ<8g!ae6R-=)ca3JU!92(7vOq=YUki@l0(l=nizy{5-sI)+F~AgsTIOk zO%Da_Dt>DU+vQ{XkrZP%;kbOfci=WLw-qa4m~o|o{@i-Rb|li2`6EdTb$M*C=5 zdX?o~L)fg|7meh;t&28jdZC%mkD97pjS*LjAp}hBLtotpvXYFl*GjGh^g)%N$&E~; zVwuX{>xR%WfgrpRLG^o0t}V9c73cUx?4)W;e7C}sx2KG$mHizn!CEl3jhn>&#Ygg- z*JO`-p@s8D9E7&XUMO3cY0$*86|d*~NW=u!~1)sm%ynQ#|;h-nm^SwtkN0b z_!M3%Nj-`->%kkQ@2H5o*o?SG5TO@*UxRO5nC?dsRuWR*{?|W-m4KHjNS|x+w;Ej_ z8q(2L{o%{T%`Be%dI(V5Rj9lHO);^M3G6h#7k#FEkS6$qLHyzz3wXF`;hL~NPmi4r zU0l-WAV5qItqiYFg&=HG5X+ib#h~@nc}K2qJoIme-1*W9&iT#OsrrPGZegp^9$O|FvVG_V&T` zT`wG3npTW(@m+PLoAdOrl+>S{YN=n{E7tRSlWH1mD@=ox&p842)?H*o705RX!Lkh^ 
zRwIkE&p$52V-VekLI-+Ja9-hB)2GJ`L>tFrp69yYUHBFU!OzBz3Otj=f87BMPUxIs z(XvjiTzzk}cgrPwR{beS@pF!#m{Cxd69J898M+j;N>>i5s%NT0U=bKU78h@Vv$!1F zTKmd?Vz-)G?+2Q_wr?^1?tY$$$VxzlIT3*Tk_)q7o;h1jjgCn3%uiETc3Oz0<#CiX z8j=zaxI(ytCk5G2cuvenb&sh)$WE-^pUK z9v03(WmN+_b!@(Zud>)xS9cttO60=9g=A}&TNr#<%8`<$1*L!?_{_hATn^ben7=KC$K#AvX&th(iN)-DOXX~>(TadmDaC!kM}VjNUyg2u@J)d!=|MS zi0iu|I*|+J4cYL+ZCjTWO)O|D4k466?;(wt=}PZKEb8j18jU^u>`M`Dzw(b(d9A9K zK6(CTS9vlS19JSr>Z_N7aP9lQ?`l564<&;9kvASZpjZd8?1NIFTO;{-PEUjrTMXrl zvFfMOIy+V|_BDshQ@LsuQId+aurznDEFh{SoGm4fE%I-6 zbolUBhTLLx$Ci(WlmdIfHEiK-t(B2Fiqf9|Ac7w&i{>V>EC-XYn<%J{ zMOcFZV4#r?j2fC4jhk06L54|g1M_uO=j%T7l{xW+>;d#LPn@8C@DHAGOb#TFuwri% z-a3p@;w&3C_dm$eRu5nys$*KK?3xJz{&fZMZ!aop zyb=ciHo>shMfS(^U6IwC{~;A{wDY=^RIwu>s2-ii6kZlY2Vd&wvV&#y(-oy`plI?= z8oIW~SaPg|osBLhSpeVvdy$o&HY~j`jl{w~3$}tlD^IOjr%mgXvSmvpR6d;3HKy?n z(^44i{DnJ{ux)wf3-0_1QiNA6r2y^F;h5Y{DDMem#dB=fH^5#Y5?JI!>|_~bLXv>7 zPs3@`Wk(BSsq1RNV-+Eof+N$CCN&#O8>r-be|8L(Zdero3Yl~7wE1@??jzyWLxJNT zzV3<{s2sSwV~*uB-qTVG{~(U(ujAb;z4c!A#|=6ClI_=k&ih&I;I@n=6hucF(bd-? 
z^ZIYw5M5idzQFZWROg`Z{L(!H#-IrKSGN~=twSd9HWmt)e*AB6pr$S$!8Xw?qgEdN znA2QR@^*t+MdAf|ngbs_+z4|2W6Wb9apyN7p{0PmVR{QvnK^O?jE(bu*PF=}q0h1LU=tHr z5Gy%(mrW|kyCbN?Rw2UEJ5A*s$pcQupm3|w^p0FkXR$$U0Qqk}Vk*S5@&&(u4c@jA zF7=QN-E!F{p;*)`##&sZ;8fB=-KM&qKAP266_++JWKQwn(I$76LvUNZ zm-O|xgAyV%qYucaJR1&N(l~<}S!NSU1m>d2lJxS3Vp^=1Bb-+Qw zjp})&)Stu|RZI$O2s21CJ;fXbRLAHc2!b}lj#<0Z73iYYNF|5(R^`(1K4lbbNl+HA zqdRWK0a%<;X#tD;kTzBzk#Xx7JtVJh^!MVUZpe8(LJTn4dEC|Th&7K@TGn6)4)ojC zW&Ux0i~rFIb@}9=hfTSJ1$?HoocfOlm0A=YmfDng?4j61ny^lK3(9(qQSQwQ9NYtv zO(#2QZfWW<>a%p4WoonKb#>`fEi0+gfC0Crj0gd_*8(iXW^L6m#T<}WRo*G)I_Q_D z@~&heeQD`!u|%cfvAV=!KJI@Aptz0@n^AnB*U)UFa9YPGoe4<&gJ^$&cde$I8pfzh zXpaXuA5@1c>`0a2*8y=?^P9vprGbPvI1HKXtKo+bC?GrGQ;i5of*lY$(w?eyJWr>7 z0duD23eEr$yP1Fsz=XKB#*YJKl6$d-aY<#GVk5CzISCK6c~sYgjP{P{odtSL=JT(t zOF`>~!5?+NX=bpO=p@m%bpt(dw}052?8xeC&KIJ0 zMv@U-?YtE3*UfmPeucL80>0p%XBzi$^s-D_yglTp)I(%>%7$h`)0*Q9U@^@@s|h^# zv)+0vo1bVqZ@Dp=>CVtQAUrL6SDnkuH|i0>zt|qvK zAUY4)V027jNe1c;Lj-`xsaUf4_E)7bW#?90pn5-{#@aEfG5g@wl7i7ZLX9-g>_|XX zbH0$tmEClC9`L}<#z;qK)76%(ciU%lg=p{Cvxab2I_WJ4N@x{RhiAB=!2)*qMa%sc zv69}GR~$nv?wrP#e`u848$TXuze6SvBgC?U@%57DiCCtn3ze`gCFo@8p&%kyZkgT% zt&&%6Y|e9&X}>%93EmDS!tBw8kMBj+-aBstqFEJJ7L7Xg0XIB7AsobIt!v@J5&M>b z$X9i}Oo5OvUU2Xbs^j8(q!=ZOu#{zTD&Qz`S*-G=!;_D{7ksIIrVHM&E3~@Gh-Du5 z)eF%x@&b}gOCijG%PdWXrp#y z0QZV7CM|tFyBtRPGvXqHc$8K5S5G`Kep_&efMpJX=Y{i^Z=sQt-Hlky_AFIV3b1I? zO@9rrx>>Nxy!5_S4#X3#?~4@I1Hl=|_ij2;1|ELU{5mvecS772@y6Hb&jG(N8K+`? 
zoRQFsWkk749$v(rcJ5l!kgGPWuKngb=`i~BuL(F`y8tw5YN!G-h>`e`0XTBtr|Y3^ zaJA2#tCONNO2nKLC1vAvy6|b}s01V(1yeh*y%|o!XvyvcFqOX61RkltXHW4Iv zxB-Rl*6C0&iFa|imA5|Km;G1tXvQeY6wZca5DenV2GDG!+^~HXkcyWa| zxi1g$*}K{ZW9uM%jGR8QuT9HDB|X9<(4*B)Q0MHQhq4<>(-jw2chI*-BY5~(hRg3O zr9cyWZR#O5<%_p_{($w`fb-s&HFV8Yg?j;5s(i?V_`&)|o}b<6^x<*r7&}O!gB{qD z?N#+La=rHQ>B);}!ZU(BhORr|74@Z51K~YNGG5e-qcoPI%*oW@sOkVKRR<~LxC(_(wx04Un?qpyDs?wjL7PX} znqT^ezEcA>p)%Pg8ktG-R2tX*?L{G$`8@xBLaDkO5x9)u1tRWZn!AcK;lWYza$dd4_B7cO$ zRL=TO8=0=Fgxj%QE|MtF-iTPNOwzIX@okZ;@NjTXO!o>HpufjkEEzwA5Oq3VzQL}Dn^XS z!0T&dbXp`ca5ivNIURl^aZ&h0%K!w*9*MR^k;2p#)MT#m&we^Qxq9|WIbGApQO zH0b-%u27+57xWNHmkqOR12qiC{oEvcRLT3laNzwK(5&nrA}E{-?O`2?K~CHfP%>Zq zAK__O!+w6`RpRtF_x5`0Ad*VknE=_FzL`1&?dWbjfdTvOLX%vhs<*ZaZRV3p)bR4X z=YwM&2hN!(LwNUXbGEki`-`cJHm5M*O3p6W2JbQa_OMm`p4{ei`7>WJzetz1FP%_G zM`edVQi0StpTQ$kNBefdVYycxq%@KONB<%LH+ZONpXjuaDHW<62d&gN5DV(W($rLT zMKPoj(uDX^U(*367`e1wM&+4I4muY0U|RA#6kRW3BQYaxM4`T<99F$FtgIhaSB!n^ zI|yeB8?CZmrowrpS9?o&I7k_7^EjqI#tp&4g{axCHpx(Ph=q{hdPEg)mRNI$ zI73DO;;APSonM#y+WbWR>C119E7$R|xH$8!BQ>dRnmLL=V%N)!p*+?o?UM(7{}zYJ z>E&HuQj zdUeYO`EFl{&Lnw2$>X`$q^-qA6wQO)3d5n2Rsi$MXtI$w6MegF%4Gm)sFq)8wV5D; zpT_8|@$CFI%*`^*F|F7ZQGmoI@>qPY%o;UxV>sQt;BPCT%H&5TfrX&_GBY>&y+{`{ z)cKJ|Njc%xw};Y{@OA9Mf;LNSIi96$xPN?^<2$w+AgJi(-sD)318{HovkVNqMs;|H z{HpslR3necyhrnU(YJF)j0WSK-nweYE~~n$bH7q;!!I6ot+JB3RgcxXee)>%n8YwZ zlG><3KQafXY2#J>^o6*gnEjgCb0|no7m=gcLm(57zUsm~g{#vU+=*wz&c=u`juT6TmvMsimyeWjG$!=#{+Y2;39 zY^Bu;B|(t7L{X@e%qdY?A~l;fckVzTic-ciHiDxB1e1@op?MPo;hg9z{r?e}Q$}z! 
zNEh^eWV3+NdR>IUt)q`Qnwz+&A(mCz1r#KTK`r0!5^lTgo)J;)_&|w-7$$yMaM%C1 zj>i+R!Wno^kM)sFeU-01=%Jd4O3JX{4A8>gmdYAwEhk_~TL~+Km;qe^t~>{Vc0X~> zJa%rw7BZ#rSDCoQ+bADZG-Tu6b`|hQY_2iT$vHwaN?k?JhbA{MCeAOHb7c3G_0#++@#>C$fg@b!X) z_>_DNWO@)U?QL=o+VC|X5b9$K`o%Arn~r?HdoJNDlq2|ygY(w>Nqy>BDqZRMoKEfq zl6Q(TWa4tx{ZX^Oi$;64_o|#66lcPG+SYG9q=Ou+a@Y?m%f&V|ZEsvbCk z6VGk6HrvAMTd0NR*}wPw2@=CQ_msu#%}+++|855AQrTXy!^bxdta~oR5=U|mjm$ci zTl|ZE34!S5mEh~_qMMZeU`9GY+p~{*WdYfR+&5G+Gj5JnPT#0O-%)zZk3u4MvvaOT zlK8x*Z4Vg;r(+3Si@3Nm3_Y=do%mvjIKu2B-eS>di4CDTvgYVM#-EyeM;t)ciSKH4 z!7Ztol`?LsBK0Xmk)PCl7kz;yRdTxiUFJa?ffbqW_*V6b^Jd3be7$ zr>`<>l?nHUA~dpBerdSPxR?8!k7GpRa)|;+c1-+7=mZ$1TNEBKVquk+LpPBC#nU?r8WAb9P`+Q-$17Px zGvE2{+7NZUuf8a+yFd)B+ncm~)nQKW+R4Ra89$m(m3 zmB5O2yBEPJ5-7-H3Dq71FOJUCkR0m$eJDwo90pyYV$KB#yvk=M4QcHu+#c;WRSQTt z9GCui){OX)&M6EF$|e9i&=f~xC#S|IF2EsQIuSw;y(ERp&xkI2cYRBKKi5RgV zC5EiiADvqG=W+?5M+>O`j*3DK?Leb2hrn2hc`7N)xs`*@tZirnMviNE0ZRK`9^f7R z)uS`y<9thBM9_Y{Nj>MXhuKphB0b1o)haJN#QAHP4J!5)VV~?UA_%~B53Vl6Vo29T zjr`n~+SCrXObwb!W#z9QM$u5{?IQjeeNs;*{A$~KA|(tEO_gkqQvkLFVYEQBHR@IHlX;;%&%K8#wZJ%_f1*hWhjiW~hO-J{M@y-lEUD2S> zUJT}#P24*CNk_00mPo+EY`|=@jf8z7UrJ~D?$89A{-j9x!AhdBXiB~XmgDlM4oKCV zW$i-Rj>I_wbUzJwLKPMA0LKC%9=l6I#dwQ*SHJD1`)4*0Vs2{v5T86KJw?KS&H$%I zMzVgXqW=^YIK8X2QfGeK-UN@5Tm=z-eU z^iMQH-Qob@b1^f@+mgjAZk+Gvj0*9k!*dj=dhHM)VJ6xbuA(H`9>#PpyPoM`(HAgF zJLLrFUXFii%~xlJArfh|x2$~2cofOTmBH#IiL=*Yy~X8wA@I5&BDoC5U+xa6H8iOt znq<-@8$MK>q*~w_e+0=~Ebf!W@?5FD2m)uOvO(}w3A4vo^1ol>oE&?{+44wQC9-@+ zq2e(Kx(tB;#Hq6;fr*{ophXPsG8bJ<;d>sm zND#R3w_9Ailqmbn|VRZ5PDEWMLPK z%VBy8=F;B~3+n(~L2h@I6k2sOdrqK;x9g(CZ=U@bLc|=t!vUNa9A9>b^AK47pf9ir z>*Eo9!SYl3FdCt3?dBSNQMl0(y!K zU7k#h%JrqGT&rh|^4uuwWh~Xq4C!q(4!c*T~ zO*|I_LdVkCPW`Evm{6XD3Z1I?7u=6S=a)X}Sy@=NAmNVqU7@%~vpgt^H;ahY0)l@f zspUs-tkWE-iRXXyr1N;T!lo!_pKQFR$ibT-0H_;r+*MQj2}V^2eJlVWYvra-7Q)7j$3M zwr>HuGHReDPCAJId0x(RD_4IVqU}Sr+#q8KuL)_XXmW<*iouYFCwAW^8o^2w%~t8a z^+7YpD@xxI2A6e3(BGH_*X!wN#`pFt4IP{t)lNKFAE~q^KEAg=;(dXT0Gd@gC__Er 
zZGZXwO9s0Rt;0RIS{x=PD{;sfG^mG3c4aGb$71r45L)Y>u(sPb1E`Z8K*=@ab1n|I zdnB#I_^^lN+eb%A(5t5aFot$@m5>&T6=(>pFh~EGR67>zttY1dF7{lzVMtAtAvLt^ zxS1jA4k53Qr`oiXN3QmjmAg_2-*!fxGg>Ol8gq;LlM>*&87tk3cNbVwLdbNoQMmtR z;;IfoQi8X%_v3XmPq_d!&O3W*-N%y19rIjk=68Bxb0iq(1DkKXcNIJpcpYP(&YBEGW{m{?$o2FlVl~`n*GqddkJwTTNEsgV%X=e z47Ed8pVjCaASrNBs3zQ5iw@x-rpu0#*J3YHP9*iH_ZZHojRV}L=4?qyYZSjfyUVE> zM}F9RwbQ_!N|J(C)S;1hjJqd?F*!RqfhX2TgGRIe zq#@`HL!SC@tNr|1`~TheKN&bW$ZbUP+~-e_&RsuSJeS^B@>DdTSAq>iLIYRo6nu(l z4tO^<-SseAPidk~y`UXrA%q3%x;J$~U<@3pq zcAkZImGRZyL{`h{?NEB>QdtCf2?)pwMfPQ(cMtQNo9x>)P}VMP(VV%=3}^q4M4wNL z?@xzr)_F3Yi!*05VlPesbVZkYDwU^fPW%4d_SIe2zh|sWWo%#8q>tjAHHB&282%go z2-9I&ry3vHdsL{l){u1jf&!cf<-ymRvI#cHks-wRxn0ie1yZC+O@v!d<}Amg5N=u) zVG|ig{KluA<5fsmkg)uC!HD)SUQ)VYWrwwfLXo*6#;%v9M2GJ`ss?8FI|08oFHYl! zcqzCKSh(SJ5FuMpoFtOr8C5hnnYM*k>%%2v$A4G!t}&~f_J>Z3rg|=ksuWrlZrvQ& z8L95UqhBMGl6|d_%4VZ#dG+g&2dI-Di2SYBXb$!1Vtv~LL+bgKW4ZY$%$&bOaqyjZ z@{7@M7SB1SvP>(3hr};Ti=HQr{8cSYuWC~3J!xflaeVjItZVLcu~Ot0j&-33Y;w8M z$qsy+U4+1_d`)Rc=CcwC%vhixu(RE&a2CHh$Vq<6u#>N109tt`szUc;=$kOxVwXD< z(wQL@FwrYV#f9TC&YqZ41F#8k-*|OyyLrLY<>S~e4QluoahWkLE-_*yUc{A20^HZK zjmb1xBePnz4_0B}t+FI!rh(#Ve%eE=7EloPV?bp)WHQo#S(RKZHdzm+|VU~c?C!=d(m<8IdqFm{w+_~Ze#B_oW$f3W?>yU zPaK$wRv8K9J)C<3PZ9`GhbVGCI{Kn`;qB$MAvMSvvp1t0-2e9FBx#$`=wlzRRqz=>=MVSaOAEra|JwvN~ z=jFN`Gcwmd=YOF^kGRh#@>Ptr;higgJIPQ^vn-3is0KCe*+PX&~kD`qtgxPj4bBY zZZM*qcTKQe7DuWx;lI+>GaL1N?H>Trs5z;xQ|x`m<~YMyK0Tr`Q?qx8$3JA`0XDXA zHfiZkFVFNnqW}sj%HmC^{CJr4c*7|cQl)XFaFHVOrhK50V)#P4av4G% z>ynM*kK3=|Tw_|Q2ze7e862!ry9eJT55s&b>bjKf0$O&z7>*T{;z5@}BK3&rv5;yGl1+O7(NLP1)v= z+!s$(e&=%*WVw&ZJr9VG>6Nx{Mt0A<;1e)Zq34?dKq2&_@Q>Oesh9xpXC4z9|E7UBeMxLbde))F5tdjE+G(XzN-q6VZX{kjgRiB*aJ0Sr97XYM;brPbRf` z(MWE&3b2e~7P?uBIFR$I|BC2`XOO%2>@J~}w^O};DE8y)Jmm;17cFIYbcCVXJ7_JV ztxQA76wCK-Gk;b`3=P%Q1!|^>|kDGn8KC9sP7W zEfSZ*w?>}Xf2N>~O0Z{mgP}y&e`-KyGIZLJiPTsh~5} zcC08Da~SC1pC)z{sH`r+)lPq~7?XEMYJ?c^7#6@RB#tb~i*a6Wwp}|Hlp*@wx)q~Z zAcDQ)HQ=o80QSg)K2AtB&yAmep*kj}t!Wvk+TkTM`}!HrzrTKB38(nPyR_f}d`BvY&5_E=% 
zfaXU7FI;AClZbli@gBWqpEg>4 zTPkIi3=_8N7@olZ0aE1r@e52P_l)Wt?qNSVYKS-#iO+cDfiW?dkQB>C^!!LiqDrD}irm#f_mVdmkU@=kMBBIY^n+y9BC8osN za0Sr1wZ^2{TKWK;`jWs4Uun4@1Ns9YU>YSJY1)YY>uQ&fuL1j+jCO-4p)qO+O7FD1 z?4#Ir0EwZ6K43ciVq1up85PCCd{?k|2!$r(pO71p~IBf7%Wj zlUGj64@>_iFD&YXviDi0?P>nmqFc5NCyj@5B;Bsn4Y}sdeMUG&A!gBfb#qe=sSval z+%@UQ=rR#Uk6P#O{hn{5rkmh= zi6~F^o!H+rJ#O4wVQpH4yNa3DLw5e41aU%flueF5A{GvS8B0}bZ*_Yiev6KeL+R%n z0tq-0Dz5V^m2RK#-<>&2U)gMMG%aV&`rRUV|j zeRt>7p=lUQi+M=nK=q1kUm$H`g8&!`S7!Ndn?Wa_fx|8CifDLq;|bs1;q$@Z0Mmgn z)XnC6U)+5=aHz3KQmA2k$DHE!sQQ0;Avq~zq#16=!1l|u-LQ|X>pcTHSeA@9kwq9} z@QR4qO>A2wvly0EI_QX7+^wD#3^faD_Cz6yt?<%c01DSOZSZDsRcZRurmO&O4KQ5f zrU6&Ge;wlqO3-)0MK)G15mN2zMlHajIyb~hM6!K+ls#NrIRKhtVP(Z!@ns4`Wn2s{`2{rNm zQ)eHgkh5|H1AUwXrBOtX!3hUFnSWa=rfX4n3A*`pV)Y;Gqm+}J&ba#=)AG$mN})9X zEwp9S3-kUZV)^?>gD0V|GFI&@b!%_ngg;obo(nE#h51LWk4xdNm2NuJyt0_9Q5_TW zv3-|`{nbv>5{F`FaNrmSLt_dBwDZ-r_J`V|#Q=i><|?+(mg z`z(ETae|SlVSgp<=mlSo;YAu+h!^Z%C`=zsk^146-~xrqy*mr+^uefm?ORRBvp1w}E1-*>?B7Tmb2Q#@-qqQ9^ln&-@+}Uvv$KGNaE4}cYt7SK;j&^u@ z!GY*<=o=wc@<%zJXUs>SjiH|kJ)g8mLK~<)esZ53m29nB@c${Ry$dMhgi$r$9hV*K z)@?l^%$_gIbK=;4ryL3U9_z1;c=Js$cF@J~=E))FRkKDYE{l&*&s;d#GpIahM>)VT z=ZYtJ-(l(C)XFKR#(Ycsj1xP4;`&s8)+2*dz1K+C^$ z4@<8=D2&vw!tD($$8WxJN4h=bT44y5d|<4t7Mt;Q&peHrfGQV=mV@`Ys!*Jq5aaBK z%HJ8TLm0%z-zS+T{Fh#e;v(@KqP0)gYEJAd#1EV^u4t_ABHvhOWlyMY>Ce3q@e#W2 z@y_NHk3vKAE0+V$ie`9|>TPEO%80j84BzgXklH%3Rjxtbf|p?6n`1rBOxWduRw^fi zs_QJ`*detcna0=4~9(LKjlx%xte0T`J;~kP0F)^ zwhis~5E&=hR=>4^{>9kcvB73yjE0moz6fMjxc^<=?zC%d$_4 zV^Q`nc(*wSwb1qNtp_ZtiPuA1^FX8*5SmZkOD~Kv7KTa$TnmWd9K~_%euhlmvk|KF zZu6HTt==ErXj;65M8nx8yeCe6PGUeV&bOF}O>rwZg_y%4?k87$>#BLd2<)i4tR^Ci z!Yic3$~S4&B`+PM>#L^0#k=(qK1L8iJFC?EJg+ z?Tx$fmjNO*LUQP7xM--43#Gb8N3+6jnLdeU>!9@!A?G^5t= zLUXP<%1l>Tc)LoXebz_OF6OL-;>ofOaANk%qSuY*C9Th`Isf*SEope#?w{kLh025T zR`g_e!Jo=#Rg1QuEnPe^f@-RGE-4NLK8bHI6`{ve>MQA`_eI zGE@XXoa-p*fcfbGMc+)6(M*P~uKxL9)V0QVE0e8jxLTzDl~0phE&>=kq(5BKj2ODs zjO;x#TcT{|B(T2iOMXe}I!yk8xG90F=Dxg@So7-=*w^079dV@cUd% 
z8aEwdYLZQz=GKld#H$o@pMA~9s0*G9@B?PL&X%C&TCDh?x_e=ZBL@r&9tXLZ7JG(x zrO=iRnGkYkER)VpHmBAXvU9m496_a_|B>zJNz9{C4<|s173v462>M=-IK0+&F`?$8 z!eU!vD)bKrB8eSe?|eWGXG{XfYY!VD;}vN_yLY#!UN)>R>1$94ujR5^VaE)maloNic1S=rP>#=i9>z*4o6`)|@5y$0oy%RMH{Z z-XUPzjNTI~+&2U(0*cj;uqWEg7%F)>fKm`6ZQV5$4*?X$dgg!k$C><}nELiTQ8M2x z_*KHRrzKoD&hWne0qz_Cb`BNEr{V;EE!v`mThr1y{sp z92JEYP(y~ygV){q9)ke+(PF_C@W4P}xh|tGJ@}?(438b?f(K~)*-_E5XfC%`{-fA@ zG_X=!)gxRYpH`2HF?a5cW*BzL`o%n+?~VD2c1$svA1X0hJ;4%RszqFa!bXAdKq>L& zEDnKgbhK{3e_}<477_Tn)~WT%2_eYFHxMyezPgj}cHi5rIa4 z94G@1`=v3avZr3k^{B?{=;&}RH-g4}7Qd^}_ufs~$4+%KnPn5iCwwRr&gPouOJq|h z5En?AVukrT`CzWpT7v?oGLlh()vO1*S+TD$B*cZ2izLag1>4XRY}d=i&Q!We2H5!aWxK;X&;L_hn5n56ytP&}5-_njb!-z?JN>41keEpF6p5d*vkzn~Y^uuqnHu|8YD^R66)`)VtuZ)~W$|;nUb(X3Z(vZ{HVx z9y{SDqEyvL88~@mLq?mw1avj}!J+Kymk?uA<{US`YJs9$8<1kzGS$^hi?v%C-nH1@t#RP$mI&+aK)<88Z@JXuV zJ7@!&ujPoo$*7Y3I4Gm(IIZtEmt!z+o-iI6NxFTd>e13L5Kiyld&0$$;gs^NoJ{mX zS#eF+IFtN-#&2~GD(wS6v;f5mzh8J?FvJ+rY<5xS zsQa!NaX3~1@Ka%FYK=Ug@{By8)YqIF;yD~JnzehuNuUcIq-{%ks>2#={SNh`hj-xL zIE|KUr$i^oPBtC=SV2MPE_ZgVT z5zwBfDq7r#VAgV8L8EZ1OjtNq#xOAOQpNcDeBixa@;?o3>7T4k%Osz@8JRtw z;Xnb~FV`1H3*9!F-gn|*<`K9IOUi90iBtP*LXzgWhgljs-r+ABC&Xe5X%TN!LO#|9 z;4o7c)7etn+^Yt8_k9wCT&98q;+^G&XJEc}Is zoH&vrmKdy^V~%$B;n=<`a(BhCNIqSN6XtjFY}nLg3;D58qG@>+T<*Riu{=KZ;U?$_ zK~E1|+X)b`pt&%Lxe)8o5M&_wcbaIrhMySeN}qevU|v2ht5BxnB2HKh+Fv27E?*GYoP>ctO}@g7et z0848CK)%{ALIrmHbHW}oWK^9_rYCP&#!w6FVTCOQ7}3n>Iu7LXlj4HX(u7whr`#@_ zfdPit+YmbNvhL2j*Kp;b_B8ocaH_h)rJaaD_~S%{R*-}iO7jjW)bv246vg^sp0+S@ z;@B+LM#3^Zi1_!GFQ$L_8svZOvD4&*gpVh*K%R7MH9v+(R3asK z(KgVJ>-XcZF>@9!)2y$8(3Vpczb^h@TGSx#A2y`-zXu(A0#uoetapOUh(?-;j|6zV z&knewIB+!=*7K11&mM2fjx>OK{l#iXt1;IUIhSA?z2+#ePkBO-AAz>G=7{}i*UtJ{ zgZ;n>q;K~InIPRilXX1k_P%@+@$X!F4zXe)G;KX=oduV`CLRpmfjsyAqjw~6OB8b$ zQRQirphWue4b{;s<8q>o-F(8=vYnE(_g>eRjqukX`6L;)oC6@?BpK1HYReii)s~ab zQk-VAQnZ{xY-42{{joB?SnBfGT;Q{q%jHTdL#D#_TnNowy=7(SX5Dqcv$U_dnOoWF zq`Gr*>CNbDX>k9>pl-kQ|P$M)U{v=+ZUW-WK`D8y#PVUlDeTjD4WH(VroFyUmW~a=2WeRDB1k!=x9-wO1P 
z0LK=a8iGHh5hh}LMXJ^R(Y|0{m}GgaGlo;;2GNFtG0?-U>XUNT zUi}0@5a}rI`SjCM#scEuF(xw+UBWxeI5YgBX4_yrOo;)TUpJjeUSsSM{W85T?c#jH zU|@Kh!Z-@t?S_aQla&WN07rrOiRrTX)hNydcFeSdU!2|R5M|gVEx=sY+RuMyD}XMh zV1qh#u71XvX#{2<(FDHsxEw2+Z(GdBjoKdusb|{to(?43VrM;)pFX`PT092U5Qwi1 zRFgLawsGjwU%&AJDR6y`s`}W*g(-1!r>uX;eEMR@lwsk@v1MqHpK_SQXZP z-!VQrE5)CouL^d%5Nj2yFs7@_Bi&dHY}N@w0}Ju*kn|%r6#(&&IV?1Wfb%0ZT073M zY6@Ed*fGvkyjz{48HOqxNZpn%wHuQ6snU#jveN0tJU@knm?2-|GMm%gj*x8m$Oi6w zq@Mgwcw|@xrB$Noa@A+r0i=T_ezN@;N3>`dWs|V1!D4 z(u0!1kwGAqW;K|kFPNXd_6i!>Hv7&|nihN|nuwFh>3uLPkQmH z1X;w^v_L0&#ryo;q0hRohhz(my_mg&Wix;b;c?O^xU9#h;DJ*KbM zxe$A$QK$@K(S^H!CEQ533M%za1OxKUXE6MAbXsH@mHuXhZ67`oED!8xq;|4G3nK{= zuwc@)9jrbetCsI}AfW3fb;3*P>*6By7K$nGe!6N8#^gH59GET1LMH?{o4T8atHe2mthLM3JM?jd(EDx3FB5!5}Cdvs}kPK#gW#*oab8zj z(=Qm{RA>O72uY2wt%d_wN*vh!*wj*XXP^RY%7vE2{`B`LcZ8D3DSXHPxt3-8+*5*7 zg;|3B-JyB3W|P%|v&gczkJ{kIiVoFW3{t$&ItGz;13^EearjKt3f^OtrbSHgfCb?aI`r+OYTR{_x@ zj<>SpyGbbc0s@k4>`eBCOD@?9C>@KNiZiR9&7kl8+5H_`fZmvp-P~4lS?p3qH1>H?100meXr$-8NfZdy zcB+2$lMIT$71df**;pDIg%!ZtaQ>I32mR7@Mk z9m*RH>4ARSp&qFaqlU?z54C3aW(weowZ~&wZG(A!!fq<1keEjvtZ(0=jVSJ8IYj%P zX29=9VKoar7BG{TOW+~R<{Dl^e6 zlEbViOy^V)b$xI57%C+DQj#H+<%AMjpTTTpL^(eS$%TtFUCFn?r?)M#opxNYzJ+O+ z5j;l@u~3PMdhg9UK4dMEpYNTB+F$K0S6{s;WxJ=o(5Cr~J$-`>KP*VN>GE_qNXZh; z=it4vP8qgQl3yjhM-FS3YoJvjYmi8;7Boa<>xJQ*so2-==|(;y#xZ)}2Z?aohuqDl zbMRtUmkyg5)lp^*eu))tH+!xmVli7f6=l`eb%0mft0YZy%WYwPa{#0Z zE~rF8lqf+jXAV_ub5>PEU|jmPBhq<3xSX(hjo&r05qsPH(>wK z&Z6E}bnN?4ms`ld2R<_L)zY=^Wnl8c8`JtKG&9SE`oJ1#BV=L!ijP+;tkRvr0<~sj zst;>!uNys~@zES)k3+>rBgxNjakgdE3C0#lH8?1K%xBu_RIIrNC%+B3a`K&N3{{>u z=JxaSs+DBv1@mjUsGgT7Y1;F55XVWesjOobD6Bl+n(&v_bLK$4N6!H>R8$buTXp|2 zVkRgbtU^ovGETr*w~HRQ2Enw0nlvv!#uZ&un~%G*tO|W|$apC+nnH~E4yb&{HCN4< z!F-o4_vcxc<@REuk`1h^4yy2tn1bk7s5LPinUxRwmbe%)*j+;8+Ml78Z~r6LSas5;RN+^<%x4Sb-20KN}({|H-xSys!47H$E3+!hl_rs zsC=H21?qB0hR)j#3miLn1d|HBvL?NkmGG-UzZJ3RITbF>0-f7U5oMvS9wzyHh3ehn^;DB7VDLoVMy=Yzxp~FPJ2s&;k{(J1dFr`^)(Ab2 zfe|~~<#qlz2sQwQ&GvYtVJ|^WNvKfP35>9HHogZvZ%RU_J?Z4eIA|?f9>Y0qwphG;8>HeL`G 
zHH5k!O`Oki>%|36@@F|CYro1K?BhPoVbeJiI`m;_OM++Grc|M4T{f^r6x^8}E(jWS zlkzE2MeM$LW4k!_WSj(V|6hC-9%I0^GWfWJIvV9M+3M9@bGGdVv=)&?GW#;O+T%EEv4Umm!67eK2J`xd6ulBFl*_DF|a=(@`HnP;*t z6Ep+KgkjJUx}`PqjW15XU`pp9gD~@gJ0cCGRKVb{jeY#a65UTQ)D$XF5|hulYzmi8 zf03%o<13sj?GabE`4&d=+#wT~YgHsv23K zT}~C+kj14K9PYR4kjjO+Wzs6!j0bI-GN`{v&c;*G-qv7RojA{~1sT1grL5viY4H)GVa3?Zhk^0^DfxrEa++=Xd6i0nW-2@zQkB z^2+ThHMpO9`*9R_(#h8XRGWL1@zQrY5)2vzVUocbPg^H9?moWk&ORu0Vs5@T_sG3f zSKiXbxssY%fPT~!i)N8|Wdk5q^CYDY#CBMaKMMjob zwWoQ=7cF*C+H={vjDVCyHMjekyVmbUKaz~F2<^I?udM83c8|a@DK`eD}O zE%`B>^K)u?sN9>m%UUo+6d%2q@sr1g0RpUkpPq*n-~5e0ogZ7Jn)(-&IrBRr#aPw^ z6>rg+GO3&K$L!h(RD{{i%UKA?Jx%x^KT z$b9tuaOK1x1h5Xlm(U4N{928FYE%W&8hIO-)k>j(Xho5??wNI7Ki*3t|JPq`mnx5O z;<7|@cOl~U$ty~_4vkn*KT=_1_RzVqghxoR zmMFSHP#`t-Nurfyx7#yu@{cr)cZmO1n#1p zMcgE7*weTTy~VBpyo?2JCuf2k_eKXpA69ImXwb z{ZCe@PLZuICjPhx=vnTNSLk#==ZZ>Kz@aKJh-86|+&Ndm)ryOCKT1FeIMD+SosaQQ zQkS}O1pn!*W_+i?d?_rh0W7-AT>b%_NkJa&o*R?+axgqZf8@T_q23R|qkfDJrBn@Z z)Fty%l^rR=;Q9?p>8?*q?B_2XZR9I`??3v>v=2#24Iv{CvBN#biUAMW(Avc?TjTH9y3(NgjYUX>?HLKhY@Km zFdl?gj*tR$$vW^I!kXkIo*=1Ltx132js=Iwke{MdoNYwT2hEO+G2r}4XCazVK+tYy zXAywrpQFf#2{_{^mz#l9RTUl3k{c$?Of-3IYc+_#Fgxvv>|QTq1qFoo{L{0`pGEsC zvYypU>$90_DOw%VuOs6F_Bm z=4+ve+RaDE=UxOGvrwj1LNjsv9=WC<3;3kSe^OZNT*_~{lbXZNZs&l68z_zR3 z&S_XI#DIO`DSk%ABuMM4!jRaLNZ6W zIRzR0;F7@63FVJ+c7Wu^im#0-EhL6KT9UV~<1AGlZ%&V`U8HA*GIsT zr5ubDe%zu?+Ys*EkZ=Pi@jeBy4>@in)=ffI6Dru7Y{Gg|!Fngq_|eSjz3skKMkc?JoQZI7{_tO|J##L z?<9dDk;bd(TiA`^_Ix7w`V)=l3Jf|S)03q6QblFRBn8fCy*A8vV!Z!jXlnADq<72g zO{+!X>R?a(BC?hhbH%*yt+_nJMiqul4c576pa@jjd3kzxzwCZo=FV2HCh72_tpe`r zI|rpWCHU18rsiB4Ub~z6N<=WhI^W%RzSbur^RxI;*#$OHFeJ0;Y!J6la8 z&ro=2g5WVsC-=z`+dk6T@)bSU;DA8waYmE_2vSEWZ?Yh5*J5bc68nD8Ipm1^Q~AIq z9&|%a5R3RcOD#myub`tT9%TZMtYpUI0&la7zL>BMk|7KFZqZ##X)KN8wyGhSL0{eO z_b_?6u3k)`*lc~XeKtalm8iN+2(~7F*WxW$P8myHy22M>r%_B0g}ih9uG(jSXOaY% zq>$x63Hsv8F!NHsARqX;AK>OgTdK!QLn+Yxd#&)h6tcM~+}2pFZh_yCX|&vr;jCbX 
ztiV0`$(jA&D&$au1DC39WzXG6h&j~S$~EJTOh6pYOlS^Mb-!Rk*?5AAF|-yJwr-Q@$bmPTiB|ZgZ<=opwo0jQrxAqS77G3}X`5#fkIal}b29og90+`?&cSx4FoD3iTX;k$u#hw?xb~f7@Oh8q+ceo8Gj!42Kg!)*8&M5S53)v=Vq2{weu9#S<3})hm zu5qacO`HKEJOUufMv?JNLTnCm8tWL51oEIx|PmWOrM(=zIn?V&|FUCv3bU+XcYox>7Go?&Iz@`H-};o zI}lU2(oqB7Oy{x1y-sp(JO~PD4mqvvy zeZ7Cvqorj72TdD818B>Dq|iw(GW3&fPinx-z{wtcgSE>%z1IDJr_v9@2iE_Dx{ut( z{8sykDiDM?<(Wf0FP*5qty zmgat8(z7J$m_mMtj|9@5y*lZfkIWmnh2az(xiwr7tIaLn4m0H7u2I$~S`3LENVAo$ zrtAR9%f;OBp9U~vYS0_ zoP$fGv$TekCDvv_63=!B>GIkeQ#TcnfiP~^vi7M)ZV}8k!%kb5Ubp~|JF#;X$Szm- z!gY;hZMq|ZMXlj4Eg#bX0zO5WmP5UoHiT8ry;l%~pRk1aglEYKE7(!6*U@7f-EtI( z>AYcoxbiOPWmCoNoL!K7o7#MubUSao@eIRdAUW|%Lx!8;PXs`cIpCz%6Jry3LFa%2 z_IVYpOrwpfFUZI%VtT0-!L2Nond)Jbmzb)J_(>!ca>vt7kG7t*+$z+8n1on~XE4Ne<5~A+hEwlVL&!-{@A@vy`ocW=x7DELe!-?$SPRL6tDZ6xI z6PK)|=R#^CLVE@d-EmAZ6{h>noz57f#vADdTlnB+fVK?;P5i~v+2h?vg5Cq&dl?jh zz@^4Se+b{qS!2dZo1l#+V@Ty0;V1RE@z5dY7YmC*<6WiO@X3Wl9Qk^69h0o#P~3Vm zH2R^LunbbJ=JvHs*o0<1`?5Eg!l3-Cjw#Met^;|rSolYP#N;Gm(25ZMsL;eo|s;zjaeCc^Http>hxY+&bP zf+QPpDT;6&;>i~~TZfB666u4T88_F^j~U+D;Vm4m@~lI>Wm7m;)YE`ntFl=odV67l z+(}AHOiSVqtCl>ptY=BCM9$p4u`J0(w|OPpz`0*$%$d`uV)$?xa44%jmdmk2XsY3? zo}$sPe*=`$B?eFOHrD78l%PyZUcyd@fQzr|$l`Mq*R#hd$Rj9TW1RGA*|`Y7;-9X_ zUX@MUR7o%NRHzF>k~$iK4!=rqckbxH&Eu}eg6{|^Gpn~kX6w{`^E3&&k+ImGo`ta^ zRXtE|tc(tEnrL!mpWzVZgV-D+>dSpY}H(TSu1J++*X5!lWCopz#~V zX^uFFzzm9iT=Hg$KXoU%4H}C(K|)4{=kkF-5r2Cb^kS2)tV%Uby%v1x{8>y*ePE%6 zX5fS;NU@>Li%Mmbd^QfgEUH)>jjSALu*gVMt zF|Hxon<5PEHT`Oua70+8iEt1Z8|Ksj;@9lW?mj?l?3Reh-u*hzo44#>l-NT_u`ugs zE3fEF+8_ACH9!CMz5T7(ypG3=H9&r3`g3gwl%pKJQrJK|u08L*1a_NTxAmi!Drt=( zXkRkxF@sTNvF)=ZY$4+MKD>#;s&W&n~G{Bn4l=Qgwv`z7oMcJ&)G5BgM~@i7KNju4BV ze{)kkduwB5`l>0QmxSNx7jm=pmJ3&GtnwzxYc&nms}z!l(yxpPk+nEv(XmvV=BZ@h(({`5>A87N3G#Xt3m$9h9s?%p)<7*^dDjPXuKY&R_y~pL_eL|mwVma9nw;rhA z4W|s+c~Q^B148~VUph>lV0G*d`IJ?uio}4bkrJm2a#>fGb8gIR_aEOoL~kZ1>^rbQ zF`4FhQDaZF&?PkSn(@8{h7RZu^&V? 
z5WbkPf(yep{xlbNXWouA7Uh!#ZW`2*ITg{8~nWO$(dr|nd@lnk1Xn62mri~;@F`W0QbWV7gOHwG)V>3-!M= zWGF*ny~TZpTM9Nt5faCtp_9iG4bkd%O+&%XL}}R6ZOH6Mu&T(O75Mj6gAF{QOXmW3 zEvqM!Jyv;-p}GxL1S z_6w~9trEGY*wk|5E9Q*RyBDa zO0HVzt1xvJvg9`h$6i5pfHEH$0~;+70iK}PTasNLYVXPOTYavAEd1KKDg*j`b;M0Y zGEEsTfk@~Kke$3BhZmjLl}#H0gPrXLKyv2&q$6XNL>TQI)|zq>l%nyDK)Ev#W+CV& zsY=0>UlQ&PV(MpXIwjn1Du4m{+6Zmot6s*2Khu|jJ3H+8R#&~mtYojMJB3sm-NfhR zAni?{-Qel~OlQuHQ{*xJIN})7!aYuPS;M9!3aY7l>#<@@?o1qZZM}Czl_6GER8`{o zs}v*SpVvnjI>h#&y;Gv0g5wbE#-e2f#R=dyY){d&$ga@Wj(@Hf1Ohv##@2^Ieznwb zMfEcXt{onh_YbV<^Fi1SgUb^GH2LSlX1zY{ft|UxdtYzTd&eknVHhCvR#CKt{dm8# zqR>4|4sW9I-!BwdR`qX_(gu^fqN1W3*z#=^2)XbH-3Aq^A|k{KBsY;U1je3V;vuv5 zH*2!eG0W7toVW~ zt@Cp35lYBVW^$^y?Qr0BvycST9@#WAJ;n`f{Fc7SOfDiAt{}zrF0R0LRuS+FDnD!+ z^sWulzCW-_Jz7-IwBT#MPRPiU%MYe-8NJ*q)&N7jDx%e%Ax~LvZ+f`XTJOVlrZB!? zcGytl2%WhBVuW-&7nY_G3(TJWZ&)KxSJD+J`DTQrLgGeIQt+08l~)Yv1$(^1NwI3$ zlE3mEwf~JZhmZ&S|Ihk0P*lDfHX3OcOE^p07E$0x{HhXy@F<*9Vqp9QP!Q`jfroSS z^G~CAR@%~hv$7vGufP9Qymv(>xNEr99c0HlO%z#=-s@GZ2AM1sRv(|RK*m*&5S4ie zLQ6Y`md#r76P7NXSddr4YEn)kOE1xw%LtWM4mK*cn@>%Wu+0`ZKRN-w@c>t_Z;>Xx zM^?TPdZN6II0NI|YO9lsRj(2stgayMS5tC7 z)@1y5{LdiC@!P4*BY520Xhs#v>XNek^lS!Yhs&{SyN-+!LWYVO90~Jz21Wcc(L;t2 zJ*QMevHpXQ$-6=&Dm)RC?8=(ry!)=6#?4R3GiOH! 
zsiagw_;RIq2Gk$+C;s7y4*goAX;$CCsZAQt=`DK*i;J3{OY9oQqUSs=+FLou?8dt_ z45OV=#gmw;SJaQ$SQcxWWI;Y5L*-^MOUv4a5uy*lXSD{KBYg z??yxd%imXS?}Ivr>mL=j(TyLNJ=zbVc*L5wRY15Gdh&;<^qCD5D%a$SiG>}xRVEp3 zqYy3?Hlm3JM4uV2$|DT;mu$i~fD3MB^@SMorr}~QCQ5W_#Bp*`4+hVQzBIpPcqJj| zW8vTinR}Qo+c1rff{qtUtHa z@%!%cxN}^-9$xF$8s4JB9YZA3**DWSr?1q`medr6`=yY@ir9?je+u36*707)%80gU z8sq!9C$~!ppN%I(qNS>R!{Pz?kQ9;<5?sn(s-jDkq=;*+Uy73p~F zPH?i_kSt(ERh7Ath#4%N_{Gb3KW4Zz^kMDAH}9Zkl^DOR`HqKubyj`$$+c<8X9{>w z9{;T8xP9m@Kbj9_^-}$CS%0N!K1~AY>A`D0_#<8{c2G!_YuELArC2`NMiv2gxcP7A zWDXI~{KNkC5~?;EJmc`B31iYdkA4=AG2!1RUww06g5K6sd@+5Tz!AtIh*CR}Y$ z^zB_cNi4wBAuBc=BsXWp8;E6)&s(UAzskexi9V}ihBOJr7>phgxWk97$V}GiyV*#d z_T7P*MbdkJXwysLf6|xlEupNAX;aq=)cV46^Vu=4^$l#Pvv7b$5ud8$ccd-$wCB7U zExI(L1$D$q4+o5nqMPkUD&CI?F9T$Q+*vX?ltTi?X6B|O?w!4tV ztN!`NJ!|?B;go$tNq}*R8M2w2O8pV)FLSkxu(){=JosN89lQN)&yidgxbwi1s<^@f zA@h8$?GBg9wQOQ38fyknAyRB!wJXiIq4_dAvm)d7^>y$_d$BMNBQm&RFEjj2>~C>b zOJq*=Cc_Q%+glfsiE1;YdcD+eAzeCxb8Crh{pC+Xv5TybC;B3|q&zdo8z$Ms|5ws3 zngKQLD$9eqI-mFU3)A8Ky`z1An;gsKDRE{@k;87K5+SLtRla6icV~SCMLfMoMcW5l z*;12zHBiL3WHu;vFj3^{Dlw=3JRu1)pT?i;v>pexyAsU5jVpNt`K|XrJ_gkESkfL7 zGYB8INnqY@u-5}dE^@jZgd|-5xS|D)D#-Mh@vD^L0P}*g^A&b2ObS#Ysj(8wOSf+` zbst-h1CWh?YejtoLLo&u?*}SxtuL~!PUGcxwkJ$FJYj6%nDVb;i1xt;j8ET(YuDXG zjQ@p3ZLLBes}#ya`9jFywPd3GSI6hKZK@&u16>T>N(iky+a+}Bu#dNMD}4<~hyBtA zG~evS7|z(jj6)nw2kZIql#2$B@V8CK5Oa+1o!55;iUq1_8|WkrO|o8Wb7*WogB-N` z_d*wak|eaGLFXm3%of7}L5|j}X81xrSm0Ht2TW{$B6|);?kv~#G=R%xjj5nqq-p*1 zMhKPu0NEt6&kUUOdZdz22YVA0x8HZ>E!?oKK==clPk1^R<`72}2qh*czu4YIth*H2w^cxK9FB@yM6)qS{AhI@5?ZJ|ygtvrix8G3b7t z0A;oDQ+<$;3C{W)USv z7FPe^*m_kQvWhZ<8RW5O?Cg#U-SL{Xqu-WD`5$#DyqZpZ91*!X7?e$9Wgk{odY*kHqJhnD40g=Tt2{#N7dMt+mli?#Lxk?qFA zJ9s^lbN%Ux=2WZL;+n2;eIXSm>XB^|iJNvKYQWi=&mk2IDd*DwZCD2J)4KEU2ZV)i z*q_;{3MmQKhs7W!dl3l0g`r56P87M3i+e!MsD&KmI%vT#NNfTB-TQf`IN)}|vNCIC zJntdI!2Z%Xj|$^<#3{ zx>CmTHqI&2?{tZ49=YiAlAy~`jCac{G8Ts-6I!&Q3qrfb@3j8gVgj|yv4 zmmp0-hnm43M+U!?zvf@u{vib1j8lCSX7WOZlXQ{^?_dh*^msp1^Yk#yxO^D{N~RNT 
z29C%?qBz`?SUHQ*bGD^l+jZxK;jihJBd}cQ**`q(VI%@zm&QoEo2&e~XkA(X*SROv zzKxAmr7$4!+w-~@{ho18vqZ-gEi39avo9{iR$CDu7-lZhm#n&HrB&dl@Z-J8>ab(y_4t_2MzMC&U2}ET|xM9Us<0o29 zQbq`oqR>cT64L~nf>Ci}Z2H&KZg~Ag`XwIed+zK?3X1s0b(~}EQq3(kKgh>t?_%od zPle#xNyC?Yv0vsR=DCviSgP&Nmg<6_O2!2-UAOre1Ooeosvdr_1?4;P=Lt`SHqQ@5 zbsAD-0zi+0y=5T!X3QVsz6?$|D>}-tu8_Duc7OVfsI;W%u$F^rkNWhV#X*c4>TAd< zfz=s0PocJPhfWMWYLZg1C^a)DiQ}m;MJwxV1f5n2w$KdHnrfO4-x1C@7nUT zIG~`NEa>zkwMlL2_yUKSg@6~nnj}bg0cb;y_PBm2s-e;rFmI-^b#_^mqxVSbE}N}x z!!#NcSSXvrq+~7p%S1p|FmY)X*hrYcB)8Y*gNLLseD4#LO%VRtO2%`C}jiN^k)rJn^_{<|I>DwS=R*25UfXFDNR-U>=Yft(w-PQQj{2paY-o$pv>mZh0u%|eyetZ5FiEVd-VBrI3utQqwoPQvJ^Z#iB*YkqY-30F`xbh0RD z*j7{L5b9ge*yM`jhQZ3~yo+Izyl&>sT`)7u+ahuL@r+3LZvmKu-i&33^pLhkjbwP} zP?W4`aD|EAwvP3+bC2`+c+z~j^4zp92w&OEdFwxSsfeju;NOVqceu?^mar6MoxRVx z=)U!H-+W*YtGs5)PJRpO<(C+kN|8c34`i8GOLQ;18-$MQlQw)~es?~*QUU=Nr73>} zq&IL@d%I+N;t0hyjH#~4@E=#N8nhbIQ-|jaVuG_F^(hzR6gP0!(Ex#Uu?Q}xR5vDt z($DMg9GwI6)L0o{Rir>-lFGHYuYcYeGo>L)Sw6TbTrN%~J}1a_GkgPi=O7>r$IBLp zf^F7Q@X)+>e7TyHpzj1HRT(5}0Y8N@du1~cgcO2~C@!e(SNk<>YEk-n;%(XZ9Qn7d zr>clT=cCt)ovHF?HVkcXMitLm+3i#qm;UDW1%rDX@SBdX8AUXnxB0B5lW zDqPC(so4J9Q_25hvS$inCGX7P4jMN(=08!f$9WATiSJ+l3iZ^@W-4J?s%#GThv2ag zo6quE(A7KfhBlS=S? 
zh=TQ;R25c80g*W^A}tZVQXkE=F5X13xnIpOKEUPWX{=sG2<&Cx`;vHBFBkaecHn$; zeLGI7K11X5M>U9R#h&Qg=Q-T<* zDi*qsfU-nW!9k5;X{`J@>y{TA!tq>5Q{xmZJEl(M%nmayw%4tzZ?pr);x=sCJ9O~8 zYW?Xrxo|z4o)#Dz@~D1?YF?UiO7cR2FLwUTHtlnh*_Hoaod#pb=qg z@Z8e=QQS)-3LqtABT;2wEyp&0Sw4B#Us7=ez$|=a*M`>FzkGd2uTeXB>!&qKf^-bn z+u{-cNIHVhiWuM8X%o|2CN^Ru~_L`Ze z)}Pk&fNpx<_=v*ZtnUjqNl0GqHJ0wLTnb-qLeRL&h0+pT~*QoS~LXxwnF zJF9Xxr0ZJAt(V=_tB`2zs|DkGDo7HC9>18~fJb>HurBq=OblRXb5v+V(5jEYR7lNZ z`SpYHnCltkQhi8e9k(TYo1USbq97$A-r zK#fXegq0fI;x!_dwV28xf1s}1oztfT64$s7g57ylxhbzXylkXe4MUyg-=7X5%oWR6O-?L@5TUu(#> zurjGRmX%99YGq9`q1uVde3xsv5-pFZuiH6X!xCn!MXk)a=${CIvP0hgK^7AeyWwlc z{|=>sqyLG!Tkm;S|Do>m4$tD$%g-*Cmsds z{*E~m2G%nYmhkg6PL{A?^tbKAx%tRJ#Y6{UIR2D``7sMQL|9Z7XwZRfG=Lpd|op0)G0ow z$gBAjJ<=AAM?5Dtf&Q+XbOh+(P`-ziWjApOcF4&UtK(zXSHfKTCYamUGw#hA+K2C;Gg+?tnAV+eNbECjpXes3p9>498!CJ{H~`m2YIr za9^v10y6j>Ken#To8Q-!;c8!Ca{Me`nmB#fp%y!9;pTJ5Rt#%(EuMf?8Ik8+KS?|) z4+B|qGE3^ov><&cdqV8BYYsou|LHT-d)Gz?Q6HFB+7MgajR2MkA)&wxAy?Yhr84p` z3TG=O(1iC}1>$&>hb2#H<_{-pcr>zIDJ8Qc{BC0&EN4w}p+z3BzmPnnEwkJwbB`}M#OJnA8mJ^=Ph8uvSmCtlk%WEIVXTM(R+S5Rvmu|Qx1sy z_reQ)$n*sE8RL*M!qSGrG2D0_L^_Y8?mkJ%u2ycN+(1Fl<|1=MBZ?dC>q~Zomb}?^ zmTI`XGG~yAt2#)-bnI4wkhRZ${X=nc}OC9~n|5&Iqy z`RB-jOpV7}ht_8DK=2f^Lz9v;;Bq4gir8A_F^aVUpDw9= zY3VVC#J=})GT|!xAURf~7AYvQEj3X&a1mmwbS6dv;W)&yYEmZ5nDQzCd<5}riM}0m z1@=%gnD-X4TTK59nkO6Qb)!1O16&$A3Q)alAN3-&2+;lO=a3q6JaceN^B!hY=?eJf zgjsQ^+Z8BBI;oRpDM1cx${5&zQUQ-X!XVN*6;0YqC8k{S+ z3KU(u(BZuf%{*SwCVgq`7gTH3A1!>fj`pzPs-4kIJ<1E{-gMr5mH`ASbZ2a0pfr-j zF;$t|m!~e;G23G>Y7mrMa)1w_cI`hNC{REriA_Rw3JV+PFfLFwMg&W{oz%c&(h%?; zCGSr|6?(HqX9rN@nC&HA^{gvHFW6jip@e``kla*xT_aE$yx7yVdkN1zvu7f(u3>H_ z=b#2_C*VV$h{7Z7r3sGX#g;HbT1_Q$>s2;j9#+ zzfnzY=eCPr*<5l>;EicxkPkp9zVwFIFS>^_xRdg-HO}CmcdtUVEr-feHiT-Z$ z)C}0-+Key{Z*}-igH4!kC^K_*l%y%~h`r3+HfHx*cM{4;oJ#7!hKj-lWu<0X?eDQx ziuYeIK^TUu)SC)o5K`goKh;_mHBvHOQl(Q2er;0WUKpl zIPJ6k=8z{z{3RurVPmimW^_l>-d>$V#B+4|?l{En$ebyzpDg+W(k@~qfWa7}1-`lO 
zWG9)~P;NPL8!TGGVKvaiIRSAxdO`uO{CC6yTF~?`f7FVg0#IHt@`vfA)Stvk@luBoX6BMM5t`wW)O-xQc?i?Jxm*+fa1oj@QZBp05b=Us#x4x+Y{9 z2U-j03Pe+^0S?^WeyUT`o&C6Nc~qqapn|<%@*@Do%;@a(9wH+mW+DJzCBHK1WnJ)s95`ely_e(i7EEArdG%Xi7sz(*GBu)fVckH zkq9p7-p^RHMz#uBaZdDXmc=Azbw}*h-CgxzTqgkBd4`*S`wZ6C{u98{Bo+XPQjR{m zl9yyk99Im0+V)rd$_6(+jbrd^^Tr}z7_O8n0P>UHE#6IH{xuuT)^;^&zNK0RBAz-p{=Tf zX8T8-^?GU2)lonV!`s;T3T&`6od~rhvKH$tHl)>u!*FLjn$Eo9Rvy58SXLT%l1Gv? z{dMPFMIr0nk2{+&d$>tcbevy1u+H6oNI1=0`*WM`5}=7rhzRz07495ia z=vRap_Z$G?p=RB(e#k`>mo z*N){({PzS=zi zqV9j-R#U3IAzqb8@#AYpI#4!Uw-&Q}3|`#k+x%)h`veGW#EP>C&OxK=0mBwKmobMw5 zD;TQx3^E|#F9~F4=`z|fKzK*~p>EP^qhiTWYN@Kp*gJ@sCpXjjVegegayL|C>@M6R zs3c#!Mn6-kXwP+pUz?AvszC|~q)eT$*^c;Z4rXLL%!VPe+ssqw<0+Amc@N~*sHV89 zBh!9chppqdM(3OYYdE;rdEC<8?jj|7y9_tUsUXB>t+^Qa2gNHb=$fMZjE*KjR|{Ut z)7rcNt^=TYb{P&PBQ#a9AHUJFu%z`8jC?XzpPnPm>)!Jkj++^*Nmc{R|95^T(7_PXt zz8NHJ8yiBuqkJ}gfg-BR3rkKrl_GJi-QMH0{i*Ds-8-|qw)gzNA}#J3k8v*fj$9GU zV0F+Ouf*s@GC3_wk{PFw zx--XdQSZ**zbJw=hsE#V#K_O(LC&WX>}FfaSFWwJdkG9Izv)P_pB|rwL9@PWN=%2! 
zO2JUQ^`9QOV zDy&K^do8|e-%CTJf_&?jWB$aMA{BQd{;7UjFH7737pdDC$4BTmnoN@mqzBf1S=UxhhkaGQsQ_gk&^voz`2Btk9o^DiSExaTkenLyd zoUH}t((U-~WAuYgGeJ62lF7nK4Ae5dIk6&-&hfesgRf&p#z2%AtxmhD>IO3Fg3*n& zfB?D3dq=Jy%4r~SIdjW{-SUi5HL0Aw7>Ls|e>M#LV+D_b_M6Oqk#kN&El6;8I<5|n zL_2`jWc#o4b9Yv->kDw9@C0jutxaEF%L$Of-m6M(M9I}wiz{q&OpPY0e+ zjA+M>bsIL$(A}KlN&ioLAP&sxIqC2i14&N}80$}I=tNInJ|O1!O5gTfb*sYuq!!^p zI`~!qz7P4S;H%~t6 zAiLwa2L7)`42s$XJyYm~ z!vsZmL#F|9SI2qNC9R9k_`3y-14S+ykJ%$oE*!VX(j%}Oc4~HC{5|hP5D5n~O$b4O zONtx z`e15~XzQbBMU(*@G)_DX^-YJU?3cahGQVz%_*ZdaAV`*9!&60aSvoy{$(nNcy*XEH z%U0@Rqk&wheHU=;BA@;2xZO&k2o+ATSv((?Omxn^{{)uFT{FBw_nY)V%1~Y^orOW* z@3m#N2sjC5V#HhtNwMu62s4g^APA~#Aq?jU-Sqm$GVaewpWwkHDOKMyKI-@xVn88j z_PoF63;2}#Q2>Z-$(7bx;-h{jacW!2D0eP46waod(vi@0F6Ta{OUBbF&2nj}6rDD| z!uj`%Vf4R~%3kJ!S?ilP#b7M@WY*oQmNEUc4TK_gm*?8qnL`vhC#v1--%rrt*D&L= zaXVV;98C2v_6dbWJaFuC5y7873RjqbcmP2@u!C77^pV_nb{ zkz!|mXrS^dfUnNlDyXE*Ymwz51yVF;uC$b4KwRSkOUX+<2Zi$O{E^F^6=?H(^x_24 zc&-pWF8|TA3y|w1%gc^oTxG`Pp7d!h|?bB(^&ezqZAH@1aOO)R=+luA!R*bmUV->Saq_?KZ1;3Tp-; zU}XGZKYAlrBuV8i4(cuvwSp*{ep`Lxi#DbO(ZQyZh!GEk!V~EF!22PgfE-W)2bjZ$ zLu#W1<{8!q(;kLmqEe>(+Ne*VnJwFcIoo4y=A->M{T@#pZ3&*KJ>Mpxbie*Bc2g%_ zNg?hPk08towr3(RAWdb20*H>$3Z`Vb200FT+%@=v%{fUm5Ug?rh@c%|)NE>R$P?3fu@Q zwEBkp|K~na2#YXNW8MfD^_}q}>}96sD+G};cy#CwZyj=FlJjc@E|1?r6iiGER^h#E z>*PL9Q=ZWKRtwsbq9Nl(Yodh$G3xbPhGf&v`S|TVhE#LHUZ+P_& zo>wEu+Sh#;1aTh5U4FvM_dd3r^X+Jk zkQ`DnX|-T4c*?DK92n`hsxFbz+qb4o za2yDsL->}W+~ai!KAd%GGRV7tjhnmc7mK!Z7)WsFXA~n&)({JTGzyzIqu_ZBxF?_! 
z`z62BU>%9LLLrKibBv%Vc_x!0ygN*S|AR<35vaTIs9o(sR8H=>hEhHffC$AGKTnjB zlYUmf**(?W)l#RKWti(0pI+4~INFtE8>y9Iklb zdTCj>;_SnqJKi6pTjhm3I=~Ne_P(+w_GrFXj2YSwxD`jL&!tEiXvo@{@j%$c9&oK( zo(1c>w=%+dE^8Aa0}$&)p4x1bc=ie{;}1kInG1e`T%=oxrvp;`h+l#WR#Kn{>5>+0FthDQT!;ihLpa`pAjeSIx(w4a#} zwQxe|{*_;vyE6|O!&&8^6$^*@^1s%enR=_k;2?2KP{<|~1sQHjM$Cvm=GqG*myBAe z7oHvVnxjyJ>YTco!NbY=GtZm+ojkNAHh0Xt1xtt>i53{>X8*4FWA~<%9m82rN64B!h=kr3V2UmwhHJr*;chR&?W^D z4^4eQ3@S}}$;vUa`x|o)91>GIT9*iJiek{Fjp7Jaq z0%br^saJmCwdUX;gbe`T(nGu}kzIjAokFI>vqWsK@BK)uY|AB*xwg>|q5y@9%- zDHoXL81=M+x3^J+(-L56R52hD-4^XOWbF7Peu($nvC9ayMg><(2}mVpI&5=u6-Ka& zV5r3tDq?{v{E!4Mz;x3H392Jb+0_Y^@rC>Nd4vx**lTAI>x$tS#(&l{mc-HrJo>I)s zXjM$(mjeV2-X_N|Dx_WAx8R!xU@Ss6!q7FanA=^8X_2MHS2EwW>|6J%A572Fc^bdH zw#95b5)>fD&Qx{+$velG|Cfb?0p%M7G?o~40bw?N8=uvrBxtij5*g>1r9sb&X!{Herj9r))dWg~N!<3e)i{emGsv^4R`ja2C1;{^J>VKzo+RC;^t4Y+<` z!aV*`$kWp%Maaj;xBB164+ceHD77}M{0ftT2Q)GKwU7r;R+bkKFw=OMZ+@^@o3h;o zavV4e22obNKTsbLmdg<*@{V$N?lmw~J)Av#SvlubRw{{^SNll>KUvV*LR96WL`3Y00~5L3q&jL~b~hWl^Emnwb#L(%KdA5>2ghEa32F^vQPWtCwQ* zxNyhOD}vXWLif*k$dogVv&mTW%UsK3ebS(t+O{kLHyjsX5=6z1brLW%5O$qkmghF% zK5ZylrDIcB;yb{M%gsWpe9?Sw1jv55ES6LTG(ZLOfJ?*5^Sr#T@N2C)QKV@?H1Xra zYscJn?rtv*4ooGXHO&lw33s@_70iB^8$@S^EfLZ}T|tMqZcW>Sjjo`iMhpamnCUsZ zIU=u~NJLwz2dCiudg4R$MN<*HoHH|KEZR{HKjZF1*cAynlId_M@-cno&7&z}$Phn8 z*L@%b??`fP@=R(Zxtt_q*$HA?;^@h0wN99bua_wv$ap5uf4OPG~$iNac|2BOAG8cuDN1 zSY^Gy5Hdfw(sl-tC4OUqoKCCVDqlLc2i6xygNp2^jikG6k7mR#W@wm;CV@uc{d~uoecBj{t>yp<|E(`@Q z0!MA+{rY{y%S$ZB4j}QOiXdJ8jJ~Tg_!ki~6TZ!rgqqGd7{BH;O8o0Z zc$c`@Z@gc)DOTLR6zE(m8c+$Zc-8UD-_j@(M zxxm^dFL|@yfZpRutw(tH75diV16fVeG{ugp`@3)f<%+|LIvk;ZsBDjgY6Wt4v;^f)3oEgSorJP3v*nO%VQbcaDrQ!e!(ja5lIr! 
z3;pgC6CRDeBQ+869SFGkgwj_Z(>6hjHKigEh}EkPfpg-Cy_)?2wdaA|yX2r-F2 z@$>!Eykg&O{ChP`i2(}(IIDF$Q>#gMeFYoKD69xqOl%$U9Q#lrXb`YkC2X;8U3fiR zyiDvysf1UyM-xR)bKU_R;~C~XT2jyUBfY(Tuqng`Q%3Z>*FU?K#5BAR z0PycH{V-Z8y1Odwna&+#*V@IMX}Zc8v+nj#IUKzNsnog=mi%(H$EJT4^oP1Xkv#cGKec?V4(d=h)u!IG~zkZO{^gt zKY+6^0vk5Sc%|%ODSwr~WCAir2+XzVd`wp@8Mht0Kx*l-)cRvYYBbX0U|*)SOn7ce zdfF6o%E_~}#0amzh%h0(mE!AZp6%Sz`O=9TrlR1WNzy@PR(9wMMy`Nigc4gXKIb24 zj0CHV{Xly6K$j09Ck}VD@49Fo`rsq*ZF)}UcUj)Qf_D@|CbVf4`|-=KpI$n3nx#O5 zegJQ*;kqA!@soe%xKE$3`ZA6>P*Woa6mwAzK-o;v7+FAq2WMkzl;rmld0;>Fzg({1<^@cc+hn)0cKiZ9`LCG*#W(5*XSc9UXl=nbX*M@Y= zD6SPGZo~P@LO_fW!Z;i`d7~|V7d&5bT?)4KW^q99E((7c6b!wx1P*c?v< z?BTpTsAlJb%=rX0A|SkWTcNicDG6`EXgjw91HSg}c4K{$oJWf9x(GYvMb^<7Y$OiWA}BMzoGvaF$5B!aNUkFZAsPuqxSa$GD_V?7V|k zfPMiG_+aRK(aEOiq;p*_)5VqVdOR*)IdK_{5I%0S67o7kI{tmn3ve~(gzw5zT6BCS zCY0MmPodX_Mp|r>TJhGhIT`fSN5BDA3RoV_2L*BwMXfJ~9Mlx=1jNs_`2dXfDk|x$DxI7Y2wl(ReSw zd)FKz`i&ue&eu(STu|0Gw6?1?idl&{i4!_Esn3%^8E_NR)b}9T3{Q08om4%>Fj0UT z&G|JVPUD*9Q|Z+HLknc=Jp*FAmCz|WkmqoEmesNz~N(R7g2nj@m>)L9J@^?-@vfZioE2cpI#?<<% zpF6I7WGk}DI?kUxA&K+{5hK?i_Hvn^$7nC3#lfgE51Hn#EbJ%8#DoRoK5W&Yu{rId z{MoZCp$)eD`wj$oCTlK-qea>!`w&}lY_(!ql8*9>(*!p)=D7_XH7m@ve~Z1+K282> zUdX8IenG(Ns#|Gn9C)Q6H29(EIK1!BL#6ZK_>de#kcKS_TuQg3`5|-K!C#pM<(r;G ztQ0e%*x0GqPT%tyJ|*0hOPa;$K>hC)NEHs_3+($`^4FMqQ;Rc%q25*+E?AqJg9E!u z*0cq$B^+A9$;HiK9L=WIjhW%ka@J;06N4CuDX^!vTjy zo@o}eYwQpJQdY3ys{`^Ue)?%28D_);3CYkP^aSpl$$6*xCz8tE-_hNPRUjE8a!=05 zm}o3Gr4p;59PkTGXA>DQTlfF#FN!}$h&r$6udJ=5HY5aKO>R{c<|x7M#*ZOE2It6o zP~w>mDdeweW5e=rLuMC454q&*qUr?)$xndz7;WOm<1$oJi}f~XzUs0gHc_l#c-}Hi z1KZeT+>%AvW42NC;VO&}5yQ6`f_g5uIO5TFqac7eI+Vl+S?+iEuw4ZQnY=;_#G6jF zCK$_!tspJ>!Ob>9HW#~0W`njOOuuL)btWErpv1EX(aRGe$vRs(!F)I4Cc@Ey0rjRp z?D1BkihlT#KRh)XN9&#h4Vyki^Ce$|%BRk3XQD%K; za(ftO%9-FQr(rQ=H?H2N3d{&uCSU|qMpCRyyai$I9uhdCTD;oH%13*}qS4oMTp{j- zT_iyfcEcce?UQby076D2b~;Q>@ji~;4z{w7VpjzUc~R=eInBj)NOELv`SgVEWG9jZPoS+n zU#=4#?F76&T_V1~!8Uu=4ieix=j2ryF~Tc4#Q(e4q^oC&x~7UmwIvbOHN!40th@I; 
z#co!6SorKJ*@1dpXXP9g&hQ*(hnI=L(thP_l>Cke2z?VZRO~Uqx?pQGtF&)Wrx#$m zpO3aJwe+ta4hN6x_aw76m3A)+LrVejVA$rNT2FV$Irqo z*-t)qh*_2Sy&8ay6^Qqe8RFU?;|Ww9>4=IS`;4_9+|cd3O_H zS}4v2f)?$aRTM;eYdMONcS*&e;P5B*i9XE%S82Ok8%j-4oZJk$rRde`0=J#vA2oI` zv2ucG0oZ&)Nb5%ISm$0L>p1nBJq!Ey8$0yUpzGlw@`|dc$mCDNU&wK~qUn+vSbX;9 zKic!AcZyENHSv$gCp==T^-Q!XeU1u6O zZ*zCG>*Y`dxURl02mY3-K}3tUV!<^Z6)}k1#NX4*lrj>}`_Djg>b&CBf0~gJu8bWb13aO7A47>0#TV#==FNPL&e9joVM^f{K z_+b>ol`)h1=54RD0cm-MH4H&~0ahqXDYfjlwFo1srY)Q)(3QNx*J6=a0r|S# z*lSSiADF^F2}jW}fcO5~03mqP%ulzVPk*JJ#m`DG`wrhpw>FCnYF!|R!;3-RWVBe| zfaVd2w6L9^^CWpW{h>B;7_=qHmTGBlE-&50KE*i@cWJN`{8g0)nd%xvS)1}@F8`=9vv{NP$P^-qq=Ty{fU9-ADWn3`Vcd~i0xZ?CHFBj zSOt-BUY^BR6%^qZYVt2&=h<{sh#JQZ>Te@r$Ppo|i8>be_z#MJ)j&n&FB?{sz50_( zg4?VR#}~Emy4LWk zg|!HuY)hd|&ed+dcHtPEgv3Q}k9L$DAAsxDSEkt)^p<@Dy0({}*fZ zkboQxAnJeYV9WL=`&P=}boqwoZ@G4h8u}~?Ck56&I^%|PdS3r59BC^p4K$vbN_cSw zk%NZT36S(Ay>!G|^tS96&{bs>`dZ5dj!1pxT#~`Bl+doXU9W2CYS?b5q|iB5M~2Dt z5?c&5xPM5%5)H~KvlPZ72Q|7O>wBvq>>^>l4=0}0$<3TN#CZPQXR6j*4ny^j2^CKf z%cu#Luj)ya9*VK+ZuDX0c#d9OmoM1%zPD9o$E`Vfn`%xj8}?u^KV_5+?$U0C5LF)(kJ8 z?WA=wUk%a{fH?r(*-Dg~b^I^T4<%sTg3`^a)CS^I$|f)r!w|QmM<09HCnU7C-D}I* z++LB$M3}IRKqozUR;v@T8GzViMT&^;sfV^*9v04MItX}cm1oZ@U@BxF;sSzv5CNQA zlV_As)}kN0;!K0u&o3dCTI+%1UXF{d2!X#b0?MaNE1j6yE?}W#5781 zP@AYB8@QHM=k9`icZU)}GS;skY=I2jn zz5ZW%Flp}Q%)EOg-VLP;a|w{06KtXK0a20#V9(wNruYN$DfxdUjwk7{i@DxRAuayh zRB!HPmx9VYqc8fi(E9ZwB=0(R(2t;J9|2ub|Z2yXrnGJ=nY z5}AA)tn{8nw}-APV|fA^q{ePP&#a=7f;4#G+ppD>c#==Hc^m_BxSrh`%f& z3h;X&D|5@Y(=6=F!a5rrptnpmtw|@kyazm7l1At$b@Eu$COzx7Y8phj^e3>qA9T{u zbM|$?$-rbIH*Y13M8j6l;YRji%azNL1Z~BYSAabobQZRMe3lAj*~owU^|y5~(BRtG zeQyTdD&o9c`p?X_F$Whd4A)aiJdue570{hc_msqUrDUH45ZF|j*{*5|`^d@dpGYP7 zjE|e_<^srBMQ>zg3RMx!Xc8PWg1K;wCVmG@2d}NKZp!Kw$+8~}5BpSUNdy0;w)};S zEAV&H9u3B4`+QQB!nOB5YWoDW^A074WCueceqojiDj$S?Qy2HU?6(q=slTR_48J?O z_s^Of%kOO$$)o6*UkxS#!x4bL;GB)n|R2C-+( zx?qRv7=9Ianbp559}!7r;~!C;I9S2!#n&i#TP=DMU#^Jkhl`gviiPzz5C@AAa|P<> zV9AzYo}4B@Rh%7bTdT`(iCo`ThZhsFVv5?mOBDuHLX)$W7$#2ZWme?wXo=fpJR67> 
zCX--Dwd4O2!D#hAmd7c`_%v5z4)gIaGbKvcu62Phjqfk6GHT&8Cps$qVWe1~U=wWz z!zBnZqAwMXnI>dQ=ESn5n0l3sQzJao$@I^T&8c{s2zqU2=ll!;R8gA%)!!DJ@v#Tx zec!X7=rd`pV28r*uE|1cS4zK}ZIU?wh&=x4-7}mUp^_ab#?61Tl3t?CkE>D)oU28r z@oRVLrm`5UHGj}7>ZVN_s~Dw;%M1w)@N071>GDl7?e!`)8Zv_vwbFeOj&%(en?JZy&>K5!2JGMk6jI~nx08+XDX)EiGDJ3h4#AZo60$K<{XsF8|>>bZ1z=+MnbW^bG{F3AVF%@pW|u`!>Pm)ww84*ZTT!dsqFyEyLz*Vg-R(Fu$6usn z%F8#qc4iHHP?bsqMA5vHZ!u}%O(Z8qMxpjLd~{q%UPt0EPqH~YQL4qRP$G;AFr>wn z^}`pXr0{gqaM%CaI02^o?u=iV9^Od(kLTs64oEt%XS-r4hWLf{^bVY<0K)1vkQDyZ{^>CPzRdHW5Z>jW^ae zxq!>Ct^@Y^8%eST87WP0c~rElX)O9((U@v+N`{X#o`6P#YZ0}%3LydAl{yLy#MSON zgDK98hHc&!n1ryDxqWAu;>ZPPpZlUmDjmp|ja;mBm{zfMvAO>JGoB!Yacm&P`2gU$_!MS(DhHPB!)XUQU z-s4`He_D(X?awG}nG+0;7}1vz+9vvM2xI%y!XS1OZ5;_7XoNqkI`5P5kP0huU(D76 zGr%Oc>F$vmU$;)pSrhWTAaAWj{AOlec0hhwRD^+P@CmH<-Nb=zW_nV z%c>=6Ayutp$&Pou2YxcmMCK#-(tY9`#R z?9XT)M|Up%v;n@B2j0JlsjR+fXO+O=tAEQ*>VCSO?O zXL3eY4~KKdtltziNALVr{=uiWam$>^+}7Oqb7_6gy8LG*YKQMyeRD_biGraOwzgsq za|}$J)qHz6s(VyIVMTjNh}G+N(M&o0gRnnYHOE4{0o5=n&jxN=uCU8c?F_cB0v`p6 zL$A+s zhsl;(Ila(Z&4D6`n{b){LCz(q)}|oCwDC6zT1(0rhX^ zUNc$%B!?eKSAj9Oi22YJS(B)fLR^gP8BV7U%9q>v^e#U;V|VItFMj})x#iJ7iMrZt zP28jcr)=ic-w=MlrZ&S`x|Olvs5fK6TlX|HeP@Lbylt%*i9D3|kra|206_19A$EoI z2>HYnc;Tk6II>uHi*5rrh^x_k0lH9>r9{FT2(S_c;gAQzsF?Lde=f_lyMV-E8a0D=5zk1D8fY_=dR&eE4DZ^Fi8%3L0l?3U$uf-ZN!;td_ev zY;MD)H4$#N{D-QUak!8LSz+(Z8$!uc8Aw6B?Jtm2SeKk&W0;^p*uFkK}I?3rr z*r|C5Z8qD>GgF$-bK$}SgmfNU7fBBk2FKp|ypf4mR2B`Btbi6rS%N{Y5sb&p4601V zs*Ny@nq>o~Sm6$#$wGWw)Gk*9UDpDX2VVnv01@pHMZ>Mf=6?7`d~oh~bD-wV5Ni3| zQ%7mjikpWzVTqT8DaUziP*2ay@2#E7&hLT@i&uLjc>>jKoR4 zBy+`Ov8Zjmwy$jM~Xm`2%YD@KLkX&gK zxU~({9lK47VRy5U2?99h*h? zQJcu~j2w{m+BkzRNiSGV?xo6!h}6mLmuv^n)ra_O>@_mTdnAaY7#ZbWy!~^!90F-~ z(7bSc(%M@f6u%);Ko&z2{EySh9VT&a?rSlc_)N3X9e|rK#kKaJ$!CvP_>mr^xnRAF zFP~YeO975&sK6)43{Ew_dJ|4ZnKA*kmKb(jOpM`}jwYjiCh_INCy)3DKgUd|TV5)r z5q9(Zoud;niZB`+-;X)vtGAemE{Y!sI{0?a%*_GrVLOKyGp68!-m*FNb=xVhj%WR( z#i4hGSL(r}Ekk9zFDtvG2qCcq))lLJQ7DvN&&5Gy&s-?K!vMfZ0{`vO1N-0yU}ztkzYs0dVM;wp9n`Sak9Ww&%aZefX6=_wkMu?) 
zq@N5-*)s6u&R@(6C5lNcPZvXrG8sJ0be=TPc;7{A1Q@1$$7B6mhvWHc#pqkxq-y_o zvAXe#V?WnA{T^)Y8eawsi;LW>9CK?%ti^5)Lb_-Ev;ZOG1 z1QTA$wxVm3H%s`W@Ldf}q`c+HvfW=-hnv|Hexa-LVpR&wZt#g0vUa1c*v% zaaESD2ufs7KFJlaAFm7NM&*q8>oq2!hs`eVtL4n~eC*by!lcJR9n$xIZ#@{(s4tHm z{84^9F7zln9CM^u{2w$k*<~f79ZK98KBh8BwOU7fRj&CfZJBq?dyFNiEuDi;3FP`i z3he-~b!GmD!rz6#EGPp`CJ4qq?N=s|B{J)vBjP{MU<7h#tw^>9o9_yo24j5%nl&i^ z9=leu#&3*5dR=ATpR`~owwj^cQof`8Op1!ZwdDowxj(X{uD_u@yK{{_iYok3165D| zlS{2%7g%OI<;#qeSav300=s$k`C}p9F=cZhjX1twmIYdFky4ITS!7u7=J@3k1e;Gt z+gJuLDb6mj2`%Z|0+boB;0TmwyWg2`jhud z&7T^#x^gA>)%7ujC%nU!r@&M4a|?@5vx(hbOg=vCXwf7|Th4=w1WfKPs_N|d=uHwj zKLpsp6v_iJl*X%;wnQCPmw7wcJ;v=P9b2ZzoaA=@ECa1$`C~Pa%2D?|pjUWi%$8wp z5;^^N7ODiNt?Dc;oMK8-C$zIUu*Z4`FKJ$^2S!I9V{tp;Auqft)WBmG+fP-AuipX>0O^~#x%l~cwUr`;?(#%7AB{V2=0a1t8yyHt*DV8U zG%lzDy-}1XHbZnip4}oQq3o>ZjA@_BrJ5*^TZZp}#9pV-Yw5(W?y$OE)SDRr+pYoPXEd^k ze|8cnxHv_B`|250P-Y&uC;a)`uR-++fG4!Agbf;xtQ=DH)|&UWYCB`;g2b?Ok4d^} zJJs7gfe5IPgP8j6&xxrs6Di`11VeFi-V2Dlc0L_CQm<3iK@@Y%@@6v9Ky(@S8+~GC zXDw#fa@DYd%v6@HPZ6_wG^725T7z>@sFYS+_uvsNn3K*wf#*7OYK*H*!jXp zi(|1;lgQ`9HTHz?N+M{)?044MUl~cb$CFYcNV1Uk%vf!C1%Z%(bFQPld0f0Hgp!nC zBj@RI8TpF;b;NsvWsD>7weP-b=2d5~c8Wf0*D0U29Wd18Z~I^j_K^#(Gay;)ILmXC z(>QRF;&Ohm+Df~dUi8N>a%04Q6G0nJY%pEmJ~ zT5A)zR^cw3|4Lz$lHDt{>#&)Yomn6Ykf1vP5+&Asu-9W2K&(SJlwiEJ#>V+1qM0}D zom%ezO9&~pr7VfrbK~T@dK7d&ZD9dCooPhLrWas&?dafD=VHq^f{l`B*WyCr;y$X4 zB=~_mShHf`!(R`M%n<8~1CP@eNj$vZ&Y5MqCt>GjZPHZrXQTWy0O@rjeFL1v_P5_s zYLZ0gJfUsBPN%wIcc&owy!IqtlAjk}#+YKu{hq~ef|dQ+jcd5ZHa+H8FB7ZP;-L!v zl7GL;d*vdeKBB^};Yr@&szq#3K3yqQxxO#6?2PDJl*Qcx)iGbENfd{Zdp39GG|4NR zHMzkwRcR8uwr;nwx@H?3Y}yTY=EMgo)|T3WM@fDoVM`|41gNfSAEWWwT-DhW8(Mi; zyRx=$v6f!Hj*xY1@RLz)KtC76ETLIK(=E*ayvAni&m?EAl(s5uN$=c`E`;gGglp8> zL$xg$Sah9($z1}-DTtlSETY+o6{&hX#L5&-Lj-(L@r$V%_f1wH8K$J`>=b6D=N+8l zkxIpxJMdMFUrs;7;jz}8--6ZOOu2Ax1ddR!WNv0AXqvyL#Sh;G_sGD?X5b1sJ;=sn zNWGTV*;bUSpQS1B=NVi_@-NuWqt54p_@)RXgcfp~rvlxo1nA9gdNOBHp3mb=gTqJ- z@AtrZ)e_$`O>iiPGK@*(JF!9uVrf)yW|ieA!@Hos&Sb2xL1WbN2$>-Y3Em$PhW!!J 
zfIh2+qE8b*EVgix&u6;L-+)x3?M@nahbouC=Znf&6RPI`Co2=Vi?NAFi>{xy4#i_5ptWmp*$`6J=d}vM%44@14#{=jiJhjgn=`AoVYV5uxXj7+6^8z|EiGf_bz zX@e4x1Em=Gm>;dJnsD05)EqYo!!uCAokP9W?OX&lGKk}LIw>Bzku#@lpZMRaP6qDea;{1 zdzKfRzR#*oEKOS0x+fqkT7T!>Af$AjsVQe5k@_f>@ttUe)_*)&0`$G;1pSo&OD4|! z=0M8UA$jwaXNtWsCB)<$znWFrLyg=k=8T!ElxNjzEGn%{mi0vl8hM47J}C)8A~6}ujal+y#oMqL-)7roPocE zYJ2Gh-chd?rt@`nMY}jaWNbb+{K@e8%P%sT)GRQS!co4SVHxpVHm0A+j*2dbzjuf) zLWhZ=itakE9~44*!{4*>{hHoOd*J|8DJN_}0IJ(vqySPn$Q=`N-a#g#hMdWNOh^>Ve{oes5i8J3pPG7D2PZYx-;b!T4S z&4ibFZ^*Ms)a0_yy<{hTGTvy3*rRE=K?i3}E&5}P6V(tIcRB@7<4pZdSc7&61loC) z|I`R78r?Z)U}PNHfF!%;Qw<0A*cYaYr5j+qkzGntfz6IT%;x@?0iExEFeA-0$Bwr5 z(}BL{E_{Uke0$de8p&9*m3$j*4sP{3CnsBhPAv8Ur&53a4yO@MVE`P4#q4Ef${C(Z zb4ciAjDo)hAp@{bh16TeNm-@sYs(7`QEqiR!SuH^jjE4cY8rEk=m{=fPs{=Fk)0fI zbFkm2J=qf-4TME4OnSoFMf_bZv#8rS*xUs|*_? z7*4u$#{0ku;BNIX5^AuJwVL1emP{x}7+poeLi!JO1N$Y&1Z`N!DpjGqb z6`uY9hGaT%P2q?Ge~$Cmz?mC>Tgh-R zdSOdh92DB`;7m+&T7+84vbLZ+601&DZsoo`zt6O)PMSN1sCINY2ACr({??NkigL{%qgXu_A2x1~c%)B6F zGb3RWUMwcL$jLAyUITw9{K|_73Xj_bAY<7%15u|%ObtEE;TRfLRoR8Pxg+%i;`&Gd zF&0{+M_cpyQ|z^NA?nys#j{4^uQaqU2_j4UZq!!^38oV^vYCa2B_9K5Bo3I)N~1n5 zvt&{HVJVs=5B7|Hm8@-h)}o&*F2KHzFiQ;kiF~ojmMB%ER2I~T{$F)3lZDfFpdI+A z1QG!9roqe>9iN+$M8fE7SI8#6&K51sOOO1kca>UegbBriCu9DSO1*1e!{Y1EoM#`` z_Bh(^v*72Z|Y5JrBRJ2#2$=t$Q6to3#GvhtZytID_fz zxq4|}BLb3;TF&(j>de*=ff2g6~3vpQq9W}8b$ zqN8?38gOZ5s=7^We2qIe%SMz8rkG(|PvssM@bm~m@z3`bMI|9Tb-UkN0LnPG{ zCdP!#j`kFt5cdr<;izf+JfIF3h&R?%FGWs=w}qIGgLvVsWqxSJ+xrqk^pHU|o(Y1Z z6}^k@@GhQ7sEy`|UVX4>#=DliI7dB=C6QpB0~qM71NC~v?DM@h;2s`M6^G__-`H>C za>Q%Rfd0u}&i_S_q&SCZ1L}O!{$Jf0DerfktLeWhs0BJdiL6ELDqd*d*p8{)0DW@w zT%6?-=24`Phby$?KpN+s{YT$drH-hd_xwPcMVg&1cFNBTASYNi2efAYsf3`urV8J< zof}Wi^F9Ok9BKV}FfWg5K}*7iXU7W-w)MqgVjsBE)mjCKW|sz77_I7Xb9g zJJW(vITO*=s*4n+l%d)4T${(947xOIn2t3GQ5w=^JDfAm)~%}!^PDG|0iMhpH6kZ_ zIaa%XpB1{Q-p7TIxvpyWZ3!K(@VKgQlbLJz!Wh&p@>VS6;j=Ko^8hd_7MI~tG}W&w z1$c^pt4r+#sle#P^H!Z=frJ&h*<8Hvh<;i7xEaOlX{afB%d9$<#VQvv7iu`a`hL-` 
zzx7h7yqfqd@$pTN`}|x=EhCoIN`LgPM2XlPMwtOQNER(~R)4)Qvkj%x+=n+C4(^iH z=lf(o0Hb6w!Q19n?-WSo&*<~5Wo02f+91-sQoRpY2P;ubvF&rS8j5e&CQanXKT=F3 z`s8qd+9ze;(5K)7%#KgQ!uvDgbwHBa_DL3uJPLjPap_$my3lfohmzuRscQ_E{vQTI z9%-++QI(635ZI)~DdCdhFf0o4iK$xST^})uJ${ZH13KXcI;7DgMB#a^?i@+T*(qkx z6coC256oOKWC2r=N{7Pl|B9%LTji~%Fst-H)@ZO37rh7XfNc0}N1wn#jpt-$T3N_X zf!cL1W)eeV*Be{=Q9yZ&f6{N0kvc`ZC2JZhI~7kt#Zrw)Kc)AN!$g*TU4(4YjZDZnt|!ld>O#st zHzzSyD&T%Ht0{Ca9@4x}zP8i2pOQoGOcj)#gA88$+P7Go6F*aY1vw{V*O(gHYktBs zZxWxR<3b7*OyIAd%6C;}dLKQ1Vpg+eYu*>M`nWR%Nh*!V7qht0G(IpTiietDLbPE^ zK|;|OUO6}fDpnektf^L?>WxmuXKPC&C5_zUM7ME9SLehAS#)XMd$XqABb|8QG5dA3 zWt!yG_pI~LTvWhv?j5bznjmfo$*phAtHRzq?AFm^W~EebgS~`g@@_3>E=U!-aBvk)@C(PQx-V<-X3?8t z5i4EwGzV`jj`$xiF`I>Xs-Tc}N>Y5Q6?G9|P+=!Jj-=at9M{Zf0q4K?>z8>Ss`5w0 zKqMKzuCd_1_72@G(B+|xzX)8?(o@|UIr7`awSrgyUAc%4krm0MZ{WJr@uYZ^Np75rf4CaG z>b+>9nJZ*0T`J5t+O%^)wQc_jeJ!z-dPaOl=FEbmp35cEttJdZe2sR;*FU_HXNsjA+K%{S>Vx+_o@y?EmO6PN! zDc*q$w+Po#q%>FvSLfv4_3U|1|3t(z^?tTjpdN?gnf$f`Z@Koh6}gx$OuV`Z>93Yu zZX=oG8ch~BQh>6CtaWZ$FyL=H^}{+vtCr|E#H?-bhbnY+W6|j%qP}C51`>hSO<&mw zKU!z1=~Fm_WZrY762kt10QmM^_f42LV5L8q6cOuW(`2wM!pe*gnPt7iO1^?b6aLp! 
zX?ha!!Z1oi6rOKW^A6Fi5z)UjZ05z0kkss=BKn3EI+pucQod@EfJglL_G!^P?A0C2lY0@zhb%f`G4^)QR@$-rWBN=>OG@hC}(1lYHm<|~g9VP%^lN1XcXzwdGpmAW98Y6bTUraTM2+AlPkL}@uf)6c|1zMq* zEHJ;dFU4P8B)#}_m|M2c@pf3q>+Fwv4yd${M9q-Dg58oXjelA~YsvZ1^lj=VSY4ry z0@$rTp+l#K5zZP{EjjYL$%sR78x~z6^Sy_ly^S~LD*(NzFH?+3*wZkauA!v5doNwk z88CC}dx}fryavgHmNP6Yd`OFS`}vqi<-v4O8O$Y)FK5mAjL|Nn2dRzH3akZsb$328 zqvzEimn#Lj7W&HX;5t+wQyq>59a)T>KQnMU}q2^JdVFS%ib~Pa=(zz@0P5HB``#5%DDd%mGl)`IBQhZ z^<56CoHxwYKYOK5RV|NGZLnT1BcxV@j;yBF$6R^E#WWw%< z3?#8o2Z|f-ko65|%bw+#QvSJ~raw1UX1dut*g(aU zBUWd_9=i0TEFsG$T9g%F%Tm(Vq&{bRq8C|WwVg*8LXpxx>P3LFG6d>{a}WE{qYkhQ$7w6~P zhYl1u!UiwAkS_|Goiq+nBql%3V_46+L$=r7Ry;dVK_DA|y`y=ceEuP-zw{(GZX#&# zrx|%JK3Vk(E>C7t^toAl7>$#Gte#P2HzfK`lAm!i)Jmx+r|d#xt!;5Fn9a5Znx`M( zkLN=P$P@P&UZ#1E-ZFow4>8p%OE1(uEhEioZuGV4fRCi?2B2V*^(X}g^x;otO$_#F z(({&I69wC(Mvbd#@Q)8pjgzBq(Ly#ND$Ghu)Mp!E{Y)5)`9#vB3x{uJ`aY<~XcY;+&x}mBS_Rge_w&i)#j$!w8w204z4v|MS>umNjEphEn ztB?Q4-GZ3!&!&*o75q^UQupxgv(P#yT68Lj*KlD(_bMp5h~-;{sUu=9s{QvdhHj3wJU8ofZHhXyUK z*^?g>T&LdSzWviaG4=fVZ2icH?v?t3NG25~-DW$fM5RTHhNN|?Arl9WQk)7U!9_NT zKBT}znVZZtlD7p{!7BNSuWxk6haG3yjcYNh8}DV9FtBQ$R*gqI)4zSHAqJDKz z(QX4FJm$77X&@S-Qf(&?zgnlRe2DKy1pdt3oe2n?c@)RNtx013OT~h>C3P2|h`ne? 
z*T>aJf4dw$8rhFu7=0-{rDbAv_o-vH1~(y&<||a-p})nlK7T>+EF^|DG%x(Dc|WT7^(-*1o(hchix)kGMonRE!u$KN;Xr0HJDvg zTG6Zf>(tQxc9?Fa+*_L67h)ld3HtI+UkKmgR)v~tnk@LX!Z&bU%LsavYuNJg#d^1|WsZiD=RdfZw^i+huVgivwP9O} zg}sOh}^UN4^+KtfK=i8d6s$iB4otOh}29xExO;^9nc<%;jkneHcm#T#03 z5{v>*s`K23$Yg<{s94`J1^G8$_2lXQMn)dlmNPJstIkl8DcSq(1jV6%2~8zH4)uD> z2Tceo{zzLdLumH-A>PWMxMQraQ6lao(3cqi)C^1*R!lg- z?N82}pHAvK4G=>2I8fWrL`8_?UgBG3E?QbQ8{MMRr3tfXl@AdEII|eQR3gZPFnrVz z#8YDm^Ji^E^Rpi?ZEaF7opL%fG=;3I%9|1llGX;F2$2v`rm1!=q0112lju_B(3s}F zL7?0}r?)>>RbjIntbixnT>y#m`wWt3btf0%Nh>`0@JL>0w9%>VX&%@>7xUN>z;=nx z4)Ve^4s*(Xpj{3rCAlnPBEKh#R-205Gf_~1P6Q#-p=|2?g(zA;2P#br53d_u$8_)k zPW^auXypOCB1kZsJ3END&o15hW#)t|O?}OTHuAaI6d;78^mde>C!HO>`LfW|Cu5G7 zm-d`zc}yni7=4B?k5+~ow&TNCnmy9T$;C68toU8iWFSHvgdHlVP^pKi7ws>iE43X% zpEmwkB-Ea?7>hk0AG6DD6lj;3xf=)YwJ5OLm62}u+Zr=1BI)_Bes+(=Y-oyPA;FHr z9)pU5E)JypBf}nt97aP0f1OJ!Z|539j6#G=olLpZ1FP9rkFfs~MEr}pSO};kB!uLH zbz@UDXh-+#iC13N2}y)-Z0vQ*E;7{Vf)YtoYQZt=s=%r0)w+P#+_A$mQxqz+S#tL*65x zAMZQh|5=fQ3o%@RbMS^gIUa+{z@PvaL($4kIeTQzxL-oGv?@~ej-G6mh-lH!*M1)* zTUXzH?joHok3&k3&5p?wtO%#SsV8)%g13i*MkCGyQ!pp`sA*Ko*6=A%Z!dP4ro_0oOBraRv? 
zC^`K3kg%#K@A2gQh3vgt$LgndBGPWh`?a!CbP2L#wFeoUbk4dq`5w}UUI0c26(Rk* zlxP!H%QXJl{a3T*KYvJm(jVyfFmQbY|K(`l{~3--@V^TX0V(BC+sYV1kgxX3p3 zIRuL%>JL`s%L8ju8erN;u=FxridQo)Ngfz~H?-V|&Df9u)*P#?l*hPy+1(k)FaAq>aJ$dfb*0{UaAv-j z^~$5FbdsUTHnVrap0-tM1W-^oXkQr-3Al_gJ~BNn2qA|LI&V~*T!KctL<~5-PjYo1 z7KJXAv5}y=25^c5c1gN@FGewy`?K8T8C=c%fX?B|y&V^W^6_4%&Y6|$_d-VdQo4=_ zLEj`|sYrZO#G^Vmj=x4&F*l1n=ZMB)9`Nng71684KiG-g`L)@pbqVAvUR+%``|ipc zE>I!48Nx)kTcken_;mTUHfdjbUE#;xq?-f5lC|3>2alKTw$H>iaUo98%JDMlW2XU< zj!wQzh(i^9zJeyD$QSEq+zS6@q$mnIllBv`Xc=AWv`Lt_(Mn}=x<+^I`=|)B1Bs=kcelwSmrHrgzup2^; zGJLZd?vJ%8Le8VlwRM^w_t+y#zeoVlNmDG|QGhdLE8A<1Q79_ReM$_&Fm$54H>$|r zW$6;?eBsfXW7gNfnmwc12@B26u2;=|6|E*yGzufVS|_Leb}MoT*q(l3$|sheV>KfvAe zzicnka}H2um!?J75BQv6(4lV`5fFC{QlWd3y4N}HA3{3{{YgS zQN-h~^&x-KlD(_HZC3SFg%bO5fj1sJl}aU;oy(KC1e%^6*29!V-%h1HDZ9Rs!>=ps z1z;Q*rKcLI`{p#qDe}6~xYmBbeYKJ^lHe2BjFQyupQp5>euQ~1sp(1h8Ql~%0PEW9 zFluj4&gqB6lt3P65k@G2Jr-0mT~8O2Ynd6e%-qA%i!kIwUPPi zm!^*4QrqJ+&(rz0Xnn9LiSkO>K)*Mtv#!XCvPS+oYiXo=|o6h$%f+-O)7X9$L+^>f51?>s|#l_bPGRK-G*MAph8Y3(;NR!#2-2i2r1oRIAxfIc#EBgYNK0i0C4I|?ZfE^y^y zOmlylBt1&;^6%lq*CG-Hq*wl;-*7Uz_%qG?tCagJs*BSNzL-trd>qsfx{%}<_akf8 JaYBOJLB(>A>}vo3 diff --git a/libs/guessit/test/__init__.py b/libs/guessit/test/__init__.py index 7ce54945..e5be370e 100644 --- a/libs/guessit/test/__init__.py +++ b/libs/guessit/test/__init__.py @@ -1,26 +1,3 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- -# -# GuessIt - A library for guessing information from filenames -# Copyright (c) 2013 Nicolas Wack -# -# GuessIt is free software; you can redistribute it and/or modify it under -# the terms of the Lesser GNU General Public License as published by -# the Free Software Foundation; either version 3 of the License, or -# (at your option) any later version. -# -# GuessIt is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -# Lesser GNU General Public License for more details. -# -# You should have received a copy of the Lesser GNU General Public License -# along with this program. If not, see . -# - -from __future__ import absolute_import, division, print_function, unicode_literals - -import logging -from guessit.slogging import setup_logging -setup_logging() -logging.disable(logging.INFO) +# pylint: disable=no-self-use, pointless-statement, missing-docstring, invalid-name diff --git a/libs/guessit/test/__main__.py b/libs/guessit/test/__main__.py deleted file mode 100644 index 32b8dd10..00000000 --- a/libs/guessit/test/__main__.py +++ /dev/null @@ -1,40 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# -# GuessIt - A library for guessing information from filenames -# Copyright (c) 2013 Nicolas Wack -# -# GuessIt is free software; you can redistribute it and/or modify it under -# the terms of the Lesser GNU General Public License as published by -# the Free Software Foundation; either version 3 of the License, or -# (at your option) any later version. -# -# GuessIt is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# Lesser GNU General Public License for more details. -# -# You should have received a copy of the Lesser GNU General Public License -# along with this program. If not, see . 
-# - -from __future__ import absolute_import, division, print_function, unicode_literals -from guessit.test import (test_api, test_autodetect, test_autodetect_all, test_doctests, - test_episode, test_hashes, test_language, test_main, - test_matchtree, test_movie, test_quality, test_utils) -from unittest import TextTestRunner - - -import logging - -def main(): - for suite in [test_api.suite, test_autodetect.suite, - test_autodetect_all.suite, test_doctests.suite, - test_episode.suite, test_hashes.suite, test_language.suite, - test_main.suite, test_matchtree.suite, test_movie.suite, - test_quality.suite, test_utils.suite]: - TextTestRunner(verbosity=2).run(suite) - - -if __name__ == '__main__': - main() diff --git a/libs/guessit/test/autodetect.yaml b/libs/guessit/test/autodetect.yaml deleted file mode 100644 index 864b8827..00000000 --- a/libs/guessit/test/autodetect.yaml +++ /dev/null @@ -1,489 +0,0 @@ -? Movies/Fear and Loathing in Las Vegas (1998)/Fear.and.Loathing.in.Las.Vegas.720p.HDDVD.DTS.x264-ESiR.mkv -: type: movie - title: Fear and Loathing in Las Vegas - year: 1998 - screenSize: 720p - format: HD-DVD - audioCodec: DTS - videoCodec: h264 - releaseGroup: ESiR - -? Leopard.dmg -: type: unknown - extension: dmg - -? Series/Duckman/Duckman - 101 (01) - 20021107 - I, Duckman.avi -: type: episode - series: Duckman - season: 1 - episodeNumber: 1 - title: I, Duckman - date: 2002-11-07 - -? Series/Neverwhere/Neverwhere.05.Down.Street.[tvu.org.ru].avi -: type: episode - series: Neverwhere - episodeNumber: 5 - title: Down Street - website: tvu.org.ru - -? Neverwhere.05.Down.Street.[tvu.org.ru].avi -: type: episode - series: Neverwhere - episodeNumber: 5 - title: Down Street - website: tvu.org.ru - -? Series/Breaking Bad/Minisodes/Breaking.Bad.(Minisodes).01.Good.Cop.Bad.Cop.WEBRip.XviD.avi -: type: episode - series: Breaking Bad - episodeFormat: Minisode - episodeNumber: 1 - title: Good Cop Bad Cop - format: WEBRip - videoCodec: XviD - -? 
Series/Kaamelott/Kaamelott - Livre V - Ep 23 - Le Forfait.avi -: type: episode - series: Kaamelott - episodeNumber: 23 - title: Le Forfait - -? Movies/The Doors (1991)/09.03.08.The.Doors.(1991).BDRip.720p.AC3.X264-HiS@SiLUHD-English.[sharethefiles.com].mkv -: type: movie - title: The Doors - year: 1991 - date: 2008-03-09 - format: BluRay - screenSize: 720p - audioCodec: AC3 - videoCodec: h264 - releaseGroup: HiS@SiLUHD - language: english - website: sharethefiles.com - -? Movies/M.A.S.H. (1970)/MASH.(1970).[Divx.5.02][Dual-Subtitulos][DVDRip].ogm -: type: movie - title: M.A.S.H. - year: 1970 - videoCodec: DivX - format: DVD - -? the.mentalist.501.hdtv-lol.mp4 -: type: episode - series: The Mentalist - season: 5 - episodeNumber: 1 - format: HDTV - releaseGroup: LOL - -? the.simpsons.2401.hdtv-lol.mp4 -: type: episode - series: The Simpsons - season: 24 - episodeNumber: 1 - format: HDTV - releaseGroup: LOL - -? Homeland.S02E01.HDTV.x264-EVOLVE.mp4 -: type: episode - series: Homeland - season: 2 - episodeNumber: 1 - format: HDTV - videoCodec: h264 - releaseGroup: EVOLVE - -? /media/Band_of_Brothers-e01-Currahee.mkv -: type: episode - series: Band of Brothers - episodeNumber: 1 - title: Currahee - -? /media/Band_of_Brothers-x02-We_Stand_Alone_Together.mkv -: type: episode - series: Band of Brothers - bonusNumber: 2 - bonusTitle: We Stand Alone Together - -? /movies/James_Bond-f21-Casino_Royale-x02-Stunts.mkv -: type: movie - title: Casino Royale - filmSeries: James Bond - filmNumber: 21 - bonusNumber: 2 - bonusTitle: Stunts - -? /TV Shows/new.girl.117.hdtv-lol.mp4 -: type: episode - series: New Girl - season: 1 - episodeNumber: 17 - format: HDTV - releaseGroup: LOL - -? The.Office.(US).1x03.Health.Care.HDTV.XviD-LOL.avi -: type: episode - series: The Office (US) - country: US - season: 1 - episodeNumber: 3 - title: Health Care - format: HDTV - videoCodec: XviD - releaseGroup: LOL - -? 
The_Insider-(1999)-x02-60_Minutes_Interview-1996.mp4 -: type: movie - title: The Insider - year: 1999 - bonusNumber: 2 - bonusTitle: 60 Minutes Interview-1996 - -? OSS_117--Cairo,_Nest_of_Spies.mkv -: type: movie - title: OSS 117--Cairo, Nest of Spies - -? Rush.._Beyond_The_Lighted_Stage-x09-Between_Sun_and_Moon-2002_Hartford.mkv -: type: movie - title: Rush Beyond The Lighted Stage - bonusNumber: 9 - bonusTitle: Between Sun and Moon-2002 Hartford - -? House.Hunters.International.S56E06.720p.hdtv.x264.mp4 -: type: episode - series: House Hunters International - season: 56 - episodeNumber: 6 - screenSize: 720p - format: HDTV - videoCodec: h264 - -? White.House.Down.2013.1080p.BluRay.DTS-HD.MA.5.1.x264-PublicHD.mkv -: type: movie - title: White House Down - year: 2013 - screenSize: 1080p - format: BluRay - audioCodec: DTS - audioProfile: HDMA - videoCodec: h264 - releaseGroup: PublicHD - audioChannels: "5.1" - -? White.House.Down.2013.1080p.BluRay.DTSHD.MA.5.1.x264-PublicHD.mkv -: type: movie - title: White House Down - year: 2013 - screenSize: 1080p - format: BluRay - audioCodec: DTS - audioProfile: HDMA - videoCodec: h264 - releaseGroup: PublicHD - audioChannels: "5.1" - -? Hostages.S01E01.Pilot.for.Air.720p.WEB-DL.DD5.1.H.264-NTb.nfo -: type: episodeinfo - series: Hostages - title: Pilot for Air - season: 1 - episodeNumber: 1 - screenSize: 720p - format: WEB-DL - audioChannels: "5.1" - videoCodec: h264 - audioCodec: DolbyDigital - releaseGroup: NTb - -? Despicable.Me.2.2013.1080p.BluRay.x264-VeDeTT.nfo -: type: movieinfo - title: Despicable Me 2 - year: 2013 - screenSize: 1080p - format: BluRay - videoCodec: h264 - releaseGroup: VeDeTT - -? Le Cinquieme Commando 1971 SUBFORCED FRENCH DVDRiP XViD AC3 Bandix.mkv -: type: movie - audioCodec: AC3 - format: DVD - releaseGroup: Bandix - subtitleLanguage: French - title: Le Cinquieme Commando - videoCodec: XviD - year: 1971 - -? 
Le Seigneur des Anneaux - La Communauté de l'Anneau - Version Longue - BDRip.mkv -: type: movie - format: BluRay - title: Le Seigneur des Anneaux - -? La petite bande (Michel Deville - 1983) VF PAL MP4 x264 AAC.mkv -: type: movie - audioCodec: AAC - language: French - title: La petite bande - videoCodec: h264 - year: 1983 - -? Retour de Flammes (Gregor Schnitzler 2003) FULL DVD.iso -: type: movie - format: DVD - title: Retour de Flammes - type: movie - year: 2003 - -? A.Common.Title.Special.2014.avi -: type: movie - year: 2014 - title: A Common Title Special - -? A.Common.Title.2014.Special.avi -: type: episode - year: 2014 - series: A Common Title - title: Special - episodeDetails: Special - -? A.Common.Title.2014.Special.Edition.avi -: type: movie - year: 2014 - title: A Common Title - edition: Special Edition - -? Downton.Abbey.2013.Christmas.Special.HDTV.x264-FoV.mp4 -: type: episode - year: 2013 - series: Downton Abbey - title: Christmas Special - videoCodec: h264 - releaseGroup: FoV - format: HDTV - episodeDetails: Special - -? Doctor_Who_2013_Christmas_Special.The_Time_of_The_Doctor.HD -: options: -n - type: episode - series: Doctor Who - other: HD - episodeDetails: Special - title: Christmas Special The Time of The Doctor - year: 2013 - -? Doctor Who 2005 50th Anniversary Special The Day of the Doctor 3.avi -: type: episode - series: Doctor Who - episodeDetails: Special - title: 50th Anniversary Special The Day of the Doctor 3 - year: 2005 - -? Robot Chicken S06-Born Again Virgin Christmas Special HDTV x264.avi -: type: episode - series: Robot Chicken - format: HDTV - season: 6 - title: Born Again Virgin Christmas Special - videoCodec: h264 - episodeDetails: Special - -? Wicked.Tuna.S03E00.Head.To.Tail.Special.HDTV.x264-YesTV -: options: -n - type: episode - series: Wicked Tuna - title: Head To Tail Special - releaseGroup: YesTV - season: 3 - episodeNumber: 0 - videoCodec: h264 - format: HDTV - episodeDetails: Special - -? 
The.Voice.UK.S03E12.HDTV.x264-C4TV -: options: -n - episodeNumber: 12 - videoCodec: h264 - format: HDTV - series: The Voice (UK) - releaseGroup: C4TV - season: 3 - country: United Kingdom - type: episode - -? /tmp/star.trek.9/star.trek.9.mkv -: type: movie - title: star trek 9 - -? star.trek.9.mkv -: type: movie - title: star trek 9 - -? FlexGet.S01E02.TheName.HDTV.xvid -: options: -n - episodeNumber: 2 - format: HDTV - season: 1 - series: FlexGet - title: TheName - type: episode - videoCodec: XviD - -? FlexGet.S01E02.TheName.HDTV.xvid -: options: -n - episodeNumber: 2 - format: HDTV - season: 1 - series: FlexGet - title: TheName - type: episode - videoCodec: XviD - -? some.series.S03E14.Title.Here.720p -: options: -n - episodeNumber: 14 - screenSize: 720p - season: 3 - series: some series - title: Title Here - type: episode - -? '[the.group] Some.Series.S03E15.Title.Two.720p' -: options: -n - episodeNumber: 15 - releaseGroup: the.group - screenSize: 720p - season: 3 - series: Some Series - title: Title Two - type: episode - -? 'HD 720p: Some series.S03E16.Title.Three' -: options: -n - episodeNumber: 16 - other: HD - screenSize: 720p - season: 3 - series: Some series - title: Title Three - type: episode - -? Something.Season.2.1of4.Ep.Title.HDTV.torrent -: episodeCount: 4 - episodeNumber: 1 - format: HDTV - season: 2 - series: Something - title: Title - type: episode - -? Show-A (US) - Episode Title S02E09 hdtv -: options: -n - country: US - episodeNumber: 9 - format: HDTV - season: 2 - series: Show-A (US) - type: episode - -? Jack's.Show.S03E01.blah.1080p -: options: -n - episodeNumber: 1 - screenSize: 1080p - season: 3 - series: Jack's Show - title: blah - type: episode - -? FlexGet.epic -: options: -n - title: FlexGet epic - type: movie - -? FlexGet.Apt.1 -: options: -n - title: FlexGet Apt 1 - type: movie - -? FlexGet.aptitude -: options: -n - title: FlexGet aptitude - type: movie - -? FlexGet.Step1 -: options: -n - title: FlexGet Step1 - type: movie - -? 
Movies/El Bosque Animado (1987)/El.Bosque.Animado.[Jose.Luis.Cuerda.1987].[Xvid-Dvdrip-720 * 432].avi -: format: DVD - screenSize: 720x432 - title: El Bosque Animado - videoCodec: XviD - year: 1987 - type: movie - -? Movies/El Bosque Animado (1987)/El.Bosque.Animado.[Jose.Luis.Cuerda.1987].[Xvid-Dvdrip-720x432].avi -: format: DVD - screenSize: 720x432 - title: El Bosque Animado - videoCodec: XviD - year: 1987 - type: movie - -? 2009.shoot.fruit.chan.multi.dvd9.pal -: options: -n - format: DVD - language: mul - other: PAL - title: shoot fruit chan - type: movie - year: 2009 - -? 2009.shoot.fruit.chan.multi.dvd5.pal -: options: -n - format: DVD - language: mul - other: PAL - title: shoot fruit chan - type: movie - year: 2009 - -? The.Flash.2014.S01E01.PREAIR.WEBRip.XviD-EVO.avi -: episodeNumber: 1 - format: WEBRip - other: Preair - releaseGroup: EVO - season: 1 - series: The Flash - type: episode - videoCodec: XviD - year: 2014 - -? Ice.Lake.Rebels.S01E06.Ice.Lake.Games.720p.HDTV.x264-DHD -: options: -n - episodeNumber: 6 - format: HDTV - releaseGroup: DHD - screenSize: 720p - season: 1 - series: Ice Lake Rebels - title: Ice Lake Games - type: episode - videoCodec: h264 - -? The League - S06E10 - Epi Sexy.mkv -: episodeNumber: 10 - season: 6 - series: The League - title: Epi Sexy - type: episode - -? Stay (2005) [1080p]/Stay.2005.1080p.BluRay.x264.YIFY.mp4 -: format: BluRay - releaseGroup: YIFY - screenSize: 1080p - title: Stay - type: movie - videoCodec: h264 - year: 2005 \ No newline at end of file diff --git a/libs/guessit/test/dummy.srt b/libs/guessit/test/dummy.srt deleted file mode 100644 index ca4cf8b8..00000000 --- a/libs/guessit/test/dummy.srt +++ /dev/null @@ -1 +0,0 @@ -Just a dummy srt file (used for unittests: do not remove!) diff --git a/libs/guessit/test/episodes.yaml b/libs/guessit/test/episodes.yaml deleted file mode 100644 index afba6e74..00000000 --- a/libs/guessit/test/episodes.yaml +++ /dev/null @@ -1,1174 +0,0 @@ -# Dubious tests -# -#? 
"finale " -#: releaseGroup: FiNaLe -# extension: "" - - -? Series/Californication/Season 2/Californication.2x05.Vaginatown.HDTV.XviD-0TV.avi -: series: Californication - season: 2 - episodeNumber: 5 - title: Vaginatown - format: HDTV - videoCodec: XviD - releaseGroup: 0TV - -? Series/dexter/Dexter.5x02.Hello,.Bandit.ENG.-.sub.FR.HDTV.XviD-AlFleNi-TeaM.[tvu.org.ru].avi -: series: Dexter - season: 5 - episodeNumber: 2 - title: Hello, Bandit - language: English - subtitleLanguage: French - format: HDTV - videoCodec: XviD - releaseGroup: AlFleNi-TeaM - website: tvu.org.ru - -? Series/Treme/Treme.1x03.Right.Place,.Wrong.Time.HDTV.XviD-NoTV.avi -: series: Treme - season: 1 - episodeNumber: 3 - title: Right Place, Wrong Time - format: HDTV - videoCodec: XviD - releaseGroup: NoTV - -? Series/Duckman/Duckman - 101 (01) - 20021107 - I, Duckman.avi -: series: Duckman - season: 1 - episodeNumber: 1 - title: I, Duckman - date: 2002-11-07 - -? Series/Duckman/Duckman - S1E13 Joking The Chicken (unedited).avi -: series: Duckman - season: 1 - episodeNumber: 13 - title: Joking The Chicken - -? Series/Simpsons/Saison 12 Français/Simpsons,.The.12x08.A.Bas.Le.Sergent.Skinner.FR.avi -: series: The Simpsons - season: 12 - episodeNumber: 8 - title: A Bas Le Sergent Skinner - language: French - -? Series/Futurama/Season 3 (mkv)/[â„¢] Futurama - S03E22 - Le chef de fer à 30% ( 30 Percent Iron Chef ).mkv -: series: Futurama - season: 3 - episodeNumber: 22 - title: Le chef de fer à 30% - -? Series/The Office/Season 6/The Office - S06xE01.avi -: series: The Office - season: 6 - episodeNumber: 1 - -? series/The Office/Season 4/The Office [401] Fun Run.avi -: series: The Office - season: 4 - episodeNumber: 1 - title: Fun Run - -? Series/Mad Men Season 1 Complete/Mad.Men.S01E01.avi -: series: Mad Men - season: 1 - episodeNumber: 1 - other: complete - -? 
series/Psych/Psych S02 Season 2 Complete English DVD/Psych.S02E02.65.Million.Years.Off.avi -: series: Psych - season: 2 - episodeNumber: 2 - title: 65 Million Years Off - language: english - format: DVD - other: complete - -? series/Psych/Psych S02 Season 2 Complete English DVD/Psych.S02E03.Psy.Vs.Psy.Français.srt -: series: Psych - season: 2 - episodeNumber: 3 - title: Psy Vs Psy - format: DVD - language: English - subtitleLanguage: French - other: complete - -? Series/Pure Laine/Pure.Laine.1x01.Toutes.Couleurs.Unies.FR.(Québec).DVB-Kceb.[tvu.org.ru].avi -: series: Pure Laine - season: 1 - episodeNumber: 1 - title: Toutes Couleurs Unies - format: DVB - releaseGroup: Kceb - language: french - website: tvu.org.ru - -? Series/Pure Laine/2x05 - Pure Laine - Je Me Souviens.avi -: series: Pure Laine - season: 2 - episodeNumber: 5 - title: Je Me Souviens - -? Series/Tout sur moi/Tout sur moi - S02E02 - Ménage à trois (14-01-2008) [Rip by Ampli].avi -: series: Tout sur moi - season: 2 - episodeNumber: 2 - title: Ménage à trois - date: 2008-01-14 - -? The.Mentalist.2x21.18-5-4.ENG.-.sub.FR.HDTV.XviD-AlFleNi-TeaM.[tvu.org.ru].avi -: series: The Mentalist - season: 2 - episodeNumber: 21 - title: 18-5-4 - language: english - subtitleLanguage: french - format: HDTV - videoCodec: Xvid - releaseGroup: AlFleNi-TeaM - website: tvu.org.ru - -? series/__ Incomplete __/Dr Slump (Catalan)/Dr._Slump_-_003_DVB-Rip_Catalan_by_kelf.avi -: series: Dr Slump - episodeNumber: 3 - format: DVB - language: catalan - -? series/Ren and Stimpy - Black_hole_[DivX].avi -: series: Ren and Stimpy - title: Black hole - videoCodec: DivX - -? Series/Walt Disney/Donald.Duck.-.Good.Scouts.[www.bigernie.jump.to].avi -: series: Donald Duck - title: Good Scouts - website: www.bigernie.jump.to - -? Series/Neverwhere/Neverwhere.05.Down.Street.[tvu.org.ru].avi -: series: Neverwhere - episodeNumber: 5 - title: Down Street - website: tvu.org.ru - -? 
Series/South Park/Season 4/South.Park.4x07.Cherokee.Hair.Tampons.DVDRip.[tvu.org.ru].avi -: series: South Park - season: 4 - episodeNumber: 7 - title: Cherokee Hair Tampons - format: DVD - website: tvu.org.ru - -? Series/Kaamelott/Kaamelott - Livre V - Ep 23 - Le Forfait.avi -: series: Kaamelott - episodeNumber: 23 - title: Le Forfait - -? Series/Duckman/Duckman - 110 (10) - 20021218 - Cellar Beware.avi -: series: Duckman - season: 1 - episodeNumber: 10 - date: 2002-12-18 - title: Cellar Beware - -? Series/Ren & Stimpy/Ren And Stimpy - Onward & Upward-Adult Party Cartoon.avi -: series: Ren And Stimpy - title: Onward & Upward-Adult Party Cartoon - -? Series/Breaking Bad/Minisodes/Breaking.Bad.(Minisodes).01.Good.Cop.Bad.Cop.WEBRip.XviD.avi -: series: Breaking Bad - episodeFormat: Minisode - episodeNumber: 1 - title: Good Cop Bad Cop - format: WEBRip - videoCodec: XviD - -? Series/My Name Is Earl/My.Name.Is.Earl.S01Extras.-.Bad.Karma.DVDRip.XviD.avi -: series: My Name Is Earl - season: 1 - title: Bad Karma - format: DVD - episodeDetails: Extras - videoCodec: XviD - -? series/Freaks And Geeks/Season 1/Episode 4 - Kim Kelly Is My Friend-eng(1).srt -: series: Freaks And Geeks - season: 1 - episodeNumber: 4 - title: Kim Kelly Is My Friend - language: English - -? /mnt/series/The Big Bang Theory/S01/The.Big.Bang.Theory.S01E01.mkv -: series: The Big Bang Theory - season: 1 - episodeNumber: 1 - -? /media/Parks_and_Recreation-s03-e01.mkv -: series: Parks and Recreation - season: 3 - episodeNumber: 1 - -? /media/Parks_and_Recreation-s03-e02-Flu_Season.mkv -: series: Parks and Recreation - season: 3 - title: Flu Season - episodeNumber: 2 - -? /media/Parks_and_Recreation-s03-x01.mkv -: series: Parks and Recreation - season: 3 - bonusNumber: 1 - -? /media/Parks_and_Recreation-s03-x02-Gag_Reel.mkv -: series: Parks and Recreation - season: 3 - bonusNumber: 2 - bonusTitle: Gag Reel - -? 
/media/Band_of_Brothers-e01-Currahee.mkv -: series: Band of Brothers - episodeNumber: 1 - title: Currahee - -? /media/Band_of_Brothers-x02-We_Stand_Alone_Together.mkv -: series: Band of Brothers - bonusNumber: 2 - bonusTitle: We Stand Alone Together - -? /TV Shows/Mad.M-5x9.mkv -: series: Mad M - season: 5 - episodeNumber: 9 - -? /TV Shows/new.girl.117.hdtv-lol.mp4 -: series: New Girl - season: 1 - episodeNumber: 17 - format: HDTV - releaseGroup: LOL - -? Kaamelott - 5x44x45x46x47x48x49x50.avi -: series: Kaamelott - season: 5 - episodeNumber: 44 - episodeList: [44, 45, 46, 47, 48, 49, 50] - -? Example S01E01-02.avi -: series: Example - season: 1 - episodeNumber: 1 - episodeList: [1, 2] - -? Example S01E01E02.avi -: series: Example - season: 1 - episodeNumber: 1 - episodeList: [1, 2] - -? Series/Baccano!/Baccano!_-_T1_-_Trailer_-_[Ayu](dae8173e).mkv -: series: Baccano! - other: Trailer - releaseGroup: Ayu - title: T1 - crc32: dae8173e - -? Series/Doctor Who (2005)/Season 06/Doctor Who (2005) - S06E01 - The Impossible Astronaut (1).avi -: series: Doctor Who - year: 2005 - season: 6 - episodeNumber: 1 - title: The Impossible Astronaut - -? Parks and Recreation - [04x12] - Ad Campaign.avi -: series: Parks and Recreation - season: 4 - episodeNumber: 12 - title: Ad Campaign - -? The Sopranos - [05x07] - In Camelot.mp4 -: series: The Sopranos - season: 5 - episodeNumber: 7 - title: In Camelot - -? The.Office.(US).1x03.Health.Care.HDTV.XviD-LOL.avi -: series: The Office (US) - country: US - season: 1 - episodeNumber: 3 - title: Health Care - format: HDTV - videoCodec: XviD - releaseGroup: LOL - -? /Volumes/data-1/Series/Futurama/Season 3/Futurama_-_S03_DVD_Bonus_-_Deleted_Scenes_Part_3.ogm -: series: Futurama - season: 3 - part: 3 - other: Bonus - title: Deleted Scenes - format: DVD - -? Ben.and.Kate.S01E02.720p.HDTV.X264-DIMENSION.mkv -: series: Ben and Kate - season: 1 - episodeNumber: 2 - screenSize: 720p - format: HDTV - videoCodec: h264 - releaseGroup: DIMENSION - -? 
/volume1/TV Series/Drawn Together/Season 1/Drawn Together 1x04 Requiem for a Reality Show.avi -: series: Drawn Together - season: 1 - episodeNumber: 4 - title: Requiem for a Reality Show - -? Sons.of.Anarchy.S05E06.720p.WEB.DL.DD5.1.H.264-CtrlHD.mkv -: series: Sons of Anarchy - season: 5 - episodeNumber: 6 - screenSize: 720p - format: WEB-DL - audioChannels: "5.1" - audioCodec: DolbyDigital - videoCodec: h264 - releaseGroup: CtrlHD - -? /media/bdc64bfe-e36f-4af8-b550-e6fd2dfaa507/TV_Shows/Doctor Who (2005)/Saison 6/Doctor Who (2005) - S06E13 - The Wedding of River Song.mkv -: series: Doctor Who - season: 6 - episodeNumber: 13 - year: 2005 - title: The Wedding of River Song - idNumber: bdc64bfe-e36f-4af8-b550-e6fd2dfaa507 - -? /mnt/videos/tvshows/Doctor Who/Season 06/E13 - The Wedding of River Song.mkv -: series: Doctor Who - season: 6 - episodeNumber: 13 - title: The Wedding of River Song - -? The.Simpsons.S24E03.Adventures.in.Baby-Getting.720p.WEB-DL.DD5.1.H.264-CtrlHD.mkv -: series: The Simpsons - season: 24 - episodeNumber: 3 - title: Adventures in Baby-Getting - screenSize: 720p - format: WEB-DL - audioChannels: "5.1" - audioCodec: DolbyDigital - videoCodec: h264 - releaseGroup: CtrlHD - -? /home/disaster/Videos/TV/Merlin/merlin_2008.5x02.arthurs_bane_part_two.repack.720p_hdtv_x264-fov.mkv -: series: Merlin - season: 5 - episodeNumber: 2 - part: 2 - title: Arthurs bane - screenSize: 720p - format: HDTV - videoCodec: h264 - releaseGroup: Fov - year: 2008 - other: Proper - -? "Da Vinci's Demons - 1x04 - The Magician.mkv" -: series: "Da Vinci's Demons" - season: 1 - episodeNumber: 4 - title: The Magician - -? CSI.S013E18.Sheltered.720p.WEB-DL.DD5.1.H.264.mkv -: series: CSI - season: 13 - episodeNumber: 18 - title: Sheltered - screenSize: 720p - format: WEB-DL - audioChannels: "5.1" - audioCodec: DolbyDigital - videoCodec: h264 - -? 
Game of Thrones S03E06 1080i HDTV DD5.1 MPEG2-TrollHD.ts -: series: Game of Thrones - season: 3 - episodeNumber: 6 - screenSize: 1080i - format: HDTV - audioChannels: "5.1" - audioCodec: DolbyDigital - videoCodec: MPEG2 - releaseGroup: TrollHD - -? gossip.girl.s01e18.hdtv.xvid-2hd.eng.srt -: series: gossip girl - season: 1 - episodeNumber: 18 - format: HDTV - videoCodec: XviD - releaseGroup: 2HD - subtitleLanguage: english - -? Wheels.S03E01E02.720p.HDTV.x264-IMMERSE.mkv -: series: Wheels - season: 3 - episodeNumber: 1 - episodeList: [1, 2] - screenSize: 720p - format: HDTV - videoCodec: h264 - releaseGroup: IMMERSE - -? Wheels.S03E01-02.720p.HDTV.x264-IMMERSE.mkv -: series: Wheels - season: 3 - episodeNumber: 1 - episodeList: [1, 2] - screenSize: 720p - format: HDTV - videoCodec: h264 - releaseGroup: IMMERSE - -? Wheels.S03E01-E02.720p.HDTV.x264-IMMERSE.mkv -: series: Wheels - season: 3 - episodeNumber: 1 - episodeList: [1, 2] - screenSize: 720p - format: HDTV - videoCodec: h264 - releaseGroup: IMMERSE - -? Wheels.S03E01-03.720p.HDTV.x264-IMMERSE.mkv -: series: Wheels - season: 3 - episodeNumber: 1 - episodeList: [1, 2, 3] - screenSize: 720p - format: HDTV - videoCodec: h264 - releaseGroup: IMMERSE - -? Marvels.Agents.of.S.H.I.E.L.D.S01E06.720p.HDTV.X264-DIMENSION.mkv -: series: Marvels Agents of S.H.I.E.L.D. - season: 1 - episodeNumber: 6 - screenSize: 720p - format: HDTV - videoCodec: h264 - releaseGroup: DIMENSION - -? Marvels.Agents.of.S.H.I.E.L.D..S01E06.720p.HDTV.X264-DIMENSION.mkv -: series: Marvels Agents of S.H.I.E.L.D. - season: 1 - episodeNumber: 6 - screenSize: 720p - format: HDTV - videoCodec: h264 - releaseGroup: DIMENSION - -? Series/Friday Night Lights/Season 1/Friday Night Lights S01E19 - Ch-Ch-Ch-Ch-Changes.avi -: series: Friday Night Lights - season: 1 - episodeNumber: 19 - title: Ch-Ch-Ch-Ch-Changes - -? 
Dexter Saison VII FRENCH.BDRip.XviD-MiND.nfo -: series: Dexter - season: 7 - videoCodec: XviD - language: French - format: BluRay - releaseGroup: MiND - -? Dexter Saison sept FRENCH.BDRip.XviD-MiND.nfo -: series: Dexter - season: 7 - videoCodec: XviD - language: French - format: BluRay - releaseGroup: MiND - -? "Pokémon S16 - E29 - 1280*720 HDTV VF.mkv" -: series: Pokémon - format: HDTV - language: French - season: 16 - episodeNumber: 29 - screenSize: 720p - -? One.Piece.E576.VOSTFR.720p.HDTV.x264-MARINE-FORD.mkv -: episodeNumber: 576 - videoCodec: h264 - format: HDTV - series: One Piece - releaseGroup: MARINE-FORD - subtitleLanguage: French - screenSize: 720p - -? Dexter.S08E12.FINAL.MULTi.1080p.BluRay.x264-MiND.mkv -: videoCodec: h264 - episodeNumber: 12 - season: 8 - format: BluRay - series: Dexter - other: final - language: Multiple languages - releaseGroup: MiND - screenSize: 1080p - -? One Piece - E623 VOSTFR HD [www.manga-ddl-free.com].mkv -: website: www.manga-ddl-free.com - episodeNumber: 623 - subtitleLanguage: French - series: One Piece - other: HD - -? Falling Skies Saison 1.HDLight.720p.x264.VFF.mkv -: language: French - screenSize: 720p - season: 1 - series: Falling Skies - videoCodec: h264 - other: HDLight - -? Sleepy.Hollow.S01E09.720p.WEB-DL.DD5.1.H.264-BP.mkv -: episodeNumber: 9 - videoCodec: h264 - format: WEB-DL - series: Sleepy Hollow - audioChannels: "5.1" - screenSize: 720p - season: 1 - videoProfile: BP - audioCodec: DolbyDigital - -? Sleepy.Hollow.S01E09.720p.WEB-DL.DD5.1.H.264-BS.mkv -: episodeNumber: 9 - videoCodec: h264 - format: WEB-DL - series: Sleepy Hollow - audioChannels: "5.1" - screenSize: 720p - season: 1 - releaseGroup: BS - audioCodec: DolbyDigital - -? Battlestar.Galactica.S00.Pilot.FRENCH.DVDRip.XviD-NOTAG.avi -: series: Battlestar Galactica - season: 0 - title: Pilot - episodeDetails: Pilot - language: French - format: DVD - videoCodec: XviD - releaseGroup: NOTAG - -? 
The Big Bang Theory S00E00 Unaired Pilot VOSTFR TVRip XviD-VioCs -: options: -n - series: The Big Bang Theory - season: 0 - episodeNumber: 0 - subtitleLanguage: French - format: TV - videoCodec: XviD - releaseGroup: VioCs - episodeDetails: [Unaired, Pilot] - title: Unaired Pilot - -? The Big Bang Theory S01E00 PROPER Unaired Pilot TVRip XviD-GIGGITY -: options: -n - series: The Big Bang Theory - season: 1 - episodeNumber: 0 - format: TV - videoCodec: XviD - releaseGroup: GIGGITY - other: proper - episodeDetails: [Unaired, Pilot] - title: Unaired Pilot - -? Pawn.Stars.S2014E18.720p.HDTV.x264-KILLERS -: options: -n - series: Pawn Stars - season: 2014 - year: 2014 - episodeNumber: 18 - screenSize: 720p - format: HDTV - videoCodec: h264 - releaseGroup: KILLERS - -? 2.Broke.Girls.S03E10.480p.HDTV.x264-mSD.mkv -: series: 2 Broke Girls - season: 3 - episodeNumber: 10 - screenSize: 480p - format: HDTV - videoCodec: h264 - releaseGroup: mSD - -? House.of.Cards.2013.S02E03.1080p.NF.WEBRip.DD5.1.x264-NTb.mkv -: series: House of Cards - year: 2013 - season: 2 - episodeNumber: 3 - screenSize: 1080p - other: Netflix - format: Webrip - audioChannels: "5.1" - audioCodec: DolbyDigital - videoCodec: h264 - releaseGroup: NTb - -? the.100.109.hdtv-lol.mp4 -: series: the 100 - season: 1 - episodeNumber: 9 - format: HDTV - releaseGroup: lol - -? 03-Criminal.Minds.5x03.Reckoner.ENG.-.sub.FR.HDTV.XviD-STi.[tvu.org.ru].avi -: series: Criminal Minds - language: English - subtitleLanguage: French - season: 5 - episodeNumber: 3 - videoCodec: XviD - format: HDTV - website: tvu.org.ru - releaseGroup: STi - title: Reckoner - -? 03-Criminal.Minds.avi -: series: Criminal Minds - episodeNumber: 3 - -? '[Evil-Saizen]_Laughing_Salesman_14_[DVD][1C98686A].mkv' -: crc32: 1C98686A - episodeNumber: 14 - format: DVD - releaseGroup: Evil-Saizen - series: Laughing Salesman - -? 
'[Kaylith] Zankyou no Terror - 04 [480p][B4D4514E].mp4' -: crc32: B4D4514E - episodeNumber: 4 - releaseGroup: Kaylith - screenSize: 480p - series: Zankyou no Terror - -? '[PuyaSubs!] Seirei Tsukai no Blade Dance - 05 [720p][32DD560E].mkv' -: crc32: 32DD560E - episodeNumber: 5 - releaseGroup: PuyaSubs! - screenSize: 720p - series: Seirei Tsukai no Blade Dance - -? '[Doremi].Happiness.Charge.Precure.27.[1280x720].[DC91581A].mkv' -: crc32: DC91581A - episodeNumber: 27 - releaseGroup: Doremi - screenSize: 720p - series: Happiness Charge Precure - -? "[Daisei] Free!:Iwatobi Swim Club - 01 ~ (BD 720p 10-bit AAC) [99E8E009].mkv" -: audioCodec: AAC - crc32: 99E8E009 - episodeNumber: 1 - format: BluRay - releaseGroup: Daisei - screenSize: 720p - series: Free!:Iwatobi Swim Club - videoProfile: 10bit - -? '[Tsundere] Boku wa Tomodachi ga Sukunai - 03 [BDRip h264 1920x1080 10bit FLAC][AF0C22CC].mkv' -: audioCodec: Flac - crc32: AF0C22CC - episodeNumber: 3 - format: BluRay - releaseGroup: Tsundere - screenSize: 1080p - series: Boku wa Tomodachi ga Sukunai - videoCodec: h264 - videoProfile: 10bit - -? '[t.3.3.d]_Mikakunin_de_Shinkoukei_-_12_[720p][5DDC1352].mkv' -: crc32: 5DDC1352 - episodeNumber: 12 - screenSize: 720p - series: Mikakunin de Shinkoukei - releaseGroup: t.3.3.d - -? '[Anime-Koi] Sabagebu! - 06 [h264-720p][ABB3728A].mkv' -: crc32: ABB3728A - episodeNumber: 6 - releaseGroup: Anime-Koi - screenSize: 720p - series: Sabagebu! - videoCodec: h264 - -? '[aprm-Diogo4D] [BD][1080p] Nagi no Asukara 08 [4D102B7C].mkv' -: crc32: 4D102B7C - episodeNumber: 8 - format: BluRay - releaseGroup: aprm-Diogo4D - screenSize: 1080p - series: Nagi no Asukara - -? '[Akindo-SSK] Zankyou no Terror - 05 [720P][Sub_ITA][F5CCE87C].mkv' -: crc32: F5CCE87C - episodeNumber: 5 - releaseGroup: Akindo-SSK - screenSize: 720p - series: Zankyou no Terror - subtitleLanguage: it - -? Naruto Shippuden Episode 366 VOSTFR.avi -: episodeNumber: 366 - series: Naruto Shippuden - subtitleLanguage: fr - -? 
Naruto Shippuden Episode 366v2 VOSTFR.avi -: episodeNumber: 366 - version: 2 - series: Naruto Shippuden - subtitleLanguage: fr - -? '[HorribleSubs] Ao Haru Ride - 06 [480p].mkv' -: episodeNumber: 6 - releaseGroup: HorribleSubs - screenSize: 480p - series: Ao Haru Ride - -? '[DeadFish] Tari Tari - 01 [BD][720p][AAC].mp4' -: audioCodec: AAC - episodeNumber: 1 - format: BluRay - releaseGroup: DeadFish - screenSize: 720p - series: Tari Tari - -? '[NoobSubs] Sword Art Online II 06 (720p 8bit AAC).mp4' -: audioCodec: AAC - episodeNumber: 6 - releaseGroup: NoobSubs - screenSize: 720p - series: Sword Art Online II - videoProfile: 8bit - -? '[DeadFish] 01 - Tari Tari [BD][720p][AAC].mp4' -: audioCodec: AAC - episodeNumber: 1 - format: BluRay - releaseGroup: DeadFish - screenSize: 720p - series: Tari Tari - -? '[NoobSubs] 06 Sword Art Online II (720p 8bit AAC).mp4' -: audioCodec: AAC - episodeNumber: 6 - releaseGroup: NoobSubs - screenSize: 720p - series: Sword Art Online II - videoProfile: 8bit - -? '[DeadFish] 12 - Tari Tari [BD][720p][AAC].mp4' -: audioCodec: AAC - episodeNumber: 12 - format: BluRay - releaseGroup: DeadFish - screenSize: 720p - series: Tari Tari - -? Something.Season.2.1of4.Ep.Title.HDTV.torrent -: episodeCount: 4 - episodeNumber: 1 - format: HDTV - season: 2 - series: Something - title: Title - extension: torrent - -? Something.Season.2of5.3of9.Ep.Title.HDTV.torrent -: episodeCount: 9 - episodeNumber: 3 - format: HDTV - season: 2 - seasonCount: 5 - series: Something - title: Title - extension: torrent - -? Something.Other.Season.3of5.Complete.HDTV.torrent -: format: HDTV - other: Complete - season: 3 - seasonCount: 5 - series: Something Other - extension: torrent - -? Something.Other.Season.1-3.avi -: season: 1 - seasonList: - - 1 - - 2 - - 3 - series: Something Other - -? Something.Other.Season.1&3.avi -: season: 1 - seasonList: - - 1 - - 3 - series: Something Other - -? 
Something.Other.Season.1&3-1to12ep.avi -: season: 1 - seasonList: - - 1 - - 3 - series: Something Other - -? Something.Other.saison 1 2 & 4 a 7.avi -: season: 1 - seasonList: - - 1 - - 2 - - 4 - - 5 - - 6 - - 7 - series: Something Other - -? W2Test.123.HDTV.XViD-FlexGet -: options: -n - episodeNumber: 23 - season: 1 - format: HDTV - releaseGroup: FlexGet - series: W2Test - videoCodec: XviD - -? W2Test.123.HDTV.XViD-FlexGet -: options: -n --episode-prefer-number - episodeNumber: 123 - format: HDTV - releaseGroup: FlexGet - series: W2Test - videoCodec: XviD - -? FooBar.0307.PDTV-FlexGet -: options: -n --episode-prefer-number - episodeNumber: 7 - format: DVB - releaseGroup: FlexGet - season: 3 - series: FooBar - -? FooBar.307.PDTV-FlexGet -: options: -n --episode-prefer-number - episodeNumber: 307 - format: DVB - releaseGroup: FlexGet - series: FooBar - -? FooBar.07.PDTV-FlexGet -: options: -n --episode-prefer-number - episodeNumber: 7 - format: DVB - releaseGroup: FlexGet - series: FooBar - -? FooBar.7.PDTV-FlexGet -: options: -n -t episode --episode-prefer-number - episodeNumber: 7 - format: DVB - releaseGroup: FlexGet - series: FooBar - -? FooBar.0307.PDTV-FlexGet -: options: -n - episodeNumber: 7 - format: DVB - releaseGroup: FlexGet - season: 3 - series: FooBar - -? FooBar.307.PDTV-FlexGet -: options: -n - episodeNumber: 7 - format: DVB - releaseGroup: FlexGet - season: 3 - series: FooBar - -? FooBar.07.PDTV-FlexGet -: options: -n - episodeNumber: 7 - format: DVB - releaseGroup: FlexGet - series: FooBar - -? FooBar.07v4.PDTV-FlexGet -: options: -n - episodeNumber: 7 - version: 4 - format: DVB - releaseGroup: FlexGet - series: FooBar - -? FooBar.7.PDTV-FlexGet -: options: -n -t episode - format: DVB - releaseGroup: FlexGet - series: FooBar 7 - -? FooBar.7v3.PDTV-FlexGet -: options: -n -t episode - episodeNumber: 7 - version: 3 - format: DVB - releaseGroup: FlexGet - series: FooBar - -? 
Test.S02E01.hdtv.real.proper -: options: -n - episodeNumber: 1 - format: HDTV - other: Proper - properCount: 2 - season: 2 - series: Test - -? Real.Test.S02E01.hdtv.proper -: options: -n - episodeNumber: 1 - format: HDTV - other: Proper - properCount: 1 - season: 2 - series: Real Test - -? Test.Real.S02E01.hdtv.proper -: options: -n - episodeNumber: 1 - format: HDTV - other: Proper - properCount: 1 - season: 2 - series: Test Real - -? Test.S02E01.hdtv.proper -: options: -n - episodeNumber: 1 - format: HDTV - other: Proper - properCount: 1 - season: 2 - series: Test - -? Test.S02E01.hdtv.real.repack.proper -: options: -n - episodeNumber: 1 - format: HDTV - other: Proper - properCount: 3 - season: 2 - series: Test - -? Date.Show.03-29-2012.HDTV.XViD-FlexGet -: options: -n - date: 2012-03-29 - format: HDTV - releaseGroup: FlexGet - series: Date Show - videoCodec: XviD - -? Something.1x5.Season.Complete-FlexGet -: options: -n - episodeNumber: 5 - other: Complete - season: 1 - series: Something - releaseGroup: FlexGet - -? Something Seasons 1 & 2 - Complete -: options: -n - other: Complete - season: 1 - seasonList: - - 1 - - 2 - series: Something - -? Something Seasons 4 Complete -: options: -n - other: Complete - season: 4 - series: Something - -? Something.1xAll.Season.Complete-FlexGet -: options: -n - other: Complete - season: 1 - series: Something - releaseGroup: FlexGet - -? Something.1xAll-FlexGet -: options: -n - other: Complete - season: 1 - series: Something - releaseGroup: FlexGet - -? FlexGet.US.S2013E14.Title.Here.720p.HDTV.AAC5.1.x264-NOGRP -: options: -n - audioChannels: '5.1' - audioCodec: AAC - country: US - episodeNumber: 14 - format: HDTV - releaseGroup: NOGRP - screenSize: 720p - season: 2013 - series: FlexGet (US) - title: Title Here - videoCodec: h264 - year: 2013 - -? 
FlexGet.14.of.21.Title.Here.720p.HDTV.AAC5.1.x264-NOGRP -: options: -n - audioChannels: '5.1' - audioCodec: AAC - episodeCount: 21 - episodeNumber: 14 - format: HDTV - releaseGroup: NOGRP - screenSize: 720p - series: FlexGet - title: Title Here - videoCodec: h264 - -? FlexGet.Series.2013.14.of.21.Title.Here.720p.HDTV.AAC5.1.x264-NOGRP -: options: -n - audioChannels: '5.1' - audioCodec: AAC - episodeCount: 21 - episodeNumber: 14 - format: HDTV - releaseGroup: NOGRP - screenSize: 720p - season: 2013 - series: FlexGet - title: Title Here - videoCodec: h264 - year: 2013 - -? Something.S04E05E09 -: options: -n - episodeList: - - 5 - - 6 - - 7 - - 8 - - 9 - episodeNumber: 5 - season: 4 - series: Something - -? FooBar 360 1080i -: options: -n -t episode --episode-prefer-number - episodeNumber: 360 - screenSize: 1080i - series: FooBar - -? FooBar 360 1080i -: options: -n -t episode - episodeNumber: 60 - season: 3 - screenSize: 1080i - series: FooBar - -? FooBar 360 -: options: -n -t episode - screenSize: 360p - series: FooBar - -? BarFood christmas special HDTV -: options: -n -t episode --expected-series BarFood - format: HDTV - series: BarFood - title: christmas special - episodeDetails: Special - -? Something.2008x12.13-FlexGet -: options: -n -t episode - series: Something - date: 2008-12-13 - title: FlexGet - -? '[Ignored] Test 12' -: options: -n - episodeNumber: 12 - releaseGroup: Ignored - series: Test - -? '[FlexGet] Test 12' -: options: -n - episodeNumber: 12 - releaseGroup: FlexGet - series: Test - -? Test.13.HDTV-Ignored -: options: -n - episodeNumber: 13 - format: HDTV - releaseGroup: Ignored - series: Test - -? Test.13.HDTV-Ignored -: options: -n --expected-series test - episodeNumber: 13 - format: HDTV - releaseGroup: Ignored - series: Test - -? Test.13.HDTV-Ignored -: series: Test - episodeNumber: 13 - format: HDTV - releaseGroup: Ignored - -? 
Test.13.HDTV-Ignored -: options: -n --expected-group "Name;FlexGet" - episodeNumber: 13 - format: HDTV - releaseGroup: Ignored - series: Test - -? Test.13.HDTV-FlexGet -: options: -n - episodeNumber: 13 - format: HDTV - releaseGroup: FlexGet - series: Test - -? Test.14.HDTV-Name -: options: -n - episodeNumber: 14 - format: HDTV - releaseGroup: Name - series: Test - -? Real.Time.With.Bill.Maher.2014.10.31.HDTV.XviD-AFG.avi -: date: 2014-10-31 - format: HDTV - releaseGroup: AFG - series: Real Time With Bill Maher - videoCodec: XviD diff --git a/libs/guessit/test/episodes.yml b/libs/guessit/test/episodes.yml new file mode 100644 index 00000000..adc4755e --- /dev/null +++ b/libs/guessit/test/episodes.yml @@ -0,0 +1,2048 @@ +? __default__ +: type: episode + +? Series/Californication/Season 2/Californication.2x05.Vaginatown.HDTV.XviD-0TV.avi +: title: Californication + season: 2 + episode: 5 + episode_title: Vaginatown + format: HDTV + video_codec: XviD + release_group: 0TV + container: avi + +? Series/dexter/Dexter.5x02.Hello,.Bandit.ENG.-.sub.FR.HDTV.XviD-AlFleNi-TeaM.[tvu.org.ru].avi +: title: Dexter + season: 5 + episode: 2 + episode_title: Hello, Bandit + language: English + subtitle_language: French + format: HDTV + video_codec: XviD + release_group: AlFleNi-TeaM + website: tvu.org.ru + container: avi + +? Series/Treme/Treme.1x03.Right.Place,.Wrong.Time.HDTV.XviD-NoTV.avi +: title: Treme + season: 1 + episode: 3 + episode_title: Right Place, Wrong Time + format: HDTV + video_codec: XviD + release_group: NoTV + +? Series/Duckman/Duckman - S1E13 Joking The Chicken (unedited).avi +: title: Duckman + season: 1 + episode: 13 + episode_title: Joking The Chicken + +? Series/Simpsons/Saison 12 Français/Simpsons,.The.12x08.A.Bas.Le.Sergent.Skinner.FR.avi +: title: The Simpsons + season: 12 + episode: 8 + episode_title: A Bas Le Sergent Skinner + language: French + +? 
Series/Duckman/Duckman - 101 (01) - 20021107 - I, Duckman.avi +: title: Duckman + season: 1 + episode: 1 + episode_title: I, Duckman + date: 2002-11-07 + +? Series/Simpsons/Saison 12 Français/Simpsons,.The.12x08.A.Bas.Le.Sergent.Skinner.FR.avi +: title: The Simpsons + season: 12 + episode: 8 + episode_title: A Bas Le Sergent Skinner + language: French + +? Series/Futurama/Season 3 (mkv)/[â„¢] Futurama - S03E22 - Le chef de fer à 30% ( 30 Percent Iron Chef ).mkv +: title: Futurama + season: 3 + episode: 22 + episode_title: Le chef de fer à 30% + +? Series/The Office/Season 6/The Office - S06xE01.avi +: title: The Office + season: 6 + episode: 1 + +? series/The Office/Season 4/The Office [401] Fun Run.avi +: title: The Office + season: 4 + episode: 1 + episode_title: Fun Run + +? Series/Mad Men Season 1 Complete/Mad.Men.S01E01.avi +: title: Mad Men + season: 1 + episode: 1 + other: Complete + +? series/Psych/Psych S02 Season 2 Complete English DVD/Psych.S02E02.65.Million.Years.Off.avi +: title: Psych + season: 2 + episode: 2 + episode_title: 65 Million Years Off + language: english + format: DVD + other: Complete + +? series/Psych/Psych S02 Season 2 Complete English DVD/Psych.S02E03.Psy.Vs.Psy.Français.srt +: title: Psych + season: 2 + episode: 3 + episode_title: Psy Vs Psy + format: DVD + language: English + subtitle_language: French + other: Complete + +? Series/Pure Laine/Pure.Laine.1x01.Toutes.Couleurs.Unies.FR.(Québec).DVB-Kceb.[tvu.org.ru].avi +: title: Pure Laine + season: 1 + episode: 1 + episode_title: Toutes Couleurs Unies + format: DVB + release_group: Kceb + language: french + website: tvu.org.ru + +? Series/Pure Laine/2x05 - Pure Laine - Je Me Souviens.avi +: title: Pure Laine + season: 2 + episode: 5 + episode_title: Je Me Souviens + +? Series/Tout sur moi/Tout sur moi - S02E02 - Ménage à trois (14-01-2008) [Rip by Ampli].avi +: title: Tout sur moi + season: 2 + episode: 2 + episode_title: Ménage à trois + date: 2008-01-14 + +? 
The.Mentalist.2x21.18-5-4.ENG.-.sub.FR.HDTV.XviD-AlFleNi-TeaM.[tvu.org.ru].avi +: title: The Mentalist + season: 2 + episode: 21 + episode_title: 18-5-4 + language: english + subtitle_language: french + format: HDTV + video_codec: XviD + release_group: AlFleNi-TeaM + website: tvu.org.ru + +? series/__ Incomplete __/Dr Slump (Catalan)/Dr._Slump_-_003_DVB-Rip_Catalan_by_kelf.avi +: title: Dr Slump + episode: 3 + format: DVB + language: catalan + +# Disabling this test because it just doesn't looks like a serie ... +#? series/Ren and Stimpy - Black_hole_[DivX].avi +#: title: Ren and Stimpy +# episode_title: Black hole +# video_codec: DivX + +# Disabling this test because it just doesn't looks like a serie ... +# ? Series/Walt Disney/Donald.Duck.-.Good.Scouts.[www.bigernie.jump.to].avi +#: title: Donald Duck +# episode_title: Good Scouts +# website: www.bigernie.jump.to + +? Series/Neverwhere/Neverwhere.05.Down.Street.[tvu.org.ru].avi +: title: Neverwhere + episode: 5 + episode_title: Down Street + website: tvu.org.ru + +? Series/South Park/Season 4/South.Park.4x07.Cherokee.Hair.Tampons.DVDRip.[tvu.org.ru].avi +: title: South Park + season: 4 + episode: 7 + episode_title: Cherokee Hair Tampons + format: DVD + website: tvu.org.ru + +? Series/Kaamelott/Kaamelott - Livre V - Ep 23 - Le Forfait.avi +: title: Kaamelott + alternative_title: Livre V + episode: 23 + episode_title: Le Forfait + +? Series/Duckman/Duckman - 110 (10) - 20021218 - Cellar Beware.avi +: title: Duckman + season: 1 + episode: 10 + date: 2002-12-18 + episode_title: Cellar Beware + +# Removing this test because it doesn't look like a series +# ? Series/Ren & Stimpy/Ren And Stimpy - Onward & Upward-Adult Party Cartoon.avi +# : title: Ren And Stimpy +# episode_title: Onward & Upward-Adult Party Cartoon + +? 
Series/Breaking Bad/Minisodes/Breaking.Bad.(Minisodes).01.Good.Cop.Bad.Cop.WEBRip.XviD.avi +: title: Breaking Bad + episode_format: Minisode + episode: 1 + episode_title: Good Cop Bad Cop + format: WEBRip + video_codec: XviD + +? Series/My Name Is Earl/My.Name.Is.Earl.S01Extras.-.Bad.Karma.DVDRip.XviD.avi +: title: My Name Is Earl + season: 1 + episode_title: Extras - Bad Karma + format: DVD + episode_details: Extras + video_codec: XviD + +? series/Freaks And Geeks/Season 1/Episode 4 - Kim Kelly Is My Friend-eng(1).srt +: title: Freaks And Geeks + season: 1 + episode: 4 + episode_title: Kim Kelly Is My Friend + subtitle_language: English # This is really a subtitle_language, despite guessit 1.x assert for language. + +? /mnt/series/The Big Bang Theory/S01/The.Big.Bang.Theory.S01E01.mkv +: title: The Big Bang Theory + season: 1 + episode: 1 + +? /media/Parks_and_Recreation-s03-e01.mkv +: title: Parks and Recreation + season: 3 + episode: 1 + +? /media/Parks_and_Recreation-s03-e02-Flu_Season.mkv +: title: Parks and Recreation + season: 3 + episode_title: Flu Season + episode: 2 + +? /media/Parks_and_Recreation-s03-x01.mkv +: title: Parks and Recreation + season: 3 + episode: 1 + +? /media/Parks_and_Recreation-s03-x02-Gag_Reel.mkv +: title: Parks and Recreation + season: 3 + episode: 2 + episode_title: Gag Reel + +? /media/Band_of_Brothers-e01-Currahee.mkv +: title: Band of Brothers + episode: 1 + episode_title: Currahee + +? /media/Band_of_Brothers-x02-We_Stand_Alone_Together.mkv +: title: Band of Brothers + bonus: 2 + bonus_title: We Stand Alone Together + +? /TV Shows/Mad.M-5x9.mkv +: title: Mad M + season: 5 + episode: 9 + +? /TV Shows/new.girl.117.hdtv-lol.mp4 +: title: new girl + season: 1 + episode: 17 + format: HDTV + release_group: lol + +? Kaamelott - 5x44x45x46x47x48x49x50.avi +: title: Kaamelott + season: 5 + episode: [44, 45, 46, 47, 48, 49, 50] + +? Example S01E01-02.avi +? Example S01E01E02.avi +: title: Example + season: 1 + episode: [1, 2] + +? 
Series/Baccano!/Baccano!_-_T1_-_Trailer_-_[Ayu](dae8173e).mkv +: title: Baccano! + other: Trailer + release_group: Ayu + episode_title: T1 + crc32: dae8173e + +? Series/Doctor Who (2005)/Season 06/Doctor Who (2005) - S06E01 - The Impossible Astronaut (1).avi +: title: Doctor Who + year: 2005 + season: 6 + episode: 1 + episode_title: The Impossible Astronaut + +? The Sopranos - [05x07] - In Camelot.mp4 +: title: The Sopranos + season: 5 + episode: 7 + episode_title: In Camelot + +? The.Office.(US).1x03.Health.Care.HDTV.XviD-LOL.avi +: title: The Office + country: US + season: 1 + episode: 3 + episode_title: Health Care + format: HDTV + video_codec: XviD + release_group: LOL + +? /Volumes/data-1/Series/Futurama/Season 3/Futurama_-_S03_DVD_Bonus_-_Deleted_Scenes_Part_3.ogm +: title: Futurama + season: 3 + part: 3 + other: Bonus + episode_title: Deleted Scenes + format: DVD + +? Ben.and.Kate.S01E02.720p.HDTV.X264-DIMENSION.mkv +: title: Ben and Kate + season: 1 + episode: 2 + screen_size: 720p + format: HDTV + video_codec: h264 + release_group: DIMENSION + +? /volume1/TV Series/Drawn Together/Season 1/Drawn Together 1x04 Requiem for a Reality Show.avi +: title: Drawn Together + season: 1 + episode: 4 + episode_title: Requiem for a Reality Show + +? Sons.of.Anarchy.S05E06.720p.WEB.DL.DD5.1.H.264-CtrlHD.mkv +: title: Sons of Anarchy + season: 5 + episode: 6 + screen_size: 720p + format: WEB-DL + audio_channels: "5.1" + audio_codec: DolbyDigital + video_codec: h264 + release_group: CtrlHD + +? /media/bdc64bfe-e36f-4af8-b550-e6fd2dfaa507/TV_Shows/Doctor Who (2005)/Saison 6/Doctor Who (2005) - S06E13 - The Wedding of River Song.mkv +: title: Doctor Who + season: 6 + episode: 13 + year: 2005 + episode_title: The Wedding of River Song + uuid: bdc64bfe-e36f-4af8-b550-e6fd2dfaa507 + +? /mnt/videos/tvshows/Doctor Who/Season 06/E13 - The Wedding of River Song.mkv +: title: Doctor Who + season: 6 + episode: 13 + episode_title: The Wedding of River Song + +? 
The.Simpsons.S24E03.Adventures.in.Baby-Getting.720p.WEB-DL.DD5.1.H.264-CtrlHD.mkv +: title: The Simpsons + season: 24 + episode: 3 + episode_title: Adventures in Baby-Getting + screen_size: 720p + format: WEB-DL + audio_channels: "5.1" + audio_codec: DolbyDigital + video_codec: h264 + release_group: CtrlHD + +? /home/disaster/Videos/TV/Merlin/merlin_2008.5x02.arthurs_bane_part_two.repack.720p_hdtv_x264-fov.mkv +: title: merlin + season: 5 + episode: 2 + part: 2 + episode_title: arthurs bane + screen_size: 720p + format: HDTV + video_codec: h264 + release_group: fov + year: 2008 + other: Proper + proper_count: 1 + +? "Da Vinci's Demons - 1x04 - The Magician.mkv" +: title: "Da Vinci's Demons" + season: 1 + episode: 4 + episode_title: The Magician + +? CSI.S013E18.Sheltered.720p.WEB-DL.DD5.1.H.264.mkv +: title: CSI + season: 13 + episode: 18 + episode_title: Sheltered + screen_size: 720p + format: WEB-DL + audio_channels: "5.1" + audio_codec: DolbyDigital + video_codec: h264 + +? Game of Thrones S03E06 1080i HDTV DD5.1 MPEG2-TrollHD.ts +: title: Game of Thrones + season: 3 + episode: 6 + screen_size: 1080i + format: HDTV + audio_channels: "5.1" + audio_codec: DolbyDigital + video_codec: Mpeg2 + release_group: TrollHD + +? gossip.girl.s01e18.hdtv.xvid-2hd.eng.srt +: title: gossip girl + season: 1 + episode: 18 + format: HDTV + video_codec: XviD + release_group: 2hd + subtitle_language: english + +? Wheels.S03E01E02.720p.HDTV.x264-IMMERSE.mkv +: title: Wheels + season: 3 + episode: [1, 2] + screen_size: 720p + format: HDTV + video_codec: h264 + release_group: IMMERSE + +? Wheels.S03E01-02.720p.HDTV.x264-IMMERSE.mkv +: title: Wheels + season: 3 + episode: [1, 2] + screen_size: 720p + format: HDTV + video_codec: h264 + release_group: IMMERSE + +? Wheels.S03E01-E02.720p.HDTV.x264-IMMERSE.mkv +: title: Wheels + season: 3 + episode: [1, 2] + screen_size: 720p + format: HDTV + video_codec: h264 + release_group: IMMERSE + +? 
Wheels.S03E01-04.720p.HDTV.x264-IMMERSE.mkv +: title: Wheels + season: 3 + episode: [1, 2, 3, 4] + screen_size: 720p + format: HDTV + video_codec: h264 + release_group: IMMERSE + +? Marvels.Agents.of.S.H.I.E.L.D-S01E06.720p.HDTV.X264-DIMENSION.mkv +: title: Marvels Agents of S.H.I.E.L.D + season: 1 + episode: 6 + screen_size: 720p + format: HDTV + video_codec: h264 + release_group: DIMENSION + +? Marvels.Agents.of.S.H.I.E.L.D.S01E06.720p.HDTV.X264-DIMENSION.mkv +: title: Marvels Agents of S.H.I.E.L.D. + season: 1 + episode: 6 + screen_size: 720p + format: HDTV + video_codec: h264 + release_group: DIMENSION + +? Marvels.Agents.of.S.H.I.E.L.D..S01E06.720p.HDTV.X264-DIMENSION.mkv +: title: Marvels Agents of S.H.I.E.L.D. + season: 1 + episode: 6 + screen_size: 720p + format: HDTV + video_codec: h264 + release_group: DIMENSION + +? Series/Friday Night Lights/Season 1/Friday Night Lights S01E19 - Ch-Ch-Ch-Ch-Changes.avi +: title: Friday Night Lights + season: 1 + episode: 19 + episode_title: Ch-Ch-Ch-Ch-Changes + +? Dexter Saison VII FRENCH.BDRip.XviD-MiND.nfo +: title: Dexter + season: 7 + video_codec: XviD + language: French + format: BluRay + release_group: MiND + +? Dexter Saison sept FRENCH.BDRip.XviD-MiND.nfo +: title: Dexter + season: 7 + video_codec: XviD + language: French + format: BluRay + release_group: MiND + +? "Pokémon S16 - E29 - 1280*720 HDTV VF.mkv" +: title: Pokémon + format: HDTV + language: French + season: 16 + episode: 29 + screen_size: 720p + +? One.Piece.E576.VOSTFR.720p.HDTV.x264-MARINE-FORD.mkv +: episode: 576 + video_codec: h264 + format: HDTV + title: One Piece + release_group: MARINE-FORD + subtitle_language: French + screen_size: 720p + +? Dexter.S08E12.FINAL.MULTi.1080p.BluRay.x264-MiND.mkv +: video_codec: h264 + episode: 12 + season: 8 + format: BluRay + title: Dexter + other: FINAL + language: Multiple languages + release_group: MiND + screen_size: 1080p + +? 
One Piece - E623 VOSTFR HD [www.manga-ddl-free.com].mkv +: website: www.manga-ddl-free.com + episode: 623 + subtitle_language: French + title: One Piece + other: HD + +? Falling Skies Saison 1.HDLight.720p.x264.VFF.mkv +: language: French + screen_size: 720p + season: 1 + title: Falling Skies + video_codec: h264 + other: HDLight + +? Sleepy.Hollow.S01E09.720p.WEB-DL.DD5.1.H.264-BP.mkv +: episode: 9 + video_codec: h264 + format: WEB-DL + title: Sleepy Hollow + audio_channels: "5.1" + screen_size: 720p + season: 1 + video_profile: BP + audio_codec: DolbyDigital + +? Sleepy.Hollow.S01E09.720p.WEB-DL.DD5.1.H.264-BS.mkv +: episode: 9 + video_codec: h264 + format: WEB-DL + title: Sleepy Hollow + audio_channels: "5.1" + screen_size: 720p + season: 1 + release_group: BS + audio_codec: DolbyDigital + +? Battlestar.Galactica.S00.Pilot.FRENCH.DVDRip.XviD-NOTAG.avi +: title: Battlestar Galactica + season: 0 + episode_details: Pilot + episode_title: Pilot + language: French + format: DVD + video_codec: XviD + release_group: NOTAG + +? The Big Bang Theory S00E00 Unaired Pilot VOSTFR TVRip XviD-VioCs +: title: The Big Bang Theory + season: 0 + episode: 0 + subtitle_language: French + format: TV + video_codec: XviD + release_group: VioCs + episode_details: [Unaired, Pilot] + +? The Big Bang Theory S01E00 PROPER Unaired Pilot TVRip XviD-GIGGITY +: title: The Big Bang Theory + season: 1 + episode: 0 + format: TV + video_codec: XviD + release_group: GIGGITY + other: Proper + proper_count: 1 + episode_details: [Unaired, Pilot] + +? Pawn.Stars.S2014E18.720p.HDTV.x264-KILLERS +: title: Pawn Stars + season: 2014 + year: 2014 + episode: 18 + screen_size: 720p + format: HDTV + video_codec: h264 + release_group: KILLERS + +? 2.Broke.Girls.S03E10.480p.HDTV.x264-mSD.mkv +: title: 2 Broke Girls + season: 3 + episode: 10 + screen_size: 480p + format: HDTV + video_codec: h264 + release_group: mSD + +? 
House.of.Cards.2013.S02E03.1080p.NF.WEBRip.DD5.1.x264-NTb.mkv +: title: House of Cards + year: 2013 + season: 2 + episode: 3 + screen_size: 1080p + other: Netflix + format: WEBRip + audio_channels: "5.1" + audio_codec: DolbyDigital + video_codec: h264 + release_group: NTb + +? the.100.109.hdtv-lol.mp4 +: title: the 100 + season: 1 + episode: 9 + format: HDTV + release_group: lol + +? Criminal.Minds.5x03.Reckoner.ENG.-.sub.FR.HDTV.XviD-STi.[tvu.org.ru].avi +: title: Criminal Minds + language: English + subtitle_language: French + season: 5 + episode: 3 + video_codec: XviD + format: HDTV + website: tvu.org.ru + release_group: STi + episode_title: Reckoner + +? 03-Criminal.Minds.avi +: title: Criminal Minds + episode: 3 + +? '[Evil-Saizen]_Laughing_Salesman_14_[DVD][1C98686A].mkv' +: crc32: 1C98686A + episode: 14 + format: DVD + release_group: Evil-Saizen + title: Laughing Salesman + +? '[Kaylith] Zankyou no Terror - 04 [480p][B4D4514E].mp4' +: crc32: B4D4514E + episode: 4 + release_group: Kaylith + screen_size: 480p + title: Zankyou no Terror + +? '[PuyaSubs!] Seirei Tsukai no Blade Dance - 05 [720p][32DD560E].mkv' +: crc32: 32DD560E + episode: 5 + release_group: PuyaSubs! + screen_size: 720p + title: Seirei Tsukai no Blade Dance + +? '[Doremi].Happiness.Charge.Precure.27.[1280x720].[DC91581A].mkv' +: crc32: DC91581A + episode: 27 + release_group: Doremi + screen_size: 720p + title: Happiness Charge Precure + +? "[Daisei] Free!:Iwatobi Swim Club - 01 ~ (BD 720p 10-bit AAC) [99E8E009].mkv" +: audio_codec: AAC + crc32: 99E8E009 + episode: 1 + format: BluRay + release_group: Daisei + screen_size: 720p + title: Free!:Iwatobi Swim Club + video_profile: 10bit + +? '[Tsundere] Boku wa Tomodachi ga Sukunai - 03 [BDRip h264 1920x1080 10bit FLAC][AF0C22CC].mkv' +: audio_codec: FLAC + crc32: AF0C22CC + episode: 3 + format: BluRay + release_group: Tsundere + screen_size: 1080p + title: Boku wa Tomodachi ga Sukunai + video_codec: h264 + video_profile: 10bit + +? 
'[t.3.3.d]_Mikakunin_de_Shinkoukei_-_12_[720p][5DDC1352].mkv' +: crc32: 5DDC1352 + episode: 12 + screen_size: 720p + title: Mikakunin de Shinkoukei + release_group: t.3.3.d + +? '[Anime-Koi] Sabagebu! - 06 [h264-720p][ABB3728A].mkv' +: crc32: ABB3728A + episode: 6 + release_group: Anime-Koi + screen_size: 720p + title: Sabagebu! + video_codec: h264 + +? '[aprm-Diogo4D] [BD][1080p] Nagi no Asukara 08 [4D102B7C].mkv' +: crc32: 4D102B7C + episode: 8 + format: BluRay + release_group: aprm-Diogo4D + screen_size: 1080p + title: Nagi no Asukara + +? '[Akindo-SSK] Zankyou no Terror - 05 [720P][Sub_ITA][F5CCE87C].mkv' +: crc32: F5CCE87C + episode: 5 + release_group: Akindo-SSK + screen_size: 720p + title: Zankyou no Terror + subtitle_language: it + +? Naruto Shippuden Episode 366 VOSTFR.avi +: episode: 366 + title: Naruto Shippuden + subtitle_language: fr + +? Naruto Shippuden Episode 366v2 VOSTFR.avi +: episode: 366 + version: 2 + title: Naruto Shippuden + subtitle_language: fr + +? '[HorribleSubs] Ao Haru Ride - 06 [480p].mkv' +: episode: 6 + release_group: HorribleSubs + screen_size: 480p + title: Ao Haru Ride + +? '[DeadFish] Tari Tari - 01 [BD][720p][AAC].mp4' +: audio_codec: AAC + episode: 1 + format: BluRay + release_group: DeadFish + screen_size: 720p + title: Tari Tari + +? '[NoobSubs] Sword Art Online II 06 (720p 8bit AAC).mp4' +: audio_codec: AAC + episode: 6 + release_group: NoobSubs + screen_size: 720p + title: Sword Art Online II + video_profile: 8bit + +? '[DeadFish] 01 - Tari Tari [BD][720p][AAC].mp4' +: audio_codec: AAC + episode: 1 + format: BluRay + release_group: DeadFish + screen_size: 720p + title: Tari Tari + +? '[NoobSubs] 06 Sword Art Online II (720p 8bit AAC).mp4' +: audio_codec: AAC + episode: 6 + release_group: NoobSubs + screen_size: 720p + title: Sword Art Online II + video_profile: 8bit + +? 
'[DeadFish] 12 - Tari Tari [BD][720p][AAC].mp4' +: audio_codec: AAC + episode: 12 + format: BluRay + release_group: DeadFish + screen_size: 720p + title: Tari Tari + +? Something.Season.2.1of4.Ep.Title.HDTV.torrent +: episode_count: 4 + episode: 1 + format: HDTV + season: 2 + title: Something + episode_title: Title + container: torrent + +? Something.Season.2of5.3of9.Ep.Title.HDTV.torrent +: episode_count: 9 + episode: 3 + format: HDTV + season: 2 + season_count: 5 + title: Something + episode_title: Title + container: torrent + +? Something.Other.Season.3of5.Complete.HDTV.torrent +: format: HDTV + other: Complete + season: 3 + season_count: 5 + title: Something Other + container: torrent + +? Something.Other.Season.1-3.avi +: season: [1, 2, 3] + title: Something Other + +? Something.Other.Season.1&3.avi +: season: [1, 3] + title: Something Other + +? Something.Other.Season.1&3-1to12ep.avi +: season: [1, 3] + title: Something Other + +? W2Test.123.HDTV.XViD-FlexGet +: episode: 23 + season: 1 + format: HDTV + release_group: FlexGet + title: W2Test + video_codec: XviD + +? W2Test.123.HDTV.XViD-FlexGet +: options: --episode-prefer-number + episode: 123 + format: HDTV + release_group: FlexGet + title: W2Test + video_codec: XviD + +? FooBar.0307.PDTV-FlexGet +: episode: 7 + format: DVB + release_group: FlexGet + season: 3 + title: FooBar + +? FooBar.0307.PDTV-FlexGet +? FooBar.307.PDTV-FlexGet +: options: --episode-prefer-number + episode: 307 + format: DVB + release_group: FlexGet + title: FooBar + +? FooBar.07.PDTV-FlexGet +: options: --episode-prefer-number + episode: 7 + format: DVB + release_group: FlexGet + title: FooBar + +? FooBar.7.PDTV-FlexGet +: options: --episode-prefer-number + episode: 7 + format: DVB + release_group: FlexGet + title: FooBar + +? FooBar.0307.PDTV-FlexGet +: episode: 7 + format: DVB + release_group: FlexGet + season: 3 + title: FooBar + +? 
FooBar.307.PDTV-FlexGet +: episode: 7 + format: DVB + release_group: FlexGet + season: 3 + title: FooBar + +? FooBar.07.PDTV-FlexGet +: episode: 7 + format: DVB + release_group: FlexGet + title: FooBar + +? FooBar.07v4.PDTV-FlexGet +: episode: 7 + version: 4 + format: DVB + release_group: FlexGet + title: FooBar + +? FooBar.7.PDTV-FlexGet +: format: DVB + release_group: FlexGet + title: FooBar 7 + type: movie + +? FooBar.7.PDTV-FlexGet +: options: -t episode + episode: 7 + format: DVB + release_group: FlexGet + title: FooBar + +? FooBar.7v3.PDTV-FlexGet +: options: -t episode + episode: 7 + version: 3 + format: DVB + release_group: FlexGet + title: FooBar + +? Test.S02E01.hdtv.real.proper +: episode: 1 + format: HDTV + other: Proper + proper_count: 2 + season: 2 + title: Test + +? Real.Test.S02E01.hdtv.proper +: episode: 1 + format: HDTV + other: Proper + proper_count: 1 + season: 2 + title: Real Test + +? Test.Real.S02E01.hdtv.proper +: episode: 1 + format: HDTV + other: Proper + proper_count: 1 + season: 2 + title: Test Real + +? Test.S02E01.hdtv.proper +: episode: 1 + format: HDTV + other: Proper + proper_count: 1 + season: 2 + title: Test + +? Test.S02E01.hdtv.real.repack.proper +: episode: 1 + format: HDTV + other: Proper + proper_count: 3 + season: 2 + title: Test + +? Date.Show.03-29-2012.HDTV.XViD-FlexGet +: date: 2012-03-29 + format: HDTV + release_group: FlexGet + title: Date Show + video_codec: XviD + +? Something.1x5.Season.Complete-FlexGet +: episode: 5 + other: Complete + season: 1 + title: Something + release_group: FlexGet + +? Something Seasons 1 & 2 - Complete +: other: Complete + season: + - 1 + - 2 + title: Something + +? Something Seasons 4 Complete +: other: Complete + season: 4 + title: Something + +? Something.1xAll.Season.Complete-FlexGet +: other: Complete + season: 1 + title: Something + release_group: FlexGet + +? Something.1xAll-FlexGet +: other: Complete + season: 1 + title: Something + release_group: FlexGet + +? 
FlexGet.US.S2013E14.Title.Here.720p.HDTV.AAC5.1.x264-NOGRP +: audio_channels: '5.1' + audio_codec: AAC + country: US + episode: 14 + format: HDTV + release_group: NOGRP + screen_size: 720p + season: 2013 + title: FlexGet + episode_title: Title Here + video_codec: h264 + year: 2013 + +? FlexGet.14.of.21.Title.Here.720p.HDTV.AAC5.1.x264-NOGRP +: audio_channels: '5.1' + audio_codec: AAC + episode_count: 21 + episode: 14 + format: HDTV + release_group: NOGRP + screen_size: 720p + title: FlexGet + episode_title: Title Here + video_codec: h264 + +? FlexGet.Series.2013.14.of.21.Title.Here.720p.HDTV.AAC5.1.x264-NOGRP +: audio_channels: '5.1' + audio_codec: AAC + episode_count: 21 + episode: 14 + format: HDTV + release_group: NOGRP + screen_size: 720p + season: 2013 + title: FlexGet + episode_title: Title Here + video_codec: h264 + year: 2013 + +? Something.S04E05E09 +: episode: # 1.x guessit this as a range from 5 to 9. But not sure if it should ... + - 5 + - 9 + season: 4 + title: Something + +? FooBar 360 1080i +: options: --episode-prefer-number + episode: 360 + screen_size: 1080i + title: FooBar + +? FooBar 360 1080i +: episode: 60 + season: 3 + screen_size: 1080i + title: FooBar + +? FooBar 360 +: screen_size: 360p + title: FooBar + +? BarFood christmas special HDTV +: options: --expected-title BarFood + format: HDTV + title: BarFood + episode_title: christmas special + episode_details: Special + +? Something.2008x12.13-FlexGet +: title: Something + date: 2008-12-13 + episode_title: FlexGet + +? '[Ignored] Test 12' +: episode: 12 + release_group: Ignored + title: Test + +? '[FlexGet] Test 12' +: episode: 12 + release_group: FlexGet + title: Test + +? Test.13.HDTV-Ignored +: episode: 13 + format: HDTV + release_group: Ignored + title: Test + +? Test.13.HDTV-Ignored +: options: --expected-series test + episode: 13 + format: HDTV + release_group: Ignored + title: Test + +? Test.13.HDTV-Ignored +: title: Test + episode: 13 + format: HDTV + release_group: Ignored + +? 
Test.13.HDTV-Ignored +: episode: 13 + format: HDTV + release_group: Ignored + title: Test + +? Test.13.HDTV-FlexGet +: episode: 13 + format: HDTV + release_group: FlexGet + title: Test + +? Test.14.HDTV-Name +: episode: 14 + format: HDTV + release_group: Name + title: Test + +? Real.Time.With.Bill.Maher.2014.10.31.HDTV.XviD-AFG.avi +: date: 2014-10-31 + format: HDTV + release_group: AFG + title: Real Time With Bill Maher + video_codec: XviD + +? Arrow.S03E21.Al.Sah-Him.1080p.WEB-DL.DD5.1.H.264-BS.mkv +: title: Arrow + season: 3 + episode: 21 + episode_title: Al Sah-Him + screen_size: 1080p + audio_codec: DolbyDigital + audio_channels: "5.1" + video_codec: h264 + release_group: BS + format: WEB-DL + +? How to Make It in America - S02E06 - I'm Sorry, Who's Yosi?.mkv +: title: How to Make It in America + season: 2 + episode: 6 + episode_title: I'm Sorry, Who's Yosi? + +? 24.S05E07.FRENCH.DVDRip.XviD-FiXi0N.avi +: episode: 7 + format: DVD + language: fr + season: 5 + title: '24' + video_codec: XviD + release_group: FiXi0N + +? 12.Monkeys.S01E12.FRENCH.BDRip.x264-VENUE.mkv +: episode: 12 + format: BluRay + language: fr + release_group: VENUE + season: 1 + title: 12 Monkeys + video_codec: h264 + +? The.Daily.Show.2015.07.01.Kirsten.Gillibrand.Extended.720p.CC.WEBRip.AAC2.0.x264-BTW.mkv +: audio_channels: '2.0' + audio_codec: AAC + date: 2015-07-01 + format: WEBRip + other: [Extended, CC] + release_group: BTW + screen_size: 720p + title: The Daily Show + episode_title: Kirsten Gillibrand + video_codec: h264 + +? The.Daily.Show.2015.07.01.Kirsten.Gillibrand.Extended.Interview.720p.CC.WEBRip.AAC2.0.x264-BTW.mkv +: audio_channels: '2.0' + audio_codec: AAC + date: 2015-07-01 + format: WEBRip + other: CC + release_group: BTW + screen_size: 720p + title: The Daily Show + episode_title: Kirsten Gillibrand Extended Interview + video_codec: h264 + +? 
The.Daily.Show.2015.07.02.Sarah.Vowell.CC.WEBRip.AAC2.0.x264-BTW.mkv +: audio_channels: '2.0' + audio_codec: AAC + date: 2015-07-02 + format: WEBRip + other: CC + release_group: BTW + title: The Daily Show + episode_title: Sarah Vowell + video_codec: h264 + +? 90.Day.Fiance.S02E07.I.Have.To.Tell.You.Something.720p.HDTV.x264-W4F +: episode: 7 + format: HDTV + screen_size: 720p + season: 2 + title: 90 Day Fiance + episode_title: I Have To Tell You Something + release_group: W4F + +? Doctor.Who.2005.S04E06.FRENCH.LD.DVDRip.XviD-TRACKS.avi +: episode: 6 + format: DVD + language: fr + release_group: TRACKS + season: 4 + title: Doctor Who + other: LD + video_codec: XviD + year: 2005 + +? Astro.Le.Petit.Robot.S01E01+02.FRENCH.DVDRiP.X264.INT-BOOLZ.mkv +: episode: [1, 2] + format: DVD + language: fr + release_group: INT-BOOLZ + season: 1 + title: Astro Le Petit Robot + video_codec: h264 + +? Annika.Bengtzon.2012.E01.Le.Testament.De.Nobel.FRENCH.DVDRiP.XViD-STVFRV.avi +: episode: 1 + format: DVD + language: fr + release_group: STVFRV + title: Annika Bengtzon + episode_title: Le Testament De Nobel + video_codec: XviD + year: 2012 + +? Dead.Set.02.FRENCH.LD.DVDRip.XviD-EPZ.avi +: episode: 2 + format: DVD + language: fr + other: LD + release_group: EPZ + title: Dead Set + video_codec: XviD + +? Phineas and Ferb S01E00 & S01E01 & S01E02 +: episode: [0, 1, 2] + season: 1 + title: Phineas and Ferb + +? Show.Name.S01E02.S01E03.HDTV.XViD.Etc-Group +: episode: [2, 3] + format: HDTV + release_group: Etc-Group + season: 1 + title: Show Name + video_codec: XviD + +? Show Name - S01E02 - S01E03 - S01E04 - Ep Name +: episode: [2, 3, 4] + season: 1 + title: Show Name + episode_title: Ep Name + +? Show.Name.1x02.1x03.HDTV.XViD.Etc-Group +: episode: [2, 3] + format: HDTV + release_group: Etc-Group + season: 1 + title: Show Name + video_codec: XviD + +? Show Name - 1x02 - 1x03 - 1x04 - Ep Name +: episode: [2, 3, 4] + season: 1 + title: Show Name + episode_title: Ep Name + +? 
Show.Name.S01E02.HDTV.XViD.Etc-Group +: episode: 2 + format: HDTV + release_group: Etc-Group + season: 1 + title: Show Name + video_codec: XviD + +? Show Name - S01E02 - My Ep Name +: episode: 2 + season: 1 + title: Show Name + episode_title: My Ep Name + +? Show Name - S01.E03 - My Ep Name +: episode: 3 + season: 1 + title: Show Name + episode_title: My Ep Name + +? Show.Name.S01E02E03.HDTV.XViD.Etc-Group +: episode: [2, 3] + format: HDTV + release_group: Etc-Group + season: 1 + title: Show Name + video_codec: XviD + +? Show Name - S01E02-03 - My Ep Name +: episode: [2, 3] + season: 1 + title: Show Name + episode_title: My Ep Name + +? Show.Name.S01.E02.E03 +: episode: [2, 3] + season: 1 + title: Show Name + +? Show_Name.1x02.HDTV_XViD_Etc-Group +: episode: 2 + format: HDTV + release_group: Etc-Group + season: 1 + title: Show Name + video_codec: XviD + +? Show Name - 1x02 - My Ep Name +: episode: 2 + season: 1 + title: Show Name + episode_title: My Ep Name + +? Show_Name.1x02x03x04.HDTV_XViD_Etc-Group +: episode: [2, 3, 4] + format: HDTV + release_group: Etc-Group + season: 1 + title: Show Name + video_codec: XviD + +? Show Name - 1x02-03-04 - My Ep Name +: episode: [2, 3, 4] + season: 1 + title: Show Name + episode_title: My Ep Name + +# 1x guess this as episode 100 but 101 as episode 1 season 1. +? Show.Name.100.Event.2010.11.23.HDTV.XViD.Etc-Group +: date: 2010-11-23 + season: 1 + episode: 0 + format: HDTV + release_group: Etc-Group + title: Show Name + episode_title: Event + video_codec: XviD + +? Show.Name.101.Event.2010.11.23.HDTV.XViD.Etc-Group +: date: 2010-11-23 + season: 1 + episode: 1 + format: HDTV + release_group: Etc-Group + title: Show Name + episode_title: Event + video_codec: XviD + +? Show.Name.2010.11.23.HDTV.XViD.Etc-Group +: date: 2010-11-23 + format: HDTV + release_group: Etc-Group + title: Show Name + +? Show Name - 2010-11-23 - Ep Name +: date: 2010-11-23 + title: Show Name + episode_title: Ep Name + +? 
Show Name Season 1 Episode 2 Ep Name +: episode: 2 + season: 1 + title: Show Name + episode_title: Ep Name + +? Show.Name.S01.HDTV.XViD.Etc-Group +: format: HDTV + release_group: Etc-Group + season: 1 + title: Show Name + video_codec: XviD + +? Show.Name.E02-03 +: episode: [2, 3] + title: Show Name + +? Show.Name.E02.2010 +: episode: 2 + year: 2010 + title: Show Name + +? Show.Name.E23.Test +: episode: 23 + title: Show Name + episode_title: Test + +? Show.Name.Part.3.HDTV.XViD.Etc-Group +: part: 3 + title: Show Name + format: HDTV + video_codec: XviD + release_group: Etc-Group + type: movie + # Fallback to movie type because we can't tell it's a series ... + +? Show.Name.Part.1.and.Part.2.Blah-Group +: part: [1, 2] + title: Show Name + type: movie + # Fallback to movie type because we can't tell it's a series ... + +? Show Name - 01 - Ep Name +: episode: 1 + title: Show Name + episode_title: Ep Name + +? 01 - Ep Name +: episode: 1 + title: Ep Name + +? Show.Name.102.HDTV.XViD.Etc-Group +: episode: 2 + format: HDTV + release_group: Etc-Group + season: 1 + title: Show Name + video_codec: XviD + +? '[HorribleSubs] Maria the Virgin Witch - 01 [720p].mkv' +: episode: 1 + release_group: HorribleSubs + screen_size: 720p + title: Maria the Virgin Witch + +? '[ISLAND]One_Piece_679_[VOSTFR]_[V1]_[8bit]_[720p]_[EB7838FC].mp4' +: options: -E + crc32: EB7838FC + episode: 679 + release_group: ISLAND + screen_size: 720p + title: One Piece + subtitle_language: fr + video_profile: 8bit + version: 1 + +? '[ISLAND]One_Piece_679_[VOSTFR]_[8bit]_[720p]_[EB7838FC].mp4' +: options: -E + crc32: EB7838FC + episode: 679 + release_group: ISLAND + screen_size: 720p + title: One Piece + subtitle_language: fr + video_profile: 8bit + +? '[Kaerizaki-Fansub]_One_Piece_679_[VOSTFR][HD_1280x720].mp4' +: options: -E + episode: 679 + other: HD + release_group: Kaerizaki-Fansub + screen_size: 720p + title: One Piece + subtitle_language: fr + +? 
'[Kaerizaki-Fansub]_One_Piece_679_[VOSTFR][FANSUB][HD_1280x720].mp4' +: options: -E + episode: 679 + other: + - Fansub + - HD + release_group: Kaerizaki-Fansub + screen_size: 720p + title: One Piece + subtitle_language: fr + +? '[Kaerizaki-Fansub]_One_Piece_681_[VOSTFR][HD_1280x720]_V2.mp4' +: options: -E + episode: 681 + other: HD + release_group: Kaerizaki-Fansub + screen_size: 720p + title: One Piece + subtitle_language: fr + version: 2 + +? '[Kaerizaki-Fansub] High School DxD New 04 VOSTFR HD (1280x720) V2.mp4' +: options: -E + episode: 4 + other: HD + release_group: Kaerizaki-Fansub + screen_size: 720p + title: High School DxD New + subtitle_language: fr + version: 2 + +? '[Kaerizaki-Fansub] One Piece 603 VOSTFR PS VITA (960x544) V2.mp4' +: options: -E + episode: 603 + release_group: + - Kaerizaki-Fansub + - PS VITA + screen_size: 960x544 + title: One Piece + subtitle_language: fr + version: 2 + +? '[Group Name] Show Name.13' +: episode: 13 + release_group: Group Name + title: Show Name + +? '[Group Name] Show Name - 13' +: episode: 13 + release_group: Group Name + title: Show Name + +? '[Group Name] Show Name 13' +: episode: 13 + release_group: Group Name + title: Show Name + +# [Group Name] Show Name.13-14 +# [Group Name] Show Name - 13-14 +# Show Name 13-14 + +? '[Stratos-Subs]_Infinite_Stratos_-_12_(1280x720_H.264_AAC)_[379759DB]' +: audio_codec: AAC + crc32: 379759DB + episode: 12 + release_group: Stratos-Subs + screen_size: 720p + title: Infinite Stratos + video_codec: h264 + +# [ShinBunBu-Subs] Bleach - 02-03 (CX 1280x720 x264 AAC) + +? '[SGKK] Bleach 312v1 [720p/MKV]' +: options: -E # guessit 1.x for episode only when version is guessed, but it's doesn't make it consistent. + episode: 312 + release_group: SGKK + screen_size: 720p + title: Bleach + version: 1 + +? '[Ayako]_Infinite_Stratos_-_IS_-_07_[H264][720p][EB7838FC]' +: crc32: EB7838FC + episode: 7 + release_group: Ayako + screen_size: 720p + title: Infinite Stratos + video_codec: h264 + +? 
'[Ayako] Infinite Stratos - IS - 07v2 [H264][720p][44419534]' +: crc32: '44419534' + episode: 7 + release_group: Ayako + screen_size: 720p + title: Infinite Stratos + video_codec: h264 + version: 2 + +? '[Ayako-Shikkaku] Oniichan no Koto Nanka Zenzen Suki Janain Dakara ne - 10 [LQ][h264][720p] [8853B21C]' +: crc32: 8853B21C + episode: 10 + release_group: Ayako-Shikkaku + screen_size: 720p + title: Oniichan no Koto Nanka Zenzen Suki Janain Dakara ne + video_codec: h264 + +# TODO: Add support for absolute episodes +? Bleach - s16e03-04 - 313-314 +? Bleach.s16e03-04.313-314 +? Bleach.s16e03-04.313-314 +? Bleach - s16e03-04 - 313-314 +? Bleach.s16e03-04.313-314 +? Bleach s16e03e04 313-314 +: episode: [3, 4] + season: 16 + title: Bleach + +? Bleach - 313-314 +: options: -E + episode: [313, 314] + title: Bleach + +? '[ShinBunBu-Subs] Bleach - 02-03 (CX 1280x720 x264 AAC)' +: audio_codec: AAC + episode: [2, 3] + release_group: ShinBunBu-Subs + screen_size: 720p + title: Bleach + video_codec: h264 + +? 003. Show Name - Ep Name.avi +: episode: 3 + title: Show Name + episode_title: Ep Name + +? 003-004. Show Name - Ep Name.avi +: episode: [3, 4] + title: Show Name + episode_title: Ep Name + +? One Piece - 102 +: episode: 2 + season: 1 + title: One Piece + +? "[ACX]_Wolf's_Spirit_001.mkv" +: episode: 1 + release_group: ACX + title: "Wolf's Spirit" + +? Project.Runway.S14E00.and.S14E01.(Eng.Subs).SDTV.x264-[2Maverick].mp4 +: episode: [0, 1] + format: TV + release_group: 2Maverick + season: 14 + title: Project Runway + subtitle_language: en + video_codec: h264 + +? '[Hatsuyuki-Kaitou]_Fairy_Tail_2_-_16-20_[720p][10bit].torrent' +: episode: [16, 17, 18, 19, 20] + release_group: Hatsuyuki-Kaitou + screen_size: 720p + title: Fairy Tail 2 + video_profile: 10bit + +? 
'[Hatsuyuki-Kaitou]_Fairy_Tail_2_-_16-20_(191-195)_[720p][10bit].torrent' +: options: -E + episode: [16, 17, 18, 19, 20, 191, 192, 193, 194, 195] + release_group: Hatsuyuki-Kaitou + screen_size: 720p + title: Fairy Tail 2 + +? "Looney Tunes 1940x01 Porky's Last Stand.mkv" +: episode: 1 + season: 1940 + title: Looney Tunes + episode_title: Porky's Last Stand + year: 1940 + +? The.Good.Wife.S06E01.E10.720p.WEB-DL.DD5.1.H.264-CtrlHD/The.Good.Wife.S06E09.Trust.Issues.720p.WEB-DL.DD5.1.H.264-CtrlHD.mkv +: audio_channels: '5.1' + audio_codec: DolbyDigital + episode: 9 + format: WEB-DL + release_group: CtrlHD + screen_size: 720p + season: 6 + title: The Good Wife + episode_title: Trust Issues + video_codec: h264 + +? Fear the Walking Dead - 01x02 - So Close, Yet So Far.REPACK-KILLERS.French.C.updated.Addic7ed.com.mkv +: episode: 2 + language: fr + other: Proper + proper_count: 1 + season: 1 + title: Fear the Walking Dead + episode_title: So Close, Yet So Far + +? Fear the Walking Dead - 01x02 - En Close, Yet En Far.REPACK-KILLERS.French.C.updated.Addic7ed.com.mkv +: episode: 2 + language: fr + other: Proper + proper_count: 1 + season: 1 + title: Fear the Walking Dead + episode_title: En Close, Yet En Far + +? /av/unsorted/The.Daily.Show.2015.07.22.Jake.Gyllenhaal.720p.HDTV.x264-BATV.mkv +: date: 2015-07-22 + format: HDTV + release_group: BATV + screen_size: 720p + title: The Daily Show + episode_title: Jake Gyllenhaal + video_codec: h264 + +? "[7.1.7.8.5] Foo Bar - 11 (H.264) [5235532D].mkv" +: options: -E + episode: 11 + +? my 720p show S01E02 +: options: -T "my 720p show" + title: my 720p show + season: 1 + episode: 2 + +? my 720p show S01E02 720p +: options: -T "my 720p show" + title: my 720p show + season: 1 + episode: 2 + screen_size: 720p + +? -my 720p show S01E02 +: options: -T "re:my \d+p show" + screen_size: 720p + +? Show S01E02 +: options: -T "The Show" + title: Show + season: 1 + episode: 2 + +? 
Foo's & Bars (2009) S01E01 720p XviD-2HD[AOEU] +: episode: 1 + release_group: 2HD[AOEU] + screen_size: 720p + season: 1 + title: Foo's & Bars + video_codec: XviD + year: 2009 + +? Date.Series.10-11-2008.XViD +: date: 2008-11-10 + title: Date + video_codec: XviD + +? Scrubs/SEASON-06/Scrubs.S06E09.My.Perspective.DVDRip.XviD-WAT/scrubs.s06e09.dvdrip.xvid-wat.avi +: container: avi + episode: 9 + episode_title: My Perspective + format: DVD + mimetype: video/x-msvideo + release_group: WAT + season: 6 + title: Scrubs + video_codec: XviD + +? '[PuyaSubs!] Digimon Adventure tri - 01 [720p][F9967949].mkv' +: container: mkv + crc32: F9967949 + episode: 1 + mimetype: video/x-matroska + release_group: PuyaSubs! + screen_size: 720p + title: Digimon Adventure tri + +? Sherlock.S01.720p.BluRay.x264-AVCHD +: format: BluRay + screen_size: 720p + season: 1 + title: Sherlock + video_codec: h264 + +? Running.Wild.With.Bear.Grylls.S02E07.Michael.B.Jordan.PROPER.HDTV.x264-W4F.avi +: container: avi + episode: 7 + episode_title: Michael B Jordan + format: HDTV + mimetype: video/x-msvideo + other: Proper + proper_count: 1 + release_group: W4F + season: 2 + title: Running Wild With Bear Grylls + video_codec: h264 + +? Homeland.S05E11.Our.Man.in.Damascus.German.Sub.720p.HDTV.x264.iNTERNAL-BaCKToRG +: episode: 11 + episode_title: Our Man in Damascus + format: HDTV + release_group: iNTERNAL-BaCKToRG + screen_size: 720p + season: 5 + subtitle_language: de + title: Homeland + type: episode + video_codec: h264 + +? Breaking.Bad.S01E01.2008.BluRay.VC1.1080P.5.1.WMV-NOVO +: title: Breaking Bad + season: 1 + episode: 1 + year: 2008 + format: BluRay + screen_size: 1080p + audio_channels: '5.1' + container: WMV + release_group: NOVO + type: episode + +? Cosmos.A.Space.Time.Odyssey.S01E02.HDTV.x264.PROPER-LOL +: title: Cosmos A Space Time Odyssey + season: 1 + episode: 2 + format: HDTV + video_codec: h264 + other: Proper + proper_count: 1 + release_group: LOL + type: episode + +? 
Fear.The.Walking.Dead.S02E01.HDTV.x264.AAC.MP4-k3n +: title: Fear The Walking Dead + season: 2 + episode: 1 + format: HDTV + video_codec: h264 + audio_codec: AAC + container: MP4 + release_group: k3n + type: episode + +? Elementary.S01E01.Pilot.DVDSCR.x264.PREAiR-NoGRP +: title: Elementary + season: 1 + episode: 1 + episode_details: Pilot + episode_title: Pilot + format: DVD + video_codec: h264 + other: [Screener, Preair] + release_group: NoGRP + type: episode + +? Once.Upon.a.Time.S05E19.HDTV.x264.REPACK-LOL[ettv] +: title: Once Upon a Time + season: 5 + episode: 19 + format: HDTV + video_codec: h264 + other: Proper + proper_count: 1 + release_group: LOL[ettv] + type: episode + +? Show.Name.S01E03.WEB-DL.x264.HUN-nIk +: title: Show Name + season: 1 + episode: 3 + format: WEB-DL + video_codec: h264 + language: hu + release_group: nIk + type: episode + +? Game.of.Thrones.S6.Ep5.X265.Dolby.2.0.KTM3.mp4 +: audio_channels: '2.0' + audio_codec: DolbyDigital + container: mp4 + episode: 5 + release_group: KTM3 + season: 6 + title: Game of Thrones + type: episode + video_codec: h265 + +? Fargo.-.Season.1.-.720p.BluRay.-.x264.-.ShAaNiG +: format: BluRay + release_group: ShAaNiG + screen_size: 720p + season: 1 + title: Fargo + type: episode + video_codec: h264 + +? Show.Name.S02E02.Episode.Title.1080p.WEB-DL.x264.5.1Ch.-.Group +: audio_channels: '5.1' + episode: 2 + episode_title: Episode Title + format: WEB-DL + release_group: Group + screen_size: 1080p + season: 2 + title: Show Name + type: episode + video_codec: h264 + +? Breaking.Bad.S01E01.2008.BluRay.VC1.1080P.5.1.WMV-NOVO +: audio_channels: '5.1' + container: WMV + episode: 1 + format: BluRay + release_group: NOVO + screen_size: 1080p + season: 1 + title: Breaking Bad + type: episode + year: 2008 + +? 
Cosmos.A.Space.Time.Odyssey.S01E02.HDTV.x264.PROPER-LOL +: episode: 2 + format: HDTV + other: Proper + proper_count: 1 + release_group: LOL + season: 1 + title: Cosmos A Space Time Odyssey + type: episode + video_codec: h264 + +? Elementary.S01E01.Pilot.DVDSCR.x264.PREAiR-NoGRP +: episode: 1 + episode_details: Pilot + episode_title: Pilot + format: DVD + other: + - Screener + - Preair + release_group: NoGRP + season: 1 + title: Elementary + type: episode + video_codec: h264 + +? Fear.The.Walking.Dead.S02E01.HDTV.x264.AAC.MP4-k3n.mp4 +: audio_codec: AAC + container: + - MP4 + - mp4 + episode: 1 + format: HDTV + mimetype: video/mp4 + release_group: k3n + season: 2 + title: Fear The Walking Dead + type: episode + video_codec: h264 + +? Game.of.Thrones.S03.1080p.BluRay.DTS-HD.MA.5.1.AVC.REMUX-FraMeSToR +: audio_channels: '5.1' + audio_codec: DTS + audio_profile: HDMA + format: BluRay + other: Remux + release_group: FraMeSToR + screen_size: 1080p + season: 3 + title: Game of Thrones + type: episode + +? Show.Name.S01E02.HDTV.x264.NL-subs-ABC +: episode: 2 + format: HDTV + release_group: ABC + season: 1 + subtitle_language: nl + title: Show Name + type: episode + video_codec: h264 + +? Friends.S01-S10.COMPLETE.720p.BluRay.x264-PtM +: format: BluRay + other: Complete + release_group: PtM + screen_size: 720p + season: # Should it be [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] ? + - 1 + - 2 + - 3 + - 4 + - 5 + - 6 + - 7 + - 8 + - 9 + - 10 + title: Friends + type: episode + video_codec: h264 + +? Duck.Dynasty.S02E07.Streik.German.DOKU.DL.WS.DVDRiP.x264-CDP +: episode: 7 + episode_title: Streik German DOKU + format: DVD + language: mul + other: WideScreen + release_group: CDP + season: 2 + title: Duck Dynasty + type: episode + video_codec: h264 + +? 
Family.Guy.S13E14.JOLO.German.AC3D.DL.720p.WebHD.x264-CDD +: audio_codec: AC3 + episode: 14 + episode_title: JOLO German + format: WEB-DL + language: mul + release_group: CDD + screen_size: 720p + season: 13 + title: Family Guy + type: episode + video_codec: h264 + +? How.I.Met.Your.Mother.COMPLETE.SERIES.DVDRip.XviD-AR +: options: -L en -C us + format: DVD + other: Complete + release_group: AR + title: How I Met Your Mother + type: movie + video_codec: XviD + +? Show Name The Complete Seasons 1 to 5 720p BluRay x265 HEVC-SUJAIDR[UTR] +: format: BluRay + other: Complete + release_group: SUJAIDR[UTR] + screen_size: 720p + season: + - 1 + - 2 + - 3 + - 4 + - 5 + title: Show Name + type: episode + video_codec: h265 + +? Fear.the.Walking.Dead.-.Season.2.epi.02.XviD.Eng.Ac3-5.1.sub.ita.eng.iCV-MIRCrew +: options: -t episode + audio_channels: '5.1' + audio_codec: AC3 + episode: 2 + episode_title: epi + language: en + release_group: iCV-MIRCrew + season: 2 + subtitle_language: it + title: Fear the Walking Dead + type: episode + video_codec: XviD + +? Game.Of.Thrones.S06E04.720p.PROPER.HDTV.x264-HDD +: episode: 4 + format: HDTV + other: Proper + proper_count: 1 + release_group: HDD + screen_size: 720p + season: 6 + title: Game Of Thrones + type: episode + video_codec: h264 \ No newline at end of file diff --git a/libs/guessit/test/guessittest.py b/libs/guessit/test/guessittest.py deleted file mode 100644 index 1e9374f0..00000000 --- a/libs/guessit/test/guessittest.py +++ /dev/null @@ -1,187 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# -# GuessIt - A library for guessing information from filenames -# Copyright (c) 2013 Nicolas Wack -# -# GuessIt is free software; you can redistribute it and/or modify it under -# the terms of the Lesser GNU General Public License as published by -# the Free Software Foundation; either version 3 of the License, or -# (at your option) any later version. 
-# -# GuessIt is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# Lesser GNU General Public License for more details. -# -# You should have received a copy of the Lesser GNU General Public License -# along with this program. If not, see . -# - -from __future__ import absolute_import, division, print_function, unicode_literals - -from guessit import base_text_type, u -from collections import defaultdict -from unittest import TestCase, TestLoader, TextTestRunner -import shlex -import babelfish -import yaml, logging, sys, os -from os.path import * - - -def currentPath(): - '''Returns the path in which the calling file is located.''' - return dirname(join(os.getcwd(), sys._getframe(1).f_globals['__file__'])) - - -def addImportPath(path): - '''Function that adds the specified path to the import path. The path can be - absolute or relative to the calling file.''' - importPath = abspath(join(currentPath(), path)) - sys.path = [importPath] + sys.path - -log = logging.getLogger(__name__) - -from guessit.plugins import transformers -from guessit.options import get_opts -import guessit -from guessit import * -from guessit.matcher import * -from guessit.fileutils import * - - -def allTests(testClass): - return TestLoader().loadTestsFromTestCase(testClass) - - -class TestGuessit(TestCase): - - def checkMinimumFieldsCorrect(self, filename, filetype=None, remove_type=True, - exclude_files=None): - groundTruth = yaml.load(load_file_in_same_dir(__file__, filename)) - - def guess_func(string, options=None): - return guess_file_info(string, options=options, type=filetype) - - return self.checkFields(groundTruth, guess_func, remove_type, exclude_files) - - def checkFields(self, groundTruth, guess_func, remove_type=True, - exclude_files=None): - total = 0 - exclude_files = exclude_files or [] - - fails = defaultdict(list) - additionals = defaultdict(list) 
- - for filename, required_fields in groundTruth.items(): - filename = u(filename) - if filename in exclude_files: - continue - - log.debug('\n' + '-' * 120) - log.info('Guessing information for file: %s' % filename) - - options = required_fields.pop('options') if 'options' in required_fields else None - - if options: - args = shlex.split(options) - options = get_opts().parse_args(args) - options = vars(options) - try: - found = guess_func(filename, options) - except Exception as e: - fails[filename].append("An exception has occured in %s: %s" % (filename, e)) - log.exception("An exception has occured in %s: %s" % (filename, e)) - continue - - total = total + 1 - - # no need for these in the unittests - if remove_type: - try: - del found['type'] - except: - pass - for prop in ('container', 'mimetype', 'unidentified'): - if prop in found: - del found[prop] - - # props which are list of just 1 elem should be opened for easier writing of the tests - for prop in ('language', 'subtitleLanguage', 'other', 'episodeDetails', 'unidentified'): - value = found.get(prop, None) - if isinstance(value, list) and len(value) == 1: - found[prop] = value[0] - - # look for missing properties - for prop, value in required_fields.items(): - if prop not in found: - log.debug("Prop '%s' not found in: %s" % (prop, filename)) - fails[filename].append("'%s' not found in: %s" % (prop, filename)) - continue - - # if both properties are strings, do a case-insensitive comparison - if (isinstance(value, base_text_type) and - isinstance(found[prop], base_text_type)): - if value.lower() != found[prop].lower(): - log.debug("Wrong prop value [str] for '%s': expected = '%s' - received = '%s'" % (prop, u(value), u(found[prop]))) - fails[filename].append("'%s': expected = '%s' - received = '%s'" % (prop, u(value), u(found[prop]))) - - elif isinstance(value, list) and isinstance(found[prop], list): - if found[prop] and isinstance(found[prop][0], babelfish.Language): - # list of languages - s1 = 
set(Language.fromguessit(s) for s in value) - s2 = set(found[prop]) - else: - # by default we assume list of strings and do a case-insensitive - # comparison on their elements - s1 = set(u(s).lower() for s in value) - s2 = set(u(s).lower() for s in found[prop]) - - if s1 != s2: - log.debug("Wrong prop value [list] for '%s': expected = '%s' - received = '%s'" % (prop, u(value), u(found[prop]))) - fails[filename].append("'%s': expected = '%s' - received = '%s'" % (prop, u(value), u(found[prop]))) - - elif isinstance(found[prop], babelfish.Language): - try: - if babelfish.Language.fromguessit(value) != found[prop]: - raise ValueError - except: - log.debug("Wrong prop value [Language] for '%s': expected = '%s' - received = '%s'" % (prop, u(value), u(found[prop]))) - fails[filename].append("'%s': expected = '%s' - received = '%s'" % (prop, u(value), u(found[prop]))) - - elif isinstance(found[prop], babelfish.Country): - try: - if babelfish.Country.fromguessit(value) != found[prop]: - raise ValueError - except: - log.debug("Wrong prop value [Country] for '%s': expected = '%s' - received = '%s'" % (prop, u(value), u(found[prop]))) - fails[filename].append("'%s': expected = '%s' - received = '%s'" % (prop, u(value), u(found[prop]))) - - - # otherwise, just compare their values directly - else: - if found[prop] != value: - log.debug("Wrong prop value for '%s': expected = '%s' [%s] - received = '%s' [%s]" % (prop, u(value), type(value), u(found[prop]), type(found[prop]))) - fails[filename].append("'%s': expected = '%s' [%s] - received = '%s' [%s]" % (prop, u(value), type(value), u(found[prop]), type(found[prop]))) - - # look for additional properties - for prop, value in found.items(): - if prop not in required_fields: - log.debug("Found additional info for prop = '%s': '%s'" % (prop, u(value))) - additionals[filename].append("'%s': '%s'" % (prop, u(value))) - - correct = total - len(fails) - log.info('SUMMARY: Guessed correctly %d out of %d filenames' % (correct, total)) - 
- for failed_entry, failed_properties in fails.items(): - log.error('---- ' + failed_entry + ' ----') - for failed_property in failed_properties: - log.error("FAILED: " + failed_property) - - for additional_entry, additional_properties in additionals.items(): - log.warning('---- ' + additional_entry + ' ----') - for additional_property in additional_properties: - log.warning("ADDITIONAL: " + additional_property) - - self.assertTrue(correct == total, - msg='Correct: %d < Total: %d' % (correct, total)) diff --git a/libs/guessit/test/movies.yaml b/libs/guessit/test/movies.yml similarity index 60% rename from libs/guessit/test/movies.yaml rename to libs/guessit/test/movies.yml index 7894ef69..a132b116 100644 --- a/libs/guessit/test/movies.yaml +++ b/libs/guessit/test/movies.yml @@ -1,91 +1,93 @@ +? __default__ +: type: movie ? Movies/Fear and Loathing in Las Vegas (1998)/Fear.and.Loathing.in.Las.Vegas.720p.HDDVD.DTS.x264-ESiR.mkv : title: Fear and Loathing in Las Vegas year: 1998 - screenSize: 720p + screen_size: 720p format: HD-DVD - audioCodec: DTS - videoCodec: h264 - releaseGroup: ESiR + audio_codec: DTS + video_codec: h264 + container: mkv + release_group: ESiR ? Movies/El Dia de la Bestia (1995)/El.dia.de.la.bestia.DVDrip.Spanish.DivX.by.Artik[SEDG].avi : title: El Dia de la Bestia year: 1995 format: DVD language: spanish - videoCodec: DivX - releaseGroup: Artik[SEDG] + video_codec: DivX + release_group: Artik[SEDG] + container: avi ? Movies/Dark City (1998)/Dark.City.(1998).DC.BDRip.720p.DTS.X264-CHD.mkv : title: Dark City year: 1998 format: BluRay - screenSize: 720p - audioCodec: DTS - videoCodec: h264 - releaseGroup: CHD + screen_size: 720p + audio_codec: DTS + video_codec: h264 + release_group: CHD ? 
Movies/Sin City (BluRay) (2005)/Sin.City.2005.BDRip.720p.x264.AC3-SEPTiC.mkv : title: Sin City year: 2005 format: BluRay - screenSize: 720p - videoCodec: h264 - audioCodec: AC3 - releaseGroup: SEPTiC - + screen_size: 720p + video_codec: h264 + audio_codec: AC3 + release_group: SEPTiC ? Movies/Borat (2006)/Borat.(2006).R5.PROPER.REPACK.DVDRip.XviD-PUKKA.avi : title: Borat year: 2006 - other: PROPER + proper_count: 2 format: DVD other: [ R5, Proper ] - videoCodec: XviD - releaseGroup: PUKKA - + video_codec: XviD + release_group: PUKKA ? "[XCT].Le.Prestige.(The.Prestige).DVDRip.[x264.HP.He-Aac.{Fr-Eng}.St{Fr-Eng}.Chaps].mkv" : title: Le Prestige format: DVD - videoCodec: h264 - videoProfile: HP - audioCodec: AAC - audioProfile: HE + video_codec: h264 + video_profile: HP + audio_codec: AAC + audio_profile: HE language: [ french, english ] - subtitleLanguage: [ french, english ] - releaseGroup: XCT + subtitle_language: [ french, english ] + release_group: Chaps ? Battle Royale (2000)/Battle.Royale.(Batoru.Rowaiaru).(2000).(Special.Edition).CD1of2.DVDRiP.XviD-[ZeaL].avi : title: Battle Royale year: 2000 - edition: special edition - cdNumber: 1 - cdNumberTotal: 2 + edition: Special Edition + cd: 1 + cd_count: 2 format: DVD - videoCodec: XviD - releaseGroup: ZeaL + video_codec: XviD + release_group: ZeaL ? Movies/Brazil (1985)/Brazil_Criterion_Edition_(1985).CD2.avi : title: Brazil edition: Criterion Edition year: 1985 - cdNumber: 2 + cd: 2 ? Movies/Persepolis (2007)/[XCT] Persepolis [H264+Aac-128(Fr-Eng)+ST(Fr-Eng)+Ind].mkv : title: Persepolis year: 2007 - videoCodec: h264 - audioCodec: AAC + video_codec: h264 + audio_codec: AAC language: [ French, English ] - subtitleLanguage: [ French, English ] - releaseGroup: XCT + subtitle_language: [ French, English ] + release_group: Ind ? Movies/Toy Story (1995)/Toy Story [HDTV 720p English-Spanish].mkv : title: Toy Story year: 1995 format: HDTV - screenSize: 720p + screen_size: 720p language: [ english, spanish ] ? 
Movies/Office Space (1999)/Office.Space.[Dual-DVDRip].[Spanish-English].[XviD-AC3-AC3].[by.Oswald].avi @@ -93,84 +95,85 @@ year: 1999 format: DVD language: [ english, spanish ] - videoCodec: XviD - audioCodec: AC3 + video_codec: XviD + audio_codec: AC3 ? Movies/Wild Zero (2000)/Wild.Zero.DVDivX-EPiC.avi : title: Wild Zero year: 2000 - videoCodec: DivX - releaseGroup: EPiC + video_codec: DivX + release_group: EPiC ? movies/Baraka_Edition_Collector.avi : title: Baraka - edition: collector edition + edition: Collector Edition ? Movies/Blade Runner (1982)/Blade.Runner.(1982).(Director's.Cut).CD1.DVDRip.XviD.AC3-WAF.avi : title: Blade Runner year: 1982 - edition: Director's Cut - cdNumber: 1 + edition: Director's cut + cd: 1 format: DVD - videoCodec: XviD - audioCodec: AC3 - releaseGroup: WAF + video_codec: XviD + audio_codec: AC3 + release_group: WAF ? movies/American.The.Bill.Hicks.Story.2009.DVDRip.XviD-EPiSODE.[UsaBit.com]/UsaBit.com_esd-americanbh.avi : title: American The Bill Hicks Story year: 2009 format: DVD - videoCodec: XviD - releaseGroup: EPiSODE + video_codec: XviD + release_group: EPiSODE website: UsaBit.com ? movies/Charlie.And.Boots.DVDRip.XviD-TheWretched/wthd-cab.avi : title: Charlie And Boots format: DVD - videoCodec: XviD - releaseGroup: TheWretched + video_codec: XviD + release_group: TheWretched ? movies/Steig Larsson Millenium Trilogy (2009) BRrip 720 AAC x264/(1)The Girl With The Dragon Tattoo (2009) BRrip 720 AAC x264.mkv : title: The Girl With The Dragon Tattoo - filmSeries: Steig Larsson Millenium Trilogy - filmNumber: 1 + #film_title: Steig Larsson Millenium Trilogy + #film: 1 year: 2009 format: BluRay - audioCodec: AAC - videoCodec: h264 - screenSize: 720p + audio_codec: AAC + video_codec: h264 + screen_size: 720p ? 
movies/Greenberg.REPACK.LiMiTED.DVDRip.XviD-ARROW/arw-repack-greenberg.dvdrip.xvid.avi : title: Greenberg format: DVD - videoCodec: XviD - releaseGroup: ARROW + video_codec: XviD + release_group: ARROW other: ['Proper', 'Limited'] + proper_count: 1 ? Movies/Fr - Paris 2054, Renaissance (2005) - De Christian Volckman - (Film Divx Science Fiction Fantastique Thriller Policier N&B).avi : title: Paris 2054, Renaissance year: 2005 language: french - videoCodec: DivX + video_codec: DivX ? Movies/[阿维达].Avida.2006.FRENCH.DVDRiP.XViD-PROD.avi : title: Avida year: 2006 language: french format: DVD - videoCodec: XviD - releaseGroup: PROD + video_codec: XviD + release_group: PROD ? Movies/Alice in Wonderland DVDRip.XviD-DiAMOND/dmd-aw.avi : title: Alice in Wonderland format: DVD - videoCodec: XviD - releaseGroup: DiAMOND + video_codec: XviD + release_group: DiAMOND ? Movies/Ne.Le.Dis.A.Personne.Fr 2 cd/personnea_mp.avi : title: Ne Le Dis A Personne language: french - cdNumberTotal: 2 + cd_count: 2 ? Movies/Bunker Palace Hôtel (Enki Bilal) (1989)/Enki Bilal - Bunker Palace Hotel (Fr Vhs Rip).avi : title: Bunker Palace Hôtel @@ -182,33 +185,33 @@ : title: "21" year: 2008 format: DVD - videoCodec: h264 - audioCodec: AC3 - releaseGroup: FtS + video_codec: h264 + audio_codec: AC3 + release_group: FtS website: sharethefiles.com ? Movies/9 (2009)/9.2009.Blu-ray.DTS.720p.x264.HDBRiSe.[sharethefiles.com].mkv : title: "9" year: 2009 format: BluRay - audioCodec: DTS - screenSize: 720p - videoCodec: h264 - releaseGroup: HDBRiSe + audio_codec: DTS + screen_size: 720p + video_codec: h264 + release_group: HDBRiSe website: sharethefiles.com ? Movies/Mamma.Mia.2008.DVDRip.AC3.XviD-CrazyTeam/Mamma.Mia.2008.DVDRip.AC3.XviD-CrazyTeam.avi : title: Mamma Mia year: 2008 format: DVD - audioCodec: AC3 - videoCodec: XviD - releaseGroup: CrazyTeam + audio_codec: AC3 + video_codec: XviD + release_group: CrazyTeam ? Movies/M.A.S.H. 
(1970)/MASH.(1970).[Divx.5.02][Dual-Subtitulos][DVDRip].ogm -: title: M.A.S.H. +: title: MASH year: 1970 - videoCodec: DivX + video_codec: DivX format: DVD ? Movies/The Doors (1991)/09.03.08.The.Doors.(1991).BDRip.720p.AC3.X264-HiS@SiLUHD-English.[sharethefiles.com].mkv @@ -216,10 +219,10 @@ year: 1991 date: 2008-03-09 format: BluRay - screenSize: 720p - audioCodec: AC3 - videoCodec: h264 - releaseGroup: HiS@SiLUHD + screen_size: 720p + audio_codec: AC3 + video_codec: h264 + release_group: HiS@SiLUHD language: english website: sharethefiles.com @@ -229,10 +232,10 @@ year: 1991 date: 2008-03-09 format: BluRay - screenSize: 720p - audioCodec: AC3 - videoCodec: h264 - releaseGroup: HiS@SiLUHD + screen_size: 720p + audio_codec: AC3 + video_codec: h264 + release_group: HiS@SiLUHD language: english website: sharethefiles.com @@ -240,79 +243,78 @@ : title: Ratatouille format: DVD -? Movies/001 __ A classer/Fantomas se déchaine - Louis de Funès.avi -: title: Fantomas se déchaine +# Removing this one because 001 is guessed as an episode number. +# ? Movies/001 __ A classer/Fantomas se déchaine - Louis de Funès.avi +# : title: Fantomas se déchaine ? Movies/Comme une Image (2004)/Comme.Une.Image.FRENCH.DVDRiP.XViD-NTK.par-www.divx-overnet.com.avi : title: Comme une Image year: 2004 language: french format: DVD - videoCodec: XviD - releaseGroup: NTK + video_codec: XviD + release_group: NTK website: www.divx-overnet.com ? Movies/Fantastic Mr Fox/Fantastic.Mr.Fox.2009.DVDRip.{x264+LC-AAC.5.1}{Fr-Eng}{Sub.Fr-Eng}-â„¢.[sharethefiles.com].mkv : title: Fantastic Mr Fox year: 2009 format: DVD - videoCodec: h264 - audioCodec: AAC - audioProfile: LC - audioChannels: "5.1" + video_codec: h264 + audio_codec: AAC + audio_profile: LC + audio_channels: "5.1" language: [ french, english ] - subtitleLanguage: [ french, english ] + subtitle_language: [ french, english ] website: sharethefiles.com ? 
Movies/Somewhere.2010.DVDRip.XviD-iLG/i-smwhr.avi : title: Somewhere year: 2010 format: DVD - videoCodec: XviD - releaseGroup: iLG + video_codec: XviD + release_group: iLG ? Movies/Moon_(2009).mkv : title: Moon year: 2009 -? Movies/Moon_(2009)-x01.mkv -: title: Moon - year: 2009 - bonusNumber: 1 - ? Movies/Moon_(2009)-x02-Making_Of.mkv : title: Moon year: 2009 - bonusNumber: 2 - bonusTitle: Making Of + bonus: 2 + bonus_title: Making Of ? movies/James_Bond-f17-Goldeneye.mkv : title: Goldeneye - filmSeries: James Bond - filmNumber: 17 + film_title: James Bond + film: 17 + ? /movies/James_Bond-f21-Casino_Royale.mkv : title: Casino Royale - filmSeries: James Bond - filmNumber: 21 + film_title: James Bond + film: 21 ? /movies/James_Bond-f21-Casino_Royale-x01-Becoming_Bond.mkv : title: Casino Royale - filmSeries: James Bond - filmNumber: 21 - bonusNumber: 1 - bonusTitle: Becoming Bond + film_title: James Bond + film: 21 + bonus: 1 + bonus_title: Becoming Bond ? /movies/James_Bond-f21-Casino_Royale-x02-Stunts.mkv : title: Casino Royale - filmSeries: James Bond - filmNumber: 21 - bonusNumber: 2 - bonusTitle: Stunts + film_title: James Bond + film: 21 + bonus: 2 + bonus_title: Stunts ? OSS_117--Cairo,_Nest_of_Spies.mkv -: title: OSS 117--Cairo, Nest of Spies +: title: OSS 117 +# TODO: Implement subTitle for movies. +? The Godfather Part 3.mkv ? The Godfather Part III.mkv : title: The Godfather part: 3 @@ -324,50 +326,52 @@ ? The_Insider-(1999)-x02-60_Minutes_Interview-1996.mp4 : title: The Insider year: 1999 - bonusNumber: 2 - bonusTitle: 60 Minutes Interview-1996 + bonus: 2 + bonus_title: 60 Minutes Interview-1996 ? Rush.._Beyond_The_Lighted_Stage-x09-Between_Sun_and_Moon-2002_Hartford.mkv : title: Rush Beyond The Lighted Stage - bonusNumber: 9 - bonusTitle: Between Sun and Moon-2002 Hartford + bonus: 9 + bonus_title: Between Sun and Moon + year: 2002 ? 
/public/uTorrent/Downloads Finished/Movies/Indiana.Jones.and.the.Temple.of.Doom.1984.HDTV.720p.x264.AC3.5.1-REDµX/Indiana.Jones.and.the.Temple.of.Doom.1984.HDTV.720p.x264.AC3.5.1-REDµX.mkv : title: Indiana Jones and the Temple of Doom year: 1984 format: HDTV - screenSize: 720p - videoCodec: h264 - audioCodec: AC3 - audioChannels: "5.1" - releaseGroup: REDµX + screen_size: 720p + video_codec: h264 + audio_codec: AC3 + audio_channels: "5.1" + release_group: REDµX ? The.Director’s.Notebook.2006.Blu-Ray.x264.DXVA.720p.AC3-de[42].mkv : title: The Director’s Notebook year: 2006 format: BluRay - videoCodec: h264 - videoApi: DXVA - screenSize: 720p - audioCodec: AC3 - releaseGroup: de[42] + video_codec: h264 + video_api: DXVA + screen_size: 720p + audio_codec: AC3 + release_group: de[42] + ? Movies/Cosmopolis.2012.LiMiTED.720p.BluRay.x264-AN0NYM0US[bb]/ano-cosmo.720p.mkv : title: Cosmopolis year: 2012 - screenSize: 720p - videoCodec: h264 - releaseGroup: AN0NYM0US[bb] + screen_size: 720p + video_codec: h264 + release_group: AN0NYM0US[bb] format: BluRay - other: LIMITED + other: Limited -? movies/La Science des ReÌ‚ves (2006)/La.Science.Des.Reves.FRENCH.DVDRip.XviD-MP-AceBot.avi +? movies/La Science des Rêves (2006)/La.Science.Des.Reves.FRENCH.DVDRip.XviD-MP-AceBot.avi : title: La Science des Rêves year: 2006 format: DVD - videoCodec: XviD - videoProfile: MP - releaseGroup: AceBot + video_codec: XviD + video_profile: MP + release_group: AceBot language: French ? The_Italian_Job.mkv @@ -376,76 +380,76 @@ ? The.Rum.Diary.2011.1080p.BluRay.DTS.x264.D-Z0N3.mkv : title: The Rum Diary year: 2011 - screenSize: 1080p + screen_size: 1080p format: BluRay - videoCodec: h264 - audioCodec: DTS - releaseGroup: D-Z0N3 + video_codec: h264 + audio_codec: DTS + release_group: D-Z0N3 ? 
Life.Of.Pi.2012.1080p.BluRay.DTS.x264.D-Z0N3.mkv : title: Life Of Pi year: 2012 - screenSize: 1080p + screen_size: 1080p format: BluRay - videoCodec: h264 - audioCodec: DTS - releaseGroup: D-Z0N3 + video_codec: h264 + audio_codec: DTS + release_group: D-Z0N3 ? The.Kings.Speech.2010.1080p.BluRay.DTS.x264.D Z0N3.mkv : title: The Kings Speech year: 2010 - screenSize: 1080p + screen_size: 1080p format: BluRay - audioCodec: DTS - videoCodec: h264 - releaseGroup: D Z0N3 + audio_codec: DTS + video_codec: h264 + release_group: D Z0N3 ? Street.Kings.2008.BluRay.1080p.DTS.x264.dxva EuReKA.mkv : title: Street Kings year: 2008 format: BluRay - screenSize: 1080p - audioCodec: DTS - videoCodec: h264 - videoApi: DXVA - releaseGroup: EuReKa + screen_size: 1080p + audio_codec: DTS + video_codec: h264 + video_api: DXVA + release_group: EuReKA ? 2001.A.Space.Odyssey.1968.HDDVD.1080p.DTS.x264.dxva EuReKA.mkv : title: 2001 A Space Odyssey year: 1968 format: HD-DVD - screenSize: 1080p - audioCodec: DTS - videoCodec: h264 - videoApi: DXVA - releaseGroup: EuReKa + screen_size: 1080p + audio_codec: DTS + video_codec: h264 + video_api: DXVA + release_group: EuReKA ? 2012.2009.720p.BluRay.x264.DTS WiKi.mkv : title: "2012" year: 2009 - screenSize: 720p + screen_size: 720p format: BluRay - videoCodec: h264 - audioCodec: DTS - releaseGroup: WiKi + video_codec: h264 + audio_codec: DTS + release_group: WiKi ? /share/Download/movie/Dead Man Down (2013) BRRiP XViD DD5_1 Custom NLSubs =-_lt Q_o_Q gt-=_/XD607ebb-BRc59935-5155473f-1c5f49/XD607ebb-BRc59935-5155473f-1c5f49.avi : title: Dead Man Down year: 2013 format: BluRay - videoCodec: XviD - audioChannels: "5.1" - audioCodec: DolbyDigital - idNumber: XD607ebb-BRc59935-5155473f-1c5f49 + video_codec: XviD + audio_channels: "5.1" + audio_codec: DolbyDigital + uuid: XD607ebb-BRc59935-5155473f-1c5f49 ? 
Pacific.Rim.3D.2013.COMPLETE.BLURAY-PCH.avi : title: Pacific Rim year: 2013 format: BluRay other: - - complete + - Complete - 3D - releaseGroup: PCH + release_group: PCH ? Immersion.French.2011.STV.READNFO.QC.FRENCH.ENGLISH.NTSC.DVDR.nfo : title: Immersion French @@ -454,64 +458,69 @@ - French - English format: DVD + other: NTSC ? Immersion.French.2011.STV.READNFO.QC.FRENCH.NTSC.DVDR.nfo : title: Immersion French year: 2011 language: French format: DVD + other: NTSC ? Immersion.French.2011.STV.READNFO.QC.NTSC.DVDR.nfo -: title: Immersion French +: title: Immersion + language: French year: 2011 format: DVD + other: NTSC ? French.Immersion.2011.STV.READNFO.QC.ENGLISH.NTSC.DVDR.nfo : title: French Immersion year: 2011 language: ENGLISH format: DVD + other: NTSC ? Howl's_Moving_Castle_(2004)_[720p,HDTV,x264,DTS]-FlexGet.avi -: videoCodec: h264 +: video_codec: h264 format: HDTV title: Howl's Moving Castle - screenSize: 720p + screen_size: 720p year: 2004 - audioCodec: DTS - releaseGroup: FlexGet + audio_codec: DTS + release_group: FlexGet ? Pirates de langkasuka.2008.FRENCH.1920X1080.h264.AVC.AsiaRa.mkv -: screenSize: 1080p +: screen_size: 1080p year: 2008 language: French - videoCodec: h264 + video_codec: h264 title: Pirates de langkasuka - releaseGroup: AsiaRa + release_group: AsiaRa ? Masala (2013) Telugu Movie HD DVDScr XviD - Exclusive.avi : year: 2013 - videoCodec: XviD + video_codec: XviD title: Masala format: HD-DVD - other: screener + other: Screener language: Telugu - releaseGroup: Exclusive + release_group: Exclusive ? Django Unchained 2012 DVDSCR X264 AAC-P2P.nfo : year: 2012 - other: screener - videoCodec: h264 + other: Screener + video_codec: h264 title: Django Unchained - audioCodec: AAC + audio_codec: AAC format: DVD - releaseGroup: P2P + release_group: P2P ? 
Ejecutiva.En.Apuros(2009).BLURAY.SCR.Xvid.Spanish.LanzamientosD.nfo : year: 2009 - other: screener + other: Screener format: BluRay - videoCodec: XviD + video_codec: XviD language: Spanish title: Ejecutiva En Apuros @@ -521,26 +530,26 @@ language: - Multiple languages - German - videoCodec: h264 - releaseGroup: EXQUiSiTE - screenSize: 1080p + video_codec: h264 + release_group: EXQUiSiTE + screen_size: 1080p ? Rocky 1976 French SubForced BRRip x264 AC3-FUNKY.mkv : title: Rocky year: 1976 - subtitleLanguage: French + subtitle_language: French format: BluRay - videoCodec: h264 - audioCodec: AC3 - releaseGroup: FUNKY + video_codec: h264 + audio_codec: AC3 + release_group: FUNKY ? REDLINE (BD 1080p H264 10bit FLAC) [3xR].mkv : title: REDLINE format: BluRay - videoCodec: h264 - videoProfile: 10bit - audioCodec: Flac - screenSize: 1080p + video_codec: h264 + video_profile: 10bit + audio_codec: FLAC + screen_size: 1080p ? The.Lizzie.McGuire.Movie.(2003).HR.DVDRiP.avi : title: The Lizzie McGuire Movie @@ -550,205 +559,279 @@ ? Hua.Mulan.BRRIP.MP4.x264.720p-HR.avi : title: Hua Mulan - videoCodec: h264 + video_codec: h264 format: BluRay - screenSize: 720p + screen_size: 720p other: HR ? Dr.Seuss.The.Lorax.2012.DVDRip.LiNE.XviD.AC3.HQ.Hive-CM8.mp4 -: videoCodec: XviD +: video_codec: XviD title: Dr Seuss The Lorax format: DVD other: LiNE year: 2012 - audioCodec: AC3 - audioProfile: HQ - releaseGroup: Hive-CM8 - + audio_codec: AC3 + audio_profile: HQ + release_group: Hive-CM8 ? "Star Wars: Episode IV - A New Hope (2004) Special Edition.MKV" -: title: Star Wars Episode IV +: title: "Star Wars: Episode IV" + alternative_title: A New Hope year: 2004 edition: Special Edition - + ? Dr.LiNE.The.Lorax.2012.DVDRip.LiNE.XviD.AC3.HQ.Hive-CM8.mp4 -: videoCodec: XviD +: video_codec: XviD title: Dr LiNE The Lorax format: DVD other: LiNE year: 2012 - audioCodec: AC3 - audioProfile: HQ - releaseGroup: Hive-CM8 + audio_codec: AC3 + audio_profile: HQ + release_group: Hive-CM8 + +? 
Dr.LiNE.The.Lorax.2012.DVDRip.XviD.AC3.HQ.Hive-CM8.mp4 +: video_codec: XviD + title: Dr LiNE The Lorax + format: DVD + year: 2012 + audio_codec: AC3 + audio_profile: HQ + release_group: Hive-CM8 ? Perfect Child-2007-TRUEFRENCH-TVRip.Xvid-h@mster.avi -: releaseGroup: h@mster +: release_group: h@mster title: Perfect Child - videoCodec: XviD + video_codec: XviD language: French format: TV year: 2007 - + ? entre.ciel.et.terre.(1994).dvdrip.h264.aac-psypeon.avi -: audioCodec: AAC +: audio_codec: AAC format: DVD - releaseGroup: psypeon + release_group: psypeon title: entre ciel et terre - videoCodec: h264 + video_codec: h264 year: 1994 - + ? Yves.Saint.Laurent.2013.FRENCH.DVDSCR.MD.XviD-ViVARiUM.avi : format: DVD language: French - other: Screener - releaseGroup: ViVARiUM + other: + - MD + - Screener + release_group: ViVARiUM title: Yves Saint Laurent - videoCodec: XviD + video_codec: XviD year: 2013 - + ? Echec et Mort - Hard to Kill - Steven Seagal Multi 1080p BluRay x264 CCATS.avi : format: BluRay language: Multiple languages - releaseGroup: CCATS - screenSize: 1080p + release_group: CCATS + screen_size: 1080p title: Echec et Mort - videoCodec: h264 + alternative_title: + - Hard to Kill + - Steven Seagal + video_codec: h264 ? Paparazzi - Timsit/Lindon (MKV 1080p tvripHD) : options: -n title: Paparazzi - screenSize: 1080p + alternative_title: + - Timsit + - Lindon + screen_size: 1080p + container: MKV format: HDTV - + ? some.movie.720p.bluray.x264-mind -: options: -n - title: some movie - screenSize: 720p - videoCodec: h264 - releaseGroup: mind +: title: some movie + screen_size: 720p + video_codec: h264 + release_group: mind format: BluRay - + ? Dr LiNE The Lorax 720p h264 BluRay -: options: -n - title: Dr LiNE The Lorax - screenSize: 720p - videoCodec: h264 +: title: Dr LiNE The Lorax + screen_size: 720p + video_codec: h264 format: BluRay -? BeatdownFrenchDVDRip.mkv -: options: -c - title: Beatdown - language: French - format: DVD +#TODO: Camelcase implementation +#? 
BeatdownFrenchDVDRip.mkv +#: options: -c +# title: Beatdown +# language: French +# format: DVD + +#? YvesSaintLaurent2013FrenchDVDScrXvid.avi +#: options: -c +# format: DVD +# language: French +# other: Screener +# title: Yves saint laurent +# video_codec: XviD +# year: 2013 -? YvesSaintLaurent2013FrenchDVDScrXvid.avi -: options: -c - format: DVD - language: French - other: Screener - title: Yves saint laurent - videoCodec: XviD - year: 2013 ? Elle.s.en.va.720p.mkv -: screenSize: 720p +: screen_size: 720p title: Elle s en va ? FooBar.7.PDTV-FlexGet -: options: -n - format: DVB - releaseGroup: FlexGet +: format: DVB + release_group: FlexGet title: FooBar 7 ? h265 - HEVC Riddick Unrated Director Cut French 1080p DTS.mkv -: audioCodec: DTS +: audio_codec: DTS edition: Director's cut language: fr - screenSize: 1080p - title: Riddick Unrated - videoCodec: h265 + screen_size: 1080p + title: Riddick + other: Unrated + video_codec: h265 ? "[h265 - HEVC] Riddick Unrated Director Cut French [1080p DTS].mkv" -: audioCodec: DTS +: audio_codec: DTS edition: Director's cut language: fr - screenSize: 1080p - title: Riddick Unrated - videoCodec: h265 + screen_size: 1080p + title: Riddick + other: Unrated + video_codec: h265 ? Barbecue-2014-French-mHD-1080p -: options: -n - language: fr +: language: fr other: mHD - screenSize: 1080p + screen_size: 1080p title: Barbecue year: 2014 ? Underworld Quadrilogie VO+VFF+VFQ 1080p HDlight.x264~Tonyk~Monde Infernal -: options: -n - language: - - fr - - vo - other: HDLight - screenSize: 1080p +: language: fr + other: + - HDLight + - OV + screen_size: 1080p title: Underworld Quadrilogie - videoCodec: h264 + video_codec: h264 ? A Bout Portant (The Killers).PAL.Multi.DVD-R-KZ -: options: -n - format: DVD +: format: DVD language: mul - releaseGroup: KZ + release_group: KZ title: A Bout Portant ? "Mise à Sac (Alain Cavalier, 1967) [Vhs.Rip.Vff]" -: options: -n - format: VHS +: format: VHS language: fr title: "Mise à Sac" year: 1967 ? 
A Bout Portant (The Killers).PAL.Multi.DVD-R-KZ -: options: -n - format: DVD +: format: DVD + other: PAL language: mul - releaseGroup: KZ + release_group: KZ title: A Bout Portant ? Youth.In.Revolt.(Be.Bad).2009.MULTI.1080p.LAME3*92-MEDIOZZ -: options: -n - audioCodec: MP3 +: audio_codec: MP3 language: mul - releaseGroup: MEDIOZZ - screenSize: 1080p + release_group: MEDIOZZ + screen_size: 1080p title: Youth In Revolt year: 2009 ? La Defense Lincoln (The Lincoln Lawyer) 2011 [DVDRIP][Vostfr] -: options: -n - format: DVD - subtitleLanguage: fr +: format: DVD + subtitle_language: fr title: La Defense Lincoln year: 2011 ? '[h265 - HEVC] Fight Club French 1080p DTS.' -: options: -n - audioCodec: DTS +: audio_codec: DTS language: fr - screenSize: 1080p + screen_size: 1080p title: Fight Club - videoCodec: h265 + video_codec: h265 ? Love Gourou (Mike Myers) - FR -: options: -n - language: fr +: language: fr title: Love Gourou ? '[h265 - hevc] transformers 2 1080p french ac3 6ch.' -: options: -n - audioChannels: '5.1' - audioCodec: AC3 +: audio_channels: '5.1' + audio_codec: AC3 language: fr - screenSize: 1080p + screen_size: 1080p title: transformers 2 - videoCodec: h265 + video_codec: h265 + +? 1.Angry.Man.1957.mkv +: title: 1 Angry Man + year: 1957 + +? 12.Angry.Men.1957.mkv +: title: 12 Angry Men + year: 1957 + +? 123.Angry.Men.1957.mkv +: title: 123 Angry Men + year: 1957 + +? "Looney Tunes 1444x866 Porky's Last Stand.mkv" +: screen_size: 1444x866 + title: Looney Tunes + +? Das.Appartement.German.AC3D.DL.720p.BluRay.x264-TVP +: audio_codec: AC3 + format: BluRay + language: mul + release_group: TVP + screen_size: 720p + title: Das Appartement German + type: movie + video_codec: h264 + +? Das.Appartement.GERMAN.AC3D.DL.720p.BluRay.x264-TVP +: audio_codec: AC3 + format: BluRay + language: + - de + - mul + release_group: TVP + screen_size: 720p + title: Das Appartement + video_codec: h264 + +? 
Hyena.Road.2015.German.1080p.DL.DTSHD.Bluray.x264-pmHD +: audio_codec: DTS + audio_profile: HD + format: BluRay + language: + - de + - mul + release_group: pmHD + screen_size: 1080p + title: Hyena Road + type: movie + video_codec: h264 + year: 2015 + +? Hyena.Road.2015.German.Ep.Title.1080p.DL.DTSHD.Bluray.x264-pmHD +: audio_codec: DTS + audio_profile: HD + episode_title: German Ep Title + format: BluRay + language: mul + release_group: pmHD + screen_size: 1080p + title: Hyena Road + type: movie + video_codec: h264 + year: 2015 diff --git a/libs/guessit/test/opensubtitles_languages_2012_05_09.txt b/libs/guessit/test/opensubtitles_languages_2012_05_09.txt deleted file mode 100644 index 4a08d9b5..00000000 --- a/libs/guessit/test/opensubtitles_languages_2012_05_09.txt +++ /dev/null @@ -1,473 +0,0 @@ -IdSubLanguage ISO639 LanguageName UploadEnabled WebEnabled -aar aa Afar, afar 0 0 -abk ab Abkhazian 0 0 -ace Achinese 0 0 -ach Acoli 0 0 -ada Adangme 0 0 -ady adyghé 0 0 -afa Afro-Asiatic (Other) 0 0 -afh Afrihili 0 0 -afr af Afrikaans 0 0 -ain Ainu 0 0 -aka ak Akan 0 0 -akk Akkadian 0 0 -alb sq Albanian 1 1 -ale Aleut 0 0 -alg Algonquian languages 0 0 -alt Southern Altai 0 0 -amh am Amharic 0 0 -ang English, Old (ca.450-1100) 0 0 -apa Apache languages 0 0 -ara ar Arabic 1 1 -arc Aramaic 0 0 -arg an Aragonese 0 0 -arm hy Armenian 1 0 -arn Araucanian 0 0 -arp Arapaho 0 0 -art Artificial (Other) 0 0 -arw Arawak 0 0 -asm as Assamese 0 0 -ast Asturian, Bable 0 0 -ath Athapascan languages 0 0 -aus Australian languages 0 0 -ava av Avaric 0 0 -ave ae Avestan 0 0 -awa Awadhi 0 0 -aym ay Aymara 0 0 -aze az Azerbaijani 0 0 -bad Banda 0 0 -bai Bamileke languages 0 0 -bak ba Bashkir 0 0 -bal Baluchi 0 0 -bam bm Bambara 0 0 -ban Balinese 0 0 -baq eu Basque 1 1 -bas Basa 0 0 -bat Baltic (Other) 0 0 -bej Beja 0 0 -bel be Belarusian 0 0 -bem Bemba 0 0 -ben bn Bengali 1 0 -ber Berber (Other) 0 0 -bho Bhojpuri 0 0 -bih bh Bihari 0 0 -bik Bikol 0 0 -bin Bini 0 0 -bis bi Bislama 0 0 -bla 
Siksika 0 0 -bnt Bantu (Other) 0 0 -bos bs Bosnian 1 0 -bra Braj 0 0 -bre br Breton 1 0 -btk Batak (Indonesia) 0 0 -bua Buriat 0 0 -bug Buginese 0 0 -bul bg Bulgarian 1 1 -bur my Burmese 0 0 -byn Blin 0 0 -cad Caddo 0 0 -cai Central American Indian (Other) 0 0 -car Carib 0 0 -cat ca Catalan 1 1 -cau Caucasian (Other) 0 0 -ceb Cebuano 0 0 -cel Celtic (Other) 0 0 -cha ch Chamorro 0 0 -chb Chibcha 0 0 -che ce Chechen 0 0 -chg Chagatai 0 0 -chi zh Chinese 1 1 -chk Chuukese 0 0 -chm Mari 0 0 -chn Chinook jargon 0 0 -cho Choctaw 0 0 -chp Chipewyan 0 0 -chr Cherokee 0 0 -chu cu Church Slavic 0 0 -chv cv Chuvash 0 0 -chy Cheyenne 0 0 -cmc Chamic languages 0 0 -cop Coptic 0 0 -cor kw Cornish 0 0 -cos co Corsican 0 0 -cpe Creoles and pidgins, English based (Other) 0 0 -cpf Creoles and pidgins, French-based (Other) 0 0 -cpp Creoles and pidgins, Portuguese-based (Other) 0 0 -cre cr Cree 0 0 -crh Crimean Tatar 0 0 -crp Creoles and pidgins (Other) 0 0 -csb Kashubian 0 0 -cus Cushitic (Other)' couchitiques, autres langues 0 0 -cze cs Czech 1 1 -dak Dakota 0 0 -dan da Danish 1 1 -dar Dargwa 0 0 -day Dayak 0 0 -del Delaware 0 0 -den Slave (Athapascan) 0 0 -dgr Dogrib 0 0 -din Dinka 0 0 -div dv Divehi 0 0 -doi Dogri 0 0 -dra Dravidian (Other) 0 0 -dua Duala 0 0 -dum Dutch, Middle (ca.1050-1350) 0 0 -dut nl Dutch 1 1 -dyu Dyula 0 0 -dzo dz Dzongkha 0 0 -efi Efik 0 0 -egy Egyptian (Ancient) 0 0 -eka Ekajuk 0 0 -elx Elamite 0 0 -eng en English 1 1 -enm English, Middle (1100-1500) 0 0 -epo eo Esperanto 1 0 -est et Estonian 1 1 -ewe ee Ewe 0 0 -ewo Ewondo 0 0 -fan Fang 0 0 -fao fo Faroese 0 0 -fat Fanti 0 0 -fij fj Fijian 0 0 -fil Filipino 0 0 -fin fi Finnish 1 1 -fiu Finno-Ugrian (Other) 0 0 -fon Fon 0 0 -fre fr French 1 1 -frm French, Middle (ca.1400-1600) 0 0 -fro French, Old (842-ca.1400) 0 0 -fry fy Frisian 0 0 -ful ff Fulah 0 0 -fur Friulian 0 0 -gaa Ga 0 0 -gay Gayo 0 0 -gba Gbaya 0 0 -gem Germanic (Other) 0 0 -geo ka Georgian 1 1 -ger de German 1 1 -gez Geez 0 0 -gil Gilbertese 0 
0 -gla gd Gaelic 0 0 -gle ga Irish 0 0 -glg gl Galician 1 1 -glv gv Manx 0 0 -gmh German, Middle High (ca.1050-1500) 0 0 -goh German, Old High (ca.750-1050) 0 0 -gon Gondi 0 0 -gor Gorontalo 0 0 -got Gothic 0 0 -grb Grebo 0 0 -grc Greek, Ancient (to 1453) 0 0 -ell el Greek 1 1 -grn gn Guarani 0 0 -guj gu Gujarati 0 0 -gwi Gwich´in 0 0 -hai Haida 0 0 -hat ht Haitian 0 0 -hau ha Hausa 0 0 -haw Hawaiian 0 0 -heb he Hebrew 1 1 -her hz Herero 0 0 -hil Hiligaynon 0 0 -him Himachali 0 0 -hin hi Hindi 1 1 -hit Hittite 0 0 -hmn Hmong 0 0 -hmo ho Hiri Motu 0 0 -hrv hr Croatian 1 1 -hun hu Hungarian 1 1 -hup Hupa 0 0 -iba Iban 0 0 -ibo ig Igbo 0 0 -ice is Icelandic 1 1 -ido io Ido 0 0 -iii ii Sichuan Yi 0 0 -ijo Ijo 0 0 -iku iu Inuktitut 0 0 -ile ie Interlingue 0 0 -ilo Iloko 0 0 -ina ia Interlingua (International Auxiliary Language Asso 0 0 -inc Indic (Other) 0 0 -ind id Indonesian 1 1 -ine Indo-European (Other) 0 0 -inh Ingush 0 0 -ipk ik Inupiaq 0 0 -ira Iranian (Other) 0 0 -iro Iroquoian languages 0 0 -ita it Italian 1 1 -jav jv Javanese 0 0 -jpn ja Japanese 1 1 -jpr Judeo-Persian 0 0 -jrb Judeo-Arabic 0 0 -kaa Kara-Kalpak 0 0 -kab Kabyle 0 0 -kac Kachin 0 0 -kal kl Kalaallisut 0 0 -kam Kamba 0 0 -kan kn Kannada 0 0 -kar Karen 0 0 -kas ks Kashmiri 0 0 -kau kr Kanuri 0 0 -kaw Kawi 0 0 -kaz kk Kazakh 1 0 -kbd Kabardian 0 0 -kha Khasi 0 0 -khi Khoisan (Other) 0 0 -khm km Khmer 1 1 -kho Khotanese 0 0 -kik ki Kikuyu 0 0 -kin rw Kinyarwanda 0 0 -kir ky Kirghiz 0 0 -kmb Kimbundu 0 0 -kok Konkani 0 0 -kom kv Komi 0 0 -kon kg Kongo 0 0 -kor ko Korean 1 1 -kos Kosraean 0 0 -kpe Kpelle 0 0 -krc Karachay-Balkar 0 0 -kro Kru 0 0 -kru Kurukh 0 0 -kua kj Kuanyama 0 0 -kum Kumyk 0 0 -kur ku Kurdish 0 0 -kut Kutenai 0 0 -lad Ladino 0 0 -lah Lahnda 0 0 -lam Lamba 0 0 -lao lo Lao 0 0 -lat la Latin 0 0 -lav lv Latvian 1 0 -lez Lezghian 0 0 -lim li Limburgan 0 0 -lin ln Lingala 0 0 -lit lt Lithuanian 1 0 -lol Mongo 0 0 -loz Lozi 0 0 -ltz lb Luxembourgish 1 0 -lua Luba-Lulua 0 0 -lub lu 
Luba-Katanga 0 0 -lug lg Ganda 0 0 -lui Luiseno 0 0 -lun Lunda 0 0 -luo Luo (Kenya and Tanzania) 0 0 -lus lushai 0 0 -mac mk Macedonian 1 1 -mad Madurese 0 0 -mag Magahi 0 0 -mah mh Marshallese 0 0 -mai Maithili 0 0 -mak Makasar 0 0 -mal ml Malayalam 0 0 -man Mandingo 0 0 -mao mi Maori 0 0 -map Austronesian (Other) 0 0 -mar mr Marathi 0 0 -mas Masai 0 0 -may ms Malay 1 1 -mdf Moksha 0 0 -mdr Mandar 0 0 -men Mende 0 0 -mga Irish, Middle (900-1200) 0 0 -mic Mi'kmaq 0 0 -min Minangkabau 0 0 -mis Miscellaneous languages 0 0 -mkh Mon-Khmer (Other) 0 0 -mlg mg Malagasy 0 0 -mlt mt Maltese 0 0 -mnc Manchu 0 0 -mni Manipuri 0 0 -mno Manobo languages 0 0 -moh Mohawk 0 0 -mol mo Moldavian 0 0 -mon mn Mongolian 1 0 -mos Mossi 0 0 -mwl Mirandese 0 0 -mul Multiple languages 0 0 -mun Munda languages 0 0 -mus Creek 0 0 -mwr Marwari 0 0 -myn Mayan languages 0 0 -myv Erzya 0 0 -nah Nahuatl 0 0 -nai North American Indian 0 0 -nap Neapolitan 0 0 -nau na Nauru 0 0 -nav nv Navajo 0 0 -nbl nr Ndebele, South 0 0 -nde nd Ndebele, North 0 0 -ndo ng Ndonga 0 0 -nds Low German 0 0 -nep ne Nepali 0 0 -new Nepal Bhasa 0 0 -nia Nias 0 0 -nic Niger-Kordofanian (Other) 0 0 -niu Niuean 0 0 -nno nn Norwegian Nynorsk 0 0 -nob nb Norwegian Bokmal 0 0 -nog Nogai 0 0 -non Norse, Old 0 0 -nor no Norwegian 1 1 -nso Northern Sotho 0 0 -nub Nubian languages 0 0 -nwc Classical Newari 0 0 -nya ny Chichewa 0 0 -nym Nyamwezi 0 0 -nyn Nyankole 0 0 -nyo Nyoro 0 0 -nzi Nzima 0 0 -oci oc Occitan 1 1 -oji oj Ojibwa 0 0 -ori or Oriya 0 0 -orm om Oromo 0 0 -osa Osage 0 0 -oss os Ossetian 0 0 -ota Turkish, Ottoman (1500-1928) 0 0 -oto Otomian languages 0 0 -paa Papuan (Other) 0 0 -pag Pangasinan 0 0 -pal Pahlavi 0 0 -pam Pampanga 0 0 -pan pa Panjabi 0 0 -pap Papiamento 0 0 -pau Palauan 0 0 -peo Persian, Old (ca.600-400 B.C.) 
0 0 -per fa Persian 1 1 -phi Philippine (Other) 0 0 -phn Phoenician 0 0 -pli pi Pali 0 0 -pol pl Polish 1 1 -pon Pohnpeian 0 0 -por pt Portuguese 1 1 -pra Prakrit languages 0 0 -pro Provençal, Old (to 1500) 0 0 -pus ps Pushto 0 0 -que qu Quechua 0 0 -raj Rajasthani 0 0 -rap Rapanui 0 0 -rar Rarotongan 0 0 -roa Romance (Other) 0 0 -roh rm Raeto-Romance 0 0 -rom Romany 0 0 -run rn Rundi 0 0 -rup Aromanian 0 0 -rus ru Russian 1 1 -sad Sandawe 0 0 -sag sg Sango 0 0 -sah Yakut 0 0 -sai South American Indian (Other) 0 0 -sal Salishan languages 0 0 -sam Samaritan Aramaic 0 0 -san sa Sanskrit 0 0 -sas Sasak 0 0 -sat Santali 0 0 -scc sr Serbian 1 1 -scn Sicilian 0 0 -sco Scots 0 0 -sel Selkup 0 0 -sem Semitic (Other) 0 0 -sga Irish, Old (to 900) 0 0 -sgn Sign Languages 0 0 -shn Shan 0 0 -sid Sidamo 0 0 -sin si Sinhalese 1 1 -sio Siouan languages 0 0 -sit Sino-Tibetan (Other) 0 0 -sla Slavic (Other) 0 0 -slo sk Slovak 1 1 -slv sl Slovenian 1 1 -sma Southern Sami 0 0 -sme se Northern Sami 0 0 -smi Sami languages (Other) 0 0 -smj Lule Sami 0 0 -smn Inari Sami 0 0 -smo sm Samoan 0 0 -sms Skolt Sami 0 0 -sna sn Shona 0 0 -snd sd Sindhi 0 0 -snk Soninke 0 0 -sog Sogdian 0 0 -som so Somali 0 0 -son Songhai 0 0 -sot st Sotho, Southern 0 0 -spa es Spanish 1 1 -srd sc Sardinian 0 0 -srr Serer 0 0 -ssa Nilo-Saharan (Other) 0 0 -ssw ss Swati 0 0 -suk Sukuma 0 0 -sun su Sundanese 0 0 -sus Susu 0 0 -sux Sumerian 0 0 -swa sw Swahili 1 0 -swe sv Swedish 1 1 -syr Syriac 1 0 -tah ty Tahitian 0 0 -tai Tai (Other) 0 0 -tam ta Tamil 0 0 -tat tt Tatar 0 0 -tel te Telugu 0 0 -tem Timne 0 0 -ter Tereno 0 0 -tet Tetum 0 0 -tgk tg Tajik 0 0 -tgl tl Tagalog 1 1 -tha th Thai 1 1 -tib bo Tibetan 0 0 -tig Tigre 0 0 -tir ti Tigrinya 0 0 -tiv Tiv 0 0 -tkl Tokelau 0 0 -tlh Klingon 0 0 -tli Tlingit 0 0 -tmh Tamashek 0 0 -tog Tonga (Nyasa) 0 0 -ton to Tonga (Tonga Islands) 0 0 -tpi Tok Pisin 0 0 -tsi Tsimshian 0 0 -tsn tn Tswana 0 0 -tso ts Tsonga 0 0 -tuk tk Turkmen 0 0 -tum Tumbuka 0 0 -tup Tupi languages 
0 0 -tur tr Turkish 1 1 -tut Altaic (Other) 0 0 -tvl Tuvalu 0 0 -twi tw Twi 0 0 -tyv Tuvinian 0 0 -udm Udmurt 0 0 -uga Ugaritic 0 0 -uig ug Uighur 0 0 -ukr uk Ukrainian 1 1 -umb Umbundu 0 0 -und Undetermined 0 0 -urd ur Urdu 1 0 -uzb uz Uzbek 0 0 -vai Vai 0 0 -ven ve Venda 0 0 -vie vi Vietnamese 1 1 -vol vo Volapük 0 0 -vot Votic 0 0 -wak Wakashan languages 0 0 -wal Walamo 0 0 -war Waray 0 0 -was Washo 0 0 -wel cy Welsh 0 0 -wen Sorbian languages 0 0 -wln wa Walloon 0 0 -wol wo Wolof 0 0 -xal Kalmyk 0 0 -xho xh Xhosa 0 0 -yao Yao 0 0 -yap Yapese 0 0 -yid yi Yiddish 0 0 -yor yo Yoruba 0 0 -ypk Yupik languages 0 0 -zap Zapotec 0 0 -zen Zenaga 0 0 -zha za Zhuang 0 0 -znd Zande 0 0 -zul zu Zulu 0 0 -zun Zuni 0 0 -rum ro Romanian 1 1 -pob pb Brazilian 1 1 diff --git a/libs/guessit/test/rules/__init__.py b/libs/guessit/test/rules/__init__.py new file mode 100644 index 00000000..e5be370e --- /dev/null +++ b/libs/guessit/test/rules/__init__.py @@ -0,0 +1,3 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# pylint: disable=no-self-use, pointless-statement, missing-docstring, invalid-name diff --git a/libs/guessit/test/rules/audio_codec.yml b/libs/guessit/test/rules/audio_codec.yml new file mode 100644 index 00000000..b744d7bf --- /dev/null +++ b/libs/guessit/test/rules/audio_codec.yml @@ -0,0 +1,83 @@ +# Multiple input strings having same expected results can be chained. +# Use $ marker to check inputs that should not match results. + + +? +MP3 +? +lame +? +lame3.12 +? +lame3.100 +: audio_codec: MP3 + +? +DolbyDigital +? +DD +? +Dolby Digital +: audio_codec: DolbyDigital + +? +DolbyAtmos +? +Dolby Atmos +? +Atmos +? -Atmosphere +: audio_codec: DolbyAtmos + +? +AAC +: audio_codec: AAC + +? +AC3 +: audio_codec: AC3 + +? +Flac +: audio_codec: FLAC + +? +DTS +: audio_codec: DTS + +? +True-HD +? +trueHD +: audio_codec: TrueHD + +? +DTS-HD +: audio_codec: DTS + audio_profile: HD + +? +DTS-HDma +: audio_codec: DTS + audio_profile: HDMA + +? 
+AC3-hq +: audio_codec: AC3 + audio_profile: HQ + +? +AAC-HE +: audio_codec: AAC + audio_profile: HE + +? +AAC-LC +: audio_codec: AAC + audio_profile: LC + +? +AAC2.0 +: audio_codec: AAC + audio_channels: '2.0' + +? +7.1 +? +7ch +? +8ch +: audio_channels: '7.1' + +? +5.1 +? +5ch +? +6ch +: audio_channels: '5.1' + +? +2ch +? +2.0 +? +stereo +: audio_channels: '2.0' + +? +1ch +? +mono +: audio_channels: '1.0' + +? DD5.1 +: audio_codec: DolbyDigital + audio_channels: '5.1' diff --git a/libs/guessit/test/rules/bonus.yml b/libs/guessit/test/rules/bonus.yml new file mode 100644 index 00000000..6ef6f5b2 --- /dev/null +++ b/libs/guessit/test/rules/bonus.yml @@ -0,0 +1,9 @@ +# Multiple input strings having same expected results can be chained. +# Use - marker to check inputs that should not match results. +? Movie Title-x01-Other Title.mkv +? Movie Title-x01-Other Title +? directory/Movie Title-x01-Other Title/file.mkv +: title: Movie Title + bonus_title: Other Title + bonus: 1 + diff --git a/libs/guessit/test/rules/cds.yml b/libs/guessit/test/rules/cds.yml new file mode 100644 index 00000000..cc63765e --- /dev/null +++ b/libs/guessit/test/rules/cds.yml @@ -0,0 +1,10 @@ +# Multiple input strings having same expected results can be chained. +# Use - marker to check inputs that should not match results. +? cd 1of3 +: cd: 1 + cd_count: 3 + +? Some.Title-DVDRIP-x264-CDP +: cd: !!null + release_group: CDP + video_codec: h264 diff --git a/libs/guessit/test/rules/country.yml b/libs/guessit/test/rules/country.yml new file mode 100644 index 00000000..f2da1b20 --- /dev/null +++ b/libs/guessit/test/rules/country.yml @@ -0,0 +1,10 @@ +# Multiple input strings having same expected results can be chained. +# Use $ marker to check inputs that should not match results. +? Us.this.is.title +? this.is.title.US +: country: US + title: this is title + +? 
This.is.us.title +: title: This is us title + diff --git a/libs/guessit/test/rules/date.yml b/libs/guessit/test/rules/date.yml new file mode 100644 index 00000000..d7379f03 --- /dev/null +++ b/libs/guessit/test/rules/date.yml @@ -0,0 +1,50 @@ +# Multiple input strings having same expected results can be chained. +# Use - marker to check inputs that should not match results. +? +09.03.08 +? +09.03.2008 +? +2008.03.09 +: date: 2008-03-09 + +? +31.01.15 +? +31.01.2015 +? +15.01.31 +? +2015.01.31 +: date: 2015-01-31 + +? +01.02.03 +: date: 2003-02-01 + +? +01.02.03 +: options: --date-year-first + date: 2001-02-03 + +? +01.02.03 +: options: --date-day-first + date: 2003-02-01 + +? 1919 +? 2030 +: !!map {} + +? 2029 +: year: 2029 + +? (1920) +: year: 1920 + +? 2012 +: year: 2012 + +? 2011 2013 (2012) (2015) # first marked year is guessed. +: title: "2011 2013" + year: 2012 + +? 2012 2009 S01E02 2015 # If no year is marked, the second one is guessed. +: title: "2012" + year: 2009 + episode_title: "2015" + +? Something 2 mar 2013) +: title: Something + date: 2013-03-02 + type: episode diff --git a/libs/guessit/test/rules/edition.yml b/libs/guessit/test/rules/edition.yml new file mode 100644 index 00000000..bc35b85e --- /dev/null +++ b/libs/guessit/test/rules/edition.yml @@ -0,0 +1,25 @@ +# Multiple input strings having same expected results can be chained. +# Use - marker to check inputs that should not match results. +? Director's cut +? Edition Director's cut +: edition: Director's cut + +? Collector +? Collector Edition +? Edition Collector +: edition: Collector Edition + +? Special Edition +? Edition Special +? -Special +: edition: Special Edition + +? Criterion Edition +? Edition Criterion +? -Criterion +: edition: Criterion Edition + +? Deluxe +? Deluxe Edition +? 
Edition Deluxe +: edition: Deluxe Edition diff --git a/libs/guessit/test/rules/episodes.yml b/libs/guessit/test/rules/episodes.yml new file mode 100644 index 00000000..a75e6702 --- /dev/null +++ b/libs/guessit/test/rules/episodes.yml @@ -0,0 +1,247 @@ +# Multiple input strings having same expected results can be chained. +# Use $ marker to check inputs that should not match results. +? +2x5 +? +2X5 +? +02x05 +? +2X05 +? +02x5 +? S02E05 +? s02e05 +? s02e5 +? s2e05 +? s02ep05 +? s2EP5 +? -s03e05 +? -s02e06 +? -3x05 +? -2x06 +: season: 2 + episode: 5 + +? "+0102" +? "+102" +: season: 1 + episode: 2 + +? "0102 S03E04" +? "S03E04 102" +: season: 3 + episode: 4 + +? +serie Saison 2 other +? +serie Season 2 other +? +serie Saisons 2 other +? +serie Seasons 2 other +? +serie Serie 2 other +? +serie Series 2 other +? +serie Season Two other +? +serie Season II other +: season: 2 + +? Some Series.S02E01.Episode.title.mkv +? Some Series/Season 02/E01-Episode title.mkv +? Some Series/Season 02/Some Series-E01-Episode title.mkv +? Some Dummy Directory/Season 02/Some Series-E01-Episode title.mkv +? -Some Dummy Directory/Season 02/E01-Episode title.mkv +? Some Series/Unsafe Season 02/Some Series-E01-Episode title.mkv +? -Some Series/Unsafe Season 02/E01-Episode title.mkv +? Some Series/Season 02/E01-Episode title.mkv +? Some Series/ Season 02/E01-Episode title.mkv +? Some Dummy Directory/Some Series S02/E01-Episode title.mkv +? Some Dummy Directory/S02 Some Series/E01-Episode title.mkv +: title: Some Series + episode_title: Episode title + season: 2 + episode: 1 + +? Some Series.S02E01.mkv +? Some Series/Season 02/E01.mkv +? Some Series/Season 02/Some Series-E01.mkv +? Some Dummy Directory/Season 02/Some Series-E01.mkv +? -Some Dummy Directory/Season 02/E01.mkv +? Some Series/Unsafe Season 02/Some Series-E01.mkv +? -Some Series/Unsafe Season 02/E01.mkv +? Some Series/Season 02/E01.mkv +? Some Series/ Season 02/E01.mkv +? 
Some Dummy Directory/Some Series S02/E01-AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA.mkv +: title: Some Series + season: 2 + episode: 1 + +? Some Series S03E01E02 +: title: Some Series + season: 3 + episode: [1, 2] + +? Some Series S01S02S03 +? Some Series S01-02-03 +? Some Series S01 S02 S03 +? Some Series S01 02 03 +: title: Some Series + season: [1, 2, 3] + +? Some Series E01E02E03 +? Some Series E01-02-03 +? Some Series E01-03 +? Some Series E01 E02 E03 +? Some Series E01 02 03 +: title: Some Series + episode: [1, 2, 3] + +? Some Series E01E02E04 +? Some Series E01 E02 E04 +? Some Series E01 02 04 +: title: Some Series + episode: [1, 2, 4] + +? Some Series E01-02-04 +? Some Series E01-04 +? Some Series E01-04 +: title: Some Series + episode: [1, 2, 3, 4] + +? Some Series E01-02-E04 +: title: Some Series + episode: [1, 2, 3, 4] + +? Episode 3 +? -Episode III +: episode: 3 + +? Episode 3 +? Episode III +: options: -t episode + episode: 3 + +? -A very special movie +: episode_details: Special + +? A very special episode +: options: -t episode + episode_details: Special + +? 12 Monkeys\Season 01\Episode 05\12 Monkeys - S01E05 - The Night Room.mkv +: container: mkv + title: 12 Monkeys + episode: 5 + season: 1 + +? S03E02.X.1080p +: episode: 2 + screen_size: 1080p + season: 3 + +? Something 1 x 2-FlexGet +: options: -t episode + title: Something + season: 1 + episode: 2 + episode_title: FlexGet + +? Show.Name.-.Season.1.to.3.-.Mp4.1080p +? Show.Name.-.Season.1~3.-.Mp4.1080p +? Show.Name.-.Saison.1.a.3.-.Mp4.1080p +: container: MP4 + screen_size: 1080p + season: + - 1 + - 2 + - 3 + title: Show Name + +? Show.Name.Season.1.3&5.HDTV.XviD-GoodGroup[SomeTrash] +? Show.Name.Season.1.3 and 5.HDTV.XviD-GoodGroup[SomeTrash] +: format: HDTV + release_group: GoodGroup[SomeTrash] + season: + - 1 + - 3 + - 5 + title: Show Name + type: episode + video_codec: XviD + +? Show.Name.Season.1.2.3-5.HDTV.XviD-GoodGroup[SomeTrash] +? 
Show.Name.Season.1.2.3~5.HDTV.XviD-GoodGroup[SomeTrash] +? Show.Name.Season.1.2.3 to 5.HDTV.XviD-GoodGroup[SomeTrash] +: format: HDTV + release_group: GoodGroup[SomeTrash] + season: + - 1 + - 2 + - 3 + - 4 + - 5 + title: Show Name + type: episode + video_codec: XviD + +? The.Get.Down.S01EP01.FRENCH.720p.WEBRIP.XVID-STR +: episode: 1 + format: WEBRip + language: fr + release_group: STR + screen_size: 720p + season: 1 + title: The Get Down + type: episode + video_codec: XviD + +? My.Name.Is.Earl.S01E01-S01E21.SWE-SUB +: episode: + - 1 + - 2 + - 3 + - 4 + - 5 + - 6 + - 7 + - 8 + - 9 + - 10 + - 11 + - 12 + - 13 + - 14 + - 15 + - 16 + - 17 + - 18 + - 19 + - 20 + - 21 + season: 1 + subtitle_language: sv + title: My Name Is Earl + type: episode + +? Show.Name.Season.4.Episodes.1-12 +: episode: + - 1 + - 2 + - 3 + - 4 + - 5 + - 6 + - 7 + - 8 + - 9 + - 10 + - 11 + - 12 + season: 4 + title: Show Name + type: episode + +? show name s01.to.s04 +: season: + - 1 + - 2 + - 3 + - 4 + title: show name + type: episode + +? epi +: options: -t episode + title: epi \ No newline at end of file diff --git a/libs/guessit/test/rules/film.yml b/libs/guessit/test/rules/film.yml new file mode 100644 index 00000000..1f774331 --- /dev/null +++ b/libs/guessit/test/rules/film.yml @@ -0,0 +1,9 @@ +# Multiple input strings having same expected results can be chained. +# Use - marker to check inputs that should not match results. +? Film Title-f01-Series Title.mkv +? Film Title-f01-Series Title +? directory/Film Title-f01-Series Title/file.mkv +: title: Series Title + film_title: Film Title + film: 1 + diff --git a/libs/guessit/test/rules/format.yml b/libs/guessit/test/rules/format.yml new file mode 100644 index 00000000..cf3dea92 --- /dev/null +++ b/libs/guessit/test/rules/format.yml @@ -0,0 +1,112 @@ +# Multiple input strings having same expected results can be chained. +# Use - marker to check inputs that should not match results. +? +VHS +? +VHSRip +? +VHS-Rip +? +VhS_rip +? +VHS.RIP +? 
-VHSAnythingElse +? -SomeVHS stuff +? -VH +? -VHx +? -VHxRip +: format: VHS + +? +Cam +? +CamRip +? +CaM Rip +? +Cam_Rip +? +cam.rip +: format: Cam + +? +Telesync +? +TS +? +HD TS +? -Hd.Ts # ts file extension +? -HD.TS # ts file extension +? +Hd-Ts +: format: Telesync + +? +Workprint +? +workPrint +? +WorkPrint +? +WP +? -Work Print +: format: Workprint + +? +Telecine +? +teleCine +? +TC +? -Tele Cine +: format: Telecine + +? +PPV +? +ppv-rip +: format: PPV + +? -TV +? +SDTV +? +SDTVRIP +? +Rip sd tv +? +TvRip +? +Rip TV +: format: TV + +? +DVB +? +DVB-Rip +? +DvBRiP +? +pdTV +? +Pd Tv +: format: DVB + +? +DVD +? +DVD-RIP +? +video ts +? +DVDR +? +DVD 9 +? +dvd 5 +? -dvd ts +: format: DVD + -format: ts + +? +HDTV +? +tv rip hd +? +HDtv Rip +? +HdRip +: format: HDTV + +? +VOD +? +VodRip +? +vod rip +: format: VOD + +? +webrip +? +Web Rip +: format: WEBRip + +? +webdl +? +Web DL +? +webHD +? +WEB hd +? +web +: format: WEB-DL + +? +HDDVD +? +hd dvd +? +hdDvdRip +: format: HD-DVD + +? +BluRay +? +BluRay rip +? +BD +? +BR +? +BDRip +? +BR rip +? +BD5 +? +BD9 +? +BD25 +? +bd50 +: format: BluRay + +? XVID.NTSC.DVDR.nfo +: format: DVD diff --git a/libs/guessit/test/rules/language.yml b/libs/guessit/test/rules/language.yml new file mode 100644 index 00000000..51bbd8da --- /dev/null +++ b/libs/guessit/test/rules/language.yml @@ -0,0 +1,39 @@ +# Multiple input strings having same expected results can be chained. +# Use - marker to check inputs that should not match results. +? +English +? .ENG. +: language: English + +? +French +: language: French + +? +SubFrench +? +SubFr +? +STFr +? ST.FR +: subtitle_language: French + +? +ENG.-.sub.FR +? ENG.-.FR Sub +? +ENG.-.SubFR +? +ENG.-.FRSUB +? +ENG.-.FRSUBS +? +ENG.-.FR-SUBS +: language: English + subtitle_language: French + +? "{Fr-Eng}.St{Fr-Eng}" +? "Le.Prestige[x264.{Fr-Eng}.St{Fr-Eng}.Chaps].mkv" +: language: [French, English] + subtitle_language: [French, English] + +? +ENG.-.sub.SWE +? ENG.-.SWE Sub +? +ENG.-.SubSWE +? 
+ENG.-.SWESUB +? +ENG.-.sub.SV +? ENG.-.SV Sub +? +ENG.-.SubSV +? +ENG.-.SVSUB +: language: English + subtitle_language: Swedish \ No newline at end of file diff --git a/libs/guessit/test/rules/other.yml b/libs/guessit/test/rules/other.yml new file mode 100644 index 00000000..cce8cbd0 --- /dev/null +++ b/libs/guessit/test/rules/other.yml @@ -0,0 +1,137 @@ +# Multiple input strings having same expected results can be chained. +# Use - marker to check inputs that should not match results. +? +DVDSCR +? +DVDScreener +? +DVD-SCR +? +DVD Screener +? +DVD AnythingElse Screener +? -DVD AnythingElse SCR +: other: Screener + +? +AudioFix +? +AudioFixed +? +Audio Fix +? +Audio Fixed +: other: AudioFix + +? +SyncFix +? +SyncFixed +? +Sync Fix +? +Sync Fixed +: other: SyncFix + +? +DualAudio +? +Dual Audio +: other: DualAudio + +? +ws +? +WideScreen +? +Wide Screen +: other: WideScreen + +? +NF +? +Netflix +: other: Netflix + +# Fix and Real must be surround by others properties to be matched. +? DVD.Real.XViD +? DVD.fix.XViD +? -DVD.Real +? -DVD.Fix +? -Real.XViD +? -Fix.XViD +: other: Proper + proper_count: 1 + +? -DVD.BlablaBla.Fix.Blablabla.XVID +? -DVD.BlablaBla.Fix.XVID +? -DVD.Fix.Blablabla.XVID +: other: Proper + proper_count: 1 + + +? DVD.Real.PROPER.REPACK +: other: Proper + proper_count: 3 + + +? Proper +? +Repack +? +Rerip +: other: Proper + proper_count: 1 + +? XViD.Fansub +: other: Fansub + +? XViD.Fastsub +: other: Fastsub + +? +Season Complete +? -Complete +: other: Complete + +? R5 +? RC +: other: R5 + +? PreAir +? Pre Air +: other: Preair + +? Screener +: other: Screener + +? Remux +: other: Remux + +? 3D +: other: 3D + +? HD +: other: HD + +? mHD # ?? +: other: mHD + +? HDLight +: other: HDLight + +? HQ +: other: HQ + +? ddc +: other: DDC + +? hr +: other: HR + +? PAL +: other: PAL + +? SECAM +: other: SECAM + +? NTSC +: other: NTSC + +? CC +: other: CC + +? LD +: other: LD + +? MD +: other: MD + +? -The complete movie +: other: Complete + +? 
+The complete movie +: title: The complete movie + +? +AC3-HQ +: audio_profile: HQ + +? Other-HQ +: other: HQ diff --git a/libs/guessit/test/rules/part.yml b/libs/guessit/test/rules/part.yml new file mode 100644 index 00000000..72f3d98a --- /dev/null +++ b/libs/guessit/test/rules/part.yml @@ -0,0 +1,18 @@ +# Multiple input strings having same expected results can be chained. +# Use - marker to check inputs that should not match results. +? Filename Part 3.mkv +? Filename Part III.mkv +? Filename Part Three.mkv +? Filename Part Trois.mkv +: title: Filename + part: 3 + +? Part 3 +? Part III +? Part Three +? Part Trois +? Part3 +: part: 3 + +? -Something.Apt.1 +: part: 1 \ No newline at end of file diff --git a/libs/guessit/test/rules/processors.yml b/libs/guessit/test/rules/processors.yml new file mode 100644 index 00000000..ee906b2c --- /dev/null +++ b/libs/guessit/test/rules/processors.yml @@ -0,0 +1,8 @@ +# Multiple input strings having same expected results can be chained. +# Use $ marker to check inputs that should not match results. + +# Prefer information for last path. +? Some movie (2000)/Some movie (2001).mkv +? Some movie (2001)/Some movie.mkv +: year: 2001 + container: mkv diff --git a/libs/guessit/test/rules/release_group.yml b/libs/guessit/test/rules/release_group.yml new file mode 100644 index 00000000..d048ff71 --- /dev/null +++ b/libs/guessit/test/rules/release_group.yml @@ -0,0 +1,41 @@ +# Multiple input strings having same expected results can be chained. +# Use - marker to check inputs that should not match results. +? Some.Title.XViD-ReleaseGroup +? Some.Title.XViD-ReleaseGroup.mkv +: release_group: ReleaseGroup + +? Some.Title.XViD-by.Artik[SEDG].avi +: release_group: Artik[SEDG] + +? "[ABC] Some.Title.avi" +? some/folder/[ABC]Some.Title.avi +: release_group: ABC + +? "[ABC] Some.Title.XViD-GRP.avi" +? some/folder/[ABC]Some.Title.XViD-GRP.avi +: release_group: GRP + +? "[ABC] Some.Title.S01E02.avi" +? 
some/folder/[ABC]Some.Title.S01E02.avi +: release_group: ABC + +? Some.Title.XViD-S2E02.NoReleaseGroup.avi +: release_group: !!null + +? Test.S01E01-FooBar-Group +: options: -G group -G xxxx + episode: 1 + episode_title: FooBar + release_group: Group + season: 1 + title: Test + type: episode + +? Test.S01E01-FooBar-Group +: options: -G re:gr.?up -G xxxx + episode: 1 + episode_title: FooBar + release_group: Group + season: 1 + title: Test + type: episode diff --git a/libs/guessit/test/rules/screen_size.yml b/libs/guessit/test/rules/screen_size.yml new file mode 100644 index 00000000..1145dd7e --- /dev/null +++ b/libs/guessit/test/rules/screen_size.yml @@ -0,0 +1,69 @@ +# Multiple input strings having same expected results can be chained. +# Use - marker to check inputs that should not match results. +? +360p +? +360px +? +360i +? "+360" +? +500x360 +: screen_size: 360p + +? +368p +? +368px +? +368i +? "+368" +? +500x368 +: screen_size: 368p + +? +480p +? +480px +? +480i +? "+480" +? +500x480 +: screen_size: 480p + +? +576p +? +576px +? +576i +? "+576" +? +500x576 +: screen_size: 576p + +? +720p +? +720px +? 720hd +? 720pHD +? +720i +? "+720" +? +500x720 +: screen_size: 720p + +? +900p +? +900px +? +900i +? "+900" +? +500x900 +: screen_size: 900p + +? +1080p +? +1080px +? +1080hd +? +1080pHD +? -1080i +? "+1080" +? +500x1080 +: screen_size: 1080p + +? +1080i +? -1080p +: screen_size: 1080i + +? +2160p +? +2160px +? +2160i +? "+2160" +? +4096x2160 +: screen_size: 4K + +? Test.File.720hd.bluray +? Test.File.720p50 +: screen_size: 720p diff --git a/libs/guessit/test/rules/title.yml b/libs/guessit/test/rules/title.yml new file mode 100644 index 00000000..fffaf8a2 --- /dev/null +++ b/libs/guessit/test/rules/title.yml @@ -0,0 +1,32 @@ +# Multiple input strings having same expected results can be chained. +# Use - marker to check inputs that should not match results. +? Title Only +? -Title XViD 720p Only +? sub/folder/Title Only +? -sub/folder/Title XViD 720p Only +? 
Title Only.mkv +? Title Only.avi +: title: Title Only + +? Title Only/title_only.mkv +: title: Title Only + +? title_only.mkv +: title: title only + +? Some Title/some.title.mkv +? some.title/Some.Title.mkv +: title: Some Title + +? SOME TITLE/Some.title.mkv +? Some.title/SOME TITLE.mkv +: title: Some title + +? some title/Some.title.mkv +? Some.title/some title.mkv +: title: Some title + +? Some other title/Some.Other.title.mkv +? Some.Other title/Some other title.mkv +: title: Some Other title + diff --git a/libs/guessit/test/rules/video_codec.yml b/libs/guessit/test/rules/video_codec.yml new file mode 100644 index 00000000..d195eaaf --- /dev/null +++ b/libs/guessit/test/rules/video_codec.yml @@ -0,0 +1,54 @@ +# Multiple input strings having same expected results can be chained. +# Use - marker to check inputs that should not match results. +? rv10 +? rv13 +? RV20 +? Rv30 +? rv40 +? -xrv40 +: video_codec: Real + +? mpeg2 +? MPEG2 +? -mpeg +? -mpeg 2 # Not sure if we should ignore this one ... +? -xmpeg2 +? -mpeg2x +: video_codec: Mpeg2 + +? DivX +? -div X +? divx +? dvdivx +? DVDivX +: video_codec: DivX + +? XviD +? xvid +? -x vid +: video_codec: XviD + +? h264 +? x264 +? h.264 +? x.264 +? mpeg4-AVC +? -MPEG-4 +? -mpeg4 +? -mpeg +? -h 265 +? -x265 +: video_codec: h264 + +? h265 +? x265 +? h.265 +? x.265 +? hevc +? -h 264 +? -x264 +: video_codec: h265 + +? h265-HP +: video_codec: h265 + video_profile: HP \ No newline at end of file diff --git a/libs/guessit/test/rules/website.yml b/libs/guessit/test/rules/website.yml new file mode 100644 index 00000000..11d434d2 --- /dev/null +++ b/libs/guessit/test/rules/website.yml @@ -0,0 +1,23 @@ +# Multiple input strings having same expected results can be chained. +# Use - marker to check inputs that should not match results. +? +tvu.org.ru +? -tvu.unsafe.ru +: website: tvu.org.ru + +? +www.nimp.na +? -somewww.nimp.na +? -www.nimp.nawouak +? -nimp.na +: website: www.nimp.na + +? +wawa.co.uk +? 
-wawa.uk +: website: wawa.co.uk + +? -Dark.Net.S01E06.720p.HDTV.x264-BATV + -Dark.Net.2015.720p.HDTV.x264-BATV +: website: Dark.Net + +? Dark.Net.S01E06.720p.HDTV.x264-BATV + Dark.Net.2015.720p.HDTV.x264-BATV +: title: Dark Net diff --git a/libs/guessit/test/test-input-file.txt b/libs/guessit/test/test-input-file.txt new file mode 100644 index 00000000..656bc931 --- /dev/null +++ b/libs/guessit/test/test-input-file.txt @@ -0,0 +1,2 @@ +Fear.and.Loathing.in.Las.Vegas.FRENCH.ENGLISH.720p.HDDVD.DTS.x264-ESiR.mkv +SecondFile.avi \ No newline at end of file diff --git a/libs/guessit/test/test_api.py b/libs/guessit/test/test_api.py index 92cef41b..ca33df04 100644 --- a/libs/guessit/test/test_api.py +++ b/libs/guessit/test/test_api.py @@ -1,54 +1,63 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- -# -# GuessIt - A library for guessing information from filenames -# Copyright (c) 2014 Nicolas Wack -# -# GuessIt is free software; you can redistribute it and/or modify it under -# the terms of the Lesser GNU General Public License as published by -# the Free Software Foundation; either version 3 of the License, or -# (at your option) any later version. -# -# GuessIt is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# Lesser GNU General Public License for more details. -# -# You should have received a copy of the Lesser GNU General Public License -# along with this program. If not, see . 
-# +# pylint: disable=no-self-use, pointless-statement, missing-docstring, invalid-name, pointless-string-statement -from __future__ import absolute_import, division, print_function, unicode_literals +import os -from guessit.test.guessittest import * +import pytest +import six + +from ..api import guessit, properties, GuessitException + +__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__))) -class TestApi(TestGuessit): - def test_api(self): - movie_path = 'Movies/Dark City (1998)/Dark.City.(1998).DC.BDRip.720p.DTS.X264-CHD.mkv' +def test_default(): + ret = guessit('Fear.and.Loathing.in.Las.Vegas.FRENCH.ENGLISH.720p.HDDVD.DTS.x264-ESiR.mkv') + assert ret and 'title' in ret - movie_info = guessit.guess_movie_info(movie_path) - video_info = guessit.guess_video_info(movie_path) - episode_info = guessit.guess_episode_info(movie_path) - file_info = guessit.guess_file_info(movie_path) - self.assertEqual(guessit.guess_file_info(movie_path, type='movie'), movie_info) - self.assertEqual(guessit.guess_file_info(movie_path, type='video'), video_info) - self.assertEqual(guessit.guess_file_info(movie_path, type='episode'), episode_info) +def test_forced_unicode(): + ret = guessit(u'Fear.and.Loathing.in.Las.Vegas.FRENCH.ENGLISH.720p.HDDVD.DTS.x264-ESiR.mkv') + assert ret and 'title' in ret and isinstance(ret['title'], six.text_type) - self.assertEqual(guessit.guess_file_info(movie_path, options={'type': 'movie'}), movie_info) - self.assertEqual(guessit.guess_file_info(movie_path, options={'type': 'video'}), video_info) - self.assertEqual(guessit.guess_file_info(movie_path, options={'type': 'episode'}), episode_info) - self.assertEqual(guessit.guess_file_info(movie_path, options={'type': 'episode'}, type='movie'), episode_info) # kwargs priority other options +def test_forced_binary(): + ret = guessit(b'Fear.and.Loathing.in.Las.Vegas.FRENCH.ENGLISH.720p.HDDVD.DTS.x264-ESiR.mkv') + assert ret and 'title' in ret and isinstance(ret['title'], 
six.binary_type) - movie_path_name_only = 'Movies/Dark City (1998)/Dark.City.(1998).DC.BDRip.720p.DTS.X264-CHD' - file_info_name_only = guessit.guess_file_info(movie_path_name_only, options={"name_only": True}) - self.assertFalse('container' in file_info_name_only) - self.assertTrue('container' in file_info) +def test_unicode_japanese(): + ret = guessit('[阿维达].Avida.2006.FRENCH.DVDRiP.XViD-PROD.avi') + assert ret and 'title' in ret -suite = allTests(TestApi) -if __name__ == '__main__': - TextTestRunner(verbosity=2).run(suite) +def test_unicode_japanese_options(): + ret = guessit("[阿维达].Avida.2006.FRENCH.DVDRiP.XViD-PROD.avi", options={"expected_title": ["阿维达"]}) + assert ret and 'title' in ret and ret['title'] == "阿维达" + + +def test_forced_unicode_japanese_options(): + ret = guessit(u"[阿维达].Avida.2006.FRENCH.DVDRiP.XViD-PROD.avi", options={"expected_title": [u"阿维达"]}) + assert ret and 'title' in ret and ret['title'] == u"阿维达" + +# TODO: This doesn't compile on python 3, but should be tested on python 2. 
+""" +if six.PY2: + def test_forced_binary_japanese_options(): + ret = guessit(b"[阿维达].Avida.2006.FRENCH.DVDRiP.XViD-PROD.avi", options={"expected_title": [b"阿维达"]}) + assert ret and 'title' in ret and ret['title'] == b"阿维达" +""" + + +def test_properties(): + props = properties() + assert 'video_codec' in props.keys() + + +def test_exception(): + with pytest.raises(GuessitException) as excinfo: + guessit(object()) + assert "An internal error has occured in guessit" in str(excinfo.value) + assert "Guessit Exception Report" in str(excinfo.value) + assert "Please report at https://github.com/guessit-io/guessit/issues" in str(excinfo.value) diff --git a/libs/guessit/test/test_api_unicode_literals.py b/libs/guessit/test/test_api_unicode_literals.py new file mode 100644 index 00000000..3347a7d8 --- /dev/null +++ b/libs/guessit/test/test_api_unicode_literals.py @@ -0,0 +1,66 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# pylint: disable=no-self-use, pointless-statement, missing-docstring, invalid-name, pointless-string-statement + + +from __future__ import unicode_literals + +import os + +import pytest +import six + +from ..api import guessit, properties, GuessitException + +__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__))) + + +def test_default(): + ret = guessit('Fear.and.Loathing.in.Las.Vegas.FRENCH.ENGLISH.720p.HDDVD.DTS.x264-ESiR.mkv') + assert ret and 'title' in ret + + +def test_forced_unicode(): + ret = guessit(u'Fear.and.Loathing.in.Las.Vegas.FRENCH.ENGLISH.720p.HDDVD.DTS.x264-ESiR.mkv') + assert ret and 'title' in ret and isinstance(ret['title'], six.text_type) + + +def test_forced_binary(): + ret = guessit(b'Fear.and.Loathing.in.Las.Vegas.FRENCH.ENGLISH.720p.HDDVD.DTS.x264-ESiR.mkv') + assert ret and 'title' in ret and isinstance(ret['title'], six.binary_type) + + +def test_unicode_japanese(): + ret = guessit('[阿维达].Avida.2006.FRENCH.DVDRiP.XViD-PROD.avi') + assert ret and 'title' in ret + + +def 
test_unicode_japanese_options(): + ret = guessit("[阿维达].Avida.2006.FRENCH.DVDRiP.XViD-PROD.avi", options={"expected_title": ["阿维达"]}) + assert ret and 'title' in ret and ret['title'] == "阿维达" + + +def test_forced_unicode_japanese_options(): + ret = guessit(u"[阿维达].Avida.2006.FRENCH.DVDRiP.XViD-PROD.avi", options={"expected_title": [u"阿维达"]}) + assert ret and 'title' in ret and ret['title'] == u"阿维达" + +# TODO: This doesn't compile on python 3, but should be tested on python 2. +""" +if six.PY2: + def test_forced_binary_japanese_options(): + ret = guessit(b"[阿维达].Avida.2006.FRENCH.DVDRiP.XViD-PROD.avi", options={"expected_title": [b"阿维达"]}) + assert ret and 'title' in ret and ret['title'] == b"阿维达" +""" + + +def test_properties(): + props = properties() + assert 'video_codec' in props.keys() + + +def test_exception(): + with pytest.raises(GuessitException) as excinfo: + guessit(object()) + assert "An internal error has occured in guessit" in str(excinfo.value) + assert "Guessit Exception Report" in str(excinfo.value) + assert "Please report at https://github.com/guessit-io/guessit/issues" in str(excinfo.value) diff --git a/libs/guessit/test/test_autodetect.py b/libs/guessit/test/test_autodetect.py deleted file mode 100644 index 229b491f..00000000 --- a/libs/guessit/test/test_autodetect.py +++ /dev/null @@ -1,45 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# -# GuessIt - A library for guessing information from filenames -# Copyright (c) 2013 Nicolas Wack -# -# GuessIt is free software; you can redistribute it and/or modify it under -# the terms of the Lesser GNU General Public License as published by -# the Free Software Foundation; either version 3 of the License, or -# (at your option) any later version. -# -# GuessIt is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# Lesser GNU General Public License for more details. 
-# -# You should have received a copy of the Lesser GNU General Public License -# along with this program. If not, see . -# - -from __future__ import absolute_import, division, print_function, unicode_literals - -from guessit.test.guessittest import * - - -class TestAutoDetect(TestGuessit): - def testEmpty(self): - result = guessit.guess_file_info('') - self.assertEqual(result, {}) - - result = guessit.guess_file_info('___-__') - self.assertEqual(result, {}) - - result = guessit.guess_file_info('__-.avc') - self.assertEqual(result, {'type': 'unknown', 'extension': 'avc'}) - - def testAutoDetect(self): - self.checkMinimumFieldsCorrect(filename='autodetect.yaml', - remove_type=False) - - -suite = allTests(TestAutoDetect) - -if __name__ == '__main__': - TextTestRunner(verbosity=2).run(suite) diff --git a/libs/guessit/test/test_autodetect_all.py b/libs/guessit/test/test_autodetect_all.py deleted file mode 100644 index 033e1571..00000000 --- a/libs/guessit/test/test_autodetect_all.py +++ /dev/null @@ -1,46 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# -# GuessIt - A library for guessing information from filenames -# Copyright (c) 2013 Nicolas Wack -# -# GuessIt is free software; you can redistribute it and/or modify it under -# the terms of the Lesser GNU General Public License as published by -# the Free Software Foundation; either version 3 of the License, or -# (at your option) any later version. -# -# GuessIt is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# Lesser GNU General Public License for more details. -# -# You should have received a copy of the Lesser GNU General Public License -# along with this program. If not, see . 
-# - -from __future__ import absolute_import, division, print_function, unicode_literals - -from guessit.test.guessittest import * - -IGNORE_EPISODES = [] -IGNORE_MOVIES = [] - - -class TestAutoDetectAll(TestGuessit): - def testAutoMatcher(self): - self.checkMinimumFieldsCorrect(filename='autodetect.yaml', - remove_type=False) - - def testAutoMatcherMovies(self): - self.checkMinimumFieldsCorrect(filename='movies.yaml', - exclude_files=IGNORE_MOVIES) - - def testAutoMatcherEpisodes(self): - self.checkMinimumFieldsCorrect(filename='episodes.yaml', - exclude_files=IGNORE_EPISODES) - - -suite = allTests(TestAutoDetectAll) - -if __name__ == '__main__': - TextTestRunner(verbosity=2).run(suite) diff --git a/libs/guessit/test/test_benchmark.py b/libs/guessit/test/test_benchmark.py new file mode 100644 index 00000000..34386e30 --- /dev/null +++ b/libs/guessit/test/test_benchmark.py @@ -0,0 +1,52 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# pylint: disable=no-self-use,pointless-statement,missing-docstring,invalid-name,line-too-long +import time + +import pytest + +from ..api import guessit + + +def case1(): + return guessit('Fear.and.Loathing.in.Las.Vegas.FRENCH.ENGLISH.720p.HDDVD.DTS.x264-ESiR.mkv') + + +def case2(): + return guessit('Movies/Fantastic Mr Fox/Fantastic.Mr.Fox.2009.DVDRip.{x264+LC-AAC.5.1}{Fr-Eng}{Sub.Fr-Eng}-â„¢.[sharethefiles.com].mkv') + + +def case3(): + return guessit('Series/dexter/Dexter.5x02.Hello,.Bandit.ENG.-.sub.FR.HDTV.XviD-AlFleNi-TeaM.[tvu.org.ru].avi') + + +def case4(): + return guessit('Movies/The Doors (1991)/09.03.08.The.Doors.(1991).BDRip.720p.AC3.X264-HiS@SiLUHD-English.[sharethefiles.com].mkv') + + +@pytest.mark.benchmark( + group="Performance Tests", + min_time=1, + max_time=2, + min_rounds=5, + timer=time.time, + disable_gc=True, + warmup=False +) +@pytest.mark.skipif(True, reason="Disabled") +class TestBenchmark(object): + def test_case1(self, benchmark): + ret = benchmark(case1) + assert ret + + def test_case2(self, 
benchmark): + ret = benchmark(case2) + assert ret + + def test_case3(self, benchmark): + ret = benchmark(case3) + assert ret + + def test_case4(self, benchmark): + ret = benchmark(case4) + assert ret diff --git a/libs/guessit/test/test_doctests.py b/libs/guessit/test/test_doctests.py deleted file mode 100644 index 9fedeb0f..00000000 --- a/libs/guessit/test/test_doctests.py +++ /dev/null @@ -1,45 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# -# GuessIt - A library for guessing information from filenames -# Copyright (c) 2014 Nicolas Wack -# -# GuessIt is free software; you can redistribute it and/or modify it under -# the terms of the Lesser GNU General Public License as published by -# the Free Software Foundation; either version 3 of the License, or -# (at your option) any later version. -# -# GuessIt is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# Lesser GNU General Public License for more details. -# -# You should have received a copy of the Lesser GNU General Public License -# along with this program. If not, see . 
-# - -from __future__ import absolute_import, division, print_function, unicode_literals - -from guessit.test.guessittest import * -import guessit -import guessit.hash_ed2k -import unittest -import doctest - - -def load_tests(loader, tests, ignore): - tests.addTests(doctest.DocTestSuite(guessit)) - tests.addTests(doctest.DocTestSuite(guessit.date)) - tests.addTests(doctest.DocTestSuite(guessit.fileutils)) - tests.addTests(doctest.DocTestSuite(guessit.guess)) - tests.addTests(doctest.DocTestSuite(guessit.hash_ed2k)) - tests.addTests(doctest.DocTestSuite(guessit.language)) - tests.addTests(doctest.DocTestSuite(guessit.matchtree)) - tests.addTests(doctest.DocTestSuite(guessit.textutils)) - return tests - -suite = unittest.TestSuite() -load_tests(None, suite, None) - -if __name__ == '__main__': - TextTestRunner(verbosity=2).run(suite) diff --git a/libs/guessit/test/test_episode.py b/libs/guessit/test/test_episode.py deleted file mode 100644 index 03abf6b0..00000000 --- a/libs/guessit/test/test_episode.py +++ /dev/null @@ -1,35 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# -# GuessIt - A library for guessing information from filenames -# Copyright (c) 2013 Nicolas Wack -# -# GuessIt is free software; you can redistribute it and/or modify it under -# the terms of the Lesser GNU General Public License as published by -# the Free Software Foundation; either version 3 of the License, or -# (at your option) any later version. -# -# GuessIt is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# Lesser GNU General Public License for more details. -# -# You should have received a copy of the Lesser GNU General Public License -# along with this program. If not, see . 
-# - -from __future__ import absolute_import, division, print_function, unicode_literals - -from guessit.test.guessittest import * - - -class TestEpisode(TestGuessit): - def testEpisodes(self): - self.checkMinimumFieldsCorrect(filetype='episode', - filename='episodes.yaml') - - -suite = allTests(TestEpisode) - -if __name__ == '__main__': - TextTestRunner(verbosity=2).run(suite) diff --git a/libs/guessit/test/test_hashes.py b/libs/guessit/test/test_hashes.py deleted file mode 100644 index a8bc763c..00000000 --- a/libs/guessit/test/test_hashes.py +++ /dev/null @@ -1,46 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# -# GuessIt - A library for guessing information from filenames -# Copyright (c) 2013 Nicolas Wack -# -# GuessIt is free software; you can redistribute it and/or modify it under -# the terms of the Lesser GNU General Public License as published by -# the Free Software Foundation; either version 3 of the License, or -# (at your option) any later version. -# -# GuessIt is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# Lesser GNU General Public License for more details. -# -# You should have received a copy of the Lesser GNU General Public License -# along with this program. If not, see . 
-# - -from __future__ import absolute_import, division, print_function, unicode_literals - -from guessit.test.guessittest import * - - -class TestHashes(TestGuessit): - def test_hashes(self): - hashes = ( - ('hash_mpc', '1MB', u'8542ad406c15c8bd'), # TODO: Check if this value is valid - ('hash_ed2k', '1MB', u'ed2k://|file|1MB|1048576|AA3CC5552A9931A76B61A41D306735F7|/'), # TODO: Check if this value is valid - ('hash_md5', '1MB', u'5d8dcbca8d8ac21766f28797d6c3954c'), - ('hash_sha1', '1MB', u'51d2b8f3248d7ee495b7750c8da5aa3b3819de9d'), - ('hash_md5', 'dummy.srt', u'64de6b5893cac24456c46a935ef9c359'), - ('hash_sha1', 'dummy.srt', u'a703fc0fa4518080505809bf562c6fc6f7b3c98c') - ) - - for hash_type, filename, expected_value in hashes: - guess = guess_file_info(file_in_same_dir(__file__, filename), hash_type) - computed_value = guess.get(hash_type) - self.assertEqual(expected_value, guess.get(hash_type), "Invalid %s for %s: %s != %s" % (hash_type, filename, computed_value, expected_value)) - - -suite = allTests(TestHashes) - -if __name__ == '__main__': - TextTestRunner(verbosity=2).run(suite) diff --git a/libs/guessit/test/test_language.py b/libs/guessit/test/test_language.py deleted file mode 100644 index 99578fe7..00000000 --- a/libs/guessit/test/test_language.py +++ /dev/null @@ -1,130 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# -# GuessIt - A library for guessing information from filenames -# Copyright (c) 2013 Nicolas Wack -# -# GuessIt is free software; you can redistribute it and/or modify it under -# the terms of the Lesser GNU General Public License as published by -# the Free Software Foundation; either version 3 of the License, or -# (at your option) any later version. -# -# GuessIt is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# Lesser GNU General Public License for more details. 
-# -# You should have received a copy of the Lesser GNU General Public License -# along with this program. If not, see . -# - -from __future__ import absolute_import, division, print_function, unicode_literals - -from guessit.test.guessittest import * - -import io - - -class TestLanguage(TestGuessit): - - def check_languages(self, languages): - for lang1, lang2 in languages.items(): - self.assertEqual(Language.fromguessit(lang1), - Language.fromguessit(lang2)) - - def test_addic7ed(self): - languages = {'English': 'en', - 'English (US)': 'en-US', - 'English (UK)': 'en-UK', - 'Italian': 'it', - 'Portuguese': 'pt', - 'Portuguese (Brazilian)': 'pt-BR', - 'Romanian': 'ro', - 'Español (Latinoamérica)': 'es-MX', - 'Español (España)': 'es-ES', - 'Spanish (Latin America)': 'es-MX', - 'Español': 'es', - 'Spanish': 'es', - 'Spanish (Spain)': 'es-ES', - 'French': 'fr', - 'Greek': 'el', - 'Arabic': 'ar', - 'German': 'de', - 'Croatian': 'hr', - 'Indonesian': 'id', - 'Hebrew': 'he', - 'Russian': 'ru', - 'Turkish': 'tr', - 'Swedish': 'se', - 'Czech': 'cs', - 'Dutch': 'nl', - 'Hungarian': 'hu', - 'Norwegian': 'no', - 'Polish': 'pl', - 'Persian': 'fa'} - - self.check_languages(languages) - - def test_subswiki(self): - languages = {'English (US)': 'en-US', 'English (UK)': 'en-UK', 'English': 'en', - 'French': 'fr', 'Brazilian': 'po', 'Portuguese': 'pt', - 'Español (Latinoamérica)': 'es-MX', 'Español (España)': 'es-ES', - 'Español': 'es', 'Italian': 'it', 'Català': 'ca'} - - self.check_languages(languages) - - def test_tvsubtitles(self): - languages = {'English': 'en', 'Español': 'es', 'French': 'fr', 'German': 'de', - 'Brazilian': 'br', 'Russian': 'ru', 'Ukrainian': 'ua', 'Italian': 'it', - 'Greek': 'gr', 'Arabic': 'ar', 'Hungarian': 'hu', 'Polish': 'pl', - 'Turkish': 'tr', 'Dutch': 'nl', 'Portuguese': 'pt', 'Swedish': 'sv', - 'Danish': 'da', 'Finnish': 'fi', 'Korean': 'ko', 'Chinese': 'cn', - 'Japanese': 'jp', 'Bulgarian': 'bg', 'Czech': 'cz', 'Romanian': 'ro'} - - 
self.check_languages(languages) - - def test_opensubtitles(self): - opensubtitles_langfile = file_in_same_dir(__file__, 'opensubtitles_languages_2012_05_09.txt') - for l in [u(l).strip() for l in io.open(opensubtitles_langfile, encoding='utf-8')][1:]: - idlang, alpha2, _, upload_enabled, web_enabled = l.strip().split('\t') - # do not test languages that are too esoteric / not widely available - if int(upload_enabled) and int(web_enabled): - # check that we recognize the opensubtitles language code correctly - # and that we are able to output this code from a language - self.assertEqual(idlang, Language.fromguessit(idlang).opensubtitles) - if alpha2: - # check we recognize the opensubtitles 2-letter code correctly - self.check_languages({idlang: alpha2}) - - def test_tmdb(self): - # examples from http://api.themoviedb.org/2.1/language-tags - for lang in ['en-US', 'en-CA', 'es-MX', 'fr-PF']: - self.assertEqual(lang, str(Language.fromguessit(lang))) - - def test_subtitulos(self): - languages = {'English (US)': 'en-US', 'English (UK)': 'en-UK', 'English': 'en', - 'French': 'fr', 'Brazilian': 'po', 'Portuguese': 'pt', - 'Español (Latinoamérica)': 'es-MX', 'Español (España)': 'es-ES', - 'Español': 'es', 'Italian': 'it', 'Català': 'ca'} - - self.check_languages(languages) - - def test_thesubdb(self): - languages = {'af': 'af', 'cs': 'cs', 'da': 'da', 'de': 'de', 'en': 'en', 'es': 'es', 'fi': 'fi', - 'fr': 'fr', 'hu': 'hu', 'id': 'id', 'it': 'it', 'la': 'la', 'nl': 'nl', 'no': 'no', - 'oc': 'oc', 'pl': 'pl', 'pt': 'pt', 'ro': 'ro', 'ru': 'ru', 'sl': 'sl', 'sr': 'sr', - 'sv': 'sv', 'tr': 'tr'} - - self.check_languages(languages) - - def test_exceptions(self): - self.assertEqual(Language.fromguessit('br'), Language.fromguessit('pt(br)')) - - self.assertEqual(Language.fromguessit('unknown'), - Language.fromguessit('und')) - - -suite = allTests(TestLanguage) - -if __name__ == '__main__': - TextTestRunner(verbosity=2).run(suite) diff --git a/libs/guessit/test/test_main.py 
b/libs/guessit/test/test_main.py index 1140654a..cbdba7aa 100644 --- a/libs/guessit/test/test_main.py +++ b/libs/guessit/test/test_main.py @@ -1,69 +1,72 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- -# -# GuessIt - A library for guessing information from filenames -# Copyright (c) 2014 Nicolas Wack -# -# GuessIt is free software; you can redistribute it and/or modify it under -# the terms of the Lesser GNU General Public License as published by -# the Free Software Foundation; either version 3 of the License, or -# (at your option) any later version. -# -# GuessIt is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# Lesser GNU General Public License for more details. -# -# You should have received a copy of the Lesser GNU General Public License -# along with this program. If not, see . -# +# pylint: disable=no-self-use, pointless-statement, missing-docstring, invalid-name -from __future__ import absolute_import, division, print_function, unicode_literals +import os -from guessit.test.guessittest import * -from guessit.fileutils import split_path, file_in_same_dir -from guessit.textutils import strip_brackets, str_replace, str_fill -from guessit import PY2 -from guessit import __main__ +import pytest -if PY2: - from StringIO import StringIO -else: - from io import StringIO +from ..__main__ import main + +__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__))) -class TestMain(TestGuessit): - def setUp(self): - self._stdout = sys.stdout - string_out = StringIO() - sys.stdout = string_out +def test_main_no_args(): + main([]) - def tearDown(self): - sys.stdout = self._stdout - def test_list_properties(self): - __main__.main(["-p"], False) - __main__.main(["-V"], False) +def test_main(): + main(['Fear.and.Loathing.in.Las.Vegas.FRENCH.ENGLISH.720p.HDDVD.DTS.x264-ESiR.mkv']) - def test_list_transformers(self): 
- __main__.main(["--transformers"], False) - __main__.main(["-V", "--transformers"], False) - def test_demo(self): - __main__.main(["-d"], False) +def test_main_unicode(): + main(['[阿维达].Avida.2006.FRENCH.DVDRiP.XViD-PROD.avi']) - def test_filename(self): - __main__.main(["A.Movie.2014.avi"], False) - __main__.main(["A.Movie.2014.avi", "A.2nd.Movie.2014.avi"], False) - __main__.main(["-y", "A.Movie.2014.avi"], False) - __main__.main(["-a", "A.Movie.2014.avi"], False) - __main__.main(["-v", "A.Movie.2014.avi"], False) - __main__.main(["-t", "movie", "A.Movie.2014.avi"], False) - __main__.main(["-t", "episode", "A.Serie.S02E06.avi"], False) - __main__.main(["-i", "hash_mpc", file_in_same_dir(__file__, "1MB")], False) - __main__.main(["-i", "hash_md5", file_in_same_dir(__file__, "1MB")], False) -suite = allTests(TestMain) +def test_main_forced_unicode(): + main([u'Fear.and.Loathing.in.Las.Vegas.FRENCH.ENGLISH.720p.HDDVD.DTS.x264-ESiR.mkv']) -if __name__ == '__main__': - TextTestRunner(verbosity=2).run(suite) + +def test_main_verbose(): + main(['Fear.and.Loathing.in.Las.Vegas.FRENCH.ENGLISH.720p.HDDVD.DTS.x264-ESiR.mkv', '--verbose']) + + +def test_main_yaml(): + main(['Fear.and.Loathing.in.Las.Vegas.FRENCH.ENGLISH.720p.HDDVD.DTS.x264-ESiR.mkv', '--yaml']) + + +def test_main_json(): + main(['Fear.and.Loathing.in.Las.Vegas.FRENCH.ENGLISH.720p.HDDVD.DTS.x264-ESiR.mkv', '--json']) + + +def test_main_show_property(): + main(['Fear.and.Loathing.in.Las.Vegas.FRENCH.ENGLISH.720p.HDDVD.DTS.x264-ESiR.mkv', '-P', 'title']) + + +def test_main_advanced(): + main(['Fear.and.Loathing.in.Las.Vegas.FRENCH.ENGLISH.720p.HDDVD.DTS.x264-ESiR.mkv', '-a']) + + +def test_main_input(): + main(['--input', os.path.join(__location__, 'test-input-file.txt')]) + + +def test_main_properties(): + main(['-p']) + main(['-p', '--json']) + main(['-p', '--yaml']) + + +def test_main_values(): + main(['-V']) + main(['-V', '--json']) + main(['-V', '--yaml']) + + +def test_main_help(): + with 
pytest.raises(SystemExit): + main(['--help']) + + +def test_main_version(): + main(['--version']) diff --git a/libs/guessit/test/test_matchtree.py b/libs/guessit/test/test_matchtree.py deleted file mode 100644 index 8712d78f..00000000 --- a/libs/guessit/test/test_matchtree.py +++ /dev/null @@ -1,93 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# -# GuessIt - A library for guessing information from filenames -# Copyright (c) 2013 Nicolas Wack -# -# GuessIt is free software; you can redistribute it and/or modify it under -# the terms of the Lesser GNU General Public License as published by -# the Free Software Foundation; either version 3 of the License, or -# (at your option) any later version. -# -# GuessIt is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# Lesser GNU General Public License for more details. -# -# You should have received a copy of the Lesser GNU General Public License -# along with this program. If not, see . -# - -from __future__ import absolute_import, division, print_function, unicode_literals - -from guessit.test.guessittest import * - -from guessit.transfo.guess_release_group import GuessReleaseGroup -from guessit.transfo.guess_properties import GuessProperties -from guessit.matchtree import BaseMatchTree - -keywords = yaml.load(""" - -? Xvid PROPER -: videoCodec: Xvid - other: PROPER - -? 
PROPER-Xvid -: videoCodec: Xvid - other: PROPER - -""") - - -def guess_info(string, options=None): - mtree = MatchTree(string) - GuessReleaseGroup().process(mtree, options) - GuessProperties().process(mtree, options) - return mtree.matched() - - -class TestMatchTree(TestGuessit): - def test_base_tree(self): - t = BaseMatchTree('One Two Three(Three) Four') - t.partition((3, 7, 20)) - leaves = list(t.leaves()) - - self.assertEqual(leaves[0].span, (0, 3)) - - self.assertEqual('One', leaves[0].value) - self.assertEqual(' Two', leaves[1].value) - self.assertEqual(' Three(Three)', leaves[2].value) - self.assertEqual(' Four', leaves[3].value) - - leaves[2].partition((1, 6, 7, 12)) - three_leaves = list(leaves[2].leaves()) - - self.assertEqual('Three', three_leaves[1].value) - self.assertEqual('Three', three_leaves[3].value) - - leaves = list(t.leaves()) - - self.assertEqual(len(leaves), 8) - - self.assertEqual(leaves[5], three_leaves[3]) - - self.assertEqual(t.previous_leaf(leaves[5]), leaves[4]) - self.assertEqual(t.next_leaf(leaves[5]), leaves[6]) - - self.assertEqual(t.next_leaves(leaves[5]), [leaves[6], leaves[7]]) - self.assertEqual(t.previous_leaves(leaves[5]), [leaves[4], leaves[3], leaves[2], leaves[1], leaves[0]]) - - self.assertEqual(t.next_leaf(leaves[7]), None) - self.assertEqual(t.previous_leaf(leaves[0]), None) - - self.assertEqual(t.next_leaves(leaves[7]), []) - self.assertEqual(t.previous_leaves(leaves[0]), []) - - def test_match(self): - self.checkFields(keywords, guess_info) - - -suite = allTests(TestMatchTree) - -if __name__ == '__main__': - TextTestRunner(verbosity=2).run(suite) diff --git a/libs/guessit/test/test_movie.py b/libs/guessit/test/test_movie.py deleted file mode 100644 index eecbf49d..00000000 --- a/libs/guessit/test/test_movie.py +++ /dev/null @@ -1,35 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# -# GuessIt - A library for guessing information from filenames -# Copyright (c) 2013 Nicolas Wack -# -# GuessIt is free software; 
you can redistribute it and/or modify it under -# the terms of the Lesser GNU General Public License as published by -# the Free Software Foundation; either version 3 of the License, or -# (at your option) any later version. -# -# GuessIt is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# Lesser GNU General Public License for more details. -# -# You should have received a copy of the Lesser GNU General Public License -# along with this program. If not, see . -# - -from __future__ import absolute_import, division, print_function, unicode_literals - -from guessit.test.guessittest import * - - -class TestMovie(TestGuessit): - def testMovies(self): - self.checkMinimumFieldsCorrect(filetype='movie', - filename='movies.yaml') - - -suite = allTests(TestMovie) - -if __name__ == '__main__': - TextTestRunner(verbosity=2).run(suite) diff --git a/libs/guessit/test/test_quality.py b/libs/guessit/test/test_quality.py deleted file mode 100644 index 52e21791..00000000 --- a/libs/guessit/test/test_quality.py +++ /dev/null @@ -1,126 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# -# GuessIt - A library for guessing information from filenames -# Copyright (c) 2013 Nicolas Wack -# -# GuessIt is free software; you can redistribute it and/or modify it under -# the terms of the Lesser GNU General Public License as published by -# the Free Software Foundation; either version 3 of the License, or -# (at your option) any later version. -# -# GuessIt is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# Lesser GNU General Public License for more details. -# -# You should have received a copy of the Lesser GNU General Public License -# along with this program. If not, see . 
-# - -from __future__ import absolute_import, division, print_function, unicode_literals - -from guessit.quality import best_quality, best_quality_properties -from guessit.containers import QualitiesContainer -from guessit.test.guessittest import * - - -class TestQuality(TestGuessit): - def test_container(self): - container = QualitiesContainer() - - container.register_quality('color', 'red', 10) - container.register_quality('color', 'orange', 20) - container.register_quality('color', 'green', 30) - - container.register_quality('context', 'sun', 100) - container.register_quality('context', 'sea', 200) - container.register_quality('context', 'sex', 300) - - g1 = Guess() - g1['color'] = 'red' - - g2 = Guess() - g2['color'] = 'green' - - g3 = Guess() - g3['color'] = 'orange' - - q3 = container.rate_quality(g3) - self.assertEqual(q3, 20, "ORANGE should be rated 20. Don't ask why!") - - q1 = container.rate_quality(g1) - q2 = container.rate_quality(g2) - - self.assertTrue(q2 > q1, "GREEN should be greater than RED. Don't ask why!") - - g1['context'] = 'sex' - g2['context'] = 'sun' - - q1 = container.rate_quality(g1) - q2 = container.rate_quality(g2) - - self.assertTrue(q1 > q2, "SEX should be greater than SUN. Don't ask why!") - - self.assertEqual(container.best_quality(g1, g2), g1, "RED&SEX should be better than GREEN&SUN. Don't ask why!") - - self.assertEqual(container.best_quality_properties(['color'], g1, g2), g2, "GREEN should be better than RED. Don't ask why!") - - self.assertEqual(container.best_quality_properties(['context'], g1, g2), g1, "SEX should be better than SUN. Don't ask why!") - - q1 = container.rate_quality(g1, 'color') - q2 = container.rate_quality(g2, 'color') - - self.assertTrue(q2 > q1, "GREEN should be greater than RED. 
Don't ask why!") - - container.unregister_quality('context', 'sex') - container.unregister_quality('context', 'sun') - - q1 = container.rate_quality(g1) - q2 = container.rate_quality(g2) - - self.assertTrue(q2 > q1, "GREEN&SUN should be greater than RED&SEX. Don't ask why!") - - g3['context'] = 'sea' - container.unregister_quality('context', 'sea') - - q3 = container.rate_quality(g3, 'context') - self.assertEqual(q3, 0, "Context should be unregistered.") - - container.unregister_quality('color') - q3 = container.rate_quality(g3, 'color') - - self.assertEqual(q3, 0, "Color should be unregistered.") - - container.clear_qualities() - - q1 = container.rate_quality(g1) - q2 = container.rate_quality(g2) - - self.assertTrue(q1 == q2 == 0, "Empty quality container should rate each guess to 0") - - def test_quality_transformers(self): - guess_720p = guessit.guess_file_info("2012.2009.720p.BluRay.x264.DTS WiKi.mkv") - guess_1080p = guessit.guess_file_info("2012.2009.1080p.BluRay.x264.MP3 WiKi.mkv") - - self.assertTrue('audioCodec' in guess_720p, "audioCodec should be present") - self.assertTrue('audioCodec' in guess_1080p, "audioCodec should be present") - self.assertTrue('screenSize' in guess_720p, "screenSize should be present") - self.assertTrue('screenSize' in guess_1080p, "screenSize should be present") - - best_quality_guess = best_quality(guess_720p, guess_1080p) - - self.assertTrue(guess_1080p == best_quality_guess, "1080p+MP3 is not the best global quality") - - best_quality_guess = best_quality_properties(['screenSize'], guess_720p, guess_1080p) - - self.assertTrue(guess_1080p == best_quality_guess, "1080p is not the best screenSize") - - best_quality_guess = best_quality_properties(['audioCodec'], guess_720p, guess_1080p) - - self.assertTrue(guess_720p == best_quality_guess, "DTS is not the best audioCodec") - -suite = allTests(TestQuality) - -if __name__ == '__main__': - TextTestRunner(verbosity=2).run(suite) diff --git a/libs/guessit/test/test_utils.py 
b/libs/guessit/test/test_utils.py deleted file mode 100644 index 87eecb98..00000000 --- a/libs/guessit/test/test_utils.py +++ /dev/null @@ -1,163 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# -# GuessIt - A library for guessing information from filenames -# Copyright (c) 2013 Nicolas Wack -# -# GuessIt is free software; you can redistribute it and/or modify it under -# the terms of the Lesser GNU General Public License as published by -# the Free Software Foundation; either version 3 of the License, or -# (at your option) any later version. -# -# GuessIt is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# Lesser GNU General Public License for more details. -# -# You should have received a copy of the Lesser GNU General Public License -# along with this program. If not, see . -# - -from __future__ import absolute_import, division, print_function, unicode_literals - -from guessit.test.guessittest import * -from guessit.fileutils import split_path -from guessit.textutils import strip_brackets, str_replace, str_fill, from_camel, is_camel,\ - levenshtein, reorder_title -from guessit import PY2 -from guessit.date import search_date, search_year -from datetime import datetime, date, timedelta - - -class TestUtils(TestGuessit): - def test_splitpath(self): - alltests = {False: {'/usr/bin/smewt': ['/', 'usr', 'bin', 'smewt'], - 'relative_path/to/my_folder/': ['relative_path', 'to', 'my_folder'], - '//some/path': ['//', 'some', 'path'], - '//some//path': ['//', 'some', 'path'], - '///some////path': ['///', 'some', 'path'] - - }, - True: {'C:\\Program Files\\Smewt\\smewt.exe': ['C:\\', 'Program Files', 'Smewt', 'smewt.exe'], - 'Documents and Settings\\User\\config': ['Documents and Settings', 'User', 'config'], - 'C:\\Documents and Settings\\User\\config': ['C:\\', 'Documents and Settings', 'User', 'config'], - # 
http://bugs.python.org/issue19945 - '\\\\netdrive\\share': ['\\\\', 'netdrive', 'share'] if PY2 else ['\\\\netdrive\\share'], - '\\\\netdrive\\share\\folder': ['\\\\', 'netdrive', 'share', 'folder'] if PY2 else ['\\\\netdrive\\share\\', 'folder'], - } - } - tests = alltests[sys.platform == 'win32'] - for path, split in tests.items(): - self.assertEqual(split, split_path(path)) - - def test_strip_brackets(self): - allTests = (('', ''), - ('[test]', 'test'), - ('{test2}', 'test2'), - ('(test3)', 'test3'), - ('(test4]', '(test4]'), - ) - - for i, e in allTests: - self.assertEqual(e, strip_brackets(i)) - - def test_levenshtein(self): - self.assertEqual(levenshtein("abcdef ghijk lmno", "abcdef ghijk lmno"), 0) - self.assertEqual(levenshtein("abcdef ghijk lmnop", "abcdef ghijk lmno"), 1) - self.assertEqual(levenshtein("abcdef ghijk lmno", "abcdef ghijk lmn"), 1) - self.assertEqual(levenshtein("abcdef ghijk lmno", "abcdef ghijk lmnp"), 1) - self.assertEqual(levenshtein("abcdef ghijk lmno", "abcdef ghijk lmnq"), 1) - self.assertEqual(levenshtein("cbcdef ghijk lmno", "abcdef ghijk lmnq"), 2) - self.assertEqual(levenshtein("cbcdef ghihk lmno", "abcdef ghijk lmnq"), 3) - - def test_reorder_title(self): - self.assertEqual(reorder_title("Simpsons, The"), "The Simpsons") - self.assertEqual(reorder_title("Simpsons,The"), "The Simpsons") - self.assertEqual(reorder_title("Simpsons,Les", articles=('the', 'le', 'la', 'les')), "Les Simpsons") - self.assertEqual(reorder_title("Simpsons, Les", articles=('the', 'le', 'la', 'les')), "Les Simpsons") - - def test_camel(self): - self.assertEqual("", from_camel("")) - - self.assertEqual("Hello world", str_replace("Hello World", 6, 'w')) - self.assertEqual("Hello *****", str_fill("Hello World", (6, 11), '*')) - - self.assertTrue("This is camel", from_camel("ThisIsCamel")) - - self.assertEqual('camel case', from_camel('camelCase')) - self.assertEqual('A case', from_camel('ACase')) - self.assertEqual('MiXedCaSe is not camel case', 
from_camel('MiXedCaSe is not camelCase')) - - self.assertEqual("This is camel cased title", from_camel("ThisIsCamelCasedTitle")) - self.assertEqual("This is camel CASED title", from_camel("ThisIsCamelCASEDTitle")) - - self.assertEqual("These are camel CASED title", from_camel("TheseAreCamelCASEDTitle")) - - self.assertEqual("Give a camel case string", from_camel("GiveACamelCaseString")) - - self.assertEqual("Death TO camel case", from_camel("DeathTOCamelCase")) - self.assertEqual("But i like java too:)", from_camel("ButILikeJavaToo:)")) - - self.assertEqual("Beatdown french DVD rip.mkv", from_camel("BeatdownFrenchDVDRip.mkv")) - self.assertEqual("DO NOTHING ON UPPER CASE", from_camel("DO NOTHING ON UPPER CASE")) - - self.assertFalse(is_camel("this_is_not_camel")) - self.assertTrue(is_camel("ThisIsCamel")) - - self.assertEqual("Dark.City.(1998).DC.BDRIP.720p.DTS.X264-CHD.mkv", from_camel("Dark.City.(1998).DC.BDRIP.720p.DTS.X264-CHD.mkv")) - self.assertFalse(is_camel("Dark.City.(1998).DC.BDRIP.720p.DTS.X264-CHD.mkv")) - - self.assertEqual("A2LiNE", from_camel("A2LiNE")) - - def test_date(self): - self.assertEqual(search_year(' in the year 2000... '), (2000, (13, 17))) - self.assertEqual(search_year(' they arrived in 1492. '), (None, None)) - - today = date.today() - today_year_2 = int(str(today.year)[2:]) - - future = today + timedelta(days=1000) - future_year_2 = int(str(future.year)[2:]) - - past = today - timedelta(days=10000) - past_year_2 = int(str(past.year)[2:]) - - self.assertEqual(search_date(' Something before 2002-04-22 '), (date(2002, 4, 22), (18, 28))) - self.assertEqual(search_date(' 2002-04-22 Something after '), (date(2002, 4, 22), (1, 11))) - - self.assertEqual(search_date(' This happened on 2002-04-22. '), (date(2002, 4, 22), (18, 28))) - self.assertEqual(search_date(' This happened on 22-04-2002. '), (date(2002, 4, 22), (18, 28))) - - self.assertEqual(search_date(' This happened on 13-04-%s. 
' % (today_year_2,)), (date(today.year, 4, 13), (18, 26))) - self.assertEqual(search_date(' This happened on 22-04-%s. ' % (future_year_2,)), (date(future.year, 4, 22), (18, 26))) - self.assertEqual(search_date(' This happened on 20-04-%s. ' % (past_year_2)), (date(past.year, 4, 20), (18, 26))) - - self.assertEqual(search_date(' This happened on 13-06-14. ', year_first=True), (date(2013, 6, 14), (18, 26))) - self.assertEqual(search_date(' This happened on 13-05-14. ', year_first=False), (date(2014, 5, 13), (18, 26))) - - self.assertEqual(search_date(' This happened on 04-13-%s. ' % (today_year_2,)), (date(today.year, 4, 13), (18, 26))) - self.assertEqual(search_date(' This happened on 04-22-%s. ' % (future_year_2,)), (date(future.year, 4, 22), (18, 26))) - self.assertEqual(search_date(' This happened on 04-20-%s. ' % (past_year_2)), (date(past.year, 4, 20), (18, 26))) - - self.assertEqual(search_date(' This happened on 35-12-%s. ' % (today_year_2,)), (None, None)) - self.assertEqual(search_date(' This happened on 37-18-%s. ' % (future_year_2,)), (None, None)) - self.assertEqual(search_date(' This happened on 44-42-%s. ' % (past_year_2)), (None, None)) - - self.assertEqual(search_date(' This happened on %s. ' % (today, )), (today, (18, 28))) - self.assertEqual(search_date(' This happened on %s. ' % (future, )), (future, (18, 28))) - self.assertEqual(search_date(' This happened on %s. ' % (past, )), (past, (18, 28))) - - self.assertEqual(search_date(' released date: 04-03-1901? '), (None, None)) - - self.assertEqual(search_date(' There\'s no date in here. 
'), (None, None)) - - self.assertEqual(search_date(' Something 01-02-03 '), (date(2003, 2, 1), (11, 19))) - self.assertEqual(search_date(' Something 01-02-03 ', year_first=False, day_first=True), (date(2003, 2, 1), (11, 19))) - self.assertEqual(search_date(' Something 01-02-03 ', year_first=True), (date(2001, 2, 3), (11, 19))) - self.assertEqual(search_date(' Something 01-02-03 ', day_first=False), (date(2003, 1, 2), (11, 19))) - - -suite = allTests(TestUtils) - -if __name__ == '__main__': - TextTestRunner(verbosity=2).run(suite) diff --git a/libs/guessit/test/test_yml.py b/libs/guessit/test/test_yml.py new file mode 100644 index 00000000..c8e3d193 --- /dev/null +++ b/libs/guessit/test/test_yml.py @@ -0,0 +1,285 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# pylint: disable=no-self-use, pointless-statement, missing-docstring, invalid-name +import logging + +# io.open supports encoding= in python 2.7 +from io import open # pylint: disable=redefined-builtin +import os +import yaml + +import six + +import babelfish +import pytest + +from rebulk.remodule import re +from rebulk.utils import is_iterable + +from guessit.options import parse_options +from ..yamlutils import OrderedDictYAMLLoader +from .. 
import guessit + + +logger = logging.getLogger(__name__) + +__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__))) + +filename_predicate = None +string_predicate = None + + +# filename_predicate = lambda filename: 'episode_title' in filename +# string_predicate = lambda string: '-DVD.BlablaBla.Fix.Blablabla.XVID' in string + + +class EntryResult(object): + def __init__(self, string, negates=False): + self.string = string + self.negates = negates + self.valid = [] + self.missing = [] + self.different = [] + self.extra = [] + self.others = [] + + @property + def ok(self): + if self.negates: + return self.missing or self.different + return not self.missing and not self.different and not self.extra and not self.others + + @property + def warning(self): + if self.negates: + return False + return not self.missing and not self.different and self.extra + + @property + def error(self): + if self.negates: + return not self.missing and not self.different and not self.others + return self.missing or self.different or self.others + + def __repr__(self): + if self.ok: + return self.string + ': OK!' + elif self.warning: + return '%s%s: WARNING! (valid=%i, extra=%i)' % ('-' if self.negates else '', self.string, len(self.valid), + len(self.extra)) + elif self.error: + return '%s%s: ERROR! (valid=%i, missing=%i, different=%i, extra=%i, others=%i)' % \ + ('-' if self.negates else '', self.string, len(self.valid), len(self.missing), len(self.different), + len(self.extra), len(self.others)) + else: + return '%s%s: UNKOWN! 
(valid=%i, missing=%i, different=%i, extra=%i, others=%i)' % \ + ('-' if self.negates else '', self.string, len(self.valid), len(self.missing), len(self.different), + len(self.extra), len(self.others)) + + @property + def details(self): + ret = [] + if self.valid: + ret.append('valid=' + str(len(self.valid))) + for valid in self.valid: + ret.append(' ' * 4 + str(valid)) + if self.missing: + ret.append('missing=' + str(len(self.missing))) + for missing in self.missing: + ret.append(' ' * 4 + str(missing)) + if self.different: + ret.append('different=' + str(len(self.different))) + for different in self.different: + ret.append(' ' * 4 + str(different)) + if self.extra: + ret.append('extra=' + str(len(self.extra))) + for extra in self.extra: + ret.append(' ' * 4 + str(extra)) + if self.others: + ret.append('others=' + str(len(self.others))) + for other in self.others: + ret.append(' ' * 4 + str(other)) + return ret + + +class Results(list): + def assert_ok(self): + errors = [entry for entry in self if entry.error] + assert not errors + + +def files_and_ids(predicate=None): + files = [] + ids = [] + + for (dirpath, _, filenames) in os.walk(__location__): + if dirpath == __location__: + dirpath_rel = '' + else: + dirpath_rel = os.path.relpath(dirpath, __location__) + for filename in filenames: + name, ext = os.path.splitext(filename) + filepath = os.path.join(dirpath_rel, filename) + if ext == '.yml' and (not predicate or predicate(filepath)): + files.append(filepath) + ids.append(os.path.join(dirpath_rel, name)) + + return files, ids + + +class TestYml(object): + """ + Run tests from yaml files. + Multiple input strings having same expected results can be chained. + Use $ marker to check inputs that should not match results. 
+ """ + + options_re = re.compile(r'^([ \+-]+)(.*)') + + files, ids = files_and_ids(filename_predicate) + + @staticmethod + def set_default(expected, default): + if default: + for k, v in default.items(): + if k not in expected: + expected[k] = v + + @pytest.mark.parametrize('filename', files, ids=ids) + def test(self, filename, caplog): + caplog.setLevel(logging.INFO) + with open(os.path.join(__location__, filename), 'r', encoding='utf-8') as infile: + data = yaml.load(infile, OrderedDictYAMLLoader) + entries = Results() + + last_expected = None + for string, expected in reversed(list(data.items())): + if expected is None: + data[string] = last_expected + else: + last_expected = expected + + default = None + try: + default = data['__default__'] + del data['__default__'] + except KeyError: + pass + + for string, expected in data.items(): + TestYml.set_default(expected, default) + entry = self.check_data(filename, string, expected) + entries.append(entry) + entries.assert_ok() + + def check_data(self, filename, string, expected): + if six.PY2 and isinstance(string, six.text_type): + string = string.encode('utf-8') + converts = [] + for k, v in expected.items(): + if isinstance(v, six.text_type): + v = v.encode('utf-8') + converts.append((k, v)) + for k, v in converts: + expected[k] = v + if not isinstance(string, str): + string = str(string) + if not string_predicate or string_predicate(string): # pylint: disable=not-callable + entry = self.check(string, expected) + if entry.ok: + logger.debug('[' + filename + '] ' + str(entry)) + elif entry.warning: + logger.warning('[' + filename + '] ' + str(entry)) + elif entry.error: + logger.error('[' + filename + '] ' + str(entry)) + for line in entry.details: + logger.error('[' + filename + '] ' + ' ' * 4 + line) + return entry + + def check(self, string, expected): + negates, global_, string = self.parse_token_options(string) + + options = expected.get('options') + if options is None: + options = {} + if not 
isinstance(options, dict): + options = parse_options(options) + if 'implicit' not in options: + options['implicit'] = True + try: + result = guessit(string, options) + except Exception as exc: + logger.error('[' + string + '] Exception: ' + str(exc)) + raise exc + + entry = EntryResult(string, negates) + + if global_: + self.check_global(string, result, entry) + + self.check_expected(result, expected, entry) + + return entry + + def parse_token_options(self, string): + matches = self.options_re.search(string) + negates = False + global_ = False + if matches: + string = matches.group(2) + for opt in matches.group(1): + if '-' in opt: + negates = True + if '+' in opt: + global_ = True + return negates, global_, string + + def check_global(self, string, result, entry): + global_span = [] + for result_matches in result.matches.values(): + for result_match in result_matches: + if not global_span: + global_span = list(result_match.span) + else: + if global_span[0] > result_match.span[0]: + global_span[0] = result_match.span[0] + if global_span[1] < result_match.span[1]: + global_span[1] = result_match.span[1] + if global_span and global_span[1] - global_span[0] < len(string): + entry.others.append("Match is not global") + + def is_same(self, value, expected): + values = set(value) if is_iterable(value) else set((value,)) + expecteds = set(expected) if is_iterable(expected) else set((expected,)) + if len(values) != len(expecteds): + return False + if isinstance(next(iter(values)), babelfish.Language): + # pylint: disable=no-member + expecteds = set([babelfish.Language.fromguessit(expected) for expected in expecteds]) + elif isinstance(next(iter(values)), babelfish.Country): + # pylint: disable=no-member + expecteds = set([babelfish.Country.fromguessit(expected) for expected in expecteds]) + return values == expecteds + + def check_expected(self, result, expected, entry): + if expected: + for expected_key, expected_value in expected.items(): + if expected_key and 
expected_key != 'options' and expected_value is not None: + negates_key, _, result_key = self.parse_token_options(expected_key) + if result_key in result.keys(): + if not self.is_same(result[result_key], expected_value): + if negates_key: + entry.valid.append((expected_key, expected_value)) + else: + entry.different.append((expected_key, expected_value, result[expected_key])) + else: + if negates_key: + entry.different.append((expected_key, expected_value, result[expected_key])) + else: + entry.valid.append((expected_key, expected_value)) + elif not negates_key: + entry.missing.append((expected_key, expected_value)) + + for result_key, result_value in result.items(): + if result_key not in expected.keys(): + entry.extra.append((result_key, result_value)) diff --git a/libs/guessit/test/various.yml b/libs/guessit/test/various.yml new file mode 100644 index 00000000..72e2f602 --- /dev/null +++ b/libs/guessit/test/various.yml @@ -0,0 +1,800 @@ +? Movies/Fear and Loathing in Las Vegas (1998)/Fear.and.Loathing.in.Las.Vegas.720p.HDDVD.DTS.x264-ESiR.mkv +: type: movie + title: Fear and Loathing in Las Vegas + year: 1998 + screen_size: 720p + format: HD-DVD + audio_codec: DTS + video_codec: h264 + release_group: ESiR + +? Series/Duckman/Duckman - 101 (01) - 20021107 - I, Duckman.avi +: type: episode + title: Duckman + season: 1 + episode: 1 + episode_title: I, Duckman + date: 2002-11-07 + +? Series/Neverwhere/Neverwhere.05.Down.Street.[tvu.org.ru].avi +: type: episode + title: Neverwhere + episode: 5 + episode_title: Down Street + website: tvu.org.ru + +? Neverwhere.05.Down.Street.[tvu.org.ru].avi +: type: episode + title: Neverwhere + episode: 5 + episode_title: Down Street + website: tvu.org.ru + +? Series/Breaking Bad/Minisodes/Breaking.Bad.(Minisodes).01.Good.Cop.Bad.Cop.WEBRip.XviD.avi +: type: episode + title: Breaking Bad + episode_format: Minisode + episode: 1 + episode_title: Good Cop Bad Cop + format: WEBRip + video_codec: XviD + +? 
Series/Kaamelott/Kaamelott - Livre V - Ep 23 - Le Forfait.avi +: type: episode + title: Kaamelott + episode: 23 + episode_title: Le Forfait + +? Movies/The Doors (1991)/09.03.08.The.Doors.(1991).BDRip.720p.AC3.X264-HiS@SiLUHD-English.[sharethefiles.com].mkv +: type: movie + title: The Doors + year: 1991 + date: 2008-03-09 + format: BluRay + screen_size: 720p + audio_codec: AC3 + video_codec: h264 + release_group: HiS@SiLUHD + language: english + website: sharethefiles.com + +? Movies/M.A.S.H. (1970)/MASH.(1970).[Divx.5.02][Dual-Subtitulos][DVDRip].ogm +: type: movie + title: MASH + year: 1970 + video_codec: DivX + format: DVD + +? the.mentalist.501.hdtv-lol.mp4 +: type: episode + title: the mentalist + season: 5 + episode: 1 + format: HDTV + release_group: lol + +? the.simpsons.2401.hdtv-lol.mp4 +: type: episode + title: the simpsons + season: 24 + episode: 1 + format: HDTV + release_group: lol + +? Homeland.S02E01.HDTV.x264-EVOLVE.mp4 +: type: episode + title: Homeland + season: 2 + episode: 1 + format: HDTV + video_codec: h264 + release_group: EVOLVE + +? /media/Band_of_Brothers-e01-Currahee.mkv +: type: episode + title: Band of Brothers + episode: 1 + episode_title: Currahee + +? /media/Band_of_Brothers-x02-We_Stand_Alone_Together.mkv +: type: episode + title: Band of Brothers + bonus: 2 + bonus_title: We Stand Alone Together + +? /movies/James_Bond-f21-Casino_Royale-x02-Stunts.mkv +: type: movie + title: Casino Royale + film_title: James Bond + film: 21 + bonus: 2 + bonus_title: Stunts + +? /TV Shows/new.girl.117.hdtv-lol.mp4 +: type: episode + title: new girl + season: 1 + episode: 17 + format: HDTV + release_group: lol + +? The.Office.(US).1x03.Health.Care.HDTV.XviD-LOL.avi +: type: episode + title: The Office + country: US + season: 1 + episode: 3 + episode_title: Health Care + format: HDTV + video_codec: XviD + release_group: LOL + +? 
The_Insider-(1999)-x02-60_Minutes_Interview-1996.mp4 +: type: movie + title: The Insider + year: 1999 + bonus: 2 + bonus_title: 60 Minutes Interview-1996 + +? OSS_117--Cairo,_Nest_of_Spies.mkv +: type: movie + title: OSS 117 + alternative_title: Cairo, Nest of Spies + +? Rush.._Beyond_The_Lighted_Stage-x09-Between_Sun_and_Moon-2002_Hartford.mkv +: type: movie + title: Rush Beyond The Lighted Stage + bonus: 9 + bonus_title: Between Sun and Moon + year: 2002 + +? House.Hunters.International.S56E06.720p.hdtv.x264.mp4 +: type: episode + title: House Hunters International + season: 56 + episode: 6 + screen_size: 720p + format: HDTV + video_codec: h264 + +? White.House.Down.2013.1080p.BluRay.DTS-HD.MA.5.1.x264-PublicHD.mkv +: type: movie + title: White House Down + year: 2013 + screen_size: 1080p + format: BluRay + audio_codec: DTS + audio_profile: HDMA + video_codec: h264 + release_group: PublicHD + audio_channels: "5.1" + +? White.House.Down.2013.1080p.BluRay.DTSHD.MA.5.1.x264-PublicHD.mkv +: type: movie + title: White House Down + year: 2013 + screen_size: 1080p + format: BluRay + audio_codec: DTS + audio_profile: HDMA + video_codec: h264 + release_group: PublicHD + audio_channels: "5.1" + +? Hostages.S01E01.Pilot.for.Air.720p.WEB-DL.DD5.1.H.264-NTb.nfo +: type: episode + title: Hostages + episode_title: Pilot for Air + season: 1 + episode: 1 + screen_size: 720p + format: WEB-DL + audio_channels: "5.1" + video_codec: h264 + audio_codec: DolbyDigital + release_group: NTb + +? Despicable.Me.2.2013.1080p.BluRay.x264-VeDeTT.nfo +: type: movie + title: Despicable Me 2 + year: 2013 + screen_size: 1080p + format: BluRay + video_codec: h264 + release_group: VeDeTT + +? Le Cinquieme Commando 1971 SUBFORCED FRENCH DVDRiP XViD AC3 Bandix.mkv +: type: movie + audio_codec: AC3 + format: DVD + release_group: Bandix + subtitle_language: French + title: Le Cinquieme Commando + video_codec: XviD + year: 1971 + +? 
Le Seigneur des Anneaux - La Communauté de l'Anneau - Version Longue - BDRip.mkv +: type: movie + format: BluRay + title: Le Seigneur des Anneaux + +? La petite bande (Michel Deville - 1983) VF PAL MP4 x264 AAC.mkv +: type: movie + audio_codec: AAC + language: French + title: La petite bande + video_codec: h264 + year: 1983 + other: PAL + +? Retour de Flammes (Gregor Schnitzler 2003) FULL DVD.iso +: type: movie + format: DVD + title: Retour de Flammes + type: movie + year: 2003 + +? A.Common.Title.Special.2014.avi +: type: movie + year: 2014 + title: A Common Title Special + +? A.Common.Title.2014.Special.avi +: type: episode + year: 2014 + title: A Common Title + episode_title: Special + episode_details: Special + +? A.Common.Title.2014.Special.Edition.avi +: type: movie + year: 2014 + title: A Common Title + edition: Special Edition + +? Downton.Abbey.2013.Christmas.Special.HDTV.x264-FoV.mp4 +: type: episode + year: 2013 + title: Downton Abbey + episode_title: Christmas Special + video_codec: h264 + release_group: FoV + format: HDTV + episode_details: Special + +? Doctor_Who_2013_Christmas_Special.The_Time_of_The_Doctor.HD +: type: episode + title: Doctor Who + other: HD + episode_details: Special + episode_title: Christmas Special The Time of The Doctor + year: 2013 + +? Doctor Who 2005 50th Anniversary Special The Day of the Doctor 3.avi +: type: episode + title: Doctor Who + episode_details: Special + episode_title: 50th Anniversary Special The Day of the Doctor 3 + year: 2005 + +? Robot Chicken S06-Born Again Virgin Christmas Special HDTV x264.avi +: type: episode + title: Robot Chicken + format: HDTV + season: 6 + episode_title: Born Again Virgin Christmas Special + video_codec: h264 + episode_details: Special + +? Wicked.Tuna.S03E00.Head.To.Tail.Special.HDTV.x264-YesTV +: type: episode + title: Wicked Tuna + episode_title: Head To Tail Special + release_group: YesTV + season: 3 + episode: 0 + video_codec: h264 + format: HDTV + episode_details: Special + +? 
The.Voice.UK.S03E12.HDTV.x264-C4TV +: episode: 12 + video_codec: h264 + format: HDTV + title: The Voice + release_group: C4TV + season: 3 + country: United Kingdom + type: episode + +? /tmp/star.trek.9/star.trek.9.mkv +: type: movie + title: star trek 9 + +? star.trek.9.mkv +: type: movie + title: star trek 9 + +? FlexGet.S01E02.TheName.HDTV.xvid +: episode: 2 + format: HDTV + season: 1 + title: FlexGet + episode_title: TheName + type: episode + video_codec: XviD + +? FlexGet.S01E02.TheName.HDTV.xvid +: episode: 2 + format: HDTV + season: 1 + title: FlexGet + episode_title: TheName + type: episode + video_codec: XviD + +? some.series.S03E14.Title.Here.720p +: episode: 14 + screen_size: 720p + season: 3 + title: some series + episode_title: Title Here + type: episode + +? '[the.group] Some.Series.S03E15.Title.Two.720p' +: episode: 15 + release_group: the.group + screen_size: 720p + season: 3 + title: Some Series + episode_title: Title Two + type: episode + +? 'HD 720p: Some series.S03E16.Title.Three' +: episode: 16 + other: HD + screen_size: 720p + season: 3 + title: Some series + episode_title: Title Three + type: episode + +? Something.Season.2.1of4.Ep.Title.HDTV.torrent +: episode_count: 4 + episode: 1 + format: HDTV + season: 2 + title: Something + episode_title: Title + type: episode + container: torrent + +? Show-A (US) - Episode Title S02E09 hdtv +: country: US + episode: 9 + format: HDTV + season: 2 + title: Show-A + type: episode + +? Jack's.Show.S03E01.blah.1080p +: episode: 1 + screen_size: 1080p + season: 3 + title: Jack's Show + episode_title: blah + type: episode + +? FlexGet.epic +: title: FlexGet epic + type: movie + +? FlexGet.Apt.1 +: title: FlexGet Apt 1 + type: movie + +? FlexGet.aptitude +: title: FlexGet aptitude + type: movie + +? FlexGet.Step1 +: title: FlexGet Step1 + type: movie + +? 
Movies/El Bosque Animado (1987)/El.Bosque.Animado.[Jose.Luis.Cuerda.1987].[Xvid-Dvdrip-720 * 432].avi +: format: DVD + screen_size: 720x432 + title: El Bosque Animado + video_codec: XviD + year: 1987 + type: movie + +? Movies/El Bosque Animado (1987)/El.Bosque.Animado.[Jose.Luis.Cuerda.1987].[Xvid-Dvdrip-720x432].avi +: format: DVD + screen_size: 720x432 + title: El Bosque Animado + video_codec: XviD + year: 1987 + type: movie + +? 2009.shoot.fruit.chan.multi.dvd9.pal +: format: DVD + language: mul + other: PAL + title: shoot fruit chan + type: movie + year: 2009 + +? 2009.shoot.fruit.chan.multi.dvd5.pal +: format: DVD + language: mul + other: PAL + title: shoot fruit chan + type: movie + year: 2009 + +? The.Flash.2014.S01E01.PREAIR.WEBRip.XviD-EVO.avi +: episode: 1 + format: WEBRip + other: Preair + release_group: EVO + season: 1 + title: The Flash + type: episode + video_codec: XviD + year: 2014 + +? Ice.Lake.Rebels.S01E06.Ice.Lake.Games.720p.HDTV.x264-DHD +: episode: 6 + format: HDTV + release_group: DHD + screen_size: 720p + season: 1 + title: Ice Lake Rebels + episode_title: Ice Lake Games + type: episode + video_codec: h264 + +? The League - S06E10 - Epi Sexy.mkv +: episode: 10 + season: 6 + title: The League + episode_title: Epi Sexy + type: episode + +? Stay (2005) [1080p]/Stay.2005.1080p.BluRay.x264.YIFY.mp4 +: format: BluRay + release_group: YIFY + screen_size: 1080p + title: Stay + type: movie + video_codec: h264 + year: 2005 + +? /media/live/A/Anger.Management.S02E82.720p.HDTV.X264-DIMENSION.mkv +: format: HDTV + release_group: DIMENSION + screen_size: 720p + title: Anger Management + type: episode + season: 2 + episode: 82 + video_codec: h264 + +? "[Figmentos] Monster 34 - At the End of Darkness [781219F1].mkv" +: type: episode + release_group: Figmentos + title: Monster + episode: 34 + episode_title: At the End of Darkness + crc32: 781219F1 + +? 
Game.of.Thrones.S05E07.720p.HDTV-KILLERS.mkv +: type: episode + episode: 7 + format: HDTV + release_group: KILLERS + screen_size: 720p + season: 5 + title: Game of Thrones + +? Game.of.Thrones.S05E07.HDTV.720p-KILLERS.mkv +: type: episode + episode: 7 + format: HDTV + release_group: KILLERS + screen_size: 720p + season: 5 + title: Game of Thrones + +? Parks and Recreation - [04x12] - Ad Campaign.avi +: type: episode + title: Parks and Recreation + season: 4 + episode: 12 + episode_title: Ad Campaign + +? Star Trek Into Darkness (2013)/star.trek.into.darkness.2013.720p.web-dl.h264-publichd.mkv +: type: movie + title: Star Trek Into Darkness + year: 2013 + screen_size: 720p + format: WEB-DL + video_codec: h264 + release_group: publichd + +? /var/medias/series/The Originals/Season 02/The.Originals.S02E15.720p.HDTV.X264-DIMENSION.mkv +: type: episode + title: The Originals + season: 2 + episode: 15 + screen_size: 720p + format: HDTV + video_codec: h264 + release_group: DIMENSION + +? Test.S01E01E07-FooBar-Group.avi +: container: avi + episode: + - 1 + - 7 + episode_title: FooBar-Group # Make sure it doesn't conflict with uuid + mimetype: video/x-msvideo + season: 1 + title: Test + type: episode + +? TEST.S01E02.2160p.NF.WEBRip.x264.DD5.1-ABC +: audio_channels: '5.1' + audio_codec: DolbyDigital + episode: 2 + format: WEBRip + other: Netflix + release_group: ABC + screen_size: 4K + season: 1 + title: TEST + type: episode + video_codec: h264 + +? TEST.2015.12.30.720p.WEBRip.h264-ABC +: date: 2015-12-30 + format: WEBRip + release_group: ABC + screen_size: 720p + title: TEST + type: episode + video_codec: h264 + +? TEST.S01E10.24.1080p.NF.WEBRip.AAC2.0.x264-ABC +: audio_channels: '2.0' + audio_codec: AAC + episode: 10 + episode_title: '24' + format: WEBRip + other: Netflix + release_group: ABC + screen_size: 1080p + season: 1 + title: TEST + type: episode + video_codec: h264 + +? 
TEST.S01E10.24.1080p.NF.WEBRip.AAC2.0.x264-ABC +: audio_channels: '2.0' + audio_codec: AAC + episode: 10 + episode_title: '24' + format: WEBRip + other: Netflix + release_group: ABC + screen_size: 1080p + season: 1 + title: TEST + type: episode + video_codec: h264 + +? TEST.S01E10.24.1080p.NF.WEBRip.AAC.2.0.x264-ABC +: audio_channels: '2.0' + audio_codec: AAC + episode: 10 + episode_title: '24' + format: WEBRip + other: Netflix + release_group: ABC + screen_size: 1080p + season: 1 + title: TEST + type: episode + video_codec: h264 + +? TEST.S05E02.720p.iP.WEBRip.AAC2.0.H264-ABC +: audio_channels: '2.0' + audio_codec: AAC + episode: 2 + format: WEBRip + release_group: ABC + screen_size: 720p + season: 5 + title: TEST + type: episode + video_codec: h264 + +? TEST.S03E07.720p.WEBRip.AAC2.0.x264-ABC +: audio_channels: '2.0' + audio_codec: AAC + episode: 7 + format: WEBRip + release_group: ABC + screen_size: 720p + season: 3 + title: TEST + type: episode + video_codec: h264 + +? TEST.S15E15.24.1080p.FREE.WEBRip.AAC2.0.x264-ABC +: audio_channels: '2.0' + audio_codec: AAC + episode: 15 + episode_title: '24' + format: WEBRip + release_group: ABC + screen_size: 1080p + season: 15 + title: TEST + type: episode + video_codec: h264 + +? TEST.S11E11.24.720p.ETV.WEBRip.AAC2.0.x264-ABC +: audio_channels: '2.0' + audio_codec: AAC + episode: 11 + episode_title: '24' + format: WEBRip + release_group: ABC + screen_size: 720p + season: 11 + title: TEST + type: episode + video_codec: h264 + +? TEST.2015.1080p.HC.WEBRip.x264.AAC2.0-ABC +: audio_channels: '2.0' + audio_codec: AAC + format: WEBRip + release_group: ABC + screen_size: 1080p + title: TEST + type: movie + video_codec: h264 + year: 2015 + +? TEST.2015.1080p.3D.BluRay.Half-SBS.x264.DTS-HD.MA.7.1-ABC +: audio_channels: '7.1' + audio_codec: DTS + audio_profile: HDMA + format: BluRay + other: 3D + release_group: ABC + screen_size: 1080p + title: TEST + type: movie + video_codec: h264 + year: 2015 + +? 
TEST.2015.1080p.3D.BluRay.Half-OU.x264.DTS-HD.MA.7.1-ABC +: audio_channels: '7.1' + audio_codec: DTS + audio_profile: HDMA + format: BluRay + other: 3D + release_group: ABC + screen_size: 1080p + title: TEST + type: movie + video_codec: h264 + year: 2015 + +? TEST.2015.1080p.3D.BluRay.Half-OU.x264.DTS-HD.MA.TrueHD.7.1.Atmos-ABC +: audio_channels: '7.1' + audio_codec: + - DTS + - TrueHD + - DolbyAtmos + audio_profile: HDMA + format: BluRay + other: 3D + release_group: ABC + screen_size: 1080p + title: TEST + type: movie + video_codec: h264 + year: 2015 + +? TEST.2015.1080p.3D.BluRay.Half-SBS.x264.DTS-HD.MA.TrueHD.7.1.Atmos-ABC +: audio_channels: '7.1' + audio_codec: + - DTS + - TrueHD + - DolbyAtmos + audio_profile: HDMA + format: BluRay + other: 3D + release_group: ABC + screen_size: 1080p + title: TEST + type: movie + video_codec: h264 + year: 2015 + +? TEST.2015.1080p.BluRay.REMUX.AVC.DTS-HD.MA.TrueHD.7.1.Atmos-ABC +: audio_channels: '7.1' + audio_codec: + - DTS + - TrueHD + - DolbyAtmos + audio_profile: HDMA + format: BluRay + other: Remux + release_group: ABC + screen_size: 1080p + title: TEST + type: movie + year: 2015 + +? Gangs of New York 2002 REMASTERED 1080p BluRay x264-AVCHD +: format: BluRay + other: Remastered + screen_size: 1080p + title: Gangs of New York + type: movie + video_codec: h264 + year: 2002 + +? Peep.Show.S06E02.DVDrip.x264-faks86.mkv +: container: mkv + episode: 2 + format: DVD + release_group: faks86 + season: 6 + title: Peep Show + type: episode + video_codec: h264 + +? The Soup - 11x41 - October 8, 2014.mp4 +: container: mp4 + episode: 41 + episode_title: October 8 + season: 11 + title: The Soup + type: episode + year: 2014 + +? Red.Rock.S02E59.WEB-DLx264-JIVE +: episode: 59 + season: 2 + format: WEB-DL + release_group: JIVE + title: Red Rock + type: episode + video_codec: h264 + +? 
Pawn.Stars.S12E31.Deals.On.Wheels.PDTVx264-JIVE +: episode: 31 + episode_title: Deals On Wheels + season: 12 + format: DVB + release_group: JIVE + title: Pawn Stars + type: episode + video_codec: h264 + +? Duck.Dynasty.S09E09.Van.He-llsing.HDTVx264-JIVE +: episode: 9 + episode_title: Van He-llsing + season: 9 + format: HDTV + release_group: JIVE + title: Duck Dynasty + type: episode + video_codec: h264 \ No newline at end of file diff --git a/libs/guessit/textutils.py b/libs/guessit/textutils.py deleted file mode 100644 index 3537aa3b..00000000 --- a/libs/guessit/textutils.py +++ /dev/null @@ -1,355 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# -# GuessIt - A library for guessing information from filenames -# Copyright (c) 2013 Nicolas Wack -# -# GuessIt is free software; you can redistribute it and/or modify it under -# the terms of the Lesser GNU General Public License as published by -# the Free Software Foundation; either version 3 of the License, or -# (at your option) any later version. -# -# GuessIt is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# Lesser GNU General Public License for more details. -# -# You should have received a copy of the Lesser GNU General Public License -# along with this program. If not, see . 
-# - -from __future__ import absolute_import, division, print_function, unicode_literals - -from guessit import s -from guessit.patterns import sep -import functools -import unicodedata -import re - -# string-related functions - - -def normalize_unicode(s): - return unicodedata.normalize('NFC', s) - - -def strip_brackets(s): - if not s: - return s - - if ((s[0] == '[' and s[-1] == ']') or - (s[0] == '(' and s[-1] == ')') or - (s[0] == '{' and s[-1] == '}')): - return s[1:-1] - - return s - - -_dotted_rexp = re.compile(r'(?:\W|^)(([A-Za-z]\.){2,}[A-Za-z]\.?)') - - -def clean_default(st): - for c in sep: - # do not remove certain chars - if c in ['-', ',']: - continue - - if c == '.': - # we should not remove the dots for acronyms and such - dotted = _dotted_rexp.search(st) - if dotted: - s = dotted.group(1) - exclude_begin, exclude_end = dotted.span(1) - - st = (st[:exclude_begin].replace(c, ' ') + - st[exclude_begin:exclude_end] + - st[exclude_end:].replace(c, ' ')) - continue - - st = st.replace(c, ' ') - - parts = st.split() - result = ' '.join(p for p in parts if p != '') - - # now also remove dashes on the outer part of the string - while result and result[0] in '-': - result = result[1:] - while result and result[-1] in '-': - result = result[:-1] - - return result - -_words_rexp = re.compile('\w+', re.UNICODE) - - -def find_words(s): - return _words_rexp.findall(s.replace('_', ' ')) - - -def iter_words(s): - return _words_rexp.finditer(s.replace('_', ' ')) - - -def reorder_title(title, articles=('the',), separators=(',', ', ')): - ltitle = title.lower() - for article in articles: - for separator in separators: - suffix = separator + article - if ltitle[-len(suffix):] == suffix: - return title[-len(suffix) + len(separator):] + ' ' + title[:-len(suffix)] - return title - - -def str_replace(string, pos, c): - return string[:pos] + c + string[pos + 1:] - - -def str_fill(string, region, c): - start, end = region - return string[:start] + c * (end - start) + 
string[end:] - - -def levenshtein(a, b): - if not a: - return len(b) - if not b: - return len(a) - - m = len(a) - n = len(b) - d = [] - for i in range(m + 1): - d.append([0] * (n + 1)) - - for i in range(m + 1): - d[i][0] = i - - for j in range(n + 1): - d[0][j] = j - - for i in range(1, m + 1): - for j in range(1, n + 1): - if a[i - 1] == b[j - 1]: - cost = 0 - else: - cost = 1 - - d[i][j] = min(d[i - 1][j] + 1, # deletion - d[i][j - 1] + 1, # insertion - d[i - 1][j - 1] + cost # substitution - ) - - return d[m][n] - - -# group-related functions - -def find_first_level_groups_span(string, enclosing): - """Return a list of pairs (start, end) for the groups delimited by the given - enclosing characters. - This does not return nested groups, ie: '(ab(c)(d))' will return a single group - containing the whole string. - - >>> find_first_level_groups_span('abcd', '()') - [] - - >>> find_first_level_groups_span('abc(de)fgh', '()') - [(3, 7)] - - >>> find_first_level_groups_span('(ab(c)(d))', '()') - [(0, 10)] - - >>> find_first_level_groups_span('ab[c]de[f]gh(i)', '[]') - [(2, 5), (7, 10)] - """ - opening, closing = enclosing - depth = [] # depth is a stack of indices where we opened a group - result = [] - for i, c, in enumerate(string): - if c == opening: - depth.append(i) - elif c == closing: - try: - start = depth.pop() - end = i - if not depth: - # we emptied our stack, so we have a 1st level group - result.append((start, end + 1)) - except IndexError: - # we closed a group which was not opened before - pass - - return result - - -def split_on_groups(string, groups): - """Split the given string using the different known groups for boundaries. 
- >>> s(split_on_groups('0123456789', [ (2, 4) ])) - ['01', '23', '456789'] - - >>> s(split_on_groups('0123456789', [ (2, 4), (4, 6) ])) - ['01', '23', '45', '6789'] - - >>> s(split_on_groups('0123456789', [ (5, 7), (2, 4) ])) - ['01', '23', '4', '56', '789'] - - """ - if not groups: - return [string] - - boundaries = sorted(set(functools.reduce(lambda l, x: l + list(x), groups, []))) - if boundaries[0] != 0: - boundaries.insert(0, 0) - if boundaries[-1] != len(string): - boundaries.append(len(string)) - - groups = [string[start:end] for start, end in zip(boundaries[:-1], - boundaries[1:])] - - return [g for g in groups if g] # return only non-empty groups - - -def find_first_level_groups(string, enclosing, blank_sep=None): - """Return a list of groups that could be split because of explicit grouping. - The groups are delimited by the given enclosing characters. - - You can also specify if you want to blank the separator chars in the returned - list of groups by specifying a character for it. None means it won't be replaced. - - This does not return nested groups, ie: '(ab(c)(d))' will return a single group - containing the whole string. 
- - >>> s(find_first_level_groups('', '()')) - [''] - - >>> s(find_first_level_groups('abcd', '()')) - ['abcd'] - - >>> s(find_first_level_groups('abc(de)fgh', '()')) - ['abc', '(de)', 'fgh'] - - >>> s(find_first_level_groups('(ab(c)(d))', '()', blank_sep = '_')) - ['_ab(c)(d)_'] - - >>> s(find_first_level_groups('ab[c]de[f]gh(i)', '[]')) - ['ab', '[c]', 'de', '[f]', 'gh(i)'] - - >>> s(find_first_level_groups('()[]()', '()', blank_sep = '-')) - ['--', '[]', '--'] - - """ - groups = find_first_level_groups_span(string, enclosing) - if blank_sep: - for start, end in groups: - string = str_replace(string, start, blank_sep) - string = str_replace(string, end - 1, blank_sep) - - return split_on_groups(string, groups) - - -_camel_word2_set = set(('is', 'to',)) -_camel_word3_set = set(('the',)) - - -def _camel_split_and_lower(string, i): - """Retrieves a tuple (need_split, need_lower) - - need_split is True if this char is a first letter in a camelCasedString. - need_lower is True if this char should be lowercased. 
- """ - - def islower(c): - return c.isalpha() and not c.isupper() - - previous_char2 = string[i - 2] if i > 1 else None - previous_char = string[i - 1] if i > 0 else None - char = string[i] - next_char = string[i + 1] if i + 1 < len(string) else None - next_char2 = string[i + 2] if i + 2 < len(string) else None - - char_upper = char.isupper() - char_lower = islower(char) - - # previous_char2_lower = islower(previous_char2) if previous_char2 else False - previous_char2_upper = previous_char2.isupper() if previous_char2 else False - - previous_char_lower = islower(previous_char) if previous_char else False - previous_char_upper = previous_char.isupper() if previous_char else False - - next_char_upper = next_char.isupper() if next_char else False - next_char_lower = islower(next_char) if next_char else False - - next_char2_upper = next_char2.isupper() if next_char2 else False - # next_char2_lower = islower(next_char2) if next_char2 else False - - mixedcase_word = (previous_char_upper and char_lower and next_char_upper) or \ - (previous_char_lower and char_upper and next_char_lower and next_char2_upper) or \ - (previous_char2_upper and previous_char_lower and char_upper) - if mixedcase_word: - word2 = (char + next_char).lower() if next_char else None - word3 = (char + next_char + next_char2).lower() if next_char and next_char2 else None - word2b = (previous_char2 + previous_char).lower() if previous_char2 and previous_char else None - if word2 in _camel_word2_set or word2b in _camel_word2_set or word3 in _camel_word3_set: - mixedcase_word = False - - uppercase_word = previous_char_upper and char_upper and next_char_upper or (char_upper and next_char_upper and next_char2_upper) - - need_split = char_upper and previous_char_lower and not mixedcase_word - - if not need_split: - previous_char_upper = string[i - 1].isupper() if i > 0 else False - next_char_lower = (string[i + 1].isalpha() and not string[i + 1].isupper()) if i + 1 < len(string) else False - need_split = 
char_upper and previous_char_upper and next_char_lower - uppercase_word = previous_char_upper and not next_char_lower - - need_lower = not uppercase_word and not mixedcase_word and need_split - - return (need_split, need_lower) - - -def is_camel(string): - """ - >>> is_camel('dogEATDog') - True - >>> is_camel('DeathToCamelCase') - True - >>> is_camel('death_to_camel_case') - False - >>> is_camel('TheBest') - True - >>> is_camel('The Best') - False - """ - for i in range(0, len(string)): - need_split, _ = _camel_split_and_lower(string, i) - if need_split: - return True - return False - - -def from_camel(string): - """ - >>> from_camel('dogEATDog') == 'dog EAT dog' - True - >>> from_camel('DeathToCamelCase') == 'Death to camel case' - True - >>> from_camel('TheBest') == 'The best' - True - >>> from_camel('MiXedCaSe is not camelCase') == 'MiXedCaSe is not camel case' - True - """ - if not string: - return string - pieces = [] - - for i in range(0, len(string)): - char = string[i] - need_split, need_lower = _camel_split_and_lower(string, i) - if need_split: - pieces.append(' ') - - if need_lower: - pieces.append(char.lower()) - else: - pieces.append(char) - return ''.join(pieces) diff --git a/libs/guessit/transfo/__init__.py b/libs/guessit/transfo/__init__.py deleted file mode 100644 index cce2dfda..00000000 --- a/libs/guessit/transfo/__init__.py +++ /dev/null @@ -1,30 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# -# GuessIt - A library for guessing information from filenames -# Copyright (c) 2013 Nicolas Wack -# -# GuessIt is free software; you can redistribute it and/or modify it under -# the terms of the Lesser GNU General Public License as published by -# the Free Software Foundation; either version 3 of the License, or -# (at your option) any later version. -# -# GuessIt is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -# Lesser GNU General Public License for more details. -# -# You should have received a copy of the Lesser GNU General Public License -# along with this program. If not, see . -# - -from __future__ import absolute_import, division, print_function, unicode_literals - - -class TransformerException(Exception): - def __init__(self, transformer, message): - - # Call the base class constructor with the parameters it needs - Exception.__init__(self, message) - - self.transformer = transformer \ No newline at end of file diff --git a/libs/guessit/transfo/expected_series.py b/libs/guessit/transfo/expected_series.py deleted file mode 100644 index edbd46d4..00000000 --- a/libs/guessit/transfo/expected_series.py +++ /dev/null @@ -1,60 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# -# GuessIt - A library for guessing information from filenames -# Copyright (c) 2013 Nicolas Wack -# -# GuessIt is free software; you can redistribute it and/or modify it under -# the terms of the Lesser GNU General Public License as published by -# the Free Software Foundation; either version 3 of the License, or -# (at your option) any later version. -# -# GuessIt is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# Lesser GNU General Public License for more details. -# -# You should have received a copy of the Lesser GNU General Public License -# along with this program. If not, see . 
-# - -from __future__ import absolute_import, division, print_function, unicode_literals -from guessit.containers import PropertiesContainer -from guessit.matcher import GuessFinder - -from guessit.plugins.transformers import Transformer - -import re - - -class ExpectedSeries(Transformer): - def __init__(self): - Transformer.__init__(self, 230) - - def register_arguments(self, opts, naming_opts, output_opts, information_opts, webservice_opts, other_options): - naming_opts.add_argument('-S', '--expected-series', action='append', dest='expected_series', - help='Expected series to parse (can be used multiple times)') - - def should_process(self, mtree, options=None): - return options and options.get('expected_series') - - def expected_series(self, string, node=None, options=None): - container = PropertiesContainer(enhance=True, canonical_from_pattern=False) - - for expected_serie in options.get('expected_series'): - if expected_serie.startswith('re:'): - expected_serie = expected_serie[3:] - expected_serie = expected_serie.replace(' ', '-') - container.register_property('series', expected_serie, enhance=True) - else: - expected_serie = re.escape(expected_serie) - container.register_property('series', expected_serie, enhance=False) - - found = container.find_properties(string, node, options) - return container.as_guess(found, string) - - def supported_properties(self): - return ['series'] - - def process(self, mtree, options=None): - GuessFinder(self.expected_series, None, self.log, options).process_nodes(mtree.unidentified_leaves()) diff --git a/libs/guessit/transfo/expected_title.py b/libs/guessit/transfo/expected_title.py deleted file mode 100644 index 2fe3d20e..00000000 --- a/libs/guessit/transfo/expected_title.py +++ /dev/null @@ -1,61 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# -# GuessIt - A library for guessing information from filenames -# Copyright (c) 2013 Nicolas Wack -# -# GuessIt is free software; you can redistribute it and/or modify it 
under -# the terms of the Lesser GNU General Public License as published by -# the Free Software Foundation; either version 3 of the License, or -# (at your option) any later version. -# -# GuessIt is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# Lesser GNU General Public License for more details. -# -# You should have received a copy of the Lesser GNU General Public License -# along with this program. If not, see . -# - -from __future__ import absolute_import, division, print_function, unicode_literals - -from guessit.containers import PropertiesContainer -from guessit.matcher import GuessFinder - -from guessit.plugins.transformers import Transformer - -import re - - -class ExpectedTitle(Transformer): - def __init__(self): - Transformer.__init__(self, 225) - - def register_arguments(self, opts, naming_opts, output_opts, information_opts, webservice_opts, other_options): - naming_opts.add_argument('-T', '--expected-title', action='append', dest='expected_title', - help='Expected title (can be used multiple times)') - - def should_process(self, mtree, options=None): - return options and options.get('expected_title') - - def expected_titles(self, string, node=None, options=None): - container = PropertiesContainer(enhance=True, canonical_from_pattern=False) - - for expected_title in options.get('expected_title'): - if expected_title.startswith('re:'): - expected_title = expected_title[3:] - expected_title = expected_title.replace(' ', '-') - container.register_property('title', expected_title, enhance=True) - else: - expected_title = re.escape(expected_title) - container.register_property('title', expected_title, enhance=False) - - found = container.find_properties(string, node, options) - return container.as_guess(found, string) - - def supported_properties(self): - return ['title'] - - def process(self, mtree, options=None): - 
GuessFinder(self.expected_titles, None, self.log, options).process_nodes(mtree.unidentified_leaves()) diff --git a/libs/guessit/transfo/guess_bonus_features.py b/libs/guessit/transfo/guess_bonus_features.py deleted file mode 100644 index c70b31e5..00000000 --- a/libs/guessit/transfo/guess_bonus_features.py +++ /dev/null @@ -1,67 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# -# GuessIt - A library for guessing information from filenames -# Copyright (c) 2013 Nicolas Wack -# -# GuessIt is free software; you can redistribute it and/or modify it under -# the terms of the Lesser GNU General Public License as published by -# the Free Software Foundation; either version 3 of the License, or -# (at your option) any later version. -# -# GuessIt is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# Lesser GNU General Public License for more details. -# -# You should have received a copy of the Lesser GNU General Public License -# along with this program. If not, see . 
-# - -from __future__ import absolute_import, division, print_function, unicode_literals - -from guessit.plugins.transformers import Transformer -from guessit.matcher import found_property - - -class GuessBonusFeatures(Transformer): - def __init__(self): - Transformer.__init__(self, -150) - - def supported_properties(self): - return ['bonusNumber', 'bonusTitle', 'filmNumber', 'filmSeries', 'title', 'series'] - - def process(self, mtree, options=None): - def previous_group(g): - for leaf in reversed(list(mtree.unidentified_leaves())): - if leaf.node_idx < g.node_idx: - return leaf - - def next_group(g): - for leaf in mtree.unidentified_leaves(): - if leaf.node_idx > g.node_idx: - return leaf - - def same_group(g1, g2): - return g1.node_idx[:2] == g2.node_idx[:2] - - bonus = [node for node in mtree.leaves() if 'bonusNumber' in node.guess] - if bonus: - bonus_title = next_group(bonus[0]) - if bonus_title and same_group(bonus_title, bonus[0]): - found_property(bonus_title, 'bonusTitle', confidence=0.8) - - film_number = [node for node in mtree.leaves() - if 'filmNumber' in node.guess] - if film_number: - film_series = previous_group(film_number[0]) - found_property(film_series, 'filmSeries', confidence=0.9) - - title = next_group(film_number[0]) - found_property(title, 'title', confidence=0.9) - - season = [node for node in mtree.leaves() if 'season' in node.guess] - if season and 'bonusNumber' in mtree.info: - series = previous_group(season[0]) - if same_group(series, season[0]): - found_property(series, 'series', confidence=0.9) diff --git a/libs/guessit/transfo/guess_country.py b/libs/guessit/transfo/guess_country.py deleted file mode 100644 index c08cac7b..00000000 --- a/libs/guessit/transfo/guess_country.py +++ /dev/null @@ -1,124 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# -# GuessIt - A library for guessing information from filenames -# Copyright (c) 2013 Nicolas Wack -# -# GuessIt is free software; you can redistribute it and/or modify it under 
-# the terms of the Lesser GNU General Public License as published by -# the Free Software Foundation; either version 3 of the License, or -# (at your option) any later version. -# -# GuessIt is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# Lesser GNU General Public License for more details. -# -# You should have received a copy of the Lesser GNU General Public License -# along with this program. If not, see . -# - -from __future__ import absolute_import, division, print_function, unicode_literals - -from guessit.plugins.transformers import Transformer -from babelfish import Country -from guessit import Guess -from guessit.textutils import iter_words -from guessit.matcher import GuessFinder, found_guess -from guessit.language import LNG_COMMON_WORDS -import babelfish -import logging - -log = logging.getLogger(__name__) - - -class GuessCountry(Transformer): - def __init__(self): - Transformer.__init__(self, -170) - self.replace_language = frozenset(['uk']) - - def register_arguments(self, opts, naming_opts, output_opts, information_opts, webservice_opts, other_options): - naming_opts.add_argument('-C', '--allowed-country', action='append', dest='allowed_countries', - help='Allowed country (can be used multiple times)') - - def supported_properties(self): - return ['country'] - - def should_process(self, mtree, options=None): - options = options or {} - return options.get('country', True) - - def _scan_country(self, country, strict=False): - """ - Find a country if it is at the start or end of country string - """ - words_match = list(iter_words(country.lower())) - s = "" - start = None - - for word_match in words_match: - if not start: - start = word_match.start(0) - s += word_match.group(0) - try: - return Country.fromguessit(s), (start, word_match.end(0)) - except babelfish.Error: - continue - - words_match.reverse() - s = "" - end 
= None - for word_match in words_match: - if not end: - end = word_match.end(0) - s = word_match.group(0) + s - try: - return Country.fromguessit(s), (word_match.start(0), end) - except babelfish.Error: - continue - - return Country.fromguessit(country), (start, end) - - def is_valid_country(self, country, options=None): - if options and options.get('allowed_countries'): - allowed_countries = options.get('allowed_countries') - return country.name.lower() in allowed_countries or country.alpha2.lower() in allowed_countries - else: - return (country.name.lower() not in LNG_COMMON_WORDS and - country.alpha2.lower() not in LNG_COMMON_WORDS) - - def guess_country(self, string, node=None, options=None): - c = string.strip().lower() - if c not in LNG_COMMON_WORDS: - try: - country, country_span = self._scan_country(c, True) - if self.is_valid_country(country, options): - guess = Guess(country=country, confidence=1.0, input=node.value, span=(country_span[0] + 1, country_span[1] + 1)) - return guess - except babelfish.Error: - pass - return None, None - - def process(self, mtree, options=None): - GuessFinder(self.guess_country, None, self.log, options).process_nodes(mtree.unidentified_leaves()) - for node in mtree.leaves_containing('language'): - c = node.clean_value.lower() - if c in self.replace_language: - node.guess.set('language', None) - try: - country = Country.fromguessit(c) - if self.is_valid_country(country, options): - guess = Guess(country=country, confidence=0.9, input=node.value, span=node.span) - found_guess(node, guess, logger=log) - except babelfish.Error: - pass - - def post_process(self, mtree, options=None, *args, **kwargs): - # if country is in the guessed properties, make it part of the series name - series_leaves = list(mtree.leaves_containing('series')) - country_leaves = list(mtree.leaves_containing('country')) - - if series_leaves and country_leaves: - country_leaf = country_leaves[0] - for serie_leaf in series_leaves: - serie_leaf.guess['series'] 
+= ' (%s)' % str(country_leaf.guess['country'].guessit) diff --git a/libs/guessit/transfo/guess_date.py b/libs/guessit/transfo/guess_date.py deleted file mode 100644 index 73fa246d..00000000 --- a/libs/guessit/transfo/guess_date.py +++ /dev/null @@ -1,49 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# -# GuessIt - A library for guessing information from filenames -# Copyright (c) 2013 Nicolas Wack -# -# GuessIt is free software; you can redistribute it and/or modify it under -# the terms of the Lesser GNU General Public License as published by -# the Free Software Foundation; either version 3 of the License, or -# (at your option) any later version. -# -# GuessIt is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# Lesser GNU General Public License for more details. -# -# You should have received a copy of the Lesser GNU General Public License -# along with this program. If not, see . 
-# - -from __future__ import absolute_import, division, print_function, unicode_literals - -from guessit.plugins.transformers import Transformer -from guessit.matcher import GuessFinder -from guessit.date import search_date - - -class GuessDate(Transformer): - def __init__(self): - Transformer.__init__(self, 50) - - def register_arguments(self, opts, naming_opts, output_opts, information_opts, webservice_opts, other_options): - naming_opts.add_argument('-Y', '--date-year-first', action='store_true', dest='date_year_first', default=None, - help='If short date is found, consider the first digits as the year.') - naming_opts.add_argument('-D', '--date-day-first', action='store_true', dest='date_day_first', default=None, - help='If short date is found, consider the second digits as the day.') - - def supported_properties(self): - return ['date'] - - def guess_date(self, string, node=None, options=None): - date, span = search_date(string, options.get('date_year_first') if options else False, options.get('date_day_first') if options else False) - if date: - return {'date': date}, span - else: - return None, None - - def process(self, mtree, options=None): - GuessFinder(self.guess_date, 1.0, self.log, options).process_nodes(mtree.unidentified_leaves()) diff --git a/libs/guessit/transfo/guess_episode_details.py b/libs/guessit/transfo/guess_episode_details.py deleted file mode 100644 index ba7ff298..00000000 --- a/libs/guessit/transfo/guess_episode_details.py +++ /dev/null @@ -1,64 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# -# GuessIt - A library for guessing information from filenames -# Copyright (c) 2013 Nicolas Wack -# -# GuessIt is free software; you can redistribute it and/or modify it under -# the terms of the Lesser GNU General Public License as published by -# the Free Software Foundation; either version 3 of the License, or -# (at your option) any later version. 
-# -# GuessIt is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# Lesser GNU General Public License for more details. -# -# You should have received a copy of the Lesser GNU General Public License -# along with this program. If not, see . -# - -from __future__ import absolute_import, division, print_function, unicode_literals - -from guessit.plugins.transformers import Transformer -from guessit.matcher import found_guess -from guessit.containers import PropertiesContainer -import itertools - - -class GuessEpisodeDetails(Transformer): - def __init__(self): - Transformer.__init__(self, -205) - self.container = PropertiesContainer() - self.container.register_property('episodeDetails', 'Special', 'Bonus', 'Omake', 'Ova', 'Oav', 'Pilot', 'Unaired') - self.container.register_property('episodeDetails', 'Extras?', canonical_form='Extras') - - def guess_details(self, string, node=None, options=None): - properties = self.container.find_properties(string, node, options, 'episodeDetails', multiple=True) - guesses = self.container.as_guess(properties, multiple=True) - return guesses - - def second_pass_options(self, mtree, options=None): - if not mtree.guess.get('type', '').startswith('episode'): - for unidentified_leaf in mtree.unidentified_leaves(): - properties = self.container.find_properties(unidentified_leaf.value, unidentified_leaf, options, 'episodeDetails') - guess = self.container.as_guess(properties) - if guess: - return {'type': 'episode'} - return None - - def supported_properties(self): - return self.container.get_supported_properties() - - def process(self, mtree, options=None): - if (mtree.guess.get('type', '').startswith('episode') and - (not mtree.info.get('episodeNumber') or - mtree.info.get('season') == 0)): - - for leaf in itertools.chain(mtree.leaves_containing('title'), - mtree.unidentified_leaves()): - guesses = 
self.guess_details(leaf.value, leaf, options) - for guess in guesses: - found_guess(leaf, guess, update_guess=False) - - return None diff --git a/libs/guessit/transfo/guess_episode_info_from_position.py b/libs/guessit/transfo/guess_episode_info_from_position.py deleted file mode 100644 index ad8973dd..00000000 --- a/libs/guessit/transfo/guess_episode_info_from_position.py +++ /dev/null @@ -1,181 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# -# GuessIt - A library for guessing information from filenames -# Copyright (c) 2013 Nicolas Wack -# -# GuessIt is free software; you can redistribute it and/or modify it under -# the terms of the Lesser GNU General Public License as published by -# the Free Software Foundation; either version 3 of the License, or -# (at your option) any later version. -# -# GuessIt is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# Lesser GNU General Public License for more details. -# -# You should have received a copy of the Lesser GNU General Public License -# along with this program. If not, see . 
-# - -from __future__ import absolute_import, division, print_function, unicode_literals - -from guessit.plugins.transformers import Transformer, get_transformer -from guessit.textutils import reorder_title - -from guessit.matcher import found_property - - -class GuessEpisodeInfoFromPosition(Transformer): - def __init__(self): - Transformer.__init__(self, -200) - - def supported_properties(self): - return ['title', 'series'] - - def match_from_epnum_position(self, mtree, node, options): - epnum_idx = node.node_idx - - # a few helper functions to be able to filter using high-level semantics - def before_epnum_in_same_pathgroup(): - return [leaf for leaf in mtree.unidentified_leaves(lambda x: len(x.clean_value) > 1) - if (leaf.node_idx[0] == epnum_idx[0] and - leaf.node_idx[1:] < epnum_idx[1:])] - - def after_epnum_in_same_pathgroup(): - return [leaf for leaf in mtree.unidentified_leaves(lambda x: len(x.clean_value) > 1) - if (leaf.node_idx[0] == epnum_idx[0] and - leaf.node_idx[1:] > epnum_idx[1:])] - - def after_epnum_in_same_explicitgroup(): - return [leaf for leaf in mtree.unidentified_leaves(lambda x: len(x.clean_value) > 1) - if (leaf.node_idx[:2] == epnum_idx[:2] and - leaf.node_idx[2:] > epnum_idx[2:])] - - # epnumber is the first group and there are only 2 after it in same - # path group - # -> series title - episode title - title_candidates = self._filter_candidates(after_epnum_in_same_pathgroup(), options) - - if ('title' not in mtree.info and # no title - 'series' in mtree.info and # series present - before_epnum_in_same_pathgroup() == [] and # no groups before - len(title_candidates) == 1): # only 1 group after - - found_property(title_candidates[0], 'title', confidence=0.4) - return - - if ('title' not in mtree.info and # no title - before_epnum_in_same_pathgroup() == [] and # no groups before - len(title_candidates) == 2): # only 2 groups after - - found_property(title_candidates[0], 'series', confidence=0.4) - found_property(title_candidates[1], 
'title', confidence=0.4) - return - - # if we have at least 1 valid group before the episodeNumber, then it's - # probably the series name - series_candidates = before_epnum_in_same_pathgroup() - if len(series_candidates) >= 1: - found_property(series_candidates[0], 'series', confidence=0.7) - - # only 1 group after (in the same path group) and it's probably the - # episode title. - title_candidates = self._filter_candidates(after_epnum_in_same_pathgroup(), options) - if len(title_candidates) == 1: - found_property(title_candidates[0], 'title', confidence=0.5) - return - else: - # try in the same explicit group, with lower confidence - title_candidates = self._filter_candidates(after_epnum_in_same_explicitgroup(), options) - if len(title_candidates) == 1: - found_property(title_candidates[0], 'title', confidence=0.4) - return - elif len(title_candidates) > 1: - found_property(title_candidates[0], 'title', confidence=0.3) - return - - # get the one with the longest value - title_candidates = self._filter_candidates(after_epnum_in_same_pathgroup(), options) - if title_candidates: - maxidx = -1 - maxv = -1 - for i, c in enumerate(title_candidates): - if len(c.clean_value) > maxv: - maxidx = i - maxv = len(c.clean_value) - found_property(title_candidates[maxidx], 'title', confidence=0.3) - - def should_process(self, mtree, options=None): - options = options or {} - return not options.get('skip_title') and mtree.guess.get('type', '').startswith('episode') - - def _filter_candidates(self, candidates, options): - episode_details_transformer = get_transformer('guess_episode_details') - if episode_details_transformer: - return [n for n in candidates if not episode_details_transformer.container.find_properties(n.value, n, options, re_match=True)] - else: - return candidates - - def process(self, mtree, options=None): - """ - try to identify the remaining unknown groups by looking at their - position relative to other known elements - """ - eps = [node for node in 
mtree.leaves() if 'episodeNumber' in node.guess] - - if not eps: - eps = [node for node in mtree.leaves() if 'date' in node.guess] - - if eps: - self.match_from_epnum_position(mtree, eps[0], options) - - else: - # if we don't have the episode number, but at least 2 groups in the - # basename, then it's probably series - eptitle - basename = mtree.node_at((-2,)) - - title_candidates = self._filter_candidates(basename.unidentified_leaves(), options) - - if len(title_candidates) >= 2 and 'series' not in mtree.info: - found_property(title_candidates[0], 'series', confidence=0.4) - found_property(title_candidates[1], 'title', confidence=0.4) - elif len(title_candidates) == 1: - # but if there's only one candidate, it's probably the series name - found_property(title_candidates[0], 'series' if 'series' not in mtree.info else 'title', confidence=0.4) - - # if we only have 1 remaining valid group in the folder containing the - # file, then it's likely that it is the series name - try: - series_candidates = list(mtree.node_at((-3,)).unidentified_leaves()) - except ValueError: - series_candidates = [] - - if len(series_candidates) == 1: - found_property(series_candidates[0], 'series', confidence=0.3) - - # if there's a path group that only contains the season info, then the - # previous one is most likely the series title (ie: ../series/season X/..) - eps = [node for node in mtree.nodes() - if 'season' in node.guess and 'episodeNumber' not in node.guess] - - if eps: - previous = [node for node in mtree.unidentified_leaves() - if node.node_idx[0] == eps[0].node_idx[0] - 1] - if len(previous) == 1: - found_property(previous[0], 'series', confidence=0.5) - - # If we have found title without any serie name, replace it by the serie name. 
- if 'series' not in mtree.info and 'title' in mtree.info: - title_leaf = mtree.first_leaf_containing('title') - metadata = title_leaf.guess.metadata('title') - value = title_leaf.guess['title'] - del title_leaf.guess['title'] - title_leaf.guess.set('series', value, metadata=metadata) - - def post_process(self, mtree, options=None): - for node in mtree.nodes(): - if 'series' not in node.guess: - continue - - node.guess['series'] = reorder_title(node.guess['series']) diff --git a/libs/guessit/transfo/guess_episodes_rexps.py b/libs/guessit/transfo/guess_episodes_rexps.py deleted file mode 100644 index 927c9890..00000000 --- a/libs/guessit/transfo/guess_episodes_rexps.py +++ /dev/null @@ -1,193 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# -# GuessIt - A library for guessing information from filenames -# Copyright (c) 2013 Nicolas Wack -# -# GuessIt is free software; you can redistribute it and/or modify it under -# the terms of the Lesser GNU General Public License as published by -# the Free Software Foundation; either version 3 of the License, or -# (at your option) any later version. -# -# GuessIt is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# Lesser GNU General Public License for more details. -# -# You should have received a copy of the Lesser GNU General Public License -# along with this program. If not, see . 
-# - -from __future__ import absolute_import, division, print_function, unicode_literals - -from guessit.plugins.transformers import Transformer -from guessit.matcher import GuessFinder -from guessit.patterns import sep, build_or_pattern -from guessit.containers import PropertiesContainer, WeakValidator, NoValidator, ChainedValidator, DefaultValidator, \ - FormatterValidator -from guessit.patterns.numeral import numeral, digital_numeral, parse_numeral -import re - - -class GuessEpisodesRexps(Transformer): - def __init__(self): - Transformer.__init__(self, 20) - - range_separators = ['-', 'to', 'a'] - discrete_separators = ['&', 'and', 'et'] - of_separators = ['of', 'sur', '/', '\\'] - - season_words = ['seasons?', 'saisons?', 'series?'] - episode_words = ['episodes?'] - - season_markers = ['s'] - episode_markers = ['e', 'ep'] - - discrete_sep = sep - for range_separator in range_separators: - discrete_sep = discrete_sep.replace(range_separator, '') - discrete_separators.append(discrete_sep) - all_separators = list(range_separators) - all_separators.extend(discrete_separators) - - self.container = PropertiesContainer(enhance=False, canonical_from_pattern=False) - - range_separators_re = re.compile(build_or_pattern(range_separators), re.IGNORECASE) - discrete_separators_re = re.compile(build_or_pattern(discrete_separators), re.IGNORECASE) - all_separators_re = re.compile(build_or_pattern(all_separators), re.IGNORECASE) - of_separators_re = re.compile(build_or_pattern(of_separators, escape=True), re.IGNORECASE) - - season_words_re = re.compile(build_or_pattern(season_words), re.IGNORECASE) - episode_words_re = re.compile(build_or_pattern(episode_words), re.IGNORECASE) - - season_markers_re = re.compile(build_or_pattern(season_markers), re.IGNORECASE) - episode_markers_re = re.compile(build_or_pattern(episode_markers), re.IGNORECASE) - - def list_parser(value, property_list_name, discrete_separators_re=discrete_separators_re, range_separators_re=range_separators_re, 
allow_discrete=False, fill_gaps=False): - discrete_elements = filter(lambda x: x != '', discrete_separators_re.split(value)) - discrete_elements = [x.strip() for x in discrete_elements] - - proper_discrete_elements = [] - i = 0 - while i < len(discrete_elements): - if i < len(discrete_elements) - 2 and range_separators_re.match(discrete_elements[i+1]): - proper_discrete_elements.append(discrete_elements[i] + discrete_elements[i+1] + discrete_elements[i+2]) - i += 3 - else: - match = range_separators_re.search(discrete_elements[i]) - if match and match.start() == 0: - proper_discrete_elements[i-1] = proper_discrete_elements[i-1] + discrete_elements[i] - elif match and match.end() == len(discrete_elements[i]): - proper_discrete_elements.append(discrete_elements[i] + discrete_elements[i + 1]) - else: - proper_discrete_elements.append(discrete_elements[i]) - i += 1 - - discrete_elements = proper_discrete_elements - - ret = [] - - for discrete_element in discrete_elements: - range_values = filter(lambda x: x != '', range_separators_re.split(discrete_element)) - range_values = [x.strip() for x in range_values] - if len(range_values) > 1: - for x in range(0, len(range_values) - 1): - start_range_ep = parse_numeral(range_values[x]) - end_range_ep = parse_numeral(range_values[x+1]) - for range_ep in range(start_range_ep, end_range_ep + 1): - if range_ep not in ret: - ret.append(range_ep) - else: - discrete_value = parse_numeral(discrete_element) - if discrete_value not in ret: - ret.append(discrete_value) - - if len(ret) > 1: - if not allow_discrete: - valid_ret = list() - # replace discrete elements by ranges - valid_ret.append(ret[0]) - for i in range(0, len(ret) - 1): - previous = valid_ret[len(valid_ret) - 1] - if ret[i+1] < previous: - pass - else: - valid_ret.append(ret[i+1]) - ret = valid_ret - if fill_gaps: - ret = list(range(min(ret), max(ret) + 1)) - if len(ret) > 1: - return {None: ret[0], property_list_name: ret} - if len(ret) > 0: - return ret[0] - return None 
- - def episode_parser_x(value): - return list_parser(value, 'episodeList', discrete_separators_re=re.compile('x', re.IGNORECASE)) - - def episode_parser_e(value): - return list_parser(value, 'episodeList', discrete_separators_re=re.compile('e', re.IGNORECASE), fill_gaps=True) - - def episode_parser(value): - return list_parser(value, 'episodeList') - - def season_parser(value): - return list_parser(value, 'seasonList') - - class ResolutionCollisionValidator(object): - def validate(self, prop, string, node, match, entry_start, entry_end): - return len(match.group(2)) < 3 # limit - - self.container.register_property(None, r'(' + season_words_re.pattern + sep + '?(?P' + numeral + ')' + sep + '?' + season_words_re.pattern + '?)', confidence=1.0, formatter=parse_numeral) - self.container.register_property(None, r'(' + season_words_re.pattern + sep + '?(?P' + digital_numeral + '(?:' + sep + '?' + all_separators_re.pattern + sep + '?' + digital_numeral + ')*)' + sep + '?' + season_words_re.pattern + '?)' + sep, confidence=1.0, formatter={None: parse_numeral, 'season': season_parser}, validator=ChainedValidator(DefaultValidator(), FormatterValidator('season', lambda x: len(x) > 1 if hasattr(x, '__len__') else False))) - - self.container.register_property(None, r'(' + season_markers_re.pattern + '(?P' + digital_numeral + ')[^0-9]?' 
+ sep + '?(?P(?:e' + digital_numeral + '(?:' + sep + '?[e-]' + digital_numeral + ')*)))', confidence=1.0, formatter={None: parse_numeral, 'episodeNumber': episode_parser_e, 'season': season_parser}, validator=NoValidator()) - # self.container.register_property(None, r'[^0-9]((?P' + digital_numeral + ')[^0-9 .-]?-?(?P(?:x' + digital_numeral + '(?:' + sep + '?[x-]' + digital_numeral + ')*)))', confidence=1.0, formatter={None: parse_numeral, 'episodeNumber': episode_parser_x, 'season': season_parser}, validator=ChainedValidator(DefaultValidator(), ResolutionCollisionValidator())) - self.container.register_property(None, sep + r'((?P' + digital_numeral + ')' + sep + '' + '(?P(?:x' + sep + digital_numeral + '(?:' + sep + '[x-]' + digital_numeral + ')*)))', confidence=1.0, formatter={None: parse_numeral, 'episodeNumber': episode_parser_x, 'season': season_parser}, validator=ChainedValidator(DefaultValidator(), ResolutionCollisionValidator())) - self.container.register_property(None, r'((?P' + digital_numeral + ')' + '(?P(?:x' + digital_numeral + '(?:[x-]' + digital_numeral + ')*)))', confidence=1.0, formatter={None: parse_numeral, 'episodeNumber': episode_parser_x, 'season': season_parser}, validator=ChainedValidator(DefaultValidator(), ResolutionCollisionValidator())) - self.container.register_property(None, r'(' + season_markers_re.pattern + '(?P' + digital_numeral + '(?:' + sep + '?' + all_separators_re.pattern + sep + '?' 
+ digital_numeral + ')*))', confidence=0.6, formatter={None: parse_numeral, 'season': season_parser}, validator=NoValidator()) - - self.container.register_property(None, r'((?P' + digital_numeral + ')' + sep + '?v(?P\d+))', confidence=0.6, formatter=parse_numeral) - self.container.register_property(None, r'(ep' + sep + r'?(?P' + digital_numeral + ')' + sep + '?)', confidence=0.7, formatter=parse_numeral) - self.container.register_property(None, r'(ep' + sep + r'?(?P' + digital_numeral + ')' + sep + '?v(?P\d+))', confidence=0.7, formatter=parse_numeral) - - - self.container.register_property(None, r'(' + episode_markers_re.pattern + '(?P' + digital_numeral + '(?:' + sep + '?' + all_separators_re.pattern + sep + '?' + digital_numeral + ')*))', confidence=0.6, formatter={None: parse_numeral, 'episodeNumber': episode_parser}) - self.container.register_property(None, r'(' + episode_words_re.pattern + sep + '?(?P' + digital_numeral + '(?:' + sep + '?' + all_separators_re.pattern + sep + '?' + digital_numeral + ')*)' + sep + '?' 
+ episode_words_re.pattern + '?)', confidence=0.8, formatter={None: parse_numeral, 'episodeNumber': episode_parser}) - - self.container.register_property(None, r'(' + episode_markers_re.pattern + '(?P' + digital_numeral + ')' + sep + '?v(?P\d+))', confidence=0.6, formatter={None: parse_numeral, 'episodeNumber': episode_parser}) - self.container.register_property(None, r'(' + episode_words_re.pattern + sep + '?(?P' + digital_numeral + ')' + sep + '?v(?P\d+))', confidence=0.8, formatter={None: parse_numeral, 'episodeNumber': episode_parser}) - - - self.container.register_property('episodeNumber', r'^ ?(\d{2})' + sep, confidence=0.4, formatter=parse_numeral) - self.container.register_property('episodeNumber', r'^ ?(\d{2})' + sep, confidence=0.4, formatter=parse_numeral) - self.container.register_property('episodeNumber', r'^ ?0(\d{1,2})' + sep, confidence=0.4, formatter=parse_numeral) - self.container.register_property('episodeNumber', sep + r'(\d{2}) ?$', confidence=0.4, formatter=parse_numeral) - self.container.register_property('episodeNumber', sep + r'0(\d{1,2}) ?$', confidence=0.4, formatter=parse_numeral) - - self.container.register_property(None, r'((?P' + numeral + ')' + sep + '?' + of_separators_re.pattern + sep + '?(?P' + numeral + ')(?:' + sep + '?(?:episodes?|eps?))?)', confidence=0.7, formatter=parse_numeral) - self.container.register_property(None, r'((?:episodes?|eps?)' + sep + '?(?P' + numeral + ')' + sep + '?' + of_separators_re.pattern + sep + '?(?P' + numeral + '))', confidence=0.7, formatter=parse_numeral) - self.container.register_property(None, r'((?:seasons?|saisons?|s)' + sep + '?(?P' + numeral + ')' + sep + '?' + of_separators_re.pattern + sep + '?(?P' + numeral + '))', confidence=0.7, formatter=parse_numeral) - self.container.register_property(None, r'((?P' + numeral + ')' + sep + '?' 
+ of_separators_re.pattern + sep + '?(?P' + numeral + ')' + sep + '?(?:seasons?|saisons?|s))', confidence=0.7, formatter=parse_numeral) - - self.container.register_canonical_properties('other', 'FiNAL', 'Complete', validator=WeakValidator()) - - self.container.register_property(None, r'[^0-9]((?P' + digital_numeral + ')[^0-9 .-]?-?(?PxAll))', confidence=1.0, formatter={None: parse_numeral, 'other': lambda x: 'Complete', 'season': season_parser}, validator=ChainedValidator(DefaultValidator(), ResolutionCollisionValidator())) - - def register_arguments(self, opts, naming_opts, output_opts, information_opts, webservice_opts, other_options): - naming_opts.add_argument('-E', '--episode-prefer-number', action='store_true', dest='episode_prefer_number', default=False, - help='Guess "serie.213.avi" as the episodeNumber 213. Without this option, ' - 'it will be guessed as season 2, episodeNumber 13') - - def supported_properties(self): - return ['episodeNumber', 'season', 'episodeList', 'seasonList', 'episodeCount', 'seasonCount', 'version', 'other'] - - def guess_episodes_rexps(self, string, node=None, options=None): - found = self.container.find_properties(string, node, options) - return self.container.as_guess(found, string) - - def should_process(self, mtree, options=None): - return mtree.guess.get('type', '').startswith('episode') - - def process(self, mtree, options=None): - GuessFinder(self.guess_episodes_rexps, None, self.log, options).process_nodes(mtree.unidentified_leaves()) diff --git a/libs/guessit/transfo/guess_filetype.py b/libs/guessit/transfo/guess_filetype.py deleted file mode 100644 index 0eb3475f..00000000 --- a/libs/guessit/transfo/guess_filetype.py +++ /dev/null @@ -1,237 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# -# GuessIt - A library for guessing information from filenames -# Copyright (c) 2013 Nicolas Wack -# -# GuessIt is free software; you can redistribute it and/or modify it under -# the terms of the Lesser GNU General Public 
License as published by -# the Free Software Foundation; either version 3 of the License, or -# (at your option) any later version. -# -# GuessIt is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# Lesser GNU General Public License for more details. -# -# You should have received a copy of the Lesser GNU General Public License -# along with this program. If not, see . -# - -from __future__ import absolute_import, division, print_function, unicode_literals - -import mimetypes -import os.path -import re - -from guessit.guess import Guess -from guessit.patterns.extension import subtitle_exts, info_exts, video_exts -from guessit.transfo import TransformerException -from guessit.plugins.transformers import Transformer, get_transformer -from guessit.matcher import log_found_guess, found_guess, found_property - - -class GuessFiletype(Transformer): - def __init__(self): - Transformer.__init__(self, 200) - - # List of well known movies and series, hardcoded because they cannot be - # guessed appropriately otherwise - MOVIES = ['OSS 117'] - SERIES = ['Band of Brothers'] - - MOVIES = [m.lower() for m in MOVIES] - SERIES = [s.lower() for s in SERIES] - - def guess_filetype(self, mtree, options=None): - options = options or {} - - # put the filetype inside a dummy container to be able to have the - # following functions work correctly as closures - # this is a workaround for python 2 which doesn't have the - # 'nonlocal' keyword which we could use here in the upgrade_* functions - # (python 3 does have it) - filetype_container = [mtree.guess.get('type')] - other = {} - filename = mtree.string - - def upgrade_episode(): - if filetype_container[0] == 'subtitle': - filetype_container[0] = 'episodesubtitle' - elif filetype_container[0] == 'info': - filetype_container[0] = 'episodeinfo' - elif (not filetype_container[0] or - filetype_container[0] == 
'video'): - filetype_container[0] = 'episode' - - def upgrade_movie(): - if filetype_container[0] == 'subtitle': - filetype_container[0] = 'moviesubtitle' - elif filetype_container[0] == 'info': - filetype_container[0] = 'movieinfo' - elif (not filetype_container[0] or - filetype_container[0] == 'video'): - filetype_container[0] = 'movie' - - def upgrade_subtitle(): - if filetype_container[0] == 'movie': - filetype_container[0] = 'moviesubtitle' - elif filetype_container[0] == 'episode': - filetype_container[0] = 'episodesubtitle' - elif not filetype_container[0]: - filetype_container[0] = 'subtitle' - - def upgrade_info(): - if filetype_container[0] == 'movie': - filetype_container[0] = 'movieinfo' - elif filetype_container[0] == 'episode': - filetype_container[0] = 'episodeinfo' - elif not filetype_container[0]: - filetype_container[0] = 'info' - - # look at the extension first - fileext = os.path.splitext(filename)[1][1:].lower() - if fileext in subtitle_exts: - upgrade_subtitle() - other = {'container': fileext} - elif fileext in info_exts: - upgrade_info() - other = {'container': fileext} - elif fileext in video_exts: - other = {'container': fileext} - else: - if fileext and not options.get('name_only'): - other = {'extension': fileext} - list(mtree.unidentified_leaves())[-1].guess = Guess(other) - - # check whether we are in a 'Movies', 'Tv Shows', ... 
folder - folder_rexps = [(r'Movies?', upgrade_movie), - (r'Films?', upgrade_movie), - (r'Tv[ _-]?Shows?', upgrade_episode), - (r'Series?', upgrade_episode), - (r'Episodes?', upgrade_episode)] - for frexp, upgrade_func in folder_rexps: - frexp = re.compile(frexp, re.IGNORECASE) - for pathgroup in mtree.children: - if frexp.match(pathgroup.value): - upgrade_func() - return filetype_container[0], other - - # check for a few specific cases which will unintentionally make the - # following heuristics confused (eg: OSS 117 will look like an episode, - # season 1, epnum 17, when it is in fact a movie) - fname = mtree.clean_string(filename).lower() - for m in self.MOVIES: - if m in fname: - self.log.debug('Found in exception list of movies -> type = movie') - upgrade_movie() - return filetype_container[0], other - for s in self.SERIES: - if s in fname: - self.log.debug('Found in exception list of series -> type = episode') - upgrade_episode() - return filetype_container[0], other - - # if we have an episode_rexp (eg: s02e13), it is an episode - episode_transformer = get_transformer('guess_episodes_rexps') - if episode_transformer: - filename_parts = list(x.value for x in mtree.unidentified_leaves()); - filename_parts.append(filename) - for filename_part in filename_parts: - guess = episode_transformer.guess_episodes_rexps(filename_part) - if guess: - self.log.debug('Found guess_episodes_rexps: %s -> type = episode', guess) - upgrade_episode() - return filetype_container[0], other - - properties_transformer = get_transformer('guess_properties') - if properties_transformer: - # if we have certain properties characteristic of episodes, it is an ep - found = properties_transformer.container.find_properties(filename, mtree, options, 'episodeFormat') - guess = properties_transformer.container.as_guess(found, filename) - if guess: - self.log.debug('Found characteristic property of episodes: %s"', guess) - upgrade_episode() - return filetype_container[0], other - - 
weak_episode_transformer = get_transformer('guess_weak_episodes_rexps') - if weak_episode_transformer: - found = properties_transformer.container.find_properties(filename, mtree, options, 'crc32') - guess = properties_transformer.container.as_guess(found, filename) - if guess: - found = weak_episode_transformer.container.find_properties(filename, mtree, options) - guess = weak_episode_transformer.container.as_guess(found, filename) - if guess: - self.log.debug('Found characteristic property of episodes: %s"', guess) - upgrade_episode() - return filetype_container[0], other - - found = properties_transformer.container.find_properties(filename, mtree, options, 'format') - guess = properties_transformer.container.as_guess(found, filename) - if guess and guess['format'] in ('HDTV', 'WEBRip', 'WEB-DL', 'DVB'): - # Use weak episodes only if TV or WEB source - weak_episode_transformer = get_transformer('guess_weak_episodes_rexps') - if weak_episode_transformer: - guess = weak_episode_transformer.guess_weak_episodes_rexps(filename) - if guess: - self.log.debug('Found guess_weak_episodes_rexps: %s -> type = episode', guess) - upgrade_episode() - return filetype_container[0], other - - website_transformer = get_transformer('guess_website') - if website_transformer: - found = website_transformer.container.find_properties(filename, mtree, options, 'website') - guess = website_transformer.container.as_guess(found, filename) - if guess: - for namepart in ('tv', 'serie', 'episode'): - if namepart in guess['website']: - # origin-specific type - self.log.debug('Found characteristic property of episodes: %s', guess) - upgrade_episode() - return filetype_container[0], other - - if filetype_container[0] in ('subtitle', 'info') or (not filetype_container[0] and fileext in video_exts): - # if no episode info found, assume it's a movie - self.log.debug('Nothing characteristic found, assuming type = movie') - upgrade_movie() - - if not filetype_container[0]: - self.log.debug('Nothing 
characteristic found, assuming type = unknown') - filetype_container[0] = 'unknown' - - return filetype_container[0], other - - def process(self, mtree, options=None): - """guess the file type now (will be useful later) - """ - filetype, other = self.guess_filetype(mtree, options) - - mtree.guess.set('type', filetype, confidence=1.0) - log_found_guess(mtree.guess) - - filetype_info = Guess(other, confidence=1.0) - # guess the mimetype of the filename - # TODO: handle other mimetypes not found on the default type_maps - # mimetypes.types_map['.srt']='text/subtitle' - mime, _ = mimetypes.guess_type(mtree.string, strict=False) - if mime is not None: - filetype_info.update({'mimetype': mime}, confidence=1.0) - - node_ext = mtree.node_at((-1,)) - found_guess(node_ext, filetype_info) - - if mtree.guess.get('type') in [None, 'unknown']: - if options.get('name_only'): - mtree.guess.set('type', 'movie', confidence=0.6) - else: - raise TransformerException(__name__, 'Unknown file type') - - def post_process(self, mtree, options=None): - # now look whether there are some specific hints for episode vs movie - # If we have a date and no year, this is a TV Show. 
- if 'date' in mtree.info and 'year' not in mtree.info and mtree.info.get('type') != 'episode': - mtree.guess['type'] = 'episode' - for type_leaves in mtree.leaves_containing('type'): - type_leaves.guess['type'] = 'episode' - for title_leaves in mtree.leaves_containing('title'): - title_leaves.guess.rename('title', 'series') \ No newline at end of file diff --git a/libs/guessit/transfo/guess_idnumber.py b/libs/guessit/transfo/guess_idnumber.py deleted file mode 100644 index 30b63cbd..00000000 --- a/libs/guessit/transfo/guess_idnumber.py +++ /dev/null @@ -1,79 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# -# GuessIt - A library for guessing information from filenames -# Copyright (c) 2013 Nicolas Wack -# -# GuessIt is free software; you can redistribute it and/or modify it under -# the terms of the Lesser GNU General Public License as published by -# the Free Software Foundation; either version 3 of the License, or -# (at your option) any later version. -# -# GuessIt is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# Lesser GNU General Public License for more details. -# -# You should have received a copy of the Lesser GNU General Public License -# along with this program. If not, see . 
-# - -from __future__ import absolute_import, division, print_function, unicode_literals - -from guessit.plugins.transformers import Transformer -from guessit.matcher import GuessFinder -import re - -_DIGIT = 0 -_LETTER = 1 -_OTHER = 2 - - -class GuessIdnumber(Transformer): - def __init__(self): - Transformer.__init__(self, 220) - - def supported_properties(self): - return ['idNumber'] - - _idnum = re.compile(r'(?P[a-zA-Z0-9-]{20,})') # 1.0, (0, 0)) - - def guess_idnumber(self, string, node=None, options=None): - match = self._idnum.search(string) - if match is not None: - result = match.groupdict() - switch_count = 0 - switch_letter_count = 0; - letter_count = 0; - last_letter = None - - last = _LETTER - for c in result['idNumber']: - if c in '0123456789': - ci = _DIGIT - elif c in 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ': - ci = _LETTER - if c != last_letter: - switch_letter_count += 1 - last_letter = c - letter_count += 1 - else: - ci = _OTHER - - if ci != last: - switch_count += 1 - - last = ci - - switch_ratio = float(switch_count) / len(result['idNumber']) - letters_ratio = (float(switch_letter_count) / letter_count) if letter_count > 0 else 1 - - # only return the result as probable if we alternate often between - # char type (more likely for hash values than for common words) - if switch_ratio > 0.4 and letters_ratio > 0.4: - return result, match.span() - - return None, None - - def process(self, mtree, options=None): - GuessFinder(self.guess_idnumber, 0.4, self.log, options).process_nodes(mtree.unidentified_leaves()) diff --git a/libs/guessit/transfo/guess_language.py b/libs/guessit/transfo/guess_language.py deleted file mode 100644 index cb9787d3..00000000 --- a/libs/guessit/transfo/guess_language.py +++ /dev/null @@ -1,186 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# -# GuessIt - A library for guessing information from filenames -# Copyright (c) 2013 Nicolas Wack -# -# GuessIt is free software; you can redistribute it and/or 
modify it under -# the terms of the Lesser GNU General Public License as published by -# the Free Software Foundation; either version 3 of the License, or -# (at your option) any later version. -# -# GuessIt is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# Lesser GNU General Public License for more details. -# -# You should have received a copy of the Lesser GNU General Public License -# along with this program. If not, see . -# - -from __future__ import absolute_import, division, print_function, unicode_literals - -from guessit.language import search_language, subtitle_prefixes, subtitle_suffixes -from guessit.patterns.extension import subtitle_exts -from guessit.textutils import find_words -from guessit.plugins.transformers import Transformer -from guessit.matcher import GuessFinder - - -class GuessLanguage(Transformer): - def __init__(self): - Transformer.__init__(self, 30) - - def register_arguments(self, opts, naming_opts, output_opts, information_opts, webservice_opts, other_options): - naming_opts.add_argument('-L', '--allowed-languages', action='append', dest='allowed_languages', - help='Allowed language (can be used multiple times)') - - def supported_properties(self): - return ['language', 'subtitleLanguage'] - - def guess_language(self, string, node=None, options=None): - allowed_languages = None - if options and 'allowed_languages' in options: - allowed_languages = options.get('allowed_languages') - guess = search_language(string, allowed_languages) - return guess - - def _skip_language_on_second_pass(self, mtree, node): - """Check if found node is a valid language node, or if it's a false positive. - - :param mtree: Tree detected on first pass. 
- :type mtree: :class:`guessit.matchtree.MatchTree` - :param node: Node that contains a language Guess - :type node: :class:`guessit.matchtree.MatchTree` - - :return: True if a second pass skipping this node is required - :rtype: bool - """ - unidentified_starts = {} - unidentified_ends = {} - - property_starts = {} - property_ends = {} - - title_starts = {} - title_ends = {} - - for unidentified_node in mtree.unidentified_leaves(): - unidentified_starts[unidentified_node.span[0]] = unidentified_node - unidentified_ends[unidentified_node.span[1]] = unidentified_node - - for property_node in mtree.leaves_containing('year'): - property_starts[property_node.span[0]] = property_node - property_ends[property_node.span[1]] = property_node - - for title_node in mtree.leaves_containing(['title', 'series']): - title_starts[title_node.span[0]] = title_node - title_ends[title_node.span[1]] = title_node - - return node.span[0] in title_ends.keys() and (node.span[1] in unidentified_starts.keys() or node.span[1] + 1 in property_starts.keys()) or\ - node.span[1] in title_starts.keys() and (node.span[0] == node.group_node().span[0] or node.span[0] in unidentified_ends.keys() or node.span[0] in property_ends.keys()) - - def second_pass_options(self, mtree, options=None): - m = mtree.matched() - to_skip_language_nodes = [] - - for lang_key in ('language', 'subtitleLanguage'): - langs = {} - lang_nodes = set(mtree.leaves_containing(lang_key)) - - for lang_node in lang_nodes: - lang = lang_node.guess.get(lang_key, None) - if self._skip_language_on_second_pass(mtree, lang_node): - # Language probably split the title. Add to skip for 2nd pass. 
- - # if filetype is subtitle and the language appears last, just before - # the extension, then it is likely a subtitle language - parts = mtree.clean_string(lang_node.root.value).split() - if m.get('type') in ['moviesubtitle', 'episodesubtitle']: - if lang_node.value in parts and \ - (parts.index(lang_node.value) == len(parts) - 2): - continue - to_skip_language_nodes.append(lang_node) - elif lang not in langs: - langs[lang] = lang_node - else: - # The same language was found. Keep the more confident one, - # and add others to skip for 2nd pass. - existing_lang_node = langs[lang] - to_skip = None - if (existing_lang_node.guess.confidence('language') >= - lang_node.guess.confidence('language')): - # lang_node is to remove - to_skip = lang_node - else: - # existing_lang_node is to remove - langs[lang] = lang_node - to_skip = existing_lang_node - to_skip_language_nodes.append(to_skip) - - if to_skip_language_nodes: - # Also skip same value nodes - skipped_values = [skip_node.value for skip_node in to_skip_language_nodes] - - for lang_key in ('language', 'subtitleLanguage'): - lang_nodes = set(mtree.leaves_containing(lang_key)) - - for lang_node in lang_nodes: - if lang_node not in to_skip_language_nodes and lang_node.value in skipped_values: - to_skip_language_nodes.append(lang_node) - return {'skip_nodes': to_skip_language_nodes} - return None - - def should_process(self, mtree, options=None): - options = options or {} - return options.get('language', True) - - def process(self, mtree, options=None): - GuessFinder(self.guess_language, None, self.log, options).process_nodes(mtree.unidentified_leaves()) - - def promote_subtitle(self, node): - if 'language' in node.guess: - node.guess.set('subtitleLanguage', node.guess['language'], - confidence=node.guess.confidence('language')) - del node.guess['language'] - - def post_process(self, mtree, options=None): - # 1- try to promote language to subtitle language where it makes sense - for node in mtree.nodes(): - if 
'language' not in node.guess: - continue - - # - if we matched a language in a file with a sub extension and that - # the group is the last group of the filename, it is probably the - # language of the subtitle - # (eg: 'xxx.english.srt') - if (mtree.node_at((-1,)).value.lower() in subtitle_exts and - node == list(mtree.leaves())[-2]): - self.promote_subtitle(node) - - # - if we find in the same explicit group - # a subtitle prefix before the language, - # or a subtitle suffix after the language, - # then upgrade the language - explicit_group = mtree.node_at(node.node_idx[:2]) - group_str = explicit_group.value.lower() - - for sub_prefix in subtitle_prefixes: - if (sub_prefix in find_words(group_str) and - 0 <= group_str.find(sub_prefix) < (node.span[0] - explicit_group.span[0])): - self.promote_subtitle(node) - - for sub_suffix in subtitle_suffixes: - if (sub_suffix in find_words(group_str) and - (node.span[0] - explicit_group.span[0]) < group_str.find(sub_suffix)): - self.promote_subtitle(node) - - # - if a language is in an explicit group just preceded by "st", - # it is a subtitle language (eg: '...st[fr-eng]...') - try: - idx = node.node_idx - previous = list(mtree.node_at((idx[0], idx[1] - 1)).leaves())[-1] - if previous.value.lower()[-2:] == 'st': - self.promote_subtitle(node) - except IndexError: - pass diff --git a/libs/guessit/transfo/guess_movie_title_from_position.py b/libs/guessit/transfo/guess_movie_title_from_position.py deleted file mode 100644 index 671e4cb5..00000000 --- a/libs/guessit/transfo/guess_movie_title_from_position.py +++ /dev/null @@ -1,173 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# -# GuessIt - A library for guessing information from filenames -# Copyright (c) 2013 Nicolas Wack -# -# GuessIt is free software; you can redistribute it and/or modify it under -# the terms of the Lesser GNU General Public License as published by -# the Free Software Foundation; either version 3 of the License, or -# (at your option) any 
later version. -# -# GuessIt is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# Lesser GNU General Public License for more details. -# -# You should have received a copy of the Lesser GNU General Public License -# along with this program. If not, see . -# - -from __future__ import absolute_import, division, print_function, unicode_literals - -from guessit.plugins.transformers import Transformer -from guessit.matcher import found_property -from guessit import u - - -class GuessMovieTitleFromPosition(Transformer): - def __init__(self): - Transformer.__init__(self, -200) - - def supported_properties(self): - return ['title'] - - def should_process(self, mtree, options=None): - options = options or {} - return not options.get('skip_title') and not mtree.guess.get('type', '').startswith('episode') - - def process(self, mtree, options=None): - """ - try to identify the remaining unknown groups by looking at their - position relative to other known elements - """ - if 'title' in mtree.info: - return - - basename = mtree.node_at((-2,)) - all_valid = lambda leaf: len(leaf.clean_value) > 0 - basename_leftover = list(basename.unidentified_leaves(valid=all_valid)) - - try: - folder = mtree.node_at((-3,)) - folder_leftover = list(folder.unidentified_leaves()) - except ValueError: - folder = None - folder_leftover = [] - - self.log.debug('folder: %s' % u(folder_leftover)) - self.log.debug('basename: %s' % u(basename_leftover)) - - # specific cases: - # if we find the same group both in the folder name and the filename, - # it's a good candidate for title - if folder_leftover and basename_leftover and folder_leftover[0].clean_value == basename_leftover[0].clean_value: - found_property(folder_leftover[0], 'title', confidence=0.8) - return - - # specific cases: - # if the basename contains a number first followed by an unidentified - # group, and the 
folder only contains 1 unidentified one, then we have - # a series - # ex: Millenium Trilogy (2009)/(1)The Girl With The Dragon Tattoo(2009).mkv - if len(folder_leftover) > 0 and len(basename_leftover) > 1: - series = folder_leftover[0] - film_number = basename_leftover[0] - title = basename_leftover[1] - - basename_leaves = list(basename.leaves()) - - num = None - try: - num = int(film_number.clean_value) - except ValueError: - pass - - if num: - self.log.debug('series: %s' % series.clean_value) - self.log.debug('title: %s' % title.clean_value) - if (series.clean_value != title.clean_value and - series.clean_value != film_number.clean_value and - basename_leaves.index(film_number) == 0 and - basename_leaves.index(title) == 1): - - found_property(title, 'title', confidence=0.6) - found_property(series, 'filmSeries', confidence=0.6) - found_property(film_number, 'filmNumber', num, confidence=0.6) - return - - if folder: - year_group = folder.first_leaf_containing('year') - if year_group: - groups_before = folder.previous_unidentified_leaves(year_group) - if groups_before: - try: - node = next(groups_before) - found_property(node, 'title', confidence=0.8) - return - except StopIteration: - pass - - # if we have either format or videoCodec in the folder containing the - # file or one of its parents, then we should probably look for the title - # in there rather than in the basename - try: - props = list(mtree.previous_leaves_containing(mtree.children[-2], - ['videoCodec', - 'format', - 'language'])) - except IndexError: - props = [] - - if props: - group_idx = props[0].node_idx[0] - if all(g.node_idx[0] == group_idx for g in props): - # if they're all in the same group, take leftover info from there - leftover = mtree.node_at((group_idx,)).unidentified_leaves() - try: - found_property(next(leftover), 'title', confidence=0.7) - return - except StopIteration: - pass - - # look for title in basename if there are some remaining unidentified - # groups there - if 
basename_leftover: - # if basename is only one word and the containing folder has at least - # 3 words in it, we should take the title from the folder name - # ex: Movies/Alice in Wonderland DVDRip.XviD-DiAMOND/dmd-aw.avi - # ex: Movies/Somewhere.2010.DVDRip.XviD-iLG/i-smwhr.avi <-- TODO: gets caught here? - if (basename_leftover[0].clean_value.count(' ') == 0 and - folder_leftover and folder_leftover[0].clean_value.count(' ') >= 2): - - found_property(folder_leftover[0], 'title', confidence=0.7) - return - - # if there are only many unidentified groups, take the first of which is - # not inside brackets or parentheses. - # ex: Movies/[阿维达].Avida.2006.FRENCH.DVDRiP.XViD-PROD.avi - if basename_leftover[0].is_explicit(): - for basename_leftover_elt in basename_leftover: - if not basename_leftover_elt.is_explicit(): - found_property(basename_leftover_elt, 'title', confidence=0.8) - return - - # if all else fails, take the first remaining unidentified group in the - # basename as title - found_property(basename_leftover[0], 'title', confidence=0.6) - return - - # if there are no leftover groups in the basename, look in the folder name - if folder_leftover: - found_property(folder_leftover[0], 'title', confidence=0.5) - return - - # if nothing worked, look if we have a very small group at the beginning - # of the basename - basename = mtree.node_at((-2,)) - basename_leftover = basename.unidentified_leaves(valid=lambda leaf: True) - try: - found_property(next(basename_leftover), 'title', confidence=0.4) - return - except StopIteration: - pass diff --git a/libs/guessit/transfo/guess_properties.py b/libs/guessit/transfo/guess_properties.py deleted file mode 100644 index 01aecddc..00000000 --- a/libs/guessit/transfo/guess_properties.py +++ /dev/null @@ -1,288 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# -# GuessIt - A library for guessing information from filenames -# Copyright (c) 2013 Rémi Alvergnat -# -# GuessIt is free software; you can redistribute it 
and/or modify it under -# the terms of the Lesser GNU General Public License as published by -# the Free Software Foundation; either version 3 of the License, or -# (at your option) any later version. -# -# GuessIt is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# Lesser GNU General Public License for more details. -# -# You should have received a copy of the Lesser GNU General Public License -# along with this program. If not, see . -# - -from __future__ import absolute_import, division, print_function, unicode_literals - -from guessit.containers import PropertiesContainer, WeakValidator, LeavesValidator, QualitiesContainer, NoValidator, \ - ChainedValidator, DefaultValidator, OnlyOneValidator, LeftValidator, NeighborValidator -from guessit.patterns import sep, build_or_pattern -from guessit.patterns.extension import subtitle_exts, video_exts, info_exts -from guessit.patterns.numeral import numeral, parse_numeral -from guessit.plugins.transformers import Transformer -from guessit.matcher import GuessFinder, found_property -import re - - -class GuessProperties(Transformer): - def __init__(self): - Transformer.__init__(self, 35) - - self.container = PropertiesContainer() - self.qualities = QualitiesContainer() - - def register_property(propname, props, **kwargs): - """props a dict of {value: [patterns]}""" - for canonical_form, patterns in props.items(): - if isinstance(patterns, tuple): - patterns2, pattern_kwarg = patterns - if kwargs: - current_kwarg = dict(kwargs) - current_kwarg.update(pattern_kwarg) - else: - current_kwarg = dict(pattern_kwarg) - current_kwarg['canonical_form'] = canonical_form - self.container.register_property(propname, *patterns2, **current_kwarg) - elif kwargs: - current_kwarg = dict(kwargs) - current_kwarg['canonical_form'] = canonical_form - self.container.register_property(propname, *patterns, 
**current_kwarg) - else: - self.container.register_property(propname, *patterns, canonical_form=canonical_form) - - def register_quality(propname, quality_dict): - """props a dict of {canonical_form: quality}""" - for canonical_form, quality in quality_dict.items(): - self.qualities.register_quality(propname, canonical_form, quality) - - register_property('container', {'mp4': ['MP4']}) - - # http://en.wikipedia.org/wiki/Pirated_movie_release_types - register_property('format', {'VHS': ['VHS', 'VHS-Rip'], - 'Cam': ['CAM', 'CAMRip', 'HD-CAM'], - #'Telesync': ['TELESYNC', 'PDVD'], - 'Telesync': (['TS', 'HD-TS'], {'confidence': 0.4}), - 'Workprint': ['WORKPRINT', 'WP'], - 'Telecine': ['TELECINE', 'TC'], - 'PPV': ['PPV', 'PPV-Rip'], # Pay Per View - 'TV': ['SD-TV', 'SD-TV-Rip', 'Rip-SD-TV', 'TV-Rip', 'Rip-TV'], - 'DVB': ['DVB-Rip', 'DVB', 'PD-TV'], - 'DVD': ['DVD', 'DVD-Rip', 'VIDEO-TS', 'DVD-R', 'DVD-9', 'DVD-5'], - 'HDTV': ['HD-TV', 'TV-RIP-HD', 'HD-TV-RIP'], - 'VOD': ['VOD', 'VOD-Rip'], - 'WEBRip': ['WEB-Rip'], - 'WEB-DL': ['WEB-DL', 'WEB-HD', 'WEB'], - 'HD-DVD': ['HD-(?:DVD)?-Rip', 'HD-DVD'], - 'BluRay': ['Blu-ray(?:-Rip)?', 'B[DR]', 'B[DR]-Rip', 'BD[59]', 'BD25', 'BD50'] - }) - - register_quality('format', {'VHS': -100, - 'Cam': -90, - 'Telesync': -80, - 'Workprint': -70, - 'Telecine': -60, - 'PPV': -50, - 'TV': -30, - 'DVB': -20, - 'DVD': 0, - 'HDTV': 20, - 'VOD': 40, - 'WEBRip': 50, - 'WEB-DL': 60, - 'HD-DVD': 80, - 'BluRay': 100 - }) - - register_property('screenSize', {'360p': ['(?:\d{3,}(?:\\|\/|x|\*))?360(?:i|p?x?)'], - '368p': ['(?:\d{3,}(?:\\|\/|x|\*))?368(?:i|p?x?)'], - '480p': ['(?:\d{3,}(?:\\|\/|x|\*))?480(?:i|p?x?)'], - #'480p': (['hr'], {'confidence': 0.2}), # duplicate dict key - '576p': ['(?:\d{3,}(?:\\|\/|x|\*))?576(?:i|p?x?)'], - '720p': ['(?:\d{3,}(?:\\|\/|x|\*))?720(?:i|p?x?)'], - '900p': ['(?:\d{3,}(?:\\|\/|x|\*))?900(?:i|p?x?)'], - '1080i': ['(?:\d{3,}(?:\\|\/|x|\*))?1080i'], - '1080p': ['(?:\d{3,}(?:\\|\/|x|\*))?1080p?x?'], - '4K': 
['(?:\d{3,}(?:\\|\/|x|\*))?2160(?:i|p?x?)'] - }, - validator=ChainedValidator(DefaultValidator(), OnlyOneValidator())) - - class ResolutionValidator(object): - """Make sure our match is surrounded by separators, or by another entry""" - def validate(self, prop, string, node, match, entry_start, entry_end): - """ - span = _get_span(prop, match) - span = _trim_span(span, string[span[0]:span[1]]) - start, end = span - - sep_start = start <= 0 or string[start - 1] in sep - sep_end = end >= len(string) or string[end] in sep - start_by_other = start in entry_end - end_by_other = end in entry_start - if (sep_start or start_by_other) and (sep_end or end_by_other): - return True - return False - """ - return True - - _digits_re = re.compile('\d+') - - def resolution_formatter(value): - digits = _digits_re.findall(value) - return 'x'.join(digits) - - self.container.register_property('screenSize', '\d{3,4}-?[x\*]-?\d{3,4}', canonical_from_pattern=False, formatter=resolution_formatter, validator=ChainedValidator(DefaultValidator(), ResolutionValidator())) - - register_quality('screenSize', {'360p': -300, - '368p': -200, - '480p': -100, - '576p': 0, - '720p': 100, - '900p': 130, - '1080i': 180, - '1080p': 200, - '4K': 400 - }) - - _videoCodecProperty = {'Real': ['Rv\d{2}'], # http://en.wikipedia.org/wiki/RealVideo - 'Mpeg2': ['Mpeg2'], - 'DivX': ['DVDivX', 'DivX'], - 'XviD': ['XviD'], - 'h264': ['[hx]-264(?:-AVC)?', 'MPEG-4(?:-AVC)'], - 'h265': ['[hx]-265(?:-HEVC)?', 'HEVC'] - } - - register_property('videoCodec', _videoCodecProperty) - - register_quality('videoCodec', {'Real': -50, - 'Mpeg2': -30, - 'DivX': -10, - 'XviD': 0, - 'h264': 100, - 'h265': 150 - }) - - # http://blog.mediacoderhq.com/h264-profiles-and-levels/ - # http://fr.wikipedia.org/wiki/H.264 - self.container.register_property('videoProfile', 'BP', validator=LeavesValidator(lambdas=[lambda node: 'videoCodec' in node.guess])) - self.container.register_property('videoProfile', 'XP', 'EP', canonical_form='XP', 
validator=LeavesValidator(lambdas=[lambda node: 'videoCodec' in node.guess])) - self.container.register_property('videoProfile', 'MP', validator=LeavesValidator(lambdas=[lambda node: 'videoCodec' in node.guess])) - self.container.register_property('videoProfile', 'HP', 'HiP', canonical_form='HP', validator=LeavesValidator(lambdas=[lambda node: 'videoCodec' in node.guess])) - self.container.register_property('videoProfile', '10.?bit', 'Hi10P', canonical_form='10bit') - self.container.register_property('videoProfile', '8.?bit', canonical_form='8bit') - self.container.register_property('videoProfile', 'Hi422P', validator=LeavesValidator(lambdas=[lambda node: 'videoCodec' in node.guess])) - self.container.register_property('videoProfile', 'Hi444PP', validator=LeavesValidator(lambdas=[lambda node: 'videoCodec' in node.guess])) - - register_quality('videoProfile', {'BP': -20, - 'XP': -10, - 'MP': 0, - 'HP': 10, - '10bit': 15, - 'Hi422P': 25, - 'Hi444PP': 35 - }) - - # has nothing to do here (or on filenames for that matter), but some - # releases use it and it helps to identify release groups, so we adapt - register_property('videoApi', {'DXVA': ['DXVA']}) - - register_property('audioCodec', {'MP3': ['MP3', 'LAME', 'LAME(?:\d)+-(?:\d)+'], - 'DolbyDigital': ['DD'], - 'AAC': ['AAC'], - 'AC3': ['AC3'], - 'Flac': ['FLAC'], - 'DTS': (['DTS'], {'validator': LeftValidator()}), - 'TrueHD': ['True-HD'] - }) - - register_quality('audioCodec', {'MP3': 10, - 'DolbyDigital': 30, - 'AAC': 35, - 'AC3': 40, - 'Flac': 45, - 'DTS': 60, - 'TrueHD': 70 - }) - - self.container.register_property('audioProfile', 'HD', validator=LeavesValidator(lambdas=[lambda node: node.guess.get('audioCodec') == 'DTS'])) - self.container.register_property('audioProfile', 'HD-MA', canonical_form='HDMA', validator=LeavesValidator(lambdas=[lambda node: node.guess.get('audioCodec') == 'DTS'])) - self.container.register_property('audioProfile', 'HE', validator=LeavesValidator(lambdas=[lambda node: 
node.guess.get('audioCodec') == 'AAC'])) - self.container.register_property('audioProfile', 'LC', validator=LeavesValidator(lambdas=[lambda node: node.guess.get('audioCodec') == 'AAC'])) - self.container.register_property('audioProfile', 'HQ', validator=LeavesValidator(lambdas=[lambda node: node.guess.get('audioCodec') == 'AC3'])) - - register_quality('audioProfile', {'HD': 20, - 'HDMA': 50, - 'LC': 0, - 'HQ': 0, - 'HE': 20 - }) - - register_property('audioChannels', {'7.1': ['7[\W_]1', '7ch', '8ch'], - '5.1': ['5[\W_]1', '5ch', '6ch'], - '2.0': ['2[\W_]0', '2ch', 'stereo'], - '1.0': ['1[\W_]0', '1ch', 'mono'] - }) - - register_quality('audioChannels', {'7.1': 200, - '5.1': 100, - '2.0': 0, - '1.0': -100 - }) - - self.container.register_property('episodeFormat', r'Minisodes?', canonical_form='Minisode') - - self.container.register_property('crc32', '(?:[a-fA-F]|[0-9]){8}', enhance=False, canonical_from_pattern=False) - - weak_episode_words = ['pt', 'part'] - self.container.register_property(None, '(' + build_or_pattern(weak_episode_words) + sep + '?(?P' + numeral + '))[^0-9]', enhance=False, canonical_from_pattern=False, confidence=0.4, formatter=parse_numeral) - - register_property('other', {'AudioFix': ['Audio-Fix', 'Audio-Fixed'], - 'SyncFix': ['Sync-Fix', 'Sync-Fixed'], - 'DualAudio': ['Dual-Audio'], - 'WideScreen': ['ws', 'wide-screen'], - 'Netflix': ['Netflix', 'NF'] - }) - - self.container.register_property('other', 'Real', 'Fix', canonical_form='Proper', validator=NeighborValidator()) - self.container.register_property('other', 'Proper', 'Repack', 'Rerip', canonical_form='Proper') - self.container.register_property('other', 'Fansub', canonical_form='Fansub') - self.container.register_property('other', 'Fastsub', canonical_form='Fastsub') - self.container.register_property('other', '(?:Seasons?' 
+ sep + '?)?Complete', canonical_form='Complete') - self.container.register_property('other', 'R5', 'RC', canonical_form='R5') - self.container.register_property('other', 'Pre-Air', 'Preair', canonical_form='Preair') - - self.container.register_canonical_properties('other', 'Screener', 'Remux', '3D', 'HD', 'mHD', 'HDLight', 'HQ', - 'DDC', - 'HR', 'PAL', 'SECAM', 'NTSC') - self.container.register_canonical_properties('other', 'Limited', 'Complete', 'Classic', 'Unrated', 'LiNE', 'Bonus', 'Trailer', validator=WeakValidator()) - - for prop in self.container.get_properties('format'): - self.container.register_property('other', prop.pattern + '(-?Scr(?:eener)?)', canonical_form='Screener') - - for exts in (subtitle_exts, info_exts, video_exts): - for container in exts: - self.container.register_property('container', container, confidence=0.3) - - def guess_properties(self, string, node=None, options=None): - found = self.container.find_properties(string, node, options) - return self.container.as_guess(found, string) - - def supported_properties(self): - return self.container.get_supported_properties() - - def process(self, mtree, options=None): - GuessFinder(self.guess_properties, 1.0, self.log, options).process_nodes(mtree.unidentified_leaves()) - proper_count = 0 - for other_leaf in mtree.leaves_containing('other'): - if 'other' in other_leaf.info and 'Proper' in other_leaf.info['other']: - proper_count += 1 - if proper_count: - found_property(mtree, 'properCount', proper_count) - - def rate_quality(self, guess, *props): - return self.qualities.rate_quality(guess, *props) diff --git a/libs/guessit/transfo/guess_release_group.py b/libs/guessit/transfo/guess_release_group.py deleted file mode 100644 index 646c7128..00000000 --- a/libs/guessit/transfo/guess_release_group.py +++ /dev/null @@ -1,204 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# -# GuessIt - A library for guessing information from filenames -# Copyright (c) 2013 Nicolas Wack -# -# GuessIt is 
free software; you can redistribute it and/or modify it under -# the terms of the Lesser GNU General Public License as published by -# the Free Software Foundation; either version 3 of the License, or -# (at your option) any later version. -# -# GuessIt is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# Lesser GNU General Public License for more details. -# -# You should have received a copy of the Lesser GNU General Public License -# along with this program. If not, see . -# - -from __future__ import absolute_import, division, print_function, unicode_literals - -from guessit.plugins.transformers import Transformer -from guessit.matcher import GuessFinder, build_guess -from guessit.containers import PropertiesContainer -from guessit.patterns import sep -from guessit.guess import Guess -from guessit.textutils import strip_brackets -import re - - -class GuessReleaseGroup(Transformer): - def __init__(self): - Transformer.__init__(self, -190) - - self.container = PropertiesContainer(canonical_from_pattern=False) - self._allowed_groupname_pattern = '[\w@#€£$&!\?]' - self._forbidden_groupname_lambda = [lambda elt: elt in ['rip', 'by', 'for', 'par', 'pour', 'bonus'], - lambda elt: self._is_number(elt)] - # If the previous property in this list, the match will be considered as safe - # and group name can contain a separator. 
- self.previous_safe_properties = ['videoCodec', 'format', 'videoApi', 'audioCodec', 'audioProfile', 'videoProfile', 'audioChannels', 'other'] - self.previous_safe_values = {'other': ['Complete']} - self.next_safe_properties = ['extension', 'website'] - self.next_safe_values = {'format': ['Telesync']} - self.container.sep_replace_char = '-' - self.container.canonical_from_pattern = False - self.container.enhance = True - self.container.register_property('releaseGroup', self._allowed_groupname_pattern + '+') - self.container.register_property('releaseGroup', self._allowed_groupname_pattern + '+-' + self._allowed_groupname_pattern + '+') - self.re_sep = re.compile('(' + sep + ')') - - def register_arguments(self, opts, naming_opts, output_opts, information_opts, webservice_opts, other_options): - naming_opts.add_argument('-G', '--expected-group', action='append', dest='expected_group', - help='Expected release group (can be used multiple times)') - - def supported_properties(self): - return self.container.get_supported_properties() - - def _is_number(self, s): - try: - int(s) - return True - except ValueError: - return False - - def validate_group_name(self, guess): - val = guess['releaseGroup'] - if len(val) > 1: - checked_val = "" - forbidden = False - for elt in self.re_sep.split(val): # separators are in the list because of capturing group - if forbidden: - # Previous was forbidden, don't had separator - forbidden = False - continue - for forbidden_lambda in self._forbidden_groupname_lambda: - forbidden = forbidden_lambda(elt.lower()) - if forbidden: - if checked_val: - # Removing previous separator - checked_val = checked_val[0:len(checked_val) - 1] - break - if not forbidden: - checked_val += elt - - val = checked_val - if not val: - return False - if self.re_sep.match(val[-1]): - val = val[:len(val)-1] - if self.re_sep.match(val[0]): - val = val[1:] - guess['releaseGroup'] = val - forbidden = False - for forbidden_lambda in self._forbidden_groupname_lambda: - 
forbidden = forbidden_lambda(val.lower()) - if forbidden: - break - if not forbidden: - return True - return False - - def is_leaf_previous(self, leaf, node): - if leaf.span[1] <= node.span[0]: - for idx in range(leaf.span[1], node.span[0]): - if leaf.root.value[idx] not in sep: - return False - return True - return False - - def validate_next_leaves(self, node): - if 'series' in node.root.info or 'title' in node.root.info: - # --expected-series or --expected-title is used. - return True - - # Make sure to avoid collision with 'series' or 'title' guessed later. Should be more precise. - leaves = node.root.unidentified_leaves() - return len(list(leaves)) > 1 - - def validate_node(self, leaf, node, safe=False): - if not self.is_leaf_previous(leaf, node): - return False - if not self.validate_next_leaves(node): - return False - if safe: - for k, v in leaf.guess.items(): - if k in self.previous_safe_values and not v in self.previous_safe_values[k]: - return False - return True - - def guess_release_group(self, string, node=None, options=None): - if options and options.get('expected_group'): - expected_container = PropertiesContainer(enhance=True, canonical_from_pattern=False) - for expected_group in options.get('expected_group'): - if expected_group.startswith('re:'): - expected_group = expected_group[3:] - expected_group = expected_group.replace(' ', '-') - expected_container.register_property('releaseGroup', expected_group, enhance=True) - else: - expected_group = re.escape(expected_group) - expected_container.register_property('releaseGroup', expected_group, enhance=False) - - found = expected_container.find_properties(string, node, options, 'releaseGroup') - guess = expected_container.as_guess(found, string, self.validate_group_name) - if guess: - return guess - - found = self.container.find_properties(string, node, options, 'releaseGroup') - guess = self.container.as_guess(found, string, self.validate_group_name) - validated_guess = None - if guess: - group_node = 
node.group_node() - if group_node: - for leaf in group_node.leaves_containing(self.previous_safe_properties): - if self.validate_node(leaf, node, True): - if leaf.root.value[leaf.span[1]] == '-': - guess.metadata().confidence = 1 - else: - guess.metadata().confidence = 0.7 - validated_guess = guess - - if not validated_guess: - # If previous group last leaf is identified as a safe property, - # consider the raw value as a releaseGroup - previous_group_node = node.previous_group_node() - if previous_group_node: - for leaf in previous_group_node.leaves_containing(self.previous_safe_properties): - if self.validate_node(leaf, node, False): - guess = Guess({'releaseGroup': node.value}, confidence=1, input=node.value, span=(0, len(node.value))) - if self.validate_group_name(guess): - node.guess = guess - validated_guess = guess - - if validated_guess: - # If following group nodes have only one unidentified leaf, it belongs to the release group - next_group_node = node - - while True: - next_group_node = next_group_node.next_group_node() - if next_group_node: - leaves = list(next_group_node.leaves()) - if len(leaves) == 1 and not leaves[0].guess: - validated_guess['releaseGroup'] = validated_guess['releaseGroup'] + leaves[0].value - leaves[0].guess = validated_guess - else: - break - else: - break - - if not validated_guess and node.is_explicit() and node.node_last_idx == 0: # first node from group - validated_guess = build_guess(node, 'releaseGroup', value=node.value[1:len(node.value)-1]) - validated_guess.metadata().confidence = 0.4 - validated_guess.metadata().span = 1, len(node.value) - node.guess = validated_guess - - if validated_guess: - # Strip brackets - validated_guess['releaseGroup'] = strip_brackets(validated_guess['releaseGroup']) - - return validated_guess - - def process(self, mtree, options=None): - GuessFinder(self.guess_release_group, None, self.log, options).process_nodes(mtree.unidentified_leaves()) diff --git 
a/libs/guessit/transfo/guess_video_rexps.py b/libs/guessit/transfo/guess_video_rexps.py deleted file mode 100644 index b1dca8ee..00000000 --- a/libs/guessit/transfo/guess_video_rexps.py +++ /dev/null @@ -1,58 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# -# GuessIt - A library for guessing information from filenames -# Copyright (c) 2013 Nicolas Wack -# -# GuessIt is free software; you can redistribute it and/or modify it under -# the terms of the Lesser GNU General Public License as published by -# the Free Software Foundation; either version 3 of the License, or -# (at your option) any later version. -# -# GuessIt is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# Lesser GNU General Public License for more details. -# -# You should have received a copy of the Lesser GNU General Public License -# along with this program. If not, see . -# - -from __future__ import absolute_import, division, print_function, \ - unicode_literals - -from guessit.patterns import _psep -from guessit.containers import PropertiesContainer -from guessit.plugins.transformers import Transformer -from guessit.matcher import GuessFinder -from guessit.patterns.numeral import parse_numeral - - -class GuessVideoRexps(Transformer): - def __init__(self): - Transformer.__init__(self, 25) - - self.container = PropertiesContainer(canonical_from_pattern=False) - - self.container.register_property(None, 'cd' + _psep + '(?P[0-9])(?:' + _psep + 'of' + _psep + '(?P[0-9]))?', confidence=1.0, enhance=False, global_span=True, formatter=parse_numeral) - self.container.register_property('cdNumberTotal', '([1-9])' + _psep + 'cds?', confidence=0.9, enhance=False, formatter=parse_numeral) - - self.container.register_property('bonusNumber', 'x([0-9]{1,2})', enhance=False, global_span=True, formatter=parse_numeral) - - self.container.register_property('filmNumber', 
'f([0-9]{1,2})', enhance=False, global_span=True, formatter=parse_numeral) - - self.container.register_property('edition', 'collector', 'collector-edition', 'edition-collector', canonical_form='Collector Edition') - self.container.register_property('edition', 'special-edition', 'edition-special', canonical_form='Special Edition') - self.container.register_property('edition', 'criterion', 'criterion-edition', 'edition-criterion', canonical_form='Criterion Edition') - self.container.register_property('edition', 'deluxe', 'cdeluxe-edition', 'edition-deluxe', canonical_form='Deluxe Edition') - self.container.register_property('edition', 'director\'?s?-cut', 'director\'?s?-cut-edition', 'edition-director\'?s?-cut', canonical_form='Director\'s cut') - - def supported_properties(self): - return self.container.get_supported_properties() - - def guess_video_rexps(self, string, node=None, options=None): - found = self.container.find_properties(string, node, options) - return self.container.as_guess(found, string) - - def process(self, mtree, options=None): - GuessFinder(self.guess_video_rexps, None, self.log, options).process_nodes(mtree.unidentified_leaves()) diff --git a/libs/guessit/transfo/guess_weak_episodes_rexps.py b/libs/guessit/transfo/guess_weak_episodes_rexps.py deleted file mode 100644 index 93d7a7bb..00000000 --- a/libs/guessit/transfo/guess_weak_episodes_rexps.py +++ /dev/null @@ -1,81 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# -# GuessIt - A library for guessing information from filenames -# Copyright (c) 2013 Nicolas Wack -# -# GuessIt is free software; you can redistribute it and/or modify it under -# the terms of the Lesser GNU General Public License as published by -# the Free Software Foundation; either version 3 of the License, or -# (at your option) any later version. 
-# -# GuessIt is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# Lesser GNU General Public License for more details. -# -# You should have received a copy of the Lesser GNU General Public License -# along with this program. If not, see . -# - -from __future__ import absolute_import, division, print_function, unicode_literals - -from guessit.plugins.transformers import Transformer -from guessit.matcher import GuessFinder -from guessit.patterns import sep, build_or_pattern -from guessit.containers import PropertiesContainer, LeavesValidator, NoValidator, WeakValidator -from guessit.patterns.numeral import numeral, parse_numeral -from guessit.date import valid_year - -import re - - -class GuessWeakEpisodesRexps(Transformer): - def __init__(self): - Transformer.__init__(self, 15) - - of_separators = ['of', 'sur', '/', '\\'] - of_separators_re = re.compile(build_or_pattern(of_separators, escape=True), re.IGNORECASE) - - self.container = PropertiesContainer(enhance=False, canonical_from_pattern=False) - - episode_words = ['episodes?'] - - def _formater(episode_number): - epnum = parse_numeral(episode_number) - if not valid_year(epnum): - if epnum > 100: - season, epnum = epnum // 100, epnum % 100 - # episodes which have a season > 50 are most likely errors - # (Simpson is at 25!) 
- if season > 50: - return None - return {'season': season, 'episodeNumber': epnum} - else: - return epnum - - self.container.register_property(['episodeNumber', 'season'], '[0-9]{2,4}', confidence=0.6, formatter=_formater, disabler=lambda options: options.get('episode_prefer_number') if options else False) - self.container.register_property(['episodeNumber', 'season'], '[0-9]{4}', confidence=0.6, formatter=_formater) - self.container.register_property('episodeNumber', '[^0-9](\d{1,3})', confidence=0.6, formatter=parse_numeral, disabler=lambda options: not options.get('episode_prefer_number') if options else True) - self.container.register_property(None, '(' + build_or_pattern(episode_words) + sep + '?(?P' + numeral + '))[^0-9]', confidence=0.4, formatter=parse_numeral) - self.container.register_property(None, r'(?P' + numeral + ')' + sep + '?' + of_separators_re.pattern + sep + '?(?P' + numeral +')', confidence=0.6, formatter=parse_numeral) - self.container.register_property('episodeNumber', r'^' + sep + '?(\d{1,3})' + sep, confidence=0.4, formatter=parse_numeral, disabler=lambda options: not options.get('episode_prefer_number') if options else True) - self.container.register_property('episodeNumber', sep + r'(\d{1,3})' + sep + '?$', confidence=0.4, formatter=parse_numeral, disabler=lambda options: not options.get('episode_prefer_number') if options else True) - - def supported_properties(self): - return self.container.get_supported_properties() - - def guess_weak_episodes_rexps(self, string, node=None, options=None): - if node and 'episodeNumber' in node.root.info: - return None - - properties = self.container.find_properties(string, node, options) - guess = self.container.as_guess(properties, string) - - return guess - - def should_process(self, mtree, options=None): - return mtree.guess.get('type', '').startswith('episode') - - def process(self, mtree, options=None): - GuessFinder(self.guess_weak_episodes_rexps, 0.6, self.log, 
options).process_nodes(mtree.unidentified_leaves()) diff --git a/libs/guessit/transfo/guess_website.py b/libs/guessit/transfo/guess_website.py deleted file mode 100644 index aa33226b..00000000 --- a/libs/guessit/transfo/guess_website.py +++ /dev/null @@ -1,56 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# -# GuessIt - A library for guessing information from filenames -# Copyright (c) 2013 Rémi Alvergnat -# -# GuessIt is free software; you can redistribute it and/or modify it under -# the terms of the Lesser GNU General Public License as published by -# the Free Software Foundation; either version 3 of the License, or -# (at your option) any later version. -# -# GuessIt is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# Lesser GNU General Public License for more details. -# -# You should have received a copy of the Lesser GNU General Public License -# along with this program. If not, see . 
-# - -from __future__ import absolute_import, division, print_function, unicode_literals -from guessit.patterns import build_or_pattern -from guessit.containers import PropertiesContainer -from guessit.plugins.transformers import Transformer -from guessit.matcher import GuessFinder -from pkg_resources import resource_stream # @UnresolvedImport - -TLDS = [l.strip().decode('utf-8') - for l in resource_stream('guessit', 'tlds-alpha-by-domain.txt').readlines() - if b'--' not in l][1:] - - -class GuessWebsite(Transformer): - def __init__(self): - Transformer.__init__(self, 45) - - self.container = PropertiesContainer(enhance=False, canonical_from_pattern=False) - - tlds_pattern = build_or_pattern(TLDS) # All registered domain extension - safe_tlds_pattern = build_or_pattern(['com', 'org', 'net']) # For sure a website extension - safe_subdomains_pattern = build_or_pattern(['www']) # For sure a website subdomain - safe_prefix_tlds_pattern = build_or_pattern(['co', 'com', 'org', 'net']) # Those words before a tlds are sure - - self.container.register_property('website', '(?:' + safe_subdomains_pattern + '\.)+' + r'(?:[a-z-]+\.)+' + r'(?:' + tlds_pattern + r')+') - self.container.register_property('website', '(?:' + safe_subdomains_pattern + '\.)*' + r'[a-z-]+\.' + r'(?:' + safe_tlds_pattern + r')+') - self.container.register_property('website', '(?:' + safe_subdomains_pattern + '\.)*' + r'[a-z-]+\.' 
+ r'(?:' + safe_prefix_tlds_pattern + r'\.)+' + r'(?:' + tlds_pattern + r')+') - - def supported_properties(self): - return self.container.get_supported_properties() - - def guess_website(self, string, node=None, options=None): - found = self.container.find_properties(string, node, options, 'website') - return self.container.as_guess(found, string) - - def process(self, mtree, options=None): - GuessFinder(self.guess_website, 1.0, self.log, options).process_nodes(mtree.unidentified_leaves()) diff --git a/libs/guessit/transfo/guess_year.py b/libs/guessit/transfo/guess_year.py deleted file mode 100644 index 61363da5..00000000 --- a/libs/guessit/transfo/guess_year.py +++ /dev/null @@ -1,57 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# -# GuessIt - A library for guessing information from filenames -# Copyright (c) 2013 Nicolas Wack -# -# GuessIt is free software; you can redistribute it and/or modify it under -# the terms of the Lesser GNU General Public License as published by -# the Free Software Foundation; either version 3 of the License, or -# (at your option) any later version. -# -# GuessIt is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# Lesser GNU General Public License for more details. -# -# You should have received a copy of the Lesser GNU General Public License -# along with this program. If not, see . 
-# - -from __future__ import absolute_import, division, print_function, unicode_literals - -from guessit.plugins.transformers import Transformer -from guessit.matcher import GuessFinder -from guessit.date import search_year, valid_year - - -class GuessYear(Transformer): - def __init__(self): - Transformer.__init__(self, -160) - - def supported_properties(self): - return ['year'] - - def guess_year(self, string, node=None, options=None): - year, span = search_year(string) - if year: - return {'year': year}, span - else: - return None, None - - def second_pass_options(self, mtree, options=None): - year_nodes = list(mtree.leaves_containing('year')) - if len(year_nodes) > 1: - return {'skip_nodes': year_nodes[:len(year_nodes) - 1]} - return None - - def process(self, mtree, options=None): - GuessFinder(self.guess_year, 1.0, self.log, options).process_nodes(mtree.unidentified_leaves()) - - # if we found a season number that is a valid year, it is usually safe to assume - # we can also set the year property to that value - for n in mtree.leaves_containing('season'): - g = n.guess - season = g['season'] - if valid_year(season): - g['year'] = season diff --git a/libs/guessit/transfo/split_explicit_groups.py b/libs/guessit/transfo/split_explicit_groups.py deleted file mode 100644 index 67d54cfb..00000000 --- a/libs/guessit/transfo/split_explicit_groups.py +++ /dev/null @@ -1,49 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# -# GuessIt - A library for guessing information from filenames -# Copyright (c) 2013 Nicolas Wack -# -# GuessIt is free software; you can redistribute it and/or modify it under -# the terms of the Lesser GNU General Public License as published by -# the Free Software Foundation; either version 3 of the License, or -# (at your option) any later version. -# -# GuessIt is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -# Lesser GNU General Public License for more details. -# -# You should have received a copy of the Lesser GNU General Public License -# along with this program. If not, see . -# - -from __future__ import absolute_import, division, print_function, unicode_literals - -from guessit.plugins.transformers import Transformer -from guessit.textutils import find_first_level_groups -from guessit.patterns import group_delimiters -from functools import reduce - - -class SplitExplicitGroups(Transformer): - def __init__(self): - Transformer.__init__(self, 250) - - def process(self, mtree, options=None): - """split each of those into explicit groups (separated by parentheses or square brackets) - - :return: return the string split into explicit groups, that is, those either - between parenthese, square brackets or curly braces, and those separated - by a dash.""" - for c in mtree.children: - groups = find_first_level_groups(c.value, group_delimiters[0]) - for delimiters in group_delimiters: - flatten = lambda l, x: l + find_first_level_groups(x, delimiters) - groups = reduce(flatten, groups, []) - - # do not do this at this moment, it is not strong enough and can break other - # patterns, such as dates, etc... - # groups = functools.reduce(lambda l, x: l + x.split('-'), groups, []) - - c.split_on_components(groups) diff --git a/libs/guessit/transfo/split_on_dash.py b/libs/guessit/transfo/split_on_dash.py deleted file mode 100644 index e86c6a3f..00000000 --- a/libs/guessit/transfo/split_on_dash.py +++ /dev/null @@ -1,47 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# -# GuessIt - A library for guessing information from filenames -# Copyright (c) 2013 Nicolas Wack -# -# GuessIt is free software; you can redistribute it and/or modify it under -# the terms of the Lesser GNU General Public License as published by -# the Free Software Foundation; either version 3 of the License, or -# (at your option) any later version. 
-# -# GuessIt is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# Lesser GNU General Public License for more details. -# -# You should have received a copy of the Lesser GNU General Public License -# along with this program. If not, see . -# - -from __future__ import absolute_import, division, print_function, unicode_literals - -from guessit.plugins.transformers import Transformer -from guessit.patterns import sep -import re - - -class SplitOnDash(Transformer): - def __init__(self): - Transformer.__init__(self, 245) - - def process(self, mtree, options=None): - """split into '-' separated subgroups (with required separator chars - around the dash) - """ - for node in mtree.unidentified_leaves(): - indices = [] - - pattern = re.compile(sep + '-' + sep) - match = pattern.search(node.value) - while match: - span = match.span() - indices.extend([span[0], span[1]]) - match = pattern.search(node.value, span[1]) - - if indices: - node.partition(indices) diff --git a/libs/guessit/transfo/split_path_components.py b/libs/guessit/transfo/split_path_components.py deleted file mode 100644 index c630a30c..00000000 --- a/libs/guessit/transfo/split_path_components.py +++ /dev/null @@ -1,45 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# -# GuessIt - A library for guessing information from filenames -# Copyright (c) 2013 Nicolas Wack -# -# GuessIt is free software; you can redistribute it and/or modify it under -# the terms of the Lesser GNU General Public License as published by -# the Free Software Foundation; either version 3 of the License, or -# (at your option) any later version. -# -# GuessIt is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# Lesser GNU General Public License for more details. 
-# -# You should have received a copy of the Lesser GNU General Public License -# along with this program. If not, see . -# - -from __future__ import absolute_import, division, print_function, unicode_literals - -from guessit.plugins.transformers import Transformer -from guessit import fileutils -from os.path import splitext - - -class SplitPathComponents(Transformer): - def __init__(self): - Transformer.__init__(self, 255) - - def process(self, mtree, options=None): - """first split our path into dirs + basename + ext - - :return: the filename split into [ dir*, basename, ext ] - """ - if not options.get('name_only'): - components = fileutils.split_path(mtree.value) - basename = components.pop(-1) - components += list(splitext(basename)) - components[-1] = components[-1][1:] # remove the '.' from the extension - - mtree.split_on_components(components) - else: - mtree.split_on_components([mtree.value, '']) diff --git a/libs/guessit/yamlutils.py b/libs/guessit/yamlutils.py new file mode 100644 index 00000000..2824575d --- /dev/null +++ b/libs/guessit/yamlutils.py @@ -0,0 +1,71 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +Options +""" +try: + from collections import OrderedDict +except ImportError: # pragma: no-cover + from ordereddict import OrderedDict # pylint:disable=import-error +import babelfish + +import yaml + + +class OrderedDictYAMLLoader(yaml.Loader): + """ + A YAML loader that loads mappings into ordered dictionaries. 
+ From https://gist.github.com/enaeseth/844388 + """ + + def __init__(self, *args, **kwargs): + yaml.Loader.__init__(self, *args, **kwargs) + + self.add_constructor(u'tag:yaml.org,2002:map', type(self).construct_yaml_map) + self.add_constructor(u'tag:yaml.org,2002:omap', type(self).construct_yaml_map) + + def construct_yaml_map(self, node): + data = OrderedDict() + yield data + value = self.construct_mapping(node) + data.update(value) + + def construct_mapping(self, node, deep=False): + if isinstance(node, yaml.MappingNode): + self.flatten_mapping(node) + else: # pragma: no cover + raise yaml.constructor.ConstructorError(None, None, + 'expected a mapping node, but found %s' % node.id, node.start_mark) + + mapping = OrderedDict() + for key_node, value_node in node.value: + key = self.construct_object(key_node, deep=deep) + try: + hash(key) + except TypeError as exc: # pragma: no cover + raise yaml.constructor.ConstructorError('while constructing a mapping', + node.start_mark, 'found unacceptable key (%s)' + % exc, key_node.start_mark) + value = self.construct_object(value_node, deep=deep) + mapping[key] = value + return mapping + + +class CustomDumper(yaml.SafeDumper): + """ + Custom YAML Dumper. 
+ """ + pass + + +def default_representer(dumper, data): + """Default representer""" + return dumper.represent_str(str(data)) +CustomDumper.add_representer(babelfish.Language, default_representer) +CustomDumper.add_representer(babelfish.Country, default_representer) + + +def ordered_dict_representer(dumper, data): + """OrderedDict representer""" + return dumper.represent_dict(data) +CustomDumper.add_representer(OrderedDict, ordered_dict_representer) diff --git a/libs/rarfile.py b/libs/rarfile.py new file mode 100644 index 00000000..25b61196 --- /dev/null +++ b/libs/rarfile.py @@ -0,0 +1,2002 @@ +# rarfile.py +# +# Copyright (c) 2005-2016 Marko Kreen +# +# Permission to use, copy, modify, and/or distribute this software for any +# purpose with or without fee is hereby granted, provided that the above +# copyright notice and this permission notice appear in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +r"""RAR archive reader. + +This is Python module for Rar archive reading. The interface +is made as :mod:`zipfile`-like as possible. + +Basic logic: + - Parse archive structure with Python. + - Extract non-compressed files with Python + - Extract compressed files with unrar. + - Optionally write compressed data to temp file to speed up unrar, + otherwise it needs to scan whole archive on each execution. 
+ +Example:: + + import rarfile + + rf = rarfile.RarFile('myarchive.rar') + for f in rf.infolist(): + print f.filename, f.file_size + if f.filename == 'README': + print(rf.read(f)) + +Archive files can also be accessed via file-like object returned +by :meth:`RarFile.open`:: + + import rarfile + + with rarfile.RarFile('archive.rar') as rf: + with rf.open('README') as f: + for ln in f: + print(ln.strip()) + +There are few module-level parameters to tune behaviour, +here they are with defaults, and reason to change it:: + + import rarfile + + # Set to full path of unrar.exe if it is not in PATH + rarfile.UNRAR_TOOL = "unrar" + + # Set to 0 if you don't look at comments and want to + # avoid wasting time for parsing them + rarfile.NEED_COMMENTS = 1 + + # Set up to 1 if you don't want to deal with decoding comments + # from unknown encoding. rarfile will try couple of common + # encodings in sequence. + rarfile.UNICODE_COMMENTS = 0 + + # Set to 1 if you prefer timestamps to be datetime objects + # instead tuples + rarfile.USE_DATETIME = 0 + + # Set to '/' to be more compatible with zipfile + rarfile.PATH_SEP = '\\' + +For more details, refer to source. 
+ +""" + +__version__ = '2.8' + +# export only interesting items +__all__ = ['is_rarfile', 'RarInfo', 'RarFile', 'RarExtFile'] + +## +## Imports and compat - support both Python 2.x and 3.x +## + +import sys, os, struct, errno +from struct import pack, unpack, Struct +from binascii import crc32 +from tempfile import mkstemp +from subprocess import Popen, PIPE, STDOUT +from datetime import datetime +from io import RawIOBase +from hashlib import sha1 + +# only needed for encryped headers +try: + try: + from cryptography.hazmat.primitives.ciphers import algorithms, modes, Cipher + from cryptography.hazmat.backends import default_backend + class AES_CBC_Decrypt(object): + block_size = 16 + def __init__(self, key, iv): + ciph = Cipher(algorithms.AES(key), modes.CBC(iv), default_backend()) + self.dec = ciph.decryptor() + def decrypt(self, data): + return self.dec.update(data) + except ImportError: + from Crypto.Cipher import AES + class AES_CBC_Decrypt(object): + block_size = 16 + def __init__(self, key, iv): + self.dec = AES.new(key, AES.MODE_CBC, iv) + def decrypt(self, data): + return self.dec.decrypt(data) + _have_crypto = 1 +except ImportError: + _have_crypto = 0 + +# compat with 2.x +if sys.hexversion < 0x3000000: + # prefer 3.x behaviour + range = xrange +else: + unicode = str + +## +## Module configuration. Can be tuned after importing. +## + +#: default fallback charset +DEFAULT_CHARSET = "windows-1252" + +#: list of encodings to try, with fallback to DEFAULT_CHARSET if none succeed +TRY_ENCODINGS = ('utf8', 'utf-16le') + +#: 'unrar', 'rar' or full path to either one +UNRAR_TOOL = "unrar" + +#: Command line args to use for opening file for reading. +OPEN_ARGS = ('p', '-inul') + +#: Command line args to use for extracting file to disk. +EXTRACT_ARGS = ('x', '-y', '-idq') + +#: args for testrar() +TEST_ARGS = ('t', '-idq') + +# +# Allow use of tool that is not compatible with unrar. 
+# +# By default use 'bsdtar' which is 'tar' program that +# sits on top of libarchive. +# +# Problems with libarchive RAR backend: +# - Does not support solid archives. +# - Does not support password-protected archives. +# + +ALT_TOOL = 'bsdtar' +ALT_OPEN_ARGS = ('-x', '--to-stdout', '-f') +ALT_EXTRACT_ARGS = ('-x', '-f') +ALT_TEST_ARGS = ('-t', '-f') +ALT_CHECK_ARGS = ('--help',) + +#: whether to speed up decompression by using tmp archive +USE_EXTRACT_HACK = 1 + +#: limit the filesize for tmp archive usage +HACK_SIZE_LIMIT = 20*1024*1024 + +#: whether to parse file/archive comments. +NEED_COMMENTS = 1 + +#: whether to convert comments to unicode strings +UNICODE_COMMENTS = 0 + +#: Convert RAR time tuple into datetime() object +USE_DATETIME = 0 + +#: Separator for path name components. RAR internally uses '\\'. +#: Use '/' to be similar with zipfile. +PATH_SEP = '\\' + +## +## rar constants +## + +# block types +RAR_BLOCK_MARK = 0x72 # r +RAR_BLOCK_MAIN = 0x73 # s +RAR_BLOCK_FILE = 0x74 # t +RAR_BLOCK_OLD_COMMENT = 0x75 # u +RAR_BLOCK_OLD_EXTRA = 0x76 # v +RAR_BLOCK_OLD_SUB = 0x77 # w +RAR_BLOCK_OLD_RECOVERY = 0x78 # x +RAR_BLOCK_OLD_AUTH = 0x79 # y +RAR_BLOCK_SUB = 0x7a # z +RAR_BLOCK_ENDARC = 0x7b # { + +# flags for RAR_BLOCK_MAIN +RAR_MAIN_VOLUME = 0x0001 +RAR_MAIN_COMMENT = 0x0002 +RAR_MAIN_LOCK = 0x0004 +RAR_MAIN_SOLID = 0x0008 +RAR_MAIN_NEWNUMBERING = 0x0010 +RAR_MAIN_AUTH = 0x0020 +RAR_MAIN_RECOVERY = 0x0040 +RAR_MAIN_PASSWORD = 0x0080 +RAR_MAIN_FIRSTVOLUME = 0x0100 +RAR_MAIN_ENCRYPTVER = 0x0200 + +# flags for RAR_BLOCK_FILE +RAR_FILE_SPLIT_BEFORE = 0x0001 +RAR_FILE_SPLIT_AFTER = 0x0002 +RAR_FILE_PASSWORD = 0x0004 +RAR_FILE_COMMENT = 0x0008 +RAR_FILE_SOLID = 0x0010 +RAR_FILE_DICTMASK = 0x00e0 +RAR_FILE_DICT64 = 0x0000 +RAR_FILE_DICT128 = 0x0020 +RAR_FILE_DICT256 = 0x0040 +RAR_FILE_DICT512 = 0x0060 +RAR_FILE_DICT1024 = 0x0080 +RAR_FILE_DICT2048 = 0x00a0 +RAR_FILE_DICT4096 = 0x00c0 +RAR_FILE_DIRECTORY = 0x00e0 +RAR_FILE_LARGE = 0x0100 +RAR_FILE_UNICODE = 
0x0200 +RAR_FILE_SALT = 0x0400 +RAR_FILE_VERSION = 0x0800 +RAR_FILE_EXTTIME = 0x1000 +RAR_FILE_EXTFLAGS = 0x2000 + +# flags for RAR_BLOCK_ENDARC +RAR_ENDARC_NEXT_VOLUME = 0x0001 +RAR_ENDARC_DATACRC = 0x0002 +RAR_ENDARC_REVSPACE = 0x0004 +RAR_ENDARC_VOLNR = 0x0008 + +# flags common to all blocks +RAR_SKIP_IF_UNKNOWN = 0x4000 +RAR_LONG_BLOCK = 0x8000 + +# Host OS types +RAR_OS_MSDOS = 0 +RAR_OS_OS2 = 1 +RAR_OS_WIN32 = 2 +RAR_OS_UNIX = 3 +RAR_OS_MACOS = 4 +RAR_OS_BEOS = 5 + +# Compression methods - '0'..'5' +RAR_M0 = 0x30 +RAR_M1 = 0x31 +RAR_M2 = 0x32 +RAR_M3 = 0x33 +RAR_M4 = 0x34 +RAR_M5 = 0x35 + +## +## internal constants +## + +RAR_ID = b"Rar!\x1a\x07\x00" +ZERO = b"\0" +EMPTY = b"" + +S_BLK_HDR = Struct(' 0 + + +class RarFile(object): + '''Parse RAR structure, provide access to files in archive. + ''' + + #: Archive comment. Byte string or None. Use :data:`UNICODE_COMMENTS` + #: to get automatic decoding to unicode. + comment = None + + def __init__(self, rarfile, mode="r", charset=None, info_callback=None, + crc_check = True, errors = "stop"): + """Open and parse a RAR archive. + + Parameters: + + rarfile + archive file name + mode + only 'r' is supported. + charset + fallback charset to use, if filenames are not already Unicode-enabled. + info_callback + debug callback, gets to see all archive entries. + crc_check + set to False to disable CRC checks + errors + Either "stop" to quietly stop parsing on errors, + or "strict" to raise errors. Default is "stop". 
+ """ + self.rarfile = rarfile + self.comment = None + self._charset = charset or DEFAULT_CHARSET + self._info_callback = info_callback + + self._info_list = [] + self._info_map = {} + self._parse_error = None + self._needs_password = False + self._password = None + self._crc_check = crc_check + self._vol_list = [] + + if errors == "stop": + self._strict = False + elif errors == "strict": + self._strict = True + else: + raise ValueError("Invalid value for 'errors' parameter.") + + self._main = None + + if mode != "r": + raise NotImplementedError("RarFile supports only mode=r") + + self._parse() + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + self.close() + + def setpassword(self, password): + '''Sets the password to use when extracting.''' + self._password = password + if not self._main: + self._parse() + + def needs_password(self): + '''Returns True if any archive entries require password for extraction.''' + return self._needs_password + + def namelist(self): + '''Return list of filenames in archive.''' + return [f.filename for f in self.infolist()] + + def infolist(self): + '''Return RarInfo objects for all files/directories in archive.''' + return self._info_list + + def volumelist(self): + '''Returns filenames of archive volumes. + + In case of single-volume archive, the list contains + just the name of main archive file. + ''' + return self._vol_list + + def getinfo(self, fname): + '''Return RarInfo for file.''' + + if isinstance(fname, RarInfo): + return fname + + # accept both ways here + if PATH_SEP == '/': + fname2 = fname.replace("\\", "/") + else: + fname2 = fname.replace("/", "\\") + + try: + return self._info_map[fname] + except KeyError: + try: + return self._info_map[fname2] + except KeyError: + raise NoRarEntry("No such file: "+fname) + + def open(self, fname, mode = 'r', psw = None): + '''Returns file-like object (:class:`RarExtFile`), + from where the data can be read. 
+ + The object implements :class:`io.RawIOBase` interface, so it can + be further wrapped with :class:`io.BufferedReader` + and :class:`io.TextIOWrapper`. + + On older Python where io module is not available, it implements + only .read(), .seek(), .tell() and .close() methods. + + The object is seekable, although the seeking is fast only on + uncompressed files, on compressed files the seeking is implemented + by reading ahead and/or restarting the decompression. + + Parameters: + + fname + file name or RarInfo instance. + mode + must be 'r' + psw + password to use for extracting. + ''' + + if mode != 'r': + raise NotImplementedError("RarFile.open() supports only mode=r") + + # entry lookup + inf = self.getinfo(fname) + if inf.isdir(): + raise TypeError("Directory does not have any data: " + inf.filename) + + if inf.flags & RAR_FILE_SPLIT_BEFORE: + raise NeedFirstVolume("Partial file, please start from first volume: " + inf.filename) + + # check password + if inf.needs_password(): + psw = psw or self._password + if psw is None: + raise PasswordRequired("File %s requires password" % inf.filename) + else: + psw = None + + # is temp write usable? + use_hack = 1 + if not self._main: + use_hack = 0 + elif self._main.flags & (RAR_MAIN_SOLID | RAR_MAIN_PASSWORD): + use_hack = 0 + elif inf.flags & (RAR_FILE_SPLIT_BEFORE | RAR_FILE_SPLIT_AFTER): + use_hack = 0 + elif is_filelike(self.rarfile): + pass + elif inf.file_size > HACK_SIZE_LIMIT: + use_hack = 0 + elif not USE_EXTRACT_HACK: + use_hack = 0 + + # now extract + if inf.compress_type == RAR_M0 and (inf.flags & RAR_FILE_PASSWORD) == 0: + return self._open_clear(inf) + elif use_hack: + return self._open_hack(inf, psw) + elif is_filelike(self.rarfile): + return self._open_unrar_membuf(self.rarfile, inf, psw) + else: + return self._open_unrar(self.rarfile, inf, psw) + + def read(self, fname, psw = None): + """Return uncompressed data for archive entry. + + For longer files using :meth:`RarFile.open` may be better idea. 
+ + Parameters: + + fname + filename or RarInfo instance + psw + password to use for extracting. + """ + + f = self.open(fname, 'r', psw) + try: + return f.read() + finally: + f.close() + + def close(self): + """Release open resources.""" + pass + + def printdir(self): + """Print archive file list to stdout.""" + for f in self.infolist(): + print(f.filename) + + def extract(self, member, path=None, pwd=None): + """Extract single file into current directory. + + Parameters: + + member + filename or :class:`RarInfo` instance + path + optional destination path + pwd + optional password to use + """ + if isinstance(member, RarInfo): + fname = member.filename + else: + fname = member + self._extract([fname], path, pwd) + + def extractall(self, path=None, members=None, pwd=None): + """Extract all files into current directory. + + Parameters: + + path + optional destination path + members + optional filename or :class:`RarInfo` instance list to extract + pwd + optional password to use + """ + fnlist = [] + if members is not None: + for m in members: + if isinstance(m, RarInfo): + fnlist.append(m.filename) + else: + fnlist.append(m) + self._extract(fnlist, path, pwd) + + def testrar(self): + """Let 'unrar' test the archive. + """ + cmd = [UNRAR_TOOL] + list(TEST_ARGS) + add_password_arg(cmd, self._password) + cmd.append('--') + + if is_filelike(self.rarfile): + tmpname = membuf_tempfile(self.rarfile) + cmd.append(tmpname) + else: + tmpname = None + cmd.append(self.rarfile) + + try: + p = custom_popen(cmd) + output = p.communicate()[0] + check_returncode(p, output) + finally: + if tmpname: + os.unlink(tmpname) + + def strerror(self): + """Return error string if parsing failed, + or None if no problems. 
+ """ + return self._parse_error + + ## + ## private methods + ## + + def _set_error(self, msg, *args): + if args: + msg = msg % args + self._parse_error = msg + if self._strict: + raise BadRarFile(msg) + + # store entry + def _process_entry(self, item): + if item.type == RAR_BLOCK_FILE: + # use only first part + if (item.flags & RAR_FILE_SPLIT_BEFORE) == 0: + self._info_map[item.filename] = item + self._info_list.append(item) + # remember if any items require password + if item.needs_password(): + self._needs_password = True + elif len(self._info_list) > 0: + # final crc is in last block + old = self._info_list[-1] + old.CRC = item.CRC + old.compress_size += item.compress_size + + # parse new-style comment + if item.type == RAR_BLOCK_SUB and item.filename == 'CMT': + if not NEED_COMMENTS: + pass + elif item.flags & (RAR_FILE_SPLIT_BEFORE | RAR_FILE_SPLIT_AFTER): + pass + elif item.flags & RAR_FILE_SOLID: + # file comment + cmt = self._read_comment_v3(item, self._password) + if len(self._info_list) > 0: + old = self._info_list[-1] + old.comment = cmt + else: + # archive comment + cmt = self._read_comment_v3(item, self._password) + self.comment = cmt + + if self._info_callback: + self._info_callback(item) + + # read rar + def _parse(self): + self._fd = None + try: + self._parse_real() + finally: + if self._fd: + self._fd.close() + self._fd = None + + def _parse_real(self): + fd = XFile(self.rarfile) + self._fd = fd + id = fd.read(len(RAR_ID)) + if id != RAR_ID: + if isinstance(self.rarfile, (str, unicode)): + raise NotRarFile("Not a Rar archive: {}".format(self.rarfile)) + raise NotRarFile("Not a Rar archive") + + volume = 0 # first vol (.rar) is 0 + more_vols = 0 + endarc = 0 + volfile = self.rarfile + self._vol_list = [self.rarfile] + while 1: + if endarc: + h = None # don't read past ENDARC + else: + h = self._parse_header(fd) + if not h: + if more_vols: + volume += 1 + fd.close() + try: + volfile = self._next_volname(volfile) + fd = XFile(volfile) + except 
IOError: + self._set_error("Cannot open next volume: %s", volfile) + break + self._fd = fd + more_vols = 0 + endarc = 0 + self._vol_list.append(volfile) + continue + break + h.volume = volume + h.volume_file = volfile + + if h.type == RAR_BLOCK_MAIN and not self._main: + self._main = h + if h.flags & RAR_MAIN_NEWNUMBERING: + # RAR 2.x does not set FIRSTVOLUME, + # so check it only if NEWNUMBERING is used + if (h.flags & RAR_MAIN_FIRSTVOLUME) == 0: + raise NeedFirstVolume("Need to start from first volume") + if h.flags & RAR_MAIN_PASSWORD: + self._needs_password = True + if not self._password: + self._main = None + break + elif h.type == RAR_BLOCK_ENDARC: + more_vols = h.flags & RAR_ENDARC_NEXT_VOLUME + endarc = 1 + elif h.type == RAR_BLOCK_FILE: + # RAR 2.x does not write RAR_BLOCK_ENDARC + if h.flags & RAR_FILE_SPLIT_AFTER: + more_vols = 1 + # RAR 2.x does not set RAR_MAIN_FIRSTVOLUME + if volume == 0 and h.flags & RAR_FILE_SPLIT_BEFORE: + raise NeedFirstVolume("Need to start from first volume") + + # store it + self._process_entry(h) + + # go to next header + if h.add_size > 0: + fd.seek(h.file_offset + h.add_size, 0) + + # AES encrypted headers + _last_aes_key = (None, None, None) # (salt, key, iv) + def _decrypt_header(self, fd): + if not _have_crypto: + raise NoCrypto('Cannot parse encrypted headers - no crypto') + salt = fd.read(8) + if self._last_aes_key[0] == salt: + key, iv = self._last_aes_key[1:] + else: + key, iv = rar3_s2k(self._password, salt) + self._last_aes_key = (salt, key, iv) + return HeaderDecrypt(fd, key, iv) + + # read single header + def _parse_header(self, fd): + try: + # handle encrypted headers + if self._main and self._main.flags & RAR_MAIN_PASSWORD: + if not self._password: + return + fd = self._decrypt_header(fd) + + # now read actual header + return self._parse_block_header(fd) + except struct.error: + self._set_error('Broken header in RAR file') + return None + + # common header + def _parse_block_header(self, fd): + h = RarInfo() + 
h.header_offset = fd.tell() + h.comment = None + + # read and parse base header + buf = fd.read(S_BLK_HDR.size) + if not buf: + return None + t = S_BLK_HDR.unpack_from(buf) + h.header_crc, h.type, h.flags, h.header_size = t + h.header_base = S_BLK_HDR.size + pos = S_BLK_HDR.size + + # read full header + if h.header_size > S_BLK_HDR.size: + h.header_data = buf + fd.read(h.header_size - S_BLK_HDR.size) + else: + h.header_data = buf + h.file_offset = fd.tell() + + # unexpected EOF? + if len(h.header_data) != h.header_size: + self._set_error('Unexpected EOF when reading header') + return None + + # block has data assiciated with it? + if h.flags & RAR_LONG_BLOCK: + h.add_size = S_LONG.unpack_from(h.header_data, pos)[0] + else: + h.add_size = 0 + + # parse interesting ones, decide header boundaries for crc + if h.type == RAR_BLOCK_MARK: + return h + elif h.type == RAR_BLOCK_MAIN: + h.header_base += 6 + if h.flags & RAR_MAIN_ENCRYPTVER: + h.header_base += 1 + if h.flags & RAR_MAIN_COMMENT: + self._parse_subblocks(h, h.header_base) + self.comment = h.comment + elif h.type == RAR_BLOCK_FILE: + self._parse_file_header(h, pos) + elif h.type == RAR_BLOCK_SUB: + self._parse_file_header(h, pos) + h.header_base = h.header_size + elif h.type == RAR_BLOCK_OLD_AUTH: + h.header_base += 8 + elif h.type == RAR_BLOCK_OLD_EXTRA: + h.header_base += 7 + else: + h.header_base = h.header_size + + # check crc + if h.type == RAR_BLOCK_OLD_SUB: + crcdat = h.header_data[2:] + fd.read(h.add_size) + else: + crcdat = h.header_data[2:h.header_base] + + calc_crc = crc32(crcdat) & 0xFFFF + + # return good header + if h.header_crc == calc_crc: + return h + + # header parsing failed. 
+ self._set_error('Header CRC error (%02x): exp=%x got=%x (xlen = %d)', + h.type, h.header_crc, calc_crc, len(crcdat)) + + # instead panicing, send eof + return None + + # read file-specific header + def _parse_file_header(self, h, pos): + fld = S_FILE_HDR.unpack_from(h.header_data, pos) + h.compress_size = fld[0] + h.file_size = fld[1] + h.host_os = fld[2] + h.CRC = fld[3] + h.date_time = parse_dos_time(fld[4]) + h.extract_version = fld[5] + h.compress_type = fld[6] + h.name_size = fld[7] + h.mode = fld[8] + pos += S_FILE_HDR.size + + if h.flags & RAR_FILE_LARGE: + h1 = S_LONG.unpack_from(h.header_data, pos)[0] + h2 = S_LONG.unpack_from(h.header_data, pos + 4)[0] + h.compress_size |= h1 << 32 + h.file_size |= h2 << 32 + pos += 8 + h.add_size = h.compress_size + + name = h.header_data[pos : pos + h.name_size ] + pos += h.name_size + if h.flags & RAR_FILE_UNICODE: + nul = name.find(ZERO) + h.orig_filename = name[:nul] + u = UnicodeFilename(h.orig_filename, name[nul + 1 : ]) + h.filename = u.decode() + + # if parsing failed fall back to simple name + if u.failed: + h.filename = self._decode(h.orig_filename) + else: + h.orig_filename = name + h.filename = self._decode(name) + + # change separator, if requested + if PATH_SEP != '\\': + h.filename = h.filename.replace('\\', PATH_SEP) + + if h.flags & RAR_FILE_SALT: + h.salt = h.header_data[pos : pos + 8] + pos += 8 + else: + h.salt = None + + # optional extended time stamps + if h.flags & RAR_FILE_EXTTIME: + pos = self._parse_ext_time(h, pos) + else: + h.mtime = h.atime = h.ctime = h.arctime = None + + # base header end + h.header_base = pos + + if h.flags & RAR_FILE_COMMENT: + self._parse_subblocks(h, pos) + + # convert timestamps + if USE_DATETIME: + h.date_time = to_datetime(h.date_time) + h.mtime = to_datetime(h.mtime) + h.atime = to_datetime(h.atime) + h.ctime = to_datetime(h.ctime) + h.arctime = to_datetime(h.arctime) + + # .mtime is .date_time with more precision + if h.mtime: + if USE_DATETIME: + h.date_time = 
h.mtime + else: + # keep seconds int + h.date_time = h.mtime[:5] + (int(h.mtime[5]),) + + return pos + + # find old-style comment subblock + def _parse_subblocks(self, h, pos): + hdata = h.header_data + while pos < len(hdata): + # ordinary block header + t = S_BLK_HDR.unpack_from(hdata, pos) + scrc, stype, sflags, slen = t + pos_next = pos + slen + pos += S_BLK_HDR.size + + # corrupt header + if pos_next < pos: + break + + # followed by block-specific header + if stype == RAR_BLOCK_OLD_COMMENT and pos + S_COMMENT_HDR.size <= pos_next: + declen, ver, meth, crc = S_COMMENT_HDR.unpack_from(hdata, pos) + pos += S_COMMENT_HDR.size + data = hdata[pos : pos_next] + cmt = rar_decompress(ver, meth, data, declen, sflags, + crc, self._password) + if not self._crc_check: + h.comment = self._decode_comment(cmt) + elif crc32(cmt) & 0xFFFF == crc: + h.comment = self._decode_comment(cmt) + + pos = pos_next + + def _parse_ext_time(self, h, pos): + data = h.header_data + + # flags and rest of data can be missing + flags = 0 + if pos + 2 <= len(data): + flags = S_SHORT.unpack_from(data, pos)[0] + pos += 2 + + h.mtime, pos = self._parse_xtime(flags >> 3*4, data, pos, h.date_time) + h.ctime, pos = self._parse_xtime(flags >> 2*4, data, pos) + h.atime, pos = self._parse_xtime(flags >> 1*4, data, pos) + h.arctime, pos = self._parse_xtime(flags >> 0*4, data, pos) + return pos + + def _parse_xtime(self, flag, data, pos, dostime = None): + unit = 10000000.0 # 100 ns units + if flag & 8: + if not dostime: + t = S_LONG.unpack_from(data, pos)[0] + dostime = parse_dos_time(t) + pos += 4 + rem = 0 + cnt = flag & 3 + for i in range(cnt): + b = S_BYTE.unpack_from(data, pos)[0] + rem = (b << 16) | (rem >> 8) + pos += 1 + sec = dostime[5] + rem / unit + if flag & 4: + sec += 1 + dostime = dostime[:5] + (sec,) + return dostime, pos + + # given current vol name, construct next one + def _next_volname(self, volfile): + if is_filelike(volfile): + raise IOError("Working on single FD") + if 
self._main.flags & RAR_MAIN_NEWNUMBERING: + return self._next_newvol(volfile) + return self._next_oldvol(volfile) + + # new-style next volume + def _next_newvol(self, volfile): + i = len(volfile) - 1 + while i >= 0: + if volfile[i] >= '0' and volfile[i] <= '9': + return self._inc_volname(volfile, i) + i -= 1 + raise BadRarName("Cannot construct volume name: "+volfile) + + # old-style next volume + def _next_oldvol(self, volfile): + # rar -> r00 + if volfile[-4:].lower() == '.rar': + return volfile[:-2] + '00' + return self._inc_volname(volfile, len(volfile) - 1) + + # increase digits with carry, otherwise just increment char + def _inc_volname(self, volfile, i): + fn = list(volfile) + while i >= 0: + if fn[i] != '9': + fn[i] = chr(ord(fn[i]) + 1) + break + fn[i] = '0' + i -= 1 + return ''.join(fn) + + def _open_clear(self, inf): + return DirectReader(self, inf) + + # put file compressed data into temporary .rar archive, and run + # unrar on that, thus avoiding unrar going over whole archive + def _open_hack(self, inf, psw = None): + BSIZE = 32*1024 + + size = inf.compress_size + inf.header_size + rf = XFile(inf.volume_file, 0) + rf.seek(inf.header_offset) + + tmpfd, tmpname = mkstemp(suffix='.rar') + tmpf = os.fdopen(tmpfd, "wb") + + try: + # create main header: crc, type, flags, size, res1, res2 + mh = S_BLK_HDR.pack(0x90CF, 0x73, 0, 13) + ZERO * (2+4) + tmpf.write(RAR_ID + mh) + while size > 0: + if size > BSIZE: + buf = rf.read(BSIZE) + else: + buf = rf.read(size) + if not buf: + raise BadRarFile('read failed: ' + inf.filename) + tmpf.write(buf) + size -= len(buf) + tmpf.close() + rf.close() + except: + rf.close() + tmpf.close() + os.unlink(tmpname) + raise + + return self._open_unrar(tmpname, inf, psw, tmpname) + + def _read_comment_v3(self, inf, psw=None): + + # read data + rf = XFile(inf.volume_file) + rf.seek(inf.file_offset) + data = rf.read(inf.compress_size) + rf.close() + + # decompress + cmt = rar_decompress(inf.extract_version, inf.compress_type, data, 
+ inf.file_size, inf.flags, inf.CRC, psw, inf.salt) + + # check crc + if self._crc_check: + crc = crc32(cmt) + if crc < 0: + crc += (1 << 32) + if crc != inf.CRC: + return None + + return self._decode_comment(cmt) + + # write in-memory archive to temp file - needed for solid archives + def _open_unrar_membuf(self, memfile, inf, psw): + tmpname = membuf_tempfile(memfile) + return self._open_unrar(tmpname, inf, psw, tmpname) + + # extract using unrar + def _open_unrar(self, rarfile, inf, psw = None, tmpfile = None): + if is_filelike(rarfile): + raise ValueError("Cannot use unrar directly on memory buffer") + cmd = [UNRAR_TOOL] + list(OPEN_ARGS) + add_password_arg(cmd, psw) + cmd.append("--") + cmd.append(rarfile) + + # not giving filename avoids encoding related problems + if not tmpfile: + fn = inf.filename + if PATH_SEP != os.sep: + fn = fn.replace(PATH_SEP, os.sep) + cmd.append(fn) + + # read from unrar pipe + return PipeReader(self, inf, cmd, tmpfile) + + def _decode(self, val): + for c in TRY_ENCODINGS: + try: + return val.decode(c) + except UnicodeError: + pass + return val.decode(self._charset, 'replace') + + def _decode_comment(self, val): + if UNICODE_COMMENTS: + return self._decode(val) + return val + + # call unrar to extract a file + def _extract(self, fnlist, path=None, psw=None): + cmd = [UNRAR_TOOL] + list(EXTRACT_ARGS) + + # pasoword + psw = psw or self._password + add_password_arg(cmd, psw) + cmd.append('--') + + # rar file + if is_filelike(self.rarfile): + tmpname = membuf_tempfile(self.rarfile) + cmd.append(tmpname) + else: + tmpname = None + cmd.append(self.rarfile) + + # file list + for fn in fnlist: + if os.sep != PATH_SEP: + fn = fn.replace(PATH_SEP, os.sep) + cmd.append(fn) + + # destination path + if path is not None: + cmd.append(path + os.sep) + + # call + try: + p = custom_popen(cmd) + output = p.communicate()[0] + check_returncode(p, output) + finally: + if tmpname: + os.unlink(tmpname) + +## +## Utility classes +## + +class 
UnicodeFilename(object): + """Handle unicode filename decompression""" + + def __init__(self, name, encdata): + self.std_name = bytearray(name) + self.encdata = bytearray(encdata) + self.pos = self.encpos = 0 + self.buf = bytearray() + self.failed = 0 + + def enc_byte(self): + try: + c = self.encdata[self.encpos] + self.encpos += 1 + return c + except IndexError: + self.failed = 1 + return 0 + + def std_byte(self): + try: + return self.std_name[self.pos] + except IndexError: + self.failed = 1 + return ord('?') + + def put(self, lo, hi): + self.buf.append(lo) + self.buf.append(hi) + self.pos += 1 + + def decode(self): + hi = self.enc_byte() + flagbits = 0 + while self.encpos < len(self.encdata): + if flagbits == 0: + flags = self.enc_byte() + flagbits = 8 + flagbits -= 2 + t = (flags >> flagbits) & 3 + if t == 0: + self.put(self.enc_byte(), 0) + elif t == 1: + self.put(self.enc_byte(), hi) + elif t == 2: + self.put(self.enc_byte(), self.enc_byte()) + else: + n = self.enc_byte() + if n & 0x80: + c = self.enc_byte() + for i in range((n & 0x7f) + 2): + lo = (self.std_byte() + c) & 0xFF + self.put(lo, hi) + else: + for i in range(n + 2): + self.put(self.std_byte(), 0) + return self.buf.decode("utf-16le", "replace") + + +class RarExtFile(RawIOBase): + """Base class for file-like object that :meth:`RarFile.open` returns. + + Provides public methods and common crc checking. + + Behaviour: + - no short reads - .read() and .readinfo() read as much as requested. + - no internal buffer, use io.BufferedReader for that. + + If :mod:`io` module is available (Python 2.6+, 3.x), then this calls + will inherit from :class:`io.RawIOBase` class. This makes line-based + access available: :meth:`RarExtFile.readline` and ``for ln in f``. 
+ """ + + #: Filename of the archive entry + name = None + + def __init__(self, rf, inf): + super(RarExtFile, self).__init__() + + # standard io.* properties + self.name = inf.filename + self.mode = 'rb' + + self.rf = rf + self.inf = inf + self.crc_check = rf._crc_check + self.fd = None + self.CRC = 0 + self.remain = 0 + self.returncode = 0 + + self._open() + + def _open(self): + if self.fd: + self.fd.close() + self.fd = None + self.CRC = 0 + self.remain = self.inf.file_size + + def read(self, cnt = None): + """Read all or specified amount of data from archive entry.""" + + # sanitize cnt + if cnt is None or cnt < 0: + cnt = self.remain + elif cnt > self.remain: + cnt = self.remain + if cnt == 0: + return EMPTY + + # actual read + data = self._read(cnt) + if data: + self.CRC = crc32(data, self.CRC) + self.remain -= len(data) + if len(data) != cnt: + raise BadRarFile("Failed the read enough data") + + # done? + if not data or self.remain == 0: + #self.close() + self._check() + return data + + def _check(self): + """Check final CRC.""" + if not self.crc_check: + return + if self.returncode: + check_returncode(self, '') + if self.remain != 0: + raise BadRarFile("Failed the read enough data") + crc = self.CRC + if crc < 0: + crc += (1 << 32) + if crc != self.inf.CRC: + raise BadRarFile("Corrupt file - CRC check failed: " + self.inf.filename) + + def _read(self, cnt): + """Actual read that gets sanitized cnt.""" + + def close(self): + """Close open resources.""" + + super(RarExtFile, self).close() + + if self.fd: + self.fd.close() + self.fd = None + + def __del__(self): + """Hook delete to make sure tempfile is removed.""" + self.close() + + def readinto(self, buf): + """Zero-copy read directly into buffer. + + Returns bytes read. 
+ """ + + data = self.read(len(buf)) + n = len(data) + try: + buf[:n] = data + except TypeError: + import array + if not isinstance(buf, array.array): + raise + buf[:n] = array.array(buf.typecode, data) + return n + + def tell(self): + """Return current reading position in uncompressed data.""" + return self.inf.file_size - self.remain + + def seek(self, ofs, whence = 0): + """Seek in data. + + On uncompressed files, the seeking works by actual + seeks so it's fast. On compresses files its slow + - forward seeking happends by reading ahead, + backwards by re-opening and decompressing from the start. + """ + + # disable crc check when seeking + self.crc_check = 0 + + fsize = self.inf.file_size + cur_ofs = self.tell() + + if whence == 0: # seek from beginning of file + new_ofs = ofs + elif whence == 1: # seek from current position + new_ofs = cur_ofs + ofs + elif whence == 2: # seek from end of file + new_ofs = fsize + ofs + else: + raise ValueError('Invalid value for whence') + + # sanity check + if new_ofs < 0: + new_ofs = 0 + elif new_ofs > fsize: + new_ofs = fsize + + # do the actual seek + if new_ofs >= cur_ofs: + self._skip(new_ofs - cur_ofs) + else: + # process old data ? + #self._skip(fsize - cur_ofs) + # reopen and seek + self._open() + self._skip(new_ofs) + return self.tell() + + def _skip(self, cnt): + """Read and discard data""" + while cnt > 0: + if cnt > 8192: + buf = self.read(8192) + else: + buf = self.read(cnt) + if not buf: + break + cnt -= len(buf) + + def readable(self): + """Returns True""" + return True + + def writable(self): + """Returns False. + + Writing is not supported.""" + return False + + def seekable(self): + """Returns True. + + Seeking is supported, although it's slow on compressed files. 
+ """ + return True + + def readall(self): + """Read all remaining data""" + # avoid RawIOBase default impl + return self.read() + + +class PipeReader(RarExtFile): + """Read data from pipe, handle tempfile cleanup.""" + + def __init__(self, rf, inf, cmd, tempfile=None): + self.cmd = cmd + self.proc = None + self.tempfile = tempfile + super(PipeReader, self).__init__(rf, inf) + + def _close_proc(self): + if not self.proc: + return + if self.proc.stdout: + self.proc.stdout.close() + if self.proc.stdin: + self.proc.stdin.close() + if self.proc.stderr: + self.proc.stderr.close() + self.proc.wait() + self.returncode = self.proc.returncode + self.proc = None + + def _open(self): + super(PipeReader, self)._open() + + # stop old process + self._close_proc() + + # launch new process + self.returncode = 0 + self.proc = custom_popen(self.cmd) + self.fd = self.proc.stdout + + # avoid situation where unrar waits on stdin + if self.proc.stdin: + self.proc.stdin.close() + + def _read(self, cnt): + """Read from pipe.""" + + # normal read is usually enough + data = self.fd.read(cnt) + if len(data) == cnt or not data: + return data + + # short read, try looping + buf = [data] + cnt -= len(data) + while cnt > 0: + data = self.fd.read(cnt) + if not data: + break + cnt -= len(data) + buf.append(data) + return EMPTY.join(buf) + + def close(self): + """Close open resources.""" + + self._close_proc() + super(PipeReader, self).close() + + if self.tempfile: + try: + os.unlink(self.tempfile) + except OSError: + pass + self.tempfile = None + + def readinto(self, buf): + """Zero-copy read directly into buffer.""" + cnt = len(buf) + if cnt > self.remain: + cnt = self.remain + vbuf = memoryview(buf) + res = got = 0 + while got < cnt: + res = self.fd.readinto(vbuf[got : cnt]) + if not res: + break + if self.crc_check: + self.CRC = crc32(vbuf[got : got + res], self.CRC) + self.remain -= res + got += res + return got + + +class DirectReader(RarExtFile): + """Read uncompressed data directly from 
archive.""" + + def _open(self): + super(DirectReader, self)._open() + + self.volfile = self.inf.volume_file + self.fd = XFile(self.volfile, 0) + self.fd.seek(self.inf.header_offset, 0) + self.cur = self.rf._parse_header(self.fd) + self.cur_avail = self.cur.add_size + + def _skip(self, cnt): + """RAR Seek, skipping through rar files to get to correct position + """ + + while cnt > 0: + # next vol needed? + if self.cur_avail == 0: + if not self._open_next(): + break + + # fd is in read pos, do the read + if cnt > self.cur_avail: + cnt -= self.cur_avail + self.remain -= self.cur_avail + self.cur_avail = 0 + else: + self.fd.seek(cnt, 1) + self.cur_avail -= cnt + self.remain -= cnt + cnt = 0 + + def _read(self, cnt): + """Read from potentially multi-volume archive.""" + + buf = [] + while cnt > 0: + # next vol needed? + if self.cur_avail == 0: + if not self._open_next(): + break + + # fd is in read pos, do the read + if cnt > self.cur_avail: + data = self.fd.read(self.cur_avail) + else: + data = self.fd.read(cnt) + if not data: + break + + # got some data + cnt -= len(data) + self.cur_avail -= len(data) + buf.append(data) + + if len(buf) == 1: + return buf[0] + return EMPTY.join(buf) + + def _open_next(self): + """Proceed to next volume.""" + + # is the file split over archives? 
+ if (self.cur.flags & RAR_FILE_SPLIT_AFTER) == 0: + return False + + if self.fd: + self.fd.close() + self.fd = None + + # open next part + self.volfile = self.rf._next_volname(self.volfile) + fd = open(self.volfile, "rb", 0) + self.fd = fd + + # loop until first file header + while 1: + cur = self.rf._parse_header(fd) + if not cur: + raise BadRarFile("Unexpected EOF") + if cur.type in (RAR_BLOCK_MARK, RAR_BLOCK_MAIN): + if cur.add_size: + fd.seek(cur.add_size, 1) + continue + if cur.orig_filename != self.inf.orig_filename: + raise BadRarFile("Did not found file entry") + self.cur = cur + self.cur_avail = cur.add_size + return True + + def readinto(self, buf): + """Zero-copy read directly into buffer.""" + got = 0 + vbuf = memoryview(buf) + while got < len(buf): + # next vol needed? + if self.cur_avail == 0: + if not self._open_next(): + break + + # length for next read + cnt = len(buf) - got + if cnt > self.cur_avail: + cnt = self.cur_avail + + # read into temp view + res = self.fd.readinto(vbuf[got : got + cnt]) + if not res: + break + if self.crc_check: + self.CRC = crc32(vbuf[got : got + res], self.CRC) + self.cur_avail -= res + self.remain -= res + got += res + return got + + +class HeaderDecrypt(object): + """File-like object that decrypts from another file""" + def __init__(self, f, key, iv): + self.f = f + self.ciph = AES_CBC_Decrypt(key, iv) + self.buf = EMPTY + + def tell(self): + return self.f.tell() + + def read(self, cnt=None): + if cnt > 8*1024: + raise BadRarFile('Bad count to header decrypt - wrong password?') + + # consume old data + if cnt <= len(self.buf): + res = self.buf[:cnt] + self.buf = self.buf[cnt:] + return res + res = self.buf + self.buf = EMPTY + cnt -= len(res) + + # decrypt new data + BLK = self.ciph.block_size + while cnt > 0: + enc = self.f.read(BLK) + if len(enc) < BLK: + break + dec = self.ciph.decrypt(enc) + if cnt >= len(dec): + res += dec + cnt -= len(dec) + else: + res += dec[:cnt] + self.buf = dec[cnt:] + cnt = 0 + + return 
res
+
+# handle (filename|filelike) object
+class XFile(object):
+    __slots__ = ('_fd', '_need_close')
+    def __init__(self, xfile, bufsize = 1024):
+        if is_filelike(xfile):
+            self._need_close = False
+            self._fd = xfile
+            self._fd.seek(0)
+        else:
+            self._need_close = True
+            self._fd = open(xfile, 'rb', bufsize)
+    def read(self, n=None):
+        return self._fd.read(n)
+    def tell(self):
+        return self._fd.tell()
+    def seek(self, ofs, whence=0):
+        return self._fd.seek(ofs, whence)
+    def readinto(self, dst):
+        return self._fd.readinto(dst)
+    def close(self):
+        if self._need_close:
+            self._fd.close()
+    def __enter__(self):
+        return self
+    def __exit__(self, typ, val, tb):
+        self.close()
+
+##
+## Utility functions
+##
+
+def is_filelike(obj):
+    if isinstance(obj, str) or isinstance(obj, unicode):
+        return False
+    res = True
+    for a in ('read', 'tell', 'seek'):
+        res = res and hasattr(obj, a)
+    if not res:
+        raise ValueError("Invalid object passed as file")
+    return True
+
+def rar3_s2k(psw, salt):
+    """String-to-key hash for RAR3."""
+
+    seed = psw.encode('utf-16le') + salt
+    iv = EMPTY
+    h = sha1()
+    for i in range(16):
+        for j in range(0x4000):
+            cnt = S_LONG.pack(i*0x4000 + j)
+            h.update(seed + cnt[:3])
+            if j == 0:
+                iv += h.digest()[19:20]
+    key_be = h.digest()[:16]
+    key_le = pack("<LLLL", *unpack(">LLLL", key_be))
+    return key_le, iv
+
+def rar_decompress(vers, meth, data, declen=0, flags=0, crc=0, psw=None, salt=None):
+    """Decompress blob of compressed data.
+
+    Used for data with non-standard header - eg. comments.
+    """
+
+    # already uncompressed?
+ if meth == RAR_M0 and (flags & RAR_FILE_PASSWORD) == 0: + return data + + # take only necessary flags + flags = flags & (RAR_FILE_PASSWORD | RAR_FILE_SALT | RAR_FILE_DICTMASK) + flags |= RAR_LONG_BLOCK + + # file header + fname = b'data' + date = 0 + mode = 0x20 + fhdr = S_FILE_HDR.pack(len(data), declen, RAR_OS_MSDOS, crc, + date, vers, meth, len(fname), mode) + fhdr += fname + if flags & RAR_FILE_SALT: + if not salt: + return EMPTY + fhdr += salt + + # full header + hlen = S_BLK_HDR.size + len(fhdr) + hdr = S_BLK_HDR.pack(0, RAR_BLOCK_FILE, flags, hlen) + fhdr + hcrc = crc32(hdr[2:]) & 0xFFFF + hdr = S_BLK_HDR.pack(hcrc, RAR_BLOCK_FILE, flags, hlen) + fhdr + + # archive main header + mh = S_BLK_HDR.pack(0x90CF, RAR_BLOCK_MAIN, 0, 13) + ZERO * (2+4) + + # decompress via temp rar + tmpfd, tmpname = mkstemp(suffix='.rar') + tmpf = os.fdopen(tmpfd, "wb") + try: + tmpf.write(RAR_ID + mh + hdr + data) + tmpf.close() + + cmd = [UNRAR_TOOL] + list(OPEN_ARGS) + add_password_arg(cmd, psw, (flags & RAR_FILE_PASSWORD)) + cmd.append(tmpname) + + p = custom_popen(cmd) + return p.communicate()[0] + finally: + tmpf.close() + os.unlink(tmpname) + +def to_datetime(t): + """Convert 6-part time tuple into datetime object.""" + + if t is None: + return None + + # extract values + year, mon, day, h, m, xs = t + s = int(xs) + us = int(1000000 * (xs - s)) + + # assume the values are valid + try: + return datetime(year, mon, day, h, m, s, us) + except ValueError: + pass + + # sanitize invalid values + MDAY = (0, 31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31) + if mon < 1: mon = 1 + if mon > 12: mon = 12 + if day < 1: day = 1 + if day > MDAY[mon]: day = MDAY[mon] + if h > 23: h = 23 + if m > 59: m = 59 + if s > 59: s = 59 + if mon == 2 and day == 29: + try: + return datetime(year, mon, day, h, m, s, us) + except ValueError: + day = 28 + return datetime(year, mon, day, h, m, s, us) + +def parse_dos_time(stamp): + """Parse standard 32-bit DOS timestamp.""" + + sec = stamp & 0x1F; stamp = 
stamp >> 5 + min = stamp & 0x3F; stamp = stamp >> 6 + hr = stamp & 0x1F; stamp = stamp >> 5 + day = stamp & 0x1F; stamp = stamp >> 5 + mon = stamp & 0x0F; stamp = stamp >> 4 + yr = (stamp & 0x7F) + 1980 + return (yr, mon, day, hr, min, sec * 2) + +def custom_popen(cmd): + """Disconnect cmd from parent fds, read only from stdout.""" + + # needed for py2exe + creationflags = 0 + if sys.platform == 'win32': + creationflags = 0x08000000 # CREATE_NO_WINDOW + + # run command + try: + p = Popen(cmd, bufsize = 0, + stdout = PIPE, stdin = PIPE, stderr = STDOUT, + creationflags = creationflags) + except OSError as ex: + if ex.errno == errno.ENOENT: + raise RarCannotExec("Unrar not installed? (rarfile.UNRAR_TOOL=%r)" % UNRAR_TOOL) + raise + return p + +def custom_check(cmd, ignore_retcode=False): + """Run command, collect output, raise error if needed.""" + p = custom_popen(cmd) + out, err = p.communicate() + if p.returncode and not ignore_retcode: + raise RarExecError("Check-run failed") + return out + +def add_password_arg(cmd, psw, required=False): + """Append password switch to commandline.""" + if UNRAR_TOOL == ALT_TOOL: + return + if psw is not None: + cmd.append('-p' + psw) + else: + cmd.append('-p-') + +def check_returncode(p, out): + """Raise exception according to unrar exit code""" + + code = p.returncode + if code == 0: + return + + # map return code to exception class + errmap = [None, + RarWarning, RarFatalError, RarCRCError, RarLockedArchiveError, + RarWriteError, RarOpenError, RarUserError, RarMemoryError, + RarCreateError, RarNoFilesError] # codes from rar.txt + if UNRAR_TOOL == ALT_TOOL: + errmap = [None] + if code > 0 and code < len(errmap): + exc = errmap[code] + elif code == 255: + exc = RarUserBreak + elif code < 0: + exc = RarSignalExit + else: + exc = RarUnknownError + + # format message + if out: + msg = "%s [%d]: %s" % (exc.__doc__, p.returncode, out) + else: + msg = "%s [%d]" % (exc.__doc__, p.returncode) + + raise exc(msg) + +def 
membuf_tempfile(memfile): + memfile.seek(0, 0) + + tmpfd, tmpname = mkstemp(suffix='.rar') + tmpf = os.fdopen(tmpfd, "wb") + + try: + BSIZE = 32*1024 + while True: + buf = memfile.read(BSIZE) + if not buf: + break + tmpf.write(buf) + tmpf.close() + return tmpname + except: + tmpf.close() + os.unlink(tmpname) + raise + +# +# Check if unrar works +# + +try: + # does UNRAR_TOOL work? + custom_check([UNRAR_TOOL], True) +except RarCannotExec: + try: + # does ALT_TOOL work? + custom_check([ALT_TOOL] + list(ALT_CHECK_ARGS), True) + # replace config + UNRAR_TOOL = ALT_TOOL + OPEN_ARGS = ALT_OPEN_ARGS + EXTRACT_ARGS = ALT_EXTRACT_ARGS + TEST_ARGS = ALT_TEST_ARGS + except RarCannotExec: + # no usable tool, only uncompressed archives work + pass + diff --git a/libs/rarfile1/LICENSE b/libs/rarfile1/LICENSE new file mode 100644 index 00000000..cd53af08 --- /dev/null +++ b/libs/rarfile1/LICENSE @@ -0,0 +1,15 @@ + +Copyright (c) 2005-2016 Marko Kreen + +Permission to use, copy, modify, and/or distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ diff --git a/libs/rarfile1/MANIFEST.in b/libs/rarfile1/MANIFEST.in new file mode 100644 index 00000000..6d1f1f6b --- /dev/null +++ b/libs/rarfile1/MANIFEST.in @@ -0,0 +1,3 @@ +include README.rst Makefile MANIFEST.in LICENSE dumprar.py +include doc/*.rst doc/Makefile doc/conf.py doc/make.bat +include test/Makefile test/*.sh test/files/*.rar test/files/*.exp diff --git a/libs/rarfile1/Makefile b/libs/rarfile1/Makefile new file mode 100644 index 00000000..45e3c2b7 --- /dev/null +++ b/libs/rarfile1/Makefile @@ -0,0 +1,31 @@ + +prefix = /usr/local + +all: + python setup.py build + +install: + python setup.py install --prefix=$(prefix) + +tgz: clean + python setup.py sdist + +clean: + rm -rf __pycache__ build dist + rm -f *.pyc MANIFEST *.orig *.rej *.html *.class + rm -rf doc/_build doc/_static doc/_templates + make -C test clean + +html: + rst2html README.rst > README.html + make -C doc html + +lint: + pylint -E rarfile.py + +rbuild: + curl -X POST https://readthedocs.org/build/6715 + +upload: + python setup.py sdist upload + diff --git a/libs/rarfile1/PKG-INFO b/libs/rarfile1/PKG-INFO new file mode 100644 index 00000000..282b56d3 --- /dev/null +++ b/libs/rarfile1/PKG-INFO @@ -0,0 +1,56 @@ +Metadata-Version: 1.1 +Name: rarfile +Version: 2.8 +Summary: RAR archive reader for Python +Home-page: https://github.com/markokr/rarfile +Author: Marko Kreen +Author-email: markokr@gmail.com +License: ISC +Description: rarfile - RAR archive reader for Python + ======================================= + + This is Python module for RAR_ archive reading. The interface + is made as zipfile_ like as possible. Licensed under ISC_ + license. + + Features: + + - Supports both RAR2 and RAR3 archives (WinRAR 2.x .. WinRAR 4.x). + - Supports multi volume archives. + - Supports Unicode filenames. + - Supports password-protected archives. + - Supports archive and file comments. + - Archive parsing and non-compressed files are handled in pure Python code. 
+ - Compressed files are extracted by executing external tool: either ``unrar`` + from RARLAB_ or ``bsdtar`` from libarchive_. + - Works with both Python 2.7 and 3.x. + + Notes: + + - Does not support the RAR5 format introduced in WinRAR 5.0. + - ``bsdtar`` does not support all RAR3 features. + + Links: + + - `Documentation`_ + - `Downloads`_ + - `Git`_ repo + + .. _RAR: https://en.wikipedia.org/wiki/RAR_%28file_format%29 + .. _zipfile: https://docs.python.org/2/library/zipfile.html + .. _ISC: https://en.wikipedia.org/wiki/ISC_license + .. _Git: https://github.com/markokr/rarfile + .. _Downloads: https://pypi.python.org/pypi/rarfile + .. _Documentation: https://rarfile.readthedocs.io/ + .. _libarchive: https://github.com/libarchive/libarchive + .. _RARLAB: http://www.rarlab.com/ +Keywords: rar,unrar,archive +Platform: UNKNOWN +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: ISC License (ISCL) +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 3 +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Classifier: Topic :: System :: Archiving :: Compression diff --git a/libs/rarfile1/README.rst b/libs/rarfile1/README.rst new file mode 100644 index 00000000..596ca917 --- /dev/null +++ b/libs/rarfile1/README.rst @@ -0,0 +1,39 @@ + +rarfile - RAR archive reader for Python +======================================= + +This is Python module for RAR_ archive reading. The interface +is made as zipfile_ like as possible. Licensed under ISC_ +license. + +Features: + +- Supports both RAR2 and RAR3 archives (WinRAR 2.x .. WinRAR 4.x). +- Supports multi volume archives. +- Supports Unicode filenames. +- Supports password-protected archives. +- Supports archive and file comments. +- Archive parsing and non-compressed files are handled in pure Python code. 
+- Compressed files are extracted by executing external tool: either ``unrar`` + from RARLAB_ or ``bsdtar`` from libarchive_. +- Works with both Python 2.7 and 3.x. + +Notes: + +- Does not support the RAR5 format introduced in WinRAR 5.0. +- ``bsdtar`` does not support all RAR3 features. + +Links: + +- `Documentation`_ +- `Downloads`_ +- `Git`_ repo + +.. _RAR: https://en.wikipedia.org/wiki/RAR_%28file_format%29 +.. _zipfile: https://docs.python.org/2/library/zipfile.html +.. _ISC: https://en.wikipedia.org/wiki/ISC_license +.. _Git: https://github.com/markokr/rarfile +.. _Downloads: https://pypi.python.org/pypi/rarfile +.. _Documentation: https://rarfile.readthedocs.io/ +.. _libarchive: https://github.com/libarchive/libarchive +.. _RARLAB: http://www.rarlab.com/ diff --git a/libs/rarfile1/doc/Makefile b/libs/rarfile1/doc/Makefile new file mode 100644 index 00000000..d257cf0b --- /dev/null +++ b/libs/rarfile1/doc/Makefile @@ -0,0 +1,153 @@ +# Makefile for Sphinx documentation +# + +# You can set these variables from the command line. +SPHINXOPTS = +SPHINXBUILD = sphinx-build +PAPER = +BUILDDIR = _build + +# Internal variables. +PAPEROPT_a4 = -D latex_paper_size=a4 +PAPEROPT_letter = -D latex_paper_size=letter +ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . +# the i18n builder cannot share the environment and doctrees with the others +I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 
+ +.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext + +help: + @echo "Please use \`make ' where is one of" + @echo " html to make standalone HTML files" + @echo " dirhtml to make HTML files named index.html in directories" + @echo " singlehtml to make a single large HTML file" + @echo " pickle to make pickle files" + @echo " json to make JSON files" + @echo " htmlhelp to make HTML files and a HTML help project" + @echo " qthelp to make HTML files and a qthelp project" + @echo " devhelp to make HTML files and a Devhelp project" + @echo " epub to make an epub" + @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" + @echo " latexpdf to make LaTeX files and run them through pdflatex" + @echo " text to make text files" + @echo " man to make manual pages" + @echo " texinfo to make Texinfo files" + @echo " info to make Texinfo files and run them through makeinfo" + @echo " gettext to make PO message catalogs" + @echo " changes to make an overview of all changed/added/deprecated items" + @echo " linkcheck to check all external links for integrity" + @echo " doctest to run all doctests embedded in the documentation (if enabled)" + +clean: + -rm -rf $(BUILDDIR)/* + +html: + $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html + @echo + @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." + +dirhtml: + $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml + @echo + @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." + +singlehtml: + $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml + @echo + @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." + +pickle: + $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle + @echo + @echo "Build finished; now you can process the pickle files." 
+ +json: + $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json + @echo + @echo "Build finished; now you can process the JSON files." + +htmlhelp: + $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp + @echo + @echo "Build finished; now you can run HTML Help Workshop with the" \ + ".hhp project file in $(BUILDDIR)/htmlhelp." + +qthelp: + $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp + @echo + @echo "Build finished; now you can run "qcollectiongenerator" with the" \ + ".qhcp project file in $(BUILDDIR)/qthelp, like this:" + @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/RarFile.qhcp" + @echo "To view the help file:" + @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/RarFile.qhc" + +devhelp: + $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp + @echo + @echo "Build finished." + @echo "To view the help file:" + @echo "# mkdir -p $$HOME/.local/share/devhelp/RarFile" + @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/RarFile" + @echo "# devhelp" + +epub: + $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub + @echo + @echo "Build finished. The epub file is in $(BUILDDIR)/epub." + +latex: + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo + @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." + @echo "Run \`make' in that directory to run these through (pdf)latex" \ + "(use \`make latexpdf' here to do that automatically)." + +latexpdf: + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo "Running LaTeX files through pdflatex..." + $(MAKE) -C $(BUILDDIR)/latex all-pdf + @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." + +text: + $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text + @echo + @echo "Build finished. The text files are in $(BUILDDIR)/text." + +man: + $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man + @echo + @echo "Build finished. The manual pages are in $(BUILDDIR)/man." 
+ +texinfo: + $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo + @echo + @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." + @echo "Run \`make' in that directory to run these through makeinfo" \ + "(use \`make info' here to do that automatically)." + +info: + $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo + @echo "Running Texinfo files through makeinfo..." + make -C $(BUILDDIR)/texinfo info + @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." + +gettext: + $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale + @echo + @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." + +changes: + $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes + @echo + @echo "The overview file is in $(BUILDDIR)/changes." + +linkcheck: + $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck + @echo + @echo "Link check complete; look for any errors in the above output " \ + "or in $(BUILDDIR)/linkcheck/output.txt." + +doctest: + $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest + @echo "Testing of doctests in the sources finished, look at the " \ + "results in $(BUILDDIR)/doctest/output.txt." diff --git a/libs/rarfile1/doc/api.rst b/libs/rarfile1/doc/api.rst new file mode 100644 index 00000000..9892e8b4 --- /dev/null +++ b/libs/rarfile1/doc/api.rst @@ -0,0 +1,111 @@ + +rarfile API documentation +========================= + +.. contents:: Table Of Contents + +Introduction +------------ + +.. automodule:: rarfile + +RarFile class +------------- + +.. autoclass:: RarFile + :members: + :inherited-members: + +RarInfo class +------------- + +.. autoclass:: RarInfo + :members: + :inherited-members: + +RarExtFile class +---------------- + +.. autoclass:: RarExtFile + :members: + :inherited-members: + +Functions +--------- + +.. autofunction:: is_rarfile + +Module Configuration +-------------------- + +.. autodata:: UNRAR_TOOL +.. autodata:: DEFAULT_CHARSET +.. 
autodata:: TRY_ENCODINGS +.. autodata:: USE_DATETIME +.. autodata:: PATH_SEP +.. autodata:: NEED_COMMENTS +.. autodata:: UNICODE_COMMENTS +.. autodata:: USE_EXTRACT_HACK +.. autodata:: HACK_SIZE_LIMIT + +Constants +--------- + +.. py:data:: RAR_M0 + + No compression. + +.. py:data:: RAR_M1 + + Compression level `-m1` - Fastest compression. + +.. py:data:: RAR_M2 + + Compression level `-m2`. + +.. py:data:: RAR_M3 + + Compression level `-m3`. + +.. py:data:: RAR_M4 + + Compression level `-m4`. + +.. py:data:: RAR_M5 + + Compression level `-m5` - Maximum compression. + +.. py:data:: RAR_OS_MSDOS +.. py:data:: RAR_OS_OS2 +.. py:data:: RAR_OS_WIN32 +.. py:data:: RAR_OS_UNIX +.. py:data:: RAR_OS_MACOS +.. py:data:: RAR_OS_BEOS + +Exceptions +---------- + +.. autoclass:: Error +.. autoclass:: BadRarFile +.. autoclass:: NotRarFile +.. autoclass:: BadRarName +.. autoclass:: NoRarEntry +.. autoclass:: PasswordRequired +.. autoclass:: NeedFirstVolume +.. autoclass:: NoCrypto +.. autoclass:: RarExecError +.. autoclass:: RarWarning +.. autoclass:: RarFatalError +.. autoclass:: RarCRCError +.. autoclass:: RarLockedArchiveError +.. autoclass:: RarWriteError +.. autoclass:: RarOpenError +.. autoclass:: RarUserError +.. autoclass:: RarMemoryError +.. autoclass:: RarCreateError +.. autoclass:: RarNoFilesError +.. autoclass:: RarUserBreak +.. autoclass:: RarUnknownError +.. autoclass:: RarSignalExit + + diff --git a/libs/rarfile1/doc/conf.py b/libs/rarfile1/doc/conf.py new file mode 100644 index 00000000..47094733 --- /dev/null +++ b/libs/rarfile1/doc/conf.py @@ -0,0 +1,249 @@ +# -*- coding: utf-8 -*- +# +# RarFile documentation build configuration file, created by +# sphinx-quickstart on Sun Mar 24 13:29:46 2013. +# +# This file is execfile()d with the current directory set to its containing dir. +# +# Note that not all possible configuration values are present in this +# autogenerated file. 
+# +# All configuration values have a default; values that are commented out +# serve to show the default. + +import sys, os, os.path + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..')) +import rarfile + +# -- General configuration ----------------------------------------------------- + +# If your documentation needs a minimal Sphinx version, state it here. +#needs_sphinx = '1.0' + +# Add any Sphinx extension module names here, as strings. They can be extensions +# coming with Sphinx (named 'sphinx.ext.*') or your custom ones. +extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx'] + +autodoc_member_order = 'bysource' +autoclass_content = 'both' +autodoc_default_flags = ['show-inheritance'] + +intersphinx_mapping = {'python': ('http://docs.python.org/2', None)} + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# The suffix of source filenames. +source_suffix = '.rst' + +# The encoding of source files. +#source_encoding = 'utf-8-sig' + +# The master toctree document. +master_doc = 'index' + +# General information about the project. +project = u'RarFile' +copyright = u'2005-2016, Marko Kreen' + +# The version info for the project you're documenting, acts as replacement for +# |version| and |release|, also used in various other places throughout the +# built documents. +# +# The short X.Y version. +version = rarfile.__version__ +# The full version, including alpha/beta/rc tags. +release = rarfile.__version__ + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. 
+#language = None + +# There are two options for replacing |today|: either, you set today to some +# non-false value, then it is used: +#today = '' +# Else, today_fmt is used as the format for a strftime call. +#today_fmt = '%B %d, %Y' + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +exclude_patterns = ['_build'] + +# The reST default role (used for this markup: `text`) to use for all documents. +#default_role = None + +# If true, '()' will be appended to :func: etc. cross-reference text. +#add_function_parentheses = True + +# If true, the current module name will be prepended to all description +# unit titles (such as .. function::). +#add_module_names = True + +# If true, sectionauthor and moduleauthor directives will be shown in the +# output. They are ignored by default. +#show_authors = False + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = 'sphinx' + +# A list of ignored prefixes for module index sorting. +#modindex_common_prefix = [] + + +# -- Options for HTML output --------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +html_theme = 'default' + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +#html_theme_options = {} + +# Add any paths that contain custom themes here, relative to this directory. +#html_theme_path = [] + +# The name for this set of Sphinx documents. If None, it defaults to +# " v documentation". +#html_title = None + +# A shorter title for the navigation bar. Default is the same as html_title. +#html_short_title = None + +# The name of an image file (relative to this directory) to place at the top +# of the sidebar. 
+#html_logo = None + +# The name of an image file (within the static path) to use as favicon of the +# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 +# pixels large. +#html_favicon = None + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +#html_static_path = ['_static'] + +# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, +# using the given strftime format. +#html_last_updated_fmt = '%b %d, %Y' + +# If true, SmartyPants will be used to convert quotes and dashes to +# typographically correct entities. +#html_use_smartypants = True + +# Custom sidebar templates, maps document names to template names. +#html_sidebars = {} + +# Additional templates that should be rendered to pages, maps page names to +# template names. +#html_additional_pages = {} + +# If false, no module index is generated. +#html_domain_indices = True + +# If false, no index is generated. +#html_use_index = True + +# If true, the index is split into individual pages for each letter. +#html_split_index = False + +# If true, links to the reST sources are added to the pages. +#html_show_sourcelink = True + +# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. +html_show_sphinx = False + +# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. +#html_show_copyright = True + +# If true, an OpenSearch description file will be output, and all pages will +# contain a tag referring to it. The value of this option must be the +# base URL from which the finished HTML is served. +#html_use_opensearch = '' + +# This is the file name suffix for HTML files (e.g. ".xhtml"). +#html_file_suffix = None + +# Output file base name for HTML help builder. 
+#htmlhelp_basename = 'RarFiledoc' + + +# -- Options for LaTeX output -------------------------------------------------- + +latex_elements = { +# The paper size ('letterpaper' or 'a4paper'). +#'papersize': 'letterpaper', + +# The font size ('10pt', '11pt' or '12pt'). +#'pointsize': '10pt', + +# Additional stuff for the LaTeX preamble. +#'preamble': '', +} + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, author, documentclass [howto/manual]). +latex_documents = [ + ('index', 'RarFile.tex', u'RarFile Documentation', + u'Marko Kreen', 'manual'), +] + +# The name of an image file (relative to this directory) to place at the top of +# the title page. +#latex_logo = None + +# For "manual" documents, if this is true, then toplevel headings are parts, +# not chapters. +#latex_use_parts = False + +# If true, show page references after internal links. +#latex_show_pagerefs = False + +# If true, show URL addresses after external links. +#latex_show_urls = False + +# Documents to append as an appendix to all manuals. +#latex_appendices = [] + +# If false, no module index is generated. +#latex_domain_indices = True + + +# -- Options for manual page output -------------------------------------------- + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). +#man_pages = [ +# ('index', 'rarfile', u'RarFile Documentation', +# [u'Marko Kreen'], 1) +#] + +# If true, show URL addresses after external links. +#man_show_urls = False + + +# -- Options for Texinfo output ------------------------------------------------ + +# Grouping the document tree into Texinfo files. 
List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + ('index', 'RarFile', u'RarFile Documentation', + u'Marko Kreen', 'RarFile', 'One line description of project.', + 'Miscellaneous'), +] + +# Documents to append as an appendix to all manuals. +#texinfo_appendices = [] + +# If false, no module index is generated. +#texinfo_domain_indices = True + +# How to display URL addresses: 'footnote', 'no', or 'inline'. +#texinfo_show_urls = 'footnote' diff --git a/libs/rarfile1/doc/faq.rst b/libs/rarfile1/doc/faq.rst new file mode 100644 index 00000000..488b42a4 --- /dev/null +++ b/libs/rarfile1/doc/faq.rst @@ -0,0 +1,87 @@ + +rarfile FAQ +=========== + +.. contents:: Table of Contents + +What are the dependencies? +-------------------------- + +It depends on ``unrar`` command-line utility to do the actual decompression. +Note that by default it expect it to be in ``PATH``. If unrar +launching fails, you need to fix this. + +Alternatively, :mod:`rarfile` can use bsdtar_ from libarchive_ as +decompression backend, but that is a bit problematic as bsdtar_ does not support +all RAR features. + +.. _bsdtar: https://github.com/libarchive/libarchive/wiki/ManPageBsdtar1 +.. _libarchive: http://www.libarchive.org/ + +It depends on cryptography_ or PyCrypto_ modules to process +archives with password-protected headers. + +.. _cryptography: https://pypi.python.org/pypi/cryptography +.. _PyCrypto: https://pypi.python.org/pypi/pycrypto + +Does it parse ``unrar`` output to get archive contents? +------------------------------------------------------- + +No, :mod:`rarfile` parses RAR structure in Python code. Also it can +read uncompressed files from archive without external utility. + +Will rarfile support wrapping unrarlib/unrar.dll/unrar.so in the future? +------------------------------------------------------------------------ + +No. 
The current architecture - parsing in Python and decompression with +command line tools work well across all interesting operating systems +(Windows/Linux/MacOS), wrapping a library does not bring any advantages. + +Simple execution of command-line tools is also legally simpler situation +than linking with external library. + +How can I get it work on Windows? +--------------------------------- + +On Windows the ``unrar.exe`` is not in ``PATH`` so simple ``Popen("unrar ..")`` does not work. +It can be solved several ways: + +1. Add location of ``unrar.exe`` to PATH. +2. Set :data:`rarfile.UNRAR_TOOL` to full path of ``unrar.exe``. +3. Copy ``unrar.exe`` to your program directory. +4. Copy ``unrar.exe`` to system directory that is in PATH, eg. ``C:\Windows``. + +How to avoid the need for user to manually install rarfile/unrar? +----------------------------------------------------------------- + +Include ``rarfile.py`` and/or ``unrar`` with your application. + +Will it support creating RAR archives? +-------------------------------------- + +No. RARLAB_ is not interested in RAR becoming open format +and specifically discourages writing RAR creation software. + +In the meantime use either Zip_ (better compatibility) or 7z_ (better compression) +format for your own archives. + +.. _RARLAB: http://www.rarlab.com/ +.. _Zip: https://en.wikipedia.org/wiki/ZIP_%28file_format%29 +.. _7z: https://en.wikipedia.org/wiki/7z + +What is the USE_EXTRACT_HACK? +----------------------------- + +RarFile uses ``unrar`` to extract compressed files. But when extracting +single file from archive containing many entries, ``unrar`` needs to parse +whole archive until it finds the right entry. This makes random-access +to entries slow. To avoid that, RarFile remembers location of compressed +data for each entry and on read it copies it to temporary archive containing +only data for that one file, thus making ``unrar`` fast. 
+ +The logic is only activated for entries smaller than :data:`rarfile.HACK_SIZE_LIMIT` +(20M by default). Bigger files are accessed directly from RAR. + +Note - it only works for non-solid archives. So if you care about +random access to files in your archive, do not create solid archives. + diff --git a/libs/rarfile1/doc/index.rst b/libs/rarfile1/doc/index.rst new file mode 100644 index 00000000..bbd4a51b --- /dev/null +++ b/libs/rarfile1/doc/index.rst @@ -0,0 +1,42 @@ + +rarfile - RAR archive reader for Python +======================================= + +This is Python module for RAR_ archive reading. The interface +is made as zipfile_ like as possible. Licensed under ISC_ +license. + +.. _RAR: http://en.wikipedia.org/wiki/RAR +.. _zipfile: http://docs.python.org/library/zipfile.html +.. _ISC: http://en.wikipedia.org/wiki/ISC_license + +Features: + +- Supports both RAR 2.x and 3.x archives. +- Supports multi volume archives. +- Supports Unicode filenames. +- Supports password-protected archives. +- Supports archive and file comments. +- Archive parsing and non-compressed files are handled in pure Python code. +- For compressed files runs ``unrar`` utility. +- Works with both Python 2.x and 3.x. + + + +Documentation: + +.. toctree:: + :maxdepth: 1 + + Module Documentation + FAQs + Release News + + +Indices and tables +================== + +* :ref:`genindex` +* :ref:`modindex` +* :ref:`search` + diff --git a/libs/rarfile1/doc/make.bat b/libs/rarfile1/doc/make.bat new file mode 100644 index 00000000..5a239c33 --- /dev/null +++ b/libs/rarfile1/doc/make.bat @@ -0,0 +1,190 @@ +@ECHO OFF + +REM Command file for Sphinx documentation + +if "%SPHINXBUILD%" == "" ( + set SPHINXBUILD=sphinx-build +) +set BUILDDIR=_build +set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% . +set I18NSPHINXOPTS=%SPHINXOPTS% . 
+if NOT "%PAPER%" == "" ( + set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS% + set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS% +) + +if "%1" == "" goto help + +if "%1" == "help" ( + :help + echo.Please use `make ^` where ^ is one of + echo. html to make standalone HTML files + echo. dirhtml to make HTML files named index.html in directories + echo. singlehtml to make a single large HTML file + echo. pickle to make pickle files + echo. json to make JSON files + echo. htmlhelp to make HTML files and a HTML help project + echo. qthelp to make HTML files and a qthelp project + echo. devhelp to make HTML files and a Devhelp project + echo. epub to make an epub + echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter + echo. text to make text files + echo. man to make manual pages + echo. texinfo to make Texinfo files + echo. gettext to make PO message catalogs + echo. changes to make an overview over all changed/added/deprecated items + echo. linkcheck to check all external links for integrity + echo. doctest to run all doctests embedded in the documentation if enabled + goto end +) + +if "%1" == "clean" ( + for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i + del /q /s %BUILDDIR%\* + goto end +) + +if "%1" == "html" ( + %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The HTML pages are in %BUILDDIR%/html. + goto end +) + +if "%1" == "dirhtml" ( + %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml. + goto end +) + +if "%1" == "singlehtml" ( + %SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml. + goto end +) + +if "%1" == "pickle" ( + %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle + if errorlevel 1 exit /b 1 + echo. 
+ echo.Build finished; now you can process the pickle files. + goto end +) + +if "%1" == "json" ( + %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json + if errorlevel 1 exit /b 1 + echo. + echo.Build finished; now you can process the JSON files. + goto end +) + +if "%1" == "htmlhelp" ( + %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp + if errorlevel 1 exit /b 1 + echo. + echo.Build finished; now you can run HTML Help Workshop with the ^ +.hhp project file in %BUILDDIR%/htmlhelp. + goto end +) + +if "%1" == "qthelp" ( + %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp + if errorlevel 1 exit /b 1 + echo. + echo.Build finished; now you can run "qcollectiongenerator" with the ^ +.qhcp project file in %BUILDDIR%/qthelp, like this: + echo.^> qcollectiongenerator %BUILDDIR%\qthelp\RarFile.qhcp + echo.To view the help file: + echo.^> assistant -collectionFile %BUILDDIR%\qthelp\RarFile.ghc + goto end +) + +if "%1" == "devhelp" ( + %SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. + goto end +) + +if "%1" == "epub" ( + %SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The epub file is in %BUILDDIR%/epub. + goto end +) + +if "%1" == "latex" ( + %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex + if errorlevel 1 exit /b 1 + echo. + echo.Build finished; the LaTeX files are in %BUILDDIR%/latex. + goto end +) + +if "%1" == "text" ( + %SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The text files are in %BUILDDIR%/text. + goto end +) + +if "%1" == "man" ( + %SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The manual pages are in %BUILDDIR%/man. + goto end +) + +if "%1" == "texinfo" ( + %SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo + if errorlevel 1 exit /b 1 + echo. 
+ echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo. + goto end +) + +if "%1" == "gettext" ( + %SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The message catalogs are in %BUILDDIR%/locale. + goto end +) + +if "%1" == "changes" ( + %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes + if errorlevel 1 exit /b 1 + echo. + echo.The overview file is in %BUILDDIR%/changes. + goto end +) + +if "%1" == "linkcheck" ( + %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck + if errorlevel 1 exit /b 1 + echo. + echo.Link check complete; look for any errors in the above output ^ +or in %BUILDDIR%/linkcheck/output.txt. + goto end +) + +if "%1" == "doctest" ( + %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest + if errorlevel 1 exit /b 1 + echo. + echo.Testing of doctests in the sources finished, look at the ^ +results in %BUILDDIR%/doctest/output.txt. + goto end +) + +:end diff --git a/libs/rarfile1/doc/news.rst b/libs/rarfile1/doc/news.rst new file mode 100644 index 00000000..85d00f19 --- /dev/null +++ b/libs/rarfile1/doc/news.rst @@ -0,0 +1,243 @@ + +rarfile history +=============== + +.. py:currentmodule:: rarfile + +Version 2.8 (2016-06-07) +------------------------ + +* Fix: support solid archives from in-memory file object. + Full archive will be written out to temp file. + [`#21 `_] + +* Fix: ask unrar stop switches scanning, + to handle archive names starting with "-". + (Alexander Shadchin) + [`#12 `_] + +* Fix: add missing _parse_error variable to RarFile object. + (Gregory Mazzola) + [`#20 `_] + +* Fix: return proper boolean from :meth:`RarInfo.needs_password`. + [`#22 `_] + +* Fix: do not insert non-string rarfile into exception string. + (Tim Muller) + [`#23 `_] + +* Fix: make :meth:`RarFile.extract` and :meth:`RarFile.testrar` + support in-memory archives. + +* Use cryptography_ module as preferred crypto backend. + PyCrypto_ will be used as fallback. 
+ +* Cleanup: remove compat code for Python 2.4/2.5/2.6. + +.. _cryptography: https://pypi.python.org/pypi/cryptography +.. _PyCrypto: https://pypi.python.org/pypi/pycrypto + +Version 2.7 (2014-11-23) +------------------------ + +* Allow use of bsdtar_ as decompression backend. It sits + on top of libarchive_, which has support for reading RAR archives. + + Limitations of ``libarchive`` RAR backend: + + - Does not support solid archives. + - Does not support password-protected archives. + - Does not support "parsing filters" used for audio/image/executable data, + so few non-solid, non-encrypted archives also fail. + + Now :mod:`rarfile` checks if ``unrar`` and if not then tries ``bsdtar``. + If that works, then keeps using it. If not then configuration + stays with ``unrar`` which will then appear in error messages. + +.. _bsdtar: https://github.com/libarchive/libarchive/wiki/ManPageBsdtar1 +.. _libarchive: http://www.libarchive.org/ + +* Both :class:`RarFile` and :func:`is_rarfile` now accept file-like + object. Eg. :class:`io.BytesIO`. Only requirement is that the object + must be seekable. This mirrors similar funtionality in zipfile. + + Based on patch by Chase Zhang. + +* Uniform error handling. :class:`RarFile` accepts ``errors="strict"`` + argument. + + Allow user to tune whether parsing and missing file errors will raise + exception. If error is not raised, the error string can be queried + with :meth:`RarFile.strerror` method. + +Version 2.6 (2013-04-10) +------------------------ + +* Add context manager support for :class:`RarFile` class. + Both :class:`RarFile` and :class:`RarExtFile` support + :keyword:`with` statement now. + (Wentao Han) +* :meth:`RarFile.volumelist` method, returns filenames of archive volumes. +* Re-throw clearer error in case ``unrar`` is not found in ``PATH``. +* Sync new unrar4.x error code from ``rar.txt``. +* Use Sphinx for documentation, push docs to rtfd.org_ + +.. 
_rtfd.org: https://rarfile.readthedocs.org/ + +Version 2.5 (2012-01-19) +------------------------ + +Fixes: + +* :meth:`RarExtFile.read` and :meth:`RarExtFile.readinto` now do looping read + to work properly on short reads. Important for Python 3.2+ where read from pipe + can return short result even on blocking file descriptor. +* Proper error reporting in :meth:`RarFile.extract`, :meth:`RarFile.extractall` + and :meth:`RarFile.testrar`. +* :meth:`RarExtFile.read` from unrar pipe: prefer to return unrar error code, + if thats not available, do own error checks. +* Avoid string addition in :meth:`RarExtFile.read`, instead use always list+join to + merge multi-part reads. +* dumprar: dont re-encode byte strings (Python 2.x). This avoids + unneccessary failure when printing invalid unicode. + +Version 2.4 (2011-11-05) +------------------------ + +Fixes: + +* :data:`USE_DATETIME`: survive bad values from RAR +* Fix bug in corrupt unicode filename handling +* dumprar: make unicode chars work with both pipe and console + +Version 2.3 (2011-07-03) +------------------------ + +Features: + +* Support .seek() method on file streams. (Kristian Larsson) +* Support .readinto() method on file streams. Optimized implementation + is available on Python 2.6+ where :class:`memoryview` is available. +* Support file comments - :attr:`RarInfo.comment` contains decompressed data if available. +* File objects returned by :meth:`RarFile.open()` are :class:`io.RawIOBase`-compatible. + They can further wrapped with :class:`io.BufferedReader` and :class:`io.TextIOWrapper`. +* Now .getinfo() uses dict lookup instead of sequential scan when + searching archive entry. This speeds up prococessing for archives that + have many entries. +* Option :data:`UNICODE_COMMENTS` to decode both archive and file comments to unicode. + It uses :data:`TRY_ENCODINGS` for list of encodings to try. If off, comments are + left as byte strings. Default: 0 +* Option :data:`PATH_SEP` to change path separator. 
Default: ``r'\'``, + set ``rarfile.PATH_SEP='/'`` to be compatibe with zipfile. +* Option :data:`USE_DATETIME` to convert timestamps to datetime objects. + Default: 0, timestamps are tuples. +* Option :data:`TRY_ENCODINGS` to allow tuning attempted encoding list. +* Reorder :class:`RarInfo` fiels to better show zipfile-compatible fields. +* Standard regtests to make sure various features work + +Compatibility: + +* Drop :attr:`RarInfo.unicode_filename`, plain :attr:`RarInfo.filename` is already unicode since 2.0. +* .read(-1) reads now until EOF. Previously it returned empty buffer. + +Fixes: + +* Make encrypted headers work with Python 3.x bytes() and with old 2.x 'sha' module. +* Simplify :class:`subprocess.Popen` usage when launching ``unrar``. Previously + it tried to optimize and work around OS/Python bugs, but this is not + maintainable. +* Use temp rar file hack on multi-volume archives too. +* Always .wait() on unrar, to avoid zombies +* Convert struct.error to BadRarFile +* Plug some fd leaks. Affected: Jython, PyPy. +* Broken archives are handled more robustly. + +Version 2.2 (2010-08-19) +------------------------ + +Fixes: + +* Relaxed volume naming. Now it just calculates new volume name by finding number + in old one and increasing it, without any expectations what that number should be. +* Files with 4G of compressed data in one colume were handled wrong. Fix. +* DOS timestamp seconds need to be multiplied with 2. +* Correct EXTTIME parsing. + +Cleanups: + +* Compressed size is per-volume, sum them together, so that user sees complete + compressed size for files split over several volumes. +* dumprar: Show unknown bits. +* Use :class:`struct.Struct` to cache unpack formats. +* Support missing :data:`os.devnull`. (Python 2.3) + +Version 2.1 (2010-07-31) +------------------------ + +Features: + +* Minimal implmentation for :meth:`RarFile.extract`, :meth:`RarFile.extractall`, :meth:`RarFile.testrar`. + They are simple shortcuts to ``unrar`` invocation. 
+* Accept :class:`RarInfo` object where filename is expected. +* Include ``dumprar.py`` in .tgz. It can be used to visualize RAR structure + and test module. +* Support for encrypted file headers. + +Fixes: + +* Don't read past ENDARC, there could be non-RAR data there. +* RAR 2.x: It does not write ENDARC, but our volume code expected it. Fix that. +* RAR 2.x: Support more than 200 old-style volumes. + +Cleanups: + +* Load comment only when requested. +* Cleanup of internal config variables. They should have now final names. +* :meth:`RarFile.open`: Add mode=r argument to match zipfile. +* Doc and comments cleanup, minimize duplication. +* Common wrappers for both compressed and uncompressed files, + now :meth:`RarFile.open` also does CRC-checking. + +Version 2.0 (2010-04-29) +------------------------ + +Features: + +* Python 3 support. Still works with 2.x. +* Parses extended time fields. (.mtime, .ctime, .atime) +* :meth:`RarFile.open` method. This makes possible to process large + entries that do not fit into memory. +* Supports password-protected archives. +* Supports archive comments. + +Cleanups: + +* Uses :mod:`subprocess` module to launch ``unrar``. +* .filename is always Unicode string, .unicode_filename is now deprecated. +* .CRC is unsigned again, as python3 crc32() is unsigned. + +Version 1.1 (2008-08-31) +------------------------ + +Fixes: + +* Replace :func:`os.tempnam` with :func:`tempfile.mkstemp`. (Jason Moiron) +* Fix infinite loop in _extract_hack on unexpected EOF +* :attr:`RarInfo.CRC` is now signed value to match crc32() +* :meth:`RarFile.read` now checks file crc + +Cleanups: + +* more docstrings +* throw proper exceptions (subclasses of :exc:`rarfile.Error`) +* RarInfo has fields pre-initialized, so they appear in help() +* rename RarInfo.data to RarInfo.header_data +* dont use "print" when header parsing fails +* use try/finally to delete temp rar + +Version 1.0 (2005-08-08) +------------------------ + +* First release. 
+ diff --git a/libs/rarfile1/dumprar.py b/libs/rarfile1/dumprar.py new file mode 100755 index 00000000..f7ab062b --- /dev/null +++ b/libs/rarfile1/dumprar.py @@ -0,0 +1,361 @@ +#! /usr/bin/env python + +"""Dump archive contents, test extraction.""" + +import io +import sys +import rarfile as rf +from binascii import crc32, hexlify +from datetime import datetime + +try: + bytearray +except NameError: + import array + def bytearray(v): + return array.array('B', v) + +rf.UNICODE_COMMENTS = 1 +rf.USE_DATETIME = 1 + +usage = """ +dumprar [switches] [ARC1 ARC2 ...] [@ARCLIST] +switches: + @file read archive names from file + -pPSW set password + -Ccharset set fallback charset + -v increase verbosity + -t attempt to read all files + -x write read files out + -c show archive comment + -h show usage + -- stop switch parsing +""".strip() + +os_list = ['DOS', 'OS2', 'WIN', 'UNIX', 'MACOS', 'BEOS'] + +block_strs = ['MARK', 'MAIN', 'FILE', 'OLD_COMMENT', 'OLD_EXTRA', + 'OLD_SUB', 'OLD_RECOVERY', 'OLD_AUTH', 'SUB', 'ENDARC'] + +def rarType(type): + if type < rf.RAR_BLOCK_MARK or type > rf.RAR_BLOCK_ENDARC: + return "*UNKNOWN*" + return block_strs[type - rf.RAR_BLOCK_MARK] + +main_bits = ( + (rf.RAR_MAIN_VOLUME, "VOL"), + (rf.RAR_MAIN_COMMENT, "COMMENT"), + (rf.RAR_MAIN_LOCK, "LOCK"), + (rf.RAR_MAIN_SOLID, "SOLID"), + (rf.RAR_MAIN_NEWNUMBERING, "NEWNR"), + (rf.RAR_MAIN_AUTH, "AUTH"), + (rf.RAR_MAIN_RECOVERY, "RECOVERY"), + (rf.RAR_MAIN_PASSWORD, "PASSWORD"), + (rf.RAR_MAIN_FIRSTVOLUME, "FIRSTVOL"), + (rf.RAR_SKIP_IF_UNKNOWN, "SKIP"), + (rf.RAR_LONG_BLOCK, "LONG"), +) + +endarc_bits = ( + (rf.RAR_ENDARC_NEXT_VOLUME, "NEXTVOL"), + (rf.RAR_ENDARC_DATACRC, "DATACRC"), + (rf.RAR_ENDARC_REVSPACE, "REVSPACE"), + (rf.RAR_ENDARC_VOLNR, "VOLNR"), + (rf.RAR_SKIP_IF_UNKNOWN, "SKIP"), + (rf.RAR_LONG_BLOCK, "LONG"), +) + +file_bits = ( + (rf.RAR_FILE_SPLIT_BEFORE, "SPLIT_BEFORE"), + (rf.RAR_FILE_SPLIT_AFTER, "SPLIT_AFTER"), + (rf.RAR_FILE_PASSWORD, "PASSWORD"), + (rf.RAR_FILE_COMMENT, 
"COMMENT"), + (rf.RAR_FILE_SOLID, "SOLID"), + (rf.RAR_FILE_LARGE, "LARGE"), + (rf.RAR_FILE_UNICODE, "UNICODE"), + (rf.RAR_FILE_SALT, "SALT"), + (rf.RAR_FILE_VERSION, "VERSION"), + (rf.RAR_FILE_EXTTIME, "EXTTIME"), + (rf.RAR_FILE_EXTFLAGS, "EXTFLAGS"), + (rf.RAR_SKIP_IF_UNKNOWN, "SKIP"), + (rf.RAR_LONG_BLOCK, "LONG"), +) + +generic_bits = ( + (rf.RAR_SKIP_IF_UNKNOWN, "SKIP"), + (rf.RAR_LONG_BLOCK, "LONG"), +) + +file_parms = ("D64", "D128", "D256", "D512", + "D1024", "D2048", "D4096", "DIR") + +def xprint(m, *args): + if sys.hexversion < 0x3000000: + m = m.decode('utf8') + if args: + m = m % args + if sys.hexversion < 0x3000000: + m = m.encode('utf8') + sys.stdout.write(m) + sys.stdout.write('\n') + +def render_flags(flags, bit_list): + res = [] + known = 0 + for bit in bit_list: + known = known | bit[0] + if flags & bit[0]: + res.append(bit[1]) + unknown = flags & ~known + n = 0 + while unknown: + if unknown & 1: + res.append("UNK_%04x" % (1 << n)) + unknown = unknown >> 1 + n += 1 + + return ",".join(res) + +def get_file_flags(flags): + res = render_flags(flags & ~rf.RAR_FILE_DICTMASK, file_bits) + + xf = (flags & rf.RAR_FILE_DICTMASK) >> 5 + res += "," + file_parms[xf] + return res + +def get_main_flags(flags): + return render_flags(flags, main_bits) + +def get_endarc_flags(flags): + return render_flags(flags, endarc_bits) + +def get_generic_flags(flags): + return render_flags(flags, generic_bits) + +def fmt_time(t): + if isinstance(t, datetime): + return t.isoformat(' ') + return "%04d-%02d-%02d %02d:%02d:%02d" % t + +def show_item(h): + st = rarType(h.type) + unknown = h.header_size - h.header_base + xprint("%s: hdrlen=%d datlen=%d hdr_unknown=%d", st, h.header_size, + h.add_size, unknown) + if unknown > 0 and cf_verbose > 1: + dat = h.header_data[h.header_base : ] + xprint(" unknown: %s", hexlify(dat)) + if h.type in (rf.RAR_BLOCK_FILE, rf.RAR_BLOCK_SUB): + if h.host_os == rf.RAR_OS_UNIX: + s_mode = "0%o" % h.mode + else: + s_mode = "0x%x" % h.mode + xprint(" 
flags=0x%04x:%s", h.flags, get_file_flags(h.flags)) + if h.host_os >= 0 and h.host_os < len(os_list): + s_os = os_list[h.host_os] + else: + s_os = "?" + xprint(" os=%d:%s ver=%d mode=%s meth=%c cmp=%d dec=%d vol=%d", + h.host_os, s_os, + h.extract_version, s_mode, h.compress_type, + h.compress_size, h.file_size, h.volume) + ucrc = (h.CRC + (1 << 32)) & ((1 << 32) - 1) + xprint(" crc=0x%08x (%d) time=%s", ucrc, h.CRC, fmt_time(h.date_time)) + xprint(" name=%s", h.filename) + if h.mtime: + xprint(" mtime=%s", fmt_time(h.mtime)) + if h.ctime: + xprint(" ctime=%s", fmt_time(h.ctime)) + if h.atime: + xprint(" atime=%s", fmt_time(h.atime)) + if h.arctime: + xprint(" arctime=%s", fmt_time(h.arctime)) + elif h.type == rf.RAR_BLOCK_MAIN: + xprint(" flags=0x%04x:%s", h.flags, get_main_flags(h.flags)) + elif h.type == rf.RAR_BLOCK_ENDARC: + xprint(" flags=0x%04x:%s", h.flags, get_endarc_flags(h.flags)) + elif h.type == rf.RAR_BLOCK_MARK: + xprint(" flags=0x%04x:", h.flags) + else: + xprint(" flags=0x%04x:%s", h.flags, get_generic_flags(h.flags)) + + if h.comment is not None: + cm = repr(h.comment) + if cm[0] == 'u': + cm = cm[1:] + xprint(" comment=%s", cm) + +cf_show_comment = 0 +cf_verbose = 0 +cf_charset = None +cf_extract = 0 +cf_test_read = 0 +cf_test_unrar = 0 +cf_test_memory = 0 + +def check_crc(f, inf): + ucrc = f.CRC + if ucrc < 0: + ucrc += (long(1) << 32) + if ucrc != inf.CRC: + print ('crc error') + +def test_read_long(r, inf): + f = r.open(inf.filename) + total = 0 + while 1: + data = f.read(8192) + if not data: + break + total += len(data) + if total != inf.file_size: + xprint("\n *** %s has corrupt file: %s ***", r.rarfile, inf.filename) + xprint(" *** short read: got=%d, need=%d ***\n", total, inf.file_size) + check_crc(f, inf) + + # test .seek() & .readinto() + if cf_test_read > 1: + f.seek(0,0) + + # hack: re-enable crc calc + f.crc_check = 1 + f.CRC = 0 + + total = 0 + buf = bytearray(rf.ZERO*4096) + while 1: + res = f.readinto(buf) + if not res: + break + 
total += res + if inf.file_size != total: + xprint(" *** readinto failed: got=%d, need=%d ***\n", total, inf.file_size) + check_crc(f, inf) + f.close() + +def test_read(r, inf): + test_read_long(r, inf) + + +def test_real(fn, psw): + xprint("Archive: %s", fn) + + cb = None + if cf_verbose > 1: + cb = show_item + + rfarg = fn + if cf_test_memory: + rfarg = io.BytesIO(open(fn, 'rb').read()) + + # check if rar + if not rf.is_rarfile(rfarg): + xprint(" --- %s is not a RAR file ---", fn) + return + + # open + r = rf.RarFile(rfarg, charset = cf_charset, info_callback = cb) + # set password + if r.needs_password(): + if psw: + r.setpassword(psw) + else: + xprint(" --- %s requires password ---", fn) + return + + # show comment + if cf_show_comment and r.comment: + for ln in r.comment.split('\n'): + xprint(" %s", ln) + elif cf_verbose == 1 and r.comment: + cm = repr(r.comment) + if cm[0] == 'u': + cm = cm[1:] + xprint(" comment=%s", cm) + + # process + for n in r.namelist(): + inf = r.getinfo(n) + if inf.isdir(): + continue + if cf_verbose == 1: + show_item(inf) + if cf_test_read: + test_read(r, inf) + + if cf_extract: + r.extractall() + for inf in r.infolist(): + r.extract(inf) + + if cf_test_unrar: + r.testrar() + +def test(fn, psw): + try: + test_real(fn, psw) + except rf.NeedFirstVolume: + xprint(" --- %s is middle part of multi-vol archive ---", fn) + except rf.Error: + exc, msg, tb = sys.exc_info() + xprint("\n *** %s: %s ***\n", exc.__name__, msg) + del tb + except IOError: + exc, msg, tb = sys.exc_info() + xprint("\n *** %s: %s ***\n", exc.__name__, msg) + del tb + +def main(): + global cf_verbose, cf_show_comment, cf_charset + global cf_extract, cf_test_read, cf_test_unrar + global cf_test_memory + + # parse args + args = [] + psw = None + noswitch = False + for a in sys.argv[1:]: + if noswitch: + args.append(a) + elif a[0] == "@": + for ln in open(a[1:], 'r'): + fn = ln[:-1] + args.append(fn) + elif a[0] != '-': + args.append(a) + elif a[1] == 'p': + psw = a[2:] + 
elif a == '--': + noswitch = True + elif a == '-h': + xprint(usage) + return + elif a == '-v': + cf_verbose += 1 + elif a == '-c': + cf_show_comment = 1 + elif a == '-x': + cf_extract = 1 + elif a == '-t': + cf_test_read += 1 + elif a == '-T': + cf_test_unrar = 1 + elif a == '-M': + cf_test_memory = 1 + elif a[1] == 'C': + cf_charset = a[2:] + else: + raise Exception("unknown switch: "+a) + if not args: + xprint(usage) + + for fn in args: + test(fn, psw) + + +if __name__ == '__main__': + try: + main() + except KeyboardInterrupt: + pass + diff --git a/libs/rarfile1/setup.py b/libs/rarfile1/setup.py new file mode 100644 index 00000000..e1b412c6 --- /dev/null +++ b/libs/rarfile1/setup.py @@ -0,0 +1,33 @@ +#! /usr/bin/env python + +from distutils.core import setup + +import rarfile + +ver = rarfile.__version__ +ldesc = open("README.rst").read().strip() +sdesc = ldesc.split('\n')[0].split(' - ')[1].strip() + +setup( + name = "rarfile", + version = ver, + description = sdesc, + long_description = ldesc, + author = "Marko Kreen", + license = "ISC", + author_email = "markokr@gmail.com", + url = "https://github.com/markokr/rarfile", + py_modules = ['rarfile'], + keywords = ['rar', 'unrar', 'archive'], + classifiers = [ + "Development Status :: 5 - Production/Stable", + "Intended Audience :: Developers", + "License :: OSI Approved :: ISC License (ISCL)", + "Operating System :: OS Independent", + "Programming Language :: Python :: 2", + "Programming Language :: Python :: 3", + "Topic :: Software Development :: Libraries :: Python Modules", + "Topic :: System :: Archiving :: Compression", + ] +) + diff --git a/libs/rarfile1/test/Makefile b/libs/rarfile1/test/Makefile new file mode 100644 index 00000000..5383db3f --- /dev/null +++ b/libs/rarfile1/test/Makefile @@ -0,0 +1,9 @@ +test: + ./test1.sh + ./test2.sh + +clean: + rm -rf __pycache__ + rm -f files/*.rar.[pj]* *.pyc *.class *.diffs + rm -f rarfile.py + diff --git a/libs/rarfile1/test/files/ctime0.rar 
b/libs/rarfile1/test/files/ctime0.rar new file mode 100644 index 0000000000000000000000000000000000000000..d72c62ddf23fc85552ce3ad3ab79a89fddbce452 GIT binary patch literal 73 zcmWGaEK-zWXE;Bhn1O+p0RlW(N)#sOK{*TzOd!_u)vN4e4LBJTfb7Jy%$!ucl8O?B Q538*@kJwf-IIuGS0N&9LH~;_u literal 0 HcmV?d00001 diff --git a/libs/rarfile1/test/files/ctime0.rar.exp b/libs/rarfile1/test/files/ctime0.rar.exp new file mode 100644 index 00000000..2d6d0527 --- /dev/null +++ b/libs/rarfile1/test/files/ctime0.rar.exp @@ -0,0 +1,7 @@ +Archive: files/ctime0.rar +FILE: hdrlen=46 datlen=0 hdr_unknown=0 + flags=0x9020:EXTTIME,LONG,D128 + os=2:WIN ver=29 mode=0x20 meth=0 cmp=0 dec=0 vol=0 + crc=0x00000000 (0) time=2011-05-10 21:28:47.899345 + name=afile.txt + mtime=2011-05-10 21:28:47.899345 diff --git a/libs/rarfile1/test/files/ctime1.rar b/libs/rarfile1/test/files/ctime1.rar new file mode 100644 index 0000000000000000000000000000000000000000..89d82557950158571dace5962d58f61f5113bde4 GIT binary patch literal 77 zcmWGaEK-zWXE;Bhn1O+p0Rq18l_*Rwf^rxbm_V%Qt5?~{8gMcw0NIIYnK`L?B^4zM Se^y&{f@F@^Rx>!TGXMZnkP;^V literal 0 HcmV?d00001 diff --git a/libs/rarfile1/test/files/ctime1.rar.exp b/libs/rarfile1/test/files/ctime1.rar.exp new file mode 100644 index 00000000..acab0250 --- /dev/null +++ b/libs/rarfile1/test/files/ctime1.rar.exp @@ -0,0 +1,8 @@ +Archive: files/ctime1.rar +FILE: hdrlen=50 datlen=0 hdr_unknown=0 + flags=0x9020:EXTTIME,LONG,D128 + os=2:WIN ver=29 mode=0x20 meth=0 cmp=0 dec=0 vol=0 + crc=0x00000000 (0) time=2011-05-10 21:28:47.899345 + name=afile.txt + mtime=2011-05-10 21:28:47.899345 + ctime=2011-05-10 21:28:47 diff --git a/libs/rarfile1/test/files/ctime2.rar b/libs/rarfile1/test/files/ctime2.rar new file mode 100644 index 0000000000000000000000000000000000000000..09c91371028ce52fdfa47ecc2ccf57c30ae32d73 GIT binary patch literal 78 zcmWGaEK-zWXE;Bhn1O+p0Rk?ymMBaxhH@Afm_V%Qt5?~{8gMcw0NIIYnK`L?B^4zM Te^*;|f@C_6*j6(*urmMvdrT7t literal 0 HcmV?d00001 diff --git 
a/libs/rarfile1/test/files/ctime2.rar.exp b/libs/rarfile1/test/files/ctime2.rar.exp new file mode 100644 index 00000000..0b45e28d --- /dev/null +++ b/libs/rarfile1/test/files/ctime2.rar.exp @@ -0,0 +1,8 @@ +Archive: files/ctime2.rar +FILE: hdrlen=51 datlen=0 hdr_unknown=0 + flags=0x9020:EXTTIME,LONG,D128 + os=2:WIN ver=29 mode=0x20 meth=0 cmp=0 dec=0 vol=0 + crc=0x00000000 (0) time=2011-05-10 21:28:47.899345 + name=afile.txt + mtime=2011-05-10 21:28:47.899345 + ctime=2011-05-10 21:28:47.897843 diff --git a/libs/rarfile1/test/files/ctime3.rar b/libs/rarfile1/test/files/ctime3.rar new file mode 100644 index 0000000000000000000000000000000000000000..a32fa14f4af42673911345542a0150ad51d9dbed GIT binary patch literal 79 zcmWGaEK-zWXE;Bhn1O+p0RqYdN)#rTKsgKyOd!_u)vN4e4LBJTfb7Jy%$!ucl8O?B Tf2*xJK{8gIM{KJZ9M~BEUZN6m literal 0 HcmV?d00001 diff --git a/libs/rarfile1/test/files/ctime3.rar.exp b/libs/rarfile1/test/files/ctime3.rar.exp new file mode 100644 index 00000000..7a185b5d --- /dev/null +++ b/libs/rarfile1/test/files/ctime3.rar.exp @@ -0,0 +1,8 @@ +Archive: files/ctime3.rar +FILE: hdrlen=52 datlen=0 hdr_unknown=0 + flags=0x9020:EXTTIME,LONG,D128 + os=2:WIN ver=29 mode=0x20 meth=0 cmp=0 dec=0 vol=0 + crc=0x00000000 (0) time=2011-05-10 21:28:47.899345 + name=afile.txt + mtime=2011-05-10 21:28:47.899345 + ctime=2011-05-10 21:28:47.899327 diff --git a/libs/rarfile1/test/files/ctime4.rar b/libs/rarfile1/test/files/ctime4.rar new file mode 100644 index 0000000000000000000000000000000000000000..921e0da6a652e8cc1d63970f4d14d327cf87566d GIT binary patch literal 80 zcmWGaEK-zWXE;Bhn1O+p0RlFBDp8nV3gs{`Fo9UpSFf^@HQ;1W0J0O)GILV(N-9bi T{;#&`1jzuYBevBH4(tp7w=)zw literal 0 HcmV?d00001 diff --git a/libs/rarfile1/test/files/ctime4.rar.exp b/libs/rarfile1/test/files/ctime4.rar.exp new file mode 100644 index 00000000..7ce30c0d --- /dev/null +++ b/libs/rarfile1/test/files/ctime4.rar.exp @@ -0,0 +1,8 @@ +Archive: files/ctime4.rar +FILE: hdrlen=53 datlen=0 hdr_unknown=0 + 
flags=0x9020:EXTTIME,LONG,D128 + os=2:WIN ver=29 mode=0x20 meth=0 cmp=0 dec=0 vol=0 + crc=0x00000000 (0) time=2011-05-10 21:28:47.899345 + name=afile.txt + mtime=2011-05-10 21:28:47.899345 + ctime=2011-05-10 21:28:47.899345 diff --git a/libs/rarfile1/test/files/rar15-comment-lock.rar b/libs/rarfile1/test/files/rar15-comment-lock.rar new file mode 100644 index 0000000000000000000000000000000000000000..462f2625a039b271e9131f605a67af17b5198e0c GIT binary patch literal 210 zcmWGaEK-zWXK-jLW@FG{fPf82r3?)63<3=NCZ8_wPkPF}`0Kwd{a*aBZ$%%PI513O zE8%GHU|4?tsk^V&FGey)i8_C)F@H zKQ}iuuS7u?1cWbFeQ4mABXS>X90yP*2s3Ku7&g-Y literal 0 HcmV?d00001 diff --git a/libs/rarfile1/test/files/rar15-comment-lock.rar.exp b/libs/rarfile1/test/files/rar15-comment-lock.rar.exp new file mode 100644 index 00000000..4a4af276 --- /dev/null +++ b/libs/rarfile1/test/files/rar15-comment-lock.rar.exp @@ -0,0 +1,14 @@ +Archive: files/rar15-comment-lock.rar + comment='RARcomment -----' +FILE: hdrlen=72 datlen=7 hdr_unknown=31 + flags=0x8008:COMMENT,LONG,D64 + os=0:DOS ver=15 mode=0x20 meth=3 cmp=7 dec=7 vol=0 + crc=0xe27f07a9 (3799975849) time=2010-11-03 19:49:32 + name=FILE1.TXT + comment='file1comment -----' +FILE: hdrlen=72 datlen=8 hdr_unknown=31 + flags=0x8008:COMMENT,LONG,D64 + os=0:DOS ver=15 mode=0x20 meth=0 cmp=8 dec=8 vol=0 + crc=0x3c4306f7 (1011025655) time=2010-11-03 19:49:38 + name=FILE2.TXT + comment='file2comment -----' diff --git a/libs/rarfile1/test/files/rar15-comment.rar b/libs/rarfile1/test/files/rar15-comment.rar new file mode 100644 index 0000000000000000000000000000000000000000..f193bb0f52b4721a1a417cb2e6275d4b0f532b1e GIT binary patch literal 210 zcmWGaEK-zWXRuN#W@6A`fPf82r3?)63<3=NCZ8_wPkPF}`0Kwd{a*aBZ$%%PI513O zE8%GHU|4?tsk^V&FGey)i8_C)F@H zKQ}iuuS7u?1cWbFeQ4mABXS>X90yP*2s3Jvb~b4M literal 0 HcmV?d00001 diff --git a/libs/rarfile1/test/files/rar15-comment.rar.exp b/libs/rarfile1/test/files/rar15-comment.rar.exp new file mode 100644 index 00000000..05e5a928 --- /dev/null +++ 
b/libs/rarfile1/test/files/rar15-comment.rar.exp @@ -0,0 +1,14 @@ +Archive: files/rar15-comment.rar + comment='RARcomment -----' +FILE: hdrlen=72 datlen=7 hdr_unknown=31 + flags=0x8008:COMMENT,LONG,D64 + os=0:DOS ver=15 mode=0x20 meth=3 cmp=7 dec=7 vol=0 + crc=0xe27f07a9 (3799975849) time=2010-11-03 19:49:32 + name=FILE1.TXT + comment='file1comment -----' +FILE: hdrlen=72 datlen=8 hdr_unknown=31 + flags=0x8008:COMMENT,LONG,D64 + os=0:DOS ver=15 mode=0x20 meth=0 cmp=8 dec=8 vol=0 + crc=0x3c4306f7 (1011025655) time=2010-11-03 19:49:38 + name=FILE2.TXT + comment='file2comment -----' diff --git a/libs/rarfile1/test/files/rar202-comment-nopsw.rar b/libs/rarfile1/test/files/rar202-comment-nopsw.rar new file mode 100644 index 0000000000000000000000000000000000000000..329dc72af0e52e6d105984fa5b49e5d0d41ee6da GIT binary patch literal 204 zcmWGaEK-zWXV}YF%*0^K00BQ(OBoo{7`PZjOx^_uF!+E)8M$&7tk9m%KJ_2_>cx#~ zS2pCHFX3o#Vqgc#f-u9bTFI(B=44wD15O47Am7c?$JJ0TBqBsH5NMnv0}lhg!M(_| w%$!uiAJw48hvO>>(F`&I8MHP9ZjfypNV5@=K}Nh>0Mbw~BLDyZ literal 0 HcmV?d00001 diff --git a/libs/rarfile1/test/files/rar202-comment-nopsw.rar.exp b/libs/rarfile1/test/files/rar202-comment-nopsw.rar.exp new file mode 100644 index 00000000..b20cb577 --- /dev/null +++ b/libs/rarfile1/test/files/rar202-comment-nopsw.rar.exp @@ -0,0 +1,14 @@ +Archive: files/rar202-comment-nopsw.rar + comment='RARcomment' +FILE: hdrlen=66 datlen=7 hdr_unknown=25 + flags=0x8008:COMMENT,LONG,D64 + os=0:DOS ver=20 mode=0x20 meth=0 cmp=7 dec=7 vol=0 + crc=0x7a197dba (2048490938) time=2010-11-03 00:27:28 + name=FILE1.TXT + comment='file1comment' +FILE: hdrlen=66 datlen=7 hdr_unknown=25 + flags=0x8008:COMMENT,LONG,D64 + os=0:DOS ver=20 mode=0x20 meth=0 cmp=7 dec=7 vol=0 + crc=0x785fc3e3 (2019541987) time=2010-11-03 00:27:34 + name=FILE2.TXT + comment='file2comment' diff --git a/libs/rarfile1/test/files/rar202-comment-psw.rar b/libs/rarfile1/test/files/rar202-comment-psw.rar new file mode 100644 index 
0000000000000000000000000000000000000000..60fb14f42b4063ba32f65125061dc36222956bd9 GIT binary patch literal 254 zcmWGaEK-zWXV}YF%*0^K00BQ(OBoo{7`PZjOx^_uF!+E)8M$&7tk9m%KJ_2_>cx#~ zS2hSRmGCq;F(?3K*+D9H)k;?7F(=!K7;}OJ-8_9<4fR4ILKFjm#z`{pFz_4Pi%iSR zNi|H)&&^HED+zv9TUUIJO?x-X+g2F?t#A8R`{=ejH|@EeZmQpNY|nGmN2oSDJ{(_B zh-QNk$cD8ka2ssnKsFdbZMZyPpRDuyEBh;>rq<}LJM1YW%<_Bswe3HjmkX_8i#xCa E02j$krT_o{ literal 0 HcmV?d00001 diff --git a/libs/rarfile1/test/files/rar202-comment-psw.rar.exp b/libs/rarfile1/test/files/rar202-comment-psw.rar.exp new file mode 100644 index 00000000..a54ac4b6 --- /dev/null +++ b/libs/rarfile1/test/files/rar202-comment-psw.rar.exp @@ -0,0 +1,14 @@ +Archive: files/rar202-comment-psw.rar + comment='RARcomment' +FILE: hdrlen=66 datlen=32 hdr_unknown=25 + flags=0x800c:PASSWORD,COMMENT,LONG,D64 + os=0:DOS ver=20 mode=0x20 meth=3 cmp=32 dec=7 vol=0 + crc=0x7a197dba (2048490938) time=2010-11-03 00:27:28 + name=FILE1.TXT + comment='file1comment' +FILE: hdrlen=66 datlen=32 hdr_unknown=25 + flags=0x800c:PASSWORD,COMMENT,LONG,D64 + os=0:DOS ver=20 mode=0x20 meth=3 cmp=32 dec=7 vol=0 + crc=0x785fc3e3 (2019541987) time=2010-11-03 00:27:34 + name=FILE2.TXT + comment='file2comment' diff --git a/libs/rarfile1/test/files/rar3-comment-hpsw.rar b/libs/rarfile1/test/files/rar3-comment-hpsw.rar new file mode 100644 index 0000000000000000000000000000000000000000..37210ad622d6e4530340a232143344bf722733a9 GIT binary patch literal 484 zcmWGaEK-zWXE-;rxPgI}0Rm=Rwc0uRKugs%#(g0w1&7_l>gMfZ(1E6{BIQHy72Litdp z6^S1O7wSd!G4rfC*>vVoq~b+h_282*a}_;Le_B$1YT>)6D6z$JcSi15Q65;{k~GoL zck*Mgy8=gizRwipFx+>2*5kE_w-h4%j$3L>K4_uB|Fr3D;0oKvXK}grpZ0URH&XnU zq7=WHJkn)7%yLC7LPAsIq(SbPNZTi;-@TMvsI;ZnMDAy^nm_Ml-?a4oY#W~*-SnoU w)+IkEi)(8P_Lw- zgy8_g{*Lr-EB{|{oLMD+Y?p_9z5qxY)UF1&U1k%)bSn7O^3VQ%bESLwp}oaogzYjy vwX0?Us$I(_++sqrOF-cCo6aZ2jvgD%-&7ZW^zU_Jf4J6{BevBH4(tp7wrE#G literal 0 HcmV?d00001 diff --git a/libs/rarfile1/test/files/rar3-comment-plain.rar.exp 
b/libs/rarfile1/test/files/rar3-comment-plain.rar.exp new file mode 100644 index 00000000..0ad21471 --- /dev/null +++ b/libs/rarfile1/test/files/rar3-comment-plain.rar.exp @@ -0,0 +1,16 @@ +Archive: files/rar3-comment-plain.rar + comment='RARcomment\n' +FILE: hdrlen=43 datlen=8 hdr_unknown=0 + flags=0x9020:EXTTIME,LONG,D128 + os=3:UNIX ver=29 mode=0100644 meth=3 cmp=8 dec=0 vol=0 + crc=0x00000000 (0) time=2010-11-02 10:03:25 + name=file1.txt + mtime=2010-11-02 10:03:25 + comment='Comment1v2\n' +FILE: hdrlen=43 datlen=8 hdr_unknown=0 + flags=0x9020:EXTTIME,LONG,D128 + os=3:UNIX ver=29 mode=0100644 meth=3 cmp=8 dec=0 vol=0 + crc=0x00000000 (0) time=2010-11-02 10:03:25 + name=file2.txt + mtime=2010-11-02 10:03:25 + comment='Comment2v2\n' diff --git a/libs/rarfile1/test/files/rar3-comment-psw.rar b/libs/rarfile1/test/files/rar3-comment-psw.rar new file mode 100644 index 0000000000000000000000000000000000000000..dd1beabf48e5981759fe94cae31ed5261447b02a GIT binary patch literal 332 zcmWGaEK-zWXE;Bhn1O+p0Rk?~tYT+IkEi)(8P_Lw- zWHX;Wqkw$sUWNk?KipF{oLiBT>zkD;-XOEwab}eOvfUo``C#Lqb~nK7Hk%NpQ^Bv6 zfA;^IE8Wu%?G@QW*lr{IcGoOGwR_owTTCD?AlWS-aQaQ>lVV4Y4d-vFi$D7Jy0Jf8 N>&p?_Y6b^(1^{4=VJ-jw literal 0 HcmV?d00001 diff --git a/libs/rarfile1/test/files/rar3-comment-psw.rar.exp b/libs/rarfile1/test/files/rar3-comment-psw.rar.exp new file mode 100644 index 00000000..a817bda9 --- /dev/null +++ b/libs/rarfile1/test/files/rar3-comment-psw.rar.exp @@ -0,0 +1,16 @@ +Archive: files/rar3-comment-psw.rar + comment='RARcomment\n' +FILE: hdrlen=51 datlen=16 hdr_unknown=0 + flags=0x9424:PASSWORD,SALT,EXTTIME,LONG,D128 + os=3:UNIX ver=29 mode=0100644 meth=3 cmp=16 dec=0 vol=0 + crc=0x00000000 (0) time=2010-11-02 10:03:25 + name=file1.txt + mtime=2010-11-02 10:03:25 + comment='Comment1v2\n' +FILE: hdrlen=51 datlen=16 hdr_unknown=0 + flags=0x9424:PASSWORD,SALT,EXTTIME,LONG,D128 + os=3:UNIX ver=29 mode=0100644 meth=3 cmp=16 dec=0 vol=0 + crc=0x00000000 (0) time=2010-11-02 10:03:25 + name=file2.txt + 
mtime=2010-11-02 10:03:25 + comment='Comment2v2\n' diff --git a/libs/rarfile1/test/files/seektest.rar b/libs/rarfile1/test/files/seektest.rar new file mode 100644 index 0000000000000000000000000000000000000000..b1d72bb722beb61852858674bfd00c7df2106fa0 GIT binary patch literal 2253 zcmY+>zsv7)7{~E1ryQLqiA5A=KArCCx?Z2}58<2)G8qh%;mB~#AK=_di9(X1?hG=p zNXZ~FIGsfjlT%KT=oC>*=U5F24=lXvHN5LNT=jVHvB&SYg{J&tF!y3&+gap{U`TVpImq0!XGbx|Nfts zA6=e!_3-JR&b;>XL-!wh_`Z{O-TULgHSfK6^6G0>S0BCe)S*0o)wd5EoVxMyrFXu5 z?xPQ1x_JEjr{5iY{f%dSUET28!HFwRe{=J%@1FbNg~}q`1^wY zU2)Uif3E0%uh`q$JF>S|);1PfY;7Cc+V03+=}K3+(v_}sr7K7TsF6)~$7G z-CDQSt#xbNTDR71bQ|49x6y5M8{I~?(QR}a-B!2NZFO7SR=3q{bz9w5x7F=*JKavV z)9rLS-A=dD?R2}{k>XK2ibwG%9>t@06p!LjJc>v0C?3V5codJ~Q9O!A@hBd}qj(gL z;!!+`NAV~g#iMu>kK$20ibwG%9>t@06p!LjJc>v0C?3V5codJ~Q9O!A@hBd}qj(gL z;!!+`NAV~g#iMu>kK$20ibwG%9>t@06p!LjJc>v0C?3V5codJ~Q9O!A@hBd}qj(gL z;!!+`NAV~g#iMu>kK$20ibwHi9?he9G>_)dJeo)IXdca@c{Gpa(L9<*^JpH;qj@xs z=FvQwNAqYN&7*lVkLJ-lnn&|!9?he9G>_)dJeo)IXdca@c{Gpa(L9<*^JpH;qj@xs z=FvQwNAqYN&7*lVkLJ-lnn&|!9?he9G>_)dJeo)IXdca@c{Gpa(L9<*^JpH;qj@xs z=FvQwNAqYN&7*lVkLJ-lnn&|!9?he9G>_piJch^c7#_o8cnpu>F+7IH@E9J$V|WaY z;W0dh$M6^)!((_1kKr*qhR5(29>ZgJ43FV4Jch^c7#_o8cnpu>F+7IH@E9J$V|WaY z;W0dh$M6^)!((_1kKr*qhR5(29>ZgJ43FV4Jch^c7#_o8cnpu>F+7IH@E9J$V|WaY z;W0dh$M6^)!((_1kKr*qhR5(29>ZgJ43FV4Jch^c2#@dxkMIbO@Cc9a2#@dxkMIbO z@Cc9a2#@dxkMIbO@Cc9a2#@dxkMIbO@Cc9a2#@dxkMIbO@Cc9a2#@dxkMIbO@Cc9a z2#@dxkMIbO@Cc9a2#@dxkMIbO@Cc9a2#@dxkMIbO@Cc9a2#@dxkMIbO@Cc9a2#@dx zkMIbO@Cc9a2#@dxkMIbO@Cc9a2#@dxkMLM{EIbw-3y+1z!eimF@K|^(JQf}ckI(LY JYIXAP>MysWnDYPt literal 0 HcmV?d00001 diff --git a/libs/rarfile1/test/files/seektest.rar.exp b/libs/rarfile1/test/files/seektest.rar.exp new file mode 100644 index 00000000..cb61124a --- /dev/null +++ b/libs/rarfile1/test/files/seektest.rar.exp @@ -0,0 +1,13 @@ +Archive: files/seektest.rar +FILE: hdrlen=44 datlen=90 hdr_unknown=0 + flags=0x9020:EXTTIME,LONG,D128 + os=3:UNIX ver=29 mode=0100644 meth=5 cmp=90 dec=2048 vol=0 + crc=0xc5b7e6a2 
(3317163682) time=2011-06-12 12:53:33 + name=stest1.txt + mtime=2011-06-12 12:53:33 +FILE: hdrlen=44 datlen=2048 hdr_unknown=0 + flags=0x9020:EXTTIME,LONG,D128 + os=3:UNIX ver=20 mode=0100644 meth=0 cmp=2048 dec=2048 vol=0 + crc=0xc5b7e6a2 (3317163682) time=2011-06-12 12:53:33 + name=stest2.txt + mtime=2011-06-12 12:53:33 diff --git a/libs/rarfile1/test/files/unicode.rar b/libs/rarfile1/test/files/unicode.rar new file mode 100644 index 0000000000000000000000000000000000000000..7453ac0fb56b441aa72513c14d38f8a098525a66 GIT binary patch literal 163 zcmWGaEK-zWXRy9d%)!9R00A-|OBx!?7zBYlCLm@G{u7wa)b`9y)>MpPNh1Tp#pVk; zfNo_(666 literal 0 HcmV?d00001 diff --git a/libs/rarfile1/test/files/unicode.rar.exp b/libs/rarfile1/test/files/unicode.rar.exp new file mode 100644 index 00000000..5044f7b3 --- /dev/null +++ b/libs/rarfile1/test/files/unicode.rar.exp @@ -0,0 +1,11 @@ +Archive: files/unicode.rar +FILE: hdrlen=54 datlen=17 hdr_unknown=0 + flags=0x8080:LONG,D1024 + os=3:UNIX ver=29 mode=0100644 meth=5 cmp=17 dec=2 vol=0 + crc=0x6751fc53 (1733426259) time=2011-07-06 16:48:04 + name=уииоотивл.txt +FILE: hdrlen=52 datlen=13 hdr_unknown=0 + flags=0x8090:SOLID,LONG,D1024 + os=3:UNIX ver=29 mode=0100644 meth=5 cmp=13 dec=2 vol=0 + crc=0x6751fc53 (1733426259) time=2011-07-06 16:48:04 + name=ð€ððð‚.txt diff --git a/libs/rarfile1/test/test1.sh b/libs/rarfile1/test/test1.sh new file mode 100755 index 00000000..5b0f86aa --- /dev/null +++ b/libs/rarfile1/test/test1.sh @@ -0,0 +1,32 @@ +#! /bin/sh + +PYTHONPATH=..:$PYTHONPATH +export PYTHONPATH + +JAVA_OPTIONS="-Dpython.path=`pwd`/.." +export JAVA_OPTIONS + +plist="python2.7 python3.2 python3.3 python3.4 python3.5 python3.6 pypy jython jython2.7" + +rm -f test.diffs + +for py in $plist; do + if which $py > /dev/null; then + for f in files/*.rar; do + printf "%s -> %-30s .. 
" $py $f + $py ../dumprar.py -t -t -v -ppassword $f > $f.$py + if diff -uw $f.exp $f.$py > /dev/null; then + echo "ok" + else + echo "FAIL" + echo "#### $py ####" >> test.diffs + diff -uw $f.exp $f.$py >> test.diffs + fi + done + echo "" + else + echo $py not available + echo "" + fi +done + diff --git a/libs/rarfile1/test/test2.sh b/libs/rarfile1/test/test2.sh new file mode 100755 index 00000000..328e3ea0 --- /dev/null +++ b/libs/rarfile1/test/test2.sh @@ -0,0 +1,19 @@ +#! /bin/sh + +cp ../rarfile.py . + +#ulimit -n 16 + +plist="python2.7 python3.2 python3.3 python3.4 python3.5 python3.6 pypy jython jython2.7" + +for py in $plist; do + if which $py > /dev/null; then + echo "== $py ==" + $py ./testseek.py + $py ./testio.py + $py ./testcorrupt.py --quick + fi +done + +rm -f rarfile.py + diff --git a/libs/rarfile1/test/testcorrupt.py b/libs/rarfile1/test/testcorrupt.py new file mode 100755 index 00000000..91fc3d80 --- /dev/null +++ b/libs/rarfile1/test/testcorrupt.py @@ -0,0 +1,85 @@ +#! /usr/bin/env python + +import rarfile +import sys, os, time +import tempfile + +def progress(): + sys.stdout.write('.') + sys.stdout.flush() + +def try_read(tmpfn): + #progress() + try: + rf = rarfile.RarFile(tmpfn) + if rf.needs_password(): + rf.setpassword('password') + except rarfile.Error: + return + for fn in rf.namelist(): + try: + data = rf.read(fn) + pass + except rarfile.Error: + pass + +def test_rar(rarfn): + data = open(rarfn, "rb").read() + + fd, tmpfn = tempfile.mkstemp('.rar') + os.close(fd) + + print('testcorrupt 1') + for n in range(len(data)): + bad = data[:n] + f = open(tmpfn, 'wb') + f.write(bad) + f.close() + + try_read(tmpfn) + + print('testcorrupt 2') + crap = rarfile.RAR_ID + for n in range(1, len(data)): + for i in range(len(crap)): + c = crap[i:i+1] + bad = data[:n - 1] + c + data[n:] + f = open(tmpfn, 'wb') + f.write(bad) + f.close() + try_read(tmpfn) + + os.unlink(tmpfn) + +test_rar_list = [ + "files/ctime0.rar", + "files/ctime1.rar", + "files/ctime2.rar", 
+ "files/ctime3.rar", + "files/ctime4.rar", + "files/seektest.rar", + "files/rar15-comment-lock.rar", + "files/rar15-comment.rar", + "files/rar202-comment-nopsw.rar", + "files/rar202-comment-psw.rar", + "files/rar3-comment-hpsw.rar", + "files/rar3-comment-plain.rar", + "files/rar3-comment-psw.rar", + "files/unicode.rar", +] + +def main(): + if sys.argv[-1] == '--quick': + test_rar("files/rar3-comment-plain.rar") + return + for rar in test_rar_list: + print(rar) + test_rar(rar) + +if __name__ == '__main__': + try: + main() + except OSError: + print('OSError: pid = %d' % os.getpid()) + time.sleep(80000) + diff --git a/libs/rarfile1/test/testio.py b/libs/rarfile1/test/testio.py new file mode 100755 index 00000000..ee008b95 --- /dev/null +++ b/libs/rarfile1/test/testio.py @@ -0,0 +1,35 @@ +#! /usr/bin/env python + +import rarfile, os, os.path, time, sys + +try: + from io import BufferedReader, TextIOWrapper +except ImportError: + print('no io module') + sys.exit(0) + def BufferedReader(x): return x + def TextIOWrapper(x): return x + +def test_readline(rf, fn): + f = rf.open(fn) + tr = TextIOWrapper(BufferedReader(f)) + while 1: + ln = tr.readline() + if not ln: + break + tr.close() + +def main(): + files = ['stest1.txt', 'stest2.txt'] + arc = 'files/seektest.rar' + + rf = rarfile.RarFile(arc, crc_check=0) + for fn in files: + sys.stdout.write('test/readline: %s .. ' % fn) + sys.stdout.flush() + test_readline(rf, fn) + print('ok') + +if __name__ == '__main__': + main() + diff --git a/libs/rarfile1/test/testseek.py b/libs/rarfile1/test/testseek.py new file mode 100755 index 00000000..e6925ebf --- /dev/null +++ b/libs/rarfile1/test/testseek.py @@ -0,0 +1,103 @@ +#! 
/usr/bin/env python + +import rarfile, os, os.path, time, sys + +def show_fds(): + fdir = "/proc/%d/fd" % os.getpid() + if os.path.isdir(fdir): + os.system('printf "fds = "; ls -l %s | wc -l' % fdir) + +def do_seek(f, pos, lim): + ofs = pos*4 + fsize = lim*4 + + if ofs < 0: + exp = 0 + elif ofs > fsize: + exp = fsize + else: + exp = ofs + + f.seek(ofs) + + got = f.tell() + + if got != exp: + raise Exception('seek failed (got=%d, exp=%d)' % (got, exp)) + ln = f.read(4) + if got == fsize and ln: + raise Exception('unexpected read') + if not ln and got < fsize: + raise Exception('unexpected read failure') + if ln: + spos = int(ln) + if spos*4 != got: + raise Exception('unexpected pos: spos=%d pos=%d' % (spos, pos)) + +def test_seek(rf, fn): + inf = rf.getinfo(fn) + cnt = int(inf.file_size / 4) + f = rf.open(fn) + + do_seek(f, int(cnt/2), cnt) + do_seek(f, 0, cnt) + + for i in range(int(cnt/2)): + do_seek(f, i*2, cnt) + + for i in range(cnt): + do_seek(f, i*2 - int(cnt / 2), cnt) + + for i in range(cnt + 10): + do_seek(f, cnt - i - 5, cnt) + + f.close() + + print('OK') + +def test_arc(arc, desc): + files = ['stest1.txt', 'stest2.txt'] + rf = rarfile.RarFile(arc, crc_check=0) + for fn in files: + sys.stdout.write('%s | test/seek %s .. 
' % (desc, fn)) + sys.stdout.flush() + test_seek(rf, fn) + +def main(): + arc = 'files/seektest.rar' + data = open(arc, 'rb').read() + + # filename + test_arc(arc, "fn") + + # filelike: cStringIO + try: + import cStringIO + test_arc(cStringIO.StringIO(data), "cStringIO") + except ImportError: + pass + + # filelike: io.BytesIO, io.open() + try: + import io + test_arc(io.BytesIO(data), "io.BytesIO") + test_arc(io.open(arc, 'rb'), "io.open") + except ImportError: + pass + + # filelike: StringIO + try: + import StringIO + test_arc(StringIO.StringIO(data), "StringIO") + except ImportError: + pass + + # filelike: file() + test_arc(open(arc, 'rb'), "file") + + time.sleep(1) + show_fds() + +if __name__ == '__main__': + main() + diff --git a/libs/rebulk/__init__.py b/libs/rebulk/__init__.py new file mode 100644 index 00000000..93d5e477 --- /dev/null +++ b/libs/rebulk/__init__.py @@ -0,0 +1,10 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +Define simple search patterns in bulk to perform advanced matching on any string. 
+""" +# pylint:disable=import-self +from .rebulk import Rebulk +from .rules import Rule, CustomRule, AppendMatch, RemoveMatch, RenameMatch, AppendTags, RemoveTags +from .processors import ConflictSolver, PrivateRemover, POST_PROCESS, PRE_PROCESS +from .pattern import REGEX_AVAILABLE diff --git a/libs/rebulk/__version__.py b/libs/rebulk/__version__.py new file mode 100644 index 00000000..6b0a83ec --- /dev/null +++ b/libs/rebulk/__version__.py @@ -0,0 +1,7 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +Version module +""" +# pragma: no cover +__version__ = '0.7.7.dev0' diff --git a/libs/rebulk/chain.py b/libs/rebulk/chain.py new file mode 100644 index 00000000..7817e8c0 --- /dev/null +++ b/libs/rebulk/chain.py @@ -0,0 +1,440 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +Chain patterns and handle repetiting capture group +""" +# pylint: disable=super-init-not-called +import itertools + +from .loose import call, set_defaults +from .match import Match, Matches +from .pattern import Pattern, filter_match_kwargs +from .remodule import re + + +class _InvalidChainException(Exception): + """ + Internal exception raised when a chain is not valid + """ + pass + + +class Chain(Pattern): + """ + Definition of a pattern chain to search for. + """ + + def __init__(self, rebulk, **kwargs): + call(super(Chain, self).__init__, **kwargs) + self._kwargs = kwargs + self._match_kwargs = filter_match_kwargs(kwargs) + self._defaults = {} + self._regex_defaults = {} + self._string_defaults = {} + self._functional_defaults = {} + self.rebulk = rebulk + self.parts = [] + + def defaults(self, **kwargs): + """ + Define default keyword arguments for all patterns + :param kwargs: + :type kwargs: + :return: + :rtype: + """ + self._defaults = kwargs + return self + + def regex_defaults(self, **kwargs): + """ + Define default keyword arguments for functional patterns. 
+ :param kwargs: + :type kwargs: + :return: + :rtype: + """ + self._regex_defaults = kwargs + return self + + def string_defaults(self, **kwargs): + """ + Define default keyword arguments for string patterns. + :param kwargs: + :type kwargs: + :return: + :rtype: + """ + self._string_defaults = kwargs + return self + + def functional_defaults(self, **kwargs): + """ + Define default keyword arguments for functional patterns. + :param kwargs: + :type kwargs: + :return: + :rtype: + """ + self._functional_defaults = kwargs + return self + + def chain(self): + """ + Add patterns chain, using configuration from this chain + + :return: + :rtype: + """ + # pylint: disable=protected-access + chain = self.rebulk.chain(**self._kwargs) + chain._defaults = dict(self._defaults) + chain._regex_defaults = dict(self._regex_defaults) + chain._functional_defaults = dict(self._functional_defaults) + chain._string_defaults = dict(self._string_defaults) + return chain + + def regex(self, *pattern, **kwargs): + """ + Add re pattern + + :param pattern: + :type pattern: + :param kwargs: + :type kwargs: + :return: + :rtype: + """ + set_defaults(self._kwargs, kwargs) + set_defaults(self._regex_defaults, kwargs) + set_defaults(self._defaults, kwargs) + pattern = self.rebulk.build_re(*pattern, **kwargs) + part = ChainPart(self, pattern) + self.parts.append(part) + return part + + def functional(self, *pattern, **kwargs): + """ + Add functional pattern + + :param pattern: + :type pattern: + :param kwargs: + :type kwargs: + :return: + :rtype: + """ + set_defaults(self._kwargs, kwargs) + set_defaults(self._functional_defaults, kwargs) + set_defaults(self._defaults, kwargs) + pattern = self.rebulk.build_functional(*pattern, **kwargs) + part = ChainPart(self, pattern) + self.parts.append(part) + return part + + def string(self, *pattern, **kwargs): + """ + Add string pattern + + :param pattern: + :type pattern: + :param kwargs: + :type kwargs: + :return: + :rtype: + """ + set_defaults(self._kwargs, 
kwargs) + set_defaults(self._functional_defaults, kwargs) + set_defaults(self._defaults, kwargs) + pattern = self.rebulk.build_string(*pattern, **kwargs) + part = ChainPart(self, pattern) + self.parts.append(part) + return part + + def close(self): + """ + Close chain builder to continue registering other pattern + + :return: + :rtype: + """ + return self.rebulk + + def _match(self, pattern, input_string, context=None): + chain_matches = [] + chain_input_string = input_string + offset = 0 + while offset < len(input_string): + current_chain_matches = [] + valid_chain = True + is_chain_start = True + for chain_part in self.parts: + try: + chain_part_matches, raw_chain_part_matches = Chain._match_chain_part(is_chain_start, chain_part, + chain_input_string, + context) + if raw_chain_part_matches: + Chain._fix_matches_offset(raw_chain_part_matches, input_string, offset) + offset = raw_chain_part_matches[-1].raw_end + chain_input_string = input_string[offset:] + if not chain_part.is_hidden: + current_chain_matches.extend(chain_part_matches) + except _InvalidChainException: + valid_chain = False + if current_chain_matches: + offset = current_chain_matches[0].raw_end + break + is_chain_start = False + if not current_chain_matches: + break + if valid_chain: + match = self._build_chain_match(current_chain_matches, input_string) + chain_matches.append(match) + + return chain_matches + + def _match_parent(self, match, yield_parent): + """ + Handle a parent match + :param match: + :type match: + :param yield_parent: + :type yield_parent: + :return: + :rtype: + """ + ret = super(Chain, self)._match_parent(match, yield_parent) + original_children = Matches(match.children) + original_end = match.end + while not ret and match.children: + last_pattern = match.children[-1].pattern + last_pattern_children = [child for child in match.children if child.pattern == last_pattern] + last_pattern_groups_iter = itertools.groupby(last_pattern_children, lambda child: child.match_index) + 
last_pattern_groups = {} + for index, matches in last_pattern_groups_iter: + last_pattern_groups[index] = list(matches) + + for index in reversed(list(last_pattern_groups)): + last_matches = list(last_pattern_groups[index]) + for last_match in last_matches: + match.children.remove(last_match) + match.end = match.children[-1].end if match.children else match.start + ret = super(Chain, self)._match_parent(match, yield_parent) + if ret: + return True + match.children = original_children + match.end = original_end + return ret + + def _build_chain_match(self, current_chain_matches, input_string): + start = None + end = None + for match in current_chain_matches: + if start is None or start > match.start: + start = match.start + if end is None or end < match.end: + end = match.end + match = call(Match, start, end, pattern=self, input_string=input_string, **self._match_kwargs) + for chain_match in current_chain_matches: + if chain_match.children: + for child in chain_match.children: + match.children.append(child) + if chain_match not in match.children: + match.children.append(chain_match) + chain_match.parent = match + return match + + @staticmethod + def _fix_matches_offset(chain_part_matches, input_string, offset): + for chain_part_match in chain_part_matches: + if chain_part_match.input_string != input_string: + chain_part_match.input_string = input_string + chain_part_match.end += offset + chain_part_match.start += offset + if chain_part_match.children: + Chain._fix_matches_offset(chain_part_match.children, input_string, offset) + + @staticmethod + def _match_chain_part(is_chain_start, chain_part, chain_input_string, context): + chain_part_matches, raw_chain_part_matches = chain_part.pattern.matches(chain_input_string, context, + with_raw_matches=True) + chain_part_matches = Chain._truncate_chain_part_matches(is_chain_start, chain_part_matches, chain_part, + chain_input_string) + raw_chain_part_matches = Chain._truncate_chain_part_matches(is_chain_start, 
raw_chain_part_matches, chain_part, + chain_input_string) + + Chain._validate_chain_part_matches(raw_chain_part_matches, chain_part) + return chain_part_matches, raw_chain_part_matches + + @staticmethod + def _truncate_chain_part_matches(is_chain_start, chain_part_matches, chain_part, chain_input_string): + if not chain_part_matches: + return chain_part_matches + + if not is_chain_start: + separator = chain_input_string[0:chain_part_matches[0].initiator.raw_start] + if len(separator) > 0: + return [] + + j = 1 + for i in range(0, len(chain_part_matches) - 1): + separator = chain_input_string[chain_part_matches[i].initiator.raw_end: + chain_part_matches[i + 1].initiator.raw_start] + if len(separator) > 0: + break + j += 1 + truncated = chain_part_matches[:j] + if chain_part.repeater_end is not None: + truncated = [m for m in truncated if m.match_index < chain_part.repeater_end] + return truncated + + @staticmethod + def _validate_chain_part_matches(chain_part_matches, chain_part): + max_match_index = -1 + if chain_part_matches: + max_match_index = max([m.match_index for m in chain_part_matches]) + if max_match_index + 1 < chain_part.repeater_start: + raise _InvalidChainException + + @property + def match_options(self): + return {} + + @property + def patterns(self): + return [self] + + def __repr__(self): + defined = "" + if self.defined_at: + defined = "@%s" % (self.defined_at,) + return "<%s%s:%s>" % (self.__class__.__name__, defined, self.parts) + + +class ChainPart(object): + """ + Part of a pattern chain. 
+ """ + + def __init__(self, chain, pattern): + self._chain = chain + self.pattern = pattern + self.repeater_start = 1 + self.repeater_end = 1 + self._hidden = False + + def chain(self): + """ + Add patterns chain, using configuration from this chain + + :return: + :rtype: + """ + return self._chain.chain() + + def hidden(self, hidden=True): + """ + Hide chain part results from global chain result + + :param hidden: + :type hidden: + :return: + :rtype: + """ + self._hidden = hidden + return self + + @property + def is_hidden(self): + """ + Check if the chain part is hidden + :return: + :rtype: + """ + return self._hidden + + def regex(self, *pattern, **kwargs): + """ + Add re pattern + + :param pattern: + :type pattern: + :param kwargs: + :type kwargs: + :return: + :rtype: + """ + return self._chain.regex(*pattern, **kwargs) + + def functional(self, *pattern, **kwargs): + """ + Add functional pattern + + :param pattern: + :type pattern: + :param kwargs: + :type kwargs: + :return: + :rtype: + """ + return self._chain.functional(*pattern, **kwargs) + + def string(self, *pattern, **kwargs): + """ + Add string pattern + + :param pattern: + :type pattern: + :param kwargs: + :type kwargs: + :return: + :rtype: + """ + return self._chain.string(*pattern, **kwargs) + + def close(self): + """ + Close the chain builder to continue registering other patterns + + :return: + :rtype: + """ + return self._chain.close() + + def repeater(self, value): + """ + Define the repeater of the current chain part. 
+ + :param value: + :type value: + :return: + :rtype: + """ + try: + value = int(value) + self.repeater_start = value + self.repeater_end = value + return self + except ValueError: + pass + if value == '+': + self.repeater_start = 1 + self.repeater_end = None + if value == '*': + self.repeater_start = 0 + self.repeater_end = None + elif value == '?': + self.repeater_start = 0 + self.repeater_end = 1 + else: + match = re.match(r'\{\s*(\d*)\s*,?\s*(\d*)\s*\}', value) + if match: + start = match.group(1) + end = match.group(2) + if start or end: + self.repeater_start = int(start) if start else 0 + self.repeater_end = int(end) if end else None + return self + + def __repr__(self): + return "%s({%s,%s})" % (self.pattern, self.repeater_start, self.repeater_end) diff --git a/libs/rebulk/debug.py b/libs/rebulk/debug.py new file mode 100644 index 00000000..2384b26e --- /dev/null +++ b/libs/rebulk/debug.py @@ -0,0 +1,56 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +Debug tools. + +Can be configured by changing values of those variable. + +DEBUG = False +Enable this variable to activate debug features (like defined_at parameters). It can slow down Rebulk + +LOG_LEVEL = 0 +Default log level of generated rebulk logs. +""" + +import inspect +import logging +import os +from collections import namedtuple + + +DEBUG = False +LOG_LEVEL = logging.DEBUG + + +class Frame(namedtuple('Frame', ['lineno', 'package', 'name', 'filename'])): + """ + Stack frame representation. + """ + __slots__ = () + + def __repr__(self): + return "%s#L%s" % (os.path.basename(self.filename), self.lineno) + + +def defined_at(): + """ + Get definition location of a pattern or a match (outside of rebulk package). + :return: + :rtype: + """ + if DEBUG: + frame = inspect.currentframe() + while frame: + try: + if frame.f_globals['__package__'] != __package__: + break + except KeyError: # pragma:no cover + # If package is missing, consider we are in. Workaround for python 3.3. 
+ break + frame = frame.f_back + ret = Frame(frame.f_lineno, + frame.f_globals.get('__package__'), + frame.f_globals.get('__name__'), + frame.f_code.co_filename) + del frame + return ret diff --git a/libs/rebulk/formatters.py b/libs/rebulk/formatters.py new file mode 100644 index 00000000..47046942 --- /dev/null +++ b/libs/rebulk/formatters.py @@ -0,0 +1,23 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +Formatter functions to use in patterns. + +All those function have last argument as match.value (str). +""" + + +def formatters(*chained_formatters): + """ + Chain formatter functions. + :param chained_formatters: + :type chained_formatters: + :return: + :rtype: + """ + def formatters_chain(input_string): # pylint:disable=missing-docstring + for chained_formatter in chained_formatters: + input_string = chained_formatter(input_string) + return input_string + + return formatters_chain diff --git a/libs/rebulk/introspector.py b/libs/rebulk/introspector.py new file mode 100644 index 00000000..64b9836f --- /dev/null +++ b/libs/rebulk/introspector.py @@ -0,0 +1,126 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +Introspect rebulk object to retrieve capabilities. +""" +from abc import ABCMeta, abstractproperty +from collections import defaultdict + +import six +from .pattern import StringPattern, RePattern, FunctionalPattern +from .utils import extend_safe + + +@six.add_metaclass(ABCMeta) +class Description(object): + """ + Abstract class for a description. + """ + @abstractproperty + def properties(self): # pragma: no cover + """ + Properties of described object. + :return: all properties that described object can generate grouped by name. + :rtype: dict + """ + pass + + +class PatternDescription(Description): + """ + Description of a pattern. 
+ """ + def __init__(self, pattern): # pylint:disable=too-many-branches + self.pattern = pattern + self._properties = defaultdict(list) + + if pattern.properties: + for key, values in pattern.properties.items(): + extend_safe(self._properties[key], values) + elif 'value' in pattern.match_options: + self._properties[pattern.name].append(pattern.match_options['value']) + elif isinstance(pattern, StringPattern): + extend_safe(self._properties[pattern.name], pattern.patterns) + elif isinstance(pattern, RePattern): + if pattern.name and pattern.name not in pattern.private_names: + extend_safe(self._properties[pattern.name], [None]) + if not pattern.private_children: + for regex_pattern in pattern.patterns: + for group_name, values in regex_pattern.groupindex.items(): + if group_name not in pattern.private_names: + extend_safe(self._properties[group_name], [None]) + elif isinstance(pattern, FunctionalPattern): + if pattern.name and pattern.name not in pattern.private_names: + extend_safe(self._properties[pattern.name], [None]) + + + @property + def properties(self): + """ + Properties for this rule. + :return: + :rtype: dict + """ + return self._properties + + +class RuleDescription(Description): + """ + Description of a rule. + """ + def __init__(self, rule): + self.rule = rule + + self._properties = defaultdict(list) + + if rule.properties: + for key, values in rule.properties.items(): + extend_safe(self._properties[key], values) + + @property + def properties(self): + """ + Properties for this rule. + :return: + :rtype: dict + """ + return self._properties + + +class Introspection(Description): + """ + Introspection results. 
+ """ + def __init__(self, rebulk, context=None): + self.patterns = [PatternDescription(pattern) for pattern in rebulk.effective_patterns(context) + if not pattern.private and not pattern.marker] + self.rules = [RuleDescription(rule) for rule in rebulk.effective_rules(context)] + + @property + def properties(self): + """ + Properties for Introspection results. + :return: + :rtype: + """ + properties = defaultdict(list) + for pattern in self.patterns: + for key, values in pattern.properties.items(): + extend_safe(properties[key], values) + for rule in self.rules: + for key, values in rule.properties.items(): + extend_safe(properties[key], values) + return properties + + +def introspect(rebulk, context=None): + """ + Introspect a Rebulk instance to grab defined objects and properties that can be generated. + :param rebulk: + :type rebulk: Rebulk + :param context: + :type context: + :return: Introspection instance + :rtype: Introspection + """ + return Introspection(rebulk, context) diff --git a/libs/rebulk/loose.py b/libs/rebulk/loose.py new file mode 100644 index 00000000..72543b1e --- /dev/null +++ b/libs/rebulk/loose.py @@ -0,0 +1,198 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +Various utilities functions +""" +import inspect +import sys +from .utils import is_iterable + +if sys.version_info < (3, 4, 0): # pragma: no cover + def _constructor(class_): + """ + Retrieves constructor from given class + + :param class_: + :type class_: class + :return: constructor from given class + :rtype: callable + """ + return class_.__init__ +else: # pragma: no cover + def _constructor(class_): + """ + Retrieves constructor from given class + + :param class_: + :type class_: class + :return: constructor from given class + :rtype: callable + """ + return class_ + + +def call(function, *args, **kwargs): + """ + Call a function or constructor with given args and kwargs after removing args and kwargs that doesn't match + function or constructor signature + + :param 
function: Function or constructor to call + :type function: callable + :param args: + :type args: + :param kwargs: + :type kwargs: + :return: sale vakye as default function call + :rtype: object + """ + func = constructor_args if inspect.isclass(function) else function_args + call_args, call_kwargs = func(function, *args, **kwargs) + return function(*call_args, **call_kwargs) + + +def function_args(callable_, *args, **kwargs): + """ + Return (args, kwargs) matching the function signature + + :param callable: callable to inspect + :type callable: callable + :param args: + :type args: + :param kwargs: + :type kwargs: + :return: (args, kwargs) matching the function signature + :rtype: tuple + """ + argspec = inspect.getargspec(callable_) # pylint:disable=deprecated-method + return argspec_args(argspec, False, *args, **kwargs) + + +def constructor_args(class_, *args, **kwargs): + """ + Return (args, kwargs) matching the function signature + + :param callable: callable to inspect + :type callable: Callable + :param args: + :type args: + :param kwargs: + :type kwargs: + :return: (args, kwargs) matching the function signature + :rtype: tuple + """ + argspec = inspect.getargspec(_constructor(class_)) # pylint:disable=deprecated-method + return argspec_args(argspec, True, *args, **kwargs) + + +def argspec_args(argspec, constructor, *args, **kwargs): + """ + Return (args, kwargs) matching the argspec object + + :param argspec: argspec to use + :type argspec: argspec + :param constructor: is it a constructor ? 
+ :type constructor: bool + :param args: + :type args: + :param kwargs: + :type kwargs: + :return: (args, kwargs) matching the function signature + :rtype: tuple + """ + if argspec.keywords: + call_kwarg = kwargs + else: + call_kwarg = dict((k, kwargs[k]) for k in kwargs if k in argspec.args) # Python 2.6 dict comprehension + if argspec.varargs: + call_args = args + else: + call_args = args[:len(argspec.args) - (1 if constructor else 0)] + return call_args, call_kwarg + + +def ensure_list(param): + """ + Retrieves a list from given parameter. + + :param param: + :type param: + :return: + :rtype: + """ + if not param: + param = [] + elif not is_iterable(param): + param = [param] + return param + + +def ensure_dict(param, default_value, default_key=None): + """ + Retrieves a dict and a default value from given parameter. + + if parameter is not a dict, it will be promoted as the default value. + + :param param: + :type param: + :param default_value: + :type default_value: + :param default_key: + :type default_key: + :return: + :rtype: + """ + if not param: + param = default_value + if not isinstance(param, dict): + if param: + default_value = param + return {default_key: param}, default_value + return param, default_value + + +def filter_index(collection, predicate=None, index=None): + """ + Filter collection with predicate function and index. + + If index is not found, returns None. 
+ :param collection: + :type collection: collection supporting iteration and slicing + :param predicate: function to filter the collection with + :type predicate: function + :param index: position of a single element to retrieve + :type index: int + :return: filtered list, or single element of filtered list if index is defined + :rtype: list or object + """ + if index is None and isinstance(predicate, int): + index = predicate + predicate = None + if predicate: + collection = collection.__class__(filter(predicate, collection)) + if index is not None: + try: + collection = collection[index] + except IndexError: + collection = None + return collection + + +def set_defaults(defaults, kwargs): + """ + Set defaults from defaults dict to kwargs dict + :param defaults: + :type defaults: + :param kwargs: + :type kwargs: + :return: + :rtype: + """ + for key, value in defaults.items(): + if key not in kwargs and value is not None: + kwargs[key] = value + elif isinstance(value, list) and isinstance(kwargs[key], list): + kwargs[key] = list(value) + kwargs[key] + elif isinstance(value, dict) and isinstance(kwargs[key], dict): + set_defaults(value, kwargs[key]) + elif key in kwargs and value is None: + kwargs[key] = None diff --git a/libs/rebulk/match.py b/libs/rebulk/match.py new file mode 100644 index 00000000..909c9fd6 --- /dev/null +++ b/libs/rebulk/match.py @@ -0,0 +1,784 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +Classes and functions related to matches +""" +from collections import defaultdict, MutableSequence +import copy +try: + from collections import OrderedDict # pylint:disable=ungrouped-imports +except ImportError: # pragma: no cover + from ordereddict import OrderedDict # pylint:disable=import-error +import six + +from .loose import ensure_list, filter_index +from .utils import is_iterable +from .debug import defined_at + + +class MatchesDict(OrderedDict): + """ + A custom dict with matches property. 
+ """ + def __init__(self): + super(MatchesDict, self).__init__() + self.matches = defaultdict(list) + self.values_list = defaultdict(list) + + +class _BaseMatches(MutableSequence): + """ + A custom list[Match] that automatically maintains name, tag, start and end lookup structures. + """ + _base = list + _base_add = _base.append + _base_remove = _base.remove + + def __init__(self, matches=None, input_string=None): + self.input_string = input_string + self._max_end = 0 + self._delegate = [] + self._name_dict = defaultdict(_BaseMatches._base) + self._tag_dict = defaultdict(_BaseMatches._base) + self._start_dict = defaultdict(_BaseMatches._base) + self._end_dict = defaultdict(_BaseMatches._base) + self._index_dict = defaultdict(_BaseMatches._base) + if matches: + self.extend(matches) + + def _add_match(self, match): + """ + Add a match + :param match: + :type match: Match + """ + if match.name: + _BaseMatches._base_add(self._name_dict[match.name], (match)) + for tag in match.tags: + _BaseMatches._base_add(self._tag_dict[tag], match) + _BaseMatches._base_add(self._start_dict[match.start], match) + _BaseMatches._base_add(self._end_dict[match.end], match) + for index in range(*match.span): + _BaseMatches._base_add(self._index_dict[index], match) + if match.end > self._max_end: + self._max_end = match.end + + def _remove_match(self, match): + """ + Remove a match + :param match: + :type match: Match + """ + if match.name: + _BaseMatches._base_remove(self._name_dict[match.name], match) + for tag in match.tags: + _BaseMatches._base_remove(self._tag_dict[tag], match) + _BaseMatches._base_remove(self._start_dict[match.start], match) + _BaseMatches._base_remove(self._end_dict[match.end], match) + for index in range(*match.span): + _BaseMatches._base_remove(self._index_dict[index], match) + if match.end >= self._max_end and not self._end_dict[match.end]: + self._max_end = max(self._end_dict.keys()) + + def previous(self, match, predicate=None, index=None): + """ + Retrieves 
the nearest previous matches. + :param match: + :type match: + :param predicate: + :type predicate: + :param index: + :type index: int + :return: + :rtype: + """ + current = match.start + while current > -1: + previous_matches = self.ending(current) + if previous_matches: + return filter_index(previous_matches, predicate, index) + current -= 1 + return filter_index(_BaseMatches._base(), predicate, index) + + def next(self, match, predicate=None, index=None): + """ + Retrieves the nearest next matches. + :param match: + :type match: + :param predicate: + :type predicate: + :param index: + :type index: int + :return: + :rtype: + """ + current = match.start + 1 + while current <= self._max_end: + next_matches = self.starting(current) + if next_matches: + return filter_index(next_matches, predicate, index) + current += 1 + return filter_index(_BaseMatches._base(), predicate, index) + + def named(self, name, predicate=None, index=None): + """ + Retrieves a set of Match objects that have the given name. + :param name: + :type name: str + :param predicate: + :type predicate: + :param index: + :type index: int + :return: set of matches + :rtype: set[Match] + """ + return filter_index(_BaseMatches._base(self._name_dict[name]), predicate, index) + + def tagged(self, tag, predicate=None, index=None): + """ + Retrieves a set of Match objects that have the given tag defined. + :param tag: + :type tag: str + :param predicate: + :type predicate: + :param index: + :type index: int + :return: set of matches + :rtype: set[Match] + """ + return filter_index(_BaseMatches._base(self._tag_dict[tag]), predicate, index) + + def starting(self, start, predicate=None, index=None): + """ + Retrieves a set of Match objects that starts at given index. 
+ :param start: the starting index + :type start: int + :param predicate: + :type predicate: + :param index: + :type index: int + :return: set of matches + :rtype: set[Match] + """ + return filter_index(_BaseMatches._base(self._start_dict[start]), predicate, index) + + def ending(self, end, predicate=None, index=None): + """ + Retrieves a set of Match objects that ends at given index. + :param end: the ending index + :type end: int + :param predicate: + :type predicate: + :return: set of matches + :rtype: set[Match] + """ + return filter_index(_BaseMatches._base(self._end_dict[end]), predicate, index) + + def range(self, start=0, end=None, predicate=None, index=None): + """ + Retrieves a set of Match objects that are available in given range, sorted from start to end. + :param start: the starting index + :type start: int + :param end: the ending index + :type end: int + :param predicate: + :type predicate: + :param index: + :type index: int + :return: set of matches + :rtype: set[Match] + """ + if end is None: + end = self.max_end + else: + end = min(self.max_end, end) + ret = _BaseMatches._base() + for match in sorted(self): + if match.start < end and match.end > start: + ret.append(match) + return filter_index(ret, predicate, index) + + def chain_before(self, position, seps, start=0, predicate=None, index=None): + """ + Retrieves a list of chained matches, before position, matching predicate and separated by characters from seps + only. 
+ :param position: + :type position: + :param seps: + :type seps: + :param start: + :type start: + :param predicate: + :type predicate: + :param index: + :type index: + :return: + :rtype: + """ + if hasattr(position, 'start'): + position = position.start + + chain = _BaseMatches._base() + position = min(self.max_end, position) + + for i in reversed(range(start, position)): + index_matches = self.at_index(i) + filtered_matches = [index_match for index_match in index_matches if not predicate or predicate(index_match)] + if filtered_matches: + for chain_match in filtered_matches: + if chain_match not in chain: + chain.append(chain_match) + elif self.input_string[i] not in seps: + break + + return filter_index(chain, predicate, index) + + def chain_after(self, position, seps, end=None, predicate=None, index=None): + """ + Retrieves a list of chained matches, after position, matching predicate and separated by characters from seps + only. + :param position: + :type position: + :param seps: + :type seps: + :param end: + :type end: + :param predicate: + :type predicate: + :param index: + :type index: + :return: + :rtype: + """ + if hasattr(position, 'end'): + position = position.end + chain = _BaseMatches._base() + + if end is None: + end = self.max_end + else: + end = min(self.max_end, end) + + for i in range(position, end): + index_matches = self.at_index(i) + filtered_matches = [index_match for index_match in index_matches if not predicate or predicate(index_match)] + if filtered_matches: + for chain_match in filtered_matches: + if chain_match not in chain: + chain.append(chain_match) + elif self.input_string[i] not in seps: + break + + return filter_index(chain, predicate, index) + + @property + def max_end(self): + """ + Retrieves the maximum index. + :return: + """ + return max(len(self.input_string), self._max_end) if self.input_string else self._max_end + + def _hole_start(self, position, ignore=None): + """ + Retrieves the start of hole index from position. 
+ :param position: + :type position: + :param ignore: + :type ignore: + :return: + :rtype: + """ + for lindex in reversed(range(0, position)): + for starting in self.starting(lindex): + if not ignore or not ignore(starting): + return lindex + return 0 + + def _hole_end(self, position, ignore=None): + """ + Retrieves the end of hole index from position. + :param position: + :type position: + :param ignore: + :type ignore: + :return: + :rtype: + """ + for rindex in range(position, self.max_end): + for starting in self.starting(rindex): + if not ignore or not ignore(starting): + return rindex + return self.max_end + + def holes(self, start=0, end=None, formatter=None, ignore=None, seps=None, predicate=None, index=None): # pylint: disable=too-many-branches,too-many-locals + """ + Retrieves a set of Match objects that are not defined in given range. + :param start: + :type start: + :param end: + :type end: + :param formatter: + :type formatter: + :param ignore: + :type ignore: + :param seps: + :type seps: + :param predicate: + :type predicate: + :param index: + :type index: + :return: + :rtype: + """ + assert self.input_string if seps else True, "input_string must be defined when using seps parameter" + if end is None: + end = self.max_end + else: + end = min(self.max_end, end) + ret = _BaseMatches._base() + hole = False + rindex = start + + loop_start = self._hole_start(start, ignore) + + for rindex in range(loop_start, end): + current = [] + for at_index in self.at_index(rindex): + if not ignore or not ignore(at_index): + current.append(at_index) + + if seps and hole and self.input_string and self.input_string[rindex] in seps: + hole = False + ret[-1].end = rindex + else: + if not current and not hole: + # Open a new hole match + hole = True + ret.append(Match(max(rindex, start), None, input_string=self.input_string, formatter=formatter)) + elif current and hole: + # Close current hole match + hole = False + ret[-1].end = rindex + + if ret and hole: + # go the the 
next starting element ... + ret[-1].end = min(self._hole_end(rindex, ignore), end) + return filter_index(ret, predicate, index) + + def conflicting(self, match, predicate=None, index=None): + """ + Retrieves a list of ``Match`` objects that conflicts with given match. + :param match: + :type match: + :param predicate: + :type predicate: + :param index: + :type index: + :return: + :rtype: + """ + ret = _BaseMatches._base() + + for i in range(*match.span): + for at_match in self.at_index(i): + if at_match not in ret: + ret.append(at_match) + + ret.remove(match) + + return filter_index(ret, predicate, index) + + def at_match(self, match, predicate=None, index=None): + """ + Retrieves a list of matches from given match. + """ + return self.at_span(match.span, predicate, index) + + def at_span(self, span, predicate=None, index=None): + """ + Retrieves a list of matches from given (start, end) tuple. + """ + starting = self._index_dict[span[0]] + ending = self._index_dict[span[1] - 1] + + merged = list(starting) + for marker in ending: + if marker not in merged: + merged.append(marker) + + return filter_index(merged, predicate, index) + + def at_index(self, pos, predicate=None, index=None): + """ + Retrieves a list of matches from given position + """ + return filter_index(self._index_dict[pos], predicate, index) + + @property + def names(self): + """ + Retrieve all names. + :return: + """ + return self._name_dict.keys() + + @property + def tags(self): + """ + Retrieve all tags. + :return: + """ + return self._tag_dict.keys() + + def to_dict(self, details=False, implicit=False): + """ + Converts matches to a dict object. + :param details if True, values will be complete Match object, else it will be only string Match.value property + :type details: bool + :param implicit if True, multiple values will be set as a list in the dict. Else, only the first value + will be kept. 
+ :type implicit: bool + :return: + :rtype: dict + """ + ret = MatchesDict() + for match in sorted(self): + value = match if details else match.value + ret.matches[match.name].append(match) + if value not in ret.values_list[match.name]: + ret.values_list[match.name].append(value) + if match.name in ret.keys(): + if implicit: + if not isinstance(ret[match.name], list): + if ret[match.name] == value: + continue + ret[match.name] = [ret[match.name]] + else: + if value in ret[match.name]: + continue + ret[match.name].append(value) + else: + ret[match.name] = value + return ret + + if six.PY2: # pragma: no cover + def clear(self): + """ + Python 3 backport + """ + del self[:] + + def __len__(self): + return len(self._delegate) + + def __getitem__(self, index): + ret = self._delegate[index] + if isinstance(ret, list): + return Matches(ret) + return ret + + def __setitem__(self, index, match): + self._delegate[index] = match + if isinstance(index, slice): + for match_item in match: + self._add_match(match_item) + return + self._add_match(match) + + def __delitem__(self, index): + match = self._delegate[index] + del self._delegate[index] + if isinstance(match, list): + # if index is a slice, we has a match list + for match_item in match: + self._remove_match(match_item) + else: + self._remove_match(match) + + def __repr__(self): + return self._delegate.__repr__() + + def insert(self, index, match): + self._delegate.insert(index, match) + self._add_match(match) + + +class Matches(_BaseMatches): + """ + A custom list[Match] contains matches list. + """ + def __init__(self, matches=None, input_string=None): + self.markers = Markers(input_string=input_string) + super(Matches, self).__init__(matches=matches, input_string=input_string) + + def _add_match(self, match): + assert not match.marker, "A marker match should not be added to object" + super(Matches, self)._add_match(match) + + +class Markers(_BaseMatches): + """ + A custom list[Match] containing markers list. 
+ """ + def __init__(self, matches=None, input_string=None): + super(Markers, self).__init__(matches=None, input_string=input_string) + + def _add_match(self, match): + assert match.marker, "A non-marker match should not be added to object" + super(Markers, self)._add_match(match) + + +class Match(object): + """ + Object storing values related to a single match + """ + def __init__(self, start, end, value=None, name=None, tags=None, marker=None, parent=None, private=None, + pattern=None, input_string=None, formatter=None, conflict_solver=None): + self.start = start + self.end = end + self.name = name + self._value = value + self.tags = ensure_list(tags) + self.marker = marker + self.parent = parent + self.input_string = input_string + self.formatter = formatter + self.pattern = pattern + self.private = private + self.conflict_solver = conflict_solver + self.children = Matches([], input_string) + self._raw_start = None + self._raw_end = None + self.defined_at = pattern.defined_at if pattern else defined_at() + + @property + def span(self): + """ + 2-tuple with start and end indices of the match + """ + return self.start, self.end + + @property + def value(self): + """ + Get the value of the match, using formatter if defined. 
+ :return: + :rtype: + """ + if self._value: + return self._value + if self.formatter: + return self.formatter(self.raw) + return self.raw + + @value.setter + def value(self, value): + """ + Set the value (hardcode) + :param value: + :type value: + :return: + :rtype: + """ + self._value = value # pylint: disable=attribute-defined-outside-init + + @property + def names(self): + """ + Get all names of children + :return: + :rtype: + """ + if not self.children: + return set([self.name]) + else: + ret = set() + for child in self.children: + for name in child.names: + ret.add(name) + return ret + + @property + def raw_start(self): + """ + start index of raw value + :return: + :rtype: + """ + if self._raw_start is None: + return self.start + return self._raw_start + + @raw_start.setter + def raw_start(self, value): + """ + Set start index of raw value + :return: + :rtype: + """ + self._raw_start = value + + @property + def raw_end(self): + """ + end index of raw value + :return: + :rtype: + """ + if self._raw_end is None: + return self.end + return self._raw_end + + @raw_end.setter + def raw_end(self, value): + """ + Set end index of raw value + :return: + :rtype: + """ + self._raw_end = value + + @property + def raw(self): + """ + Get the raw value of the match, without using hardcoded value nor formatter. 
+ :return: + :rtype: + """ + if self.input_string: + return self.input_string[self.raw_start:self.raw_end] + return None + + @property + def initiator(self): + """ + Retrieve the initiator parent of a match + :param match: + :type match: + :return: + :rtype: + """ + match = self + while match.parent: + match = match.parent + return match + + def crop(self, crops, predicate=None, index=None): + """ + crop the match with given Match objects or spans tuples + :param crops: + :type crops: list or object + :return: a list of Match objects + :rtype: list[Match] + """ + if not is_iterable(crops) or len(crops) == 2 and isinstance(crops[0], int): + crops = [crops] + initial = copy.deepcopy(self) + ret = [initial] + for crop in crops: + if hasattr(crop, 'span'): + start, end = crop.span + else: + start, end = crop + for current in list(ret): + if start <= current.start and end >= current.end: + # self is included in crop, remove current ... + ret.remove(current) + elif start >= current.start and end <= current.end: + # crop is included in self, split current ... + right = copy.deepcopy(current) + current.end = start + if len(current) <= 0: + ret.remove(current) + right.start = end + if len(right) > 0: + ret.append(right) + elif end <= current.end and end > current.start: + current.start = end + elif start >= current.start and start < current.end: + current.end = start + return filter_index(ret, predicate, index) + + def split(self, seps, predicate=None, index=None): + """ + Split this match in multiple matches using given separators. 
+ :param seps: + :type seps: string containing separator characters + :return: list of new Match objects + :rtype: list + """ + split_match = copy.deepcopy(self) + current_match = split_match + ret = [] + + for i in range(0, len(self.raw)): + if self.raw[i] in seps: + if not split_match: + split_match = copy.deepcopy(current_match) + current_match.end = self.start + i + + else: + if split_match: + split_match.start = self.start + i + current_match = split_match + ret.append(split_match) + split_match = None + + return filter_index(ret, predicate, index) + + def __len__(self): + return self.end - self.start + + def __hash__(self): + return hash(Match) + hash(self.start) + hash(self.end) + hash(self.value) + + def __eq__(self, other): + if isinstance(other, Match): + return self.span == other.span and self.value == other.value and self.name == other.name and \ + self.parent == other.parent + return NotImplemented + + def __ne__(self, other): + if isinstance(other, Match): + return self.span != other.span or self.value != other.value or self.name != other.name or \ + self.parent != other.parent + return NotImplemented + + def __lt__(self, other): + if isinstance(other, Match): + return self.span < other.span + return NotImplemented + + def __gt__(self, other): + if isinstance(other, Match): + return self.span > other.span + return NotImplemented + + def __le__(self, other): + if isinstance(other, Match): + return self.span <= other.span + return NotImplemented + + def __ge__(self, other): + if isinstance(other, Match): + return self.span >= other.span + return NotImplemented + + def __repr__(self): + flags = "" + name = "" + tags = "" + defined = "" + initiator = "" + if self.initiator.value != self.value: + initiator = "+initiator=" + self.initiator.value + if self.private: + flags += '+private' + if self.name: + name = "+name=%s" % (self.name,) + if self.tags: + tags = "+tags=%s" % (self.tags,) + if self.defined_at: + defined += "@%s" % (self.defined_at,) + return 
"<%s:%s%s%s%s%s%s>" % (self.value, self.span, flags, name, tags, initiator, defined) diff --git a/libs/rebulk/pattern.py b/libs/rebulk/pattern.py new file mode 100644 index 00000000..767767b4 --- /dev/null +++ b/libs/rebulk/pattern.py @@ -0,0 +1,471 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +Abstract pattern class definition along with various implementations (regexp, string, functional) +""" +# pylint: disable=super-init-not-called,wrong-import-position + +from abc import ABCMeta, abstractmethod, abstractproperty + +import six + +from . import debug +from .loose import call, ensure_list, ensure_dict +from .match import Match +from .remodule import re, REGEX_AVAILABLE +from .utils import find_all, is_iterable, get_first_defined + + +@six.add_metaclass(ABCMeta) +class Pattern(object): + """ + Definition of a particular pattern to search for. + """ + + def __init__(self, name=None, tags=None, formatter=None, value=None, validator=None, children=False, every=False, + private_parent=False, private_children=False, private=False, private_names=None, ignore_names=None, + marker=False, format_all=False, validate_all=False, disabled=lambda context: False, log_level=None, + properties=None): + """ + :param name: Name of this pattern + :type name: str + :param tags: List of tags related to this pattern + :type tags: list[str] + :param formatter: dict (name, func) of formatter to use with this pattern. name is the match name to support, + and func a function(input_string) that returns the formatted string. A single formatter function can also be + passed as a shortcut for {None: formatter}. The returned formatted string with be set in Match.value property. + :type formatter: dict[str, func] || func + :param value: dict (name, value) of value to use with this pattern. name is the match name to support, + and value an object for the match value. A single object value can also be + passed as a shortcut for {None: value}. 
The value with be set in Match.value property. + :type value: dict[str, object] || object + :param validator: dict (name, func) of validator to use with this pattern. name is the match name to support, + and func a function(match) that returns the a boolean. A single validator function can also be + passed as a shortcut for {None: validator}. If return value is False, match will be ignored. + :param children: generates children instead of parent + :type children: bool + :param every: generates both parent and children. + :type every: bool + :param private: flag this pattern as beeing private. + :type private: bool + :param private_parent: force return of parent and flag parent matches as private. + :type private_parent: bool + :param private_children: force return of children and flag children matches as private. + :type private_children: bool + :param private_names: force return of named matches as private. + :type private_names: bool + :param ignore_names: drop some named matches after validation. + :type ignore_names: bool + :param marker: flag this pattern as beeing a marker. + :type private: bool + :param format_all if True, pattern will format every match in the hierarchy (even match not yield). + :type format_all: bool + :param validate_all if True, pattern will validate every match in the hierarchy (even match not yield). + :type validate_all: bool + :param disabled: if True, this pattern is disabled. Can also be a function(context). 
+ :type disabled: bool|function + :param log_lvl: Log level associated to this pattern + :type log_lvl: int + """ + # pylint:disable=too-many-locals + self.name = name + self.tags = ensure_list(tags) + self.formatters, self._default_formatter = ensure_dict(formatter, lambda x: x) + self.values, self._default_value = ensure_dict(value, None) + self.validators, self._default_validator = ensure_dict(validator, lambda match: True) + self.every = every + self.children = children + self.private = private + self.private_names = private_names if private_names else [] + self.ignore_names = ignore_names if ignore_names else [] + self.private_parent = private_parent + self.private_children = private_children + self.marker = marker + self.format_all = format_all + self.validate_all = validate_all + if not callable(disabled): + self.disabled = lambda context: disabled + else: + self.disabled = disabled + self._log_level = log_level + self._properties = properties + self.defined_at = debug.defined_at() + + @property + def log_level(self): + """ + Log level for this pattern. 
+ :return: + :rtype: + """ + return self._log_level if self._log_level is not None else debug.LOG_LEVEL + + def _yield_children(self, match): + """ + Does this match has children + :param match: + :type match: + :return: + :rtype: + """ + return match.children and (self.children or self.every) + + def _yield_parent(self): + """ + Does this mat + :param match: + :type match: + :return: + :rtype: + """ + return not self.children or self.every + + def _match_parent(self, match, yield_parent): + """ + Handle a parent match + :param match: + :type match: + :param yield_parent: + :type yield_parent: + :return: + :rtype: + """ + if len(match) < 0 or match.value == "": + return False + + pattern_value = get_first_defined(self.values, [match.name, '__parent__', None], + self._default_value) + if pattern_value: + match.value = pattern_value + + if yield_parent or self.format_all: + match.formatter = get_first_defined(self.formatters, [match.name, '__parent__', None], + self._default_formatter) + if yield_parent or self.validate_all: + validator = get_first_defined(self.validators, [match.name, '__parent__', None], + self._default_validator) + if validator and not validator(match): + return False + return True + + def _match_child(self, child, yield_children): + """ + Handle a children match + :param child: + :type child: + :param yield_children: + :type yield_children: + :return: + :rtype: + """ + if len(child) < 0 or child.value == "": + return False + + pattern_value = get_first_defined(self.values, [child.name, '__children__', None], + self._default_value) + if pattern_value: + child.value = pattern_value + + if yield_children or self.format_all: + child.formatter = get_first_defined(self.formatters, [child.name, '__children__', None], + self._default_formatter) + + if yield_children or self.validate_all: + validator = get_first_defined(self.validators, [child.name, '__children__', None], + self._default_validator) + if validator and not validator(child): + return False + 
return True + + def matches(self, input_string, context=None, with_raw_matches=False): + """ + Computes all matches for a given input + + :param input_string: the string to parse + :type input_string: str + :param context: the context + :type context: dict + :param with_raw_matches: should return details + :type with_raw_matches: dict + :return: matches based on input_string for this pattern + :rtype: iterator[Match] + """ + # pylint: disable=too-many-branches + + matches = [] + raw_matches = [] + for pattern in self.patterns: + yield_parent = self._yield_parent() + match_index = -1 + for match in self._match(pattern, input_string, context): + match_index += 1 + match.match_index = match_index + raw_matches.append(match) + yield_children = self._yield_children(match) + if not self._match_parent(match, yield_parent): + continue + validated = True + for child in match.children: + if not self._match_child(child, yield_children): + validated = False + break + if validated: + if self.private_parent: + match.private = True + if self.private_children: + for child in match.children: + child.private = True + if yield_parent or self.private_parent: + matches.append(match) + if yield_children or self.private_children: + for child in match.children: + child.match_index = match_index + matches.append(child) + self._matches_privatize(matches) + self._matches_ignore(matches) + if with_raw_matches: + return matches, raw_matches + return matches + + def _matches_privatize(self, matches): + """ + Mark matches included in private_names with private flag. + :param matches: + :type matches: + :return: + :rtype: + """ + if self.private_names: + for match in matches: + if match.name in self.private_names: + match.private = True + + def _matches_ignore(self, matches): + """ + Ignore matches included in ignore_names. 
+ :param matches: + :type matches: + :return: + :rtype: + """ + if self.ignore_names: + for match in list(matches): + if match.name in self.ignore_names: + matches.remove(match) + + @abstractproperty + def patterns(self): # pragma: no cover + """ + List of base patterns defined + + :return: A list of base patterns + :rtype: list + """ + pass + + @property + def properties(self): + """ + Properties names and values that can ben retrieved by this pattern. + :return: + :rtype: + """ + if self._properties: + return self._properties + return {} + + @abstractproperty + def match_options(self): # pragma: no cover + """ + dict of default options for generated Match objects + + :return: **options to pass to Match constructor + :rtype: dict + """ + pass + + @abstractmethod + def _match(self, pattern, input_string, context=None): # pragma: no cover + """ + Computes all matches for a given pattern and input + + :param pattern: the pattern to use + :param input_string: the string to parse + :type input_string: str + :param context: the context + :type context: dict + :return: matches based on input_string for this pattern + :rtype: iterator[Match] + """ + pass + + def __repr__(self): + defined = "" + if self.defined_at: + defined = "@%s" % (self.defined_at,) + return "<%s%s:%s>" % (self.__class__.__name__, defined, self.__repr__patterns__) + + @property + def __repr__patterns__(self): + return self.patterns + + +class StringPattern(Pattern): + """ + Definition of one or many strings to search for. 
+ """ + + def __init__(self, *patterns, **kwargs): + call(super(StringPattern, self).__init__, **kwargs) + self._patterns = patterns + self._kwargs = kwargs + self._match_kwargs = filter_match_kwargs(kwargs) + + @property + def patterns(self): + return self._patterns + + @property + def match_options(self): + return self._match_kwargs + + def _match(self, pattern, input_string, context=None): + for index in call(find_all, input_string, pattern, **self._kwargs): + yield call(Match, index, index + len(pattern), pattern=self, input_string=input_string, + **self._match_kwargs) + + +class RePattern(Pattern): + """ + Definition of one or many regular expression pattern to search for. + """ + + def __init__(self, *patterns, **kwargs): + call(super(RePattern, self).__init__, **kwargs) + self.repeated_captures = REGEX_AVAILABLE + if 'repeated_captures' in kwargs: + self.repeated_captures = kwargs.get('repeated_captures') + if self.repeated_captures and not REGEX_AVAILABLE: # pragma: no cover + raise NotImplementedError("repeated_capture is available only with regex module.") + self.abbreviations = kwargs.get('abbreviations', []) + self._kwargs = kwargs + self._match_kwargs = filter_match_kwargs(kwargs) + self._children_match_kwargs = filter_match_kwargs(kwargs, children=True) + self._patterns = [] + for pattern in patterns: + if isinstance(pattern, six.string_types): + if self.abbreviations and pattern: + for key, replacement in self.abbreviations: + pattern = pattern.replace(key, replacement) + pattern = call(re.compile, pattern, **self._kwargs) + elif isinstance(pattern, dict): + if self.abbreviations and 'pattern' in pattern: + for key, replacement in self.abbreviations: + pattern['pattern'] = pattern['pattern'].replace(key, replacement) + pattern = re.compile(**pattern) + elif hasattr(pattern, '__iter__'): + pattern = re.compile(*pattern) + self._patterns.append(pattern) + + @property + def patterns(self): + return self._patterns + + @property + def 
__repr__patterns__(self): + return [pattern.pattern for pattern in self.patterns] + + @property + def match_options(self): + return self._match_kwargs + + def _match(self, pattern, input_string, context=None): + names = dict((v, k) for k, v in pattern.groupindex.items()) + for match_object in pattern.finditer(input_string): + start = match_object.start() + end = match_object.end() + main_match = call(Match, start, end, pattern=self, input_string=input_string, **self._match_kwargs) + + if pattern.groups: + for i in range(1, pattern.groups + 1): + name = names.get(i, main_match.name) + if self.repeated_captures: + for start, end in match_object.spans(i): + child_match = call(Match, start, end, name=name, parent=main_match, pattern=self, + input_string=input_string, **self._children_match_kwargs) + main_match.children.append(child_match) + else: + start, end = match_object.span(i) + if start > -1 and end > -1: + child_match = call(Match, start, end, name=name, parent=main_match, pattern=self, + input_string=input_string, **self._children_match_kwargs) + main_match.children.append(child_match) + + yield main_match + + +class FunctionalPattern(Pattern): + """ + Definition of one or many functional pattern to search for. 
+ """ + + def __init__(self, *patterns, **kwargs): + call(super(FunctionalPattern, self).__init__, **kwargs) + self._patterns = patterns + self._kwargs = kwargs + self._match_kwargs = filter_match_kwargs(kwargs) + + @property + def patterns(self): + return self._patterns + + @property + def match_options(self): + return self._match_kwargs + + def _match(self, pattern, input_string, context=None): + ret = call(pattern, input_string, context, **self._kwargs) + if ret: + if not is_iterable(ret) or isinstance(ret, dict) \ + or (is_iterable(ret) and hasattr(ret, '__getitem__') and isinstance(ret[0], int)): + args_iterable = [ret] + else: + args_iterable = ret + for args in args_iterable: + if isinstance(args, dict): + options = args + options.pop('input_string', None) + options.pop('pattern', None) + if self._match_kwargs: + options = self._match_kwargs.copy() + options.update(args) + yield call(Match, pattern=self, input_string=input_string, **options) + else: + kwargs = self._match_kwargs + if isinstance(args[-1], dict): + kwargs = dict(kwargs) + kwargs.update(args[-1]) + args = args[:-1] + yield call(Match, *args, pattern=self, input_string=input_string, **kwargs) + + +def filter_match_kwargs(kwargs, children=False): + """ + Filters out kwargs for Match construction + + :param kwargs: + :type kwargs: dict + :param children: + :type children: Flag to filter children matches + :return: A filtered dict + :rtype: dict + """ + kwargs = kwargs.copy() + for key in ('pattern', 'start', 'end', 'parent', 'formatter', 'value'): + if key in kwargs: + del kwargs[key] + if children: + for key in ('name',): + if key in kwargs: + del kwargs[key] + return kwargs diff --git a/libs/rebulk/processors.py b/libs/rebulk/processors.py new file mode 100644 index 00000000..0121c658 --- /dev/null +++ b/libs/rebulk/processors.py @@ -0,0 +1,106 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +Processor functions +""" +from logging import getLogger + +from .utils import IdentitySet + 
+from .rules import Rule, RemoveMatch + +log = getLogger(__name__).log + +DEFAULT = '__default__' + +POST_PROCESS = -2048 +PRE_PROCESS = 2048 + + +def _default_conflict_solver(match, conflicting_match): + """ + Default conflict solver for matches, shorter matches if they conflicts with longer ones + + :param conflicting_match: + :type conflicting_match: + :param match: + :type match: + :return: + :rtype: + """ + if len(conflicting_match.initiator) < len(match.initiator): + return conflicting_match + elif len(match.initiator) < len(conflicting_match.initiator): + return match + return None + + +class ConflictSolver(Rule): + """ + Remove conflicting matches. + """ + priority = PRE_PROCESS + + consequence = RemoveMatch + + @property + def default_conflict_solver(self): # pylint:disable=no-self-use + """ + Default conflict solver to use. + """ + return _default_conflict_solver + + def when(self, matches, context): + to_remove_matches = IdentitySet() + + public_matches = [match for match in matches if not match.private] + public_matches.sort(key=len) + + for match in public_matches: + conflicting_matches = matches.conflicting(match) + + if conflicting_matches: + # keep the match only if it's the longest + conflicting_matches = [conflicting_match for conflicting_match in conflicting_matches if + not conflicting_match.private] + conflicting_matches.sort(key=len) + + for conflicting_match in conflicting_matches: + conflict_solvers = [(self.default_conflict_solver, False)] + + if match.conflict_solver: + conflict_solvers.append((match.conflict_solver, False)) + if conflicting_match.conflict_solver: + conflict_solvers.append((conflicting_match.conflict_solver, True)) + + for conflict_solver, reverse in reversed(conflict_solvers): + if reverse: + to_remove = conflict_solver(conflicting_match, match) + else: + to_remove = conflict_solver(match, conflicting_match) + if to_remove == DEFAULT: + continue + if to_remove and to_remove not in to_remove_matches: + both_matches = 
[match, conflicting_match] + both_matches.remove(to_remove) + to_keep = both_matches[0] + + if to_keep not in to_remove_matches: + log(self.log_level, "Conflicting match %s will be removed in favor of match %s", + to_remove, to_keep) + + to_remove_matches.add(to_remove) + break + return to_remove_matches + + +class PrivateRemover(Rule): + """ + Removes private matches rule. + """ + priority = POST_PROCESS + + consequence = RemoveMatch + + def when(self, matches, context): + return [match for match in matches if match.private] diff --git a/libs/rebulk/rebulk.py b/libs/rebulk/rebulk.py new file mode 100644 index 00000000..9326482b --- /dev/null +++ b/libs/rebulk/rebulk.py @@ -0,0 +1,350 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +Entry point functions and classes for Rebulk +""" +from logging import getLogger + +from .match import Matches + +from .pattern import RePattern, StringPattern, FunctionalPattern +from .chain import Chain + +from .processors import ConflictSolver, PrivateRemover +from .loose import set_defaults +from .utils import extend_safe +from .rules import Rules + +log = getLogger(__name__).log + + +class Rebulk(object): + r""" + Regular expression, string and function based patterns are declared in a ``Rebulk`` object. It use a fluent API to + chain ``string``, ``regex``, and ``functional`` methods to define various patterns types. + + .. code-block:: python + + >>> from rebulk import Rebulk + >>> bulk = Rebulk().string('brown').regex(r'qu\w+').functional(lambda s: (20, 25)) + + When ``Rebulk`` object is fully configured, you can call ``matches`` method with an input string to retrieve all + ``Match`` objects found by registered pattern. + + .. code-block:: python + + >>> bulk.matches("The quick brown fox jumps over the lazy dog") + [, , ] + + If multiple ``Match`` objects are found at the same position, only the longer one is kept. + + .. 
code-block:: python + + >>> bulk = Rebulk().string('lakers').string('la') + >>> bulk.matches("the lakers are from la") + [, ] + """ + # pylint:disable=protected-access + + def __init__(self, disabled=lambda context: False, default_rules=True): + """ + Creates a new Rebulk object. + :param disabled: if True, this pattern is disabled. Can also be a function(context). + :type disabled: bool|function + :param default_rules: use default rules + :type default_rules: + :return: + :rtype: + """ + if not callable(disabled): + self.disabled = lambda context: disabled + else: + self.disabled = disabled + self._patterns = [] + self._rules = Rules() + if default_rules: + self.rules(ConflictSolver, PrivateRemover) + self._defaults = {} + self._regex_defaults = {} + self._string_defaults = {} + self._functional_defaults = {} + self._rebulks = [] + + def pattern(self, *pattern): + """ + Add patterns objects + + :param pattern: + :type pattern: rebulk.pattern.Pattern + :return: self + :rtype: Rebulk + """ + self._patterns.extend(pattern) + return self + + def defaults(self, **kwargs): + """ + Define default keyword arguments for all patterns + :param kwargs: + :type kwargs: + :return: + :rtype: + """ + self._defaults = kwargs + return self + + def regex_defaults(self, **kwargs): + """ + Define default keyword arguments for functional patterns. 
+ :param kwargs: + :type kwargs: + :return: + :rtype: + """ + self._regex_defaults = kwargs + return self + + def regex(self, *pattern, **kwargs): + """ + Add re pattern + + :param pattern: + :type pattern: + :return: self + :rtype: Rebulk + """ + self.pattern(self.build_re(*pattern, **kwargs)) + return self + + def build_re(self, *pattern, **kwargs): + """ + Builds a new regular expression pattern + + :param pattern: + :type pattern: + :param kwargs: + :type kwargs: + :return: + :rtype: + """ + set_defaults(self._regex_defaults, kwargs) + set_defaults(self._defaults, kwargs) + return RePattern(*pattern, **kwargs) + + def string_defaults(self, **kwargs): + """ + Define default keyword arguments for string patterns. + :param kwargs: + :type kwargs: + :return: + :rtype: + """ + self._string_defaults = kwargs + return self + + def string(self, *pattern, **kwargs): + """ + Add string pattern + + :param pattern: + :type pattern: + :return: self + :rtype: Rebulk + """ + self.pattern(self.build_string(*pattern, **kwargs)) + return self + + def build_string(self, *pattern, **kwargs): + """ + Builds a new string pattern + + :param pattern: + :type pattern: + :param kwargs: + :type kwargs: + :return: + :rtype: + """ + set_defaults(self._string_defaults, kwargs) + set_defaults(self._defaults, kwargs) + return StringPattern(*pattern, **kwargs) + + def functional_defaults(self, **kwargs): + """ + Define default keyword arguments for functional patterns. 
+ :param kwargs: + :type kwargs: + :return: + :rtype: + """ + self._functional_defaults = kwargs + return self + + def functional(self, *pattern, **kwargs): + """ + Add functional pattern + + :param pattern: + :type pattern: + :return: self + :rtype: Rebulk + """ + self.pattern(self.build_functional(*pattern, **kwargs)) + return self + + def build_functional(self, *pattern, **kwargs): + """ + Builds a new functional pattern + + :param pattern: + :type pattern: + :param kwargs: + :type kwargs: + :return: + :rtype: + """ + set_defaults(self._functional_defaults, kwargs) + set_defaults(self._defaults, kwargs) + return FunctionalPattern(*pattern, **kwargs) + + def chain(self, **kwargs): + """ + Add patterns chain, using configuration of this rebulk + + :param pattern: + :type pattern: + :param kwargs: + :type kwargs: + :return: + :rtype: + """ + chain = self.build_chain(**kwargs) + self._patterns.append(chain) + return chain + + def build_chain(self, **kwargs): + """ + Builds a new patterns chain + + :param pattern: + :type pattern: + :param kwargs: + :type kwargs: + :return: + :rtype: + """ + set_defaults(self._defaults, kwargs) + return Chain(self, **kwargs) + + def rules(self, *rules): + """ + Add rules as a module, class or instance. 
+ :param rules: + :type rules: list[Rule] + :return: + """ + self._rules.load(*rules) + return self + + def rebulk(self, *rebulks): + """ + Add a children rebulk object + :param rebulks: + :type rebulks: Rebulk + :return: + """ + self._rebulks.extend(rebulks) + return self + + def matches(self, string, context=None): + """ + Search for all matches with current configuration against input_string + :param string: string to search into + :type string: str + :param context: context to use + :type context: dict + :return: A custom list of matches + :rtype: Matches + """ + matches = Matches(input_string=string) + if context is None: + context = {} + + self._matches_patterns(matches, context) + + self._execute_rules(matches, context) + + return matches + + def effective_rules(self, context=None): + """ + Get effective rules for this rebulk object and its children. + :param context: + :type context: + :return: + :rtype: + """ + rules = Rules() + rules.extend(self._rules) + for rebulk in self._rebulks: + if not rebulk.disabled(context): + extend_safe(rules, rebulk._rules) + return rules + + def _execute_rules(self, matches, context): + """ + Execute rules for this rebulk and children. + :param matches: + :type matches: + :param context: + :type context: + :return: + :rtype: + """ + if not self.disabled(context): + rules = self.effective_rules(context) + rules.execute_all_rules(matches, context) + + def effective_patterns(self, context=None): + """ + Get effective patterns for this rebulk object and its children. 
+ :param context: + :type context: + :return: + :rtype: + """ + patterns = list(self._patterns) + for rebulk in self._rebulks: + if not rebulk.disabled(context): + extend_safe(patterns, rebulk._patterns) + return patterns + + def _matches_patterns(self, matches, context): + """ + Search for all matches with current paterns agains input_string + :param matches: matches list + :type matches: Matches + :param context: context to use + :type context: dict + :return: + :rtype: + """ + if not self.disabled(context): + patterns = self.effective_patterns(context) + for pattern in patterns: + if not pattern.disabled(context): + pattern_matches = pattern.matches(matches.input_string, context) + if pattern_matches: + log(pattern.log_level, "Pattern has %s match(es). (%s)", len(pattern_matches), pattern) + else: + pass + # log(pattern.log_level, "Pattern doesn't match. (%s)" % (pattern,)) + for match in pattern_matches: + if match.marker: + log(pattern.log_level, "Marker found. (%s)", match) + matches.markers.append(match) + else: + log(pattern.log_level, "Match found. (%s)", match) + matches.append(match) + else: + log(pattern.log_level, "Pattern is disabled. 
(%s)", pattern) diff --git a/libs/rebulk/remodule.py b/libs/rebulk/remodule.py new file mode 100644 index 00000000..d1d68d19 --- /dev/null +++ b/libs/rebulk/remodule.py @@ -0,0 +1,17 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +Uniform re module +""" +# pylint: disable-all +import os + +REGEX_AVAILABLE = False +if os.environ.get('REGEX_DISABLED') in ["1", "true", "True", "Y"]: + import re +else: + try: + import regex as re + REGEX_AVAILABLE = True + except ImportError: + import re diff --git a/libs/rebulk/rules.py b/libs/rebulk/rules.py new file mode 100644 index 00000000..19b563ab --- /dev/null +++ b/libs/rebulk/rules.py @@ -0,0 +1,375 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +Abstract rule class definition and rule engine implementation +""" +from abc import ABCMeta, abstractmethod +import inspect +from itertools import groupby +from logging import getLogger + +import six +from .utils import is_iterable + +from .toposort import toposort + +from . import debug + +log = getLogger(__name__).log + + +@six.add_metaclass(ABCMeta) +class Consequence(object): + """ + Definition of a consequence to apply. + """ + @abstractmethod + def then(self, matches, when_response, context): # pragma: no cover + """ + Action implementation. + + :param matches: + :type matches: rebulk.match.Matches + :param context: + :type context: + :param when_response: return object from when call. + :type when_response: object + :return: True if the action was runned, False if it wasn't. + :rtype: bool + """ + pass + + +@six.add_metaclass(ABCMeta) +class Condition(object): + """ + Definition of a condition to check. + """ + @abstractmethod + def when(self, matches, context): # pragma: no cover + """ + Condition implementation. + + :param matches: + :type matches: rebulk.match.Matches + :param context: + :type context: + :return: truthy if rule should be triggered and execute then action, falsy if it should not. 
+ :rtype: object + """ + pass + + +@six.add_metaclass(ABCMeta) +class CustomRule(Condition, Consequence): + """ + Definition of a rule to apply + """ + # pylint: disable=no-self-use, unused-argument, abstract-method + priority = 0 + name = None + dependency = None + properties = {} + + def __init__(self, log_level=None): + self.defined_at = debug.defined_at() + if log_level is None and not hasattr(self, 'log_level'): + self.log_level = debug.LOG_LEVEL + + def enabled(self, context): + """ + Disable rule. + + :param context: + :type context: + :return: True if rule is enabled, False if disabled + :rtype: bool + """ + return True + + def __lt__(self, other): + return self.priority > other.priority + + def __repr__(self): + defined = "" + if self.defined_at: + defined = "@%s" % (self.defined_at,) + return "<%s%s>" % (self.name if self.name else self.__class__.__name__, defined) + + def __eq__(self, other): + return self.__class__ == other.__class__ + + def __hash__(self): + return hash(self.__class__) + + +class Rule(CustomRule): + """ + Definition of a rule to apply + """ + # pylint:disable=abstract-method + consequence = None + + def then(self, matches, when_response, context): + assert self.consequence + if is_iterable(self.consequence): + if not is_iterable(when_response): + when_response = [when_response] + iterator = iter(when_response) + for cons in self.consequence: #pylint: disable=not-an-iterable + if inspect.isclass(cons): + cons = cons() + cons.then(matches, next(iterator), context) + else: + cons = self.consequence + if inspect.isclass(cons): + cons = cons() # pylint:disable=not-callable + cons.then(matches, when_response, context) + + +class RemoveMatch(Consequence): # pylint: disable=abstract-method + """ + Remove matches returned by then + """ + def then(self, matches, when_response, context): + if is_iterable(when_response): + ret = [] + when_response = list(when_response) + for match in when_response: + if match in matches: + matches.remove(match) + 
ret.append(match) + return ret + else: + if when_response in matches: + matches.remove(when_response) + return when_response + + +class AppendMatch(Consequence): # pylint: disable=abstract-method + """ + Append matches returned by then + """ + def __init__(self, match_name=None): + self.match_name = match_name + + def then(self, matches, when_response, context): + if is_iterable(when_response): + ret = [] + when_response = list(when_response) + for match in when_response: + if match not in matches: + if self.match_name: + match.name = self.match_name + matches.append(match) + ret.append(match) + return ret + else: + if self.match_name: + when_response.name = self.match_name + if when_response not in matches: + matches.append(when_response) + return when_response + + +class RenameMatch(Consequence): # pylint: disable=abstract-method + """ + Rename matches returned by then + """ + def __init__(self, match_name): + self.match_name = match_name + self.remove = RemoveMatch() + self.append = AppendMatch() + + def then(self, matches, when_response, context): + removed = self.remove.then(matches, when_response, context) + if is_iterable(removed): + removed = list(removed) + for match in removed: + match.name = self.match_name + elif removed: + removed.name = self.match_name + if removed: + self.append.then(matches, removed, context) + + +class AppendTags(Consequence): # pylint: disable=abstract-method + """ + Add tags to returned matches + """ + def __init__(self, tags): + self.tags = tags + self.remove = RemoveMatch() + self.append = AppendMatch() + + def then(self, matches, when_response, context): + removed = self.remove.then(matches, when_response, context) + if is_iterable(removed): + removed = list(removed) + for match in removed: + match.tags.extend(self.tags) + elif removed: + removed.tags.extend(self.tags) # pylint: disable=no-member + if removed: + self.append.then(matches, removed, context) + + +class RemoveTags(Consequence): # pylint: disable=abstract-method + 
""" + Remove tags from returned matches + """ + def __init__(self, tags): + self.tags = tags + self.remove = RemoveMatch() + self.append = AppendMatch() + + def then(self, matches, when_response, context): + removed = self.remove.then(matches, when_response, context) + if is_iterable(removed): + removed = list(removed) + for match in removed: + for tag in self.tags: + if tag in match.tags: + match.tags.remove(tag) + elif removed: + for tag in self.tags: + if tag in removed.tags: # pylint: disable=no-member + removed.tags.remove(tag) # pylint: disable=no-member + if removed: + self.append.then(matches, removed, context) + + +class Rules(list): + """ + list of rules ready to execute. + """ + + def __init__(self, *rules): + super(Rules, self).__init__() + self.load(*rules) + + def load(self, *rules): + """ + Load rules from a Rule module, class or instance + + :param rules: + :type rules: + :return: + :rtype: + """ + for rule in rules: + if inspect.ismodule(rule): + self.load_module(rule) + elif inspect.isclass(rule): + self.load_class(rule) + else: + self.append(rule) + + def load_module(self, module): + """ + Load a rules module + + :param module: + :type module: + :return: + :rtype: + """ + # pylint: disable=unused-variable + for name, obj in inspect.getmembers(module, + lambda member: hasattr(member, '__module__') + and member.__module__ == module.__name__ + and inspect.isclass): + self.load_class(obj) + + def load_class(self, class_): + """ + Load a Rule class. + + :param class_: + :type class_: + :return: + :rtype: + """ + self.append(class_()) + + def execute_all_rules(self, matches, context): + """ + Execute all rules from this rules list. All when condition with same priority will be performed before + calling then actions. 
+ + :param matches: + :type matches: + :param context: + :type context: + :return: + :rtype: + """ + ret = [] + for priority, priority_rules in groupby(sorted(self), lambda rule: rule.priority): + sorted_rules = toposort_rules(list(priority_rules)) # Group by dependency graph toposort + for rules_group in sorted_rules: + rules_group = list(sorted(rules_group, key=self.index)) # Sort rules group based on initial ordering. + group_log_level = None + for rule in rules_group: + if group_log_level is None or group_log_level < rule.log_level: + group_log_level = rule.log_level + log(group_log_level, "%s independent rule(s) at priority %s.", len(rules_group), priority) + for rule in rules_group: + when_response = execute_rule(rule, matches, context) + if when_response is not None: + ret.append((rule, when_response)) + + return ret + + +def execute_rule(rule, matches, context): + """ + Execute the given rule. + :param rule: + :type rule: + :param matches: + :type matches: + :param context: + :type context: + :return: + :rtype: + """ + if rule.enabled(context): + log(rule.log_level, "Checking rule condition: %s", rule) + when_response = rule.when(matches, context) + if when_response: + log(rule.log_level, "Rule was triggered: %s", when_response) + log(rule.log_level, "Running rule consequence: %s %s", rule, when_response) + rule.then(matches, when_response, context) + return when_response + else: + log(rule.log_level, "Rule is disabled: %s", rule) + +def toposort_rules(rules): + """ + Sort given rules using toposort with dependency parameter. 
+ :param rules: + :type rules: + :return: + :rtype: + """ + graph = {} + class_dict = {} + for rule in rules: + if rule.__class__ in class_dict: + raise ValueError("Duplicate class rules are not allowed: %s" % rule.__class__) + class_dict[rule.__class__] = rule + for rule in rules: + if not is_iterable(rule.dependency) and rule.dependency: + rule_dependencies = [rule.dependency] + else: + rule_dependencies = rule.dependency + dependencies = set() + if rule_dependencies: + for dependency in rule_dependencies: + if inspect.isclass(dependency): + dependency = class_dict.get(dependency) + if dependency: + dependencies.add(dependency) + graph[rule] = dependencies + return toposort(graph) diff --git a/libs/rebulk/test/__init__.py b/libs/rebulk/test/__init__.py new file mode 100644 index 00000000..0ab48c94 --- /dev/null +++ b/libs/rebulk/test/__init__.py @@ -0,0 +1,3 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# pylint: disable=no-self-use, pointless-statement, missing-docstring diff --git a/libs/rebulk/test/default_rules_module.py b/libs/rebulk/test/default_rules_module.py new file mode 100644 index 00000000..5eed8e0d --- /dev/null +++ b/libs/rebulk/test/default_rules_module.py @@ -0,0 +1,79 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# pylint: disable=no-self-use, pointless-statement, missing-docstring, invalid-name +from ..match import Match +from ..rules import Rule, RemoveMatch, AppendMatch, RenameMatch, AppendTags, RemoveTags + + +class RuleRemove0(Rule): + consequence = RemoveMatch + def when(self, matches, context): + return matches[0] + + +class RuleAppend0(Rule): + consequence = AppendMatch() + def when(self, matches, context): + return Match(5, 10) + +class RuleRename0(Rule): + consequence = [RenameMatch('renamed')] + def when(self, matches, context): + return [Match(5, 10, name="original")] + +class RuleRemove1(Rule): + consequence = [RemoveMatch()] + def when(self, matches, context): + return [matches[0]] + +class RuleAppend1(Rule): + 
consequence = [AppendMatch] + def when(self, matches, context): + return [Match(5, 10)] + +class RuleRename1(Rule): + consequence = RenameMatch('renamed') + def when(self, matches, context): + return [Match(5, 10, name="original")] + +class RuleAppend2(Rule): + consequence = [AppendMatch('renamed')] + properties = {'renamed': [None]} + def when(self, matches, context): + return [Match(5, 10)] + +class RuleRename2(Rule): + consequence = RenameMatch('renamed') + def when(self, matches, context): + return Match(5, 10, name="original") + +class RuleAppend3(Rule): + consequence = AppendMatch('renamed') + properties = {'renamed': [None]} + def when(self, matches, context): + return [Match(5, 10)] + +class RuleRename3(Rule): + consequence = [RenameMatch('renamed')] + def when(self, matches, context): + return Match(5, 10, name="original") + +class RuleAppendTags0(Rule): + consequence = AppendTags(['new-tag']) + def when(self, matches, context): + return matches.named('tags', 0) + +class RuleRemoveTags0(Rule): + consequence = RemoveTags(['new-tag']) + def when(self, matches, context): + return matches.named('tags', 0) + +class RuleAppendTags1(Rule): + consequence = AppendTags(['new-tag']) + def when(self, matches, context): + return matches.named('tags') + +class RuleRemoveTags1(Rule): + consequence = RemoveTags(['new-tag']) + def when(self, matches, context): + return matches.named('tags') diff --git a/libs/rebulk/test/rebulk_rules_module.py b/libs/rebulk/test/rebulk_rules_module.py new file mode 100644 index 00000000..0bd5ef33 --- /dev/null +++ b/libs/rebulk/test/rebulk_rules_module.py @@ -0,0 +1,38 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# pylint: disable=no-self-use, pointless-statement, missing-docstring, invalid-name +from rebulk.rules import Rule, RemoveMatch, CustomRule + + +class RemoveAllButLastYear(Rule): + consequence = RemoveMatch + def when(self, matches, context): + entries = matches.named('year') + return entries[:-1] + + +class 
PrefixedSuffixedYear(CustomRule): + def when(self, matches, context): + toRemove = [] + years = matches.named('year') + for year in years: + if not matches.previous(year, lambda p: p.name == 'yearPrefix') and \ + not matches.next(year, lambda n: n.name == 'yearSuffix'): + toRemove.append(year) + return toRemove + + def then(self, matches, when_response, context): + for to_remove in when_response: + matches.remove(to_remove) + + +class PrefixedSuffixedYearNoLambda(Rule): + consequence = RemoveMatch + def when(self, matches, context): + toRemove = [] + years = matches.named('year') + for year in years: + if not [m for m in matches.previous(year) if m.name == 'yearPrefix'] and \ + not [m for m in matches.next(year) if m.name == 'yearSuffix']: + toRemove.append(year) + return toRemove diff --git a/libs/rebulk/test/rules_module.py b/libs/rebulk/test/rules_module.py new file mode 100644 index 00000000..887b81da --- /dev/null +++ b/libs/rebulk/test/rules_module.py @@ -0,0 +1,54 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# pylint: disable=no-self-use, pointless-statement, missing-docstring, invalid-name +from ..match import Match +from ..rules import Rule + + +class Rule3(Rule): + def when(self, matches, context): + return context.get('when') + + def then(self, matches, when_response, context): + assert when_response in [True, False] + matches.append(Match(3, 4)) + + +class Rule2(Rule): + dependency = Rule3 + + def when(self, matches, context): + return True + + def then(self, matches, when_response, context): + assert when_response + matches.append(Match(3, 4)) + + +class Rule1(Rule): + dependency = Rule2 + + def when(self, matches, context): + return True + + def then(self, matches, when_response, context): + assert when_response + matches.clear() + + +class Rule0(Rule): + dependency = Rule1 + + def when(self, matches, context): + return True + + def then(self, matches, when_response, context): + assert when_response + matches.append(Match(3, 4)) + + +class 
Rule1Disabled(Rule1): + name = "Disabled Rule1" + + def enabled(self, context): + return False diff --git a/libs/rebulk/test/test_chain.py b/libs/rebulk/test/test_chain.py new file mode 100644 index 00000000..8238ad63 --- /dev/null +++ b/libs/rebulk/test/test_chain.py @@ -0,0 +1,303 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# pylint: disable=no-self-use, pointless-statement, missing-docstring, no-member +import re + +from functools import partial + +from ..validators import chars_surround +from ..rebulk import Rebulk, FunctionalPattern, RePattern, StringPattern + + +def test_chain_close(): + rebulk = Rebulk() + ret = rebulk.chain().close() + + assert ret == rebulk + assert len(rebulk.effective_patterns()) == 1 + + +def test_build_chain(): + rebulk = Rebulk() + + def digit(input_string): + i = input_string.find("1849") + if i > -1: + return i, i + len("1849") + + ret = rebulk.chain() \ + .functional(digit) \ + .string("test").repeater(2) \ + .string("x").repeater('{1,3}') \ + .string("optional").repeater('?') \ + .regex("f?x").repeater('+') \ + .close() + + assert ret == rebulk + assert len(rebulk.effective_patterns()) == 1 + + chain = rebulk.effective_patterns()[0] + + assert len(chain.parts) == 5 + + assert isinstance(chain.parts[0].pattern, FunctionalPattern) + assert chain.parts[0].repeater_start == 1 + assert chain.parts[0].repeater_end == 1 + + assert isinstance(chain.parts[1].pattern, StringPattern) + assert chain.parts[1].repeater_start == 2 + assert chain.parts[1].repeater_end == 2 + + assert isinstance(chain.parts[2].pattern, StringPattern) + assert chain.parts[2].repeater_start == 1 + assert chain.parts[2].repeater_end == 3 + + assert isinstance(chain.parts[3].pattern, StringPattern) + assert chain.parts[3].repeater_start == 0 + assert chain.parts[3].repeater_end == 1 + + assert isinstance(chain.parts[4].pattern, RePattern) + assert chain.parts[4].repeater_start == 1 + assert chain.parts[4].repeater_end is None + + +def test_chain_defaults(): + 
rebulk = Rebulk() + rebulk.defaults(validator=lambda x: True, ignore_names=['testIgnore'], children=True) + + rebulk.chain()\ + .regex("(?Ptest)") \ + .regex(" ").repeater("*") \ + .regex("(?PtestIgnore)") + matches = rebulk.matches("test testIgnore") + + assert len(matches) == 1 + assert matches[0].name == "test" + + +def test_matches(): + rebulk = Rebulk() + + def digit(input_string): + i = input_string.find("1849") + if i > -1: + return i, i + len("1849") + + input_string = "1849testtestxxfixfux_foxabc1849testtestxoptionalfoxabc" + + chain = rebulk.chain() \ + .functional(digit) \ + .string("test").hidden().repeater(2) \ + .string("x").hidden().repeater('{1,3}') \ + .string("optional").hidden().repeater('?') \ + .regex("f.?x", name='result').repeater('+') \ + .close() + + matches = chain.matches(input_string) + + assert len(matches) == 2 + children = matches[0].children + + assert children[0].value == '1849' + assert children[1].value == 'fix' + assert children[2].value == 'fux' + + children = matches[1].children + assert children[0].value == '1849' + assert children[1].value == 'fox' + + input_string = "_1850testtestxoptionalfoxabc" + matches = chain.matches(input_string) + + assert len(matches) == 0 + + input_string = "_1849testtesttesttestxoptionalfoxabc" + matches = chain.matches(input_string) + + assert len(matches) == 0 + + input_string = "_1849testtestxxxxoptionalfoxabc" + matches = chain.matches(input_string) + + assert len(matches) == 0 + + input_string = "_1849testtestoptionalfoxabc" + matches = chain.matches(input_string) + + assert len(matches) == 0 + + input_string = "_1849testtestxoptionalabc" + matches = chain.matches(input_string) + + assert len(matches) == 0 + + input_string = "_1849testtestxoptionalfaxabc" + matches = chain.matches(input_string) + + assert len(matches) == 1 + children = matches[0].children + + assert children[0].value == '1849' + assert children[1].value == 'fax' + + +def test_matches_2(): + rebulk = Rebulk() \ + 
.regex_defaults(flags=re.IGNORECASE) \ + .chain(children=True, formatter={'episode': int}) \ + .defaults(formatter={'version': int}) \ + .regex(r'e(?P\d{1,4})') \ + .regex(r'v(?P\d+)').repeater('?') \ + .regex(r'[ex-](?P\d{1,4})').repeater('*') \ + .close() + + matches = rebulk.matches("This is E14v2-15E16x17") + assert len(matches) == 5 + + assert matches[0].name == 'episode' + assert matches[0].value == 14 + + assert matches[1].name == 'version' + assert matches[1].value == 2 + + assert matches[2].name == 'episode' + assert matches[2].value == 15 + + assert matches[3].name == 'episode' + assert matches[3].value == 16 + + assert matches[4].name == 'episode' + assert matches[4].value == 17 + + +def test_matches_3(): + alt_dash = (r'@', r'[\W_]') # abbreviation + + rebulk = Rebulk() + + rebulk.chain(formatter={'season': int, 'episode': int}, + tags=['SxxExx'], + abbreviations=[alt_dash], + private_names=['episodeSeparator', 'seasonSeparator'], + children=True, + private_parent=True, + conflict_solver=lambda match, other: match + if match.name in ['season', 'episode'] and other.name in + ['screen_size', 'video_codec', 'audio_codec', + 'audio_channels', 'container', 'date'] + else '__default__') \ + .regex(r'(?P\d+)@?x@?(?P\d+)') \ + .regex(r'(?Px|-|\+|&)(?P\d+)').repeater('*') \ + .chain() \ + .regex(r'S(?P\d+)@?(?:xE|Ex|E|x)@?(?P\d+)') \ + .regex(r'(?:(?PxE|Ex|E|x|-|\+|&)(?P\d+))').repeater('*') \ + .chain() \ + .regex(r'S(?P\d+)') \ + .regex(r'(?PS|-|\+|&)(?P\d+)').repeater('*') + + matches = rebulk.matches("test-01x02-03") + assert len(matches) == 3 + + assert matches[0].name == 'season' + assert matches[0].value == 1 + + assert matches[1].name == 'episode' + assert matches[1].value == 2 + + assert matches[2].name == 'episode' + assert matches[2].value == 3 + + matches = rebulk.matches("test-S01E02-03") + + assert len(matches) == 3 + assert matches[0].name == 'season' + assert matches[0].value == 1 + + assert matches[1].name == 'episode' + assert matches[1].value 
== 2 + + assert matches[2].name == 'episode' + assert matches[2].value == 3 + + matches = rebulk.matches("test-S01-02-03-04") + + assert len(matches) == 4 + assert matches[0].name == 'season' + assert matches[0].value == 1 + + assert matches[1].name == 'season' + assert matches[1].value == 2 + + assert matches[2].name == 'season' + assert matches[2].value == 3 + + assert matches[3].name == 'season' + assert matches[3].value == 4 + + +def test_matches_4(): + seps_surround = partial(chars_surround, " ") + + rebulk = Rebulk() + rebulk.regex_defaults(flags=re.IGNORECASE) + rebulk.defaults(private_names=['episodeSeparator', 'seasonSeparator'], validate_all=True, + validator={'__parent__': seps_surround}, children=True, private_parent=True) + + rebulk.chain(formatter={'episode': int, 'version': int}) \ + .defaults(validator=None) \ + .regex(r'e(?P\d{1,4})') \ + .regex(r'v(?P\d+)').repeater('?') \ + .regex(r'(?Pe|x|-)(?P\d{1,4})').repeater('*') + + matches = rebulk.matches("Some Series E01E02E03") + assert len(matches) == 3 + + assert matches[0].value == 1 + assert matches[1].value == 2 + assert matches[2].value == 3 + + +def test_matches_5(): + seps_surround = partial(chars_surround, " ") + + rebulk = Rebulk() + rebulk.regex_defaults(flags=re.IGNORECASE) + rebulk.defaults(private_names=['episodeSeparator', 'seasonSeparator'], validate_all=True, + validator={'__parent__': seps_surround}, children=True, private_parent=True) + + rebulk.chain(formatter={'episode': int, 'version': int}) \ + .defaults(validator=None) \ + .regex(r'e(?P\d{1,4})') \ + .regex(r'v(?P\d+)').repeater('?') \ + .regex(r'(?Pe|x|-)(?P\d{1,4})').repeater('{2,3}') + + matches = rebulk.matches("Some Series E01E02E03") + assert len(matches) == 3 + + matches = rebulk.matches("Some Series E01E02") + assert len(matches) == 0 + + matches = rebulk.matches("Some Series E01E02E03E04E05E06") # Parent can't be validated, so no results at all + assert len(matches) == 0 + + +def test_matches_6(): + rebulk = Rebulk() + 
rebulk.regex_defaults(flags=re.IGNORECASE) + rebulk.defaults(private_names=['episodeSeparator', 'seasonSeparator'], validate_all=True, + validator=None, children=True, private_parent=True) + + rebulk.chain(formatter={'episode': int, 'version': int}) \ + .defaults(validator=None) \ + .regex(r'e(?P\d{1,4})') \ + .regex(r'v(?P\d+)').repeater('?') \ + .regex(r'(?Pe|x|-)(?P\d{1,4})').repeater('{2,3}') + + matches = rebulk.matches("Some Series E01E02E03") + assert len(matches) == 3 + + matches = rebulk.matches("Some Series E01E02") + assert len(matches) == 0 + + matches = rebulk.matches("Some Series E01E02E03E04E05E06") # No validator on parent, so it should give 4 episodes. + assert len(matches) == 4 diff --git a/libs/rebulk/test/test_debug.py b/libs/rebulk/test/test_debug.py new file mode 100644 index 00000000..a35f95fd --- /dev/null +++ b/libs/rebulk/test/test_debug.py @@ -0,0 +1,83 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# pylint: disable=no-self-use, pointless-statement, missing-docstring, protected-access, invalid-name + +from ..pattern import StringPattern +from ..rebulk import Rebulk +from ..match import Match +from .. 
import debug +from .default_rules_module import RuleRemove0 + + +class TestDebug(object): + + + #request.addfinalizer(disable_debug) + + + + debug.DEBUG = True + pattern = StringPattern(1, 3, value="es") + + match = Match(1, 3, value="es") + rule = RuleRemove0() + + input_string = "This is a debug test" + rebulk = Rebulk().string("debug") \ + .string("is") + + matches = rebulk.matches(input_string) + debug.DEBUG = False + + @classmethod + def setup_class(cls): + debug.DEBUG = True + + @classmethod + def teardown_class(cls): + debug.DEBUG = False + + def test_pattern(self): + assert self.pattern.defined_at.lineno == 20 + assert self.pattern.defined_at.name == 'rebulk.test.test_debug' + assert self.pattern.defined_at.filename.endswith('test_debug.py') + + assert str(self.pattern.defined_at) == 'test_debug.py#L20' + assert repr(self.pattern) == '' + + def test_match(self): + assert self.match.defined_at.lineno == 22 + assert self.match.defined_at.name == 'rebulk.test.test_debug' + assert self.match.defined_at.filename.endswith('test_debug.py') + + assert str(self.match.defined_at) == 'test_debug.py#L22' + + def test_rule(self): + assert self.rule.defined_at.lineno == 23 + assert self.rule.defined_at.name == 'rebulk.test.test_debug' + assert self.rule.defined_at.filename.endswith('test_debug.py') + + assert str(self.rule.defined_at) == 'test_debug.py#L23' + assert repr(self.rule) == '' + + def test_rebulk(self): + """ + This test fails on travis CI, can't find out why there's 1 line offset ... 
+ """ + assert self.rebulk._patterns[0].defined_at.lineno in [26, 27] + assert self.rebulk._patterns[0].defined_at.name == 'rebulk.test.test_debug' + assert self.rebulk._patterns[0].defined_at.filename.endswith('test_debug.py') + + assert str(self.rebulk._patterns[0].defined_at) in ['test_debug.py#L26', 'test_debug.py#L27'] + + assert self.rebulk._patterns[1].defined_at.lineno in [27, 28] + assert self.rebulk._patterns[1].defined_at.name == 'rebulk.test.test_debug' + assert self.rebulk._patterns[1].defined_at.filename.endswith('test_debug.py') + + assert str(self.rebulk._patterns[1].defined_at) in ['test_debug.py#L27', 'test_debug.py#L28'] + + assert self.matches[0].defined_at == self.rebulk._patterns[0].defined_at + assert self.matches[1].defined_at == self.rebulk._patterns[1].defined_at + + def test_repr(self): + str(self.matches) diff --git a/libs/rebulk/test/test_introspector.py b/libs/rebulk/test/test_introspector.py new file mode 100644 index 00000000..24c0c500 --- /dev/null +++ b/libs/rebulk/test/test_introspector.py @@ -0,0 +1,138 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +Introspector tests +""" +# pylint: disable=no-self-use,pointless-statement,missing-docstring,protected-access,invalid-name +from ..rebulk import Rebulk +from .. 
import introspector +from .default_rules_module import RuleAppend2, RuleAppend3 + + +def test_string_introspector(): + rebulk = Rebulk().string('One', 'Two', 'Three', name='first').string('1', '2', '3', name='second') + + introspected = introspector.introspect(rebulk, None) + + assert len(introspected.patterns) == 2 + + first_properties = introspected.patterns[0].properties + assert len(first_properties) == 1 + first_properties['first'] == ['One', 'Two', 'Three'] + + second_properties = introspected.patterns[1].properties + assert len(second_properties) == 1 + second_properties['second'] == ['1', '2', '3'] + + properties = introspected.properties + assert len(properties) == 2 + assert properties['first'] == first_properties['first'] + assert properties['second'] == second_properties['second'] + + +def test_string_properties(): + rebulk = Rebulk()\ + .string('One', 'Two', 'Three', name='first', properties={'custom': ['One']})\ + .string('1', '2', '3', name='second', properties={'custom': [1]}) + + introspected = introspector.introspect(rebulk, None) + + assert len(introspected.patterns) == 2 + assert len(introspected.rules) == 2 + + first_properties = introspected.patterns[0].properties + assert len(first_properties) == 1 + first_properties['custom'] == ['One'] + + second_properties = introspected.patterns[1].properties + assert len(second_properties) == 1 + second_properties['custom'] == [1] + + properties = introspected.properties + assert len(properties) == 1 + assert properties['custom'] == ['One', 1] + + +def test_various_pattern(): + rebulk = Rebulk()\ + .regex('One', 'Two', 'Three', name='first', value="string") \ + .string('1', '2', '3', name='second', value="digit") \ + .string('4', '5', '6', name='third') \ + .string('private', private=True) \ + .functional(lambda string: (0, 5), name='func', value='test') \ + .regex('One', 'Two', 'Three', name='regex_name') \ + .regex('(?POne)(?PTwo)(?PThree)') \ + .functional(lambda string: (6, 10), name='func2') \ + 
.string('7', name='third') + + introspected = introspector.introspect(rebulk, None) + + assert len(introspected.patterns) == 8 + assert len(introspected.rules) == 2 + + first_properties = introspected.patterns[0].properties + assert len(first_properties) == 1 + first_properties['first'] == ['string'] + + second_properties = introspected.patterns[1].properties + assert len(second_properties) == 1 + second_properties['second'] == ['digit'] + + third_properties = introspected.patterns[2].properties + assert len(third_properties) == 1 + third_properties['third'] == ['4', '5', '6'] + + func_properties = introspected.patterns[3].properties + assert len(func_properties) == 1 + func_properties['func'] == ['test'] + + regex_name_properties = introspected.patterns[4].properties + assert len(regex_name_properties) == 1 + regex_name_properties['regex_name'] == [None] + + regex_groups_properties = introspected.patterns[5].properties + assert len(regex_groups_properties) == 3 + regex_groups_properties['one'] == [None] + regex_groups_properties['two'] == [None] + regex_groups_properties['three'] == [None] + + func2_properties = introspected.patterns[6].properties + assert len(func2_properties) == 1 + func2_properties['func2'] == [None] + + append_third_properties = introspected.patterns[7].properties + assert len(append_third_properties) == 1 + append_third_properties['third'] == [None] + + properties = introspected.properties + assert len(properties) == 9 + assert properties['first'] == first_properties['first'] + assert properties['second'] == second_properties['second'] + assert properties['third'] == third_properties['third'] + append_third_properties['third'] + assert properties['func'] == func_properties['func'] + assert properties['regex_name'] == regex_name_properties['regex_name'] + assert properties['one'] == regex_groups_properties['one'] + assert properties['two'] == regex_groups_properties['two'] + assert properties['three'] == regex_groups_properties['three'] + 
assert properties['func2'] == func2_properties['func2'] + + +def test_rule_properties(): + rebulk = Rebulk(default_rules=False).rules(RuleAppend2, RuleAppend3) + + introspected = introspector.introspect(rebulk, None) + + assert len(introspected.rules) == 2 + assert len(introspected.patterns) == 0 + + rule_properties = introspected.rules[0].properties + assert len(rule_properties) == 1 + assert rule_properties['renamed'] == [None] + + rule_properties = introspected.rules[1].properties + assert len(rule_properties) == 1 + assert rule_properties['renamed'] == [None] + + properties = introspected.properties + assert len(properties) == 1 + assert properties['renamed'] == [None] diff --git a/libs/rebulk/test/test_loose.py b/libs/rebulk/test/test_loose.py new file mode 100644 index 00000000..bc0c6bca --- /dev/null +++ b/libs/rebulk/test/test_loose.py @@ -0,0 +1,83 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# pylint: disable=no-self-use, pointless-statement, missing-docstring, invalid-name + +from ..loose import call + + +def test_loose_function(): + + def func(v1, v2, v3=3, v4=4): + return v1 + v2 + v3 + v4 + + assert call(func, 1, 2) == func(1, 2) + assert call(func, 1, 2, 3, 5) == func(1, 2, 3, 5) + assert call(func, 1, 2, v3=4, v4=5) == func(1, 2, v3=4, v4=5) + assert call(func, 1, 2, 3, 4, 5) == func(1, 2, 3, 4) + assert call(func, 1, 2, 3, 4, more=5) == func(1, 2, 3, 4) + + +def test_loose_varargs_function(): + def func(v1, v2, *args): + return v1 + v2 + args[0] if len(args) > 0 else 3 + args[1] if len(args) > 1 else 4 + + assert call(func, 1, 2) == func(1, 2) + assert call(func, 1, 2, 3, 5) == func(1, 2, 3, 5) + assert call(func, 1, 2, 3, 4, 5) == func(1, 2, 3, 4) + + +def test_loose_kwargs_function(): + def func(v1, v2, **kwargs): + return v1 + v2 + kwargs.get('v3', 3) + kwargs.get('v4', 4) + + assert call(func, v1=1, v2=2) == func(v1=1, v2=2) + assert call(func, v1=1, v2=2, v3=3, v4=5) == func(v1=1, v2=2, v3=3, v4=5) + + +def test_loose_class(): + class 
Dummy(object): + def __init__(self, v1, v2, v3=3, v4=4): + self.v1 = v1 + self.v2 = v2 + self.v3 = v3 + self.v4 = v4 + + def call(self): + return self.v1 + self.v2 + self.v3 + self.v4 + + assert call(Dummy, 1, 2).call() == Dummy(1, 2).call() + assert call(Dummy, 1, 2, 3, 5).call() == Dummy(1, 2, 3, 5).call() + assert call(Dummy, 1, 2, v3=4, v4=5).call() == Dummy(1, 2, v3=4, v4=5).call() + assert call(Dummy, 1, 2, 3, 4, 5).call() == Dummy(1, 2, 3, 4).call() + assert call(Dummy, 1, 2, 3, 4, more=5).call() == Dummy(1, 2, 3, 4).call() + + +def test_loose_varargs_class(): + class Dummy(object): + def __init__(self, v1, v2, *args): + self.v1 = v1 + self.v2 = v2 + self.v3 = args[0] if len(args) > 0 else 3 + self.v4 = args[1] if len(args) > 1 else 4 + + def call(self): + return self.v1 + self.v2 + self.v3 + self.v4 + + assert call(Dummy, 1, 2).call() == Dummy(1, 2).call() + assert call(Dummy, 1, 2, 3, 5).call() == Dummy(1, 2, 3, 5).call() + assert call(Dummy, 1, 2, 3, 4, 5).call() == Dummy(1, 2, 3, 4).call() + + +def test_loose_kwargs_class(): + class Dummy(object): + def __init__(self, v1, v2, **kwargs): + self.v1 = v1 + self.v2 = v2 + self.v3 = kwargs.get('v3', 3) + self.v4 = kwargs.get('v4', 4) + + def call(self): + return self.v1 + self.v2 + self.v3 + self.v4 + + assert call(Dummy, v1=1, v2=2).call() == Dummy(v1=1, v2=2).call() + assert call(Dummy, v1=1, v2=2, v3=3, v4=5).call() == Dummy(v1=1, v2=2, v3=3, v4=5).call() diff --git a/libs/rebulk/test/test_match.py b/libs/rebulk/test/test_match.py new file mode 100644 index 00000000..efbc63d0 --- /dev/null +++ b/libs/rebulk/test/test_match.py @@ -0,0 +1,565 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# pylint: disable=no-self-use, pointless-statement, missing-docstring, unneeded-not + +import pytest +import six + +from ..match import Match, Matches +from ..pattern import StringPattern, RePattern +from ..formatters import formatters + + +class TestMatchClass(object): + def test_repr(self): + match1 = Match(1, 3, 
value="es") + + assert repr(match1) == '' + + match2 = Match(0, 4, value="test", private=True, name="abc", tags=['one', 'two']) + + assert repr(match2) == '' + + def test_names(self): + parent = Match(0, 10, name="test") + parent.children.append(Match(0, 10, name="child1", parent=parent)) + parent.children.append(Match(0, 10, name="child2", parent=parent)) + + assert set(parent.names) == set(["child1", "child2"]) + + def test_equality(self): + match1 = Match(1, 3, value="es") + match2 = Match(1, 3, value="es") + + other = object() + + assert hash(match1) == hash(match2) + assert hash(match1) != hash(other) + + assert match1 == match2 + assert not match1 == other + + def test_inequality(self): + match1 = Match(0, 2, value="te") + match2 = Match(2, 4, value="st") + match3 = Match(0, 2, value="other") + + other = object() + + assert hash(match1) != hash(match2) + assert hash(match1) != hash(match3) + + assert match1 != other + assert match1 != match2 + assert match1 != match3 + + def test_length(self): + match1 = Match(0, 4, value="test") + match2 = Match(0, 2, value="spanIsUsed") + + assert len(match1) == 4 + assert len(match2) == 2 + + def test_compare(self): + match1 = Match(0, 2, value="te") + match2 = Match(2, 4, value="st") + + other = object() + + assert match1 < match2 + assert match1 <= match2 + + assert match2 > match1 + assert match2 >= match1 + + if six.PY3: + with pytest.raises(TypeError): + match1 < other + + with pytest.raises(TypeError): + match1 <= other + + with pytest.raises(TypeError): + match1 > other + + with pytest.raises(TypeError): + match1 >= other + else: + assert match1 < other + assert match1 <= other + assert not match1 > other + assert not match1 >= other + + def test_value(self): + match1 = Match(1, 3) + match1.value = "test" + + assert match1.value == "test" + + +class TestMatchesClass(object): + match1 = Match(0, 2, value="te", name="start") + match2 = Match(2, 3, value="s", tags="tag1") + match3 = Match(3, 4, value="t", tags=["tag1", 
"tag2"]) + match4 = Match(2, 4, value="st", name="end") + + def test_tag(self): + matches = Matches() + matches.append(self.match1) + matches.append(self.match2) + matches.append(self.match3) + matches.append(self.match4) + + assert "start" in matches.names + assert "end" in matches.names + + assert "tag1" in matches.tags + assert "tag2" in matches.tags + + tag1 = matches.tagged("tag1") + assert len(tag1) == 2 + assert tag1[0] == self.match2 + assert tag1[1] == self.match3 + + tag2 = matches.tagged("tag2") + assert len(tag2) == 1 + assert tag2[0] == self.match3 + + start = matches.named("start") + assert len(start) == 1 + assert start[0] == self.match1 + + end = matches.named("end") + assert len(end) == 1 + assert end[0] == self.match4 + + def test_base(self): + matches = Matches() + matches.append(self.match1) + + assert len(matches) == 1 + assert repr(matches) == repr([self.match1]) + assert list(matches.starting(0)) == [self.match1] + assert list(matches.ending(2)) == [self.match1] + + matches.append(self.match2) + matches.append(self.match3) + matches.append(self.match4) + + assert len(matches) == 4 + assert list(matches.starting(2)) == [self.match2, self.match4] + assert list(matches.starting(3)) == [self.match3] + assert list(matches.ending(3)) == [self.match2] + assert list(matches.ending(4)) == [self.match3, self.match4] + assert list(matches.range()) == [self.match1, self.match2, self.match4, self.match3] + assert list(matches.range(0)) == [self.match1, self.match2, self.match4, self.match3] + assert list(matches.range(0, 3)) == [self.match1, self.match2, self.match4] + assert list(matches.range(2, 3)) == [self.match2, self.match4] + assert list(matches.range(3, 4)) == [self.match4, self.match3] + + matches.remove(self.match1) + assert len(matches) == 3 + assert len(matches.starting(0)) == 0 + assert len(matches.ending(2)) == 0 + + matches.clear() + + assert len(matches) == 0 + assert len(matches.starting(0)) == 0 + assert len(matches.starting(2)) == 0 + 
assert len(matches.starting(3)) == 0 + assert len(matches.ending(2)) == 0 + assert len(matches.ending(3)) == 0 + assert len(matches.ending(4)) == 0 + + def test_get_slices(self): + matches = Matches() + matches.append(self.match1) + matches.append(self.match2) + matches.append(self.match3) + matches.append(self.match4) + + slice_matches = matches[1:3] + + assert isinstance(slice_matches, Matches) + + assert len(slice_matches) == 2 + assert slice_matches[0] == self.match2 + assert slice_matches[1] == self.match3 + + def test_remove_slices(self): + matches = Matches() + matches.append(self.match1) + matches.append(self.match2) + matches.append(self.match3) + matches.append(self.match4) + + del matches[1:3] + + assert len(matches) == 2 + assert matches[0] == self.match1 + assert matches[1] == self.match4 + + def test_set_slices(self): + matches = Matches() + matches.append(self.match1) + matches.append(self.match2) + matches.append(self.match3) + matches.append(self.match4) + + matches[1:3] = self.match1, self.match4 + + assert len(matches) == 4 + assert matches[0] == self.match1 + assert matches[1] == self.match1 + assert matches[2] == self.match4 + assert matches[3] == self.match4 + + def test_set_index(self): + matches = Matches() + matches.append(self.match1) + matches.append(self.match2) + matches.append(self.match3) + + matches[1] = self.match4 + + assert len(matches) == 3 + assert matches[0] == self.match1 + assert matches[1] == self.match4 + assert matches[2] == self.match3 + + def test_constructor(self): + matches = Matches([self.match1, self.match2, self.match3, self.match4]) + + assert len(matches) == 4 + assert list(matches.starting(0)) == [self.match1] + assert list(matches.ending(2)) == [self.match1] + assert list(matches.starting(2)) == [self.match2, self.match4] + assert list(matches.starting(3)) == [self.match3] + assert list(matches.ending(3)) == [self.match2] + assert list(matches.ending(4)) == [self.match3, self.match4] + + def 
test_constructor_kwargs(self): + matches = Matches([self.match1, self.match2, self.match3, self.match4], input_string="test") + + assert len(matches) == 4 + assert matches.input_string == "test" + assert list(matches.starting(0)) == [self.match1] + assert list(matches.ending(2)) == [self.match1] + assert list(matches.starting(2)) == [self.match2, self.match4] + assert list(matches.starting(3)) == [self.match3] + assert list(matches.ending(3)) == [self.match2] + assert list(matches.ending(4)) == [self.match3, self.match4] + + def test_crop(self): + input_string = "abcdefghijklmnopqrstuvwxyz" + + match1 = Match(1, 10, input_string=input_string) + match2 = Match(0, 2, input_string=input_string) + match3 = Match(8, 15, input_string=input_string) + + ret = match1.crop([match2, match3.span]) + + assert len(ret) == 1 + + assert ret[0].span == (2, 8) + assert ret[0].value == "cdefgh" + + ret = match1.crop((1, 10)) + assert len(ret) == 0 + + ret = match1.crop((1, 3)) + assert len(ret) == 1 + assert ret[0].span == (3, 10) + + ret = match1.crop((7, 10)) + assert len(ret) == 1 + assert ret[0].span == (1, 7) + + ret = match1.crop((0, 12)) + assert len(ret) == 0 + + ret = match1.crop((4, 6)) + assert len(ret) == 2 + + assert ret[0].span == (1, 4) + assert ret[1].span == (6, 10) + + ret = match1.crop([(3, 5), (7, 9)]) + assert len(ret) == 3 + + assert ret[0].span == (1, 3) + assert ret[1].span == (5, 7) + assert ret[2].span == (9, 10) + + def test_split(self): + input_string = "123 +word1 - word2 + word3 456" + match = Match(3, len(input_string) - 3, input_string=input_string) + splitted = match.split(" -+") + + assert len(splitted) == 3 + assert [split.value for split in splitted] == ["word1", "word2", "word3"] + + +class TestMaches(object): + def test_names(self): + input_string = "One Two Three" + + matches = Matches() + + matches.extend(StringPattern("One", name="1-str", tags=["One", "str"]).matches(input_string)) + matches.extend(RePattern("One", name="1-re", tags=["One", 
"re"]).matches(input_string)) + matches.extend(StringPattern("Two", name="2-str", tags=["Two", "str"]).matches(input_string)) + matches.extend(RePattern("Two", name="2-re", tags=["Two", "re"]).matches(input_string)) + matches.extend(StringPattern("Three", name="3-str", tags=["Three", "str"]).matches(input_string)) + matches.extend(RePattern("Three", name="3-re", tags=["Three", "re"]).matches(input_string)) + + assert set(matches.names) == set(["1-str", "1-re", "2-str", "2-re", "3-str", "3-re"]) + + def test_filters(self): + input_string = "One Two Three" + + matches = Matches() + + matches.extend(StringPattern("One", name="1-str", tags=["One", "str"]).matches(input_string)) + matches.extend(RePattern("One", name="1-re", tags=["One", "re"]).matches(input_string)) + matches.extend(StringPattern("Two", name="2-str", tags=["Two", "str"]).matches(input_string)) + matches.extend(RePattern("Two", name="2-re", tags=["Two", "re"]).matches(input_string)) + matches.extend(StringPattern("Three", name="3-str", tags=["Three", "str"]).matches(input_string)) + matches.extend(RePattern("Three", name="3-re", tags=["Three", "re"]).matches(input_string)) + + selection = matches.starting(0) + assert len(selection) == 2 + + selection = matches.starting(0, lambda m: "str" in m.tags) + assert len(selection) == 1 + assert selection[0].pattern.name == "1-str" + + selection = matches.ending(7, predicate=lambda m: "str" in m.tags) + assert len(selection) == 1 + assert selection[0].pattern.name == "2-str" + + selection = matches.previous(matches.named("2-str")[0]) + assert len(selection) == 2 + assert selection[0].pattern.name == "1-str" + assert selection[1].pattern.name == "1-re" + + selection = matches.previous(matches.named("2-str", 0), lambda m: "str" in m.tags) + assert len(selection) == 1 + assert selection[0].pattern.name == "1-str" + + selection = matches.next(matches.named("2-str", 0)) + assert len(selection) == 2 + assert selection[0].pattern.name == "3-str" + assert 
selection[1].pattern.name == "3-re" + + selection = matches.next(matches.named("2-str", 0), index=0, predicate=lambda m: "re" in m.tags) + assert selection is not None + assert selection.pattern.name == "3-re" + + selection = matches.next(matches.named("2-str", index=0), lambda m: "re" in m.tags) + assert len(selection) == 1 + assert selection[0].pattern.name == "3-re" + + selection = matches.named("2-str", lambda m: "re" in m.tags) + assert len(selection) == 0 + + selection = matches.named("2-re", lambda m: "re" in m.tags, 0) + assert selection is not None + assert selection.name == "2-re" # pylint:disable=no-member + + selection = matches.named("2-re", lambda m: "re" in m.tags) + assert len(selection) == 1 + assert selection[0].name == "2-re" + + selection = matches.named("2-re", lambda m: "re" in m.tags, index=1000) + assert selection is None + + def test_raw(self): + input_string = "0123456789" + + match = Match(0, 10, input_string=input_string, formatter=lambda s: s*2) + + assert match.value == match.raw * 2 + assert match.raw == input_string + + match.raw_end = 9 + match.raw_start = 1 + + assert match.value == match.raw * 2 + assert match.raw == input_string[1:9] + + match.raw_end = None + match.raw_start = None + + assert match.value == match.raw * 2 + assert match.raw == input_string + + + def test_formatter_chain(self): + input_string = "100" + + match = Match(0, 3, input_string=input_string, formatter=formatters(int, lambda s: s*2, lambda s: s+10)) + + assert match.raw == input_string + assert match.value == 100 * 2 + 10 + + + def test_to_dict(self): + input_string = "One Two Two Three" + + matches = Matches() + + matches.extend(StringPattern("One", name="1", tags=["One", "str"]).matches(input_string)) + matches.extend(RePattern("One", name="1", tags=["One", "re"]).matches(input_string)) + matches.extend(StringPattern("Two", name="2", tags=["Two", "str"]).matches(input_string)) + matches.extend(RePattern("Two", name="2", tags=["Two", 
"re"]).matches(input_string)) + matches.extend(RePattern("Two", name="2", tags=["Two", "reBis"]).matches(input_string)) + matches.extend(StringPattern("Three", name="3", tags=["Three", "str"]).matches(input_string)) + matches.extend(RePattern("Three", name="3bis", tags=["Three", "re"]).matches(input_string)) + matches.extend(RePattern(r"(\w+)", name="words").matches(input_string)) + + kvalues = matches.to_dict() + assert kvalues == {"1": "One", + "2": "Two", + "3": "Three", + "3bis": "Three", + "words": "One"} + assert kvalues.values_list["words"] == ["One", "Two", "Three"] + + kvalues = matches.to_dict(details=True, implicit=True) + assert kvalues["1"].value == "One" + + assert len(kvalues["2"]) == 2 + assert kvalues["2"][0].value == "Two" + assert kvalues["2"][1].value == "Two" + + assert kvalues["3"].value == "Three" + assert kvalues["3bis"].value == "Three" + + assert len(kvalues["words"]) == 4 + assert kvalues["words"][0].value == "One" + assert kvalues["words"][1].value == "Two" + assert kvalues["words"][2].value == "Two" + assert kvalues["words"][3].value == "Three" + + kvalues = matches.to_dict(details=True) + assert kvalues["1"].value == "One" + + assert len(kvalues.values_list["2"]) == 2 + assert kvalues.values_list["2"][0].value == "Two" + assert kvalues.values_list["2"][1].value == "Two" + + assert kvalues["3"].value == "Three" + assert kvalues["3bis"].value == "Three" + + assert len(kvalues.values_list["words"]) == 4 + assert kvalues.values_list["words"][0].value == "One" + assert kvalues.values_list["words"][1].value == "Two" + assert kvalues.values_list["words"][2].value == "Two" + assert kvalues.values_list["words"][3].value == "Three" + + def test_chains(self): + input_string = "wordX 10 20 30 40 wordA, wordB, wordC 70 80 wordX" + + matches = Matches(input_string=input_string) + + matches.extend(RePattern(r"\d+", name="digit").matches(input_string)) + matches.extend(RePattern("[a-zA-Z]+", name="word").matches(input_string)) + + assert len(matches) 
== 11 + + a_start = input_string.find('wordA') + + b_start = input_string.find('wordB') + b_end = b_start + len('wordB') + + c_start = input_string.find('wordC') + c_end = c_start + len('wordC') + + chain_before = matches.chain_before(b_start, " ,", predicate=lambda match: match.name == "word") + assert len(chain_before) == 1 + assert chain_before[0].value == 'wordA' + + chain_before = matches.chain_before(Match(b_start, b_start), " ,", predicate=lambda match: match.name == "word") + assert len(chain_before) == 1 + assert chain_before[0].value == 'wordA' + + chain_before = matches.chain_before(b_start, " ,", predicate=lambda match: match.name == "digit") + assert len(chain_before) == 0 + + chain_before = matches.chain_before(a_start, " ,", predicate=lambda match: match.name == "digit") + assert len(chain_before) == 4 + assert [match.value for match in chain_before] == ["40", "30", "20", "10"] + + chain_after = matches.chain_after(b_end, " ,", predicate=lambda match: match.name == "word") + assert len(chain_after) == 1 + assert chain_after[0].value == 'wordC' + + chain_after = matches.chain_after(Match(b_end, b_end), " ,", predicate=lambda match: match.name == "word") + assert len(chain_after) == 1 + assert chain_after[0].value == 'wordC' + + chain_after = matches.chain_after(b_end, " ,", predicate=lambda match: match.name == "digit") + assert len(chain_after) == 0 + + chain_after = matches.chain_after(c_end, " ,", predicate=lambda match: match.name == "digit") + assert len(chain_after) == 2 + assert [match.value for match in chain_after] == ["70", "80"] + + chain_after = matches.chain_after(c_end, " ,", end=10000, predicate=lambda match: match.name == "digit") + assert len(chain_after) == 2 + assert [match.value for match in chain_after] == ["70", "80"] + + def test_holes(self): + input_string = '1'*10+'2'*10+'3'*10+'4'*10+'5'*10+'6'*10+'7'*10 + + hole1 = Match(0, 10, input_string=input_string) + hole2 = Match(20, 30, input_string=input_string) + hole3 = Match(30, 
40, input_string=input_string) + hole4 = Match(60, 70, input_string=input_string) + + matches = Matches([hole1, hole2], input_string=input_string) + matches.append(hole3) + matches.append(hole4) + + holes = list(matches.holes()) + assert len(holes) == 2 + assert holes[0].span == (10, 20) + assert holes[0].value == '2'*10 + assert holes[1].span == (40, 60) + assert holes[1].value == '5' * 10 + '6' * 10 + + holes = list(matches.holes(5, 15)) + assert len(holes) == 1 + assert holes[0].span == (10, 15) + assert holes[0].value == '2'*5 + + holes = list(matches.holes(5, 15, formatter=lambda value: "formatted")) + assert len(holes) == 1 + assert holes[0].span == (10, 15) + assert holes[0].value == "formatted" + + holes = list(matches.holes(5, 15, predicate=lambda hole: False)) + assert len(holes) == 0 + + def test_holes_empty(self): + input_string = "Test hole on empty matches" + matches = Matches(input_string=input_string) + holes = matches.holes() + assert len(holes) == 1 + assert holes[0].value == input_string + + def test_holes_seps(self): + input_string = "Test hole - with many separators + included" + match = StringPattern("many").matches(input_string) + + matches = Matches(match, input_string) + holes = matches.holes() + + assert len(holes) == 2 + + holes = matches.holes(seps="-+") + + assert len(holes) == 4 + assert [hole.value for hole in holes] == ["Test hole ", " with ", " separators ", " included"] diff --git a/libs/rebulk/test/test_pattern.py b/libs/rebulk/test/test_pattern.py new file mode 100644 index 00000000..fadca5f2 --- /dev/null +++ b/libs/rebulk/test/test_pattern.py @@ -0,0 +1,848 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# pylint: disable=no-self-use, pointless-statement, missing-docstring, unbalanced-tuple-unpacking + +import re +import pytest + +from ..pattern import StringPattern, RePattern, FunctionalPattern, REGEX_AVAILABLE +from ..match import Match + +class TestStringPattern(object): + """ + Tests for StringPattern matching + """ + + 
input_string = "An Abyssinian fly playing a Celtic violin was annoyed by trashy flags on " \ + "which were the Hebrew letter qoph." + + def test_single(self): + pattern = StringPattern("Celtic") + + matches = list(pattern.matches(self.input_string)) + assert len(matches) == 1 + assert isinstance(matches[0], Match) + assert matches[0].pattern == pattern + assert matches[0].span == (28, 34) + assert matches[0].value == "Celtic" + + def test_repr(self): + pattern = StringPattern("Celtic") + + assert repr(pattern) == '' + + def test_ignore_case(self): + pattern = StringPattern("celtic", ignore_case=False) + + matches = list(pattern.matches(self.input_string)) + assert len(matches) == 0 + + pattern = StringPattern("celtic", ignore_case=True) + + matches = list(pattern.matches(self.input_string)) + assert len(matches) == 1 + assert matches[0].value == "Celtic" + + def test_private_names(self): + pattern = StringPattern("celtic", name="test", private_names=["test"], ignore_case=True) + + matches = list(pattern.matches(self.input_string)) + assert len(matches) == 1 + assert matches[0].private + + def test_ignore_names(self): + pattern = StringPattern("celtic", name="test", ignore_names=["test"], ignore_case=True) + + matches = list(pattern.matches(self.input_string)) + assert len(matches) == 0 + + def test_no_match(self): + pattern = StringPattern("Python") + + matches = list(pattern.matches(self.input_string)) + assert not matches + + def test_multiple_patterns(self): + pattern = StringPattern("playing", "annoyed", "Hebrew") + + matches = list(pattern.matches(self.input_string)) + assert len(matches) == 3 + + assert isinstance(matches[0], Match) + assert matches[0].pattern == pattern + assert matches[0].span == (18, 25) + assert matches[0].value == "playing" + + assert isinstance(matches[1], Match) + assert matches[1].pattern == pattern + assert matches[1].span == (46, 53) + assert matches[1].value == "annoyed" + + assert isinstance(matches[2], Match) + assert 
matches[2].pattern == pattern + assert matches[2].span == (88, 94) + assert matches[2].value == "Hebrew" + + def test_start_end_kwargs(self): + pattern = StringPattern("Abyssinian", start=20, end=40) + matches = list(pattern.matches(self.input_string)) + + assert len(matches) == 0 + + def test_matches_kwargs(self): + pattern = StringPattern("Abyssinian", name="test", value="AB") + matches = list(pattern.matches(self.input_string)) + + assert len(matches) == 1 + assert matches[0].name == "test" + assert matches[0].value == "AB" + + +class TestRePattern(object): + """ + Tests for RePattern matching + """ + + input_string = "An Abyssinian fly playing a Celtic violin was annoyed by trashy flags on " \ + "which were the Hebrew letter qoph." + + def test_single_compiled(self): + pattern = RePattern(re.compile("Celt.?c")) + + matches = list(pattern.matches(self.input_string)) + assert len(matches) == 1 + assert isinstance(matches[0], Match) + assert matches[0].pattern == pattern + assert matches[0].span == (28, 34) + assert matches[0].value == "Celtic" + + def test_single_string(self): + pattern = RePattern("Celt.?c") + + matches = list(pattern.matches(self.input_string)) + assert len(matches) == 1 + assert isinstance(matches[0], Match) + assert matches[0].pattern == pattern + assert matches[0].span == (28, 34) + assert matches[0].value == "Celtic" + + def test_single_kwargs(self): + pattern = RePattern({"pattern": "celt.?c", "flags": re.IGNORECASE}) + + matches = list(pattern.matches(self.input_string)) + assert len(matches) == 1 + assert isinstance(matches[0], Match) + assert matches[0].pattern == pattern + assert matches[0].span == (28, 34) + assert matches[0].value == "Celtic" + + def test_single_vargs(self): + pattern = RePattern(("celt.?c", re.IGNORECASE)) + + matches = list(pattern.matches(self.input_string)) + assert len(matches) == 1 + assert isinstance(matches[0], Match) + assert matches[0].pattern == pattern + assert matches[0].span == (28, 34) + assert 
matches[0].value == "Celtic" + + def test_no_match(self): + pattern = RePattern("abc.?def") + + matches = list(pattern.matches(self.input_string)) + assert len(matches) == 0 + + def test_shortcuts(self): + pattern = RePattern("Celtic-violin", abbreviations=[("-", r"[\W_]+")]) + + matches = list(pattern.matches(self.input_string)) + assert len(matches) == 1 + + pattern = RePattern({"pattern": "celtic-violin", "flags": re.IGNORECASE}, abbreviations=[("-", r"[\W_]+")]) + + matches = list(pattern.matches(self.input_string)) + assert len(matches) == 1 + + def test_multiple_patterns(self): + pattern = RePattern("pla.?ing", "ann.?yed", "Heb.?ew") + + matches = list(pattern.matches(self.input_string)) + assert len(matches) == 3 + + assert isinstance(matches[0], Match) + assert matches[0].pattern == pattern + assert matches[0].span == (18, 25) + assert matches[0].value == "playing" + + assert isinstance(matches[1], Match) + assert matches[1].pattern == pattern + assert matches[1].span == (46, 53) + assert matches[1].value == "annoyed" + + assert isinstance(matches[2], Match) + assert matches[2].pattern == pattern + assert matches[2].span == (88, 94) + assert matches[2].value == "Hebrew" + + def test_unnamed_groups(self): + pattern = RePattern(r"(Celt.?c)\s+(\w+)") + + matches = list(pattern.matches(self.input_string)) + assert len(matches) == 1 + + parent = matches[0] + + assert isinstance(parent, Match) + assert parent.pattern == pattern + assert parent.span == (28, 41) + assert parent.name is None + assert parent.value == "Celtic violin" + + assert len(parent.children) == 2 + + group1, group2 = parent.children + + assert isinstance(group1, Match) + assert group1.pattern == pattern + assert group1.span == (28, 34) + assert group1.name is None + assert group1.value == "Celtic" + assert group1.parent == parent + + assert isinstance(group2, Match) + assert group2.pattern == pattern + assert group2.span == (35, 41) + assert group2.name is None + assert group2.value == "violin" 
+ assert group2.parent == parent + + def test_named_groups(self): + pattern = RePattern(r"(?PCelt.?c)\s+(?P\w+)") + + matches = list(pattern.matches(self.input_string)) + assert len(matches) == 1 + + parent = matches[0] + + assert isinstance(parent, Match) + assert parent.pattern == pattern + assert parent.span == (28, 41) + assert parent.name is None + assert parent.value == "Celtic violin" + + assert len(parent.children) == 2 + group1, group2 = parent.children + + assert isinstance(group1, Match) + assert group1.pattern == pattern + assert group1.span == (28, 34) + assert group1.name == "param1" + assert group1.value == "Celtic" + assert group1.parent == parent + + assert isinstance(group2, Match) + assert group2.pattern == pattern + assert group2.span == (35, 41) + assert group2.name == "param2" + assert group2.value == "violin" + assert group2.parent == parent + + def test_children(self): + pattern = RePattern(r"(?PCelt.?c)\s+(?P\w+)", children=True) + + matches = list(pattern.matches(self.input_string)) + assert len(matches) == 2 + group1, group2 = matches + + assert isinstance(group1, Match) + assert group1.pattern == pattern + assert group1.span == (28, 34) + assert group1.name == "param1" + assert group1.value == "Celtic" + + assert isinstance(group2, Match) + assert group2.pattern == pattern + assert group2.span == (35, 41) + assert group2.name == "param2" + assert group2.value == "violin" + + def test_children_parent_private(self): + pattern = RePattern(r"(?PCelt.?c)\s+(?P\w+)", children=True, private_parent=True) + + matches = list(pattern.matches(self.input_string)) + assert len(matches) == 3 + parent, group1, group2 = matches + + assert isinstance(group1, Match) + assert parent.private + assert parent.pattern == pattern + assert parent.span == (28, 41) + assert parent.name is None + assert parent.value == "Celtic violin" + + assert isinstance(group1, Match) + assert not group1.private + assert group1.pattern == pattern + assert group1.span == (28, 34) 
+ assert group1.name == "param1" + assert group1.value == "Celtic" + + assert isinstance(group2, Match) + assert not group2.private + assert group2.pattern == pattern + assert group2.span == (35, 41) + assert group2.name == "param2" + assert group2.value == "violin" + + def test_parent_children_private(self): + pattern = RePattern(r"(?PCelt.?c)\s+(?P\w+)", private_children=True) + + matches = list(pattern.matches(self.input_string)) + assert len(matches) == 3 + parent, group1, group2 = matches + + assert isinstance(group1, Match) + assert not parent.private + assert parent.pattern == pattern + assert parent.span == (28, 41) + assert parent.name is None + assert parent.value == "Celtic violin" + + assert isinstance(group1, Match) + assert group1.private + assert group1.pattern == pattern + assert group1.span == (28, 34) + assert group1.name == "param1" + assert group1.value == "Celtic" + + assert isinstance(group2, Match) + assert group2.private + assert group2.pattern == pattern + assert group2.span == (35, 41) + assert group2.name == "param2" + assert group2.value == "violin" + + def test_every(self): + pattern = RePattern(r"(?PCelt.?c)\s+(?P\w+)", every=True) + + matches = list(pattern.matches(self.input_string)) + assert len(matches) == 3 + parent, group1, group2 = matches + + assert isinstance(group1, Match) + assert not parent.private + assert parent.pattern == pattern + assert parent.span == (28, 41) + assert parent.name is None + assert parent.value == "Celtic violin" + + assert isinstance(group1, Match) + assert not group1.private + assert group1.pattern == pattern + assert group1.span == (28, 34) + assert group1.name == "param1" + assert group1.value == "Celtic" + + assert isinstance(group2, Match) + assert not group2.private + assert group2.pattern == pattern + assert group2.span == (35, 41) + assert group2.name == "param2" + assert group2.value == "violin" + + def test_private_names(self): + pattern = RePattern(r"(?PCelt.?c)\s+(?P\w+)", 
private_names=["param2"], children=True) + + matches = list(pattern.matches(self.input_string)) + assert len(matches) == 2 + assert matches[0].name == "param1" + assert not matches[0].private + assert matches[1].name == "param2" + assert matches[1].private + + def test_ignore_names(self): + pattern = RePattern(r"(?PCelt.?c)\s+(?P\w+)", ignore_names=["param2"], children=True) + + matches = list(pattern.matches(self.input_string)) + assert len(matches) == 1 + assert matches[0].name == "param1" + + def test_matches_kwargs(self): + pattern = RePattern("He.rew", name="test", value="HE") + matches = list(pattern.matches(self.input_string)) + + assert len(matches) == 1 + assert matches[0].name == "test" + assert matches[0].value == "HE" + + pattern = RePattern("H(e.)(rew)", name="test", value="HE") + matches = list(pattern.matches(self.input_string)) + + assert len(matches) == 1 + assert matches[0].name == "test" + assert matches[0].value == "HE" + + children = matches[0].children + assert len(children) == 2 + assert children[0].name is "test" + assert children[0].value == "HE" + + assert children[1].name is "test" + assert children[1].value == "HE" + + pattern = RePattern("H(?Pe.)(?Prew)", name="test", value="HE") + matches = list(pattern.matches(self.input_string)) + + assert len(matches) == 1 + assert matches[0].name == "test" + assert matches[0].value == "HE" + + children = matches[0].children + assert len(children) == 2 + assert children[0].name == "first" + assert children[0].value == "HE" + + assert children[1].name == "second" + assert children[1].value == "HE" + + +class TestFunctionalPattern(object): + """ + Tests for FunctionalPattern matching + """ + + input_string = "An Abyssinian fly playing a Celtic violin was annoyed by trashy flags on " \ + "which were the Hebrew letter qoph." 
+ + def test_single_vargs(self): + def func(input_string): + i = input_string.find("fly") + if i > -1: + return i, i + len("fly"), "fly", "functional" + + pattern = FunctionalPattern(func) + + matches = list(pattern.matches(self.input_string)) + assert len(matches) == 1 + assert isinstance(matches[0], Match) + assert matches[0].pattern == pattern + assert matches[0].span == (14, 17) + assert matches[0].name == "functional" + assert matches[0].value == "fly" + + def test_single_kwargs(self): + def func(input_string): + i = input_string.find("fly") + if i > -1: + return {"start": i, "end": i + len("fly"), "name": "functional"} + + pattern = FunctionalPattern(func) + + matches = list(pattern.matches(self.input_string)) + assert len(matches) == 1 + assert isinstance(matches[0], Match) + assert matches[0].pattern == pattern + assert matches[0].span == (14, 17) + assert matches[0].name == "functional" + assert matches[0].value == "fly" + + def test_multiple_objects(self): + def func(input_string): + i = input_string.find("fly") + matches = [] + if i > -1: + matches.append((i, i + len("fly"), {'name': "functional"})) + i = input_string.find("annoyed") + if i > -1: + matches.append((i, i + len("annoyed"))) + i = input_string.find("Hebrew") + if i > -1: + matches.append({"start": i, "end": i + len("Hebrew")}) + return matches + + pattern = FunctionalPattern(func) + + matches = list(pattern.matches(self.input_string)) + assert len(matches) == 3 + assert isinstance(matches[0], Match) + assert matches[0].pattern == pattern + assert matches[0].span == (14, 17) + assert matches[0].name == "functional" + assert matches[0].value == "fly" + + assert isinstance(matches[1], Match) + assert matches[1].pattern == pattern + assert matches[1].span == (46, 53) + assert matches[1].value == "annoyed" + + assert isinstance(matches[2], Match) + assert matches[2].pattern == pattern + assert matches[2].span == (88, 94) + assert matches[2].value == "Hebrew" + + def test_multiple_generator(self): 
+ def func(input_string): + i = input_string.find("fly") + if i > -1: + yield (i, i + len("fly"), {'name': "functional"}) + i = input_string.find("annoyed") + if i > -1: + yield (i, i + len("annoyed")) + i = input_string.find("Hebrew") + if i > -1: + yield (i, {"end": i + len("Hebrew")}) + + pattern = FunctionalPattern(func) + + matches = list(pattern.matches(self.input_string)) + assert len(matches) == 3 + assert isinstance(matches[0], Match) + assert matches[0].pattern == pattern + assert matches[0].span == (14, 17) + assert matches[0].name == "functional" + assert matches[0].value == "fly" + + assert isinstance(matches[1], Match) + assert matches[1].pattern == pattern + assert matches[1].span == (46, 53) + assert matches[1].value == "annoyed" + + assert isinstance(matches[2], Match) + assert matches[2].pattern == pattern + assert matches[2].span == (88, 94) + assert matches[2].value == "Hebrew" + + def test_no_match(self): + pattern = FunctionalPattern(lambda x: None) + + matches = list(pattern.matches(self.input_string)) + assert len(matches) == 0 + + def test_multiple_patterns(self): + def playing(input_string): + i = input_string.find("playing") + if i > -1: + return i, i + len("playing") + + def annoyed(input_string): + i = input_string.find("annoyed") + if i > -1: + return i, i + len("annoyed") + + def hebrew(input_string): + i = input_string.find("Hebrew") + if i > -1: + return i, i + len("Hebrew") + + pattern = FunctionalPattern(playing, annoyed, hebrew) + + matches = list(pattern.matches(self.input_string)) + assert len(matches) == 3 + + assert isinstance(matches[0], Match) + assert matches[0].pattern == pattern + assert matches[0].span == (18, 25) + assert matches[0].value == "playing" + + assert isinstance(matches[1], Match) + assert matches[1].pattern == pattern + assert matches[1].span == (46, 53) + assert matches[1].value == "annoyed" + + assert isinstance(matches[2], Match) + assert matches[2].pattern == pattern + assert matches[2].span == (88, 94) 
+ assert matches[2].value == "Hebrew" + + def test_matches_kwargs(self): + def playing(input_string): + i = input_string.find("playing") + if i > -1: + return i, i + len("playing") + + pattern = FunctionalPattern(playing, name="test", value="PLAY") + matches = list(pattern.matches(self.input_string)) + + assert len(matches) == 1 + assert matches[0].name == "test" + assert matches[0].value == "PLAY" + + +class TestValue(object): + """ + Tests for value option + """ + + input_string = "This string contains 1849 a number" + + def test_str_value(self): + pattern = StringPattern("1849", name="dummy", value="test") + + matches = list(pattern.matches(self.input_string)) + assert len(matches) == 1 + assert isinstance(matches[0], Match) + assert matches[0].pattern == pattern + assert matches[0].span == (21, 25) + assert matches[0].value == "test" + + def test_dict_child_value(self): + pattern = RePattern(r"(?Pcont.?ins)\s+(?P\d+)", + formatter={'intParam': lambda x: int(x) * 2, + 'strParam': lambda x: "really " + x}, + format_all=True, + value={'intParam': 'INT_PARAM_VALUE'}) + + matches = list(pattern.matches(self.input_string)) + assert len(matches) == 1 + + parent = matches[0] + assert len(parent.children) == 2 + + group1, group2 = parent.children + + assert isinstance(group1, Match) + assert group1.pattern == pattern + assert group1.span == (12, 20) + assert group1.value == "really contains" + + assert isinstance(group2, Match) + assert group2.pattern == pattern + assert group2.span == (21, 25) + assert group2.value == 'INT_PARAM_VALUE' + + def test_dict_default_value(self): + pattern = RePattern(r"(?Pcont.?ins)\s+(?P\d+)", + formatter={'intParam': lambda x: int(x) * 2, + 'strParam': lambda x: "really " + x}, + format_all=True, + value={'__children__': 'CHILD', 'strParam': 'STR_VALUE', '__parent__': 'PARENT'}) + + matches = list(pattern.matches(self.input_string)) + assert len(matches) == 1 + + parent = matches[0] + assert parent.value == "PARENT" + assert 
len(parent.children) == 2 + + group1, group2 = parent.children + + assert isinstance(group1, Match) + assert group1.pattern == pattern + assert group1.span == (12, 20) + assert group1.value == "STR_VALUE" + + assert isinstance(group2, Match) + assert group2.pattern == pattern + assert group2.span == (21, 25) + assert group2.value == "CHILD" + + +class TestFormatter(object): + """ + Tests for formatter option + """ + + input_string = "This string contains 1849 a number" + + def test_single_string(self): + pattern = StringPattern("1849", name="dummy", formatter=lambda x: int(x) / 2) + + matches = list(pattern.matches(self.input_string)) + assert len(matches) == 1 + assert isinstance(matches[0], Match) + assert matches[0].pattern == pattern + assert matches[0].span == (21, 25) + assert matches[0].value == 1849 / 2 + + def test_single_re_no_group(self): + pattern = RePattern(r"\d+", formatter=lambda x: int(x) * 2) + + matches = list(pattern.matches(self.input_string)) + assert len(matches) == 1 + assert isinstance(matches[0], Match) + assert matches[0].pattern == pattern + assert matches[0].span == (21, 25) + assert matches[0].value == 1849 * 2 + + def test_single_re_named_groups(self): + pattern = RePattern(r"(?Pcont.?ins)\s+(?P\d+)", + formatter={'intParam': lambda x: int(x) * 2, + 'strParam': lambda x: "really " + x}, format_all=True) + + matches = list(pattern.matches(self.input_string)) + assert len(matches) == 1 + + parent = matches[0] + assert len(parent.children) == 2 + + group1, group2 = parent.children + + assert isinstance(group1, Match) + assert group1.pattern == pattern + assert group1.span == (12, 20) + assert group1.value == "really contains" + + assert isinstance(group2, Match) + assert group2.pattern == pattern + assert group2.span == (21, 25) + assert group2.value == 1849 * 2 + + def test_repeated_captures_option(self): + pattern = RePattern(r"\[(\d+)\](?:-(\d+))*") + + matches = list(pattern.matches("[02]-03-04-05-06")) + assert len(matches) == 1 + + 
match = matches[0] + if REGEX_AVAILABLE: + assert len(match.children) == 5 + assert [child.value for child in match.children] == ["02", "03", "04", "05", "06"] + else: + assert len(match.children) == 2 + assert [child.value for child in match.children] == ["02", "06"] + + with pytest.raises(NotImplementedError): + RePattern(r"\[(\d+)\](?:-(\d+))*", repeated_captures=True) + + pattern = RePattern(r"\[(\d+)\](?:-(\d+))*", repeated_captures=False) + + matches = list(pattern.matches("[02]-03-04-05-06")) + assert len(matches) == 1 + + match = matches[0] + assert len(match.children) == 2 + assert [child.value for child in match.children] == ["02", "06"] + + def test_single_functional(self): + def digit(input_string): + i = input_string.find("1849") + if i > -1: + return i, i + len("1849") + + pattern = FunctionalPattern(digit, formatter=lambda x: int(x) * 3) + + matches = list(pattern.matches(self.input_string)) + assert len(matches) == 1 + assert isinstance(matches[0], Match) + assert matches[0].pattern == pattern + assert matches[0].span == (21, 25) + assert matches[0].value == 1849 * 3 + + +class TestValidator(object): + """ + Tests for validator option + """ + + input_string = "This string contains 1849 a number" + + @staticmethod + def true_validator(match): + return int(match.value) < 1850 + + @staticmethod + def false_validator(match): + return int(match.value) >= 1850 + + def test_single_string(self): + pattern = StringPattern("1849", name="dummy", validator=self.false_validator) + + matches = list(pattern.matches(self.input_string)) + assert len(matches) == 0 + + pattern = StringPattern("1849", validator=self.true_validator) + + matches = list(pattern.matches(self.input_string)) + assert len(matches) == 1 + + def test_single_re_no_group(self): + pattern = RePattern(r"\d+", validator=self.false_validator) + + matches = list(pattern.matches(self.input_string)) + assert len(matches) == 0 + + pattern = RePattern(r"\d+", validator=self.true_validator) + + matches = 
list(pattern.matches(self.input_string)) + assert len(matches) == 1 + + def test_single_re_named_groups(self): + pattern = RePattern(r"(?P<strParam>cont.?ins)\s+(?P<intParam>\d+)", + validator={'intParam': self.false_validator}, validate_all=True) + + matches = list(pattern.matches(self.input_string)) + assert len(matches) == 0 + + pattern = RePattern(r"(?P<strParam>cont.?ins)\s+(?P<intParam>\d+)", + validator={'intParam': self.true_validator}, validate_all=True) + + matches = list(pattern.matches(self.input_string)) + assert len(matches) == 1 + + def test_validate_all(self): + pattern = RePattern(r"contains (?P<intParam>\d+)", formatter=int, validator=lambda match: match.value < 100, + children=True) + + matches = list(pattern.matches(self.input_string)) + assert len(matches) == 0 + + pattern = RePattern(r"contains (?P<intParam>\d+)", formatter=int, validator=lambda match: match.value > 100, + children=True) + + matches = list(pattern.matches(self.input_string)) + assert len(matches) == 1 + + def invalid_func(match): + if match.name == 'intParam': + return True + else: + return match.value.startswith('abc') + + pattern = RePattern(r"contains (?P<intParam>\d+)", formatter=int, validator=invalid_func, validate_all=True, + children=True) + + matches = list(pattern.matches(self.input_string)) + assert len(matches) == 0 + + def func(match): + if match.name == 'intParam': + return True + else: + return match.value.startswith('contains') + + pattern = RePattern(r"contains (?P<intParam>\d+)", formatter=int, validator=func, validate_all=True, + children=True) + + matches = list(pattern.matches(self.input_string)) + assert len(matches) == 1 + + def test_format_all(self): + pattern = RePattern(r"contains (?P<intParam>\d+)", formatter=int, + children=True) + + matches = list(pattern.matches(self.input_string)) + assert len(matches) == 1 + for match in matches: + assert match.value is not None + + with pytest.raises(ValueError): + pattern = RePattern(r"contains (?P<intParam>\d+)", formatter=int, format_all=True) + matches = list(pattern.matches(self.input_string)) + for 
match in matches: + assert match.value is not None + + def test_single_functional(self): + def digit(input_string): + i = input_string.find("1849") + if i > -1: + return i, i + len("1849") + + pattern = FunctionalPattern(digit, validator=self.false_validator) + + matches = list(pattern.matches(self.input_string)) + assert len(matches) == 0 + + pattern = FunctionalPattern(digit, validator=self.true_validator) + + matches = list(pattern.matches(self.input_string)) + assert len(matches) == 1 diff --git a/libs/rebulk/test/test_processors.py b/libs/rebulk/test/test_processors.py new file mode 100644 index 00000000..7afd4535 --- /dev/null +++ b/libs/rebulk/test/test_processors.py @@ -0,0 +1,215 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# pylint: disable=no-self-use, pointless-statement, missing-docstring, no-member + +from ..pattern import StringPattern, RePattern +from ..processors import ConflictSolver +from ..rules import execute_rule +from ..match import Matches + + +def test_conflict_1(): + input_string = "abcdefghijklmnopqrstuvwxyz" + + pattern = StringPattern("ijklmn", "kl", "abcdef", "ab", "ef", "yz") + matches = Matches(pattern.matches(input_string)) + + execute_rule(ConflictSolver(), matches, None) + + values = [x.value for x in matches] + + assert values == ["ijklmn", "abcdef", "yz"] + + +def test_conflict_2(): + input_string = "abcdefghijklmnopqrstuvwxyz" + + pattern = StringPattern("ijklmn", "jklmnopqrst") + matches = Matches(pattern.matches(input_string)) + + execute_rule(ConflictSolver(), matches, None) + + values = [x.value for x in matches] + + assert values == ["jklmnopqrst"] + + +def test_conflict_3(): + input_string = "abcdefghijklmnopqrstuvwxyz" + + pattern = StringPattern("ijklmnopqrst", "jklmnopqrst") + matches = Matches(pattern.matches(input_string)) + + execute_rule(ConflictSolver(), matches, None) + + values = [x.value for x in matches] + + assert values == ["ijklmnopqrst"] + + +def test_conflict_4(): + input_string = "123456789" + + 
pattern = StringPattern("123", "456789") + matches = Matches(pattern.matches(input_string)) + + execute_rule(ConflictSolver(), matches, None) + + values = [x.value for x in matches] + assert values == ["123", "456789"] + + +def test_conflict_5(): + input_string = "123456789" + + pattern = StringPattern("123456", "789") + matches = Matches(pattern.matches(input_string)) + + execute_rule(ConflictSolver(), matches, None) + + values = [x.value for x in matches] + assert values == ["123456", "789"] + + +def test_prefer_longer_parent(): + input_string = "xxx.1x02.xxx" + + re1 = RePattern("([0-9]+)x([0-9]+)", name='prefer', children=True, formatter=int) + re2 = RePattern("x([0-9]+)", name='skip', children=True) + + matches = Matches(re1.matches(input_string)) + matches.extend(re2.matches(input_string)) + + execute_rule(ConflictSolver(), matches, None) + assert len(matches) == 2 + assert matches[0].value == 1 + assert matches[1].value == 2 + + +def test_conflict_solver_1(): + input_string = "123456789" + + re1 = StringPattern("2345678", conflict_solver=lambda match, conflicting: '__default__') + re2 = StringPattern("34567") + + matches = Matches(re1.matches(input_string)) + matches.extend(re2.matches(input_string)) + + execute_rule(ConflictSolver(), matches, None) + assert len(matches) == 1 + assert matches[0].value == "2345678" + + +def test_conflict_solver_2(): + input_string = "123456789" + + re1 = StringPattern("2345678", conflict_solver=lambda match, conflicting: '__default__') + re2 = StringPattern("34567", conflict_solver=lambda match, conflicting: conflicting) + + matches = Matches(re1.matches(input_string)) + matches.extend(re2.matches(input_string)) + + execute_rule(ConflictSolver(), matches, None) + assert len(matches) == 1 + assert matches[0].value == "34567" + + +def test_conflict_solver_3(): + input_string = "123456789" + + re1 = StringPattern("2345678", conflict_solver=lambda match, conflicting: match) + re2 = StringPattern("34567") + + matches = 
Matches(re1.matches(input_string)) + matches.extend(re2.matches(input_string)) + + execute_rule(ConflictSolver(), matches, None) + assert len(matches) == 1 + assert matches[0].value == "34567" + + +def test_conflict_solver_4(): + input_string = "123456789" + + re1 = StringPattern("2345678") + re2 = StringPattern("34567", conflict_solver=lambda match, conflicting: conflicting) + + matches = Matches(re1.matches(input_string)) + matches.extend(re2.matches(input_string)) + + execute_rule(ConflictSolver(), matches, None) + assert len(matches) == 1 + assert matches[0].value == "34567" + + +def test_conflict_solver_5(): + input_string = "123456789" + + re1 = StringPattern("2345678", conflict_solver=lambda match, conflicting: conflicting) + re2 = StringPattern("34567") + + matches = Matches(re1.matches(input_string)) + matches.extend(re2.matches(input_string)) + + execute_rule(ConflictSolver(), matches, None) + assert len(matches) == 1 + assert matches[0].value == "2345678" + + +def test_conflict_solver_6(): + input_string = "123456789" + + re1 = StringPattern("2345678") + re2 = StringPattern("34567", conflict_solver=lambda match, conflicting: conflicting) + + matches = Matches(re1.matches(input_string)) + matches.extend(re2.matches(input_string)) + + execute_rule(ConflictSolver(), matches, None) + assert len(matches) == 1 + assert matches[0].value == "34567" + + +def test_conflict_solver_7(): + input_string = "102" + + re1 = StringPattern("102") + re2 = StringPattern("02") + + matches = Matches(re2.matches(input_string)) + matches.extend(re1.matches(input_string)) + + execute_rule(ConflictSolver(), matches, None) + assert len(matches) == 1 + assert matches[0].value == "102" + + +def test_unresolved(): + input_string = "123456789" + + re1 = StringPattern("23456") + re2 = StringPattern("34567") + + matches = Matches(re1.matches(input_string)) + matches.extend(re2.matches(input_string)) + + execute_rule(ConflictSolver(), matches, None) + assert len(matches) == 2 + + re1 = 
StringPattern("34567") + re2 = StringPattern("2345678", conflict_solver=lambda match, conflicting: None) + + matches = Matches(re1.matches(input_string)) + matches.extend(re2.matches(input_string)) + + execute_rule(ConflictSolver(), matches, None) + assert len(matches) == 2 + + re1 = StringPattern("34567", conflict_solver=lambda match, conflicting: None) + re2 = StringPattern("2345678") + + matches = Matches(re1.matches(input_string)) + matches.extend(re2.matches(input_string)) + + execute_rule(ConflictSolver(), matches, None) + assert len(matches) == 2 diff --git a/libs/rebulk/test/test_rebulk.py b/libs/rebulk/test/test_rebulk.py new file mode 100644 index 00000000..bf0bc966 --- /dev/null +++ b/libs/rebulk/test/test_rebulk.py @@ -0,0 +1,419 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# pylint: disable=no-self-use, pointless-statement, missing-docstring, no-member + +from ..rebulk import Rebulk +from ..rules import Rule +from . import rebulk_rules_module as rm + + +def test_rebulk_simple(): + rebulk = Rebulk() + + rebulk.string("quick") + rebulk.regex("f.x") + + def func(input_string): + i = input_string.find("over") + if i > -1: + return i, i + len("over") + + rebulk.functional(func) + + input_string = "The quick brown fox jumps over the lazy dog" + + matches = rebulk.matches(input_string) + assert len(matches) == 3 + + assert matches[0].value == "quick" + assert matches[1].value == "fox" + assert matches[2].value == "over" + + +def test_rebulk_composition(): + rebulk = Rebulk() + + rebulk.string("quick") + rebulk.rebulk(Rebulk().regex("f.x")) + + rebulk.rebulk(Rebulk(disabled=lambda context: True).functional(lambda string: None)) + + input_string = "The quick brown fox jumps over the lazy dog" + + matches = rebulk.matches(input_string) + assert len(matches) == 2 + + assert matches[0].value == "quick" + assert matches[1].value == "fox" + + +def test_rebulk_context(): + rebulk = Rebulk() + + context = {'nostring': True, 'word': 'lazy'} + + 
rebulk.string("quick", disabled=lambda context: context.get('nostring', False)) + rebulk.regex("f.x", disabled=lambda context: context.get('noregex', False)) + + def func(input_string, context): + word = context.get('word', 'over') + i = input_string.find(word) + if i > -1: + return i, i + len(word) + + rebulk.functional(func) + + input_string = "The quick brown fox jumps over the lazy dog" + + matches = rebulk.matches(input_string, context) + assert len(matches) == 2 + + assert matches[0].value == "fox" + assert matches[1].value == "lazy" + + +def test_rebulk_prefer_longer(): + input_string = "The quick brown fox jumps over the lazy dog" + + matches = Rebulk().string("quick").string("own").regex("br.{2}n").matches(input_string) + + assert len(matches) == 2 + + assert matches[0].value == "quick" + assert matches[1].value == "brown" + + +def test_rebulk_defaults(): + input_string = "The quick brown fox jumps over the lazy dog" + + def func(input_string): + i = input_string.find("fox") + if i > -1: + return i, i + len("fox") + + matches = Rebulk()\ + .string_defaults(name="string", tags=["a", "b"])\ + .regex_defaults(name="regex") \ + .functional_defaults(name="functional") \ + .string("quick", tags=["c"])\ + .functional(func)\ + .regex("br.{2}n") \ + .matches(input_string) + assert matches[0].name == "string" + assert matches[0].tags == ["a", "b", "c"] + assert matches[1].name == "functional" + assert matches[2].name == "regex" + + matches = Rebulk() \ + .defaults(name="default", tags=["0"])\ + .string_defaults(name="string", tags=["a", "b"]) \ + .functional_defaults(name="functional", tags=["1"]) \ + .string("quick", tags=["c"]) \ + .functional(func) \ + .regex("br.{2}n") \ + .matches(input_string) + assert matches[0].name == "string" + assert matches[0].tags == ["0", "a", "b", "c"] + assert matches[1].name == "functional" + assert matches[1].tags == ["0", "1"] + assert matches[2].name == "default" + assert matches[2].tags == ["0"] + + +def test_rebulk_rebulk(): + 
input_string = "The quick brown fox jumps over the lazy dog" + + base = Rebulk().string("quick") + child = Rebulk().string("own").regex("br.{2}n") + + matches = base.rebulk(child).matches(input_string) + + assert len(matches) == 2 + + assert matches[0].value == "quick" + assert matches[1].value == "brown" + + +def test_rebulk_no_default(): + input_string = "The quick brown fox jumps over the lazy dog" + + matches = Rebulk(default_rules=False).string("quick").string("own").regex("br.{2}n").matches(input_string) + + assert len(matches) == 3 + + assert matches[0].value == "quick" + assert matches[1].value == "own" + assert matches[2].value == "brown" + + +def test_rebulk_empty_match(): + input_string = "The quick brown fox jumps over the lazy dog" + + matches = Rebulk(default_rules=False).string("quick").string("own").regex("br(.*?)own", children=True)\ + .matches(input_string) + + assert len(matches) == 2 + + assert matches[0].value == "quick" + assert matches[1].value == "own" + + +def test_rebulk_tags_names(): + rebulk = Rebulk() + + rebulk.string("quick", name="str", tags=["first", "other"]) + rebulk.regex("f.x", tags="other") + + def func(input_string): + i = input_string.find("over") + if i > -1: + return i, i + len("over"), {'tags': ['custom']} + + rebulk.functional(func, name="fn") + + def func2(input_string): + i = input_string.find("lazy") + if i > -1: + return {'start': i, 'end': i + len("lazy"), 'tags': ['custom']} + + rebulk.functional(func2, name="fn") + + input_string = "The quick brown fox jumps over the lazy dog" + + matches = rebulk.matches(input_string) + assert len(matches) == 4 + + assert len(matches.named("str")) == 1 + assert len(matches.named("fn")) == 2 + assert len(matches.named("false")) == 0 + assert len(matches.tagged("false")) == 0 + assert len(matches.tagged("first")) == 1 + assert len(matches.tagged("other")) == 2 + assert len(matches.tagged("custom")) == 2 + + +def test_rebulk_rules_1(): + rebulk = Rebulk() + + rebulk.regex(r'\d{4}', 
name="year") + rebulk.rules(rm.RemoveAllButLastYear) + + matches = rebulk.matches("1984 keep only last 1968 entry 1982 case") + assert len(matches) == 1 + assert matches[0].value == "1982" + + +def test_rebulk_rules_2(): + rebulk = Rebulk() + + rebulk.regex(r'\d{4}', name="year") + rebulk.string(r'year', name="yearPrefix", private=True) + rebulk.string(r'keep', name="yearSuffix", private=True) + rebulk.rules(rm.PrefixedSuffixedYear) + + matches = rebulk.matches("Keep suffix 1984 keep prefixed year 1968 and remove the rest 1982") + assert len(matches) == 2 + assert matches[0].value == "1984" + assert matches[1].value == "1968" + + +def test_rebulk_rules_3(): + rebulk = Rebulk() + + rebulk.regex(r'\d{4}', name="year") + rebulk.string(r'year', name="yearPrefix", private=True) + rebulk.string(r'keep', name="yearSuffix", private=True) + rebulk.rules(rm.PrefixedSuffixedYearNoLambda) + + matches = rebulk.matches("Keep suffix 1984 keep prefixed year 1968 and remove the rest 1982") + assert len(matches) == 2 + assert matches[0].value == "1984" + assert matches[1].value == "1968" + + +def test_rebulk_rules_4(): + class FirstOnlyRule(Rule): + def when(self, matches, context): + grabbed = matches.named("grabbed", 0) + if grabbed and matches.previous(grabbed): + return grabbed + + def then(self, matches, when_response, context): + matches.remove(when_response) + + rebulk = Rebulk() + + rebulk.regex("This match (.*?)grabbed", name="grabbed") + rebulk.regex("if it's (.*?)first match", private=True) + + rebulk.rules(FirstOnlyRule) + + matches = rebulk.matches("This match is grabbed only if it's the first match") + assert len(matches) == 1 + assert matches[0].value == "This match is grabbed" + + matches = rebulk.matches("if it's NOT the first match, This match is NOT grabbed") + assert len(matches) == 0 + + +class TestMarkers(object): + def test_one_marker(self): + class MarkerRule(Rule): + def when(self, matches, context): + word_match = matches.named("word", 0) + marker = 
matches.markers.at_match(word_match, lambda marker: marker.name == "mark1", 0) + if not marker: + return word_match + + def then(self, matches, when_response, context): + matches.remove(when_response) + + rebulk = Rebulk().regex(r'\(.*?\)', marker=True, name="mark1") \ + .regex(r'\[.*?\]', marker=True, name="mark2") \ + .string("word", name="word") \ + .rules(MarkerRule) + + matches = rebulk.matches("grab (word) only if it's in parenthesis") + + assert len(matches) == 1 + assert matches[0].value == "word" + + matches = rebulk.matches("don't grab [word] if it's in braket") + assert len(matches) == 0 + + matches = rebulk.matches("don't grab word at all") + assert len(matches) == 0 + + def test_multiple_marker(self): + class MarkerRule(Rule): + def when(self, matches, context): + word_match = matches.named("word", 0) + marker = matches.markers.at_match(word_match, + lambda marker: marker.name == "mark1" or marker.name == "mark2") + if len(marker) < 2: + return word_match + + def then(self, matches, when_response, context): + matches.remove(when_response) + + rebulk = Rebulk().regex(r'\(.*?\)', marker=True, name="mark1") \ + .regex(r'\[.*?\]', marker=True, name="mark2") \ + .regex("w.*?d", name="word") \ + .rules(MarkerRule) + + matches = rebulk.matches("[grab (word) only] if it's in parenthesis and brakets") + + assert len(matches) == 1 + assert matches[0].value == "word" + + matches = rebulk.matches("[don't grab](word)[if brakets are outside]") + assert len(matches) == 0 + + matches = rebulk.matches("(grab w[or)d even] if it's partially in parenthesis and brakets") + assert len(matches) == 1 + assert matches[0].value == "w[or)d" + + def test_at_index_marker(self): + class MarkerRule(Rule): + def when(self, matches, context): + word_match = matches.named("word", 0) + marker = matches.markers.at_index(word_match.start, + lambda marker: marker.name == "mark1", 0) + if not marker: + return word_match + + def then(self, matches, when_response, context): + 
matches.remove(when_response) + + rebulk = Rebulk().regex(r'\(.*?\)', marker=True, name="mark1") \ + .regex("w.*?d", name="word") \ + .rules(MarkerRule) + + matches = rebulk.matches("gr(ab wo)rd only if starting of match is inside parenthesis") + + assert len(matches) == 1 + assert matches[0].value == "wo)rd" + + matches = rebulk.matches("don't grab wo(rd if starting of match is not inside parenthesis") + + assert len(matches) == 0 + + def test_remove_marker(self): + class MarkerRule(Rule): + def when(self, matches, context): + marker = matches.markers.named("mark1", 0) + if marker: + return marker + + def then(self, matches, when_response, context): + matches.markers.remove(when_response) + + rebulk = Rebulk().regex(r'\(.*?\)', marker=True, name="mark1") \ + .regex("w.*?d", name="word") \ + .rules(MarkerRule) + + matches = rebulk.matches("grab word event (if it's not) inside parenthesis") + + assert len(matches) == 1 + assert matches[0].value == "word" + + assert not matches.markers + + +class TestUnicode(object): + def test_rebulk_simple(self): + input_string = u"敏捷的棕色狐狸跳過懶狗" + + rebulk = Rebulk() + + rebulk.string(u"敏") + rebulk.regex(u"捷") + + def func(input_string): + i = input_string.find(u"的") + if i > -1: + return i, i + len(u"的") + + rebulk.functional(func) + + matches = rebulk.matches(input_string) + assert len(matches) == 3 + + assert matches[0].value == u"敏" + assert matches[1].value == u"捷" + assert matches[2].value == u"的" + + +class TestImmutable(object): + def test_starting(self): + input_string = "The quick brown fox jumps over the lazy dog" + matches = Rebulk().string("quick").string("over").string("fox").matches(input_string) + + for i in range(0, len(input_string)): + starting = matches.starting(i) + for match in list(starting): + starting.remove(match) + + assert len(matches) == 3 + + def test_ending(self): + input_string = "The quick brown fox jumps over the lazy dog" + matches = 
Rebulk().string("quick").string("over").string("fox").matches(input_string) + + for i in range(0, len(input_string)): + starting = matches.ending(i) + for match in list(starting): + starting.remove(match) + + assert len(matches) == 3 + + def test_named(self): + input_string = "The quick brown fox jumps over the lazy dog" + matches = Rebulk().defaults(name='test').string("quick").string("over").string("fox").matches(input_string) + + named = matches.named('test') + for match in list(named): + named.remove(match) + + assert len(named) == 0 + assert len(matches) == 3 diff --git a/libs/rebulk/test/test_rules.py b/libs/rebulk/test/test_rules.py new file mode 100644 index 00000000..47b6f5fc --- /dev/null +++ b/libs/rebulk/test/test_rules.py @@ -0,0 +1,197 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# pylint: disable=no-self-use, pointless-statement, missing-docstring, invalid-name, no-member +import pytest +from rebulk.test.default_rules_module import RuleRemove0, RuleAppend0, RuleRename0, RuleAppend1, RuleRemove1, \ + RuleRename1, RuleAppend2, RuleRename2, RuleAppend3, RuleRename3, RuleAppendTags0, RuleRemoveTags0, \ + RuleAppendTags1, RuleRemoveTags1 + +from ..rules import Rules +from ..match import Matches, Match + +from .rules_module import Rule1, Rule2, Rule3, Rule0, Rule1Disabled +from . 
import rules_module as rm + + +def test_rule_priority(): + matches = Matches([Match(1, 2)]) + + rules = Rules(Rule1, Rule2()) + + rules.execute_all_rules(matches, {}) + assert len(matches) == 0 + matches = Matches([Match(1, 2)]) + + rules = Rules(Rule1(), Rule0) + + rules.execute_all_rules(matches, {}) + assert len(matches) == 1 + assert matches[0] == Match(3, 4) + + +def test_rules_duplicates(): + matches = Matches([Match(1, 2)]) + + rules = Rules(Rule1, Rule1) + + with pytest.raises(ValueError): + rules.execute_all_rules(matches, {}) + + +def test_rule_disabled(): + matches = Matches([Match(1, 2)]) + + rules = Rules(Rule1Disabled(), Rule2()) + + rules.execute_all_rules(matches, {}) + assert len(matches) == 2 + assert matches[0] == Match(1, 2) + assert matches[1] == Match(3, 4) + + +def test_rule_when(): + matches = Matches([Match(1, 2)]) + + rules = Rules(Rule3()) + + rules.execute_all_rules(matches, {'when': False}) + assert len(matches) == 1 + assert matches[0] == Match(1, 2) + + matches = Matches([Match(1, 2)]) + + rules.execute_all_rules(matches, {'when': True}) + assert len(matches) == 2 + assert matches[0] == Match(1, 2) + assert matches[1] == Match(3, 4) + + +class TestDefaultRules(object): + def test_remove(self): + rules = Rules(RuleRemove0) + + matches = Matches([Match(1, 2)]) + rules.execute_all_rules(matches, {}) + + assert len(matches) == 0 + + rules = Rules(RuleRemove1) + + matches = Matches([Match(1, 2)]) + rules.execute_all_rules(matches, {}) + + assert len(matches) == 0 + + def test_append(self): + rules = Rules(RuleAppend0) + + matches = Matches([Match(1, 2)]) + rules.execute_all_rules(matches, {}) + + assert len(matches) == 2 + + rules = Rules(RuleAppend1) + + matches = Matches([Match(1, 2)]) + rules.execute_all_rules(matches, {}) + + assert len(matches) == 2 + + rules = Rules(RuleAppend2) + + matches = Matches([Match(1, 2)]) + rules.execute_all_rules(matches, {}) + + assert len(matches) == 2 + assert len(matches.named('renamed')) == 1 + + 
rules = Rules(RuleAppend3) + + matches = Matches([Match(1, 2)]) + rules.execute_all_rules(matches, {}) + + assert len(matches) == 2 + assert len(matches.named('renamed')) == 1 + + def test_rename(self): + rules = Rules(RuleRename0) + + matches = Matches([Match(1, 2, name='original')]) + rules.execute_all_rules(matches, {}) + + assert len(matches.named('original')) == 1 + assert len(matches.named('renamed')) == 0 + + rules = Rules(RuleRename1) + + matches = Matches([Match(5, 10, name='original')]) + rules.execute_all_rules(matches, {}) + + assert len(matches.named('original')) == 0 + assert len(matches.named('renamed')) == 1 + + rules = Rules(RuleRename2) + + matches = Matches([Match(5, 10, name='original')]) + rules.execute_all_rules(matches, {}) + + assert len(matches.named('original')) == 0 + assert len(matches.named('renamed')) == 1 + + rules = Rules(RuleRename3) + + matches = Matches([Match(5, 10, name='original')]) + rules.execute_all_rules(matches, {}) + + assert len(matches.named('original')) == 0 + assert len(matches.named('renamed')) == 1 + + def test_append_tags(self): + rules = Rules(RuleAppendTags0) + + matches = Matches([Match(1, 2, name='tags', tags=['other'])]) + rules.execute_all_rules(matches, {}) + + assert len(matches.named('tags')) == 1 + assert matches.named('tags', index=0).tags == ['other', 'new-tag'] + + rules = Rules(RuleAppendTags1) + + matches = Matches([Match(1, 2, name='tags', tags=['other'])]) + rules.execute_all_rules(matches, {}) + + assert len(matches.named('tags')) == 1 + assert matches.named('tags', index=0).tags == ['other', 'new-tag'] + + def test_remove_tags(self): + rules = Rules(RuleRemoveTags0) + + matches = Matches([Match(1, 2, name='tags', tags=['other', 'new-tag'])]) + rules.execute_all_rules(matches, {}) + + assert len(matches.named('tags')) == 1 + assert matches.named('tags', index=0).tags == ['other'] + + rules = Rules(RuleRemoveTags1) + + matches = Matches([Match(1, 2, name='tags', tags=['other', 'new-tag'])]) + 
rules.execute_all_rules(matches, {}) + + assert len(matches.named('tags')) == 1 + assert matches.named('tags', index=0).tags == ['other'] + + +def test_rule_module(): + rules = Rules(rm) + + matches = Matches([Match(1, 2)]) + rules.execute_all_rules(matches, {}) + + assert len(matches) == 1 + + +def test_rule_repr(): + assert str(Rule0()) == "<Rule0>" + assert str(Rule1()) == "<Rule1>" + assert str(Rule2()) == "<Rule2>" + assert str(Rule1Disabled()) == "<Rule1Disabled>" diff --git a/libs/rebulk/test/test_toposort.py b/libs/rebulk/test/test_toposort.py new file mode 100644 index 00000000..76ea6031 --- /dev/null +++ b/libs/rebulk/test/test_toposort.py @@ -0,0 +1,111 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# Copyright 2014 True Blade Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Original: +# - https://bitbucket.org/ericvsmith/toposort (1.4) +# Modifications: +# - port to pytest +# pylint: skip-file + +import pytest +from ..toposort import toposort, toposort_flatten, CyclicDependency + + +class TestCase(object): + def test_simple(self): + results = list(toposort({2: set([11]), 9: set([11, 8]), 10: set([11, 3]), 11: set([7, 5]), 8: set([7, 3])})) + expected = [set([3, 5, 7]), set([8, 11]), set([2, 9, 10])] + assert results == expected + + # make sure self dependencies are ignored + results = list(toposort({2: set([2, 11]), 9: set([11, 8]), 10: set([10, 11, 3]), 11: set([7, 5]), 8: set([7, 3])})) + expected = [set([3, 5, 7]), set([8, 11]), set([2, 9, 10])] + assert results == expected + + assert list(toposort({1: set()})) == [set([1])] + assert list(toposort({1: set([1])})) == [set([1])] + + def test_no_dependencies(self): + assert list(toposort({1: set([2]), 3: set([4]), 5: set([6])})) == [set([2, 4, 6]), set([1, 3, 5])] + assert list(toposort({1: set(), 3: set(), 5: set()})) == [set([1, 3, 
5])] + + def test_empty(self): + assert list(toposort({})) == [] + + def test_strings(self): + results = list(toposort({'2': set(['11']), '9': set(['11', '8']), '10': set(['11', '3']), '11': set(['7', '5']), '8': set(['7', '3'])})) + expected = [set(['3', '5', '7']), set(['8', '11']), set(['2', '9', '10'])] + assert results == expected + + def test_objects(self): + o2 = object() + o3 = object() + o5 = object() + o7 = object() + o8 = object() + o9 = object() + o10 = object() + o11 = object() + results = list(toposort({o2: set([o11]), o9: set([o11, o8]), o10: set([o11, o3]), o11: set([o7, o5]), o8: set([o7, o3, o8])})) + expected = [set([o3, o5, o7]), set([o8, o11]), set([o2, o9, o10])] + assert results == expected + + def test_cycle(self): + # a simple, 2 element cycle + with pytest.raises(CyclicDependency): + list(toposort({1: set([2]), 2: set([1])})) + + # an indirect cycle + with pytest.raises(CyclicDependency): + list(toposort({1: set([2]), 2: set([3]), 3: set([1])})) + + def test_input_not_modified(self): + data = {2: set([11]), + 9: set([11, 8]), + 10: set([11, 3]), + 11: set([7, 5]), + 8: set([7, 3, 8]), # includes something self-referential + } + orig = data.copy() + results = list(toposort(data)) + assert data == orig + + def test_input_not_modified_when_cycle_error(self): + data = {1: set([2]), + 2: set([1]), + 3: set([4]), + } + orig = data.copy() + with pytest.raises(CyclicDependency): + list(toposort(data)) + assert data == orig + + +class TestCaseAll(object): + def test_sort_flatten(self): + data = {2: set([11]), + 9: set([11, 8]), + 10: set([11, 3]), + 11: set([7, 5]), + 8: set([7, 3, 8]), # includes something self-referential + } + expected = [set([3, 5, 7]), set([8, 11]), set([2, 9, 10])] + assert list(toposort(data)) == expected + + # now check the sorted results + results = [] + for item in expected: + results.extend(sorted(item)) + assert toposort_flatten(data) == results + + # and the unsorted results. 
break the results up into groups to compare them + actual = toposort_flatten(data, False) + results = [set([i for i in actual[0:3]]), set([i for i in actual[3:5]]), set([i for i in actual[5:8]])] + assert results == expected diff --git a/libs/rebulk/test/test_validators.py b/libs/rebulk/test/test_validators.py new file mode 100644 index 00000000..38511cbf --- /dev/null +++ b/libs/rebulk/test/test_validators.py @@ -0,0 +1,64 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# pylint: disable=no-self-use, pointless-statement, missing-docstring, invalid-name + +from functools import partial + +from rebulk.pattern import StringPattern + +from ..validators import chars_before, chars_after, chars_surround, validators + +chars = ' _.' +left = partial(chars_before, chars) +right = partial(chars_after, chars) +surrounding = partial(chars_surround, chars) + + +def test_left_chars(): + matches = list(StringPattern("word", validator=left).matches("xxxwordxxx")) + assert len(matches) == 0 + + matches = list(StringPattern("word", validator=left).matches("xxx_wordxxx")) + assert len(matches) == 1 + + matches = list(StringPattern("word", validator=left).matches("wordxxx")) + assert len(matches) == 1 + + +def test_right_chars(): + matches = list(StringPattern("word", validator=right).matches("xxxwordxxx")) + assert len(matches) == 0 + + matches = list(StringPattern("word", validator=right).matches("xxxword.xxx")) + assert len(matches) == 1 + + matches = list(StringPattern("word", validator=right).matches("xxxword")) + assert len(matches) == 1 + + +def test_surrounding_chars(): + matches = list(StringPattern("word", validator=surrounding).matches("xxxword xxx")) + assert len(matches) == 0 + + matches = list(StringPattern("word", validator=surrounding).matches("xxx.wordxxx")) + assert len(matches) == 0 + + matches = list(StringPattern("word", validator=surrounding).matches("xxx word_xxx")) + assert len(matches) == 1 + + matches = list(StringPattern("word", 
validator=surrounding).matches("word")) + assert len(matches) == 1 + + +def test_chain(): + matches = list(StringPattern("word", validator=validators(left, right)).matches("xxxword xxx")) + assert len(matches) == 0 + + matches = list(StringPattern("word", validator=validators(left, right)).matches("xxx.wordxxx")) + assert len(matches) == 0 + + matches = list(StringPattern("word", validator=validators(left, right)).matches("xxx word_xxx")) + assert len(matches) == 1 + + matches = list(StringPattern("word", validator=validators(left, right)).matches("word")) + assert len(matches) == 1 diff --git a/libs/rebulk/toposort.py b/libs/rebulk/toposort.py new file mode 100644 index 00000000..2bcba9ae --- /dev/null +++ b/libs/rebulk/toposort.py @@ -0,0 +1,84 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# Copyright 2014 True Blade Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Original: +# - https://bitbucket.org/ericvsmith/toposort (1.4) +# Modifications: +# - merged Pull request #2 for CyclicDependency error +# - import reduce as original name +# - support python 2.6 dict comprehension + +# pylint: skip-file +from functools import reduce + + +class CyclicDependency(ValueError): + def __init__(self, cyclic): + s = 'Cyclic dependencies exist among these items: {0}'.format(', '.join(repr(x) for x in cyclic.items())) + super(CyclicDependency, self).__init__(s) + self.cyclic = cyclic + + +def toposort(data): + """ + Dependencies are expressed as a dictionary whose keys are items + and whose values are a set of dependent items. Output is a list of + sets in topological order. The first set consists of items with no + dependences, each subsequent set consists of items that depend upon + items in the preceeding sets. 
+ :param data: + :type data: + :return: + :rtype: + """ + + # Special case empty input. + if len(data) == 0: + return + + # Copy the input so as to leave it unmodified. + data = data.copy() + + # Ignore self dependencies. + for k, v in data.items(): + v.discard(k) + # Find all items that don't depend on anything. + extra_items_in_deps = reduce(set.union, data.values()) - set(data.keys()) + # Add empty dependences where needed. + data.update(dict((item, set()) for item in extra_items_in_deps)) + while True: + ordered = set(item for item, dep in data.items() if len(dep) == 0) + if not ordered: + break + yield ordered + data = dict((item, (dep - ordered)) + for item, dep in data.items() + if item not in ordered) + if len(data) != 0: + raise CyclicDependency(data) + + +def toposort_flatten(data, sort=True): + """ + Returns a single list of dependencies. For any set returned by + toposort(), those items are sorted and appended to the result (just to + make the results deterministic). + :param data: + :type data: + :param sort: + :type sort: + :return: Single list of dependencies. + :rtype: list + """ + + result = [] + for d in toposort(data): + result.extend((sorted if sort else list)(d)) + return result diff --git a/libs/rebulk/utils.py b/libs/rebulk/utils.py new file mode 100644 index 00000000..a49fe4ff --- /dev/null +++ b/libs/rebulk/utils.py @@ -0,0 +1,153 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +Various utilities functions +""" +from collections import MutableSet + +from types import GeneratorType + + +def find_all(string, sub, start=None, end=None, ignore_case=False): + """ + Return all indices in string s where substring sub is + found, such that sub is contained in the slice s[start:end]. 
+ + >>> list(find_all('The quick brown fox jumps over the lazy dog', 'fox')) + [16] + + >>> list(find_all('The quick brown fox jumps over the lazy dog', 'mountain')) + [] + + >>> list(find_all('The quick brown fox jumps over the lazy dog', 'The')) + [0] + + >>> list(find_all( + ... 'Carved symbols in a mountain hollow on the bank of an inlet irritated an eccentric person', + ... 'an')) + [44, 51, 70] + + >>> list(find_all( + ... 'Carved symbols in a mountain hollow on the bank of an inlet irritated an eccentric person', + ... 'an', + ... 50, + ... 60)) + [51] + + :param string: the input string + :type string: str + :param sub: the substring + :type sub: str + :return: all indices in the input string + :rtype: __generator[str] + """ + if ignore_case: + sub = sub.lower() + string = string.lower() + while True: + start = string.find(sub, start, end) + if start == -1: + return + yield start + start += len(sub) + + +def get_first_defined(data, keys, default_value=None): + """ + Get the first defined key in data. + :param data: + :type data: + :param keys: + :type keys: + :param default_value: + :type default_value: + :return: + :rtype: + """ + for key in keys: + try: + return data[key] + except KeyError: + pass + return default_value + + +def is_iterable(obj): + """ + Are we being asked to look up a list of things, instead of a single thing? + We check for the `__iter__` attribute so that this can cover types that + don't have to be known by this module, such as NumPy arrays. + + Strings, however, should be considered as atomic values to look up, not + iterables. + + We don't need to check for the Python 2 `unicode` type, because it doesn't + have an `__iter__` attribute anyway. + """ + return hasattr(obj, '__iter__') and not isinstance(obj, str) or isinstance(obj, GeneratorType) + + +def extend_safe(target, source): + """ + Extends source list to target list only if elements doesn't exists in target list. 
+ :param target: + :type target: list + :param source: + :type source: list + """ + for elt in source: + if elt not in target: + target.append(elt) + + +class _Ref(object): + """ + Reference for IdentitySet + """ + def __init__(self, value): + self.value = value + + def __eq__(self, other): + return self.value is other.value + + def __hash__(self): + return id(self.value) + + +class IdentitySet(MutableSet): # pragma: no cover + """ + Set based on identity + """ + def __init__(self, items=None): + if items is None: + items = [] + self.refs = set(map(_Ref, items)) + + def __contains__(self, elem): + return _Ref(elem) in self.refs + + def __iter__(self): + return (ref.value for ref in self.refs) + + def __len__(self): + return len(self.refs) + + def add(self, elem): + self.refs.add(_Ref(elem)) + + def discard(self, elem): + self.refs.discard(_Ref(elem)) + + def update(self, iterable): + """ + Update set with iterable + :param iterable: + :type iterable: + :return: + :rtype: + """ + for elem in iterable: + self.add(elem) + + def __repr__(self): # pragma: no cover + return "%s(%s)" % (type(self).__name__, list(self)) diff --git a/libs/rebulk/validators.py b/libs/rebulk/validators.py new file mode 100644 index 00000000..5fd3dcb6 --- /dev/null +++ b/libs/rebulk/validators.py @@ -0,0 +1,70 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +Validator functions to use in patterns. + +All those function have last argument as match, so it's possible to use functools.partial to bind previous arguments. +""" + + +def chars_before(chars, match): + """ + Validate the match if left character is in a given sequence. + + :param chars: + :type chars: + :param match: + :type match: + :return: + :rtype: + """ + if match.start <= 0: + return True + return match.input_string[match.start - 1] in chars + + +def chars_after(chars, match): + """ + Validate the match if right character is in a given sequence. 
+ + :param chars: + :type chars: + :param match: + :type match: + :return: + :rtype: + """ + if match.end >= len(match.input_string): + return True + return match.input_string[match.end] in chars + + +def chars_surround(chars, match): + """ + Validate the match if surrounding characters are in a given sequence. + + :param chars: + :type chars: + :param match: + :type match: + :return: + :rtype: + """ + return chars_before(chars, match) and chars_after(chars, match) + + +def validators(*chained_validators): + """ + Creates a validator chain from several validator functions. + + :param chained_validators: + :type chained_validators: + :return: + :rtype: + """ + def validator_chain(match): # pylint:disable=missing-docstring + for chained_validator in chained_validators: + if not chained_validator(match): + return False + return True + return validator_chain diff --git a/libs/subliminal/__init__.py b/libs/subliminal/__init__.py index 836700c1..7ff8ac34 100644 --- a/libs/subliminal/__init__.py +++ b/libs/subliminal/__init__.py @@ -1,17 +1,21 @@ # -*- coding: utf-8 -*- __title__ = 'subliminal' -__version__ = '0.8.0-dev' +__version__ = '2.0.5' +__short_version__ = '.'.join(__version__.split('.')[:2]) __author__ = 'Antoine Bertin' __license__ = 'MIT' -__copyright__ = 'Copyright 2013 Antoine Bertin' +__copyright__ = 'Copyright 2016, Antoine Bertin' import logging -from .api import list_subtitles, download_subtitles, download_best_subtitles, save_subtitles -from .cache import MutexLock, region as cache_region -from .exceptions import Error, ProviderError -from .providers import Provider, ProviderPool, provider_manager -from .subtitle import Subtitle -from .video import VIDEO_EXTENSIONS, SUBTITLE_EXTENSIONS, Video, Episode, Movie, scan_videos, scan_video +from .core import (AsyncProviderPool, ProviderPool, check_video, download_best_subtitles, download_subtitles, + list_subtitles, refine, save_subtitles, scan_video, scan_videos) +from .cache import region +from .exceptions 
import Error, ProviderError +from .extensions import provider_manager, refiner_manager +from .providers import Provider +from .score import compute_score, get_scores +from .subtitle import SUBTITLE_EXTENSIONS, Subtitle +from .video import VIDEO_EXTENSIONS, Episode, Movie, Video logging.getLogger(__name__).addHandler(logging.NullHandler()) diff --git a/libs/subliminal/api.py b/libs/subliminal/api.py deleted file mode 100644 index 47d6a2cb..00000000 --- a/libs/subliminal/api.py +++ /dev/null @@ -1,140 +0,0 @@ -# -*- coding: utf-8 -*- -from __future__ import unicode_literals -import collections -import io -import logging -import operator -import os.path -import babelfish -from .providers import ProviderPool -from .subtitle import get_subtitle_path - - -logger = logging.getLogger(__name__) - - -def list_subtitles(videos, languages, providers=None, provider_configs=None): - """List subtitles for `videos` with the given `languages` using the specified `providers` - - :param videos: videos to list subtitles for - :type videos: set of :class:`~subliminal.video.Video` - :param languages: languages of subtitles to search for - :type languages: set of :class:`babelfish.Language` - :param providers: providers to use, if not all - :type providers: list of string or None - :param provider_configs: configuration for providers - :type provider_configs: dict of provider name => provider constructor kwargs or None - :return: found subtitles - :rtype: dict of :class:`~subliminal.video.Video` => [:class:`~subliminal.subtitle.Subtitle`] - - """ - subtitles = collections.defaultdict(list) - with ProviderPool(providers, provider_configs) as pp: - for video in videos: - logger.info('Listing subtitles for %r', video) - video_subtitles = pp.list_subtitles(video, languages) - logger.info('Found %d subtitles total', len(video_subtitles)) - subtitles[video].extend(video_subtitles) - return subtitles - - -def download_subtitles(subtitles, provider_configs=None): - """Download subtitles - - 
:param subtitles: subtitles to download - :type subtitles: list of :class:`~subliminal.subtitle.Subtitle` - :param provider_configs: configuration for providers - :type provider_configs: dict of provider name => provider constructor kwargs or None - - """ - with ProviderPool(provider_configs=provider_configs) as pp: - for subtitle in subtitles: - logger.info('Downloading subtitle %r', subtitle) - pp.download_subtitle(subtitle) - - -def download_best_subtitles(videos, languages, providers=None, provider_configs=None, min_score=0, - hearing_impaired=False, single=False): - """Download the best subtitles for `videos` with the given `languages` using the specified `providers` - - :param videos: videos to download subtitles for - :type videos: set of :class:`~subliminal.video.Video` - :param languages: languages of subtitles to download - :type languages: set of :class:`babelfish.Language` - :param providers: providers to use for the search, if not all - :type providers: list of string or None - :param provider_configs: configuration for providers - :type provider_configs: dict of provider name => provider constructor kwargs or None - :param int min_score: minimum score for subtitles to download - :param bool hearing_impaired: download hearing impaired subtitles - :param bool single: do not download for videos with an undetermined subtitle language detected - - """ - downloaded_subtitles = collections.defaultdict(list) - with ProviderPool(providers, provider_configs) as pp: - for video in videos: - # filter - if single and babelfish.Language('und') in video.subtitle_languages: - logger.debug('Skipping video %r: undetermined language found') - continue - - # list - logger.info('Listing subtitles for %r', video) - video_subtitles = pp.list_subtitles(video, languages) - logger.info('Found %d subtitles total', len(video_subtitles)) - - # download - downloaded_languages = set() - for subtitle, score in sorted([(s, s.compute_score(video)) for s in video_subtitles], - 
key=operator.itemgetter(1), reverse=True): - if score < min_score: - logger.info('No subtitle with score >= %d', min_score) - break - if subtitle.hearing_impaired != hearing_impaired: - logger.debug('Skipping subtitle: hearing impaired != %r', hearing_impaired) - continue - if subtitle.language in downloaded_languages: - logger.debug('Skipping subtitle: %r already downloaded', subtitle.language) - continue - logger.info('Downloading subtitle %r with score %d', subtitle, score) - if pp.download_subtitle(subtitle): - downloaded_languages.add(subtitle.language) - downloaded_subtitles[video].append(subtitle) - if single or downloaded_languages == languages: - logger.debug('All languages downloaded') - break - return downloaded_subtitles - - -def save_subtitles(subtitles, single=False, directory=None, encoding=None): - """Save subtitles on disk next to the video or in a specific folder if `folder_path` is specified - - :param bool single: download with .srt extension if ``True``, add language identifier otherwise - :param directory: path to directory where to save the subtitles, if any - :type directory: string or None - :param encoding: encoding for the subtitles or ``None`` to use the original encoding - :type encoding: string or None - - """ - for video, video_subtitles in subtitles.items(): - saved_languages = set() - for video_subtitle in video_subtitles: - if video_subtitle.content is None: - logger.debug('Skipping subtitle %r: no content', video_subtitle) - continue - if video_subtitle.language in saved_languages: - logger.debug('Skipping subtitle %r: language already saved', video_subtitle) - continue - subtitle_path = get_subtitle_path(video.name, None if single else video_subtitle.language) - if directory is not None: - subtitle_path = os.path.join(directory, os.path.split(subtitle_path)[1]) - logger.info('Saving %r to %r', video_subtitle, subtitle_path) - if encoding is None: - with io.open(subtitle_path, 'wb') as f: - f.write(video_subtitle.content) - else: 
- with io.open(subtitle_path, 'w', encoding=encoding) as f: - f.write(video_subtitle.text) - saved_languages.add(video_subtitle.language) - if single: - break diff --git a/libs/subliminal/cache.py b/libs/subliminal/cache.py index 72fbe01b..244ba953 100644 --- a/libs/subliminal/cache.py +++ b/libs/subliminal/cache.py @@ -1,14 +1,7 @@ # -*- coding: utf-8 -*- import datetime -import inspect -from dogpile.cache import make_region # @UnresolvedImport -from dogpile.cache.backends.file import AbstractFileLock # @UnresolvedImport -from dogpile.cache.compat import string_type # @UnresolvedImport -from dogpile.core.readwrite_lock import ReadWriteMutex # @UnresolvedImport - -#: Subliminal's cache version -CACHE_VERSION = 1 +from dogpile.cache import make_region #: Expiration time for show caching SHOW_EXPIRATION_TIME = datetime.timedelta(weeks=3).total_seconds() @@ -16,45 +9,8 @@ SHOW_EXPIRATION_TIME = datetime.timedelta(weeks=3).total_seconds() #: Expiration time for episode caching EPISODE_EXPIRATION_TIME = datetime.timedelta(days=3).total_seconds() - -def subliminal_key_generator(namespace, fn, to_str=string_type): - """Add a :data:`CACHE_VERSION` to dogpile.cache's default function_key_generator""" - if namespace is None: - namespace = '%d:%s:%s' % (CACHE_VERSION, fn.__module__, fn.__name__) - else: - namespace = '%d:%s:%s|%s' % (CACHE_VERSION, fn.__module__, fn.__name__, namespace) - - args = inspect.getargspec(fn) - has_self = args[0] and args[0][0] in ('self', 'cls') - - def generate_key(*args, **kw): - if kw: - raise ValueError('Keyword arguments not supported') - if has_self: - args = args[1:] - return namespace + '|' + ' '.join(map(to_str, args)) - return generate_key +#: Expiration time for scraper searches +REFINER_EXPIRATION_TIME = datetime.timedelta(weeks=1).total_seconds() -class MutexLock(AbstractFileLock): - """:class:`MutexLock` is a thread-based rw lock based on :class:`dogpile.core.ReadWriteMutex`""" - def __init__(self, filename): - self.mutex = 
ReadWriteMutex() - - def acquire_read_lock(self, wait): - ret = self.mutex.acquire_read_lock(wait) - return wait or ret - - def acquire_write_lock(self, wait): - ret = self.mutex.acquire_write_lock(wait) - return wait or ret - - def release_read_lock(self): - return self.mutex.release_read_lock() - - def release_write_lock(self): - return self.mutex.release_write_lock() - - -#: The dogpile.cache region -region = make_region(function_key_generator=subliminal_key_generator) +region = make_region() diff --git a/libs/subliminal/cli.py b/libs/subliminal/cli.py index cabcdfc8..cc24853c 100644 --- a/libs/subliminal/cli.py +++ b/libs/subliminal/cli.py @@ -1,197 +1,461 @@ # -*- coding: utf-8 -*- -from __future__ import unicode_literals, print_function -import argparse -import datetime +""" +Subliminal uses `click `_ to provide a powerful :abbr:`CLI (command-line interface)`. + +""" +from __future__ import division +from collections import defaultdict +from datetime import timedelta +import glob +import json import logging import os import re -import sys -import babelfish -import xdg.BaseDirectory -from subliminal import (__version__, cache_region, MutexLock, provider_manager, Video, Episode, Movie, scan_videos, - download_best_subtitles, save_subtitles) -try: - import colorlog -except ImportError: - colorlog = None + +from appdirs import AppDirs +from babelfish import Error as BabelfishError, Language +import click +from dogpile.cache.backends.file import AbstractFileLock +from dogpile.util.readwrite_lock import ReadWriteMutex +from six.moves import configparser + +from subliminal import (AsyncProviderPool, Episode, Movie, Video, __version__, check_video, compute_score, get_scores, + provider_manager, refine, refiner_manager, region, save_subtitles, scan_video, scan_videos) +from subliminal.core import ARCHIVE_EXTENSIONS, search_external_subtitles + +logger = logging.getLogger(__name__) -DEFAULT_CACHE_FILE = os.path.join(xdg.BaseDirectory.save_cache_path('subliminal'), 
'cli.dbm') +class MutexLock(AbstractFileLock): + """:class:`MutexLock` is a thread-based rw lock based on :class:`dogpile.core.ReadWriteMutex`.""" + def __init__(self, filename): + self.mutex = ReadWriteMutex() + + def acquire_read_lock(self, wait): + ret = self.mutex.acquire_read_lock(wait) + return wait or ret + + def acquire_write_lock(self, wait): + ret = self.mutex.acquire_write_lock(wait) + return wait or ret + + def release_read_lock(self): + return self.mutex.release_read_lock() + + def release_write_lock(self): + return self.mutex.release_write_lock() -def subliminal(): - parser = argparse.ArgumentParser(prog='subliminal', description='Subtitles, faster than your thoughts', - epilog='Suggestions and bug reports are greatly appreciated: ' - 'https://github.com/Diaoul/subliminal/issues', add_help=False) +class Config(object): + """A :class:`~configparser.ConfigParser` wrapper to store configuration. - # required arguments - required_arguments_group = parser.add_argument_group('required arguments') - required_arguments_group.add_argument('paths', nargs='+', metavar='PATH', help='path to video file or folder') - required_arguments_group.add_argument('-l', '--languages', nargs='+', required=True, metavar='LANGUAGE', - help='wanted languages as IETF codes e.g. fr, pt-BR, sr-Cyrl ') + Interaction with the configuration is done with the properties. - # configuration - configuration_group = parser.add_argument_group('configuration') - configuration_group.add_argument('-s', '--single', action='store_true', - help='download without language code in subtitle\'s filename i.e. .srt only') - configuration_group.add_argument('-c', '--cache-file', default=DEFAULT_CACHE_FILE, - help='cache file (default: %(default)s)') + :param str path: path to the configuration file. 
- # filtering - filtering_group = parser.add_argument_group('filtering') - filtering_group.add_argument('-p', '--providers', nargs='+', metavar='PROVIDER', - help='providers to use (%s)' % ', '.join(provider_manager.available_providers)) - filtering_group.add_argument('-m', '--min-score', type=int, default=0, - help='minimum score for subtitles (0-%d for episodes, 0-%d for movies)' - % (Episode.scores['hash'], Movie.scores['hash'])) - filtering_group.add_argument('-a', '--age', help='download subtitles for videos newer than AGE e.g. 12h, 1w2d') - filtering_group.add_argument('-h', '--hearing-impaired', action='store_true', - help='download hearing impaired subtitles') - filtering_group.add_argument('-f', '--force', action='store_true', - help='force subtitle download for videos with existing subtitles') + """ + def __init__(self, path): + #: Path to the configuration file + self.path = path - # addic7ed - addic7ed_group = parser.add_argument_group('addic7ed') - addic7ed_group.add_argument('--addic7ed-username', metavar='USERNAME', help='username for addic7ed provider') - addic7ed_group.add_argument('--addic7ed-password', metavar='PASSWORD', help='password for addic7ed provider') + #: The underlying configuration object + self.config = configparser.SafeConfigParser() + self.config.add_section('general') + self.config.set('general', 'languages', json.dumps(['en'])) + self.config.set('general', 'providers', json.dumps(sorted([p.name for p in provider_manager]))) + self.config.set('general', 'refiners', json.dumps(sorted([r.name for r in refiner_manager]))) + self.config.set('general', 'single', str(0)) + self.config.set('general', 'embedded_subtitles', str(1)) + self.config.set('general', 'age', str(int(timedelta(weeks=2).total_seconds()))) + self.config.set('general', 'hearing_impaired', str(1)) + self.config.set('general', 'min_score', str(0)) - # output - output_group = parser.add_argument_group('output') - output_group.add_argument('-d', '--directory', - 
help='save subtitles in the given directory rather than next to the video') - output_group.add_argument('-e', '--encoding', default=None, - help='encoding to convert the subtitle to (default: no conversion)') - output_exclusive_group = output_group.add_mutually_exclusive_group() - output_exclusive_group.add_argument('-q', '--quiet', action='store_true', help='disable output') - output_exclusive_group.add_argument('-v', '--verbose', action='store_true', help='verbose output') - output_group.add_argument('--log-file', help='log into a file instead of stdout') - output_group.add_argument('--color', action='store_true', help='add color to console output (requires colorlog)') + def read(self): + """Read the configuration from :attr:`path`""" + self.config.read(self.path) - # troubleshooting - troubleshooting_group = parser.add_argument_group('troubleshooting') - troubleshooting_group.add_argument('--debug', action='store_true', help='debug output') - troubleshooting_group.add_argument('--version', action='version', version=__version__) - troubleshooting_group.add_argument('--help', action='help', help='show this help message and exit') + def write(self): + """Write the configuration to :attr:`path`""" + with open(self.path, 'w') as f: + self.config.write(f) - # parse args - args = parser.parse_args() + @property + def languages(self): + return {Language.fromietf(l) for l in json.loads(self.config.get('general', 'languages'))} - # parse paths - try: - args.paths = [os.path.abspath(os.path.expanduser(p.decode('utf-8') if isinstance(p, bytes) else p)) - for p in args.paths] - except UnicodeDecodeError: - parser.error('argument paths: encodings is not utf-8: %r' % args.paths) + @languages.setter + def languages(self, value): + self.config.set('general', 'languages', json.dumps(sorted([str(l) for l in value]))) - # parse languages - try: - args.languages = {babelfish.Language.fromietf(l) for l in args.languages} - except babelfish.Error: - parser.error('argument 
-l/--languages: codes are not IETF: %r' % args.languages) + @property + def providers(self): + return json.loads(self.config.get('general', 'providers')) - # parse age - if args.age is not None: - match = re.match(r'^(?:(?P\d+?)w)?(?:(?P\d+?)d)?(?:(?P\d+?)h)?$', args.age) + @providers.setter + def providers(self, value): + self.config.set('general', 'providers', json.dumps(sorted([p.lower() for p in value]))) + + @property + def refiners(self): + return json.loads(self.config.get('general', 'refiners')) + + @refiners.setter + def refiners(self, value): + self.config.set('general', 'refiners', json.dumps([r.lower() for r in value])) + + @property + def single(self): + return self.config.getboolean('general', 'single') + + @single.setter + def single(self, value): + self.config.set('general', 'single', str(int(value))) + + @property + def embedded_subtitles(self): + return self.config.getboolean('general', 'embedded_subtitles') + + @embedded_subtitles.setter + def embedded_subtitles(self, value): + self.config.set('general', 'embedded_subtitles', str(int(value))) + + @property + def age(self): + return timedelta(seconds=self.config.getint('general', 'age')) + + @age.setter + def age(self, value): + self.config.set('general', 'age', str(int(value.total_seconds()))) + + @property + def hearing_impaired(self): + return self.config.getboolean('general', 'hearing_impaired') + + @hearing_impaired.setter + def hearing_impaired(self, value): + self.config.set('general', 'hearing_impaired', str(int(value))) + + @property + def min_score(self): + return self.config.getfloat('general', 'min_score') + + @min_score.setter + def min_score(self, value): + self.config.set('general', 'min_score', str(value)) + + @property + def provider_configs(self): + rv = {} + for provider in provider_manager: + if self.config.has_section(provider.name): + rv[provider.name] = {k: v for k, v in self.config.items(provider.name)} + return rv + + @provider_configs.setter + def provider_configs(self, 
value): + # loop over provider configurations + for provider, config in value.items(): + # create the corresponding section if necessary + if not self.config.has_section(provider): + self.config.add_section(provider) + + # add config options + for k, v in config.items(): + self.config.set(provider, k, v) + + +class LanguageParamType(click.ParamType): + """:class:`~click.ParamType` for languages that returns a :class:`~babelfish.language.Language`""" + name = 'language' + + def convert(self, value, param, ctx): + try: + return Language.fromietf(value) + except BabelfishError: + self.fail('%s is not a valid language' % value) + +LANGUAGE = LanguageParamType() + + +class AgeParamType(click.ParamType): + """:class:`~click.ParamType` for age strings that returns a :class:`~datetime.timedelta` + + An age string is in the form `number + identifier` with possible identifiers: + + * ``w`` for weeks + * ``d`` for days + * ``h`` for hours + + The form can be specified multiple times but only with that idenfier ordering. 
For example: + + * ``1w2d4h`` for 1 week, 2 days and 4 hours + * ``2w`` for 2 weeks + * ``3w6h`` for 3 weeks and 6 hours + + """ + name = 'age' + + def convert(self, value, param, ctx): + match = re.match(r'^(?:(?P\d+?)w)?(?:(?P\d+?)d)?(?:(?P\d+?)h)?$', value) if not match: - parser.error('argument -a/--age: invalid age: %r' % args.age) - args.age = datetime.timedelta(**{k: int(v) for k, v in match.groupdict(0).items()}) + self.fail('%s is not a valid age' % value) - # parse cache-file - args.cache_file = os.path.abspath(os.path.expanduser(args.cache_file)) - if not os.path.exists(os.path.split(args.cache_file)[0]): - parser.error('argument -c/--cache-file: directory %r for cache file does not exist' - % os.path.split(args.cache_file)[0]) + return timedelta(**{k: int(v) for k, v in match.groupdict(0).items()}) - # parse provider configs - provider_configs = {} - if (args.addic7ed_username is not None and args.addic7ed_password is None - or args.addic7ed_username is None and args.addic7ed_password is not None): - parser.error('argument --addic7ed-username/--addic7ed-password: both arguments are required or none') - if args.addic7ed_username is not None and args.addic7ed_password is not None: - provider_configs['addic7ed'] = {'username': args.addic7ed_username, 'password': args.addic7ed_password} +AGE = AgeParamType() - # parse color - if args.color and colorlog is None: - parser.error('argument --color: colorlog required') +PROVIDER = click.Choice(sorted(provider_manager.names())) - # setup output - if args.log_file is None: - handler = logging.StreamHandler() - else: - handler = logging.FileHandler(args.log_file, encoding='utf-8') - if args.debug: - if args.color: - if args.log_file is None: - log_format = '%(log_color)s%(levelname)-8s%(reset)s [%(blue)s%(name)s-%(funcName)s:%(lineno)d%(reset)s] %(message)s' - else: - log_format = '%(purple)s%(asctime)s%(reset)s %(log_color)s%(levelname)-8s%(reset)s [%(blue)s%(name)s-%(funcName)s:%(lineno)d%(reset)s] %(message)s' - 
handler.setFormatter(colorlog.ColoredFormatter(log_format, - log_colors=dict(colorlog.default_log_colors.items() + [('DEBUG', 'cyan')]))) - else: - if args.log_file is None: - log_format = '%(levelname)-8s [%(name)s-%(funcName)s:%(lineno)d] %(message)s' - else: - log_format = '%(asctime)s %(levelname)-8s [%(name)s-%(funcName)s:%(lineno)d] %(message)s' - handler.setFormatter(logging.Formatter(log_format)) - logging.getLogger().addHandler(handler) - logging.getLogger().setLevel(logging.DEBUG) - elif args.verbose: - if args.color: - if args.log_file is None: - log_format = '%(log_color)s%(levelname)-8s%(reset)s [%(blue)s%(name)s%(reset)s] %(message)s' - else: - log_format = '%(purple)s%(asctime)s%(reset)s %(log_color)s%(levelname)-8s%(reset)s [%(blue)s%(name)s%(reset)s] %(message)s' - handler.setFormatter(colorlog.ColoredFormatter(log_format)) - else: - log_format = '%(levelname)-8s [%(name)s] %(message)s' - if args.log_file is not None: - log_format = '%(asctime)s ' + log_format - handler.setFormatter(logging.Formatter(log_format)) - logging.getLogger('subliminal').addHandler(handler) - logging.getLogger('subliminal').setLevel(logging.INFO) - elif not args.quiet: - if args.color: - if args.log_file is None: - log_format = '[%(log_color)s%(levelname)s%(reset)s] %(message)s' - else: - log_format = '%(purple)s%(asctime)s%(reset)s [%(log_color)s%(levelname)s%(reset)s] %(message)s' - handler.setFormatter(colorlog.ColoredFormatter(log_format)) - else: - if args.log_file is None: - log_format = '%(levelname)s: %(message)s' - else: - log_format = '%(asctime)s %(levelname)s: %(message)s' - handler.setFormatter(logging.Formatter(log_format)) - logging.getLogger('subliminal.api').addHandler(handler) - logging.getLogger('subliminal.api').setLevel(logging.INFO) +REFINER = click.Choice(sorted(refiner_manager.names())) + +dirs = AppDirs('subliminal') +cache_file = 'subliminal.dbm' +config_file = 'config.ini' + + +@click.group(context_settings={'max_content_width': 100}, 
epilog='Suggestions and bug reports are greatly appreciated: ' + 'https://github.com/Diaoul/subliminal/') +@click.option('--addic7ed', type=click.STRING, nargs=2, metavar='USERNAME PASSWORD', help='Addic7ed configuration.') +@click.option('--legendastv', type=click.STRING, nargs=2, metavar='USERNAME PASSWORD', help='LegendasTV configuration.') +@click.option('--opensubtitles', type=click.STRING, nargs=2, metavar='USERNAME PASSWORD', + help='OpenSubtitles configuration.') +@click.option('--subscenter', type=click.STRING, nargs=2, metavar='USERNAME PASSWORD', help='SubsCenter configuration.') +@click.option('--cache-dir', type=click.Path(writable=True, file_okay=False), default=dirs.user_cache_dir, + show_default=True, expose_value=True, help='Path to the cache directory.') +@click.option('--debug', is_flag=True, help='Print useful information for debugging subliminal and for reporting bugs.') +@click.version_option(__version__) +@click.pass_context +def subliminal(ctx, addic7ed, legendastv, opensubtitles, subscenter, cache_dir, debug): + """Subtitles, faster than your thoughts.""" + # create cache directory + try: + os.makedirs(cache_dir) + except OSError: + if not os.path.isdir(cache_dir): + raise # configure cache - cache_region.configure('dogpile.cache.dbm', expiration_time=datetime.timedelta(days=30), # @UndefinedVariable - arguments={'filename': args.cache_file, 'lock_factory': MutexLock}) + region.configure('dogpile.cache.dbm', expiration_time=timedelta(days=30), + arguments={'filename': os.path.join(cache_dir, cache_file), 'lock_factory': MutexLock}) + + # configure logging + if debug: + handler = logging.StreamHandler() + handler.setFormatter(logging.Formatter(logging.BASIC_FORMAT)) + logging.getLogger('subliminal').addHandler(handler) + logging.getLogger('subliminal').setLevel(logging.DEBUG) + + # provider configs + ctx.obj = {'provider_configs': {}} + if addic7ed: + ctx.obj['provider_configs']['addic7ed'] = {'username': addic7ed[0], 'password': 
addic7ed[1]} + if legendastv: + ctx.obj['provider_configs']['legendastv'] = {'username': legendastv[0], 'password': legendastv[1]} + if opensubtitles: + ctx.obj['provider_configs']['opensubtitles'] = {'username': opensubtitles[0], 'password': opensubtitles[1]} + if subscenter: + ctx.obj['provider_configs']['subscenter'] = {'username': subscenter[0], 'password': subscenter[1]} + + +@subliminal.command() +@click.option('--clear-subliminal', is_flag=True, help='Clear subliminal\'s cache. Use this ONLY if your cache is ' + 'corrupted or if you experience issues.') +@click.pass_context +def cache(ctx, clear_subliminal): + """Cache management.""" + if clear_subliminal: + for file in glob.glob(os.path.join(ctx.parent.params['cache_dir'], cache_file) + '*'): + os.remove(file) + click.echo('Subliminal\'s cache cleared.') + else: + click.echo('Nothing done.') + + +@subliminal.command() +@click.option('-l', '--language', type=LANGUAGE, required=True, multiple=True, help='Language as IETF code, ' + 'e.g. en, pt-BR (can be used multiple times).') +@click.option('-p', '--provider', type=PROVIDER, multiple=True, help='Provider to use (can be used multiple times).') +@click.option('-r', '--refiner', type=REFINER, multiple=True, help='Refiner to use (can be used multiple times).') +@click.option('-a', '--age', type=AGE, help='Filter videos newer than AGE, e.g. 12h, 1w2d.') +@click.option('-d', '--directory', type=click.STRING, metavar='DIR', help='Directory where to save subtitles, ' + 'default is next to the video file.') +@click.option('-e', '--encoding', type=click.STRING, metavar='ENC', help='Subtitle file encoding, default is to ' + 'preserve original encoding.') +@click.option('-s', '--single', is_flag=True, default=False, help='Save subtitle without language code in the file ' + 'name, i.e. use .srt extension. 
Do not use this unless your media player requires it.') +@click.option('-f', '--force', is_flag=True, default=False, help='Force download even if a subtitle already exist.') +@click.option('-hi', '--hearing-impaired', is_flag=True, default=False, help='Prefer hearing impaired subtitles.') +@click.option('-m', '--min-score', type=click.IntRange(0, 100), default=0, help='Minimum score for a subtitle ' + 'to be downloaded (0 to 100).') +@click.option('-w', '--max-workers', type=click.IntRange(1, 50), default=None, help='Maximum number of threads to use.') +@click.option('-z/-Z', '--archives/--no-archives', default=True, show_default=True, help='Scan archives for videos ' + '(supported extensions: %s).' % ', '.join(ARCHIVE_EXTENSIONS)) +@click.option('-v', '--verbose', count=True, help='Increase verbosity.') +@click.argument('path', type=click.Path(), required=True, nargs=-1) +@click.pass_obj +def download(obj, provider, refiner, language, age, directory, encoding, single, force, hearing_impaired, min_score, + max_workers, archives, verbose, path): + """Download best subtitles. + + PATH can be an directory containing videos, a video file path or a video file name. It can be used multiple times. + + If an existing subtitle is detected (external or embedded) in the correct language, the download is skipped for + the associated video. 
+ + """ + # process parameters + language = set(language) # scan videos - videos = scan_videos([p for p in args.paths if os.path.exists(p)], subtitles=not args.force, - embedded_subtitles=not args.force, age=args.age) + videos = [] + ignored_videos = [] + errored_paths = [] + with click.progressbar(path, label='Collecting videos', item_show_func=lambda p: p or '') as bar: + for p in bar: + logger.debug('Collecting path %s', p) - # guess videos - videos.extend([Video.fromname(p) for p in args.paths if not os.path.exists(p)]) + # non-existing + if not os.path.exists(p): + try: + video = Video.fromname(p) + except: + logger.exception('Unexpected error while collecting non-existing path %s', p) + errored_paths.append(p) + continue + if not force: + video.subtitle_languages |= set(search_external_subtitles(video.name, directory=directory).values()) + refine(video, episode_refiners=refiner, movie_refiners=refiner, embedded_subtitles=not force) + videos.append(video) + continue + + # directories + if os.path.isdir(p): + try: + scanned_videos = scan_videos(p, age=age, archives=archives) + except: + logger.exception('Unexpected error while collecting directory path %s', p) + errored_paths.append(p) + continue + for video in scanned_videos: + if not force: + video.subtitle_languages |= set(search_external_subtitles(video.name, + directory=directory).values()) + if check_video(video, languages=language, age=age, undefined=single): + refine(video, episode_refiners=refiner, movie_refiners=refiner, embedded_subtitles=not force) + videos.append(video) + else: + ignored_videos.append(video) + continue + + # other inputs + try: + video = scan_video(p) + except: + logger.exception('Unexpected error while collecting path %s', p) + errored_paths.append(p) + continue + if not force: + video.subtitle_languages |= set(search_external_subtitles(video.name, directory=directory).values()) + if check_video(video, languages=language, age=age, undefined=single): + refine(video, 
episode_refiners=refiner, movie_refiners=refiner, embedded_subtitles=not force) + videos.append(video) + else: + ignored_videos.append(video) + + # output errored paths + if verbose > 0: + for p in errored_paths: + click.secho('%s errored' % p, fg='red') + + # output ignored videos + if verbose > 1: + for video in ignored_videos: + click.secho('%s ignored - subtitles: %s / age: %d day%s' % ( + os.path.split(video.name)[1], + ', '.join(str(s) for s in video.subtitle_languages) or 'none', + video.age.days, + 's' if video.age.days > 1 else '' + ), fg='yellow') + + # report collected videos + click.echo('%s video%s collected / %s video%s ignored / %s error%s' % ( + click.style(str(len(videos)), bold=True, fg='green' if videos else None), + 's' if len(videos) > 1 else '', + click.style(str(len(ignored_videos)), bold=True, fg='yellow' if ignored_videos else None), + 's' if len(ignored_videos) > 1 else '', + click.style(str(len(errored_paths)), bold=True, fg='red' if errored_paths else None), + 's' if len(errored_paths) > 1 else '', + )) + + # exit if no video collected + if not videos: + return # download best subtitles - subtitles = download_best_subtitles(videos, args.languages, providers=args.providers, - provider_configs=provider_configs, min_score=args.min_score, - hearing_impaired=args.hearing_impaired, single=args.single) + downloaded_subtitles = defaultdict(list) + with AsyncProviderPool(max_workers=max_workers, providers=provider, provider_configs=obj['provider_configs']) as p: + with click.progressbar(videos, label='Downloading subtitles', + item_show_func=lambda v: os.path.split(v.name)[1] if v is not None else '') as bar: + for v in bar: + scores = get_scores(v) + subtitles = p.download_best_subtitles(p.list_subtitles(v, language - v.subtitle_languages), + v, language, min_score=scores['hash'] * min_score / 100, + hearing_impaired=hearing_impaired, only_one=single) + downloaded_subtitles[v] = subtitles + + if p.discarded_providers: + click.secho('Some 
providers have been discarded due to unexpected errors: %s' % + ', '.join(p.discarded_providers), fg='yellow') # save subtitles - save_subtitles(subtitles, single=args.single, directory=args.directory, encoding=args.encoding) + total_subtitles = 0 + for v, subtitles in downloaded_subtitles.items(): + saved_subtitles = save_subtitles(v, subtitles, single=single, directory=directory, encoding=encoding) + total_subtitles += len(saved_subtitles) - # result output - if not subtitles: - if not args.quiet: - print('No subtitles downloaded', file=sys.stderr) - exit(1) - if not args.quiet: - subtitles_count = sum([len(s) for s in subtitles.values()]) - if subtitles_count == 1: - print('%d subtitle downloaded' % subtitles_count) - else: - print('%d subtitles downloaded' % subtitles_count) + if verbose > 0: + click.echo('%s subtitle%s downloaded for %s' % (click.style(str(len(saved_subtitles)), bold=True), + 's' if len(saved_subtitles) > 1 else '', + os.path.split(v.name)[1])) + + if verbose > 1: + for s in saved_subtitles: + matches = s.get_matches(v) + score = compute_score(s, v) + + # score color + score_color = None + scores = get_scores(v) + if isinstance(v, Movie): + if score < scores['title']: + score_color = 'red' + elif score < scores['title'] + scores['year'] + scores['release_group']: + score_color = 'yellow' + else: + score_color = 'green' + elif isinstance(v, Episode): + if score < scores['series'] + scores['season'] + scores['episode']: + score_color = 'red' + elif score < scores['series'] + scores['season'] + scores['episode'] + scores['release_group']: + score_color = 'yellow' + else: + score_color = 'green' + + # scale score from 0 to 100 taking out preferences + scaled_score = score + if s.hearing_impaired == hearing_impaired: + scaled_score -= scores['hearing_impaired'] + scaled_score *= 100 / scores['hash'] + + # echo some nice colored output + click.echo(' - [{score}] {language} subtitle from {provider_name} (match on {matches})'.format( + 
score=click.style('{:5.1f}'.format(scaled_score), fg=score_color, bold=score >= scores['hash']), + language=s.language.name if s.language.country is None else '%s (%s)' % (s.language.name, + s.language.country.name), + provider_name=s.provider_name, + matches=', '.join(sorted(matches, key=scores.get, reverse=True)) + )) + + if verbose == 0: + click.echo('Downloaded %s subtitle%s' % (click.style(str(total_subtitles), bold=True), + 's' if total_subtitles > 1 else '')) diff --git a/libs/subliminal/compat.py b/libs/subliminal/compat.py deleted file mode 100644 index 28bd3e84..00000000 --- a/libs/subliminal/compat.py +++ /dev/null @@ -1,21 +0,0 @@ -# -*- coding: utf-8 -*- -import sys -import socket - - -if sys.version_info[0] == 2: - from xmlrpclib import ServerProxy, Transport - from httplib import HTTPConnection -elif sys.version_info[0] == 3: - from xmlrpc.client import ServerProxy, Transport - from http.client import HTTPConnection - - -class TimeoutTransport(Transport, object): - def __init__(self, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, *args, **kwargs): - super(TimeoutTransport, self).__init__(*args, **kwargs) - self.timeout = timeout - - def make_connection(self, host): - h = HTTPConnection(host, timeout=self.timeout) - return h diff --git a/libs/subliminal/converters/addic7ed.py b/libs/subliminal/converters/addic7ed.py index 0e862931..f9cb8316 100644 --- a/libs/subliminal/converters/addic7ed.py +++ b/libs/subliminal/converters/addic7ed.py @@ -1,12 +1,11 @@ # -*- coding: utf-8 -*- -from __future__ import unicode_literals from babelfish import LanguageReverseConverter, language_converters class Addic7edConverter(LanguageReverseConverter): def __init__(self): self.name_converter = language_converters['name'] - self.from_addic7ed = {'Català': ('cat',), 'Chinese (Simplified)': ('zho',), 'Chinese (Traditional)': ('zho',), + self.from_addic7ed = {u'Català': ('cat',), 'Chinese (Simplified)': ('zho',), 'Chinese (Traditional)': ('zho',), 'Euskera': ('eus',), 'Galego': 
('glg',), 'Greek': ('ell',), 'Malay': ('msa',), 'Portuguese (Brazilian)': ('por', 'BR'), 'Serbian (Cyrillic)': ('srp', None, 'Cyrl'), 'Serbian (Latin)': ('srp',), 'Spanish (Latin America)': ('spa',), @@ -23,9 +22,11 @@ class Addic7edConverter(LanguageReverseConverter): return self.to_addic7ed[(alpha3, country)] if (alpha3,) in self.to_addic7ed: return self.to_addic7ed[(alpha3,)] + return self.name_converter.convert(alpha3, country, script) def reverse(self, addic7ed): if addic7ed in self.from_addic7ed: return self.from_addic7ed[addic7ed] + return self.name_converter.reverse(addic7ed) diff --git a/libs/subliminal/converters/legendastv.py b/libs/subliminal/converters/legendastv.py new file mode 100644 index 00000000..c2e13bd3 --- /dev/null +++ b/libs/subliminal/converters/legendastv.py @@ -0,0 +1,27 @@ +# -*- coding: utf-8 -*- +from babelfish import LanguageReverseConverter + +from ..exceptions import ConfigurationError + + +class LegendasTVConverter(LanguageReverseConverter): + def __init__(self): + self.from_legendastv = {1: ('por', 'BR'), 2: ('eng',), 3: ('spa',), 4: ('fra',), 5: ('deu',), 6: ('jpn',), + 7: ('dan',), 8: ('nor',), 9: ('swe',), 10: ('por',), 11: ('ara',), 12: ('ces',), + 13: ('zho',), 14: ('kor',), 15: ('bul',), 16: ('ita',), 17: ('pol',)} + self.to_legendastv = {v: k for k, v in self.from_legendastv.items()} + self.codes = set(self.from_legendastv.keys()) + + def convert(self, alpha3, country=None, script=None): + if (alpha3, country) in self.to_legendastv: + return self.to_legendastv[(alpha3, country)] + if (alpha3,) in self.to_legendastv: + return self.to_legendastv[(alpha3,)] + + raise ConfigurationError('Unsupported language code for legendastv: %s, %s, %s' % (alpha3, country, script)) + + def reverse(self, legendastv): + if legendastv in self.from_legendastv: + return self.from_legendastv[legendastv] + + raise ConfigurationError('Unsupported language number for legendastv: %s' % legendastv) diff --git a/libs/subliminal/converters/podnapisi.py 
b/libs/subliminal/converters/podnapisi.py deleted file mode 100644 index d73cb1c1..00000000 --- a/libs/subliminal/converters/podnapisi.py +++ /dev/null @@ -1,32 +0,0 @@ -# -*- coding: utf-8 -*- -from __future__ import unicode_literals -from babelfish import LanguageReverseConverter, LanguageConvertError, LanguageReverseError - - -class PodnapisiConverter(LanguageReverseConverter): - def __init__(self): - self.from_podnapisi = {2: ('eng',), 28: ('spa',), 26: ('pol',), 36: ('srp',), 1: ('slv',), 38: ('hrv',), - 9: ('ita',), 8: ('fra',), 48: ('por', 'BR'), 23: ('nld',), 12: ('ara',), 13: ('ron',), - 33: ('bul',), 32: ('por',), 16: ('ell',), 15: ('hun',), 31: ('fin',), 30: ('tur',), - 7: ('ces',), 25: ('swe',), 27: ('rus',), 24: ('dan',), 22: ('heb',), 51: ('vie',), - 52: ('fas',), 5: ('deu',), 14: ('spa', 'AR'), 54: ('ind',), 47: ('srp', None, 'Cyrl'), - 3: ('nor',), 20: ('est',), 10: ('bos',), 17: ('zho',), 37: ('slk',), 35: ('mkd',), - 11: ('jpn',), 4: ('kor',), 29: ('sqi',), 6: ('isl',), 19: ('lit',), 46: ('ukr',), - 44: ('tha',), 53: ('cat',), 56: ('sin',), 21: ('lav',), 40: ('cmn',), 55: ('msa',), - 42: ('hin',), 50: ('bel',)} - self.to_podnapisi = {v: k for k, v in self.from_podnapisi.items()} - self.codes = set(self.from_podnapisi.keys()) - - def convert(self, alpha3, country=None, script=None): - if (alpha3,) in self.to_podnapisi: - return self.to_podnapisi[(alpha3,)] - if (alpha3, country) in self.to_podnapisi: - return self.to_podnapisi[(alpha3, country)] - if (alpha3, country, script) in self.to_podnapisi: - return self.to_podnapisi[(alpha3, country, script)] - raise LanguageConvertError(alpha3, country, script) - - def reverse(self, podnapisi): - if podnapisi not in self.from_podnapisi: - raise LanguageReverseError(podnapisi) - return self.from_podnapisi[podnapisi] diff --git a/libs/subliminal/converters/shooter.py b/libs/subliminal/converters/shooter.py new file mode 100644 index 00000000..ac6431a6 --- /dev/null +++ b/libs/subliminal/converters/shooter.py 
@@ -0,0 +1,23 @@ +# -*- coding: utf-8 -*- +from babelfish import LanguageReverseConverter + +from ..exceptions import ConfigurationError + + +class ShooterConverter(LanguageReverseConverter): + def __init__(self): + self.from_shooter = {'chn': ('zho',), 'eng': ('eng',)} + self.to_shooter = {v: k for k, v in self.from_shooter.items()} + self.codes = set(self.from_shooter.keys()) + + def convert(self, alpha3, country=None, script=None): + if (alpha3,) in self.to_shooter: + return self.to_shooter[(alpha3,)] + + raise ConfigurationError('Unsupported language for shooter: %s, %s, %s' % (alpha3, country, script)) + + def reverse(self, shooter): + if shooter in self.from_shooter: + return self.from_shooter[shooter] + + raise ConfigurationError('Unsupported language code for shooter: %s' % shooter) diff --git a/libs/subliminal/converters/thesubdb.py b/libs/subliminal/converters/thesubdb.py new file mode 100644 index 00000000..58051afb --- /dev/null +++ b/libs/subliminal/converters/thesubdb.py @@ -0,0 +1,26 @@ +# -*- coding: utf-8 -*- +from babelfish import LanguageReverseConverter + +from ..exceptions import ConfigurationError + + +class TheSubDBConverter(LanguageReverseConverter): + def __init__(self): + self.from_thesubdb = {'en': ('eng',), 'es': ('spa',), 'fr': ('fra',), 'it': ('ita',), 'nl': ('nld',), + 'pl': ('pol',), 'pt': ('por', 'BR'), 'ro': ('ron',), 'sv': ('swe',), 'tr': ('tur',)} + self.to_thesubdb = {v: k for k, v in self.from_thesubdb.items()} + self.codes = set(self.from_thesubdb.keys()) + + def convert(self, alpha3, country=None, script=None): + if (alpha3, country) in self.to_thesubdb: + return self.to_thesubdb[(alpha3, country)] + if (alpha3,) in self.to_thesubdb: + return self.to_thesubdb[(alpha3,)] + + raise ConfigurationError('Unsupported language for thesubdb: %s, %s, %s' % (alpha3, country, script)) + + def reverse(self, thesubdb): + if thesubdb in self.from_thesubdb: + return self.from_thesubdb[thesubdb] + + raise ConfigurationError('Unsupported 
language code for thesubdb: %s' % thesubdb) diff --git a/libs/subliminal/converters/tvsubtitles.py b/libs/subliminal/converters/tvsubtitles.py index e9b7e74f..45b9fed1 100644 --- a/libs/subliminal/converters/tvsubtitles.py +++ b/libs/subliminal/converters/tvsubtitles.py @@ -1,5 +1,4 @@ # -*- coding: utf-8 -*- -from __future__ import unicode_literals from babelfish import LanguageReverseConverter, language_converters @@ -8,7 +7,7 @@ class TVsubtitlesConverter(LanguageReverseConverter): self.alpha2_converter = language_converters['alpha2'] self.from_tvsubtitles = {'br': ('por', 'BR'), 'ua': ('ukr',), 'gr': ('ell',), 'cn': ('zho',), 'jp': ('jpn',), 'cz': ('ces',)} - self.to_tvsubtitles = {v: k for k, v in self.from_tvsubtitles} + self.to_tvsubtitles = {v: k for k, v in self.from_tvsubtitles.items()} self.codes = self.alpha2_converter.codes | set(self.from_tvsubtitles.keys()) def convert(self, alpha3, country=None, script=None): @@ -16,9 +15,11 @@ class TVsubtitlesConverter(LanguageReverseConverter): return self.to_tvsubtitles[(alpha3, country)] if (alpha3,) in self.to_tvsubtitles: return self.to_tvsubtitles[(alpha3,)] + return self.alpha2_converter.convert(alpha3, country, script) def reverse(self, tvsubtitles): if tvsubtitles in self.from_tvsubtitles: return self.from_tvsubtitles[tvsubtitles] + return self.alpha2_converter.reverse(tvsubtitles) diff --git a/libs/subliminal/core.py b/libs/subliminal/core.py new file mode 100644 index 00000000..c516c49d --- /dev/null +++ b/libs/subliminal/core.py @@ -0,0 +1,705 @@ +# -*- coding: utf-8 -*- +from collections import defaultdict +from concurrent.futures import ThreadPoolExecutor +from datetime import datetime +import io +import itertools +import logging +import operator +import os.path +import socket + +from babelfish import Language, LanguageReverseError +from guessit import guessit +from rarfile import NotRarFile, RarCannotExec, RarFile +import requests + +from .extensions import provider_manager, refiner_manager +from 
.score import compute_score as default_compute_score +from .subtitle import SUBTITLE_EXTENSIONS, get_subtitle_path +from .utils import hash_napiprojekt, hash_opensubtitles, hash_shooter, hash_thesubdb +from .video import VIDEO_EXTENSIONS, Episode, Movie, Video + +#: Supported archive extensions +ARCHIVE_EXTENSIONS = ('.rar',) + +logger = logging.getLogger(__name__) + + +class ProviderPool(object): + """A pool of providers with the same API as a single :class:`~subliminal.providers.Provider`. + + It has a few extra features: + + * Lazy loads providers when needed and supports the `with` statement to :meth:`terminate` + the providers on exit. + * Automatically discard providers on failure. + + :param list providers: name of providers to use, if not all. + :param dict provider_configs: provider configuration as keyword arguments per provider name to pass when + instanciating the :class:`~subliminal.providers.Provider`. + + """ + def __init__(self, providers=None, provider_configs=None): + #: Name of providers to use + self.providers = providers or provider_manager.names() + + #: Provider configuration + self.provider_configs = provider_configs or {} + + #: Initialized providers + self.initialized_providers = {} + + #: Discarded providers + self.discarded_providers = set() + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, traceback): + self.terminate() + + def __getitem__(self, name): + if name not in self.providers: + raise KeyError + if name not in self.initialized_providers: + logger.info('Initializing provider %s', name) + provider = provider_manager[name].plugin(**self.provider_configs.get(name, {})) + provider.initialize() + self.initialized_providers[name] = provider + + return self.initialized_providers[name] + + def __delitem__(self, name): + if name not in self.initialized_providers: + raise KeyError(name) + + try: + logger.info('Terminating provider %s', name) + self.initialized_providers[name].terminate() + except 
(requests.Timeout, socket.timeout): + logger.error('Provider %r timed out, improperly terminated', name) + except: + logger.exception('Provider %r terminated unexpectedly', name) + + del self.initialized_providers[name] + + def __iter__(self): + return iter(self.initialized_providers) + + def list_subtitles_provider(self, provider, video, languages): + """List subtitles with a single provider. + + The video and languages are checked against the provider. + + :param str provider: name of the provider. + :param video: video to list subtitles for. + :type video: :class:`~subliminal.video.Video` + :param languages: languages to search for. + :type languages: set of :class:`~babelfish.language.Language` + :return: found subtitles. + :rtype: list of :class:`~subliminal.subtitle.Subtitle` or None + + """ + # check video validity + if not provider_manager[provider].plugin.check(video): + logger.info('Skipping provider %r: not a valid video', provider) + return [] + + # check supported languages + provider_languages = provider_manager[provider].plugin.languages & languages + if not provider_languages: + logger.info('Skipping provider %r: no language to search for', provider) + return [] + + # list subtitles + logger.info('Listing subtitles with provider %r and languages %r', provider, provider_languages) + try: + return self[provider].list_subtitles(video, provider_languages) + except (requests.Timeout, socket.timeout): + logger.error('Provider %r timed out', provider) + except: + logger.exception('Unexpected error in provider %r', provider) + + def list_subtitles(self, video, languages): + """List subtitles. + + :param video: video to list subtitles for. + :type video: :class:`~subliminal.video.Video` + :param languages: languages to search for. + :type languages: set of :class:`~babelfish.language.Language` + :return: found subtitles. 
+ :rtype: list of :class:`~subliminal.subtitle.Subtitle` + + """ + subtitles = [] + + for name in self.providers: + # check discarded providers + if name in self.discarded_providers: + logger.debug('Skipping discarded provider %r', name) + continue + + # list subtitles + provider_subtitles = self.list_subtitles_provider(name, video, languages) + if provider_subtitles is None: + logger.info('Discarding provider %s', name) + self.discarded_providers.add(name) + continue + + # add the subtitles + subtitles.extend(provider_subtitles) + + return subtitles + + def download_subtitle(self, subtitle): + """Download `subtitle`'s :attr:`~subliminal.subtitle.Subtitle.content`. + + :param subtitle: subtitle to download. + :type subtitle: :class:`~subliminal.subtitle.Subtitle` + :return: `True` if the subtitle has been successfully downloaded, `False` otherwise. + :rtype: bool + + """ + # check discarded providers + if subtitle.provider_name in self.discarded_providers: + logger.warning('Provider %r is discarded', subtitle.provider_name) + return False + + logger.info('Downloading subtitle %r', subtitle) + try: + self[subtitle.provider_name].download_subtitle(subtitle) + except (requests.Timeout, socket.timeout): + logger.error('Provider %r timed out, discarding it', subtitle.provider_name) + self.discarded_providers.add(subtitle.provider_name) + return False + except: + logger.exception('Unexpected error in provider %r, discarding it', subtitle.provider_name) + self.discarded_providers.add(subtitle.provider_name) + return False + + # check subtitle validity + if not subtitle.is_valid(): + logger.error('Invalid subtitle') + return False + + return True + + def download_best_subtitles(self, subtitles, video, languages, min_score=0, hearing_impaired=False, only_one=False, + compute_score=None): + """Download the best matching subtitles. + + :param subtitles: the subtitles to use. 
+ :type subtitles: list of :class:`~subliminal.subtitle.Subtitle` + :param video: video to download subtitles for. + :type video: :class:`~subliminal.video.Video` + :param languages: languages to download. + :type languages: set of :class:`~babelfish.language.Language` + :param int min_score: minimum score for a subtitle to be downloaded. + :param bool hearing_impaired: hearing impaired preference. + :param bool only_one: download only one subtitle, not one per language. + :param compute_score: function that takes `subtitle` and `video` as positional arguments, + `hearing_impaired` as keyword argument and returns the score. + :return: downloaded subtitles. + :rtype: list of :class:`~subliminal.subtitle.Subtitle` + + """ + compute_score = compute_score or default_compute_score + + # sort subtitles by score + scored_subtitles = sorted([(s, compute_score(s, video, hearing_impaired=hearing_impaired)) + for s in subtitles], key=operator.itemgetter(1), reverse=True) + + # download best subtitles, falling back on the next on error + downloaded_subtitles = [] + for subtitle, score in scored_subtitles: + # check score + if score < min_score: + logger.info('Score %d is below min_score (%d)', score, min_score) + break + + # check downloaded languages + if subtitle.language in set(s.language for s in downloaded_subtitles): + logger.debug('Skipping subtitle: %r already downloaded', subtitle.language) + continue + + # download + if self.download_subtitle(subtitle): + downloaded_subtitles.append(subtitle) + + # stop when all languages are downloaded + if set(s.language for s in downloaded_subtitles) == languages: + logger.debug('All languages downloaded') + break + + # stop if only one subtitle is requested + if only_one: + logger.debug('Only one subtitle downloaded') + break + + return downloaded_subtitles + + def terminate(self): + """Terminate all the :attr:`initialized_providers`.""" + logger.debug('Terminating initialized providers') + for name in 
list(self.initialized_providers): + del self[name] + + +class AsyncProviderPool(ProviderPool): + """Subclass of :class:`ProviderPool` with asynchronous support for :meth:`~ProviderPool.list_subtitles`. + + :param int max_workers: maximum number of threads to use. If `None`, :attr:`max_workers` will be set + to the number of :attr:`~ProviderPool.providers`. + + """ + def __init__(self, max_workers=None, *args, **kwargs): + super(AsyncProviderPool, self).__init__(*args, **kwargs) + + #: Maximum number of threads to use + self.max_workers = max_workers or len(self.providers) + + def list_subtitles_provider(self, provider, video, languages): + return provider, super(AsyncProviderPool, self).list_subtitles_provider(provider, video, languages) + + def list_subtitles(self, video, languages): + subtitles = [] + + with ThreadPoolExecutor(self.max_workers) as executor: + for provider, provider_subtitles in executor.map(self.list_subtitles_provider, self.providers, + itertools.repeat(video, len(self.providers)), + itertools.repeat(languages, len(self.providers))): + # discard provider that failed + if provider_subtitles is None: + logger.info('Discarding provider %s', provider) + self.discarded_providers.add(provider) + continue + + # add subtitles + subtitles.extend(provider_subtitles) + + return subtitles + + +def check_video(video, languages=None, age=None, undefined=False): + """Perform some checks on the `video`. + + All the checks are optional. Return `False` if any of this check fails: + + * `languages` already exist in `video`'s :attr:`~subliminal.video.Video.subtitle_languages`. + * `video` is older than `age`. + * `video` has an `undefined` language in :attr:`~subliminal.video.Video.subtitle_languages`. + + :param video: video to check. + :type video: :class:`~subliminal.video.Video` + :param languages: desired languages. + :type languages: set of :class:`~babelfish.language.Language` + :param datetime.timedelta age: maximum age of the video. 
+ :param bool undefined: fail on existing undefined language. + :return: `True` if the video passes the checks, `False` otherwise. + :rtype: bool + + """ + # language test + if languages and not (languages - video.subtitle_languages): + logger.debug('All languages %r exist', languages) + return False + + # age test + if age and video.age > age: + logger.debug('Video is older than %r', age) + return False + + # undefined test + if undefined and Language('und') in video.subtitle_languages: + logger.debug('Undefined language found') + return False + + return True + + +def search_external_subtitles(path, directory=None): + """Search for external subtitles from a video `path` and their associated language. + + Unless `directory` is provided, search will be made in the same directory as the video file. + + :param str path: path to the video. + :param str directory: directory to search for subtitles. + :return: found subtitles with their languages. + :rtype: dict + + """ + # split path + dirpath, filename = os.path.split(path) + dirpath = dirpath or '.' + fileroot, fileext = os.path.splitext(filename) + + # search for subtitles + subtitles = {} + for p in os.listdir(directory or dirpath): + # keep only valid subtitle filenames + if not p.startswith(fileroot) or not p.endswith(SUBTITLE_EXTENSIONS): + continue + + # extract the potential language code + language = Language('und') + language_code = p[len(fileroot):-len(os.path.splitext(p)[1])].replace(fileext, '').replace('_', '-')[1:] + if language_code: + try: + language = Language.fromietf(language_code) + except (ValueError, LanguageReverseError): + logger.error('Cannot parse language code %r', language_code) + + subtitles[p] = language + + logger.debug('Found subtitles %r', subtitles) + + return subtitles + + +def scan_video(path): + """Scan a video from a `path`. + + :param str path: existing path to the video. + :return: the scanned video. 
+ :rtype: :class:`~subliminal.video.Video` + + """ + # check for non-existing path + if not os.path.exists(path): + raise ValueError('Path does not exist') + + # check video extension + if not path.endswith(VIDEO_EXTENSIONS): + raise ValueError('%r is not a valid video extension' % os.path.splitext(path)[1]) + + dirpath, filename = os.path.split(path) + logger.info('Scanning video %r in %r', filename, dirpath) + + # guess + video = Video.fromguess(path, guessit(path)) + + # size and hashes + video.size = os.path.getsize(path) + if video.size > 10485760: + logger.debug('Size is %d', video.size) + video.hashes['opensubtitles'] = hash_opensubtitles(path) + video.hashes['shooter'] = hash_shooter(path) + video.hashes['thesubdb'] = hash_thesubdb(path) + video.hashes['napiprojekt'] = hash_napiprojekt(path) + logger.debug('Computed hashes %r', video.hashes) + else: + logger.warning('Size is lower than 10MB: hashes not computed') + + return video + + +def scan_archive(path): + """Scan an archive from a `path`. + + :param str path: existing path to the archive. + :return: the scanned video. 
+ :rtype: :class:`~subliminal.video.Video` + + """ + # check for non-existing path + if not os.path.exists(path): + raise ValueError('Path does not exist') + + # check video extension + if not path.endswith(ARCHIVE_EXTENSIONS): + raise ValueError('%r is not a valid archive extension' % os.path.splitext(path)[1]) + + dirpath, filename = os.path.split(path) + logger.info('Scanning archive %r in %r', filename, dirpath) + + # rar extension + if filename.endswith('.rar'): + rar = RarFile(path) + + # filter on video extensions + rar_filenames = [f for f in rar.namelist() if f.endswith(VIDEO_EXTENSIONS)] + + # no video found + if not rar_filenames: + raise ValueError('No video in archive') + + # more than one video found + if len(rar_filenames) > 1: + raise ValueError('More than one video in archive') + + # guess + rar_filename = rar_filenames[0] + rar_filepath = os.path.join(dirpath, rar_filename) + video = Video.fromguess(rar_filepath, guessit(rar_filepath)) + + # size + video.size = rar.getinfo(rar_filename).file_size + else: + raise ValueError('Unsupported extension %r' % os.path.splitext(path)[1]) + + return video + + +def scan_videos(path, age=None, archives=True): + """Scan `path` for videos and their subtitles. + + See :func:`refine` to find additional information for the video. + + :param str path: existing directory path to scan. + :param datetime.timedelta age: maximum age of the video or archive. + :param bool archives: scan videos in archives. + :return: the scanned videos. 
+ :rtype: list of :class:`~subliminal.video.Video` + + """ + # check for non-existing path + if not os.path.exists(path): + raise ValueError('Path does not exist') + + # check for non-directory path + if not os.path.isdir(path): + raise ValueError('Path is not a directory') + + # walk the path + videos = [] + for dirpath, dirnames, filenames in os.walk(path): + logger.debug('Walking directory %r', dirpath) + + # remove badly encoded and hidden dirnames + for dirname in list(dirnames): + if dirname.startswith('.'): + logger.debug('Skipping hidden dirname %r in %r', dirname, dirpath) + dirnames.remove(dirname) + + # scan for videos + for filename in filenames: + # filter on videos and archives + if not (filename.endswith(VIDEO_EXTENSIONS) or archives and filename.endswith(ARCHIVE_EXTENSIONS)): + continue + + # skip hidden files + if filename.startswith('.'): + logger.debug('Skipping hidden filename %r in %r', filename, dirpath) + continue + + # reconstruct the file path + filepath = os.path.join(dirpath, filename) + + # skip links + if os.path.islink(filepath): + logger.debug('Skipping link %r in %r', filename, dirpath) + continue + + # skip old files + if age and datetime.utcnow() - datetime.utcfromtimestamp(os.path.getmtime(filepath)) > age: + logger.debug('Skipping old file %r in %r', filename, dirpath) + continue + + # scan + if filename.endswith(VIDEO_EXTENSIONS): # video + try: + video = scan_video(filepath) + except ValueError: # pragma: no cover + logger.exception('Error scanning video') + continue + elif archives and filename.endswith(ARCHIVE_EXTENSIONS): # archive + try: + video = scan_archive(filepath) + except (NotRarFile, RarCannotExec, ValueError): # pragma: no cover + logger.exception('Error scanning archive') + continue + else: # pragma: no cover + raise ValueError('Unsupported file %r' % filename) + + videos.append(video) + + return videos + + +def refine(video, episode_refiners=None, movie_refiners=None, **kwargs): + """Refine a video using 
:ref:`refiners`. + + .. note:: + + Exceptions raised in refiners are silently passed and logged. + + :param video: the video to refine. + :type video: :class:`~subliminal.video.Video` + :param tuple episode_refiners: refiners to use for episodes. + :param tuple movie_refiners: refiners to use for movies. + :param \*\*kwargs: additional parameters for the :func:`~subliminal.refiners.refine` functions. + + """ + refiners = () + if isinstance(video, Episode): + refiners = episode_refiners or ('metadata', 'tvdb', 'omdb') + elif isinstance(video, Movie): + refiners = movie_refiners or ('metadata', 'omdb') + for refiner in refiners: + logger.info('Refining video with %s', refiner) + try: + refiner_manager[refiner].plugin(video, **kwargs) + except: + logger.exception('Failed to refine video') + + +def list_subtitles(videos, languages, pool_class=ProviderPool, **kwargs): + """List subtitles. + + The `videos` must pass the `languages` check of :func:`check_video`. + + :param videos: videos to list subtitles for. + :type videos: set of :class:`~subliminal.video.Video` + :param languages: languages to search for. + :type languages: set of :class:`~babelfish.language.Language` + :param pool_class: class to use as provider pool. + :type pool_class: :class:`ProviderPool`, :class:`AsyncProviderPool` or similar + :param \*\*kwargs: additional parameters for the provided `pool_class` constructor. + :return: found subtitles per video. 
+ :rtype: dict of :class:`~subliminal.video.Video` to list of :class:`~subliminal.subtitle.Subtitle` + + """ + listed_subtitles = defaultdict(list) + + # check videos + checked_videos = [] + for video in videos: + if not check_video(video, languages=languages): + logger.info('Skipping video %r', video) + continue + checked_videos.append(video) + + # return immediately if no video passed the checks + if not checked_videos: + return listed_subtitles + + # list subtitles + with pool_class(**kwargs) as pool: + for video in checked_videos: + logger.info('Listing subtitles for %r', video) + subtitles = pool.list_subtitles(video, languages - video.subtitle_languages) + listed_subtitles[video].extend(subtitles) + logger.info('Found %d subtitle(s)', len(subtitles)) + + return listed_subtitles + + +def download_subtitles(subtitles, pool_class=ProviderPool, **kwargs): + """Download :attr:`~subliminal.subtitle.Subtitle.content` of `subtitles`. + + :param subtitles: subtitles to download. + :type subtitles: list of :class:`~subliminal.subtitle.Subtitle` + :param pool_class: class to use as provider pool. + :type pool_class: :class:`ProviderPool`, :class:`AsyncProviderPool` or similar + :param \*\*kwargs: additional parameters for the provided `pool_class` constructor. + + """ + with pool_class(**kwargs) as pool: + for subtitle in subtitles: + logger.info('Downloading subtitle %r', subtitle) + pool.download_subtitle(subtitle) + + +def download_best_subtitles(videos, languages, min_score=0, hearing_impaired=False, only_one=False, compute_score=None, + pool_class=ProviderPool, **kwargs): + """List and download the best matching subtitles. + + The `videos` must pass the `languages` and `undefined` (`only_one`) checks of :func:`check_video`. + + :param videos: videos to download subtitles for. + :type videos: set of :class:`~subliminal.video.Video` + :param languages: languages to download. 
+ :type languages: set of :class:`~babelfish.language.Language` + :param int min_score: minimum score for a subtitle to be downloaded. + :param bool hearing_impaired: hearing impaired preference. + :param bool only_one: download only one subtitle, not one per language. + :param compute_score: function that takes `subtitle` and `video` as positional arguments, + `hearing_impaired` as keyword argument and returns the score. + :param pool_class: class to use as provider pool. + :type pool_class: :class:`ProviderPool`, :class:`AsyncProviderPool` or similar + :param \*\*kwargs: additional parameters for the provided `pool_class` constructor. + :return: downloaded subtitles per video. + :rtype: dict of :class:`~subliminal.video.Video` to list of :class:`~subliminal.subtitle.Subtitle` + + """ + downloaded_subtitles = defaultdict(list) + + # check videos + checked_videos = [] + for video in videos: + if not check_video(video, languages=languages, undefined=only_one): + logger.info('Skipping video %r', video) + continue + checked_videos.append(video) + + # return immediately if no video passed the checks + if not checked_videos: + return downloaded_subtitles + + # download best subtitles + with pool_class(**kwargs) as pool: + for video in checked_videos: + logger.info('Downloading best subtitles for %r', video) + subtitles = pool.download_best_subtitles(pool.list_subtitles(video, languages - video.subtitle_languages), + video, languages, min_score=min_score, + hearing_impaired=hearing_impaired, only_one=only_one, + compute_score=compute_score) + logger.info('Downloaded %d subtitle(s)', len(subtitles)) + downloaded_subtitles[video].extend(subtitles) + + return downloaded_subtitles + + +def save_subtitles(video, subtitles, single=False, directory=None, encoding=None): + """Save subtitles on filesystem. + + Subtitles are saved in the order of the list. If a subtitle with a language has already been saved, other subtitles + with the same language are silently ignored. 
+
+    The extension used is `.lang.srt` by default or `.srt` if `single` is `True`, with `lang` being the IETF code for
+    the :attr:`~subliminal.subtitle.Subtitle.language` of the subtitle.
+
+    :param video: video of the subtitles.
+    :type video: :class:`~subliminal.video.Video`
+    :param subtitles: subtitles to save.
+    :type subtitles: list of :class:`~subliminal.subtitle.Subtitle`
+    :param bool single: save a single subtitle, default is to save one subtitle per language.
+    :param str directory: path to directory where to save the subtitles, default is next to the video.
+    :param str encoding: encoding in which to save the subtitles, default is to keep original encoding.
+    :return: the saved subtitles
+    :rtype: list of :class:`~subliminal.subtitle.Subtitle`
+
+    """
+    saved_subtitles = []
+    for subtitle in subtitles:
+        # check content
+        if subtitle.content is None:
+            logger.error('Skipping subtitle %r: no content', subtitle)
+            continue
+
+        # check language
+        if subtitle.language in set(s.language for s in saved_subtitles):
+            logger.debug('Skipping subtitle %r: language already saved', subtitle)
+            continue
+
+        # create subtitle path
+        subtitle_path = get_subtitle_path(video.name, None if single else subtitle.language)
+        if directory is not None:
+            subtitle_path = os.path.join(directory, os.path.split(subtitle_path)[1])
+
+        # save content as is or in the specified encoding
+        logger.info('Saving %r to %r', subtitle, subtitle_path)
+        if encoding is None:
+            with io.open(subtitle_path, 'wb') as f:
+                f.write(subtitle.content)
+        else:
+            with io.open(subtitle_path, 'w', encoding=encoding) as f:
+                f.write(subtitle.text)
+        saved_subtitles.append(subtitle)
+
+        # check single
+        if single:
+            break
+
+    return saved_subtitles
diff --git a/libs/subliminal/exceptions.py b/libs/subliminal/exceptions.py
index be954800..5f5c7a77 100644
--- a/libs/subliminal/exceptions.py
+++ b/libs/subliminal/exceptions.py
@@ -1,22 +1,29 @@
 # -*- coding: utf-8 -*-
-from __future__ import unicode_literals
- - class Error(Exception): - """Base class for exceptions in subliminal""" + """Base class for exceptions in subliminal.""" + pass class ProviderError(Error): - """Exception raised by providers""" + """Exception raised by providers.""" + pass class ConfigurationError(ProviderError): - """Exception raised by providers when badly configured""" + """Exception raised by providers when badly configured.""" + pass class AuthenticationError(ProviderError): - """Exception raised by providers when authentication failed""" + """Exception raised by providers when authentication failed.""" + pass + + +class TooManyRequests(ProviderError): + """Exception raised by providers when too many requests are made.""" + pass class DownloadLimitExceeded(ProviderError): - """Exception raised by providers when download limit is exceeded""" + """Exception raised by providers when download limit is exceeded.""" + pass diff --git a/libs/subliminal/extensions.py b/libs/subliminal/extensions.py new file mode 100644 index 00000000..1f378b7f --- /dev/null +++ b/libs/subliminal/extensions.py @@ -0,0 +1,106 @@ +# -*- coding: utf-8 -*- +from pkg_resources import EntryPoint + +from stevedore import ExtensionManager + + +class RegistrableExtensionManager(ExtensionManager): + """:class:~stevedore.extensions.ExtensionManager` with support for registration. + + It allows loading of internal extensions without setup and registering/unregistering additional extensions. + + Loading is done in this order: + + * Entry point extensions + * Internal extensions + * Registered extensions + + :param str namespace: namespace argument for :class:~stevedore.extensions.ExtensionManager`. + :param list internal_extensions: internal extensions to use with entry point syntax. + :param \*\*kwargs: additional parameters for the :class:~stevedore.extensions.ExtensionManager` constructor. 
+ + """ + def __init__(self, namespace, internal_extensions, **kwargs): + #: Registered extensions with entry point syntax + self.registered_extensions = [] + + #: Internal extensions with entry point syntax + self.internal_extensions = internal_extensions + + super(RegistrableExtensionManager, self).__init__(namespace, **kwargs) + + def _find_entry_points(self, namespace): + # copy of default extensions + eps = list(super(RegistrableExtensionManager, self)._find_entry_points(namespace)) + + # internal extensions + for iep in self.internal_extensions: + ep = EntryPoint.parse(iep) + if ep.name not in [e.name for e in eps]: + eps.append(ep) + + # registered extensions + for rep in self.registered_extensions: + ep = EntryPoint.parse(rep) + if ep.name not in [e.name for e in eps]: + eps.append(ep) + + return eps + + def register(self, entry_point): + """Register an extension + + :param str entry_point: extension to register (entry point syntax). + :raise: ValueError if already registered. + + """ + if entry_point in self.registered_extensions: + raise ValueError('Extension already registered') + + ep = EntryPoint.parse(entry_point) + if ep.name in self.names(): + raise ValueError('An extension with the same name already exist') + + ext = self._load_one_plugin(ep, False, (), {}, False) + self.extensions.append(ext) + if self._extensions_by_name is not None: + self._extensions_by_name[ext.name] = ext + self.registered_extensions.insert(0, entry_point) + + def unregister(self, entry_point): + """Unregister a provider + + :param str entry_point: provider to unregister (entry point syntax). 
+ + """ + if entry_point not in self.registered_extensions: + raise ValueError('Extension not registered') + + ep = EntryPoint.parse(entry_point) + self.registered_extensions.remove(entry_point) + if self._extensions_by_name is not None: + del self._extensions_by_name[ep.name] + for i, ext in enumerate(self.extensions): + if ext.name == ep.name: + del self.extensions[i] + break + + +#: Provider manager +provider_manager = RegistrableExtensionManager('subliminal.providers', [ + 'addic7ed = subliminal.providers.addic7ed:Addic7edProvider', + 'legendastv = subliminal.providers.legendastv:LegendasTVProvider', + 'opensubtitles = subliminal.providers.opensubtitles:OpenSubtitlesProvider', + 'podnapisi = subliminal.providers.podnapisi:PodnapisiProvider', + 'shooter = subliminal.providers.shooter:ShooterProvider', + 'subscenter = subliminal.providers.subscenter:SubsCenterProvider', + 'thesubdb = subliminal.providers.thesubdb:TheSubDBProvider', + 'tvsubtitles = subliminal.providers.tvsubtitles:TVsubtitlesProvider' +]) + +#: Refiner manager +refiner_manager = RegistrableExtensionManager('subliminal.refiners', [ + 'metadata = subliminal.refiners.metadata:refine', + 'omdb = subliminal.refiners.omdb:refine', + 'tvdb = subliminal.refiners.tvdb:refine' +]) diff --git a/libs/subliminal/providers/__init__.py b/libs/subliminal/providers/__init__.py index 70daa12d..9d2fd6d2 100644 --- a/libs/subliminal/providers/__init__.py +++ b/libs/subliminal/providers/__init__.py @@ -1,27 +1,65 @@ # -*- coding: utf-8 -*- -from __future__ import unicode_literals -import contextlib import logging -import socket -import babelfish -from pkg_resources import iter_entry_points, EntryPoint -import requests -from ..video import Episode, Movie +from bs4 import BeautifulSoup, FeatureNotFound +from six.moves.xmlrpc_client import SafeTransport + +from ..video import Episode, Movie logger = logging.getLogger(__name__) -class Provider(object): - """Base class for providers +class 
TimeoutSafeTransport(SafeTransport): + """Timeout support for ``xmlrpc.client.SafeTransport``.""" + def __init__(self, timeout, *args, **kwargs): + SafeTransport.__init__(self, *args, **kwargs) + self.timeout = timeout - If any configuration is possible for the provider, like credentials, it must take place during instantiation + def make_connection(self, host): + c = SafeTransport.make_connection(self, host) + c.timeout = self.timeout - :param \*\*kwargs: configuration - :raise: :class:`~subliminal.exceptions.ProviderConfigurationError` if there is a configuration error + return c + + +class ParserBeautifulSoup(BeautifulSoup): + """A ``bs4.BeautifulSoup`` that picks the first parser available in `parsers`. + + :param markup: markup for the ``bs4.BeautifulSoup``. + :param list parsers: parser names, in order of preference. """ - #: Supported BabelFish languages + def __init__(self, markup, parsers, **kwargs): + # reject features + if set(parsers).intersection({'fast', 'permissive', 'strict', 'xml', 'html', 'html5'}): + raise ValueError('Features not allowed, only parser names') + + # reject some kwargs + if 'features' in kwargs: + raise ValueError('Cannot use features kwarg') + if 'builder' in kwargs: + raise ValueError('Cannot use builder kwarg') + + # pick the first parser available + for parser in parsers: + try: + super(ParserBeautifulSoup, self).__init__(markup, parser, **kwargs) + return + except FeatureNotFound: + pass + + raise FeatureNotFound + + +class Provider(object): + """Base class for providers. + + If any configuration is possible for the provider, like credentials, it must take place during instantiation. 
+ + :raise: :class:`~subliminal.exceptions.ConfigurationError` if there is a configuration error + + """ + #: Supported set of :class:`~babelfish.language.Language` languages = set() #: Supported video types @@ -30,53 +68,46 @@ class Provider(object): #: Required hash, if any required_hash = None - def __init__(self, **kwargs): - pass - def __enter__(self): self.initialize() return self - def __exit__(self, type, value, traceback): # @ReservedAssignment + def __exit__(self, exc_type, exc_value, traceback): self.terminate() def initialize(self): - """Initialize the provider + """Initialize the provider. Must be called when starting to work with the provider. This is the place for network initialization or login operations. - .. note: - This is called automatically if you use the :keyword:`with` statement - - - :raise: :class:`~subliminal.exceptions.ProviderNotAvailable` if the provider is unavailable + .. note:: + This is called automatically when entering the `with` statement """ - pass + raise NotImplementedError def terminate(self): - """Terminate the provider + """Terminate the provider. Must be called when done with the provider. This is the place for network shutdown or logout operations. - .. note: - This is called automatically if you use the :keyword:`with` statement + .. note:: + This is called automatically when exiting the `with` statement - :raise: :class:`~subliminal.exceptions.ProviderNotAvailable` if the provider is unavailable """ - pass + raise NotImplementedError @classmethod def check(cls, video): - """Check if the `video` can be processed + """Check if the `video` can be processed. - The video is considered invalid if not an instance of :attr:`video_types` or if the :attr:`required_hash` is - not present in :attr:`~subliminal.video.Video`'s `hashes` attribute. 
+ The `video` is considered invalid if not an instance of :attr:`video_types` or if the :attr:`required_hash` is + not present in :attr:`~subliminal.video.Video.hashes` attribute of the `video`. - :param video: the video to check + :param video: the video to check. :type video: :class:`~subliminal.video.Video` - :return: `True` if the `video` and `languages` are valid, `False` otherwise + :return: `True` if the `video` is valid, `False` otherwise. :rtype: bool """ @@ -84,255 +115,47 @@ class Provider(object): return False if cls.required_hash is not None and cls.required_hash not in video.hashes: return False + return True - def query(self, languages, *args, **kwargs): - """Query the provider for subtitles + def query(self, *args, **kwargs): + """Query the provider for subtitles. - This method arguments match as much as possible the actual parameters for querying the provider + Arguments should match as much as possible the actual parameters for querying the provider - :param languages: languages to search for - :type languages: set of :class:`babelfish.Language` - :param \*args: other required arguments - :param \*\*kwargs: other optional arguments - :return: the subtitles + :return: found subtitles. :rtype: list of :class:`~subliminal.subtitle.Subtitle` - :raise: :class:`~subliminal.exceptions.ProviderNotAvailable` if the provider is unavailable - :raise: :class:`~subliminal.exceptions.ProviderError` if something unexpected occured + :raise: :class:`~subliminal.exceptions.ProviderError` """ raise NotImplementedError def list_subtitles(self, video, languages): - """List subtitles for the `video` with the given `languages` + """List subtitles for the `video` with the given `languages`. - This is a proxy for the :meth:`query` method. The parameters passed to the :meth:`query` method may - vary depending on the amount of information available in the `video` + This will call the :meth:`query` method internally. 
The parameters passed to the :meth:`query` method may + vary depending on the amount of information available in the `video`. - :param video: video to list subtitles for + :param video: video to list subtitles for. :type video: :class:`~subliminal.video.Video` - :param languages: languages to search for - :type languages: set of :class:`babelfish.Language` - :return: the subtitles + :param languages: languages to search for. + :type languages: set of :class:`~babelfish.language.Language` + :return: found subtitles. :rtype: list of :class:`~subliminal.subtitle.Subtitle` - :raise: :class:`~subliminal.exceptions.ProviderNotAvailable` if the provider is unavailable - :raise: :class:`~subliminal.exceptions.ProviderError` if something unexpected occured + :raise: :class:`~subliminal.exceptions.ProviderError` """ raise NotImplementedError def download_subtitle(self, subtitle): - """Download the `subtitle` an fill its :attr:`~subliminal.subtitle.Subtitle.content` attribute with - subtitle's text + """Download `subtitle`'s :attr:`~subliminal.subtitle.Subtitle.content`. - :param subtitle: subtitle to download + :param subtitle: subtitle to download. :type subtitle: :class:`~subliminal.subtitle.Subtitle` - :raise: :class:`~subliminal.exceptions.ProviderNotAvailable` if the provider is unavailable - :raise: :class:`~subliminal.exceptions.ProviderError` if something unexpected occured + :raise: :class:`~subliminal.exceptions.ProviderError` """ raise NotImplementedError def __repr__(self): return '<%s [%r]>' % (self.__class__.__name__, self.video_types) - - -class ProviderManager(object): - """Manager for providers behaving like a dict with lazy loading - - Loading is done in this order: - - * Entry point providers - * Registered providers - - .. 
attribute:: entry_point - - The entry point where to look for providers - - """ - entry_point = 'subliminal.providers' - - def __init__(self): - #: Registered providers with entry point syntax - self.registered_providers = ['addic7ed = subliminal.providers.addic7ed:Addic7edProvider', - 'opensubtitles = subliminal.providers.opensubtitles:OpenSubtitlesProvider', - 'podnapisi = subliminal.providers.podnapisi:PodnapisiProvider', - 'thesubdb = subliminal.providers.thesubdb:TheSubDBProvider', - 'tvsubtitles = subliminal.providers.tvsubtitles:TVsubtitlesProvider'] - - #: Loaded providers - self.providers = {} - - @property - def available_providers(self): - """Available providers""" - available_providers = set(self.providers.keys()) - available_providers.update([ep.name for ep in iter_entry_points(self.entry_point)]) - available_providers.update([EntryPoint.parse(c).name for c in self.registered_providers]) - return available_providers - - def __getitem__(self, name): - """Get a provider, lazy loading it if necessary""" - if name in self.providers: - return self.providers[name] - for ep in iter_entry_points(self.entry_point): - if ep.name == name: - self.providers[ep.name] = ep.load() - return self.providers[ep.name] - for ep in (EntryPoint.parse(c) for c in self.registered_providers): - if ep.name == name: - self.providers[ep.name] = ep.load(require=False) - return self.providers[ep.name] - raise KeyError(name) - - def __setitem__(self, name, provider): - """Load a provider""" - self.providers[name] = provider - - def __delitem__(self, name): - """Unload a provider""" - del self.providers[name] - - def __iter__(self): - """Iterator over loaded providers""" - return iter(self.providers) - - def register(self, entry_point): - """Register a provider - - :param string entry_point: provider to register (entry point syntax) - :raise: ValueError if already registered - - """ - if entry_point in self.registered_providers: - raise ValueError('Entry point \'%s\' already 
registered' % entry_point) - entry_point_name = EntryPoint.parse(entry_point).name - if entry_point_name in self.available_providers: - raise ValueError('An entry point with name \'%s\' already registered' % entry_point_name) - self.registered_providers.insert(0, entry_point) - - def unregister(self, entry_point): - """Unregister a provider - - :param string entry_point: provider to unregister (entry point syntax) - - """ - self.registered_providers.remove(entry_point) - - def __contains__(self, name): - return name in self.providers - -provider_manager = ProviderManager() - - -class ProviderPool(object): - """A pool of providers with the same API as a single :class:`Provider` - - The :class:`ProviderPool` supports the ``with`` statement to :meth:`terminate` the providers - - :param providers: providers to use, if not all - :type providers: list of string or None - :param provider_configs: configuration for providers - :type provider_configs: dict of provider name => provider constructor kwargs or None - - """ - def __init__(self, providers=None, provider_configs=None): - self.provider_configs = provider_configs or {} - self.providers = {p: provider_manager[p] for p in (providers or provider_manager.available_providers)} - self.initialized_providers = {} - self.discarded_providers = set() - - def __enter__(self): - return self - - def __exit__(self, type, value, traceback): # @ReservedAssignment - self.terminate() - - def get_initialized_provider(self, name): - """Get a :class:`Provider` by name, initializing it if necessary - - :param string name: name of the provider - :return: the initialized provider - :rtype: :class:`Provider` - - """ - if name in self.initialized_providers: - return self.initialized_providers[name] - provider = self.providers[name](**self.provider_configs.get(name, {})) - provider.initialize() - self.initialized_providers[name] = provider - return provider - - def list_subtitles(self, video, languages): - """List subtitles for `video` with 
the given `languages` - - :param video: video to list subtitles for - :type video: :class:`~subliminal.video.Video` - :param languages: languages of subtitles to search for - :type languages: set of :class:`babelfish.Language` - :return: found subtitles - :rtype: list of :class:`~subliminal.subtitle.Subtitle` - - """ - subtitles = [] - for provider_name, provider_class in self.providers.items(): - if not provider_class.check(video): - logger.info('Skipping provider %r: not a valid video', provider_name) - continue - provider_languages = provider_class.languages & languages - video.subtitle_languages - if not provider_languages: - logger.info('Skipping provider %r: no language to search for', provider_name) - continue - if provider_name in self.discarded_providers: - logger.debug('Skipping discarded provider %r', provider_name) - continue - try: - provider = self.get_initialized_provider(provider_name) - logger.info('Listing subtitles with provider %r and languages %r', provider_name, provider_languages) - provider_subtitles = provider.list_subtitles(video, provider_languages) - logger.info('Found %d subtitles', len(provider_subtitles)) - subtitles.extend(provider_subtitles) - except (requests.exceptions.Timeout, socket.timeout): - logger.warning('Provider %r timed out, discarding it', provider_name) - self.discarded_providers.add(provider_name) - except: - logger.exception('Unexpected error in provider %r, discarding it', provider_name) - self.discarded_providers.add(provider_name) - return subtitles - - def download_subtitle(self, subtitle): - """Download a subtitle - - :param subtitle: subtitle to download - :type subtitle: :class:`~subliminal.subtitle.Subtitle` - :return: ``True`` if the subtitle has been successfully downloaded, ``False`` otherwise - :rtype: bool - - """ - if subtitle.provider_name in self.discarded_providers: - logger.debug('Discarded provider %r', subtitle.provider_name) - return False - try: - provider = 
self.get_initialized_provider(subtitle.provider_name) - provider.download_subtitle(subtitle) - if not subtitle.is_valid: - logger.warning('Invalid subtitle') - return False - return True - except (requests.exceptions.Timeout, socket.timeout): - logger.warning('Provider %r timed out, discarding it', subtitle.provider_name) - self.discarded_providers.add(subtitle.provider_name) - except: - logger.exception('Unexpected error in provider %r, discarding it', subtitle.provider_name) - self.discarded_providers.add(subtitle.provider_name) - return False - - def terminate(self): - """Terminate all the initialized providers""" - for (provider_name, provider) in self.initialized_providers.items(): - try: - provider.terminate() - except (requests.exceptions.Timeout, socket.timeout): - logger.warning('Provider %r timed out, unable to terminate', provider_name) - except: - logger.exception('Unexpected error in provider %r', provider_name) diff --git a/libs/subliminal/providers/addic7ed.py b/libs/subliminal/providers/addic7ed.py index 93ea0884..0d4a58fd 100644 --- a/libs/subliminal/providers/addic7ed.py +++ b/libs/subliminal/providers/addic7ed.py @@ -1,26 +1,34 @@ # -*- coding: utf-8 -*- -from __future__ import unicode_literals import logging -import babelfish -import bs4 -import requests -from . import Provider -from .. import __version__ -from ..cache import region, SHOW_EXPIRATION_TIME -from ..exceptions import ConfigurationError, AuthenticationError, DownloadLimitExceeded, ProviderError -from ..subtitle import Subtitle, fix_line_endings, compute_guess_properties_matches +import re + +from babelfish import Language, language_converters +from guessit import guessit +from requests import Session + +from . import ParserBeautifulSoup, Provider +from .. 
import __short_version__ +from ..cache import SHOW_EXPIRATION_TIME, region +from ..exceptions import AuthenticationError, ConfigurationError, DownloadLimitExceeded, TooManyRequests +from ..score import get_equivalent_release_groups +from ..subtitle import Subtitle, fix_line_ending, guess_matches +from ..utils import sanitize, sanitize_release_group from ..video import Episode - logger = logging.getLogger(__name__) -babelfish.language_converters.register('addic7ed = subliminal.converters.addic7ed:Addic7edConverter') + +language_converters.register('addic7ed = subliminal.converters.addic7ed:Addic7edConverter') + +#: Series header parsing regex +series_year_re = re.compile(r'^(?P[ \w\'.:(),&!?-]+?)(?: \((?P\d{4})\))?$') class Addic7edSubtitle(Subtitle): + """Addic7ed Subtitle.""" provider_name = 'addic7ed' - def __init__(self, language, series, season, episode, title, year, version, hearing_impaired, download_link, - page_link): + def __init__(self, language, hearing_impaired, page_link, series, season, episode, title, year, version, + download_link): super(Addic7edSubtitle, self).__init__(language, hearing_impaired, page_link) self.series = series self.season = season @@ -30,10 +38,15 @@ class Addic7edSubtitle(Subtitle): self.version = version self.download_link = download_link - def compute_matches(self, video): + @property + def id(self): + return self.download_link + + def get_matches(self, video): matches = set() + # series - if video.series and self.series == video.series: + if video.series and sanitize(self.series) == sanitize(video.series): matches.add('series') # season if video.season and self.season == video.season: @@ -42,153 +55,218 @@ class Addic7edSubtitle(Subtitle): if video.episode and self.episode == video.episode: matches.add('episode') # title - if video.title and self.title.lower() == video.title.lower(): + if video.title and sanitize(self.title) == sanitize(video.title): matches.add('title') # year - if self.year == video.year: + if 
video.original_series and self.year is None or video.year and video.year == self.year: matches.add('year') # release_group - if video.release_group and self.version and video.release_group.lower() in self.version.lower(): + if (video.release_group and self.version and + any(r in sanitize_release_group(self.version) + for r in get_equivalent_release_groups(sanitize_release_group(video.release_group)))): matches.add('release_group') - """ # resolution if video.resolution and self.version and video.resolution in self.version.lower(): matches.add('resolution') # format - if video.format and self.version and video.format in self.version.lower: + if video.format and self.version and video.format.lower() in self.version.lower(): matches.add('format') - """ - # we don't have the complete filename, so we need to guess the matches separately - # guess resolution (screenSize in guessit) - matches |= compute_guess_properties_matches(video, self.version, 'screenSize') - # guess format - matches |= compute_guess_properties_matches(video, self.version, 'format') + # other properties + matches |= guess_matches(video, guessit(self.version), partial=True) + return matches class Addic7edProvider(Provider): - languages = {babelfish.Language('por', 'BR')} | {babelfish.Language(l) - for l in ['ara', 'aze', 'ben', 'bos', 'bul', 'cat', 'ces', 'dan', 'deu', 'ell', 'eng', 'eus', 'fas', - 'fin', 'fra', 'glg', 'heb', 'hrv', 'hun', 'hye', 'ind', 'ita', 'jpn', 'kor', 'mkd', 'msa', - 'nld', 'nor', 'pol', 'por', 'ron', 'rus', 'slk', 'slv', 'spa', 'sqi', 'srp', 'swe', 'tha', - 'tur', 'ukr', 'vie', 'zho']} + """Addic7ed Provider.""" + languages = {Language('por', 'BR')} | {Language(l) for l in [ + 'ara', 'aze', 'ben', 'bos', 'bul', 'cat', 'ces', 'dan', 'deu', 'ell', 'eng', 'eus', 'fas', 'fin', 'fra', 'glg', + 'heb', 'hrv', 'hun', 'hye', 'ind', 'ita', 'jpn', 'kor', 'mkd', 'msa', 'nld', 'nor', 'pol', 'por', 'ron', 'rus', + 'slk', 'slv', 'spa', 'sqi', 'srp', 'swe', 'tha', 'tur', 'ukr', 'vie', 'zho' + 
]} video_types = (Episode,) - server = 'http://www.addic7ed.com' + server_url = 'http://www.addic7ed.com/' def __init__(self, username=None, password=None): if username is not None and password is None or username is None and password is not None: raise ConfigurationError('Username and password must be specified') + self.username = username self.password = password self.logged_in = False def initialize(self): - self.session = requests.Session() - self.session.headers = {'User-Agent': 'Subliminal/%s' % __version__.split('-')[0]} + self.session = Session() + self.session.headers['User-Agent'] = 'Subliminal/%s' % __short_version__ + # login if self.username is not None and self.password is not None: - logger.debug('Logging in') + logger.info('Logging in') data = {'username': self.username, 'password': self.password, 'Submit': 'Log in'} - r = self.session.post(self.server + '/dologin.php', data, timeout=10, allow_redirects=False) - if r.status_code == 302: - logger.info('Logged in') - self.logged_in = True - else: + r = self.session.post(self.server_url + 'dologin.php', data, allow_redirects=False, timeout=10) + + if r.status_code != 302: raise AuthenticationError(self.username) + logger.debug('Logged in') + self.logged_in = True + def terminate(self): # logout if self.logged_in: - r = self.session.get(self.server + '/logout.php', timeout=10) - logger.info('Logged out') - if r.status_code != 200: - raise ProviderError('Request failed with status code %d' % r.status_code) + logger.info('Logging out') + r = self.session.get(self.server_url + 'logout.php', timeout=10) + r.raise_for_status() + logger.debug('Logged out') + self.logged_in = False + self.session.close() - def get(self, url, params=None): - """Make a GET request on `url` with the given parameters - - :param string url: part of the URL to reach with the leading slash - :param params: params of the request - :return: the response - :rtype: :class:`bs4.BeautifulSoup` - - """ - r = self.session.get(self.server + 
url, params=params, timeout=10) - if r.status_code != 200: - raise ProviderError('Request failed with status code %d' % r.status_code) - return bs4.BeautifulSoup(r.content, ['permissive']) - @region.cache_on_arguments(expiration_time=SHOW_EXPIRATION_TIME) - def get_show_ids(self): - """Load the shows page with default series to show ids mapping + def _get_show_ids(self): + """Get the ``dict`` of show ids per series by querying the `shows.php` page. - :return: series to show ids + :return: show id per series, lower case and without quotes. :rtype: dict """ - soup = self.get('/shows.php') + # get the show page + logger.info('Getting show ids') + r = self.session.get(self.server_url + 'shows.php', timeout=10) + r.raise_for_status() + soup = ParserBeautifulSoup(r.content, ['lxml', 'html.parser']) + + # populate the show ids show_ids = {} - for html_show in soup.select('td.version > h3 > a[href^="/show/"]'): - show_ids[html_show.string.lower()] = int(html_show['href'][6:]) + for show in soup.select('td.version > h3 > a[href^="/show/"]'): + show_ids[sanitize(show.text)] = int(show['href'][6:]) + logger.debug('Found %d show ids', len(show_ids)) + return show_ids @region.cache_on_arguments(expiration_time=SHOW_EXPIRATION_TIME) - def find_show_id(self, series, year=None): - """Find the show id from the `series` with optional `year` + def _search_show_id(self, series, year=None): + """Search the show id from the `series` and `year`. - Use this only if the show id cannot be found with :meth:`get_show_ids` - - :param string series: series of the episode in lowercase - :param year: year of the series, if any - :type year: int or None - :return: the show id, if any - :rtype: int or None + :param str series: series of the episode. + :param year: year of the series, if any. + :type year: int + :return: the show id, if found. 
+ :rtype: int """ - series_year = series - if year is not None: - series_year += ' (%d)' % year - params = {'search': series_year, 'Submit': 'Search'} - logger.debug('Searching series %r', params) - suggested_shows = self.get('/search.php', params).select('span.titulo > a[href^="/show/"]') - if not suggested_shows: - logger.info('Series %r not found', series_year) - return None - return int(suggested_shows[0]['href'][6:]) + # addic7ed doesn't support search with quotes + series = series.replace('\'', ' ') - def query(self, series, season, year=None): - show_ids = self.get_show_ids() + # build the params + series_year = '%s %d' % (series, year) if year is not None else series + params = {'search': series_year, 'Submit': 'Search'} + + # make the search + logger.info('Searching show ids with %r', params) + r = self.session.get(self.server_url + 'search.php', params=params, timeout=10) + r.raise_for_status() + if r.status_code == 304: + raise TooManyRequests() + soup = ParserBeautifulSoup(r.content, ['lxml', 'html.parser']) + + # get the suggestion + suggestion = soup.select('span.titulo > a[href^="/show/"]') + if not suggestion: + logger.warning('Show id not found: no suggestion') + return None + if not sanitize(suggestion[0].i.text.replace('\'', ' ')) == sanitize(series_year): + logger.warning('Show id not found: suggestion does not match') + return None + show_id = int(suggestion[0]['href'][6:]) + logger.debug('Found show id %d', show_id) + + return show_id + + def get_show_id(self, series, year=None, country_code=None): + """Get the best matching show id for `series`, `year` and `country_code`. + + First search in the result of :meth:`_get_show_ids` and fallback on a search with :meth:`_search_show_id`. + + :param str series: series of the episode. + :param year: year of the series, if any. + :type year: int + :param country_code: country code of the series, if any. + :type country_code: str + :return: the show id, if found. 
+ :rtype: int + + """ + series_sanitized = sanitize(series).lower() + show_ids = self._get_show_ids() show_id = None - if year is not None: # search with the year - series_year = '%s (%d)' % (series.lower(), year) - if series_year in show_ids: - show_id = show_ids[series_year] - else: - show_id = self.find_show_id(series.lower(), year) - if show_id is None: # search without the year - year = None - if series.lower() in show_ids: - show_id = show_ids[series.lower()] - else: - show_id = self.find_show_id(series.lower()) + + # attempt with country + if not show_id and country_code: + logger.debug('Getting show id with country') + show_id = show_ids.get('%s %s' % (series_sanitized, country_code.lower())) + + # attempt with year + if not show_id and year: + logger.debug('Getting show id with year') + show_id = show_ids.get('%s %d' % (series_sanitized, year)) + + # attempt clean + if not show_id: + logger.debug('Getting show id') + show_id = show_ids.get(series_sanitized) + + # search as last resort + if not show_id: + logger.warning('Series not found in show ids') + show_id = self._search_show_id(series) + + return show_id + + def query(self, series, season, year=None, country=None): + # get the show id + show_id = self.get_show_id(series, year, country) if show_id is None: + logger.error('No show id found for %r (%r)', series, {'year': year, 'country': country}) return [] - params = {'show_id': show_id, 'season': season} - logger.debug('Searching subtitles %r', params) - link = '/show/{show_id}&season={season}'.format(**params) - soup = self.get(link) + + # get the page of the season of the show + logger.info('Getting the page of show id %d, season %d', show_id, season) + r = self.session.get(self.server_url + 'show/%d' % show_id, params={'season': season}, timeout=10) + r.raise_for_status() + if r.status_code == 304: + raise TooManyRequests() + soup = ParserBeautifulSoup(r.content, ['lxml', 'html.parser']) + + # loop over subtitle rows + match = 
series_year_re.match(soup.select('#header font')[0].text.strip()[:-10]) + series = match.group('series') + year = int(match.group('year')) if match.group('year') else None subtitles = [] - for row in soup('tr', class_='epeven completed'): + for row in soup.select('tr.epeven'): cells = row('td') - if cells[5].string != 'Completed': + + # ignore incomplete subtitles + status = cells[5].text + if status != 'Completed': + logger.debug('Ignoring subtitle with status %s', status) continue - if not cells[3].string: - continue - subtitles.append(Addic7edSubtitle(babelfish.Language.fromaddic7ed(cells[3].string), series, season, - int(cells[1].string), cells[2].string, year, cells[4].string, - bool(cells[6].string), cells[9].a['href'], - self.server + cells[2].a['href'])) + + # read the item + language = Language.fromaddic7ed(cells[3].text) + hearing_impaired = bool(cells[6].text) + page_link = self.server_url + cells[2].a['href'][1:] + season = int(cells[0].text) + episode = int(cells[1].text) + title = cells[2].text + version = cells[4].text + download_link = cells[9].a['href'][1:] + + subtitle = Addic7edSubtitle(language, hearing_impaired, page_link, series, season, episode, title, year, + version, download_link) + logger.debug('Found subtitle %r', subtitle) + subtitles.append(subtitle) + return subtitles def list_subtitles(self, video, languages): @@ -196,9 +274,14 @@ class Addic7edProvider(Provider): if s.language in languages and s.episode == video.episode] def download_subtitle(self, subtitle): - r = self.session.get(self.server + subtitle.download_link, timeout=10, headers={'Referer': subtitle.page_link}) - if r.status_code != 200: - raise ProviderError('Request failed with status code %d' % r.status_code) + # download the subtitle + logger.info('Downloading subtitle %r', subtitle) + r = self.session.get(self.server_url + subtitle.download_link, headers={'Referer': subtitle.page_link}, + timeout=10) + r.raise_for_status() + + # detect download limit exceeded if 
r.headers['Content-Type'] == 'text/html': raise DownloadLimitExceeded - subtitle.content = fix_line_endings(r.content) + + subtitle.content = fix_line_ending(r.content) diff --git a/libs/subliminal/providers/legendastv.py b/libs/subliminal/providers/legendastv.py new file mode 100644 index 00000000..cdd16aca --- /dev/null +++ b/libs/subliminal/providers/legendastv.py @@ -0,0 +1,448 @@ +# -*- coding: utf-8 -*- +import io +import json +import logging +import os +import re + +from babelfish import Language, language_converters +from datetime import datetime, timedelta +from dogpile.cache.api import NO_VALUE +from guessit import guessit +import pytz +import rarfile +from rarfile import RarFile, is_rarfile +from requests import Session +from zipfile import ZipFile, is_zipfile + +from . import ParserBeautifulSoup, Provider +from .. import __short_version__ +from ..cache import SHOW_EXPIRATION_TIME, region +from ..exceptions import AuthenticationError, ConfigurationError, ProviderError +from ..subtitle import SUBTITLE_EXTENSIONS, Subtitle, fix_line_ending, guess_matches, sanitize +from ..video import Episode, Movie + +logger = logging.getLogger(__name__) + +language_converters.register('legendastv = subliminal.converters.legendastv:LegendasTVConverter') + +# Configure :mod:`rarfile` to use the same path separator as :mod:`zipfile` +rarfile.PATH_SEP = '/' + +#: Conversion map for types +type_map = {'M': 'movie', 'S': 'episode', 'C': 'episode'} + +#: BR title season parsing regex +season_re = re.compile(r' - (?P\d+)(\xaa|a|st|nd|rd|th) (temporada|season)', re.IGNORECASE) + +#: Downloads parsing regex +downloads_re = re.compile(r'(?P\d+) downloads') + +#: Rating parsing regex +rating_re = re.compile(r'nota (?P\d+)') + +#: Timestamp parsing regex +timestamp_re = re.compile(r'(?P\d+)/(?P\d+)/(?P\d+) - (?P\d+):(?P\d+)') + +#: Cache key for releases +releases_key = __name__ + ':releases|{archive_id}' + + +class LegendasTVArchive(object): + """LegendasTV Archive. 
+ + :param str id: identifier. + :param str name: name. + :param bool pack: contains subtitles for multiple episodes. + :param bool pack: featured. + :param str link: link. + :param int downloads: download count. + :param int rating: rating (0-10). + :param timestamp: timestamp. + :type timestamp: datetime.datetime + + """ + def __init__(self, id, name, pack, featured, link, downloads=0, rating=0, timestamp=None): + #: Identifier + self.id = id + + #: Name + self.name = name + + #: Pack + self.pack = pack + + #: Featured + self.featured = featured + + #: Link + self.link = link + + #: Download count + self.downloads = downloads + + #: Rating (0-10) + self.rating = rating + + #: Timestamp + self.timestamp = timestamp + + #: Compressed content as :class:`rarfile.RarFile` or :class:`zipfile.ZipFile` + self.content = None + + def __repr__(self): + return '<%s [%s] %r>' % (self.__class__.__name__, self.id, self.name) + + +class LegendasTVSubtitle(Subtitle): + """LegendasTV Subtitle.""" + provider_name = 'legendastv' + + def __init__(self, language, type, title, year, imdb_id, season, archive, name): + super(LegendasTVSubtitle, self).__init__(language, archive.link) + self.type = type + self.title = title + self.year = year + self.imdb_id = imdb_id + self.season = season + self.archive = archive + self.name = name + + @property + def id(self): + return '%s-%s' % (self.archive.id, self.name.lower()) + + def get_matches(self, video, hearing_impaired=False): + matches = set() + + # episode + if isinstance(video, Episode) and self.type == 'episode': + # series + if video.series and sanitize(self.title) == sanitize(video.series): + matches.add('series') + + # year (year is based on season air date hence the adjustment) + if video.original_series and self.year is None or video.year and video.year == self.year - self.season + 1: + matches.add('year') + + # imdb_id + if video.series_imdb_id and self.imdb_id == video.series_imdb_id: + matches.add('series_imdb_id') + + # movie + 
elif isinstance(video, Movie) and self.type == 'movie': + # title + if video.title and sanitize(self.title) == sanitize(video.title): + matches.add('title') + + # year + if video.year and self.year == video.year: + matches.add('year') + + # imdb_id + if video.imdb_id and self.imdb_id == video.imdb_id: + matches.add('imdb_id') + + # archive name + matches |= guess_matches(video, guessit(self.archive.name, {'type': self.type})) + + # name + matches |= guess_matches(video, guessit(self.name, {'type': self.type})) + + return matches + + +class LegendasTVProvider(Provider): + """LegendasTV Provider. + + :param str username: username. + :param str password: password. + + """ + languages = {Language.fromlegendastv(l) for l in language_converters['legendastv'].codes} + server_url = 'http://legendas.tv/' + + def __init__(self, username=None, password=None): + if username and not password or not username and password: + raise ConfigurationError('Username and password must be specified') + + self.username = username + self.password = password + self.logged_in = False + + def initialize(self): + self.session = Session() + self.session.headers['User-Agent'] = 'Subliminal/%s' % __short_version__ + + # login + if self.username is not None and self.password is not None: + logger.info('Logging in') + data = {'_method': 'POST', 'data[User][username]': self.username, 'data[User][password]': self.password} + r = self.session.post(self.server_url + 'login', data, allow_redirects=False, timeout=10) + r.raise_for_status() + + soup = ParserBeautifulSoup(r.content, ['html.parser']) + if soup.find('div', {'class': 'alert-error'}, string=re.compile(u'Usuário ou senha inválidos')): + raise AuthenticationError(self.username) + + logger.debug('Logged in') + self.logged_in = True + + def terminate(self): + # logout + if self.logged_in: + logger.info('Logging out') + r = self.session.get(self.server_url + 'users/logout', allow_redirects=False, timeout=10) + r.raise_for_status() + 
logger.debug('Logged out') + self.logged_in = False + + self.session.close() + + @region.cache_on_arguments(expiration_time=SHOW_EXPIRATION_TIME) + def search_titles(self, title): + """Search for titles matching the `title`. + + :param str title: the title to search for. + :return: found titles. + :rtype: dict + + """ + # make the query + logger.info('Searching title %r', title) + r = self.session.get(self.server_url + 'legenda/sugestao/{}'.format(title), timeout=10) + r.raise_for_status() + results = json.loads(r.text) + + # loop over results + titles = {} + for result in results: + source = result['_source'] + + # extract id + title_id = int(source['id_filme']) + + # extract type and title + title = {'type': type_map[source['tipo']], 'title': source['dsc_nome']} + + # extract year + if source['dsc_data_lancamento'] and source['dsc_data_lancamento'].isdigit(): + title['year'] = int(source['dsc_data_lancamento']) + + # extract imdb_id + if source['id_imdb'] != '0': + if not source['id_imdb'].startswith('tt'): + title['imdb_id'] = 'tt' + source['id_imdb'].zfill(7) + else: + title['imdb_id'] = source['id_imdb'] + + # extract season + if title['type'] == 'episode': + if source['temporada'] and source['temporada'].isdigit(): + title['season'] = int(source['temporada']) + else: + match = season_re.search(source['dsc_nome_br']) + if match: + title['season'] = int(match.group('season')) + else: + logger.warning('No season detected for title %d', title_id) + + # add title + titles[title_id] = title + + logger.debug('Found %d titles', len(titles)) + + return titles + + @region.cache_on_arguments(expiration_time=timedelta(minutes=15).total_seconds()) + def get_archives(self, title_id, language_code): + """Get the archive list from a given `title_id` and `language_code`. + + :param int title_id: title id. + :param int language_code: language code. + :return: the archives. 
+ :rtype: list of :class:`LegendasTVArchive` + + """ + logger.info('Getting archives for title %d and language %d', title_id, language_code) + archives = [] + page = 1 + while True: + # get the archive page + url = self.server_url + 'util/carrega_legendas_busca_filme/{title}/{language}/-/{page}'.format( + title=title_id, language=language_code, page=page) + r = self.session.get(url) + r.raise_for_status() + + # parse the results + soup = ParserBeautifulSoup(r.content, ['lxml', 'html.parser']) + for archive_soup in soup.select('div.list_element > article > div'): + # create archive + archive = LegendasTVArchive(archive_soup.a['href'].split('/')[2], archive_soup.a.text, + 'pack' in archive_soup['class'], 'destaque' in archive_soup['class'], + self.server_url + archive_soup.a['href'][1:]) + + # extract text containing downloads, rating and timestamp + data_text = archive_soup.find('p', class_='data').text + + # match downloads + archive.downloads = int(downloads_re.search(data_text).group('downloads')) + + # match rating + match = rating_re.search(data_text) + if match: + archive.rating = int(match.group('rating')) + + # match timestamp and validate it + time_data = {k: int(v) for k, v in timestamp_re.search(data_text).groupdict().items()} + archive.timestamp = pytz.timezone('America/Sao_Paulo').localize(datetime(**time_data)) + if archive.timestamp > datetime.utcnow().replace(tzinfo=pytz.utc): + raise ProviderError('Archive timestamp is in the future') + + # add archive + archives.append(archive) + + # stop on last page + if soup.find('a', attrs={'class': 'load_more'}, string='carregar mais') is None: + break + + # increment page count + page += 1 + + logger.debug('Found %d archives', len(archives)) + + return archives + + def download_archive(self, archive): + """Download an archive's :attr:`~LegendasTVArchive.content`. + + :param archive: the archive to download :attr:`~LegendasTVArchive.content` of. 
+ :type archive: :class:`LegendasTVArchive` + + """ + logger.info('Downloading archive %s', archive.id) + r = self.session.get(self.server_url + 'downloadarquivo/{}'.format(archive.id)) + r.raise_for_status() + + # open the archive + archive_stream = io.BytesIO(r.content) + if is_rarfile(archive_stream): + logger.debug('Identified rar archive') + archive.content = RarFile(archive_stream) + elif is_zipfile(archive_stream): + logger.debug('Identified zip archive') + archive.content = ZipFile(archive_stream) + else: + raise ValueError('Not a valid archive') + + def query(self, language, title, season=None, episode=None, year=None): + # search for titles + titles = self.search_titles(sanitize(title)) + + # search for titles with the quote or dot character + ignore_characters = {'\'', '.'} + if any(c in title for c in ignore_characters): + titles.update(self.search_titles(sanitize(title, ignore_characters=ignore_characters))) + + subtitles = [] + # iterate over titles + for title_id, t in titles.items(): + # discard mismatches on title + if sanitize(t['title']) != sanitize(title): + continue + + # episode + if season and episode: + # discard mismatches on type + if t['type'] != 'episode': + continue + + # discard mismatches on season + if 'season' not in t or t['season'] != season: + continue + # movie + else: + # discard mismatches on type + if t['type'] != 'movie': + continue + + # discard mismatches on year + if year is not None and 'year' in t and t['year'] != year: + continue + + # iterate over title's archives + for a in self.get_archives(title_id, language.legendastv): + # clean name of path separators and pack flags + clean_name = a.name.replace('/', '-') + if a.pack and clean_name.startswith('(p)'): + clean_name = clean_name[3:] + + # guess from name + guess = guessit(clean_name, {'type': t['type']}) + + # episode + if season and episode: + # discard mismatches on episode in non-pack archives + if not a.pack and 'episode' in guess and guess['episode'] != 
episode: + continue + + # compute an expiration time based on the archive timestamp + expiration_time = (datetime.utcnow().replace(tzinfo=pytz.utc) - a.timestamp).total_seconds() + + # attempt to get the releases from the cache + releases = region.get(releases_key.format(archive_id=a.id), expiration_time=expiration_time) + + # the releases are not in cache or cache is expired + if releases == NO_VALUE: + logger.info('Releases not found in cache') + + # download archive + self.download_archive(a) + + # extract the releases + releases = [] + for name in a.content.namelist(): + # discard the legendastv file + if name.startswith('Legendas.tv'): + continue + + # discard hidden files + if os.path.split(name)[-1].startswith('.'): + continue + + # discard non-subtitle files + if not name.lower().endswith(SUBTITLE_EXTENSIONS): + continue + + releases.append(name) + + # cache the releases + region.set(releases_key.format(archive_id=a.id), releases) + + # iterate over releases + for r in releases: + subtitle = LegendasTVSubtitle(language, t['type'], t['title'], t.get('year'), t.get('imdb_id'), + t.get('season'), a, r) + logger.debug('Found subtitle %r', subtitle) + subtitles.append(subtitle) + + return subtitles + + def list_subtitles(self, video, languages): + season = episode = None + if isinstance(video, Episode): + title = video.series + season = video.season + episode = video.episode + else: + title = video.title + + return [s for l in languages for s in self.query(l, title, season=season, episode=episode, year=video.year)] + + def download_subtitle(self, subtitle): + # download archive in case we previously hit the releases cache and didn't download it + if subtitle.archive.content is None: + self.download_archive(subtitle.archive) + + # extract subtitle's content + subtitle.content = fix_line_ending(subtitle.archive.content.read(subtitle.name)) diff --git a/libs/subliminal/providers/napiprojekt.py b/libs/subliminal/providers/napiprojekt.py new file mode 100644 index 
00000000..f44f85d9 --- /dev/null +++ b/libs/subliminal/providers/napiprojekt.py @@ -0,0 +1,103 @@ +# -*- coding: utf-8 -*- +import logging + +from babelfish import Language +from requests import Session + +from . import Provider +from .. import __short_version__ +from ..subtitle import Subtitle + +logger = logging.getLogger(__name__) + + +def get_subhash(hash): + """Get a second hash based on napiprojekt's hash. + + :param str hash: napiprojekt's hash. + :return: the subhash. + :rtype: str + + """ + idx = [0xe, 0x3, 0x6, 0x8, 0x2] + mul = [2, 2, 5, 4, 3] + add = [0, 0xd, 0x10, 0xb, 0x5] + + b = [] + for i in range(len(idx)): + a = add[i] + m = mul[i] + i = idx[i] + t = a + int(hash[i], 16) + v = int(hash[t:t + 2], 16) + b.append(('%x' % (v * m))[-1]) + + return ''.join(b) + + +class NapiProjektSubtitle(Subtitle): + """NapiProjekt Subtitle.""" + provider_name = 'napiprojekt' + + def __init__(self, language, hash): + super(NapiProjektSubtitle, self).__init__(language) + self.hash = hash + + @property + def id(self): + return self.hash + + def get_matches(self, video): + matches = set() + + # hash + if 'napiprojekt' in video.hashes and video.hashes['napiprojekt'] == self.hash: + matches.add('hash') + + return matches + + +class NapiProjektProvider(Provider): + """NapiProjekt Provider.""" + languages = {Language.fromalpha2(l) for l in ['pl']} + required_hash = 'napiprojekt' + server_url = 'http://napiprojekt.pl/unit_napisy/dl.php' + + def initialize(self): + self.session = Session() + self.session.headers['User-Agent'] = 'Subliminal/%s' % __short_version__ + + def terminate(self): + self.session.close() + + def query(self, language, hash): + params = { + 'v': 'dreambox', + 'kolejka': 'false', + 'nick': '', + 'pass': '', + 'napios': 'Linux', + 'l': language.alpha2.upper(), + 'f': hash, + 't': get_subhash(hash)} + logger.info('Searching subtitle %r', params) + response = self.session.get(self.server_url, params=params, timeout=10) + response.raise_for_status() + + # 
handle subtitles not found and errors + if response.content[:4] == b'NPc0': + logger.debug('No subtitles found') + return None + + subtitle = NapiProjektSubtitle(language, hash) + subtitle.content = response.content + logger.debug('Found subtitle %r', subtitle) + + return subtitle + + def list_subtitles(self, video, languages): + return [s for s in [self.query(l, video.hashes['napiprojekt']) for l in languages] if s is not None] + + def download_subtitle(self, subtitle): + # there is no download step, content is already filled from listing subtitles + pass diff --git a/libs/subliminal/providers/opensubtitles.py b/libs/subliminal/providers/opensubtitles.py index 795799d2..5ab09da4 100644 --- a/libs/subliminal/providers/opensubtitles.py +++ b/libs/subliminal/providers/opensubtitles.py @@ -1,31 +1,33 @@ # -*- coding: utf-8 -*- -from __future__ import unicode_literals import base64 import logging import os import re import zlib -import babelfish -import guessit -from . import Provider -from .. import __version__ -from ..compat import ServerProxy, TimeoutTransport -from ..exceptions import ProviderError, AuthenticationError, DownloadLimitExceeded -from ..subtitle import Subtitle, fix_line_endings, compute_guess_matches -from ..video import Episode, Movie +from babelfish import Language, language_converters +from guessit import guessit +from six.moves.xmlrpc_client import ServerProxy + +from . import Provider, TimeoutSafeTransport +from .. 
import __short_version__ +from ..exceptions import AuthenticationError, ConfigurationError, DownloadLimitExceeded, ProviderError +from ..subtitle import Subtitle, fix_line_ending, guess_matches +from ..utils import sanitize +from ..video import Episode, Movie logger = logging.getLogger(__name__) class OpenSubtitlesSubtitle(Subtitle): + """OpenSubtitles Subtitle.""" provider_name = 'opensubtitles' - series_re = re.compile('^"(?P.*)" (?P.*)$') + series_re = re.compile(r'^"(?P.*)" (?P.*)$') - def __init__(self, language, hearing_impaired, id, matched_by, movie_kind, hash, movie_name, movie_release_name, # @ReservedAssignment - movie_year, movie_imdb_id, series_season, series_episode, page_link): - super(OpenSubtitlesSubtitle, self).__init__(language, hearing_impaired, page_link) - self.id = id + def __init__(self, language, hearing_impaired, page_link, subtitle_id, matched_by, movie_kind, hash, movie_name, + movie_release_name, movie_year, movie_imdb_id, series_season, series_episode, filename, encoding): + super(OpenSubtitlesSubtitle, self).__init__(language, hearing_impaired, page_link, encoding) + self.subtitle_id = subtitle_id self.matched_by = matched_by self.movie_kind = movie_kind self.hash = hash @@ -35,6 +37,11 @@ class OpenSubtitlesSubtitle(Subtitle): self.movie_imdb_id = movie_imdb_id self.series_season = series_season self.series_episode = series_episode + self.filename = filename + + @property + def id(self): + return str(self.subtitle_id) @property def series_name(self): @@ -44,145 +51,225 @@ class OpenSubtitlesSubtitle(Subtitle): def series_title(self): return self.series_re.match(self.movie_name).group('series_title') - def compute_matches(self, video): + def get_matches(self, video): matches = set() + # episode if isinstance(video, Episode) and self.movie_kind == 'episode': + # tag match, assume series, year, season and episode matches + if self.matched_by == 'tag': + matches |= {'series', 'year', 'season', 'episode'} # series - if video.series and 
self.series_name.lower() == video.series.lower(): + if video.series and sanitize(self.series_name) == sanitize(video.series): matches.add('series') + # year + if video.original_series and self.movie_year is None or video.year and video.year == self.movie_year: + matches.add('year') # season if video.season and self.series_season == video.season: matches.add('season') # episode if video.episode and self.series_episode == video.episode: matches.add('episode') + # title + if video.title and sanitize(self.series_title) == sanitize(video.title): + matches.add('title') # guess - matches |= compute_guess_matches(video, guessit.guess_episode_info(self.movie_release_name + '.mkv')) + matches |= guess_matches(video, guessit(self.movie_release_name, {'type': 'episode'})) + matches |= guess_matches(video, guessit(self.filename, {'type': 'episode'})) + # hash + if 'opensubtitles' in video.hashes and self.hash == video.hashes['opensubtitles']: + if 'series' in matches and 'season' in matches and 'episode' in matches: + matches.add('hash') + else: + logger.debug('Match on hash discarded') # movie elif isinstance(video, Movie) and self.movie_kind == 'movie': + # tag match, assume title and year matches + if self.matched_by == 'tag': + matches |= {'title', 'year'} + # title + if video.title and sanitize(self.movie_name) == sanitize(video.title): + matches.add('title') # year if video.year and self.movie_year == video.year: matches.add('year') # guess - matches |= compute_guess_matches(video, guessit.guess_movie_info(self.movie_release_name + '.mkv')) + matches |= guess_matches(video, guessit(self.movie_release_name, {'type': 'movie'})) + matches |= guess_matches(video, guessit(self.filename, {'type': 'movie'})) + # hash + if 'opensubtitles' in video.hashes and self.hash == video.hashes['opensubtitles']: + if 'title' in matches: + matches.add('hash') + else: + logger.debug('Match on hash discarded') else: - logger.info('%r is not a valid movie_kind for %r', self.movie_kind, video) + 
logger.info('%r is not a valid movie_kind', self.movie_kind) return matches - # hash - if 'opensubtitles' in video.hashes and self.hash == video.hashes['opensubtitles']: - matches.add('hash') + # imdb_id if video.imdb_id and self.movie_imdb_id == video.imdb_id: matches.add('imdb_id') - # title - if video.title and self.movie_name.lower() == video.title.lower(): - matches.add('title') + return matches class OpenSubtitlesProvider(Provider): - languages = {babelfish.Language.fromopensubtitles(l) for l in babelfish.language_converters['opensubtitles'].codes} + """OpenSubtitles Provider. - def __init__(self): - self.server = ServerProxy('http://api.opensubtitles.org/xml-rpc', transport=TimeoutTransport(10)) + :param str username: username. + :param str password: password. + + """ + languages = {Language.fromopensubtitles(l) for l in language_converters['opensubtitles'].codes} + + def __init__(self, username=None, password=None): + self.server = ServerProxy('https://api.opensubtitles.org/xml-rpc', TimeoutSafeTransport(10)) + if username and not password or not username and password: + raise ConfigurationError('Username and password must be specified') + # None values not allowed for logging in, so replace it by '' + self.username = username or '' + self.password = password or '' self.token = None def initialize(self): - response = checked(self.server.LogIn('', '', 'eng', 'subliminal v%s' % __version__.split('-')[0])) + logger.info('Logging in') + response = checked(self.server.LogIn(self.username, self.password, 'eng', + 'subliminal v%s' % __short_version__)) self.token = response['token'] + logger.debug('Logged in with token %r', self.token) def terminate(self): + logger.info('Logging out') checked(self.server.LogOut(self.token)) self.server.close() + self.token = None + logger.debug('Logged out') def no_operation(self): + logger.debug('No operation') checked(self.server.NoOperation(self.token)) - def query(self, languages, hash=None, size=None, imdb_id=None, 
query=None, season=None, episode=None): # @ReservedAssignment - searches = [] + def query(self, languages, hash=None, size=None, imdb_id=None, query=None, season=None, episode=None, tag=None): + # fill the search criteria + criteria = [] if hash and size: - searches.append({'moviehash': hash, 'moviebytesize': str(size)}) + criteria.append({'moviehash': hash, 'moviebytesize': str(size)}) if imdb_id: - searches.append({'imdbid': imdb_id}) + criteria.append({'imdbid': imdb_id[2:]}) + if tag: + criteria.append({'tag': tag}) if query and season and episode: - searches.append({'query': query, 'season': season, 'episode': episode}) + criteria.append({'query': query.replace('\'', ''), 'season': season, 'episode': episode}) elif query: - searches.append({'query': query}) - if not searches: - raise ValueError('One or more parameter missing') - for search in searches: - search['sublanguageid'] = ','.join(l.opensubtitles for l in languages) - logger.debug('Searching subtitles %r', searches) - response = checked(self.server.SearchSubtitles(self.token, searches)) + criteria.append({'query': query.replace('\'', '')}) + if not criteria: + raise ValueError('Not enough information') + + # add the language + for criterion in criteria: + criterion['sublanguageid'] = ','.join(sorted(l.opensubtitles for l in languages)) + + # query the server + logger.info('Searching subtitles %r', criteria) + response = checked(self.server.SearchSubtitles(self.token, criteria)) + subtitles = [] + + # exit if no data if not response['data']: - logger.debug('No subtitle found') - return [] - return [OpenSubtitlesSubtitle(babelfish.Language.fromopensubtitles(r['SubLanguageID']), - bool(int(r['SubHearingImpaired'])), r['IDSubtitleFile'], r['MatchedBy'], - r['MovieKind'], r['MovieHash'], r['MovieName'], r['MovieReleaseName'], - int(r['MovieYear']) if r['MovieYear'] else None, int(r['IDMovieImdb']), - int(r['SeriesSeason']) if r['SeriesSeason'] else None, - int(r['SeriesEpisode']) if r['SeriesEpisode'] else 
None, r['SubtitlesLink']) - for r in response['data']] + logger.debug('No subtitles found') + return subtitles + + # loop over subtitle items + for subtitle_item in response['data']: + # read the item + language = Language.fromopensubtitles(subtitle_item['SubLanguageID']) + hearing_impaired = bool(int(subtitle_item['SubHearingImpaired'])) + page_link = subtitle_item['SubtitlesLink'] + subtitle_id = int(subtitle_item['IDSubtitleFile']) + matched_by = subtitle_item['MatchedBy'] + movie_kind = subtitle_item['MovieKind'] + hash = subtitle_item['MovieHash'] + movie_name = subtitle_item['MovieName'] + movie_release_name = subtitle_item['MovieReleaseName'] + movie_year = int(subtitle_item['MovieYear']) if subtitle_item['MovieYear'] else None + movie_imdb_id = 'tt' + subtitle_item['IDMovieImdb'] + series_season = int(subtitle_item['SeriesSeason']) if subtitle_item['SeriesSeason'] else None + series_episode = int(subtitle_item['SeriesEpisode']) if subtitle_item['SeriesEpisode'] else None + filename = subtitle_item['SubFileName'] + encoding = subtitle_item.get('SubEncoding') or None + + subtitle = OpenSubtitlesSubtitle(language, hearing_impaired, page_link, subtitle_id, matched_by, movie_kind, + hash, movie_name, movie_release_name, movie_year, movie_imdb_id, + series_season, series_episode, filename, encoding) + logger.debug('Found subtitle %r by %s', subtitle, matched_by) + subtitles.append(subtitle) + + return subtitles def list_subtitles(self, video, languages): - query = None - season = None - episode = None - if ('opensubtitles' not in video.hashes or not video.size) and not video.imdb_id: - query = video.name.split(os.sep)[-1] + season = episode = None if isinstance(video, Episode): query = video.series season = video.season episode = video.episode + else: + query = video.title + return self.query(languages, hash=video.hashes.get('opensubtitles'), size=video.size, imdb_id=video.imdb_id, - query=query, season=season, episode=episode) + query=query, season=season, 
episode=episode, tag=os.path.basename(video.name)) def download_subtitle(self, subtitle): - response = checked(self.server.DownloadSubtitles(self.token, [subtitle.id])) - if not response['data']: - raise ProviderError('Nothing to download') - subtitle.content = fix_line_endings(zlib.decompress(base64.b64decode(response['data'][0]['data']), 47)) + logger.info('Downloading subtitle %r', subtitle) + response = checked(self.server.DownloadSubtitles(self.token, [str(subtitle.subtitle_id)])) + subtitle.content = fix_line_ending(zlib.decompress(base64.b64decode(response['data'][0]['data']), 47)) class OpenSubtitlesError(ProviderError): - """Base class for non-generic :class:`OpenSubtitlesProvider` exceptions""" + """Base class for non-generic :class:`OpenSubtitlesProvider` exceptions.""" + pass class Unauthorized(OpenSubtitlesError, AuthenticationError): - """Exception raised when status is '401 Unauthorized'""" + """Exception raised when status is '401 Unauthorized'.""" + pass class NoSession(OpenSubtitlesError, AuthenticationError): - """Exception raised when status is '406 No session'""" + """Exception raised when status is '406 No session'.""" + pass class DownloadLimitReached(OpenSubtitlesError, DownloadLimitExceeded): - """Exception raised when status is '407 Download limit reached'""" + """Exception raised when status is '407 Download limit reached'.""" + pass class InvalidImdbid(OpenSubtitlesError): - """Exception raised when status is '413 Invalid ImdbID'""" + """Exception raised when status is '413 Invalid ImdbID'.""" + pass class UnknownUserAgent(OpenSubtitlesError, AuthenticationError): - """Exception raised when status is '414 Unknown User Agent'""" + """Exception raised when status is '414 Unknown User Agent'.""" + pass class DisabledUserAgent(OpenSubtitlesError, AuthenticationError): - """Exception raised when status is '415 Disabled user agent'""" + """Exception raised when status is '415 Disabled user agent'.""" + pass class 
ServiceUnavailable(OpenSubtitlesError): - """Exception raised when status is '503 Service Unavailable'""" + """Exception raised when status is '503 Service Unavailable'.""" + pass def checked(response): - """Check a response status before returning it + """Check a response status before returning it. - :param response: a response from a XMLRPC call to OpenSubtitles - :return: the response + :param response: a response from a XMLRPC call to OpenSubtitles. + :return: the response. :raise: :class:`OpenSubtitlesError` """ @@ -203,4 +290,5 @@ def checked(response): raise ServiceUnavailable if status_code != 200: raise OpenSubtitlesError(response['status']) + return response diff --git a/libs/subliminal/providers/podnapisi.py b/libs/subliminal/providers/podnapisi.py index 2aa1e7dc..f643682b 100644 --- a/libs/subliminal/providers/podnapisi.py +++ b/libs/subliminal/providers/podnapisi.py @@ -1,47 +1,59 @@ # -*- coding: utf-8 -*- -from __future__ import unicode_literals import io import logging import re -import xml.etree.ElementTree -import zipfile -import babelfish -import bs4 -import guessit -import requests + +from babelfish import Language, language_converters +from guessit import guessit +try: + from lxml import etree +except ImportError: + try: + import xml.etree.cElementTree as etree + except ImportError: + import xml.etree.ElementTree as etree +from requests import Session +from zipfile import ZipFile + from . import Provider -from .. import __version__ +from .. 
import __short_version__ from ..exceptions import ProviderError -from ..subtitle import Subtitle, fix_line_endings, compute_guess_matches +from ..subtitle import Subtitle, fix_line_ending, guess_matches +from ..utils import sanitize from ..video import Episode, Movie - logger = logging.getLogger(__name__) -babelfish.language_converters.register('podnapisi = subliminal.converters.podnapisi:PodnapisiConverter') class PodnapisiSubtitle(Subtitle): + """Podnapisi Subtitle.""" provider_name = 'podnapisi' - def __init__(self, language, id, releases, hearing_impaired, page_link, series=None, season=None, episode=None, # @ReservedAssignment - title=None, year=None): + def __init__(self, language, hearing_impaired, page_link, pid, releases, title, season=None, episode=None, + year=None): super(PodnapisiSubtitle, self).__init__(language, hearing_impaired, page_link) - self.id = id + self.pid = pid self.releases = releases - self.hearing_impaired = hearing_impaired - self.series = series + self.title = title self.season = season self.episode = episode - self.title = title self.year = year - def compute_matches(self, video): + @property + def id(self): + return self.pid + + def get_matches(self, video): matches = set() + # episode if isinstance(video, Episode): # series - if video.series and self.series.lower() == video.series.lower(): + if video.series and sanitize(self.title) == sanitize(video.series): matches.add('series') + # year + if video.original_series and self.year is None or video.year and video.year == self.year: + matches.add('year') # season if video.season and self.season == video.season: matches.add('season') @@ -50,105 +62,118 @@ class PodnapisiSubtitle(Subtitle): matches.add('episode') # guess for release in self.releases: - matches |= compute_guess_matches(video, guessit.guess_episode_info(release + '.mkv')) + matches |= guess_matches(video, guessit(release, {'type': 'episode'})) # movie elif isinstance(video, Movie): # title - if video.title and 
self.title.lower() == video.title.lower(): + if video.title and sanitize(self.title) == sanitize(video.title): matches.add('title') + # year + if video.year and self.year == video.year: + matches.add('year') # guess for release in self.releases: - matches |= compute_guess_matches(video, guessit.guess_movie_info(release + '.mkv')) - # year - if self.year == video.year: - matches.add('year') + matches |= guess_matches(video, guessit(release, {'type': 'movie'})) + return matches class PodnapisiProvider(Provider): - languages = {babelfish.Language.frompodnapisi(l) for l in babelfish.language_converters['podnapisi'].codes} - video_types = (Episode, Movie) - server = 'http://simple.podnapisi.net' - link_re = re.compile('^.*(?P/ppodnapisi/download/i/\d+/k/.*$)') + """Podnapisi Provider.""" + languages = ({Language('por', 'BR'), Language('srp', script='Latn')} | + {Language.fromalpha2(l) for l in language_converters['alpha2'].codes}) + server_url = 'http://podnapisi.net/subtitles/' def initialize(self): - self.session = requests.Session() - self.session.headers = {'User-Agent': 'Subliminal/%s' % __version__.split('-')[0]} + self.session = Session() + self.session.headers['User-Agent'] = 'Subliminal/%s' % __short_version__ def terminate(self): self.session.close() - def get(self, url, params=None, is_xml=True): - """Make a GET request on `url` with the given parameters - - :param string url: part of the URL to reach with the leading slash - :param dict params: params of the request - :param bool xml: whether the response content is XML or not - :return: the response - :rtype: :class:`xml.etree.ElementTree.Element` or :class:`bs4.BeautifulSoup` - - """ - r = self.session.get(self.server + '/ppodnapisi' + url, params=params, timeout=10) - if r.status_code != 200: - raise ProviderError('Request failed with status code %d' % r.status_code) - if is_xml: - return xml.etree.ElementTree.fromstring(r.content) - else: - return bs4.BeautifulSoup(r.content, ['permissive']) - - def 
query(self, language, series=None, season=None, episode=None, title=None, year=None): - params = {'sXML': 1, 'sJ': language.podnapisi} - if series and season and episode: - params['sK'] = series + def query(self, language, keyword, season=None, episode=None, year=None): + # set parameters, see http://www.podnapisi.net/forum/viewtopic.php?f=62&t=26164#p212652 + params = {'sXML': 1, 'sL': str(language), 'sK': keyword} + is_episode = False + if season and episode: + is_episode = True params['sTS'] = season params['sTE'] = episode - elif title: - params['sK'] = title - else: - raise ValueError('Missing parameters series and season and episode or title') if year: params['sY'] = year - logger.debug('Searching episode %r', params) + + # loop over paginated results + logger.info('Searching subtitles %r', params) subtitles = [] + pids = set() while True: - root = self.get('/search', params) - if not int(root.find('pagination/results').text): - logger.debug('No subtitle found') + # query the server + xml = etree.fromstring(self.session.get(self.server_url + 'search/old', params=params, timeout=10).content) + + # exit if no results + if not int(xml.find('pagination/results').text): + logger.debug('No subtitles found') break - if series and season and episode: - subtitles.extend([PodnapisiSubtitle(language, int(s.find('id').text), - s.find('release').text.split() if s.find('release').text else [], - 'n' in (s.find('flags').text or ''), s.find('url').text, - series=series, season=season, episode=episode, - year=s.find('year').text) - for s in root.findall('subtitle')]) - elif title: - subtitles.extend([PodnapisiSubtitle(language, int(s.find('id').text), - s.find('release').text.split() if s.find('release').text else [], - 'n' in (s.find('flags').text or ''), s.find('url').text, - title=title, year=s.find('year').text) - for s in root.findall('subtitle')]) - if int(root.find('pagination/current').text) >= int(root.find('pagination/count').text): + + # loop over subtitles + for 
subtitle_xml in xml.findall('subtitle'): + # read xml elements + language = Language.fromietf(subtitle_xml.find('language').text) + hearing_impaired = 'n' in (subtitle_xml.find('flags').text or '') + page_link = subtitle_xml.find('url').text + pid = subtitle_xml.find('pid').text + releases = [] + if subtitle_xml.find('release').text: + for release in subtitle_xml.find('release').text.split(): + release = re.sub(r'\.+$', '', release) # remove trailing dots + release = ''.join(filter(lambda x: ord(x) < 128, release)) # remove non-ascii characters + releases.append(release) + title = subtitle_xml.find('title').text + season = int(subtitle_xml.find('tvSeason').text) + episode = int(subtitle_xml.find('tvEpisode').text) + year = int(subtitle_xml.find('year').text) + + if is_episode: + subtitle = PodnapisiSubtitle(language, hearing_impaired, page_link, pid, releases, title, + season=season, episode=episode, year=year) + else: + subtitle = PodnapisiSubtitle(language, hearing_impaired, page_link, pid, releases, title, + year=year) + + # ignore duplicates, see http://www.podnapisi.net/forum/viewtopic.php?f=62&t=26164&start=10#p213321 + if pid in pids: + continue + + logger.debug('Found subtitle %r', subtitle) + subtitles.append(subtitle) + pids.add(pid) + + # stop on last page + if int(xml.find('pagination/current').text) >= int(xml.find('pagination/count').text): break - params['page'] = int(root.find('pagination/current').text) + 1 + + # increment current page + params['page'] = int(xml.find('pagination/current').text) + 1 + logger.debug('Getting page %d', params['page']) + return subtitles def list_subtitles(self, video, languages): if isinstance(video, Episode): - return [s for l in languages for s in self.query(l, series=video.series, season=video.season, + return [s for l in languages for s in self.query(l, video.series, season=video.season, episode=video.episode, year=video.year)] elif isinstance(video, Movie): - return [s for l in languages for s in self.query(l, 
title=video.title, year=video.year)] + return [s for l in languages for s in self.query(l, video.title, year=video.year)] def download_subtitle(self, subtitle): - soup = self.get(subtitle.page_link[38:], is_xml=False) - link = soup.find('a', href=self.link_re) - if not link: - raise ProviderError('Cannot find the download link') - r = self.session.get(self.server + self.link_re.match(link['href']).group('link'), timeout=10) - if r.status_code != 200: - raise ProviderError('Request failed with status code %d' % r.status_code) - with zipfile.ZipFile(io.BytesIO(r.content)) as zf: + # download as a zip + logger.info('Downloading subtitle %r', subtitle) + r = self.session.get(self.server_url + subtitle.pid + '/download', params={'container': 'zip'}, timeout=10) + r.raise_for_status() + + # open the zip + with ZipFile(io.BytesIO(r.content)) as zf: if len(zf.namelist()) > 1: raise ProviderError('More than one file to unzip') - subtitle.content = fix_line_endings(zf.read(zf.namelist()[0])) + + subtitle.content = fix_line_ending(zf.read(zf.namelist()[0])) diff --git a/libs/subliminal/providers/shooter.py b/libs/subliminal/providers/shooter.py new file mode 100644 index 00000000..fc79faf7 --- /dev/null +++ b/libs/subliminal/providers/shooter.py @@ -0,0 +1,79 @@ +# -*- coding: utf-8 -*- +import json +import logging +import os + +from babelfish import Language, language_converters +from requests import Session + +from . import Provider +from .. 
import __short_version__ +from ..subtitle import Subtitle, fix_line_ending + +logger = logging.getLogger(__name__) + +language_converters.register('shooter = subliminal.converters.shooter:ShooterConverter') + + +class ShooterSubtitle(Subtitle): + """Shooter Subtitle.""" + provider_name = 'shooter' + + def __init__(self, language, hash, download_link): + super(ShooterSubtitle, self).__init__(language) + self.hash = hash + self.download_link = download_link + + @property + def id(self): + return self.download_link + + def get_matches(self, video): + matches = set() + + # hash + if 'shooter' in video.hashes and video.hashes['shooter'] == self.hash: + matches.add('hash') + + return matches + + +class ShooterProvider(Provider): + """Shooter Provider.""" + languages = {Language(l) for l in ['eng', 'zho']} + server_url = 'https://www.shooter.cn/api/subapi.php' + + def initialize(self): + self.session = Session() + self.session.headers['User-Agent'] = 'Subliminal/%s' % __short_version__ + + def terminate(self): + self.session.close() + + def query(self, language, filename, hash=None): + # query the server + params = {'filehash': hash, 'pathinfo': os.path.realpath(filename), 'format': 'json', 'lang': language.shooter} + logger.debug('Searching subtitles %r', params) + r = self.session.post(self.server_url, params=params, timeout=10) + r.raise_for_status() + + # handle subtitles not found + if r.content == b'\xff': + logger.debug('No subtitles found') + return [] + + # parse the subtitles + results = json.loads(r.text) + subtitles = [ShooterSubtitle(language, hash, t['Link']) for s in results for t in s['Files']] + + return subtitles + + def list_subtitles(self, video, languages): + return [s for l in languages for s in self.query(l, video.name, video.hashes.get('shooter'))] + + def download_subtitle(self, subtitle): + logger.info('Downloading subtitle %r', subtitle) + r = self.session.get(subtitle.download_link, timeout=10) + r.raise_for_status() + + subtitle.content = 
fix_line_ending(r.content) diff --git a/libs/subliminal/providers/subscenter.py b/libs/subliminal/providers/subscenter.py new file mode 100644 index 00000000..1e25e5e1 --- /dev/null +++ b/libs/subliminal/providers/subscenter.py @@ -0,0 +1,235 @@ +# -*- coding: utf-8 -*- +import bisect +from collections import defaultdict +import io +import json +import logging +import zipfile + +from babelfish import Language +from guessit import guessit +from requests import Session + +from . import ParserBeautifulSoup, Provider +from .. import __short_version__ +from ..cache import SHOW_EXPIRATION_TIME, region +from ..exceptions import AuthenticationError, ConfigurationError, ProviderError +from ..subtitle import Subtitle, fix_line_ending, guess_matches +from ..utils import sanitize +from ..video import Episode, Movie + +logger = logging.getLogger(__name__) + + +class SubsCenterSubtitle(Subtitle): + """SubsCenter Subtitle.""" + provider_name = 'subscenter' + + def __init__(self, language, hearing_impaired, page_link, series, season, episode, title, subtitle_id, subtitle_key, + downloaded, releases): + super(SubsCenterSubtitle, self).__init__(language, hearing_impaired, page_link) + self.series = series + self.season = season + self.episode = episode + self.title = title + self.subtitle_id = subtitle_id + self.subtitle_key = subtitle_key + self.downloaded = downloaded + self.releases = releases + + @property + def id(self): + return str(self.subtitle_id) + + def get_matches(self, video): + matches = set() + + # episode + if isinstance(video, Episode): + # series + if video.series and sanitize(self.series) == sanitize(video.series): + matches.add('series') + # season + if video.season and self.season == video.season: + matches.add('season') + # episode + if video.episode and self.episode == video.episode: + matches.add('episode') + # guess + for release in self.releases: + matches |= guess_matches(video, guessit(release, {'type': 'episode'})) + # movie + elif isinstance(video, 
Movie): + # guess + for release in self.releases: + matches |= guess_matches(video, guessit(release, {'type': 'movie'})) + + # title + if video.title and sanitize(self.title) == sanitize(video.title): + matches.add('title') + + return matches + + +class SubsCenterProvider(Provider): + """SubsCenter Provider.""" + languages = {Language.fromalpha2(l) for l in ['he']} + server_url = 'http://www.subscenter.co/he/' + + def __init__(self, username=None, password=None): + if username is not None and password is None or username is None and password is not None: + raise ConfigurationError('Username and password must be specified') + + self.session = None + self.username = username + self.password = password + self.logged_in = False + + def initialize(self): + self.session = Session() + self.session.headers['User-Agent'] = 'Subliminal/{}'.format(__short_version__) + + # login + if self.username is not None and self.password is not None: + logger.debug('Logging in') + url = self.server_url + 'subscenter/accounts/login/' + + # retrieve CSRF token + self.session.get(url) + csrf_token = self.session.cookies['csrftoken'] + + # actual login + data = {'username': self.username, 'password': self.password, 'csrfmiddlewaretoken': csrf_token} + r = self.session.post(url, data, allow_redirects=False, timeout=10) + + if r.status_code != 302: + raise AuthenticationError(self.username) + + logger.info('Logged in') + self.logged_in = True + + def terminate(self): + # logout + if self.logged_in: + logger.info('Logging out') + r = self.session.get(self.server_url + 'subscenter/accounts/logout/', timeout=10) + r.raise_for_status() + logger.info('Logged out') + self.logged_in = False + + self.session.close() + + @region.cache_on_arguments(expiration_time=SHOW_EXPIRATION_TIME) + def _search_url_titles(self, title): + """Search the URL titles by kind for the given `title`. + + :param str title: title to search for. + :return: the URL titles by kind. 
+ :rtype: collections.defaultdict + + """ + # make the search + logger.info('Searching title name for %r', title) + r = self.session.get(self.server_url + 'subtitle/search/', params={'q': title}, timeout=10) + r.raise_for_status() + + # check for redirections + if r.history and all([h.status_code == 302 for h in r.history]): + logger.debug('Redirected to the subtitles page') + links = [r.url] + else: + # get the suggestions (if needed) + soup = ParserBeautifulSoup(r.content, ['lxml', 'html.parser']) + links = [link.attrs['href'] for link in soup.select('#processes div.generalWindowTop a')] + logger.debug('Found %d suggestions', len(links)) + + url_titles = defaultdict(list) + for link in links: + parts = link.split('/') + url_titles[parts[-3]].append(parts[-2]) + + return url_titles + + def query(self, title, season=None, episode=None): + # search for the url title + url_titles = self._search_url_titles(title) + + # episode + if season and episode: + if 'series' not in url_titles: + logger.error('No URL title found for series %r', title) + return [] + url_title = url_titles['series'][0] + logger.debug('Using series title %r', url_title) + url = self.server_url + 'cst/data/series/sb/{}/{}/{}/'.format(url_title, season, episode) + page_link = self.server_url + 'subtitle/series/{}/{}/{}/'.format(url_title, season, episode) + else: + if 'movie' not in url_titles: + logger.error('No URL title found for movie %r', title) + return [] + url_title = url_titles['movie'][0] + logger.debug('Using movie title %r', url_title) + url = self.server_url + 'cst/data/movie/sb/{}/'.format(url_title) + page_link = self.server_url + 'subtitle/movie/{}/'.format(url_title) + + # get the list of subtitles + logger.debug('Getting the list of subtitles') + r = self.session.get(url) + r.raise_for_status() + results = json.loads(r.text) + + # loop over results + subtitles = {} + for language_code, language_data in results.items(): + for quality_data in language_data.values(): + for quality, 
subtitles_data in quality_data.items(): + for subtitle_item in subtitles_data.values(): + # read the item + language = Language.fromalpha2(language_code) + hearing_impaired = bool(subtitle_item['hearing_impaired']) + subtitle_id = subtitle_item['id'] + subtitle_key = subtitle_item['key'] + downloaded = subtitle_item['downloaded'] + release = subtitle_item['subtitle_version'] + + # add the release and increment downloaded count if we already have the subtitle + if subtitle_id in subtitles: + logger.debug('Found additional release %r for subtitle %d', release, subtitle_id) + bisect.insort_left(subtitles[subtitle_id].releases, release) # deterministic order + subtitles[subtitle_id].downloaded += downloaded + continue + + # otherwise create it + subtitle = SubsCenterSubtitle(language, hearing_impaired, page_link, title, season, episode, + title, subtitle_id, subtitle_key, downloaded, [release]) + logger.debug('Found subtitle %r', subtitle) + subtitles[subtitle_id] = subtitle + + return subtitles.values() + + def list_subtitles(self, video, languages): + season = episode = None + title = video.title + + if isinstance(video, Episode): + title = video.series + season = video.season + episode = video.episode + + return [s for s in self.query(title, season, episode) if s.language in languages] + + def download_subtitle(self, subtitle): + # download + url = self.server_url + 'subtitle/download/{}/{}/'.format(subtitle.language.alpha2, subtitle.subtitle_id) + params = {'v': subtitle.releases[0], 'key': subtitle.subtitle_key} + r = self.session.get(url, params=params, headers={'Referer': subtitle.page_link}, timeout=10) + r.raise_for_status() + + # open the zip + with zipfile.ZipFile(io.BytesIO(r.content)) as zf: + # remove some filenames from the namelist + namelist = [n for n in zf.namelist() if not n.endswith('.txt')] + if len(namelist) > 1: + raise ProviderError('More than one file to unzip') + + subtitle.content = fix_line_ending(zf.read(namelist[0])) diff --git 
a/libs/subliminal/providers/thesubdb.py b/libs/subliminal/providers/thesubdb.py index 44623173..6bf4a0eb 100644 --- a/libs/subliminal/providers/thesubdb.py +++ b/libs/subliminal/providers/thesubdb.py @@ -1,72 +1,84 @@ # -*- coding: utf-8 -*- -from __future__ import unicode_literals import logging -import babelfish -import requests -from . import Provider -from .. import __version__ -from ..exceptions import ProviderError -from ..subtitle import Subtitle, fix_line_endings +from babelfish import Language, language_converters +from requests import Session + +from . import Provider +from .. import __short_version__ +from ..subtitle import Subtitle, fix_line_ending logger = logging.getLogger(__name__) +language_converters.register('thesubdb = subliminal.converters.thesubdb:TheSubDBConverter') + class TheSubDBSubtitle(Subtitle): + """TheSubDB Subtitle.""" provider_name = 'thesubdb' - def __init__(self, language, hash): # @ReservedAssignment + def __init__(self, language, hash): super(TheSubDBSubtitle, self).__init__(language) self.hash = hash - def compute_matches(self, video): + @property + def id(self): + return self.hash + '-' + str(self.language) + + def get_matches(self, video): matches = set() + # hash if 'thesubdb' in video.hashes and video.hashes['thesubdb'] == self.hash: matches.add('hash') + return matches class TheSubDBProvider(Provider): - languages = {babelfish.Language.fromalpha2(l) for l in ['en', 'es', 'fr', 'it', 'nl', 'pl', 'pt', 'ro', 'sv', 'tr']} + """TheSubDB Provider.""" + languages = {Language.fromthesubdb(l) for l in language_converters['thesubdb'].codes} required_hash = 'thesubdb' + server_url = 'http://api.thesubdb.com/' def initialize(self): - self.session = requests.Session() - self.session.headers = {'User-Agent': 'SubDB/1.0 (subliminal/%s; https://github.com/Diaoul/subliminal)' % - __version__.split('-')[0]} + self.session = Session() + self.session.headers['User-Agent'] = ('SubDB/1.0 (subliminal/%s; https://github.com/Diaoul/subliminal)' % 
+ __short_version__) def terminate(self): self.session.close() - def get(self, params): - """Make a GET request on the server with the given parameters - - :param params: params of the request - :return: the response - :rtype: :class:`requests.Response` - - """ - return self.session.get('http://api.thesubdb.com', params=params, timeout=10) - - def query(self, hash): # @ReservedAssignment + def query(self, hash): + # make the query params = {'action': 'search', 'hash': hash} - logger.debug('Searching subtitles %r', params) - r = self.get(params) + logger.info('Searching subtitles %r', params) + r = self.session.get(self.server_url, params=params, timeout=10) + + # handle subtitles not found and errors if r.status_code == 404: - logger.debug('No subtitle found') + logger.debug('No subtitles found') return [] - elif r.status_code != 200: - raise ProviderError('Request failed with status code %d' % r.status_code) - return [TheSubDBSubtitle(language, hash) for language in - {babelfish.Language.fromalpha2(l) for l in r.content.decode('utf-8').split(',')}] + r.raise_for_status() + + # loop over languages + subtitles = [] + for language_code in r.text.split(','): + language = Language.fromthesubdb(language_code) + + subtitle = TheSubDBSubtitle(language, hash) + logger.debug('Found subtitle %r', subtitle) + subtitles.append(subtitle) + + return subtitles def list_subtitles(self, video, languages): return [s for s in self.query(video.hashes['thesubdb']) if s.language in languages] def download_subtitle(self, subtitle): + logger.info('Downloading subtitle %r', subtitle) params = {'action': 'download', 'hash': subtitle.hash, 'language': subtitle.language.alpha2} - r = self.get(params) - if r.status_code != 200: - raise ProviderError('Request failed with status code %d' % r.status_code) - subtitle.content = fix_line_endings(r.content) + r = self.session.get(self.server_url, params=params, timeout=10) + r.raise_for_status() + + subtitle.content = fix_line_ending(r.content) diff 
--git a/libs/subliminal/providers/tvsubtitles.py b/libs/subliminal/providers/tvsubtitles.py index 3f21928b..ec033ee7 100644 --- a/libs/subliminal/providers/tvsubtitles.py +++ b/libs/subliminal/providers/tvsubtitles.py @@ -1,41 +1,53 @@ # -*- coding: utf-8 -*- -from __future__ import unicode_literals import io import logging import re -import zipfile -import babelfish -import bs4 -import requests -from . import Provider -from .. import __version__ -from ..cache import region, SHOW_EXPIRATION_TIME, EPISODE_EXPIRATION_TIME +from zipfile import ZipFile + +from babelfish import Language, language_converters +from guessit import guessit +from requests import Session + +from . import ParserBeautifulSoup, Provider +from .. import __short_version__ +from ..cache import EPISODE_EXPIRATION_TIME, SHOW_EXPIRATION_TIME, region from ..exceptions import ProviderError -from ..subtitle import Subtitle, fix_line_endings, compute_guess_properties_matches +from ..score import get_equivalent_release_groups +from ..subtitle import Subtitle, fix_line_ending, guess_matches +from ..utils import sanitize, sanitize_release_group from ..video import Episode - logger = logging.getLogger(__name__) -babelfish.language_converters.register('tvsubtitles = subliminal.converters.tvsubtitles:TVsubtitlesConverter') + +language_converters.register('tvsubtitles = subliminal.converters.tvsubtitles:TVsubtitlesConverter') + +link_re = re.compile(r'^(?P.+?)(?: \(?\d{4}\)?| \((?:US|UK)\))? 
\((?P\d{4})-\d{4}\)$') +episode_id_re = re.compile(r'^episode-\d+\.html$') class TVsubtitlesSubtitle(Subtitle): + """TVsubtitles Subtitle.""" provider_name = 'tvsubtitles' - def __init__(self, language, series, season, episode, year, id, rip, release, page_link): # @ReservedAssignment + def __init__(self, language, page_link, subtitle_id, series, season, episode, year, rip, release): super(TVsubtitlesSubtitle, self).__init__(language, page_link=page_link) + self.subtitle_id = subtitle_id self.series = series self.season = season self.episode = episode self.year = year - self.id = id self.rip = rip self.release = release - def compute_matches(self, video): + @property + def id(self): + return str(self.subtitle_id) + + def get_matches(self, video): matches = set() + # series - if video.series and self.series == video.series: + if video.series and sanitize(self.series) == sanitize(video.series): matches.add('series') # season if video.season and self.season == video.season: @@ -44,148 +56,155 @@ class TVsubtitlesSubtitle(Subtitle): if video.episode and self.episode == video.episode: matches.add('episode') # year - if self.year == video.year: + if video.original_series and self.year is None or video.year and video.year == self.year: matches.add('year') # release_group - if video.release_group and self.release and video.release_group.lower() in self.release.lower(): + if (video.release_group and self.release and + any(r in sanitize_release_group(self.release) + for r in get_equivalent_release_groups(sanitize_release_group(video.release_group)))): matches.add('release_group') - """ - # video_codec - if video.video_codec and self.release and (video.video_codec in self.release.lower() - or video.video_codec == 'h264' and 'x264' in self.release.lower()): - matches.add('video_codec') - # resolution - if video.resolution and self.rip and video.resolution in self.rip.lower(): - matches.add('resolution') - # format - if video.format and self.rip and video.format in 
self.rip.lower(): - matches.add('format') - """ - # we don't have the complete filename, so we need to guess the matches separately - # guess video_codec (videoCodec in guessit) - matches |= compute_guess_properties_matches(video, self.release, 'videoCodec') - # guess resolution (screenSize in guessit) - matches |= compute_guess_properties_matches(video, self.rip, 'screenSize') - # guess format - matches |= compute_guess_properties_matches(video, self.rip, 'format') + # other properties + if self.release: + matches |= guess_matches(video, guessit(self.release, {'type': 'episode'}), partial=True) + if self.rip: + matches |= guess_matches(video, guessit(self.rip), partial=True) + return matches class TVsubtitlesProvider(Provider): - languages = {babelfish.Language('por', 'BR')} | {babelfish.Language(l) - for l in ['ara', 'bul', 'ces', 'dan', 'deu', 'ell', 'eng', 'fin', 'fra', 'hun', 'ita', 'jpn', 'kor', - 'nld', 'pol', 'por', 'ron', 'rus', 'spa', 'swe', 'tur', 'ukr', 'zho']} + """TVsubtitles Provider.""" + languages = {Language('por', 'BR')} | {Language(l) for l in [ + 'ara', 'bul', 'ces', 'dan', 'deu', 'ell', 'eng', 'fin', 'fra', 'hun', 'ita', 'jpn', 'kor', 'nld', 'pol', 'por', + 'ron', 'rus', 'spa', 'swe', 'tur', 'ukr', 'zho' + ]} video_types = (Episode,) - server = 'http://www.tvsubtitles.net' - episode_id_re = re.compile('^episode-\d+\.html$') - subtitle_re = re.compile('^\/subtitle-\d+\.html$') - link_re = re.compile('^(?P[A-Za-z0-9 \'.]+).*\((?P\d{4})-\d{4}\)$') + server_url = 'http://www.tvsubtitles.net/' def initialize(self): - self.session = requests.Session() - self.session.headers = {'User-Agent': 'Subliminal/%s' % __version__.split('-')[0]} + self.session = Session() + self.session.headers['User-Agent'] = 'Subliminal/%s' % __short_version__ def terminate(self): self.session.close() - def request(self, url, params=None, data=None, method='GET'): - """Make a `method` request on `url` with the given parameters - - :param string url: part of the URL to reach 
with the leading slash - :param dict params: params of the request - :param dict data: data of the request - :param string method: method of the request - :return: the response - :rtype: :class:`bs4.BeautifulSoup` - - """ - r = self.session.request(method, self.server + url, params=params, data=data, timeout=10) - if r.status_code != 200: - raise ProviderError('Request failed with status code %d' % r.status_code) - return bs4.BeautifulSoup(r.content, ['permissive']) - @region.cache_on_arguments(expiration_time=SHOW_EXPIRATION_TIME) - def find_show_id(self, series, year=None): - """Find the show id from the `series` with optional `year` + def search_show_id(self, series, year=None): + """Search the show id from the `series` and `year`. - :param string series: series of the episode in lowercase - :param year: year of the series, if any - :type year: int or None - :return: the show id, if any - :rtype: int or None + :param str series: series of the episode. + :param year: year of the series, if any. + :type year: int + :return: the show id, if any. 
+ :rtype: int """ - data = {'q': series} - logger.debug('Searching series %r', data) - soup = self.request('/search.php', data=data, method='POST') - links = soup.select('div.left li div a[href^="/tvshow-"]') - if not links: - logger.info('Series %r not found', series) - return None - matched_links = [link for link in links if self.link_re.match(link.string)] - for link in matched_links: # first pass with exact match on series - match = self.link_re.match(link.string) - if match.group('series').lower().replace('.', ' ').strip() == series: + # make the search + logger.info('Searching show id for %r', series) + r = self.session.post(self.server_url + 'search.php', data={'q': series}, timeout=10) + r.raise_for_status() + + # get the series out of the suggestions + soup = ParserBeautifulSoup(r.content, ['lxml', 'html.parser']) + show_id = None + for suggestion in soup.select('div.left li div a[href^="/tvshow-"]'): + match = link_re.match(suggestion.text) + if not match: + logger.error('Failed to match %s', suggestion.text) + continue + + if match.group('series').lower() == series.lower(): if year is not None and int(match.group('first_year')) != year: + logger.debug('Year does not match') continue - return int(link['href'][8:-5]) - for link in matched_links: # less selective second pass - match = self.link_re.match(link.string) - if match.group('series').lower().replace('.', ' ').strip().startswith(series): - if year is not None and int(match.group('first_year')) != year: - continue - return int(link['href'][8:-5]) - return None + show_id = int(suggestion['href'][8:-5]) + logger.debug('Found show id %d', show_id) + break + + return show_id @region.cache_on_arguments(expiration_time=EPISODE_EXPIRATION_TIME) - def find_episode_ids(self, show_id, season): - """Find episode ids from the show id and the season + def get_episode_ids(self, show_id, season): + """Get episode ids from the show id and the season. 
- :param int show_id: show id - :param int season: season of the episode - :return: episode ids per episode number + :param int show_id: show id. + :param int season: season of the episode. + :return: episode ids per episode number. :rtype: dict """ - params = {'show_id': show_id, 'season': season} - logger.debug('Searching episodes %r', params) - soup = self.request('/tvshow-{show_id}-{season}.html'.format(**params)) + # get the page of the season of the show + logger.info('Getting the page of show id %d, season %d', show_id, season) + r = self.session.get(self.server_url + 'tvshow-%d-%d.html' % (show_id, season), timeout=10) + soup = ParserBeautifulSoup(r.content, ['lxml', 'html.parser']) + + # loop over episode rows episode_ids = {} for row in soup.select('table#table5 tr'): - if not row('a', href=self.episode_id_re): + # skip rows that do not have a link to the episode page + if not row('a', href=episode_id_re): continue + + # extract data from the cells cells = row('td') - episode_ids[int(cells[0].string.split('x')[1])] = int(cells[1].a['href'][8:-5]) + episode = int(cells[0].text.split('x')[1]) + episode_id = int(cells[1].a['href'][8:-5]) + episode_ids[episode] = episode_id + + if episode_ids: + logger.debug('Found episode ids %r', episode_ids) + else: + logger.warning('No episode ids found') + return episode_ids def query(self, series, season, episode, year=None): - show_id = self.find_show_id(series.lower(), year) + # search the show id + show_id = self.search_show_id(series, year) if show_id is None: + logger.error('No show id found for %r (%r)', series, {'year': year}) return [] - episode_ids = self.find_episode_ids(show_id, season) + + # get the episode ids + episode_ids = self.get_episode_ids(show_id, season) if episode not in episode_ids: - logger.info('Episode %d not found', episode) + logger.error('Episode %d not found', episode) return [] - params = {'episode_id': episode_ids[episode]} - logger.debug('Searching episode %r', params) - link = 
'/episode-{episode_id}.html'.format(**params) - soup = self.request(link) - return [TVsubtitlesSubtitle(babelfish.Language.fromtvsubtitles(row.h5.img['src'][13:-4]), series, season, - episode, year if year and show_id != self.find_show_id(series.lower()) else None, - int(row['href'][10:-5]), row.find('p', title='rip').text.strip() or None, - row.find('p', title='release').text.strip() or None, - self.server + '/subtitle-%d.html' % int(row['href'][10:-5])) - for row in soup('a', href=self.subtitle_re)] + + # get the episode page + logger.info('Getting the page for episode %d', episode_ids[episode]) + r = self.session.get(self.server_url + 'episode-%d.html' % episode_ids[episode], timeout=10) + soup = ParserBeautifulSoup(r.content, ['lxml', 'html.parser']) + + # loop over subtitles rows + subtitles = [] + for row in soup.select('.subtitlen'): + # read the item + language = Language.fromtvsubtitles(row.h5.img['src'][13:-4]) + subtitle_id = int(row.parent['href'][10:-5]) + page_link = self.server_url + 'subtitle-%d.html' % subtitle_id + rip = row.find('p', title='rip').text.strip() or None + release = row.find('p', title='release').text.strip() or None + + subtitle = TVsubtitlesSubtitle(language, page_link, subtitle_id, series, season, episode, year, rip, + release) + logger.debug('Found subtitle %s', subtitle) + subtitles.append(subtitle) + + return subtitles def list_subtitles(self, video, languages): return [s for s in self.query(video.series, video.season, video.episode, video.year) if s.language in languages] def download_subtitle(self, subtitle): - r = self.session.get(self.server + '/download-{subtitle_id}.html'.format(subtitle_id=subtitle.id), - timeout=10) - if r.status_code != 200: - raise ProviderError('Request failed with status code %d' % r.status_code) - with zipfile.ZipFile(io.BytesIO(r.content)) as zf: + # download as a zip + logger.info('Downloading subtitle %r', subtitle) + r = self.session.get(self.server_url + 'download-%d.html' % 
subtitle.subtitle_id, timeout=10) + r.raise_for_status() + + # open the zip + with ZipFile(io.BytesIO(r.content)) as zf: if len(zf.namelist()) > 1: raise ProviderError('More than one file to unzip') - subtitle.content = fix_line_endings(zf.read(zf.namelist()[0])) + + subtitle.content = fix_line_ending(zf.read(zf.namelist()[0])) diff --git a/libs/subliminal/refiners/__init__.py b/libs/subliminal/refiners/__init__.py new file mode 100644 index 00000000..bbb8d3ef --- /dev/null +++ b/libs/subliminal/refiners/__init__.py @@ -0,0 +1,12 @@ +""" +Refiners enrich a :class:`~subliminal.video.Video` object by adding information to it. + +A refiner is a simple function: + +.. py:function:: refine(video, **kwargs) + + :param video: the video to refine. + :type video: :class:`~subliminal.video.Video` + :param \*\*kwargs: additional parameters for refiners. + +""" diff --git a/libs/subliminal/refiners/metadata.py b/libs/subliminal/refiners/metadata.py new file mode 100644 index 00000000..a8408742 --- /dev/null +++ b/libs/subliminal/refiners/metadata.py @@ -0,0 +1,99 @@ +# -*- coding: utf-8 -*- +import logging +import os + +from babelfish import Error as BabelfishError, Language +from enzyme import MKV + +logger = logging.getLogger(__name__) + + +def refine(video, embedded_subtitles=True, **kwargs): + """Refine a video by searching its metadata. + + Several :class:`~subliminal.video.Video` attributes can be found: + + * :attr:`~subliminal.video.Video.resolution` + * :attr:`~subliminal.video.Video.video_codec` + * :attr:`~subliminal.video.Video.audio_codec` + * :attr:`~subliminal.video.Video.subtitle_languages` + + :param bool embedded_subtitles: search for embedded subtitles. 
+ + """ + # skip non existing videos + if not video.exists: + return + + # check extensions + extension = os.path.splitext(video.name)[1] + if extension == '.mkv': + with open(video.name, 'rb') as f: + mkv = MKV(f) + + # main video track + if mkv.video_tracks: + video_track = mkv.video_tracks[0] + + # resolution + if video_track.height in (480, 720, 1080): + if video_track.interlaced: + video.resolution = '%di' % video_track.height + else: + video.resolution = '%dp' % video_track.height + logger.debug('Found resolution %s', video.resolution) + + # video codec + if video_track.codec_id == 'V_MPEG4/ISO/AVC': + video.video_codec = 'h264' + logger.debug('Found video_codec %s', video.video_codec) + elif video_track.codec_id == 'V_MPEG4/ISO/SP': + video.video_codec = 'DivX' + logger.debug('Found video_codec %s', video.video_codec) + elif video_track.codec_id == 'V_MPEG4/ISO/ASP': + video.video_codec = 'XviD' + logger.debug('Found video_codec %s', video.video_codec) + else: + logger.warning('MKV has no video track') + + # main audio track + if mkv.audio_tracks: + audio_track = mkv.audio_tracks[0] + # audio codec + if audio_track.codec_id == 'A_AC3': + video.audio_codec = 'AC3' + logger.debug('Found audio_codec %s', video.audio_codec) + elif audio_track.codec_id == 'A_DTS': + video.audio_codec = 'DTS' + logger.debug('Found audio_codec %s', video.audio_codec) + elif audio_track.codec_id == 'A_AAC': + video.audio_codec = 'AAC' + logger.debug('Found audio_codec %s', video.audio_codec) + else: + logger.warning('MKV has no audio track') + + # subtitle tracks + if mkv.subtitle_tracks: + if embedded_subtitles: + embedded_subtitle_languages = set() + for st in mkv.subtitle_tracks: + if st.language: + try: + embedded_subtitle_languages.add(Language.fromalpha3b(st.language)) + except BabelfishError: + logger.error('Embedded subtitle track language %r is not a valid language', st.language) + embedded_subtitle_languages.add(Language('und')) + elif st.name: + try: + 
embedded_subtitle_languages.add(Language.fromname(st.name)) + except BabelfishError: + logger.debug('Embedded subtitle track name %r is not a valid language', st.name) + embedded_subtitle_languages.add(Language('und')) + else: + embedded_subtitle_languages.add(Language('und')) + logger.debug('Found embedded subtitle %r', embedded_subtitle_languages) + video.subtitle_languages |= embedded_subtitle_languages + else: + logger.debug('MKV has no subtitle track') + else: + logger.debug('Unsupported video extension %s', extension) diff --git a/libs/subliminal/refiners/omdb.py b/libs/subliminal/refiners/omdb.py new file mode 100644 index 00000000..e2514ae9 --- /dev/null +++ b/libs/subliminal/refiners/omdb.py @@ -0,0 +1,187 @@ +# -*- coding: utf-8 -*- +import logging +import operator + +import requests + +from .. import __short_version__ +from ..cache import REFINER_EXPIRATION_TIME, region +from ..video import Episode, Movie +from ..utils import sanitize + +logger = logging.getLogger(__name__) + + +class OMDBClient(object): + base_url = 'http://www.omdbapi.com' + + def __init__(self, version=1, session=None, headers=None, timeout=10): + #: Session for the requests + self.session = session or requests.Session() + self.session.timeout = timeout + self.session.headers.update(headers or {}) + self.session.params['r'] = 'json' + self.session.params['v'] = version + + def get(self, id=None, title=None, type=None, year=None, plot='short', tomatoes=False): + # build the params + params = {} + if id: + params['i'] = id + if title: + params['t'] = title + if not params: + raise ValueError('At least id or title is required') + params['type'] = type + params['y'] = year + params['plot'] = plot + params['tomatoes'] = tomatoes + + # perform the request + r = self.session.get(self.base_url, params=params) + r.raise_for_status() + + # get the response as json + j = r.json() + + # check response status + if j['Response'] == 'False': + return None + + return j + + def search(self, title, 
type=None, year=None, page=1): + # build the params + params = {'s': title, 'type': type, 'y': year, 'page': page} + + # perform the request + r = self.session.get(self.base_url, params=params) + r.raise_for_status() + + # get the response as json + j = r.json() + + # check response status + if j['Response'] == 'False': + return None + + return j + + +omdb_client = OMDBClient(headers={'User-Agent': 'Subliminal/%s' % __short_version__}) + + +@region.cache_on_arguments(expiration_time=REFINER_EXPIRATION_TIME) +def search(title, type, year): + results = omdb_client.search(title, type, year) + if not results: + return None + + # fetch all paginated results + all_results = results['Search'] + total_results = int(results['totalResults']) + page = 1 + while total_results > page * 10: + page += 1 + results = omdb_client.search(title, type, year, page=page) + all_results.extend(results['Search']) + + return all_results + + +def refine(video, **kwargs): + """Refine a video by searching `OMDb API `_. 
+ + Several :class:`~subliminal.video.Episode` attributes can be found: + + * :attr:`~subliminal.video.Episode.series` + * :attr:`~subliminal.video.Episode.year` + * :attr:`~subliminal.video.Episode.series_imdb_id` + + Similarly, for a :class:`~subliminal.video.Movie`: + + * :attr:`~subliminal.video.Movie.title` + * :attr:`~subliminal.video.Movie.year` + * :attr:`~subliminal.video.Video.imdb_id` + + """ + if isinstance(video, Episode): + # exit if the information is complete + if video.series_imdb_id: + logger.debug('No need to search') + return + + # search the series + results = search(video.series, 'series', video.year) + if not results: + logger.warning('No results for series') + return + logger.debug('Found %d results', len(results)) + + # filter the results + results = [r for r in results if sanitize(r['Title']) == sanitize(video.series)] + if not results: + logger.warning('No matching series found') + return + + # process the results + found = False + for result in sorted(results, key=operator.itemgetter('Year')): + if video.original_series and video.year is None: + logger.debug('Found result for original series without year') + found = True + break + if video.year == int(result['Year'].split(u'\u2013')[0]): + logger.debug('Found result with matching year') + found = True + break + + if not found: + logger.warning('No matching series found') + return + + # add series information + logger.debug('Found series %r', result) + video.series = result['Title'] + video.year = int(result['Year'].split(u'\u2013')[0]) + video.series_imdb_id = result['imdbID'] + + elif isinstance(video, Movie): + # exit if the information is complete + if video.imdb_id: + return + + # search the movie + results = search(video.title, 'movie', video.year) + if not results: + logger.warning('No results') + return + logger.debug('Found %d results', len(results)) + + # filter the results + results = [r for r in results if sanitize(r['Title']) == sanitize(video.title)] + if not results: + 
logger.warning('No matching movie found') + return + + # process the results + found = False + for result in results: + if video.year is None: + logger.debug('Found result for movie without year') + found = True + break + if video.year == int(result['Year']): + logger.debug('Found result with matching year') + found = True + break + + if not found: + logger.warning('No matching movie found') + return + + # add movie information + logger.debug('Found movie %r', result) + video.title = result['Title'] + video.year = int(result['Year'].split(u'\u2013')[0]) + video.imdb_id = result['imdbID'] diff --git a/libs/subliminal/refiners/tvdb.py b/libs/subliminal/refiners/tvdb.py new file mode 100644 index 00000000..1828e5cf --- /dev/null +++ b/libs/subliminal/refiners/tvdb.py @@ -0,0 +1,350 @@ +# -*- coding: utf-8 -*- +from datetime import datetime, timedelta +from functools import wraps +import logging +import re + +import requests + +from .. import __short_version__ +from ..cache import REFINER_EXPIRATION_TIME, region +from ..utils import sanitize +from ..video import Episode + +logger = logging.getLogger(__name__) + +series_re = re.compile(r'^(?P.*?)(?: \((?:(?P\d{4})|(?P[A-Z]{2}))\))?$') + + +def requires_auth(func): + """Decorator for :class:`TVDBClient` methods that require authentication""" + @wraps(func) + def wrapper(self, *args, **kwargs): + if self.token is None or self.token_expired: + self.login() + elif self.token_needs_refresh: + self.refresh_token() + return func(self, *args, **kwargs) + return wrapper + + +class TVDBClient(object): + """TVDB REST API Client + + :param str apikey: API key to use. + :param str username: username to use. + :param str password: password to use. + :param str language: language of the responses. + :param session: session object to use. + :type session: :class:`requests.sessions.Session` or compatible. + :param dict headers: additional headers. + :param int timeout: timeout for the requests. 
+ + """ + #: Base URL of the API + base_url = 'https://api.thetvdb.com' + + #: Token lifespan + token_lifespan = timedelta(hours=1) + + #: Minimum token age before a :meth:`refresh_token` is triggered + refresh_token_every = timedelta(minutes=30) + + def __init__(self, apikey=None, username=None, password=None, language='en', session=None, headers=None, + timeout=10): + #: API key + self.apikey = apikey + + #: Username + self.username = username + + #: Password + self.password = password + + #: Last token acquisition date + self.token_date = datetime.utcnow() - self.token_lifespan + + #: Session for the requests + self.session = session or requests.Session() + self.session.timeout = timeout + self.session.headers.update(headers or {}) + self.session.headers['Content-Type'] = 'application/json' + self.session.headers['Accept-Language'] = language + + @property + def language(self): + return self.session.headers['Accept-Language'] + + @language.setter + def language(self, value): + self.session.headers['Accept-Language'] = value + + @property + def token(self): + if 'Authorization' not in self.session.headers: + return None + return self.session.headers['Authorization'][7:] + + @property + def token_expired(self): + return datetime.utcnow() - self.token_date > self.token_lifespan + + @property + def token_needs_refresh(self): + return datetime.utcnow() - self.token_date > self.refresh_token_every + + def login(self): + """Login""" + # perform the request + data = {'apikey': self.apikey, 'username': self.username, 'password': self.password} + r = self.session.post(self.base_url + '/login', json=data) + r.raise_for_status() + + # set the Authorization header + self.session.headers['Authorization'] = 'Bearer ' + r.json()['token'] + + # update token_date + self.token_date = datetime.utcnow() + + def refresh_token(self): + """Refresh token""" + # perform the request + r = self.session.get(self.base_url + '/refresh_token') + r.raise_for_status() + + # set the Authorization 
header + self.session.headers['Authorization'] = 'Bearer ' + r.json()['token'] + + # update token_date + self.token_date = datetime.utcnow() + + @requires_auth + def search_series(self, name=None, imdb_id=None, zap2it_id=None): + """Search series""" + # perform the request + params = {'name': name, 'imdbId': imdb_id, 'zap2itId': zap2it_id} + r = self.session.get(self.base_url + '/search/series', params=params) + if r.status_code == 404: + return None + r.raise_for_status() + + return r.json()['data'] + + @requires_auth + def get_series(self, id): + """Get series""" + # perform the request + r = self.session.get(self.base_url + '/series/{}'.format(id)) + if r.status_code == 404: + return None + r.raise_for_status() + + return r.json()['data'] + + @requires_auth + def get_series_actors(self, id): + """Get series actors""" + # perform the request + r = self.session.get(self.base_url + '/series/{}/actors'.format(id)) + if r.status_code == 404: + return None + r.raise_for_status() + + return r.json()['data'] + + @requires_auth + def get_series_episodes(self, id, page=1): + """Get series episodes""" + # perform the request + params = {'page': page} + r = self.session.get(self.base_url + '/series/{}/episodes'.format(id), params=params) + if r.status_code == 404: + return None + r.raise_for_status() + + return r.json() + + @requires_auth + def query_series_episodes(self, id, absolute_number=None, aired_season=None, aired_episode=None, dvd_season=None, + dvd_episode=None, imdb_id=None, page=1): + """Query series episodes""" + # perform the request + params = {'absoluteNumber': absolute_number, 'airedSeason': aired_season, 'airedEpisode': aired_episode, + 'dvdSeason': dvd_season, 'dvdEpisode': dvd_episode, 'imdbId': imdb_id, 'page': page} + r = self.session.get(self.base_url + '/series/{}/episodes/query'.format(id), params=params) + if r.status_code == 404: + return None + r.raise_for_status() + + return r.json() + + @requires_auth + def get_episode(self, id): + """Get 
episode""" + # perform the request + r = self.session.get(self.base_url + '/episodes/{}'.format(id)) + if r.status_code == 404: + return None + r.raise_for_status() + + return r.json()['data'] + + +#: Configured instance of :class:`TVDBClient` +tvdb_client = TVDBClient('5EC930FB90DA1ADA', headers={'User-Agent': 'Subliminal/%s' % __short_version__}) + + +@region.cache_on_arguments(expiration_time=REFINER_EXPIRATION_TIME) +def search_series(name): + """Search series. + + :param str name: name of the series. + :return: the search results. + :rtype: list + + """ + return tvdb_client.search_series(name) + + +@region.cache_on_arguments(expiration_time=REFINER_EXPIRATION_TIME) +def get_series(id): + """Get series. + + :param int id: id of the series. + :return: the series data. + :rtype: dict + + """ + return tvdb_client.get_series(id) + + +@region.cache_on_arguments(expiration_time=REFINER_EXPIRATION_TIME) +def get_series_episode(series_id, season, episode): + """Get an episode of a series. + + :param int series_id: id of the series. + :param int season: season number of the episode. + :param int episode: episode number of the episode. + :return: the episode data. + :rtype: dict + + """ + result = tvdb_client.query_series_episodes(series_id, aired_season=season, aired_episode=episode) + if result: + return tvdb_client.get_episode(result['data'][0]['id']) + + +def refine(video, **kwargs): + """Refine a video by searching `TheTVDB `_. + + .. note:: + + This refiner only work for instances of :class:`~subliminal.video.Episode`. 
+ + Several attributes can be found: + + * :attr:`~subliminal.video.Episode.series` + * :attr:`~subliminal.video.Episode.year` + * :attr:`~subliminal.video.Episode.series_imdb_id` + * :attr:`~subliminal.video.Episode.series_tvdb_id` + * :attr:`~subliminal.video.Episode.title` + * :attr:`~subliminal.video.Video.imdb_id` + * :attr:`~subliminal.video.Episode.tvdb_id` + + """ + # only deal with Episode videos + if not isinstance(video, Episode): + logger.error('Cannot refine episodes') + return + + # exit if the information is complete + if video.series_tvdb_id and video.tvdb_id: + logger.debug('No need to search') + return + + # search the series + logger.info('Searching series %r', video.series) + results = search_series(video.series.lower()) + if not results: + logger.warning('No results for series') + return + logger.debug('Found %d results', len(results)) + + # search for exact matches + matching_results = [] + for result in results: + matching_result = {} + + # use seriesName and aliases + series_names = [result['seriesName']] + series_names.extend(result['aliases']) + + # parse the original series as series + year or country + original_match = series_re.match(result['seriesName']).groupdict() + + # parse series year + series_year = None + if result['firstAired']: + series_year = datetime.strptime(result['firstAired'], '%Y-%m-%d').year + + # discard mismatches on year + if video.year and series_year and video.year != series_year: + logger.debug('Discarding series %r mismatch on year %d', result['seriesName'], series_year) + continue + + # iterate over series names + for series_name in series_names: + # parse as series and year + series, year, country = series_re.match(series_name).groups() + if year: + year = int(year) + + # discard mismatches on year + if year and (video.original_series or video.year != year): + logger.debug('Discarding series name %r mismatch on year %d', series, year) + continue + + # match on sanitized series name + if sanitize(series) == 
sanitize(video.series): + logger.debug('Found exact match on series %r', series_name) + matching_result['match'] = {'series': original_match['series'], 'year': series_year, + 'original_series': original_match['year'] is None} + break + + # add the result on match + if matching_result: + matching_result['data'] = result + matching_results.append(matching_result) + + # exit if we don't have exactly 1 matching result + if not matching_results: + logger.error('No matching series found') + return + if len(matching_results) > 1: + logger.error('Multiple matches found') + return + + # get the series + matching_result = matching_results[0] + series = get_series(matching_result['data']['id']) + + # add series information + logger.debug('Found series %r', series) + video.series = matching_result['match']['series'] + video.year = matching_result['match']['year'] + video.original_series = matching_result['match']['original_series'] + video.series_tvdb_id = series['id'] + video.series_imdb_id = series['imdbId'] or None + + # get the episode + logger.info('Getting series episode %dx%d', video.season, video.episode) + episode = get_series_episode(video.series_tvdb_id, video.season, video.episode) + if not episode: + logger.warning('No results for episode') + return + + # add episode information + logger.debug('Found episode %r', episode) + video.tvdb_id = episode['id'] + video.title = episode['episodeName'] or None + video.imdb_id = episode['imdbId'] or None diff --git a/libs/subliminal/score.py b/libs/subliminal/score.py index f9dcaede..31ccb343 100755 --- a/libs/subliminal/score.py +++ b/libs/subliminal/score.py @@ -1,90 +1,234 @@ -#!/usr/bin/env python # -*- coding: utf-8 -*- -from __future__ import print_function, unicode_literals -from sympy import Eq, symbols, solve +""" +This module provides the default implementation of the `compute_score` parameter in +:meth:`~subliminal.core.ProviderPool.download_best_subtitles` and :func:`~subliminal.core.download_best_subtitles`. 
+ +.. note:: + + To avoid unnecessary dependency on `sympy `_ and boost subliminal's import time, the + resulting scores are hardcoded here and manually updated when the set of equations change. + +Available matches: + + * hash + * title + * year + * series + * season + * episode + * release_group + * format + * audio_codec + * resolution + * hearing_impaired + * video_codec + * series_imdb_id + * imdb_id + * tvdb_id + +""" +from __future__ import division, print_function +import logging + +from .video import Episode, Movie + +logger = logging.getLogger(__name__) -# Symbols -release_group, resolution, format, video_codec, audio_codec = symbols('release_group resolution format video_codec audio_codec') -imdb_id, hash, title, series, tvdb_id, season, episode = symbols('imdb_id hash title series tvdb_id season episode') # @ReservedAssignment -year = symbols('year') +#: Scores for episodes +episode_scores = {'hash': 359, 'series': 180, 'year': 90, 'season': 30, 'episode': 30, 'release_group': 15, + 'format': 7, 'audio_codec': 3, 'resolution': 2, 'video_codec': 2, 'hearing_impaired': 1} + +#: Scores for movies +movie_scores = {'hash': 119, 'title': 60, 'year': 30, 'release_group': 15, + 'format': 7, 'audio_codec': 3, 'resolution': 2, 'video_codec': 2, 'hearing_impaired': 1} + +#: Equivalent release groups +equivalent_release_groups = ({'LOL', 'DIMENSION'}, {'ASAP', 'IMMERSE', 'FLEET'}) -def get_episode_equations(): - """Get the score equations for a :class:`~subliminal.video.Episode` +def get_equivalent_release_groups(release_group): + """Get all the equivalents of the given release group. - The equations are the following: - - 1. hash = resolution + format + video_codec + audio_codec + series + season + episode + year + release_group - 2. series = resolution + video_codec + audio_codec + season + episode + release_group + 1 - 3. year = series - 4. tvdb_id = series + year - 5. season = resolution + video_codec + audio_codec + 1 - 6. 
imdb_id = series + season + episode + year - 7. format = video_codec + audio_codec - 8. resolution = video_codec - 9. video_codec = 2 * audio_codec - 10. title = season + episode - 11. season = episode - 12. release_group = season - 13. audio_codec = 1 - - :return: the score equations for an episode - :rtype: list of :class:`sympy.Eq` + :param str release_group: the release group to get the equivalents of. + :return: the equivalent release groups. + :rtype: set """ - equations = [] - equations.append(Eq(hash, resolution + format + video_codec + audio_codec + series + season + episode + year + release_group)) - equations.append(Eq(series, resolution + video_codec + audio_codec + season + episode + release_group + 1)) - equations.append(Eq(series, year)) - equations.append(Eq(tvdb_id, series + year)) - equations.append(Eq(season, resolution + video_codec + audio_codec + 1)) - equations.append(Eq(imdb_id, series + season + episode + year)) - equations.append(Eq(format, video_codec + audio_codec)) - equations.append(Eq(resolution, video_codec)) - equations.append(Eq(video_codec, 2 * audio_codec)) - equations.append(Eq(title, season + episode)) - equations.append(Eq(season, episode)) - equations.append(Eq(release_group, season)) - equations.append(Eq(audio_codec, 1)) - return equations + for equivalent_release_group in equivalent_release_groups: + if release_group in equivalent_release_group: + return equivalent_release_group + + return {release_group} -def get_movie_equations(): - """Get the score equations for a :class:`~subliminal.video.Movie` +def get_scores(video): + """Get the scores dict for the given `video`. - The equations are the following: + This will return either :data:`episode_scores` or :data:`movie_scores` based on the type of the `video`. - 1. hash = resolution + format + video_codec + audio_codec + title + year + release_group - 2. imdb_id = hash - 3. resolution = video_codec - 4. video_codec = 2 * audio_codec - 5. 
format = video_codec + audio_codec - 6. title = resolution + video_codec + audio_codec + year + 1 - 7. release_group = resolution + video_codec + audio_codec + 1 - 8. year = release_group + 1 - 9. audio_codec = 1 - - :return: the score equations for a movie - :rtype: list of :class:`sympy.Eq` + :param video: the video to compute the score against. + :type video: :class:`~subliminal.video.Video` + :return: the scores dict. + :rtype: dict """ - equations = [] - equations.append(Eq(hash, resolution + format + video_codec + audio_codec + title + year + release_group)) - equations.append(Eq(imdb_id, hash)) - equations.append(Eq(resolution, video_codec)) - equations.append(Eq(video_codec, 2 * audio_codec)) - equations.append(Eq(format, video_codec + audio_codec)) - equations.append(Eq(title, resolution + video_codec + audio_codec + year + 1)) - equations.append(Eq(video_codec, 2 * audio_codec)) - equations.append(Eq(release_group, resolution + video_codec + audio_codec + 1)) - equations.append(Eq(year, release_group + 1)) - equations.append(Eq(audio_codec, 1)) - return equations + if isinstance(video, Episode): + return episode_scores + elif isinstance(video, Movie): + return movie_scores + + raise ValueError('video must be an instance of Episode or Movie') -if __name__ == '__main__': - print(solve(get_episode_equations(), [release_group, resolution, format, video_codec, audio_codec, imdb_id, - hash, series, tvdb_id, season, episode, title, year])) - print(solve(get_movie_equations(), [release_group, resolution, format, video_codec, audio_codec, imdb_id, - hash, title, year])) +def compute_score(subtitle, video, hearing_impaired=None): + """Compute the score of the `subtitle` against the `video` with `hearing_impaired` preference. + + :func:`compute_score` uses the :meth:`Subtitle.get_matches ` method and + applies the scores (either from :data:`episode_scores` or :data:`movie_scores`) after some processing. + + :param subtitle: the subtitle to compute the score of. 
+ :type subtitle: :class:`~subliminal.subtitle.Subtitle` + :param video: the video to compute the score against. + :type video: :class:`~subliminal.video.Video` + :param bool hearing_impaired: hearing impaired preference. + :return: score of the subtitle. + :rtype: int + + """ + logger.info('Computing score of %r for video %r with %r', subtitle, video, dict(hearing_impaired=hearing_impaired)) + + # get the scores dict + scores = get_scores(video) + logger.debug('Using scores %r', scores) + + # get the matches + matches = subtitle.get_matches(video) + logger.debug('Found matches %r', matches) + + # on hash match, discard everything else + if 'hash' in matches: + logger.debug('Keeping only hash match') + matches &= {'hash'} + + # handle equivalent matches + if isinstance(video, Episode): + if 'title' in matches: + logger.debug('Adding title match equivalent') + matches.add('episode') + if 'series_imdb_id' in matches: + logger.debug('Adding series_imdb_id match equivalent') + matches |= {'series', 'year'} + if 'imdb_id' in matches: + logger.debug('Adding imdb_id match equivalents') + matches |= {'series', 'year', 'season', 'episode'} + if 'tvdb_id' in matches: + logger.debug('Adding tvdb_id match equivalents') + matches |= {'series', 'year', 'season', 'episode'} + if 'series_tvdb_id' in matches: + logger.debug('Adding series_tvdb_id match equivalents') + matches |= {'series', 'year'} + elif isinstance(video, Movie): + if 'imdb_id' in matches: + logger.debug('Adding imdb_id match equivalents') + matches |= {'title', 'year'} + + # handle hearing impaired + if hearing_impaired is not None and subtitle.hearing_impaired == hearing_impaired: + logger.debug('Matched hearing_impaired') + matches.add('hearing_impaired') + + # compute the score + score = sum((scores.get(match, 0) for match in matches)) + logger.info('Computed score %r with final matches %r', score, matches) + + # ensure score is within valid bounds + assert 0 <= score <= scores['hash'] + 
scores['hearing_impaired'] + + return score + + +def solve_episode_equations(): + from sympy import Eq, solve, symbols + + hash, series, year, season, episode, release_group = symbols('hash series year season episode release_group') + format, audio_codec, resolution, video_codec = symbols('format audio_codec resolution video_codec') + hearing_impaired = symbols('hearing_impaired') + + equations = [ + # hash is best + Eq(hash, series + year + season + episode + release_group + format + audio_codec + resolution + video_codec), + + # series counts for the most part in the total score + Eq(series, year + season + episode + release_group + format + audio_codec + resolution + video_codec + 1), + + # year is the second most important part + Eq(year, season + episode + release_group + format + audio_codec + resolution + video_codec + 1), + + # season is important too + Eq(season, release_group + format + audio_codec + resolution + video_codec + 1), + + # episode is equally important to season + Eq(episode, season), + + # release group is the next most wanted match + Eq(release_group, format + audio_codec + resolution + video_codec + 1), + + # format counts as much as audio_codec, resolution and video_codec + Eq(format, audio_codec + resolution + video_codec), + + # audio_codec is more valuable than video_codec + Eq(audio_codec, video_codec + 1), + + # resolution counts as much as video_codec + Eq(resolution, video_codec), + + # video_codec is the least valuable match but counts more than the sum of all scoring increasing matches + Eq(video_codec, hearing_impaired + 1), + + # hearing impaired is only used for score increasing, so put it to 1 + Eq(hearing_impaired, 1), + ] + + return solve(equations, [hash, series, year, season, episode, release_group, format, audio_codec, resolution, + hearing_impaired, video_codec]) + + +def solve_movie_equations(): + from sympy import Eq, solve, symbols + + hash, title, year, release_group = symbols('hash title year release_group') + 
format, audio_codec, resolution, video_codec = symbols('format audio_codec resolution video_codec') + hearing_impaired = symbols('hearing_impaired') + + equations = [ + # hash is best + Eq(hash, title + year + release_group + format + audio_codec + resolution + video_codec), + + # title counts for the most part in the total score + Eq(title, year + release_group + format + audio_codec + resolution + video_codec + 1), + + # year is the second most important part + Eq(year, release_group + format + audio_codec + resolution + video_codec + 1), + + # release group is the next most wanted match + Eq(release_group, format + audio_codec + resolution + video_codec + 1), + + # format counts as much as audio_codec, resolution and video_codec + Eq(format, audio_codec + resolution + video_codec), + + # audio_codec is more valuable than video_codec + Eq(audio_codec, video_codec + 1), + + # resolution counts as much as video_codec + Eq(resolution, video_codec), + + # video_codec is the least valuable match but counts more than the sum of all scoring increasing matches + Eq(video_codec, hearing_impaired + 1), + + # hearing impaired is only used for score increasing, so put it to 1 + Eq(hearing_impaired, 1), + ] + + return solve(equations, [hash, title, year, release_group, format, audio_codec, resolution, hearing_impaired, + video_codec]) diff --git a/libs/subliminal/subtitle.py b/libs/subliminal/subtitle.py index 1ff7945d..60cdf3d6 100644 --- a/libs/subliminal/subtitle.py +++ b/libs/subliminal/subtitle.py @@ -1,31 +1,45 @@ # -*- coding: utf-8 -*- -from __future__ import unicode_literals +import codecs import logging -import os.path -import babelfish +import os + import chardet -import guessit.matchtree -import guessit.transfo import pysrt + +from .score import get_equivalent_release_groups from .video import Episode, Movie +from .utils import sanitize, sanitize_release_group logger = logging.getLogger(__name__) +#: Subtitle extensions +SUBTITLE_EXTENSIONS = ('.srt', '.sub', 
'.smi', '.txt', '.ssa', '.ass', '.mpl') + class Subtitle(object): - """Base class for subtitle + """Base class for subtitle. - :param language: language of the subtitle - :type language: :class:`babelfish.Language` - :param bool hearing_impaired: `True` if the subtitle is hearing impaired, `False` otherwise - :param page_link: link to the web page from which the subtitle can be downloaded, if any - :type page_link: string or None + :param language: language of the subtitle. + :type language: :class:`~babelfish.language.Language` + :param bool hearing_impaired: whether or not the subtitle is hearing impaired. + :param page_link: URL of the web page from which the subtitle can be downloaded. + :type page_link: str + :param encoding: Text encoding of the subtitle. + :type encoding: str """ - def __init__(self, language, hearing_impaired=False, page_link=None): + #: Name of the provider that returns that class of subtitle + provider_name = '' + + def __init__(self, language, hearing_impaired=False, page_link=None, encoding=None): + #: Language of the subtitle self.language = language + + #: Whether or not the subtitle is hearing impaired self.hearing_impaired = hearing_impaired + + #: URL of the web page from which the subtitle can be downloaded self.page_link = page_link #: Content as bytes @@ -34,9 +48,60 @@ class Subtitle(object): #: Encoding to decode with when accessing :attr:`text` self.encoding = None + # validate the encoding + if encoding: + try: + self.encoding = codecs.lookup(encoding).name + except (TypeError, LookupError): + logger.debug('Unsupported encoding %s', encoding) + @property - def guessed_encoding(self): - """Guessed encoding using the language, falling back on chardet""" + def id(self): + """Unique identifier of the subtitle""" + raise NotImplementedError + + @property + def text(self): + """Content as string + + If :attr:`encoding` is None, the encoding is guessed with :meth:`guess_encoding` + + """ + if not self.content: + return + + if 
self.encoding: + return self.content.decode(self.encoding, errors='replace') + + return self.content.decode(self.guess_encoding(), errors='replace') + + def is_valid(self): + """Check if a :attr:`text` is a valid SubRip format. + + :return: whether or not the subtitle is valid. + :rtype: bool + + """ + if not self.text: + return False + + try: + pysrt.from_string(self.text, error_handling=pysrt.ERROR_RAISE) + except pysrt.Error as e: + if e.args[0] < 80: + return False + + return True + + def guess_encoding(self): + """Guess encoding using the language, falling back on chardet. + + :return: the guessed encoding. + :rtype: str + + """ + logger.info('Guessing encoding for language %s', self.language) + # always try utf-8 first encodings = ['utf-8'] @@ -62,223 +127,128 @@ class Subtitle(object): encodings.append('latin-1') # try to decode + logger.debug('Trying encodings %r', encodings) for encoding in encodings: try: self.content.decode(encoding) - return encoding except UnicodeDecodeError: pass + else: + logger.info('Guessed encoding %s', encoding) + return encoding + + logger.warning('Could not guess encoding from language') # fallback on chardet - logger.warning('Could not decode content with encodings %r', encodings) - return chardet.detect(self.content)['encoding'] + encoding = chardet.detect(self.content)['encoding'] + logger.info('Chardet found encoding %s', encoding) - @property - def text(self): - """Content as string + return encoding - If :attr:`encoding` is None, the encoding is guessed with :attr:`guessed_encoding` + def get_matches(self, video): + """Get the matches against the `video`. 
- """ - if not self.content: - return '' - return self.content.decode(self.encoding or self.guessed_encoding, errors='replace') - - @property - def is_valid(self): - """Check if a subtitle text is a valid SubRip format""" - try: - pysrt.from_string(self.text, error_handling=pysrt.ERROR_RAISE) - return True - except pysrt.Error as e: - if e.args[0] > 80: - return True - except: - logger.exception('Unexpected error when validating subtitle') - return False - - def compute_matches(self, video): - """Compute the matches of the subtitle against the `video` - - :param video: the video to compute the matches against + :param video: the video to get the matches with. :type video: :class:`~subliminal.video.Video` - :return: matches of the subtitle + :return: matches of the subtitle. :rtype: set """ raise NotImplementedError - def compute_score(self, video): - """Compute the score of the subtitle against the `video` - - There are equivalent matches so that a provider can match one element or its equivalent. This is - to give all provider a chance to have a score in the same range without hurting quality. 
- - * Matching :class:`~subliminal.video.Video`'s `hashes` is equivalent to matching everything else - * Matching :class:`~subliminal.video.Episode`'s `season` and `episode` - is equivalent to matching :class:`~subliminal.video.Episode`'s `title` - * Matching :class:`~subliminal.video.Episode`'s `tvdb_id` is equivalent to matching - :class:`~subliminal.video.Episode`'s `series` - - :param video: the video to compute the score against - :type video: :class:`~subliminal.video.Video` - :return: score of the subtitle - :rtype: int - - """ - score = 0 - # compute matches - initial_matches = self.compute_matches(video) - matches = initial_matches.copy() - # hash is the perfect match - if 'hash' in matches: - score = video.scores['hash'] - else: - # remove equivalences - if isinstance(video, Episode): - if 'imdb_id' in matches: - matches -= {'series', 'tvdb_id', 'season', 'episode', 'title', 'year'} - if 'tvdb_id' in matches: - matches -= {'series', 'year'} - if 'title' in matches: - matches -= {'season', 'episode'} - # add other scores - score += sum((video.scores[match] for match in matches)) - logger.info('Computed score %d with matches %r', score, initial_matches) - return score + def __hash__(self): + return hash(self.provider_name + '-' + self.id) def __repr__(self): - return '<%s [%s]>' % (self.__class__.__name__, self.language) + return '<%s %r [%s]>' % (self.__class__.__name__, self.id, self.language) -def get_subtitle_path(video_path, language=None): - """Create the subtitle path from the given `video_path` and `language` +def get_subtitle_path(video_path, language=None, extension='.srt'): + """Get the subtitle path using the `video_path` and `language`. - :param string video_path: path to the video - :param language: language of the subtitle to put in the path - :type language: :class:`babelfish.Language` or None - :return: path of the subtitle - :rtype: string + :param str video_path: path to the video. 
+ :param language: language of the subtitle to put in the path. + :type language: :class:`~babelfish.language.Language` + :param str extension: extension of the subtitle. + :return: path of the subtitle. + :rtype: str """ - subtitle_path = os.path.splitext(video_path)[0] - if language is not None: - try: - return subtitle_path + '.%s.%s' % (language.alpha2, 'srt') - except babelfish.LanguageConvertError: - return subtitle_path + '.%s.%s' % (language.alpha3, 'srt') - return subtitle_path + '.srt' + subtitle_root = os.path.splitext(video_path)[0] + + if language: + subtitle_root += '.' + str(language) + + return subtitle_root + extension -def compute_guess_matches(video, guess): - """Compute matches between a `video` and a `guess` +def guess_matches(video, guess, partial=False): + """Get matches between a `video` and a `guess`. - :param video: the video to compute the matches on + If a guess is `partial`, the absence information won't be counted as a match. + + :param video: the video. :type video: :class:`~subliminal.video.Video` - :param guess: the guess to compute the matches on - :type guess: :class:`guessit.Guess` - :return: matches of the `guess` + :param guess: the guess. + :type guess: dict + :param bool partial: whether or not the guess is partial. + :return: matches between the `video` and the `guess`. 
:rtype: set """ matches = set() if isinstance(video, Episode): # series - if video.series and 'series' in guess and guess['series'].lower() == video.series.lower(): + if video.series and 'title' in guess and sanitize(guess['title']) == sanitize(video.series): matches.add('series') + # title + if video.title and 'episode_title' in guess and sanitize(guess['episode_title']) == sanitize(video.title): + matches.add('title') # season - if video.season and 'seasonNumber' in guess and guess['seasonNumber'] == video.season: + if video.season and 'season' in guess and guess['season'] == video.season: matches.add('season') # episode - if video.episode and 'episodeNumber' in guess and guess['episodeNumber'] == video.episode: + if video.episode and 'episode' in guess and guess['episode'] == video.episode: matches.add('episode') # year - if video.year == guess.get('year'): # count "no year" as an information + if video.year and 'year' in guess and guess['year'] == video.year: + matches.add('year') + # count "no year" as an information + if not partial and video.original_series and 'year' not in guess: matches.add('year') elif isinstance(video, Movie): # year if video.year and 'year' in guess and guess['year'] == video.year: matches.add('year') - # title - if video.title and 'title' in guess and guess['title'].lower() == video.title.lower(): - matches.add('title') - # release group - if video.release_group and 'releaseGroup' in guess and guess['releaseGroup'].lower() == video.release_group.lower(): + # title + if video.title and 'title' in guess and sanitize(guess['title']) == sanitize(video.title): + matches.add('title') + # release_group + if (video.release_group and 'release_group' in guess and + sanitize_release_group(guess['release_group']) in + get_equivalent_release_groups(sanitize_release_group(video.release_group))): matches.add('release_group') - # screen size - if video.resolution and 'screenSize' in guess and guess['screenSize'] == video.resolution: + # resolution + 
if video.resolution and 'screen_size' in guess and guess['screen_size'] == video.resolution: matches.add('resolution') # format if video.format and 'format' in guess and guess['format'].lower() == video.format.lower(): matches.add('format') - # video codec - if video.video_codec and 'videoCodec' in guess and guess['videoCodec'] == video.video_codec: + # video_codec + if video.video_codec and 'video_codec' in guess and guess['video_codec'] == video.video_codec: matches.add('video_codec') - # audio codec - if video.audio_codec and 'audioCodec' in guess and guess['audioCodec'] == video.audio_codec: + # audio_codec + if video.audio_codec and 'audio_codec' in guess and guess['audio_codec'] == video.audio_codec: matches.add('audio_codec') + return matches -def compute_guess_properties_matches(video, string, propertytype): - """Compute matches between a `video` and properties of a certain property type +def fix_line_ending(content): + """Fix line ending of `content` by changing it to \n. - :param video: the video to compute the matches on - :type video: :class:`~subliminal.video.Video` - :param string string: the string to check for a certain property type - :param string propertytype: the type of property to check (as defined in guessit) - :return: matches of a certain property type (but will only be 1 match because we are checking for 1 property type) - :rtype: set - - Supported property types: result of guessit.transfo.guess_properties.GuessProperties().supported_properties() - [u'audioProfile', - u'videoCodec', - u'container', - u'format', - u'episodeFormat', - u'videoApi', - u'screenSize', - u'videoProfile', - u'audioChannels', - u'other', - u'audioCodec'] - - """ - matches = set() - # We only check for the property types relevant for us - if propertytype == 'screenSize' and video.resolution: - for prop in guess_properties(string, propertytype): - if prop.lower() == video.resolution.lower(): - matches.add('resolution') - elif propertytype == 'format' and 
video.format: - for prop in guess_properties(string, propertytype): - if prop.lower() == video.format.lower(): - matches.add('format') - elif propertytype == 'videoCodec' and video.video_codec: - for prop in guess_properties(string, propertytype): - if prop.lower() == video.video_codec.lower(): - matches.add('video_codec') - elif propertytype == 'audioCodec' and video.audio_codec: - for prop in guess_properties(string, propertytype): - if prop.lower() == video.audio_codec.lower(): - matches.add('audio_codec') - return matches - - -def guess_properties(string, propertytype): - properties = set() - if string: - tree = guessit.matchtree.MatchTree(string) - guessit.transfo.guess_properties.GuessProperties().process(tree) - properties = set(n.guess[propertytype] for n in tree.nodes() if propertytype in n.guess) - return properties - - -def fix_line_endings(content): - """Fix line ending of `content` by changing it to \n - - :param bytes content: content of the subtitle - :return: the content with fixed line endings + :param bytes content: content of the subtitle. + :return: the content with fixed line endings. :rtype: bytes """ diff --git a/libs/subliminal/tests/__init__.py b/libs/subliminal/tests/__init__.py deleted file mode 100644 index 6cef7800..00000000 --- a/libs/subliminal/tests/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -from __future__ import unicode_literals -from unittest import TextTestRunner, TestSuite -from subliminal import cache_region -from . 
import test_providers, test_subliminal - - -cache_region.configure('dogpile.cache.memory', expiration_time=60 * 30) # @UndefinedVariable -suite = TestSuite([test_providers.suite(), test_subliminal.suite()]) - - -if __name__ == '__main__': - TextTestRunner().run(suite) diff --git a/libs/subliminal/tests/common.py b/libs/subliminal/tests/common.py deleted file mode 100644 index bd1608d4..00000000 --- a/libs/subliminal/tests/common.py +++ /dev/null @@ -1,22 +0,0 @@ -# -*- coding: utf-8 -*- -from __future__ import unicode_literals -from subliminal import Movie, Episode - - -MOVIES = [Movie('Man of Steel (2013)/man.of.steel.2013.720p.bluray.x264-felony.mkv', 'Man of Steel', - format='BluRay', release_group='felony', resolution='720p', video_codec='h264', audio_codec='DTS', - imdb_id=770828, size=7033732714, year=2013, - hashes={'opensubtitles': '5b8f8f4e41ccb21e', 'thesubdb': 'ad32876133355929d814457537e12dc2'})] - -EPISODES = [Episode('The Big Bang Theory/Season 07/The.Big.Bang.Theory.S07E05.720p.HDTV.X264-DIMENSION.mkv', - 'The Big Bang Theory', 7, 5, format='HDTV', release_group='DIMENSION', resolution='720p', - video_codec='h264', audio_codec='AC3', imdb_id=3229392, size=501910737, - title='The Workplace Proximity', year=2007, tvdb_id=80379, - hashes={'opensubtitles': '6878b3ef7c1bd19e', 'thesubdb': '9dbbfb7ba81c9a6237237dae8589fccc'}), - Episode('Game of Thrones/Season 03/Game.of.Thrones.S03E10.Mhysa.720p.WEB-DL.DD5.1.H.264-NTb.mkv', - 'Game of Thrones', 3, 10, format='WEB-DL', release_group='NTb', resolution='720p', - video_codec='h264', audio_codec='AC3', imdb_id=2178796, size=2142810931, title='Mhysa', - tvdb_id=121361, - hashes={'opensubtitles': 'b850baa096976c22', 'thesubdb': 'b1f899c77f4c960b84b8dbf840d4e42d'}), - Episode('Dallas.S01E03.mkv', 'Dallas', 1, 3), - Episode('Dallas.2012.S01E03.mkv', 'Dallas', 1, 3, year=2012)] diff --git a/libs/subliminal/tests/test_providers.py b/libs/subliminal/tests/test_providers.py deleted file mode 100644 index 
e98d9ad3..00000000 --- a/libs/subliminal/tests/test_providers.py +++ /dev/null @@ -1,475 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -from __future__ import unicode_literals -import os -from unittest import TestCase, TestSuite, TestLoader, TextTestRunner -from babelfish import Language -from subliminal import provider_manager -from subliminal.tests.common import MOVIES, EPISODES - - -class ProviderTestCase(TestCase): - provider_name = '' - - def setUp(self): - self.Provider = provider_manager[self.provider_name] - - -class Addic7edProviderTestCase(ProviderTestCase): - provider_name = 'addic7ed' - - def test_find_show_id(self): - with self.Provider() as provider: - show_id = provider.find_show_id('the big bang') - self.assertEqual(show_id, 126) - - def test_find_show_id_no_year(self): - with self.Provider() as provider: - show_id = provider.find_show_id('dallas') - self.assertEqual(show_id, 802) - - def test_find_show_id_year(self): - with self.Provider() as provider: - show_id = provider.find_show_id('dallas', 2012) - self.assertEqual(show_id, 2559) - - def test_find_show_id_error(self): - with self.Provider() as provider: - show_id = provider.find_show_id('the big how i met your mother') - self.assertIsNone(show_id) - - def test_get_show_ids(self): - with self.Provider() as provider: - show_ids = provider.get_show_ids() - self.assertIn('the big bang theory', show_ids) - self.assertEqual(show_ids['the big bang theory'], 126) - - def test_get_show_ids_no_year(self): - with self.Provider() as provider: - show_ids = provider.get_show_ids() - self.assertIn('dallas', show_ids) - self.assertEqual(show_ids['dallas'], 802) - - def test_get_show_ids_year(self): - with self.Provider() as provider: - show_ids = provider.get_show_ids() - self.assertIn('dallas (2012)', show_ids) - self.assertEqual(show_ids['dallas (2012)'], 2559) - - def test_query_episode_0(self): - video = EPISODES[0] - languages = {Language('tur'), Language('rus'), Language('heb'), 
Language('ita'), Language('fra'), - Language('ron'), Language('nld'), Language('eng'), Language('deu'), Language('ell'), - Language('por', 'BR'), Language('bul'), Language('por'), Language('msa')} - matches = {frozenset(['series', 'resolution', 'season']), - frozenset(['series', 'episode', 'season', 'title']), - frozenset(['series', 'release_group', 'season']), - frozenset(['series', 'episode', 'season', 'release_group', 'title']), - frozenset(['series', 'season']), - frozenset(['series', 'season', 'format'])} - with self.Provider() as provider: - subtitles = provider.query(video.series, video.season, video.year) - self.assertEqual({frozenset(subtitle.compute_matches(video)) for subtitle in subtitles}, matches) - self.assertEqual({subtitle.language for subtitle in subtitles}, languages) - - def test_query_episode_1(self): - video = EPISODES[1] - languages = {Language('ind'), Language('spa'), Language('hrv'), Language('ita'), Language('fra'), - Language('cat'), Language('ell'), Language('nld'), Language('eng'), Language('fas'), - Language('por'), Language('nor'), Language('deu'), Language('ron'), Language('por', 'BR'), - Language('bul')} - matches = {frozenset(['series', 'episode', 'resolution', 'season', 'title', 'year']), - frozenset(['series', 'resolution', 'season', 'year']), - frozenset(['series', 'resolution', 'season', 'year', 'format']), - frozenset(['series', 'episode', 'season', 'title', 'year']), - frozenset(['series', 'episode', 'season', 'title', 'year', 'format']), - frozenset(['series', 'release_group', 'season', 'year']), - frozenset(['series', 'release_group', 'season', 'year', 'format']), - frozenset(['series', 'resolution', 'release_group', 'season', 'year']), - frozenset(['series', 'resolution', 'release_group', 'season', 'year', 'format']), - frozenset(['series', 'episode', 'season', 'release_group', 'title', 'year', 'format']), - frozenset(['series', 'season', 'year']), - frozenset(['series', 'season', 'year', 'format'])} - with self.Provider() 
as provider: - subtitles = provider.query(video.series, video.season, video.year) - self.assertEqual({frozenset(subtitle.compute_matches(video)) for subtitle in subtitles}, matches) - self.assertEqual({subtitle.language for subtitle in subtitles}, languages) - - def test_query_episode_year(self): - video_no_year = EPISODES[2] - video_year = EPISODES[3] - with self.Provider() as provider: - subtitles_no_year = provider.query(video_no_year.series, video_no_year.season, video_no_year.year) - subtitles_year = provider.query(video_year.series, video_year.season, video_year.year) - self.assertNotEqual(subtitles_no_year, subtitles_year) - - def test_list_subtitles(self): - video = EPISODES[0] - languages = {Language('eng'), Language('fra')} - matches = {frozenset(['series', 'episode', 'season', 'release_group', 'title']), - frozenset(['series', 'episode', 'season', 'title'])} - with self.Provider() as provider: - subtitles = provider.list_subtitles(video, languages) - self.assertEqual({frozenset(subtitle.compute_matches(video)) for subtitle in subtitles}, matches) - self.assertEqual({subtitle.language for subtitle in subtitles}, languages) - - def test_download_subtitle(self): - video = EPISODES[0] - languages = {Language('eng'), Language('fra')} - with self.Provider() as provider: - subtitles = provider.list_subtitles(video, languages) - provider.download_subtitle(subtitles[0]) - self.assertIsNotNone(subtitles[0].content) - self.assertTrue(subtitles[0].is_valid) - - -class OpenSubtitlesProviderTestCase(ProviderTestCase): - provider_name = 'opensubtitles' - - def test_query_movie_0_query(self): - video = MOVIES[0] - languages = {Language('eng')} - matches = {frozenset([]), - frozenset(['imdb_id', 'resolution', 'title', 'year']), - frozenset(['imdb_id', 'resolution', 'title', 'year', 'format']), - frozenset(['imdb_id', 'title', 'year']), - frozenset(['imdb_id', 'title', 'year', 'format']), - frozenset(['imdb_id', 'video_codec', 'title', 'year', 'format']), - 
frozenset(['imdb_id', 'resolution', 'title', 'video_codec', 'year', 'format']), - frozenset(['imdb_id', 'title', 'year', 'video_codec', 'resolution', 'release_group', 'format'])} - with self.Provider() as provider: - subtitles = provider.query(languages, query=video.title) - self.assertEqual({frozenset(subtitle.compute_matches(video)) for subtitle in subtitles}, matches) - self.assertEqual({subtitle.language for subtitle in subtitles}, languages) - - def test_query_episode_0_query(self): - video = EPISODES[0] - languages = {Language('eng')} - matches = {frozenset(['series', 'episode', 'season', 'imdb_id', 'format']), - frozenset(['series', 'imdb_id', 'video_codec', 'episode', 'season', 'format']), - frozenset(['episode', 'title', 'series', 'imdb_id', 'video_codec', 'season'])} - with self.Provider() as provider: - subtitles = provider.query(languages, query=os.path.split(video.name)[1]) - self.assertEqual({frozenset(subtitle.compute_matches(video)) for subtitle in subtitles}, matches) - self.assertEqual({subtitle.language for subtitle in subtitles}, languages) - - def test_query_episode_year(self): - video_no_year = EPISODES[2] - video_year = EPISODES[3] - languages = {Language('eng')} - with self.Provider() as provider: - subtitles_no_year = provider.query(languages, query=os.path.split(video_no_year.name)[1]) - subtitles_year = provider.query(languages, query=os.path.split(video_year.name)[1]) - self.assertNotEqual(subtitles_no_year, subtitles_year) - - def test_query_episode_1_query(self): - video = EPISODES[1] - languages = {Language('eng'), Language('fra')} - matches = {frozenset(['episode', 'title', 'series', 'imdb_id', 'video_codec', 'season', 'year', 'format']), - frozenset(['series', 'imdb_id', 'video_codec', 'episode', 'season', 'year']), - frozenset(['episode', 'video_codec', 'series', 'imdb_id', 'resolution', 'season', 'year']), - frozenset(['series', 'imdb_id', 'resolution', 'episode', 'season', 'year']), - frozenset(['series', 'episode', 'season', 
'imdb_id', 'year']), - frozenset(['series', 'episode', 'season', 'imdb_id', 'year', 'format'])} - with self.Provider() as provider: - subtitles = provider.query(languages, query=os.path.split(video.name)[1]) - self.assertEqual({frozenset(subtitle.compute_matches(video)) for subtitle in subtitles}, matches) - self.assertEqual({subtitle.language for subtitle in subtitles}, languages) - - def test_query_movie_0_imdb_id(self): - video = MOVIES[0] - languages = {Language('eng'), Language('fra')} - matches = {frozenset(['imdb_id', 'video_codec', 'title', 'year', 'format']), - frozenset(['imdb_id', 'resolution', 'title', 'video_codec', 'year']), - frozenset(['imdb_id', 'resolution', 'title', 'video_codec', 'year', 'format']), - frozenset(['imdb_id', 'title', 'year', 'video_codec', 'resolution', 'release_group', 'format']), - frozenset(['imdb_id', 'title', 'year']), - frozenset(['imdb_id', 'title', 'year', 'format']), - frozenset(['imdb_id', 'resolution', 'title', 'year']), - frozenset(['imdb_id', 'resolution', 'title', 'year', 'format'])} - with self.Provider() as provider: - subtitles = provider.query(languages, imdb_id=video.imdb_id) - self.assertEqual({frozenset(subtitle.compute_matches(video)) for subtitle in subtitles}, matches) - self.assertEqual({subtitle.language for subtitle in subtitles}, languages) - - def test_query_episode_0_imdb_id(self): - video = EPISODES[0] - languages = {Language('eng'), Language('fra')} - matches = {frozenset(['series', 'episode', 'season', 'imdb_id', 'format']), - frozenset(['episode', 'release_group', 'video_codec', 'series', 'imdb_id', 'resolution', 'season', 'format']), - frozenset(['series', 'imdb_id', 'video_codec', 'episode', 'season', 'format']), - frozenset(['episode', 'title', 'series', 'imdb_id', 'video_codec', 'season'])} - with self.Provider() as provider: - subtitles = provider.query(languages, imdb_id=video.imdb_id) - self.assertEqual({frozenset(subtitle.compute_matches(video)) for subtitle in subtitles}, matches) - 
self.assertEqual({subtitle.language for subtitle in subtitles}, languages) - - def test_query_movie_0_hash(self): - video = MOVIES[0] - languages = {Language('eng')} - matches = {frozenset(['hash', 'title', 'video_codec', 'year', 'resolution', 'imdb_id', 'format']), - frozenset(['hash', 'title', 'video_codec', 'year', 'resolution', 'release_group', 'imdb_id', 'format']), - frozenset(['year', 'video_codec', 'imdb_id', 'hash', 'title', 'format']), - frozenset([]), - frozenset(['year', 'resolution', 'imdb_id', 'hash', 'title', 'format']), - frozenset(['year', 'imdb_id', 'hash', 'title']), - frozenset(['year', 'imdb_id', 'hash', 'title', 'format'])} - with self.Provider() as provider: - subtitles = provider.query(languages, hash=video.hashes['opensubtitles'], size=video.size) - self.assertEqual({frozenset(subtitle.compute_matches(video)) for subtitle in subtitles}, matches) - self.assertEqual({subtitle.language for subtitle in subtitles}, languages) - - def test_query_episode_0_hash(self): - video = EPISODES[0] - languages = {Language('eng')} - matches = {frozenset(['series', 'hash', 'format']), - frozenset(['episode', 'season', 'series', 'imdb_id', 'video_codec', 'hash', 'format']), - frozenset(['series', 'episode', 'season', 'hash', 'imdb_id', 'format']), - frozenset(['series', 'resolution', 'hash', 'video_codec', 'format'])} - with self.Provider() as provider: - subtitles = provider.query(languages, hash=video.hashes['opensubtitles'], size=video.size) - self.assertEqual({frozenset(subtitle.compute_matches(video)) for subtitle in subtitles}, matches) - self.assertEqual({subtitle.language for subtitle in subtitles}, languages) - - def test_list_subtitles(self): - video = MOVIES[0] - languages = {Language('eng'), Language('fra')} - matches = {frozenset(['title', 'video_codec', 'year', 'resolution', 'release_group', 'imdb_id', 'format']), - frozenset(['imdb_id', 'year', 'title']), - frozenset(['imdb_id', 'year', 'title', 'format']), - frozenset(['year', 'video_codec', 
'imdb_id', 'resolution', 'title']), - frozenset(['year', 'video_codec', 'imdb_id', 'resolution', 'title', 'format']), - frozenset(['hash', 'title', 'video_codec', 'year', 'resolution', 'release_group', 'imdb_id', 'format']), - frozenset(['year', 'video_codec', 'imdb_id', 'hash', 'title', 'format']), - frozenset([]), - frozenset(['year', 'resolution', 'imdb_id', 'hash', 'title', 'format']), - frozenset(['hash', 'title', 'video_codec', 'year', 'resolution', 'imdb_id', 'format']), - frozenset(['year', 'imdb_id', 'hash', 'title']), - frozenset(['year', 'imdb_id', 'hash', 'title', 'format']), - frozenset(['video_codec', 'imdb_id', 'year', 'title', 'format']), - frozenset(['year', 'imdb_id', 'resolution', 'title']), - frozenset(['year', 'imdb_id', 'resolution', 'title', 'format'])} - with self.Provider() as provider: - subtitles = provider.list_subtitles(video, languages) - self.assertEqual({frozenset(subtitle.compute_matches(video)) for subtitle in subtitles}, matches) - self.assertEqual({subtitle.language for subtitle in subtitles}, languages) - - def test_download_subtitle(self): - video = MOVIES[0] - languages = {Language('eng'), Language('fra')} - with self.Provider() as provider: - subtitles = provider.list_subtitles(video, languages) - provider.download_subtitle(subtitles[0]) - self.assertIsNotNone(subtitles[0].content) - self.assertTrue(subtitles[0].is_valid) - - -class PodnapisiProviderTestCase(ProviderTestCase): - provider_name = 'podnapisi' - - def test_query_movie_0(self): - video = MOVIES[0] - language = Language('eng') - matches = {frozenset(['video_codec', 'title', 'resolution', 'year']), - frozenset(['title', 'resolution', 'year']), - frozenset(['video_codec', 'title', 'year']), - frozenset(['title', 'year']), - frozenset(['title']), - frozenset(['video_codec', 'title', 'resolution', 'release_group', 'year', 'format']), - frozenset(['video_codec', 'title', 'resolution', 'audio_codec', 'year', 'format'])} - with self.Provider() as provider: - subtitles = 
provider.query(language, title=video.title, year=video.year) - self.assertEqual({frozenset(subtitle.compute_matches(video)) for subtitle in subtitles}, matches) - self.assertEqual({subtitle.language for subtitle in subtitles}, {language}) - - def test_query_episode_0(self): - video = EPISODES[0] - language = Language('eng') - matches = {frozenset(['episode', 'series', 'season', 'video_codec', 'resolution', 'release_group', 'format']), - frozenset(['season', 'video_codec', 'episode', 'resolution', 'series'])} - with self.Provider() as provider: - subtitles = provider.query(language, series=video.series, season=video.season, episode=video.episode, - year=video.year) - self.assertEqual({frozenset(subtitle.compute_matches(video)) for subtitle in subtitles}, matches) - self.assertEqual({subtitle.language for subtitle in subtitles}, {language}) - - def test_query_episode_1(self): - video = EPISODES[1] - language = Language('eng') - matches = {frozenset(['episode', 'release_group', 'series', 'video_codec', 'resolution', 'season', 'year', 'format']), - frozenset(['episode', 'series', 'video_codec', 'resolution', 'season', 'year']), - frozenset(['season', 'video_codec', 'episode', 'series', 'year'])} - with self.Provider() as provider: - subtitles = provider.query(language, series=video.series, season=video.season, episode=video.episode, - year=video.year) - self.assertEqual({frozenset(subtitle.compute_matches(video)) for subtitle in subtitles}, matches) - self.assertEqual({subtitle.language for subtitle in subtitles}, {language}) - - def test_list_subtitles(self): - video = MOVIES[0] - languages = {Language('eng'), Language('fra')} - matches = {frozenset(['video_codec', 'title', 'resolution', 'year']), - frozenset(['title', 'resolution', 'year']), - frozenset(['video_codec', 'title', 'year']), - frozenset(['video_codec', 'title', 'year', 'format']), - frozenset(['title', 'year']), - frozenset(['title']), - frozenset(['video_codec', 'title', 'resolution', 'release_group', 
'year', 'format']), - frozenset(['video_codec', 'title', 'resolution', 'audio_codec', 'year', 'format'])} - with self.Provider() as provider: - subtitles = provider.list_subtitles(video, languages) - self.assertEqual({frozenset(subtitle.compute_matches(video)) for subtitle in subtitles}, matches) - self.assertEqual({subtitle.language for subtitle in subtitles}, languages) - - def test_download_subtitle(self): - video = MOVIES[0] - languages = {Language('eng'), Language('fra')} - with self.Provider() as provider: - subtitles = provider.list_subtitles(video, languages) - provider.download_subtitle(subtitles[0]) - self.assertIsNotNone(subtitles[0].content) - self.assertTrue(subtitles[0].is_valid) - - -class TheSubDBProviderTestCase(ProviderTestCase): - provider_name = 'thesubdb' - - def test_query_episode_0(self): - video = EPISODES[0] - languages = {Language('eng'), Language('spa'), Language('por')} - matches = {frozenset(['hash'])} - with self.Provider() as provider: - subtitles = provider.query(video.hashes['thesubdb']) - self.assertEqual({frozenset(subtitle.compute_matches(video)) for subtitle in subtitles}, matches) - self.assertEqual({subtitle.language for subtitle in subtitles}, languages) - - def test_query_episode_1(self): - video = EPISODES[1] - languages = {Language('eng'), Language('por')} - matches = {frozenset(['hash'])} - with self.Provider() as provider: - subtitles = provider.query(video.hashes['thesubdb']) - self.assertEqual({frozenset(subtitle.compute_matches(video)) for subtitle in subtitles}, matches) - self.assertEqual({subtitle.language for subtitle in subtitles}, languages) - - def test_list_subtitles(self): - video = MOVIES[0] - languages = {Language('eng'), Language('por')} - matches = {frozenset(['hash'])} - with self.Provider() as provider: - subtitles = provider.list_subtitles(video, languages) - self.assertEqual({frozenset(subtitle.compute_matches(video)) for subtitle in subtitles}, matches) - self.assertEqual({subtitle.language for 
subtitle in subtitles}, languages) - - def test_download_subtitle(self): - video = MOVIES[0] - languages = {Language('eng'), Language('por')} - with self.Provider() as provider: - subtitles = provider.list_subtitles(video, languages) - provider.download_subtitle(subtitles[0]) - provider.download_subtitle(subtitles[1]) - self.assertIsNotNone(subtitles[0].content) - self.assertTrue(subtitles[0].is_valid) - - -class TVsubtitlesProviderTestCase(ProviderTestCase): - provider_name = 'tvsubtitles' - - def test_find_show_id(self): - with self.Provider() as provider: - show_id = provider.find_show_id('the big bang') - self.assertEqual(show_id, 154) - - def test_find_show_id_ambiguous(self): - with self.Provider() as provider: - show_id = provider.find_show_id('new girl') - self.assertEqual(show_id, 977) - - def test_find_show_id_no_dots(self): - with self.Provider() as provider: - show_id = provider.find_show_id('marvel\'s agents of s h i e l d') - self.assertEqual(show_id, 1340) - - def test_find_show_id_no_year_dallas(self): - with self.Provider() as provider: - show_id = provider.find_show_id('dallas') - self.assertEqual(show_id, 646) - - def test_find_show_id_no_year_house_of_cards(self): - with self.Provider() as provider: - show_id = provider.find_show_id('house of cards') - self.assertEqual(show_id, 352) - - def test_find_show_id_year_dallas(self): - with self.Provider() as provider: - show_id = provider.find_show_id('dallas', 2012) - self.assertEqual(show_id, 1127) - - def test_find_show_id_year_house_of_cards(self): - with self.Provider() as provider: - show_id = provider.find_show_id('house of cards', 2013) - self.assertEqual(show_id, 1246) - - def test_find_show_id_error(self): - with self.Provider() as provider: - show_id = provider.find_show_id('the big gaming') - self.assertIsNone(show_id) - - def test_find_episode_ids(self): - with self.Provider() as provider: - episode_ids = provider.find_episode_ids(154, 5) - self.assertEqual(set(episode_ids.keys()), 
set(range(1, 25))) - - def test_query_episode_0(self): - video = EPISODES[0] - languages = {Language('fra'), Language('por'), Language('hun'), Language('ron'), Language('eng')} - matches = {frozenset(['series', 'episode', 'season', 'video_codec', 'format']), - frozenset(['series', 'episode', 'season', 'format'])} - with self.Provider() as provider: - subtitles = provider.query(video.series, video.season, video.episode, video.year) - self.assertEqual({frozenset(subtitle.compute_matches(video)) for subtitle in subtitles}, matches) - self.assertEqual({subtitle.language for subtitle in subtitles}, languages) - - def test_query_episode_1(self): - video = EPISODES[1] - languages = {Language('fra'), Language('ell'), Language('ron'), Language('eng'), Language('hun'), - Language('por'), Language('por', 'BR'), Language('jpn')} - matches = {frozenset(['series', 'episode', 'resolution', 'season', 'year']), - frozenset(['series', 'episode', 'season', 'video_codec', 'year']), - frozenset(['series', 'episode', 'season', 'year'])} - with self.Provider() as provider: - subtitles = provider.query(video.series, video.season, video.episode, video.year) - self.assertEqual({frozenset(subtitle.compute_matches(video)) for subtitle in subtitles}, matches) - self.assertEqual({subtitle.language for subtitle in subtitles}, languages) - - def test_list_subtitles(self): - video = EPISODES[0] - languages = {Language('eng'), Language('fra')} - matches = {frozenset(['series', 'episode', 'season', 'format'])} - with self.Provider() as provider: - subtitles = provider.list_subtitles(video, languages) - self.assertEqual({frozenset(subtitle.compute_matches(video)) for subtitle in subtitles}, matches) - self.assertEqual({subtitle.language for subtitle in subtitles}, languages) - - def test_download_subtitle(self): - video = EPISODES[0] - languages = {Language('hun')} - with self.Provider() as provider: - subtitles = provider.list_subtitles(video, languages) - provider.download_subtitle(subtitles[0]) - 
self.assertIsNotNone(subtitles[0].content) - self.assertTrue(subtitles[0].is_valid) - - -def suite(): - suite = TestSuite() - suite.addTest(TestLoader().loadTestsFromTestCase(Addic7edProviderTestCase)) - suite.addTest(TestLoader().loadTestsFromTestCase(OpenSubtitlesProviderTestCase)) - suite.addTest(TestLoader().loadTestsFromTestCase(PodnapisiProviderTestCase)) - suite.addTest(TestLoader().loadTestsFromTestCase(TheSubDBProviderTestCase)) - suite.addTest(TestLoader().loadTestsFromTestCase(TVsubtitlesProviderTestCase)) - return suite - - -if __name__ == '__main__': - TextTestRunner().run(suite()) diff --git a/libs/subliminal/tests/test_subliminal.py b/libs/subliminal/tests/test_subliminal.py deleted file mode 100644 index a991d81f..00000000 --- a/libs/subliminal/tests/test_subliminal.py +++ /dev/null @@ -1,191 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -from __future__ import unicode_literals -import os -import shutil -from unittest import TestCase, TestSuite, TestLoader, TextTestRunner -from babelfish import Language -from subliminal import list_subtitles, download_subtitles, save_subtitles, download_best_subtitles, scan_video -from subliminal.tests.common import MOVIES, EPISODES - - -TEST_DIR = 'test_data' - - -class ApiTestCase(TestCase): - def setUp(self): - os.mkdir(TEST_DIR) - - def tearDown(self): - shutil.rmtree(TEST_DIR) - - def test_list_subtitles_movie_0(self): - videos = [MOVIES[0]] - languages = {Language('eng')} - subtitles = list_subtitles(videos, languages) - self.assertEqual(len(subtitles), len(videos)) - self.assertGreater(len(subtitles[videos[0]]), 0) - - def test_list_subtitles_movie_0_por_br(self): - videos = [MOVIES[0]] - languages = {Language('por', 'BR')} - subtitles = list_subtitles(videos, languages) - self.assertEqual(len(subtitles), len(videos)) - self.assertGreater(len(subtitles[videos[0]]), 0) - - def test_list_subtitles_episodes(self): - videos = [EPISODES[0], EPISODES[1]] - languages = {Language('eng'), Language('fra')} - 
subtitles = list_subtitles(videos, languages) - self.assertEqual(len(subtitles), len(videos)) - self.assertGreater(len(subtitles[videos[0]]), 0) - - def test_download_subtitles(self): - videos = [EPISODES[0]] - for video in videos: - video.name = os.path.join(TEST_DIR, os.path.split(video.name)[1]) - languages = {Language('eng')} - subtitles = list_subtitles(videos, languages) - download_subtitles(subtitles[videos[0]][:5]) - self.assertGreaterEqual(len([s for s in subtitles[videos[0]] if s.content is not None]), 4) - - def test_download_best_subtitles(self): - videos = [EPISODES[0], EPISODES[1]] - for video in videos: - video.name = os.path.join(TEST_DIR, os.path.split(video.name)[1]) - languages = {Language('eng'), Language('fra')} - subtitles = download_best_subtitles(videos, languages) - for video in videos: - self.assertIn(video, subtitles) - self.assertEqual(len(subtitles[video]), 2) - - def test_save_subtitles(self): - videos = [EPISODES[0], EPISODES[1]] - for video in videos: - video.name = os.path.join(TEST_DIR, os.path.split(video.name)[1]) - languages = {Language('eng'), Language('fra')} - subtitles = list_subtitles(videos, languages) - - # make a list of subtitles to download (one per language per video) - subtitles_to_download = [] - for video, video_subtitles in subtitles.items(): - video_subtitle_languages = set() - for video_subtitle in video_subtitles: - if video_subtitle.language in video_subtitle_languages: - continue - subtitles_to_download.append(video_subtitle) - video_subtitle_languages.add(video_subtitle.language) - if video_subtitle_languages == languages: - break - self.assertEqual(len(subtitles_to_download), 4) - - # download - download_subtitles(subtitles_to_download) - save_subtitles(subtitles) - for video in videos: - self.assertTrue(os.path.exists(os.path.splitext(video.name)[0] + '.en.srt')) - self.assertTrue(os.path.exists(os.path.splitext(video.name)[0] + '.fr.srt')) - - def test_save_subtitles_single(self): - videos = [EPISODES[0], 
EPISODES[1]] - for video in videos: - video.name = os.path.join(TEST_DIR, os.path.split(video.name)[1]) - languages = {Language('eng'), Language('fra')} - subtitles = download_best_subtitles(videos, languages) - save_subtitles(subtitles, single=True) - for video in videos: - self.assertIn(video, subtitles) - self.assertEqual(len(subtitles[video]), 2) - self.assertTrue(os.path.exists(os.path.splitext(video.name)[0] + '.srt')) - - def test_download_best_subtitles_min_score(self): - videos = [MOVIES[0]] - for video in videos: - video.name = os.path.join(TEST_DIR, os.path.split(video.name)[1]) - languages = {Language('eng'), Language('fra')} - subtitles = download_best_subtitles(videos, languages, min_score=1000) - self.assertEqual(len(subtitles), 0) - - def test_download_best_subtitles_hearing_impaired(self): - videos = [MOVIES[0]] - for video in videos: - video.name = os.path.join(TEST_DIR, os.path.split(video.name)[1]) - languages = {Language('eng')} - subtitles = download_best_subtitles(videos, languages, hearing_impaired=True) - self.assertTrue(subtitles[videos[0]][0].hearing_impaired) - - -class VideoTestCase(TestCase): - def setUp(self): - os.mkdir(TEST_DIR) - for video in MOVIES + EPISODES: - open(os.path.join(TEST_DIR, os.path.split(video.name)[1]), 'w').close() - - def tearDown(self): - shutil.rmtree(TEST_DIR) - - def test_scan_video_movie(self): - video = MOVIES[0] - scanned_video = scan_video(os.path.join(TEST_DIR, os.path.split(video.name)[1])) - self.assertEqual(scanned_video.name, os.path.join(TEST_DIR, os.path.split(video.name)[1])) - self.assertEqual(scanned_video.title.lower(), video.title.lower()) - self.assertEqual(scanned_video.year, video.year) - self.assertEqual(scanned_video.video_codec, video.video_codec) - self.assertEqual(scanned_video.format, video.format) - self.assertEqual(scanned_video.resolution, video.resolution) - self.assertEqual(scanned_video.release_group, video.release_group) - self.assertEqual(scanned_video.subtitle_languages, 
set()) - self.assertEqual(scanned_video.hashes, {}) - self.assertIsNone(scanned_video.audio_codec) - self.assertIsNone(scanned_video.imdb_id) - self.assertEqual(scanned_video.size, 0) - - def test_scan_video_episode(self): - video = EPISODES[0] - scanned_video = scan_video(os.path.join(TEST_DIR, os.path.split(video.name)[1])) - self.assertEqual(scanned_video.name, os.path.join(TEST_DIR, os.path.split(video.name)[1])) - self.assertEqual(scanned_video.series, video.series) - self.assertEqual(scanned_video.season, video.season) - self.assertEqual(scanned_video.episode, video.episode) - self.assertEqual(scanned_video.video_codec, video.video_codec) - self.assertEqual(scanned_video.format, video.format) - self.assertEqual(scanned_video.resolution, video.resolution) - self.assertEqual(scanned_video.release_group, video.release_group) - self.assertEqual(scanned_video.subtitle_languages, set()) - self.assertEqual(scanned_video.hashes, {}) - self.assertIsNone(scanned_video.title) - self.assertIsNone(scanned_video.tvdb_id) - self.assertIsNone(scanned_video.imdb_id) - self.assertIsNone(scanned_video.audio_codec) - self.assertEqual(scanned_video.size, 0) - - def test_scan_video_subtitle_language_und(self): - video = EPISODES[0] - open(os.path.join(TEST_DIR, os.path.splitext(os.path.split(video.name)[1])[0]) + '.srt', 'w').close() - scanned_video = scan_video(os.path.join(TEST_DIR, os.path.split(video.name)[1])) - self.assertEqual(scanned_video.subtitle_languages, {Language('und')}) - - def test_scan_video_subtitles_language_eng(self): - video = EPISODES[0] - open(os.path.join(TEST_DIR, os.path.splitext(os.path.split(video.name)[1])[0]) + '.en.srt', 'w').close() - scanned_video = scan_video(os.path.join(TEST_DIR, os.path.split(video.name)[1])) - self.assertEqual(scanned_video.subtitle_languages, {Language('eng')}) - - def test_scan_video_subtitles_languages(self): - video = EPISODES[0] - open(os.path.join(TEST_DIR, os.path.splitext(os.path.split(video.name)[1])[0]) + '.en.srt', 
'w').close() - open(os.path.join(TEST_DIR, os.path.splitext(os.path.split(video.name)[1])[0]) + '.fr.srt', 'w').close() - open(os.path.join(TEST_DIR, os.path.splitext(os.path.split(video.name)[1])[0]) + '.srt', 'w').close() - scanned_video = scan_video(os.path.join(TEST_DIR, os.path.split(video.name)[1])) - self.assertEqual(scanned_video.subtitle_languages, {Language('eng'), Language('fra'), Language('und')}) - - -def suite(): - suite = TestSuite() - suite.addTest(TestLoader().loadTestsFromTestCase(ApiTestCase)) - suite.addTest(TestLoader().loadTestsFromTestCase(VideoTestCase)) - return suite - - -if __name__ == '__main__': - TextTestRunner().run(suite()) diff --git a/libs/subliminal/utils.py b/libs/subliminal/utils.py new file mode 100644 index 00000000..ac426d45 --- /dev/null +++ b/libs/subliminal/utils.py @@ -0,0 +1,152 @@ +# -*- coding: utf-8 -*- +from datetime import datetime +import hashlib +import os +import re +import struct + + +def hash_opensubtitles(video_path): + """Compute a hash using OpenSubtitles' algorithm. + + :param str video_path: path of the video. + :return: the hash. + :rtype: str + + """ + bytesize = struct.calcsize(b'' % (self.__class__.__name__, self.name) @@ -79,333 +114,108 @@ class Video(object): class Episode(Video): - """Episode :class:`Video` + """Episode :class:`Video`. - Scores are defined by a set of equations, see :func:`~subliminal.score.get_episode_equations` - - :param string series: series of the episode - :param int season: season number of the episode - :param int episode: episode number of the episode - :param string title: title of the episode - :param int year: year of series - :param int tvdb_id: TheTVDB id of the episode + :param str series: series of the episode. + :param int season: season number of the episode. + :param int episode: episode number of the episode. + :param str title: title of the episode. + :param int year: year of the series. 
+ :param bool original_series: whether the series is the first with this name. + :param int tvdb_id: TVDB id of the episode. + :param \*\*kwargs: additional parameters for the :class:`Video` constructor. """ - scores = {'format': 3, 'video_codec': 2, 'tvdb_id': 48, 'title': 12, 'imdb_id': 60, 'audio_codec': 1, 'year': 24, - 'resolution': 2, 'season': 6, 'release_group': 6, 'series': 24, 'episode': 6, 'hash': 74} + def __init__(self, name, series, season, episode, title=None, year=None, original_series=True, tvdb_id=None, + series_tvdb_id=None, series_imdb_id=None, **kwargs): + super(Episode, self).__init__(name, **kwargs) - def __init__(self, name, series, season, episode, format=None, release_group=None, resolution=None, video_codec=None, - audio_codec=None, imdb_id=None, hashes=None, size=None, subtitle_languages=None, title=None, - year=None, tvdb_id=None): - super(Episode, self).__init__(name, format, release_group, resolution, video_codec, audio_codec, imdb_id, hashes, - size, subtitle_languages) + #: Series of the episode self.series = series + + #: Season number of the episode self.season = season + + #: Episode number of the episode self.episode = episode + + #: Title of the episode self.title = title + + #: Year of series self.year = year + + #: The series is the first with this name + self.original_series = original_series + + #: TVDB id of the episode self.tvdb_id = tvdb_id + #: TVDB id of the series + self.series_tvdb_id = series_tvdb_id + + #: IMDb id of the series + self.series_imdb_id = series_imdb_id + @classmethod def fromguess(cls, name, guess): if guess['type'] != 'episode': raise ValueError('The guess must be an episode guess') - if 'series' not in guess or 'season' not in guess or 'episodeNumber' not in guess: + + if 'title' not in guess or 'episode' not in guess: raise ValueError('Insufficient data to process the guess') - return cls(name, guess['series'], guess['season'], guess['episodeNumber'], format=guess.get('format'), - 
release_group=guess.get('releaseGroup'), resolution=guess.get('screenSize'), - video_codec=guess.get('videoCodec'), audio_codec=guess.get('audioCodec'), - title=guess.get('title'), year=guess.get('year')) + + return cls(name, guess['title'], guess.get('season', 1), guess['episode'], title=guess.get('episode_title'), + year=guess.get('year'), format=guess.get('format'), original_series='year' not in guess, + release_group=guess.get('release_group'), resolution=guess.get('screen_size'), + video_codec=guess.get('video_codec'), audio_codec=guess.get('audio_codec')) @classmethod def fromname(cls, name): - return cls.fromguess(os.path.split(name)[1], guessit.guess_episode_info(name)) + return cls.fromguess(name, guessit(name, {'type': 'episode'})) def __repr__(self): if self.year is None: return '<%s [%r, %dx%d]>' % (self.__class__.__name__, self.series, self.season, self.episode) + return '<%s [%r, %d, %dx%d]>' % (self.__class__.__name__, self.series, self.year, self.season, self.episode) class Movie(Video): - """Movie :class:`Video` + """Movie :class:`Video`. - Scores are defined by a set of equations, see :func:`~subliminal.score.get_movie_equations` - - :param string title: title of the movie - :param int year: year of the movie + :param str title: title of the movie. + :param int year: year of the movie. + :param \*\*kwargs: additional parameters for the :class:`Video` constructor. 
""" - scores = {'format': 3, 'video_codec': 2, 'title': 13, 'imdb_id': 34, 'audio_codec': 1, 'year': 7, 'resolution': 2, - 'release_group': 6, 'hash': 34} + def __init__(self, name, title, year=None, **kwargs): + super(Movie, self).__init__(name, **kwargs) - def __init__(self, name, title, format=None, release_group=None, resolution=None, video_codec=None, audio_codec=None, - imdb_id=None, hashes=None, size=None, subtitle_languages=None, year=None): - super(Movie, self).__init__(name, format, release_group, resolution, video_codec, audio_codec, imdb_id, hashes, - size, subtitle_languages) + #: Title of the movie self.title = title + + #: Year of the movie self.year = year @classmethod def fromguess(cls, name, guess): if guess['type'] != 'movie': raise ValueError('The guess must be a movie guess') + if 'title' not in guess: raise ValueError('Insufficient data to process the guess') - return cls(name, guess['title'], format=guess.get('format'), release_group=guess.get('releaseGroup'), - resolution=guess.get('screenSize'), video_codec=guess.get('videoCodec'), - audio_codec=guess.get('audioCodec'),year=guess.get('year')) + + return cls(name, guess['title'], format=guess.get('format'), release_group=guess.get('release_group'), + resolution=guess.get('screen_size'), video_codec=guess.get('video_codec'), + audio_codec=guess.get('audio_codec'), year=guess.get('year')) @classmethod def fromname(cls, name): - return cls.fromguess(os.path.split(name)[1], guessit.guess_movie_info(name)) + return cls.fromguess(name, guessit(name, {'type': 'movie'})) def __repr__(self): if self.year is None: return '<%s [%r]>' % (self.__class__.__name__, self.title) + return '<%s [%r, %d]>' % (self.__class__.__name__, self.title, self.year) - - -def scan_subtitle_languages(path): - """Search for subtitles with alpha2 extension from a video `path` and return their language - - :param string path: path to the video - :return: found subtitle languages - :rtype: set - - """ - language_extensions = 
tuple('.' + c for c in babelfish.language_converters['alpha2'].codes) - dirpath, filename = os.path.split(path) - subtitles = set() - for p in os.listdir(dirpath): - if not isinstance(p, bytes) and p.startswith(os.path.splitext(filename)[0]) and p.endswith(SUBTITLE_EXTENSIONS): - if os.path.splitext(p)[0].endswith(language_extensions): - subtitles.add(babelfish.Language.fromalpha2(os.path.splitext(p)[0][-2:])) - else: - subtitles.add(babelfish.Language('und')) - logger.debug('Found subtitles %r', subtitles) - return subtitles - - -def scan_video(path, subtitles=True, embedded_subtitles=True): - """Scan a video and its subtitle languages from a video `path` - - :param string path: absolute path to the video - :param bool subtitles: scan for subtitles with the same name - :param bool embedded_subtitles: scan for embedded subtitles - :return: the scanned video - :rtype: :class:`Video` - :raise: ValueError if cannot guess enough information from the path - - """ - dirpath, filename = os.path.split(path) - logger.info('Scanning video %r in %r', filename, dirpath) - video = Video.fromguess(path, guessit.guess_file_info(path)) - video.size = os.path.getsize(path) - if video.size > 10485760: - logger.debug('Size is %d', video.size) - video.hashes['opensubtitles'] = hash_opensubtitles(path) - video.hashes['thesubdb'] = hash_thesubdb(path) - logger.debug('Computed hashes %r', video.hashes) - else: - logger.warning('Size is lower than 10MB: hashes not computed') - if subtitles: - video.subtitle_languages |= scan_subtitle_languages(path) - # enzyme - try: - if filename.endswith('.mkv'): - with open(path, 'rb') as f: - mkv = enzyme.MKV(f) - if mkv.video_tracks: - video_track = mkv.video_tracks[0] - # resolution - if video_track.height in (480, 720, 1080): - if video_track.interlaced: - video.resolution = '%di' % video_track.height - logger.debug('Found resolution %s with enzyme', video.resolution) - else: - video.resolution = '%dp' % video_track.height - logger.debug('Found 
resolution %s with enzyme', video.resolution) - # video codec - if video_track.codec_id == 'V_MPEG4/ISO/AVC': - video.video_codec = 'h264' - logger.debug('Found video_codec %s with enzyme', video.video_codec) - elif video_track.codec_id == 'V_MPEG4/ISO/SP': - video.video_codec = 'DivX' - logger.debug('Found video_codec %s with enzyme', video.video_codec) - elif video_track.codec_id == 'V_MPEG4/ISO/ASP': - video.video_codec = 'XviD' - logger.debug('Found video_codec %s with enzyme', video.video_codec) - else: - logger.warning('MKV has no video track') - if mkv.audio_tracks: - audio_track = mkv.audio_tracks[0] - # audio codec - if audio_track.codec_id == 'A_AC3': - video.audio_codec = 'AC3' - logger.debug('Found audio_codec %s with enzyme', video.audio_codec) - elif audio_track.codec_id == 'A_DTS': - video.audio_codec = 'DTS' - logger.debug('Found audio_codec %s with enzyme', video.audio_codec) - elif audio_track.codec_id == 'A_AAC': - video.audio_codec = 'AAC' - logger.debug('Found audio_codec %s with enzyme', video.audio_codec) - else: - logger.warning('MKV has no audio track') - if mkv.subtitle_tracks: - # embedded subtitles - if embedded_subtitles: - embedded_subtitle_languages = set() - for st in mkv.subtitle_tracks: - if st.language: - try: - embedded_subtitle_languages.add(babelfish.Language.fromalpha3b(st.language)) - except babelfish.Error: - logger.error('Embedded subtitle track language %r is not a valid language', st.language) - embedded_subtitle_languages.add(babelfish.Language('und')) - elif st.name: - try: - embedded_subtitle_languages.add(babelfish.Language.fromname(st.name)) - except babelfish.Error: - logger.debug('Embedded subtitle track name %r is not a valid language', st.name) - embedded_subtitle_languages.add(babelfish.Language('und')) - else: - embedded_subtitle_languages.add(babelfish.Language('und')) - logger.debug('Found embedded subtitle %r with enzyme', embedded_subtitle_languages) - video.subtitle_languages |= embedded_subtitle_languages 
- else: - logger.debug('MKV has no subtitle track') - except enzyme.Error: - logger.exception('Parsing video metadata with enzyme failed') - return video - - -def scan_videos(paths, subtitles=True, embedded_subtitles=True, age=None): - """Scan `paths` for videos and their subtitle languages - - :params paths: absolute paths to scan for videos - :type paths: list of string - :param bool subtitles: scan for subtitles with the same name - :param bool embedded_subtitles: scan for embedded subtitles - :param age: age of the video, if any - :type age: datetime.timedelta or None - :return: the scanned videos - :rtype: list of :class:`Video` - - """ - videos = [] - # scan files - for filepath in [p for p in paths if os.path.isfile(p)]: - if age is not None: - try: - video_age = datetime.datetime.now() - datetime.datetime.fromtimestamp(os.path.getmtime(filepath)) - except ValueError: - logger.exception('Error while getting video age, skipping it') - continue - if video_age > age: - logger.info('Skipping video %r: older than %r', filepath, age) - continue - try: - videos.append(scan_video(filepath, subtitles, embedded_subtitles)) - except ValueError as e: - logger.error('Skipping video: %s', e) - continue - # scan directories - for path in [p for p in paths if os.path.isdir(p)]: - logger.info('Scanning directory %r', path) - for dirpath, dirnames, filenames in os.walk(path): - # skip badly encoded directories - if isinstance(dirpath, bytes): - logger.error('Skipping badly encoded directory %r', dirpath.decode('utf-8', errors='replace')) - continue - # skip badly encoded and hidden sub directories - for dirname in list(dirnames): - if isinstance(dirname, bytes): - logger.error('Skipping badly encoded dirname %r in %r', dirname.decode('utf-8', errors='replace'), - dirpath) - dirnames.remove(dirname) - elif dirname.startswith('.'): - logger.debug('Skipping hidden dirname %r in %r', dirname, dirpath) - dirnames.remove(dirname) - # scan for videos - for filename in filenames: - # 
skip badly encoded files - if isinstance(filename, bytes): - logger.error('Skipping badly encoded filename %r in %r', filename.decode('utf-8', errors='replace'), - dirpath) - continue - # filter videos - if not filename.endswith(VIDEO_EXTENSIONS): - continue - # skip hidden files - if filename.startswith('.'): - logger.debug('Skipping hidden filename %r in %r', filename, dirpath) - continue - filepath = os.path.join(dirpath, filename) - # skip links - if os.path.islink(filepath): - logger.debug('Skipping link %r in %r', filename, dirpath) - continue - if age is not None: - try: - video_age = datetime.datetime.now() - datetime.datetime.fromtimestamp(os.path.getmtime(filepath)) - except ValueError: - logger.exception('Error while getting video age, skipping it') - continue - if video_age > age: - logger.info('Skipping video %r: older than %r', filepath, age) - continue - try: - video = scan_video(filepath, subtitles, embedded_subtitles) - except ValueError as e: - logger.error('Skipping video: %s', e) - continue - videos.append(video) - return videos - - -def hash_opensubtitles(video_path): - """Compute a hash using OpenSubtitles' algorithm - - :param string video_path: path of the video - :return: the hash - :rtype: string - - """ - bytesize = struct.calcsize(b' Date: Thu, 22 Sep 2016 07:11:58 +0930 Subject: [PATCH 71/82] fix guessit call. 
Fixes #1105 --- core/nzbToMediaUtil.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/core/nzbToMediaUtil.py b/core/nzbToMediaUtil.py index 4cfd73fb..f25a17a9 100644 --- a/core/nzbToMediaUtil.py +++ b/core/nzbToMediaUtil.py @@ -630,7 +630,7 @@ def getDirs(section, subsection, link='hard'): # create new path newPath = os.path.join(path, "{0} - {1}".format(sanitizeName(artist), sanitizeName(album))) elif fileExt in core.MEDIACONTAINER: - f = guessit.guess_video_info(mediafile) + f = guessit.guessit(os.path.basename(mediafile)) # get title title = f.get('series') or f.get('title') @@ -1046,7 +1046,7 @@ def find_imdbid(dirName, inputName): logger.info("Found imdbID [{0}] from DNZB-MoreInfo".format(imdbid)) return imdbid logger.info('Searching IMDB for imdbID ...') - guess = guessit.guess_movie_info(inputName) + guess = guessit.guessit(inputName) if guess: # Movie Title title = None From 879584c107581e89e66f99e0b49d442cd2e6f4ed Mon Sep 17 00:00:00 2001 From: clinton-hall Date: Thu, 22 Sep 2016 09:28:27 +0930 Subject: [PATCH 72/82] allow guessit to work of full file path --- core/nzbToMediaUtil.py | 2 +- tests/general.py | 4 +--- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/core/nzbToMediaUtil.py b/core/nzbToMediaUtil.py index f25a17a9..37d56733 100644 --- a/core/nzbToMediaUtil.py +++ b/core/nzbToMediaUtil.py @@ -630,7 +630,7 @@ def getDirs(section, subsection, link='hard'): # create new path newPath = os.path.join(path, "{0} - {1}".format(sanitizeName(artist), sanitizeName(album))) elif fileExt in core.MEDIACONTAINER: - f = guessit.guessit(os.path.basename(mediafile)) + f = guessit.guessit(mediafile) # get title title = f.get('series') or f.get('title') diff --git a/tests/general.py b/tests/general.py index 8610bd45..5d1542bb 100755 --- a/tests/general.py +++ b/tests/general.py @@ -44,9 +44,9 @@ from babelfish import Language lan = 'pt' lan = Language.fromalpha2(lan) print lan.alpha3 +vidName = "/volume1/Public/Movies/A Few Good 
Men/A Few Good Men(1992).mkv" inputName = "in.the.name.of.ben.hur.2016.bdrip.x264-rusted.nzb" guess = guessit.guessit(inputName) -print guess if guess: # Movie Title title = None @@ -56,7 +56,6 @@ if guess: year = None if 'year' in guess: year = guess['year'] - url = "http://www.omdbapi.com" r = requests.get(url, params={'y': year, 't': title}, verify=False, timeout=(60, 300)) results = r.json() @@ -66,7 +65,6 @@ import subliminal subliminal.region.configure('dogpile.cache.dbm', arguments={'filename': 'cachefile.dbm'}) languages = set() languages.add(lan) -vidName = "/volume1/Public/Movies/A Few Good Men/A Few Good Men(1992).mkv" video = subliminal.scan_video(vidName) subtitles = subliminal.download_best_subtitles({video}, languages) subliminal.save_subtitles(video, subtitles[video]) From 14be873c7ce1d7f2f7587d262231563a60ff62f1 Mon Sep 17 00:00:00 2001 From: clinton-hall Date: Thu, 29 Sep 2016 07:02:28 +0930 Subject: [PATCH 73/82] fix another int(Section) error. --- core/nzbToMediaUtil.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/core/nzbToMediaUtil.py b/core/nzbToMediaUtil.py index 37d56733..d916ff9c 100644 --- a/core/nzbToMediaUtil.py +++ b/core/nzbToMediaUtil.py @@ -734,6 +734,7 @@ def rmDir(dirName): def cleanDir(path, section, subsection): + cfg = dict(core.CFG[section][subsection]) if not os.path.exists(path): logger.info('Directory {0} has been processed and removed ...'.format(path), 'CLEANDIR') return @@ -741,8 +742,8 @@ def cleanDir(path, section, subsection): logger.info('Doing Forceful Clean of {0}'.format(path), 'CLEANDIR') rmDir(path) return - minSize = int(core.CFG[section][subsection].get('minSize', 0)) - delete_ignored = int(core.CFG[section][subsection].get('delete_ignored', 0)) + minSize = int(cfg.get('minSize', 0)) + delete_ignored = int(cfg.get('delete_ignored', 0)) try: num_files = len(listMediaFiles(path, minSize=minSize, delete_ignored=delete_ignored)) except: From cb0de3ca988d86abddb3b3d870c9dfc4c8d63ce0 Mon 
Sep 17 00:00:00 2001 From: Marvin Pinto Date: Mon, 3 Oct 2016 10:23:18 -0400 Subject: [PATCH 74/82] Add the ability to set octal permissions on the processed files prior to handing it off to Sickrage/Couchpotato If set, the `chmodDirectory` option instructs the nzbToMedia post-processing script to set the recursive directory permissions to the octal value specified. --- autoProcessMedia.cfg.spec | 4 ++++ core/autoProcess/autoProcessMovie.py | 6 ++++++ core/autoProcess/autoProcessTV.py | 7 +++++++ 3 files changed, 17 insertions(+) diff --git a/autoProcessMedia.cfg.spec b/autoProcessMedia.cfg.spec index f1f47058..02992a4c 100644 --- a/autoProcessMedia.cfg.spec +++ b/autoProcessMedia.cfg.spec @@ -64,6 +64,8 @@ remote_path = 0 ##### Set to path where download client places completed downloads locally for this category watch_dir = + ##### Set the recursive directory permissions to the following (0 to disable) + chmodDirectory = 0 [SickBeard] #### autoProcessing for TV Series @@ -99,6 +101,8 @@ remote_path = 0 ##### Set to path where download client places completed downloads locally for this category watch_dir = + ##### Set the recursive directory permissions to the following (0 to disable) + chmodDirectory = 0 [NzbDrone] #### autoProcessing for TV Series diff --git a/core/autoProcess/autoProcessMovie.py b/core/autoProcess/autoProcessMovie.py index 7b89e40f..c0a2cd19 100644 --- a/core/autoProcess/autoProcessMovie.py +++ b/core/autoProcess/autoProcessMovie.py @@ -195,6 +195,12 @@ class autoProcessMovie(object): if result == 0: logger.debug("Transcoding succeeded for files in {0}".format(dirName), section) dirName = newDirName + + chmod_directory = int(cfg.get("chmodDirectory", 0), 8) + logger.debug("Config setting 'chmodDirectory' currently set to {0}".format(oct(chmod_directory)), section) + if chmod_directory: + logger.info("Attempting to set the octal permission of '{0}' on directory '{1}'".format(oct(chmod_directory), dirName), section) + core.rchmod(dirName, 
chmod_directory) else: logger.error("Transcoding failed for files in {0}".format(dirName), section) return [1, "{0}: Failed to post-process - Transcoding failed".format(section)] diff --git a/core/autoProcess/autoProcessTV.py b/core/autoProcess/autoProcessTV.py index e72b3ab0..04cd2039 100644 --- a/core/autoProcess/autoProcessTV.py +++ b/core/autoProcess/autoProcessTV.py @@ -163,12 +163,19 @@ class autoProcessTV(object): if result == 0: logger.debug("SUCCESS: Transcoding succeeded for files in {0}".format(dirName), section) dirName = newDirName + + chmod_directory = int(cfg.get("chmodDirectory", 0), 8) + logger.debug("Config setting 'chmodDirectory' currently set to {0}".format(oct(chmod_directory)), section) + if chmod_directory: + logger.info("Attempting to set the octal permission of '{0}' on directory '{1}'".format(oct(chmod_directory), dirName), section) + core.rchmod(dirName, chmod_directory) else: logger.error("FAILED: Transcoding failed for files in {0}".format(dirName), section) return [1, "{0}: Failed to post-process - Transcoding failed".format(section)] # configure SB params to pass fork_params['quiet'] = 1 + fork_params['proc_type'] = 'manual' if inputName is not None: fork_params['nzbName'] = inputName From 2c8b2fc8cf5fed3102ebc64f440fbce927209ea0 Mon Sep 17 00:00:00 2001 From: Marvin Pinto Date: Mon, 3 Oct 2016 10:47:35 -0400 Subject: [PATCH 75/82] Typecast the 'bit_rate' and 'channels' values into floats before attempting to convert them to ints Otherwise we end up in a situation where audio3[0].get("bit_rate", 0) returns the string `"192000.000000"`, which cannot be converted into an int. Example: ``` Python 2.7.6 (default, Jun 22 2015, 17:58:13) [GCC 4.8.2] on linux2 Type "help", "copyright", "credits" or "license" for more information. 
>>> int("192000.000000") Traceback (most recent call last): File "", line 1, in ValueError: invalid literal for int() with base 10: '192000.000000' >>> int(float("192000.000000")) 192000 ``` Relevant log entry: ``` [10:50:57] [DEBUG]::TRANSCODER: ******* Specified bit rate is: 192000.000000 Traceback (most recent call last): File "/opt/nzbtomedia/nzbToSickBeard.py", line 254, in result = nzbToMedia.main(sys.argv, section) File "/opt/nzbtomedia/nzbToMedia.py", line 726, in main download_id='') File "/opt/nzbtomedia/nzbToMedia.py", line 615, in process download_id, inputCategory, failureLink) File "/opt/nzbtomedia/core/autoProcess/autoProcessTV.py", line 162, in processEpisode result, newDirName = transcoder.Transcode_directory(dirName) File "/opt/nzbtomedia/core/transcoder/transcoder.py", line 708, in Transcode_directory command = buildCommands(file, newDir, movieName, bitbucket) File "/opt/nzbtomedia/core/transcoder/transcoder.py", line 278, in buildCommands bitrate = int(audio1[0].get("bit_rate", 0)) / 1000 ValueError: invalid literal for int() with base 10: '192000.000000' Exception TypeError: "'NoneType' object is not callable" in > ignored ``` --- core/transcoder/transcoder.py | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/core/transcoder/transcoder.py b/core/transcoder/transcoder.py index cdb8b233..2ddaf909 100644 --- a/core/transcoder/transcoder.py +++ b/core/transcoder/transcoder.py @@ -268,20 +268,20 @@ def buildCommands(file, newDir, movieName, bitbucket): if audio2: # right language and codec... 
map_cmd.extend(['-map', '0:{index}'.format(index=audio2[0]["index"])]) a_mapped.extend([audio2[0]["index"]]) - bitrate = int(audio2[0].get("bit_rate", 0)) / 1000 - channels = int(audio2[0].get("channels", 0)) + bitrate = int(float(audio2[0].get("bit_rate", 0))) / 1000 + channels = int(float(audio2[0].get("channels", 0))) audio_cmd.extend(['-c:a:{0}'.format(used_audio), 'copy']) elif audio1: # right language wrong codec. map_cmd.extend(['-map', '0:{index}'.format(index=audio1[0]["index"])]) a_mapped.extend([audio1[0]["index"]]) - bitrate = int(audio1[0].get("bit_rate", 0)) / 1000 - channels = int(audio1[0].get("channels", 0)) + bitrate = int(float(audio1[0].get("bit_rate", 0))) / 1000 + channels = int(float(audio1[0].get("channels", 0))) audio_cmd.extend(['-c:a:{0}'.format(used_audio), core.ACODEC if core.ACODEC else 'copy']) elif audio3: # just pick the default audio track map_cmd.extend(['-map', '0:{index}'.format(index=audio3[0]["index"])]) a_mapped.extend([audio3[0]["index"]]) - bitrate = int(audio3[0].get("bit_rate", 0)) / 1000 - channels = int(audio3[0].get("channels", 0)) + bitrate = int(float(audio3[0].get("bit_rate", 0))) / 1000 + channels = int(float(audio3[0].get("channels", 0))) audio_cmd.extend(['-c:a:{0}'.format(used_audio), core.ACODEC if core.ACODEC else 'copy']) if core.ACHANNELS and channels and channels > core.ACHANNELS: @@ -305,14 +305,14 @@ def buildCommands(file, newDir, movieName, bitbucket): if audio4: # right language and codec. map_cmd.extend(['-map', '0:{index}'.format(index=audio4[0]["index"])]) a_mapped.extend([audio4[0]["index"]]) - bitrate = int(audio4[0].get("bit_rate", 0)) / 1000 - channels = int(audio4[0].get("channels", 0)) + bitrate = int(float(audio4[0].get("bit_rate", 0))) / 1000 + channels = int(float(audio4[0].get("channels", 0))) audio_cmd2.extend(['-c:a:{0}'.format(used_audio), 'copy']) elif audio1: # right language wrong codec. 
map_cmd.extend(['-map', '0:{index}'.format(index=audio1[0]["index"])]) a_mapped.extend([audio1[0]["index"]]) - bitrate = int(audio1[0].get("bit_rate", 0)) / 1000 - channels = int(audio1[0].get("channels", 0)) + bitrate = int(float(audio1[0].get("bit_rate", 0))) / 1000 + channels = int(float(audio1[0].get("channels", 0))) if core.ACODEC2: audio_cmd2.extend(['-c:a:{0}'.format(used_audio), core.ACODEC2]) else: @@ -320,8 +320,8 @@ def buildCommands(file, newDir, movieName, bitbucket): elif audio3: # just pick the default audio track map_cmd.extend(['-map', '0:{index}'.format(index=audio3[0]["index"])]) a_mapped.extend([audio3[0]["index"]]) - bitrate = int(audio3[0].get("bit_rate", 0)) / 1000 - channels = int(audio3[0].get("channels", 0)) + bitrate = int(float(audio3[0].get("bit_rate", 0))) / 1000 + channels = int(float(audio3[0].get("channels", 0))) if core.ACODEC2: audio_cmd2.extend(['-c:a:{0}'.format(used_audio), core.ACODEC2]) else: @@ -350,8 +350,8 @@ def buildCommands(file, newDir, movieName, bitbucket): used_audio += 1 map_cmd.extend(['-map', '0:{index}'.format(index=audio["index"])]) audio_cmd3 = [] - bitrate = int(audio.get("bit_rate", 0)) / 1000 - channels = int(audio.get("channels", 0)) + bitrate = int(float(audio.get("bit_rate", 0))) / 1000 + channels = int(float(audio.get("channels", 0))) if audio["codec_name"] in core.ACODEC3_ALLOW: audio_cmd3.extend(['-c:a:{0}'.format(used_audio), 'copy']) else: From 07f419640c2eb64ba04eb614e88d08752117545d Mon Sep 17 00:00:00 2001 From: clinton-hall Date: Fri, 7 Oct 2016 22:47:01 +1030 Subject: [PATCH 76/82] catch errors if not audio codec name. 
Fixes #1109 --- core/transcoder/transcoder.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/core/transcoder/transcoder.py b/core/transcoder/transcoder.py index cdb8b233..5554ed98 100644 --- a/core/transcoder/transcoder.py +++ b/core/transcoder/transcoder.py @@ -259,7 +259,10 @@ def buildCommands(file, newDir, movieName, bitbucket): audio1 = [item for item in audioStreams if item["tags"]["language"] == core.ALANGUAGE] except: # no language tags. Assume only 1 language. audio1 = audioStreams - audio2 = [item for item in audio1 if item["codec_name"] in core.ACODEC_ALLOW] + try: + audio2 = [item for item in audio1 if item["codec_name"] in core.ACODEC_ALLOW] + except: + audio2 = [] try: audio3 = [item for item in audioStreams if item["tags"]["language"] != core.ALANGUAGE] except: @@ -301,7 +304,10 @@ def buildCommands(file, newDir, movieName, bitbucket): if core.ACODEC2_ALLOW: used_audio += 1 - audio4 = [item for item in audio1 if item["codec_name"] in core.ACODEC2_ALLOW] + try: + audio4 = [item for item in audio1 if item["codec_name"] in core.ACODEC2_ALLOW] + except: + audio4 = [] if audio4: # right language and codec. map_cmd.extend(['-map', '0:{index}'.format(index=audio4[0]["index"])]) a_mapped.extend([audio4[0]["index"]]) From d7e0eba6f6b2392fd77ddace9ffeccecf5f90739 Mon Sep 17 00:00:00 2001 From: Clinton Hall Date: Tue, 11 Oct 2016 16:56:44 +1030 Subject: [PATCH 77/82] Allow manual scans to continue. 
Fixes #1112 --- core/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/core/__init__.py b/core/__init__.py index c2851748..d94a944d 100644 --- a/core/__init__.py +++ b/core/__init__.py @@ -49,8 +49,8 @@ from core.transcoder import transcoder from core.databases import mainDB # Client Agents -NZB_CLIENTS = ['sabnzbd', 'nzbget'] -TORRENT_CLIENTS = ['transmission', 'deluge', 'utorrent', 'rtorrent', 'other'] +NZB_CLIENTS = ['sabnzbd', 'nzbget', 'manual'] +TORRENT_CLIENTS = ['transmission', 'deluge', 'utorrent', 'rtorrent', 'other', 'manual'] # sabnzbd constants SABNZB_NO_OF_ARGUMENTS = 8 From 3d915986b0084d41d28c1bb4673926e85396f730 Mon Sep 17 00:00:00 2001 From: clinton-hall Date: Wed, 12 Oct 2016 07:05:48 +1030 Subject: [PATCH 78/82] revert to 7zip if others missing. Fixes #1015 --- core/extractor/extractor.py | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/core/extractor/extractor.py b/core/extractor/extractor.py index 7cbd34b9..ed887187 100644 --- a/core/extractor/extractor.py +++ b/core/extractor/extractor.py @@ -47,13 +47,15 @@ def extract(filePath, outputDestination): for cmd in required_cmds: if call(['which', cmd], stdout=devnull, stderr=devnull): # note, returns 0 if exists, or 1 if doesn't exist. 
- if cmd == "7zr" and not call(["which", "7z"]): # we do have "7z" command - EXTRACT_COMMANDS[".7z"] = ["7z", "x"] - elif cmd == "7zr" and not call(["which", "7za"]): # we do have "7za" command - EXTRACT_COMMANDS[".7z"] = ["7za", "x"] - else: - for k, v in EXTRACT_COMMANDS.items(): - if cmd in v[0]: + for k, v in EXTRACT_COMMANDS.items(): + if cmd in v[0]: + if not call(["which", "7zr"], stdout=devnull, stderr=devnull): # we do have "7zr" + EXTRACT_COMMANDS[k] = ["7zr", "x", "-y"] + elif not call(["which", "7z"], stdout=devnull, stderr=devnull): # we do have "7z" + EXTRACT_COMMANDS[k] = ["7z", "x", "-y"] + elif not call(["which", "7za"], stdout=devnull, stderr=devnull): # we do have "7za" + EXTRACT_COMMANDS[k] = ["7za", "x", "-y"] + else: core.logger.error("EXTRACTOR: {cmd} not found, " "disabling support for {feature}".format (cmd=cmd, feature=k)) From 26d938eba032e9c848880d7b310cb97ce955cf7a Mon Sep 17 00:00:00 2001 From: clinton-hall Date: Sat, 15 Oct 2016 08:35:55 +1030 Subject: [PATCH 79/82] removed extra default that casued int() to fail. 
Fixes #1109 --- core/autoProcess/autoProcessMovie.py | 2 +- core/autoProcess/autoProcessTV.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/core/autoProcess/autoProcessMovie.py b/core/autoProcess/autoProcessMovie.py index c0a2cd19..e4687fb8 100644 --- a/core/autoProcess/autoProcessMovie.py +++ b/core/autoProcess/autoProcessMovie.py @@ -196,7 +196,7 @@ class autoProcessMovie(object): logger.debug("Transcoding succeeded for files in {0}".format(dirName), section) dirName = newDirName - chmod_directory = int(cfg.get("chmodDirectory", 0), 8) + chmod_directory = int(cfg.get("chmodDirectory", 0)) logger.debug("Config setting 'chmodDirectory' currently set to {0}".format(oct(chmod_directory)), section) if chmod_directory: logger.info("Attempting to set the octal permission of '{0}' on directory '{1}'".format(oct(chmod_directory), dirName), section) diff --git a/core/autoProcess/autoProcessTV.py b/core/autoProcess/autoProcessTV.py index 04cd2039..0fda55bc 100644 --- a/core/autoProcess/autoProcessTV.py +++ b/core/autoProcess/autoProcessTV.py @@ -164,7 +164,7 @@ class autoProcessTV(object): logger.debug("SUCCESS: Transcoding succeeded for files in {0}".format(dirName), section) dirName = newDirName - chmod_directory = int(cfg.get("chmodDirectory", 0), 8) + chmod_directory = int(cfg.get("chmodDirectory", 0)) logger.debug("Config setting 'chmodDirectory' currently set to {0}".format(oct(chmod_directory)), section) if chmod_directory: logger.info("Attempting to set the octal permission of '{0}' on directory '{1}'".format(oct(chmod_directory), dirName), section) From 92c356a6b0ff5dbeaed7a581c497ffd83d239eaa Mon Sep 17 00:00:00 2001 From: clinton-hall Date: Sat, 15 Oct 2016 13:34:10 +1030 Subject: [PATCH 80/82] fix int conversion base 8 from string or int. 
--- core/autoProcess/autoProcessMovie.py | 2 +- core/autoProcess/autoProcessTV.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/core/autoProcess/autoProcessMovie.py b/core/autoProcess/autoProcessMovie.py index e4687fb8..af2ca947 100644 --- a/core/autoProcess/autoProcessMovie.py +++ b/core/autoProcess/autoProcessMovie.py @@ -196,7 +196,7 @@ class autoProcessMovie(object): logger.debug("Transcoding succeeded for files in {0}".format(dirName), section) dirName = newDirName - chmod_directory = int(cfg.get("chmodDirectory", 0)) + chmod_directory = int(str(cfg.get("chmodDirectory", "0")), 8) logger.debug("Config setting 'chmodDirectory' currently set to {0}".format(oct(chmod_directory)), section) if chmod_directory: logger.info("Attempting to set the octal permission of '{0}' on directory '{1}'".format(oct(chmod_directory), dirName), section) diff --git a/core/autoProcess/autoProcessTV.py b/core/autoProcess/autoProcessTV.py index 0fda55bc..f7279102 100644 --- a/core/autoProcess/autoProcessTV.py +++ b/core/autoProcess/autoProcessTV.py @@ -164,7 +164,7 @@ class autoProcessTV(object): logger.debug("SUCCESS: Transcoding succeeded for files in {0}".format(dirName), section) dirName = newDirName - chmod_directory = int(cfg.get("chmodDirectory", 0)) + chmod_directory = int(str(cfg.get("chmodDirectory", "0")), 8) logger.debug("Config setting 'chmodDirectory' currently set to {0}".format(oct(chmod_directory)), section) if chmod_directory: logger.info("Attempting to set the octal permission of '{0}' on directory '{1}'".format(oct(chmod_directory), dirName), section) From 6bfb0f4a534022281fa938a15ac95173943fc263 Mon Sep 17 00:00:00 2001 From: clinton-hall Date: Fri, 21 Oct 2016 20:56:49 +1030 Subject: [PATCH 81/82] add more logging to server tests. 
#1113 --- core/nzbToMediaUtil.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/core/nzbToMediaUtil.py b/core/nzbToMediaUtil.py index d916ff9c..0a3c8e52 100644 --- a/core/nzbToMediaUtil.py +++ b/core/nzbToMediaUtil.py @@ -1148,10 +1148,13 @@ def import_subs(filename): def server_responding(baseURL): + logger.debug("Attempting to connect to server at {0}".format(baseURL), 'SERVER') try: requests.get(baseURL, timeout=(60, 120), verify=False) + logger.debug("Server responded at {0}".format(baseURL), 'SERVER') return True except (requests.ConnectionError, requests.exceptions.Timeout): + logger.error("Server failed to responded at {0}".format(baseURL), 'SERVER') return False From 030b33485168554f536a3c579c8389c314cda4c7 Mon Sep 17 00:00:00 2001 From: clinton-hall Date: Fri, 21 Oct 2016 21:25:11 +1030 Subject: [PATCH 82/82] fix typo --- core/nzbToMediaUtil.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/nzbToMediaUtil.py b/core/nzbToMediaUtil.py index 0a3c8e52..7edc3701 100644 --- a/core/nzbToMediaUtil.py +++ b/core/nzbToMediaUtil.py @@ -1154,7 +1154,7 @@ def server_responding(baseURL): logger.debug("Server responded at {0}".format(baseURL), 'SERVER') return True except (requests.ConnectionError, requests.exceptions.Timeout): - logger.error("Server failed to responded at {0}".format(baseURL), 'SERVER') + logger.error("Server failed to respond at {0}".format(baseURL), 'SERVER') return False