Merge branch 'nightly' into patch-1

# Conflicts:
#	core/__init__.py
Author: BBsan2k
Date: 2016-10-22 10:14:05 +02:00
Commit: a3824f27bb

512 changed files with 86505 additions and 22341 deletions

.gitignore (vendored)

@@ -5,6 +5,7 @@
 *.log
 *.pid
 *.db
+*.dbm
 /userscripts/
 /logs/
 /.idea/

TorrentToMedia.py

@ -1,28 +1,26 @@
#!/usr/bin/env python2 #!/usr/bin/env python2
# coding=utf-8
import datetime import datetime
import os import os
import time
import shutil
import sys import sys
import core import core
from subprocess import Popen from libs.six import text_type
from core import logger, nzbToMediaDB from core import logger, nzbToMediaDB
from core.nzbToMediaUtil import convert_to_ascii, CharReplace, plex_update from core.nzbToMediaUtil import convert_to_ascii, CharReplace, plex_update, replace_links
from core.nzbToMediaUserScript import external_script from core.nzbToMediaUserScript import external_script
def processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID, clientAgent): def processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID, clientAgent):
status = 1 # 1 = failed | 0 = success status = 1 # 1 = failed | 0 = success
root = 0 root = 0
foundFile = 0 foundFile = 0
uniquePath = 1
if clientAgent != 'manual' and not core.DOWNLOADINFO: if clientAgent != 'manual' and not core.DOWNLOADINFO:
logger.debug('Adding TORRENT download info for directory %s to database' % (inputDirectory)) logger.debug('Adding TORRENT download info for directory {0} to database'.format(inputDirectory))
myDB = nzbToMediaDB.DBConnection() myDB = nzbToMediaDB.DBConnection()
encoded = False
inputDirectory1 = inputDirectory inputDirectory1 = inputDirectory
inputName1 = inputName inputName1 = inputName
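
The import swap above is the heart of the Python 2/3 groundwork in this commit: six's text_type is unicode on Python 2 and str on Python 3, so one name covers both interpreters. A minimal sketch of the idea (using a pip-installed six rather than the bundled libs.six, and a to_text helper that is hypothetical, not part of the project):

    from six import text_type

    def to_text(value, encoding='utf-8'):
        # Bytes get decoded; everything else is coerced with text_type,
        # which is unicode on Python 2 and str on Python 3.
        if isinstance(value, bytes):
            return value.decode(encoding)
        return text_type(value)

    print(to_text(b'Some.Release.Name'))  # 'Some.Release.Name' (u'...' on Python 2)

The diff itself simply replaces unicode(...) calls with text_type(...), as the next hunk shows.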
@@ -32,89 +30,82 @@ def processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID,
         except:
             pass

-        controlValueDict = {"input_directory": unicode(inputDirectory1)}
-        newValueDict = {"input_name": unicode(inputName1),
-                        "input_hash": unicode(inputHash),
-                        "input_id": unicode(inputID),
-                        "client_agent": unicode(clientAgent),
+        controlValueDict = {"input_directory": text_type(inputDirectory1)}
+        newValueDict = {"input_name": text_type(inputName1),
+                        "input_hash": text_type(inputHash),
+                        "input_id": text_type(inputID),
+                        "client_agent": text_type(clientAgent),
                         "status": 0,
                         "last_update": datetime.date.today().toordinal()
                         }
         myDB.upsert("downloads", newValueDict, controlValueDict)

-    logger.debug("Received Directory: %s | Name: %s | Category: %s" % (inputDirectory, inputName, inputCategory))
+    logger.debug("Received Directory: {0} | Name: {1} | Category: {2}".format(inputDirectory, inputName, inputCategory))

-    inputDirectory, inputName, inputCategory, root = core.category_search(inputDirectory, inputName,
-                                                                          inputCategory, root,
-                                                                          core.CATEGORIES)  # Confirm the category by parsing directory structure
+    # Confirm the category by parsing directory structure
+    inputDirectory, inputName, inputCategory, root = core.category_search(inputDirectory, inputName, inputCategory,
+                                                                          root, core.CATEGORIES)
     if inputCategory == "":
         inputCategory = "UNCAT"

     usercat = inputCategory
     try:
         inputName = inputName.encode(core.SYS_ENCODING)
-    except: pass
+    except UnicodeError:
+        pass
     try:
         inputDirectory = inputDirectory.encode(core.SYS_ENCODING)
-    except: pass
+    except UnicodeError:
+        pass

-    logger.debug("Determined Directory: %s | Name: %s | Category: %s" % (inputDirectory, inputName, inputCategory))
+    logger.debug("Determined Directory: {0} | Name: {1} | Category: {2}".format(
+        inputDirectory, inputName, inputCategory))

     # auto-detect section
     section = core.CFG.findsection(inputCategory).isenabled()
     if section is None:
         section = core.CFG.findsection("ALL").isenabled()
         if section is None:
-            logger.error(
-                'Category:[%s] is not defined or is not enabled. Please rename it or ensure it is enabled for the appropriate section in your autoProcessMedia.cfg and try again.' % (
-                    inputCategory))
+            logger.error('Category:[{0}] is not defined or is not enabled. '
+                         'Please rename it or ensure it is enabled for the appropriate section '
+                         'in your autoProcessMedia.cfg and try again.'.format(inputCategory))
             return [-1, ""]
         else:
             usercat = "ALL"

     if len(section) > 1:
-        logger.error(
-            'Category:[%s] is not unique, %s are using it. Please rename it or disable all other sections using the same category name in your autoProcessMedia.cfg and try again.' % (
-                usercat, section.keys()))
+        logger.error('Category:[{0}] is not unique, {1} are using it. '
+                     'Please rename it or disable all other sections using the same category name '
+                     'in your autoProcessMedia.cfg and try again.'.format(usercat, section.keys()))
         return [-1, ""]

     if section:
         sectionName = section.keys()[0]
-        logger.info('Auto-detected SECTION:%s' % (sectionName))
+        logger.info('Auto-detected SECTION:{0}'.format(sectionName))
     else:
-        logger.error("Unable to locate a section with subsection:%s enabled in your autoProcessMedia.cfg, exiting!" % (
-            inputCategory))
+        logger.error("Unable to locate a section with subsection:{0} "
+                     "enabled in your autoProcessMedia.cfg, exiting!".format(inputCategory))
         return [-1, ""]

-    try:
-        Torrent_NoLink = int(section[usercat]["Torrent_NoLink"])
-    except:
-        Torrent_NoLink = 0
-    try:
-        keep_archive = int(section[usercat]["keep_archive"])
-    except:
-        keep_archive = 0
-    try:
-        extract = int(section[usercat]['extract'])
-    except:
-        extract = 0
-    try:
-        uniquePath = int(section[usercat]["unique_path"])
-    except:
-        uniquePath = 1
+    section = dict(section[sectionName][usercat])  # Type cast to dict() to allow effective usage of .get()
+
+    Torrent_NoLink = int(section.get("Torrent_NoLink", 0))
+    keep_archive = int(section.get("keep_archive", 0))
+    extract = int(section.get('extract', 0))
+    uniquePath = int(section.get("unique_path", 1))

     if clientAgent != 'manual':
         core.pause_torrent(clientAgent, inputHash, inputID, inputName)

-    # Incase input is not directory, make sure to create one.
+    # In case input is not directory, make sure to create one.
     # This way Processing is isolated.
     if not os.path.isdir(os.path.join(inputDirectory, inputName)):
         basename = os.path.basename(inputDirectory)
         basename = core.sanitizeName(inputName) \
-            if inputName == basename else os.path.splitext(core.sanitizeName(inputName)[0])
+            if inputName == basename else os.path.splitext(core.sanitizeName(inputName))[0]
         outputDestination = os.path.join(core.OUTPUTDIRECTORY, inputCategory, basename)
     elif uniquePath:
         outputDestination = os.path.normpath(
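
The config-reading rewrite above is the most substantial cleanup in this hunk: casting the section to a plain dict lets four try/except blocks collapse into .get() calls with defaults. The same pattern in miniature, with stand-in data:

    raw_section = {'HeadPhones': {'music': {'Torrent_NoLink': '1', 'extract': '0'}}}
    section = dict(raw_section['HeadPhones']['music'])

    Torrent_NoLink = int(section.get('Torrent_NoLink', 0))
    keep_archive = int(section.get('keep_archive', 0))
    extract = int(section.get('extract', 0))
    uniquePath = int(section.get('unique_path', 1))
    print([Torrent_NoLink, keep_archive, extract, uniquePath])  # [1, 0, 0, 1]

Missing keys fall back to their defaults instead of raising, and the int() casts stay because config values arrive as strings.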
@@ -124,20 +115,21 @@ def processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID,
             core.os.path.join(core.OUTPUTDIRECTORY, inputCategory))
     try:
         outputDestination = outputDestination.encode(core.SYS_ENCODING)
-    except: pass
+    except UnicodeError:
+        pass

     if outputDestination in inputDirectory:
         outputDestination = inputDirectory

-    logger.info("Output directory set to: %s" % (outputDestination))
+    logger.info("Output directory set to: {0}".format(outputDestination))

     if core.SAFE_MODE and outputDestination == core.TORRENT_DEFAULTDIR:
-        logger.error(
-            'The output directory:[%s] is the Download Directory. Edit outputDirectory in autoProcessMedia.cfg. Exiting' % (
-                inputDirectory))
+        logger.error('The output directory:[{0}] is the Download Directory. '
+                     'Edit outputDirectory in autoProcessMedia.cfg. Exiting'.format(inputDirectory))
         return [-1, ""]

-    logger.debug("Scanning files in directory: %s" % (inputDirectory))
+    logger.debug("Scanning files in directory: {0}".format(inputDirectory))

     if sectionName == 'HeadPhones':
         core.NOFLATTEN.extend(
@@ -149,7 +141,7 @@ def processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID,
         inputFiles = core.listMediaFiles(inputDirectory, archives=False)
     else:
         inputFiles = core.listMediaFiles(inputDirectory)
-    logger.debug("Found %s files in %s" % (str(len(inputFiles)), inputDirectory))
+    logger.debug("Found {0} files in {1}".format(len(inputFiles), inputDirectory))
     for inputFile in inputFiles:
         filePath = os.path.dirname(inputFile)
         fileName, fileExt = os.path.splitext(os.path.basename(inputFile))
@@ -160,18 +152,20 @@ def processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID,
             if not os.path.basename(filePath) in outputDestination:
                 targetFile = core.os.path.join(
                     core.os.path.join(outputDestination, os.path.basename(filePath)), fullFileName)
-                logger.debug(
-                    "Setting outputDestination to %s to preserve folder structure" % (os.path.dirname(targetFile)))
+                logger.debug("Setting outputDestination to {0} to preserve folder structure".format(
+                    os.path.dirname(targetFile)))
         try:
             targetFile = targetFile.encode(core.SYS_ENCODING)
-        except: pass
+        except UnicodeError:
+            pass
         if root == 1:
             if not foundFile:
-                logger.debug("Looking for %s in: %s" % (inputName, inputFile))
+                logger.debug("Looking for {0} in: {1}".format(inputName, inputFile))
-            if (core.sanitizeName(inputName) in core.sanitizeName(inputFile)) or (
-                    core.sanitizeName(fileName) in core.sanitizeName(inputName)):
+            if any([core.sanitizeName(inputName) in core.sanitizeName(inputFile),
+                    core.sanitizeName(fileName) in core.sanitizeName(inputName)]):
                 foundFile = True
-                logger.debug("Found file %s that matches Torrent Name %s" % (fullFileName, inputName))
+                logger.debug("Found file {0} that matches Torrent Name {1}".format(
+                    fullFileName, inputName))
             else:
                 continue
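
The matching condition above is rewritten from a parenthesized or-chain to any() over a list, which reads the same but scales past two conditions. A standalone illustration with a toy stand-in for core.sanitizeName:

    def sanitize(name):
        # toy stand-in for core.sanitizeName, which normalizes separators
        return name.replace('.', ' ').replace('_', ' ').lower()

    torrent_name = 'Some.Release.720p'
    input_file = 'some release 720p.mkv'
    file_name = 'some release 720p'

    if any([sanitize(torrent_name) in sanitize(input_file),
            sanitize(file_name) in sanitize(torrent_name)]):
        print('match')  # prints: match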
@@ -183,7 +177,8 @@ def processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID,
             logger.debug("Looking for files with modified/created dates less than 5 minutes old.")
             if (mtime_lapse < datetime.timedelta(minutes=5)) or (ctime_lapse < datetime.timedelta(minutes=5)):
                 foundFile = True
-                logger.debug("Found file %s with date modifed/created less than 5 minutes ago." % (fullFileName))
+                logger.debug("Found file {0} with date modified/created less than 5 minutes ago.".format(
+                    fullFileName))
             else:
                 continue  # This file has not been recently moved or created, skip it
@@ -192,15 +187,16 @@ def processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID,
                 core.copy_link(inputFile, targetFile, core.USELINK)
                 core.rmReadOnly(targetFile)
             except:
-                logger.error("Failed to link: %s to %s" % (inputFile, targetFile))
+                logger.error("Failed to link: {0} to {1}".format(inputFile, targetFile))

     inputName, outputDestination = convert_to_ascii(inputName, outputDestination)

     if extract == 1:
-        logger.debug('Checking for archives to extract in directory: %s' % (inputDirectory))
+        logger.debug('Checking for archives to extract in directory: {0}'.format(inputDirectory))
         core.extractFiles(inputDirectory, outputDestination, keep_archive)

-    if not inputCategory in core.NOFLATTEN:  # don't flatten hp in case multi cd albums, and we need to copy this back later.
+    if inputCategory not in core.NOFLATTEN:
+        # don't flatten hp in case multi cd albums, and we need to copy this back later.
         core.flatten(outputDestination)

     # Now check if video files exist in destination:
@@ -208,51 +204,55 @@ def processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID,
         numVideos = len(
             core.listMediaFiles(outputDestination, media=True, audio=False, meta=False, archives=False))
         if numVideos > 0:
-            logger.info("Found %s media files in %s" % (numVideos, outputDestination))
+            logger.info("Found {0} media files in {1}".format(numVideos, outputDestination))
             status = 0
         elif extract != 1:
-            logger.info("Found no media files in %s. Sending to %s to process" % (outputDestination, sectionName))
+            logger.info("Found no media files in {0}. Sending to {1} to process".format(outputDestination, sectionName))
             status = 0
         else:
-            logger.warning("Found no media files in %s" % outputDestination)
+            logger.warning("Found no media files in {0}".format(outputDestination))

-    # Only these sections can handle failed downloads so make sure everything else gets through without the check for failed
-    if not sectionName in ['CouchPotato', 'SickBeard', 'NzbDrone']:
+    # Only these sections can handle failed downloads
+    # so make sure everything else gets through without the check for failed
+    if sectionName not in ['CouchPotato', 'SickBeard', 'NzbDrone']:
         status = 0

-    logger.info("Calling %s:%s to post-process:%s" % (sectionName, usercat, inputName))
+    logger.info("Calling {0}:{1} to post-process:{2}".format(sectionName, usercat, inputName))

     if core.TORRENT_CHMOD_DIRECTORY:
         core.rchmod(outputDestination, core.TORRENT_CHMOD_DIRECTORY)

-    result = [ 0, "" ]
+    result = [0, ""]
     if sectionName == 'UserScript':
-        result = external_script(outputDestination, inputName, inputCategory, section[usercat])
+        result = external_script(outputDestination, inputName, inputCategory, section)
     elif sectionName == 'CouchPotato':
-        result = core.autoProcessMovie().process(sectionName,outputDestination, inputName, status, clientAgent, inputHash,
-                                                 inputCategory)
+        result = core.autoProcessMovie().process(sectionName, outputDestination, inputName,
+                                                 status, clientAgent, inputHash, inputCategory)
-    elif sectionName in ['SickBeard','NzbDrone']:
+    elif sectionName in ['SickBeard', 'NzbDrone']:
         if inputHash:
             inputHash = inputHash.upper()
-        result = core.autoProcessTV().processEpisode(sectionName,outputDestination, inputName, status, clientAgent,
-                                                     inputHash, inputCategory)
+        result = core.autoProcessTV().processEpisode(sectionName, outputDestination, inputName,
+                                                     status, clientAgent, inputHash, inputCategory)
     elif sectionName == 'HeadPhones':
-        result = core.autoProcessMusic().process(sectionName,outputDestination, inputName, status, clientAgent, inputCategory)
+        result = core.autoProcessMusic().process(sectionName, outputDestination, inputName,
+                                                 status, clientAgent, inputCategory)
     elif sectionName == 'Mylar':
-        result = core.autoProcessComics().processEpisode(sectionName,outputDestination, inputName, status, clientAgent,
-                                                         inputCategory)
+        result = core.autoProcessComics().processEpisode(sectionName, outputDestination, inputName,
+                                                         status, clientAgent, inputCategory)
     elif sectionName == 'Gamez':
-        result = core.autoProcessGames().process(sectionName,outputDestination, inputName, status, clientAgent, inputCategory)
+        result = core.autoProcessGames().process(sectionName, outputDestination, inputName,
+                                                 status, clientAgent, inputCategory)

     plex_update(inputCategory)

     if result[0] != 0:
         if not core.TORRENT_RESUME_ON_FAILURE:
-            logger.error("A problem was reported in the autoProcess* script. torrent won't resume seeding (settings)")
+            logger.error("A problem was reported in the autoProcess* script. "
+                         "Torrent won't resume seeding (settings)")
         elif clientAgent != 'manual':
-            logger.error(
-                "A problem was reported in the autoProcess* script. If torrent was paused we will resume seeding")
+            logger.error("A problem was reported in the autoProcess* script. "
+                         "If torrent was paused we will resume seeding")
             core.resume_torrent(clientAgent, inputHash, inputID, inputName)
     else:
@@ -262,14 +262,15 @@ def processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID,
         # remove torrent
         if core.USELINK == 'move-sym' and not core.DELETE_ORIGINAL == 1:
-            logger.debug('Checking for sym-links to re-direct in: %s' % (inputDirectory))
+            logger.debug('Checking for sym-links to re-direct in: {0}'.format(inputDirectory))
             for dirpath, dirs, files in os.walk(inputDirectory):
                 for file in files:
-                    logger.debug('Checking symlink: %s' % (os.path.join(dirpath,file)))
-                    core.replace_links(os.path.join(dirpath,file))
+                    logger.debug('Checking symlink: {0}'.format(os.path.join(dirpath, file)))
+                    replace_links(os.path.join(dirpath, file))
         core.remove_torrent(clientAgent, inputHash, inputID, inputName)

-    if not sectionName == 'UserScript':  # for user script, we assume this is cleaned by the script or option USER_SCRIPT_CLEAN
+    if not sectionName == 'UserScript':
+        # for user script, we assume this is cleaned by the script or option USER_SCRIPT_CLEAN
         # cleanup our processing folders of any misc unwanted files and empty directories
         core.cleanDir(outputDestination, sectionName, inputCategory)
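
The section routing earlier in this function remains an if/elif chain; a dispatch table would be the natural next step if the handlers shared one signature. A hypothetical sketch only (not what the commit does — the real handlers take differing argument lists):

    def process_movie(output, name):
        return [0, '{0} processed'.format(name)]

    def process_tv(output, name):
        return [0, '{0} processed'.format(name)]

    DISPATCH = {
        'CouchPotato': process_movie,
        'SickBeard': process_tv,
        'NzbDrone': process_tv,
    }

    handler = DISPATCH.get('SickBeard')
    result = handler('/downloads/out', 'Some.Release') if handler else [0, '']
    print(result)  # [0, 'Some.Release processed']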
@@ -284,14 +285,14 @@ def main(args):
     clientAgent = core.TORRENT_CLIENTAGENT

     logger.info("#########################################################")
-    logger.info("## ..::[%s]::.. ##" % os.path.basename(__file__))
+    logger.info("## ..::[{0}]::.. ##".format(os.path.basename(__file__)))
     logger.info("#########################################################")

     # debug command line options
-    logger.debug("Options passed into TorrentToMedia: %s" % (args))
+    logger.debug("Options passed into TorrentToMedia: {0}".format(args))

     # Post-Processing Result
-    result = [ 0, "" ]
+    result = [0, ""]

     try:
         inputDirectory, inputName, inputCategory, inputHash, inputID = core.parse_args(clientAgent, args)
@@ -310,54 +311,50 @@ def main(args):
                 if not core.CFG[section][subsection].isenabled():
                     continue
                 for dirName in core.getDirs(section, subsection, link='hard'):
-                    logger.info("Starting manual run for %s:%s - Folder:%s" % (section, subsection, dirName))
+                    logger.info("Starting manual run for {0}:{1} - Folder:{2}".format(
+                        section, subsection, dirName))

-                    logger.info("Checking database for download info for %s ..." % (os.path.basename(dirName)))
+                    logger.info("Checking database for download info for {0} ...".format(
+                        os.path.basename(dirName)))
                     core.DOWNLOADINFO = core.get_downloadInfo(os.path.basename(dirName), 0)
                     if core.DOWNLOADINFO:
-                        logger.info(
-                            "Found download info for %s, setting variables now ..." % (os.path.basename(dirName)))
+                        clientAgent = text_type(core.DOWNLOADINFO[0].get('client_agent', 'manual'))
+                        inputHash = text_type(core.DOWNLOADINFO[0].get('input_hash', ''))
+                        inputID = text_type(core.DOWNLOADINFO[0].get('input_id', ''))
+                        logger.info("Found download info for {0}, "
+                                    "setting variables now ...".format(os.path.basename(dirName)))
                     else:
-                        logger.info(
-                            'Unable to locate download info for %s, continuing to try and process this release ...' % (
-                                os.path.basename(dirName))
-                        )
+                        logger.info('Unable to locate download info for {0}, '
+                                    'continuing to try and process this release ...'.format(
+                                        os.path.basename(dirName)))
+                        clientAgent = 'manual'
+                        inputHash = ''
+                        inputID = ''
-                    try:
-                        clientAgent = str(core.DOWNLOADINFO[0]['client_agent'])
-                    except:
-                        clientAgent = 'manual'
-                    try:
-                        inputHash = str(core.DOWNLOADINFO[0]['input_hash'])
-                    except:
-                        inputHash = None
-                    try:
-                        inputID = str(core.DOWNLOADINFO[0]['input_id'])
-                    except:
-                        inputID = None

-                    if clientAgent.lower() not in core.TORRENT_CLIENTS and clientAgent != 'manual':
+                    if clientAgent.lower() not in core.TORRENT_CLIENTS:
                         continue

                     try:
                         dirName = dirName.encode(core.SYS_ENCODING)
-                    except: pass
+                    except UnicodeError:
+                        pass
                     inputName = os.path.basename(dirName)
                     try:
                         inputName = inputName.encode(core.SYS_ENCODING)
-                    except: pass
+                    except UnicodeError:
+                        pass

-                    results = processTorrent(dirName, inputName, subsection, inputHash, inputID,
-                                             clientAgent)
+                    results = processTorrent(dirName, inputName, subsection, inputHash or None, inputID or None,
+                                             clientAgent)
                     if results[0] != 0:
-                        logger.error("A problem was reported when trying to perform a manual run for %s:%s." % (
-                            section, subsection))
+                        logger.error("A problem was reported when trying to perform a manual run for {0}:{1}.".format(
+                            section, subsection))
                         result = results

     if result[0] == 0:
-        logger.info("The %s script completed successfully." % (args[0]))
+        logger.info("The {0} script completed successfully.".format(args[0]))
     else:
-        logger.error("A problem was reported in the %s script." % (args[0]))
+        logger.error("A problem was reported in the {0} script.".format(args[0]))

     del core.MYAPP
     return result[0]
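
Worth noting in main(): download info now comes back through .get() with '' defaults instead of None from try/except, and the call site compensates with the "value or None" idiom so empty strings collapse back to None for processTorrent(). In isolation:

    row = {'client_agent': 'deluge', 'input_hash': '', 'input_id': ''}  # stand-in DB row

    inputHash = row.get('input_hash', '')
    print(inputHash or None)  # None -- '' is falsy, so downstream "if inputHash:" checks still behave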

autoProcessMedia.cfg.spec

@@ -64,6 +64,8 @@
 remote_path = 0
 ##### Set to path where download client places completed downloads locally for this category
 watch_dir =
+##### Set the recursive directory permissions to the following (0 to disable)
+chmodDirectory = 0

 [SickBeard]
 #### autoProcessing for TV Series
@@ -85,8 +87,10 @@
 process_method =
 # force processing of already processed content when running a manual scan.
 force = 0
-# tell SickRage to delete all source files after processing.
+# tell SickRage/Medusa to delete all source files after processing.
 delete_on = 0
+# tell Medusa to ignore the check for associated subtitles when postponing a release
+ignore_subs = 0
 extract = 1
 nzbExtractionBy = Downloader
 # Set this to minimum required size to consider a media file valid (in MB)
@@ -97,6 +101,8 @@
 remote_path = 0
 ##### Set to path where download client places completed downloads locally for this category
 watch_dir =
+##### Set the recursive directory permissions to the following (0 to disable)
+chmodDirectory = 0

 [NzbDrone]
 #### autoProcessing for TV Series
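
The new chmodDirectory setting is consumed in core/__init__.py below as TORRENT_CHMOD_DIRECTORY = int(str(value), 8): the string is parsed as base-8, so a value like 775 becomes mode 0o775 and 0 disables the feature. In isolation:

    value = '775'  # stand-in for CFG["Torrent"]["chmodDirectory"]
    mode = int(str(value), 8)
    print(oct(mode))  # 0o775 on Python 3 (0775 on Python 2); int('0', 8) == 0 disables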

core/__init__.py

@@ -1,3 +1,7 @@
+# coding=utf-8
+
+from __future__ import print_function
+
 import locale
 import os
 import re
@@ -6,6 +10,7 @@ import sys
 import platform
 import time
+
 # init libs
 PROGRAM_DIR = os.path.dirname(os.path.normpath(os.path.abspath(os.path.join(__file__, os.pardir))))
 LIBS_DIR = os.path.join(PROGRAM_DIR, 'libs')
@@ -25,6 +30,8 @@ CONFIG_TV_FILE = os.path.join(PROGRAM_DIR, 'autoProcessTv.cfg')
 TEST_FILE = os.path.join(os.path.join(PROGRAM_DIR, 'tests'), 'test.mp4')
 MYAPP = None

+from six.moves import reload_module
+
 from core.autoProcess.autoProcessComics import autoProcessComics
 from core.autoProcess.autoProcessGames import autoProcessGames
 from core.autoProcess.autoProcessMovie import autoProcessMovie
@@ -32,16 +39,18 @@ from core.autoProcess.autoProcessMusic import autoProcessMusic
 from core.autoProcess.autoProcessTV import autoProcessTV
 from core import logger, versionCheck, nzbToMediaDB
 from core.nzbToMediaConfig import config
-from core.nzbToMediaUtil import category_search, sanitizeName, copy_link, parse_args, flatten, getDirs, \
-    rmReadOnly,rmDir, pause_torrent, resume_torrent, remove_torrent, listMediaFiles, \
-    extractFiles, cleanDir, update_downloadInfoStatus, get_downloadInfo, WakeUp, makeDir, cleanDir, \
-    create_torrent_class, listMediaFiles, RunningProcess
+from core.nzbToMediaUtil import (
+    category_search, sanitizeName, copy_link, parse_args, flatten, getDirs,
+    rmReadOnly, rmDir, pause_torrent, resume_torrent, remove_torrent, listMediaFiles,
+    extractFiles, cleanDir, update_downloadInfoStatus, get_downloadInfo, WakeUp, makeDir, cleanDir,
+    create_torrent_class, listMediaFiles, RunningProcess,
+)
 from core.transcoder import transcoder
 from core.databases import mainDB

 # Client Agents
-NZB_CLIENTS = ['sabnzbd','nzbget']
-TORRENT_CLIENTS = ['transmission', 'deluge', 'utorrent', 'rtorrent', 'other']
+NZB_CLIENTS = ['sabnzbd', 'nzbget', 'manual']
+TORRENT_CLIENTS = ['transmission', 'deluge', 'utorrent', 'rtorrent', 'other', 'manual']

 # sabnzbd constants
 SABNZB_NO_OF_ARGUMENTS = 8
@@ -54,14 +63,17 @@ FORK_FAILED = "failed"
 FORK_FAILED_TORRENT = "failed-torrent"
 FORK_SICKRAGETV = "sickragetv"
 FORK_SICKRAGE = "sickrage"
+FORK_MEDUSA = "medusa"
 FORK_SICKGEAR = "sickgear"
 FORKS[FORK_DEFAULT] = {"dir": None}
 FORKS[FORK_FAILED] = {"dirName": None, "failed": None}
 FORKS[FORK_FAILED_TORRENT] = {"dir": None, "failed": None, "process_method": None}
 FORKS[FORK_SICKRAGETV] = {"proc_dir": None, "failed": None, "process_method": None, "force": None, "delete_on": None}
 FORKS[FORK_SICKRAGE] = {"proc_dir": None, "failed": None, "process_method": None, "force": None, "delete_on": None}
+FORKS[FORK_MEDUSA] = {"proc_dir": None, "failed": None, "process_method": None, "force": None, "delete_on": None, "ignore_subs": None}
 FORKS[FORK_SICKGEAR] = {"dir": None, "failed": None, "process_method": None, "force": None}
-ALL_FORKS = {"dir": None, "dirName": None, "proc_dir": None, "failed": None, "process_method": None, "force": None, "delete_on": None}
+ALL_FORKS = {"dir": None, "dirName": None, "proc_dir": None, "failed": None, "process_method": None, "force": None,
+             "delete_on": None, "ignore_subs": None}

 # NZBGet Exit Codes
 NZBGET_POSTPROCESS_PARCHECK = 92
@@ -201,6 +213,7 @@ USER_SCRIPT_RUNONCE = None
 __INITIALIZED__ = False

+
 def initialize(section=None):
     global NZBGET_POSTPROCESS_ERROR, NZBGET_POSTPROCESS_NONE, NZBGET_POSTPROCESS_PARCHECK, NZBGET_POSTPROCESS_SUCCESS, \
         NZBTOMEDIA_TIMEOUT, FORKS, FORK_DEFAULT, FORK_FAILED_TORRENT, FORK_FAILED, \
@@ -223,8 +236,8 @@ def initialize(section=None):
     if __INITIALIZED__:
         return False

-    if os.environ.has_key('NTM_LOGFILE'):
+    if 'NTM_LOGFILE' in os.environ:
         LOG_FILE = os.environ['NTM_LOGFILE']
         LOG_DIR = os.path.split(LOG_FILE)[0]
@@ -247,16 +260,17 @@ def initialize(section=None):
         SYS_ENCODING = 'UTF-8'

     if not hasattr(sys, "setdefaultencoding"):
-        reload(sys)
+        reload_module(sys)

     try:
         # pylint: disable=E1101
         # On non-unicode builds this will raise an AttributeError, if encoding type is not valid it throws a LookupError
         sys.setdefaultencoding(SYS_ENCODING)
     except:
-        print 'Sorry, you MUST add the nzbToMedia folder to the PYTHONPATH environment variable'
-        print 'or find another way to force Python to use ' + SYS_ENCODING + ' for string encoding.'
-        if os.environ.has_key('NZBOP_SCRIPTDIR'):
+        print('Sorry, you MUST add the nzbToMedia folder to the PYTHONPATH environment variable'
+              '\nor find another way to force Python to use {codec} for string encoding.'.format(codec=SYS_ENCODING))
+        if 'NZBOP_SCRIPTDIR' in os.environ:
             sys.exit(NZBGET_POSTPROCESS_ERROR)
         else:
             sys.exit(1)
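
reload_module here comes from the six.moves import added above: it resolves to the reload builtin on Python 2 and to importlib.reload on Python 3, so the setdefaultencoding dance keeps working on both. The call shape in isolation (assumes a pip-installed six rather than the bundled copy):

    from six.moves import reload_module
    import json

    reload_module(json)  # re-executes the module body; shown only for the call shape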
@@ -266,18 +280,18 @@ def initialize(section=None):
     # run migrate to convert old cfg to new style cfg plus fix any cfg missing values/options.
     if not config.migrate():
-        logger.error("Unable to migrate config file %s, exiting ..." % (CONFIG_FILE))
-        if os.environ.has_key('NZBOP_SCRIPTDIR'):
+        logger.error("Unable to migrate config file {0}, exiting ...".format(CONFIG_FILE))
+        if 'NZBOP_SCRIPTDIR' in os.environ:
             pass  # We will try and read config from Environment.
         else:
             sys.exit(-1)

     # run migrate to convert NzbGet data from old cfg style to new cfg style
-    if os.environ.has_key('NZBOP_SCRIPTDIR'):
+    if 'NZBOP_SCRIPTDIR' in os.environ:
         CFG = config.addnzbget()
     else:  # load newly migrated config
-        logger.info("Loading config from [%s]" % (CONFIG_FILE))
+        logger.info("Loading config from [{0}]".format(CONFIG_FILE))
         CFG = config()

     # Enable/Disable DEBUG Logging
@@ -288,7 +302,7 @@ def initialize(section=None):
     if LOG_ENV:
         for item in os.environ:
-            logger.info("%s: %s" % (item, os.environ[item]), "ENVIRONMENT")
+            logger.info("{0}: {1}".format(item, os.environ[item]), "ENVIRONMENT")

     # initialize the main SB database
     nzbToMediaDB.upgradeDatabase(nzbToMediaDB.DBConnection(), mainDB.InitialSchema)
@@ -315,14 +329,16 @@ def initialize(section=None):
             # restart nzbToMedia
             try:
                 del MYAPP
-            except: pass
+            except:
+                pass
             restart()
         else:
             logger.error("Update wasn't successful, not restarting. Check your log for more information.")

     # Set Current Version
-    logger.info(
-        'nzbToMedia Version:' + NZBTOMEDIA_VERSION + ' Branch:' + GIT_BRANCH + ' (' + platform.system() + ' ' + platform.release() + ')')
+    logger.info('nzbToMedia Version:{version} Branch:{branch} ({system} {release})'.format(
+        version=NZBTOMEDIA_VERSION, branch=GIT_BRANCH,
+        system=platform.system(), release=platform.release()))

     if int(CFG["WakeOnLan"]["wake"]) == 1:
         WakeUp()
@@ -333,8 +349,10 @@ def initialize(section=None):
     SABNZBDAPIKEY = CFG["Nzb"]["sabnzbd_apikey"]
     NZB_DEFAULTDIR = CFG["Nzb"]["default_downloadDirectory"]
     GROUPS = CFG["Custom"]["remove_group"]
-    if isinstance(GROUPS, str): GROUPS = GROUPS.split(',')
-    if GROUPS == ['']: GROUPS = None
+    if isinstance(GROUPS, str):
+        GROUPS = GROUPS.split(',')
+    if GROUPS == ['']:
+        GROUPS = None

     TORRENT_CLIENTAGENT = CFG["Torrent"]["clientAgent"]  # utorrent | deluge | transmission | rtorrent | vuze | other
     USELINK = CFG["Torrent"]["useLink"]  # no | hard | sym
@@ -342,8 +360,10 @@ def initialize(section=None):
     TORRENT_DEFAULTDIR = CFG["Torrent"]["default_downloadDirectory"]
     CATEGORIES = (CFG["Torrent"]["categories"])  # music,music_videos,pictures,software
     NOFLATTEN = (CFG["Torrent"]["noFlatten"])
-    if isinstance(NOFLATTEN, str): NOFLATTEN = NOFLATTEN.split(',')
-    if isinstance(CATEGORIES, str): CATEGORIES = CATEGORIES.split(',')
+    if isinstance(NOFLATTEN, str):
+        NOFLATTEN = NOFLATTEN.split(',')
+    if isinstance(CATEGORIES, str):
+        CATEGORIES = CATEGORIES.split(',')
     DELETE_ORIGINAL = int(CFG["Torrent"]["deleteOriginal"])
     TORRENT_CHMOD_DIRECTORY = int(str(CFG["Torrent"]["chmodDirectory"]), 8)
     TORRENT_RESUME_ON_FAILURE = int(CFG["Torrent"]["resumeOnFailure"])
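
All the isinstance/split pairs in this region implement one normalization rule: a comma-separated config string becomes a list, and the empty string becomes an empty value. A hypothetical helper that captures it (the commit keeps the checks inline):

    def as_list(value):
        # Split comma-separated strings; collapse the [''] artifact of
        # ''.split(',') to an empty list.
        if isinstance(value, str):
            value = value.split(',')
        return [] if value == [''] else value

    print(as_list('music,tv'))  # ['music', 'tv']
    print(as_list(''))          # []
    print(as_list(['a', 'b']))  # ['a', 'b']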
@@ -364,9 +384,12 @@ def initialize(section=None):
     REMOTEPATHS = CFG["Network"]["mount_points"] or []
     if REMOTEPATHS:
-        if isinstance(REMOTEPATHS, list): REMOTEPATHS = ','.join(REMOTEPATHS)  # fix in case this imported as list.
-        REMOTEPATHS = [ tuple(item.split(',')) for item in REMOTEPATHS.split('|') ]  # /volume1/Public/,E:\|/volume2/share/,\\NAS\
-        REMOTEPATHS = [ (local.strip(), remote.strip()) for local, remote in REMOTEPATHS ]  # strip trailing and leading whitespaces
+        if isinstance(REMOTEPATHS, list):
+            REMOTEPATHS = ','.join(REMOTEPATHS)  # fix in case this imported as list.
+        REMOTEPATHS = [tuple(item.split(',')) for item in
+                       REMOTEPATHS.split('|')]  # /volume1/Public/,E:\|/volume2/share/,\\NAS\
+        REMOTEPATHS = [(local.strip(), remote.strip()) for local, remote in
+                       REMOTEPATHS]  # strip trailing and leading whitespaces

     PLEXSSL = int(CFG["Plex"]["plex_ssl"])
     PLEXHOST = CFG["Plex"]["plex_host"]
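
The mount_points syntax decoded step by step: entries are '|'-separated, and each entry is a 'local,remote' pair, exactly as the inline comment's example shows:

    REMOTEPATHS = '/volume1/Public/,E:\\|/volume2/share/,\\\\NAS\\'  # the documented example
    REMOTEPATHS = [tuple(item.split(',')) for item in REMOTEPATHS.split('|')]
    REMOTEPATHS = [(local.strip(), remote.strip()) for local, remote in REMOTEPATHS]
    print(REMOTEPATHS)
    # [('/volume1/Public/', 'E:\\'), ('/volume2/share/', '\\\\NAS\\')]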
@@ -374,62 +397,79 @@ def initialize(section=None):
     PLEXTOKEN = CFG["Plex"]["plex_token"]
     PLEXSEC = CFG["Plex"]["plex_sections"] or []
     if PLEXSEC:
-        if isinstance(PLEXSEC, list): PLEXSEC = ','.join(PLEXSEC)  # fix in case this imported as list.
-        PLEXSEC = [ tuple(item.split(',')) for item in PLEXSEC.split('|') ]
+        if isinstance(PLEXSEC, list):
+            PLEXSEC = ','.join(PLEXSEC)  # fix in case this imported as list.
+        PLEXSEC = [tuple(item.split(',')) for item in PLEXSEC.split('|')]

     devnull = open(os.devnull, 'w')
     try:
         subprocess.Popen(["nice"], stdout=devnull, stderr=devnull).communicate()
-        NICENESS.extend(['nice', '-n%s' % (int(CFG["Posix"]["niceness"]))])
-    except: pass
+        NICENESS.extend(['nice', '-n{0}'.format(int(CFG["Posix"]["niceness"]))])
+    except:
+        pass
     try:
         subprocess.Popen(["ionice"], stdout=devnull, stderr=devnull).communicate()
         try:
-            NICENESS.extend(['ionice', '-c%s' % (int(CFG["Posix"]["ionice_class"]))])
-        except: pass
+            NICENESS.extend(['ionice', '-c{0}'.format(int(CFG["Posix"]["ionice_class"]))])
+        except:
+            pass
         try:
             if 'ionice' in NICENESS:
-                NICENESS.extend(['-n%s' % (int(CFG["Posix"]["ionice_classdata"]))])
+                NICENESS.extend(['-n{0}'.format(int(CFG["Posix"]["ionice_classdata"]))])
             else:
-                NICENESS.extend(['ionice', '-n%s' % (int(CFG["Posix"]["ionice_classdata"]))])
-        except: pass
-    except: pass
+                NICENESS.extend(['ionice', '-n{0}'.format(int(CFG["Posix"]["ionice_classdata"]))])
+        except:
+            pass
+    except:
+        pass
     devnull.close()

     COMPRESSEDCONTAINER = [re.compile('.r\d{2}$', re.I),
                            re.compile('.part\d+.rar$', re.I),
                            re.compile('.rar$', re.I)]
-    COMPRESSEDCONTAINER += [re.compile('%s$' % ext, re.I) for ext in CFG["Extensions"]["compressedExtensions"]]
+    COMPRESSEDCONTAINER += [re.compile('{0}$'.format(ext), re.I) for ext in CFG["Extensions"]["compressedExtensions"]]
     MEDIACONTAINER = CFG["Extensions"]["mediaExtensions"]
     AUDIOCONTAINER = CFG["Extensions"]["audioExtensions"]
     METACONTAINER = CFG["Extensions"]["metaExtensions"]  # .nfo,.sub,.srt
-    if isinstance(COMPRESSEDCONTAINER, str): COMPRESSEDCONTAINER = COMPRESSEDCONTAINER.split(',')
-    if isinstance(MEDIACONTAINER, str): MEDIACONTAINER = MEDIACONTAINER.split(',')
-    if isinstance(AUDIOCONTAINER, str): AUDIOCONTAINER = AUDIOCONTAINER.split(',')
-    if isinstance(METACONTAINER, str): METACONTAINER = METACONTAINER.split(',')
+    if isinstance(COMPRESSEDCONTAINER, str):
+        COMPRESSEDCONTAINER = COMPRESSEDCONTAINER.split(',')
+    if isinstance(MEDIACONTAINER, str):
+        MEDIACONTAINER = MEDIACONTAINER.split(',')
+    if isinstance(AUDIOCONTAINER, str):
+        AUDIOCONTAINER = AUDIOCONTAINER.split(',')
+    if isinstance(METACONTAINER, str):
+        METACONTAINER = METACONTAINER.split(',')

     GETSUBS = int(CFG["Transcoder"]["getSubs"])
     TRANSCODE = int(CFG["Transcoder"]["transcode"])
     DUPLICATE = int(CFG["Transcoder"]["duplicate"])
     CONCAT = int(CFG["Transcoder"]["concat"])
     IGNOREEXTENSIONS = (CFG["Transcoder"]["ignoreExtensions"])
-    if isinstance(IGNOREEXTENSIONS, str): IGNOREEXTENSIONS = IGNOREEXTENSIONS.split(',')
+    if isinstance(IGNOREEXTENSIONS, str):
+        IGNOREEXTENSIONS = IGNOREEXTENSIONS.split(',')
     OUTPUTFASTSTART = int(CFG["Transcoder"]["outputFastStart"])
     GENERALOPTS = (CFG["Transcoder"]["generalOptions"])
-    if isinstance(GENERALOPTS, str): GENERALOPTS = GENERALOPTS.split(',')
-    if GENERALOPTS == ['']: GENERALOPTS = []
-    if not '-fflags' in GENERALOPTS: GENERALOPTS.append('-fflags')
-    if not '+genpts' in GENERALOPTS: GENERALOPTS.append('+genpts')
+    if isinstance(GENERALOPTS, str):
+        GENERALOPTS = GENERALOPTS.split(',')
+    if GENERALOPTS == ['']:
+        GENERALOPTS = []
+    if '-fflags' not in GENERALOPTS:
+        GENERALOPTS.append('-fflags')
+    if '+genpts' not in GENERALOPTS:
+        GENERALOPTS.append('+genpts')
     try:
         OUTPUTQUALITYPERCENT = int(CFG["Transcoder"]["outputQualityPercent"])
-    except: pass
+    except:
+        pass
     OUTPUTVIDEOPATH = CFG["Transcoder"]["outputVideoPath"]
     PROCESSOUTPUT = int(CFG["Transcoder"]["processOutput"])
     ALANGUAGE = CFG["Transcoder"]["audioLanguage"]
     AINCLUDE = int(CFG["Transcoder"]["allAudioLanguages"])
     SLANGUAGES = CFG["Transcoder"]["subLanguages"]
-    if isinstance(SLANGUAGES, str): SLANGUAGES = SLANGUAGES.split(',')
-    if SLANGUAGES == ['']: SLANGUAGES = []
+    if isinstance(SLANGUAGES, str):
+        SLANGUAGES = SLANGUAGES.split(',')
+    if SLANGUAGES == ['']:
+        SLANGUAGES = []
     SINCLUDE = int(CFG["Transcoder"]["allSubLanguages"])
     SEXTRACT = int(CFG["Transcoder"]["extractSubs"])
     SEMBED = int(CFG["Transcoder"]["embedSubs"])
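
For context on the NICENESS list built above: it is intended as a prefix for external commands (the transcoder's ffmpeg invocations) so they run at reduced CPU and I/O priority. Assuming niceness 10, ionice class 2, classdata 4:

    NICENESS = ['nice', '-n10', 'ionice', '-c2', '-n4']
    command = NICENESS + ['ffmpeg', '-i', 'in.mkv', 'out.mp4']  # hypothetical transcode call
    print(' '.join(command))  # nice -n10 ionice -c2 -n4 ffmpeg -i in.mkv out.mp4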
@@ -437,63 +477,81 @@ def initialize(section=None):
     VEXTENSION = CFG["Transcoder"]["outputVideoExtension"].strip()
     VCODEC = CFG["Transcoder"]["outputVideoCodec"].strip()
     VCODEC_ALLOW = CFG["Transcoder"]["VideoCodecAllow"].strip()
-    if isinstance(VCODEC_ALLOW, str): VCODEC_ALLOW = VCODEC_ALLOW.split(',')
-    if VCODEC_ALLOW == ['']: VCODEC_ALLOW = []
+    if isinstance(VCODEC_ALLOW, str):
+        VCODEC_ALLOW = VCODEC_ALLOW.split(',')
+    if VCODEC_ALLOW == ['']:
+        VCODEC_ALLOW = []
     VPRESET = CFG["Transcoder"]["outputVideoPreset"].strip()
     try:
         VFRAMERATE = float(CFG["Transcoder"]["outputVideoFramerate"].strip())
-    except: pass
+    except:
+        pass
     try:
         VCRF = int(CFG["Transcoder"]["outputVideoCRF"].strip())
-    except: pass
+    except:
+        pass
     try:
         VLEVEL = CFG["Transcoder"]["outputVideoLevel"].strip()
-    except: pass
+    except:
+        pass
     try:
-        VBITRATE = int((CFG["Transcoder"]["outputVideoBitrate"].strip()).replace('k','000'))
-    except: pass
+        VBITRATE = int((CFG["Transcoder"]["outputVideoBitrate"].strip()).replace('k', '000'))
+    except:
+        pass
     VRESOLUTION = CFG["Transcoder"]["outputVideoResolution"]
     ACODEC = CFG["Transcoder"]["outputAudioCodec"].strip()
     ACODEC_ALLOW = CFG["Transcoder"]["AudioCodecAllow"].strip()
-    if isinstance(ACODEC_ALLOW, str): ACODEC_ALLOW = ACODEC_ALLOW.split(',')
-    if ACODEC_ALLOW == ['']: ACODEC_ALLOW = []
+    if isinstance(ACODEC_ALLOW, str):
+        ACODEC_ALLOW = ACODEC_ALLOW.split(',')
+    if ACODEC_ALLOW == ['']:
+        ACODEC_ALLOW = []
     try:
         ACHANNELS = int(CFG["Transcoder"]["outputAudioChannels"].strip())
-    except: pass
+    except:
+        pass
     try:
-        ABITRATE = int((CFG["Transcoder"]["outputAudioBitrate"].strip()).replace('k','000'))
-    except: pass
+        ABITRATE = int((CFG["Transcoder"]["outputAudioBitrate"].strip()).replace('k', '000'))
+    except:
+        pass
     ACODEC2 = CFG["Transcoder"]["outputAudioTrack2Codec"].strip()
     ACODEC2_ALLOW = CFG["Transcoder"]["AudioCodec2Allow"].strip()
-    if isinstance(ACODEC2_ALLOW, str): ACODEC2_ALLOW = ACODEC2_ALLOW.split(',')
-    if ACODEC2_ALLOW == ['']: ACODEC2_ALLOW = []
+    if isinstance(ACODEC2_ALLOW, str):
+        ACODEC2_ALLOW = ACODEC2_ALLOW.split(',')
+    if ACODEC2_ALLOW == ['']:
+        ACODEC2_ALLOW = []
     try:
         ACHANNELS2 = int(CFG["Transcoder"]["outputAudioTrack2Channels"].strip())
-    except: pass
+    except:
+        pass
     try:
-        ABITRATE2 = int((CFG["Transcoder"]["outputAudioTrack2Bitrate"].strip()).replace('k','000'))
-    except: pass
+        ABITRATE2 = int((CFG["Transcoder"]["outputAudioTrack2Bitrate"].strip()).replace('k', '000'))
+    except:
+        pass
     ACODEC3 = CFG["Transcoder"]["outputAudioOtherCodec"].strip()
     ACODEC3_ALLOW = CFG["Transcoder"]["AudioOtherCodecAllow"].strip()
-    if isinstance(ACODEC3_ALLOW, str): ACODEC3_ALLOW = ACODEC3_ALLOW.split(',')
-    if ACODEC3_ALLOW == ['']: ACODEC3_ALLOW = []
+    if isinstance(ACODEC3_ALLOW, str):
+        ACODEC3_ALLOW = ACODEC3_ALLOW.split(',')
+    if ACODEC3_ALLOW == ['']:
+        ACODEC3_ALLOW = []
     try:
         ACHANNELS3 = int(CFG["Transcoder"]["outputAudioOtherChannels"].strip())
-    except: pass
+    except:
+        pass
     try:
-        ABITRATE3 = int((CFG["Transcoder"]["outputAudioOtherBitrate"].strip()).replace('k','000'))
-    except: pass
+        ABITRATE3 = int((CFG["Transcoder"]["outputAudioOtherBitrate"].strip()).replace('k', '000'))
+    except:
+        pass
     SCODEC = CFG["Transcoder"]["outputSubtitleCodec"].strip()
     BURN = int(CFG["Transcoder"]["burnInSubtitle"].strip())
     DEFAULTS = CFG["Transcoder"]["outputDefault"].strip()
     HWACCEL = int(CFG["Transcoder"]["hwAccel"])

-    allow_subs = ['.mkv','.mp4', '.m4v', 'asf', 'wma', 'wmv']
+    allow_subs = ['.mkv', '.mp4', '.m4v', 'asf', 'wma', 'wmv']
     codec_alias = {
-        'libx264':['libx264', 'h264', 'h.264', 'AVC', 'MPEG-4'],
-        'libmp3lame':['libmp3lame', 'mp3'],
-        'libfaac':['libfaac', 'aac', 'faac']
+        'libx264': ['libx264', 'h264', 'h.264', 'AVC', 'MPEG-4'],
+        'libmp3lame': ['libmp3lame', 'mp3'],
+        'libfaac': ['libfaac', 'aac', 'faac']
     }
     transcode_defaults = {
         'iPad':{
             'VEXTENSION':'.mp4','VCODEC':'libx264','VPRESET':None,'VFRAMERATE':None,'VBITRATE':None,'VCRF':None,'VLEVEL':None,
@@ -609,6 +667,7 @@ def initialize(section=None):
             'SCODEC':'mov_text'
         }
     }
+    }
     if DEFAULTS and DEFAULTS in transcode_defaults:
         VEXTENSION = transcode_defaults[DEFAULTS]['VEXTENSION']
         VCODEC = transcode_defaults[DEFAULTS]['VCODEC']
@@ -638,25 +697,29 @@ def initialize(section=None):
     if VEXTENSION in allow_subs:
         ALLOWSUBS = 1
-    if not VCODEC_ALLOW and VCODEC: VCODEC_ALLOW.extend([VCODEC])
+    if not VCODEC_ALLOW and VCODEC:
+        VCODEC_ALLOW.extend([VCODEC])
     for codec in VCODEC_ALLOW:
         if codec in codec_alias:
-            extra = [ item for item in codec_alias[codec] if item not in VCODEC_ALLOW ]
+            extra = [item for item in codec_alias[codec] if item not in VCODEC_ALLOW]
             VCODEC_ALLOW.extend(extra)
-    if not ACODEC_ALLOW and ACODEC: ACODEC_ALLOW.extend([ACODEC])
+    if not ACODEC_ALLOW and ACODEC:
+        ACODEC_ALLOW.extend([ACODEC])
     for codec in ACODEC_ALLOW:
         if codec in codec_alias:
-            extra = [ item for item in codec_alias[codec] if item not in ACODEC_ALLOW ]
+            extra = [item for item in codec_alias[codec] if item not in ACODEC_ALLOW]
             ACODEC_ALLOW.extend(extra)
-    if not ACODEC2_ALLOW and ACODEC2: ACODEC2_ALLOW.extend([ACODEC2])
+    if not ACODEC2_ALLOW and ACODEC2:
+        ACODEC2_ALLOW.extend([ACODEC2])
     for codec in ACODEC2_ALLOW:
         if codec in codec_alias:
-            extra = [ item for item in codec_alias[codec] if item not in ACODEC2_ALLOW ]
+            extra = [item for item in codec_alias[codec] if item not in ACODEC2_ALLOW]
             ACODEC2_ALLOW.extend(extra)
-    if not ACODEC3_ALLOW and ACODEC3: ACODEC3_ALLOW.extend([ACODEC3])
+    if not ACODEC3_ALLOW and ACODEC3:
+        ACODEC3_ALLOW.extend([ACODEC3])
     for codec in ACODEC3_ALLOW:
         if codec in codec_alias:
-            extra = [ item for item in codec_alias[codec] if item not in ACODEC3_ALLOW ]
+            extra = [item for item in codec_alias[codec] if item not in ACODEC3_ALLOW]
             ACODEC3_ALLOW.extend(extra)
     codec_alias = {}  # clear memory
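
The alias-expansion loops above, reduced to one case: each allowed codec pulls in its known aliases exactly once. (Iterating a list while extending it works here because the appended aliases can add nothing new, but a copy makes the sketch safer.)

    codec_alias = {'libx264': ['libx264', 'h264', 'h.264', 'AVC', 'MPEG-4']}
    VCODEC_ALLOW = ['libx264']
    for codec in list(VCODEC_ALLOW):  # iterate a copy; the loop extends the original
        if codec in codec_alias:
            extra = [item for item in codec_alias[codec] if item not in VCODEC_ALLOW]
            VCODEC_ALLOW.extend(extra)
    print(VCODEC_ALLOW)  # ['libx264', 'h264', 'h.264', 'AVC', 'MPEG-4']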
@@ -682,47 +745,59 @@ def initialize(section=None):
     else:
         try:
             SEVENZIP = subprocess.Popen(['which', '7z'], stdout=subprocess.PIPE).communicate()[0].strip()
-        except: pass
+        except:
+            pass
         if not SEVENZIP:
             try:
                 SEVENZIP = subprocess.Popen(['which', '7zr'], stdout=subprocess.PIPE).communicate()[0].strip()
-            except: pass
+            except:
+                pass
         if not SEVENZIP:
             try:
                 SEVENZIP = subprocess.Popen(['which', '7za'], stdout=subprocess.PIPE).communicate()[0].strip()
-            except: pass
+            except:
+                pass
     if not SEVENZIP:
         SEVENZIP = None
-        logger.warning("Failed to locate 7zip. Transcoding of disk images and extraction of .7z files will not be possible!")
-    if os.path.isfile(os.path.join(FFMPEG_PATH, 'ffmpeg')) or os.access(os.path.join(FFMPEG_PATH, 'ffmpeg'), os.X_OK):
+        logger.warning(
+            "Failed to locate 7zip. Transcoding of disk images and extraction of .7z files will not be possible!")
+    if os.path.isfile(os.path.join(FFMPEG_PATH, 'ffmpeg')) or os.access(os.path.join(FFMPEG_PATH, 'ffmpeg'),
+                                                                        os.X_OK):
         FFMPEG = os.path.join(FFMPEG_PATH, 'ffmpeg')
-    elif os.path.isfile(os.path.join(FFMPEG_PATH, 'avconv')) or os.access(os.path.join(FFMPEG_PATH, 'avconv'), os.X_OK):
+    elif os.path.isfile(os.path.join(FFMPEG_PATH, 'avconv')) or os.access(os.path.join(FFMPEG_PATH, 'avconv'),
+                                                                          os.X_OK):
         FFMPEG = os.path.join(FFMPEG_PATH, 'avconv')
     else:
         try:
             FFMPEG = subprocess.Popen(['which', 'ffmpeg'], stdout=subprocess.PIPE).communicate()[0].strip()
-        except: pass
+        except:
+            pass
         if not FFMPEG:
             try:
                 FFMPEG = subprocess.Popen(['which', 'avconv'], stdout=subprocess.PIPE).communicate()[0].strip()
-            except: pass
+            except:
+                pass
     if not FFMPEG:
         FFMPEG = None
         logger.warning("Failed to locate ffmpeg. Transcoding disabled!")
         logger.warning("Install ffmpeg with x264 support to enable this feature ...")
-    if os.path.isfile(os.path.join(FFMPEG_PATH, 'ffprobe')) or os.access(os.path.join(FFMPEG_PATH, 'ffprobe'), os.X_OK):
+    if os.path.isfile(os.path.join(FFMPEG_PATH, 'ffprobe')) or os.access(os.path.join(FFMPEG_PATH, 'ffprobe'),
+                                                                         os.X_OK):
         FFPROBE = os.path.join(FFMPEG_PATH, 'ffprobe')
-    elif os.path.isfile(os.path.join(FFMPEG_PATH, 'avprobe')) or os.access(os.path.join(FFMPEG_PATH, 'avprobe'), os.X_OK):
+    elif os.path.isfile(os.path.join(FFMPEG_PATH, 'avprobe')) or os.access(os.path.join(FFMPEG_PATH, 'avprobe'),
+                                                                           os.X_OK):
         FFPROBE = os.path.join(FFMPEG_PATH, 'avprobe')
     else:
         try:
             FFPROBE = subprocess.Popen(['which', 'ffprobe'], stdout=subprocess.PIPE).communicate()[0].strip()
-        except: pass
+        except:
+            pass
         if not FFPROBE:
             try:
                 FFPROBE = subprocess.Popen(['which', 'avprobe'], stdout=subprocess.PIPE).communicate()[0].strip()
-            except: pass
+            except:
+                pass
     if not FFPROBE:
         FFPROBE = None
         if CHECK_MEDIA:
@@ -731,7 +806,7 @@ def initialize(section=None):
     # check for script-defined section and if None set to allow sections
     SECTIONS = CFG[tuple(x for x in CFG if CFG[x].sections and CFG[x].isenabled()) if not section else (section,)]
-    for section,subsections in SECTIONS.items():
+    for section, subsections in SECTIONS.items():
         CATEGORIES.extend([subsection for subsection in subsections if CFG[section][subsection].isenabled()])
     CATEGORIES = list(set(CATEGORIES))
@@ -741,6 +816,7 @@ def initialize(section=None):
     # finished initializing
     return True

 def restart():
     install_type = versionCheck.CheckVersion().install_type
@@ -752,7 +828,7 @@ def restart():
     if popen_list:
         popen_list += SYS_ARGV
-        logger.log(u"Restarting nzbToMedia with " + str(popen_list))
+        logger.log(u"Restarting nzbToMedia with {args}".format(args=popen_list))
         logger.close()
         p = subprocess.Popen(popen_list, cwd=os.getcwd())
         p.wait()
@@ -760,11 +836,12 @@ def restart():
     os._exit(status)

 def rchmod(path, mod):
-    logger.log("Changing file mode of %s to %s" % (path, oct(mod)))
+    logger.log("Changing file mode of {0} to {1}".format(path, oct(mod)))
     os.chmod(path, mod)
     if not os.path.isdir(path):
         return  # Skip files

     for root, dirs, files in os.walk(path):
         for d in dirs:
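
The repeated Popen(['which', ...]) probes for 7z, ffmpeg and ffprobe all follow one pattern; a hypothetical consolidation (POSIX-only, like the original; on Python 3 shutil.which would do the same job):

    import subprocess

    def find_executable(*names):
        # Try each candidate through `which`; return the first hit or None.
        for name in names:
            try:
                path = subprocess.Popen(['which', name],
                                        stdout=subprocess.PIPE).communicate()[0].strip()
            except OSError:
                continue
            if path:
                return path
        return None

    print(find_executable('7z', '7zr', '7za'))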

(new file)

@@ -0,0 +1 @@
+# coding=utf-8

View file

@ -1,81 +1,70 @@
# coding=utf-8
import os
import core
import requests
import time

from core.nzbToMediaUtil import convert_to_ascii, remoteDir, server_responding
from core import logger

requests.packages.urllib3.disable_warnings()
class autoProcessComics(object):
    def processEpisode(self, section, dirName, inputName=None, status=0, clientAgent='manual', inputCategory=None):
        if int(status) != 0:
            logger.warning("FAILED DOWNLOAD DETECTED, nothing to process.", section)
            return [1, "{0}: Failed to post-process. {1} does not support failed downloads".format(section, section)]

        cfg = dict(core.CFG[section][inputCategory])

        host = cfg["host"]
        port = cfg["port"]
        username = cfg["username"]
        password = cfg["password"]
        ssl = int(cfg.get("ssl", 0))
        web_root = cfg.get("web_root", "")
        remote_path = int(cfg.get("remote_path", 0))
        protocol = "https://" if ssl else "http://"

        url = "{0}{1}:{2}{3}/post_process".format(protocol, host, port, web_root)
        if not server_responding(url):
            logger.error("Server did not respond. Exiting", section)
            return [1, "{0}: Failed to post-process - {1} did not respond.".format(section, section)]

        inputName, dirName = convert_to_ascii(inputName, dirName)
        clean_name, ext = os.path.splitext(inputName)
        if len(ext) == 4:  # we assume this was a standard extension.
            inputName = clean_name

        params = {
            'nzb_folder': remoteDir(dirName) if remote_path else dirName,
        }
        if inputName is not None:
            params['nzb_name'] = inputName

        success = False

        logger.debug("Opening URL: {0}".format(url), section)
        try:
            r = requests.get(url, auth=(username, password), params=params, stream=True, verify=False, timeout=(30, 300))
        except requests.ConnectionError:
            logger.error("Unable to open URL", section)
            return [1, "{0}: Failed to post-process - Unable to connect to {1}".format(section, section)]

        for line in r.iter_lines():
            if line:
                logger.postprocess("{0}".format(line), section)
                if "Post Processing SUCCESSFUL" in line:
                    success = True

        if r.status_code not in [requests.codes.ok, requests.codes.created, requests.codes.accepted]:
            logger.error("Server returned status {0}".format(r.status_code), section)
            return [1, "{0}: Failed to post-process - Server returned status {1}".format(section, r.status_code)]
        if success:
            logger.postprocess("SUCCESS: This issue has been processed successfully", section)
            return [0, "{0}: Successfully post-processed {1}".format(section, inputName)]
        else:
            logger.warning("The issue does not appear to have successfully processed. Please check your Logs", section)
            return [1, "{0}: Failed to post-process - Returned log from {1} was not as expected.".format(section, section)]


@@ -1,86 +1,77 @@
# coding=utf-8
import os
import core
import requests
import shutil

from core.nzbToMediaUtil import convert_to_ascii, server_responding
from core import logger

requests.packages.urllib3.disable_warnings()
class autoProcessGames(object):
    def process(self, section, dirName, inputName=None, status=0, clientAgent='manual', inputCategory=None):
        status = int(status)

        cfg = dict(core.CFG[section][inputCategory])

        host = cfg["host"]
        port = cfg["port"]
        apikey = cfg["apikey"]
        library = cfg.get("library")
        ssl = int(cfg.get("ssl", 0))
        web_root = cfg.get("web_root", "")
        protocol = "https://" if ssl else "http://"

        url = "{0}{1}:{2}{3}/api".format(protocol, host, port, web_root)
        if not server_responding(url):
            logger.error("Server did not respond. Exiting", section)
            return [1, "{0}: Failed to post-process - {1} did not respond.".format(section, section)]

        inputName, dirName = convert_to_ascii(inputName, dirName)

        fields = inputName.split("-")

        gamezID = fields[0].replace("[", "").replace("]", "").replace(" ", "")

        downloadStatus = 'Downloaded' if status == 0 else 'Wanted'

        params = {
            'api_key': apikey,
            'mode': 'UPDATEREQUESTEDSTATUS',
            'db_id': gamezID,
            'status': downloadStatus
        }

        logger.debug("Opening URL: {0}".format(url), section)

        try:
            r = requests.get(url, params=params, verify=False, timeout=(30, 300))
        except requests.ConnectionError:
            logger.error("Unable to open URL")
            return [1, "{0}: Failed to post-process - Unable to connect to {1}".format(section, section)]

        result = r.json()
        logger.postprocess("{0}".format(result), section)
        if library:
            logger.postprocess("moving files to library: {0}".format(library), section)
            try:
                shutil.move(dirName, os.path.join(library, inputName))
            except:
                logger.error("Unable to move {0} to {1}".format(dirName, os.path.join(library, inputName)), section)
                return [1, "{0}: Failed to post-process - Unable to move files".format(section)]
        else:
            logger.error("No library specified to move files to. Please edit your configuration.", section)
            return [1, "{0}: Failed to post-process - No library defined in {1}".format(section, section)]

        if r.status_code not in [requests.codes.ok, requests.codes.created, requests.codes.accepted]:
            logger.error("Server returned status {0}".format(r.status_code), section)
            return [1, "{0}: Failed to post-process - Server returned status {1}".format(section, r.status_code)]
        elif result['success']:
            logger.postprocess("SUCCESS: Status for {0} has been set to {1} in Gamez".format(gamezID, downloadStatus), section)
            return [0, "{0}: Successfully post-processed {1}".format(section, inputName)]
        else:
            logger.error("FAILED: Status for {0} has NOT been updated in Gamez".format(gamezID), section)
            return [1, "{0}: Failed to post-process - Returned log from {1} was not as expected.".format(section, section)]


@@ -1,3 +1,5 @@
# coding=utf-8
import os
import time
import requests
@@ -10,12 +12,13 @@ from core.transcoder import transcoder
requests.packages.urllib3.disable_warnings()
class autoProcessMovie(object):
    def get_release(self, baseURL, imdbid=None, download_id=None, release_id=None):
        results = {}
        params = {}

        # determine cmd and params to send to CouchPotato to get our results
        section = 'movies'
        cmd = "/media.list"
        if release_id or imdbid:

@@ -24,27 +27,28 @@ class autoProcessMovie:
            params['id'] = release_id or imdbid

        url = baseURL + cmd
        logger.debug("Opening URL: {0} with PARAMS: {1}".format(url, params))

        try:
            r = requests.get(url, params=params, verify=False, timeout=(30, 60))
        except requests.ConnectionError:
            logger.error("Unable to open URL {0}".format(url))
            return results

        try:
            result = r.json()
        except ValueError:
            # ValueError catches simplejson's JSONDecodeError and json's ValueError
            logger.error("CouchPotato returned the following non-json data")
            for line in r.iter_lines():
                logger.error("{0}".format(line))
            return results

        if not result['success']:
            if 'error' in result:
                logger.error('{0}'.format(result['error']))
            else:
                logger.error("no media found for id {0}".format(params['id']))
            return results

        # Gather release info and return it back, no need to narrow results

@@ -53,7 +57,8 @@ class autoProcessMovie:
                id = result[section]['_id']
                results[id] = result[section]
                return results
            except:
                pass

        # Gather release info and proceed with trying to narrow results to one release choice
@@ -100,39 +105,24 @@ class autoProcessMovie:
    def process(self, section, dirName, inputName=None, status=0, clientAgent="manual", download_id="", inputCategory=None, failureLink=None):

        cfg = dict(core.CFG[section][inputCategory])

        host = cfg["host"]
        port = cfg["port"]
        apikey = cfg["apikey"]
        method = cfg["method"]
        delete_failed = int(cfg["delete_failed"])
        wait_for = int(cfg["wait_for"])
        ssl = int(cfg.get("ssl", 0))
        web_root = cfg.get("web_root", "")
        remote_path = int(cfg.get("remote_path", 0))
        extract = int(cfg.get("extract", 0))
        protocol = "https://" if ssl else "http://"

        baseURL = "{0}{1}:{2}{3}/api/{4}".format(protocol, host, port, web_root, apikey)
        if not server_responding(baseURL):
            logger.error("Server did not respond. Exiting", section)
            return [1, "{0}: Failed to post-process - {1} did not respond.".format(section, section)]

        imdbid = find_imdbid(dirName, inputName)
        release = self.get_release(baseURL, imdbid, download_id)
@@ -152,7 +142,7 @@ class autoProcessMovie:
        except:
            pass

        if not os.path.isdir(dirName) and os.path.isfile(dirName):  # If the input directory is a file, assume single file download and split dir/name.
            dirName = os.path.split(os.path.normpath(dirName))[0]

        SpecificPath = os.path.join(dirName, str(inputName))

@@ -166,7 +156,7 @@ class autoProcessMovie:
        inputName, dirName = convert_to_ascii(inputName, dirName)

        if not listMediaFiles(dirName, media=True, audio=False, meta=False, archives=False) and listMediaFiles(dirName, media=False, audio=False, meta=False, archives=True) and extract:
            logger.debug('Checking for archives to extract in directory: {0}'.format(dirName))
            core.extractFiles(dirName)
            inputName, dirName = convert_to_ascii(inputName, dirName)
@@ -179,41 +169,47 @@ class autoProcessMovie:
            if transcoder.isVideoGood(video, status):
                import_subs(video)
                good_files += 1
        if num_files and good_files == num_files:
            if status:
                logger.info("Status shown as failed from Downloader, but {0} valid video files found. Setting as success.".format(good_files), section)
                status = 0
        elif num_files and good_files < num_files:
            logger.info("Status shown as success from Downloader, but corrupt video files found. Setting as failed.", section)
            if 'NZBOP_VERSION' in os.environ and os.environ['NZBOP_VERSION'][0:5] >= '14.0':
                print('[NZB] MARK=BAD')
            if failureLink:
                failureLink += '&corrupt=true'
            status = 1
        elif clientAgent == "manual":
            logger.warning("No media files found in directory {0} to manually process.".format(dirName), section)
            return [0, ""]  # Success (as far as this script is concerned)
        else:
            logger.warning("No media files found in directory {0}. Processing this as a failed download".format(dirName), section)
            status = 1
            if 'NZBOP_VERSION' in os.environ and os.environ['NZBOP_VERSION'][0:5] >= '14.0':
                print('[NZB] MARK=BAD')

        if status == 0:
            if core.TRANSCODE == 1:
                result, newDirName = transcoder.Transcode_directory(dirName)
                if result == 0:
                    logger.debug("Transcoding succeeded for files in {0}".format(dirName), section)
                    dirName = newDirName

                    chmod_directory = int(str(cfg.get("chmodDirectory", "0")), 8)
                    logger.debug("Config setting 'chmodDirectory' currently set to {0}".format(oct(chmod_directory)), section)
                    if chmod_directory:
                        logger.info("Attempting to set the octal permission of '{0}' on directory '{1}'".format(oct(chmod_directory), dirName), section)
                        core.rchmod(dirName, chmod_directory)
                else:
                    logger.error("Transcoding failed for files in {0}".format(dirName), section)
                    return [1, "{0}: Failed to post-process - Transcoding failed".format(section)]

            for video in listMediaFiles(dirName, media=True, audio=False, meta=False, archives=False):
                if not release and ".cp(tt" not in video and imdbid:
                    videoName, videoExt = os.path.splitext(video)
                    video2 = "{0}.cp({1}){2}".format(videoName, imdbid, videoExt)
                    if not (clientAgent in [core.TORRENT_CLIENTAGENT, 'manual'] and core.USELINK == 'move-sym'):
                        logger.debug('Renaming: {0} to: {1}'.format(video, video2))
                        os.rename(video, video2)

            params = {}

@@ -221,9 +217,7 @@ class autoProcessMovie:
            params['downloader'] = downloader or clientAgent
            params['download_id'] = download_id

            params['media_folder'] = remoteDir(dirName) if remote_path else dirName

            if method == "manage":
                command = "/manage.update"
@@ -231,129 +225,129 @@ class autoProcessMovie:
            else:
                command = "/renamer.scan"

            url = "{0}{1}".format(baseURL, command)
            logger.debug("Opening URL: {0} with PARAMS: {1}".format(url, params), section)
            logger.postprocess("Starting {0} scan for {1}".format(method, inputName), section)

            try:
                r = requests.get(url, params=params, verify=False, timeout=(30, 1800))
            except requests.ConnectionError:
                logger.error("Unable to open URL", section)
                return [1, "{0}: Failed to post-process - Unable to connect to {1}".format(section, section)]

            result = r.json()
            if r.status_code not in [requests.codes.ok, requests.codes.created, requests.codes.accepted]:
                logger.error("Server returned status {0}".format(r.status_code), section)
                return [1, "{0}: Failed to post-process - Server returned status {1}".format(section, r.status_code)]
            elif result['success']:
                logger.postprocess("SUCCESS: Finished {0} scan for folder {1}".format(method, dirName), section)
                if method == "manage":
                    return [0, "{0}: Successfully post-processed {1}".format(section, inputName)]
            else:
                logger.error("FAILED: {0} scan was unable to finish for folder {1}. exiting!".format(method, dirName), section)
                return [1, "{0}: Failed to post-process - Server did not return success".format(section)]
        else:
            core.FAILED = True
            logger.postprocess("FAILED DOWNLOAD DETECTED FOR {0}".format(inputName), section)
            if failureLink:
                reportNzb(failureLink, clientAgent)

            if delete_failed and os.path.isdir(dirName) and not os.path.dirname(dirName) == dirName:
                logger.postprocess("Deleting failed files and folder {0}".format(dirName), section)
                rmDir(dirName)

            if not release_id and not media_id:
                logger.error("Could not find a downloaded movie in the database matching {0}, exiting!".format(inputName), section)
                return [1, "{0}: Failed to post-process - Failed download not found in {1}".format(section, section)]

            if release_id:
                logger.postprocess("Setting failed release {0} to ignored ...".format(inputName), section)

                url = "{url}/release.ignore".format(url=baseURL)
                params = {'id': release_id}

                logger.debug("Opening URL: {0} with PARAMS: {1}".format(url, params), section)

                try:
                    r = requests.get(url, params=params, verify=False, timeout=(30, 120))
                except requests.ConnectionError:
                    logger.error("Unable to open URL {0}".format(url), section)
                    return [1, "{0}: Failed to post-process - Unable to connect to {1}".format(section, section)]

                result = r.json()
                if r.status_code not in [requests.codes.ok, requests.codes.created, requests.codes.accepted]:
                    logger.error("Server returned status {0}".format(r.status_code), section)
                    return [1, "{0}: Failed to post-process - Server returned status {1}".format(section, r.status_code)]
                elif result['success']:
                    logger.postprocess("SUCCESS: {0} has been set to ignored ...".format(inputName), section)
                else:
                    logger.warning("FAILED: Unable to set {0} to ignored!".format(inputName), section)
                    return [1, "{0}: Failed to post-process - Unable to set {1} to ignored".format(section, inputName)]

            logger.postprocess("Trying to snatch the next highest ranked release.", section)

            url = "{0}/movie.searcher.try_next".format(baseURL)
            logger.debug("Opening URL: {0}".format(url), section)

            try:
                r = requests.get(url, params={'media_id': media_id}, verify=False, timeout=(30, 600))
            except requests.ConnectionError:
                logger.error("Unable to open URL {0}".format(url), section)
                return [1, "{0}: Failed to post-process - Unable to connect to {1}".format(section, section)]

            result = r.json()
            if r.status_code not in [requests.codes.ok, requests.codes.created, requests.codes.accepted]:
                logger.error("Server returned status {0}".format(r.status_code), section)
                return [1, "{0}: Failed to post-process - Server returned status {1}".format(section, r.status_code)]
            elif result['success']:
                logger.postprocess("SUCCESS: Snatched the next highest release ...", section)
                return [0, "{0}: Successfully snatched next highest release".format(section)]
            else:
                logger.postprocess("SUCCESS: Unable to find a new release to snatch now. CP will keep searching!", section)
                return [0, "{0}: No new release found now. {1} will keep searching".format(section, section)]

        # Added a release that was not in the wanted list so confirm rename successful by finding this movie media.list.
        if not release:
            download_id = None  # we don't want to filter new releases based on this.

        # we will now check to see if CPS has finished renaming before returning to TorrentToMedia and unpausing.
        timeout = time.time() + 60 * wait_for
        while time.time() < timeout:  # only wait 2 (default) minutes, then return.
            logger.postprocess("Checking for status change, please stand by ...", section)
            release = self.get_release(baseURL, imdbid, download_id, release_id)
            if release:
                try:
                    if release_id is None and release_status_old is None:  # we didn't have a release before, but now we do.
                        logger.postprocess("SUCCESS: Movie {0} has now been added to CouchPotato".format(imdbid), section)
                        return [0, "{0}: Successfully post-processed {1}".format(section, inputName)]

                    release_status_new = release[release_id]['status']
                    if release_status_new != release_status_old:
                        logger.postprocess("SUCCESS: Release {0} has now been marked with a status of [{1}]".format(
                            inputName, str(release_status_new).upper()), section)
                        return [0, "{0}: Successfully post-processed {1}".format(section, inputName)]
                except:
                    pass

            if not os.path.isdir(dirName):
                logger.postprocess("SUCCESS: Input Directory [{0}] has been processed and removed".format(dirName), section)
                return [0, "{0}: Successfully post-processed {1}".format(section, inputName)]

            elif not listMediaFiles(dirName, media=True, audio=False, meta=False, archives=True):
                logger.postprocess("SUCCESS: Input Directory [{0}] has no remaining media files. This has been fully processed.".format(dirName), section)
                return [0, "{0}: Successfully post-processed {1}".format(section, inputName)]

            # pause and let CouchPotatoServer catch its breath
            time.sleep(10 * wait_for)

        # The status hasn't changed. we have waited 2 minutes which is more than enough. uTorrent can resume seeding now.
        logger.warning("{0} does not appear to have changed status after {1} minutes, Please check your logs.".format(inputName, wait_for), section)
        return [1, "{0}: Failed to post-process - No change in status".format(section)]


@@ -1,3 +1,5 @@
# coding=utf-8
import os
import time
import requests
@@ -9,67 +11,55 @@ from core import logger
requests.packages.urllib3.disable_warnings()
class autoProcessMusic(object):
    def get_status(self, url, apikey, dirName):
        logger.debug("Attempting to get current status for release:{0}".format(os.path.basename(dirName)))

        params = {
            'apikey': apikey,
            'cmd': "getHistory"
        }

        logger.debug("Opening URL: {0} with PARAMS: {1}".format(url, params))

        try:
            r = requests.get(url, params=params, verify=False, timeout=(30, 120))
        except requests.RequestException:
            logger.error("Unable to open URL")
            return None

        try:
            result = r.json()
        except ValueError:
            # ValueError catches simplejson's JSONDecodeError and json's ValueError
            return None

        for album in result:
            if os.path.basename(dirName) == album['FolderName']:
                return album["Status"].lower()
    def process(self, section, dirName, inputName=None, status=0, clientAgent="manual", inputCategory=None):
        status = int(status)

        cfg = dict(core.CFG[section][inputCategory])

        host = cfg["host"]
        port = cfg["port"]
        apikey = cfg["apikey"]
        wait_for = int(cfg["wait_for"])
        ssl = int(cfg.get("ssl", 0))
        web_root = cfg.get("web_root", "")
        remote_path = int(cfg.get("remote_path", 0))
        extract = int(cfg.get("extract", 0))
        protocol = "https://" if ssl else "http://"

        url = "{0}{1}:{2}{3}/api".format(protocol, host, port, web_root)
        if not server_responding(url):
            logger.error("Server did not respond. Exiting", section)
            return [1, "{0}: Failed to post-process - {1} did not respond.".format(section, section)]

        if not os.path.isdir(dirName) and os.path.isfile(dirName):  # If the input directory is a file, assume single file download and split dir/name.
            dirName = os.path.split(os.path.normpath(dirName))[0]

        SpecificPath = os.path.join(dirName, str(inputName))
@@ -83,63 +73,61 @@ class autoProcessMusic:
        inputName, dirName = convert_to_ascii(inputName, dirName)

        if not listMediaFiles(dirName, media=False, audio=True, meta=False, archives=False) and listMediaFiles(dirName, media=False, audio=False, meta=False, archives=True) and extract:
            logger.debug('Checking for archives to extract in directory: {0}'.format(dirName))
            core.extractFiles(dirName)
            inputName, dirName = convert_to_ascii(inputName, dirName)

        if listMediaFiles(dirName, media=False, audio=True, meta=False, archives=False) and status:
            logger.info("Status shown as failed from Downloader, but valid media files found. Setting as successful.", section)
            status = 0

        if status == 0:

            params = {
                'apikey': apikey,
                'cmd': "forceProcess",
                'dir': remoteDir(os.path.dirname(dirName)) if remote_path else os.path.dirname(dirName)
            }

            release_status = self.get_status(url, apikey, dirName)
            if not release_status:
                logger.error("Could not find a status for {0}, is it in the wanted list ?".format(inputName), section)

            logger.debug("Opening URL: {0} with PARAMS: {1}".format(url, params), section)

            try:
                r = requests.get(url, params=params, verify=False, timeout=(30, 300))
            except requests.ConnectionError:
                logger.error("Unable to open URL {0}".format(url), section)
                return [1, "{0}: Failed to post-process - Unable to connect to {1}".format(section, section)]

            logger.debug("Result: {0}".format(r.text), section)

            if r.status_code not in [requests.codes.ok, requests.codes.created, requests.codes.accepted]:
                logger.error("Server returned status {0}".format(r.status_code), section)
                return [1, "{0}: Failed to post-process - Server returned status {1}".format(section, r.status_code)]
            elif r.text == "OK":
                logger.postprocess("SUCCESS: Post-Processing started for {0} in folder {1} ...".format(inputName, dirName), section)
            else:
                logger.error("FAILED: Post-Processing has NOT started for {0} in folder {1}. exiting!".format(inputName, dirName), section)
                return [1, "{0}: Failed to post-process - Returned log from {1} was not as expected.".format(section, section)]
        else:
            logger.warning("FAILED DOWNLOAD DETECTED", section)
            return [1, "{0}: Failed to post-process. {1} does not support failed downloads".format(section, section)]

        # we will now wait for this album to be processed before returning to TorrentToMedia and unpausing.
        timeout = time.time() + 60 * wait_for
        while time.time() < timeout:
            current_status = self.get_status(url, apikey, dirName)
            if current_status is not None and current_status != release_status:  # Something has changed. CPS must have processed this movie.
                logger.postprocess("SUCCESS: This release is now marked as status [{0}]".format(current_status), section)
                return [0, "{0}: Successfully post-processed {1}".format(section, inputName)]
            if not os.path.isdir(dirName):
                logger.postprocess("SUCCESS: The input directory {0} has been removed. Processing must have finished.".format(dirName), section)
                return [0, "{0}: Successfully post-processed {1}".format(section, inputName)]
            time.sleep(10 * wait_for)

        # The status hasn't changed. uTorrent can resume seeding now.
        logger.warning("The music album does not appear to have changed status after {0} minutes. Please check your Logs".format(wait_for), section)
        return [1, "{0}: Failed to post-process - No change in wanted status".format(section)]


@@ -1,3 +1,5 @@
# coding=utf-8
import copy
import os
import time
@@ -14,108 +16,72 @@ from core.transcoder import transcoder
requests.packages.urllib3.disable_warnings()
class autoProcessTV(object):
    def command_complete(self, url, params, headers, section):
        try:
            r = requests.get(url, params=params, headers=headers, stream=True, verify=False, timeout=(30, 60))
        except requests.ConnectionError:
            logger.error("Unable to open URL: {0}".format(url), section)
            return None
        if r.status_code not in [requests.codes.ok, requests.codes.created, requests.codes.accepted]:
            logger.error("Server returned status {0}".format(r.status_code), section)
            return None
        else:
            try:
                return r.json()['state']
            except (ValueError, KeyError):
                # ValueError catches simplejson's JSONDecodeError and json's ValueError
                logger.error("{0} did not return expected json data.".format(section), section)
                return None
    def CDH(self, url2, headers, section="MAIN"):
        try:
            r = requests.get(url2, params={}, headers=headers, stream=True, verify=False, timeout=(30, 60))
        except requests.ConnectionError:
            logger.error("Unable to open URL: {0}".format(url2), section)
            return False
        if r.status_code not in [requests.codes.ok, requests.codes.created, requests.codes.accepted]:
            logger.error("Server returned status {0}".format(r.status_code), section)
            return False
        else:
            try:
                return r.json().get("enableCompletedDownloadHandling", False)
            except ValueError:
                # ValueError catches simplejson's JSONDecodeError and json's ValueError
                return False
    def processEpisode(self, section, dirName, inputName=None, failed=False, clientAgent="manual", download_id=None, inputCategory=None, failureLink=None):

        cfg = dict(core.CFG[section][inputCategory])

        host = cfg["host"]
        port = cfg["port"]
        ssl = int(cfg.get("ssl", 0))
        web_root = cfg.get("web_root", "")
        protocol = "https://" if ssl else "http://"

        if not server_responding("{0}{1}:{2}{3}".format(protocol, host, port, web_root)):
            logger.error("Server did not respond. Exiting", section)
            return [1, "{0}: Failed to post-process - {1} did not respond.".format(section, section)]

        # auto-detect correct fork
        fork, fork_params = autoFork(section, inputCategory)

        username = cfg.get("username", "")
        password = cfg.get("password", "")
        apikey = cfg.get("apikey", "")
        delete_failed = int(cfg.get("delete_failed", 0))
        nzbExtractionBy = cfg.get("nzbExtractionBy", "Downloader")
        process_method = cfg.get("process_method")
        remote_path = int(cfg.get("remote_path", 0))
        wait_for = int(cfg.get("wait_for", 2))
        force = int(cfg.get("force", 0))
        delete_on = int(cfg.get("delete_on", 0))
        ignore_subs = int(cfg.get("ignore_subs", 0))
        extract = int(cfg.get("extract", 0))

        if not os.path.isdir(dirName) and os.path.isfile(dirName):  # If the input directory is a file, assume single file download and split dir/name.
            dirName = os.path.split(os.path.normpath(dirName))[0]

        SpecificPath = os.path.join(dirName, str(inputName))
@@ -130,12 +96,12 @@ class autoProcessTV:
            # won't process the directory because it doesn't exist.
            try:
                os.makedirs(dirName)  # Attempt to create the directory
            except OSError as e:
                # Re-raise the error if it wasn't about the directory not existing
                if e.errno != errno.EEXIST:
                    raise

        if 'process_method' not in fork_params or (clientAgent in ['nzbget', 'sabnzbd'] and nzbExtractionBy != "Destination"):
            if inputName:
                process_all_exceptions(inputName, dirName)
                inputName, dirName = convert_to_ascii(inputName, dirName)
@@ -143,13 +109,13 @@ class autoProcessTV:
            # Now check if tv files exist in destination.
            if not listMediaFiles(dirName, media=True, audio=False, meta=False, archives=False):
                if listMediaFiles(dirName, media=False, audio=False, meta=False, archives=True) and extract:
                    logger.debug('Checking for archives to extract in directory: {0}'.format(dirName))
                    core.extractFiles(dirName)
                    inputName, dirName = convert_to_ascii(inputName, dirName)

            if listMediaFiles(dirName, media=True, audio=False, meta=False, archives=False):  # Check that a video exists. if not, assume failed.
                flatten(dirName)

                # Check video files for corruption
                status = int(failed)
                good_files = 0

@@ -159,7 +125,7 @@ class autoProcessTV:
                    if transcoder.isVideoGood(video, status):
                        good_files += 1
                        import_subs(video)
                if num_files > 0:
                    if good_files == num_files and not status == 0:
                        logger.info('Found Valid Videos. Setting status Success')
                        status = 0

@@ -168,13 +134,13 @@ class autoProcessTV:
                        logger.info('Found corrupt videos. Setting status Failed')
                        status = 1
                        failed = 1
                        if 'NZBOP_VERSION' in os.environ and os.environ['NZBOP_VERSION'][0:5] >= '14.0':
                            print('[NZB] MARK=BAD')
                        if failureLink:
                            failureLink += '&corrupt=true'
            elif clientAgent == "manual":
                logger.warning("No media files found in directory {0} to manually process.".format(dirName), section)
                return [0, ""]  # Success (as far as this script is concerned)
            elif nzbExtractionBy == "Destination":
                logger.info("Check for media files ignored because nzbExtractionBy is set to Destination.")
                if int(failed) == 0:

@@ -182,27 +148,34 @@ class autoProcessTV:
                    status = 0
                    failed = 0
                else:
                    logger.info("Downloader reported an error during download or verification. Processing this as a failed download.")
                    status = 1
                    failed = 1
            else:
                logger.warning("No media files found in directory {0}. Processing this as a failed download".format(dirName), section)
                status = 1
                failed = 1
                if 'NZBOP_VERSION' in os.environ and os.environ['NZBOP_VERSION'][0:5] >= '14.0':
                    print('[NZB] MARK=BAD')

        if status == 0 and core.TRANSCODE == 1:  # only transcode successful downloads
            result, newDirName = transcoder.Transcode_directory(dirName)
            if result == 0:
                logger.debug("SUCCESS: Transcoding succeeded for files in {0}".format(dirName), section)
                dirName = newDirName

                chmod_directory = int(str(cfg.get("chmodDirectory", "0")), 8)
                logger.debug("Config setting 'chmodDirectory' currently set to {0}".format(oct(chmod_directory)), section)
                if chmod_directory:
                    logger.info("Attempting to set the octal permission of '{0}' on directory '{1}'".format(oct(chmod_directory), dirName), section)
                    core.rchmod(dirName, chmod_directory)
            else:
                logger.error("FAILED: Transcoding failed for files in {0}".format(dirName), section)
                return [1, "{0}: Failed to post-process - Transcoding failed".format(section)]

        # configure SB params to pass
        fork_params['quiet'] = 1
        fork_params['proc_type'] = 'manual'
        if inputName is not None:
            fork_params['nzbName'] = inputName
@@ -233,8 +206,14 @@ class autoProcessTV:
            else:
                del fork_params[param]

            if param == "ignore_subs":
                if ignore_subs:
                    fork_params[param] = ignore_subs
                else:
                    del fork_params[param]

        # delete any unused params so we don't pass them to SB by mistake
        [fork_params.pop(k) for k, v in fork_params.items() if v is None]

        if status == 0:
            logger.postprocess("SUCCESS: The download succeeded, sending a post-process request", section)
@ -243,62 +222,60 @@ class autoProcessTV:
if failureLink: if failureLink:
reportNzb(failureLink, clientAgent) reportNzb(failureLink, clientAgent)
if 'failed' in fork_params: if 'failed' in fork_params:
logger.postprocess("FAILED: The download failed. Sending 'failed' process request to %s branch" % (fork), section) logger.postprocess("FAILED: The download failed. Sending 'failed' process request to {0} branch".format(fork), section)
elif section == "NzbDrone": elif section == "NzbDrone":
logger.postprocess("FAILED: The download failed. Sending failed download to %s for CDH processing" % (fork), section) logger.postprocess("FAILED: The download failed. Sending failed download to {0} for CDH processing".format(fork), section)
return [1, "%s: Downlaod Failed. Sending back to %s" % (section, section) ] # Return as failed to flag this in the downloader. return [1, "{0}: Download Failed. Sending back to {1}".format(section, section)] # Return as failed to flag this in the downloader.
else: else:
logger.postprocess("FAILED: The download failed. %s branch does not handle failed downloads. Nothing to process" % (fork), section) logger.postprocess("FAILED: The download failed. {0} branch does not handle failed downloads. Nothing to process".format(fork), section)
if delete_failed and os.path.isdir(dirName) and not os.path.dirname(dirName) == dirName: if delete_failed and os.path.isdir(dirName) and not os.path.dirname(dirName) == dirName:
logger.postprocess("Deleting failed files and folder %s" % (dirName), section) logger.postprocess("Deleting failed files and folder {0}".format(dirName), section)
rmDir(dirName) rmDir(dirName)
return [1, "%s: Failed to post-process. %s does not support failed downloads" % (section, section) ] # Return as failed to flag this in the downloader. return [1, "{0}: Failed to post-process. {1} does not support failed downloads".format(section, section)] # Return as failed to flag this in the downloader.
url = None url = None
if section == "SickBeard": if section == "SickBeard":
url = "%s%s:%s%s/home/postprocess/processEpisode" % (protocol,host,port,web_root) url = "{0}{1}:{2}{3}/home/postprocess/processEpisode".format(protocol, host, port, web_root)
elif section == "NzbDrone": elif section == "NzbDrone":
url = "%s%s:%s%s/api/command" % (protocol, host, port, web_root) url = "{0}{1}:{2}{3}/api/command".format(protocol, host, port, web_root)
url2 = "%s%s:%s%s/api/config/downloadClient" % (protocol, host, port, web_root) url2 = "{0}{1}:{2}{3}/api/config/downloadClient".format(protocol, host, port, web_root)
headers = {"X-Api-Key": apikey} headers = {"X-Api-Key": apikey}
params = {'sortKey': 'series.title', 'page': 1, 'pageSize': 1, 'sortDir': 'asc'} # params = {'sortKey': 'series.title', 'page': 1, 'pageSize': 1, 'sortDir': 'asc'}
if remote_path: if remote_path:
logger.debug("remote_path: %s" % (remoteDir(dirName)),section) logger.debug("remote_path: {0}".format(remoteDir(dirName)), section)
data = {"name": "DownloadedEpisodesScan", "path": remoteDir(dirName), "downloadClientId": download_id} data = {"name": "DownloadedEpisodesScan", "path": remoteDir(dirName), "downloadClientId": download_id}
else: else:
logger.debug("path: %s" % (dirName),section) logger.debug("path: {0}".format(dirName), section)
data = {"name": "DownloadedEpisodesScan", "path": dirName, "downloadClientId": download_id} data = {"name": "DownloadedEpisodesScan", "path": dirName, "downloadClientId": download_id}
if not download_id: if not download_id:
data.pop("downloadClientId") data.pop("downloadClientId")
data = json.dumps(data) data = json.dumps(data)
try: try:
if section == "SickBeard": if section == "SickBeard":
logger.debug("Opening URL: %s with params: %s" % (url, str(fork_params)), section) logger.debug("Opening URL: {0} with params: {1}".format(url, fork_params), section)
r = None
s = requests.Session() s = requests.Session()
login = "%s%s:%s%s/login" % (protocol,host,port,web_root) login = "{0}{1}:{2}{3}/login".format(protocol, host, port, web_root)
login_params = {'username': username, 'password': password} login_params = {'username': username, 'password': password}
s.post(login, data=login_params, stream=True, verify=False, timeout=(30, 60)) s.post(login, data=login_params, stream=True, verify=False, timeout=(30, 60))
r = s.get(url, auth=(username, password), params=fork_params, stream=True, verify=False, timeout=(30, 1800)) r = s.get(url, auth=(username, password), params=fork_params, stream=True, verify=False, timeout=(30, 1800))
elif section == "NzbDrone": elif section == "NzbDrone":
logger.debug("Opening URL: %s with data: %s" % (url, str(data)), section) logger.debug("Opening URL: {0} with data: {1}".format(url, data), section)
r = None
r = requests.post(url, data=data, headers=headers, stream=True, verify=False, timeout=(30, 1800)) r = requests.post(url, data=data, headers=headers, stream=True, verify=False, timeout=(30, 1800))
except requests.ConnectionError: except requests.ConnectionError:
logger.error("Unable to open URL: %s" % (url), section) logger.error("Unable to open URL: {0}".format(url), section)
return [1, "%s: Failed to post-process - Unable to connect to %s" % (section, section) ] return [1, "{0}: Failed to post-process - Unable to connect to {1}".format(section, section)]
if not r.status_code in [requests.codes.ok, requests.codes.created, requests.codes.accepted]: if r.status_code not in [requests.codes.ok, requests.codes.created, requests.codes.accepted]:
logger.error("Server returned status %s" % (str(r.status_code)), section) logger.error("Server returned status {0}".format(r.status_code), section)
return [1, "%s: Failed to post-process - Server returned status %s" % (section, str(r.status_code)) ] return [1, "{0}: Failed to post-process - Server returned status {1}".format(section, r.status_code)]
Success = False Success = False
Started = False Started = False
if section == "SickBeard": if section == "SickBeard":
for line in r.iter_lines(): for line in r.iter_lines():
if line: if line:
logger.postprocess("%s" % (line), section) logger.postprocess("{0}".format(line), section)
if "Moving file from" in line: if "Moving file from" in line:
inputName = os.path.split(line)[1] inputName = os.path.split(line)[1]
if "Processing succeeded" in line or "Successfully processed" in line: if "Processing succeeded" in line or "Successfully processed" in line:
@ -307,45 +284,45 @@ class autoProcessTV:
try: try:
res = json.loads(r.content) res = json.loads(r.content)
scan_id = int(res['id']) scan_id = int(res['id'])
logger.debug("Scan started with id: %s" % (str(scan_id)), section) logger.debug("Scan started with id: {0}".format(scan_id), section)
Started = True Started = True
except Exception as e: except Exception as e:
logger.warning("No scan id was returned due to: %s" % (e), section) logger.warning("No scan id was returned due to: {0}".format(e), section)
scan_id = None scan_id = None
Started = False Started = False
if status != 0 and delete_failed and not os.path.dirname(dirName) == dirName: if status != 0 and delete_failed and not os.path.dirname(dirName) == dirName:
logger.postprocess("Deleting failed files and folder %s" % (dirName),section) logger.postprocess("Deleting failed files and folder {0}".format(dirName), section)
rmDir(dirName) rmDir(dirName)
if Success: if Success:
return [0, "%s: Successfully post-processed %s" % (section, inputName) ] return [0, "{0}: Successfully post-processed {1}".format(section, inputName)]
elif section == "NzbDrone" and Started: elif section == "NzbDrone" and Started:
n = 0 n = 0
params = {} params = {}
url = url + "/" + str(scan_id) url = "{0}/{1}".format(url, scan_id)
while n < 6: # set up wait_for minutes to see if command completes.. while n < 6: # set up wait_for minutes to see if command completes..
time.sleep(10 * wait_for) time.sleep(10 * wait_for)
command_status = self.command_complete(url, params, headers, section) command_status = self.command_complete(url, params, headers, section)
if command_status and command_status in ['completed', 'failed']: if command_status and command_status in ['completed', 'failed']:
break break
n += 1 n += 1
if command_status: if command_status:
logger.debug("The Scan command return status: %s" % (command_status), section) logger.debug("The Scan command return status: {0}".format(command_status), section)
if not os.path.exists(dirName): if not os.path.exists(dirName):
logger.debug("The directory %s has been removed. Renaming was successful." % (dirName), section) logger.debug("The directory {0} has been removed. Renaming was successful.".format(dirName), section)
return [0, "%s: Successfully post-processed %s" % (section, inputName) ] return [0, "{0}: Successfully post-processed {1}".format(section, inputName)]
elif command_status and command_status in ['completed']: elif command_status and command_status in ['completed']:
logger.debug("The Scan command has completed successfully. Renaming was successful.", section) logger.debug("The Scan command has completed successfully. Renaming was successful.", section)
return [0, "%s: Successfully post-processed %s" % (section, inputName) ] return [0, "{0}: Successfully post-processed {1}".format(section, inputName)]
elif command_status and command_status in ['failed']: elif command_status and command_status in ['failed']:
logger.debug("The Scan command has failed. Renaming was not successful.", section) logger.debug("The Scan command has failed. Renaming was not successful.", section)
#return [1, "%s: Failed to post-process %s" % (section, inputName) ] # return [1, "%s: Failed to post-process %s" % (section, inputName) ]
if self.CDH(url2, headers): if self.CDH(url2, headers, section=section):
logger.debug("The Scan command did not return status completed, but complete Download Handling is enabled. Passing back to %s." % (section), section) logger.debug("The Scan command did not return status completed, but complete Download Handling is enabled. Passing back to {0}.".format(section), section)
return [status, "%s: Complete DownLoad Handling is enabled. Passing back to %s" % (section, section) ] return [status, "{0}: Complete DownLoad Handling is enabled. Passing back to {1}".format(section, section)]
else: else:
logger.warning("The Scan command did not return a valid status. Renaming was not successful.", section) logger.warning("The Scan command did not return a valid status. Renaming was not successful.", section)
return [1, "%s: Failed to post-process %s" % (section, inputName) ] return [1, "{0}: Failed to post-process {1}".format(section, inputName)]
else: else:
return [1, "%s: Failed to post-process - Returned log from %s was not as expected." % (section, section) ] # We did not receive Success confirmation. return [1, "{0}: Failed to post-process - Returned log from {1} was not as expected.".format(section, section)] # We did not receive Success confirmation.
@@ -1 +1,2 @@
+# coding=utf-8
 __all__ = ["mainDB"]
@@ -1,10 +1,12 @@
-import core
+# coding=utf-8
 from core import logger, nzbToMediaDB
 from core.nzbToMediaUtil import backupVersionedFile

 MIN_DB_VERSION = 1  # oldest db version we support migrating from
 MAX_DB_VERSION = 2


 def backupDatabase(version):
     logger.info("Backing up database before upgrade")
     if not backupVersionedFile(nzbToMediaDB.dbFilename(), version):
@@ -12,6 +14,7 @@ def backupDatabase(version):
     else:
         logger.info("Proceeding with upgrade")

 # ======================
 # = Main DB Migrations =
 # ======================

@@ -39,25 +42,24 @@ class InitialSchema(nzbToMediaDB.SchemaUpgrade):
         cur_db_version = self.checkDBVersion()

         if cur_db_version < MIN_DB_VERSION:
-            logger.log_error_and_exit("Your database version (" + str(
-                cur_db_version) + ") is too old to migrate from what this version of nzbToMedia supports (" + \
-                str(MIN_DB_VERSION) + ").\n" + \
-                "Please remove nzbtomedia.db file to begin fresh."
-            )
+            logger.log_error_and_exit(u"Your database version ({current}) is too old to migrate "
+                                      u"from what this version of nzbToMedia supports ({min})."
+                                      u"\nPlease remove nzbtomedia.db file to begin fresh.".format
+                                      (current=cur_db_version, min=MIN_DB_VERSION))
         if cur_db_version > MAX_DB_VERSION:
-            logger.log_error_and_exit("Your database version (" + str(
-                cur_db_version) + ") has been incremented past what this version of nzbToMedia supports (" + \
-                str(MAX_DB_VERSION) + ").\n" + \
-                "If you have used other forks of nzbToMedia, your database may be unusable due to their modifications."
-            )
+            logger.log_error_and_exit(u"Your database version ({current}) has been incremented "
+                                      u"past what this version of nzbToMedia supports ({max})."
+                                      u"\nIf you have used other forks of nzbToMedia, your database "
+                                      u"may be unusable due to their modifications.".format
+                                      (current=cur_db_version, max=MAX_DB_VERSION))
         if cur_db_version < MAX_DB_VERSION:  # We need to upgrade.
             queries = [
                 "CREATE TABLE downloads2 (input_directory TEXT, input_name TEXT, input_hash TEXT, input_id TEXT, client_agent TEXT, status INTEGER, last_update NUMERIC, CONSTRAINT pk_downloadID PRIMARY KEY (input_directory, input_name));",
                 "INSERT INTO downloads2 SELECT * FROM downloads;",
                 "DROP TABLE IF EXISTS downloads;",
                 "ALTER TABLE downloads2 RENAME TO downloads;",
                 "INSERT INTO db_version (db_version) VALUES (2);"
             ]
             for query in queries:
                 self.connection.action(query)
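The upgrade above uses SQLite's usual table-rebuild idiom: create the replacement table with the new composite primary key, copy the rows across, drop the original, rename, then record the new schema version. A standalone sketch of the same five-statement sequence, assuming a database file that already contains the downloads and db_version tables shown above:

import sqlite3

def upgrade_downloads_table(db_path):
    queries = [
        "CREATE TABLE downloads2 (input_directory TEXT, input_name TEXT, input_hash TEXT, input_id TEXT, client_agent TEXT, status INTEGER, last_update NUMERIC, CONSTRAINT pk_downloadID PRIMARY KEY (input_directory, input_name));",
        "INSERT INTO downloads2 SELECT * FROM downloads;",
        "DROP TABLE IF EXISTS downloads;",
        "ALTER TABLE downloads2 RENAME TO downloads;",
        "INSERT INTO db_version (db_version) VALUES (2);",
    ]
    with sqlite3.connect(db_path) as conn:  # commits on success, rolls back on error
        for query in queries:
            conn.execute(query)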
@@ -0,0 +1 @@
+# coding=utf-8
@@ -1,3 +1,5 @@
+# coding=utf-8
+
 import os
 import platform
 import shutil
@@ -7,6 +9,7 @@ import core
 from subprocess import call, Popen
 import subprocess


 def extract(filePath, outputDestination):
     success = 0
     # Using Windows
@@ -21,9 +24,9 @@ def extract(filePath, outputDestination):
     # Using unix
     else:
         required_cmds = ["unrar", "unzip", "tar", "unxz", "unlzma", "7zr", "bunzip2"]
-        ## Possible future suport:
+        # ## Possible future suport:
         # gunzip: gz (cmd will delete original archive)
-        ## the following do not extract to dest dir
+        # ## the following do not extract to dest dir
         # ".xz": ["xz", "-d --keep"],
         # ".lzma": ["xz", "-d --format=lzma --keep"],
         # ".bz2": ["bzip2", "-d --keep"],
@@ -42,15 +45,20 @@ def extract(filePath, outputDestination):
         if not os.getenv('TR_TORRENT_DIR'):
             devnull = open(os.devnull, 'w')
             for cmd in required_cmds:
-                if call(['which', cmd], stdout=devnull, stderr=devnull): #note, returns 0 if exists, or 1 if doesn't exist.
-                    if cmd == "7zr" and not call(["which", "7z"]): # we do have "7z" command
-                        EXTRACT_COMMANDS[".7z"] = ["7z", "x"]
-                    elif cmd == "7zr" and not call(["which", "7za"]): # we do have "7za" command
-                        EXTRACT_COMMANDS[".7z"] = ["7za", "x"]
-                    else:
-                        for k, v in EXTRACT_COMMANDS.items():
-                            if cmd in v[0]:
-                                core.logger.error("EXTRACTOR: %s not found, disabling support for %s" % (cmd, k))
-                                del EXTRACT_COMMANDS[k]
+                if call(['which', cmd], stdout=devnull,
+                        stderr=devnull):  # note, returns 0 if exists, or 1 if doesn't exist.
+                    for k, v in EXTRACT_COMMANDS.items():
+                        if cmd in v[0]:
+                            if not call(["which", "7zr"], stdout=devnull, stderr=devnull):  # we do have "7zr"
+                                EXTRACT_COMMANDS[k] = ["7zr", "x", "-y"]
+                            elif not call(["which", "7z"], stdout=devnull, stderr=devnull):  # we do have "7z"
+                                EXTRACT_COMMANDS[k] = ["7z", "x", "-y"]
+                            elif not call(["which", "7za"], stdout=devnull, stderr=devnull):  # we do have "7za"
+                                EXTRACT_COMMANDS[k] = ["7za", "x", "-y"]
+                            else:
+                                core.logger.error("EXTRACTOR: {cmd} not found, "
+                                                  "disabling support for {feature}".format
+                                                  (cmd=cmd, feature=k))
+                                del EXTRACT_COMMANDS[k]
             devnull.close()
         else:
@@ -64,7 +72,7 @@ def extract(filePath, outputDestination):
     if ext[1] in (".gz", ".bz2", ".lzma"):
         # Check if this is a tar
         if os.path.splitext(ext[0])[1] == ".tar":
-            cmd = EXTRACT_COMMANDS[".tar" + ext[1]]
+            cmd = EXTRACT_COMMANDS[".tar{ext}".format(ext=ext[1])]
     elif ext[1] in (".1", ".01", ".001") and os.path.splitext(ext[0])[1] in (".rar", ".zip", ".7z"):
         cmd = EXTRACT_COMMANDS[os.path.splitext(ext[0])[1]]
     elif ext[1] in (".cb7", ".cba", ".cbr", ".cbt", ".cbz"):  # don't extract these comic book archives.
@@ -73,10 +81,11 @@ def extract(filePath, outputDestination):
         if ext[1] in EXTRACT_COMMANDS:
             cmd = EXTRACT_COMMANDS[ext[1]]
         else:
-            core.logger.debug("EXTRACTOR: Unknown file type: %s" % ext[1])
+            core.logger.debug("EXTRACTOR: Unknown file type: {ext}".format
+                              (ext=ext[1]))
             return False

     # Create outputDestination folder
     core.makeDir(outputDestination)

     if core.PASSWORDSFILE != "" and os.path.isfile(os.path.normpath(core.PASSWORDSFILE)):
@@ -84,8 +93,10 @@ def extract(filePath, outputDestination):
     else:
         passwords = []

-    core.logger.info("Extracting %s to %s" % (filePath, outputDestination))
-    core.logger.debug("Extracting %s %s %s" % (cmd, filePath, outputDestination))
+    core.logger.info("Extracting {file} to {destination}".format
+                     (file=filePath, destination=outputDestination))
+    core.logger.debug("Extracting {cmd} {file} {destination}".format
+                      (cmd=cmd, file=filePath, destination=outputDestination))

     origFiles = []
     origDirs = []
@@ -98,7 +109,7 @@ def extract(filePath, outputDestination):
     pwd = os.getcwd()  # Get our Present Working Directory
     os.chdir(outputDestination)  # Not all unpack commands accept full paths, so just extract into this directory
     devnull = open(os.devnull, 'w')

     try:  # now works same for nt and *nix
         info = None
         cmd.append(filePath)  # add filePath to final cmd arg.
@@ -112,7 +123,8 @@ def extract(filePath, outputDestination):
         p = Popen(cmd2, stdout=devnull, stderr=devnull, startupinfo=info)  # should extract files fine.
         res = p.wait()
         if (res >= 0 and os.name == 'nt') or res == 0:  # for windows chp returns process id if successful or -1*Error code. Linux returns 0 for successful.
-            core.logger.info("EXTRACTOR: Extraction was successful for %s to %s" % (filePath, outputDestination))
+            core.logger.info("EXTRACTOR: Extraction was successful for {file} to {destination}".format
+                             (file=filePath, destination=outputDestination))
             success = 1
         elif len(passwords) > 0:
             core.logger.info("EXTRACTOR: Attempting to extract with passwords")
@@ -120,20 +132,23 @@ def extract(filePath, outputDestination):
             if password == "":  # if edited in windows or otherwise if blank lines.
                 continue
             cmd2 = cmd
-            #append password here.
-            passcmd = "-p" + password
+            # append password here.
+            passcmd = "-p{pwd}".format(pwd=password)
             cmd2.append(passcmd)
             p = Popen(cmd2, stdout=devnull, stderr=devnull, startupinfo=info)  # should extract files fine.
             res = p.wait()
             if (res >= 0 and platform == 'Windows') or res == 0:
-                core.logger.info("EXTRACTOR: Extraction was successful for %s to %s using password: %s" % (
-                    filePath, outputDestination, password))
+                core.logger.info("EXTRACTOR: Extraction was successful "
+                                 "for {file} to {destination} using password: {pwd}".format
+                                 (file=filePath, destination=outputDestination, pwd=password))
                 success = 1
                 break
             else:
                 continue
     except:
-        core.logger.error("EXTRACTOR: Extraction failed for %s. Could not call command %s" % (filePath, cmd))
+        core.logger.error("EXTRACTOR: Extraction failed for {file}. "
+                          "Could not call command {cmd}".format
+                          (file=filePath, cmd=cmd))
         os.chdir(pwd)
         return False
@@ -141,20 +156,24 @@ def extract(filePath, outputDestination):
     os.chdir(pwd)  # Go back to our Original Working Directory
     if success:
         # sleep to let files finish writing to disk
-        sleep (3)
+        sleep(3)
         perms = stat.S_IMODE(os.lstat(os.path.split(filePath)[0]).st_mode)
         for dir, subdirs, files in os.walk(outputDestination):
             for subdir in subdirs:
                 if not os.path.join(dir, subdir) in origFiles:
                     try:
                         os.chmod(os.path.join(dir, subdir), perms)
-                    except: pass
+                    except:
+                        pass
             for file in files:
                 if not os.path.join(dir, file) in origFiles:
                     try:
                         shutil.copymode(filePath, os.path.join(dir, file))
-                    except: pass
+                    except:
+                        pass
         return True
     else:
-        core.logger.error("EXTRACTOR: Extraction failed for %s. Result was %s" % (filePath, res))
+        core.logger.error("EXTRACTOR: Extraction failed for {file}. "
+                          "Result was {result}".format
+                          (file=filePath, result=res))
         return False
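The command pruning above leans on the fact that `which` exits 0 when a binary can be resolved and non-zero otherwise. A compact sketch of that availability check, with an illustrative function name that is not part of the commit:

import os
from subprocess import call

def have_command(cmd):
    # `which` returns 0 if the command exists on PATH, non-zero if it doesn't.
    with open(os.devnull, 'w') as devnull:
        return call(['which', cmd], stdout=devnull, stderr=devnull) == 0

have_command('unrar')  # e.g. decide whether .rar support stays in EXTRACT_COMMANDS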
@@ -1,5 +1,8 @@
-import json
+# coding=utf-8
+
 import requests
+
+from six import iteritems


 class GitHub(object):
     """
@@ -17,10 +20,11 @@ class GitHub(object):
         Access the API at the path given and with the optional params given.
         """

-        url = 'https://api.github.com/' + '/'.join(path)
+        url = 'https://api.github.com/{path}'.format(path='/'.join(path))

         if params and type(params) is dict:
-            url += '?' + '&'.join([str(x) + '=' + str(params[x]) for x in params.keys()])
+            url += '?{params}'.format(params='&'.join(['{key}={value}'.format(key=k, value=v)
+                                                       for k, v in iteritems(params)]))

         data = requests.get(url, verify=False)
@@ -57,6 +61,6 @@ class GitHub(object):
         Returns a deserialized json object containing the compare info. See http://developer.github.com/v3/repos/commits/
         """
         access_API = self._access_API(
-            ['repos', self.github_repo_user, self.github_repo, 'compare', base + '...' + head],
+            ['repos', self.github_repo_user, self.github_repo, 'compare', '{base}...{head}'.format(base=base, head=head)],
             params={'per_page': per_page})
         return access_API
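A hedged sketch of the request the compare call above ends up issuing: the GitHub compare endpoint at repos/<user>/<repo>/compare/<base>...<head>, with per_page carried in the query string. The standalone function and its argument values are assumptions for illustration:

import requests

def compare_commits(user, repo, base, head, per_page=1):
    # Same path assembly as _access_API above.
    url = 'https://api.github.com/{path}'.format(
        path='/'.join(['repos', user, repo, 'compare',
                       '{0}...{1}'.format(base, head)]))
    # requests encodes per_page into the query string, like the '?key=value' join above.
    return requests.get(url, params={'per_page': per_page}).json()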
@@ -0,0 +1 @@
+# coding=utf-8
@@ -1,3 +1,4 @@
+# coding=utf-8
 # Linktastic Module
 # - A python2/3 compatible module that can create hardlinks/symlinks on windows-based systems
 #
@@ -29,61 +30,65 @@ if os.name == 'nt':
     info = subprocess.STARTUPINFO()
     info.dwFlags |= subprocess.STARTF_USESHOWWINDOW


 # Prevent spaces from messing with us!
 def _escape_param(param):
-    return '"%s"' % param
+    return '"{0}"'.format(param)


 # Private function to create link on nt-based systems
 def _link_windows(src, dest):
     try:
         subprocess.check_output(
-            'cmd /C mklink /H %s %s' % (_escape_param(dest), _escape_param(src)),
+            'cmd /C mklink /H {0} {1}'.format(_escape_param(dest), _escape_param(src)),
             stderr=subprocess.STDOUT, startupinfo=info)
     except CalledProcessError as err:
         raise IOError(err.output.decode('utf-8'))

     # TODO, find out what kind of messages Windows sends us from mklink
     # print(stdout)
     # assume if they ret-coded 0 we're good


 def _symlink_windows(src, dest):
     try:
         subprocess.check_output(
-            'cmd /C mklink %s %s' % (_escape_param(dest), _escape_param(src)),
+            'cmd /C mklink {0} {1}'.format(_escape_param(dest), _escape_param(src)),
             stderr=subprocess.STDOUT, startupinfo=info)
     except CalledProcessError as err:
         raise IOError(err.output.decode('utf-8'))

     # TODO, find out what kind of messages Windows sends us from mklink
     # print(stdout)
     # assume if they ret-coded 0 we're good


 def _dirlink_windows(src, dest):
     try:
         subprocess.check_output(
-            'cmd /C mklink /J %s %s' % (_escape_param(dest), _escape_param(src)),
+            'cmd /C mklink /J {0} {1}'.format(_escape_param(dest), _escape_param(src)),
             stderr=subprocess.STDOUT, startupinfo=info)
     except CalledProcessError as err:
         raise IOError(err.output.decode('utf-8'))

     # TODO, find out what kind of messages Windows sends us from mklink
     # print(stdout)
     # assume if they ret-coded 0 we're good


 def _junctionlink_windows(src, dest):
     try:
         subprocess.check_output(
-            'cmd /C mklink /D %s %s' % (_escape_param(dest), _escape_param(src)),
+            'cmd /C mklink /D {0} {1}'.format(_escape_param(dest), _escape_param(src)),
             stderr=subprocess.STDOUT, startupinfo=info)
     except CalledProcessError as err:
         raise IOError(err.output.decode('utf-8'))

     # TODO, find out what kind of messages Windows sends us from mklink
     # print(stdout)
     # assume if they ret-coded 0 we're good


 # Create a hard link to src named as dest
 # This version of link, unlike os.link, supports nt systems as well
@@ -101,6 +106,7 @@ def symlink(src, dest):
     else:
         os.symlink(src, dest)


 # Create a symlink to src named as dest, but don't fail if you're on nt
 def dirlink(src, dest):
     if os.name == 'nt':
@@ -108,9 +114,10 @@ def dirlink(src, dest):
     else:
         os.symlink(src, dest)


 # Create a symlink to src named as dest, but don't fail if you're on nt
 def junctionlink(src, dest):
     if os.name == 'nt':
         _junctionlink_windows(src, dest)
     else:
         os.symlink(src, dest)
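Hypothetical usage of the helpers above, assuming the module is importable as linktastic (the import name and paths are assumptions). On nt each call shells out to cmd /C mklink with the flag shown in the matching private function; elsewhere it falls back to os.link/os.symlink:

import linktastic  # assumed import name

linktastic.link('movie.mkv', 'movie-hard.mkv')   # hard link (mklink /H on nt)
linktastic.symlink('/data/show', 'show-link')    # symlink (mklink on nt)
linktastic.dirlink('/data/season1', 'season1')   # directory junction (mklink /J on nt)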
@@ -1,3 +1,4 @@
+# coding=utf-8
 from __future__ import with_statement

 import os
@@ -26,6 +27,7 @@ reverseNames = {u'ERROR': ERROR,
                 u'POSTPROCESS': POSTPROCESS,
                 u'DB': DB}


 class NTMRotatingLogHandler(object):
     def __init__(self, log_file, num_files, num_bytes):
         self.num_files = num_files
@@ -67,7 +69,7 @@ class NTMRotatingLogHandler(object):
         if self.cur_handler:
             old_handler = self.cur_handler
         else:
-            #Add a new logging levels
+            # Add a new logging levels
             logging.addLevelName(21, 'POSTPROCESS')
             logging.addLevelName(5, 'DB')
@@ -84,7 +86,7 @@ class NTMRotatingLogHandler(object):
             {'nzbtomedia': logging.Formatter('[%(asctime)s] [%(levelname)s]::%(message)s', '%H:%M:%S'),
              'postprocess': logging.Formatter('[%(asctime)s] [%(levelname)s]::%(message)s', '%H:%M:%S'),
              'db': logging.Formatter('[%(asctime)s] [%(levelname)s]::%(message)s', '%H:%M:%S')
              },
             logging.Formatter('%(message)s'), ))

         # add the handler to the root logger
@@ -121,7 +123,7 @@ class NTMRotatingLogHandler(object):
             {'nzbtomedia': logging.Formatter('%(asctime)s %(levelname)-8s::%(message)s', '%Y-%m-%d %H:%M:%S'),
              'postprocess': logging.Formatter('%(asctime)s %(levelname)-8s::%(message)s', '%Y-%m-%d %H:%M:%S'),
              'db': logging.Formatter('%(asctime)s %(levelname)-8s::%(message)s', '%Y-%m-%d %H:%M:%S')
              },
             logging.Formatter('%(message)s'), ))

         return file_handler
@@ -134,7 +136,7 @@ class NTMRotatingLogHandler(object):
         i: Log number to ues
         """
-        return self.log_file_path + ('.' + str(i) if i else '')
+        return self.log_file_path + ('.{0}'.format(i) if i else '')

     def _num_logs(self):
         """
@@ -191,9 +193,9 @@ class NTMRotatingLogHandler(object):
         self.writes_since_check += 1

         try:
-            message = u"%s: %s" % (str(section).upper(), toLog)
-        except:
-            message = u"%s: Message contains non-utf-8 string" % (str(section).upper())
+            message = u"{0}: {1}".format(section.upper(), toLog)
+        except UnicodeError:
+            message = u"{0}: Message contains non-utf-8 string".format(section.upper())

         out_line = message
@@ -226,14 +228,15 @@ class NTMRotatingLogHandler(object):
     def log_error_and_exit(self, error_msg):
         log(error_msg, ERROR)

-        if os.environ.has_key('NZBOP_SCRIPTDIR'):
+        if 'NZBOP_SCRIPTDIR' in os.environ:
             sys.exit(core.NZBGET_POSTPROCESS_ERROR)
         elif not self.console_logging:
             sys.exit(error_msg.encode(core.SYS_ENCODING, 'xmlcharrefreplace'))
         else:
             sys.exit(1)


-class DispatchingFormatter:
+class DispatchingFormatter(object):
     def __init__(self, formatters, default_formatter):
         self._formatters = formatters
         self._default_formatter = default_formatter
@@ -242,31 +245,41 @@ class DispatchingFormatter:
         formatter = self._formatters.get(record.name, self._default_formatter)
         return formatter.format(record)


 ntm_log_instance = NTMRotatingLogHandler(core.LOG_FILE, NUM_LOGS, LOG_SIZE)


 def log(toLog, logLevel=MESSAGE, section='MAIN'):
     ntm_log_instance.log(toLog, logLevel, section)


 def info(toLog, section='MAIN'):
     log(toLog, MESSAGE, section)


 def error(toLog, section='MAIN'):
     log(toLog, ERROR, section)


 def warning(toLog, section='MAIN'):
     log(toLog, WARNING, section)


 def debug(toLog, section='MAIN'):
     log(toLog, DEBUG, section)


 def postprocess(toLog, section='POSTPROCESS'):
     log(toLog, POSTPROCESS, section)


 def db(toLog, section='DB'):
     log(toLog, DB, section)


 def log_error_and_exit(error_msg):
     ntm_log_instance.log_error_and_exit(error_msg)


 def close():
     ntm_log_instance.close_log()
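A small sketch of how the DispatchingFormatter above routes records: its dict is keyed by logger name and the second argument is the fallback. The handler wiring and level choices here are illustrative, not the module's actual setup, and assume the class above is in scope:

import logging

handler = logging.StreamHandler()
handler.setFormatter(DispatchingFormatter(
    {'postprocess': logging.Formatter('[%(asctime)s] [%(levelname)s]::%(message)s', '%H:%M:%S')},
    logging.Formatter('%(message)s')))
logging.getLogger().addHandler(handler)

logging.getLogger('postprocess').warning("uses the postprocess format")
logging.getLogger('anything.else').warning("falls back to the default format")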
@ -1,103 +1,96 @@
import urllib # coding=utf-8
import core
import requests import requests
from six import iteritems
import core
from core import logger from core import logger
def autoFork(section, inputCategory): def autoFork(section, inputCategory):
# auto-detect correct section # auto-detect correct section
# config settings # config settings
try:
host = core.CFG[section][inputCategory]["host"]
port = core.CFG[section][inputCategory]["port"]
except:
host = None
port = None
try: cfg = dict(core.CFG[section][inputCategory])
username = core.CFG[section][inputCategory]["username"]
password = core.CFG[section][inputCategory]["password"]
except:
username = None
password = None
host = cfg.get("host")
port = cfg.get("port")
username = cfg.get("username")
password = cfg.get("password")
apikey = cfg.get("apikey")
ssl = int(cfg.get("ssl", 0))
web_root = cfg.get("web_root", "")
try: try:
apikey = core.CFG[section][inputCategory]["apikey"] fork = core.FORKS.items()[core.FORKS.keys().index(cfg.get("fork", "auto"))]
except:
apikey = None
try:
ssl = int(core.CFG[section][inputCategory]["ssl"])
except:
ssl = 0
try:
web_root = core.CFG[section][inputCategory]["web_root"]
except:
web_root = ""
try:
fork = core.FORKS.items()[core.FORKS.keys().index(core.CFG[section][inputCategory]["fork"])]
except: except:
fork = "auto" fork = "auto"
protocol = "https://" if ssl else "http://"
if ssl:
protocol = "https://"
else:
protocol = "http://"
detected = False detected = False
if section == "NzbDrone": if section == "NzbDrone":
logger.info("Attempting to verify %s fork" % inputCategory) logger.info("Attempting to verify {category} fork".format
url = "%s%s:%s%s/api/rootfolder" % (protocol,host,port,web_root) (category=inputCategory))
headers={"X-Api-Key": apikey} url = "{protocol}{host}:{port}{root}/api/rootfolder".format(
protocol=protocol, host=host, port=port, root=web_root)
headers = {"X-Api-Key": apikey}
try: try:
r = requests.get(url, headers=headers, stream=True, verify=False) r = requests.get(url, headers=headers, stream=True, verify=False)
except requests.ConnectionError: except requests.ConnectionError:
logger.warning("Could not connect to %s:%s to verify fork!" % (section, inputCategory)) logger.warning("Could not connect to {0}:{1} to verify fork!".format(section, inputCategory))
if not r.ok: if not r.ok:
logger.warning("Connection to %s:%s failed! Check your configuration" % (section, inputCategory)) logger.warning("Connection to {section}:{category} failed! "
"Check your configuration".format
(section=section, category=inputCategory))
fork = ['default', {}] fork = ['default', {}]
elif fork == "auto": elif fork == "auto":
params = core.ALL_FORKS params = core.ALL_FORKS
rem_params = [] rem_params = []
logger.info("Attempting to auto-detect %s fork" % inputCategory) logger.info("Attempting to auto-detect {category} fork".format(category=inputCategory))
# define the order to test. Default must be first since the default fork doesn't reject parameters. # define the order to test. Default must be first since the default fork doesn't reject parameters.
# then in order of most unique parameters. # then in order of most unique parameters.
url = "%s%s:%s%s/home/postprocess/" % (protocol,host,port,web_root) url = "{protocol}{host}:{port}{root}/home/postprocess/".format(
protocol=protocol, host=host, port=port, root=web_root)
# attempting to auto-detect fork # attempting to auto-detect fork
try: try:
if username and password: if username and password:
s = requests.Session() s = requests.Session()
login = "%s%s:%s%s/login" % (protocol,host,port,web_root) login = "{protocol}{host}:{port}{root}/login".format(
protocol=protocol, host=host, port=port, root=web_root)
login_params = {'username': username, 'password': password} login_params = {'username': username, 'password': password}
s.post(login, data=login_params, stream=True, verify=False) s.post(login, data=login_params, stream=True, verify=False)
r = s.get(url, auth=(username, password), verify=False) r = s.get(url, auth=(username, password), verify=False)
else: else:
r = requests.get(url, verify=False) r = requests.get(url, verify=False)
except requests.ConnectionError: except requests.ConnectionError:
logger.info("Could not connect to %s:%s to perform auto-fork detection!" % (section, inputCategory)) logger.info("Could not connect to {section}:{category} to perform auto-fork detection!".format
(section=section, category=inputCategory))
r = [] r = []
if r and r.ok: if r and r.ok:
for param in params: for param in params:
if not 'name="%s"' %(param) in r.text: if not 'name="{param}"'.format(param=param) in r.text:
rem_params.append(param) rem_params.append(param)
for param in rem_params: for param in rem_params:
params.pop(param) params.pop(param)
for fork in sorted(core.FORKS.iteritems(), reverse=False): for fork in sorted(iteritems(core.FORKS), reverse=False):
if params == fork[1]: if params == fork[1]:
detected = True detected = True
break break
if detected: if detected:
logger.info("%s:%s fork auto-detection successful ..." % (section, inputCategory)) logger.info("{section}:{category} fork auto-detection successful ...".format
(section=section, category=inputCategory))
elif rem_params: elif rem_params:
logger.info("%s:%s fork auto-detection found custom params %s" % (section, inputCategory, params)) logger.info("{section}:{category} fork auto-detection found custom params {params}".format
(section=section, category=inputCategory, params=params))
fork = ['custom', params] fork = ['custom', params]
else: else:
logger.info("%s:%s fork auto-detection failed" % (section, inputCategory)) logger.info("{section}:{category} fork auto-detection failed".format
(section=section, category=inputCategory))
fork = core.FORKS.items()[core.FORKS.keys().index(core.FORK_DEFAULT)] fork = core.FORKS.items()[core.FORKS.keys().index(core.FORK_DEFAULT)]
logger.info("%s:%s fork set to %s" % (section, inputCategory, fork[0])) logger.info("{section}:{category} fork set to {fork}".format
return fork[0], fork[1] (section=section, category=inputCategory, fork=fork[0]))
return fork[0], fork[1]
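The auto-detection above boils down to: fetch the fork's postprocess form and keep only the parameters whose input name actually appears in the rendered HTML. A standalone sketch of that probe; the function name, URL, and candidate list are placeholders, not values from the commit:

import requests

def detect_fork_params(url, candidates):
    html = requests.get(url, verify=False).text
    # Keep a param only if the page renders an <input> with that name,
    # mirroring the 'name="<param>"' substring test above.
    return {p: None for p in candidates if 'name="{0}"'.format(p) in html}

detect_fork_params('http://localhost:8081/home/postprocess/',
                   ['dir', 'nzbName', 'quiet', 'failed'])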
@@ -1,3 +1,6 @@
+# coding=utf-8
+from six import iteritems
+
 import os
 import shutil
 import copy
@@ -7,13 +10,15 @@ from core import logger
 from itertools import chain


-class Section(configobj.Section):
+class Section(configobj.Section, object):
     def isenabled(section):
         # checks if subsection enabled, returns true/false if subsection specified otherwise returns true/false in {}
         if not section.sections:
             try:
                 value = list(ConfigObj.find_key(section, 'enabled'))[0]
-            except:value = 0
+            except:
+                value = 0
             if int(value) == 1:
                 return section
         else:
@@ -22,7 +27,8 @@ class Section(configobj.Section):
             for subsection in subsections:
                 try:
                     value = list(ConfigObj.find_key(subsections, 'enabled'))[0]
-                except:value = 0
+                except:
+                    value = 0

                 if int(value) != 1:
                     del to_return[section_name][subsection]
@@ -38,7 +44,8 @@ class Section(configobj.Section):
         for subsection in to_return:
             try:
                 value = list(ConfigObj.find_key(to_return[subsection], key))[0]
-            except:value = None
+            except:
+                value = None

             if not value:
                 del to_return[subsection]
@@ -79,6 +86,7 @@ class Section(configobj.Section):
         return to_return


 class ConfigObj(configobj.ConfigObj, Section):
     def __init__(self, *args, **kw):
         if len(args) == 0:
@@ -110,16 +118,16 @@ class ConfigObj(configobj.ConfigObj, Section):
             if not os.path.isfile(core.CONFIG_FILE):
                 shutil.copyfile(core.CONFIG_SPEC_FILE, core.CONFIG_FILE)
             CFG_OLD = config(core.CONFIG_FILE)
-        except Exception, e:
-            logger.debug("Error %s when copying to .cfg" % (e))
+        except Exception as error:
+            logger.debug("Error {msg} when copying to .cfg".format(msg=error))

         try:
             # check for autoProcessMedia.cfg.spec and create if it does not exist
             if not os.path.isfile(core.CONFIG_SPEC_FILE):
                 shutil.copyfile(core.CONFIG_FILE, core.CONFIG_SPEC_FILE)
             CFG_NEW = config(core.CONFIG_SPEC_FILE)
-        except Exception, e:
-            logger.debug("Error %s when copying to .spec" % (e))
+        except Exception as error:
+            logger.debug("Error {msg} when copying to .spec".format(msg=error))

         # check for autoProcessMedia.cfg and autoProcessMedia.cfg.spec and if they don't exist return and fail
         if CFG_NEW is None or CFG_OLD is None:
@@ -144,7 +152,7 @@ class ConfigObj(configobj.ConfigObj, Section):
                 continue

         def cleanup_values(values, section):
-            for option, value in values.iteritems():
+            for option, value in iteritems(values):
                 if section in ['CouchPotato']:
                     if option == ['outputDirectory']:
                         CFG_NEW['Torrent'][option] = os.path.split(os.path.normpath(value))[0]
@@ -180,7 +188,7 @@ class ConfigObj(configobj.ConfigObj, Section):
                         CFG_NEW['Posix'][option] = value
                         values.pop(option)
                 if option == "remote_path":
-                    if value and not value in ['0', '1', 0, 1]:
+                    if value and value not in ['0', '1', 0, 1]:
                         value = 1
                     elif not value:
                         value = 0
@@ -189,7 +197,8 @@ class ConfigObj(configobj.ConfigObj, Section):
                 if not list(ConfigObj.find_key(CFG_NEW, option)):
                     try:
                         values.pop(option)
-                    except: pass
+                    except:
+                        pass

             return values
@@ -220,7 +229,7 @@ class ConfigObj(configobj.ConfigObj, Section):
             subsection = None
             if section in list(chain.from_iterable(subsections.values())):
                 subsection = section
-                section = ''.join([k for k,v in subsections.iteritems() if subsection in v])
+                section = ''.join([k for k, v in iteritems(subsections) if subsection in v])
                 process_section(section, subsection)
             elif section in subsections.keys():
                 subsection = subsections[section]
@@ -229,7 +238,7 @@ class ConfigObj(configobj.ConfigObj, Section):
                 process_section(section, subsection)

         # create a backup of our old config
-        CFG_OLD.filename = core.CONFIG_FILE + ".old"
+        CFG_OLD.filename = "{config}.old".format(config=core.CONFIG_FILE)
         CFG_OLD.write()

         # write our new config to autoProcessMedia.cfg
@@ -244,13 +253,15 @@ class ConfigObj(configobj.ConfigObj, Section):
         CFG_NEW = config()

         try:
-            if os.environ.has_key('NZBPO_NDCATEGORY') and os.environ.has_key('NZBPO_SBCATEGORY'):
+            if 'NZBPO_NDCATEGORY' in os.environ and 'NZBPO_SBCATEGORY' in os.environ:
                 if os.environ['NZBPO_NDCATEGORY'] == os.environ['NZBPO_SBCATEGORY']:
-                    logger.warning("%s category is set for SickBeard and NzbDrone. Please check your config in NZBGet" % (os.environ['NZBPO_NDCATEGORY']))
+                    logger.warning("{x} category is set for SickBeard and NzbDrone. "
+                                   "Please check your config in NZBGet".format
+                                   (x=os.environ['NZBPO_NDCATEGORY']))

             section = "Nzb"
             key = 'NZBOP_DESTDIR'
-            if os.environ.has_key(key):
+            if key in os.environ:
                 option = 'default_downloadDirectory'
                 value = os.environ[key]
                 CFG_NEW[section][option] = value
@@ -259,8 +270,8 @@ class ConfigObj(configobj.ConfigObj, Section):
             envKeys = ['AUTO_UPDATE', 'CHECK_MEDIA', 'SAFE_MODE']
             cfgKeys = ['auto_update', 'check_media', 'safe_mode']
             for index in range(len(envKeys)):
-                key = 'NZBPO_' + envKeys[index]
-                if os.environ.has_key(key):
+                key = 'NZBPO_{index}'.format(index=envKeys[index])
+                if key in os.environ:
                     option = cfgKeys[index]
                     value = os.environ[key]
                     CFG_NEW[section][option] = value
@@ -269,20 +280,22 @@ class ConfigObj(configobj.ConfigObj, Section):
             envKeys = ['MOUNTPOINTS']
             cfgKeys = ['mount_points']
             for index in range(len(envKeys)):
-                key = 'NZBPO_' + envKeys[index]
-                if os.environ.has_key(key):
+                key = 'NZBPO_{index}'.format(index=envKeys[index])
+                if key in os.environ:
                     option = cfgKeys[index]
                     value = os.environ[key]
                     CFG_NEW[section][option] = value
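Every per-application block below repeats the same mapping: an NZBPO_-prefixed variable from NZBGet's environment lands in the matching config option. A generic sketch of that loop, where copy_env_options and cfg_section are illustrative names standing in for the inline code and a CFG_NEW[section] mapping:

import os

def copy_env_options(cfg_section, prefix, envKeys, cfgKeys):
    # NZBPO_<prefix><ENVKEY> in the environment -> cfg_section[<cfgKey>]
    for envKey, option in zip(envKeys, cfgKeys):
        key = 'NZBPO_{0}{1}'.format(prefix, envKey)
        if key in os.environ:
            cfg_section[option] = os.environ[key]

# e.g. the CouchPotato block below is roughly equivalent to:
# copy_env_options(CFG_NEW['CouchPotato'][category], 'CPS', envKeys, cfgKeys)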
section = "CouchPotato" section = "CouchPotato"
envCatKey = 'NZBPO_CPSCATEGORY' envCatKey = 'NZBPO_CPSCATEGORY'
envKeys = ['ENABLED', 'APIKEY', 'HOST', 'PORT', 'SSL', 'WEB_ROOT', 'METHOD', 'DELETE_FAILED', 'REMOTE_PATH', 'WAIT_FOR', 'WATCH_DIR'] envKeys = ['ENABLED', 'APIKEY', 'HOST', 'PORT', 'SSL', 'WEB_ROOT', 'METHOD', 'DELETE_FAILED', 'REMOTE_PATH',
cfgKeys = ['enabled', 'apikey', 'host', 'port', 'ssl', 'web_root', 'method', 'delete_failed', 'remote_path', 'wait_for', 'watch_dir'] 'WAIT_FOR', 'WATCH_DIR']
if os.environ.has_key(envCatKey): cfgKeys = ['enabled', 'apikey', 'host', 'port', 'ssl', 'web_root', 'method', 'delete_failed', 'remote_path',
'wait_for', 'watch_dir']
if envCatKey in os.environ:
for index in range(len(envKeys)): for index in range(len(envKeys)):
key = 'NZBPO_CPS' + envKeys[index] key = 'NZBPO_CPS{index}'.format(index=envKeys[index])
if os.environ.has_key(key): if key in os.environ:
option = cfgKeys[index] option = cfgKeys[index]
value = os.environ[key] value = os.environ[key]
if os.environ[envCatKey] not in CFG_NEW[section].sections: if os.environ[envCatKey] not in CFG_NEW[section].sections:
@ -292,12 +305,14 @@ class ConfigObj(configobj.ConfigObj, Section):
section = "SickBeard" section = "SickBeard"
envCatKey = 'NZBPO_SBCATEGORY' envCatKey = 'NZBPO_SBCATEGORY'
envKeys = ['ENABLED', 'HOST', 'PORT', 'USERNAME', 'PASSWORD', 'SSL', 'WEB_ROOT', 'WATCH_DIR', 'FORK', 'DELETE_FAILED', 'TORRENT_NOLINK', 'NZBEXTRACTIONBY', 'REMOTE_PATH', 'PROCESS_METHOD'] envKeys = ['ENABLED', 'HOST', 'PORT', 'USERNAME', 'PASSWORD', 'SSL', 'WEB_ROOT', 'WATCH_DIR', 'FORK',
cfgKeys = ['enabled', 'host', 'port', 'username', 'password', 'ssl', 'web_root', 'watch_dir', 'fork', 'delete_failed', 'Torrent_NoLink', 'nzbExtractionBy', 'remote_path', 'process_method'] 'DELETE_FAILED', 'TORRENT_NOLINK', 'NZBEXTRACTIONBY', 'REMOTE_PATH', 'PROCESS_METHOD']
if os.environ.has_key(envCatKey): cfgKeys = ['enabled', 'host', 'port', 'username', 'password', 'ssl', 'web_root', 'watch_dir', 'fork',
'delete_failed', 'Torrent_NoLink', 'nzbExtractionBy', 'remote_path', 'process_method']
if envCatKey in os.environ:
for index in range(len(envKeys)): for index in range(len(envKeys)):
key = 'NZBPO_SB' + envKeys[index] key = 'NZBPO_SB{index}'.format(index=envKeys[index])
if os.environ.has_key(key): if key in os.environ:
option = cfgKeys[index] option = cfgKeys[index]
value = os.environ[key] value = os.environ[key]
if os.environ[envCatKey] not in CFG_NEW[section].sections: if os.environ[envCatKey] not in CFG_NEW[section].sections:
@ -311,10 +326,10 @@ class ConfigObj(configobj.ConfigObj, Section):
envCatKey = 'NZBPO_HPCATEGORY' envCatKey = 'NZBPO_HPCATEGORY'
envKeys = ['ENABLED', 'APIKEY', 'HOST', 'PORT', 'SSL', 'WEB_ROOT', 'WAIT_FOR', 'WATCH_DIR', 'REMOTE_PATH'] envKeys = ['ENABLED', 'APIKEY', 'HOST', 'PORT', 'SSL', 'WEB_ROOT', 'WAIT_FOR', 'WATCH_DIR', 'REMOTE_PATH']
cfgKeys = ['enabled', 'apikey', 'host', 'port', 'ssl', 'web_root', 'wait_for', 'watch_dir', 'remote_path'] cfgKeys = ['enabled', 'apikey', 'host', 'port', 'ssl', 'web_root', 'wait_for', 'watch_dir', 'remote_path']
if os.environ.has_key(envCatKey): if envCatKey in os.environ:
for index in range(len(envKeys)): for index in range(len(envKeys)):
key = 'NZBPO_HP' + envKeys[index] key = 'NZBPO_HP{index}'.format(index=envKeys[index])
if os.environ.has_key(key): if key in os.environ:
option = cfgKeys[index] option = cfgKeys[index]
value = os.environ[key] value = os.environ[key]
if os.environ[envCatKey] not in CFG_NEW[section].sections: if os.environ[envCatKey] not in CFG_NEW[section].sections:
@ -324,12 +339,14 @@ class ConfigObj(configobj.ConfigObj, Section):
section = "Mylar" section = "Mylar"
envCatKey = 'NZBPO_MYCATEGORY' envCatKey = 'NZBPO_MYCATEGORY'
envKeys = ['ENABLED', 'HOST', 'PORT', 'USERNAME', 'PASSWORD', 'APIKEY', 'SSL', 'WEB_ROOT', 'WATCH_DIR', 'REMOTE_PATH'] envKeys = ['ENABLED', 'HOST', 'PORT', 'USERNAME', 'PASSWORD', 'APIKEY', 'SSL', 'WEB_ROOT', 'WATCH_DIR',
cfgKeys = ['enabled', 'host', 'port', 'username', 'password', 'apikey', 'ssl', 'web_root', 'watch_dir', 'remote_path'] 'REMOTE_PATH']
if os.environ.has_key(envCatKey): cfgKeys = ['enabled', 'host', 'port', 'username', 'password', 'apikey', 'ssl', 'web_root', 'watch_dir',
'remote_path']
if envCatKey in os.environ:
for index in range(len(envKeys)): for index in range(len(envKeys)):
key = 'NZBPO_MY' + envKeys[index] key = 'NZBPO_MY{index}'.format(index=envKeys[index])
if os.environ.has_key(key): if key in os.environ:
option = cfgKeys[index] option = cfgKeys[index]
value = os.environ[key] value = os.environ[key]
if os.environ[envCatKey] not in CFG_NEW[section].sections: if os.environ[envCatKey] not in CFG_NEW[section].sections:
@ -341,10 +358,10 @@ class ConfigObj(configobj.ConfigObj, Section):
envCatKey = 'NZBPO_GZCATEGORY' envCatKey = 'NZBPO_GZCATEGORY'
envKeys = ['ENABLED', 'APIKEY', 'HOST', 'PORT', 'SSL', 'WEB_ROOT', 'WATCH_DIR', 'LIBRARY', 'REMOTE_PATH'] envKeys = ['ENABLED', 'APIKEY', 'HOST', 'PORT', 'SSL', 'WEB_ROOT', 'WATCH_DIR', 'LIBRARY', 'REMOTE_PATH']
cfgKeys = ['enabled', 'apikey', 'host', 'port', 'ssl', 'web_root', 'watch_dir', 'library', 'remote_path'] cfgKeys = ['enabled', 'apikey', 'host', 'port', 'ssl', 'web_root', 'watch_dir', 'library', 'remote_path']
if os.environ.has_key(envCatKey): if envCatKey in os.environ:
for index in range(len(envKeys)): for index in range(len(envKeys)):
key = 'NZBPO_GZ' + envKeys[index] key = 'NZBPO_GZ{index}'.format(index=envKeys[index])
if os.environ.has_key(key): if key in os.environ:
option = cfgKeys[index] option = cfgKeys[index]
value = os.environ[key] value = os.environ[key]
if os.environ[envCatKey] not in CFG_NEW[section].sections: if os.environ[envCatKey] not in CFG_NEW[section].sections:
@ -354,12 +371,14 @@ class ConfigObj(configobj.ConfigObj, Section):
section = "NzbDrone" section = "NzbDrone"
envCatKey = 'NZBPO_NDCATEGORY' envCatKey = 'NZBPO_NDCATEGORY'
envKeys = ['ENABLED', 'HOST', 'APIKEY', 'PORT', 'SSL', 'WEB_ROOT', 'WATCH_DIR', 'FORK', 'DELETE_FAILED', 'TORRENT_NOLINK', 'NZBEXTRACTIONBY', 'WAIT_FOR', 'DELETE_FAILED', 'REMOTE_PATH'] envKeys = ['ENABLED', 'HOST', 'APIKEY', 'PORT', 'SSL', 'WEB_ROOT', 'WATCH_DIR', 'FORK', 'DELETE_FAILED',
cfgKeys = ['enabled', 'host', 'apikey', 'port', 'ssl', 'web_root', 'watch_dir', 'fork', 'delete_failed', 'Torrent_NoLink', 'nzbExtractionBy', 'wait_for', 'delete_failed', 'remote_path'] 'TORRENT_NOLINK', 'NZBEXTRACTIONBY', 'WAIT_FOR', 'DELETE_FAILED', 'REMOTE_PATH']
if os.environ.has_key(envCatKey): cfgKeys = ['enabled', 'host', 'apikey', 'port', 'ssl', 'web_root', 'watch_dir', 'fork', 'delete_failed',
'Torrent_NoLink', 'nzbExtractionBy', 'wait_for', 'delete_failed', 'remote_path']
if envCatKey in os.environ:
for index in range(len(envKeys)): for index in range(len(envKeys)):
key = 'NZBPO_ND' + envKeys[index] key = 'NZBPO_ND{index}'.format(index=envKeys[index])
if os.environ.has_key(key): if key in os.environ:
option = cfgKeys[index] option = cfgKeys[index]
value = os.environ[key] value = os.environ[key]
if os.environ[envCatKey] not in CFG_NEW[section].sections: if os.environ[envCatKey] not in CFG_NEW[section].sections:
@ -373,8 +392,8 @@ class ConfigObj(configobj.ConfigObj, Section):
envKeys = ['COMPRESSEDEXTENSIONS', 'MEDIAEXTENSIONS', 'METAEXTENSIONS'] envKeys = ['COMPRESSEDEXTENSIONS', 'MEDIAEXTENSIONS', 'METAEXTENSIONS']
cfgKeys = ['compressedExtensions', 'mediaExtensions', 'metaExtensions'] cfgKeys = ['compressedExtensions', 'mediaExtensions', 'metaExtensions']
for index in range(len(envKeys)): for index in range(len(envKeys)):
key = 'NZBPO_' + envKeys[index] key = 'NZBPO_{index}'.format(index=envKeys[index])
if os.environ.has_key(key): if key in os.environ:
option = cfgKeys[index] option = cfgKeys[index]
value = os.environ[key] value = os.environ[key]
CFG_NEW[section][option] = value CFG_NEW[section][option] = value
@@ -383,26 +402,36 @@ class ConfigObj(configobj.ConfigObj, Section):
             envKeys = ['NICENESS', 'IONICE_CLASS', 'IONICE_CLASSDATA']
             cfgKeys = ['niceness', 'ionice_class', 'ionice_classdata']
             for index in range(len(envKeys)):
-                key = 'NZBPO_' + envKeys[index]
-                if os.environ.has_key(key):
+                key = 'NZBPO_{index}'.format(index=envKeys[index])
+                if key in os.environ:
                     option = cfgKeys[index]
                     value = os.environ[key]
                     CFG_NEW[section][option] = value

             section = "Transcoder"
-            envKeys = ['TRANSCODE', 'DUPLICATE', 'IGNOREEXTENSIONS', 'OUTPUTFASTSTART', 'OUTPUTVIDEOPATH', 'PROCESSOUTPUT', 'AUDIOLANGUAGE', 'ALLAUDIOLANGUAGES', 'SUBLANGUAGES',
-                       'ALLSUBLANGUAGES', 'EMBEDSUBS', 'BURNINSUBTITLE', 'EXTRACTSUBS', 'EXTERNALSUBDIR', 'OUTPUTDEFAULT', 'OUTPUTVIDEOEXTENSION', 'OUTPUTVIDEOCODEC', 'VIDEOCODECALLOW',
-                       'OUTPUTVIDEOPRESET', 'OUTPUTVIDEOFRAMERATE', 'OUTPUTVIDEOBITRATE', 'OUTPUTAUDIOCODEC', 'AUDIOCODECALLOW', 'OUTPUTAUDIOBITRATE', 'OUTPUTQUALITYPERCENT', 'GETSUBS',
-                       'OUTPUTAUDIOTRACK2CODEC', 'AUDIOCODEC2ALLOW', 'OUTPUTAUDIOTRACK2BITRATE', 'OUTPUTAUDIOOTHERCODEC', 'AUDIOOTHERCODECALLOW', 'OUTPUTAUDIOOTHERBITRATE',
-                       'OUTPUTSUBTITLECODEC', 'OUTPUTAUDIOCHANNELS', 'OUTPUTAUDIOTRACK2CHANNELS', 'OUTPUTAUDIOOTHERCHANNELS']
-            cfgKeys = ['transcode', 'duplicate', 'ignoreExtensions', 'outputFastStart', 'outputVideoPath', 'processOutput', 'audioLanguage', 'allAudioLanguages', 'subLanguages',
-                       'allSubLanguages', 'embedSubs', 'burnInSubtitle', 'extractSubs', 'externalSubDir', 'outputDefault', 'outputVideoExtension', 'outputVideoCodec', 'VideoCodecAllow',
-                       'outputVideoPreset', 'outputVideoFramerate', 'outputVideoBitrate', 'outputAudioCodec', 'AudioCodecAllow', 'outputAudioBitrate', 'outputQualityPercent', 'getSubs',
-                       'outputAudioTrack2Codec', 'AudioCodec2Allow', 'outputAudioTrack2Bitrate', 'outputAudioOtherCodec', 'AudioOtherCodecAllow', 'outputAudioOtherBitrate',
-                       'outputSubtitleCodec', 'outputAudioChannels', 'outputAudioTrack2Channels', 'outputAudioOtherChannels']
+            envKeys = ['TRANSCODE', 'DUPLICATE', 'IGNOREEXTENSIONS', 'OUTPUTFASTSTART', 'OUTPUTVIDEOPATH',
+                       'PROCESSOUTPUT', 'AUDIOLANGUAGE', 'ALLAUDIOLANGUAGES', 'SUBLANGUAGES',
+                       'ALLSUBLANGUAGES', 'EMBEDSUBS', 'BURNINSUBTITLE', 'EXTRACTSUBS', 'EXTERNALSUBDIR',
+                       'OUTPUTDEFAULT', 'OUTPUTVIDEOEXTENSION', 'OUTPUTVIDEOCODEC', 'VIDEOCODECALLOW',
+                       'OUTPUTVIDEOPRESET', 'OUTPUTVIDEOFRAMERATE', 'OUTPUTVIDEOBITRATE', 'OUTPUTAUDIOCODEC',
+                       'AUDIOCODECALLOW', 'OUTPUTAUDIOBITRATE', 'OUTPUTQUALITYPERCENT', 'GETSUBS',
+                       'OUTPUTAUDIOTRACK2CODEC', 'AUDIOCODEC2ALLOW', 'OUTPUTAUDIOTRACK2BITRATE',
+                       'OUTPUTAUDIOOTHERCODEC', 'AUDIOOTHERCODECALLOW', 'OUTPUTAUDIOOTHERBITRATE',
+                       'OUTPUTSUBTITLECODEC', 'OUTPUTAUDIOCHANNELS', 'OUTPUTAUDIOTRACK2CHANNELS',
+                       'OUTPUTAUDIOOTHERCHANNELS']
+            cfgKeys = ['transcode', 'duplicate', 'ignoreExtensions', 'outputFastStart', 'outputVideoPath',
+                       'processOutput', 'audioLanguage', 'allAudioLanguages', 'subLanguages',
+                       'allSubLanguages', 'embedSubs', 'burnInSubtitle', 'extractSubs', 'externalSubDir',
+                       'outputDefault', 'outputVideoExtension', 'outputVideoCodec', 'VideoCodecAllow',
+                       'outputVideoPreset', 'outputVideoFramerate', 'outputVideoBitrate', 'outputAudioCodec',
+                       'AudioCodecAllow', 'outputAudioBitrate', 'outputQualityPercent', 'getSubs',
+                       'outputAudioTrack2Codec', 'AudioCodec2Allow', 'outputAudioTrack2Bitrate',
+                       'outputAudioOtherCodec', 'AudioOtherCodecAllow', 'outputAudioOtherBitrate',
+                       'outputSubtitleCodec', 'outputAudioChannels', 'outputAudioTrack2Channels',
+                       'outputAudioOtherChannels']
             for index in range(len(envKeys)):
-                key = 'NZBPO_' + envKeys[index]
-                if os.environ.has_key(key):
+                key = 'NZBPO_{index}'.format(index=envKeys[index])
+                if key in os.environ:
                     option = cfgKeys[index]
                     value = os.environ[key]
                     CFG_NEW[section][option] = value
@ -411,20 +440,22 @@ class ConfigObj(configobj.ConfigObj, Section):
envKeys = ['WAKE', 'HOST', 'PORT', 'MAC'] envKeys = ['WAKE', 'HOST', 'PORT', 'MAC']
cfgKeys = ['wake', 'host', 'port', 'mac'] cfgKeys = ['wake', 'host', 'port', 'mac']
for index in range(len(envKeys)): for index in range(len(envKeys)):
key = 'NZBPO_WOL' + envKeys[index] key = 'NZBPO_WOL{index}'.format(index=envKeys[index])
if os.environ.has_key(key): if key in os.environ:
option = cfgKeys[index] option = cfgKeys[index]
value = os.environ[key] value = os.environ[key]
CFG_NEW[section][option] = value CFG_NEW[section][option] = value
section = "UserScript" section = "UserScript"
envCatKey = 'NZBPO_USCATEGORY' envCatKey = 'NZBPO_USCATEGORY'
envKeys = ['USER_SCRIPT_MEDIAEXTENSIONS', 'USER_SCRIPT_PATH', 'USER_SCRIPT_PARAM', 'USER_SCRIPT_RUNONCE', 'USER_SCRIPT_SUCCESSCODES', 'USER_SCRIPT_CLEAN', 'USDELAY', 'USREMOTE_PATH'] envKeys = ['USER_SCRIPT_MEDIAEXTENSIONS', 'USER_SCRIPT_PATH', 'USER_SCRIPT_PARAM', 'USER_SCRIPT_RUNONCE',
cfgKeys = ['user_script_mediaExtensions', 'user_script_path', 'user_script_param', 'user_script_runOnce', 'user_script_successCodes', 'user_script_clean', 'delay', 'remote_path'] 'USER_SCRIPT_SUCCESSCODES', 'USER_SCRIPT_CLEAN', 'USDELAY', 'USREMOTE_PATH']
if os.environ.has_key(envCatKey): cfgKeys = ['user_script_mediaExtensions', 'user_script_path', 'user_script_param', 'user_script_runOnce',
'user_script_successCodes', 'user_script_clean', 'delay', 'remote_path']
if envCatKey in os.environ:
for index in range(len(envKeys)): for index in range(len(envKeys)):
key = 'NZBPO_' + envKeys[index] key = 'NZBPO_{index}'.format(index=envKeys[index])
if os.environ.has_key(key): if key in os.environ:
option = cfgKeys[index] option = cfgKeys[index]
value = os.environ[key] value = os.environ[key]
if os.environ[envCatKey] not in CFG_NEW[section].sections: if os.environ[envCatKey] not in CFG_NEW[section].sections:
@@ -432,18 +463,19 @@ class ConfigObj(configobj.ConfigObj, Section):
                         CFG_NEW[section][os.environ[envCatKey]][option] = value
             CFG_NEW[section][os.environ[envCatKey]]['enabled'] = 1
-        except Exception, e:
-            logger.debug("Error %s when applying NZBGet config" % (e))
+        except Exception as error:
+            logger.debug("Error {msg} when applying NZBGet config".format(msg=error))

         try:
             # write our new config to autoProcessMedia.cfg
             CFG_NEW.filename = core.CONFIG_FILE
             CFG_NEW.write()
-        except Exception, e:
-            logger.debug("Error %s when writing changes to .cfg" % (e))
+        except Exception as error:
+            logger.debug("Error {msg} when writing changes to .cfg".format(msg=error))

         return CFG_NEW


configobj.Section = Section
configobj.ConfigObj = ConfigObj
config = ConfigObj
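Background for the hunks above: NZBGet exports every post-processing script option as an NZBPO_-prefixed environment variable, and these loops simply zip the upper-case env names onto the lower-case autoProcessMedia.cfg option names. A minimal standalone sketch of that mapping (simplified names, not the project's actual helper):

    import os

    cfg_section = {}                   # stands in for CFG_NEW[section]
    envKeys = ['ENABLED', 'HOST']      # option names as NZBGet exports them
    cfgKeys = ['enabled', 'host']      # matching .cfg option names

    for env_name, cfg_name in zip(envKeys, cfgKeys):
        key = 'NZBPO_WOL{0}'.format(env_name)    # e.g. NZBPO_WOLENABLED
        if key in os.environ:
            cfg_section[cfg_name] = os.environ[key]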
@@ -1,4 +1,6 @@
-from __future__ import with_statement
+# coding=utf-8
+from __future__ import print_function, with_statement

 import re
 import sqlite3
@@ -7,6 +9,7 @@ import time
 import core
 from core import logger

 def dbFilename(filename="nzbtomedia.db", suffix=None):
     """
     @param filename: The sqlite database filename to use. If not specified,
@@ -16,11 +19,11 @@ def dbFilename(filename="nzbtomedia.db", suffix=None):
     @return: the correct location of the database file.
     """
     if suffix:
-        filename = "%s.%s" % (filename, suffix)
+        filename = "{0}.{1}".format(filename, suffix)
     return core.os.path.join(core.PROGRAM_DIR, filename)
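Usage note (values follow directly from the code above): the suffix argument lets callers address versioned or backup databases alongside the main one, e.g.

    dbFilename("nzbtomedia.db", suffix="v0")   # -> <PROGRAM_DIR>/nzbtomedia.db.v0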
-class DBConnection:
+class DBConnection(object):
     def __init__(self, filename="nzbtomedia.db", suffix=None, row_type=None):
         self.filename = filename
@@ -34,7 +37,7 @@ class DBConnection:
         result = None
         try:
             result = self.select("SELECT db_version FROM db_version")
-        except sqlite3.OperationalError, e:
+        except sqlite3.OperationalError as e:
             if "no such table: db_version" in e.args[0]:
                 return 0
@@ -44,7 +47,7 @@ class DBConnection:
         return 0

     def fetch(self, query, args=None):
-        if query == None:
+        if query is None:
             return

         sqlResult = None
@@ -52,35 +55,36 @@ class DBConnection:
         while attempt < 5:
             try:
-                if args == None:
-                    logger.log(self.filename + ": " + query, logger.DB)
+                if args is None:
+                    logger.log("{name}: {query}".format(name=self.filename, query=query), logger.DB)
                     cursor = self.connection.cursor()
                     cursor.execute(query)
                     sqlResult = cursor.fetchone()[0]
                 else:
-                    logger.log(self.filename + ": " + query + " with args " + str(args), logger.DB)
+                    logger.log("{name}: {query} with args {args}".format
+                               (name=self.filename, query=query, args=args), logger.DB)
                     cursor = self.connection.cursor()
                     cursor.execute(query, args)
                     sqlResult = cursor.fetchone()[0]

                 # get out of the connection attempt loop since we were successful
                 break
-            except sqlite3.OperationalError, e:
-                if "unable to open database file" in e.args[0] or "database is locked" in e.args[0]:
-                    logger.log(u"DB error: " + str(e), logger.WARNING)
+            except sqlite3.OperationalError as error:
+                if "unable to open database file" in error.args[0] or "database is locked" in error.args[0]:
+                    logger.log(u"DB error: {msg}".format(msg=error), logger.WARNING)
                     attempt += 1
                     time.sleep(1)
                 else:
-                    logger.log(u"DB error: " + str(e), logger.ERROR)
+                    logger.log(u"DB error: {msg}".format(msg=error), logger.ERROR)
                     raise
-            except sqlite3.DatabaseError, e:
-                logger.log(u"Fatal error executing query: " + str(e), logger.ERROR)
+            except sqlite3.DatabaseError as error:
+                logger.log(u"Fatal error executing query: {msg}".format(msg=error), logger.ERROR)
                 raise

         return sqlResult
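The same five-attempt retry discipline recurs in mass_action and action below: transient sqlite3.OperationalError failures ("database is locked", "unable to open database file") are retried after a one-second sleep, while anything else is re-raised at once. A condensed, standalone sketch of the pattern (not the project's code):

    import sqlite3
    import time

    def run_with_retry(connection, query, args=(), attempts=5):
        # Retry transient lock errors; re-raise anything else immediately.
        for _ in range(attempts):
            try:
                return connection.execute(query, args)
            except sqlite3.OperationalError as error:
                if "database is locked" in error.args[0] or "unable to open database file" in error.args[0]:
                    time.sleep(1)    # brief back-off, then try again
                else:
                    raise
        # Like the methods above, fall through (returning None) once attempts are spent.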
     def mass_action(self, querylist, logTransaction=False):
-        if querylist == None:
+        if querylist is None:
             return

         sqlResult = []
@@ -95,33 +99,32 @@ class DBConnection:
                         sqlResult.append(self.connection.execute(qu[0]))
                     elif len(qu) > 1:
                         if logTransaction:
-                            logger.log(qu[0] + " with args " + str(qu[1]), logger.DEBUG)
+                            logger.log(u"{query} with args {args}".format(query=qu[0], args=qu[1]), logger.DEBUG)
                         sqlResult.append(self.connection.execute(qu[0], qu[1]))
                 self.connection.commit()
-                logger.log(u"Transaction with " + str(len(querylist)) + u" query's executed", logger.DEBUG)
+                logger.log(u"Transaction with {x} query's executed".format(x=len(querylist)), logger.DEBUG)
                 return sqlResult
-            except sqlite3.OperationalError, e:
+            except sqlite3.OperationalError as error:
                 sqlResult = []
                 if self.connection:
                     self.connection.rollback()
-                if "unable to open database file" in e.args[0] or "database is locked" in e.args[0]:
-                    logger.log(u"DB error: " + str(e), logger.WARNING)
+                if "unable to open database file" in error.args[0] or "database is locked" in error.args[0]:
+                    logger.log(u"DB error: {msg}".format(msg=error), logger.WARNING)
                     attempt += 1
                     time.sleep(1)
                 else:
-                    logger.log(u"DB error: " + str(e), logger.ERROR)
+                    logger.log(u"DB error: {msg}".format(msg=error), logger.ERROR)
                     raise
-            except sqlite3.DatabaseError, e:
+            except sqlite3.DatabaseError as error:
+                sqlResult = []
                 if self.connection:
                     self.connection.rollback()
-                logger.log(u"Fatal error executing query: " + str(e), logger.ERROR)
+                logger.log(u"Fatal error executing query: {msg}".format(msg=error), logger.ERROR)
                 raise

         return sqlResult

     def action(self, query, args=None):
-        if query == None:
+        if query is None:
             return

         sqlResult = None
@@ -129,35 +132,35 @@ class DBConnection:
         while attempt < 5:
             try:
-                if args == None:
-                    logger.log(self.filename + ": " + query, logger.DB)
+                if args is None:
+                    logger.log(u"{name}: {query}".format(name=self.filename, query=query), logger.DB)
                     sqlResult = self.connection.execute(query)
                 else:
-                    logger.log(self.filename + ": " + query + " with args " + str(args), logger.DB)
+                    logger.log(u"{name}: {query} with args {args}".format
+                               (name=self.filename, query=query, args=args), logger.DB)
                     sqlResult = self.connection.execute(query, args)
                 self.connection.commit()
                 # get out of the connection attempt loop since we were successful
                 break
-            except sqlite3.OperationalError, e:
-                if "unable to open database file" in e.args[0] or "database is locked" in e.args[0]:
-                    logger.log(u"DB error: " + str(e), logger.WARNING)
+            except sqlite3.OperationalError as error:
+                if "unable to open database file" in error.args[0] or "database is locked" in error.args[0]:
+                    logger.log(u"DB error: {msg}".format(msg=error), logger.WARNING)
                     attempt += 1
                     time.sleep(1)
                 else:
-                    logger.log(u"DB error: " + str(e), logger.ERROR)
+                    logger.log(u"DB error: {msg}".format(msg=error), logger.ERROR)
                     raise
-            except sqlite3.DatabaseError, e:
-                logger.log(u"Fatal error executing query: " + str(e), logger.ERROR)
+            except sqlite3.DatabaseError as error:
+                logger.log(u"Fatal error executing query: {msg}".format(msg=error), logger.ERROR)
                 raise

         return sqlResult

     def select(self, query, args=None):
         sqlResults = self.action(query, args).fetchall()

-        if sqlResults == None:
+        if sqlResults is None:
             return []

         return sqlResults
@@ -166,21 +169,32 @@ class DBConnection:
         changesBefore = self.connection.total_changes

-        genParams = lambda myDict: [x + " = ?" for x in myDict.keys()]
+        genParams = lambda myDict: ["{key} = ?".format(key=k) for k in myDict.keys()]

-        query = "UPDATE " + tableName + " SET " + ", ".join(genParams(valueDict)) + " WHERE " + " AND ".join(
-            genParams(keyDict))
-
-        self.action(query, valueDict.values() + keyDict.values())
+        self.action(
+            "UPDATE {table} "
+            "SET {params} "
+            "WHERE {conditions}".format(
+                table=tableName,
+                params=", ".join(genParams(valueDict)),
+                conditions=" AND ".join(genParams(keyDict))),
+            valueDict.values() + keyDict.values()
+        )

         if self.connection.total_changes == changesBefore:
-            query = "INSERT OR IGNORE INTO " + tableName + " (" + ", ".join(valueDict.keys() + keyDict.keys()) + ")" + \
-                    " VALUES (" + ", ".join(["?"] * len(valueDict.keys() + keyDict.keys())) + ")"
-            self.action(query, valueDict.values() + keyDict.values())
+            self.action(
+                "INSERT OR IGNORE INTO {table} ({columns}) "
+                "VALUES ({values})".format(
+                    table=tableName,
+                    columns=", ".join(valueDict.keys() + keyDict.keys()),
+                    values=", ".join(["?"] * len(valueDict.keys() + keyDict.keys()))
+                )
+                , valueDict.values() + keyDict.values()
+            )
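To make the rewritten upsert concrete, here is what it builds for sample inputs (illustration only; the table and values are made up):

    # valueDict = {"status": 0}, keyDict = {"input_hash": "abc"}, tableName = "downloads"
    # First pass:
    #     UPDATE downloads SET status = ? WHERE input_hash = ?       args: [0, "abc"]
    # Second pass, only if connection.total_changes is unchanged (no row matched):
    #     INSERT OR IGNORE INTO downloads (status, input_hash) VALUES (?, ?)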
     def tableInfo(self, tableName):
         # FIXME ? binding is not supported here, but I cannot find a way to escape a string manually
-        cursor = self.connection.execute("PRAGMA table_info(%s)" % tableName)
+        cursor = self.connection.execute("PRAGMA table_info({0})".format(tableName))
         columns = {}
         for column in cursor:
             columns[column['name']] = {'type': column['type']}
@@ -221,17 +235,22 @@ def prettyName(class_name):

 def _processUpgrade(connection, upgradeClass):
     instance = upgradeClass(connection)
-    logger.log(u"Checking " + prettyName(upgradeClass.__name__) + " database upgrade", logger.DEBUG)
+    logger.log(u"Checking {name} database upgrade".format
+               (name=prettyName(upgradeClass.__name__)), logger.DEBUG)
     if not instance.test():
-        logger.log(u"Database upgrade required: " + prettyName(upgradeClass.__name__), logger.MESSAGE)
+        logger.log(u"Database upgrade required: {name}".format
+                   (name=prettyName(upgradeClass.__name__)), logger.MESSAGE)
         try:
             instance.execute()
-        except sqlite3.DatabaseError, e:
-            print "Error in " + str(upgradeClass.__name__) + ": " + str(e)
+        except sqlite3.DatabaseError as error:
+            print(u"Error in {name}: {msg}".format
+                  (name=upgradeClass.__name__, msg=error))
             raise
-        logger.log(upgradeClass.__name__ + " upgrade completed", logger.DEBUG)
+        logger.log(u"{name} upgrade completed".format
+                   (name=upgradeClass.__name__), logger.DEBUG)
     else:
-        logger.log(upgradeClass.__name__ + " upgrade not required", logger.DEBUG)
+        logger.log(u"{name} upgrade not required".format
+                   (name=upgradeClass.__name__), logger.DEBUG)

     for upgradeSubClass in upgradeClass.__subclasses__():
         _processUpgrade(connection, upgradeSubClass)
@@ -243,14 +262,14 @@ class SchemaUpgrade(object):
         self.connection = connection

     def hasTable(self, tableName):
-        return len(self.connection.action("SELECT 1 FROM sqlite_master WHERE name = ?;", (tableName, )).fetchall()) > 0
+        return len(self.connection.action("SELECT 1 FROM sqlite_master WHERE name = ?;", (tableName,)).fetchall()) > 0

     def hasColumn(self, tableName, column):
         return column in self.connection.tableInfo(tableName)

     def addColumn(self, table, column, type="NUMERIC", default=0):
-        self.connection.action("ALTER TABLE %s ADD %s %s" % (table, column, type))
-        self.connection.action("UPDATE %s SET %s = ?" % (table, column), (default,))
+        self.connection.action("ALTER TABLE {0} ADD {1} {2}".format(table, column, type))
+        self.connection.action("UPDATE {0} SET {1} = ?".format(table, column), (default,))

     def checkDBVersion(self):
         result = self.connection.select("SELECT db_version FROM db_version")
@@ -263,4 +282,3 @@ class SchemaUpgrade(object):
         new_version = self.checkDBVersion() + 1
         self.connection.action("UPDATE db_version SET db_version = ?", [new_version])
         return new_version
@@ -1,23 +1,29 @@
+# coding=utf-8
 import os
 import re
 import core
 import shlex

 from core import logger
 from core.nzbToMediaUtil import listMediaFiles

-reverse_list = [r"\.\d{2}e\d{2}s\.", r"\.[pi]0801\.", r"\.p027\.", r"\.[pi]675\.", r"\.[pi]084\.", r"\.p063\.", r"\b[45]62[xh]\.", r"\.yarulb\.", r"\.vtd[hp]\.",
-                r"\.ld[.-]?bew\.", r"\.pir.?(dov|dvd|bew|db|rb)\.", r"\brdvd\.", r"\.vts\.", r"\.reneercs\.", r"\.dcv\.", r"\b(pir|mac)dh\b", r"\.reporp\.", r"\.kcaper\.",
+reverse_list = [r"\.\d{2}e\d{2}s\.", r"\.[pi]0801\.", r"\.p027\.", r"\.[pi]675\.", r"\.[pi]084\.", r"\.p063\.",
+                r"\b[45]62[xh]\.", r"\.yarulb\.", r"\.vtd[hp]\.",
+                r"\.ld[.-]?bew\.", r"\.pir.?(dov|dvd|bew|db|rb)\.", r"\brdvd\.", r"\.vts\.", r"\.reneercs\.",
+                r"\.dcv\.", r"\b(pir|mac)dh\b", r"\.reporp\.", r"\.kcaper\.",
                 r"\.lanretni\.", r"\b3ca\b", r"\.cstn\."]
 reverse_pattern = re.compile('|'.join(reverse_list), flags=re.IGNORECASE)
 season_pattern = re.compile(r"(.*\.\d{2}e\d{2}s\.)(.*)", flags=re.IGNORECASE)
 word_pattern = re.compile(r"([^A-Z0-9]*[A-Z0-9]+)")
-media_list = [r"\.s\d{2}e\d{2}\.", r"\.1080[pi]\.", r"\.720p\.", r"\.576[pi]", r"\.480[pi]\.", r"\.360p\.", r"\.[xh]26[45]\b", r"\.bluray\.", r"\.[hp]dtv\.",
-              r"\.web[.-]?dl\.", r"\.(vod|dvd|web|bd|br).?rip\.", r"\.dvdr\b", r"\.stv\.", r"\.screener\.", r"\.vcd\.", r"\bhd(cam|rip)\b", r"\.proper\.", r"\.repack\.",
+media_list = [r"\.s\d{2}e\d{2}\.", r"\.1080[pi]\.", r"\.720p\.", r"\.576[pi]", r"\.480[pi]\.", r"\.360p\.",
+              r"\.[xh]26[45]\b", r"\.bluray\.", r"\.[hp]dtv\.",
+              r"\.web[.-]?dl\.", r"\.(vod|dvd|web|bd|br).?rip\.", r"\.dvdr\b", r"\.stv\.", r"\.screener\.", r"\.vcd\.",
+              r"\bhd(cam|rip)\b", r"\.proper\.", r"\.repack\.",
              r"\.internal\.", r"\bac3\b", r"\.ntsc\.", r"\.pal\.", r"\.secam\.", r"\bdivx\b", r"\bxvid\b"]
 media_pattern = re.compile('|'.join(media_list), flags=re.IGNORECASE)
 garbage_name = re.compile(r"^[a-zA-Z0-9]*$")
-char_replace = [[r"(\w)1\.(\w)",r"\1i\2"]
+char_replace = [[r"(\w)1\.(\w)", r"\1i\2"]
                 ]

 def process_all_exceptions(name, dirname):
     rename_script(dirname)
@@ -26,7 +32,7 @@ def process_all_exceptions(name, dirname):
         parentDir = os.path.dirname(filename)
         head, fileExtension = os.path.splitext(os.path.basename(filename))
         if reverse_pattern.search(head) is not None:
             exception = reverse_filename
         elif garbage_name.search(head) is not None:
             exception = replace_filename
         else:
@@ -37,7 +43,8 @@ def process_all_exceptions(name, dirname):
         if core.GROUPS:
             newfilename = strip_groups(newfilename)
         if newfilename != filename:
             rename_file(filename, newfilename)

 def strip_groups(filename):
     if not core.GROUPS:
@@ -47,33 +54,38 @@ def strip_groups(filename):
     newname = head.replace(' ', '.')
     for group in core.GROUPS:
         newname = newname.replace(group, '')
         newname = newname.replace('[]', '')
     newfile = newname + fileExtension
     newfilePath = os.path.join(dirname, newfile)
     return newfilePath
 def rename_file(filename, newfilePath):
-    logger.debug("Replacing file name %s with download name %s" % (filename, newfilePath), "EXCEPTION")
+    logger.debug("Replacing file name {old} with download name {new}".format
+                 (old=filename, new=newfilePath), "EXCEPTION")
     try:
         os.rename(filename, newfilePath)
-    except Exception,e:
-        logger.error("Unable to rename file due to: %s" % (str(e)), "EXCEPTION")
+    except Exception as error:
+        logger.error("Unable to rename file due to: {error}".format(error=error), "EXCEPTION")

 def replace_filename(filename, dirname, name):
     head, fileExtension = os.path.splitext(os.path.basename(filename))
-    if media_pattern.search(os.path.basename(dirname).replace(' ','.')) is not None:
+    if media_pattern.search(os.path.basename(dirname).replace(' ', '.')) is not None:
         newname = os.path.basename(dirname).replace(' ', '.')
-        logger.debug("Replacing file name %s with directory name %s" % (head, newname), "EXCEPTION")
-    elif media_pattern.search(name.replace(' ','.').lower()) is not None:
+        logger.debug("Replacing file name {old} with directory name {new}".format(old=head, new=newname), "EXCEPTION")
+    elif media_pattern.search(name.replace(' ', '.').lower()) is not None:
         newname = name.replace(' ', '.')
-        logger.debug("Replacing file name %s with download name %s" % (head, newname), "EXCEPTION")
+        logger.debug("Replacing file name {old} with download name {new}".format
+                     (old=head, new=newname), "EXCEPTION")
     else:
-        logger.warning("No name replacement determined for %s" % (head), "EXCEPTION")
+        logger.warning("No name replacement determined for {name}".format(name=head), "EXCEPTION")
         newname = name
     newfile = newname + fileExtension
     newfilePath = os.path.join(dirname, newfile)
     return newfilePath
@@ -84,29 +96,31 @@ def reverse_filename(filename, dirname, name):
             for wp in word_p:
                 if wp[0] == ".":
                     new_words += "."
-                new_words += re.sub(r"\W","",wp)
+                new_words += re.sub(r"\W", "", wp)
         else:
             new_words = na_parts.group(2)
         for cr in char_replace:
-            new_words = re.sub(cr[0],cr[1],new_words)
+            new_words = re.sub(cr[0], cr[1], new_words)
         newname = new_words[::-1] + na_parts.group(1)[::-1]
     else:
         newname = head[::-1].title()
     newname = newname.replace(' ', '.')
-    logger.debug("Reversing filename %s to %s" % (head, newname), "EXCEPTION")
+    logger.debug("Reversing filename {old} to {new}".format
+                 (old=head, new=newname), "EXCEPTION")
     newfile = newname + fileExtension
     newfilePath = os.path.join(dirname, newfile)
     return newfilePath
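Worked example of the un-reversing (made-up name; ignoring the word-level cleanup in the first branch): a release originally called showname.s01e02.x264 arrives reversed as 462x.20e10s.emanwohs. season_pattern captures group(1) = "462x.20e10s." and group(2) = "emanwohs", and reversing each half restores the original:

    print("emanwohs"[::-1] + "462x.20e10s."[::-1])   # showname.s01e02.x264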
 def rename_script(dirname):
     rename_file = ""
     for dir, dirs, files in os.walk(dirname):
         for file in files:
-            if re.search('(rename\S*\.(sh|bat)$)',file,re.IGNORECASE):
+            if re.search('(rename\S*\.(sh|bat)$)', file, re.IGNORECASE):
                 rename_file = os.path.join(dir, file)
                 dirname = dir
                 break
     if rename_file:
         rename_lines = [line.strip() for line in open(rename_file)]
         for line in rename_lines:
             if re.search('^(mv|Move)', line, re.IGNORECASE):
@@ -118,13 +132,13 @@ def rename_script(dirname):
                 dest = os.path.join(dirname, cmd[1].split('\\')[-1].split('/')[-1])
                 if os.path.isfile(dest):
                     continue
-                logger.debug("Renaming file %s to %s" % (orig, dest), "EXCEPTION")
+                logger.debug("Renaming file {source} to {destination}".format
+                             (source=orig, destination=dest), "EXCEPTION")
                 try:
                     os.rename(orig, dest)
-                except Exception,e:
-                    logger.error("Unable to rename file due to: %s" % (str(e)), "EXCEPTION")
+                except Exception as error:
+                    logger.error("Unable to rename file due to: {error}".format(error=error), "EXCEPTION")
 # dict for custom groups
 # we can add more to this list
-#__customgroups__ = {'Q o Q': process_qoq, '-ECI': process_eci}
+# _customgroups = {'Q o Q': process_qoq, '-ECI': process_eci}
@@ -1,3 +1,4 @@
+# coding=utf-8
 import os
 import core
 from subprocess import Popen
@@ -5,45 +6,43 @@ from core.transcoder import transcoder
 from core.nzbToMediaUtil import import_subs, listMediaFiles, rmDir
 from core import logger

 def external_script(outputDestination, torrentName, torrentLabel, settings):
     final_result = 0  # start at 0.
     num_files = 0
     try:
         core.USER_SCRIPT_MEDIAEXTENSIONS = settings["user_script_mediaExtensions"]
-        if isinstance(core.USER_SCRIPT_MEDIAEXTENSIONS, str): core.USER_SCRIPT_MEDIAEXTENSIONS = core.USER_SCRIPT_MEDIAEXTENSIONS.split(',')
+        if isinstance(core.USER_SCRIPT_MEDIAEXTENSIONS, str):
+            core.USER_SCRIPT_MEDIAEXTENSIONS = core.USER_SCRIPT_MEDIAEXTENSIONS.split(',')
     except:
         core.USER_SCRIPT_MEDIAEXTENSIONS = []

-    try:
-        core.USER_SCRIPT = settings["user_script_path"]
-    except:
-        core.USER_SCRIPT = None
-    if core.USER_SCRIPT is None or core.USER_SCRIPT == "None":  # do nothing and return success.
+    core.USER_SCRIPT = settings.get("user_script_path")
+
+    if not core.USER_SCRIPT or core.USER_SCRIPT == "None":  # do nothing and return success.
         return [0, ""]
     try:
         core.USER_SCRIPT_PARAM = settings["user_script_param"]
-        if isinstance(core.USER_SCRIPT_PARAM, str): core.USER_SCRIPT_PARAM = core.USER_SCRIPT_PARAM.split(',')
+        if isinstance(core.USER_SCRIPT_PARAM, str):
+            core.USER_SCRIPT_PARAM = core.USER_SCRIPT_PARAM.split(',')
     except:
         core.USER_SCRIPT_PARAM = []
     try:
         core.USER_SCRIPT_SUCCESSCODES = settings["user_script_successCodes"]
-        if isinstance(core.USER_SCRIPT_SUCCESSCODES, str): core.USER_SCRIPT_SUCCESSCODES = core.USER_SCRIPT_SUCCESSCODES.split(',')
+        if isinstance(core.USER_SCRIPT_SUCCESSCODES, str):
+            core.USER_SCRIPT_SUCCESSCODES = core.USER_SCRIPT_SUCCESSCODES.split(',')
     except:
         core.USER_SCRIPT_SUCCESSCODES = 0

-    try:
-        core.USER_SCRIPT_CLEAN = int(settings["user_script_clean"])
-    except:
-        core.USER_SCRIPT_CLEAN = 1
-    try:
-        core.USER_SCRIPT_RUNONCE = int(settings["user_script_runOnce"])
-    except:
-        core.USER_SCRIPT_RUNONCE = 1
+    core.USER_SCRIPT_CLEAN = int(settings.get("user_script_clean", 1))
+    core.USER_SCRIPT_RUNONCE = int(settings.get("user_script_runOnce", 1))

     if core.CHECK_MEDIA:
         for video in listMediaFiles(outputDestination, media=True, audio=False, meta=False, archives=False):
             if transcoder.isVideoGood(video, 0):
                 import_subs(video)
             else:
-                logger.info("Corrupt video file found %s. Deleting." % (video), "USERSCRIPT")
+                logger.info("Corrupt video file found {0}. Deleting.".format(video), "USERSCRIPT")
                 os.unlink(video)

     for dirpath, dirnames, filenames in os.walk(outputDestination):
@@ -53,66 +52,65 @@ def external_script(outputDestination, torrentName, torrentLabel, settings):
             fileName, fileExtension = os.path.splitext(file)

             if fileExtension in core.USER_SCRIPT_MEDIAEXTENSIONS or "ALL" in core.USER_SCRIPT_MEDIAEXTENSIONS:
-                num_files = num_files + 1
+                num_files += 1
                 if core.USER_SCRIPT_RUNONCE == 1 and num_files > 1:  # we have already run once, so just continue to get number of files.
                     continue
                 command = [core.USER_SCRIPT]
                 for param in core.USER_SCRIPT_PARAM:
                     if param == "FN":
-                        command.append('%s' % file)
+                        command.append('{0}'.format(file))
                         continue
                     elif param == "FP":
-                        command.append('%s' % filePath)
+                        command.append('{0}'.format(filePath))
                         continue
                     elif param == "TN":
-                        command.append('%s' % torrentName)
+                        command.append('{0}'.format(torrentName))
                         continue
                     elif param == "TL":
-                        command.append('%s' % torrentLabel)
+                        command.append('{0}'.format(torrentLabel))
                         continue
                     elif param == "DN":
                         if core.USER_SCRIPT_RUNONCE == 1:
-                            command.append('%s' % outputDestination)
+                            command.append('{0}'.format(outputDestination))
                         else:
-                            command.append('%s' % dirpath)
+                            command.append('{0}'.format(dirpath))
                         continue
                     else:
                         command.append(param)
                         continue
                 cmd = ""
                 for item in command:
-                    cmd = cmd + " " + item
-                logger.info("Running script %s on file %s." % (cmd, filePath), "USERSCRIPT")
+                    cmd = "{cmd} {item}".format(cmd=cmd, item=item)
+                logger.info("Running script {cmd} on file {path}.".format(cmd=cmd, path=filePath), "USERSCRIPT")
                 try:
                     p = Popen(command)
                     res = p.wait()
                     if str(res) in core.USER_SCRIPT_SUCCESSCODES:  # Linux returns 0 for successful.
-                        logger.info("UserScript %s was successfull" % (command[0]))
+                        logger.info("UserScript {0} was successfull".format(command[0]))
                         result = 0
                     else:
-                        logger.error("UserScript %s has failed with return code: %s" % (command[0], res), "USERSCRIPT")
+                        logger.error("UserScript {0} has failed with return code: {1}".format(command[0], res), "USERSCRIPT")
                         logger.info(
-                            "If the UserScript completed successfully you should add %s to the user_script_successCodes" % (
-                            res), "USERSCRIPT")
+                            "If the UserScript completed successfully you should add {0} to the user_script_successCodes".format(
+                                res), "USERSCRIPT")
                         result = int(1)
                 except:
-                    logger.error("UserScript %s has failed" % (command[0]), "USERSCRIPT")
+                    logger.error("UserScript {0} has failed".format(command[0]), "USERSCRIPT")
                     result = int(1)
-                final_result = final_result + result
+                final_result += result

     num_files_new = 0
     for dirpath, dirnames, filenames in os.walk(outputDestination):
         for file in filenames:
-            filePath = core.os.path.join(dirpath, file)
             fileName, fileExtension = os.path.splitext(file)

             if fileExtension in core.USER_SCRIPT_MEDIAEXTENSIONS or core.USER_SCRIPT_MEDIAEXTENSIONS == "ALL":
-                num_files_new = num_files_new + 1
+                num_files_new += 1

     if core.USER_SCRIPT_CLEAN == int(1) and num_files_new == 0 and final_result == 0:
-        logger.info("All files have been processed. Cleaning outputDirectory %s" % (outputDestination))
+        logger.info("All files have been processed. Cleaning outputDirectory {0}".format(outputDestination))
         rmDir(outputDestination)
     elif core.USER_SCRIPT_CLEAN == int(1) and num_files_new != 0:
-        logger.info("%s files were processed, but %s still remain. outputDirectory will not be cleaned." % (
-            num_files, num_files_new))
+        logger.info("{0} files were processed, but {1} still remain. outputDirectory will not be cleaned.".format(
+            num_files, num_files_new))

     return [final_result, '']
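The FN/FP/TN/TL/DN entries in user_script_param are placeholders substituted per file — file name, full file path, torrent name, torrent label, and directory — and anything else is passed through verbatim. An illustration with invented values:

    # user_script_path = "/scripts/notify.sh", user_script_param = "FP,TL"
    # For file /downloads/tv/Show/ep.mkv under label "tv", the loop builds:
    command = ["/scripts/notify.sh", "/downloads/tv/Show/ep.mkv", "tv"]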
File diff suppressed because it is too large
@@ -1,3 +1,4 @@
+# coding=utf-8
 """A synchronous implementation of the Deluge RPC protocol
 based on gevent-deluge by Christopher Rosell.
@@ -14,10 +15,9 @@ Example usage:
     download_location = client.core.get_config_value("download_location").get()
 """

-from core.synchronousdeluge.exceptions import DelugeRPCError

 __title__ = "synchronous-deluge"
 __version__ = "0.1"
 __author__ = "Christian Dale"
+
+from core.synchronousdeluge.exceptions import DelugeRPCError
@@ -1,16 +1,15 @@
+# coding=utf-8
 import os
 import platform
 from collections import defaultdict
 from itertools import imap

-from exceptions import DelugeRPCError
-from protocol import DelugeRPCRequest, DelugeRPCResponse
-from transfer import DelugeTransfer
+from .exceptions import DelugeRPCError
+from .protocol import DelugeRPCRequest, DelugeRPCResponse
+from .transfer import DelugeTransfer

 __all__ = ["DelugeClient"]

 RPC_RESPONSE = 1
 RPC_ERROR = 2
 RPC_EVENT = 3
@@ -24,13 +23,13 @@ class DelugeClient(object):
         self._request_counter = 0

     def _get_local_auth(self):
-        auth_file = ""
         username = password = ""
         if platform.system() in ('Windows', 'Microsoft'):
             appDataPath = os.environ.get("APPDATA")
             if not appDataPath:
                 import _winreg
-                hkey = _winreg.OpenKey(_winreg.HKEY_CURRENT_USER, "Software\\Microsoft\\Windows\\CurrentVersion\\Explorer\\Shell Folders")
+                hkey = _winreg.OpenKey(_winreg.HKEY_CURRENT_USER,
+                                       "Software\\Microsoft\\Windows\\CurrentVersion\\Explorer\\Shell Folders")
                 appDataReg = _winreg.QueryValueEx(hkey, "AppData")
                 appDataPath = appDataReg[0]
                 _winreg.CloseKey(hkey)
@@ -40,10 +39,9 @@ class DelugeClient(object):
             from xdg.BaseDirectory import save_config_path
             try:
                 auth_file = os.path.join(save_config_path("deluge"), "auth")
-            except OSError, e:
+            except OSError:
                 return username, password

         if os.path.exists(auth_file):
             for line in open(auth_file):
                 if line.startswith("#"):
@@ -52,7 +50,7 @@ class DelugeClient(object):
                 line = line.strip()
                 try:
                     lsplit = line.split(":")
-                except Exception, e:
+                except Exception:
                     continue

                 if len(lsplit) == 2:
@@ -63,9 +61,9 @@ class DelugeClient(object):
                     continue

                 if username == "localclient":
-                    return (username, password)
+                    return username, password

-        return ("", "")
+        return "", ""
     def _create_module_method(self, module, method):
         fullname = "{0}.{1}".format(module, method)
@@ -107,20 +105,20 @@ class DelugeClient(object):
             message_type = message[0]

             # if message_type == RPC_EVENT:
             #     event = message[1]
             #     values = message[2]
             #
             #     if event in self._event_handlers:
             #         for handler in self._event_handlers[event]:
             #             gevent.spawn(handler, *values)
             #
             # elif message_type in (RPC_RESPONSE, RPC_ERROR):
             if message_type in (RPC_RESPONSE, RPC_ERROR):
                 request_id = message[1]
                 value = message[2]

-                if request_id == self._request_counter :
+                if request_id == self._request_counter:
                     if message_type == RPC_RESPONSE:
                         response.set(value)
                     elif message_type == RPC_ERROR:
@@ -159,4 +157,3 @@ class DelugeClient(object):
     def disconnect(self):
         """Disconnects from the daemon."""
         self.transfer.disconnect()
@@ -1,5 +1,7 @@
+# coding=utf-8
 __all__ = ["DelugeRPCError"]

 class DelugeRPCError(Exception):
     def __init__(self, name, msg, traceback):
         self.name = name
@@ -8,4 +10,3 @@ class DelugeRPCError(Exception):
     def __str__(self):
         return "{0}: {1}: {2}".format(self.__class__.__name__, self.name, self.msg)
@@ -1,5 +1,7 @@
+# coding=utf-8
 __all__ = ["DelugeRPCRequest", "DelugeRPCResponse"]

 class DelugeRPCRequest(object):
     def __init__(self, request_id, method, *args, **kwargs):
         self.request_id = request_id
@@ -8,7 +10,8 @@ class DelugeRPCRequest(object):
         self.kwargs = kwargs

     def format(self):
-        return (self.request_id, self.method, self.args, self.kwargs)
+        return self.request_id, self.method, self.args, self.kwargs

 class DelugeRPCResponse(object):
     def __init__(self):
@@ -35,4 +38,3 @@ class DelugeRPCResponse(object):
             return self.value
         else:
             raise self._exception
@@ -1,4 +1,4 @@
+# coding=utf-8
 """
 rencode -- Web safe object pickling/unpickling.
@@ -9,9 +9,9 @@ BitTorrent project. For complex, heterogeneous data structures with
 many small elements, r-encodings take up significantly less space than
 b-encodings:

->>> len(rencode.dumps({'a':0, 'b':[1,2], 'c':99}))
+>>> len(rencode.dumps({'a': 0, 'b': [1, 2], 'c': 99}))
 13
->>> len(bencode.bencode({'a':0, 'b':[1,2], 'c':99}))
+>>> len(bencode.bencode({'a': 0, 'b': [1, 2], 'c': 99}))
 26

 The rencode format is not standardized, and may change with different
@@ -19,6 +19,13 @@ rencode module versions, so you should check that you are using the
 same rencode version throughout your project.
 """

+import struct
+from threading import Lock
+
+from six import PY3
+
+if PY3:
+    long = int

 __version__ = '1.0.1'
 __all__ = ['dumps', 'loads']
@@ -62,9 +69,6 @@ __all__ = ['dumps', 'loads']
 # (The rencode module is licensed under the above license as well).
 #

-import struct
-from threading import Lock

 # Default number of bits for serialized floats, either 32 or 64 (also a parameter for dumps()).
 DEFAULT_FLOAT_BITS = 32
@@ -73,19 +77,19 @@ MAX_INT_LENGTH = 64
 # The bencode 'typecodes' such as i, d, etc have been extended and
 # relocated on the base-256 character set.
-CHR_LIST = chr(59)
-CHR_DICT = chr(60)
-CHR_INT = chr(61)
-CHR_INT1 = chr(62)
-CHR_INT2 = chr(63)
-CHR_INT4 = chr(64)
-CHR_INT8 = chr(65)
-CHR_FLOAT32 = chr(66)
-CHR_FLOAT64 = chr(44)
-CHR_TRUE = chr(67)
-CHR_FALSE = chr(68)
-CHR_NONE = chr(69)
-CHR_TERM = chr(127)
+CHR_LIST    = chr(59)
+CHR_DICT    = chr(60)
+CHR_INT     = chr(61)
+CHR_INT1    = chr(62)
+CHR_INT2    = chr(63)
+CHR_INT4    = chr(64)
+CHR_INT8    = chr(65)
+CHR_FLOAT32 = chr(66)
+CHR_FLOAT64 = chr(44)
+CHR_TRUE    = chr(67)
+CHR_FALSE   = chr(68)
+CHR_NONE    = chr(69)
+CHR_TERM    = chr(127)

 # Positive integers with value embedded in typecode.
 INT_POS_FIXED_START = 0
@@ -104,9 +108,10 @@ STR_FIXED_START = 128
 STR_FIXED_COUNT = 64

 # Lists with length embedded in typecode.
-LIST_FIXED_START = STR_FIXED_START+STR_FIXED_COUNT
+LIST_FIXED_START = STR_FIXED_START + STR_FIXED_COUNT
 LIST_FIXED_COUNT = 64

 def decode_int(x, f):
     f += 1
     newf = x.index(CHR_TERM, f)
@@ -119,35 +124,42 @@ def decode_int(x, f):
     if x[f] == '-':
         if x[f + 1] == '0':
             raise ValueError
-    elif x[f] == '0' and newf != f+1:
+    elif x[f] == '0' and newf != f + 1:
         raise ValueError
-    return (n, newf+1)
+    return n, newf + 1

 def decode_intb(x, f):
     f += 1
-    return (struct.unpack('!b', x[f:f+1])[0], f+1)
+    return struct.unpack('!b', x[f:f + 1])[0], f + 1

 def decode_inth(x, f):
     f += 1
-    return (struct.unpack('!h', x[f:f+2])[0], f+2)
+    return struct.unpack('!h', x[f:f + 2])[0], f + 2

 def decode_intl(x, f):
     f += 1
-    return (struct.unpack('!l', x[f:f+4])[0], f+4)
+    return struct.unpack('!l', x[f:f + 4])[0], f + 4

 def decode_intq(x, f):
     f += 1
-    return (struct.unpack('!q', x[f:f+8])[0], f+8)
+    return struct.unpack('!q', x[f:f + 8])[0], f + 8

 def decode_float32(x, f):
     f += 1
-    n = struct.unpack('!f', x[f:f+4])[0]
-    return (n, f+4)
+    n = struct.unpack('!f', x[f:f + 4])[0]
+    return n, f + 4

 def decode_float64(x, f):
     f += 1
-    n = struct.unpack('!d', x[f:f+8])[0]
-    return (n, f+8)
+    n = struct.unpack('!d', x[f:f + 8])[0]
+    return n, f + 8

 def decode_string(x, f):
     colon = x.index(':', f)
@@ -155,123 +167,147 @@ def decode_string(x, f):
         n = int(x[f:colon])
     except (OverflowError, ValueError):
         n = long(x[f:colon])
-    if x[f] == '0' and colon != f+1:
+    if x[f] == '0' and colon != f + 1:
         raise ValueError
     colon += 1
-    s = x[colon:colon+n]
+    s = x[colon:colon + n]
     try:
         t = s.decode("utf8")
         if len(t) != len(s):
             s = t
     except UnicodeDecodeError:
         pass
-    return (s, colon+n)
+    return s, colon + n

 def decode_list(x, f):
-    r, f = [], f+1
+    r, f = [], f + 1
     while x[f] != CHR_TERM:
         v, f = decode_func[x[f]](x, f)
         r.append(v)
-    return (tuple(r), f + 1)
+    return tuple(r), f + 1

 def decode_dict(x, f):
-    r, f = {}, f+1
+    r, f = {}, f + 1
     while x[f] != CHR_TERM:
         k, f = decode_func[x[f]](x, f)
         r[k], f = decode_func[x[f]](x, f)
-    return (r, f + 1)
+    return r, f + 1

 def decode_true(x, f):
-    return (True, f+1)
+    return True, f + 1

 def decode_false(x, f):
-    return (False, f+1)
+    return False, f + 1

 def decode_none(x, f):
-    return (None, f+1)
+    return None, f + 1
-decode_func = {}
-decode_func['0'] = decode_string
-decode_func['1'] = decode_string
-decode_func['2'] = decode_string
-decode_func['3'] = decode_string
-decode_func['4'] = decode_string
-decode_func['5'] = decode_string
-decode_func['6'] = decode_string
-decode_func['7'] = decode_string
-decode_func['8'] = decode_string
-decode_func['9'] = decode_string
-decode_func[CHR_LIST   ] = decode_list
-decode_func[CHR_DICT   ] = decode_dict
-decode_func[CHR_INT    ] = decode_int
-decode_func[CHR_INT1   ] = decode_intb
-decode_func[CHR_INT2   ] = decode_inth
-decode_func[CHR_INT4   ] = decode_intl
-decode_func[CHR_INT8   ] = decode_intq
-decode_func[CHR_FLOAT32] = decode_float32
-decode_func[CHR_FLOAT64] = decode_float64
-decode_func[CHR_TRUE   ] = decode_true
-decode_func[CHR_FALSE  ] = decode_false
-decode_func[CHR_NONE   ] = decode_none
+decode_func = {
+    '0': decode_string,
+    '1': decode_string,
+    '2': decode_string,
+    '3': decode_string,
+    '4': decode_string,
+    '5': decode_string,
+    '6': decode_string,
+    '7': decode_string,
+    '8': decode_string,
+    '9': decode_string,
+    CHR_LIST: decode_list,
+    CHR_DICT: decode_dict,
+    CHR_INT: decode_int,
+    CHR_INT1: decode_intb,
+    CHR_INT2: decode_inth,
+    CHR_INT4: decode_intl,
+    CHR_INT8: decode_intq,
+    CHR_FLOAT32: decode_float32,
+    CHR_FLOAT64: decode_float64,
+    CHR_TRUE: decode_true,
+    CHR_FALSE: decode_false,
+    CHR_NONE: decode_none,
+}
 def make_fixed_length_string_decoders():
     def make_decoder(slen):
         def f(x, f):
-            s = x[f+1:f+1+slen]
+            s = x[f + 1:f + 1 + slen]
             try:
                 t = s.decode("utf8")
                 if len(t) != len(s):
                     s = t
             except UnicodeDecodeError:
                 pass
-            return (s, f+1+slen)
+            return s, f + 1 + slen
         return f
     for i in range(STR_FIXED_COUNT):
-        decode_func[chr(STR_FIXED_START+i)] = make_decoder(i)
+        decode_func[chr(STR_FIXED_START + i)] = make_decoder(i)

 make_fixed_length_string_decoders()

 def make_fixed_length_list_decoders():
     def make_decoder(slen):
         def f(x, f):
-            r, f = [], f+1
+            r, f = [], f + 1
             for i in range(slen):
                 v, f = decode_func[x[f]](x, f)
                 r.append(v)
-            return (tuple(r), f)
+            return tuple(r), f
         return f
     for i in range(LIST_FIXED_COUNT):
-        decode_func[chr(LIST_FIXED_START+i)] = make_decoder(i)
+        decode_func[chr(LIST_FIXED_START + i)] = make_decoder(i)

 make_fixed_length_list_decoders()

 def make_fixed_length_int_decoders():
     def make_decoder(j):
         def f(x, f):
-            return (j, f+1)
+            return j, f + 1
         return f
     for i in range(INT_POS_FIXED_COUNT):
-        decode_func[chr(INT_POS_FIXED_START+i)] = make_decoder(i)
+        decode_func[chr(INT_POS_FIXED_START + i)] = make_decoder(i)
     for i in range(INT_NEG_FIXED_COUNT):
-        decode_func[chr(INT_NEG_FIXED_START+i)] = make_decoder(-1-i)
+        decode_func[chr(INT_NEG_FIXED_START + i)] = make_decoder(-1 - i)

 make_fixed_length_int_decoders()

 def make_fixed_length_dict_decoders():
     def make_decoder(slen):
         def f(x, f):
-            r, f = {}, f+1
+            r, f = {}, f + 1
             for j in range(slen):
                 k, f = decode_func[x[f]](x, f)
                 r[k], f = decode_func[x[f]](x, f)
-            return (r, f)
+            return r, f
         return f
     for i in range(DICT_FIXED_COUNT):
-        decode_func[chr(DICT_FIXED_START+i)] = make_decoder(i)
+        decode_func[chr(DICT_FIXED_START + i)] = make_decoder(i)

 make_fixed_length_dict_decoders()

-def encode_dict(x,r):
+def encode_dict(x, r):
     r.append(CHR_DICT)
     for k, v in x.items():
         encode_func[type(k)](k, r)
@@ -288,13 +324,15 @@ def loads(x):
         raise ValueError
     return r

 from types import StringType, IntType, LongType, DictType, ListType, TupleType, FloatType, NoneType, UnicodeType

 def encode_int(x, r):
     if 0 <= x < INT_POS_FIXED_COUNT:
-        r.append(chr(INT_POS_FIXED_START+x))
+        r.append(chr(INT_POS_FIXED_START + x))
     elif -INT_NEG_FIXED_COUNT <= x < 0:
-        r.append(chr(INT_NEG_FIXED_START-1-x))
+        r.append(chr(INT_NEG_FIXED_START - 1 - x))
     elif -128 <= x < 128:
         r.extend((CHR_INT1, struct.pack('!b', x)))
     elif -32768 <= x < 32768:
@@ -309,27 +347,34 @@ def encode_int(x, r):
         raise ValueError('overflow')
     r.extend((CHR_INT, s, CHR_TERM))
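A quick worked example of encode_int's size ladder (INT_POS_FIXED_COUNT is 44 in upstream rencode; Python 2 string semantics, matching this module):

    import struct
    r = []
    # x = 5: 0 <= 5 < INT_POS_FIXED_COUNT, so the value rides in the typecode byte:
    r.append(chr(0 + 5))                            # chr(INT_POS_FIXED_START + 5)
    # x = 200: too big for the fixed and 8-bit branches, lands on the 16-bit one:
    r.extend((chr(63), struct.pack('!h', 200)))     # CHR_INT2 + big-endian short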
 def encode_float32(x, r):
     r.extend((CHR_FLOAT32, struct.pack('!f', x)))

 def encode_float64(x, r):
     r.extend((CHR_FLOAT64, struct.pack('!d', x)))

 def encode_bool(x, r):
     r.extend({False: CHR_FALSE, True: CHR_TRUE}[bool(x)])

 def encode_none(x, r):
     r.extend(CHR_NONE)

 def encode_string(x, r):
     if len(x) < STR_FIXED_COUNT:
         r.extend((chr(STR_FIXED_START + len(x)), x))
     else:
         r.extend((str(len(x)), ':', x))

 def encode_unicode(x, r):
     encode_string(x.encode("utf8"), r)

 def encode_list(x, r):
     if len(x) < LIST_FIXED_COUNT:
         r.append(chr(LIST_FIXED_START + len(x)))
@@ -341,7 +386,8 @@ def encode_list(x, r):
         encode_func[type(i)](i, r)
         r.append(CHR_TERM)

-def encode_dict(x,r):
+def encode_dict(x, r):
     if len(x) < DICT_FIXED_COUNT:
         r.append(chr(DICT_FIXED_START + len(x)))
     for k, v in x.items():
@@ -354,24 +400,28 @@ def encode_dict(x, r):
         encode_func[type(v)](v, r)
     r.append(CHR_TERM)
-encode_func = {}
-encode_func[IntType] = encode_int
-encode_func[LongType] = encode_int
-encode_func[StringType] = encode_string
-encode_func[ListType] = encode_list
-encode_func[TupleType] = encode_list
-encode_func[DictType] = encode_dict
-encode_func[NoneType] = encode_none
-encode_func[UnicodeType] = encode_unicode
+encode_func = {
+    IntType: encode_int,
+    LongType: encode_int,
+    StringType: encode_string,
+    ListType: encode_list,
+    TupleType: encode_list,
+    DictType: encode_dict,
+    NoneType: encode_none,
+    UnicodeType: encode_unicode,
+}

 lock = Lock()

 try:
     from types import BooleanType
     encode_func[BooleanType] = encode_bool
 except ImportError:
     pass

 def dumps(x, float_bits=DEFAULT_FLOAT_BITS):
     """
     Dump data structure to str.
@ -385,48 +435,53 @@ def dumps(x, float_bits=DEFAULT_FLOAT_BITS):
elif float_bits == 64: elif float_bits == 64:
encode_func[FloatType] = encode_float64 encode_func[FloatType] = encode_float64
else: else:
raise ValueError('Float bits (%d) is not 32 or 64' % float_bits) raise ValueError('Float bits ({0:d}) is not 32 or 64'.format(float_bits))
r = [] r = []
encode_func[type(x)](x, r) encode_func[type(x)](x, r)
finally: finally:
lock.release() lock.release()
return ''.join(r) return ''.join(r)
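A hedged usage sketch of the encoder above: it assumes this module's matching loads() (defined alongside dumps in rencode) and round-trips a nested structure, much as the test below does.

payload = {'name': 'ubuntu.iso', 'size': 2 ** 33, 'seeds': (12, -3), 'done': None}
wire = dumps(payload)          # compact str; each value tagged via encode_func
assert loads(wire) == payload  # the decoder restores the same structure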
def test(): def test():
f1 = struct.unpack('!f', struct.pack('!f', 25.5))[0] f1 = struct.unpack('!f', struct.pack('!f', 25.5))[0]
f2 = struct.unpack('!f', struct.pack('!f', 29.3))[0] f2 = struct.unpack('!f', struct.pack('!f', 29.3))[0]
f3 = struct.unpack('!f', struct.pack('!f', -0.6))[0] f3 = struct.unpack('!f', struct.pack('!f', -0.6))[0]
L = (({'a':15, 'bb':f1, 'ccc':f2, '':(f3,(),False,True,'')},('a',10**20),tuple(range(-100000,100000)),'b'*31,'b'*62,'b'*64,2**30,2**33,2**62,2**64,2**30,2**33,2**62,2**64,False,False, True, -1, 2, 0),) L = (({'a': 15, 'bb': f1, 'ccc': f2, '': (f3, (), False, True, '')}, ('a', 10 ** 20), tuple(range(-100000, 100000)),
'b' * 31, 'b' * 62, 'b' * 64, 2 ** 30, 2 ** 33, 2 ** 62, 2 ** 64, 2 ** 30, 2 ** 33, 2 ** 62, 2 ** 64, False,
False, True, -1, 2, 0),)
assert loads(dumps(L)) == L assert loads(dumps(L)) == L
d = dict(zip(range(-100000,100000),range(-100000,100000))) d = dict(zip(range(-100000, 100000), range(-100000, 100000)))
d.update({'a':20, 20:40, 40:41, f1:f2, f2:f3, f3:False, False:True, True:False}) d.update({'a': 20, 20: 40, 40: 41, f1: f2, f2: f3, f3: False, False: True, True: False})
L = (d, {}, {5:6}, {7:7,True:8}, {9:10, 22:39, 49:50, 44: ''}) L = (d, {}, {5: 6}, {7: 7, True: 8}, {9: 10, 22: 39, 49: 50, 44: ''})
assert loads(dumps(L)) == L assert loads(dumps(L)) == L
L = ('', 'a'*10, 'a'*100, 'a'*1000, 'a'*10000, 'a'*100000, 'a'*1000000, 'a'*10000000) L = ('', 'a' * 10, 'a' * 100, 'a' * 1000, 'a' * 10000, 'a' * 100000, 'a' * 1000000, 'a' * 10000000)
assert loads(dumps(L)) == L assert loads(dumps(L)) == L
L = tuple([dict(zip(range(n),range(n))) for n in range(100)]) + ('b',) L = tuple([dict(zip(range(n), range(n))) for n in range(100)]) + ('b',)
assert loads(dumps(L)) == L assert loads(dumps(L)) == L
L = tuple([dict(zip(range(n),range(-n,0))) for n in range(100)]) + ('b',) L = tuple([dict(zip(range(n), range(-n, 0))) for n in range(100)]) + ('b',)
assert loads(dumps(L)) == L assert loads(dumps(L)) == L
L = tuple([tuple(range(n)) for n in range(100)]) + ('b',) L = tuple([tuple(range(n)) for n in range(100)]) + ('b',)
assert loads(dumps(L)) == L assert loads(dumps(L)) == L
L = tuple(['a'*n for n in range(1000)]) + ('b',) L = tuple(['a' * n for n in range(1000)]) + ('b',)
assert loads(dumps(L)) == L assert loads(dumps(L)) == L
L = tuple(['a'*n for n in range(1000)]) + (None,True,None) L = tuple(['a' * n for n in range(1000)]) + (None, True, None)
assert loads(dumps(L)) == L assert loads(dumps(L)) == L
assert loads(dumps(None)) == None assert loads(dumps(None)) is None
assert loads(dumps({None:None})) == {None:None} assert loads(dumps({None: None})) == {None: None}
assert 1e-10<abs(loads(dumps(1.1))-1.1)<1e-6 assert 1e-10 < abs(loads(dumps(1.1)) - 1.1) < 1e-6
assert 1e-10<abs(loads(dumps(1.1,32))-1.1)<1e-6 assert 1e-10 < abs(loads(dumps(1.1, 32)) - 1.1) < 1e-6
assert abs(loads(dumps(1.1,64))-1.1)<1e-12 assert abs(loads(dumps(1.1, 64)) - 1.1) < 1e-12
assert loads(dumps(u"Hello World!!")) assert loads(dumps(u"Hello World!!"))
try: try:
import psyco import psyco
psyco.bind(dumps) psyco.bind(dumps)
psyco.bind(loads) psyco.bind(loads)
except ImportError: except ImportError:
pass pass
if __name__ == '__main__': if __name__ == '__main__':
test() test()
@ -1,3 +1,4 @@
# coding=utf-8
import zlib import zlib
import struct import struct
import socket import socket
@ -5,9 +6,9 @@ import ssl
from core.synchronousdeluge import rencode from core.synchronousdeluge import rencode
__all__ = ["DelugeTransfer"] __all__ = ["DelugeTransfer"]
class DelugeTransfer(object): class DelugeTransfer(object):
def __init__(self): def __init__(self):
self.sock = None self.sock = None
@ -53,5 +54,3 @@ class DelugeTransfer(object):
buf = dobj.unused_data buf = dobj.unused_data
yield message yield message
@ -1 +1,2 @@
# coding=utf-8
__author__ = 'Justin' __author__ = 'Justin'
@ -1,15 +1,18 @@
# coding=utf-8
from six import iteritems
import errno import errno
import os import os
import platform import platform
import subprocess import subprocess
import urllib2
import traceback
import core import core
import json import json
import shutil import shutil
import re import re
from core import logger from core import logger
from core.nzbToMediaUtil import makeDir from core.nzbToMediaUtil import makeDir
from babelfish import Language
def isVideoGood(videofile, status): def isVideoGood(videofile, status):
fileNameExt = os.path.basename(videofile) fileNameExt = os.path.basename(videofile)
@ -19,7 +22,7 @@ def isVideoGood(videofile, status):
disable = True disable = True
else: else:
test_details, res = getVideoDetails(core.TEST_FILE) test_details, res = getVideoDetails(core.TEST_FILE)
if res !=0 or test_details.get("error"): if res != 0 or test_details.get("error"):
disable = True disable = True
logger.info("DISABLED: ffprobe failed to analyse test file. Stopping corruption check.", 'TRANSCODER') logger.info("DISABLED: ffprobe failed to analyse test file. Stopping corruption check.", 'TRANSCODER')
if test_details.get("streams"): if test_details.get("streams"):
@ -27,41 +30,46 @@ def isVideoGood(videofile, status):
audStreams = [item for item in test_details["streams"] if item["codec_type"] == "audio"] audStreams = [item for item in test_details["streams"] if item["codec_type"] == "audio"]
if not (len(vidStreams) > 0 and len(audStreams) > 0): if not (len(vidStreams) > 0 and len(audStreams) > 0):
disable = True disable = True
logger.info("DISABLED: ffprobe failed to analyse streams from test file. Stopping corruption check.", 'TRANSCODER') logger.info("DISABLED: ffprobe failed to analyse streams from test file. Stopping corruption check.",
'TRANSCODER')
if disable: if disable:
if status: # if the download was "failed", assume bad. If it was successful, assume good. if status: # if the download was "failed", assume bad. If it was successful, assume good.
return False return False
else: else:
return True return True
logger.info('Checking [%s] for corruption, please stand by ...' % (fileNameExt), 'TRANSCODER') logger.info('Checking [{0}] for corruption, please stand by ...'.format(fileNameExt), 'TRANSCODER')
video_details, result = getVideoDetails(videofile) video_details, result = getVideoDetails(videofile)
if result != 0: if result != 0:
logger.error("FAILED: [%s] is corrupted!" % (fileNameExt), 'TRANSCODER') logger.error("FAILED: [{0}] is corrupted!".format(fileNameExt), 'TRANSCODER')
return False return False
if video_details.get("error"): if video_details.get("error"):
logger.info("FAILED: [%s] returned error [%s]." % (fileNameExt, str(video_details.get("error"))), 'TRANSCODER') logger.info("FAILED: [{0}] returned error [{1}].".format(fileNameExt, video_details.get("error")), 'TRANSCODER')
return False return False
if video_details.get("streams"): if video_details.get("streams"):
videoStreams = [item for item in video_details["streams"] if item["codec_type"] == "video"] videoStreams = [item for item in video_details["streams"] if item["codec_type"] == "video"]
audioStreams = [item for item in video_details["streams"] if item["codec_type"] == "audio"] audioStreams = [item for item in video_details["streams"] if item["codec_type"] == "audio"]
if len(videoStreams) > 0 and len(audioStreams) > 0: if len(videoStreams) > 0 and len(audioStreams) > 0:
logger.info("SUCCESS: [%s] has no corruption." % (fileNameExt), 'TRANSCODER') logger.info("SUCCESS: [{0}] has no corruption.".format(fileNameExt), 'TRANSCODER')
return True return True
else: else:
logger.info("FAILED: [%s] has %s video streams and %s audio streams. Assume corruption." % (fileNameExt, str(len(videoStreams)), str(len(audioStreams))), 'TRANSCODER') logger.info("FAILED: [{0}] has {1} video streams and {2} audio streams. "
"Assume corruption.".format
(fileNameExt, len(videoStreams), len(audioStreams)), 'TRANSCODER')
return False return False
def zip_out(file, img, bitbucket): def zip_out(file, img, bitbucket):
procin = None procin = None
cmd = [core.SEVENZIP, '-so', 'e', img, file] cmd = [core.SEVENZIP, '-so', 'e', img, file]
try: try:
procin = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=bitbucket) procin = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=bitbucket)
except: except:
logger.error("Extracting [%s] has failed" % (file), 'TRANSCODER') logger.error("Extracting [{0}] has failed".format(file), 'TRANSCODER')
return procin return procin
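zip_out streams one member of a disk image to stdout via 7-Zip; a hedged sketch of the same invocation, where the image and member paths are hypothetical:

import subprocess

# '-so' makes 7z write the extracted member to stdout, mirroring zip_out above.
cmd = ['7z', '-so', 'e', 'movie_disc.iso', 'VIDEO_TS/VTS_01_1.VOB']
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
vob_bytes = proc.stdout.read()  # stream this into ffmpeg's stdin, as Transcode_directory does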
def getVideoDetails(videofile, img=None, bitbucket=None): def getVideoDetails(videofile, img=None, bitbucket=None):
video_details = {} video_details = {}
result = 1 result = 1
@ -75,7 +83,8 @@ def getVideoDetails(videofile, img=None, bitbucket=None):
try: try:
if img: if img:
videofile = '-' videofile = '-'
command = [core.FFPROBE, '-v', 'quiet', print_format, 'json', '-show_format', '-show_streams', '-show_error', videofile] command = [core.FFPROBE, '-v', 'quiet', print_format, 'json', '-show_format', '-show_streams', '-show_error',
videofile]
print_cmd(command) print_cmd(command)
if img: if img:
procin = zip_out(file, img, bitbucket) procin = zip_out(file, img, bitbucket)
@ -86,7 +95,8 @@ def getVideoDetails(videofile, img=None, bitbucket=None):
out, err = proc.communicate() out, err = proc.communicate()
result = proc.returncode result = proc.returncode
video_details = json.loads(out) video_details = json.loads(out)
except: pass except:
pass
if not video_details: if not video_details:
try: try:
command = [core.FFPROBE, '-v', 'quiet', print_format, 'json', '-show_format', '-show_streams', videofile] command = [core.FFPROBE, '-v', 'quiet', print_format, 'json', '-show_format', '-show_streams', videofile]
@ -100,9 +110,10 @@ def getVideoDetails(videofile, img=None, bitbucket=None):
result = proc.returncode result = proc.returncode
video_details = json.loads(out) video_details = json.loads(out)
except: except:
logger.error("Checking [%s] has failed" % (file), 'TRANSCODER') logger.error("Checking [{0}] has failed".format(file), 'TRANSCODER')
return video_details, result return video_details, result
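For reference, a hedged standalone sketch of the probe performed above; the ffprobe flags are standard, while the bare binary name and sample path are assumptions:

import json
import subprocess

cmd = ['ffprobe', '-v', 'quiet', '-print_format', 'json',
       '-show_format', '-show_streams', 'sample.mkv']  # hypothetical file
details = json.loads(subprocess.check_output(cmd))
video = [s for s in details.get('streams', []) if s['codec_type'] == 'video']
audio = [s for s in details.get('streams', []) if s['codec_type'] == 'audio']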
def buildCommands(file, newDir, movieName, bitbucket): def buildCommands(file, newDir, movieName, bitbucket):
if isinstance(file, str): if isinstance(file, str):
inputFile = file inputFile = file
@ -115,13 +126,13 @@ def buildCommands(file, newDir, movieName, bitbucket):
if check and core.CONCAT: if check and core.CONCAT:
name = movieName name = movieName
elif check: elif check:
name = ('%s.cd%s' % (movieName, check.groups()[0])) name = ('{0}.cd{1}'.format(movieName, check.groups()[0]))
elif core.CONCAT and re.match("(.+)[cC][dD][0-9]", name): elif core.CONCAT and re.match("(.+)[cC][dD][0-9]", name):
name = re.sub("([\ \.\-\_\=\:]+[cC][dD][0-9])", "", name) name = re.sub("([\ \.\-\_\=\:]+[cC][dD][0-9])", "", name)
if ext == core.VEXTENSION and newDir == dir: # we need to change the name to prevent overwriting itself. if ext == core.VEXTENSION and newDir == dir: # we need to change the name to prevent overwriting itself.
core.VEXTENSION = '-transcoded' + core.VEXTENSION # adds '-transcoded.ext' core.VEXTENSION = '-transcoded{ext}'.format(ext=core.VEXTENSION) # adds '-transcoded.ext'
else: else:
img, data = file.iteritems().next() img, data = iteritems(file).next()
name = data['name'] name = data['name']
video_details, result = getVideoDetails(data['files'][0], img, bitbucket) video_details, result = getVideoDetails(data['files'][0], img, bitbucket)
inputFile = '-' inputFile = '-'
@ -133,12 +144,12 @@ def buildCommands(file, newDir, movieName, bitbucket):
video_cmd = [] video_cmd = []
audio_cmd = [] audio_cmd = []
audio_cmd2 = [] audio_cmd2 = []
audio_cmd3 = []
sub_cmd = [] sub_cmd = []
meta_cmd = [] meta_cmd = []
other_cmd = [] other_cmd = []
if not video_details or not video_details.get("streams"): # we couldn't read streams with ffprobe. Set defaults to try transcoding. if not video_details or not video_details.get(
"streams"): # we couldn't read streams with ffprobe. Set defaults to try transcoding.
videoStreams = [] videoStreams = []
audioStreams = [] audioStreams = []
subStreams = [] subStreams = []
@ -155,7 +166,7 @@ def buildCommands(file, newDir, movieName, bitbucket):
if core.VBITRATE: if core.VBITRATE:
video_cmd.extend(['-b:v', str(core.VBITRATE)]) video_cmd.extend(['-b:v', str(core.VBITRATE)])
if core.VRESOLUTION: if core.VRESOLUTION:
video_cmd.extend(['-vf', 'scale=' + core.VRESOLUTION]) video_cmd.extend(['-vf', 'scale={vres}'.format(vres=core.VRESOLUTION)])
if core.VPRESET: if core.VPRESET:
video_cmd.extend(['-preset', core.VPRESET]) video_cmd.extend(['-preset', core.VPRESET])
if core.VCRF: if core.VCRF:
@ -165,12 +176,13 @@ def buildCommands(file, newDir, movieName, bitbucket):
if core.ACODEC: if core.ACODEC:
audio_cmd.extend(['-c:a', core.ACODEC]) audio_cmd.extend(['-c:a', core.ACODEC])
if core.ACODEC in ['aac', 'dts']: # Allow users to use the experimental AAC codec that's built into recent versions of ffmpeg if core.ACODEC in ['aac',
'dts']: # Allow users to use the experimental AAC codec that's built into recent versions of ffmpeg
audio_cmd.extend(['-strict', '-2']) audio_cmd.extend(['-strict', '-2'])
else: else:
audio_cmd.extend(['-c:a', 'copy']) audio_cmd.extend(['-c:a', 'copy'])
if core.ACHANNELS: if core.ACHANNELS:
audio_cmd.extend(['-ac', str(core.ACHANNELS)]) audio_cmd.extend(['-ac', str(core.ACHANNELS)])
if core.ABITRATE: if core.ABITRATE:
audio_cmd.extend(['-b:a', str(core.ABITRATE)]) audio_cmd.extend(['-b:a', str(core.ABITRATE)])
if core.OUTPUTQUALITYPERCENT: if core.OUTPUTQUALITYPERCENT:
@ -182,7 +194,7 @@ def buildCommands(file, newDir, movieName, bitbucket):
sub_cmd.extend(['-c:s', 'copy']) sub_cmd.extend(['-c:s', 'copy'])
else: # http://en.wikibooks.org/wiki/FFMPEG_An_Intermediate_Guide/subtitle_options else: # http://en.wikibooks.org/wiki/FFMPEG_An_Intermediate_Guide/subtitle_options
sub_cmd.extend(['-sn']) # Don't copy the subtitles over sub_cmd.extend(['-sn']) # Don't copy the subtitles over
if core.OUTPUTFASTSTART: if core.OUTPUTFASTSTART:
other_cmd.extend(['-movflags', '+faststart']) other_cmd.extend(['-movflags', '+faststart'])
@ -191,23 +203,16 @@ def buildCommands(file, newDir, movieName, bitbucket):
audioStreams = [item for item in video_details["streams"] if item["codec_type"] == "audio"] audioStreams = [item for item in video_details["streams"] if item["codec_type"] == "audio"]
subStreams = [item for item in video_details["streams"] if item["codec_type"] == "subtitle"] subStreams = [item for item in video_details["streams"] if item["codec_type"] == "subtitle"]
if core.VEXTENSION not in ['.mkv', '.mpegts']: if core.VEXTENSION not in ['.mkv', '.mpegts']:
subStreams = [item for item in video_details["streams"] if item["codec_type"] == "subtitle" and item["codec_name"] != "hdmv_pgs_subtitle" and item["codec_name"] != "pgssub"] subStreams = [item for item in video_details["streams"] if
item["codec_type"] == "subtitle" and item["codec_name"] != "hdmv_pgs_subtitle" and item[
"codec_name"] != "pgssub"]
for video in videoStreams: for video in videoStreams:
codec = video["codec_name"] codec = video["codec_name"]
try: fr = video.get("avg_frame_rate", 0)
fr = video["avg_frame_rate"] width = video.get("width", 0)
except: fr = 0 height = video.get("height", 0)
try:
width = video["width"]
except: width = 0
try:
height = video["height"]
except: height = 0
scale = core.VRESOLUTION scale = core.VRESOLUTION
try:
framerate = float(fr.split('/')[0])/float(fr.split('/')[1])
except: framerate = 0
if codec in core.VCODEC_ALLOW or not core.VCODEC: if codec in core.VCODEC_ALLOW or not core.VCODEC:
video_cmd.extend(['-c:v', 'copy']) video_cmd.extend(['-c:v', 'copy'])
else: else:
@ -215,16 +220,22 @@ def buildCommands(file, newDir, movieName, bitbucket):
if core.VFRAMERATE and not (core.VFRAMERATE * 0.999 <= fr <= core.VFRAMERATE * 1.001): if core.VFRAMERATE and not (core.VFRAMERATE * 0.999 <= fr <= core.VFRAMERATE * 1.001):
video_cmd.extend(['-r', str(core.VFRAMERATE)]) video_cmd.extend(['-r', str(core.VFRAMERATE)])
if scale: if scale:
w_scale = width/float(scale.split(':')[0]) w_scale = width / float(scale.split(':')[0])
h_scale = height/float(scale.split(':')[1]) h_scale = height / float(scale.split(':')[1])
if w_scale > h_scale: # widescreen, Scale by width only. if w_scale > h_scale: # widescreen, Scale by width only.
scale = scale.split(':')[0] + ":" + str(int((height/w_scale)/2)*2) scale = "{width}:{height}".format(
if w_scale > 1: width=scale.split(':')[0],
video_cmd.extend(['-vf', 'scale=' + scale]) height=int((height / w_scale) / 2) * 2,
else: # lower or mathcing ratio, scale by height only. )
scale = str(int((width/h_scale)/2)*2) + ":" + scale.split(':')[1] if w_scale > 1:
if h_scale > 1: video_cmd.extend(['-vf', 'scale={width}'.format(width=scale)])
video_cmd.extend(['-vf', 'scale=' + scale]) else: # lower or matching ratio, scale by height only.
scale = "{width}:{height}".format(
width=int((width / h_scale) / 2) * 2,
height=scale.split(':')[1],
)
if h_scale > 1:
video_cmd.extend(['-vf', 'scale={height}'.format(height=scale)])
if core.VBITRATE: if core.VBITRATE:
video_cmd.extend(['-b:v', str(core.VBITRATE)]) video_cmd.extend(['-b:v', str(core.VBITRATE)])
if core.VPRESET: if core.VPRESET:
@ -237,70 +248,55 @@ def buildCommands(file, newDir, movieName, bitbucket):
if video_cmd[1] == 'copy' and any(i in video_cmd for i in no_copy): if video_cmd[1] == 'copy' and any(i in video_cmd for i in no_copy):
video_cmd[1] = core.VCODEC video_cmd[1] = core.VCODEC
if core.VCODEC == 'copy': # force copy. therefore ignore all other video transcoding. if core.VCODEC == 'copy': # force copy. therefore ignore all other video transcoding.
video_cmd = ['-c:v', 'copy'] video_cmd = ['-c:v', 'copy']
map_cmd.extend(['-map', '0:' + str(video["index"])]) map_cmd.extend(['-map', '0:{index}'.format(index=video["index"])])
break # Only one video needed break # Only one video needed
used_audio = 0 used_audio = 0
a_mapped = [] a_mapped = []
if audioStreams: if audioStreams:
try: try:
audio1 = [ item for item in audioStreams if item["tags"]["language"] == core.ALANGUAGE ] audio1 = [item for item in audioStreams if item["tags"]["language"] == core.ALANGUAGE]
except: # no language tags. Assume only 1 language. except: # no language tags. Assume only 1 language.
audio1 = audioStreams audio1 = audioStreams
audio2 = [ item for item in audio1 if item["codec_name"] in core.ACODEC_ALLOW ]
try: try:
audio3 = [ item for item in audioStreams if item["tags"]["language"] != core.ALANGUAGE ] audio2 = [item for item in audio1 if item["codec_name"] in core.ACODEC_ALLOW]
except:
audio2 = []
try:
audio3 = [item for item in audioStreams if item["tags"]["language"] != core.ALANGUAGE]
except: except:
audio3 = [] audio3 = []
if audio2: # right language and codec... if audio2: # right language and codec...
map_cmd.extend(['-map', '0:' + str(audio2[0]["index"])]) map_cmd.extend(['-map', '0:{index}'.format(index=audio2[0]["index"])])
a_mapped.extend([audio2[0]["index"]]) a_mapped.extend([audio2[0]["index"]])
try: bitrate = int(float(audio2[0].get("bit_rate", 0))) / 1000
bitrate = int(audio2[0]["bit_rate"])/1000 channels = int(float(audio2[0].get("channels", 0)))
except: bitrate = 0 audio_cmd.extend(['-c:a:{0}'.format(used_audio), 'copy'])
try:
channels = int(audio2[0]["channels"])
except: channels = 0
audio_cmd.extend(['-c:a:' + str(used_audio), 'copy'])
elif audio1: # right language wrong codec. elif audio1: # right language wrong codec.
map_cmd.extend(['-map', '0:' + str(audio1[0]["index"])]) map_cmd.extend(['-map', '0:{index}'.format(index=audio1[0]["index"])])
a_mapped.extend([audio1[0]["index"]]) a_mapped.extend([audio1[0]["index"]])
try: bitrate = int(float(audio1[0].get("bit_rate", 0))) / 1000
bitrate = int(audio1[0]["bit_rate"])/1000 channels = int(float(audio1[0].get("channels", 0)))
except: bitrate = 0 audio_cmd.extend(['-c:a:{0}'.format(used_audio), core.ACODEC if core.ACODEC else 'copy'])
try:
channels = int(audio1[0]["channels"])
except: channels = 0
if core.ACODEC:
audio_cmd.extend(['-c:a:' + str(used_audio), core.ACODEC])
else:
audio_cmd.extend(['-c:a:' + str(used_audio), 'copy'])
elif audio3: # just pick the default audio track elif audio3: # just pick the default audio track
map_cmd.extend(['-map', '0:' + str(audio3[0]["index"])]) map_cmd.extend(['-map', '0:{index}'.format(index=audio3[0]["index"])])
a_mapped.extend([audio3[0]["index"]]) a_mapped.extend([audio3[0]["index"]])
try: bitrate = int(float(audio3[0].get("bit_rate", 0))) / 1000
bitrate = int(audio3[0]["bit_rate"])/1000 channels = int(float(audio3[0].get("channels", 0)))
except: bitrate = 0 audio_cmd.extend(['-c:a:{0}'.format(used_audio), core.ACODEC if core.ACODEC else 'copy'])
try:
channels = int(audio3[0]["channels"])
except: channels = 0
if core.ACODEC:
audio_cmd.extend(['-c:a:' + str(used_audio), core.ACODEC])
else:
audio_cmd.extend(['-c:a:' + str(used_audio), 'copy'])
if core.ACHANNELS and channels and channels > core.ACHANNELS: if core.ACHANNELS and channels and channels > core.ACHANNELS:
audio_cmd.extend(['-ac:a:' + str(used_audio), str(core.ACHANNELS)]) audio_cmd.extend(['-ac:a:{0}'.format(used_audio), str(core.ACHANNELS)])
if audio_cmd[1] == 'copy': if audio_cmd[1] == 'copy':
audio_cmd[1] = core.ACODEC audio_cmd[1] = core.ACODEC
if core.ABITRATE and not (core.ABITRATE * 0.9 < bitrate < core.ABITRATE * 1.1): if core.ABITRATE and not (core.ABITRATE * 0.9 < bitrate < core.ABITRATE * 1.1):
audio_cmd.extend(['-b:a:' + str(used_audio), str(core.ABITRATE)]) audio_cmd.extend(['-b:a:{0}'.format(used_audio), str(core.ABITRATE)])
if audio_cmd[1] == 'copy': if audio_cmd[1] == 'copy':
audio_cmd[1] = core.ACODEC audio_cmd[1] = core.ACODEC
if core.OUTPUTQUALITYPERCENT: if core.OUTPUTQUALITYPERCENT:
audio_cmd.extend(['-q:a:' + str(used_audio), str(core.OUTPUTQUALITYPERCENT)]) audio_cmd.extend(['-q:a:{0}'.format(used_audio), str(core.OUTPUTQUALITYPERCENT)])
if audio_cmd[1] == 'copy': if audio_cmd[1] == 'copy':
audio_cmd[1] = core.ACODEC audio_cmd[1] = core.ACODEC
if audio_cmd[1] in ['aac', 'dts']: if audio_cmd[1] in ['aac', 'dts']:
@ -308,54 +304,45 @@ def buildCommands(file, newDir, movieName, bitbucket):
if core.ACODEC2_ALLOW: if core.ACODEC2_ALLOW:
used_audio += 1 used_audio += 1
audio4 = [ item for item in audio1 if item["codec_name"] in core.ACODEC2_ALLOW ] try:
audio4 = [item for item in audio1 if item["codec_name"] in core.ACODEC2_ALLOW]
except:
audio4 = []
if audio4: # right language and codec. if audio4: # right language and codec.
map_cmd.extend(['-map', '0:' + str(audio4[0]["index"])]) map_cmd.extend(['-map', '0:{index}'.format(index=audio4[0]["index"])])
a_mapped.extend([audio4[0]["index"]]) a_mapped.extend([audio4[0]["index"]])
try: bitrate = int(float(audio4[0].get("bit_rate", 0))) / 1000
bitrate = int(audio4[0]["bit_rate"])/1000 channels = int(float(audio4[0].get("channels", 0)))
except: bitrate = 0 audio_cmd2.extend(['-c:a:{0}'.format(used_audio), 'copy'])
try:
channels = int(audio4[0]["channels"])
except: channels = 0
audio_cmd2.extend(['-c:a:' + str(used_audio), 'copy'])
elif audio1: # right language wrong codec. elif audio1: # right language wrong codec.
map_cmd.extend(['-map', '0:' + str(audio1[0]["index"])]) map_cmd.extend(['-map', '0:{index}'.format(index=audio1[0]["index"])])
a_mapped.extend([audio1[0]["index"]]) a_mapped.extend([audio1[0]["index"]])
try: bitrate = int(float(audio1[0].get("bit_rate", 0))) / 1000
bitrate = int(audio1[0]["bit_rate"])/1000 channels = int(float(audio1[0].get("channels", 0)))
except: bitrate = 0
try:
channels = int(audio1[0]["channels"])
except: channels = 0
if core.ACODEC2: if core.ACODEC2:
audio_cmd2.extend(['-c:a:' + str(used_audio), core.ACODEC2]) audio_cmd2.extend(['-c:a:{0}'.format(used_audio), core.ACODEC2])
else: else:
audio_cmd2.extend(['-c:a:' + str(used_audio), 'copy']) audio_cmd2.extend(['-c:a:{0}'.format(used_audio), 'copy'])
elif audio3: # just pick the default audio track elif audio3: # just pick the default audio track
map_cmd.extend(['-map', '0:' + str(audio3[0]["index"])]) map_cmd.extend(['-map', '0:{index}'.format(index=audio3[0]["index"])])
a_mapped.extend([audio3[0]["index"]]) a_mapped.extend([audio3[0]["index"]])
try: bitrate = int(float(audio3[0].get("bit_rate", 0))) / 1000
bitrate = int(audio3[0]["bit_rate"])/1000 channels = int(float(audio3[0].get("channels", 0)))
except: bitrate = 0
try:
channels = int(audio3[0]["channels"])
except: channels = 0
if core.ACODEC2: if core.ACODEC2:
audio_cmd2.extend(['-c:a:' + str(used_audio), core.ACODEC2]) audio_cmd2.extend(['-c:a:{0}'.format(used_audio), core.ACODEC2])
else: else:
audio_cmd2.extend(['-c:a:' + str(used_audio), 'copy']) audio_cmd2.extend(['-c:a:{0}'.format(used_audio), 'copy'])
if core.ACHANNELS2 and channels and channels > core.ACHANNELS2: if core.ACHANNELS2 and channels and channels > core.ACHANNELS2:
audio_cmd2.extend(['-ac:a:' + str(used_audio), str(core.ACHANNELS2)]) audio_cmd2.extend(['-ac:a:{0}'.format(used_audio), str(core.ACHANNELS2)])
if audio_cmd2[1] == 'copy': if audio_cmd2[1] == 'copy':
audio_cmd2[1] = core.ACODEC2 audio_cmd2[1] = core.ACODEC2
if core.ABITRATE2 and not (core.ABITRATE2 * 0.9 < bitrate < core.ABITRATE2 * 1.1): if core.ABITRATE2 and not (core.ABITRATE2 * 0.9 < bitrate < core.ABITRATE2 * 1.1):
audio_cmd2.extend(['-b:a:' + str(used_audio), str(core.ABITRATE2)]) audio_cmd2.extend(['-b:a:{0}'.format(used_audio), str(core.ABITRATE2)])
if audio_cmd2[1] == 'copy': if audio_cmd2[1] == 'copy':
audio_cmd2[1] = core.ACODEC2 audio_cmd2[1] = core.ACODEC2
if core.OUTPUTQUALITYPERCENT: if core.OUTPUTQUALITYPERCENT:
audio_cmd2.extend(['-q:a:' + str(used_audio), str(core.OUTPUTQUALITYPERCENT)]) audio_cmd2.extend(['-q:a:{0}'.format(used_audio), str(core.OUTPUTQUALITYPERCENT)])
if audio_cmd2[1] == 'copy': if audio_cmd2[1] == 'copy':
audio_cmd2[1] = core.ACODEC2 audio_cmd2[1] = core.ACODEC2
if audio_cmd2[1] in ['aac', 'dts']: if audio_cmd2[1] in ['aac', 'dts']:
@ -367,32 +354,28 @@ def buildCommands(file, newDir, movieName, bitbucket):
if audio["index"] in a_mapped: if audio["index"] in a_mapped:
continue continue
used_audio += 1 used_audio += 1
map_cmd.extend(['-map', '0:' + str(audio["index"])]) map_cmd.extend(['-map', '0:{index}'.format(index=audio["index"])])
audio_cmd3 = [] audio_cmd3 = []
try: bitrate = int(float(audio.get("bit_rate", 0))) / 1000
bitrate = int(audio["bit_rate"])/1000 channels = int(float(audio.get("channels", 0)))
except: bitrate = 0
try:
channels = int(audio["channels"])
except: channels = 0
if audio["codec_name"] in core.ACODEC3_ALLOW: if audio["codec_name"] in core.ACODEC3_ALLOW:
audio_cmd3.extend(['-c:a:' + str(used_audio), 'copy']) audio_cmd3.extend(['-c:a:{0}'.format(used_audio), 'copy'])
else: else:
if core.ACODEC3: if core.ACODEC3:
audio_cmd3.extend(['-c:a:' + str(used_audio), core.ACODEC3]) audio_cmd3.extend(['-c:a:{0}'.format(used_audio), core.ACODEC3])
else: else:
audio_cmd3.extend(['-c:a:' + str(used_audio), 'copy']) audio_cmd3.extend(['-c:a:{0}'.format(used_audio), 'copy'])
if core.ACHANNELS3 and channels and channels > core.ACHANNELS3: if core.ACHANNELS3 and channels and channels > core.ACHANNELS3:
audio_cmd3.extend(['-ac:a:' + str(used_audio), str(core.ACHANNELS3)]) audio_cmd3.extend(['-ac:a:{0}'.format(used_audio), str(core.ACHANNELS3)])
if audio_cmd3[1] == 'copy': if audio_cmd3[1] == 'copy':
audio_cmd3[1] = core.ACODEC3 audio_cmd3[1] = core.ACODEC3
if core.ABITRATE3 and not (core.ABITRATE3 * 0.9 < bitrate < core.ABITRATE3 * 1.1): if core.ABITRATE3 and not (core.ABITRATE3 * 0.9 < bitrate < core.ABITRATE3 * 1.1):
audio_cmd3.extend(['-b:a:' + str(used_audio), str(core.ABITRATE3)]) audio_cmd3.extend(['-b:a:{0}'.format(used_audio), str(core.ABITRATE3)])
if audio_cmd3[1] == 'copy': if audio_cmd3[1] == 'copy':
audio_cmd3[1] = core.ACODEC3 audio_cmd3[1] = core.ACODEC3
if core.OUTPUTQUALITYPERCENT > 0: if core.OUTPUTQUALITYPERCENT > 0:
audio_cmd3.extend(['-q:a:' + str(used_audio), str(core.OUTPUTQUALITYPERCENT)]) audio_cmd3.extend(['-q:a:{0}'.format(used_audio), str(core.OUTPUTQUALITYPERCENT)])
if audio_cmd3[1] == 'copy': if audio_cmd3[1] == 'copy':
audio_cmd3[1] = core.ACODEC3 audio_cmd3[1] = core.ACODEC3
if audio_cmd3[1] in ['aac', 'dts']: if audio_cmd3[1] in ['aac', 'dts']:
@ -400,17 +383,17 @@ def buildCommands(file, newDir, movieName, bitbucket):
audio_cmd.extend(audio_cmd3) audio_cmd.extend(audio_cmd3)
s_mapped = [] s_mapped = []
subs1 = []
burnt = 0 burnt = 0
n = 0 n = 0
for lan in core.SLANGUAGES: for lan in core.SLANGUAGES:
try: try:
subs1 = [ item for item in subStreams if item["tags"]["language"] == lan ] subs1 = [item for item in subStreams if item["tags"]["language"] == lan]
except: subs1 = [] except:
subs1 = []
if core.BURN and not subs1 and not burnt and os.path.isfile(file): if core.BURN and not subs1 and not burnt and os.path.isfile(file):
for subfile in get_subs(file): for subfile in get_subs(file):
if lan in os.path.split(subfile)[1]: if lan in os.path.split(subfile)[1]:
video_cmd.extend(['-vf', 'subtitles=' + subfile]) video_cmd.extend(['-vf', 'subtitles={subs}'.format(subs=subfile)])
burnt = 1 burnt = 1
for sub in subs1: for sub in subs1:
if core.BURN and not burnt and os.path.isfile(inputFile): if core.BURN and not burnt and os.path.isfile(inputFile):
@ -419,21 +402,21 @@ def buildCommands(file, newDir, movieName, bitbucket):
if subStreams[index]["index"] == sub["index"]: if subStreams[index]["index"] == sub["index"]:
subloc = index subloc = index
break break
video_cmd.extend(['-vf', 'subtitles=' + inputFile + ':si=' + str(subloc)]) video_cmd.extend(['-vf', 'subtitles={sub}:si={loc}'.format(sub=inputFile, loc=subloc)])
burnt = 1 burnt = 1
if not core.ALLOWSUBS: if not core.ALLOWSUBS:
break break
map_cmd.extend(['-map', '0:' + str(sub["index"])]) map_cmd.extend(['-map', '0:{index}'.format(index=sub["index"])])
s_mapped.extend([sub["index"]]) s_mapped.extend([sub["index"]])
if core.SINCLUDE: if core.SINCLUDE:
for sub in subStreams: for sub in subStreams:
if not core.ALLOWSUBS: if not core.ALLOWSUBS:
break break
if sub["index"] in s_mapped: if sub["index"] in s_mapped:
continue continue
map_cmd.extend(['-map', '0:' + str(sub["index"])]) map_cmd.extend(['-map', '0:{index}'.format(index=sub["index"])])
s_mapped.extend([sub["index"]]) s_mapped.extend([sub["index"]])
if core.OUTPUTFASTSTART: if core.OUTPUTFASTSTART:
other_cmd.extend(['-movflags', '+faststart']) other_cmd.extend(['-movflags', '+faststart'])
@ -445,22 +428,31 @@ def buildCommands(file, newDir, movieName, bitbucket):
if core.GENERALOPTS: if core.GENERALOPTS:
command.extend(core.GENERALOPTS) command.extend(core.GENERALOPTS)
command.extend([ '-i', inputFile]) command.extend(['-i', inputFile])
if core.SEMBED and os.path.isfile(file): if core.SEMBED and os.path.isfile(file):
for subfile in get_subs(file): for subfile in get_subs(file):
sub_details, result = getVideoDetails(subfile) sub_details, result = getVideoDetails(subfile)
if not sub_details or not sub_details.get("streams"): if not sub_details or not sub_details.get("streams"):
continue continue
lan = os.path.splitext(os.path.splitext(subfile)[0])[1]
command.extend(['-i', subfile]) command.extend(['-i', subfile])
meta_cmd.extend(['-metadata:s:s:' + str(len(s_mapped) + n), 'language=' + lan[1:]]) lan = os.path.splitext(os.path.splitext(subfile)[0])[1][1:].split('-')[0]
metlan = None
try:
if len(lan) == 3:
metlan = Language(lan)
if len(lan) == 2:
metlan = Language.fromalpha2(lan)
except: pass
if metlan:
meta_cmd.extend(['-metadata:s:s:{x}'.format(x=len(s_mapped) + n),
'language={lang}'.format(lang=metlan.alpha3)])
n += 1 n += 1
map_cmd.extend(['-map', str(n) + ':0']) map_cmd.extend(['-map', '{x}:0'.format(x=n)])
if not core.ALLOWSUBS or (not s_mapped and not n): if not core.ALLOWSUBS or (not s_mapped and not n):
sub_cmd.extend(['-sn']) sub_cmd.extend(['-sn'])
else: else:
if core.SCODEC: if core.SCODEC:
sub_cmd.extend(['-c:s', core.SCODEC]) sub_cmd.extend(['-c:s', core.SCODEC])
else: else:
@ -477,6 +469,7 @@ def buildCommands(file, newDir, movieName, bitbucket):
command = core.NICENESS + command command = core.NICENESS + command
return command return command
def get_subs(file): def get_subs(file):
filepaths = [] filepaths = []
subExt = ['.srt', '.sub', '.idx'] subExt = ['.srt', '.sub', '.idx']
@ -485,9 +478,10 @@ def get_subs(file):
for dirname, dirs, filenames in os.walk(dir): for dirname, dirs, filenames in os.walk(dir):
for filename in filenames: for filename in filenames:
filepaths.extend([os.path.join(dirname, filename)]) filepaths.extend([os.path.join(dirname, filename)])
subfiles = [ item for item in filepaths if os.path.splitext(item)[1] in subExt and name in item ] subfiles = [item for item in filepaths if os.path.splitext(item)[1] in subExt and name in item]
return subfiles return subfiles
def extract_subs(file, newfilePath, bitbucket): def extract_subs(file, newfilePath, bitbucket):
video_details, result = getVideoDetails(file) video_details, result = getVideoDetails(file)
if not video_details: if not video_details:
@ -500,34 +494,36 @@ def extract_subs(file, newfilePath, bitbucket):
name = os.path.splitext(os.path.split(newfilePath)[1])[0] name = os.path.splitext(os.path.split(newfilePath)[1])[0]
try: try:
subStreams = [item for item in video_details["streams"] if item["codec_type"] == "subtitle" and item["tags"]["language"] in core.SLANGUAGES and item["codec_name"] != "hdmv_pgs_subtitle" and item["codec_name"] != "pgssub"] subStreams = [item for item in video_details["streams"] if
item["codec_type"] == "subtitle" and item["tags"]["language"] in core.SLANGUAGES and item[
"codec_name"] != "hdmv_pgs_subtitle" and item["codec_name"] != "pgssub"]
except: except:
subStreams = [item for item in video_details["streams"] if item["codec_type"] == "subtitle" and item["codec_name"] != "hdmv_pgs_subtitle" and item["codec_name"] != "pgssub"] subStreams = [item for item in video_details["streams"] if
item["codec_type"] == "subtitle" and item["codec_name"] != "hdmv_pgs_subtitle" and item[
"codec_name"] != "pgssub"]
num = len(subStreams) num = len(subStreams)
for n in range(num): for n in range(num):
sub = subStreams[n] sub = subStreams[n]
idx = sub["index"] idx = sub["index"]
try: lan = sub.get("tags", {}).get("language", "unk")
lan = sub["tags"]["language"]
except:
lan = "unk"
if num == 1: if num == 1:
outputFile = os.path.join(subdir, "%s.srt" %(name)) outputFile = os.path.join(subdir, "{0}.srt".format(name))
if os.path.isfile(outputFile): if os.path.isfile(outputFile):
outputFile = os.path.join(subdir, "%s.%s.srt" %(name, n)) outputFile = os.path.join(subdir, "{0}.{1}.srt".format(name, n))
else: else:
outputFile = os.path.join(subdir, "%s.%s.srt" %(name, lan)) outputFile = os.path.join(subdir, "{0}.{1}.srt".format(name, lan))
if os.path.isfile(outputFile): if os.path.isfile(outputFile):
outputFile = os.path.join(subdir, "%s.%s.%s.srt" %(name, lan, n)) outputFile = os.path.join(subdir, "{0}.{1}.{2}.srt".format(name, lan, n))
command = [core.FFMPEG, '-loglevel', 'warning', '-i', file, '-vn', '-an', '-codec:' + str(idx), 'srt', outputFile] command = [core.FFMPEG, '-loglevel', 'warning', '-i', file, '-vn', '-an',
'-codec:{index}'.format(index=idx), 'srt', outputFile]
if platform.system() != 'Windows': if platform.system() != 'Windows':
command = core.NICENESS + command command = core.NICENESS + command
logger.info("Extracting %s subtitle from: %s" % (lan, file)) logger.info("Extracting {0} subtitle from: {1}".format(lan, file))
print_cmd(command) print_cmd(command)
result = 1 # set result to failed in case call fails. result = 1 # set result to failed in case call fails.
try: try:
proc = subprocess.Popen(command, stdout=bitbucket, stderr=bitbucket) proc = subprocess.Popen(command, stdout=bitbucket, stderr=bitbucket)
proc.communicate() proc.communicate()
@ -538,30 +534,30 @@ def extract_subs(file, newfilePath, bitbucket):
if result == 0: if result == 0:
try: try:
shutil.copymode(file, outputFile) shutil.copymode(file, outputFile)
except: pass except:
logger.info("Extracting %s subtitle from %s has succeeded" % (lan, file)) pass
logger.info("Extracting {0} subtitle from {1} has succeeded".format(lan, file))
else: else:
logger.error("Extracting subtitles has failed") logger.error("Extracting subtitles has failed")
def processList(List, newDir, bitbucket): def processList(List, newDir, bitbucket):
remList = [] remList = []
newList = [] newList = []
delList = []
combine = [] combine = []
vtsPath = None vtsPath = None
success = True success = True
for item in List: for item in List:
newfile = None
ext = os.path.splitext(item)[1].lower() ext = os.path.splitext(item)[1].lower()
if ext in ['.iso', '.bin', '.img'] and not ext in core.IGNOREEXTENSIONS: if ext in ['.iso', '.bin', '.img'] and ext not in core.IGNOREEXTENSIONS:
logger.debug("Attempting to rip disk image: %s" % (item), "TRANSCODER") logger.debug("Attempting to rip disk image: {0}".format(item), "TRANSCODER")
newList.extend(ripISO(item, newDir, bitbucket)) newList.extend(ripISO(item, newDir, bitbucket))
remList.append(item) remList.append(item)
elif re.match(".+VTS_[0-9][0-9]_[0-9].[Vv][Oo][Bb]", item) and not '.vob' in core.IGNOREEXTENSIONS: elif re.match(".+VTS_[0-9][0-9]_[0-9].[Vv][Oo][Bb]", item) and '.vob' not in core.IGNOREEXTENSIONS:
logger.debug("Found VIDEO_TS image file: %s" % (item), "TRANSCODER") logger.debug("Found VIDEO_TS image file: {0}".format(item), "TRANSCODER")
if not vtsPath: if not vtsPath:
try: try:
vtsPath = re.match("(.+VIDEO_TS)",item).groups()[0] vtsPath = re.match("(.+VIDEO_TS)", item).groups()[0]
except: except:
vtsPath = os.path.split(item)[0] vtsPath = os.path.split(item)[0]
remList.append(item) remList.append(item)
@ -570,48 +566,50 @@ def processList(List, newDir, bitbucket):
elif core.CONCAT and re.match(".+[cC][dD][0-9].", item): elif core.CONCAT and re.match(".+[cC][dD][0-9].", item):
remList.append(item) remList.append(item)
combine.append(item) combine.append(item)
else: continue else:
continue
if vtsPath: if vtsPath:
newList.extend(combineVTS(vtsPath)) newList.extend(combineVTS(vtsPath))
if combine: if combine:
newList.extend(combineCD(combine)) newList.extend(combineCD(combine))
for file in newList: for file in newList:
if isinstance(file, str) and not 'concat:' in file and not os.path.isfile(file): if isinstance(file, str) and 'concat:' not in file and not os.path.isfile(file):
success = False success = False
break break
if success and newList: if success and newList:
List.extend(newList) List.extend(newList)
for item in remList: for item in remList:
List.remove(item) List.remove(item)
logger.debug("Successfully extracted .vob file %s from disk image" % (newList[0]), "TRANSCODER") logger.debug("Successfully extracted .vob file {0} from disk image".format(newList[0]), "TRANSCODER")
elif newList and not success: elif newList and not success:
newList = [] newList = []
remList = [] remList = []
logger.error("Failed extracting .vob files from disk image. Stopping transcoding.", "TRANSCODER") logger.error("Failed extracting .vob files from disk image. Stopping transcoding.", "TRANSCODER")
return List, remList, newList, success return List, remList, newList, success
def ripISO(item, newDir, bitbucket): def ripISO(item, newDir, bitbucket):
newFiles = [] newFiles = []
failure_dir = 'failure' failure_dir = 'failure'
# Mount the ISO in your OS and call combineVTS. # Mount the ISO in your OS and call combineVTS.
if not core.SEVENZIP: if not core.SEVENZIP:
logger.error("No 7zip installed. Can't extract image file %s" % (item), "TRANSCODER") logger.error("No 7zip installed. Can't extract image file {0}".format(item), "TRANSCODER")
newFiles = [failure_dir] newFiles = [failure_dir]
return newFiles return newFiles
cmd = [core.SEVENZIP, 'l', item] cmd = [core.SEVENZIP, 'l', item]
try: try:
logger.debug("Attempting to extract .vob from image file %s" % (item), "TRANSCODER") logger.debug("Attempting to extract .vob from image file {0}".format(item), "TRANSCODER")
print_cmd(cmd) print_cmd(cmd)
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=bitbucket) proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=bitbucket)
out, err = proc.communicate() out, err = proc.communicate()
result = proc.returncode fileList = [re.match(".+(VIDEO_TS[\\\/]VTS_[0-9][0-9]_[0-9].[Vv][Oo][Bb])", line).groups()[0] for line in
fileList = [ re.match(".+(VIDEO_TS[\\\/]VTS_[0-9][0-9]_[0-9].[Vv][Oo][Bb])", line).groups()[0] for line in out.splitlines() if re.match(".+VIDEO_TS[\\\/]VTS_[0-9][0-9]_[0-9].[Vv][Oo][Bb]", line) ] out.splitlines() if re.match(".+VIDEO_TS[\\\/]VTS_[0-9][0-9]_[0-9].[Vv][Oo][Bb]", line)]
combined = [] combined = []
for n in range(99): for n in range(99):
concat = [] concat = []
m = 1 m = 1
while True: while True:
vtsName = 'VIDEO_TS%sVTS_%02d_%d.VOB' % (os.sep, n+1, m) vtsName = 'VIDEO_TS{0}VTS_{1:02d}_{2:d}.VOB'.format(os.sep, n + 1, m)
if vtsName in fileList: if vtsName in fileList:
concat.append(vtsName) concat.append(vtsName)
m += 1 m += 1
@ -622,19 +620,22 @@ def ripISO(item, newDir, bitbucket):
if core.CONCAT: if core.CONCAT:
combined.extend(concat) combined.extend(concat)
continue continue
name = '%s.cd%s' % (os.path.splitext(os.path.split(item)[1])[0] ,str(n+1)) name = '{name}.cd{x}'.format(
newFiles.append({item: {'name': name , 'files': concat}}) name=os.path.splitext(os.path.split(item)[1])[0], x=n + 1
)
newFiles.append({item: {'name': name, 'files': concat}})
if core.CONCAT: if core.CONCAT:
name = os.path.splitext(os.path.split(item)[1])[0] name = os.path.splitext(os.path.split(item)[1])[0]
newFiles.append({item: {'name': name , 'files': combined}}) newFiles.append({item: {'name': name, 'files': combined}})
if not newFiles: if not newFiles:
logger.error("No VIDEO_TS folder found in image file %s" % (item), "TRANSCODER") logger.error("No VIDEO_TS folder found in image file {0}".format(item), "TRANSCODER")
newFiles = [failure_dir] newFiles = [failure_dir]
except: except:
logger.error("Failed to extract from image file %s" % (item), "TRANSCODER") logger.error("Failed to extract from image file {0}".format(item), "TRANSCODER")
newFiles = [failure_dir] newFiles = [failure_dir]
return newFiles return newFiles
def combineVTS(vtsPath): def combineVTS(vtsPath):
newFiles = [] newFiles = []
combined = '' combined = ''
@ -642,47 +643,51 @@ def combineVTS(vtsPath):
concat = '' concat = ''
m = 1 m = 1
while True: while True:
vtsName = 'VTS_%02d_%d.VOB' % (n+1, m) vtsName = 'VTS_{0:02d}_{1:d}.VOB'.format(n + 1, m)
if os.path.isfile(os.path.join(vtsPath, vtsName)): if os.path.isfile(os.path.join(vtsPath, vtsName)):
concat = concat + os.path.join(vtsPath, vtsName) + '|' concat += '{file}|'.format(file=os.path.join(vtsPath, vtsName))
m += 1 m += 1
else: else:
break break
if not concat: if not concat:
break break
if core.CONCAT: if core.CONCAT:
combined = combined + concat + '|' combined += '{files}|'.format(files=concat)
continue continue
newFiles.append('concat:%s' % concat[:-1]) newFiles.append('concat:{0}'.format(concat[:-1]))
if core.CONCAT: if core.CONCAT:
newFiles.append('concat:%s' % combined[:-1]) newFiles.append('concat:{0}'.format(combined[:-1]))
return newFiles return newFiles
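The 'concat:' strings built here use ffmpeg's concat protocol, which byte-joins MPEG program streams such as VOBs; a hedged sketch with illustrative paths:

import subprocess

parts = ['VTS_01_1.VOB', 'VTS_01_2.VOB', 'VTS_01_3.VOB']
src = 'concat:{0}'.format('|'.join(parts))
subprocess.call(['ffmpeg', '-i', src, '-c', 'copy', 'title1.mpg'])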
def combineCD(combine): def combineCD(combine):
newFiles = [] newFiles = []
for item in set([ re.match("(.+)[cC][dD][0-9].",item).groups()[0] for item in combine ]): for item in set([re.match("(.+)[cC][dD][0-9].", item).groups()[0] for item in combine]):
concat = '' concat = ''
for n in range(99): for n in range(99):
files = [ file for file in combine if n+1 == int(re.match(".+[cC][dD]([0-9]+).",file).groups()[0]) and item in file ] files = [file for file in combine if
n + 1 == int(re.match(".+[cC][dD]([0-9]+).", file).groups()[0]) and item in file]
if files: if files:
concat = concat + files[0] + '|' concat += '{file}|'.format(file=files[0])
else: else:
break break
if concat: if concat:
newFiles.append('concat:%s' % concat[:-1]) newFiles.append('concat:{0}'.format(concat[:-1]))
return newFiles return newFiles
def print_cmd(command): def print_cmd(command):
cmd = "" cmd = ""
for item in command: for item in command:
cmd = cmd + " " + str(item) cmd = "{cmd} {item}".format(cmd=cmd, item=item)
logger.debug("calling command:%s" % (cmd)) logger.debug("calling command:{0}".format(cmd))
def Transcode_directory(dirName): def Transcode_directory(dirName):
if not core.FFMPEG: if not core.FFMPEG:
return 1, dirName return 1, dirName
logger.info("Checking for files to be transcoded") logger.info("Checking for files to be transcoded")
final_result = 0 # initialize as successful final_result = 0 # initialize as successful
if core.OUTPUTVIDEOPATH: if core.OUTPUTVIDEOPATH:
newDir = core.OUTPUTVIDEOPATH newDir = core.OUTPUTVIDEOPATH
makeDir(newDir) makeDir(newDir)
@ -712,22 +717,22 @@ def Transcode_directory(dirName):
if core.SEXTRACT and isinstance(file, str): if core.SEXTRACT and isinstance(file, str):
extract_subs(file, newfilePath, bitbucket) extract_subs(file, newfilePath, bitbucket)
try: # Try to remove the file that we're transcoding to just in case. (ffmpeg will return an error if it already exists for some reason) try: # Try to remove the file that we're transcoding to just in case. (ffmpeg will return an error if it already exists for some reason)
os.remove(newfilePath) os.remove(newfilePath)
except OSError, e: except OSError as e:
if e.errno != errno.ENOENT: # Ignore the error if it's just telling us that the file doesn't exist if e.errno != errno.ENOENT: # Ignore the error if it's just telling us that the file doesn't exist
logger.debug("Error when removing transcoding target: %s" % (e)) logger.debug("Error when removing transcoding target: {0}".format(e))
except Exception, e: except Exception as e:
logger.debug("Error when removing transcoding target: %s" % (e)) logger.debug("Error when removing transcoding target: {0}".format(e))
logger.info("Transcoding video: %s" % (newfilePath)) logger.info("Transcoding video: {0}".format(newfilePath))
print_cmd(command) print_cmd(command)
result = 1 # set result to failed in case call fails. result = 1 # set result to failed in case call fails.
try: try:
if isinstance(file, str): if isinstance(file, str):
proc = subprocess.Popen(command, stdout=bitbucket, stderr=bitbucket) proc = subprocess.Popen(command, stdout=bitbucket, stderr=bitbucket)
else: else:
img, data = file.iteritems().next() img, data = iteritems(file).next()
proc = subprocess.Popen(command, stdout=bitbucket, stderr=bitbucket, stdin=subprocess.PIPE) proc = subprocess.Popen(command, stdout=bitbucket, stderr=bitbucket, stdin=subprocess.PIPE)
for vob in data['files']: for vob in data['files']:
procin = zip_out(vob, img, bitbucket) procin = zip_out(vob, img, bitbucket)
@ -737,7 +742,7 @@ def Transcode_directory(dirName):
proc.communicate() proc.communicate()
result = proc.returncode result = proc.returncode
except: except:
logger.error("Transcoding of video %s has failed" % (newfilePath)) logger.error("Transcoding of video {0} has failed".format(newfilePath))
if core.SUBSDIR and result == 0 and isinstance(file, str): if core.SUBSDIR and result == 0 and isinstance(file, str):
for sub in get_subs(file): for sub in get_subs(file):
@ -751,22 +756,25 @@ def Transcode_directory(dirName):
if result == 0: if result == 0:
try: try:
shutil.copymode(file, newfilePath) shutil.copymode(file, newfilePath)
except: pass except:
logger.info("Transcoding of video to %s succeeded" % (newfilePath)) pass
logger.info("Transcoding of video to {0} succeeded".format(newfilePath))
if os.path.isfile(newfilePath) and (file in newList or not core.DUPLICATE): if os.path.isfile(newfilePath) and (file in newList or not core.DUPLICATE):
try: try:
os.unlink(file) os.unlink(file)
except: pass except:
pass
else: else:
logger.error("Transcoding of video to %s failed with result %s" % (newfilePath, str(result))) logger.error("Transcoding of video to {0} failed with result {1}".format(newfilePath, result))
# this will be 0 (successful) if all are successful, else will return a positive integer for failure. # this will be 0 (successful) if all are successful, else will return a positive integer for failure.
final_result = final_result + result final_result = final_result + result
if final_result == 0 and not core.DUPLICATE: if final_result == 0 and not core.DUPLICATE:
for file in remList: for file in remList:
try: try:
os.unlink(file) os.unlink(file)
except: pass except:
if not os.listdir(newDir): #this is an empty directory and we didn't transcode into it. pass
if not os.listdir(newDir): # this is an empty directory and we didn't transcode into it.
os.rmdir(newDir) os.rmdir(newDir)
newDir = dirName newDir = dirName
if not core.PROCESSOUTPUT and core.DUPLICATE: # We postprocess the original files to CP/SB if not core.PROCESSOUTPUT and core.DUPLICATE: # We postprocess the original files to CP/SB
@ -10,9 +10,9 @@ from core.transmissionrpc.session import Session
from core.transmissionrpc.client import Client from core.transmissionrpc.client import Client
from core.transmissionrpc.utils import add_stdout_logger, add_file_logger from core.transmissionrpc.utils import add_stdout_logger, add_file_logger
__author__ = 'Erik Svensson <erik.public@gmail.com>' __author__ = 'Erik Svensson <erik.public@gmail.com>'
__version_major__ = 0 __version_major__ = 0
__version_minor__ = 11 __version_minor__ = 11
__version__ = '{0}.{1}'.format(__version_major__, __version_minor__) __version__ = '{0}.{1}'.format(__version_major__, __version_minor__)
__copyright__ = 'Copyright (c) 2008-2013 Erik Svensson' __copyright__ = 'Copyright (c) 2008-2013 Erik Svensson'
__license__ = 'MIT' __license__ = 'MIT'
@ -18,13 +18,9 @@ from core.transmissionrpc.torrent import Torrent
from core.transmissionrpc.session import Session from core.transmissionrpc.session import Session
from six import PY3, integer_types, string_types, iteritems from six import PY3, integer_types, string_types, iteritems
from six.moves.urllib_parse import urlparse
from six.moves.urllib_request import urlopen
if PY3:
from urllib.parse import urlparse
from urllib.request import urlopen
else:
from urlparse import urlparse
from urllib2 import urlopen
def debug_httperror(error): def debug_httperror(error):
""" """
@ -49,6 +45,7 @@ def debug_httperror(error):
) )
) )
def parse_torrent_id(arg): def parse_torrent_id(arg):
"""Parse an torrent id or torrent hashString.""" """Parse an torrent id or torrent hashString."""
torrent_id = None torrent_id = None
@ -62,7 +59,7 @@ def parse_torrent_id(arg):
elif isinstance(arg, string_types): elif isinstance(arg, string_types):
try: try:
torrent_id = int(arg) torrent_id = int(arg)
if torrent_id >= 2**31: if torrent_id >= 2 ** 31:
torrent_id = None torrent_id = None
except (ValueError, TypeError): except (ValueError, TypeError):
pass pass
@ -75,6 +72,7 @@ def parse_torrent_id(arg):
pass pass
return torrent_id return torrent_id
def parse_torrent_ids(args): def parse_torrent_ids(args):
""" """
Take things and make them valid torrent identifiers Take things and make them valid torrent identifiers
@ -102,19 +100,20 @@ def parse_torrent_ids(args):
except ValueError: except ValueError:
pass pass
if not addition: if not addition:
raise ValueError('Invalid torrent id, \"%s\"' % item) raise ValueError('Invalid torrent id, {item!r}'.format(item=item))
ids.extend(addition) ids.extend(addition)
elif isinstance(args, (list, tuple)): elif isinstance(args, (list, tuple)):
for item in args: for item in args:
ids.extend(parse_torrent_ids(item)) ids.extend(parse_torrent_ids(item))
else: else:
torrent_id = parse_torrent_id(args) torrent_id = parse_torrent_id(args)
if torrent_id == None: if torrent_id is None:
raise ValueError('Invalid torrent id') raise ValueError('Invalid torrent id')
else: else:
ids = [torrent_id] ids = [torrent_id]
return ids return ids
""" """
Torrent ids Torrent ids
@ -129,26 +128,27 @@ possible to provide an argument called ``timeout``. Timeout is only effective
when using Python 2.6 or later and the default timeout is 30 seconds. when using Python 2.6 or later and the default timeout is 30 seconds.
""" """
class Client(object): class Client(object):
""" """
Client is the class handling the Transmission JSON-RPC client protocol. Client is the class handling the Transmission JSON-RPC client protocol.
""" """
def __init__(self, address='localhost', port=DEFAULT_PORT, user=None, password=None, http_handler=None, timeout=None): def __init__(self, address='localhost', port=DEFAULT_PORT, user=None, password=None, http_handler=None,
timeout=None):
if isinstance(timeout, (integer_types, float)): if isinstance(timeout, (integer_types, float)):
self._query_timeout = float(timeout) self._query_timeout = float(timeout)
else: else:
self._query_timeout = DEFAULT_TIMEOUT self._query_timeout = DEFAULT_TIMEOUT
urlo = urlparse(address) urlo = urlparse(address)
if urlo.scheme == '': if not urlo.scheme:
base_url = 'http://' + address + ':' + str(port) self.url = 'http://{host}:{port}/transmission/rpc/'.format(host=address, port=port)
self.url = base_url + '/transmission/rpc/'
else: else:
if urlo.port: if urlo.port:
self.url = urlo.scheme + '://' + urlo.hostname + ':' + str(urlo.port) + urlo.path self.url = '{url.scheme}://{url.hostname}:{url.port}{url.path}'.format(url=urlo)
else: else:
self.url = urlo.scheme + '://' + urlo.hostname + urlo.path self.url = '{url.scheme}://{url.hostname}{url.path}'.format(url=urlo)
LOGGER.info('Using custom URL "' + self.url + '".') LOGGER.info('Using custom URL {url!r}.'.format(url=self.url))
if urlo.username and urlo.password: if urlo.username and urlo.password:
user = urlo.username user = urlo.username
password = urlo.password password = urlo.password
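A hedged standalone sketch of the URL normalization above (9091 is Transmission's customary default port; the helper name is ours):

from six.moves.urllib_parse import urlparse

def rpc_url(address, port=9091):
    urlo = urlparse(address)
    if not urlo.scheme:  # bare host: default scheme, port and RPC path
        return 'http://{0}:{1}/transmission/rpc/'.format(address, port)
    if urlo.port:
        return '{0.scheme}://{0.hostname}:{0.port}{0.path}'.format(urlo)
    return '{0.scheme}://{0.hostname}{0.path}'.format(urlo)

assert rpc_url('localhost') == 'http://localhost:9091/transmission/rpc/'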
@ -204,7 +204,8 @@ class Client(object):
if timeout is None: if timeout is None:
timeout = self._query_timeout timeout = self._query_timeout
while True: while True:
LOGGER.debug(json.dumps({'url': self.url, 'headers': headers, 'query': query, 'timeout': timeout}, indent=2)) LOGGER.debug(
json.dumps({'url': self.url, 'headers': headers, 'query': query, 'timeout': timeout}, indent=2))
try: try:
result = self.http_handler.request(self.url, query, headers, timeout) result = self.http_handler.request(self.url, query, headers, timeout)
break break
@ -244,26 +245,25 @@ class Client(object):
elif require_ids: elif require_ids:
raise ValueError('request require ids') raise ValueError('request require ids')
query = json.dumps({'tag': self._sequence, 'method': method query = json.dumps({'tag': self._sequence, 'method': method, 'arguments': arguments})
, 'arguments': arguments})
self._sequence += 1 self._sequence += 1
start = time.time() start = time.time()
http_data = self._http_query(query, timeout) http_data = self._http_query(query, timeout)
elapsed = time.time() - start elapsed = time.time() - start
LOGGER.info('http request took %.3f s' % (elapsed)) LOGGER.info('http request took {time:.3f} s'.format(time=elapsed))
try: try:
data = json.loads(http_data) data = json.loads(http_data)
except ValueError as error: except ValueError as error:
LOGGER.error('Error: ' + str(error)) LOGGER.error('Error: {msg}'.format(msg=error))
LOGGER.error('Request: \"%s\"' % (query)) LOGGER.error('Request: {request!r}'.format(request=query))
LOGGER.error('HTTP data: \"%s\"' % (http_data)) LOGGER.error('HTTP data: {data!r}'.format(data=http_data))
raise raise
LOGGER.debug(json.dumps(data, indent=2)) LOGGER.debug(json.dumps(data, indent=2))
if 'result' in data: if 'result' in data:
if data['result'] != 'success': if data['result'] != 'success':
raise TransmissionError('Query failed with result \"%s\".' % (data['result'])) raise TransmissionError('Query failed with result {result!r}.'.format(result=data['result']))
else: else:
raise TransmissionError('Query failed without result.') raise TransmissionError('Query failed without result.')
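For reference, the JSON-RPC payload assembled above has this wire shape (per Transmission's RPC protocol; values illustrative):

import json

query = json.dumps({'tag': 1, 'method': 'torrent-get',
                    'arguments': {'fields': ['id', 'name']}})
# a success response echoes the tag:
# {"tag": 1, "result": "success", "arguments": {...}}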
@ -347,8 +347,9 @@ class Client(object):
Add a warning to the log if the Transmission RPC version is lower than the provided version. Add a warning to the log if the Transmission RPC version is lower than the provided version.
""" """
if self.rpc_version < version: if self.rpc_version < version:
LOGGER.warning('Using feature not supported by server. RPC version for server %d, feature introduced in %d.' LOGGER.warning('Using feature not supported by server. '
% (self.rpc_version, version)) 'RPC version for server {x}, feature introduced in {y}.'.format(x=self.rpc_version, y=version))
def add_torrent(self, torrent, timeout=None, **kwargs): def add_torrent(self, torrent, timeout=None, **kwargs):
""" """
@ -408,11 +409,8 @@ class Client(object):
pass pass
if might_be_base64: if might_be_base64:
torrent_data = torrent torrent_data = torrent
args = {}
if torrent_data: args = {'metainfo': torrent_data} if torrent_data else {'filename': torrent}
args = {'metainfo': torrent_data}
else:
args = {'filename': torrent}
for key, value in iteritems(kwargs): for key, value in iteritems(kwargs):
argument = make_rpc_name(key) argument = make_rpc_name(key)
(arg, val) = argument_value_convert('torrent-add', argument, value, self.rpc_version) (arg, val) = argument_value_convert('torrent-add', argument, value, self.rpc_version)
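The simplified conditional expression sends torrent-add either 'metainfo' (base64-encoded torrent data) or 'filename' (a path, URL or magnet link), never both. A hedged usage sketch, assuming the upstream transmissionrpc package (vendored here under core.transmissionrpc) and hypothetical connection details:

    import base64

    import transmissionrpc

    client = transmissionrpc.Client('localhost', port=9091)
    client.add_torrent('magnet:?xt=urn:btih:...')  # 'filename' branch
    with open('example.torrent', 'rb') as handle:  # 'metainfo' branch
        client.add_torrent(base64.b64encode(handle.read()).decode('utf-8'))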
@@ -476,7 +474,7 @@ class Client(object):
        """
        self._rpc_version_warning(3)
        self._request('torrent-remove',
-                      {'delete-local-data':rpc_bool(delete_data)}, ids, True, timeout=timeout)
+                      {'delete-local-data': rpc_bool(delete_data)}, ids, True, timeout=timeout)

    def remove(self, ids, delete_data=False, timeout=None):
        """
@@ -606,34 +604,34 @@ class Client(object):
        the new methods. list returns a dictionary indexed by torrent id.
        """
        warnings.warn('list has been deprecated, please use get_torrent or get_torrents instead.', DeprecationWarning)
-        fields = ['id', 'hashString', 'name', 'sizeWhenDone', 'leftUntilDone'
-                  , 'eta', 'status', 'rateUpload', 'rateDownload', 'uploadedEver'
-                  , 'downloadedEver', 'uploadRatio', 'queuePosition']
+        fields = ['id', 'hashString', 'name', 'sizeWhenDone', 'leftUntilDone',
+                  'eta', 'status', 'rateUpload', 'rateDownload', 'uploadedEver',
+                  'downloadedEver', 'uploadRatio', 'queuePosition']
        return self._request('torrent-get', {'fields': fields}, timeout=timeout)
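Since list() is deprecated here in favour of get_torrent/get_torrents, an equivalent call with the replacement API might look like this (connection details hypothetical):

    import transmissionrpc

    client = transmissionrpc.Client('localhost', port=9091)
    for torrent in client.get_torrents():
        print(torrent.id, torrent.name, torrent.status)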
    def get_files(self, ids=None, timeout=None):
        """
        Get list of files for provided torrent id(s). If ids is empty,
        information for all torrents are fetched. This function returns a dictionary
        for each requested torrent id holding the information about the files.

        ::

            {
                <torrent id>: {
                    <file id>: {
                        'name': <file name>,
                        'size': <file size in bytes>,
                        'completed': <bytes completed>,
                        'priority': <priority ('high'|'normal'|'low')>,
                        'selected': <selected for download (True|False)>
                    }
                    ...
                }
                ...
            }
        """
        fields = ['id', 'name', 'hashString', 'files', 'priorities', 'wanted']
        request_result = self._request('torrent-get', {'fields': fields}, ids, timeout=timeout)
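A hedged sketch of consuming the nested dictionary described in the docstring above (connection details hypothetical):

    import transmissionrpc

    client = transmissionrpc.Client('localhost', port=9091)
    files = client.get_files()  # no ids: fetch file lists for all torrents
    for torrent_id, torrent_files in files.items():
        for file_id, info in torrent_files.items():
            print(torrent_id, file_id, info['name'], info['completed'], info['size'])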
@@ -645,22 +643,22 @@ class Client(object):
    def set_files(self, items, timeout=None):
        """
        Set file properties. Takes a dictionary with similar contents as the result
        of `get_files`.

        ::

            {
                <torrent id>: {
                    <file id>: {
                        'priority': <priority ('high'|'normal'|'low')>,
                        'selected': <selected for download (True|False)>
                    }
                    ...
                }
                ...
            }
        """
        if not isinstance(items, dict):
            raise ValueError('Invalid file description')
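Correspondingly, a sketch of feeding set_files a dictionary in the documented shape (torrent and file ids hypothetical):

    import transmissionrpc

    client = transmissionrpc.Client('localhost', port=9091)
    client.set_files({1: {0: {'selected': False},                       # skip file 0
                          1: {'selected': True, 'priority': 'high'}}})  # prefer file 1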
@@ -703,8 +701,8 @@ class Client(object):
    def change_torrent(self, ids, timeout=None, **kwargs):
        """
        Change torrent parameters for the torrent(s) with the supplied id's. The
        parameters are:

        ============================ ===== =============== =======================================================================================
        Argument                     RPC   Replaced by     Description
@@ -736,13 +734,13 @@ class Client(object):
        ``uploadLimited``            5     -               Enable upload speed limiter.
        ============================ ===== =============== =======================================================================================

        .. NOTE::
           transmissionrpc will try to automatically fix argument errors.
        """
        args = {}
        for key, value in iteritems(kwargs):
            argument = make_rpc_name(key)
-            (arg, val) = argument_value_convert('torrent-set' , argument, value, self.rpc_version)
+            (arg, val) = argument_value_convert('torrent-set', argument, value, self.rpc_version)
            args[arg] = val
        if len(args) > 0:
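Because make_rpc_name and argument_value_convert normalise keyword names in the loop above, callers can pass pythonic keyword arguments directly. A hedged sketch (ids hypothetical):

    import transmissionrpc

    client = transmissionrpc.Client('localhost', port=9091)
    client.change_torrent([1, 2], uploadLimit=100, uploadLimited=True)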
@@ -803,7 +801,7 @@ class Client(object):
            raise ValueError("Target name cannot contain a path delimiter")
        args = {'path': location, 'name': name}
        result = self._request('torrent-rename-path', args, torrent_id, True, timeout=timeout)
-        return (result['path'], result['name'])
+        return result['path'], result['name']

    def queue_top(self, ids, timeout=None):
        """Move transfer to the top of the queue."""
@@ -814,7 +812,7 @@ class Client(object):
        """Move transfer to the bottom of the queue."""
        self._rpc_version_warning(14)
        self._request('queue-move-bottom', ids=ids, require_ids=True, timeout=timeout)

    def queue_up(self, ids, timeout=None):
        """Move transfer up in the queue."""
        self._rpc_version_warning(14)
@@ -888,14 +886,14 @@ class Client(object):
        ================================ ===== ================= ==========================================================================================================================

        .. NOTE::
           transmissionrpc will try to automatically fix argument errors.
        """
        args = {}
        for key, value in iteritems(kwargs):
            if key == 'encryption' and value not in ['required', 'preferred', 'tolerated']:
                raise ValueError('Invalid encryption value')
            argument = make_rpc_name(key)
-            (arg, val) = argument_value_convert('session-set' , argument, value, self.rpc_version)
+            (arg, val) = argument_value_convert('session-set', argument, value, self.rpc_version)
            args[arg] = val
        if len(args) > 0:
            self._request('session-set', args, timeout=timeout)
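set_session runs the same normalisation, and the encryption guard above rejects bad values before anything is sent. A usage sketch (values hypothetical):

    import transmissionrpc

    client = transmissionrpc.Client('localhost', port=9091)
    client.set_session(encryption='required',
                       speed_limit_down=500, speed_limit_down_enabled=True)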
View file
@@ -6,10 +6,10 @@ import logging
from core.transmissionrpc.six import iteritems

LOGGER = logging.getLogger('transmissionrpc')
LOGGER.setLevel(logging.ERROR)


def mirror_dict(source):
    """
    Creates a dictionary with all values as keys and all keys as values.
@@ -17,38 +17,39 @@ def mirror_dict(source):
    source.update(dict((value, key) for key, value in iteritems(source)))
    return source
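The returned dictionary maps in both directions, which is how the PRIORITY, RATIO_LIMIT and IDLE_LIMIT tables below resolve names to codes and codes back to names:

    >>> PRIORITY = mirror_dict({'low': -1, 'normal': 0, 'high': 1})
    >>> PRIORITY['high']
    1
    >>> PRIORITY[1]
    'high'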
DEFAULT_PORT = 9091

DEFAULT_TIMEOUT = 30.0

TR_PRI_LOW = -1
TR_PRI_NORMAL = 0
TR_PRI_HIGH = 1

PRIORITY = mirror_dict({
    'low': TR_PRI_LOW,
    'normal': TR_PRI_NORMAL,
    'high': TR_PRI_HIGH
})

TR_RATIOLIMIT_GLOBAL = 0  # follow the global settings
TR_RATIOLIMIT_SINGLE = 1  # override the global settings, seeding until a certain ratio
TR_RATIOLIMIT_UNLIMITED = 2  # override the global settings, seeding regardless of ratio

RATIO_LIMIT = mirror_dict({
    'global': TR_RATIOLIMIT_GLOBAL,
    'single': TR_RATIOLIMIT_SINGLE,
    'unlimited': TR_RATIOLIMIT_UNLIMITED
})

TR_IDLELIMIT_GLOBAL = 0  # follow the global settings
TR_IDLELIMIT_SINGLE = 1  # override the global settings, seeding until a certain idle time
TR_IDLELIMIT_UNLIMITED = 2  # override the global settings, seeding regardless of activity

IDLE_LIMIT = mirror_dict({
    'global': TR_RATIOLIMIT_GLOBAL,
    'single': TR_RATIOLIMIT_SINGLE,
    'unlimited': TR_RATIOLIMIT_UNLIMITED
})

# A note on argument maps
@@ -62,236 +63,266 @@ IDLE_LIMIT = mirror_dict({
# Arguments for torrent methods
TORRENT_ARGS = {
    'get': {
        'activityDate': ('number', 1, None, None, None, 'Last time of upload or download activity.'),
        'addedDate': ('number', 1, None, None, None, 'The date when this torrent was first added.'),
        'announceResponse': ('string', 1, 7, None, None, 'The announce message from the tracker.'),
        'announceURL': ('string', 1, 7, None, None, 'Current announce URL.'),
        'bandwidthPriority': ('number', 5, None, None, None, 'Bandwidth priority. Low (-1), Normal (0) or High (1).'),
        'comment': ('string', 1, None, None, None, 'Torrent comment.'),
        'corruptEver': ('number', 1, None, None, None, 'Number of bytes of corrupt data downloaded.'),
        'creator': ('string', 1, None, None, None, 'Torrent creator.'),
        'dateCreated': ('number', 1, None, None, None, 'Torrent creation date.'),
        'desiredAvailable': ('number', 1, None, None, None, 'Number of bytes avalable and left to be downloaded.'),
        'doneDate': ('number', 1, None, None, None, 'The date when the torrent finished downloading.'),
        'downloadDir': ('string', 4, None, None, None, 'The directory path where the torrent is downloaded to.'),
        'downloadedEver': ('number', 1, None, None, None, 'Number of bytes of good data downloaded.'),
        'downloaders': ('number', 4, 7, None, None, 'Number of downloaders.'),
        'downloadLimit': ('number', 1, None, None, None, 'Download limit in Kbps.'),
        'downloadLimited': ('boolean', 5, None, None, None, 'Download limit is enabled'),
        'downloadLimitMode': (
            'number', 1, 5, None, None, 'Download limit mode. 0 means global, 1 means signle, 2 unlimited.'),
        'error': ('number', 1, None, None, None,
                  'Kind of error. 0 means OK, 1 means tracker warning, 2 means tracker error, 3 means local error.'),
        'errorString': ('number', 1, None, None, None, 'Error message.'),
        'eta': ('number', 1, None, None, None,
                'Estimated number of seconds left when downloading or seeding. -1 means not available and -2 means unknown.'),
        'etaIdle': ('number', 15, None, None, None,
                    'Estimated number of seconds left until the idle time limit is reached. -1 means not available and -2 means unknown.'),
        'files': (
            'array', 1, None, None, None, 'Array of file object containing key, bytesCompleted, length and name.'),
        'fileStats': (
            'array', 5, None, None, None, 'Aray of file statistics containing bytesCompleted, wanted and priority.'),
        'hashString': ('string', 1, None, None, None, 'Hashstring unique for the torrent even between sessions.'),
        'haveUnchecked': ('number', 1, None, None, None, 'Number of bytes of partial pieces.'),
        'haveValid': ('number', 1, None, None, None, 'Number of bytes of checksum verified data.'),
        'honorsSessionLimits': ('boolean', 5, None, None, None, 'True if session upload limits are honored'),
        'id': ('number', 1, None, None, None, 'Session unique torrent id.'),
        'isFinished': ('boolean', 9, None, None, None, 'True if the torrent is finished. Downloaded and seeded.'),
        'isPrivate': ('boolean', 1, None, None, None, 'True if the torrent is private.'),
        'isStalled': ('boolean', 14, None, None, None, 'True if the torrent has stalled (been idle for a long time).'),
        'lastAnnounceTime': ('number', 1, 7, None, None, 'The time of the last announcement.'),
        'lastScrapeTime': ('number', 1, 7, None, None, 'The time af the last successful scrape.'),
        'leechers': ('number', 1, 7, None, None, 'Number of leechers.'),
        'leftUntilDone': ('number', 1, None, None, None, 'Number of bytes left until the download is done.'),
        'magnetLink': ('string', 7, None, None, None, 'The magnet link for this torrent.'),
        'manualAnnounceTime': ('number', 1, None, None, None, 'The time until you manually ask for more peers.'),
        'maxConnectedPeers': ('number', 1, None, None, None, 'Maximum of connected peers.'),
        'metadataPercentComplete': ('number', 7, None, None, None, 'Download progress of metadata. 0.0 to 1.0.'),
        'name': ('string', 1, None, None, None, 'Torrent name.'),
        'nextAnnounceTime': ('number', 1, 7, None, None, 'Next announce time.'),
        'nextScrapeTime': ('number', 1, 7, None, None, 'Next scrape time.'),
        'peer-limit': ('number', 5, None, None, None, 'Maximum number of peers.'),
        'peers': ('array', 2, None, None, None, 'Array of peer objects.'),
        'peersConnected': ('number', 1, None, None, None, 'Number of peers we are connected to.'),
        'peersFrom': (
            'object', 1, None, None, None, 'Object containing download peers counts for different peer types.'),
        'peersGettingFromUs': ('number', 1, None, None, None, 'Number of peers we are sending data to.'),
        'peersKnown': ('number', 1, 13, None, None, 'Number of peers that the tracker knows.'),
        'peersSendingToUs': ('number', 1, None, None, None, 'Number of peers sending to us'),
        'percentDone': ('double', 5, None, None, None, 'Download progress of selected files. 0.0 to 1.0.'),
        'pieces': ('string', 5, None, None, None, 'String with base64 encoded bitfield indicating finished pieces.'),
        'pieceCount': ('number', 1, None, None, None, 'Number of pieces.'),
        'pieceSize': ('number', 1, None, None, None, 'Number of bytes in a piece.'),
        'priorities': ('array', 1, None, None, None, 'Array of file priorities.'),
        'queuePosition': ('number', 14, None, None, None, 'The queue position.'),
        'rateDownload': ('number', 1, None, None, None, 'Download rate in bps.'),
        'rateUpload': ('number', 1, None, None, None, 'Upload rate in bps.'),
        'recheckProgress': ('double', 1, None, None, None, 'Progress of recheck. 0.0 to 1.0.'),
        'secondsDownloading': ('number', 15, None, None, None, ''),
        'secondsSeeding': ('number', 15, None, None, None, ''),
        'scrapeResponse': ('string', 1, 7, None, None, 'Scrape response message.'),
        'scrapeURL': ('string', 1, 7, None, None, 'Current scrape URL'),
        'seeders': ('number', 1, 7, None, None, 'Number of seeders reported by the tracker.'),
        'seedIdleLimit': ('number', 10, None, None, None, 'Idle limit in minutes.'),
        'seedIdleMode': ('number', 10, None, None, None, 'Use global (0), torrent (1), or unlimited (2) limit.'),
        'seedRatioLimit': ('double', 5, None, None, None, 'Seed ratio limit.'),
        'seedRatioMode': ('number', 5, None, None, None, 'Use global (0), torrent (1), or unlimited (2) limit.'),
        'sizeWhenDone': ('number', 1, None, None, None, 'Size of the torrent download in bytes.'),
        'startDate': ('number', 1, None, None, None, 'The date when the torrent was last started.'),
        'status': ('number', 1, None, None, None, 'Current status, see source'),
        'swarmSpeed': ('number', 1, 7, None, None, 'Estimated speed in Kbps in the swarm.'),
        'timesCompleted': ('number', 1, 7, None, None, 'Number of successful downloads reported by the tracker.'),
        'trackers': ('array', 1, None, None, None, 'Array of tracker objects.'),
        'trackerStats': ('object', 7, None, None, None, 'Array of object containing tracker statistics.'),
        'totalSize': ('number', 1, None, None, None, 'Total size of the torrent in bytes'),
        'torrentFile': ('string', 5, None, None, None, 'Path to .torrent file.'),
        'uploadedEver': ('number', 1, None, None, None, 'Number of bytes uploaded, ever.'),
        'uploadLimit': ('number', 1, None, None, None, 'Upload limit in Kbps'),
        'uploadLimitMode': (
            'number', 1, 5, None, None, 'Upload limit mode. 0 means global, 1 means signle, 2 unlimited.'),
        'uploadLimited': ('boolean', 5, None, None, None, 'Upload limit enabled.'),
        'uploadRatio': ('double', 1, None, None, None, 'Seed ratio.'),
        'wanted': ('array', 1, None, None, None, 'Array of booleans indicated wanted files.'),
        'webseeds': ('array', 1, None, None, None, 'Array of webseeds objects'),
        'webseedsSendingToUs': ('number', 1, None, None, None, 'Number of webseeds seeding to us.'),
    },
    'set': {
        'bandwidthPriority': ('number', 5, None, None, None, 'Priority for this transfer.'),
        'downloadLimit': ('number', 5, None, 'speed-limit-down', None, 'Set the speed limit for download in Kib/s.'),
        'downloadLimited': ('boolean', 5, None, 'speed-limit-down-enabled', None, 'Enable download speed limiter.'),
        'files-wanted': ('array', 1, None, None, None, "A list of file id's that should be downloaded."),
        'files-unwanted': ('array', 1, None, None, None, "A list of file id's that shouldn't be downloaded."),
        'honorsSessionLimits': ('boolean', 5, None, None, None,
                                "Enables or disables the transfer to honour the upload limit set in the session."),
        'location': ('array', 1, None, None, None, 'Local download location.'),
        'peer-limit': ('number', 1, None, None, None, 'The peer limit for the torrents.'),
        'priority-high': ('array', 1, None, None, None, "A list of file id's that should have high priority."),
        'priority-low': ('array', 1, None, None, None, "A list of file id's that should have normal priority."),
        'priority-normal': ('array', 1, None, None, None, "A list of file id's that should have low priority."),
        'queuePosition': ('number', 14, None, None, None, 'Position of this transfer in its queue.'),
        'seedIdleLimit': ('number', 10, None, None, None, 'Seed inactivity limit in minutes.'),
        'seedIdleMode': ('number', 10, None, None, None,
                         'Seed inactivity mode. 0 = Use session limit, 1 = Use transfer limit, 2 = Disable limit.'),
        'seedRatioLimit': ('double', 5, None, None, None, 'Seeding ratio.'),
        'seedRatioMode': ('number', 5, None, None, None,
                          'Which ratio to use. 0 = Use session limit, 1 = Use transfer limit, 2 = Disable limit.'),
        'speed-limit-down': ('number', 1, 5, None, 'downloadLimit', 'Set the speed limit for download in Kib/s.'),
        'speed-limit-down-enabled': ('boolean', 1, 5, None, 'downloadLimited', 'Enable download speed limiter.'),
        'speed-limit-up': ('number', 1, 5, None, 'uploadLimit', 'Set the speed limit for upload in Kib/s.'),
        'speed-limit-up-enabled': ('boolean', 1, 5, None, 'uploadLimited', 'Enable upload speed limiter.'),
        'trackerAdd': ('array', 10, None, None, None, 'Array of string with announce URLs to add.'),
        'trackerRemove': ('array', 10, None, None, None, 'Array of ids of trackers to remove.'),
        'trackerReplace': (
            'array', 10, None, None, None, 'Array of (id, url) tuples where the announce URL should be replaced.'),
        'uploadLimit': ('number', 5, None, 'speed-limit-up', None, 'Set the speed limit for upload in Kib/s.'),
        'uploadLimited': ('boolean', 5, None, 'speed-limit-up-enabled', None, 'Enable upload speed limiter.'),
    },
    'add': {
        'bandwidthPriority': ('number', 8, None, None, None, 'Priority for this transfer.'),
        'download-dir': (
            'string', 1, None, None, None, 'The directory where the downloaded contents will be saved in.'),
        'cookies': ('string', 13, None, None, None, 'One or more HTTP cookie(s).'),
        'filename': ('string', 1, None, None, None, "A file path or URL to a torrent file or a magnet link."),
        'files-wanted': ('array', 1, None, None, None, "A list of file id's that should be downloaded."),
        'files-unwanted': ('array', 1, None, None, None, "A list of file id's that shouldn't be downloaded."),
        'metainfo': ('string', 1, None, None, None, 'The content of a torrent file, base64 encoded.'),
        'paused': ('boolean', 1, None, None, None, 'If True, does not start the transfer when added.'),
        'peer-limit': ('number', 1, None, None, None, 'Maximum number of peers allowed.'),
        'priority-high': ('array', 1, None, None, None, "A list of file id's that should have high priority."),
        'priority-low': ('array', 1, None, None, None, "A list of file id's that should have low priority."),
        'priority-normal': ('array', 1, None, None, None, "A list of file id's that should have normal priority."),
    }
}
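Each value in this map is a six-field tuple; reading the fields as (type, RPC version added, RPC version removed, previous name, replacement name, description) is consistent with the entries above, e.g. 'downloadLimit' replacing 'speed-limit-down' at RPC version 5:

    (arg_type, added, removed,
     previous, replaced_by, description) = TORRENT_ARGS['set']['downloadLimit']
    # -> ('number', 5, None, 'speed-limit-down', None,
    #     'Set the speed limit for download in Kib/s.')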
# Arguments for session methods
SESSION_ARGS = {
    'get': {
        "alt-speed-down": ('number', 5, None, None, None, 'Alternate session download speed limit (in Kib/s).'),
        "alt-speed-enabled": (
            'boolean', 5, None, None, None, 'True if alternate global download speed limiter is ebabled.'),
        "alt-speed-time-begin": (
            'number', 5, None, None, None, 'Time when alternate speeds should be enabled. Minutes after midnight.'),
        "alt-speed-time-enabled": ('boolean', 5, None, None, None, 'True if alternate speeds scheduling is enabled.'),
        "alt-speed-time-end": (
            'number', 5, None, None, None, 'Time when alternate speeds should be disabled. Minutes after midnight.'),
        "alt-speed-time-day": ('number', 5, None, None, None, 'Days alternate speeds scheduling is enabled.'),
        "alt-speed-up": ('number', 5, None, None, None, 'Alternate session upload speed limit (in Kib/s)'),
        "blocklist-enabled": ('boolean', 5, None, None, None, 'True when blocklist is enabled.'),
        "blocklist-size": ('number', 5, None, None, None, 'Number of rules in the blocklist'),
        "blocklist-url": ('string', 11, None, None, None, 'Location of the block list. Updated with blocklist-update.'),
        "cache-size-mb": ('number', 10, None, None, None, 'The maximum size of the disk cache in MB'),
        "config-dir": ('string', 8, None, None, None, 'location of transmissions configuration directory'),
        "dht-enabled": ('boolean', 6, None, None, None, 'True if DHT enabled.'),
        "download-dir": ('string', 1, None, None, None, 'The download directory.'),
        "download-dir-free-space": ('number', 12, None, None, None, 'Free space in the download directory, in bytes'),
        "download-queue-size": ('number', 14, None, None, None, 'Number of slots in the download queue.'),
        "download-queue-enabled": ('boolean', 14, None, None, None, 'True if the download queue is enabled.'),
        "encryption": (
            'string', 1, None, None, None, 'Encryption mode, one of ``required``, ``preferred`` or ``tolerated``.'),
        "idle-seeding-limit": ('number', 10, None, None, None, 'Seed inactivity limit in minutes.'),
        "idle-seeding-limit-enabled": ('boolean', 10, None, None, None, 'True if the seed activity limit is enabled.'),
        "incomplete-dir": (
            'string', 7, None, None, None, 'The path to the directory for incomplete torrent transfer data.'),
        "incomplete-dir-enabled": ('boolean', 7, None, None, None, 'True if the incomplete dir is enabled.'),
        "lpd-enabled": ('boolean', 9, None, None, None, 'True if local peer discovery is enabled.'),
        "peer-limit": ('number', 1, 5, None, 'peer-limit-global', 'Maximum number of peers.'),
        "peer-limit-global": ('number', 5, None, 'peer-limit', None, 'Maximum number of peers.'),
        "peer-limit-per-torrent": ('number', 5, None, None, None, 'Maximum number of peers per transfer.'),
        "pex-allowed": ('boolean', 1, 5, None, 'pex-enabled', 'True if PEX is allowed.'),
        "pex-enabled": ('boolean', 5, None, 'pex-allowed', None, 'True if PEX is enabled.'),
        "port": ('number', 1, 5, None, 'peer-port', 'Peer port.'),
        "peer-port": ('number', 5, None, 'port', None, 'Peer port.'),
        "peer-port-random-on-start": (
            'boolean', 5, None, None, None, 'Enables randomized peer port on start of Transmission.'),
        "port-forwarding-enabled": ('boolean', 1, None, None, None, 'True if port forwarding is enabled.'),
        "queue-stalled-minutes": (
            'number', 14, None, None, None, 'Number of minutes of idle that marks a transfer as stalled.'),
        "queue-stalled-enabled": ('boolean', 14, None, None, None, 'True if stalled tracking of transfers is enabled.'),
        "rename-partial-files": ('boolean', 8, None, None, None, 'True if ".part" is appended to incomplete files'),
        "rpc-version": ('number', 4, None, None, None, 'Transmission RPC API Version.'),
        "rpc-version-minimum": ('number', 4, None, None, None, 'Minimum accepted RPC API Version.'),
        "script-torrent-done-enabled": ('boolean', 9, None, None, None, 'True if the done script is enabled.'),
        "script-torrent-done-filename": (
            'string', 9, None, None, None, 'Filename of the script to run when the transfer is done.'),
        "seedRatioLimit": ('double', 5, None, None, None, 'Seed ratio limit. 1.0 means 1:1 download and upload ratio.'),
        "seedRatioLimited": ('boolean', 5, None, None, None, 'True if seed ration limit is enabled.'),
        "seed-queue-size": ('number', 14, None, None, None, 'Number of slots in the upload queue.'),
        "seed-queue-enabled": ('boolean', 14, None, None, None, 'True if upload queue is enabled.'),
        "speed-limit-down": ('number', 1, None, None, None, 'Download speed limit (in Kib/s).'),
        "speed-limit-down-enabled": ('boolean', 1, None, None, None, 'True if the download speed is limited.'),
        "speed-limit-up": ('number', 1, None, None, None, 'Upload speed limit (in Kib/s).'),
        "speed-limit-up-enabled": ('boolean', 1, None, None, None, 'True if the upload speed is limited.'),
        "start-added-torrents": ('boolean', 9, None, None, None, 'When true uploaded torrents will start right away.'),
        "trash-original-torrent-files": (
            'boolean', 9, None, None, None, 'When true added .torrent files will be deleted.'),
        'units': ('object', 10, None, None, None, 'An object containing units for size and speed.'),
        'utp-enabled': ('boolean', 13, None, None, None, 'True if Micro Transport Protocol (UTP) is enabled.'),
        "version": ('string', 3, None, None, None, 'Transmission version.'),
    },
    'set': {
        "alt-speed-down": ('number', 5, None, None, None, 'Alternate session download speed limit (in Kib/s).'),
        "alt-speed-enabled": ('boolean', 5, None, None, None, 'Enables alternate global download speed limiter.'),
        "alt-speed-time-begin": (
            'number', 5, None, None, None, 'Time when alternate speeds should be enabled. Minutes after midnight.'),
        "alt-speed-time-enabled": ('boolean', 5, None, None, None, 'Enables alternate speeds scheduling.'),
        "alt-speed-time-end": (
            'number', 5, None, None, None, 'Time when alternate speeds should be disabled. Minutes after midnight.'),
        "alt-speed-time-day": ('number', 5, None, None, None, 'Enables alternate speeds scheduling these days.'),
        "alt-speed-up": ('number', 5, None, None, None, 'Alternate session upload speed limit (in Kib/s).'),
        "blocklist-enabled": ('boolean', 5, None, None, None, 'Enables the block list'),
        "blocklist-url": ('string', 11, None, None, None, 'Location of the block list. Updated with blocklist-update.'),
        "cache-size-mb": ('number', 10, None, None, None, 'The maximum size of the disk cache in MB'),
        "dht-enabled": ('boolean', 6, None, None, None, 'Enables DHT.'),
        "download-dir": ('string', 1, None, None, None, 'Set the session download directory.'),
        "download-queue-size": ('number', 14, None, None, None, 'Number of slots in the download queue.'),
        "download-queue-enabled": ('boolean', 14, None, None, None, 'Enables download queue.'),
        "encryption": ('string', 1, None, None, None,
                       'Set the session encryption mode, one of ``required``, ``preferred`` or ``tolerated``.'),
        "idle-seeding-limit": ('number', 10, None, None, None, 'The default seed inactivity limit in minutes.'),
        "idle-seeding-limit-enabled": ('boolean', 10, None, None, None, 'Enables the default seed inactivity limit'),
        "incomplete-dir": ('string', 7, None, None, None, 'The path to the directory of incomplete transfer data.'),
        "incomplete-dir-enabled": ('boolean', 7, None, None, None,
                                   'Enables the incomplete transfer data directory. Otherwise data for incomplete transfers are stored in the download target.'),
        "lpd-enabled": ('boolean', 9, None, None, None, 'Enables local peer discovery for public torrents.'),
        "peer-limit": ('number', 1, 5, None, 'peer-limit-global', 'Maximum number of peers.'),
        "peer-limit-global": ('number', 5, None, 'peer-limit', None, 'Maximum number of peers.'),
        "peer-limit-per-torrent": ('number', 5, None, None, None, 'Maximum number of peers per transfer.'),
        "pex-allowed": ('boolean', 1, 5, None, 'pex-enabled', 'Allowing PEX in public torrents.'),
        "pex-enabled": ('boolean', 5, None, 'pex-allowed', None, 'Allowing PEX in public torrents.'),
        "port": ('number', 1, 5, None, 'peer-port', 'Peer port.'),
        "peer-port": ('number', 5, None, 'port', None, 'Peer port.'),
        "peer-port-random-on-start": (
            'boolean', 5, None, None, None, 'Enables randomized peer port on start of Transmission.'),
        "port-forwarding-enabled": ('boolean', 1, None, None, None, 'Enables port forwarding.'),
        "rename-partial-files": ('boolean', 8, None, None, None, 'Appends ".part" to incomplete files'),
        "queue-stalled-minutes": (
            'number', 14, None, None, None, 'Number of minutes of idle that marks a transfer as stalled.'),
        "queue-stalled-enabled": ('boolean', 14, None, None, None, 'Enable tracking of stalled transfers.'),
        "script-torrent-done-enabled": ('boolean', 9, None, None, None, 'Whether or not to call the "done" script.'),
        "script-torrent-done-filename": (
            'string', 9, None, None, None, 'Filename of the script to run when the transfer is done.'),
        "seed-queue-size": ('number', 14, None, None, None, 'Number of slots in the upload queue.'),
        "seed-queue-enabled": ('boolean', 14, None, None, None, 'Enables upload queue.'),
        "seedRatioLimit": ('double', 5, None, None, None, 'Seed ratio limit. 1.0 means 1:1 download and upload ratio.'),
        "seedRatioLimited": ('boolean', 5, None, None, None, 'Enables seed ration limit.'),
        "speed-limit-down": ('number', 1, None, None, None, 'Download speed limit (in Kib/s).'),
        "speed-limit-down-enabled": ('boolean', 1, None, None, None, 'Enables download speed limiting.'),
        "speed-limit-up": ('number', 1, None, None, None, 'Upload speed limit (in Kib/s).'),
        "speed-limit-up-enabled": ('boolean', 1, None, None, None, 'Enables upload speed limiting.'),
        "start-added-torrents": ('boolean', 9, None, None, None, 'Added torrents will be started right away.'),
        "trash-original-torrent-files": (
            'boolean', 9, None, None, None, 'The .torrent file of added torrents will be deleted.'),
        'utp-enabled': ('boolean', 13, None, None, None, 'Enables Micro Transport Protocol (UTP).'),
    },
}
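argument_value_convert consults these tuples to rewrite arguments for the daemon actually being talked to; a hedged sketch of the intended effect (the exact return shape is an assumption):

    from core.transmissionrpc.utils import argument_value_convert

    # On an RPC version 14 daemon, the legacy 'peer-limit' session argument
    # should be rewritten to its replacement 'peer-limit-global'.
    arg, val = argument_value_convert('session-set', 'peer-limit', 20, 14)
    # expected: ('peer-limit-global', 20)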
View file
@@ -4,11 +4,13 @@
from core.transmissionrpc.six import string_types, integer_types


class TransmissionError(Exception):
    """
    This exception is raised when there has occurred an error related to
    communication with Transmission. It is a subclass of Exception.
    """

    def __init__(self, message='', original=None):
        Exception.__init__(self)
        self.message = message
@@ -17,15 +19,17 @@ class TransmissionError(Exception):
    def __str__(self):
        if self.original:
            original_name = type(self.original).__name__
-            return '%s Original exception: %s, "%s"' % (self.message, original_name, str(self.original))
+            return '{0} Original exception: {1}, "{2}"'.format(self.message, original_name, str(self.original))
        else:
            return self.message


class HTTPHandlerError(Exception):
    """
    This exception is raised when there has occurred an error related to
    the HTTP handler. It is a subclass of Exception.
    """

    def __init__(self, httpurl=None, httpcode=None, httpmsg=None, httpheaders=None, httpdata=None):
        Exception.__init__(self)
        self.url = ''
@@ -45,10 +49,10 @@ class HTTPHandlerError(Exception):
        self.data = httpdata

    def __repr__(self):
-        return '<HTTPHandlerError %d, %s>' % (self.code, self.message)
+        return '<HTTPHandlerError {0:d}, {1}>'.format(self.code, self.message)

    def __str__(self):
-        return 'HTTPHandlerError %d: %s' % (self.code, self.message)
+        return 'HTTPHandlerError {0:d}: {1}'.format(self.code, self.message)

    def __unicode__(self):
-        return 'HTTPHandlerError %d: %s' % (self.code, self.message)
+        return 'HTTPHandlerError {0:d}: {1}'.format(self.code, self.message)
View file
@@ -4,24 +4,22 @@
import sys

-from core.transmissionrpc.error import HTTPHandlerError
-from six import PY3
-
-if PY3:
-    from urllib.request import Request, build_opener, \
-        HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler, HTTPDigestAuthHandler
-    from urllib.error import HTTPError, URLError
-    from http.client import BadStatusLine
-else:
-    from urllib2 import Request, build_opener, \
-        HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler, HTTPDigestAuthHandler
-    from urllib2 import HTTPError, URLError
-    from httplib import BadStatusLine
+from six.moves.urllib_request import (
+    build_opener, install_opener,
+    HTTPBasicAuthHandler, HTTPDigestAuthHandler, HTTPPasswordMgrWithDefaultRealm,
+    Request,
+)
+from six.moves.urllib_error import HTTPError, URLError
+from six.moves.http_client import BadStatusLine
+
+from core.transmissionrpc.error import HTTPHandlerError


class HTTPHandler(object):
    """
    Prototype for HTTP handling.
    """

    def set_authentication(self, uri, login, password):
        """
        Transmission use basic authentication in earlier versions and digest
@@ -44,10 +42,12 @@ class HTTPHandler(object):
        """
        raise NotImplementedError("Bad HTTPHandler, failed to implement request.")


class DefaultHTTPHandler(HTTPHandler):
    """
    The default HTTP handler provided with transmissionrpc.
    """

    def __init__(self):
        HTTPHandler.__init__(self)
        self.http_opener = build_opener()
@ -75,7 +75,7 @@ class DefaultHTTPHandler(HTTPHandler):
if hasattr(error.reason, 'args') and isinstance(error.reason.args, tuple) and len(error.reason.args) == 2: if hasattr(error.reason, 'args') and isinstance(error.reason.args, tuple) and len(error.reason.args) == 2:
raise HTTPHandlerError(httpcode=error.reason.args[0], httpmsg=error.reason.args[1]) raise HTTPHandlerError(httpcode=error.reason.args[0], httpmsg=error.reason.args[1])
else: else:
raise HTTPHandlerError(httpmsg='urllib2.URLError: %s' % (error.reason)) raise HTTPHandlerError(httpmsg='urllib2.URLError: {error.reason}'.format(error=error))
except BadStatusLine as error: except BadStatusLine as error:
raise HTTPHandlerError(httpmsg='httplib.BadStatusLine: %s' % (error.line)) raise HTTPHandlerError(httpmsg='httplib.BadStatusLine: {error.line}'.format(error=error))
return response.read().decode('utf-8') return response.read().decode('utf-8')
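A minimal sketch of a replacement transport built on the prototype above, assuming the request(url, query, headers, timeout) signature that DefaultHTTPHandler implements and that the third-party requests package is available:

from core.transmissionrpc.httphandler import HTTPHandler

class RequestsHTTPHandler(HTTPHandler):
    def __init__(self):
        HTTPHandler.__init__(self)
        self.auth = None  # (login, password) tuple once set

    def set_authentication(self, uri, login, password):
        self.auth = (login, password)

    def request(self, url, query, headers, timeout):
        import requests  # assumed dependency; not vendored by this repo
        response = requests.post(url, data=query, headers=headers,
                                 auth=self.auth, timeout=timeout)
        return response.text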
View file
@ -6,6 +6,7 @@ from core.transmissionrpc.utils import Field
from core.transmissionrpc.six import iteritems, integer_types from core.transmissionrpc.six import iteritems, integer_types
class Session(object): class Session(object):
""" """
Session is a class holding the session data for a Transmission daemon. Session is a class holding the session data for a Transmission daemon.
@ -26,12 +27,12 @@ class Session(object):
try: try:
return self._fields[name].value return self._fields[name].value
except KeyError: except KeyError:
raise AttributeError('No attribute %s' % name) raise AttributeError('No attribute {0}'.format(name))
def __str__(self): def __str__(self):
text = '' text = ''
for key in sorted(self._fields.keys()): for key in sorted(self._fields.keys()):
text += "% 32s: %s\n" % (key[-32:], self._fields[key].value) text += "{0:32}: {1}\n".format(key[-32:], self._fields[key].value)
return text return text
def _update_fields(self, other): def _update_fields(self, other):
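Because __getattr__ above resolves through _fields, session fields read as plain attributes; a short sketch ('client' is an assumed transmissionrpc.Client):

session = client.get_session()
print(session.download_dir)  # looked up in session._fields
# session.no_such_field     # would raise AttributeError('No attribute no_such_field')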
View file
@ -1,6 +1,6 @@
"""Utilities for writing code that runs on Python 2 and 3""" """Utilities for writing code that runs on Python 2 and 3"""
# Copyright (c) 2010-2013 Benjamin Peterson # Copyright (c) 2010-2015 Benjamin Peterson
# #
# Permission is hereby granted, free of charge, to any person obtaining a copy # Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal # of this software and associated documentation files (the "Software"), to deal
@ -20,17 +20,22 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE. # SOFTWARE.
from __future__ import absolute_import
import functools
import itertools
import operator import operator
import sys import sys
import types import types
__author__ = "Benjamin Peterson <benjamin@python.org>" __author__ = "Benjamin Peterson <benjamin@python.org>"
__version__ = "1.4.1" __version__ = "1.10.0"
# Useful for very coarse version differentiation. # Useful for very coarse version differentiation.
PY2 = sys.version_info[0] == 2 PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3 PY3 = sys.version_info[0] == 3
PY34 = sys.version_info[0:2] >= (3, 4)
if PY3: if PY3:
string_types = str, string_types = str,
@ -53,6 +58,7 @@ else:
else: else:
# It's possible to have sizeof(long) != sizeof(Py_ssize_t). # It's possible to have sizeof(long) != sizeof(Py_ssize_t).
class X(object): class X(object):
def __len__(self): def __len__(self):
return 1 << 31 return 1 << 31
try: try:
@ -84,9 +90,13 @@ class _LazyDescr(object):
def __get__(self, obj, tp): def __get__(self, obj, tp):
result = self._resolve() result = self._resolve()
setattr(obj, self.name, result) setattr(obj, self.name, result) # Invokes __set__.
# This is a bit ugly, but it avoids running this again. try:
delattr(tp, self.name) # This is a bit ugly, but it avoids running this again by
# removing this descriptor.
delattr(obj.__class__, self.name)
except AttributeError:
pass
return result return result
@ -104,6 +114,27 @@ class MovedModule(_LazyDescr):
def _resolve(self): def _resolve(self):
return _import_module(self.mod) return _import_module(self.mod)
def __getattr__(self, attr):
_module = self._resolve()
value = getattr(_module, attr)
setattr(self, attr, value)
return value
class _LazyModule(types.ModuleType):
def __init__(self, name):
super(_LazyModule, self).__init__(name)
self.__doc__ = self.__class__.__doc__
def __dir__(self):
attrs = ["__doc__", "__name__"]
attrs += [attr.name for attr in self._moved_attributes]
return attrs
# Subclasses should override this
_moved_attributes = []
class MovedAttribute(_LazyDescr): class MovedAttribute(_LazyDescr):
@ -130,9 +161,75 @@ class MovedAttribute(_LazyDescr):
return getattr(module, self.attr) return getattr(module, self.attr)
class _SixMetaPathImporter(object):
"""
A meta path importer to import six.moves and its submodules.
This class implements a PEP302 finder and loader. It should be compatible
with Python 2.5 and all existing versions of Python 3.
"""
def __init__(self, six_module_name):
self.name = six_module_name
self.known_modules = {}
def _add_module(self, mod, *fullnames):
for fullname in fullnames:
self.known_modules[self.name + "." + fullname] = mod
def _get_module(self, fullname):
return self.known_modules[self.name + "." + fullname]
def find_module(self, fullname, path=None):
if fullname in self.known_modules:
return self
return None
def __get_module(self, fullname):
try:
return self.known_modules[fullname]
except KeyError:
raise ImportError("This loader does not know module " + fullname)
def load_module(self, fullname):
try:
# in case of a reload
return sys.modules[fullname]
except KeyError:
pass
mod = self.__get_module(fullname)
if isinstance(mod, MovedModule):
mod = mod._resolve()
else:
mod.__loader__ = self
sys.modules[fullname] = mod
return mod
def is_package(self, fullname):
"""
Return true if the named module is a package.
We need this method to get correct spec objects with
Python 3.4 (see PEP451)
"""
return hasattr(self.__get_module(fullname), "__path__")
def get_code(self, fullname):
"""Return None
Required, if is_package is implemented"""
self.__get_module(fullname) # eventually raises ImportError
return None
get_source = get_code # same as get_code
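Once the meta path importer is registered (at the bottom of this module), the move aliases resolve to the right module on either major version; a sketch, assuming this vendored copy imports as six:

from six.moves import range, zip                  # xrange/izip on Python 2
from six.moves.urllib_parse import urlencode      # urllib on 2, urllib.parse on 3
from six.moves.http_client import HTTPConnection  # httplib on 2, http.client on 3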
_importer = _SixMetaPathImporter(__name__)
class _MovedItems(_LazyModule):
class _MovedItems(types.ModuleType):
"""Lazy loading of moved objects""" """Lazy loading of moved objects"""
__path__ = [] # mark as package
_moved_attributes = [ _moved_attributes = [
@ -140,25 +237,33 @@ _moved_attributes = [
MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"), MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"), MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"), MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
MovedAttribute("intern", "__builtin__", "sys"),
MovedAttribute("map", "itertools", "builtins", "imap", "map"), MovedAttribute("map", "itertools", "builtins", "imap", "map"),
MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"),
MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"),
MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"), MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("reload_module", "__builtin__", "imp", "reload"), MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"),
MovedAttribute("reduce", "__builtin__", "functools"), MovedAttribute("reduce", "__builtin__", "functools"),
MovedAttribute("shlex_quote", "pipes", "shlex", "quote"),
MovedAttribute("StringIO", "StringIO", "io"), MovedAttribute("StringIO", "StringIO", "io"),
MovedAttribute("UserDict", "UserDict", "collections"),
MovedAttribute("UserList", "UserList", "collections"),
MovedAttribute("UserString", "UserString", "collections"), MovedAttribute("UserString", "UserString", "collections"),
MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"), MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("zip", "itertools", "builtins", "izip", "zip"), MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"), MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
MovedModule("builtins", "__builtin__"), MovedModule("builtins", "__builtin__"),
MovedModule("config", "config"), MovedModule("configparser", "ConfigParser"),
MovedModule("copyreg", "copy_reg"), MovedModule("copyreg", "copy_reg"),
MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"),
MovedModule("http_cookiejar", "cookielib", "http.cookiejar"), MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
MovedModule("http_cookies", "Cookie", "http.cookies"), MovedModule("http_cookies", "Cookie", "http.cookies"),
MovedModule("html_entities", "htmlentitydefs", "html.entities"), MovedModule("html_entities", "htmlentitydefs", "html.entities"),
MovedModule("html_parser", "HTMLParser", "html.parser"), MovedModule("html_parser", "HTMLParser", "html.parser"),
MovedModule("http_client", "httplib", "http.client"), MovedModule("http_client", "httplib", "http.client"),
MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"), MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"),
MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"), MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"), MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"), MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
@ -168,12 +273,14 @@ _moved_attributes = [
MovedModule("queue", "Queue"), MovedModule("queue", "Queue"),
MovedModule("reprlib", "repr"), MovedModule("reprlib", "repr"),
MovedModule("socketserver", "SocketServer"), MovedModule("socketserver", "SocketServer"),
MovedModule("_thread", "thread", "_thread"),
MovedModule("tkinter", "Tkinter"), MovedModule("tkinter", "Tkinter"),
MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"), MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"), MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"), MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"), MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
MovedModule("tkinter_tix", "Tix", "tkinter.tix"), MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"), MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"), MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
MovedModule("tkinter_colorchooser", "tkColorChooser", MovedModule("tkinter_colorchooser", "tkColorChooser",
@ -189,22 +296,35 @@ _moved_attributes = [
MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"), MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"), MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"), MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
MovedModule("winreg", "_winreg"), MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"),
] ]
# Add windows specific modules.
if sys.platform == "win32":
_moved_attributes += [
MovedModule("winreg", "_winreg"),
]
for attr in _moved_attributes: for attr in _moved_attributes:
setattr(_MovedItems, attr.name, attr) setattr(_MovedItems, attr.name, attr)
if isinstance(attr, MovedModule):
_importer._add_module(attr, "moves." + attr.name)
del attr del attr
moves = sys.modules[__name__ + ".moves"] = _MovedItems(__name__ + ".moves") _MovedItems._moved_attributes = _moved_attributes
moves = _MovedItems(__name__ + ".moves")
_importer._add_module(moves, "moves")
class Module_six_moves_urllib_parse(_LazyModule):
class Module_six_moves_urllib_parse(types.ModuleType):
"""Lazy loading of moved objects in six.moves.urllib_parse""" """Lazy loading of moved objects in six.moves.urllib_parse"""
_urllib_parse_moved_attributes = [ _urllib_parse_moved_attributes = [
MovedAttribute("ParseResult", "urlparse", "urllib.parse"), MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
MovedAttribute("SplitResult", "urlparse", "urllib.parse"),
MovedAttribute("parse_qs", "urlparse", "urllib.parse"), MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
MovedAttribute("parse_qsl", "urlparse", "urllib.parse"), MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
MovedAttribute("urldefrag", "urlparse", "urllib.parse"), MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
@ -218,16 +338,27 @@ _urllib_parse_moved_attributes = [
MovedAttribute("unquote", "urllib", "urllib.parse"), MovedAttribute("unquote", "urllib", "urllib.parse"),
MovedAttribute("unquote_plus", "urllib", "urllib.parse"), MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
MovedAttribute("urlencode", "urllib", "urllib.parse"), MovedAttribute("urlencode", "urllib", "urllib.parse"),
MovedAttribute("splitquery", "urllib", "urllib.parse"),
MovedAttribute("splittag", "urllib", "urllib.parse"),
MovedAttribute("splituser", "urllib", "urllib.parse"),
MovedAttribute("uses_fragment", "urlparse", "urllib.parse"),
MovedAttribute("uses_netloc", "urlparse", "urllib.parse"),
MovedAttribute("uses_params", "urlparse", "urllib.parse"),
MovedAttribute("uses_query", "urlparse", "urllib.parse"),
MovedAttribute("uses_relative", "urlparse", "urllib.parse"),
] ]
for attr in _urllib_parse_moved_attributes: for attr in _urllib_parse_moved_attributes:
setattr(Module_six_moves_urllib_parse, attr.name, attr) setattr(Module_six_moves_urllib_parse, attr.name, attr)
del attr del attr
sys.modules[__name__ + ".moves.urllib_parse"] = Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse") Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes
sys.modules[__name__ + ".moves.urllib.parse"] = Module_six_moves_urllib_parse(__name__ + ".moves.urllib.parse")
_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"),
"moves.urllib_parse", "moves.urllib.parse")
class Module_six_moves_urllib_error(types.ModuleType): class Module_six_moves_urllib_error(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_error""" """Lazy loading of moved objects in six.moves.urllib_error"""
@ -240,11 +371,14 @@ for attr in _urllib_error_moved_attributes:
setattr(Module_six_moves_urllib_error, attr.name, attr) setattr(Module_six_moves_urllib_error, attr.name, attr)
del attr del attr
sys.modules[__name__ + ".moves.urllib_error"] = Module_six_moves_urllib_error(__name__ + ".moves.urllib_error") Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes
sys.modules[__name__ + ".moves.urllib.error"] = Module_six_moves_urllib_error(__name__ + ".moves.urllib.error")
_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"),
"moves.urllib_error", "moves.urllib.error")
class Module_six_moves_urllib_request(types.ModuleType): class Module_six_moves_urllib_request(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_request""" """Lazy loading of moved objects in six.moves.urllib_request"""
@ -281,16 +415,20 @@ _urllib_request_moved_attributes = [
MovedAttribute("urlcleanup", "urllib", "urllib.request"), MovedAttribute("urlcleanup", "urllib", "urllib.request"),
MovedAttribute("URLopener", "urllib", "urllib.request"), MovedAttribute("URLopener", "urllib", "urllib.request"),
MovedAttribute("FancyURLopener", "urllib", "urllib.request"), MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
] ]
for attr in _urllib_request_moved_attributes: for attr in _urllib_request_moved_attributes:
setattr(Module_six_moves_urllib_request, attr.name, attr) setattr(Module_six_moves_urllib_request, attr.name, attr)
del attr del attr
sys.modules[__name__ + ".moves.urllib_request"] = Module_six_moves_urllib_request(__name__ + ".moves.urllib_request") Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes
sys.modules[__name__ + ".moves.urllib.request"] = Module_six_moves_urllib_request(__name__ + ".moves.urllib.request")
_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"),
"moves.urllib_request", "moves.urllib.request")
class Module_six_moves_urllib_response(types.ModuleType): class Module_six_moves_urllib_response(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_response""" """Lazy loading of moved objects in six.moves.urllib_response"""
@ -304,11 +442,14 @@ for attr in _urllib_response_moved_attributes:
setattr(Module_six_moves_urllib_response, attr.name, attr) setattr(Module_six_moves_urllib_response, attr.name, attr)
del attr del attr
sys.modules[__name__ + ".moves.urllib_response"] = Module_six_moves_urllib_response(__name__ + ".moves.urllib_response") Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes
sys.modules[__name__ + ".moves.urllib.response"] = Module_six_moves_urllib_response(__name__ + ".moves.urllib.response")
_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"),
"moves.urllib_response", "moves.urllib.response")
class Module_six_moves_urllib_robotparser(types.ModuleType): class Module_six_moves_urllib_robotparser(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_robotparser""" """Lazy loading of moved objects in six.moves.urllib_robotparser"""
@ -319,20 +460,27 @@ for attr in _urllib_robotparser_moved_attributes:
setattr(Module_six_moves_urllib_robotparser, attr.name, attr) setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
del attr del attr
sys.modules[__name__ + ".moves.urllib_robotparser"] = Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib_robotparser") Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes
sys.modules[__name__ + ".moves.urllib.robotparser"] = Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser")
_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"),
"moves.urllib_robotparser", "moves.urllib.robotparser")
class Module_six_moves_urllib(types.ModuleType): class Module_six_moves_urllib(types.ModuleType):
"""Create a six.moves.urllib namespace that resembles the Python 3 namespace""" """Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
parse = sys.modules[__name__ + ".moves.urllib_parse"] __path__ = [] # mark as package
error = sys.modules[__name__ + ".moves.urllib_error"] parse = _importer._get_module("moves.urllib_parse")
request = sys.modules[__name__ + ".moves.urllib_request"] error = _importer._get_module("moves.urllib_error")
response = sys.modules[__name__ + ".moves.urllib_response"] request = _importer._get_module("moves.urllib_request")
robotparser = sys.modules[__name__ + ".moves.urllib_robotparser"] response = _importer._get_module("moves.urllib_response")
robotparser = _importer._get_module("moves.urllib_robotparser")
def __dir__(self):
return ['parse', 'error', 'request', 'response', 'robotparser']
sys.modules[__name__ + ".moves.urllib"] = Module_six_moves_urllib(__name__ + ".moves.urllib") _importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"),
"moves.urllib")
def add_move(move): def add_move(move):
@ -359,11 +507,6 @@ if PY3:
_func_code = "__code__" _func_code = "__code__"
_func_defaults = "__defaults__" _func_defaults = "__defaults__"
_func_globals = "__globals__" _func_globals = "__globals__"
_iterkeys = "keys"
_itervalues = "values"
_iteritems = "items"
_iterlists = "lists"
else: else:
_meth_func = "im_func" _meth_func = "im_func"
_meth_self = "im_self" _meth_self = "im_self"
@ -373,11 +516,6 @@ else:
_func_defaults = "func_defaults" _func_defaults = "func_defaults"
_func_globals = "func_globals" _func_globals = "func_globals"
_iterkeys = "iterkeys"
_itervalues = "itervalues"
_iteritems = "iteritems"
_iterlists = "iterlists"
try: try:
advance_iterator = next advance_iterator = next
@ -400,6 +538,9 @@ if PY3:
create_bound_method = types.MethodType create_bound_method = types.MethodType
def create_unbound_method(func, cls):
return func
Iterator = object Iterator = object
else: else:
def get_unbound_function(unbound): def get_unbound_function(unbound):
@ -408,6 +549,9 @@ else:
def create_bound_method(func, obj): def create_bound_method(func, obj):
return types.MethodType(func, obj, obj.__class__) return types.MethodType(func, obj, obj.__class__)
def create_unbound_method(func, cls):
return types.MethodType(func, None, cls)
class Iterator(object): class Iterator(object):
def next(self): def next(self):
@ -426,74 +570,121 @@ get_function_defaults = operator.attrgetter(_func_defaults)
get_function_globals = operator.attrgetter(_func_globals) get_function_globals = operator.attrgetter(_func_globals)
def iterkeys(d, **kw): if PY3:
"""Return an iterator over the keys of a dictionary.""" def iterkeys(d, **kw):
return iter(getattr(d, _iterkeys)(**kw)) return iter(d.keys(**kw))
def itervalues(d, **kw): def itervalues(d, **kw):
"""Return an iterator over the values of a dictionary.""" return iter(d.values(**kw))
return iter(getattr(d, _itervalues)(**kw))
def iteritems(d, **kw): def iteritems(d, **kw):
"""Return an iterator over the (key, value) pairs of a dictionary.""" return iter(d.items(**kw))
return iter(getattr(d, _iteritems)(**kw))
def iterlists(d, **kw): def iterlists(d, **kw):
"""Return an iterator over the (key, [values]) pairs of a dictionary.""" return iter(d.lists(**kw))
return iter(getattr(d, _iterlists)(**kw))
viewkeys = operator.methodcaller("keys")
viewvalues = operator.methodcaller("values")
viewitems = operator.methodcaller("items")
else:
def iterkeys(d, **kw):
return d.iterkeys(**kw)
def itervalues(d, **kw):
return d.itervalues(**kw)
def iteritems(d, **kw):
return d.iteritems(**kw)
def iterlists(d, **kw):
return d.iterlists(**kw)
viewkeys = operator.methodcaller("viewkeys")
viewvalues = operator.methodcaller("viewvalues")
viewitems = operator.methodcaller("viewitems")
_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.")
_add_doc(itervalues, "Return an iterator over the values of a dictionary.")
_add_doc(iteritems,
"Return an iterator over the (key, value) pairs of a dictionary.")
_add_doc(iterlists,
"Return an iterator over the (key, [values]) pairs of a dictionary.")
if PY3: if PY3:
def b(s): def b(s):
return s.encode("latin-1") return s.encode("latin-1")
def u(s): def u(s):
return s return s
unichr = chr unichr = chr
if sys.version_info[1] <= 1: import struct
def int2byte(i): int2byte = struct.Struct(">B").pack
return bytes((i,)) del struct
else:
# This is about 2x faster than the implementation above on 3.2+
int2byte = operator.methodcaller("to_bytes", 1, "big")
byte2int = operator.itemgetter(0) byte2int = operator.itemgetter(0)
indexbytes = operator.getitem indexbytes = operator.getitem
iterbytes = iter iterbytes = iter
import io import io
StringIO = io.StringIO StringIO = io.StringIO
BytesIO = io.BytesIO BytesIO = io.BytesIO
_assertCountEqual = "assertCountEqual"
if sys.version_info[1] <= 1:
_assertRaisesRegex = "assertRaisesRegexp"
_assertRegex = "assertRegexpMatches"
else:
_assertRaisesRegex = "assertRaisesRegex"
_assertRegex = "assertRegex"
else: else:
def b(s): def b(s):
return s return s
# Workaround for standalone backslash
def u(s): def u(s):
return unicode(s, "unicode_escape") return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
unichr = unichr unichr = unichr
int2byte = chr int2byte = chr
def byte2int(bs): def byte2int(bs):
return ord(bs[0]) return ord(bs[0])
def indexbytes(buf, i): def indexbytes(buf, i):
return ord(buf[i]) return ord(buf[i])
def iterbytes(buf): iterbytes = functools.partial(itertools.imap, ord)
return (ord(byte) for byte in buf)
import StringIO import StringIO
StringIO = BytesIO = StringIO.StringIO StringIO = BytesIO = StringIO.StringIO
_assertCountEqual = "assertItemsEqual"
_assertRaisesRegex = "assertRaisesRegexp"
_assertRegex = "assertRegexpMatches"
_add_doc(b, """Byte literal""") _add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""") _add_doc(u, """Text literal""")
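A short sketch of the literal helpers above; the assertions hold on both majors:

data = b("\x00\xff")   # byte string on Python 2 and 3
text = u("caf\u00e9")  # text string on Python 2 and 3
assert byte2int(data) == 0
assert indexbytes(data, 1) == 255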
if PY3: def assertCountEqual(self, *args, **kwargs):
import builtins return getattr(self, _assertCountEqual)(*args, **kwargs)
exec_ = getattr(builtins, "exec")
def assertRaisesRegex(self, *args, **kwargs):
return getattr(self, _assertRaisesRegex)(*args, **kwargs)
def assertRegex(self, *args, **kwargs):
return getattr(self, _assertRegex)(*args, **kwargs)
if PY3:
exec_ = getattr(moves.builtins, "exec")
def reraise(tp, value, tb=None): def reraise(tp, value, tb=None):
if value is None:
value = tp()
if value.__traceback__ is not tb: if value.__traceback__ is not tb:
raise value.with_traceback(tb) raise value.with_traceback(tb)
raise value raise value
print_ = getattr(builtins, "print")
del builtins
else: else:
def exec_(_code_, _globs_=None, _locs_=None): def exec_(_code_, _globs_=None, _locs_=None):
"""Execute code in a namespace.""" """Execute code in a namespace."""
@ -507,20 +698,45 @@ else:
_locs_ = _globs_ _locs_ = _globs_
exec("""exec _code_ in _globs_, _locs_""") exec("""exec _code_ in _globs_, _locs_""")
exec_("""def reraise(tp, value, tb=None): exec_("""def reraise(tp, value, tb=None):
raise tp, value, tb raise tp, value, tb
""") """)
if sys.version_info[:2] == (3, 2):
exec_("""def raise_from(value, from_value):
if from_value is None:
raise value
raise value from from_value
""")
elif sys.version_info[:2] > (3, 2):
exec_("""def raise_from(value, from_value):
raise value from from_value
""")
else:
def raise_from(value, from_value):
raise value
print_ = getattr(moves.builtins, "print", None)
if print_ is None:
def print_(*args, **kwargs): def print_(*args, **kwargs):
"""The new-style print function.""" """The new-style print function for Python 2.4 and 2.5."""
fp = kwargs.pop("file", sys.stdout) fp = kwargs.pop("file", sys.stdout)
if fp is None: if fp is None:
return return
def write(data): def write(data):
if not isinstance(data, basestring): if not isinstance(data, basestring):
data = str(data) data = str(data)
# If the file has an encoding, encode unicode with it.
if (isinstance(fp, file) and
isinstance(data, unicode) and
fp.encoding is not None):
errors = getattr(fp, "errors", None)
if errors is None:
errors = "strict"
data = data.encode(fp.encoding, errors)
fp.write(data) fp.write(data)
want_unicode = False want_unicode = False
sep = kwargs.pop("sep", None) sep = kwargs.pop("sep", None)
@ -557,21 +773,96 @@ else:
write(sep) write(sep)
write(arg) write(arg)
write(end) write(end)
if sys.version_info[:2] < (3, 3):
_print = print_
def print_(*args, **kwargs):
fp = kwargs.get("file", sys.stdout)
flush = kwargs.pop("flush", False)
_print(*args, **kwargs)
if flush and fp is not None:
fp.flush()
_add_doc(reraise, """Reraise an exception.""") _add_doc(reraise, """Reraise an exception.""")
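A hedged sketch of reraise(): swap the exception type while keeping the traceback of the original failure (WrappedError is a hypothetical name):

import sys

class WrappedError(Exception):
    pass

try:
    try:
        {}['missing']
    except KeyError:
        tp, value, tb = sys.exc_info()
        reraise(WrappedError, WrappedError('lookup failed'), tb)
except WrappedError:
    pass  # the traceback still points at the {}['missing'] frame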
if sys.version_info[0:2] < (3, 4):
def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
updated=functools.WRAPPER_UPDATES):
def wrapper(f):
f = functools.wraps(wrapped, assigned, updated)(f)
f.__wrapped__ = wrapped
return f
return wrapper
else:
wraps = functools.wraps
def with_metaclass(meta, *bases): def with_metaclass(meta, *bases):
"""Create a base class with a metaclass.""" """Create a base class with a metaclass."""
return meta("NewBase", bases, {}) # This requires a bit of explanation: the basic idea is to make a dummy
# metaclass for one level of class instantiation that replaces itself with
# the actual metaclass.
class metaclass(meta):
def __new__(cls, name, this_bases, d):
return meta(name, bases, d)
return type.__new__(metaclass, 'temporary_class', (), {})
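A small sketch of the rewritten helper above; Meta is a hypothetical metaclass:

class Meta(type):
    pass

class Base(with_metaclass(Meta, object)):
    pass

assert type(Base) is Meta           # the temporary metaclass replaced itself
assert Base.__bases__ == (object,)  # real bases, no 'temporary_class' left over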
def add_metaclass(metaclass): def add_metaclass(metaclass):
"""Class decorator for creating a class with a metaclass.""" """Class decorator for creating a class with a metaclass."""
def wrapper(cls): def wrapper(cls):
orig_vars = cls.__dict__.copy() orig_vars = cls.__dict__.copy()
slots = orig_vars.get('__slots__')
if slots is not None:
if isinstance(slots, str):
slots = [slots]
for slots_var in slots:
orig_vars.pop(slots_var)
orig_vars.pop('__dict__', None) orig_vars.pop('__dict__', None)
orig_vars.pop('__weakref__', None) orig_vars.pop('__weakref__', None)
for slots_var in orig_vars.get('__slots__', ()):
orig_vars.pop(slots_var)
return metaclass(cls.__name__, cls.__bases__, orig_vars) return metaclass(cls.__name__, cls.__bases__, orig_vars)
return wrapper return wrapper
def python_2_unicode_compatible(klass):
"""
A decorator that defines __unicode__ and __str__ methods under Python 2.
Under Python 3 it does nothing.
To support Python 2 and 3 with a single code base, define a __str__ method
returning text and apply this decorator to the class.
"""
if PY2:
if '__str__' not in klass.__dict__:
raise ValueError("@python_2_unicode_compatible cannot be applied "
"to %s because it doesn't define __str__()." %
klass.__name__)
klass.__unicode__ = klass.__str__
klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
return klass
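Usage sketch for the decorator above: define a single __str__ returning text, and on Python 2 it becomes __unicode__ with an encoding __str__:

@python_2_unicode_compatible
class Greeting(object):
    def __str__(self):
        return u'caf\u00e9'

print(Greeting())  # text on Python 3; UTF-8 bytes from __str__ on Python 2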
# Complete the moves implementation.
# This code is at the end of this module to speed up module loading.
# Turn this module into a package.
__path__ = [] # required for PEP 302 and PEP 451
__package__ = __name__ # see PEP 366 @ReservedAssignment
if globals().get("__spec__") is not None:
__spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable
# Remove other six meta path importers, since they cause problems. This can
# happen if six is removed from sys.modules and then reloaded. (Setuptools does
# this for some reason.)
if sys.meta_path:
for i, importer in enumerate(sys.meta_path):
# Here's some real nastiness: Another "instance" of the six module might
# be floating around. Therefore, we can't use isinstance() to check for
# the six meta path importer, since the other six instance will have
# inserted an importer with different class.
if (type(importer).__name__ == "_SixMetaPathImporter" and
importer.name == __name__):
del sys.meta_path[i]
break
del i, importer
# Finally, add the importer to the meta path import hook.
sys.meta_path.append(_importer)
View file
@ -13,14 +13,15 @@ from six import integer_types, string_types, text_type, iteritems
def get_status_old(code): def get_status_old(code):
"""Get the torrent status using old status codes""" """Get the torrent status using old status codes"""
mapping = { mapping = {
(1<<0): 'check pending', (1 << 0): 'check pending',
(1<<1): 'checking', (1 << 1): 'checking',
(1<<2): 'downloading', (1 << 2): 'downloading',
(1<<3): 'seeding', (1 << 3): 'seeding',
(1<<4): 'stopped', (1 << 4): 'stopped',
} }
return mapping[code] return mapping[code]
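The old-style codes are single-bit flags, so the lookup expects exactly one set bit; for example:

assert get_status_old(1 << 2) == 'downloading'
assert get_status_old(1 << 4) == 'stopped'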
def get_status_new(code): def get_status_new(code):
"""Get the torrent status using new status codes""" """Get the torrent status using new status codes"""
mapping = { mapping = {
@ -34,6 +35,7 @@ def get_status_new(code):
} }
return mapping[code] return mapping[code]
class Torrent(object): class Torrent(object):
""" """
Torrent is a class holding the data received from Transmission regarding a bittorrent transfer. Torrent is a class holding the data received from Transmission regarding a bittorrent transfer.
@ -71,14 +73,14 @@ class Torrent(object):
tid = self._fields['id'].value tid = self._fields['id'].value
name = self._get_name_string() name = self._get_name_string()
if isinstance(name, str): if isinstance(name, str):
return '<Torrent %d \"%s\">' % (tid, name) return '<Torrent {0:d} \"{1}\">'.format(tid, name)
else: else:
return '<Torrent %d>' % (tid) return '<Torrent {0:d}>'.format(tid)
def __str__(self): def __str__(self):
name = self._get_name_string() name = self._get_name_string()
if isinstance(name, str): if isinstance(name, str):
return 'Torrent \"%s\"' % (name) return 'Torrent \"{0}\"'.format(name)
else: else:
return 'Torrent' return 'Torrent'
@ -89,7 +91,7 @@ class Torrent(object):
try: try:
return self._fields[name].value return self._fields[name].value
except KeyError: except KeyError:
raise AttributeError('No attribute %s' % name) raise AttributeError('No attribute {0}'.format(name))
def _rpc_version(self): def _rpc_version(self):
"""Get the Transmission RPC API version.""" """Get the Transmission RPC API version."""
@ -99,8 +101,9 @@ class Torrent(object):
def _dirty_fields(self): def _dirty_fields(self):
"""Enumerate changed fields""" """Enumerate changed fields"""
outgoing_keys = ['bandwidthPriority', 'downloadLimit', 'downloadLimited', 'peer_limit', 'queuePosition' outgoing_keys = ['bandwidthPriority', 'downloadLimit', 'downloadLimited', 'peer_limit', 'queuePosition',
, 'seedIdleLimit', 'seedIdleMode', 'seedRatioLimit', 'seedRatioMode', 'uploadLimit', 'uploadLimited'] 'seedIdleLimit', 'seedIdleMode', 'seedRatioLimit', 'seedRatioMode', 'uploadLimit',
'uploadLimited']
fields = [] fields = []
for key in outgoing_keys: for key in outgoing_keys:
if key in self._fields and self._fields[key].dirty: if key in self._fields and self._fields[key].dirty:
@ -121,7 +124,6 @@ class Torrent(object):
""" """
Update the torrent data from a Transmission JSON-RPC arguments dictionary Update the torrent data from a Transmission JSON-RPC arguments dictionary
""" """
fields = None
if isinstance(other, dict): if isinstance(other, dict):
for key, value in iteritems(other): for key, value in iteritems(other):
self._fields[key.replace('-', '_')] = Field(value, False) self._fields[key.replace('-', '_')] = Field(value, False)
@ -131,7 +133,7 @@ class Torrent(object):
else: else:
raise ValueError('Cannot update with supplied data') raise ValueError('Cannot update with supplied data')
self._incoming_pending = False self._incoming_pending = False
def _status(self): def _status(self):
"""Get the torrent status""" """Get the torrent status"""
code = self._fields['status'].value code = self._fields['status'].value
@ -264,13 +266,14 @@ class Torrent(object):
self._fields['downloadLimited'] = Field(True, True) self._fields['downloadLimited'] = Field(True, True)
self._fields['downloadLimit'] = Field(limit, True) self._fields['downloadLimit'] = Field(limit, True)
self._push() self._push()
elif limit == None: elif limit is None:
self._fields['downloadLimited'] = Field(False, True) self._fields['downloadLimited'] = Field(False, True)
self._push() self._push()
else: else:
raise ValueError("Not a valid limit") raise ValueError("Not a valid limit")
download_limit = property(_get_download_limit, _set_download_limit, None, "Download limit in Kbps or None. This is a mutator.") download_limit = property(_get_download_limit, _set_download_limit, None,
"Download limit in Kbps or None. This is a mutator.")
def _get_peer_limit(self): def _get_peer_limit(self):
""" """
@ -307,7 +310,7 @@ class Torrent(object):
self._push() self._push()
priority = property(_get_priority, _set_priority, None priority = property(_get_priority, _set_priority, None
, "Bandwidth priority as string. Can be one of 'low', 'normal', 'high'. This is a mutator.") , "Bandwidth priority as string. Can be one of 'low', 'normal', 'high'. This is a mutator.")
def _get_seed_idle_limit(self): def _get_seed_idle_limit(self):
""" """
@ -326,7 +329,7 @@ class Torrent(object):
raise ValueError("Not a valid limit") raise ValueError("Not a valid limit")
seed_idle_limit = property(_get_seed_idle_limit, _set_seed_idle_limit, None seed_idle_limit = property(_get_seed_idle_limit, _set_seed_idle_limit, None
, "Torrent seed idle limit in minutes. Also see seed_idle_mode. This is a mutator.") , "Torrent seed idle limit in minutes. Also see seed_idle_mode. This is a mutator.")
def _get_seed_idle_mode(self): def _get_seed_idle_mode(self):
""" """
@ -345,7 +348,7 @@ class Torrent(object):
raise ValueError("Not a valid limit") raise ValueError("Not a valid limit")
seed_idle_mode = property(_get_seed_idle_mode, _set_seed_idle_mode, None, seed_idle_mode = property(_get_seed_idle_mode, _set_seed_idle_mode, None,
""" """
Seed idle mode as string. Can be one of 'global', 'single' or 'unlimited'. Seed idle mode as string. Can be one of 'global', 'single' or 'unlimited'.
* global, use session seed idle limit. * global, use session seed idle limit.
@ -354,7 +357,7 @@ class Torrent(object):
This is a mutator. This is a mutator.
""" """
) )
def _get_seed_ratio_limit(self): def _get_seed_ratio_limit(self):
""" """
@ -373,7 +376,7 @@ class Torrent(object):
raise ValueError("Not a valid limit") raise ValueError("Not a valid limit")
seed_ratio_limit = property(_get_seed_ratio_limit, _set_seed_ratio_limit, None seed_ratio_limit = property(_get_seed_ratio_limit, _set_seed_ratio_limit, None
, "Torrent seed ratio limit as float. Also see seed_ratio_mode. This is a mutator.") , "Torrent seed ratio limit as float. Also see seed_ratio_mode. This is a mutator.")
def _get_seed_ratio_mode(self): def _get_seed_ratio_mode(self):
""" """
@ -392,7 +395,7 @@ class Torrent(object):
raise ValueError("Not a valid limit") raise ValueError("Not a valid limit")
seed_ratio_mode = property(_get_seed_ratio_mode, _set_seed_ratio_mode, None, seed_ratio_mode = property(_get_seed_ratio_mode, _set_seed_ratio_mode, None,
""" """
Seed ratio mode as string. Can be one of 'global', 'single' or 'unlimited'. Seed ratio mode as string. Can be one of 'global', 'single' or 'unlimited'.
* global, use session seed ratio limit. * global, use session seed ratio limit.
@ -401,7 +404,7 @@ class Torrent(object):
This is a mutator. This is a mutator.
""" """
) )
def _get_upload_limit(self): def _get_upload_limit(self):
""" """
@ -422,13 +425,14 @@ class Torrent(object):
self._fields['uploadLimited'] = Field(True, True) self._fields['uploadLimited'] = Field(True, True)
self._fields['uploadLimit'] = Field(limit, True) self._fields['uploadLimit'] = Field(limit, True)
self._push() self._push()
elif limit == None: elif limit is None:
self._fields['uploadLimited'] = Field(False, True) self._fields['uploadLimited'] = Field(False, True)
self._push() self._push()
else: else:
raise ValueError("Not a valid limit") raise ValueError("Not a valid limit")
upload_limit = property(_get_upload_limit, _set_upload_limit, None, "Upload limit in Kbps or None. This is a mutator.") upload_limit = property(_get_upload_limit, _set_upload_limit, None,
"Upload limit in Kbps or None. This is a mutator.")
def _get_queue_position(self): def _get_queue_position(self):
"""Get the queue position for this torrent.""" """Get the queue position for this torrent."""
View file
@ -2,14 +2,18 @@
# Copyright (c) 2008-2013 Erik Svensson <erik.public@gmail.com> # Copyright (c) 2008-2013 Erik Svensson <erik.public@gmail.com>
# Licensed under the MIT license. # Licensed under the MIT license.
import socket, datetime, logging, constants import constants
import datetime
import logging
import socket
from collections import namedtuple from collections import namedtuple
from constants import LOGGER
from constants import LOGGER
from six import string_types, iteritems from six import string_types, iteritems
UNITS = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB'] UNITS = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB']
def format_size(size): def format_size(size):
""" """
Format byte size into IEC prefixes, B, KiB, MiB ... Format byte size into IEC prefixes, B, KiB, MiB ...
@ -19,14 +23,16 @@ def format_size(size):
while size >= 1024.0 and i < len(UNITS): while size >= 1024.0 and i < len(UNITS):
i += 1 i += 1
size /= 1024.0 size /= 1024.0
return (size, UNITS[i]) return size, UNITS[i]
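For example, assuming size is normalised to a float at the top of the function (outside this hunk), the loop divides until the value drops below 1024:

assert format_size(2048) == (2.0, 'KiB')
assert format_size(512) == (512, 'B')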
def format_speed(size): def format_speed(size):
""" """
Format bytes per second speed into IEC prefixes, B/s, KiB/s, MiB/s ... Format bytes per second speed into IEC prefixes, B/s, KiB/s, MiB/s ...
""" """
(size, unit) = format_size(size) (size, unit) = format_size(size)
return (size, unit + '/s') return size, '{unit}/s'.format(unit=unit)
def format_timedelta(delta): def format_timedelta(delta):
""" """
@ -34,7 +40,8 @@ def format_timedelta(delta):
""" """
minutes, seconds = divmod(delta.seconds, 60) minutes, seconds = divmod(delta.seconds, 60)
hours, minutes = divmod(minutes, 60) hours, minutes = divmod(minutes, 60)
return '%d %02d:%02d:%02d' % (delta.days, hours, minutes, seconds) return '{0:d} {1:02d}:{2:02d}:{3:02d}'.format(delta.days, hours, minutes, seconds)
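A worked example of the formatter above:

import datetime
assert format_timedelta(datetime.timedelta(days=2, hours=1, minutes=2, seconds=3)) == '2 01:02:03'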
def format_timestamp(timestamp, utc=False): def format_timestamp(timestamp, utc=False):
""" """
@ -49,12 +56,14 @@ def format_timestamp(timestamp, utc=False):
else: else:
return '-' return '-'
class INetAddressError(Exception): class INetAddressError(Exception):
""" """
Error parsing / generating an internet address. Error parsing / generating an internet address.
""" """
pass pass
def inet_address(address, default_port, default_address='localhost'): def inet_address(address, default_port, default_address='localhost'):
""" """
Parse internet address. Parse internet address.
@ -71,18 +80,19 @@ def inet_address(address, default_port, default_address='localhost'):
try: try:
port = int(addr[1]) port = int(addr[1])
except ValueError: except ValueError:
raise INetAddressError('Invalid address "%s".' % address) raise INetAddressError('Invalid address "{0}".'.format(address))
if len(addr[0]) == 0: if len(addr[0]) == 0:
addr = default_address addr = default_address
else: else:
addr = addr[0] addr = addr[0]
else: else:
raise INetAddressError('Invalid address "%s".' % address) raise INetAddressError('Invalid address "{0}".'.format(address))
try: try:
socket.getaddrinfo(addr, port, socket.AF_INET, socket.SOCK_STREAM) socket.getaddrinfo(addr, port, socket.AF_INET, socket.SOCK_STREAM)
except socket.gaierror: except socket.gaierror:
raise INetAddressError('Cannot look up address "%s".' % address) raise INetAddressError('Cannot look up address "{0}".'.format(address))
return (addr, port) return addr, port
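Usage sketch, assuming the split on ':' at the top of the function (outside this hunk); the trailing getaddrinfo() call means the host must resolve:

assert inet_address('localhost:9091', 9091) == ('localhost', 9091)
assert inet_address(':8080', 9091) == ('localhost', 8080)  # empty host -> default_address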
def rpc_bool(arg): def rpc_bool(arg):
""" """
@ -95,27 +105,31 @@ def rpc_bool(arg):
arg = arg.lower() in ['true', 'yes'] arg = arg.lower() in ['true', 'yes']
return 1 if bool(arg) else 0 return 1 if bool(arg) else 0
TR_TYPE_MAP = { TR_TYPE_MAP = {
'number' : int, 'number': int,
'string' : str, 'string': str,
'double': float, 'double': float,
'boolean' : rpc_bool, 'boolean': rpc_bool,
'array': list, 'array': list,
'object': dict 'object': dict
} }
def make_python_name(name): def make_python_name(name):
""" """
Convert a Transmission RPC name to a Python-compatible name. Convert a Transmission RPC name to a Python-compatible name.
""" """
return name.replace('-', '_') return name.replace('-', '_')
def make_rpc_name(name): def make_rpc_name(name):
""" """
Convert a Python-compatible name to a Transmission RPC name. Convert a Python-compatible name to a Transmission RPC name.
""" """
return name.replace('_', '-') return name.replace('_', '-')
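For example, the two converters above are inverses:

assert make_python_name('peer-limit') == 'peer_limit'
assert make_rpc_name('peer_limit') == 'peer-limit'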
def argument_value_convert(method, argument, value, rpc_version): def argument_value_convert(method, argument, value, rpc_version):
""" """
Check and fix Transmission RPC issues with regards to methods, arguments and values. Check and fix Transmission RPC issues with regards to methods, arguments and values.
@ -125,7 +139,7 @@ def argument_value_convert(method, argument, value, rpc_version):
elif method in ('session-get', 'session-set'): elif method in ('session-get', 'session-set'):
args = constants.SESSION_ARGS[method[-3:]] args = constants.SESSION_ARGS[method[-3:]]
else: else:
return ValueError('Method "%s" not supported' % (method)) raise ValueError('Method "{0}" not supported'.format(method))
if argument in args: if argument in args:
info = args[argument] info = args[argument]
invalid_version = True invalid_version = True
@ -141,19 +155,18 @@ def argument_value_convert(method, argument, value, rpc_version):
if invalid_version: if invalid_version:
if replacement: if replacement:
LOGGER.warning( LOGGER.warning(
'Replacing requested argument "%s" with "%s".' 'Replacing requested argument "{0}" with "{1}".'.format(argument, replacement))
% (argument, replacement))
argument = replacement argument = replacement
info = args[argument] info = args[argument]
else: else:
raise ValueError( raise ValueError(
'Method "%s" Argument "%s" does not exist in version %d.' 'Method "{0}" Argument "{1}" does not exist in version {2:d}.'.format(method, argument, rpc_version))
% (method, argument, rpc_version)) return argument, TR_TYPE_MAP[info[0]](value)
return (argument, TR_TYPE_MAP[info[0]](value))
else: else:
raise ValueError('Argument "%s" does not exists for method "%s".', raise ValueError('Argument "{0}" does not exist for method "{1}".'.format(
(argument, method)) argument, method))
def get_arguments(method, rpc_version): def get_arguments(method, rpc_version):
""" """
Get arguments for method in specified Transmission RPC version. Get arguments for method in specified Transmission RPC version.
@ -163,7 +176,7 @@ def get_arguments(method, rpc_version):
elif method in ('session-get', 'session-set'): elif method in ('session-get', 'session-set'):
args = constants.SESSION_ARGS[method[-3:]] args = constants.SESSION_ARGS[method[-3:]]
else: else:
return ValueError('Method "%s" not supported' % (method)) raise ValueError('Method "{0}" not supported'.format(method))
accessible = [] accessible = []
for argument, info in iteritems(args): for argument, info in iteritems(args):
valid_version = True valid_version = True
@ -175,6 +188,7 @@ def get_arguments(method, rpc_version):
accessible.append(argument) accessible.append(argument)
return accessible return accessible
def add_stdout_logger(level='debug'): def add_stdout_logger(level='debug'):
""" """
Add a stdout target for the transmissionrpc logging. Add a stdout target for the transmissionrpc logging.
@ -189,6 +203,7 @@ def add_stdout_logger(level='debug'):
loghandler.setLevel(loglevel) loghandler.setLevel(loglevel)
trpc_logger.addHandler(loghandler) trpc_logger.addHandler(loghandler)
def add_file_logger(filepath, level='debug'): def add_file_logger(filepath, level='debug'):
""" """
Add a stdout target for the transmissionrpc logging. Add a stdout target for the transmissionrpc logging.
@ -203,4 +218,5 @@ def add_file_logger(filepath, level='debug'):
loghandler.setLevel(loglevel) loghandler.setLevel(loglevel)
trpc_logger.addHandler(loghandler) trpc_logger.addHandler(loghandler)
Field = namedtuple('Field', ['value', 'dirty']) Field = namedtuple('Field', ['value', 'dirty'])
View file
@ -0,0 +1 @@
# coding=utf-8
View file
@ -1,115 +1,117 @@
#coding=utf8 # coding=utf8
import urllib
import urllib2 import json
import urlparse
import cookielib
import re import re
import StringIO
try: from six import StringIO
import json from six.moves.http_cookiejar import CookieJar
except ImportError: from six.moves.urllib_error import HTTPError
import simplejson as json from six.moves.urllib_parse import urljoin, urlencode
from six.moves.urllib_request import (
build_opener, install_opener,
HTTPBasicAuthHandler, HTTPCookieProcessor,
Request,
)
from core.utorrent.upload import MultiPartForm from core.utorrent.upload import MultiPartForm
class UTorrentClient(object):
class UTorrentClient(object):
def __init__(self, base_url, username, password): def __init__(self, base_url, username, password):
self.base_url = base_url self.base_url = base_url
self.username = username self.username = username
self.password = password self.password = password
self.opener = self._make_opener('uTorrent', base_url, username, password) self.opener = self._make_opener('uTorrent', base_url, username, password)
self.token = self._get_token() self.token = self._get_token()
#TODO refresh token, when necessary # TODO refresh token, when necessary
def _make_opener(self, realm, base_url, username, password): def _make_opener(self, realm, base_url, username, password):
'''uTorrent API need HTTP Basic Auth and cookie support for token verify.''' """The uTorrent API needs HTTP Basic Auth and cookie support for token verification."""
auth_handler = urllib2.HTTPBasicAuthHandler() auth_handler = HTTPBasicAuthHandler()
auth_handler.add_password(realm=realm, auth_handler.add_password(realm=realm,
uri=base_url, uri=base_url,
user=username, user=username,
passwd=password) passwd=password)
opener = urllib2.build_opener(auth_handler) opener = build_opener(auth_handler)
urllib2.install_opener(opener) install_opener(opener)
cookie_jar = cookielib.CookieJar() cookie_jar = CookieJar()
cookie_handler = urllib2.HTTPCookieProcessor(cookie_jar) cookie_handler = HTTPCookieProcessor(cookie_jar)
handlers = [auth_handler, cookie_handler] handlers = [auth_handler, cookie_handler]
opener = urllib2.build_opener(*handlers) opener = build_opener(*handlers)
return opener return opener
def _get_token(self): def _get_token(self):
url = urlparse.urljoin(self.base_url, 'token.html') url = urljoin(self.base_url, 'token.html')
response = self.opener.open(url) response = self.opener.open(url)
token_re = "<div id='token' style='display:none;'>([^<>]+)</div>" token_re = "<div id='token' style='display:none;'>([^<>]+)</div>"
match = re.search(token_re, response.read()) match = re.search(token_re, response.read())
return match.group(1) return match.group(1)
def list(self, **kwargs): def list(self, **kwargs):
params = [('list', '1')] params = [('list', '1')]
params += kwargs.items() params += kwargs.items()
return self._action(params) return self._action(params)
def start(self, *hashes): def start(self, *hashes):
params = [('action', 'start'),] params = [('action', 'start'), ]
for hash in hashes: for hash in hashes:
params.append(('hash', hash)) params.append(('hash', hash))
return self._action(params) return self._action(params)
def stop(self, *hashes): def stop(self, *hashes):
params = [('action', 'stop'),] params = [('action', 'stop'), ]
for hash in hashes: for hash in hashes:
params.append(('hash', hash)) params.append(('hash', hash))
return self._action(params) return self._action(params)
def pause(self, *hashes): def pause(self, *hashes):
params = [('action', 'pause'),] params = [('action', 'pause'), ]
for hash in hashes: for hash in hashes:
params.append(('hash', hash)) params.append(('hash', hash))
return self._action(params) return self._action(params)
def forcestart(self, *hashes): def forcestart(self, *hashes):
params = [('action', 'forcestart'),] params = [('action', 'forcestart'), ]
for hash in hashes: for hash in hashes:
params.append(('hash', hash)) params.append(('hash', hash))
return self._action(params) return self._action(params)
def remove(self, *hashes): def remove(self, *hashes):
params = [('action', 'remove'),] params = [('action', 'remove'), ]
for hash in hashes: for hash in hashes:
params.append(('hash', hash)) params.append(('hash', hash))
return self._action(params) return self._action(params)
def removedata(self, *hashes): def removedata(self, *hashes):
params = [('action', 'removedata'),] params = [('action', 'removedata'), ]
for hash in hashes: for hash in hashes:
params.append(('hash', hash)) params.append(('hash', hash))
return self._action(params) return self._action(params)
def recheck(self, *hashes): def recheck(self, *hashes):
params = [('action', 'recheck'),] params = [('action', 'recheck'), ]
for hash in hashes: for hash in hashes:
params.append(('hash', hash)) params.append(('hash', hash))
return self._action(params) return self._action(params)
def getfiles(self, hash): def getfiles(self, hash):
params = [('action', 'getfiles'), ('hash', hash)] params = [('action', 'getfiles'), ('hash', hash)]
return self._action(params) return self._action(params)
def getprops(self, hash): def getprops(self, hash):
params = [('action', 'getprops'), ('hash', hash)] params = [('action', 'getprops'), ('hash', hash)]
return self._action(params) return self._action(params)
def setprio(self, hash, priority, *files): def setprio(self, hash, priority, *files):
params = [('action', 'setprio'), ('hash', hash), ('p', str(priority))] params = [('action', 'setprio'), ('hash', hash), ('p', str(priority))]
for file_index in files: for file_index in files:
params.append(('f', str(file_index))) params.append(('f', str(file_index)))
return self._action(params) return self._action(params)
def addfile(self, filename, filepath=None, bytes=None): def addfile(self, filename, filepath=None, bytes=None):
params = [('action', 'add-file')] params = [('action', 'add-file')]
@ -118,15 +120,15 @@ class UTorrentClient(object):
file_handler = open(filepath) file_handler = open(filepath)
else: else:
file_handler = StringIO.StringIO(bytes) file_handler = StringIO(bytes)
form.add_file('torrent_file', filename.encode('utf-8'), file_handler) form.add_file('torrent_file', filename.encode('utf-8'), file_handler)
return self._action(params, str(form), form.get_content_type()) return self._action(params, str(form), form.get_content_type())
def _action(self, params, body=None, content_type=None): def _action(self, params, body=None, content_type=None):
#about token, see https://github.com/bittorrent/webui/wiki/TokenSystem # about token, see https://github.com/bittorrent/webui/wiki/TokenSystem
url = self.base_url + '?token=' + self.token + '&' + urllib.urlencode(params) url = '{url}?token={token}&{params}'.format(url=self.base_url, token=self.token, params=urlencode(params))
request = urllib2.Request(url) request = Request(url)
if body: if body:
request.add_data(body) request.add_data(body)
@ -137,6 +139,5 @@ class UTorrentClient(object):
try: try:
response = self.opener.open(request) response = self.opener.open(request)
return response.code, json.loads(response.read()) return response.code, json.loads(response.read())
except urllib2.HTTPError,e: except HTTPError:
raise raise
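A hedged end-to-end sketch of the client above; the URL, credentials, and the webui list layout (info-hash first) are assumptions:

client = UTorrentClient('http://localhost:8080/gui/', 'admin', 'secret')
status, data = client.list()              # (HTTP status code, decoded JSON)
for torrent in data.get('torrents', []):
    info_hash = torrent[0]                # assumed first field of each row
    client.stop(info_hash)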
View file
@ -1,7 +1,8 @@
#code copied from http://www.doughellmann.com/PyMOTW/urllib2/ # coding=utf-8
# code copied from http://www.doughellmann.com/PyMOTW/urllib2/
from email.generator import _make_boundary as make_boundary
import itertools import itertools
import mimetools
import mimetypes import mimetypes
@ -11,11 +12,11 @@ class MultiPartForm(object):
def __init__(self): def __init__(self):
self.form_fields = [] self.form_fields = []
self.files = [] self.files = []
self.boundary = mimetools.choose_boundary() self.boundary = make_boundary()
return return
def get_content_type(self): def get_content_type(self):
return 'multipart/form-data; boundary=%s' % self.boundary return 'multipart/form-data; boundary={0}'.format(self.boundary)
def add_field(self, name, value): def add_field(self, name, value):
"""Add a simple field to the form data.""" """Add a simple field to the form data."""
@ -29,7 +30,7 @@ class MultiPartForm(object):
mimetype = mimetypes.guess_type(filename)[0] or 'application/octet-stream' mimetype = mimetypes.guess_type(filename)[0] or 'application/octet-stream'
self.files.append((fieldname, filename, mimetype, body)) self.files.append((fieldname, filename, mimetype, body))
return return
def __str__(self): def __str__(self):
"""Return a string representing the form data, including attached files.""" """Return a string representing the form data, including attached files."""
# Build a list of lists, each containing "lines" of the # Build a list of lists, each containing "lines" of the
@ -37,33 +38,32 @@ class MultiPartForm(object):
# Once the list is built, return a string where each # Once the list is built, return a string where each
# line is separated by '\r\n'. # line is separated by '\r\n'.
parts = [] parts = []
part_boundary = '--' + self.boundary part_boundary = '--{boundary}'.format(boundary=self.boundary)
# Add the form fields # Add the form fields
parts.extend( parts.extend(
[ part_boundary, [part_boundary,
'Content-Disposition: form-data; name="%s"' % name, 'Content-Disposition: form-data; name="{0}"'.format(name),
'', '',
value, value,
] ]
for name, value in self.form_fields for name, value in self.form_fields
) )
# Add the files to upload # Add the files to upload
parts.extend( parts.extend(
[ part_boundary, [part_boundary,
'Content-Disposition: file; name="%s"; filename="%s"' % \ 'Content-Disposition: file; name="{0}"; filename="{1}"'.format(field_name, filename),
(field_name, filename), 'Content-Type: {0}'.format(content_type),
'Content-Type: %s' % content_type, '',
'', body,
body, ]
]
for field_name, filename, content_type, body in self.files for field_name, filename, content_type, body in self.files
) )
# Flatten the list and add closing boundary marker, # Flatten the list and add closing boundary marker,
# then return CR+LF separated data # then return CR+LF separated data
flattened = list(itertools.chain(*parts)) flattened = list(itertools.chain(*parts))
flattened.append('--' + self.boundary + '--') flattened.append('--{boundary}--'.format(boundary=self.boundary))
flattened.append('') flattened.append('')
return '\r\n'.join(flattened) return '\r\n'.join(flattened)
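A short usage sketch of the MultiPartForm class above, assuming it is importable as-is; the field values and the fake torrent payload are made up. add_file(fieldname, filename, fileHandle) reads the handle into the body, and the boundary ties the body to the Content-Type header.

from six import StringIO

form = MultiPartForm()
form.add_field('action', 'add-file')
form.add_file('torrent_file', 'example.torrent',
              StringIO(u'd8:announce0:e'))   # placeholder torrent content
body = str(form)
headers = {'Content-Type': form.get_content_type(),
           'Content-Length': str(len(body))}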
View file
@ -1,3 +1,4 @@
# coding=utf-8
# Author: Nic Wolfe <nic@wolfeden.ca> # Author: Nic Wolfe <nic@wolfeden.ca>
# Modified by: echel0n # Modified by: echel0n
@ -15,7 +16,8 @@ import gh_api as github
import core import core
from core import logger from core import logger
class CheckVersion():
class CheckVersion(object):
""" """
Version check class meant to run as a thread object with the SB scheduler. Version check class meant to run as a thread object with the SB scheduler.
""" """
@ -66,7 +68,7 @@ class CheckVersion():
logger.log(u"Version checking is disabled, not checking for the newest version") logger.log(u"Version checking is disabled, not checking for the newest version")
return False return False
logger.log(u"Checking if " + self.install_type + " needs an update") logger.log(u"Checking if {install} needs an update".format(install=self.install_type))
if not self.updater.need_update(): if not self.updater.need_update():
core.NEWEST_VERSION_STRING = None core.NEWEST_VERSION_STRING = None
logger.log(u"No update needed") logger.log(u"No update needed")
@ -79,7 +81,8 @@ class CheckVersion():
if self.updater.need_update(): if self.updater.need_update():
return self.updater.update() return self.updater.update()
class UpdateManager():
class UpdateManager(object):
def get_github_repo_user(self): def get_github_repo_user(self):
return core.GIT_USER return core.GIT_USER
@ -89,6 +92,7 @@ class UpdateManager():
def get_github_branch(self): def get_github_branch(self):
return core.GIT_BRANCH return core.GIT_BRANCH
class GitUpdateManager(UpdateManager): class GitUpdateManager(UpdateManager):
def __init__(self): def __init__(self):
self._git_path = self._find_working_git() self._git_path = self._find_working_git()
@ -102,24 +106,26 @@ class GitUpdateManager(UpdateManager):
self._num_commits_ahead = 0 self._num_commits_ahead = 0
def _git_error(self): def _git_error(self):
logger.debug('Unable to find your git executable - Set git_path in your autoProcessMedia.cfg OR delete your .git folder and run from source to enable updates.') logger.debug(
'Unable to find your git executable - Set git_path in your autoProcessMedia.cfg OR delete your .git folder and run from source to enable updates.')
def _find_working_git(self): def _find_working_git(self):
test_cmd = 'version' test_cmd = 'version'
if core.GIT_PATH: if core.GIT_PATH:
main_git = '"' + core.GIT_PATH + '"' main_git = '"{git}"'.format(git=core.GIT_PATH)
else: else:
main_git = 'git' main_git = 'git'
logger.log(u"Checking if we can use git commands: " + main_git + ' ' + test_cmd, logger.DEBUG) logger.log(u"Checking if we can use git commands: {git} {cmd}".format
(git=main_git, cmd=test_cmd), logger.DEBUG)
output, err, exit_status = self._run_git(main_git, test_cmd) output, err, exit_status = self._run_git(main_git, test_cmd)
if exit_status == 0: if exit_status == 0:
logger.log(u"Using: " + main_git, logger.DEBUG) logger.log(u"Using: {git}".format(git=main_git), logger.DEBUG)
return main_git return main_git
else: else:
logger.log(u"Not using: " + main_git, logger.DEBUG) logger.log(u"Not using: {git}".format(git=main_git), logger.DEBUG)
# trying alternatives # trying alternatives
@ -137,33 +143,38 @@ class GitUpdateManager(UpdateManager):
logger.log(u"Trying known alternative git locations", logger.DEBUG) logger.log(u"Trying known alternative git locations", logger.DEBUG)
for cur_git in alternative_git: for cur_git in alternative_git:
logger.log(u"Checking if we can use git commands: " + cur_git + ' ' + test_cmd, logger.DEBUG) logger.log(u"Checking if we can use git commands: {git} {cmd}".format
(git=cur_git, cmd=test_cmd), logger.DEBUG)
output, err, exit_status = self._run_git(cur_git, test_cmd) output, err, exit_status = self._run_git(cur_git, test_cmd)
if exit_status == 0: if exit_status == 0:
logger.log(u"Using: " + cur_git, logger.DEBUG) logger.log(u"Using: {git}".format(git=cur_git), logger.DEBUG)
return cur_git return cur_git
else: else:
logger.log(u"Not using: " + cur_git, logger.DEBUG) logger.log(u"Not using: {git}".format(git=cur_git), logger.DEBUG)
# Still haven't found a working git # Still haven't found a working git
logger.debug('Unable to find your git executable - Set git_path in your autoProcessMedia.cfg OR delete your .git folder and run from source to enable updates.') logger.debug('Unable to find your git executable - '
'Set git_path in your autoProcessMedia.cfg OR '
'delete your .git folder and run from source to enable updates.')
return None return None
def _run_git(self, git_path, args): def _run_git(self, git_path, args):
output = err = exit_status = None output = None
err = None
if not git_path: if not git_path:
logger.log(u"No git specified, can't use git commands", logger.DEBUG) logger.log(u"No git specified, can't use git commands", logger.DEBUG)
exit_status = 1 exit_status = 1
return (output, err, exit_status) return output, err, exit_status
cmd = git_path + ' ' + args cmd = '{git} {args}'.format(git=git_path, args=args)
try: try:
logger.log(u"Executing " + cmd + " with your shell in " + core.PROGRAM_DIR, logger.DEBUG) logger.log(u"Executing {cmd} with your shell in {directory}".format
(cmd=cmd, directory=core.PROGRAM_DIR), logger.DEBUG)
p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
shell=True, cwd=core.PROGRAM_DIR) shell=True, cwd=core.PROGRAM_DIR)
output, err = p.communicate() output, err = p.communicate()
@ -172,32 +183,26 @@ class GitUpdateManager(UpdateManager):
if output: if output:
output = output.strip() output = output.strip()
if core.LOG_GIT: if core.LOG_GIT:
logger.log(u"git output: " + output, logger.DEBUG) logger.log(u"git output: {output}".format(output=output), logger.DEBUG)
except OSError: except OSError:
logger.log(u"Command " + cmd + " didn't work") logger.log(u"Command {cmd} didn't work".format(cmd=cmd))
exit_status = 1 exit_status = 1
exit_status = 128 if ('fatal:' in output) or err else exit_status
if exit_status == 0: if exit_status == 0:
logger.log(cmd + u" : returned successful", logger.DEBUG) logger.log(u"{cmd} : returned successful".format(cmd=cmd), logger.DEBUG)
exit_status = 0 exit_status = 0
elif core.LOG_GIT and exit_status in (1, 128):
elif exit_status == 1: logger.log(u"{cmd} returned : {output}".format
if core.LOG_GIT: (cmd=cmd, output=output), logger.DEBUG)
logger.log(cmd + u" returned : " + output, logger.DEBUG)
exit_status = 1
elif exit_status == 128 or 'fatal:' in output or err:
if core.LOG_GIT:
logger.log(cmd + u" returned : " + output, logger.DEBUG)
exit_status = 128
else: else:
if core.LOG_GIT: if core.LOG_GIT:
logger.log(cmd + u" returned : " + output + u", treat as error for now", logger.DEBUG) logger.log(u"{cmd} returned : {output}, treat as error for now".format
(cmd=cmd, output=output), logger.DEBUG)
exit_status = 1 exit_status = 1
return (output, err, exit_status) return output, err, exit_status
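The collapsed status logic above is easier to see in isolation. A hypothetical helper restating the new single-expression classification (0 = success, 1 = ordinary failure, 128 = fatal):

def classify_git_status(exit_status, output, err):
    # mirrors: exit_status = 128 if ('fatal:' in output) or err else exit_status
    if ('fatal:' in (output or '')) or err:
        return 128
    return exit_status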
def _find_installed_version(self): def _find_installed_version(self):
""" """
@ -278,20 +283,18 @@ class GitUpdateManager(UpdateManager):
logger.log(u"git didn't return numbers for behind and ahead, not using it", logger.DEBUG) logger.log(u"git didn't return numbers for behind and ahead, not using it", logger.DEBUG)
return return
logger.log(u"cur_commit = " + str(self._cur_commit_hash) + u" % (newest_commit)= " + str(self._newest_commit_hash) logger.log(u"cur_commit = {current} % (newest_commit)= {new}, "
+ u", num_commits_behind = " + str(self._num_commits_behind) + u", num_commits_ahead = " + str( u"num_commits_behind = {x}, num_commits_ahead = {y}".format
self._num_commits_ahead), logger.DEBUG) (current=self._cur_commit_hash, new=self._newest_commit_hash,
x=self._num_commits_behind, y=self._num_commits_ahead), logger.DEBUG)
def set_newest_text(self): def set_newest_text(self):
if self._num_commits_ahead: if self._num_commits_ahead:
logger.log(u"Local branch is ahead of " + self.branch + ". Automatic update not possible.", logger.ERROR) logger.log(u"Local branch is ahead of {branch}. Automatic update not possible.".format
elif self._num_commits_behind > 0: (branch=self.branch), logger.ERROR)
newest_text = 'There is a newer version available ' elif self._num_commits_behind:
newest_text += " (you're " + str(self._num_commits_behind) + " commit" logger.log(u"There is a newer version available (you're {x} commit{s} behind)".format
if self._num_commits_behind > 1: (x=self._num_commits_behind, s=u's' if self._num_commits_behind > 1 else u''), logger.MESSAGE)
newest_text += 's'
newest_text += ' behind)'
logger.log(newest_text, logger.MESSAGE)
else: else:
return return
@ -305,8 +308,8 @@ class GitUpdateManager(UpdateManager):
else: else:
try: try:
self._check_github_for_update() self._check_github_for_update()
except Exception, e: except Exception as error:
logger.log(u"Unable to contact github, can't check for update: " + repr(e), logger.ERROR) logger.log(u"Unable to contact github, can't check for update: {msg!r}".format(msg=error), logger.ERROR)
return False return False
if self._num_commits_behind > 0: if self._num_commits_behind > 0:
@ -320,7 +323,7 @@ class GitUpdateManager(UpdateManager):
on the call's success. on the call's success.
""" """
output, err, exit_status = self._run_git(self._git_path, 'pull origin ' + self.branch) # @UnusedVariable output, err, exit_status = self._run_git(self._git_path, 'pull origin {branch}'.format(branch=self.branch)) # @UnusedVariable
if exit_status == 0: if exit_status == 0:
return True return True
@ -349,8 +352,8 @@ class SourceUpdateManager(UpdateManager):
try: try:
with open(version_file, 'r') as fp: with open(version_file, 'r') as fp:
self._cur_commit_hash = fp.read().strip(' \n\r') self._cur_commit_hash = fp.read().strip(' \n\r')
except EnvironmentError, e: except EnvironmentError as error:
logger.log(u"Unable to open 'version.txt': " + str(e), logger.DEBUG) logger.log(u"Unable to open 'version.txt': {msg}".format(msg=error), logger.DEBUG)
if not self._cur_commit_hash: if not self._cur_commit_hash:
self._cur_commit_hash = None self._cur_commit_hash = None
@ -363,8 +366,8 @@ class SourceUpdateManager(UpdateManager):
try: try:
self._check_github_for_update() self._check_github_for_update()
except Exception, e: except Exception as error:
logger.log(u"Unable to contact github, can't check for update: " + repr(e), logger.ERROR) logger.log(u"Unable to contact github, can't check for update: {msg!r}".format(msg=error), logger.ERROR)
return False return False
if not self._cur_commit_hash or self._num_commits_behind > 0: if not self._cur_commit_hash or self._num_commits_behind > 0:
@ -410,8 +413,8 @@ class SourceUpdateManager(UpdateManager):
# when _cur_commit_hash doesn't match anything _num_commits_behind == 100 # when _cur_commit_hash doesn't match anything _num_commits_behind == 100
self._num_commits_behind += 1 self._num_commits_behind += 1
logger.log(u"cur_commit = " + str(self._cur_commit_hash) + u" % (newest_commit)= " + str(self._newest_commit_hash) logger.log(u"cur_commit = {current} % (newest_commit)= {new}, num_commits_behind = {x}".format
+ u", num_commits_behind = " + str(self._num_commits_behind), logger.DEBUG) (current=self._cur_commit_hash, new=self._newest_commit_hash, x=self._num_commits_behind), logger.DEBUG)
def set_newest_text(self): def set_newest_text(self):
@ -421,12 +424,8 @@ class SourceUpdateManager(UpdateManager):
if not self._cur_commit_hash: if not self._cur_commit_hash:
logger.log(u"Unknown current version number, don't know if we should update or not", logger.ERROR) logger.log(u"Unknown current version number, don't know if we should update or not", logger.ERROR)
elif self._num_commits_behind > 0: elif self._num_commits_behind > 0:
newest_text = 'There is a newer version available' logger.log(u"There is a newer version available (you're {x} commit{s} behind)".format
newest_text += " (you're " + str(self._num_commits_behind) + " commit" (x=self._num_commits_behind, s=u's' if self._num_commits_behind > 1 else u''), logger.MESSAGE)
if self._num_commits_behind > 1:
newest_text += "s"
newest_text += " behind)"
logger.log(newest_text, logger.MESSAGE)
else: else:
return return
@ -434,8 +433,8 @@ class SourceUpdateManager(UpdateManager):
""" """
Downloads the latest source tarball from github and installs it over the existing version. Downloads the latest source tarball from github and installs it over the existing version.
""" """
base_url = 'https://github.com/' + self.github_repo_user + '/' + self.github_repo tar_download_url = 'https://github.com/{org}/{repo}/tarball/{branch}'.format(
tar_download_url = base_url + '/tarball/' + self.branch org=self.github_repo_user, repo=self.github_repo, branch=self.branch)
version_path = os.path.join(core.PROGRAM_DIR, u'version.txt') version_path = os.path.join(core.PROGRAM_DIR, u'version.txt')
try: try:
@ -443,61 +442,65 @@ class SourceUpdateManager(UpdateManager):
sb_update_dir = os.path.join(core.PROGRAM_DIR, u'sb-update') sb_update_dir = os.path.join(core.PROGRAM_DIR, u'sb-update')
if os.path.isdir(sb_update_dir): if os.path.isdir(sb_update_dir):
logger.log(u"Clearing out update folder " + sb_update_dir + " before extracting") logger.log(u"Clearing out update folder {dir} before extracting".format(dir=sb_update_dir))
shutil.rmtree(sb_update_dir) shutil.rmtree(sb_update_dir)
logger.log(u"Creating update folder " + sb_update_dir + " before extracting") logger.log(u"Creating update folder {dir} before extracting".format(dir=sb_update_dir))
os.makedirs(sb_update_dir) os.makedirs(sb_update_dir)
# retrieve file # retrieve file
logger.log(u"Downloading update from " + repr(tar_download_url)) logger.log(u"Downloading update from {url!r}".format(url=tar_download_url))
tar_download_path = os.path.join(sb_update_dir, u'nzbtomedia-update.tar') tar_download_path = os.path.join(sb_update_dir, u'nzbtomedia-update.tar')
urllib.urlretrieve(tar_download_url, tar_download_path) urllib.urlretrieve(tar_download_url, tar_download_path)
if not os.path.isfile(tar_download_path): if not os.path.isfile(tar_download_path):
logger.log(u"Unable to retrieve new version from " + tar_download_url + ", can't update", logger.ERROR) logger.log(u"Unable to retrieve new version from {url}, can't update".format
(url=tar_download_url), logger.ERROR)
return False return False
if not tarfile.is_tarfile(tar_download_path): if not tarfile.is_tarfile(tar_download_path):
logger.log(u"Retrieved version from " + tar_download_url + " is corrupt, can't update", logger.ERROR) logger.log(u"Retrieved version from {url} is corrupt, can't update".format
(url=tar_download_url), logger.ERROR)
return False return False
# extract to sb-update dir # extract to sb-update dir
logger.log(u"Extracting file " + tar_download_path) logger.log(u"Extracting file {path}".format(path=tar_download_path))
tar = tarfile.open(tar_download_path) tar = tarfile.open(tar_download_path)
tar.extractall(sb_update_dir) tar.extractall(sb_update_dir)
tar.close() tar.close()
# delete .tar.gz # delete .tar.gz
logger.log(u"Deleting file " + tar_download_path) logger.log(u"Deleting file {path}".format(path=tar_download_path))
os.remove(tar_download_path) os.remove(tar_download_path)
# find update dir name # find update dir name
update_dir_contents = [x for x in os.listdir(sb_update_dir) if update_dir_contents = [x for x in os.listdir(sb_update_dir) if
os.path.isdir(os.path.join(sb_update_dir, x))] os.path.isdir(os.path.join(sb_update_dir, x))]
if len(update_dir_contents) != 1: if len(update_dir_contents) != 1:
logger.log(u"Invalid update data, update failed: " + str(update_dir_contents), logger.ERROR) logger.log(u"Invalid update data, update failed: {0}".format(update_dir_contents), logger.ERROR)
return False return False
content_dir = os.path.join(sb_update_dir, update_dir_contents[0]) content_dir = os.path.join(sb_update_dir, update_dir_contents[0])
# walk temp folder and move files to main folder # walk temp folder and move files to main folder
logger.log(u"Moving files from " + content_dir + " to " + core.PROGRAM_DIR) logger.log(u"Moving files from {source} to {destination}".format
(source=content_dir, destination=core.PROGRAM_DIR))
for dirname, dirnames, filenames in os.walk(content_dir): # @UnusedVariable for dirname, dirnames, filenames in os.walk(content_dir): # @UnusedVariable
dirname = dirname[len(content_dir) + 1:] dirname = dirname[len(content_dir) + 1:]
for curfile in filenames: for curfile in filenames:
old_path = os.path.join(content_dir, dirname, curfile) old_path = os.path.join(content_dir, dirname, curfile)
new_path = os.path.join(core.PROGRAM_DIR, dirname, curfile) new_path = os.path.join(core.PROGRAM_DIR, dirname, curfile)
#Avoid DLL access problem on WIN32/64 # Avoid DLL access problem on WIN32/64
#These files needing to be updated manually # These files needing to be updated manually
#or find a way to kill the access from memory # or find a way to kill the access from memory
if curfile in ('unrar.dll', 'unrar64.dll'): if curfile in ('unrar.dll', 'unrar64.dll'):
try: try:
os.chmod(new_path, stat.S_IWRITE) os.chmod(new_path, stat.S_IWRITE)
os.remove(new_path) os.remove(new_path)
os.renames(old_path, new_path) os.renames(old_path, new_path)
except Exception, e: except Exception as error:
logger.log(u"Unable to update " + new_path + ': ' + str(e), logger.DEBUG) logger.log(u"Unable to update {path}: {msg}".format
(path=new_path, msg=error), logger.DEBUG)
os.remove(old_path) # Trash the updated file without moving in new path os.remove(old_path) # Trash the updated file without moving in new path
continue continue
@ -509,13 +512,15 @@ class SourceUpdateManager(UpdateManager):
try: try:
with open(version_path, 'w') as ver_file: with open(version_path, 'w') as ver_file:
ver_file.write(self._newest_commit_hash) ver_file.write(self._newest_commit_hash)
except EnvironmentError, e: except EnvironmentError as error:
logger.log(u"Unable to write version file, update not complete: " + str(e), logger.ERROR) logger.log(u"Unable to write version file, update not complete: {msg}".format
(msg=error), logger.ERROR)
return False return False
except Exception, e: except Exception as error:
logger.log(u"Error while trying to update: " + str(e), logger.ERROR) logger.log(u"Error while trying to update: {msg}".format
logger.log(u"Traceback: " + traceback.format_exc(), logger.DEBUG) (msg=error), logger.ERROR)
logger.log(u"Traceback: {error}".format(error=traceback.format_exc()), logger.DEBUG)
return False return False
return True return True
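Condensed, the happy path of update() above is: fetch the branch tarball from GitHub, sanity-check it, unpack it, clean up. A sketch under those assumptions; org, repo, branch and dest_dir are placeholders, and the walk-and-move step is omitted.

import os
import tarfile
from six.moves.urllib.request import urlretrieve

def fetch_update(org, repo, branch, dest_dir):
    url = 'https://github.com/{org}/{repo}/tarball/{branch}'.format(
        org=org, repo=repo, branch=branch)
    tar_path = os.path.join(dest_dir, 'update.tar')
    urlretrieve(url, tar_path)
    if not tarfile.is_tarfile(tar_path):      # corrupt or failed download
        raise IOError('corrupt tarball from {0}'.format(url))
    with tarfile.open(tar_path) as tar:
        tar.extractall(dest_dir)              # GitHub nests one top-level dir
    os.remove(tar_path)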
View file
@ -0,0 +1 @@
import sys, types, os;p = os.path.join(sys._getframe(1).f_locals['sitedir'], *('backports',));ie = os.path.exists(os.path.join(p,'__init__.py'));m = not ie and sys.modules.setdefault('backports', types.ModuleType('backports'));mp = (m or []) and m.__dict__.setdefault('__path__',[]);(p not in mp) and mp.append(p)
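The .pth one-liner above is dense because site.py executes each such line in isolation. Unpacked for readability it amounts to the following; this is an annotated equivalent only, with sitedir normally supplied by site.py at startup.

import os
import sys
import types

sitedir = '/path/to/site-packages'   # placeholder; site.py provides this
p = os.path.join(sitedir, 'backports')
has_init = os.path.exists(os.path.join(p, '__init__.py'))
if not has_init:
    # fabricate a 'backports' namespace package so several distributions
    # can all contribute backports.* submodules
    m = sys.modules.setdefault('backports', types.ModuleType('backports'))
    mp = m.__dict__.setdefault('__path__', [])
    if p not in mp:
        mp.append(p)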
View file
@ -0,0 +1,184 @@
from __future__ import absolute_import
import functools
from collections import namedtuple
from threading import RLock
_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])
@functools.wraps(functools.update_wrapper)
def update_wrapper(wrapper,
wrapped,
assigned = functools.WRAPPER_ASSIGNMENTS,
updated = functools.WRAPPER_UPDATES):
"""
Patch two bugs in functools.update_wrapper.
"""
# workaround for http://bugs.python.org/issue3445
assigned = tuple(attr for attr in assigned if hasattr(wrapped, attr))
wrapper = functools.update_wrapper(wrapper, wrapped, assigned, updated)
# workaround for https://bugs.python.org/issue17482
wrapper.__wrapped__ = wrapped
return wrapper
class _HashedSeq(list):
__slots__ = 'hashvalue'
def __init__(self, tup, hash=hash):
self[:] = tup
self.hashvalue = hash(tup)
def __hash__(self):
return self.hashvalue
def _make_key(args, kwds, typed,
kwd_mark=(object(),),
fasttypes=set([int, str, frozenset, type(None)]),
sorted=sorted, tuple=tuple, type=type, len=len):
'Make a cache key from optionally typed positional and keyword arguments'
key = args
if kwds:
sorted_items = sorted(kwds.items())
key += kwd_mark
for item in sorted_items:
key += item
if typed:
key += tuple(type(v) for v in args)
if kwds:
key += tuple(type(v) for k, v in sorted_items)
elif len(key) == 1 and type(key[0]) in fasttypes:
return key[0]
return _HashedSeq(key)
def lru_cache(maxsize=100, typed=False):
"""Least-recently-used cache decorator.
If *maxsize* is set to None, the LRU features are disabled and the cache
can grow without bound.
If *typed* is True, arguments of different types will be cached separately.
For example, f(3.0) and f(3) will be treated as distinct calls with
distinct results.
Arguments to the cached function must be hashable.
View the cache statistics named tuple (hits, misses, maxsize, currsize) with
f.cache_info(). Clear the cache and statistics with f.cache_clear().
Access the underlying function with f.__wrapped__.
See: http://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used
"""
# Users should only access the lru_cache through its public API:
# cache_info, cache_clear, and f.__wrapped__
# The internals of the lru_cache are encapsulated for thread safety and
# to allow the implementation to change (including a possible C version).
def decorating_function(user_function):
cache = dict()
stats = [0, 0] # make statistics updateable non-locally
HITS, MISSES = 0, 1 # names for the stats fields
make_key = _make_key
cache_get = cache.get # bound method to lookup key or return None
_len = len # localize the global len() function
lock = RLock() # because linkedlist updates aren't threadsafe
root = [] # root of the circular doubly linked list
root[:] = [root, root, None, None] # initialize by pointing to self
nonlocal_root = [root] # make updateable non-locally
PREV, NEXT, KEY, RESULT = 0, 1, 2, 3 # names for the link fields
if maxsize == 0:
def wrapper(*args, **kwds):
# no caching, just do a statistics update after a successful call
result = user_function(*args, **kwds)
stats[MISSES] += 1
return result
elif maxsize is None:
def wrapper(*args, **kwds):
# simple caching without ordering or size limit
key = make_key(args, kwds, typed)
result = cache_get(key, root) # root used here as a unique not-found sentinel
if result is not root:
stats[HITS] += 1
return result
result = user_function(*args, **kwds)
cache[key] = result
stats[MISSES] += 1
return result
else:
def wrapper(*args, **kwds):
# size limited caching that tracks accesses by recency
key = make_key(args, kwds, typed) if kwds or typed else args
with lock:
link = cache_get(key)
if link is not None:
# record recent use of the key by moving it to the front of the list
root, = nonlocal_root
link_prev, link_next, key, result = link
link_prev[NEXT] = link_next
link_next[PREV] = link_prev
last = root[PREV]
last[NEXT] = root[PREV] = link
link[PREV] = last
link[NEXT] = root
stats[HITS] += 1
return result
result = user_function(*args, **kwds)
with lock:
root, = nonlocal_root
if key in cache:
# getting here means that this same key was added to the
# cache while the lock was released. since the link
# update is already done, we need only return the
# computed result and update the count of misses.
pass
elif _len(cache) >= maxsize:
# use the old root to store the new key and result
oldroot = root
oldroot[KEY] = key
oldroot[RESULT] = result
# empty the oldest link and make it the new root
root = nonlocal_root[0] = oldroot[NEXT]
oldkey = root[KEY]
root[KEY] = root[RESULT] = None
# now update the cache dictionary for the new links
del cache[oldkey]
cache[key] = oldroot
else:
# put result in a new link at the front of the list
last = root[PREV]
link = [last, root, key, result]
last[NEXT] = root[PREV] = cache[key] = link
stats[MISSES] += 1
return result
def cache_info():
"""Report cache statistics"""
with lock:
return _CacheInfo(stats[HITS], stats[MISSES], maxsize, len(cache))
def cache_clear():
"""Clear the cache and cache statistics"""
with lock:
cache.clear()
root = nonlocal_root[0]
root[:] = [root, root, None, None]
stats[:] = [0, 0]
wrapper.__wrapped__ = user_function
wrapper.cache_info = cache_info
wrapper.cache_clear = cache_clear
return update_wrapper(wrapper, user_function)
return decorating_function
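A quick check of the backport above, assuming it is importable as lru_cache: memoize a naive Fibonacci and inspect the hit/miss statistics through the public API the docstring describes.

@lru_cache(maxsize=32)
def fib(n):
    return n if n < 2 else fib(n - 1) + fib(n - 2)

print(fib(20))           # 6765
print(fib.cache_info())  # CacheInfo(hits=18, misses=21, maxsize=32, currsize=21)
fib.cache_clear()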
View file
@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
# This file is part of beets. # This file is part of beets.
# Copyright 2013, Adrian Sampson. # Copyright 2016, Adrian Sampson.
# #
# Permission is hereby granted, free of charge, to any person obtaining # Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the # a copy of this software and associated documentation files (the
@ -12,15 +13,30 @@
# The above copyright notice and this permission notice shall be # The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software. # included in all copies or substantial portions of the Software.
# This particular version has been slightly modified to work with headphones from __future__ import division, absolute_import, print_function
# https://github.com/rembo10/headphones
__version__ = '1.3.4' import os
__author__ = 'Adrian Sampson <adrian@radbox.org>'
import beets.library
from beets.util import confit from beets.util import confit
Library = beets.library.Library __version__ = u'1.3.18'
__author__ = u'Adrian Sampson <adrian@radbox.org>'
config = confit.LazyConfig('beets', __name__)
class IncludeLazyConfig(confit.LazyConfig):
"""A version of Confit's LazyConfig that also merges in data from
YAML files specified in an `include` setting.
"""
def read(self, user=True, defaults=True):
super(IncludeLazyConfig, self).read(user, defaults)
try:
for view in self['include']:
filename = view.as_filename()
if os.path.isfile(filename):
self.set_file(filename)
except confit.NotFoundError:
pass
config = IncludeLazyConfig('beets', __name__)
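A hypothetical illustration of the include mechanism above: read() loads the main file first, then merges each listed file that actually exists on disk, and a missing `include` key is silently ignored (the NotFoundError branch).

# Given ~/.config/beets/config.yaml containing:
#
#     include:
#       - secrets.yaml
#       - paths.yaml
#
from beets import config   # the IncludeLazyConfig instance defined above
config.read()              # merges secrets.yaml and paths.yaml if present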
200
libs/beets/art.py Normal file
View file
@ -0,0 +1,200 @@
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""High-level utilities for manipulating image files associated with
music and items' embedded album art.
"""
from __future__ import division, absolute_import, print_function
import subprocess
import platform
from tempfile import NamedTemporaryFile
import imghdr
import os
from beets.util import displayable_path, syspath
from beets.util.artresizer import ArtResizer
from beets import mediafile
def mediafile_image(image_path, maxwidth=None):
"""Return a `mediafile.Image` object for the path.
"""
with open(syspath(image_path), 'rb') as f:
data = f.read()
return mediafile.Image(data, type=mediafile.ImageType.front)
def get_art(log, item):
# Extract the art.
try:
mf = mediafile.MediaFile(syspath(item.path))
except mediafile.UnreadableFileError as exc:
log.warning(u'Could not extract art from {0}: {1}',
displayable_path(item.path), exc)
return
return mf.art
def embed_item(log, item, imagepath, maxwidth=None, itempath=None,
compare_threshold=0, ifempty=False, as_album=False):
"""Embed an image into the item's media file.
"""
# Conditions and filters.
if compare_threshold:
if not check_art_similarity(log, item, imagepath, compare_threshold):
log.info(u'Image not similar; skipping.')
return
if ifempty and get_art(log, item):
log.info(u'media file already contained art')
return
if maxwidth and not as_album:
imagepath = resize_image(log, imagepath, maxwidth)
# Get the `Image` object from the file.
try:
log.debug(u'embedding {0}', displayable_path(imagepath))
image = mediafile_image(imagepath, maxwidth)
except IOError as exc:
log.warning(u'could not read image file: {0}', exc)
return
# Make sure the image kind is safe (some formats only support PNG
# and JPEG).
if image.mime_type not in ('image/jpeg', 'image/png'):
log.info('not embedding image of unsupported type: {}',
image.mime_type)
return
item.try_write(path=itempath, tags={'images': [image]})
def embed_album(log, album, maxwidth=None, quiet=False,
compare_threshold=0, ifempty=False):
"""Embed album art into all of the album's items.
"""
imagepath = album.artpath
if not imagepath:
log.info(u'No album art present for {0}', album)
return
if not os.path.isfile(syspath(imagepath)):
log.info(u'Album art not found at {0} for {1}',
displayable_path(imagepath), album)
return
if maxwidth:
imagepath = resize_image(log, imagepath, maxwidth)
log.info(u'Embedding album art into {0}', album)
for item in album.items():
embed_item(log, item, imagepath, maxwidth, None,
compare_threshold, ifempty, as_album=True)
def resize_image(log, imagepath, maxwidth):
"""Returns path to an image resized to maxwidth.
"""
log.debug(u'Resizing album art to {0} pixels wide', maxwidth)
imagepath = ArtResizer.shared.resize(maxwidth, syspath(imagepath))
return imagepath
def check_art_similarity(log, item, imagepath, compare_threshold):
"""A boolean indicating if an image is similar to embedded item art.
"""
with NamedTemporaryFile(delete=True) as f:
art = extract(log, f.name, item)
if art:
is_windows = platform.system() == "Windows"
# Converting images to grayscale tends to minimize the weight
# of colors in the diff score.
convert_proc = subprocess.Popen(
[b'convert', syspath(imagepath), syspath(art),
b'-colorspace', b'gray', b'MIFF:-'],
stdout=subprocess.PIPE,
close_fds=not is_windows,
)
compare_proc = subprocess.Popen(
[b'compare', b'-metric', b'PHASH', b'-', b'null:'],
stdin=convert_proc.stdout,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=not is_windows,
)
convert_proc.stdout.close()
stdout, stderr = compare_proc.communicate()
if compare_proc.returncode:
if compare_proc.returncode != 1:
log.debug(u'IM phashes compare failed for {0}, {1}',
displayable_path(imagepath),
displayable_path(art))
return
out_str = stderr
else:
out_str = stdout
try:
phash_diff = float(out_str)
except ValueError:
log.debug(u'IM output is not a number: {0!r}', out_str)
return
log.debug(u'compare PHASH score is {0}', phash_diff)
return phash_diff <= compare_threshold
return True
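The similarity check above shells out to ImageMagick; a standalone sketch of the same two-stage pipeline for any pair of image paths (requires the `convert` and `compare` binaries on PATH; phash_distance is an illustrative name).

import subprocess

def phash_distance(path_a, path_b):
    convert = subprocess.Popen(
        ['convert', path_a, path_b, '-colorspace', 'gray', 'MIFF:-'],
        stdout=subprocess.PIPE)
    compare = subprocess.Popen(
        ['compare', '-metric', 'PHASH', '-', 'null:'],
        stdin=convert.stdout, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    convert.stdout.close()   # let convert see a SIGPIPE if compare dies
    stdout, stderr = compare.communicate()
    # ImageMagick reports the metric on stderr with exit status 1 on mismatch
    return float(stderr if compare.returncode == 1 else stdout)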
def extract(log, outpath, item):
art = get_art(log, item)
if not art:
log.info(u'No album art present in {0}, skipping.', item)
return
# Add an extension to the filename.
ext = imghdr.what(None, h=art)
if not ext:
log.warning(u'Unknown image type in {0}.',
displayable_path(item.path))
return
outpath += b'.' + ext
log.info(u'Extracting album art from: {0} to: {1}',
item, displayable_path(outpath))
with open(syspath(outpath), 'wb') as f:
f.write(art)
return outpath
def extract_first(log, outpath, items):
for item in items:
real_path = extract(log, outpath, item)
if real_path:
return real_path
def clear(log, lib, query):
items = lib.items(query)
log.info(u'Clearing album art from {0} items', len(items))
for item in items:
log.debug(u'Clearing art for {0}', item)
item.try_write(tags={'images': None})
View file
@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
# This file is part of beets. # This file is part of beets.
# Copyright 2013, Adrian Sampson. # Copyright 2016, Adrian Sampson.
# #
# Permission is hereby granted, free of charge, to any person obtaining # Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the # a copy of this software and associated documentation files (the
@ -14,135 +15,23 @@
"""Facilities for automatically determining files' correct metadata. """Facilities for automatically determining files' correct metadata.
""" """
import os
import logging
import re
from beets import library, mediafile, config from __future__ import division, absolute_import, print_function
from beets.util import sorted_walk, ancestry, displayable_path
from beets import logging
from beets import config
# Parts of external interface. # Parts of external interface.
from .hooks import AlbumInfo, TrackInfo, AlbumMatch, TrackMatch from .hooks import AlbumInfo, TrackInfo, AlbumMatch, TrackMatch # noqa
from .match import tag_item, tag_album from .match import tag_item, tag_album # noqa
from .match import recommendation from .match import Recommendation # noqa
# Global logger. # Global logger.
log = logging.getLogger('beets') log = logging.getLogger('beets')
# Constants for directory walker.
MULTIDISC_MARKERS = (r'dis[ck]', r'cd')
MULTIDISC_PAT_FMT = r'^(.*%s[\W_]*)\d'
# Additional utilities for the main interface. # Additional utilities for the main interface.
def albums_in_dir(path):
"""Recursively searches the given directory and returns an iterable
of (paths, items) where paths is a list of directories and items is
a list of Items that is probably an album. Specifically, any folder
containing any media files is an album.
"""
collapse_pat = collapse_paths = collapse_items = None
for root, dirs, files in sorted_walk(path,
ignore=config['ignore'].as_str_seq(),
logger=log):
# Get a list of items in the directory.
items = []
for filename in files:
try:
i = library.Item.from_path(os.path.join(root, filename))
except mediafile.FileTypeError:
pass
except mediafile.UnreadableFileError:
log.warn(u'unreadable file: {0}'.format(
displayable_path(filename))
)
else:
items.append(i)
# If we're currently collapsing the constituent directories in a
# multi-disc album, check whether we should continue collapsing
# and add the current directory. If so, just add the directory
# and move on to the next directory. If not, stop collapsing.
if collapse_paths:
if (not collapse_pat and collapse_paths[0] in ancestry(root)) or \
(collapse_pat and
collapse_pat.match(os.path.basename(root))):
# Still collapsing.
collapse_paths.append(root)
collapse_items += items
continue
else:
# Collapse finished. Yield the collapsed directory and
# proceed to process the current one.
if collapse_items:
yield collapse_paths, collapse_items
collapse_pat = collapse_paths = collapse_items = None
# Check whether this directory looks like the *first* directory
# in a multi-disc sequence. There are two indicators: the file
# is named like part of a multi-disc sequence (e.g., "Title Disc
# 1") or it contains no items but only directories that are
# named in this way.
start_collapsing = False
for marker in MULTIDISC_MARKERS:
marker_pat = re.compile(MULTIDISC_PAT_FMT % marker, re.I)
match = marker_pat.match(os.path.basename(root))
# Is this directory the root of a nested multi-disc album?
if dirs and not items:
# Check whether all subdirectories have the same prefix.
start_collapsing = True
subdir_pat = None
for subdir in dirs:
# The first directory dictates the pattern for
# the remaining directories.
if not subdir_pat:
match = marker_pat.match(subdir)
if match:
subdir_pat = re.compile(r'^%s\d' %
re.escape(match.group(1)), re.I)
else:
start_collapsing = False
break
# Subsequent directories must match the pattern.
elif not subdir_pat.match(subdir):
start_collapsing = False
break
# If all subdirectories match, don't check other
# markers.
if start_collapsing:
break
# Is this directory the first in a flattened multi-disc album?
elif match:
start_collapsing = True
# Set the current pattern to match directories with the same
# prefix as this one, followed by a digit.
collapse_pat = re.compile(r'^%s\d' %
re.escape(match.group(1)), re.I)
break
# If either of the above heuristics indicated that this is the
# beginning of a multi-disc album, initialize the collapsed
# directory and item lists and check the next directory.
if start_collapsing:
# Start collapsing; continue to the next iteration.
collapse_paths = [root]
collapse_items = items
continue
# If it's nonempty, yield it.
if items:
yield [root], items
# Clear out any unfinished collapse.
if collapse_paths and collapse_items:
yield collapse_paths, collapse_items
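The multi-disc heuristic in the removed walker above reduces to two regexes built from MULTIDISC_MARKERS and MULTIDISC_PAT_FMT; a tiny check of what they actually match:

import re

marker_pat = re.compile(r'^(.*dis[ck][\W_]*)\d', re.I)   # marker 'dis[ck]'
m = marker_pat.match('The Wall Disc 1')
print(m.group(1))                                        # 'The Wall Disc '
subdir_pat = re.compile(r'^%s\d' % re.escape(m.group(1)), re.I)
print(bool(subdir_pat.match('The Wall Disc 2')))         # True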
def apply_item_metadata(item, track_info): def apply_item_metadata(item, track_info):
"""Set an item's metadata from its matched TrackInfo object. """Set an item's metadata from its matched TrackInfo object.
""" """
@ -153,9 +42,12 @@ def apply_item_metadata(item, track_info):
item.mb_trackid = track_info.track_id item.mb_trackid = track_info.track_id
if track_info.artist_id: if track_info.artist_id:
item.mb_artistid = track_info.artist_id item.mb_artistid = track_info.artist_id
if track_info.data_source:
item.data_source = track_info.data_source
# At the moment, the other metadata is left intact (including album # At the moment, the other metadata is left intact (including album
# and track number). Perhaps these should be emptied? # and track number). Perhaps these should be emptied?
def apply_metadata(album_info, mapping): def apply_metadata(album_info, mapping):
"""Set the items' metadata to match an AlbumInfo object using a """Set the items' metadata to match an AlbumInfo object using a
mapping from Items to TrackInfo objects. mapping from Items to TrackInfo objects.
@ -171,8 +63,8 @@ def apply_metadata(album_info, mapping):
# Artist sort and credit names. # Artist sort and credit names.
item.artist_sort = track_info.artist_sort or album_info.artist_sort item.artist_sort = track_info.artist_sort or album_info.artist_sort
item.artist_credit = track_info.artist_credit or \ item.artist_credit = (track_info.artist_credit or
album_info.artist_credit album_info.artist_credit)
item.albumartist_sort = album_info.artist_sort item.albumartist_sort = album_info.artist_sort
item.albumartist_credit = album_info.artist_credit item.albumartist_credit = album_info.artist_credit
@ -203,7 +95,11 @@ def apply_metadata(album_info, mapping):
item.title = track_info.title item.title = track_info.title
if config['per_disc_numbering']: if config['per_disc_numbering']:
item.track = track_info.medium_index or track_info.index # We want to let the track number be zero, but if the medium index
# is not provided we need to fall back to the overall index.
item.track = track_info.medium_index
if item.track is None:
item.track = track_info.index
item.tracktotal = track_info.medium_total or len(album_info.tracks) item.tracktotal = track_info.medium_total or len(album_info.tracks)
else: else:
item.track = track_info.index item.track = track_info.index
@ -235,13 +131,13 @@ def apply_metadata(album_info, mapping):
'language', 'language',
'country', 'country',
'albumstatus', 'albumstatus',
'media', 'albumdisambig',
'albumdisambig'): 'data_source',):
value = getattr(album_info, field) value = getattr(album_info, field)
if value is not None: if value is not None:
item[field] = value item[field] = value
if track_info.disctitle is not None: if track_info.disctitle is not None:
item.disctitle = track_info.disctitle item.disctitle = track_info.disctitle
# Headphones seal of approval if track_info.media is not None:
item.comments = 'tagged by headphones/beets' item.media = track_info.media
View file
@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
# This file is part of beets. # This file is part of beets.
# Copyright 2013, Adrian Sampson. # Copyright 2016, Adrian Sampson.
# #
# Permission is hereby granted, free of charge, to any person obtaining # Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the # a copy of this software and associated documentation files (the
@ -13,14 +14,16 @@
# included in all copies or substantial portions of the Software. # included in all copies or substantial portions of the Software.
"""Glue between metadata sources and the matching logic.""" """Glue between metadata sources and the matching logic."""
import logging from __future__ import division, absolute_import, print_function
from collections import namedtuple from collections import namedtuple
import re import re
from beets import logging
from beets import plugins from beets import plugins
from beets import config from beets import config
from beets.autotag import mb from beets.autotag import mb
from beets.util import levenshtein from jellyfish import levenshtein_distance
from unidecode import unidecode from unidecode import unidecode
log = logging.getLogger('beets') log = logging.getLogger('beets')
@ -109,13 +112,14 @@ class AlbumInfo(object):
'catalognum', 'script', 'language', 'country', 'catalognum', 'script', 'language', 'country',
'albumstatus', 'albumdisambig', 'artist_credit', 'media']: 'albumstatus', 'albumdisambig', 'artist_credit', 'media']:
value = getattr(self, fld) value = getattr(self, fld)
if isinstance(value, str): if isinstance(value, bytes):
setattr(self, fld, value.decode(codec, 'ignore')) setattr(self, fld, value.decode(codec, 'ignore'))
if self.tracks: if self.tracks:
for track in self.tracks: for track in self.tracks:
track.decode(codec) track.decode(codec)
class TrackInfo(object): class TrackInfo(object):
"""Describes a canonical track present on a release. Appears as part """Describes a canonical track present on a release. Appears as part
of an AlbumInfo's ``tracks`` list. Consists of these data members: of an AlbumInfo's ``tracks`` list. Consists of these data members:
@ -126,12 +130,15 @@ class TrackInfo(object):
- ``artist_id`` - ``artist_id``
- ``length``: float: duration of the track in seconds - ``length``: float: duration of the track in seconds
- ``index``: position on the entire release - ``index``: position on the entire release
- ``media``: delivery mechanism (Vinyl, etc.)
- ``medium``: the disc number this track appears on in the album - ``medium``: the disc number this track appears on in the album
- ``medium_index``: the track's position on the disc - ``medium_index``: the track's position on the disc
- ``medium_total``: the number of tracks on the item's disc - ``medium_total``: the number of tracks on the item's disc
- ``artist_sort``: name of the track artist for sorting - ``artist_sort``: name of the track artist for sorting
- ``disctitle``: name of the individual medium (subtitle) - ``disctitle``: name of the individual medium (subtitle)
- ``artist_credit``: Recording-specific artist name - ``artist_credit``: Recording-specific artist name
- ``data_source``: The original data source (MusicBrainz, Discogs, etc.)
- ``data_url``: The data source release URL.
Only ``title`` and ``track_id`` are required. The rest of the fields Only ``title`` and ``track_id`` are required. The rest of the fields
may be None. The indices ``index``, ``medium``, and ``medium_index`` may be None. The indices ``index``, ``medium``, and ``medium_index``
@ -140,13 +147,15 @@ class TrackInfo(object):
def __init__(self, title, track_id, artist=None, artist_id=None, def __init__(self, title, track_id, artist=None, artist_id=None,
length=None, index=None, medium=None, medium_index=None, length=None, index=None, medium=None, medium_index=None,
medium_total=None, artist_sort=None, disctitle=None, medium_total=None, artist_sort=None, disctitle=None,
artist_credit=None, data_source=None, data_url=None): artist_credit=None, data_source=None, data_url=None,
media=None):
self.title = title self.title = title
self.track_id = track_id self.track_id = track_id
self.artist = artist self.artist = artist
self.artist_id = artist_id self.artist_id = artist_id
self.length = length self.length = length
self.index = index self.index = index
self.media = media
self.medium = medium self.medium = medium
self.medium_index = medium_index self.medium_index = medium_index
self.medium_total = medium_total self.medium_total = medium_total
@ -162,9 +171,9 @@ class TrackInfo(object):
to Unicode. to Unicode.
""" """
for fld in ['title', 'artist', 'medium', 'artist_sort', 'disctitle', for fld in ['title', 'artist', 'medium', 'artist_sort', 'disctitle',
'artist_credit']: 'artist_credit', 'media']:
value = getattr(self, fld) value = getattr(self, fld)
if isinstance(value, str): if isinstance(value, bytes):
setattr(self, fld, value.decode(codec, 'ignore')) setattr(self, fld, value.decode(codec, 'ignore'))
@ -187,27 +196,33 @@ SD_REPLACE = [
(r'&', 'and'), (r'&', 'and'),
] ]
def _string_dist_basic(str1, str2): def _string_dist_basic(str1, str2):
"""Basic edit distance between two strings, ignoring """Basic edit distance between two strings, ignoring
non-alphanumeric characters and case. Comparisons are based on a non-alphanumeric characters and case. Comparisons are based on a
transliteration/lowering to ASCII characters. Normalized by string transliteration/lowering to ASCII characters. Normalized by string
length. length.
""" """
str1 = unidecode(str1) assert isinstance(str1, unicode)
str2 = unidecode(str2) assert isinstance(str2, unicode)
str1 = unidecode(str1).decode('ascii')
str2 = unidecode(str2).decode('ascii')
str1 = re.sub(r'[^a-z0-9]', '', str1.lower()) str1 = re.sub(r'[^a-z0-9]', '', str1.lower())
str2 = re.sub(r'[^a-z0-9]', '', str2.lower()) str2 = re.sub(r'[^a-z0-9]', '', str2.lower())
if not str1 and not str2: if not str1 and not str2:
return 0.0 return 0.0
return levenshtein(str1, str2) / float(max(len(str1), len(str2))) return levenshtein_distance(str1, str2) / float(max(len(str1), len(str2)))
def string_dist(str1, str2): def string_dist(str1, str2):
"""Gives an "intuitive" edit distance between two strings. This is """Gives an "intuitive" edit distance between two strings. This is
an edit distance, normalized by the string length, with a number of an edit distance, normalized by the string length, with a number of
tweaks that reflect intuition about text. tweaks that reflect intuition about text.
""" """
if str1 == None and str2 == None: return 0.0 if str1 is None and str2 is None:
if str1 == None or str2 == None: return 1.0 return 0.0
if str1 is None or str2 is None:
return 1.0
str1 = str1.lower() str1 = str1.lower()
str2 = str2.lower() str2 = str2.lower()
@ -217,9 +232,9 @@ def string_dist(str1, str2):
# "something, the". # "something, the".
for word in SD_END_WORDS: for word in SD_END_WORDS:
if str1.endswith(', %s' % word): if str1.endswith(', %s' % word):
str1 = '%s %s' % (word, str1[:-len(word)-2]) str1 = '%s %s' % (word, str1[:-len(word) - 2])
if str2.endswith(', %s' % word): if str2.endswith(', %s' % word):
str2 = '%s %s' % (word, str2[:-len(word)-2]) str2 = '%s %s' % (word, str2[:-len(word) - 2])
# Perform a couple of basic normalizing substitutions. # Perform a couple of basic normalizing substitutions.
for pat, repl in SD_REPLACE: for pat, repl in SD_REPLACE:
@ -256,6 +271,23 @@ def string_dist(str1, str2):
return base_dist + penalty return base_dist + penalty
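A worked instance of the heuristics above, using the plain levenshtein_distance this hunk swaps in from jellyfish. The raw normalized distance between u'the wall' and u'wall, the' is large, but string_dist first rotates the trailing ', the' back to the front, so the strings that reach the edit distance are identical and the result collapses to (near) zero.

from jellyfish import levenshtein_distance

def normalized(a, b):
    # mirror of _string_dist_basic without the unidecode/strip steps
    return levenshtein_distance(a, b) / float(max(len(a), len(b)))

print(normalized(u'the wall', u'wall, the'))  # well above zero
# after the SD_END_WORDS rewrite both operands are u'the wall':
print(normalized(u'the wall', u'the wall'))   # 0.0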
class LazyClassProperty(object):
"""A decorator implementing a read-only property that is *lazy* in
the sense that the getter is only invoked once. Subsequent accesses
through *any* instance use the cached result.
"""
def __init__(self, getter):
self.getter = getter
self.computed = False
def __get__(self, obj, owner):
if not self.computed:
self.value = self.getter(owner)
self.computed = True
return self.value
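A small demonstration of the descriptor above: the getter receives the owner class (not an instance) and runs once; the cached value is then shared by every instance.

class Example(object):
    @LazyClassProperty
    def expensive(cls):
        print('computing...')
        return 42

a, b = Example(), Example()
print(a.expensive)   # computing... then 42
print(b.expensive)   # 42 (cached; the getter is not invoked again)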
class Distance(object): class Distance(object):
"""Keeps track of multiple distance penalties. Provides a single """Keeps track of multiple distance penalties. Provides a single
weighted distance for all penalties as well as a weighted distance weighted distance for all penalties as well as a weighted distance
@ -264,11 +296,15 @@ class Distance(object):
def __init__(self): def __init__(self):
self._penalties = {} self._penalties = {}
@LazyClassProperty
def _weights(cls): # noqa
"""A dictionary from keys to floating-point weights.
"""
weights_view = config['match']['distance_weights'] weights_view = config['match']['distance_weights']
self._weights = {} weights = {}
for key in weights_view.keys(): for key in weights_view.keys():
self._weights[key] = weights_view[key].as_number() weights[key] = weights_view[key].as_number()
return weights
# Access the components and their aggregates. # Access the components and their aggregates.
@ -313,8 +349,10 @@ class Distance(object):
# Convert distance into a negative float we can sort items in # Convert distance into a negative float we can sort items in
# ascending order (for keys, when the penalty is equal) and # ascending order (for keys, when the penalty is equal) and
# still get the items with the biggest distance first. # still get the items with the biggest distance first.
return sorted(list_, key=lambda (key, dist): (0-dist, key)) return sorted(
list_,
key=lambda key_and_dist: (-key_and_dist[1], key_and_dist[0])
)
# Behave like a float. # Behave like a float.
@ -323,12 +361,15 @@ class Distance(object):
def __float__(self): def __float__(self):
return self.distance return self.distance
def __sub__(self, other): def __sub__(self, other):
return self.distance - other return self.distance - other
def __rsub__(self, other): def __rsub__(self, other):
return other - self.distance return other - self.distance
def __unicode__(self):
return "{0:.2f}".format(self.distance)
# Behave like a dict. # Behave like a dict.
@ -355,11 +396,11 @@ class Distance(object):
""" """
if not isinstance(dist, Distance): if not isinstance(dist, Distance):
raise ValueError( raise ValueError(
'`dist` must be a Distance object. It is: %r' % dist) u'`dist` must be a Distance object, not {0}'.format(type(dist))
)
for key, penalties in dist._penalties.iteritems(): for key, penalties in dist._penalties.iteritems():
self._penalties.setdefault(key, []).extend(penalties) self._penalties.setdefault(key, []).extend(penalties)
# Adding components. # Adding components.
def _eq(self, value1, value2): def _eq(self, value1, value2):
@ -379,7 +420,8 @@ class Distance(object):
""" """
if not 0.0 <= dist <= 1.0: if not 0.0 <= dist <= 1.0:
raise ValueError( raise ValueError(
'`dist` must be between 0.0 and 1.0. It is: %r' % dist) u'`dist` must be between 0.0 and 1.0, not {0}'.format(dist)
)
self._penalties.setdefault(key, []).append(dist) self._penalties.setdefault(key, []).append(dist)
def add_equality(self, key, value, options): def add_equality(self, key, value, options):
@ -472,31 +514,47 @@ def album_for_mbid(release_id):
if the ID is not found. if the ID is not found.
""" """
try: try:
return mb.album_for_id(release_id) album = mb.album_for_id(release_id)
if album:
plugins.send(u'albuminfo_received', info=album)
return album
except mb.MusicBrainzAPIError as exc: except mb.MusicBrainzAPIError as exc:
exc.log(log) exc.log(log)
def track_for_mbid(recording_id): def track_for_mbid(recording_id):
"""Get a TrackInfo object for a MusicBrainz recording ID. Return None """Get a TrackInfo object for a MusicBrainz recording ID. Return None
if the ID is not found. if the ID is not found.
""" """
try: try:
return mb.track_for_id(recording_id) track = mb.track_for_id(recording_id)
if track:
plugins.send(u'trackinfo_received', info=track)
return track
except mb.MusicBrainzAPIError as exc: except mb.MusicBrainzAPIError as exc:
exc.log(log) exc.log(log)
def albums_for_id(album_id): def albums_for_id(album_id):
"""Get a list of albums for an ID.""" """Get a list of albums for an ID."""
candidates = [album_for_mbid(album_id)] candidates = [album_for_mbid(album_id)]
candidates.extend(plugins.album_for_id(album_id)) plugin_albums = plugins.album_for_id(album_id)
for a in plugin_albums:
plugins.send(u'albuminfo_received', info=a)
candidates.extend(plugin_albums)
return filter(None, candidates) return filter(None, candidates)
def tracks_for_id(track_id): def tracks_for_id(track_id):
"""Get a list of tracks for an ID.""" """Get a list of tracks for an ID."""
candidates = [track_for_mbid(track_id)] candidates = [track_for_mbid(track_id)]
candidates.extend(plugins.track_for_id(track_id)) plugin_tracks = plugins.track_for_id(track_id)
for t in plugin_tracks:
plugins.send(u'trackinfo_received', info=t)
candidates.extend(plugin_tracks)
return filter(None, candidates) return filter(None, candidates)
def album_candidates(items, artist, album, va_likely): def album_candidates(items, artist, album, va_likely):
"""Search for album matches. ``items`` is a list of Item objects """Search for album matches. ``items`` is a list of Item objects
that make up the album. ``artist`` and ``album`` are the respective that make up the album. ``artist`` and ``album`` are the respective
@ -523,8 +581,13 @@ def album_candidates(items, artist, album, va_likely):
# Candidates from plugins. # Candidates from plugins.
out.extend(plugins.candidates(items, artist, album, va_likely)) out.extend(plugins.candidates(items, artist, album, va_likely))
# Notify subscribed plugins about fetched album info
for a in out:
plugins.send(u'albuminfo_received', info=a)
return out return out
def item_candidates(item, artist, title): def item_candidates(item, artist, title):
"""Search for item matches. ``item`` is the Item to be matched. """Search for item matches. ``item`` is the Item to be matched.
``artist`` and ``title`` are strings and either reflect the item or ``artist`` and ``title`` are strings and either reflect the item or
@ -542,4 +605,8 @@ def item_candidates(item, artist, title):
# Plugin candidates. # Plugin candidates.
out.extend(plugins.item_candidates(item, artist, title)) out.extend(plugins.item_candidates(item, artist, title))
# Notify subscribed plugins about fetched track info
for i in out:
plugins.send(u'trackinfo_received', info=i)
return out return out
View file
@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
# This file is part of beets. # This file is part of beets.
# Copyright 2013, Adrian Sampson. # Copyright 2016, Adrian Sampson.
# #
# Permission is hereby granted, free of charge, to any person obtaining # Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the # a copy of this software and associated documentation files (the
@ -15,21 +16,20 @@
"""Matches existing metadata with canonical information to identify """Matches existing metadata with canonical information to identify
releases and tracks. releases and tracks.
""" """
from __future__ import division
from __future__ import division, absolute_import, print_function
import datetime import datetime
import logging
import re import re
from munkres import Munkres from munkres import Munkres
from beets import logging
from beets import plugins from beets import plugins
from beets import config from beets import config
from beets.util import plurality from beets.util import plurality
from beets.util.enumeration import enum
from beets.autotag import hooks from beets.autotag import hooks
from beets.util.enumeration import OrderedEnum
# Recommendation enumeration. from functools import reduce
recommendation = enum('none', 'low', 'medium', 'strong', name='recommendation')
# Artist signals that indicate "various artists". These are used at the # Artist signals that indicate "various artists". These are used at the
# album level to determine whether a given release is likely a VA # album level to determine whether a given release is likely a VA
@ -41,6 +41,18 @@ VA_ARTISTS = (u'', u'various artists', u'various', u'va', u'unknown')
log = logging.getLogger('beets') log = logging.getLogger('beets')
# Recommendation enumeration.
class Recommendation(OrderedEnum):
"""Indicates a qualitative suggestion to the user about what should
be done with a given match.
"""
none = 0
low = 1
medium = 2
strong = 3
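Because Recommendation is now an OrderedEnum, callers can compare members directly instead of juggling integers. A hedged sketch of gating on a threshold; should_autotag is illustrative, not beets API.

from beets.autotag import Recommendation

def should_autotag(rec):
    return rec >= Recommendation.strong   # weaker matches need confirmation

print(should_autotag(Recommendation.medium))  # False
print(should_autotag(Recommendation.strong))  # True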
# Primary matching functionality. # Primary matching functionality.
def current_metadata(items): def current_metadata(items):
@ -56,10 +68,10 @@ def current_metadata(items):
fields = ['artist', 'album', 'albumartist', 'year', 'disctotal', fields = ['artist', 'album', 'albumartist', 'year', 'disctotal',
'mb_albumid', 'label', 'catalognum', 'country', 'media', 'mb_albumid', 'label', 'catalognum', 'country', 'media',
'albumdisambig'] 'albumdisambig']
for key in fields: for field in fields:
values = [getattr(item, key) for item in items if item] values = [item[field] for item in items if item]
likelies[key], freq = plurality(values) likelies[field], freq = plurality(values)
consensus[key] = (freq == len(values)) consensus[field] = (freq == len(values))
# If there's an album artist consensus, use this for the artist. # If there's an album artist consensus, use this for the artist.
if consensus['albumartist'] and likelies['albumartist']: if consensus['albumartist'] and likelies['albumartist']:
@ -67,6 +79,7 @@ def current_metadata(items):
return likelies, consensus return likelies, consensus
def assign_items(items, tracks): def assign_items(items, tracks):
"""Given a list of Items and a list of TrackInfo objects, find the """Given a list of Items and a list of TrackInfo objects, find the
best mapping between them. Returns a mapping from Items to TrackInfo best mapping between them. Returns a mapping from Items to TrackInfo
@ -93,12 +106,14 @@ def assign_items(items, tracks):
extra_tracks.sort(key=lambda t: (t.index, t.title)) extra_tracks.sort(key=lambda t: (t.index, t.title))
return mapping, extra_items, extra_tracks return mapping, extra_items, extra_tracks
def track_index_changed(item, track_info): def track_index_changed(item, track_info):
"""Returns True if the item and track info index is different. Tolerates """Returns True if the item and track info index is different. Tolerates
per disc and per release numbering. per disc and per release numbering.
""" """
return item.track not in (track_info.medium_index, track_info.index) return item.track not in (track_info.medium_index, track_info.index)
def track_distance(item, track_info, incl_artist=False): def track_distance(item, track_info, incl_artist=False):
"""Determines the significance of a track metadata change. Returns a """Determines the significance of a track metadata change. Returns a
Distance object. `incl_artist` indicates that a distance component should Distance object. `incl_artist` indicates that a distance component should
@ -109,7 +124,7 @@ def track_distance(item, track_info, incl_artist=False):
# Length. # Length.
if track_info.length: if track_info.length:
diff = abs(item.length - track_info.length) - \ diff = abs(item.length - track_info.length) - \
config['match']['track_length_grace'].as_number() config['match']['track_length_grace'].as_number()
dist.add_ratio('track_length', diff, dist.add_ratio('track_length', diff,
config['match']['track_length_max'].as_number()) config['match']['track_length_max'].as_number())
@ -134,6 +149,7 @@ def track_distance(item, track_info, incl_artist=False):
return dist return dist
def distance(items, album_info, mapping): def distance(items, album_info, mapping):
"""Determines how "significant" an album metadata change would be. """Determines how "significant" an album metadata change would be.
Returns a Distance object. `album_info` is an AlbumInfo object Returns a Distance object. `album_info` is an AlbumInfo object
@ -239,6 +255,7 @@ def distance(items, album_info, mapping):
return dist return dist
def match_by_id(items): def match_by_id(items):
"""If the items are tagged with a MusicBrainz album ID, returns an """If the items are tagged with a MusicBrainz album ID, returns an
AlbumInfo object for the corresponding album. Otherwise, returns AlbumInfo object for the corresponding album. Otherwise, returns
@ -247,16 +264,17 @@ def match_by_id(items):
# Is there a consensus on the MB album ID? # Is there a consensus on the MB album ID?
albumids = [item.mb_albumid for item in items if item.mb_albumid] albumids = [item.mb_albumid for item in items if item.mb_albumid]
if not albumids: if not albumids:
log.debug('No album IDs found.') log.debug(u'No album IDs found.')
return None return None
# If all album IDs are equal, look up the album. # If all album IDs are equal, look up the album.
if bool(reduce(lambda x,y: x if x==y else (), albumids)): if bool(reduce(lambda x, y: x if x == y else (), albumids)):
albumid = albumids[0] albumid = albumids[0]
log.debug('Searching for discovered album ID: ' + albumid) log.debug(u'Searching for discovered album ID: {0}', albumid)
return hooks.album_for_mbid(albumid) return hooks.album_for_mbid(albumid)
else: else:
log.debug('No album ID consensus.') log.debug(u'No album ID consensus.')
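
The consensus test above leans on a small `reduce()` trick: folding with a function that keeps the value while every element is equal, and collapsing to a falsy `()` on the first mismatch. A standalone sketch of the same check:

    from functools import reduce

    def have_consensus(albumids):
        # Falsy () as soon as two IDs differ; the common ID otherwise.
        return bool(reduce(lambda x, y: x if x == y else (), albumids))

    assert have_consensus(['1fa0c5a1'] * 3)
    assert not have_consensus(['1fa0c5a1', 'deadbeef'])
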
def _recommendation(results): def _recommendation(results):
"""Given a sorted list of AlbumMatch or TrackMatch objects, return a """Given a sorted list of AlbumMatch or TrackMatch objects, return a
@ -268,26 +286,26 @@ def _recommendation(results):
""" """
if not results: if not results:
# No candidates: no recommendation. # No candidates: no recommendation.
return recommendation.none return Recommendation.none
# Basic distance thresholding. # Basic distance thresholding.
min_dist = results[0].distance min_dist = results[0].distance
if min_dist < config['match']['strong_rec_thresh'].as_number(): if min_dist < config['match']['strong_rec_thresh'].as_number():
# Strong recommendation level. # Strong recommendation level.
rec = recommendation.strong rec = Recommendation.strong
elif min_dist <= config['match']['medium_rec_thresh'].as_number(): elif min_dist <= config['match']['medium_rec_thresh'].as_number():
# Medium recommendation level. # Medium recommendation level.
rec = recommendation.medium rec = Recommendation.medium
elif len(results) == 1: elif len(results) == 1:
# Only a single candidate. # Only a single candidate.
rec = recommendation.low rec = Recommendation.low
elif results[1].distance - min_dist >= \ elif results[1].distance - min_dist >= \
config['match']['rec_gap_thresh'].as_number(): config['match']['rec_gap_thresh'].as_number():
# Gap between first two candidates is large. # Gap between first two candidates is large.
rec = recommendation.low rec = Recommendation.low
else: else:
# No conclusion. Return immediately. Can't be downgraded any further. # No conclusion. Return immediately. Can't be downgraded any further.
return recommendation.none return Recommendation.none
# Downgrade to the max rec if it is lower than the current rec for an # Downgrade to the max rec if it is lower than the current rec for an
# applied penalty. # applied penalty.
@ -299,28 +317,40 @@ def _recommendation(results):
for key in keys: for key in keys:
if key in max_rec_view.keys(): if key in max_rec_view.keys():
max_rec = max_rec_view[key].as_choice({ max_rec = max_rec_view[key].as_choice({
'strong': recommendation.strong, 'strong': Recommendation.strong,
'medium': recommendation.medium, 'medium': Recommendation.medium,
'low': recommendation.low, 'low': Recommendation.low,
'none': recommendation.none, 'none': Recommendation.none,
}) })
rec = min(rec, max_rec) rec = min(rec, max_rec)
return rec return rec
def _add_candidate(items, results, info): def _add_candidate(items, results, info):
"""Given a candidate AlbumInfo object, attempt to add the candidate """Given a candidate AlbumInfo object, attempt to add the candidate
to the output dictionary of AlbumMatch objects. This involves to the output dictionary of AlbumMatch objects. This involves
checking the track count, ordering the items, checking for checking the track count, ordering the items, checking for
duplicates, and calculating the distance. duplicates, and calculating the distance.
""" """
log.debug('Candidate: %s - %s' % (info.artist, info.album)) log.debug(u'Candidate: {0} - {1}', info.artist, info.album)
# Discard albums with zero tracks.
if not info.tracks:
log.debug(u'No tracks.')
return
# Don't duplicate. # Don't duplicate.
if info.album_id in results: if info.album_id in results:
log.debug('Duplicate.') log.debug(u'Duplicate.')
return return
# Discard matches without required tags.
for req_tag in config['match']['required'].as_str_seq():
if getattr(info, req_tag) is None:
log.debug(u'Ignored. Missing required tag: {0}', req_tag)
return
# Find mapping between the items and the track info. # Find mapping between the items and the track info.
mapping, extra_items, extra_tracks = assign_items(items, info.tracks) mapping, extra_items, extra_tracks = assign_items(items, info.tracks)
@ -328,42 +358,53 @@ def _add_candidate(items, results, info):
dist = distance(items, info, mapping) dist = distance(items, info, mapping)
# Skip matches with ignored penalties. # Skip matches with ignored penalties.
penalties = [key for _, key in dist] penalties = [key for key, _ in dist]
for penalty in config['match']['ignored'].as_str_seq(): for penalty in config['match']['ignored'].as_str_seq():
if penalty in penalties: if penalty in penalties:
log.debug('Ignored. Penalty: %s' % penalty) log.debug(u'Ignored. Penalty: {0}', penalty)
return return
log.debug('Success. Distance: %f' % dist) log.debug(u'Success. Distance: {0}', dist)
results[info.album_id] = hooks.AlbumMatch(dist, info, mapping, results[info.album_id] = hooks.AlbumMatch(dist, info, mapping,
extra_items, extra_tracks) extra_items, extra_tracks)
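
The penalties fix above (`for key, _ in dist` instead of `for _, key in dist`) implies that iterating a `Distance` now yields `(key, value)` pairs. A sketch of the ignored-penalty filter, with a plain list of pairs standing in for a `Distance` and for `config['match']['ignored']`:

    # (key, value) pairs as yielded by iterating a Distance object.
    dist_pairs = [('album', 0.1), ('unmatched_tracks', 0.2)]
    ignored = ['unmatched_tracks']     # stands in for config['match']['ignored']

    penalties = [key for key, _ in dist_pairs]
    assert any(penalty in penalties for penalty in ignored)  # candidate skipped
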
def tag_album(items, search_artist=None, search_album=None, def tag_album(items, search_artist=None, search_album=None,
search_id=None): search_ids=[]):
"""Bundles together the functionality used to infer tags for a """Return a tuple of a artist name, an album name, a list of
set of items comprised by an album. Returns everything relevant: `AlbumMatch` candidates from the metadata backend, and a
- The current artist. `Recommendation`.
- The current album.
- A list of AlbumMatch objects. The candidates are sorted by The artist and album are the most common values of these fields
distance (i.e., best match first). among `items`.
- A recommendation.
If search_artist and search_album or search_id are provided, then The `AlbumMatch` objects are generated by searching the metadata
they are used as search terms in place of the current metadata. backends. By default, the metadata of the items is used for the
search. This can be customized by setting the parameters.
`search_ids` is a list of metadata backend IDs: if specified,
it will restrict the candidates to those IDs, ignoring
`search_artist` and `search_album`. The `mapping` field of the
album has the matched `items` as keys.
The recommendation is calculated from the match quality of the
candidates.
""" """
# Get current metadata. # Get current metadata.
likelies, consensus = current_metadata(items) likelies, consensus = current_metadata(items)
cur_artist = likelies['artist'] cur_artist = likelies['artist']
cur_album = likelies['album'] cur_album = likelies['album']
log.debug('Tagging %s - %s' % (cur_artist, cur_album)) log.debug(u'Tagging {0} - {1}', cur_artist, cur_album)
# The output result (distance, AlbumInfo) tuples (keyed by MB album # The output result (distance, AlbumInfo) tuples (keyed by MB album
# ID). # ID).
candidates = {} candidates = {}
# Search by explicit ID. # Search by explicit ID.
if search_id is not None: if search_ids:
log.debug('Searching for album ID: ' + search_id) search_cands = []
search_cands = hooks.albums_for_id(search_id) for search_id in search_ids:
log.debug(u'Searching for album ID: {0}', search_id)
search_cands.extend(hooks.albums_for_id(search_id))
# Use existing metadata or text search. # Use existing metadata or text search.
else: else:
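
The `search_id` parameter becomes a `search_ids` list here, and each ID is looked up independently with the results concatenated. A runnable sketch of that loop, with a stub standing in for `hooks.albums_for_id`:

    def albums_for_id(search_id):        # stub for hooks.albums_for_id
        return ['album-info-for-' + search_id]

    search_ids = ['id-one', 'id-two']
    search_cands = []
    for search_id in search_ids:
        search_cands.extend(albums_for_id(search_id))

    assert search_cands == ['album-info-for-id-one', 'album-info-for-id-two']
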
@ -372,32 +413,32 @@ def tag_album(items, search_artist=None, search_album=None,
if id_info: if id_info:
_add_candidate(items, candidates, id_info) _add_candidate(items, candidates, id_info)
rec = _recommendation(candidates.values()) rec = _recommendation(candidates.values())
log.debug('Album ID match recommendation is ' + str(rec)) log.debug(u'Album ID match recommendation is {0}', rec)
if candidates and not config['import']['timid']: if candidates and not config['import']['timid']:
# If we have a very good MBID match, return immediately. # If we have a very good MBID match, return immediately.
# Otherwise, this match will compete against metadata-based # Otherwise, this match will compete against metadata-based
# matches. # matches.
if rec == recommendation.strong: if rec == Recommendation.strong:
log.debug('ID match.') log.debug(u'ID match.')
return cur_artist, cur_album, candidates.values(), rec return cur_artist, cur_album, candidates.values(), rec
# Search terms. # Search terms.
if not (search_artist and search_album): if not (search_artist and search_album):
# No explicit search terms -- use current metadata. # No explicit search terms -- use current metadata.
search_artist, search_album = cur_artist, cur_album search_artist, search_album = cur_artist, cur_album
log.debug(u'Search terms: %s - %s' % (search_artist, search_album)) log.debug(u'Search terms: {0} - {1}', search_artist, search_album)
# Is this album likely to be a "various artist" release? # Is this album likely to be a "various artist" release?
va_likely = ((not consensus['artist']) or va_likely = ((not consensus['artist']) or
(search_artist.lower() in VA_ARTISTS) or (search_artist.lower() in VA_ARTISTS) or
any(item.comp for item in items)) any(item.comp for item in items))
log.debug(u'Album might be VA: %s' % str(va_likely)) log.debug(u'Album might be VA: {0}', va_likely)
# Get the results from the data sources. # Get the results from the data sources.
search_cands = hooks.album_candidates(items, search_artist, search_cands = hooks.album_candidates(items, search_artist,
search_album, va_likely) search_album, va_likely)
log.debug(u'Evaluating %i candidates.' % len(search_cands)) log.debug(u'Evaluating {0} candidates.', len(search_cands))
for info in search_cands: for info in search_cands:
_add_candidate(items, candidates, info) _add_candidate(items, candidates, info)
@ -406,43 +447,47 @@ def tag_album(items, search_artist=None, search_album=None,
rec = _recommendation(candidates) rec = _recommendation(candidates)
return cur_artist, cur_album, candidates, rec return cur_artist, cur_album, candidates, rec
def tag_item(item, search_artist=None, search_title=None, def tag_item(item, search_artist=None, search_title=None,
search_id=None): search_ids=[]):
"""Attempts to find metadata for a single track. Returns a """Attempts to find metadata for a single track. Returns a
`(candidates, recommendation)` pair where `candidates` is a list of `(candidates, recommendation)` pair where `candidates` is a list of
TrackMatch objects. `search_artist` and `search_title` may be used TrackMatch objects. `search_artist` and `search_title` may be used
to override the current metadata for the purposes of the MusicBrainz to override the current metadata for the purposes of the MusicBrainz
title; likewise `search_id`. title. `search_ids` may be used for restricting the search to a list
of metadata backend IDs.
""" """
# Holds candidates found so far: keys are MBIDs; values are # Holds candidates found so far: keys are MBIDs; values are
# (distance, TrackInfo) pairs. # (distance, TrackInfo) pairs.
candidates = {} candidates = {}
# First, try matching by MusicBrainz ID. # First, try matching by MusicBrainz ID.
trackid = search_id or item.mb_trackid trackids = search_ids or filter(None, [item.mb_trackid])
if trackid: if trackids:
log.debug('Searching for track ID: ' + trackid) for trackid in trackids:
for track_info in hooks.tracks_for_id(trackid): log.debug(u'Searching for track ID: {0}', trackid)
dist = track_distance(item, track_info, incl_artist=True) for track_info in hooks.tracks_for_id(trackid):
candidates[track_info.track_id] = \ dist = track_distance(item, track_info, incl_artist=True)
candidates[track_info.track_id] = \
hooks.TrackMatch(dist, track_info) hooks.TrackMatch(dist, track_info)
# If this is a good match, then don't keep searching. # If this is a good match, then don't keep searching.
rec = _recommendation(candidates.values()) rec = _recommendation(sorted(candidates.itervalues()))
if rec == recommendation.strong and not config['import']['timid']: if rec == Recommendation.strong and \
log.debug('Track ID match.') not config['import']['timid']:
return candidates.values(), rec log.debug(u'Track ID match.')
return sorted(candidates.itervalues()), rec
# If we're searching by ID, don't proceed. # If we're searching by ID, don't proceed.
if search_id is not None: if search_ids:
if candidates: if candidates:
return candidates.values(), rec return sorted(candidates.itervalues()), rec
else: else:
return [], recommendation.none return [], Recommendation.none
# Search terms. # Search terms.
if not (search_artist and search_title): if not (search_artist and search_title):
search_artist, search_title = item.artist, item.title search_artist, search_title = item.artist, item.title
log.debug(u'Item search terms: %s - %s' % (search_artist, search_title)) log.debug(u'Item search terms: {0} - {1}', search_artist, search_title)
# Get and evaluate candidate metadata. # Get and evaluate candidate metadata.
for track_info in hooks.item_candidates(item, search_artist, search_title): for track_info in hooks.item_candidates(item, search_artist, search_title):
@ -450,7 +495,7 @@ def tag_item(item, search_artist=None, search_title=None,
candidates[track_info.track_id] = hooks.TrackMatch(dist, track_info) candidates[track_info.track_id] = hooks.TrackMatch(dist, track_info)
# Sort by distance and return with recommendation. # Sort by distance and return with recommendation.
log.debug('Found %i candidates.' % len(candidates)) log.debug(u'Found {0} candidates.', len(candidates))
candidates = sorted(candidates.itervalues()) candidates = sorted(candidates.itervalues())
rec = _recommendation(candidates) rec = _recommendation(candidates)
return candidates, rec return candidates, rec
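
`tag_item` gets the same multi-ID treatment, plus a fallback chain for track IDs: explicit `search_ids` win, otherwise the item's own MusicBrainz track ID is used when set (under Python 2, as here, `filter()` returns a list directly). A sketch of that fallback:

    def candidate_track_ids(search_ids, mb_trackid):
        # Mirrors: search_ids or filter(None, [item.mb_trackid])
        return search_ids or [t for t in [mb_trackid] if t]

    assert candidate_track_ids(['x'], 'y') == ['x']
    assert candidate_track_ids([], 'y') == ['y']
    assert candidate_track_ids([], None) == []
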
View file
@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
# This file is part of beets. # This file is part of beets.
# Copyright 2013, Adrian Sampson. # Copyright 2016, Adrian Sampson.
# #
# Permission is hereby granted, free of charge, to any person obtaining # Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the # a copy of this software and associated documentation files (the
@ -14,23 +15,25 @@
"""Searches for albums in the MusicBrainz database. """Searches for albums in the MusicBrainz database.
""" """
import logging from __future__ import division, absolute_import, print_function
import musicbrainzngs import musicbrainzngs
import re import re
import traceback import traceback
from urlparse import urljoin from urlparse import urljoin
from beets import logging
import beets.autotag.hooks import beets.autotag.hooks
import beets import beets
from beets import util from beets import util
from beets import config from beets import config
SEARCH_LIMIT = 5
VARIOUS_ARTISTS_ID = '89ad4ac3-39f7-470e-963a-56509c546377' VARIOUS_ARTISTS_ID = '89ad4ac3-39f7-470e-963a-56509c546377'
BASE_URL = 'http://musicbrainz.org/' BASE_URL = 'http://musicbrainz.org/'
musicbrainzngs.set_useragent('beets', beets.__version__, musicbrainzngs.set_useragent('beets', beets.__version__,
'http://beets.radbox.org/') 'http://beets.io/')
class MusicBrainzAPIError(util.HumanReadableException): class MusicBrainzAPIError(util.HumanReadableException):
"""An error while talking to MusicBrainz. The `query` field is the """An error while talking to MusicBrainz. The `query` field is the
@ -38,10 +41,12 @@ class MusicBrainzAPIError(util.HumanReadableException):
""" """
def __init__(self, reason, verb, query, tb=None): def __init__(self, reason, verb, query, tb=None):
self.query = query self.query = query
if isinstance(reason, musicbrainzngs.WebServiceError):
reason = u'MusicBrainz not reachable'
super(MusicBrainzAPIError, self).__init__(reason, verb, tb) super(MusicBrainzAPIError, self).__init__(reason, verb, tb)
def get_message(self): def get_message(self):
return u'"{0}" in {1} with query {2}'.format( return u'{0} in {1} with query {2}'.format(
self._reasonstr(), self.verb, repr(self.query) self._reasonstr(), self.verb, repr(self.query)
) )
@ -51,12 +56,15 @@ RELEASE_INCLUDES = ['artists', 'media', 'recordings', 'release-groups',
'labels', 'artist-credits', 'aliases'] 'labels', 'artist-credits', 'aliases']
TRACK_INCLUDES = ['artists', 'aliases'] TRACK_INCLUDES = ['artists', 'aliases']
def track_url(trackid): def track_url(trackid):
return urljoin(BASE_URL, 'recording/' + trackid) return urljoin(BASE_URL, 'recording/' + trackid)
def album_url(albumid): def album_url(albumid):
return urljoin(BASE_URL, 'release/' + albumid) return urljoin(BASE_URL, 'release/' + albumid)
def configure(): def configure():
"""Set up the python-musicbrainz-ngs module according to settings """Set up the python-musicbrainz-ngs module according to settings
from the beets configuration. This should be called at startup. from the beets configuration. This should be called at startup.
@ -67,6 +75,7 @@ def configure():
config['musicbrainz']['ratelimit'].get(int), config['musicbrainz']['ratelimit'].get(int),
) )
def _preferred_alias(aliases): def _preferred_alias(aliases):
"""Given an list of alias structures for an artist credit, select """Given an list of alias structures for an artist credit, select
and return the user's preferred alias alias or None if no matching and return the user's preferred alias alias or None if no matching
@ -81,13 +90,15 @@ def _preferred_alias(aliases):
# Search configured locales in order. # Search configured locales in order.
for locale in config['import']['languages'].as_str_seq(): for locale in config['import']['languages'].as_str_seq():
# Find matching primary aliases for this locale. # Find matching primary aliases for this locale.
matches = [a for a in aliases if a['locale'] == locale and 'primary' in a] matches = [a for a in aliases
if a['locale'] == locale and 'primary' in a]
# Skip to the next locale if we have no matches # Skip to the next locale if we have no matches
if not matches: if not matches:
continue continue
return matches[0] return matches[0]
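
The alias selection walks the configured locales in order and returns the first alias that is both in that locale and marked primary. A sketch with plain data standing in for the MusicBrainz alias structures and for `config['import']['languages']`:

    aliases = [
        {'locale': 'ja', 'alias': u'Bitoruzu'},
        {'locale': 'en', 'alias': u'The Beatles', 'primary': 'primary'},
    ]
    languages = ['ja', 'en']    # stands in for config['import']['languages']

    def preferred_alias(aliases):
        for locale in languages:
            matches = [a for a in aliases
                       if a['locale'] == locale and 'primary' in a]
            if matches:
                return matches[0]

    # The 'ja' alias loses despite coming first in the locale list,
    # because it is not marked primary.
    assert preferred_alias(aliases)['alias'] == u'The Beatles'
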
def _flatten_artist_credit(credit): def _flatten_artist_credit(credit):
"""Given a list representing an ``artist-credit`` block, flatten the """Given a list representing an ``artist-credit`` block, flatten the
data into a triple of joined artist name strings: canonical, sort, and data into a triple of joined artist name strings: canonical, sort, and
@ -133,6 +144,7 @@ def _flatten_artist_credit(credit):
''.join(artist_credit_parts), ''.join(artist_credit_parts),
) )
def track_info(recording, index=None, medium=None, medium_index=None, def track_info(recording, index=None, medium=None, medium_index=None,
medium_total=None): medium_total=None):
"""Translates a MusicBrainz recording result dictionary into a beets """Translates a MusicBrainz recording result dictionary into a beets
@ -149,6 +161,7 @@ def track_info(recording, index=None, medium=None, medium_index=None,
medium=medium, medium=medium,
medium_index=medium_index, medium_index=medium_index,
medium_total=medium_total, medium_total=medium_total,
data_source=u'MusicBrainz',
data_url=track_url(recording['id']), data_url=track_url(recording['id']),
) )
@ -167,6 +180,7 @@ def track_info(recording, index=None, medium=None, medium_index=None,
info.decode() info.decode()
return info return info
def _set_date_str(info, date_str, original=False): def _set_date_str(info, date_str, original=False):
"""Given a (possibly partial) YYYY-MM-DD string and an AlbumInfo """Given a (possibly partial) YYYY-MM-DD string and an AlbumInfo
object, set the object's release date fields appropriately. If object, set the object's release date fields appropriately. If
@ -186,6 +200,7 @@ def _set_date_str(info, date_str, original=False):
key = 'original_' + key key = 'original_' + key
setattr(info, key, date_num) setattr(info, key, date_num)
def album_info(release): def album_info(release):
"""Takes a MusicBrainz release result dictionary and returns a beets """Takes a MusicBrainz release result dictionary and returns a beets
AlbumInfo object containing the interesting data about that release. AlbumInfo object containing the interesting data about that release.
@ -199,7 +214,13 @@ def album_info(release):
index = 0 index = 0
for medium in release['medium-list']: for medium in release['medium-list']:
disctitle = medium.get('title') disctitle = medium.get('title')
for track in medium['track-list']: format = medium.get('format')
all_tracks = medium['track-list']
if 'pregap' in medium:
all_tracks.insert(0, medium['pregap'])
for track in all_tracks:
# Basic information from the recording. # Basic information from the recording.
index += 1 index += 1
ti = track_info( ti = track_info(
@ -210,6 +231,7 @@ def album_info(release):
len(medium['track-list']), len(medium['track-list']),
) )
ti.disctitle = disctitle ti.disctitle = disctitle
ti.media = format
# Prefer track data, where present, over recording data. # Prefer track data, where present, over recording data.
if track.get('title'): if track.get('title'):
@ -233,10 +255,12 @@ def album_info(release):
mediums=len(release['medium-list']), mediums=len(release['medium-list']),
artist_sort=artist_sort_name, artist_sort=artist_sort_name,
artist_credit=artist_credit_name, artist_credit=artist_credit_name,
data_source='MusicBrainz', data_source=u'MusicBrainz',
data_url=album_url(release['id']), data_url=album_url(release['id']),
) )
info.va = info.artist_id == VARIOUS_ARTISTS_ID info.va = info.artist_id == VARIOUS_ARTISTS_ID
if info.va:
info.artist = config['va_name'].get(unicode)
info.asin = release.get('asin') info.asin = release.get('asin')
info.releasegroup_id = release['release-group']['id'] info.releasegroup_id = release['release-group']['id']
info.country = release.get('country') info.country = release.get('country')
@ -288,7 +312,8 @@ def album_info(release):
info.decode() info.decode()
return info return info
def match_album(artist, album, tracks=None, limit=SEARCH_LIMIT):
def match_album(artist, album, tracks=None):
"""Searches for a single album ("release" in MusicBrainz parlance) """Searches for a single album ("release" in MusicBrainz parlance)
and returns an iterator over AlbumInfo objects. May raise a and returns an iterator over AlbumInfo objects. May raise a
MusicBrainzAPIError. MusicBrainzAPIError.
@ -297,21 +322,22 @@ def match_album(artist, album, tracks=None, limit=SEARCH_LIMIT):
optionally, a number of tracks on the album. optionally, a number of tracks on the album.
""" """
# Build search criteria. # Build search criteria.
criteria = {'release': album.lower()} criteria = {'release': album.lower().strip()}
if artist is not None: if artist is not None:
criteria['artist'] = artist.lower() criteria['artist'] = artist.lower().strip()
else: else:
# Various Artists search. # Various Artists search.
criteria['arid'] = VARIOUS_ARTISTS_ID criteria['arid'] = VARIOUS_ARTISTS_ID
if tracks is not None: if tracks is not None:
criteria['tracks'] = str(tracks) criteria['tracks'] = unicode(tracks)
# Abort if we have no search terms. # Abort if we have no search terms.
if not any(criteria.itervalues()): if not any(criteria.itervalues()):
return return
try: try:
res = musicbrainzngs.search_releases(limit=limit, **criteria) res = musicbrainzngs.search_releases(
limit=config['musicbrainz']['searchlimit'].get(int), **criteria)
except musicbrainzngs.MusicBrainzError as exc: except musicbrainzngs.MusicBrainzError as exc:
raise MusicBrainzAPIError(exc, 'release search', criteria, raise MusicBrainzAPIError(exc, 'release search', criteria,
traceback.format_exc()) traceback.format_exc())
@ -322,69 +348,74 @@ def match_album(artist, album, tracks=None, limit=SEARCH_LIMIT):
if albuminfo is not None: if albuminfo is not None:
yield albuminfo yield albuminfo
def match_track(artist, title, limit=SEARCH_LIMIT):
def match_track(artist, title):
"""Searches for a single track and returns an iterable of TrackInfo """Searches for a single track and returns an iterable of TrackInfo
objects. May raise a MusicBrainzAPIError. objects. May raise a MusicBrainzAPIError.
""" """
criteria = { criteria = {
'artist': artist.lower(), 'artist': artist.lower().strip(),
'recording': title.lower(), 'recording': title.lower().strip(),
} }
if not any(criteria.itervalues()): if not any(criteria.itervalues()):
return return
try: try:
res = musicbrainzngs.search_recordings(limit=limit, **criteria) res = musicbrainzngs.search_recordings(
limit=config['musicbrainz']['searchlimit'].get(int), **criteria)
except musicbrainzngs.MusicBrainzError as exc: except musicbrainzngs.MusicBrainzError as exc:
raise MusicBrainzAPIError(exc, 'recording search', criteria, raise MusicBrainzAPIError(exc, 'recording search', criteria,
traceback.format_exc()) traceback.format_exc())
for recording in res['recording-list']: for recording in res['recording-list']:
yield track_info(recording) yield track_info(recording)
def _parse_id(s): def _parse_id(s):
"""Search for a MusicBrainz ID in the given string and return it. If """Search for a MusicBrainz ID in the given string and return it. If
no ID can be found, return None. no ID can be found, return None.
""" """
# Find the first thing that looks like a UUID/MBID. # Find the first thing that looks like a UUID/MBID.
match = re.search('[a-f0-9]{8}(-[a-f0-9]{4}){3}-[a-f0-9]{12}', s) match = re.search(ur'[a-f0-9]{8}(-[a-f0-9]{4}){3}-[a-f0-9]{12}', s)
if match: if match:
return match.group() return match.group()
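
Because `_parse_id` searches for the first UUID-shaped substring rather than validating the whole string, a pasted MusicBrainz URL works just as well as a bare MBID. A quick standalone check of the same regex (the URL below is a made-up example):

    import re

    def parse_id(s):
        match = re.search(r'[a-f0-9]{8}(-[a-f0-9]{4}){3}-[a-f0-9]{12}', s)
        if match:
            return match.group()

    url = 'http://musicbrainz.org/release/9e873859-8aa4-4790-b985-5a953e8ef628'
    assert parse_id(url) == '9e873859-8aa4-4790-b985-5a953e8ef628'
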
def album_for_id(albumid):
def album_for_id(releaseid):
"""Fetches an album by its MusicBrainz ID and returns an AlbumInfo """Fetches an album by its MusicBrainz ID and returns an AlbumInfo
object or None if the album is not found. May raise a object or None if the album is not found. May raise a
MusicBrainzAPIError. MusicBrainzAPIError.
""" """
albumid = _parse_id(albumid) albumid = _parse_id(releaseid)
if not albumid: if not albumid:
log.error('Invalid MBID.') log.debug(u'Invalid MBID ({0}).', releaseid)
return return
try: try:
res = musicbrainzngs.get_release_by_id(albumid, res = musicbrainzngs.get_release_by_id(albumid,
RELEASE_INCLUDES) RELEASE_INCLUDES)
except musicbrainzngs.ResponseError: except musicbrainzngs.ResponseError:
log.debug('Album ID match failed.') log.debug(u'Album ID match failed.')
return None return None
except musicbrainzngs.MusicBrainzError as exc: except musicbrainzngs.MusicBrainzError as exc:
raise MusicBrainzAPIError(exc, 'get release by ID', albumid, raise MusicBrainzAPIError(exc, u'get release by ID', albumid,
traceback.format_exc()) traceback.format_exc())
return album_info(res['release']) return album_info(res['release'])
def track_for_id(trackid):
def track_for_id(releaseid):
"""Fetches a track by its MusicBrainz ID. Returns a TrackInfo object """Fetches a track by its MusicBrainz ID. Returns a TrackInfo object
or None if no track is found. May raise a MusicBrainzAPIError. or None if no track is found. May raise a MusicBrainzAPIError.
""" """
trackid = _parse_id(trackid) trackid = _parse_id(releaseid)
if not trackid: if not trackid:
log.error('Invalid MBID.') log.debug(u'Invalid MBID ({0}).', releaseid)
return return
try: try:
res = musicbrainzngs.get_recording_by_id(trackid, TRACK_INCLUDES) res = musicbrainzngs.get_recording_by_id(trackid, TRACK_INCLUDES)
except musicbrainzngs.ResponseError: except musicbrainzngs.ResponseError:
log.debug('Track ID match failed.') log.debug(u'Track ID match failed.')
return None return None
except musicbrainzngs.MusicBrainzError as exc: except musicbrainzngs.MusicBrainzError as exc:
raise MusicBrainzAPIError(exc, 'get recording by ID', trackid, raise MusicBrainzAPIError(exc, u'get recording by ID', trackid,
traceback.format_exc()) traceback.format_exc())
return track_info(res['recording']) return track_info(res['recording'])
View file
@ -5,6 +5,7 @@ import:
write: yes write: yes
copy: yes copy: yes
move: no move: no
link: no
delete: no delete: no
resume: ask resume: ask
incremental: no incremental: no
@ -20,9 +21,13 @@ import:
detail: no detail: no
flat: no flat: no
group_albums: no group_albums: no
pretend: false
search_ids: []
clutter: ["Thumbs.DB", ".DS_Store"] clutter: ["Thumbs.DB", ".DS_Store"]
ignore: [".*", "*~", "System Volume Information"] ignore: [".*", "*~", "System Volume Information", "lost+found"]
ignore_hidden: yes
replace: replace:
'[\\/]': _ '[\\/]': _
'^\.': _ '^\.': _
@ -32,27 +37,42 @@ replace:
'\s+$': '' '\s+$': ''
'^\s+': '' '^\s+': ''
path_sep_replace: _ path_sep_replace: _
asciify_paths: false
art_filename: cover art_filename: cover
max_filename_length: 0 max_filename_length: 0
plugins: [] plugins: []
pluginpath: [] pluginpath: []
threaded: yes threaded: yes
color: yes
timeout: 5.0 timeout: 5.0
per_disc_numbering: no per_disc_numbering: no
verbose: no verbose: 0
terminal_encoding: utf8 terminal_encoding:
original_date: no original_date: no
id3v23: no id3v23: no
va_name: "Various Artists"
ui: ui:
terminal_width: 80 terminal_width: 80
length_diff_thresh: 10.0 length_diff_thresh: 10.0
color: yes
colors:
text_success: green
text_warning: yellow
text_error: red
text_highlight: red
text_highlight_minor: lightgray
action_default: turquoise
action: blue
list_format_item: $artist - $album - $title format_item: $artist - $album - $title
list_format_album: $albumartist - $album format_album: $albumartist - $album
time_format: '%Y-%m-%d %H:%M:%S' time_format: '%Y-%m-%d %H:%M:%S'
format_raw_length: no
sort_album: albumartist+ album+
sort_item: artist+ album+ disc+ track+
sort_case_insensitive: yes
paths: paths:
default: $albumartist/$album%aunique{}/$track $title default: $albumartist/$album%aunique{}/$track $title
@ -65,6 +85,7 @@ musicbrainz:
host: musicbrainz.org host: musicbrainz.org
ratelimit: 1 ratelimit: 1
ratelimit_interval: 1.0 ratelimit_interval: 1.0
searchlimit: 5
match: match:
strong_rec_thresh: 0.04 strong_rec_thresh: 0.04
@ -98,5 +119,6 @@ match:
media: [] media: []
original_year: no original_year: no
ignored: [] ignored: []
required: []
track_length_grace: 10 track_length_grace: 10
track_length_max: 30 track_length_max: 30
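
Several of the new defaults above correspond one-to-one to lookups added elsewhere in this diff. A sketch of how they are read through beets' confuse-style config views (Python 2, hence `unicode`):

    from beets import config

    limit = config['musicbrainz']['searchlimit'].get(int)   # default: 5
    required = config['match']['required'].as_str_seq()     # default: []
    va_name = config['va_name'].get(unicode)                # "Various Artists"
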
View file
@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
# This file is part of beets. # This file is part of beets.
# Copyright 2014, Adrian Sampson. # Copyright 2016, Adrian Sampson.
# #
# Permission is hereby granted, free of charge, to any person obtaining # Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the # a copy of this software and associated documentation files (the
@ -15,6 +16,14 @@
"""DBCore is an abstract database package that forms the basis for beets' """DBCore is an abstract database package that forms the basis for beets'
Library. Library.
""" """
from __future__ import division, absolute_import, print_function
from .db import Model, Database from .db import Model, Database
from .query import Query, FieldQuery, MatchQuery, AndQuery, OrQuery from .query import Query, FieldQuery, MatchQuery, AndQuery, OrQuery
from .types import Type from .types import Type
from .queryparse import query_from_strings
from .queryparse import sort_from_strings
from .queryparse import parse_sorted_query
from .query import InvalidQueryError
# flake8: noqa
View file
@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
# This file is part of beets. # This file is part of beets.
# Copyright 2014, Adrian Sampson. # Copyright 2016, Adrian Sampson.
# #
# Permission is hereby granted, free of charge, to any person obtaining # Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the # a copy of this software and associated documentation files (the
@ -14,22 +15,70 @@
"""The central Model and Database constructs for DBCore. """The central Model and Database constructs for DBCore.
""" """
from __future__ import division, absolute_import, print_function
import time import time
import os import os
from collections import defaultdict from collections import defaultdict
import threading import threading
import sqlite3 import sqlite3
import contextlib import contextlib
import collections
import beets import beets
from beets.util.functemplate import Template from beets.util.functemplate import Template
from .query import MatchQuery from beets.dbcore import types
from .query import MatchQuery, NullSort, TrueQuery
class FormattedMapping(collections.Mapping):
"""A `dict`-like formatted view of a model.
The accessor `mapping[key]` returns the formatted version of
`model[key]` as a unicode string.
If `for_path` is true, all path separators in the formatted values
are replaced.
"""
def __init__(self, model, for_path=False):
self.for_path = for_path
self.model = model
self.model_keys = model.keys(True)
def __getitem__(self, key):
if key in self.model_keys:
return self._get_formatted(self.model, key)
else:
raise KeyError(key)
def __iter__(self):
return iter(self.model_keys)
def __len__(self):
return len(self.model_keys)
def get(self, key, default=None):
if default is None:
default = self.model._type(key).format(None)
return super(FormattedMapping, self).get(key, default)
def _get_formatted(self, model, key):
value = model._type(key).format(model.get(key))
if isinstance(value, bytes):
value = value.decode('utf8', 'ignore')
if self.for_path:
sep_repl = beets.config['path_sep_replace'].get(unicode)
for sep in (os.path.sep, os.path.altsep):
if sep:
value = value.replace(sep, sep_repl)
return value
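
`FormattedMapping` replaces the old eager `_formatted_mapping()` dict with a read-only view that formats values only when they are accessed. A stripped-down sketch of the same shape, with `unicode()` standing in for the per-field `Type` formatters:

    import collections

    class StubFormatted(collections.Mapping):
        def __init__(self, model):
            self.model = model

        def __getitem__(self, key):
            return unicode(self.model[key])   # real code defers to field Types

        def __iter__(self):
            return iter(self.model)

        def __len__(self):
            return len(self.model)

    fm = StubFormatted({'track': 7, 'title': u'Loro'})
    assert fm['track'] == u'7'
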
# Abstract base for model classes. # Abstract base for model classes.
class Model(object): class Model(object):
"""An abstract object representing an object in the database. Model """An abstract object representing an object in the database. Model
objects act like dictionaries (i.e., they allow subscript access like objects act like dictionaries (i.e., they allow subscript access like
@ -66,12 +115,7 @@ class Model(object):
_fields = {} _fields = {}
"""A mapping indicating available "fixed" fields on this type. The """A mapping indicating available "fixed" fields on this type. The
keys are field names and the values are Type objects. keys are field names and the values are `Type` objects.
"""
_bytes_keys = ()
"""Keys whose values should be stored as raw bytes blobs rather than
strings.
""" """
_search_fields = () _search_fields = ()
@ -79,6 +123,21 @@ class Model(object):
terms. terms.
""" """
_types = {}
"""Optional Types for non-fixed (i.e., flexible and computed) fields.
"""
_sorts = {}
"""Optional named sort criteria. The keys are strings and the values
are subclasses of `Sort`.
"""
_always_dirty = False
"""By default, fields only become "dirty" when their value actually
changes. Enabling this flag marks fields as dirty even when the new
value is the same as the old value (e.g., `o.f = o.f`).
"""
@classmethod @classmethod
def _getters(cls): def _getters(cls):
"""Return a mapping from field names to getter functions. """Return a mapping from field names to getter functions.
@ -94,7 +153,6 @@ class Model(object):
# As above: we could consider caching this result. # As above: we could consider caching this result.
raise NotImplementedError() raise NotImplementedError()
# Basic operation. # Basic operation.
def __init__(self, db=None, **values): def __init__(self, db=None, **values):
@ -110,6 +168,20 @@ class Model(object):
self.update(values) self.update(values)
self.clear_dirty() self.clear_dirty()
@classmethod
def _awaken(cls, db=None, fixed_values={}, flex_values={}):
"""Create an object with values drawn from the database.
This is a performance optimization: the checks involved with
ordinary construction are bypassed.
"""
obj = cls(db)
for key, value in fixed_values.iteritems():
obj._values_fixed[key] = cls._type(key).from_sql(value)
for key, value in flex_values.iteritems():
obj._values_flex[key] = cls._type(key).from_sql(value)
return obj
def __repr__(self): def __repr__(self):
return '{0}({1})'.format( return '{0}({1})'.format(
type(self).__name__, type(self).__name__,
@ -128,13 +200,23 @@ class Model(object):
exception is raised otherwise. exception is raised otherwise.
""" """
if not self._db: if not self._db:
raise ValueError('{0} has no database'.format(type(self).__name__)) raise ValueError(
u'{0} has no database'.format(type(self).__name__)
)
if need_id and not self.id: if need_id and not self.id:
raise ValueError('{0} has no id'.format(type(self).__name__)) raise ValueError(u'{0} has no id'.format(type(self).__name__))
# Essential field accessors. # Essential field accessors.
@classmethod
def _type(cls, key):
"""Get the type of a field, a `Type` instance.
If the field has no explicit type, it is given the base `Type`,
which does no conversion.
"""
return cls._fields.get(key) or cls._types.get(key) or types.DEFAULT
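
`_type` resolves a field's `Type` with a three-step fallback: fixed schema fields first, then plugin-declared flexible types, then a permissive default. The same chain with plain placeholders:

    _fields = {'track': 'INTEGER-TYPE'}   # fixed schema fields
    _types = {'rating': 'FLOAT-TYPE'}     # flexible/computed field types
    DEFAULT = 'NO-OP-TYPE'                # stands in for types.DEFAULT

    def field_type(key):
        return _fields.get(key) or _types.get(key) or DEFAULT

    assert field_type('track') == 'INTEGER-TYPE'
    assert field_type('rating') == 'FLOAT-TYPE'
    assert field_type('comment') == 'NO-OP-TYPE'
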
def __getitem__(self, key): def __getitem__(self, key):
"""Get the value for a field. Raise a KeyError if the field is """Get the value for a field. Raise a KeyError if the field is
not available. not available.
@ -152,11 +234,19 @@ class Model(object):
def __setitem__(self, key, value): def __setitem__(self, key, value):
"""Assign the value for a field. """Assign the value for a field.
""" """
source = self._values_fixed if key in self._fields \ # Choose where to place the value.
else self._values_flex if key in self._fields:
source = self._values_fixed
else:
source = self._values_flex
# If the field has a type, filter the value.
value = self._type(key).normalize(value)
# Assign value and possibly mark as dirty.
old_value = source.get(key) old_value = source.get(key)
source[key] = value source[key] = value
if old_value != value: if self._always_dirty or old_value != value:
self._dirty.add(key) self._dirty.add(key)
def __delitem__(self, key): def __delitem__(self, key):
@ -166,11 +256,11 @@ class Model(object):
del self._values_flex[key] del self._values_flex[key]
self._dirty.add(key) # Mark for dropping on store. self._dirty.add(key) # Mark for dropping on store.
elif key in self._getters(): # Computed. elif key in self._getters(): # Computed.
raise KeyError('computed field {0} cannot be deleted'.format(key)) raise KeyError(u'computed field {0} cannot be deleted'.format(key))
elif key in self._fields: # Fixed. elif key in self._fields: # Fixed.
raise KeyError('fixed field {0} cannot be deleted'.format(key)) raise KeyError(u'fixed field {0} cannot be deleted'.format(key))
else: else:
raise KeyError('no such field {0}'.format(key)) raise KeyError(u'no such field {0}'.format(key))
def keys(self, computed=False): def keys(self, computed=False):
"""Get a list of available field names for this object. The """Get a list of available field names for this object. The
@ -183,6 +273,12 @@ class Model(object):
else: else:
return base_keys return base_keys
@classmethod
def all_keys(cls):
"""Get a list of available keys for objects of this type.
Includes fixed and computed fields.
"""
return list(cls._fields) + cls._getters().keys()
# Act like a dictionary. # Act like a dictionary.
@ -219,17 +315,16 @@ class Model(object):
""" """
return iter(self.keys()) return iter(self.keys())
# Convenient attribute access. # Convenient attribute access.
def __getattr__(self, key): def __getattr__(self, key):
if key.startswith('_'): if key.startswith('_'):
raise AttributeError('model has no attribute {0!r}'.format(key)) raise AttributeError(u'model has no attribute {0!r}'.format(key))
else: else:
try: try:
return self[key] return self[key]
except KeyError: except KeyError:
raise AttributeError('no such field {0!r}'.format(key)) raise AttributeError(u'no such field {0!r}'.format(key))
def __setattr__(self, key, value): def __setattr__(self, key, value):
if key.startswith('_'): if key.startswith('_'):
@ -243,7 +338,6 @@ class Model(object):
else: else:
del self[key] del self[key]
# Database interaction (CRUD methods). # Database interaction (CRUD methods).
def store(self): def store(self):
@ -252,19 +346,15 @@ class Model(object):
self._check_db() self._check_db()
# Build assignments for query. # Build assignments for query.
assignments = '' assignments = []
subvars = [] subvars = []
for key in self._fields: for key in self._fields:
if key != 'id' and key in self._dirty: if key != 'id' and key in self._dirty:
self._dirty.remove(key) self._dirty.remove(key)
assignments += key + '=?,' assignments.append(key + '=?')
value = self[key] value = self._type(key).to_sql(self[key])
# Wrap path strings in buffers so they get stored
# "in the raw".
if key in self._bytes_keys and isinstance(value, str):
value = buffer(value)
subvars.append(value) subvars.append(value)
assignments = assignments[:-1] # Knock off last , assignments = ','.join(assignments)
with self._db.transaction() as tx: with self._db.transaction() as tx:
# Main table update. # Main table update.
@ -301,7 +391,9 @@ class Model(object):
""" """
self._check_db() self._check_db()
stored_obj = self._db._get(type(self), self.id) stored_obj = self._db._get(type(self), self.id)
assert stored_obj is not None, "object {0} not in DB".format(self.id) assert stored_obj is not None, u"object {0} not in DB".format(self.id)
self._values_fixed = {}
self._values_flex = {}
self.update(dict(stored_obj)) self.update(dict(stored_obj))
self.clear_dirty() self.clear_dirty()
@ -344,76 +436,26 @@ class Model(object):
self._dirty.add(key) self._dirty.add(key)
self.store() self.store()
# Formatting and templating. # Formatting and templating.
@classmethod _formatter = FormattedMapping
def _format(cls, key, value, for_path=False):
"""Format a value as the given field for this model.
"""
# Format the value as a string according to its type, if any.
if key in cls._fields:
value = cls._fields[key].format(value)
# Formatting must result in a string. To deal with
# Python2isms, implicitly convert ASCII strings.
assert isinstance(value, basestring), \
u'field formatter must produce strings'
if isinstance(value, bytes):
value = value.decode('utf8', 'ignore')
elif not isinstance(value, unicode): def formatted(self, for_path=False):
# Fallback formatter. Convert to unicode at all cost.
if value is None:
value = u''
elif isinstance(value, basestring):
if isinstance(value, bytes):
value = value.decode('utf8', 'ignore')
else:
value = unicode(value)
if for_path:
sep_repl = beets.config['path_sep_replace'].get(unicode)
for sep in (os.path.sep, os.path.altsep):
if sep:
value = value.replace(sep, sep_repl)
return value
def _get_formatted(self, key, for_path=False):
"""Get a field value formatted as a string (`unicode` object)
for display to the user. If `for_path` is true, then the value
will be sanitized for inclusion in a pathname (i.e., path
separators will be removed from the value).
"""
return self._format(key, self.get(key), for_path)
def _formatted_mapping(self, for_path=False):
"""Get a mapping containing all values on this object formatted """Get a mapping containing all values on this object formatted
as human-readable strings. as human-readable unicode strings.
""" """
# In the future, this could be made "lazy" to avoid computing return self._formatter(self, for_path)
# fields unnecessarily.
out = {}
for key in self.keys(True):
out[key] = self._get_formatted(key, for_path)
return out
def evaluate_template(self, template, for_path=False): def evaluate_template(self, template, for_path=False):
"""Evaluate a template (a string or a `Template` object) using """Evaluate a template (a string or a `Template` object) using
the object's fields. If `for_path` is true, then no new path the object's fields. If `for_path` is true, then no new path
separators will be added to the template. separators will be added to the template.
""" """
# Build value mapping.
mapping = self._formatted_mapping(for_path)
# Get template functions.
funcs = self._template_funcs()
# Perform substitution. # Perform substitution.
if isinstance(template, basestring): if isinstance(template, basestring):
template = Template(template) template = Template(template)
return template.substitute(mapping, funcs) return template.substitute(self.formatted(for_path),
self._template_funcs())
# Parsing. # Parsing.
@ -422,65 +464,124 @@ class Model(object):
"""Parse a string as a value for the given key. """Parse a string as a value for the given key.
""" """
if not isinstance(string, basestring): if not isinstance(string, basestring):
raise TypeError("_parse() argument must be a string") raise TypeError(u"_parse() argument must be a string")
typ = cls._fields.get(key) return cls._type(key).parse(string)
if typ:
return typ.parse(string)
else:
# Fall back to unparsed string.
return string
def set_parse(self, key, string):
"""Set the object's key to a value represented by a string.
"""
self[key] = self._parse(key, string)
# Database controller and supporting interfaces. # Database controller and supporting interfaces.
class Results(object): class Results(object):
"""An item query result set. Iterating over the collection lazily """An item query result set. Iterating over the collection lazily
constructs LibModel objects that reflect database rows. constructs LibModel objects that reflect database rows.
""" """
def __init__(self, model_class, rows, db, query=None): def __init__(self, model_class, rows, db, query=None, sort=None):
"""Create a result set that will construct objects of type """Create a result set that will construct objects of type
`model_class`, which should be a subclass of `LibModel`, out of `model_class`.
the query result mapping in `rows`. The new objects are
associated with the database `db`. If `query` is provided, it is `model_class` is a subclass of `LibModel` that will be
used as a predicate to filter the results for a "slow query" that constructed. `rows` is a query result: a list of mappings. The
cannot be evaluated by the database directly. new objects will be associated with the database `db`.
If `query` is provided, it is used as a predicate to filter the
results for a "slow query" that cannot be evaluated by the
database directly. If `sort` is provided, it is used to sort the
full list of results before returning. This means it is a "slow
sort" and all objects must be built before returning the first
one.
""" """
self.model_class = model_class self.model_class = model_class
self.rows = rows self.rows = rows
self.db = db self.db = db
self.query = query self.query = query
self.sort = sort
# We keep a queue of rows we haven't yet consumed for
# materialization. We preserve the original total number of
# rows.
self._rows = rows
self._row_count = len(rows)
# The materialized objects corresponding to rows that have been
# consumed.
self._objects = []
def _get_objects(self):
"""Construct and generate Model objects for they query. The
objects are returned in the order emitted from the database; no
slow sort is applied.
For performance, this generator caches materialized objects to
avoid constructing them more than once. This way, iterating over
a `Results` object a second time should be much faster than the
first.
"""
index = 0 # Position in the materialized objects.
while index < len(self._objects) or self._rows:
# Are there previously-materialized objects to produce?
if index < len(self._objects):
yield self._objects[index]
index += 1
# Otherwise, we consume another row, materialize its object
# and produce it.
else:
while self._rows:
row = self._rows.pop(0)
obj = self._make_model(row)
# If there is a slow-query predicate, ensure that the
# object passes it.
if not self.query or self.query.match(obj):
self._objects.append(obj)
index += 1
yield obj
break
def __iter__(self): def __iter__(self):
"""Construct Python objects for all rows that pass the query """Construct and generate Model objects for all matching
predicate. objects, in sorted order.
""" """
for row in self.rows: if self.sort:
# Get the flexible attributes for the object. # Slow sort. Must build the full list first.
with self.db.transaction() as tx: objects = self.sort.sort(list(self._get_objects()))
flex_rows = tx.query( return iter(objects)
'SELECT * FROM {0} WHERE entity_id=?'.format(
self.model_class._flex_table else:
), # Objects are pre-sorted (i.e., by the database).
(row['id'],) return self._get_objects()
)
values = dict(row) def _make_model(self, row):
values.update( # Get the flexible attributes for the object.
dict((row['key'], row['value']) for row in flex_rows) with self.db.transaction() as tx:
flex_rows = tx.query(
'SELECT * FROM {0} WHERE entity_id=?'.format(
self.model_class._flex_table
),
(row['id'],)
) )
# Construct the Python object and yield it if it passes the cols = dict(row)
# predicate. values = dict((k, v) for (k, v) in cols.items()
obj = self.model_class(self.db, **values) if not k[:4] == 'flex')
if not self.query or self.query.match(obj): flex_values = dict((row['key'], row['value']) for row in flex_rows)
yield obj
# Construct the Python object
obj = self.model_class._awaken(self.db, values, flex_values)
return obj
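
The new `_get_objects` generator consumes each database row at most once and caches the materialized object, so a second iteration over the same `Results` replays the cache instead of rebuilding models. A stripped-down sketch of that cache-then-consume pattern (omitting the slow-query filter):

    class LazyResults(object):
        def __init__(self, rows):
            self._rows = list(rows)   # unconsumed raw rows
            self._objects = []        # materialized cache

        def __iter__(self):
            index = 0
            while index < len(self._objects) or self._rows:
                if index < len(self._objects):
                    yield self._objects[index]        # cached object
                else:
                    obj = {'row': self._rows.pop(0)}  # stand-in model build
                    self._objects.append(obj)
                    yield obj
                index += 1

    r = LazyResults([1, 2, 3])
    assert list(r) == list(r)   # the second pass hits the cache
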
def __len__(self): def __len__(self):
"""Get the number of matching objects. """Get the number of matching objects.
""" """
if self.query: if not self._rows:
# Fully materialized. Just count the objects.
return len(self._objects)
elif self.query:
# A slow query. Fall back to testing every object. # A slow query. Fall back to testing every object.
count = 0 count = 0
for obj in self: for obj in self:
@ -489,7 +590,7 @@ class Results(object):
else: else:
# A fast query. Just count the rows. # A fast query. Just count the rows.
return len(self.rows) return self._row_count
def __nonzero__(self): def __nonzero__(self):
"""Does this result contain any objects? """Does this result contain any objects?
@ -500,13 +601,18 @@ class Results(object):
"""Get the nth item in this result set. This is inefficient: all """Get the nth item in this result set. This is inefficient: all
items up to n are materialized and thrown away. items up to n are materialized and thrown away.
""" """
if not self._rows and not self.sort:
# Fully materialized and already in order. Just look up the
# object.
return self._objects[n]
it = iter(self) it = iter(self)
try: try:
for i in range(n): for i in range(n):
it.next() next(it)
return it.next() return next(it)
except StopIteration: except StopIteration:
raise IndexError('result index {0} out of range'.format(n)) raise IndexError(u'result index {0} out of range'.format(n))
def get(self): def get(self):
"""Return the first matching object, or None if no objects """Return the first matching object, or None if no objects
@ -514,7 +620,7 @@ class Results(object):
""" """
it = iter(self) it = iter(self)
try: try:
return it.next() return next(it)
except StopIteration: except StopIteration:
return None return None
@ -604,7 +710,6 @@ class Database(object):
self._make_table(model_cls._table, model_cls._fields) self._make_table(model_cls._table, model_cls._fields)
self._make_attribute_table(model_cls._flex_table) self._make_attribute_table(model_cls._flex_table)
# Primitive access control: connections and transactions. # Primitive access control: connections and transactions.
def _connection(self): def _connection(self):
@ -644,7 +749,6 @@ class Database(object):
""" """
return Transaction(self) return Transaction(self)
# Schema setup and migration. # Schema setup and migration.
def _make_table(self, table, fields): def _make_table(self, table, fields):
@ -698,27 +802,33 @@ class Database(object):
ON {0} (entity_id); ON {0} (entity_id);
""".format(flex_table)) """.format(flex_table))
# Querying. # Querying.
def _fetch(self, model_cls, query, order_by=None): def _fetch(self, model_cls, query=None, sort=None):
"""Fetch the objects of type `model_cls` matching the given """Fetch the objects of type `model_cls` matching the given
query. The query may be given as a string, string sequence, a query. The query may be given as a string, string sequence, a
Query object, or None (to fetch everything). If provided, Query object, or None (to fetch everything). `sort` is an
`order_by` is a SQLite ORDER BY clause for sorting. `Sort` object.
""" """
query = query or TrueQuery() # A null query.
sort = sort or NullSort() # Unsorted.
where, subvals = query.clause() where, subvals = query.clause()
order_by = sort.order_clause()
sql = "SELECT * FROM {0} WHERE {1}".format( sql = ("SELECT * FROM {0} WHERE {1} {2}").format(
model_cls._table, model_cls._table,
where or '1', where or '1',
"ORDER BY {0}".format(order_by) if order_by else '',
) )
if order_by:
sql += " ORDER BY {0}".format(order_by)
with self.transaction() as tx: with self.transaction() as tx:
rows = tx.query(sql, subvals) rows = tx.query(sql, subvals)
return Results(model_cls, rows, self, None if where else query) return Results(
model_cls, rows, self,
None if where else query, # Slow query component.
sort if sort.is_slow() else None, # Slow sort component.
)
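
`_fetch` now splits every lookup into a fast part that SQLite can evaluate and a slow remainder done in Python: the WHERE and ORDER BY clauses go into the SQL when available, and `Results` only receives the query or sort when it must be applied per-object. A sketch of the SQL-building half:

    def build_sql(table, where, order_by):
        # Fall back to WHERE 1 when the query has no fast SQL form.
        sql = "SELECT * FROM {0} WHERE {1}".format(table, where or '1')
        if order_by:
            sql += " ORDER BY {0}".format(order_by)
        return sql

    assert build_sql('items', '', '') == "SELECT * FROM items WHERE 1"
    assert (build_sql('items', 'year=?', 'year ASC')
            == "SELECT * FROM items WHERE year=? ORDER BY year ASC")
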
def _get(self, model_cls, id): def _get(self, model_cls, id):
"""Get a Model object by its id or None if the id does not """Get a Model object by its id or None if the id does not
View file
@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
# This file is part of beets. # This file is part of beets.
# Copyright 2014, Adrian Sampson. # Copyright 2016, Adrian Sampson.
# #
# Permission is hereby granted, free of charge, to any person obtaining # Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the # a copy of this software and associated documentation files (the
@ -14,9 +15,45 @@
"""The Query type hierarchy for DBCore. """The Query type hierarchy for DBCore.
""" """
from __future__ import division, absolute_import, print_function
import re import re
from operator import mul
from beets import util from beets import util
from datetime import datetime, timedelta from datetime import datetime, timedelta
import unicodedata
from functools import reduce
class ParsingError(ValueError):
"""Abstract class for any unparseable user-requested album/query
specification.
"""
class InvalidQueryError(ParsingError):
"""Represent any kind of invalid query.
The query should be a unicode string or a list, which will be space-joined.
"""
def __init__(self, query, explanation):
if isinstance(query, list):
query = " ".join(query)
message = u"'{0}': {1}".format(query, explanation)
super(InvalidQueryError, self).__init__(message)
class InvalidQueryArgumentTypeError(ParsingError):
"""Represent a query argument that could not be converted as expected.
It exists to be caught at higher stack levels so that a meaningful
InvalidQueryError (i.e., one that includes the query) can be raised.
"""
def __init__(self, what, expected, detail=None):
message = u"'{0}' is not {1}".format(what, expected)
if detail:
message = u"{0}: {1}".format(message, detail)
super(InvalidQueryArgumentTypeError, self).__init__(message)
class Query(object): class Query(object):
@ -24,9 +61,8 @@ class Query(object):
""" """
def clause(self): def clause(self):
"""Generate an SQLite expression implementing the query. """Generate an SQLite expression implementing the query.
Return a clause string, a sequence of substitution values for
the clause, and a Query object representing the "remainder" Return (clause, subvals) where clause is a valid sqlite
Returns (clause, subvals) where clause is a valid sqlite
WHERE clause implementing the query and subvals is a list of WHERE clause implementing the query and subvals is a list of
items to be substituted for ?s in the clause. items to be substituted for ?s in the clause.
""" """
@ -38,6 +74,15 @@ class Query(object):
""" """
raise NotImplementedError raise NotImplementedError
def __repr__(self):
return "{0.__class__.__name__}()".format(self)
def __eq__(self, other):
return type(self) == type(other)
def __hash__(self):
return 0
class FieldQuery(Query): class FieldQuery(Query):
"""An abstract query that searches in a specific field for a """An abstract query that searches in a specific field for a
@ -71,6 +116,17 @@ class FieldQuery(Query):
def match(self, item): def match(self, item):
return self.value_match(self.pattern, item.get(self.field)) return self.value_match(self.pattern, item.get(self.field))
def __repr__(self):
return ("{0.__class__.__name__}({0.field!r}, {0.pattern!r}, "
"{0.fast})".format(self))
def __eq__(self, other):
return super(FieldQuery, self).__eq__(other) and \
self.field == other.field and self.pattern == other.pattern
def __hash__(self):
return hash((self.field, hash(self.pattern)))
class MatchQuery(FieldQuery): class MatchQuery(FieldQuery):
"""A query that looks for exact matches in an item field.""" """A query that looks for exact matches in an item field."""
@ -82,6 +138,25 @@ class MatchQuery(FieldQuery):
return pattern == value return pattern == value
class NoneQuery(FieldQuery):
def __init__(self, field, fast=True):
super(NoneQuery, self).__init__(field, None, fast)
def col_clause(self):
return self.field + " IS NULL", ()
def match(self, item):
try:
return item[self.field] is None
except KeyError:
return True
def __repr__(self):
return "{0.__class__.__name__}({0.field!r}, {0.fast})".format(self)
class StringFieldQuery(FieldQuery): class StringFieldQuery(FieldQuery):
"""A FieldQuery that converts values to strings before matching """A FieldQuery that converts values to strings before matching
them. them.
@ -104,8 +179,11 @@ class StringFieldQuery(FieldQuery):
class SubstringQuery(StringFieldQuery): class SubstringQuery(StringFieldQuery):
"""A query that matches a substring in a specific item field.""" """A query that matches a substring in a specific item field."""
def col_clause(self): def col_clause(self):
search = '%' + (self.pattern.replace('\\','\\\\').replace('%','\\%') pattern = (self.pattern
.replace('_','\\_')) + '%' .replace('\\', '\\\\')
.replace('%', '\\%')
.replace('_', '\\_'))
search = '%' + pattern + '%'
clause = self.field + " like ? escape '\\'" clause = self.field + " like ? escape '\\'"
subvals = [search] subvals = [search]
return clause, subvals return clause, subvals
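As an illustration of the escaping above, a standalone replica that shows the clause and substitution value produced for a pattern containing LIKE metacharacters (the field name is hypothetical):

    def substring_clause(field, pattern):
        # Escape the backslash first, then the LIKE wildcards % and _.
        escaped = (pattern.replace('\\', '\\\\')
                          .replace('%', '\\%')
                          .replace('_', '\\_'))
        return field + " like ? escape '\\'", ['%' + escaped + '%']

    print(substring_clause('title', '100%_pure'))
    # -> ("title like ? escape '\\'", ['%100\\%\\_pure%'])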
@ -118,15 +196,31 @@ class SubstringQuery(StringFieldQuery):
class RegexpQuery(StringFieldQuery): class RegexpQuery(StringFieldQuery):
"""A query that matches a regular expression in a specific item """A query that matches a regular expression in a specific item
field. field.
Raises InvalidQueryError when the pattern is not a valid regular
expression.
""" """
def __init__(self, field, pattern, fast=True):
super(RegexpQuery, self).__init__(field, pattern, fast)
pattern = self._normalize(pattern)
try:
self.pattern = re.compile(pattern)
except re.error as exc:
# Invalid regular expression.
raise InvalidQueryArgumentTypeError(pattern,
u"a regular expression",
format(exc))
@staticmethod
def _normalize(s):
"""Normalize a Unicode string's representation (used on both
patterns and matched values).
"""
return unicodedata.normalize('NFC', s)
@classmethod @classmethod
def string_match(cls, pattern, value): def string_match(cls, pattern, value):
try: return pattern.search(cls._normalize(value)) is not None
res = re.search(pattern, value)
except re.error:
# Invalid regular expression.
return False
return res is not None
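Why the NFC normalization matters, as a runnable aside: the same accented character can arrive composed or decomposed, and a pattern built from one form will not match the other unless both sides are normalized first:

    import re
    import unicodedata

    composed = u'caf\u00e9'     # 'café' as one code point
    decomposed = u'cafe\u0301'  # 'e' plus a combining acute accent

    print(re.search(composed, decomposed) is not None)  # False
    nfc = lambda s: unicodedata.normalize('NFC', s)
    print(re.search(nfc(composed), nfc(decomposed)) is not None)  # True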
class BooleanQuery(MatchQuery): class BooleanQuery(MatchQuery):
@ -142,7 +236,7 @@ class BooleanQuery(MatchQuery):
class BytesQuery(MatchQuery): class BytesQuery(MatchQuery):
"""Match a raw bytes field (i.e., a path). This is a necessary hack """Match a raw bytes field (i.e., a path). This is a necessary hack
to work around the `sqlite3` module's desire to treat `str` and to work around the `sqlite3` module's desire to treat `bytes` and
`unicode` equivalently in Python 2. Always use this query instead of `unicode` equivalently in Python 2. Always use this query instead of
`MatchQuery` when matching on BLOB values. `MatchQuery` when matching on BLOB values.
""" """
@ -170,19 +264,26 @@ class NumericQuery(FieldQuery):
"""Matches numeric fields. A syntax using Ruby-style range ellipses """Matches numeric fields. A syntax using Ruby-style range ellipses
(``..``) lets users specify one- or two-sided ranges. For example, (``..``) lets users specify one- or two-sided ranges. For example,
``year:2001..`` finds music released since the turn of the century. ``year:2001..`` finds music released since the turn of the century.
Raises InvalidQueryError when the pattern does not represent an int or
a float.
""" """
def _convert(self, s): def _convert(self, s):
"""Convert a string to a numeric type (float or int). If the """Convert a string to a numeric type (float or int).
string cannot be converted, return None.
Return None if `s` is empty.
Raise an InvalidQueryError if the string cannot be converted.
""" """
# This is really just a bit of fun premature optimization. # This is really just a bit of fun premature optimization.
if not s:
return None
try: try:
return int(s) return int(s)
except ValueError: except ValueError:
try: try:
return float(s) return float(s)
except ValueError: except ValueError:
return None raise InvalidQueryArgumentTypeError(s, u"an int or a float")
def __init__(self, field, pattern, fast=True): def __init__(self, field, pattern, fast=True):
super(NumericQuery, self).__init__(field, pattern, fast) super(NumericQuery, self).__init__(field, pattern, fast)
@ -200,7 +301,9 @@ class NumericQuery(FieldQuery):
self.rangemax = self._convert(parts[1]) self.rangemax = self._convert(parts[1])
def match(self, item): def match(self, item):
value = getattr(item, self.field) if self.field not in item:
return False
value = item[self.field]
if isinstance(value, basestring): if isinstance(value, basestring):
value = self._convert(value) value = self._convert(value)
@ -225,7 +328,7 @@ class NumericQuery(FieldQuery):
elif self.rangemax is not None: elif self.rangemax is not None:
return u'{0} <= ?'.format(self.field), (self.rangemax,) return u'{0} <= ?'.format(self.field), (self.rangemax,)
else: else:
return '1', () return u'1', ()
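A standalone sketch of the Ruby-style range parsing described above (the helper name is illustrative; NumericQuery itself stores the two ends as `rangemin` and `rangemax`):

    def parse_range(pattern):
        # "2001.." -> (2001, None); "..1999" -> (None, 1999);
        # a bare "2001" is treated here as a point query.
        parts = pattern.split('..', 1)
        if len(parts) == 1:
            point = int(parts[0])
            return point, point
        low = int(parts[0]) if parts[0] else None
        high = int(parts[1]) if parts[1] else None
        return low, high

    print(parse_range('2001..'))      # (2001, None)
    print(parse_range('1990..1999'))  # (1990, 1999)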
class CollectionQuery(Query): class CollectionQuery(Query):
@ -236,17 +339,21 @@ class CollectionQuery(Query):
self.subqueries = subqueries self.subqueries = subqueries
# Act like a sequence. # Act like a sequence.
def __len__(self): def __len__(self):
return len(self.subqueries) return len(self.subqueries)
def __getitem__(self, key): def __getitem__(self, key):
return self.subqueries[key] return self.subqueries[key]
def __iter__(self): def __iter__(self):
return iter(self.subqueries) return iter(self.subqueries)
def __contains__(self, item): def __contains__(self, item):
return item in self.subqueries return item in self.subqueries
def clause_with_joiner(self, joiner): def clause_with_joiner(self, joiner):
"""Returns a clause created by joining together the clauses of """Return a clause created by joining together the clauses of
all subqueries with the string joiner (padded by spaces). all subqueries with the string joiner (padded by spaces).
""" """
clause_parts = [] clause_parts = []
@ -261,6 +368,19 @@ class CollectionQuery(Query):
clause = (' ' + joiner + ' ').join(clause_parts) clause = (' ' + joiner + ' ').join(clause_parts)
return clause, subvals return clause, subvals
def __repr__(self):
return "{0.__class__.__name__}({0.subqueries!r})".format(self)
def __eq__(self, other):
return super(CollectionQuery, self).__eq__(other) and \
self.subqueries == other.subqueries
def __hash__(self):
"""Since subqueries are mutable, this object should not be hashable.
However, for convenience purposes, it can be hashed.
"""
return reduce(mul, map(hash, self.subqueries), 1)
class AnyFieldQuery(CollectionQuery): class AnyFieldQuery(CollectionQuery):
"""A query that matches if a given FieldQuery subclass matches in """A query that matches if a given FieldQuery subclass matches in
@ -286,6 +406,17 @@ class AnyFieldQuery(CollectionQuery):
return True return True
return False return False
def __repr__(self):
return ("{0.__class__.__name__}({0.pattern!r}, {0.fields!r}, "
"{0.query_class.__name__})".format(self))
def __eq__(self, other):
return super(AnyFieldQuery, self).__eq__(other) and \
self.query_class == other.query_class
def __hash__(self):
return hash((self.pattern, tuple(self.fields), self.query_class))
class MutableCollectionQuery(CollectionQuery): class MutableCollectionQuery(CollectionQuery):
"""A collection query whose subqueries may be modified after the """A collection query whose subqueries may be modified after the
@ -316,6 +447,36 @@ class OrQuery(MutableCollectionQuery):
return any([q.match(item) for q in self.subqueries]) return any([q.match(item) for q in self.subqueries])
class NotQuery(Query):
"""A query that matches the negation of its `subquery`, as a shortcut for
performing `not(subquery)` without using regular expressions.
"""
def __init__(self, subquery):
self.subquery = subquery
def clause(self):
clause, subvals = self.subquery.clause()
if clause:
return 'not ({0})'.format(clause), subvals
else:
# If there is no clause, there is nothing to negate. All the logic
# is handled by match() for slow queries.
return clause, subvals
def match(self, item):
return not self.subquery.match(item)
def __repr__(self):
return "{0.__class__.__name__}({0.subquery!r})".format(self)
def __eq__(self, other):
return super(NotQuery, self).__eq__(other) and \
self.subquery == other.subquery
def __hash__(self):
return hash(('not', hash(self.subquery)))
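Assuming beets is importable, the wrapping is easy to see interactively; NotQuery simply parenthesizes and negates whatever SQL its subquery emits:

    from beets.dbcore.query import MatchQuery, NotQuery

    q = NotQuery(MatchQuery('year', 2001))
    print(q.clause())  # -> ('not (year = ?)', [2001])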
class TrueQuery(Query): class TrueQuery(Query):
"""A query that always matches.""" """A query that always matches."""
def clause(self): def clause(self):
@ -334,21 +495,15 @@ class FalseQuery(Query):
return False return False
# Time/date queries. # Time/date queries.
def _to_epoch_time(date): def _to_epoch_time(date):
"""Convert a `datetime` object to an integer number of seconds since """Convert a `datetime` object to an integer number of seconds since
the (local) Unix epoch. the (local) Unix epoch.
""" """
epoch = datetime.fromtimestamp(0) epoch = datetime.fromtimestamp(0)
delta = date - epoch delta = date - epoch
try: return int(delta.total_seconds())
return int(delta.total_seconds())
except AttributeError:
# datetime.timedelta.total_seconds() is not available on Python 2.6
return delta.seconds + delta.days * 24 * 3600
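Note that the epoch used is the *local* one (datetime.fromtimestamp(0)), so results line up with local timestamps. A quick standalone check:

    from datetime import datetime

    def to_epoch_time(date):
        # Whole seconds between `date` and the local Unix epoch.
        return int((date - datetime.fromtimestamp(0)).total_seconds())

    print(to_epoch_time(datetime.fromtimestamp(0)))      # 0
    print(to_epoch_time(datetime.fromtimestamp(86400)))  # 86400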
def _parse_periods(pattern): def _parse_periods(pattern):
@ -380,7 +535,7 @@ class Period(object):
precision (a string, one of "year", "month", or "day"). precision (a string, one of "year", "month", or "day").
""" """
if precision not in Period.precisions: if precision not in Period.precisions:
raise ValueError('Invalid precision ' + str(precision)) raise ValueError(u'Invalid precision {0}'.format(precision))
self.date = date self.date = date
self.precision = precision self.precision = precision
@ -393,10 +548,14 @@ class Period(object):
return None return None
ordinal = string.count('-') ordinal = string.count('-')
if ordinal >= len(cls.date_formats): if ordinal >= len(cls.date_formats):
raise ValueError('date is not in one of the formats ' # Too many components.
+ ', '.join(cls.date_formats)) return None
date_format = cls.date_formats[ordinal] date_format = cls.date_formats[ordinal]
date = datetime.strptime(string, date_format) try:
date = datetime.strptime(string, date_format)
except ValueError:
# Parsing failed.
return None
precision = cls.precisions[ordinal] precision = cls.precisions[ordinal]
return cls(date, precision) return cls(date, precision)
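A standalone sketch of that parsing rule: the number of hyphens selects both the strptime format and the precision, and anything unparseable now yields None instead of raising:

    from datetime import datetime

    date_formats = ('%Y', '%Y-%m', '%Y-%m-%d')
    precisions = ('year', 'month', 'day')

    def parse_period(string):
        ordinal = string.count('-')
        if ordinal >= len(date_formats):
            return None  # Too many components.
        try:
            date = datetime.strptime(string, date_formats[ordinal])
        except ValueError:
            return None  # Parsing failed.
        return date, precisions[ordinal]

    print(parse_period('2014-02'))     # (datetime.datetime(2014, 2, 1, 0, 0), 'month')
    print(parse_period('2014-02-30'))  # None: not a real date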
@ -416,7 +575,7 @@ class Period(object):
elif 'day' == precision: elif 'day' == precision:
return date + timedelta(days=1) return date + timedelta(days=1)
else: else:
raise ValueError('unhandled precision ' + str(precision)) raise ValueError(u'unhandled precision {0}'.format(precision))
class DateInterval(object): class DateInterval(object):
@ -428,7 +587,7 @@ class DateInterval(object):
def __init__(self, start, end): def __init__(self, start, end):
if start is not None and end is not None and not start < end: if start is not None and end is not None and not start < end:
raise ValueError("start date {0} is not before end date {1}" raise ValueError(u"start date {0} is not before end date {1}"
.format(start, end)) .format(start, end))
self.start = start self.start = start
self.end = end self.end = end
@ -449,7 +608,7 @@ class DateInterval(object):
return True return True
def __str__(self): def __str__(self):
return'[{0}, {1})'.format(self.start, self.end) return '[{0}, {1})'.format(self.start, self.end)
class DateQuery(FieldQuery): class DateQuery(FieldQuery):
@ -492,3 +651,208 @@ class DateQuery(FieldQuery):
# Match any date. # Match any date.
clause = '1' clause = '1'
return clause, subvals return clause, subvals
class DurationQuery(NumericQuery):
"""NumericQuery that allows human-friendly (M:SS) time interval formats.
Converts the range(s) to a float value, and delegates to NumericQuery.
Raises InvalidQueryError when the pattern does not represent an int, float
or M:SS time interval.
"""
def _convert(self, s):
"""Convert a M:SS or numeric string to a float.
Return None if `s` is empty.
Raise an InvalidQueryError if the string cannot be converted.
"""
if not s:
return None
try:
return util.raw_seconds_short(s)
except ValueError:
try:
return float(s)
except ValueError:
raise InvalidQueryArgumentTypeError(
s,
u"a M:SS string or a float")
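util.raw_seconds_short is what parses the M:SS form; a rough standalone stand-in (not the beets implementation) for experimenting with the conversion:

    def raw_seconds_short(string):
        # "4:23" -> 263.0; anything without a colon is rejected.
        minutes, sep, seconds = string.partition(':')
        if not sep:
            raise ValueError('not an M:SS string')
        return float(minutes) * 60 + float(seconds)

    print(raw_seconds_short('4:23'))  # 263.0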
# Sorting.
class Sort(object):
"""An abstract class representing a sort operation for a query into
the item database.
"""
def order_clause(self):
"""Generates a SQL fragment to be used in a ORDER BY clause, or
None if no fragment is used (i.e., this is a slow sort).
"""
return None
def sort(self, items):
"""Sort the list of objects and return a list.
"""
return sorted(items)
def is_slow(self):
"""Indicate whether this query is *slow*, meaning that it cannot
be executed in SQL and must be executed in Python.
"""
return False
def __hash__(self):
return 0
def __eq__(self, other):
return type(self) == type(other)
class MultipleSort(Sort):
"""Sort that encapsulates multiple sub-sorts.
"""
def __init__(self, sorts=None):
self.sorts = sorts or []
def add_sort(self, sort):
self.sorts.append(sort)
def _sql_sorts(self):
"""Return the list of sub-sorts for which we can be (at least
partially) fast.
A contiguous suffix of fast (SQL-capable) sub-sorts is
executable in SQL. The remaining sub-sorts, even if they are
fast independently, must be executed slowly.
"""
sql_sorts = []
for sort in reversed(self.sorts):
if sort.order_clause() is not None:
sql_sorts.append(sort)
else:
break
sql_sorts.reverse()
return sql_sorts
def order_clause(self):
order_strings = []
for sort in self._sql_sorts():
order = sort.order_clause()
order_strings.append(order)
return ", ".join(order_strings)
def is_slow(self):
for sort in self.sorts:
if sort.is_slow():
return True
return False
def sort(self, items):
slow_sorts = []
switch_slow = False
for sort in reversed(self.sorts):
if switch_slow:
slow_sorts.append(sort)
elif sort.order_clause() is None:
switch_slow = True
slow_sorts.append(sort)
else:
pass
for sort in slow_sorts:
items = sort.sort(items)
return items
def __repr__(self):
return 'MultipleSort({!r})'.format(self.sorts)
def __hash__(self):
return hash(tuple(self.sorts))
def __eq__(self, other):
return super(MultipleSort, self).__eq__(other) and \
self.sorts == other.sorts
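The slow path above leans on Python's sort being stable: applying the sub-sorts in reverse order leaves the earlier criteria dominant. A standalone illustration with plain dicts:

    rows = [{'artist': 'B', 'year': 1999},
            {'artist': 'A', 'year': 2001},
            {'artist': 'A', 'year': 1999}]

    # Secondary key first, then primary; stability preserves the
    # secondary ordering within equal primary keys.
    rows = sorted(rows, key=lambda r: r['year'])
    rows = sorted(rows, key=lambda r: r['artist'])
    print([(r['artist'], r['year']) for r in rows])
    # [('A', 1999), ('A', 2001), ('B', 1999)]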
class FieldSort(Sort):
"""An abstract sort criterion that orders by a specific field (of
any kind).
"""
def __init__(self, field, ascending=True, case_insensitive=True):
self.field = field
self.ascending = ascending
self.case_insensitive = case_insensitive
def sort(self, objs):
# TODO: Conversion and null-detection here. In Python 3,
# comparisons with None fail. We should also support flexible
# attributes with different types without falling over.
def key(item):
field_val = item.get(self.field, '')
if self.case_insensitive and isinstance(field_val, unicode):
field_val = field_val.lower()
return field_val
return sorted(objs, key=key, reverse=not self.ascending)
def __repr__(self):
return '<{0}: {1}{2}>'.format(
type(self).__name__,
self.field,
'+' if self.ascending else '-',
)
def __hash__(self):
return hash((self.field, self.ascending))
def __eq__(self, other):
return super(FieldSort, self).__eq__(other) and \
self.field == other.field and \
self.ascending == other.ascending
class FixedFieldSort(FieldSort):
"""Sort object to sort on a fixed field.
"""
def order_clause(self):
order = "ASC" if self.ascending else "DESC"
if self.case_insensitive:
field = '(CASE ' \
'WHEN TYPEOF({0})="text" THEN LOWER({0}) ' \
'WHEN TYPEOF({0})="blob" THEN LOWER({0}) ' \
'ELSE {0} END)'.format(self.field)
else:
field = self.field
return "{0} {1}".format(field, order)
class SlowFieldSort(FieldSort):
"""A sort criterion by some model field other than a fixed field:
i.e., a computed or flexible field.
"""
def is_slow(self):
return True
class NullSort(Sort):
"""No sorting. Leave results unsorted."""
def sort(self, items):
return items
def __nonzero__(self):
return self.__bool__()
def __bool__(self):
return False
def __eq__(self, other):
return type(self) == type(other) or other is None
def __hash__(self):
return 0


@ -0,0 +1,250 @@
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Parsing of strings into DBCore queries.
"""
from __future__ import division, absolute_import, print_function
import re
import itertools
from . import query
import beets
PARSE_QUERY_PART_REGEX = re.compile(
# Non-capturing optional segment for the keyword.
r'(-|\^)?' # Negation prefixes.
r'(?:'
r'(\S+?)' # The field key.
r'(?<!\\):' # Unescaped :
r')?'
r'(.*)', # The term itself.
re.I # Case-insensitive.
)
def parse_query_part(part, query_classes={}, prefixes={},
default_class=query.SubstringQuery):
"""Parse a single *query part*, which is a chunk of a complete query
string representing a single criterion.
A query part is a string consisting of:
- A *pattern*: the value to look for.
- Optionally, a *field name* preceding the pattern, separated by a
colon. So in `foo:bar`, `foo` is the field name and `bar` is the
pattern.
- Optionally, a *query prefix* just before the pattern (and after the
optional colon) indicating the type of query that should be used. For
example, in `~foo`, `~` might be a prefix. (The set of prefixes to
look for is given in the `prefixes` parameter.)
- Optionally, a negation indicator, `-` or `^`, at the very beginning.
Both prefixes and the separating `:` character may be escaped with a
backslash to avoid their normal meaning.
The function returns a tuple consisting of:
- The field name: a string or None if it's not present.
- The pattern, a string.
- The query class to use, which inherits from the base
:class:`Query` type.
- A negation flag, a bool.
The three optional parameters determine which query class is used (i.e.,
the third return value). They are:
- `query_classes`, which maps field names to query classes. These
are used when no explicit prefix is present.
- `prefixes`, which maps prefix strings to query classes.
- `default_class`, the fallback when neither the field nor a prefix
indicates a query class.
So the precedence for determining which query class to return is:
prefix, followed by field, and finally the default.
For example, assuming the `:` prefix is used for `RegexpQuery`:
- `'stapler'` -> `(None, 'stapler', SubstringQuery, False)`
- `'color:red'` -> `('color', 'red', SubstringQuery, False)`
- `':^Quiet'` -> `(None, '^Quiet', RegexpQuery, False)`, because
the `^` follows the `:`
- `'color::b..e'` -> `('color', 'b..e', RegexpQuery, False)`
- `'-color:red'` -> `('color', 'red', SubstringQuery, True)`
"""
# Apply the regular expression and extract the components.
part = part.strip()
match = PARSE_QUERY_PART_REGEX.match(part)
assert match # Regex should always match
negate = bool(match.group(1))
key = match.group(2)
term = match.group(3).replace('\:', ':')
# Check whether there's a prefix in the query and use the
# corresponding query type.
for pre, query_class in prefixes.items():
if term.startswith(pre):
return key, term[len(pre):], query_class, negate
# No matching prefix, so use either the query class determined by
# the field or the default as a fallback.
query_class = query_classes.get(key, default_class)
return key, term, query_class, negate
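Assuming the module is importable as beets.dbcore.queryparse, the docstring examples can be checked directly (here with ':' mapped to RegexpQuery, as the docstring assumes):

    from beets.dbcore import query
    from beets.dbcore.queryparse import parse_query_part

    prefixes = {':': query.RegexpQuery}
    print(parse_query_part('color:red', prefixes=prefixes))
    # ('color', 'red', <class '...SubstringQuery'>, False)
    print(parse_query_part('-color::b..e', prefixes=prefixes))
    # ('color', 'b..e', <class '...RegexpQuery'>, True)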
def construct_query_part(model_cls, prefixes, query_part):
"""Parse a *query part* string and return a :class:`Query` object.
:param model_cls: The :class:`Model` class that this is a query for.
This is used to determine the appropriate query types for the
model's fields.
:param prefixes: A map from prefix strings to :class:`Query` types.
:param query_part: The string to parse.
See the documentation for `parse_query_part` for more information on
query part syntax.
"""
# A shortcut for empty query parts.
if not query_part:
return query.TrueQuery()
# Use `model_cls` to build up a map from field names to `Query`
# classes.
query_classes = {}
for k, t in itertools.chain(model_cls._fields.items(),
model_cls._types.items()):
query_classes[k] = t.query
# Parse the string.
key, pattern, query_class, negate = \
parse_query_part(query_part, query_classes, prefixes)
# If there's no key (field name) specified, this is a "match
# anything" query.
if key is None:
if issubclass(query_class, query.FieldQuery):
# The query type matches a specific field, but none was
# specified. So we use a version of the query that matches
# any field.
q = query.AnyFieldQuery(pattern, model_cls._search_fields,
query_class)
if negate:
return query.NotQuery(q)
else:
return q
else:
# Non-field query type.
if negate:
return query.NotQuery(query_class(pattern))
else:
return query_class(pattern)
# Otherwise, this must be a `FieldQuery`. Use the field name to
# construct the query object.
key = key.lower()
q = query_class(key, pattern, key in model_cls._fields)
if negate:
return query.NotQuery(q)
return q
def query_from_strings(query_cls, model_cls, prefixes, query_parts):
"""Creates a collection query of type `query_cls` from a list of
strings in the format used by parse_query_part. `model_cls`
determines how queries are constructed from strings.
"""
subqueries = []
for part in query_parts:
subqueries.append(construct_query_part(model_cls, prefixes, part))
if not subqueries: # No terms in query.
subqueries = [query.TrueQuery()]
return query_cls(subqueries)
def construct_sort_part(model_cls, part):
"""Create a `Sort` from a single string criterion.
`model_cls` is the `Model` being queried. `part` is a single string
ending in ``+`` or ``-`` indicating the sort.
"""
assert part, "part must be a field name and + or -"
field = part[:-1]
assert field, "field is missing"
direction = part[-1]
assert direction in ('+', '-'), "part must end with + or -"
is_ascending = direction == '+'
case_insensitive = beets.config['sort_case_insensitive'].get(bool)
if field in model_cls._sorts:
sort = model_cls._sorts[field](model_cls, is_ascending,
case_insensitive)
elif field in model_cls._fields:
sort = query.FixedFieldSort(field, is_ascending, case_insensitive)
else:
# Flexible or computed.
sort = query.SlowFieldSort(field, is_ascending, case_insensitive)
return sort
def sort_from_strings(model_cls, sort_parts):
"""Create a `Sort` from a list of sort criteria (strings).
"""
if not sort_parts:
sort = query.NullSort()
elif len(sort_parts) == 1:
sort = construct_sort_part(model_cls, sort_parts[0])
else:
sort = query.MultipleSort()
for part in sort_parts:
sort.add_sort(construct_sort_part(model_cls, part))
return sort
def parse_sorted_query(model_cls, parts, prefixes={}):
"""Given a list of strings, create the `Query` and `Sort` that they
represent.
"""
# Separate query token and sort token.
query_parts = []
sort_parts = []
# Split the query into comma-separated subqueries, each representing
# an AndQuery, which need to be joined together in one OrQuery.
subquery_parts = []
for part in parts + [u',']:
if part.endswith(u','):
# Ensure we can catch "foo, bar" as well as "foo , bar"
last_subquery_part = part[:-1]
if last_subquery_part:
subquery_parts.append(last_subquery_part)
# Parse the subquery into a single AndQuery
# TODO: Avoid needlessly wrapping AndQueries containing 1 subquery?
query_parts.append(query_from_strings(
query.AndQuery, model_cls, prefixes, subquery_parts
))
del subquery_parts[:]
else:
# Sort parts (1) end in + or -, (2) don't have a field, and
# (3) consist of more than just the + or -.
if part.endswith((u'+', u'-')) \
and u':' not in part \
and len(part) > 1:
sort_parts.append(part)
else:
subquery_parts.append(part)
# Avoid needlessly wrapping single statements in an OR
q = query.OrQuery(query_parts) if len(query_parts) > 1 else query_parts[0]
s = sort_from_strings(model_cls, sort_parts)
return q, s
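The token classification is easy to try in isolation; a standalone sketch that only separates sort tokens from query tokens (no Query objects involved):

    def split_parts(parts):
        query_parts, sort_parts = [], []
        for part in parts:
            # Sort tokens end in + or -, contain no field separator,
            # and are longer than a bare + or -.
            if part.endswith(('+', '-')) and ':' not in part and len(part) > 1:
                sort_parts.append(part)
            else:
                query_parts.append(part)
        return query_parts, sort_parts

    print(split_parts(['beatles', 'year+', 'title:help']))
    # (['beatles', 'title:help'], ['year+'])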


@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
# This file is part of beets. # This file is part of beets.
# Copyright 2014, Adrian Sampson. # Copyright 2016, Adrian Sampson.
# #
# Permission is hereby granted, free of charge, to any person obtaining # Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the # a copy of this software and associated documentation files (the
@ -14,59 +15,117 @@
"""Representation of type information for DBCore model fields. """Representation of type information for DBCore model fields.
""" """
from __future__ import division, absolute_import, print_function
from . import query from . import query
from beets.util import str2bool from beets.util import str2bool
# Abstract base. # Abstract base.
class Type(object): class Type(object):
"""An object encapsulating the type of a model field. Includes """An object encapsulating the type of a model field. Includes
information about how to store the value in the database, query, information about how to store, query, format, and parse a given
format, and parse a given field. field.
""" """
sql = None sql = u'TEXT'
"""The SQLite column type for the value. """The SQLite column type for the value.
""" """
query = None query = query.SubstringQuery
"""The `Query` subclass to be used when querying the field. """The `Query` subclass to be used when querying the field.
""" """
model_type = unicode
"""The Python type that is used to represent the value in the model.
The model is guaranteed to return a value of this type if the field
is accessed. To this end, the constructor is used by the `normalize`
and `from_sql` methods and the `default` property.
"""
@property
def null(self):
"""The value to be exposed when the underlying value is None.
"""
return self.model_type()
def format(self, value): def format(self, value):
"""Given a value of this type, produce a Unicode string """Given a value of this type, produce a Unicode string
representing the value. This is used in template evaluation. representing the value. This is used in template evaluation.
""" """
raise NotImplementedError() if value is None:
value = self.null
# `self.null` might be `None`
if value is None:
value = u''
if isinstance(value, bytes):
value = value.decode('utf8', 'ignore')
return unicode(value)
def parse(self, string): def parse(self, string):
"""Parse a (possibly human-written) string and return the """Parse a (possibly human-written) string and return the
indicated value of this type. indicated value of this type.
""" """
raise NotImplementedError() try:
return self.model_type(string)
except ValueError:
return self.null
def normalize(self, value):
"""Given a value that will be assigned into a field of this
type, normalize the value to have the appropriate type. This
base implementation only reinterprets `None`.
"""
if value is None:
return self.null
else:
# TODO This should eventually be replaced by
# `self.model_type(value)`
return value
def from_sql(self, sql_value):
"""Receive the value stored in the SQL backend and return the
value to be stored in the model.
For fixed fields the type of `value` is determined by the column
type affinity given in the `sql` property and the SQL to Python
mapping of the database adapter. For more information see:
http://www.sqlite.org/datatype3.html
https://docs.python.org/2/library/sqlite3.html#sqlite-and-python-types
Flexible fields have the type affinity `TEXT`. This means the
`sql_value` is either a `buffer` or a `unicode` object and the
method must handle these cases as well.
"""
if isinstance(sql_value, buffer):
sql_value = bytes(sql_value).decode('utf8', 'ignore')
if isinstance(sql_value, unicode):
return self.parse(sql_value)
else:
return self.normalize(sql_value)
def to_sql(self, model_value):
"""Convert a value as stored in the model object to a value used
by the database adapter.
"""
return model_value
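A quick sketch of how these hooks compose, using one of the concrete types below (assuming beets.dbcore.types is importable):

    from beets.dbcore import types

    t = types.Integer()
    print(t.parse(u'42'))     # 42
    print(t.parse(u'oops'))   # 0, i.e. Integer's null value
    print(t.format(42))       # prints 42, returned as a Unicode string
    print(t.normalize(None))  # 0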
# Reusable types. # Reusable types.
class Default(Type):
null = None
class Integer(Type): class Integer(Type):
"""A basic integer type. """A basic integer type.
""" """
sql = u'INTEGER' sql = u'INTEGER'
query = query.NumericQuery query = query.NumericQuery
model_type = int
def format(self, value):
return unicode(value or 0)
def parse(self, string):
try:
return int(string)
except ValueError:
return 0
class PaddedInt(Integer): class PaddedInt(Integer):
@ -93,9 +152,14 @@ class ScaledInt(Integer):
class Id(Integer): class Id(Integer):
"""An integer used as the row key for a SQLite table. """An integer used as the row id or a foreign key in a SQLite table.
This type is nullable: None values are not translated to zero.
""" """
sql = u'INTEGER PRIMARY KEY' null = None
def __init__(self, primary=True):
if primary:
self.sql = u'INTEGER PRIMARY KEY'
class Float(Type): class Float(Type):
@ -103,15 +167,16 @@ class Float(Type):
""" """
sql = u'REAL' sql = u'REAL'
query = query.NumericQuery query = query.NumericQuery
model_type = float
def format(self, value): def format(self, value):
return u'{0:.1f}'.format(value or 0.0) return u'{0:.1f}'.format(value or 0.0)
def parse(self, string):
try: class NullFloat(Float):
return float(string) """Same as `Float`, but does not normalize `None` to `0.0`.
except ValueError: """
return 0.0 null = None
class String(Type): class String(Type):
@ -120,21 +185,27 @@ class String(Type):
sql = u'TEXT' sql = u'TEXT'
query = query.SubstringQuery query = query.SubstringQuery
def format(self, value):
return unicode(value) if value else u''
def parse(self, string):
return string
class Boolean(Type): class Boolean(Type):
"""A boolean type. """A boolean type.
""" """
sql = u'INTEGER' sql = u'INTEGER'
query = query.BooleanQuery query = query.BooleanQuery
model_type = bool
def format(self, value): def format(self, value):
return unicode(bool(value)) return unicode(bool(value))
def parse(self, string): def parse(self, string):
return str2bool(string) return str2bool(string)
# Shared instances of common types.
DEFAULT = Default()
INTEGER = Integer()
PRIMARY_ID = Id(True)
FOREIGN_ID = Id(False)
FLOAT = Float()
NULL_FLOAT = NullFloat()
STRING = String()
BOOLEAN = Boolean()

File diff suppressed because it is too large

File diff suppressed because it is too large

libs/beets/logging.py Normal file

@ -0,0 +1,133 @@
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""A drop-in replacement for the standard-library `logging` module that
allows {}-style log formatting on Python 2 and 3.
Provides everything the "logging" module does. The only difference is
that when getLogger(name) instantiates a logger, that logger uses
{}-style formatting.
"""
from __future__ import division, absolute_import, print_function
from copy import copy
from logging import * # noqa
import subprocess
import threading
def logsafe(val):
"""Coerce a potentially "problematic" value so it can be formatted
in a Unicode log string.
This works around a number of pitfalls when logging objects in
Python 2:
- Logging path names, which must be byte strings, requires
conversion for output.
- Some objects, including some exceptions, will crash when you call
`unicode(v)` while `str(v)` works fine. CalledProcessError is an
example.
"""
# Already Unicode.
if isinstance(val, unicode):
return val
# Bytestring: needs decoding.
elif isinstance(val, bytes):
# Blindly convert with UTF-8. Eventually, it would be nice to
# (a) only do this for paths, if they can be given a distinct
# type, and (b) warn the developer if they do this for other
# bytestrings.
return val.decode('utf8', 'replace')
# A "problem" object: needs a workaround.
elif isinstance(val, subprocess.CalledProcessError):
try:
return unicode(val)
except UnicodeDecodeError:
# An object with a broken __unicode__ formatter. Use __str__
# instead.
return str(val).decode('utf8', 'replace')
# Other objects are used as-is so field access, etc., still works in
# the format string.
else:
return val
class StrFormatLogger(Logger):
"""A version of `Logger` that uses `str.format`-style formatting
instead of %-style formatting.
"""
class _LogMessage(object):
def __init__(self, msg, args, kwargs):
self.msg = msg
self.args = args
self.kwargs = kwargs
def __str__(self):
args = [logsafe(a) for a in self.args]
kwargs = dict((k, logsafe(v)) for (k, v) in self.kwargs.items())
return self.msg.format(*args, **kwargs)
def _log(self, level, msg, args, exc_info=None, extra=None, **kwargs):
"""Log msg.format(*args, **kwargs)"""
m = self._LogMessage(msg, args, kwargs)
return super(StrFormatLogger, self)._log(level, m, (), exc_info, extra)
class ThreadLocalLevelLogger(Logger):
"""A version of `Logger` whose level is thread-local instead of shared.
"""
def __init__(self, name, level=NOTSET):
self._thread_level = threading.local()
self.default_level = NOTSET
super(ThreadLocalLevelLogger, self).__init__(name, level)
@property
def level(self):
try:
return self._thread_level.level
except AttributeError:
self._thread_level.level = self.default_level
return self.level
@level.setter
def level(self, value):
self._thread_level.level = value
def set_global_level(self, level):
"""Set the level on the current thread + the default value for all
threads.
"""
self.default_level = level
self.setLevel(level)
class BeetsLogger(ThreadLocalLevelLogger, StrFormatLogger):
pass
my_manager = copy(Logger.manager)
my_manager.loggerClass = BeetsLogger
def getLogger(name=None): # noqa
if name:
return my_manager.getLogger(name)
else:
return Logger.root
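Usage then mirrors the standard library, except that messages use str.format placeholders (a sketch assuming this module is importable as beets.logging):

    from beets import logging

    logging.basicConfig(level=logging.DEBUG)
    log = logging.getLogger('beets.demo')
    log.debug(u'processing {0} ({count} items)', 'library', count=3)
    # -> "processing library (3 items)", formatted lazily via str.format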

File diff suppressed because it is too large

@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
# This file is part of beets. # This file is part of beets.
# Copyright 2013, Adrian Sampson. # Copyright 2016, Adrian Sampson.
# #
# Permission is hereby granted, free of charge, to any person obtaining # Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the # a copy of this software and associated documentation files (the
@ -14,11 +15,17 @@
"""Support for beets plugins.""" """Support for beets plugins."""
import logging from __future__ import division, absolute_import, print_function
import inspect
import traceback import traceback
import re
from collections import defaultdict from collections import defaultdict
from functools import wraps
import beets import beets
from beets import logging
from beets import mediafile from beets import mediafile
PLUGIN_NAMESPACE = 'beetsplug' PLUGIN_NAMESPACE = 'beetsplug'
@ -30,6 +37,31 @@ LASTFM_KEY = '2dc3914abf35f0d9c92d97d8f8e42b43'
log = logging.getLogger('beets') log = logging.getLogger('beets')
class PluginConflictException(Exception):
"""Indicates that the services provided by one plugin conflict with
those of another.
For example two plugins may define different types for flexible fields.
"""
class PluginLogFilter(logging.Filter):
"""A logging filter that identifies the plugin that emitted a log
message.
"""
def __init__(self, plugin):
self.prefix = u'{0}: '.format(plugin.name)
def filter(self, record):
if hasattr(record.msg, 'msg') and isinstance(record.msg.msg,
basestring):
# A _LogMessage from our hacked-up Logging replacement.
record.msg.msg = self.prefix + record.msg.msg
elif isinstance(record.msg, basestring):
record.msg = self.prefix + record.msg
return True
# Managing the plugins themselves. # Managing the plugins themselves.
class BeetsPlugin(object): class BeetsPlugin(object):
@ -40,8 +72,6 @@ class BeetsPlugin(object):
def __init__(self, name=None): def __init__(self, name=None):
"""Perform one-time plugin setup. """Perform one-time plugin setup.
""" """
_add_media_fields(self.item_fields())
self.import_stages = []
self.name = name or self.__module__.split('.')[-1] self.name = name or self.__module__.split('.')[-1]
self.config = beets.config[self.name] self.config = beets.config[self.name]
if not self.template_funcs: if not self.template_funcs:
@ -50,6 +80,12 @@ class BeetsPlugin(object):
self.template_fields = {} self.template_fields = {}
if not self.album_template_fields: if not self.album_template_fields:
self.album_template_fields = {} self.album_template_fields = {}
self.import_stages = []
self._log = log.getChild(self.name)
self._log.setLevel(logging.NOTSET) # Use `beets` logger level.
if not any(isinstance(f, PluginLogFilter) for f in self._log.filters):
self._log.addFilter(PluginLogFilter(self))
def commands(self): def commands(self):
"""Should return a list of beets.ui.Subcommand objects for """Should return a list of beets.ui.Subcommand objects for
@ -57,6 +93,46 @@ class BeetsPlugin(object):
""" """
return () return ()
def get_import_stages(self):
"""Return a list of functions that should be called as importer
pipeline stages.
The callables are wrapped versions of the functions in
`self.import_stages`. Wrapping provides some bookkeeping for the
plugin: specifically, the logging level is adjusted to WARNING.
"""
return [self._set_log_level_and_params(logging.WARNING, import_stage)
for import_stage in self.import_stages]
def _set_log_level_and_params(self, base_log_level, func):
"""Wrap `func` to temporarily set this plugin's logger level to
`base_log_level` + config options (and restore it to its previous
value after the function returns). Also determines which parameters
may not be passed, for backwards compatibility.
"""
argspec = inspect.getargspec(func)
@wraps(func)
def wrapper(*args, **kwargs):
assert self._log.level == logging.NOTSET
verbosity = beets.config['verbose'].get(int)
log_level = max(logging.DEBUG, base_log_level - 10 * verbosity)
self._log.setLevel(log_level)
try:
try:
return func(*args, **kwargs)
except TypeError as exc:
if exc.args[0].startswith(func.__name__):
# caused by 'func' and not stuff internal to 'func'
kwargs = dict((arg, val) for arg, val in kwargs.items()
if arg in argspec.args)
return func(*args, **kwargs)
else:
raise
finally:
self._log.setLevel(logging.NOTSET)
return wrapper
def queries(self): def queries(self):
"""Should return a dict mapping prefixes to Query subclasses. """Should return a dict mapping prefixes to Query subclasses.
""" """
@ -86,14 +162,6 @@ class BeetsPlugin(object):
""" """
return () return ()
def item_fields(self):
"""Returns field descriptors to be added to the MediaFile class,
in the form of a dictionary whose keys are field names and whose
values are descriptor (e.g., MediaField) instances. The Library
database schema is not (currently) extended.
"""
return {}
def album_for_id(self, album_id): def album_for_id(self, album_id):
"""Return an AlbumInfo object or None if no matching release was """Return an AlbumInfo object or None if no matching release was
found. found.
@ -106,38 +174,36 @@ class BeetsPlugin(object):
""" """
return None return None
def add_media_field(self, name, descriptor):
"""Add a field that is synchronized between media files and items.
When a media field is added, ``item.write()`` will set the name
property of the item's MediaFile to ``item[name]`` and save the
changes. Similarly ``item.read()`` will set ``item[name]`` to
the value of the name property of the media file.
``descriptor`` must be an instance of ``mediafile.MediaField``.
"""
# Defer import to prevent a circular dependency
from beets import library
mediafile.MediaFile.add_field(name, descriptor)
library.Item._media_fields.add(name)
_raw_listeners = None
listeners = None listeners = None
@classmethod def register_listener(self, event, func):
def register_listener(cls, event, func): """Add a function as a listener for the specified event.
"""Add a function as a listener for the specified event. (An
imperative alternative to the @listen decorator.)
""" """
if cls.listeners is None: wrapped_func = self._set_log_level_and_params(logging.WARNING, func)
cls = self.__class__
if cls.listeners is None or cls._raw_listeners is None:
cls._raw_listeners = defaultdict(list)
cls.listeners = defaultdict(list) cls.listeners = defaultdict(list)
cls.listeners[event].append(func) if func not in cls._raw_listeners[event]:
cls._raw_listeners[event].append(func)
@classmethod cls.listeners[event].append(wrapped_func)
def listen(cls, event):
"""Decorator that adds a function as an event handler for the
specified event (as a string). The parameters passed to function
will vary depending on what event occurred.
The function should respond to named parameters.
function(**kwargs) will trap all arguments in a dictionary.
Example:
>>> @MyPlugin.listen("imported")
>>> def importListener(**kwargs):
>>> pass
"""
def helper(func):
if cls.listeners is None:
cls.listeners = defaultdict(list)
cls.listeners[event].append(func)
return func
return helper
template_funcs = None template_funcs = None
template_fields = None template_fields = None
@ -170,7 +236,10 @@ class BeetsPlugin(object):
return func return func
return helper return helper
_classes = set() _classes = set()
def load_plugins(names=()): def load_plugins(names=()):
"""Imports the modules for a sequence of plugin names. Each name """Imports the modules for a sequence of plugin names. Each name
must be the name of a Python module under the "beetsplug" namespace must be the name of a Python module under the "beetsplug" namespace
@ -178,14 +247,14 @@ def load_plugins(names=()):
BeetsPlugin subclasses desired. BeetsPlugin subclasses desired.
""" """
for name in names: for name in names:
modname = '%s.%s' % (PLUGIN_NAMESPACE, name) modname = '{0}.{1}'.format(PLUGIN_NAMESPACE, name)
try: try:
try: try:
namespace = __import__(modname, None, None) namespace = __import__(modname, None, None)
except ImportError as exc: except ImportError as exc:
# Again, this is hacky: # Again, this is hacky:
if exc.args[0].endswith(' ' + name): if exc.args[0].endswith(' ' + name):
log.warn('** plugin %s not found' % name) log.warn(u'** plugin {0} not found', name)
else: else:
raise raise
else: else:
@ -195,10 +264,16 @@ def load_plugins(names=()):
_classes.add(obj) _classes.add(obj)
except: except:
log.warn('** error loading plugin %s' % name) log.warn(
log.warn(traceback.format_exc()) u'** error loading plugin {}:\n{}',
name,
traceback.format_exc(),
)
_instances = {} _instances = {}
def find_plugins(): def find_plugins():
"""Returns a list of BeetsPlugin subclass instances from all """Returns a list of BeetsPlugin subclass instances from all
currently loaded beets plugins. Loads the default plugin set currently loaded beets plugins. Loads the default plugin set
@ -224,6 +299,7 @@ def commands():
out += plugin.commands() out += plugin.commands()
return out return out
def queries(): def queries():
"""Returns a dict mapping prefix strings to Query subclasses all loaded """Returns a dict mapping prefix strings to Query subclasses all loaded
plugins. plugins.
@ -233,6 +309,24 @@ def queries():
out.update(plugin.queries()) out.update(plugin.queries())
return out return out
def types(model_cls):
# Gives us `item_types` and `album_types`
attr_name = '{0}_types'.format(model_cls.__name__.lower())
types = {}
for plugin in find_plugins():
plugin_types = getattr(plugin, attr_name, {})
for field in plugin_types:
if field in types and plugin_types[field] != types[field]:
raise PluginConflictException(
u'Plugin {0} defines flexible field {1} '
u'which has already been defined with '
u'another type.'.format(plugin.name, field)
)
types.update(plugin_types)
return types
def track_distance(item, info): def track_distance(item, info):
"""Gets the track distance calculated by all loaded plugins. """Gets the track distance calculated by all loaded plugins.
Returns a Distance object. Returns a Distance object.
@ -243,6 +337,7 @@ def track_distance(item, info):
dist.update(plugin.track_distance(item, info)) dist.update(plugin.track_distance(item, info))
return dist return dist
def album_distance(items, album_info, mapping): def album_distance(items, album_info, mapping):
"""Returns the album distance calculated by plugins.""" """Returns the album distance calculated by plugins."""
from beets.autotag.hooks import Distance from beets.autotag.hooks import Distance
@ -251,6 +346,7 @@ def album_distance(items, album_info, mapping):
dist.update(plugin.album_distance(items, album_info, mapping)) dist.update(plugin.album_distance(items, album_info, mapping))
return dist return dist
def candidates(items, artist, album, va_likely): def candidates(items, artist, album, va_likely):
"""Gets MusicBrainz candidates for an album from each plugin. """Gets MusicBrainz candidates for an album from each plugin.
""" """
@ -259,6 +355,7 @@ def candidates(items, artist, album, va_likely):
out.extend(plugin.candidates(items, artist, album, va_likely)) out.extend(plugin.candidates(items, artist, album, va_likely))
return out return out
def item_candidates(item, artist, title): def item_candidates(item, artist, title):
"""Gets MusicBrainz candidates for an item from the plugins. """Gets MusicBrainz candidates for an item from the plugins.
""" """
@ -267,6 +364,7 @@ def item_candidates(item, artist, title):
out.extend(plugin.item_candidates(item, artist, title)) out.extend(plugin.item_candidates(item, artist, title))
return out return out
def album_for_id(album_id): def album_for_id(album_id):
"""Get AlbumInfo objects for a given ID string. """Get AlbumInfo objects for a given ID string.
""" """
@ -277,6 +375,7 @@ def album_for_id(album_id):
out.append(res) out.append(res)
return out return out
def track_for_id(track_id): def track_for_id(track_id):
"""Get TrackInfo objects for a given ID string. """Get TrackInfo objects for a given ID string.
""" """
@ -287,6 +386,7 @@ def track_for_id(track_id):
out.append(res) out.append(res)
return out return out
def template_funcs(): def template_funcs():
"""Get all the template functions declared by plugins as a """Get all the template functions declared by plugins as a
dictionary. dictionary.
@ -297,19 +397,12 @@ def template_funcs():
funcs.update(plugin.template_funcs) funcs.update(plugin.template_funcs)
return funcs return funcs
def _add_media_fields(fields):
"""Adds a {name: descriptor} dictionary of fields to the MediaFile
class. Called during the plugin initialization.
"""
for key, value in fields.iteritems():
setattr(mediafile.MediaFile, key, value)
def import_stages(): def import_stages():
"""Get a list of import stage functions defined by plugins.""" """Get a list of import stage functions defined by plugins."""
stages = [] stages = []
for plugin in find_plugins(): for plugin in find_plugins():
if hasattr(plugin, 'import_stages'): stages += plugin.get_import_stages()
stages += plugin.import_stages
return stages return stages
@ -325,6 +418,7 @@ def item_field_getters():
funcs.update(plugin.template_fields) funcs.update(plugin.template_fields)
return funcs return funcs
def album_field_getters(): def album_field_getters():
"""As above, for album fields. """As above, for album fields.
""" """
@ -348,12 +442,48 @@ def event_handlers():
all_handlers[event] += handlers all_handlers[event] += handlers
return all_handlers return all_handlers
def send(event, **arguments):
"""Sends an event to all assigned event listeners. Event is the
name of the event to send, all other named arguments go to the
event handler(s).
Returns a list of return values from the handlers. def send(event, **arguments):
"""Send an event to all assigned event listeners.
`event` is the name of the event to send, all other named arguments
are passed along to the handlers.
Return a list of non-None values returned from the handlers.
""" """
log.debug('Sending event: %s' % event) log.debug(u'Sending event: {0}', event)
return [handler(**arguments) for handler in event_handlers()[event]] results = []
for handler in event_handlers()[event]:
result = handler(**arguments)
if result is not None:
results.append(result)
return results
def feat_tokens(for_artist=True):
"""Return a regular expression that matches phrases like "featuring"
that separate a main artist or a song title from secondary artists.
The `for_artist` option determines whether the regex should be
suitable for matching artist fields (the default) or title fields.
"""
feat_words = ['ft', 'featuring', 'feat', 'feat.', 'ft.']
if for_artist:
feat_words += ['with', 'vs', 'and', 'con', '&']
return r'(?<=\s)(?:{0})(?=\s)'.format(
'|'.join(re.escape(x) for x in feat_words)
)
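For example, splitting a featured artist out of an artist string (a sketch assuming beets.plugins is importable):

    import re
    from beets.plugins import feat_tokens

    artist = u'Main Artist feat. Someone Else'
    print(re.split(feat_tokens(), artist, 1))
    # [u'Main Artist ', u' Someone Else']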
def sanitize_choices(choices, choices_all):
"""Clean up a stringlist configuration attribute: keep only elements
present in choices_all, remove duplicates, and expand the '*' wildcard
while keeping the original stringlist order.
"""
seen = set()
others = [x for x in choices_all if x not in choices]
res = []
for s in choices:
if s in list(choices_all) + ['*']:
if not (s in seen or seen.add(s)):
res.extend(list(others) if s == '*' else [s])
return res
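A couple of illustrative calls (assuming the function is importable from beets.plugins):

    from beets.plugins import sanitize_choices

    print(sanitize_choices(['a', 'x', 'a'], ['a', 'b', 'c']))  # ['a']
    print(sanitize_choices(['a', '*'], ['a', 'b', 'c']))       # ['a', 'b', 'c']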

File diff suppressed because it is too large

File diff suppressed because it is too large

@ -1,401 +0,0 @@
# This file is part of beets.
# Copyright 2013, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Conversion from legacy (pre-1.1) configuration to Confit/YAML
configuration.
"""
import os
import ConfigParser
import codecs
import yaml
import logging
import time
import itertools
import re
import beets
from beets import util
from beets import ui
from beets.util import confit
CONFIG_PATH_VAR = 'BEETSCONFIG'
DEFAULT_CONFIG_FILENAME_UNIX = '.beetsconfig'
DEFAULT_CONFIG_FILENAME_WINDOWS = 'beetsconfig.ini'
DEFAULT_LIBRARY_FILENAME_UNIX = '.beetsmusic.blb'
DEFAULT_LIBRARY_FILENAME_WINDOWS = 'beetsmusic.blb'
WINDOWS_BASEDIR = os.environ.get('APPDATA') or '~'
OLD_CONFIG_SUFFIX = '.old'
PLUGIN_NAMES = {
'rdm': 'random',
'fuzzy_search': 'fuzzy',
}
AUTO_KEYS = ('automatic', 'autofetch', 'autoembed', 'autoscrub')
IMPORTFEEDS_PREFIX = 'feeds_'
CONFIG_MIGRATED_MESSAGE = u"""
You appear to be upgrading from beets 1.0 (or earlier) to 1.1. Your
configuration file has been migrated automatically to:
{newconfig}
Edit this file to configure beets. You might want to remove your
old-style ".beetsconfig" file now. See the documentation for more
details on the new configuration system:
http://beets.readthedocs.org/page/reference/config.html
""".strip()
DB_MIGRATED_MESSAGE = u'Your database file has also been copied to:\n{newdb}'
YAML_COMMENT = '# Automatically migrated from legacy .beetsconfig.\n\n'
log = logging.getLogger('beets')
# An itertools recipe.
def grouper(n, iterable):
args = [iter(iterable)] * n
return itertools.izip_longest(*args)
def _displace(fn):
"""Move a file aside using a timestamp suffix so a new file can be
put in its place.
"""
util.move(
fn,
u'{0}.old.{1}'.format(fn, int(time.time())),
True
)
def default_paths():
"""Produces the appropriate default config and library database
paths for the current system. On Unix, this is always in ~. On
Windows, tries ~ first and then $APPDATA for the config and library
files (for backwards compatibility).
"""
windows = os.path.__name__ == 'ntpath'
if windows:
windata = os.environ.get('APPDATA') or '~'
# Shorthand for joining paths.
def exp(*vals):
return os.path.expanduser(os.path.join(*vals))
config = exp('~', DEFAULT_CONFIG_FILENAME_UNIX)
if windows and not os.path.exists(config):
config = exp(windata, DEFAULT_CONFIG_FILENAME_WINDOWS)
libpath = exp('~', DEFAULT_LIBRARY_FILENAME_UNIX)
if windows and not os.path.exists(libpath):
libpath = exp(windata, DEFAULT_LIBRARY_FILENAME_WINDOWS)
return config, libpath
def get_config():
"""Using the same logic as beets 1.0, locate and read the
.beetsconfig file. Return a ConfigParser instance or None if no
config is found.
"""
default_config, default_libpath = default_paths()
if CONFIG_PATH_VAR in os.environ:
configpath = os.path.expanduser(os.environ[CONFIG_PATH_VAR])
else:
configpath = default_config
config = ConfigParser.SafeConfigParser()
if os.path.exists(util.syspath(configpath)):
with codecs.open(configpath, 'r', encoding='utf-8') as f:
config.readfp(f)
return config, configpath
else:
return None, configpath
def flatten_config(config):
"""Given a ConfigParser, flatten the values into a dict-of-dicts
representation where each section gets its own dictionary of values.
"""
out = confit.OrderedDict()
for section in config.sections():
sec_dict = out[section] = confit.OrderedDict()
for option in config.options(section):
sec_dict[option] = config.get(section, option, True)
return out
def transform_value(value):
"""Given a string read as the value of a config option, return a
massaged version of that value (possibly with a different type).
"""
# Booleans.
if value.lower() in ('false', 'no', 'off'):
return False
elif value.lower() in ('true', 'yes', 'on'):
return True
# Integers.
try:
return int(value)
except ValueError:
pass
# Floats.
try:
return float(value)
except ValueError:
pass
return value
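# For example:
#   transform_value('yes')   -> True
#   transform_value('8')     -> 8
#   transform_value('0.75')  -> 0.75
#   transform_value('flac')  -> 'flac'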
def transform_data(data):
"""Given a dict-of-dicts representation of legacy config data, tweak
the data into a new form. This new form is suitable for dumping as
YAML.
"""
out = confit.OrderedDict()
for section, pairs in data.items():
if section == 'beets':
# The "main" section. In the new config system, these values
# are in the "root": no section at all.
for key, value in pairs.items():
value = transform_value(value)
if key.startswith('import_'):
# Importer config is now under an "import:" key.
if 'import' not in out:
out['import'] = confit.OrderedDict()
out['import'][key[7:]] = value
elif key == 'plugins':
# Renamed plugins.
plugins = value.split()
new_plugins = [PLUGIN_NAMES.get(p, p) for p in plugins]
out['plugins'] = ' '.join(new_plugins)
elif key == 'replace':
# YAMLy representation for character replacements.
replacements = confit.OrderedDict()
for pat, repl in grouper(2, value.split()):
if repl == '<strip>':
repl = ''
replacements[pat] = repl
out['replace'] = replacements
elif key == 'pluginpath':
# Used to be a colon-separated string. Now a list.
out['pluginpath'] = value.split(':')
else:
out[key] = value
elif pairs:
# Other sections (plugins, etc).
sec_out = out[section] = confit.OrderedDict()
for key, value in pairs.items():
# Standardized "auto" option.
if key in AUTO_KEYS:
key = 'auto'
# Unnecessary : hack in queries.
if section == 'paths':
key = key.replace('_', ':')
# Changed option names for importfeeds plugin.
if section == 'importfeeds':
if key.startswith(IMPORTFEEDS_PREFIX):
key = key[len(IMPORTFEEDS_PREFIX):]
sec_out[key] = transform_value(value)
return out
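# A hypothetical before/after: the legacy section
#   [beets]
#   import_copy = yes
#   pluginpath = ~/one:~/two
# becomes the nested structure
#   {'import': {'copy': True}, 'pluginpath': ['~/one', '~/two']}
# ready to be dumped as YAML.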
class Dumper(yaml.SafeDumper):
"""A PyYAML Dumper that represents OrderedDicts as ordinary mappings
(in order, of course).
"""
# From http://pyyaml.org/attachment/ticket/161/use_ordered_dict.py
def represent_mapping(self, tag, mapping, flow_style=None):
value = []
node = yaml.MappingNode(tag, value, flow_style=flow_style)
if self.alias_key is not None:
self.represented_objects[self.alias_key] = node
best_style = True
if hasattr(mapping, 'items'):
mapping = list(mapping.items())
for item_key, item_value in mapping:
node_key = self.represent_data(item_key)
node_value = self.represent_data(item_value)
if not (isinstance(node_key, yaml.ScalarNode) and \
not node_key.style):
best_style = False
if not (isinstance(node_value, yaml.ScalarNode) and \
not node_value.style):
best_style = False
value.append((node_key, node_value))
if flow_style is None:
if self.default_flow_style is not None:
node.flow_style = self.default_flow_style
else:
node.flow_style = best_style
return node
Dumper.add_representer(confit.OrderedDict, Dumper.represent_dict)
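# With the representer registered, dumping preserves insertion order;
# e.g. yaml.dump(confit.OrderedDict([('b', 1), ('a', 2)]), Dumper=Dumper)
# emits 'b: 1' before 'a: 2' instead of sorting the keys.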
def migrate_config(replace=False):
"""Migrate a legacy beetsconfig file to a new-style config.yaml file
in an appropriate place. If `replace` is enabled, then any existing
config.yaml will be moved aside. Otherwise, the process is aborted
when the file exists.
"""
# Load legacy configuration data, if any.
config, configpath = get_config()
if not config:
log.debug(u'no config file found at {0}'.format(
util.displayable_path(configpath)
))
return
# Get the new configuration file path and possibly move it out of
# the way.
destfn = os.path.join(beets.config.config_dir(), confit.CONFIG_FILENAME)
if os.path.exists(destfn):
if replace:
log.debug(u'moving old config aside: {0}'.format(
util.displayable_path(destfn)
))
_displace(destfn)
else:
# File exists and we won't replace it. We're done.
return
log.debug(u'migrating config file {0}'.format(
util.displayable_path(configpath)
))
# Convert the configuration to a data structure ready to be dumped
# as the new Confit file.
data = transform_data(flatten_config(config))
# Encode result as YAML.
yaml_out = yaml.dump(
data,
Dumper=Dumper,
default_flow_style=False,
indent=4,
width=1000,
)
# A ridiculous little hack to add some whitespace between "sections"
# in the YAML output. I hope this doesn't break any YAML syntax.
yaml_out = re.sub(r'(\n\w+:\n [^-\s])', '\n\\1', yaml_out)
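# A hypothetical illustration: given dumped output like
#   "import:\n    copy: yes\npaths:\n    default: ..."
# the substitution prefixes the "paths:" block with an extra newline,
# producing a blank line between top-level sections.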
yaml_out = YAML_COMMENT + yaml_out
# Write the data to the new config destination.
log.debug(u'writing migrated config to {0}'.format(
util.displayable_path(destfn)
))
with open(destfn, 'w') as f:
f.write(yaml_out)
return destfn
def migrate_db(replace=False):
"""Copy the beets library database file to the new location (e.g.,
from ~/.beetsmusic.blb to ~/.config/beets/library.db).
"""
_, srcfn = default_paths()
destfn = beets.config['library'].as_filename()
if not os.path.exists(srcfn) or srcfn == destfn:
# Old DB does not exist or we're configured to point to the same
# database. Do nothing.
return
if os.path.exists(destfn):
if replace:
log.debug(u'moving old database aside: {0}'.format(
util.displayable_path(destfn)
))
_displace(destfn)
else:
return
log.debug(u'copying database from {0} to {1}'.format(
util.displayable_path(srcfn), util.displayable_path(destfn)
))
util.copy(srcfn, destfn)
return destfn
def migrate_state(replace=False):
"""Copy the beets runtime state file from the old path (i.e.,
~/.beetsstate) to the new path (i.e., ~/.config/beets/state.pickle).
"""
srcfn = os.path.expanduser(os.path.join('~', '.beetsstate'))
if not os.path.exists(srcfn):
return
destfn = beets.config['statefile'].as_filename()
if os.path.exists(destfn):
if replace:
_displace(destfn)
else:
return
log.debug(u'copying state file from {0} to {1}'.format(
util.displayable_path(srcfn), util.displayable_path(destfn)
))
util.copy(srcfn, destfn)
return destfn
# Automatic migration when beets starts.
def automigrate():
"""Migrate the configuration, database, and state files. If any
migration occurs, print out a notice with some helpful next steps.
"""
config_fn = migrate_config()
db_fn = migrate_db()
migrate_state()
if config_fn:
ui.print_(ui.colorize('fuchsia', u'MIGRATED CONFIGURATION'))
ui.print_(CONFIG_MIGRATED_MESSAGE.format(
newconfig=util.displayable_path(config_fn))
)
if db_fn:
ui.print_(DB_MIGRATED_MESSAGE.format(
newdb=util.displayable_path(db_fn)
))
ui.input_(ui.colorize('fuchsia', u'Press ENTER to continue:'))
ui.print_()
# CLI command for explicit migration.
migrate_cmd = ui.Subcommand('migrate', help='convert legacy config')
def migrate_func(lib, opts, args):
"""Explicit command for migrating files. Existing files in each
destination are moved aside.
"""
config_fn = migrate_config(replace=True)
if config_fn:
log.info(u'Migrated configuration to: {0}'.format(
util.displayable_path(config_fn)
))
db_fn = migrate_db(replace=True)
if db_fn:
log.info(u'Migrated library database to: {0}'.format(
util.displayable_path(db_fn)
))
state_fn = migrate_state(replace=True)
if state_fn:
log.info(u'Migrated state file to: {0}'.format(
util.displayable_path(state_fn)
))
migrate_cmd.func = migrate_func
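# Once the subcommand is wired into the beets UI, the explicit
# migration can be run from the command line:
#   $ beet migrate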


@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
# This file is part of beets. # This file is part of beets.
# Copyright 2013, Adrian Sampson. # Copyright 2016, Adrian Sampson.
# #
# Permission is hereby granted, free of charge, to any person obtaining # Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the # a copy of this software and associated documentation files (the
@ -13,20 +14,25 @@
# included in all copies or substantial portions of the Software. # included in all copies or substantial portions of the Software.
"""Miscellaneous utility functions.""" """Miscellaneous utility functions."""
from __future__ import division
from __future__ import division, absolute_import, print_function
import os import os
import sys import sys
import re import re
import shutil import shutil
import fnmatch import fnmatch
from collections import defaultdict from collections import Counter
import traceback import traceback
import subprocess import subprocess
import platform
import shlex
from beets.util import hidden
MAX_FILENAME_LENGTH = 200 MAX_FILENAME_LENGTH = 200
WINDOWS_MAGIC_PREFIX = u'\\\\?\\' WINDOWS_MAGIC_PREFIX = u'\\\\?\\'
class HumanReadableException(Exception): class HumanReadableException(Exception):
"""An Exception that can include a human-readable error message to """An Exception that can include a human-readable error message to
be logged without a traceback. Can preserve a traceback for be logged without a traceback. Can preserve a traceback for
@ -51,12 +57,12 @@ class HumanReadableException(Exception):
def _gerund(self): def _gerund(self):
"""Generate a (likely) gerund form of the English verb. """Generate a (likely) gerund form of the English verb.
""" """
if ' ' in self.verb: if u' ' in self.verb:
return self.verb return self.verb
gerund = self.verb[:-1] if self.verb.endswith('e') else self.verb gerund = self.verb[:-1] if self.verb.endswith(u'e') else self.verb
gerund += 'ing' gerund += u'ing'
return gerund return gerund
def _reasonstr(self): def _reasonstr(self):
"""Get the reason as a string.""" """Get the reason as a string."""
if isinstance(self.reason, unicode): if isinstance(self.reason, unicode):
@ -80,7 +86,8 @@ class HumanReadableException(Exception):
""" """
if self.tb: if self.tb:
logger.debug(self.tb) logger.debug(self.tb)
logger.error(u'{0}: {1}'.format(self.error_kind, self.args[0])) logger.error(u'{0}: {1}', self.error_kind, self.args[0])
class FilesystemError(HumanReadableException): class FilesystemError(HumanReadableException):
"""An error that occurred while performing a filesystem manipulation """An error that occurred while performing a filesystem manipulation
@ -111,6 +118,7 @@ class FilesystemError(HumanReadableException):
return u'{0} {1}'.format(self._reasonstr(), clause) return u'{0} {1}'.format(self._reasonstr(), clause)
def normpath(path): def normpath(path):
"""Provide the canonical form of the path suitable for storing in """Provide the canonical form of the path suitable for storing in
the database. the database.
@ -119,6 +127,7 @@ def normpath(path):
path = os.path.normpath(os.path.abspath(os.path.expanduser(path))) path = os.path.normpath(os.path.abspath(os.path.expanduser(path)))
return bytestring_path(path) return bytestring_path(path)
def ancestry(path): def ancestry(path):
"""Return a list consisting of path's parent directory, its """Return a list consisting of path's parent directory, its
grandparent, and so on. For instance: grandparent, and so on. For instance:
@ -137,11 +146,13 @@ def ancestry(path):
break break
last_path = path last_path = path
if path: # don't yield '' if path:
# don't yield ''
out.insert(0, path) out.insert(0, path)
return out return out
def sorted_walk(path, ignore=(), logger=None):
def sorted_walk(path, ignore=(), ignore_hidden=False, logger=None):
"""Like `os.walk`, but yields things in case-insensitive sorted, """Like `os.walk`, but yields things in case-insensitive sorted,
breadth-first order. Directory and file names matching any glob breadth-first order. Directory and file names matching any glob
pattern in `ignore` are skipped. If `logger` is provided, then pattern in `ignore` are skipped. If `logger` is provided, then
@ -175,10 +186,11 @@ def sorted_walk(path, ignore=(), logger=None):
# Add to output as either a file or a directory. # Add to output as either a file or a directory.
cur = os.path.join(path, base) cur = os.path.join(path, base)
if os.path.isdir(syspath(cur)): if (ignore_hidden and not hidden.is_hidden(cur)) or not ignore_hidden:
dirs.append(base) if os.path.isdir(syspath(cur)):
else: dirs.append(base)
files.append(base) else:
files.append(base)
# Sort lists (case-insensitive) and yield the current level. # Sort lists (case-insensitive) and yield the current level.
dirs.sort(key=bytes.lower) dirs.sort(key=bytes.lower)
@ -189,9 +201,10 @@ def sorted_walk(path, ignore=(), logger=None):
for base in dirs: for base in dirs:
cur = os.path.join(path, base) cur = os.path.join(path, base)
# yield from sorted_walk(...) # yield from sorted_walk(...)
for res in sorted_walk(cur, ignore, logger): for res in sorted_walk(cur, ignore, ignore_hidden, logger):
yield res yield res
def mkdirall(path): def mkdirall(path):
"""Make all the enclosing directories of path (like mkdir -p on the """Make all the enclosing directories of path (like mkdir -p on the
parent). parent).
@ -204,6 +217,7 @@ def mkdirall(path):
raise FilesystemError(exc, 'create', (ancestor,), raise FilesystemError(exc, 'create', (ancestor,),
traceback.format_exc()) traceback.format_exc())
def fnmatch_all(names, patterns): def fnmatch_all(names, patterns):
"""Determine whether all strings in `names` match at least one of """Determine whether all strings in `names` match at least one of
the `patterns`, which should be shell glob expressions. the `patterns`, which should be shell glob expressions.
@ -218,6 +232,7 @@ def fnmatch_all(names, patterns):
return False return False
return True return True
def prune_dirs(path, root=None, clutter=('.DS_Store', 'Thumbs.db')): def prune_dirs(path, root=None, clutter=('.DS_Store', 'Thumbs.db')):
"""If path is an empty directory, then remove it. Recursively remove """If path is an empty directory, then remove it. Recursively remove
path's ancestry up to root (which is never removed) where there are path's ancestry up to root (which is never removed) where there are
@ -236,7 +251,7 @@ def prune_dirs(path, root=None, clutter=('.DS_Store', 'Thumbs.db')):
ancestors = [] ancestors = []
elif root in ancestors: elif root in ancestors:
# Only remove directories below the root. # Only remove directories below the root.
ancestors = ancestors[ancestors.index(root)+1:] ancestors = ancestors[ancestors.index(root) + 1:]
else: else:
# Remove nothing. # Remove nothing.
return return
@ -258,6 +273,7 @@ def prune_dirs(path, root=None, clutter=('.DS_Store', 'Thumbs.db')):
else: else:
break break
def components(path): def components(path):
"""Return a list of the path components in path. For instance: """Return a list of the path components in path. For instance:
@ -281,6 +297,7 @@ def components(path):
return comps return comps
def _fsencoding(): def _fsencoding():
"""Get the system's filesystem encoding. On Windows, this is always """Get the system's filesystem encoding. On Windows, this is always
UTF-8 (not MBCS). UTF-8 (not MBCS).
@ -295,12 +312,13 @@ def _fsencoding():
encoding = 'utf8' encoding = 'utf8'
return encoding return encoding
def bytestring_path(path): def bytestring_path(path):
"""Given a path, which is either a str or a unicode, returns a str """Given a path, which is either a bytes or a unicode, returns a str
path (ensuring that we never deal with Unicode pathnames). path (ensuring that we never deal with Unicode pathnames).
""" """
# Pass through bytestrings. # Pass through bytestrings.
if isinstance(path, str): if isinstance(path, bytes):
return path return path
# On Windows, remove the magic prefix added by `syspath`. This makes # On Windows, remove the magic prefix added by `syspath`. This makes
@ -315,6 +333,7 @@ def bytestring_path(path):
except (UnicodeError, LookupError): except (UnicodeError, LookupError):
return path.encode('utf8') return path.encode('utf8')
def displayable_path(path, separator=u'; '): def displayable_path(path, separator=u'; '):
"""Attempts to decode a bytestring path to a unicode object for the """Attempts to decode a bytestring path to a unicode object for the
purpose of displaying it to the user. If the `path` argument is a purpose of displaying it to the user. If the `path` argument is a
@ -324,7 +343,7 @@ def displayable_path(path, separator=u'; '):
return separator.join(displayable_path(p) for p in path) return separator.join(displayable_path(p) for p in path)
elif isinstance(path, unicode): elif isinstance(path, unicode):
return path return path
elif not isinstance(path, str): elif not isinstance(path, bytes):
# A non-string object: just get its unicode representation. # A non-string object: just get its unicode representation.
return unicode(path) return unicode(path)
@ -333,6 +352,7 @@ def displayable_path(path, separator=u'; '):
except (UnicodeError, LookupError): except (UnicodeError, LookupError):
return path.decode('utf8', 'ignore') return path.decode('utf8', 'ignore')
def syspath(path, prefix=True): def syspath(path, prefix=True):
"""Convert a path for use by the operating system. In particular, """Convert a path for use by the operating system. In particular,
paths on Windows must receive a magic prefix and must be converted paths on Windows must receive a magic prefix and must be converted
@ -356,16 +376,22 @@ def syspath(path, prefix=True):
encoding = sys.getfilesystemencoding() or sys.getdefaultencoding() encoding = sys.getfilesystemencoding() or sys.getdefaultencoding()
path = path.decode(encoding, 'replace') path = path.decode(encoding, 'replace')
# Add the magic prefix if it isn't already there # Add the magic prefix if it isn't already there.
# http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247.aspx
if prefix and not path.startswith(WINDOWS_MAGIC_PREFIX): if prefix and not path.startswith(WINDOWS_MAGIC_PREFIX):
if path.startswith(u'\\\\'):
# UNC path. Final path should look like \\?\UNC\...
path = u'UNC' + path[1:]
path = WINDOWS_MAGIC_PREFIX + path path = WINDOWS_MAGIC_PREFIX + path
return path return path
def samefile(p1, p2): def samefile(p1, p2):
"""Safer equality for paths.""" """Safer equality for paths."""
return shutil._samefile(syspath(p1), syspath(p2)) return shutil._samefile(syspath(p1), syspath(p2))
def remove(path, soft=True): def remove(path, soft=True):
"""Remove the file. If `soft`, then no error will be raised if the """Remove the file. If `soft`, then no error will be raised if the
file does not exist. file does not exist.
@ -378,6 +404,7 @@ def remove(path, soft=True):
except (OSError, IOError) as exc: except (OSError, IOError) as exc:
raise FilesystemError(exc, 'delete', (path,), traceback.format_exc()) raise FilesystemError(exc, 'delete', (path,), traceback.format_exc())
def copy(path, dest, replace=False): def copy(path, dest, replace=False):
"""Copy a plain file. Permissions are not copied. If `dest` already """Copy a plain file. Permissions are not copied. If `dest` already
exists, raises a FilesystemError unless `replace` is True. Has no exists, raises a FilesystemError unless `replace` is True. Has no
@ -389,13 +416,14 @@ def copy(path, dest, replace=False):
path = syspath(path) path = syspath(path)
dest = syspath(dest) dest = syspath(dest)
if not replace and os.path.exists(dest): if not replace and os.path.exists(dest):
raise FilesystemError('file exists', 'copy', (path, dest)) raise FilesystemError(u'file exists', 'copy', (path, dest))
try: try:
shutil.copyfile(path, dest) shutil.copyfile(path, dest)
except (OSError, IOError) as exc: except (OSError, IOError) as exc:
raise FilesystemError(exc, 'copy', (path, dest), raise FilesystemError(exc, 'copy', (path, dest),
traceback.format_exc()) traceback.format_exc())
def move(path, dest, replace=False): def move(path, dest, replace=False):
"""Rename a file. `dest` may not be a directory. If `dest` already """Rename a file. `dest` may not be a directory. If `dest` already
exists, raises an OSError unless `replace` is True. Has no effect if exists, raises an OSError unless `replace` is True. Has no effect if
@ -409,7 +437,7 @@ def move(path, dest, replace=False):
path = syspath(path) path = syspath(path)
dest = syspath(dest) dest = syspath(dest)
if os.path.exists(dest) and not replace: if os.path.exists(dest) and not replace:
raise FilesystemError('file exists', 'rename', (path, dest), raise FilesystemError(u'file exists', 'rename', (path, dest),
traceback.format_exc()) traceback.format_exc())
# First, try renaming the file. # First, try renaming the file.
@ -424,6 +452,27 @@ def move(path, dest, replace=False):
raise FilesystemError(exc, 'move', (path, dest), raise FilesystemError(exc, 'move', (path, dest),
traceback.format_exc()) traceback.format_exc())
def link(path, dest, replace=False):
"""Create a symbolic link from path to `dest`. Raises an OSError if
`dest` already exists, unless `replace` is True. Does nothing if
`path` == `dest`."""
if samefile(path, dest):
return
path = syspath(path)
dest = syspath(dest)
if os.path.exists(dest) and not replace:
raise FilesystemError(u'file exists', 'rename', (path, dest),
traceback.format_exc())
try:
os.symlink(path, dest)
except OSError:
raise FilesystemError(u'Operating system does not support symbolic '
u'links.', 'link', (path, dest),
traceback.format_exc())
def unique_path(path): def unique_path(path):
"""Returns a version of ``path`` that does not exist on the """Returns a version of ``path`` that does not exist on the
filesystem. Specifically, if ``path` itself already exists, then filesystem. Specifically, if ``path` itself already exists, then
@ -433,7 +482,7 @@ def unique_path(path):
return path return path
base, ext = os.path.splitext(path) base, ext = os.path.splitext(path)
match = re.search(r'\.(\d)+$', base) match = re.search(br'\.(\d)+$', base)
if match: if match:
num = int(match.group(1)) num = int(match.group(1))
base = base[:match.start()] base = base[:match.start()]
@ -441,7 +490,7 @@ def unique_path(path):
num = 0 num = 0
while True: while True:
num += 1 num += 1
new_path = '%s.%i%s' % (base, num, ext) new_path = b'%s.%i%s' % (base, num, ext)
if not os.path.exists(new_path): if not os.path.exists(new_path):
return new_path return new_path
@ -450,13 +499,15 @@ def unique_path(path):
# shares, which are sufficiently common as to cause frequent problems. # shares, which are sufficiently common as to cause frequent problems.
# http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247.aspx # http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247.aspx
CHAR_REPLACE = [ CHAR_REPLACE = [
(re.compile(ur'[\\/]'), u'_'), # / and \ -- forbidden everywhere. (re.compile(r'[\\/]'), u'_'), # / and \ -- forbidden everywhere.
(re.compile(ur'^\.'), u'_'), # Leading dot (hidden files on Unix). (re.compile(r'^\.'), u'_'), # Leading dot (hidden files on Unix).
(re.compile(ur'[\x00-\x1f]'), u''), # Control characters. (re.compile(r'[\x00-\x1f]'), u''), # Control characters.
(re.compile(ur'[<>:"\?\*\|]'), u'_'), # Windows "reserved characters". (re.compile(r'[<>:"\?\*\|]'), u'_'), # Windows "reserved characters".
(re.compile(ur'\.$'), u'_'), # Trailing dots. (re.compile(r'\.$'), u'_'), # Trailing dots.
(re.compile(ur'\s+$'), u''), # Trailing whitespace. (re.compile(r'\s+$'), u''), # Trailing whitespace.
] ]
def sanitize_path(path, replacements=None): def sanitize_path(path, replacements=None):
"""Takes a path (as a Unicode string) and makes sure that it is """Takes a path (as a Unicode string) and makes sure that it is
legal. Returns a new path. Only works with fragments; won't work legal. Returns a new path. Only works with fragments; won't work
@ -477,6 +528,7 @@ def sanitize_path(path, replacements=None):
comps[i] = comp comps[i] = comp
return os.path.join(*comps) return os.path.join(*comps)
def truncate_path(path, length=MAX_FILENAME_LENGTH): def truncate_path(path, length=MAX_FILENAME_LENGTH):
"""Given a bytestring path or a Unicode path fragment, truncate the """Given a bytestring path or a Unicode path fragment, truncate the
components to a legal length. In the last component, the extension components to a legal length. In the last component, the extension
@ -493,12 +545,83 @@ def truncate_path(path, length=MAX_FILENAME_LENGTH):
return os.path.join(*out) return os.path.join(*out)
def _legalize_stage(path, replacements, length, extension, fragment):
"""Perform a single round of path legalization steps
(sanitation/replacement, encoding from Unicode to bytes,
extension-appending, and truncation). Return the path (Unicode if
`fragment` is set, `bytes` otherwise) and whether truncation was
required.
"""
# Perform an initial sanitization including user replacements.
path = sanitize_path(path, replacements)
# Encode for the filesystem.
if not fragment:
path = bytestring_path(path)
# Preserve extension.
path += extension.lower()
# Truncate too-long components.
pre_truncate_path = path
path = truncate_path(path, length)
return path, path != pre_truncate_path
def legalize_path(path, replacements, length, extension, fragment):
"""Given a path-like Unicode string, produce a legal path. Return
the path and a flag indicating whether some replacements had to be
ignored (see below).
The legalization process (see `_legalize_stage`) consists of
applying the sanitation rules in `replacements`, encoding the string
to bytes (unless `fragment` is set), truncating components to
`length`, and appending the `extension`.
This function performs up to three calls to `_legalize_stage` in
case truncation conflicts with replacements (as can happen when
truncation creates whitespace at the end of the string, for
example). The limited number of iterations avoids the possibility
of an infinite loop of sanitation and truncation operations, which
could be caused by replacement rules that make the string longer.
The flag returned from this function indicates whether the path had
to be truncated twice (meaning that replacements made the string
longer again after it was truncated); in that case, the application
should probably log some sort of warning.
"""
if fragment:
# Outputting Unicode.
extension = extension.decode('utf8', 'ignore')
first_stage_path, _ = _legalize_stage(
path, replacements, length, extension, fragment
)
# Convert back to Unicode with extension removed.
first_stage_path, _ = os.path.splitext(displayable_path(first_stage_path))
# Re-sanitize following truncation (including user replacements).
second_stage_path, retruncated = _legalize_stage(
first_stage_path, replacements, length, extension, fragment
)
# If the path was once again truncated, discard user replacements
# and run through one last legalization stage.
if retruncated:
second_stage_path, _ = _legalize_stage(
first_stage_path, None, length, extension, fragment
)
return second_stage_path, retruncated
def str2bool(value): def str2bool(value):
"""Returns a boolean reflecting a human-entered string.""" """Returns a boolean reflecting a human-entered string."""
if value.lower() in ('yes', '1', 'true', 't', 'y'): return value.lower() in (u'yes', u'1', u'true', u't', u'y')
return True
else:
return False
def as_string(value): def as_string(value):
"""Convert a value to a Unicode object for matching with a query. """Convert a value to a Unicode object for matching with a query.
@ -507,56 +630,23 @@ def as_string(value):
if value is None: if value is None:
return u'' return u''
elif isinstance(value, buffer): elif isinstance(value, buffer):
return str(value).decode('utf8', 'ignore') return bytes(value).decode('utf8', 'ignore')
elif isinstance(value, str): elif isinstance(value, bytes):
return value.decode('utf8', 'ignore') return value.decode('utf8', 'ignore')
else: else:
return unicode(value) return unicode(value)
def levenshtein(s1, s2):
"""A nice DP edit distance implementation from Wikibooks:
http://en.wikibooks.org/wiki/Algorithm_implementation/Strings/
Levenshtein_distance#Python
"""
if len(s1) < len(s2):
return levenshtein(s2, s1)
if not s1:
return len(s2)
previous_row = xrange(len(s2) + 1)
for i, c1 in enumerate(s1):
current_row = [i + 1]
for j, c2 in enumerate(s2):
insertions = previous_row[j + 1] + 1
deletions = current_row[j] + 1
substitutions = previous_row[j] + (c1 != c2)
current_row.append(min(insertions, deletions, substitutions))
previous_row = current_row
return previous_row[-1]
def plurality(objs): def plurality(objs):
"""Given a sequence of comparable objects, returns the object that """Given a sequence of hashble objects, returns the object that
is most common in the set and the frequency of that object. The is most common in the set and the its number of appearance. The
sequence must contain at least one object. sequence must contain at least one object.
""" """
# Calculate frequencies. c = Counter(objs)
freqs = defaultdict(int) if not c:
for obj in objs: raise ValueError(u'sequence must be non-empty')
freqs[obj] += 1 return c.most_common(1)[0]
if not freqs:
raise ValueError('sequence must be non-empty')
# Find object with maximum frequency.
max_freq = 0
res = None
for obj, freq in freqs.items():
if freq > max_freq:
max_freq = freq
res = obj
return res, max_freq
def cpu_count(): def cpu_count():
"""Return the number of hardware thread contexts (cores or SMT """Return the number of hardware thread contexts (cores or SMT
@ -571,8 +661,8 @@ def cpu_count():
num = 0 num = 0
elif sys.platform == 'darwin': elif sys.platform == 'darwin':
try: try:
num = int(os.popen('sysctl -n hw.ncpu').read()) num = int(command_output([b'/usr/sbin/sysctl', b'-n', b'hw.ncpu']))
except ValueError: except (ValueError, OSError, subprocess.CalledProcessError):
num = 0 num = 0
else: else:
try: try:
@ -584,23 +674,38 @@ def cpu_count():
else: else:
return 1 return 1
def command_output(cmd):
"""Wraps the `subprocess` module to invoke a command (given as a
list of arguments starting with the command name) and collect
stdout. The stderr stream is ignored. May raise
`subprocess.CalledProcessError` or an `OSError`.
This replaces `subprocess.check_output`, which isn't available in def command_output(cmd, shell=False):
Python 2.6 and which can have problems if lots of output is sent to """Runs the command and returns its output after it has exited.
stderr.
``cmd`` is a list of byte string arguments starting with the command name.
If ``shell`` is true, ``cmd`` is assumed to be a string and passed to a
shell to execute.
If the process exits with a non-zero return code
``subprocess.CalledProcessError`` is raised. May also raise
``OSError``.
This replaces `subprocess.check_output`, which can have problems if lots of
output is sent to stderr.
""" """
with open(os.devnull, 'w') as devnull: proc = subprocess.Popen(
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=devnull) cmd,
stdout, _ = proc.communicate() stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=platform.system() != 'Windows',
shell=shell
)
stdout, stderr = proc.communicate()
if proc.returncode: if proc.returncode:
raise subprocess.CalledProcessError(proc.returncode, cmd) raise subprocess.CalledProcessError(
returncode=proc.returncode,
cmd=b' '.join(cmd),
output=stdout + stderr,
)
return stdout return stdout
def max_filename_length(path, limit=MAX_FILENAME_LENGTH): def max_filename_length(path, limit=MAX_FILENAME_LENGTH):
"""Attempt to determine the maximum filename length for the """Attempt to determine the maximum filename length for the
filesystem containing `path`. If the value is greater than `limit`, filesystem containing `path`. If the value is greater than `limit`,
@ -616,3 +721,142 @@ def max_filename_length(path, limit=MAX_FILENAME_LENGTH):
return min(res[9], limit) return min(res[9], limit)
else: else:
return limit return limit
def open_anything():
"""Return the system command that dispatches execution to the correct
program.
"""
sys_name = platform.system()
if sys_name == 'Darwin':
base_cmd = 'open'
elif sys_name == 'Windows':
base_cmd = 'start'
else: # Assume Unix
base_cmd = 'xdg-open'
return base_cmd
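# For example, on a typical Linux desktop this returns 'xdg-open', so
# opening a file amounts to running: xdg-open /path/to/file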
def editor_command():
"""Get a command for opening a text file.
Use the `EDITOR` environment variable by default. If it is not
present, fall back to `open_anything()`, the platform-specific tool
for opening files in general.
"""
editor = os.environ.get('EDITOR')
if editor:
return editor
return open_anything()
def shlex_split(s):
"""Split a Unicode or bytes string according to shell lexing rules.
Raise `ValueError` if the string is not a well-formed shell string.
This is a workaround for a bug in some versions of Python.
"""
if isinstance(s, bytes):
# Shlex works fine.
return shlex.split(s)
elif isinstance(s, unicode):
# Work around a Python bug.
# http://bugs.python.org/issue6988
bs = s.encode('utf8')
return [c.decode('utf8') for c in shlex.split(bs)]
else:
raise TypeError(u'shlex_split called with non-string')
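# For example:
#   shlex_split(u'convert -resize "100x100>" in.jpg')
#   => [u'convert', u'-resize', u'100x100>', u'in.jpg']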
def interactive_open(targets, command):
"""Open the files in `targets` by `exec`ing a new `command`, given
as a Unicode string. (The new program takes over, and Python
execution ends: this does not fork a subprocess.)
Can raise `OSError`.
"""
assert command
# Split the command string into its arguments.
try:
args = shlex_split(command)
except ValueError: # Malformed shell tokens.
args = [command]
args.insert(0, args[0]) # for argv[0]
args += targets
return os.execlp(*args)
def _windows_long_path_name(short_path):
"""Use Windows' `GetLongPathNameW` via ctypes to get the canonical,
long path given a short filename.
"""
if not isinstance(short_path, unicode):
short_path = unicode(short_path)
import ctypes
buf = ctypes.create_unicode_buffer(260)
get_long_path_name_w = ctypes.windll.kernel32.GetLongPathNameW
return_value = get_long_path_name_w(short_path, buf, 260)
if return_value == 0 or return_value > 260:
# An error occurred
return short_path
else:
long_path = buf.value
# GetLongPathNameW does not change the case of the drive
# letter.
if len(long_path) > 1 and long_path[1] == ':':
long_path = long_path[0].upper() + long_path[1:]
return long_path
def case_sensitive(path):
"""Check whether the filesystem at the given path is case sensitive.
To work best, the path should point to a file or a directory. If the path
does not exist, assume a case sensitive file system on every platform
except Windows.
"""
# A fallback in case the path does not exist.
if not os.path.exists(syspath(path)):
# By default, the case sensitivity depends on the platform.
return platform.system() != 'Windows'
# If an upper-case version of the path exists but a lower-case
# version does not, then the filesystem must be case-sensitive.
# (Otherwise, we have more work to do.)
if not (os.path.exists(syspath(path.lower())) and
os.path.exists(syspath(path.upper()))):
return True
# Both versions of the path exist on the file system. Check whether
# they refer to different files by their inodes. Alas,
# `os.path.samefile` is only available on Unix systems on Python 2.
if platform.system() != 'Windows':
return not os.path.samefile(syspath(path.lower()),
syspath(path.upper()))
# On Windows, we check whether the canonical, long filenames for the
# files are the same.
lower = _windows_long_path_name(path.lower())
upper = _windows_long_path_name(path.upper())
return lower != upper
def raw_seconds_short(string):
"""Formats a human-readable M:SS string as a float (number of seconds).
Raises ValueError if the conversion cannot take place due to `string` not
being in the right format.
"""
match = re.match(r'^(\d+):([0-5]\d)$', string)
if not match:
raise ValueError(u'String not in M:SS format')
minutes, seconds = map(int, match.groups())
return float(minutes * 60 + seconds)
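# For example:
#   raw_seconds_short(u'4:23')  ->  263.0
#   raw_seconds_short(u'4:3')   raises ValueError (seconds need two digits)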


@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
# This file is part of beets. # This file is part of beets.
# Copyright 2013, Fabrice Laporte # Copyright 2016, Fabrice Laporte
# #
# Permission is hereby granted, free of charge, to any person obtaining # Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the # a copy of this software and associated documentation files (the
@ -15,11 +16,15 @@
"""Abstraction layer to resize images using PIL, ImageMagick, or a """Abstraction layer to resize images using PIL, ImageMagick, or a
public resizing proxy if neither is available. public resizing proxy if neither is available.
""" """
from __future__ import division, absolute_import, print_function
import urllib import urllib
import subprocess import subprocess
import os import os
import re
from tempfile import NamedTemporaryFile from tempfile import NamedTemporaryFile
import logging
from beets import logging
from beets import util from beets import util
# Resizing methods # Resizing methods
@ -37,8 +42,8 @@ def resize_url(url, maxwidth):
maxwidth (preserving aspect ratio). maxwidth (preserving aspect ratio).
""" """
return '{0}?{1}'.format(PROXY_URL, urllib.urlencode({ return '{0}?{1}'.format(PROXY_URL, urllib.urlencode({
'url': url.replace('http://',''), 'url': url.replace('http://', ''),
'w': str(maxwidth), 'w': bytes(maxwidth),
})) }))
@ -57,9 +62,8 @@ def pil_resize(maxwidth, path_in, path_out=None):
""" """
path_out = path_out or temp_file_for(path_in) path_out = path_out or temp_file_for(path_in)
from PIL import Image from PIL import Image
log.debug(u'artresizer: PIL resizing {0} to {1}'.format( log.debug(u'artresizer: PIL resizing {0} to {1}',
util.displayable_path(path_in), util.displayable_path(path_out) util.displayable_path(path_in), util.displayable_path(path_out))
))
try: try:
im = Image.open(util.syspath(path_in)) im = Image.open(util.syspath(path_in))
@ -68,20 +72,18 @@ def pil_resize(maxwidth, path_in, path_out=None):
im.save(path_out) im.save(path_out)
return path_out return path_out
except IOError: except IOError:
log.error(u"PIL cannot create thumbnail for '{0}'".format( log.error(u"PIL cannot create thumbnail for '{0}'",
util.displayable_path(path_in) util.displayable_path(path_in))
))
return path_in return path_in
def im_resize(maxwidth, path_in, path_out=None): def im_resize(maxwidth, path_in, path_out=None):
"""Resize using ImageMagick's ``convert`` tool. """Resize using ImageMagick's ``convert`` tool.
tool. Return the output path of resized image. Return the output path of resized image.
""" """
path_out = path_out or temp_file_for(path_in) path_out = path_out or temp_file_for(path_in)
log.debug(u'artresizer: ImageMagick resizing {0} to {1}'.format( log.debug(u'artresizer: ImageMagick resizing {0} to {1}',
util.displayable_path(path_in), util.displayable_path(path_out) util.displayable_path(path_in), util.displayable_path(path_out))
))
# "-resize widthxheight>" shrinks images with dimension(s) larger # "-resize widthxheight>" shrinks images with dimension(s) larger
# than the corresponding width and/or height dimension(s). The > # than the corresponding width and/or height dimension(s). The >
@ -89,13 +91,13 @@ def im_resize(maxwidth, path_in, path_out=None):
# compatibility. # compatibility.
try: try:
util.command_output([ util.command_output([
'convert', util.syspath(path_in), b'convert', util.syspath(path_in, prefix=False),
'-resize', '{0}x^>'.format(maxwidth), path_out b'-resize', b'{0}x^>'.format(maxwidth),
util.syspath(path_out, prefix=False),
]) ])
except subprocess.CalledProcessError: except subprocess.CalledProcessError:
log.warn(u'artresizer: IM convert failed for {0}'.format( log.warn(u'artresizer: IM convert failed for {0}',
util.displayable_path(path_in) util.displayable_path(path_in))
))
return path_in return path_in
return path_out return path_out
@ -106,21 +108,56 @@ BACKEND_FUNCS = {
} }
def pil_getsize(path_in):
from PIL import Image
try:
im = Image.open(util.syspath(path_in))
return im.size
except IOError as exc:
log.error(u"PIL could not read file {}: {}",
util.displayable_path(path_in), exc)
def im_getsize(path_in):
cmd = [b'identify', b'-format', b'%w %h',
util.syspath(path_in, prefix=False)]
try:
out = util.command_output(cmd)
except subprocess.CalledProcessError as exc:
log.warn(u'ImageMagick size query failed')
log.debug(
u'`identify` exited with status {} when '
u'getting size with command {}:\n{}',
exc.returncode, cmd, exc.output.strip()
)
return
try:
return tuple(map(int, out.split(b' ')))
except (ValueError, IndexError):
log.warn(u'Could not understand IM output: {0!r}', out)
BACKEND_GET_SIZE = {
PIL: pil_getsize,
IMAGEMAGICK: im_getsize,
}
class Shareable(type): class Shareable(type):
"""A pseudo-singleton metaclass that allows both shared and """A pseudo-singleton metaclass that allows both shared and
non-shared instances. The ``MyClass.shared`` property holds a non-shared instances. The ``MyClass.shared`` property holds a
lazily-created shared instance of ``MyClass`` while calling lazily-created shared instance of ``MyClass`` while calling
``MyClass()`` to construct a new object works as usual. ``MyClass()`` to construct a new object works as usual.
""" """
def __init__(cls, name, bases, dict): def __init__(self, name, bases, dict):
super(Shareable, cls).__init__(name, bases, dict) super(Shareable, self).__init__(name, bases, dict)
cls._instance = None self._instance = None
@property @property
def shared(cls): def shared(self):
if cls._instance is None: if self._instance is None:
cls._instance = cls() self._instance = self()
return cls._instance return self._instance
class ArtResizer(object): class ArtResizer(object):
@ -128,12 +165,12 @@ class ArtResizer(object):
""" """
__metaclass__ = Shareable __metaclass__ = Shareable
def __init__(self, method=None): def __init__(self):
"""Create a resizer object for the given method or, if none is """Create a resizer object with an inferred method.
specified, with an inferred method.
""" """
self.method = method or self._guess_method() self.method = self._check_method()
log.debug(u"artresizer: method is {0}".format(self.method)) log.debug(u"artresizer: method is {0}", self.method)
self.can_compare = self._can_compare()
def resize(self, maxwidth, path_in, path_out=None): def resize(self, maxwidth, path_in, path_out=None):
"""Manipulate an image file according to the method, returning a """Manipulate an image file according to the method, returning a
@ -141,7 +178,7 @@ class ArtResizer(object):
temporary file. For WEBPROXY, returns `path_in` unmodified. temporary file. For WEBPROXY, returns `path_in` unmodified.
""" """
if self.local: if self.local:
func = BACKEND_FUNCS[self.method] func = BACKEND_FUNCS[self.method[0]]
return func(maxwidth, path_in, path_out) return func(maxwidth, path_in, path_out)
else: else:
return path_in return path_in
@ -159,30 +196,63 @@ class ArtResizer(object):
@property @property
def local(self): def local(self):
"""A boolean indicating whether the resizing method is performed """A boolean indicating whether the resizing method is performed
locally (i.e., PIL or IMAGEMAGICK). locally (i.e., PIL or ImageMagick).
""" """
return self.method in BACKEND_FUNCS return self.method[0] in BACKEND_FUNCS
def get_size(self, path_in):
"""Return the size of an image file as an int couple (width, height)
in pixels.
Only available locally
"""
if self.local:
func = BACKEND_GET_SIZE[self.method[0]]
return func(path_in)
def _can_compare(self):
"""A boolean indicating whether image comparison is available"""
return self.method[0] == IMAGEMAGICK and self.method[1] > (6, 8, 7)
@staticmethod @staticmethod
def _guess_method(): def _check_method():
"""Determine which resizing method to use. Returns PIL, """Return a tuple indicating an available method and its version."""
IMAGEMAGICK, or WEBPROXY depending on available dependencies. version = get_im_version()
""" if version:
# Try importing PIL. return IMAGEMAGICK, version
try:
__import__('PIL', fromlist=['Image'])
return PIL
except ImportError:
pass
# Try invoking ImageMagick's "convert". version = get_pil_version()
try: if version:
out = util.command_output(['convert', '--version']) return PIL, version
if 'imagemagick' in out.lower():
# system32/convert.exe may be interfering
return IMAGEMAGICK
except (subprocess.CalledProcessError, OSError):
pass
# Fall back to Web proxy method. return WEBPROXY, (0)
return WEBPROXY
def get_im_version():
"""Return Image Magick version or None if it is unavailable
Try invoking ImageMagick's "convert"."""
try:
out = util.command_output([b'identify', b'--version'])
if 'imagemagick' in out.lower():
pattern = r".+ (\d+)\.(\d+)\.(\d+).*"
match = re.search(pattern, out)
if match:
return (int(match.group(1)),
int(match.group(2)),
int(match.group(3)))
return (0,)
except (subprocess.CalledProcessError, OSError):
return None
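# For example, if `identify --version` prints a line like
# "Version: ImageMagick 6.8.9-9 ...", this returns (6, 8, 9).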
def get_pil_version():
"""Return Image Magick version or None if it is unavailable
Try importing PIL."""
try:
__import__('PIL', fromlist=[str('Image')])
return (0,)
except ImportError:
return None


@ -1,3 +1,5 @@
# -*- coding: utf-8 -*-
"""Extremely simple pure-Python implementation of coroutine-style """Extremely simple pure-Python implementation of coroutine-style
asynchronous socket I/O. Inspired by, but inferior to, Eventlet. asynchronous socket I/O. Inspired by, but inferior to, Eventlet.
Bluelet can also be thought of as a less-terrible replacement for Bluelet can also be thought of as a less-terrible replacement for
@ -5,6 +7,8 @@ asyncore.
Bluelet: easy concurrency without all the messy parallelism. Bluelet: easy concurrency without all the messy parallelism.
""" """
from __future__ import division, absolute_import, print_function
import socket import socket
import select import select
import sys import sys
@ -38,6 +42,7 @@ class Event(object):
""" """
pass pass
class WaitableEvent(Event): class WaitableEvent(Event):
"""A waitable event is one encapsulating an action that can be """A waitable event is one encapsulating an action that can be
waited for using a select() call. That is, it's an event with an waited for using a select() call. That is, it's an event with an
@ -57,21 +62,25 @@ class WaitableEvent(Event):
""" """
pass pass
class ValueEvent(Event): class ValueEvent(Event):
"""An event that does nothing but return a fixed value.""" """An event that does nothing but return a fixed value."""
def __init__(self, value): def __init__(self, value):
self.value = value self.value = value
class ExceptionEvent(Event): class ExceptionEvent(Event):
"""Raise an exception at the yield point. Used internally.""" """Raise an exception at the yield point. Used internally."""
def __init__(self, exc_info): def __init__(self, exc_info):
self.exc_info = exc_info self.exc_info = exc_info
class SpawnEvent(Event): class SpawnEvent(Event):
"""Add a new coroutine thread to the scheduler.""" """Add a new coroutine thread to the scheduler."""
def __init__(self, coro): def __init__(self, coro):
self.spawned = coro self.spawned = coro
class JoinEvent(Event): class JoinEvent(Event):
"""Suspend the thread until the specified child thread has """Suspend the thread until the specified child thread has
completed. completed.
@ -79,11 +88,13 @@ class JoinEvent(Event):
def __init__(self, child): def __init__(self, child):
self.child = child self.child = child
class KillEvent(Event): class KillEvent(Event):
"""Unschedule a child thread.""" """Unschedule a child thread."""
def __init__(self, child): def __init__(self, child):
self.child = child self.child = child
class DelegationEvent(Event): class DelegationEvent(Event):
"""Suspend execution of the current thread, start a new thread and, """Suspend execution of the current thread, start a new thread and,
once the child thread finished, return control to the parent once the child thread finished, return control to the parent
@ -92,6 +103,7 @@ class DelegationEvent(Event):
def __init__(self, coro): def __init__(self, coro):
self.spawned = coro self.spawned = coro
class ReturnEvent(Event): class ReturnEvent(Event):
"""Return a value the current thread's delegator at the point of """Return a value the current thread's delegator at the point of
delegation. Ends the current (delegate) thread. delegation. Ends the current (delegate) thread.
@ -99,6 +111,7 @@ class ReturnEvent(Event):
def __init__(self, value): def __init__(self, value):
self.value = value self.value = value
class SleepEvent(WaitableEvent): class SleepEvent(WaitableEvent):
"""Suspend the thread for a given duration. """Suspend the thread for a given duration.
""" """
@ -108,6 +121,7 @@ class SleepEvent(WaitableEvent):
def time_left(self): def time_left(self):
return max(self.wakeup_time - time.time(), 0.0) return max(self.wakeup_time - time.time(), 0.0)
class ReadEvent(WaitableEvent): class ReadEvent(WaitableEvent):
"""Reads from a file-like object.""" """Reads from a file-like object."""
def __init__(self, fd, bufsize): def __init__(self, fd, bufsize):
@ -120,6 +134,7 @@ class ReadEvent(WaitableEvent):
def fire(self): def fire(self):
return self.fd.read(self.bufsize) return self.fd.read(self.bufsize)
class WriteEvent(WaitableEvent): class WriteEvent(WaitableEvent):
"""Writes to a file-like object.""" """Writes to a file-like object."""
def __init__(self, fd, data): def __init__(self, fd, data):
@ -192,15 +207,19 @@ def _event_select(events):
return ready_events return ready_events
class ThreadException(Exception): class ThreadException(Exception):
def __init__(self, coro, exc_info): def __init__(self, coro, exc_info):
self.coro = coro self.coro = coro
self.exc_info = exc_info self.exc_info = exc_info
def reraise(self): def reraise(self):
_reraise(self.exc_info[0], self.exc_info[1], self.exc_info[2]) _reraise(self.exc_info[0], self.exc_info[1], self.exc_info[2])
SUSPENDED = Event() # Special sentinel placeholder for suspended threads. SUSPENDED = Event() # Special sentinel placeholder for suspended threads.
class Delegated(Event): class Delegated(Event):
"""Placeholder indicating that a thread has delegated execution to a """Placeholder indicating that a thread has delegated execution to a
different thread. different thread.
@ -208,6 +227,7 @@ class Delegated(Event):
def __init__(self, child): def __init__(self, child):
self.child = child self.child = child
def run(root_coro): def run(root_coro):
"""Schedules a coroutine, running it to completion. This """Schedules a coroutine, running it to completion. This
encapsulates the Bluelet scheduler, which the root coroutine can encapsulates the Bluelet scheduler, which the root coroutine can
@ -329,7 +349,7 @@ def run(root_coro):
break break
# Wait and fire. # Wait and fire.
event2coro = dict((v,k) for k,v in threads.items()) event2coro = dict((v, k) for k, v in threads.items())
for event in _event_select(threads.values()): for event in _event_select(threads.values()):
# Run the IO operation, but catch socket errors. # Run the IO operation, but catch socket errors.
try: try:
@ -378,6 +398,7 @@ def run(root_coro):
class SocketClosedError(Exception): class SocketClosedError(Exception):
pass pass
class Listener(object): class Listener(object):
"""A socket wrapper object for listening sockets. """A socket wrapper object for listening sockets.
""" """
@ -407,6 +428,7 @@ class Listener(object):
self._closed = True self._closed = True
self.sock.close() self.sock.close()
class Connection(object): class Connection(object):
"""A socket wrapper object for connected sockets. """A socket wrapper object for connected sockets.
""" """
@ -468,6 +490,7 @@ class Connection(object):
yield ReturnEvent(line) yield ReturnEvent(line)
break break
class AcceptEvent(WaitableEvent): class AcceptEvent(WaitableEvent):
"""An event for Listener objects (listening sockets) that suspends """An event for Listener objects (listening sockets) that suspends
execution until the socket gets a connection. execution until the socket gets a connection.
@ -482,6 +505,7 @@ class AcceptEvent(WaitableEvent):
sock, addr = self.listener.sock.accept() sock, addr = self.listener.sock.accept()
return Connection(sock, addr) return Connection(sock, addr)
class ReceiveEvent(WaitableEvent): class ReceiveEvent(WaitableEvent):
"""An event for Connection objects (connected sockets) for """An event for Connection objects (connected sockets) for
asynchronously reading data. asynchronously reading data.
@ -496,6 +520,7 @@ class ReceiveEvent(WaitableEvent):
def fire(self): def fire(self):
return self.conn.sock.recv(self.bufsize) return self.conn.sock.recv(self.bufsize)
class SendEvent(WaitableEvent): class SendEvent(WaitableEvent):
"""An event for Connection objects (connected sockets) for """An event for Connection objects (connected sockets) for
asynchronously writing data. asynchronously writing data.
@ -523,29 +548,33 @@ def null():
""" """
return ValueEvent(None) return ValueEvent(None)
def spawn(coro): def spawn(coro):
"""Event: add another coroutine to the scheduler. Both the parent """Event: add another coroutine to the scheduler. Both the parent
and child coroutines run concurrently. and child coroutines run concurrently.
""" """
if not isinstance(coro, types.GeneratorType): if not isinstance(coro, types.GeneratorType):
raise ValueError('%s is not a coroutine' % str(coro)) raise ValueError(u'%s is not a coroutine' % coro)
return SpawnEvent(coro) return SpawnEvent(coro)
def call(coro): def call(coro):
"""Event: delegate to another coroutine. The current coroutine """Event: delegate to another coroutine. The current coroutine
is resumed once the sub-coroutine finishes. If the sub-coroutine is resumed once the sub-coroutine finishes. If the sub-coroutine
returns a value using end(), then this event returns that value. returns a value using end(), then this event returns that value.
""" """
if not isinstance(coro, types.GeneratorType): if not isinstance(coro, types.GeneratorType):
raise ValueError('%s is not a coroutine' % str(coro)) raise ValueError(u'%s is not a coroutine' % coro)
return DelegationEvent(coro) return DelegationEvent(coro)
def end(value=None): def end(value=None):
"""Event: ends the coroutine and returns a value to its """Event: ends the coroutine and returns a value to its
delegator. delegator.
""" """
return ReturnEvent(value) return ReturnEvent(value)
def read(fd, bufsize=None): def read(fd, bufsize=None):
"""Event: read from a file descriptor asynchronously.""" """Event: read from a file descriptor asynchronously."""
if bufsize is None: if bufsize is None:
@ -563,10 +592,12 @@ def read(fd, bufsize=None):
else: else:
return ReadEvent(fd, bufsize) return ReadEvent(fd, bufsize)
def write(fd, data): def write(fd, data):
"""Event: write to a file descriptor asynchronously.""" """Event: write to a file descriptor asynchronously."""
return WriteEvent(fd, data) return WriteEvent(fd, data)
def connect(host, port): def connect(host, port):
"""Event: connect to a network address and return a Connection """Event: connect to a network address and return a Connection
object for communicating on the socket. object for communicating on the socket.
@ -575,17 +606,20 @@ def connect(host, port):
sock = socket.create_connection(addr) sock = socket.create_connection(addr)
return ValueEvent(Connection(sock, addr)) return ValueEvent(Connection(sock, addr))
def sleep(duration): def sleep(duration):
"""Event: suspend the thread for ``duration`` seconds. """Event: suspend the thread for ``duration`` seconds.
""" """
return SleepEvent(duration) return SleepEvent(duration)
def join(coro): def join(coro):
"""Suspend the thread until another, previously `spawn`ed thread """Suspend the thread until another, previously `spawn`ed thread
completes. completes.
""" """
return JoinEvent(coro) return JoinEvent(coro)
def kill(coro): def kill(coro):
"""Halt the execution of a different `spawn`ed thread. """Halt the execution of a different `spawn`ed thread.
""" """

File diff suppressed because it is too large


@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
# This file is part of beets. # This file is part of beets.
# Copyright 2013, Adrian Sampson. # Copyright 2016, Adrian Sampson.
# #
# Permission is hereby granted, free of charge, to any person obtaining # Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the # a copy of this software and associated documentation files (the
@ -12,167 +13,31 @@
# The above copyright notice and this permission notice shall be # The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software. # included in all copies or substantial portions of the Software.
"""A metaclass for enumerated types that really are types. from __future__ import division, absolute_import, print_function
You can create enumerations with `enum(values, [name])` and they work from enum import Enum
how you would expect them to.
>>> from enumeration import enum
>>> Direction = enum('north east south west', name='Direction')
>>> Direction.west
Direction.west
>>> Direction.west == Direction.west
True
>>> Direction.west == Direction.east
False
>>> isinstance(Direction.west, Direction)
True
>>> Direction[3]
Direction.west
>>> Direction['west']
Direction.west
>>> Direction.west.name
'west'
>>> Direction.north < Direction.west
True
Enumerations are classes; their instances represent the possible values class OrderedEnum(Enum):
of the enumeration. Because Python classes must have names, you may
provide a `name` parameter to `enum`; if you don't, a meaningless one
will be chosen for you.
"""
import random
class Enumeration(type):
"""A metaclass whose classes are enumerations.
The `values` attribute of the class is used to populate the
enumeration. Values may either be a list of enumerated names or a
string containing a space-separated list of names. When the class
is created, it is instantiated for each name value in `values`.
Each such instance is the name of the enumerated item as the sole
argument.
The `Enumerated` class is a good choice for a superclass.
""" """
An Enum subclass that allows comparison of members.
def __init__(cls, name, bases, dic):
super(Enumeration, cls).__init__(name, bases, dic)
if 'values' not in dic:
# Do nothing if no values are provided (i.e., with
# Enumerated itself).
return
# May be called with a single string, in which case we split on
# whitespace for convenience.
values = dic['values']
if isinstance(values, basestring):
values = values.split()
# Create the Enumerated instances for each value. We have to use
# super's __setattr__ here because we disallow setattr below.
super(Enumeration, cls).__setattr__('_items_dict', {})
super(Enumeration, cls).__setattr__('_items_list', [])
for value in values:
item = cls(value, len(cls._items_list))
cls._items_dict[value] = item
cls._items_list.append(item)
def __getattr__(cls, key):
try:
return cls._items_dict[key]
except KeyError:
raise AttributeError("enumeration '" + cls.__name__ +
"' has no item '" + key + "'")
def __setattr__(cls, key, val):
raise TypeError("enumerations do not support attribute assignment")
def __getitem__(cls, key):
if isinstance(key, int):
return cls._items_list[key]
else:
return getattr(cls, key)
def __len__(cls):
return len(cls._items_list)
def __iter__(cls):
return iter(cls._items_list)
def __nonzero__(cls):
# Ensures that __len__ doesn't get called before __init__ by
# pydoc.
return True
class Enumerated(object):
"""An item in an enumeration.
Contains instance methods inherited by enumerated objects. The
metaclass is preset to `Enumeration` for your convenience.
Instance attributes:
name -- The name of the item.
index -- The index of the item in its enumeration.
>>> from enumeration import Enumerated
>>> class Garment(Enumerated):
... values = 'hat glove belt poncho lederhosen suspenders'
... def wear(self):
... print('now wearing a ' + self.name)
...
>>> Garment.poncho.wear()
now wearing a poncho
""" """
def __ge__(self, other):
if self.__class__ is other.__class__:
return self.value >= other.value
return NotImplemented
__metaclass__ = Enumeration def __gt__(self, other):
if self.__class__ is other.__class__:
return self.value > other.value
return NotImplemented
def __init__(self, name, index): def __le__(self, other):
self.name = name if self.__class__ is other.__class__:
self.index = index return self.value <= other.value
return NotImplemented
def __str__(self): def __lt__(self, other):
return type(self).__name__ + '.' + self.name if self.__class__ is other.__class__:
return self.value < other.value
def __repr__(self): return NotImplemented
return str(self)
def __cmp__(self, other):
if type(self) is type(other):
# Note that we're assuming that the items are direct
# instances of the same Enumeration (i.e., no fancy
# subclassing), which is probably okay.
return cmp(self.index, other.index)
else:
return NotImplemented
def enum(*values, **kwargs):
"""Shorthand for creating a new Enumeration class.
Call with enumeration values as a list, a space-delimited string, or
just an argument list. To give the class a name, pass it as the
`name` keyword argument. Otherwise, a name will be chosen for you.
The following are all equivalent:
enum('pinkie ring middle index thumb')
enum('pinkie', 'ring', 'middle', 'index', 'thumb')
enum(['pinkie', 'ring', 'middle', 'index', 'thumb'])
"""
if ('name' not in kwargs) or kwargs['name'] is None:
# Create a probably-unique name. It doesn't really have to be
# unique, but getting distinct names each time helps with
# identification in debugging.
name = 'Enumeration' + hex(random.randint(0,0xfffffff))[2:].upper()
else:
name = kwargs['name']
if len(values) == 1:
# If there's only one value, we have a couple of alternate calling
# styles.
if isinstance(values[0], basestring) or hasattr(values[0], '__iter__'):
values = values[0]
return type(name, (Enumerated,), {'values': values})
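The replacement is far smaller because the stdlib enum module now does the bookkeeping; OrderedEnum only adds ordering between members of the same class. A sketch with a hypothetical subclass:

class Quality(OrderedEnum):   # hypothetical members, for illustration
    LOW = 1
    MEDIUM = 2
    HIGH = 3

assert Quality.LOW < Quality.HIGH
assert Quality.HIGH >= Quality.MEDIUM
# Members of different classes are not ordered: each method returns
# NotImplemented instead of comparing values across classes.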


@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
# This file is part of beets. # This file is part of beets.
# Copyright 2013, Adrian Sampson. # Copyright 2016, Adrian Sampson.
# #
# Permission is hereby granted, free of charge, to any person obtaining # Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the # a copy of this software and associated documentation files (the
@ -25,13 +26,16 @@ library: unknown symbols are left intact.
This is sort of like a tiny, horrible degeneration of a real templating This is sort of like a tiny, horrible degeneration of a real templating
engine like Jinja2 or Mustache. engine like Jinja2 or Mustache.
""" """
from __future__ import print_function
from __future__ import division, absolute_import, print_function
import re import re
import ast import ast
import dis import dis
import types import types
from .confit import NUMERIC_TYPES
SYMBOL_DELIM = u'$' SYMBOL_DELIM = u'$'
FUNC_DELIM = u'%' FUNC_DELIM = u'%'
GROUP_OPEN = u'{' GROUP_OPEN = u'{'
@ -42,6 +46,7 @@ ESCAPE_CHAR = u'$'
VARIABLE_PREFIX = '__var_' VARIABLE_PREFIX = '__var_'
FUNCTION_PREFIX = '__func_' FUNCTION_PREFIX = '__func_'
class Environment(object): class Environment(object):
"""Contains the values and functions to be substituted into a """Contains the values and functions to be substituted into a
template. template.
@ -57,23 +62,26 @@ def ex_lvalue(name):
"""A variable load expression.""" """A variable load expression."""
return ast.Name(name, ast.Store()) return ast.Name(name, ast.Store())
def ex_rvalue(name): def ex_rvalue(name):
"""A variable store expression.""" """A variable store expression."""
return ast.Name(name, ast.Load()) return ast.Name(name, ast.Load())
def ex_literal(val): def ex_literal(val):
"""An int, float, long, bool, string, or None literal with the given """An int, float, long, bool, string, or None literal with the given
value. value.
""" """
if val is None: if val is None:
return ast.Name('None', ast.Load()) return ast.Name('None', ast.Load())
elif isinstance(val, (int, float, long)): elif isinstance(val, NUMERIC_TYPES):
return ast.Num(val) return ast.Num(val)
elif isinstance(val, bool): elif isinstance(val, bool):
return ast.Name(str(val), ast.Load()) return ast.Name(bytes(val), ast.Load())
elif isinstance(val, basestring): elif isinstance(val, basestring):
return ast.Str(val) return ast.Str(val)
raise TypeError('no literal for {0}'.format(type(val))) raise TypeError(u'no literal for {0}'.format(type(val)))
def ex_varassign(name, expr): def ex_varassign(name, expr):
"""Assign an expression into a single variable. The expression may """Assign an expression into a single variable. The expression may
@ -83,6 +91,7 @@ def ex_varassign(name, expr):
expr = ex_literal(expr) expr = ex_literal(expr)
return ast.Assign([ex_lvalue(name)], expr) return ast.Assign([ex_lvalue(name)], expr)
def ex_call(func, args): def ex_call(func, args):
"""A function-call expression with only positional parameters. The """A function-call expression with only positional parameters. The
function may be an expression or the name of a function. Each function may be an expression or the name of a function. Each
@ -98,13 +107,14 @@ def ex_call(func, args):
return ast.Call(func, args, [], None, None) return ast.Call(func, args, [], None, None)
def compile_func(arg_names, statements, name='_the_func', debug=False): def compile_func(arg_names, statements, name='_the_func', debug=False):
"""Compile a list of statements as the body of a function and return """Compile a list of statements as the body of a function and return
the resulting Python function. If `debug`, then print out the the resulting Python function. If `debug`, then print out the
bytecode of the compiled function. bytecode of the compiled function.
""" """
func_def = ast.FunctionDef( func_def = ast.FunctionDef(
name, name.encode('utf8'),
ast.arguments( ast.arguments(
[ast.Name(n, ast.Param()) for n in arg_names], [ast.Name(n, ast.Param()) for n in arg_names],
None, None, None, None,
@ -126,7 +136,7 @@ def compile_func(arg_names, statements, name='_the_func', debug=False):
dis.dis(const) dis.dis(const)
the_locals = {} the_locals = {}
exec prog in {}, the_locals exec(prog, {}, the_locals)
return the_locals[name] return the_locals[name]
@ -157,6 +167,7 @@ class Symbol(object):
expr = ex_rvalue(VARIABLE_PREFIX + self.ident.encode('utf8')) expr = ex_rvalue(VARIABLE_PREFIX + self.ident.encode('utf8'))
return [expr], set([self.ident.encode('utf8')]), set() return [expr], set([self.ident.encode('utf8')]), set()
class Call(object): class Call(object):
"""A function call in a template.""" """A function call in a template."""
def __init__(self, ident, args, original): def __init__(self, ident, args, original):
@ -214,6 +225,7 @@ class Call(object):
) )
return [subexpr_call], varnames, funcnames return [subexpr_call], varnames, funcnames
class Expression(object): class Expression(object):
"""Top-level template construct: contains a list of text blobs, """Top-level template construct: contains a list of text blobs,
Symbols, and Calls. Symbols, and Calls.
@ -259,6 +271,7 @@ class Expression(object):
class ParseError(Exception): class ParseError(Exception):
pass pass
class Parser(object): class Parser(object):
"""Parses a template expression string. Instantiate the class with """Parses a template expression string. Instantiate the class with
the template source and call ``parse_expression``. The ``pos`` field the template source and call ``parse_expression``. The ``pos`` field
@ -280,7 +293,7 @@ class Parser(object):
# Common parsing resources. # Common parsing resources.
special_chars = (SYMBOL_DELIM, FUNC_DELIM, GROUP_OPEN, GROUP_CLOSE, special_chars = (SYMBOL_DELIM, FUNC_DELIM, GROUP_OPEN, GROUP_CLOSE,
ARG_SEP, ESCAPE_CHAR) ARG_SEP, ESCAPE_CHAR)
special_char_re = re.compile(ur'[%s]|$' % special_char_re = re.compile(r'[%s]|$' %
u''.join(re.escape(c) for c in special_chars)) u''.join(re.escape(c) for c in special_chars))
def parse_expression(self): def parse_expression(self):
@ -298,8 +311,8 @@ class Parser(object):
# A non-special character. Skip to the next special # A non-special character. Skip to the next special
# character, treating the interstice as literal text. # character, treating the interstice as literal text.
next_pos = ( next_pos = (
self.special_char_re.search(self.string[self.pos:]).start() self.special_char_re.search(
+ self.pos self.string[self.pos:]).start() + self.pos
) )
text_parts.append(self.string[self.pos:next_pos]) text_parts.append(self.string[self.pos:next_pos])
self.pos = next_pos self.pos = next_pos
@ -316,13 +329,13 @@ class Parser(object):
next_char = self.string[self.pos + 1] next_char = self.string[self.pos + 1]
if char == ESCAPE_CHAR and next_char in \ if char == ESCAPE_CHAR and next_char in \
(SYMBOL_DELIM, FUNC_DELIM, GROUP_CLOSE, ARG_SEP): (SYMBOL_DELIM, FUNC_DELIM, GROUP_CLOSE, ARG_SEP):
# An escaped special character ($$, $}, etc.). Note that # An escaped special character ($$, $}, etc.). Note that
# ${ is not an escape sequence: this is ambiguous with # ${ is not an escape sequence: this is ambiguous with
# the start of a symbol and it's not necessary (just # the start of a symbol and it's not necessary (just
# using { suffices in all cases). # using { suffices in all cases).
text_parts.append(next_char) text_parts.append(next_char)
self.pos += 2 # Skip the next character. self.pos += 2 # Skip the next character.
continue continue
# Shift all characters collected so far into a single string. # Shift all characters collected so far into a single string.
@ -372,7 +385,7 @@ class Parser(object):
if next_char == GROUP_OPEN: if next_char == GROUP_OPEN:
# A symbol like ${this}. # A symbol like ${this}.
self.pos += 1 # Skip opening. self.pos += 1 # Skip opening.
closer = self.string.find(GROUP_CLOSE, self.pos) closer = self.string.find(GROUP_CLOSE, self.pos)
if closer == -1 or closer == self.pos: if closer == -1 or closer == self.pos:
# No closing brace found or identifier is empty. # No closing brace found or identifier is empty.
@ -431,7 +444,7 @@ class Parser(object):
self.parts.append(self.string[start_pos:self.pos]) self.parts.append(self.string[start_pos:self.pos])
return return
self.pos += 1 # Move past closing brace. self.pos += 1 # Move past closing brace.
self.parts.append(Call(ident, args, self.string[start_pos:self.pos])) self.parts.append(Call(ident, args, self.string[start_pos:self.pos]))
def parse_argument_list(self): def parse_argument_list(self):
@ -468,10 +481,11 @@ class Parser(object):
Updates ``pos``. Updates ``pos``.
""" """
remainder = self.string[self.pos:] remainder = self.string[self.pos:]
ident = re.match(ur'\w*', remainder).group(0) ident = re.match(r'\w*', remainder).group(0)
self.pos += len(ident) self.pos += len(ident)
return ident return ident
def _parse(template): def _parse(template):
"""Parse a top-level template string Expression. Any extraneous text """Parse a top-level template string Expression. Any extraneous text
is considered literal text. is considered literal text.
@ -522,9 +536,9 @@ class Template(object):
argnames = [] argnames = []
for varname in varnames: for varname in varnames:
argnames.append(VARIABLE_PREFIX.encode('utf8') + varname) argnames.append(VARIABLE_PREFIX + varname)
for funcname in funcnames: for funcname in funcnames:
argnames.append(FUNCTION_PREFIX.encode('utf8') + funcname) argnames.append(FUNCTION_PREFIX + funcname)
func = compile_func( func = compile_func(
argnames, argnames,
@ -558,4 +572,4 @@ if __name__ == '__main__':
'from __main__ import _tmpl, _vars, _funcs', 'from __main__ import _tmpl, _vars, _funcs',
number=10000) number=10000)
print(comp_time) print(comp_time)
print('Speedup:', interp_time / comp_time) print(u'Speedup:', interp_time / comp_time)

libs/beets/util/hidden.py (new file, 88 lines)

@ -0,0 +1,88 @@
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Simple library to work out if a file is hidden on different platforms."""
from __future__ import division, absolute_import, print_function
import os
import stat
import ctypes
import sys
def _is_hidden_osx(path):
"""Return whether or not a file is hidden on OS X.
This uses os.lstat to work out if a file has the "hidden" flag.
"""
file_stat = os.lstat(path)
if hasattr(file_stat, 'st_flags') and hasattr(stat, 'UF_HIDDEN'):
return bool(file_stat.st_flags & stat.UF_HIDDEN)
else:
return False
def _is_hidden_win(path):
"""Return whether or not a file is hidden on Windows.
This uses GetFileAttributes to work out if a file has the "hidden" flag
(FILE_ATTRIBUTE_HIDDEN).
"""
# FILE_ATTRIBUTE_HIDDEN = 2 (0x2) from GetFileAttributes documentation.
hidden_mask = 2
# Retrieve the attributes for the file.
attrs = ctypes.windll.kernel32.GetFileAttributesW(path)
# Ensure we have valid attributes and compare them against the mask.
return attrs >= 0 and attrs & hidden_mask
def _is_hidden_dot(path):
"""Return whether or not a file starts with a dot.
Files starting with a dot are seen as "hidden" files on Unix-based OSes.
"""
return os.path.basename(path).startswith('.')
def is_hidden(path):
"""Return whether or not a file is hidden.
This method works differently depending on the platform it is called on.
On OS X, it uses both the result of `is_hidden_osx` and `is_hidden_dot` to
work out if a file is hidden.
On Windows, it uses the result of `is_hidden_win` to work out if a file is
hidden.
On any other operating systems (i.e. Linux), it uses `is_hidden_dot` to
work out if a file is hidden.
"""
# Convert the path to unicode if it is not already.
if not isinstance(path, unicode):
path = path.decode('utf-8')
# Run platform specific functions depending on the platform
if sys.platform == 'darwin':
return _is_hidden_osx(path) or _is_hidden_dot(path)
elif sys.platform == 'win32':
return _is_hidden_win(path)
else:
return _is_hidden_dot(path)
__all__ = ['is_hidden']
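Usage is a single call; the platform dispatch is internal (file names here are hypothetical):

from beets.util.hidden import is_hidden

for p in [u'.gitignore', u'song.mp3']:
    print(p, is_hidden(p))
# On Linux only the dotfile test applies; on OS X the UF_HIDDEN flag
# is checked too, and on Windows only the hidden attribute counts.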


@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
# This file is part of beets. # This file is part of beets.
# Copyright 2013, Adrian Sampson. # Copyright 2016, Adrian Sampson.
# #
# Permission is hereby granted, free of charge, to any person obtaining # Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the # a copy of this software and associated documentation files (the
@ -30,18 +31,19 @@ up a bottleneck stage by dividing its work among multiple threads.
To do so, pass an iterable of coroutines to the Pipeline constructor To do so, pass an iterable of coroutines to the Pipeline constructor
in place of any single coroutine. in place of any single coroutine.
""" """
from __future__ import print_function
from __future__ import division, absolute_import, print_function
import Queue import Queue
from threading import Thread, Lock from threading import Thread, Lock
import sys import sys
import types
BUBBLE = '__PIPELINE_BUBBLE__' BUBBLE = '__PIPELINE_BUBBLE__'
POISON = '__PIPELINE_POISON__' POISON = '__PIPELINE_POISON__'
DEFAULT_QUEUE_SIZE = 16 DEFAULT_QUEUE_SIZE = 16
def _invalidate_queue(q, val=None, sync=True): def _invalidate_queue(q, val=None, sync=True):
"""Breaks a Queue such that it never blocks, always has size 1, """Breaks a Queue such that it never blocks, always has size 1,
and has no maximum size. get()ing from the queue returns `val`, and has no maximum size. get()ing from the queue returns `val`,
@ -50,8 +52,10 @@ def _invalidate_queue(q, val=None, sync=True):
""" """
def _qsize(len=len): def _qsize(len=len):
return 1 return 1
def _put(item): def _put(item):
pass pass
def _get(): def _get():
return val return val
@ -70,6 +74,7 @@ def _invalidate_queue(q, val=None, sync=True):
if sync: if sync:
q.mutex.release() q.mutex.release()
class CountedQueue(Queue.Queue): class CountedQueue(Queue.Queue):
"""A queue that keeps track of the number of threads that are """A queue that keeps track of the number of threads that are
still feeding into it. The queue is poisoned when all threads are still feeding into it. The queue is poisoned when all threads are
@ -104,6 +109,7 @@ class CountedQueue(Queue.Queue):
# Replacement _get invalidates when no items remain. # Replacement _get invalidates when no items remain.
_old_get = self._get _old_get = self._get
def _get(): def _get():
out = _old_get() out = _old_get()
if not self.queue: if not self.queue:
@ -117,18 +123,67 @@ class CountedQueue(Queue.Queue):
# No items. Invalidate immediately. # No items. Invalidate immediately.
_invalidate_queue(self, POISON, False) _invalidate_queue(self, POISON, False)
class MultiMessage(object): class MultiMessage(object):
"""A message yielded by a pipeline stage encapsulating multiple """A message yielded by a pipeline stage encapsulating multiple
values to be sent to the next stage. values to be sent to the next stage.
""" """
def __init__(self, messages): def __init__(self, messages):
self.messages = messages self.messages = messages
def multiple(messages): def multiple(messages):
"""Yield multiple([message, ..]) from a pipeline stage to send """Yield multiple([message, ..]) from a pipeline stage to send
multiple values to the next pipeline stage. multiple values to the next pipeline stage.
""" """
return MultiMessage(messages) return MultiMessage(messages)
def stage(func):
"""Decorate a function to become a simple stage.
>>> @stage
... def add(n, i):
... return i + n
>>> pipe = Pipeline([
... iter([1, 2, 3]),
... add(2),
... ])
>>> list(pipe.pull())
[3, 4, 5]
"""
def coro(*args):
task = None
while True:
task = yield task
task = func(*(args + (task,)))
return coro
def mutator_stage(func):
"""Decorate a function that manipulates items in a coroutine to
become a simple stage.
>>> @mutator_stage
... def setkey(key, item):
... item[key] = True
>>> pipe = Pipeline([
... iter([{'x': False}, {'a': False}]),
... setkey('x'),
... ])
>>> list(pipe.pull())
[{'x': True}, {'a': False, 'x': True}]
"""
def coro(*args):
task = None
while True:
task = yield task
func(*(args + (task,)))
return coro
def _allmsgs(obj): def _allmsgs(obj):
"""Returns a list of all the messages encapsulated in obj. If obj """Returns a list of all the messages encapsulated in obj. If obj
is a MultiMessage, returns its enclosed messages. If obj is BUBBLE, is a MultiMessage, returns its enclosed messages. If obj is BUBBLE,
@ -141,6 +196,7 @@ def _allmsgs(obj):
else: else:
return [obj] return [obj]
class PipelineThread(Thread): class PipelineThread(Thread):
"""Abstract base class for pipeline-stage threads.""" """Abstract base class for pipeline-stage threads."""
def __init__(self, all_threads): def __init__(self, all_threads):
@ -169,6 +225,7 @@ class PipelineThread(Thread):
for thread in self.all_threads: for thread in self.all_threads:
thread.abort() thread.abort()
class FirstPipelineThread(PipelineThread): class FirstPipelineThread(PipelineThread):
"""The thread running the first stage in a parallel pipeline setup. """The thread running the first stage in a parallel pipeline setup.
The coroutine should just be a generator. The coroutine should just be a generator.
@ -191,7 +248,7 @@ class FirstPipelineThread(PipelineThread):
# Get the value from the generator. # Get the value from the generator.
try: try:
msg = self.coro.next() msg = next(self.coro)
except StopIteration: except StopIteration:
break break
@ -209,6 +266,7 @@ class FirstPipelineThread(PipelineThread):
# Generator finished; shut down the pipeline. # Generator finished; shut down the pipeline.
self.out_queue.release() self.out_queue.release()
class MiddlePipelineThread(PipelineThread): class MiddlePipelineThread(PipelineThread):
"""A thread running any stage in the pipeline except the first or """A thread running any stage in the pipeline except the first or
last. last.
@ -223,7 +281,7 @@ class MiddlePipelineThread(PipelineThread):
def run(self): def run(self):
try: try:
# Prime the coroutine. # Prime the coroutine.
self.coro.next() next(self.coro)
while True: while True:
with self.abort_lock: with self.abort_lock:
@ -256,6 +314,7 @@ class MiddlePipelineThread(PipelineThread):
# Pipeline is shutting down normally. # Pipeline is shutting down normally.
self.out_queue.release() self.out_queue.release()
class LastPipelineThread(PipelineThread): class LastPipelineThread(PipelineThread):
"""A thread running the last stage in a pipeline. The coroutine """A thread running the last stage in a pipeline. The coroutine
should yield nothing. should yield nothing.
@ -267,7 +326,7 @@ class LastPipelineThread(PipelineThread):
def run(self): def run(self):
# Prime the coroutine. # Prime the coroutine.
self.coro.next() next(self.coro)
try: try:
while True: while True:
@ -291,6 +350,7 @@ class LastPipelineThread(PipelineThread):
self.abort_all(sys.exc_info()) self.abort_all(sys.exc_info())
return return
class Pipeline(object): class Pipeline(object):
"""Represents a staged pattern of work. Each stage in the pipeline """Represents a staged pattern of work. Each stage in the pipeline
is a coroutine that receives messages from the previous stage and is a coroutine that receives messages from the previous stage and
@ -301,7 +361,7 @@ class Pipeline(object):
be at least two stages. be at least two stages.
""" """
if len(stages) < 2: if len(stages) < 2:
raise ValueError('pipeline must have at least two stages') raise ValueError(u'pipeline must have at least two stages')
self.stages = [] self.stages = []
for stage in stages: for stage in stages:
if isinstance(stage, (list, tuple)): if isinstance(stage, (list, tuple)):
@ -322,7 +382,8 @@ class Pipeline(object):
messages between the stages are stored in queues of the given messages between the stages are stored in queues of the given
size. size.
""" """
queues = [CountedQueue(queue_size) for i in range(len(self.stages)-1)] queue_count = len(self.stages) - 1
queues = [CountedQueue(queue_size) for i in range(queue_count)]
threads = [] threads = []
# Set up first stage. # Set up first stage.
@ -330,10 +391,10 @@ class Pipeline(object):
threads.append(FirstPipelineThread(coro, queues[0], threads)) threads.append(FirstPipelineThread(coro, queues[0], threads))
# Middle stages. # Middle stages.
for i in range(1, len(self.stages)-1): for i in range(1, queue_count):
for coro in self.stages[i]: for coro in self.stages[i]:
threads.append(MiddlePipelineThread( threads.append(MiddlePipelineThread(
coro, queues[i-1], queues[i], threads coro, queues[i - 1], queues[i], threads
)) ))
# Last stage. # Last stage.
@ -383,7 +444,7 @@ class Pipeline(object):
# "Prime" the coroutines. # "Prime" the coroutines.
for coro in coros[1:]: for coro in coros[1:]:
coro.next() next(coro)
# Begin the pipeline. # Begin the pipeline.
for out in coros[0]: for out in coros[0]:
@ -405,20 +466,23 @@ if __name__ == '__main__':
# in parallel. # in parallel.
def produce(): def produce():
for i in range(5): for i in range(5):
print('generating %i' % i) print(u'generating %i' % i)
time.sleep(1) time.sleep(1)
yield i yield i
def work(): def work():
num = yield num = yield
while True: while True:
print('processing %i' % num) print(u'processing %i' % num)
time.sleep(2) time.sleep(2)
num = yield num*2 num = yield num * 2
def consume(): def consume():
while True: while True:
num = yield num = yield
time.sleep(1) time.sleep(1)
print('received %i' % num) print(u'received %i' % num)
ts_start = time.time() ts_start = time.time()
Pipeline([produce(), work(), consume()]).run_sequential() Pipeline([produce(), work(), consume()]).run_sequential()
ts_seq = time.time() ts_seq = time.time()
@ -426,29 +490,30 @@ if __name__ == '__main__':
ts_par = time.time() ts_par = time.time()
Pipeline([produce(), (work(), work()), consume()]).run_parallel() Pipeline([produce(), (work(), work()), consume()]).run_parallel()
ts_end = time.time() ts_end = time.time()
print('Sequential time:', ts_seq - ts_start) print(u'Sequential time:', ts_seq - ts_start)
print('Parallel time:', ts_par - ts_seq) print(u'Parallel time:', ts_par - ts_seq)
print('Multiply-parallel time:', ts_end - ts_par) print(u'Multiply-parallel time:', ts_end - ts_par)
print() print()
# Test a pipeline that raises an exception. # Test a pipeline that raises an exception.
def exc_produce(): def exc_produce():
for i in range(10): for i in range(10):
print('generating %i' % i) print(u'generating %i' % i)
time.sleep(1) time.sleep(1)
yield i yield i
def exc_work(): def exc_work():
num = yield num = yield
while True: while True:
print('processing %i' % num) print(u'processing %i' % num)
time.sleep(3) time.sleep(3)
if num == 3: if num == 3:
raise Exception() raise Exception()
num = yield num * 2 num = yield num * 2
def exc_consume(): def exc_consume():
while True: while True:
num = yield num = yield
#if num == 4: print(u'received %i' % num)
# raise Exception()
print('received %i' % num)
Pipeline([exc_produce(), exc_work(), exc_consume()]).run_parallel(1) Pipeline([exc_produce(), exc_work(), exc_consume()]).run_parallel(1)
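The new decorators compose with the existing parallelism: a tuple of stage instances fans the work out across threads. A sketch under assumed names (produce, double, consume are illustrative):

from beets.util.pipeline import Pipeline, stage

def produce():
    for i in range(5):
        yield i

@stage
def double(factor, item):
    return item * factor

def consume():
    while True:
        item = yield
        print(item)

# Two copies of the same stage run in parallel threads.
Pipeline([produce(), (double(2), double(2)), consume()]).run_parallel()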


@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
# This file is part of beets. # This file is part of beets.
# Copyright 2013, Adrian Sampson. # Copyright 2016, Adrian Sampson.
# #
# Permission is hereby granted, free of charge, to any person obtaining # Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the # a copy of this software and associated documentation files (the
@ -15,11 +16,14 @@
"""A simple utility for constructing filesystem-like trees from beets """A simple utility for constructing filesystem-like trees from beets
libraries. libraries.
""" """
from __future__ import division, absolute_import, print_function
from collections import namedtuple from collections import namedtuple
from beets import util from beets import util
Node = namedtuple('Node', ['files', 'dirs']) Node = namedtuple('Node', ['files', 'dirs'])
def _insert(node, path, itemid): def _insert(node, path, itemid):
"""Insert an item into a virtual filesystem node.""" """Insert an item into a virtual filesystem node."""
if len(path) == 1: if len(path) == 1:
@ -33,6 +37,7 @@ def _insert(node, path, itemid):
node.dirs[dirname] = Node({}, {}) node.dirs[dirname] = Node({}, {})
_insert(node.dirs[dirname], rest, itemid) _insert(node.dirs[dirname], rest, itemid)
def libtree(lib): def libtree(lib):
"""Generates a filesystem-like directory tree for the files """Generates a filesystem-like directory tree for the files
contained in `lib`. Filesystem nodes are (files, dirs) named contained in `lib`. Filesystem nodes are (files, dirs) named
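The resulting structure is plain data. A sketch of what _insert() builds, with a hypothetical path and item ID:

root = Node(files={}, dirs={})
_insert(root, [u'music', u'album', u'01.mp3'], itemid=42)
# root.dirs[u'music'].dirs[u'album'].files[u'01.mp3'] == 42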


@ -0,0 +1,22 @@
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""A namespace package for beets plugins."""
from __future__ import division, absolute_import, print_function
# Make this a namespace package.
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)


@ -0,0 +1,165 @@
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2015-2016, Ohm Patel.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Fetch various AcousticBrainz metadata using MBID.
"""
from __future__ import division, absolute_import, print_function
import requests
import operator
from beets import plugins, ui
from functools import reduce
ACOUSTIC_BASE = "https://acousticbrainz.org/"
LEVELS = ["/low-level", "/high-level"]
class AcousticPlugin(plugins.BeetsPlugin):
def __init__(self):
super(AcousticPlugin, self).__init__()
self.config.add({'auto': True})
if self.config['auto']:
self.register_listener('import_task_files',
self.import_task_files)
def commands(self):
cmd = ui.Subcommand('acousticbrainz',
help=u"fetch metadata from AcousticBrainz")
def func(lib, opts, args):
items = lib.items(ui.decargs(args))
fetch_info(self._log, items, ui.should_write())
cmd.func = func
return [cmd]
def import_task_files(self, session, task):
"""Function is called upon beet import.
"""
items = task.imported_items()
fetch_info(self._log, items, False)
def fetch_info(log, items, write):
"""Get data from AcousticBrainz for the items.
"""
def get_value(*map_path):
try:
return reduce(operator.getitem, map_path, data)
except KeyError:
log.debug(u'Invalid Path: {}', map_path)
for item in items:
if item.mb_trackid:
log.info(u'getting data for: {}', item)
# Fetch the data from the AB API.
urls = [generate_url(item.mb_trackid, path) for path in LEVELS]
log.debug(u'fetching URLs: {}', ' '.join(urls))
try:
res = [requests.get(url) for url in urls]
except requests.RequestException as exc:
log.info(u'request error: {}', exc)
continue
# Check for missing tracks.
if any(r.status_code == 404 for r in res):
log.info(u'recording ID {} not found', item.mb_trackid)
continue
# Parse the JSON response.
try:
data = res[0].json()
data.update(res[1].json())
except ValueError:
log.debug(u'Invalid Response: {} & {}', res[0].text, res[1].text)
continue  # skip this item rather than reusing stale data
# Get each field and assign it on the item.
item.danceable = get_value(
"highlevel", "danceability", "all", "danceable",
)
item.gender = get_value(
"highlevel", "gender", "value",
)
item.genre_rosamerica = get_value(
"highlevel", "genre_rosamerica", "value"
)
item.mood_acoustic = get_value(
"highlevel", "mood_acoustic", "all", "acoustic"
)
item.mood_aggressive = get_value(
"highlevel", "mood_aggressive", "all", "aggressive"
)
item.mood_electronic = get_value(
"highlevel", "mood_electronic", "all", "electronic"
)
item.mood_happy = get_value(
"highlevel", "mood_happy", "all", "happy"
)
item.mood_party = get_value(
"highlevel", "mood_party", "all", "party"
)
item.mood_relaxed = get_value(
"highlevel", "mood_relaxed", "all", "relaxed"
)
item.mood_sad = get_value(
"highlevel", "mood_sad", "all", "sad"
)
item.rhythm = get_value(
"highlevel", "ismir04_rhythm", "value"
)
item.tonal = get_value(
"highlevel", "tonal_atonal", "all", "tonal"
)
item.voice_instrumental = get_value(
"highlevel", "voice_instrumental", "value"
)
item.average_loudness = get_value(
"lowlevel", "average_loudness"
)
item.chords_changes_rate = get_value(
"tonal", "chords_changes_rate"
)
item.chords_key = get_value(
"tonal", "chords_key"
)
item.chords_number_rate = get_value(
"tonal", "chords_number_rate"
)
item.chords_scale = get_value(
"tonal", "chords_scale"
)
item.initial_key = '{} {}'.format(
get_value("tonal", "key_key"),
get_value("tonal", "key_scale")
)
item.key_strength = get_value(
"tonal", "key_strength"
)
# Store the data.
item.store()
if write:
item.try_write()
def generate_url(mbid, level):
"""Generates AcousticBrainz end point url for given MBID.
"""
return ACOUSTIC_BASE + mbid + level
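The URL construction is plain string concatenation. Using as an example the MBID that also appears in bench.py below:

mbid = u'9c5c043e-bc69-4edb-81a4-1aaf9c81e6dc'
print(generate_url(mbid, LEVELS[0]))
# -> https://acousticbrainz.org/9c5c043e-bc69-4edb-81a4-1aaf9c81e6dc/low-level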

libs/beetsplug/badfiles.py (new file, 120 lines)

@ -0,0 +1,120 @@
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, François-Xavier Thomas.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Use command-line tools to check for audio file corruption.
"""
from __future__ import division, absolute_import, print_function
from beets.plugins import BeetsPlugin
from beets.ui import Subcommand
from beets.util import displayable_path, confit
from beets import ui
from subprocess import check_output, CalledProcessError, list2cmdline, STDOUT
import shlex
import os
import errno
import sys
class BadFiles(BeetsPlugin):
def run_command(self, cmd):
self._log.debug(u"running command: {}",
displayable_path(list2cmdline(cmd)))
try:
output = check_output(cmd, stderr=STDOUT)
errors = 0
status = 0
except CalledProcessError as e:
output = e.output
errors = 1
status = e.returncode
except OSError as e:
if e.errno == errno.ENOENT:
ui.print_(u"command not found: {}".format(cmd[0]))
sys.exit(1)
else:
raise
output = output.decode(sys.getfilesystemencoding())
return status, errors, [line for line in output.split("\n") if line]
def check_mp3val(self, path):
status, errors, output = self.run_command(["mp3val", path])
if status == 0:
output = [line for line in output if line.startswith("WARNING:")]
errors = len(output)
return status, errors, output
def check_flac(self, path):
return self.run_command(["flac", "-wst", path])
def check_custom(self, command):
def checker(path):
cmd = shlex.split(command)
cmd.append(path)
return self.run_command(cmd)
return checker
def get_checker(self, ext):
ext = ext.lower()
try:
command = self.config['commands'].get(dict).get(ext)
except confit.NotFoundError:
command = None
if command:
return self.check_custom(command)
elif ext == "mp3":
return self.check_mp3val
elif ext == "flac":
return self.check_flac
def check_bad(self, lib, opts, args):
for item in lib.items(ui.decargs(args)):
# First, check whether the path exists. If not, the user
# should probably run `beet update` to clean up their library.
dpath = displayable_path(item.path)
self._log.debug(u"checking path: {}", dpath)
if not os.path.exists(item.path):
ui.print_(u"{}: file does not exist".format(
ui.colorize('text_error', dpath)))
continue  # nothing to check if the file is missing
# Run the checker against the file if one is found
ext = os.path.splitext(item.path)[1][1:]
checker = self.get_checker(ext)
if not checker:
continue
path = item.path
if not isinstance(path, unicode):
path = item.path.decode(sys.getfilesystemencoding())
status, errors, output = checker(path)
if status > 0:
ui.print_(u"{}: checker exited withs status {}"
.format(ui.colorize('text_error', dpath), status))
for line in output:
ui.print_(" {}".format(displayable_path(line)))
elif errors > 0:
ui.print_(u"{}: checker found {} errors or warnings"
.format(ui.colorize('text_warning', dpath), errors))
for line in output:
ui.print_(u" {}".format(displayable_path(line)))
else:
ui.print_(u"{}: ok".format(ui.colorize('text_success', dpath)))
def commands(self):
bad_command = Subcommand('bad',
help=u'check for corrupt or missing files')
bad_command.func = self.check_bad
return [bad_command]
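A hedged sketch of the checker contract: get_checker() maps an extension to a callable, and every checker returns the same (status, errors, output_lines) triple as run_command(). The instantiation and path are assumptions, not part of the diff:

plugin = BadFiles()                   # assumes normal beets plugin setup
checker = plugin.get_checker(u'mp3')  # resolves to check_mp3val
if checker:
    status, errors, output = checker(u'/music/song.mp3')  # hypothetical path
    # status: checker exit code; errors: count of problems it reported.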

libs/beetsplug/bench.py (new file, 109 lines)

@ -0,0 +1,109 @@
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Some simple performance benchmarks for beets.
"""
from __future__ import division, absolute_import, print_function
from beets.plugins import BeetsPlugin
from beets import ui
from beets import vfs
from beets import library
from beets.util.functemplate import Template
from beets.autotag import match
from beets import plugins
from beets import importer
import cProfile
import timeit
def aunique_benchmark(lib, prof):
def _build_tree():
vfs.libtree(lib)
# Measure path generation performance with %aunique{} included.
lib.path_formats = [
(library.PF_KEY_DEFAULT,
Template('$albumartist/$album%aunique{}/$track $title')),
]
if prof:
cProfile.runctx('_build_tree()', {}, {'_build_tree': _build_tree},
'paths.withaunique.prof')
else:
interval = timeit.timeit(_build_tree, number=1)
print('With %aunique:', interval)
# And with %aunique replaced with a "cheap" no-op function.
lib.path_formats = [
(library.PF_KEY_DEFAULT,
Template('$albumartist/$album%lower{}/$track $title')),
]
if prof:
cProfile.runctx('_build_tree()', {}, {'_build_tree': _build_tree},
'paths.withoutaunique.prof')
else:
interval = timeit.timeit(_build_tree, number=1)
print('Without %aunique:', interval)
def match_benchmark(lib, prof, query=None, album_id=None):
# If no album ID is provided, we'll match against a suitably huge
# album.
if not album_id:
album_id = '9c5c043e-bc69-4edb-81a4-1aaf9c81e6dc'
# Get an album from the library to use as the source for the match.
items = lib.albums(query).get().items()
# Ensure fingerprinting is invoked (if enabled).
plugins.send('import_task_start',
task=importer.ImportTask(None, None, items),
session=importer.ImportSession(lib, None, None, None))
# Run the match.
def _run_match():
match.tag_album(items, search_ids=[album_id])
if prof:
cProfile.runctx('_run_match()', {}, {'_run_match': _run_match},
'match.prof')
else:
interval = timeit.timeit(_run_match, number=1)
print('match duration:', interval)
class BenchmarkPlugin(BeetsPlugin):
"""A plugin for performing some simple performance benchmarks.
"""
def commands(self):
aunique_bench_cmd = ui.Subcommand('bench_aunique',
help='benchmark for %aunique{}')
aunique_bench_cmd.parser.add_option('-p', '--profile',
action='store_true', default=False,
help='performance profiling')
aunique_bench_cmd.func = lambda lib, opts, args: \
aunique_benchmark(lib, opts.profile)
match_bench_cmd = ui.Subcommand('bench_match',
help='benchmark for track matching')
match_bench_cmd.parser.add_option('-p', '--profile',
action='store_true', default=False,
help='performance profiling')
match_bench_cmd.parser.add_option('-i', '--id', default=None,
help='album ID to match against')
match_bench_cmd.func = lambda lib, opts, args: \
match_benchmark(lib, opts.profile, ui.decargs(args), opts.id)
return [aunique_bench_cmd, match_bench_cmd]
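Both subcommands are invoked through the regular beet CLI; the flags are exactly the ones registered above:

# beet bench_aunique -p          # profile path generation into paths.*.prof
# beet bench_match -i <ALBUM_ID> # time matching against a specific album ID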

(diff suppressed: file too large)

@ -0,0 +1,223 @@
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""A wrapper for the GStreamer Python bindings that exposes a simple
music player.
"""
from __future__ import division, absolute_import, print_function
import sys
import time
import gobject
import thread
import os
import copy
import urllib
import pygst
pygst.require('0.10')
import gst # noqa
class GstPlayer(object):
"""A music player abstracting GStreamer's Playbin element.
Create a player object, then call run() to start a thread with a
runloop. Then call play_file to play music. Use player.playing
to check whether music is currently playing.
A basic play queue is also implemented (just a Python list,
player.queue, whose last element is next to play). To use it,
just call enqueue() and then play(). When a track finishes and
another is available on the queue, it is played automatically.
"""
def __init__(self, finished_callback=None):
"""Initialize a player.
If a finished_callback is provided, it is called every time a
track started with play_file finishes.
Once the player has been created, call run() to begin the main
runloop in a separate thread.
"""
# Set up the Gstreamer player. From the pygst tutorial:
# http://pygstdocs.berlios.de/pygst-tutorial/playbin.html
self.player = gst.element_factory_make("playbin2", "player")
fakesink = gst.element_factory_make("fakesink", "fakesink")
self.player.set_property("video-sink", fakesink)
bus = self.player.get_bus()
bus.add_signal_watch()
bus.connect("message", self._handle_message)
# Set up our own stuff.
self.playing = False
self.finished_callback = finished_callback
self.cached_time = None
self._volume = 1.0
def _get_state(self):
"""Returns the current state flag of the playbin."""
# gst's get_state function returns a 3-tuple; we just want the
# status flag in position 1.
return self.player.get_state()[1]
def _handle_message(self, bus, message):
"""Callback for status updates from GStreamer."""
if message.type == gst.MESSAGE_EOS:
# file finished playing
self.player.set_state(gst.STATE_NULL)
self.playing = False
self.cached_time = None
if self.finished_callback:
self.finished_callback()
elif message.type == gst.MESSAGE_ERROR:
# error
self.player.set_state(gst.STATE_NULL)
err, debug = message.parse_error()
print(u"Error: {0}".format(err))
self.playing = False
def _set_volume(self, volume):
"""Set the volume level to a value in the range [0, 1.5]."""
# And the volume for the playbin.
self._volume = volume
self.player.set_property("volume", volume)
def _get_volume(self):
"""Get the volume as a float in the range [0, 1.5]."""
return self._volume
volume = property(_get_volume, _set_volume)
def play_file(self, path):
"""Immediately begin playing the audio file at the given
path.
"""
self.player.set_state(gst.STATE_NULL)
if isinstance(path, unicode):
path = path.encode('utf8')
uri = 'file://' + urllib.quote(path)
self.player.set_property("uri", uri)
self.player.set_state(gst.STATE_PLAYING)
self.playing = True
def play(self):
"""If paused, resume playback."""
if self._get_state() == gst.STATE_PAUSED:
self.player.set_state(gst.STATE_PLAYING)
self.playing = True
def pause(self):
"""Pause playback."""
self.player.set_state(gst.STATE_PAUSED)
def stop(self):
"""Halt playback."""
self.player.set_state(gst.STATE_NULL)
self.playing = False
self.cached_time = None
def run(self):
"""Start a new thread for the player.
Call this function before trying to play any music with
play_file() or play().
"""
# If we don't use the MainLoop, messages are never sent.
gobject.threads_init()
def start():
loop = gobject.MainLoop()
loop.run()
thread.start_new_thread(start, ())
def time(self):
"""Returns a tuple containing (position, length) where both
values are integers in seconds. If no stream is available,
returns (0, 0).
"""
fmt = gst.Format(gst.FORMAT_TIME)
try:
pos = self.player.query_position(fmt, None)[0] / (10 ** 9)
length = self.player.query_duration(fmt, None)[0] / (10 ** 9)
self.cached_time = (pos, length)
return (pos, length)
except gst.QueryError:
# Stream not ready. For small gaps of time, for instance
# after seeking, the time values are unavailable. For this
# reason, we cache the most recent values.
if self.playing and self.cached_time:
return self.cached_time
else:
return (0, 0)
def seek(self, position):
"""Seeks to position (in seconds)."""
cur_pos, cur_len = self.time()
if position > cur_len:
self.stop()
return
fmt = gst.Format(gst.FORMAT_TIME)
ns = position * 10 ** 9 # convert to nanoseconds
self.player.seek_simple(fmt, gst.SEEK_FLAG_FLUSH, ns)
# save new cached time
self.cached_time = (position, cur_len)
def block(self):
"""Block until playing finishes."""
while self.playing:
time.sleep(1)
def play_simple(paths):
"""Play the files in paths in a straightforward way, without
using the player's callback function.
"""
p = GstPlayer()
p.run()
for path in paths:
p.play_file(path)
p.block()
def play_complicated(paths):
"""Play the files in the path one after the other by using the
callback function to advance to the next song.
"""
my_paths = copy.copy(paths)
def next_song():
my_paths.pop(0)
p.play_file(my_paths[0])
p = GstPlayer(next_song)
p.run()
p.play_file(my_paths[0])
while my_paths:
time.sleep(1)
if __name__ == '__main__':
# A very simple command-line player. Just give it names of audio
# files on the command line; these are all played in sequence.
paths = [os.path.abspath(os.path.expanduser(p))
for p in sys.argv[1:]]
# play_simple(paths)
play_complicated(paths)
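Beyond the two demo helpers, the position API is worth a sketch: time() reports whole seconds and seek() stops playback when asked to jump past the end. The file path is hypothetical:

p = GstPlayer()
p.run()
p.play_file('/music/song.mp3')   # hypothetical path
pos, length = p.time()
p.seek(pos + 30)                 # calls stop() if this passes `length`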

libs/beetsplug/bpm.py (new file, 87 lines)

@ -0,0 +1,87 @@
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, aroquen
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Determine BPM by pressing a key to the rhythm."""
from __future__ import division, absolute_import, print_function
import time
from beets import ui
from beets.plugins import BeetsPlugin
def bpm(max_strokes):
"""Returns average BPM (possibly of a playing song)
listening to Enter keystrokes.
"""
t0 = None
dt = []
for i in range(max_strokes):
# Press enter to the rhythm...
s = raw_input()
if s == '':
t1 = time.time()
# Only start measuring at the second stroke
if t0:
dt.append(t1 - t0)
t0 = t1
else:
break
# Return average BPM
# bpm = (max_strokes-1) / sum(dt) * 60
ave = sum([1.0 / dti * 60 for dti in dt]) / len(dt)
return ave
class BPMPlugin(BeetsPlugin):
def __init__(self):
super(BPMPlugin, self).__init__()
self.config.add({
u'max_strokes': 3,
u'overwrite': True,
})
def commands(self):
cmd = ui.Subcommand('bpm',
help=u'determine bpm of a song by pressing '
u'a key to the rhythm')
cmd.func = self.command
return [cmd]
def command(self, lib, opts, args):
self.get_bpm(lib.items(ui.decargs(args)))
def get_bpm(self, items, write=False):
overwrite = self.config['overwrite'].get(bool)
if len(items) > 1:
raise ValueError(u'Can only get bpm of one song at a time')
item = items[0]
if item['bpm']:
self._log.info(u'Found bpm {0}', item['bpm'])
if not overwrite:
return
self._log.info(u'Press Enter {0} times to the rhythm or Ctrl-D '
u'to exit', self.config['max_strokes'].get(int))
new_bpm = bpm(self.config['max_strokes'].get(int))
item['bpm'] = int(new_bpm)
if write:
item.try_write()
item.store()
self._log.info(u'Added new bpm {0}', item['bpm'])
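A worked example of the averaging in bpm(): three Enter strokes yield two intervals, and each interval maps to an instantaneous tempo of 60/dt:

dt = [0.5, 0.6]                       # seconds between strokes
ave = sum(1.0 / dti * 60 for dti in dt) / len(dt)
# (120.0 + 100.0) / 2 == 110.0 BPM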

libs/beetsplug/bucket.py (new file, 243 lines)

@ -0,0 +1,243 @@
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Fabrice Laporte.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Provides the %bucket{} function for path formatting.
"""
from __future__ import division, absolute_import, print_function
from datetime import datetime
import re
import string
from itertools import tee, izip
from beets import plugins, ui
ASCII_DIGITS = string.digits + string.ascii_lowercase
class BucketError(Exception):
pass
def pairwise(iterable):
"s -> (s0,s1), (s1,s2), (s2, s3), ..."
a, b = tee(iterable)
next(b, None)
return izip(a, b)
def span_from_str(span_str):
"""Build a span dict from the span string representation.
"""
def normalize_year(d, yearfrom):
"""Convert string to a 4 digits year
"""
if yearfrom < 100:
raise BucketError(u"%d must be expressed on 4 digits" % yearfrom)
# if two digits only, pick closest year that ends by these two
# digits starting from yearfrom
if d < 100:
if (d % 100) < (yearfrom % 100):
d = (yearfrom - yearfrom % 100) + 100 + d
else:
d = (yearfrom - yearfrom % 100) + d
return d
years = [int(x) for x in re.findall('\d+', span_str)]
if not years:
raise ui.UserError(u"invalid range defined for year bucket '%s': no "
u"year found" % span_str)
try:
years = [normalize_year(x, years[0]) for x in years]
except BucketError as exc:
raise ui.UserError(u"invalid range defined for year bucket '%s': %s" %
(span_str, exc))
res = {'from': years[0], 'str': span_str}
if len(years) > 1:
res['to'] = years[-1]
return res
def complete_year_spans(spans):
"""Set the `to` value of spans if empty and sort them chronologically.
"""
spans.sort(key=lambda x: x['from'])
for (x, y) in pairwise(spans):
if 'to' not in x:
x['to'] = y['from'] - 1
if spans and 'to' not in spans[-1]:
spans[-1]['to'] = datetime.now().year
def extend_year_spans(spans, spanlen, start=1900, end=2014):
"""Add new spans to given spans list so that every year of [start,end]
belongs to a span.
"""
extended_spans = spans[:]
for (x, y) in pairwise(spans):
# if a gap between two spans, fill the gap with as much spans of
# spanlen length as necessary
for span_from in range(x['to'] + 1, y['from'], spanlen):
extended_spans.append({'from': span_from})
# Create spans prior to declared ones
for span_from in range(spans[0]['from'] - spanlen, start, -spanlen):
extended_spans.append({'from': span_from})
# Create spans after the declared ones
for span_from in range(spans[-1]['to'] + 1, end, spanlen):
extended_spans.append({'from': span_from})
complete_year_spans(extended_spans)
return extended_spans
def build_year_spans(year_spans_str):
"""Build a chronologically ordered list of spans dict from unordered spans
stringlist.
"""
spans = []
for elem in year_spans_str:
spans.append(span_from_str(elem))
complete_year_spans(spans)
return spans
def str2fmt(s):
"""Deduces formatting syntax from a span string.
"""
regex = re.compile(r"(?P<bef>\D*)(?P<fromyear>\d+)(?P<sep>\D*)"
r"(?P<toyear>\d*)(?P<after>\D*)")
m = re.match(regex, s)
res = {'fromnchars': len(m.group('fromyear')),
'tonchars': len(m.group('toyear'))}
res['fmt'] = "%s%%s%s%s%s" % (m.group('bef'),
m.group('sep'),
'%s' if res['tonchars'] else '',
m.group('after'))
return res
def format_span(fmt, yearfrom, yearto, fromnchars, tonchars):
"""Return a span string representation.
"""
args = (bytes(yearfrom)[-fromnchars:])
if tonchars:
args = (bytes(yearfrom)[-fromnchars:], bytes(yearto)[-tonchars:])
return fmt % args
def extract_modes(spans):
"""Extract the most common spans lengths and representation formats
"""
rangelen = sorted([x['to'] - x['from'] + 1 for x in spans])
deflen = sorted(rangelen, key=rangelen.count)[-1]
reprs = [str2fmt(x['str']) for x in spans]
deffmt = sorted(reprs, key=reprs.count)[-1]
return deflen, deffmt
def build_alpha_spans(alpha_spans_str, alpha_regexs):
"""Extract alphanumerics from string and return sorted list of chars
[from...to]
"""
spans = []
for elem in alpha_spans_str:
if elem in alpha_regexs:
spans.append(re.compile(alpha_regexs[elem]))
else:
bucket = sorted([x for x in elem.lower() if x.isalnum()])
if bucket:
begin_index = ASCII_DIGITS.index(bucket[0])
end_index = ASCII_DIGITS.index(bucket[-1])
else:
raise ui.UserError(u"invalid range defined for alpha bucket "
u"'%s': no alphanumeric character found" %
elem)
spans.append(
re.compile(
"^[" + ASCII_DIGITS[begin_index:end_index + 1] +
ASCII_DIGITS[begin_index:end_index + 1].upper() + "]"
)
)
return spans
class BucketPlugin(plugins.BeetsPlugin):
def __init__(self):
super(BucketPlugin, self).__init__()
self.template_funcs['bucket'] = self._tmpl_bucket
self.config.add({
'bucket_year': [],
'bucket_alpha': [],
'bucket_alpha_regex': {},
'extrapolate': False
})
self.setup()
def setup(self):
"""Setup plugin from config options
"""
self.year_spans = build_year_spans(self.config['bucket_year'].get())
if self.year_spans and self.config['extrapolate']:
[self.ys_len_mode,
self.ys_repr_mode] = extract_modes(self.year_spans)
self.year_spans = extend_year_spans(self.year_spans,
self.ys_len_mode)
self.alpha_spans = build_alpha_spans(
self.config['bucket_alpha'].get(),
self.config['bucket_alpha_regex'].get()
)
def find_bucket_year(self, year):
"""Return bucket that matches given year or return the year
if no matching bucket.
"""
for ys in self.year_spans:
if ys['from'] <= int(year) <= ys['to']:
if 'str' in ys:
return ys['str']
else:
return format_span(self.ys_repr_mode['fmt'],
ys['from'], ys['to'],
self.ys_repr_mode['fromnchars'],
self.ys_repr_mode['tonchars'])
return year
def find_bucket_alpha(self, s):
"""Return alpha-range bucket that matches given string or return the
string initial if no matching bucket.
"""
for (i, span) in enumerate(self.alpha_spans):
if span.match(s):
return self.config['bucket_alpha'].get()[i]
return s[0].upper()
def _tmpl_bucket(self, text, field=None):
if not field and len(text) == 4 and text.isdigit():
field = 'year'
if field == 'year':
func = self.find_bucket_year
else:
func = self.find_bucket_alpha
return func(text)
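A worked example of the year-span machinery, assuming a config value of bucket_year: ['1990s', '2000s']:

spans = build_year_spans([u'1990s', u'2000s'])
# span_from_str(u'1990s') -> {'from': 1990, 'str': u'1990s'}, and
# complete_year_spans() fills in 'to': 1999 from the next span, so a
# path format using %bucket{1994} renders as '1990s'.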

308
libs/beetsplug/chroma.py Normal file
View file

@ -0,0 +1,308 @@
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Adds Chromaprint/Acoustid acoustic fingerprinting support to the
autotagger. Requires the pyacoustid library.
"""
from __future__ import division, absolute_import, print_function
from beets import plugins
from beets import ui
from beets import util
from beets import config
from beets.util import confit
from beets.autotag import hooks
import acoustid
from collections import defaultdict
API_KEY = '1vOwZtEn'
SCORE_THRESH = 0.5
TRACK_ID_WEIGHT = 10.0
COMMON_REL_THRESH = 0.6 # How many tracks must have an album in common?
MAX_RECORDINGS = 5
MAX_RELEASES = 5
# Stores the Acoustid match information for each track. This is
# populated when an import task begins and then used when searching for
# candidates. It maps audio file paths to (recording_ids, release_ids)
# pairs. If a given path is not present in the mapping, then no match
# was found.
_matches = {}
# Stores the fingerprint and Acoustid ID for each track. This is stored
# as metadata for each track for later use but is not relevant for
# autotagging.
_fingerprints = {}
_acoustids = {}
def prefix(it, count):
"""Truncate an iterable to at most `count` items.
"""
for i, v in enumerate(it):
if i >= count:
break
yield v
def acoustid_match(log, path):
"""Gets metadata for a file from Acoustid and populates the
_matches, _fingerprints, and _acoustids dictionaries accordingly.
"""
try:
duration, fp = acoustid.fingerprint_file(util.syspath(path))
except acoustid.FingerprintGenerationError as exc:
log.error(u'fingerprinting of {0} failed: {1}',
util.displayable_path(repr(path)), exc)
return None
_fingerprints[path] = fp
try:
res = acoustid.lookup(API_KEY, fp, duration,
meta='recordings releases')
except acoustid.AcoustidError as exc:
log.debug(u'fingerprint matching {0} failed: {1}',
util.displayable_path(repr(path)), exc)
return None
log.debug(u'chroma: fingerprinted {0}',
util.displayable_path(repr(path)))
# Ensure the response is usable and parse it.
if res['status'] != 'ok' or not res.get('results'):
log.debug(u'no match found')
return None
result = res['results'][0] # Best match.
if result['score'] < SCORE_THRESH:
log.debug(u'no results above threshold')
return None
_acoustids[path] = result['id']
# Get recording and releases from the result.
if not result.get('recordings'):
log.debug(u'no recordings found')
return None
recording_ids = []
release_ids = []
for recording in result['recordings']:
recording_ids.append(recording['id'])
if 'releases' in recording:
release_ids += [rel['id'] for rel in recording['releases']]
log.debug(u'matched recordings {0} on releases {1}',
recording_ids, release_ids)
_matches[path] = recording_ids, release_ids
# Plugin structure and autotagging logic.
def _all_releases(items):
"""Given an iterable of Items, determines (according to Acoustid)
which releases the items have in common. Generates release IDs.
"""
# Count the number of "hits" for each release.
relcounts = defaultdict(int)
for item in items:
if item.path not in _matches:
continue
_, release_ids = _matches[item.path]
for release_id in release_ids:
relcounts[release_id] += 1
for release_id, count in relcounts.iteritems():
if float(count) / len(items) > COMMON_REL_THRESH:
yield release_id
class AcoustidPlugin(plugins.BeetsPlugin):
def __init__(self):
super(AcoustidPlugin, self).__init__()
self.config.add({
'auto': True,
})
config['acoustid']['apikey'].redact = True
if self.config['auto']:
self.register_listener('import_task_start', self.fingerprint_task)
self.register_listener('import_task_apply', apply_acoustid_metadata)
def fingerprint_task(self, task, session):
return fingerprint_task(self._log, task, session)
def track_distance(self, item, info):
dist = hooks.Distance()
if item.path not in _matches or not info.track_id:
# Match failed or no track ID.
return dist
recording_ids, _ = _matches[item.path]
dist.add_expr('track_id', info.track_id not in recording_ids)
return dist
def candidates(self, items, artist, album, va_likely):
albums = []
for relid in prefix(_all_releases(items), MAX_RELEASES):
album = hooks.album_for_mbid(relid)
if album:
albums.append(album)
self._log.debug(u'acoustid album candidates: {0}', len(albums))
return albums
def item_candidates(self, item, artist, title):
if item.path not in _matches:
return []
recording_ids, _ = _matches[item.path]
tracks = []
for recording_id in prefix(recording_ids, MAX_RECORDINGS):
track = hooks.track_for_mbid(recording_id)
if track:
tracks.append(track)
self._log.debug(u'acoustid item candidates: {0}', len(tracks))
return tracks
def commands(self):
submit_cmd = ui.Subcommand('submit',
help=u'submit Acoustid fingerprints')
def submit_cmd_func(lib, opts, args):
try:
apikey = config['acoustid']['apikey'].get(unicode)
except confit.NotFoundError:
raise ui.UserError(u'no Acoustid user API key provided')
submit_items(self._log, apikey, lib.items(ui.decargs(args)))
submit_cmd.func = submit_cmd_func
fingerprint_cmd = ui.Subcommand(
'fingerprint',
help=u'generate fingerprints for items without them'
)
def fingerprint_cmd_func(lib, opts, args):
for item in lib.items(ui.decargs(args)):
fingerprint_item(self._log, item, write=ui.should_write())
fingerprint_cmd.func = fingerprint_cmd_func
return [submit_cmd, fingerprint_cmd]
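# CLI sketch for the two subcommands defined above (QUERY is assumed):
#
#     beet fingerprint [QUERY]   # compute and store missing fingerprints
#     beet submit [QUERY]        # send fingerprints to Acoustid; requires
#                                # an acoustid.apikey value in the user config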
# Hooks into import process.
def fingerprint_task(log, task, session):
"""Fingerprint each item in the task for later use during the
autotagging candidate search.
"""
items = task.items if task.is_album else [task.item]
for item in items:
acoustid_match(log, item.path)
def apply_acoustid_metadata(task, session):
"""Apply Acoustid metadata (fingerprint and ID) to the task's items.
"""
for item in task.imported_items():
if item.path in _fingerprints:
item.acoustid_fingerprint = _fingerprints[item.path]
if item.path in _acoustids:
item.acoustid_id = _acoustids[item.path]
# UI commands.
def submit_items(log, userkey, items, chunksize=64):
"""Submit fingerprints for the items to the Acoustid server.
"""
data = [] # The running list of dictionaries to submit.
def submit_chunk():
"""Submit the current accumulated fingerprint data."""
log.info(u'submitting {0} fingerprints', len(data))
try:
acoustid.submit(API_KEY, userkey, data)
except acoustid.AcoustidError as exc:
log.warn(u'acoustid submission error: {0}', exc)
del data[:]
for item in items:
fp = fingerprint_item(log, item)
# Construct a submission dictionary for this item.
item_data = {
'duration': int(item.length),
'fingerprint': fp,
}
if item.mb_trackid:
item_data['mbid'] = item.mb_trackid
log.debug(u'submitting MBID')
else:
item_data.update({
'track': item.title,
'artist': item.artist,
'album': item.album,
'albumartist': item.albumartist,
'year': item.year,
'trackno': item.track,
'discno': item.disc,
})
log.debug(u'submitting textual metadata')
data.append(item_data)
# If we have enough data, submit a chunk.
if len(data) >= chunksize:
submit_chunk()
# Submit remaining data in a final chunk.
if data:
submit_chunk()
def fingerprint_item(log, item, write=False):
"""Get the fingerprint for an Item. If the item already has a
fingerprint, it is not regenerated. If fingerprint generation fails,
return None. If the item is associated with a library, it is
saved to the database. If `write` is set, then the new fingerprints
are also written to files' metadata.
"""
# Get a fingerprint and length for this track.
if not item.length:
log.info(u'{0}: no duration available',
util.displayable_path(item.path))
elif item.acoustid_fingerprint:
if write:
log.info(u'{0}: fingerprint exists, skipping',
util.displayable_path(item.path))
else:
log.info(u'{0}: using existing fingerprint',
util.displayable_path(item.path))
return item.acoustid_fingerprint
else:
log.info(u'{0}: fingerprinting',
util.displayable_path(item.path))
try:
_, fp = acoustid.fingerprint_file(item.path)
item.acoustid_fingerprint = fp
if write:
log.info(u'{0}: writing fingerprint',
util.displayable_path(item.path))
item.try_write()
if item._db:
item.store()
return item.acoustid_fingerprint
except acoustid.FingerprintGenerationError as exc:
log.info(u'fingerprint generation failed: {0}', exc)

449
libs/beetsplug/convert.py Normal file
View file

@ -0,0 +1,449 @@
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Jakob Schnitzer.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Converts tracks or albums to external directory
"""
from __future__ import division, absolute_import, print_function
import os
import threading
import subprocess
import tempfile
import shlex
from string import Template
from beets import ui, util, plugins, config
from beets.plugins import BeetsPlugin
from beets.util.confit import ConfigTypeError
from beets import art
from beets.util.artresizer import ArtResizer
_fs_lock = threading.Lock()
_temp_files = [] # Keep track of temporary transcoded files for deletion.
# Some convenient alternate names for formats.
ALIASES = {
u'wma': u'windows media',
u'vorbis': u'ogg',
}
LOSSLESS_FORMATS = ['ape', 'flac', 'alac', 'wav', 'aiff']
def replace_ext(path, ext):
"""Return the path with its extension replaced by `ext`.
The new extension must not contain a leading dot.
"""
return os.path.splitext(path)[0] + b'.' + ext
def get_format(fmt=None):
"""Return the command template and the extension from the config.
"""
if not fmt:
fmt = config['convert']['format'].get(unicode).lower()
fmt = ALIASES.get(fmt, fmt)
try:
format_info = config['convert']['formats'][fmt].get(dict)
command = format_info['command']
extension = format_info.get('extension', fmt)
except KeyError:
raise ui.UserError(
u'convert: format {0} needs the "command" field'
.format(fmt)
)
except ConfigTypeError:
command = config['convert']['formats'][fmt].get(bytes)
extension = fmt
# Convenience and backwards-compatibility shortcuts.
keys = config['convert'].keys()
if 'command' in keys:
command = config['convert']['command'].get(unicode)
elif 'opts' in keys:
# Undocumented option for backwards compatibility with < 1.3.1.
command = u'ffmpeg -i $source -y {0} $dest'.format(
config['convert']['opts'].get(unicode)
)
if 'extension' in keys:
extension = config['convert']['extension'].get(unicode)
return (command.encode('utf8'), extension.encode('utf8'))
def should_transcode(item, fmt):
"""Determine whether the item should be transcoded as part of
conversion (i.e., its bitrate is high or it has the wrong format).
"""
if config['convert']['never_convert_lossy_files'] and \
not (item.format.lower() in LOSSLESS_FORMATS):
return False
maxbr = config['convert']['max_bitrate'].get(int)
return fmt.lower() != item.format.lower() or \
item.bitrate >= 1000 * maxbr
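# Worked example (assumed item attributes): with max_bitrate: 500, a FLAC
# item targeted at 'mp3' transcodes (formats differ), while a 320 kbps mp3
# item does not (same format and 320000 < 500 * 1000).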
class ConvertPlugin(BeetsPlugin):
def __init__(self):
super(ConvertPlugin, self).__init__()
self.config.add({
u'dest': None,
u'pretend': False,
u'threads': util.cpu_count(),
u'format': u'mp3',
u'formats': {
u'aac': {
u'command': u'ffmpeg -i $source -y -vn -acodec libfaac '
u'-aq 100 $dest',
u'extension': u'm4a',
},
u'alac': {
u'command': u'ffmpeg -i $source -y -vn -acodec alac $dest',
u'extension': u'm4a',
},
u'flac': u'ffmpeg -i $source -y -vn -acodec flac $dest',
u'mp3': u'ffmpeg -i $source -y -vn -aq 2 $dest',
u'opus':
u'ffmpeg -i $source -y -vn -acodec libopus -ab 96k $dest',
u'ogg':
u'ffmpeg -i $source -y -vn -acodec libvorbis -aq 3 $dest',
u'wma':
u'ffmpeg -i $source -y -vn -acodec wmav2 -vn $dest',
},
u'max_bitrate': 500,
u'auto': False,
u'tmpdir': None,
u'quiet': False,
u'embed': True,
u'paths': {},
u'never_convert_lossy_files': False,
u'copy_album_art': False,
u'album_art_maxwidth': 0,
})
self.import_stages = [self.auto_convert]
self.register_listener('import_task_files', self._cleanup)
def commands(self):
cmd = ui.Subcommand('convert', help=u'convert to external location')
cmd.parser.add_option('-p', '--pretend', action='store_true',
help=u'show actions but do nothing')
cmd.parser.add_option('-t', '--threads', action='store', type='int',
help=u'change the number of threads, \
defaults to maximum available processors')
cmd.parser.add_option('-k', '--keep-new', action='store_true',
dest='keep_new', help=u'keep only the converted \
and move the old files')
cmd.parser.add_option('-d', '--dest', action='store',
help=u'set the destination directory')
cmd.parser.add_option('-f', '--format', action='store', dest='format',
help=u'set the target format of the tracks')
cmd.parser.add_option('-y', '--yes', action='store_true', dest='yes',
help=u'do not ask for confirmation')
cmd.parser.add_album_option()
cmd.func = self.convert_func
return [cmd]
def auto_convert(self, config, task):
if self.config['auto']:
for item in task.imported_items():
self.convert_on_import(config.lib, item)
# Utilities converted from functions to methods on logging overhaul
def encode(self, command, source, dest, pretend=False):
"""Encode `source` to `dest` using command template `command`.
Raises `subprocess.CalledProcessError` if the command exited with a
non-zero status code.
"""
# The paths and arguments must be bytes.
assert isinstance(command, bytes)
assert isinstance(source, bytes)
assert isinstance(dest, bytes)
quiet = self.config['quiet'].get(bool)
if not quiet and not pretend:
self._log.info(u'Encoding {0}', util.displayable_path(source))
# Substitute $source and $dest in the argument list.
args = shlex.split(command)
for i, arg in enumerate(args):
args[i] = Template(arg).safe_substitute({
'source': source,
'dest': dest,
})
if pretend:
self._log.info(u' '.join(ui.decargs(args)))
return
try:
util.command_output(args)
except subprocess.CalledProcessError as exc:
# Something went wrong (probably Ctrl+C), remove temporary files
self._log.info(u'Encoding {0} failed. Cleaning up...',
util.displayable_path(source))
self._log.debug(u'Command {0} exited with status {1}',
exc.cmd.decode('utf8', 'ignore'),
exc.returncode)
util.remove(dest)
util.prune_dirs(os.path.dirname(dest))
raise
except OSError as exc:
raise ui.UserError(
u"convert: couldn't invoke '{0}': {1}".format(
u' '.join(ui.decargs(args)), exc
)
)
if not quiet and not pretend:
self._log.info(u'Finished encoding {0}',
util.displayable_path(source))
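# Expansion sketch (assumed paths): encode() turns the template
#     b'ffmpeg -i $source -y -vn -aq 2 $dest'
# with source=b'/music/in.flac' and dest=b'/tmp/out.mp3' into the argv
#     ['ffmpeg', '-i', '/music/in.flac', '-y', '-vn', '-aq', '2', '/tmp/out.mp3']
# before handing it to util.command_output().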
def convert_item(self, dest_dir, keep_new, path_formats, fmt,
pretend=False):
command, ext = get_format(fmt)
item, original, converted = None, None, None
while True:
item = yield (item, original, converted)
dest = item.destination(basedir=dest_dir,
path_formats=path_formats)
# When keeping the new file in the library, we first move the
# current (pristine) file to the destination. We'll then copy it
# back to its old path or transcode it to a new path.
if keep_new:
original = dest
converted = item.path
if should_transcode(item, fmt):
converted = replace_ext(converted, ext)
else:
original = item.path
if should_transcode(item, fmt):
dest = replace_ext(dest, ext)
converted = dest
# Ensure that only one thread tries to create directories at a
# time. (The existence check is not atomic with the directory
# creation inside this function.)
if not pretend:
with _fs_lock:
util.mkdirall(dest)
if os.path.exists(util.syspath(dest)):
self._log.info(u'Skipping {0} (target file exists)',
util.displayable_path(item.path))
continue
if keep_new:
if pretend:
self._log.info(u'mv {0} {1}',
util.displayable_path(item.path),
util.displayable_path(original))
else:
self._log.info(u'Moving to {0}',
util.displayable_path(original))
util.move(item.path, original)
if should_transcode(item, fmt):
try:
self.encode(command, original, converted, pretend)
except subprocess.CalledProcessError:
continue
else:
if pretend:
self._log.info(u'cp {0} {1}',
util.displayable_path(original),
util.displayable_path(converted))
else:
# No transcoding necessary.
self._log.info(u'Copying {0}',
util.displayable_path(item.path))
util.copy(original, converted)
if pretend:
continue
# Write tags from the database to the converted file.
item.try_write(path=converted)
if keep_new:
# If we're keeping the transcoded file, read it again (after
# writing) to get new bitrate, duration, etc.
item.path = converted
item.read()
item.store() # Store new path and audio data.
if self.config['embed']:
album = item.get_album()
if album and album.artpath:
self._log.debug(u'embedding album art from {}',
util.displayable_path(album.artpath))
art.embed_item(self._log, item, album.artpath,
itempath=converted)
if keep_new:
plugins.send('after_convert', item=item,
dest=dest, keepnew=True)
else:
plugins.send('after_convert', item=item,
dest=converted, keepnew=False)
def copy_album_art(self, album, dest_dir, path_formats, pretend=False):
"""Copies or converts the associated cover art of the album. Album must
have at least one track.
"""
if not album or not album.artpath:
return
album_item = album.items().get()
# Album shouldn't be empty.
if not album_item:
return
# Get the destination of the first item (track) of the album; we use
# this function to format the path according to path_formats.
dest = album_item.destination(basedir=dest_dir,
path_formats=path_formats)
# Remove item from the path.
dest = os.path.join(*util.components(dest)[:-1])
dest = album.art_destination(album.artpath, item_dir=dest)
if album.artpath == dest:
return
if not pretend:
util.mkdirall(dest)
if os.path.exists(util.syspath(dest)):
self._log.info(u'Skipping {0} (target file exists)',
util.displayable_path(album.artpath))
return
# Decide whether we need to resize the cover-art image.
resize = False
maxwidth = None
if self.config['album_art_maxwidth']:
maxwidth = self.config['album_art_maxwidth'].get(int)
size = ArtResizer.shared.get_size(album.artpath)
self._log.debug('image size: {}', size)
if size:
resize = size[0] > maxwidth
else:
self._log.warning(u'Could not get size of image (please see '
u'documentation for dependencies).')
# Either copy or resize (while copying) the image.
if resize:
self._log.info(u'Resizing cover art from {0} to {1}',
util.displayable_path(album.artpath),
util.displayable_path(dest))
if not pretend:
ArtResizer.shared.resize(maxwidth, album.artpath, dest)
else:
if pretend:
self._log.info(u'cp {0} {1}',
util.displayable_path(album.artpath),
util.displayable_path(dest))
else:
self._log.info(u'Copying cover art from {0} to {1}',
util.displayable_path(album.artpath),
util.displayable_path(dest))
util.copy(album.artpath, dest)
def convert_func(self, lib, opts, args):
if not opts.dest:
opts.dest = self.config['dest'].get()
if not opts.dest:
raise ui.UserError(u'no convert destination set')
opts.dest = util.bytestring_path(opts.dest)
if not opts.threads:
opts.threads = self.config['threads'].get(int)
if self.config['paths']:
path_formats = ui.get_path_formats(self.config['paths'])
else:
path_formats = ui.get_path_formats()
if not opts.format:
opts.format = self.config['format'].get(unicode).lower()
pretend = opts.pretend if opts.pretend is not None else \
self.config['pretend'].get(bool)
if not pretend:
ui.commands.list_items(lib, ui.decargs(args), opts.album)
if not (opts.yes or ui.input_yn(u"Convert? (Y/n)")):
return
if opts.album:
albums = lib.albums(ui.decargs(args))
items = (i for a in albums for i in a.items())
if self.config['copy_album_art']:
for album in albums:
self.copy_album_art(album, opts.dest, path_formats,
pretend)
else:
items = iter(lib.items(ui.decargs(args)))
convert = [self.convert_item(opts.dest,
opts.keep_new,
path_formats,
opts.format,
pretend)
for _ in range(opts.threads)]
pipe = util.pipeline.Pipeline([items, convert])
pipe.run_parallel()
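# Design note (a sketch of the flow above): convert_item() is a
# generator-based pipeline stage; Pipeline fans the item query out to
# opts.threads such workers, each receiving items via send() and yielding
# (item, original, converted) triples back.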
def convert_on_import(self, lib, item):
"""Transcode a file automatically after it is imported into the
library.
"""
fmt = self.config['format'].get(unicode).lower()
if should_transcode(item, fmt):
command, ext = get_format()
# Create a temporary file for the conversion.
tmpdir = self.config['tmpdir'].get()
fd, dest = tempfile.mkstemp('.' + ext, dir=tmpdir)
os.close(fd)
dest = util.bytestring_path(dest)
_temp_files.append(dest) # Delete the transcode later.
# Convert.
try:
self.encode(command, item.path, dest)
except subprocess.CalledProcessError:
return
# Change the newly-imported database entry to point to the
# converted file.
item.path = dest
item.write()
item.read() # Load new audio information data.
item.store()
def _cleanup(self, task, session):
for path in task.old_paths:
if path in _temp_files:
if os.path.isfile(path):
util.remove(path)
_temp_files.remove(path)

57
libs/beetsplug/cue.py Normal file
View file

@ -0,0 +1,57 @@
# -*- coding: utf-8 -*-
# Copyright 2016 Bruno Cauet
# Split an album file into tracks using a cue file
from __future__ import division, absolute_import, print_function
import subprocess
from os import path
from glob import glob
from beets.util import command_output, displayable_path
from beets.plugins import BeetsPlugin
from beets.autotag import TrackInfo
class CuePlugin(BeetsPlugin):
def __init__(self):
super(CuePlugin, self).__init__()
# this does not seem supported by shnsplit
self.config.add({
'keep_before': .1,
'keep_after': .9,
})
# self.register_listener('import_task_start', self.look_for_cues)
def candidates(self, items, artist, album, va_likely):
# Album-level candidate search is unfinished upstream; avoid dropping
# into the debugger and simply decline to offer candidates.
return []
def item_candidates(self, item, artist, album):
dir = path.dirname(item.path)
cues = glob(path.join(dir, "*.cue"))
if not cues:
return
if len(cues) > 1:
self._log.info(u"Found multiple cue files, doing nothing: {0}",
map(displayable_path, cues))
return
cue_file = cues[0]
self._log.info("Found {} for {}", displayable_path(cue_file), item)
try:
# careful: will ask for input in case of conflicts
command_output(['shnsplit', '-f', cue_file, item.path])
except (subprocess.CalledProcessError, OSError):
self._log.exception(u'shnsplit execution failed')
return
tracks = glob(path.join(dir, "*.wav"))
self._log.info("Generated {0} tracks", len(tracks))
for t in tracks:
title = "dunno lol"
track_id = "wtf"
index = int(path.basename(t)[len("split-track"):-len(".wav")])
yield TrackInfo(title, track_id, index=index, artist=artist)
# generate TrackInfo instances

350
libs/beetsplug/discogs.py Normal file
View file

@ -0,0 +1,350 @@
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Adds Discogs album search support to the autotagger. Requires the
discogs-client library.
"""
from __future__ import division, absolute_import, print_function
import beets.ui
from beets import logging
from beets import config
from beets.autotag.hooks import AlbumInfo, TrackInfo, Distance
from beets.plugins import BeetsPlugin
from beets.util import confit
from discogs_client import Release, Client
from discogs_client.exceptions import DiscogsAPIError
from requests.exceptions import ConnectionError
import beets
import re
import time
import json
import socket
import httplib
import os
# Silence spurious INFO log lines generated by urllib3.
urllib3_logger = logging.getLogger('requests.packages.urllib3')
urllib3_logger.setLevel(logging.CRITICAL)
USER_AGENT = u'beets/{0} +http://beets.io/'.format(beets.__version__)
# Exceptions that discogs_client should really handle but does not.
CONNECTION_ERRORS = (ConnectionError, socket.error, httplib.HTTPException,
ValueError, # JSON decoding raises a ValueError.
DiscogsAPIError)
class DiscogsPlugin(BeetsPlugin):
def __init__(self):
super(DiscogsPlugin, self).__init__()
self.config.add({
'apikey': 'rAzVUQYRaoFjeBjyWuWZ',
'apisecret': 'plxtUTqoCzwxZpqdPysCwGuBSmZNdZVy',
'tokenfile': 'discogs_token.json',
'source_weight': 0.5,
})
self.config['apikey'].redact = True
self.config['apisecret'].redact = True
self.discogs_client = None
self.register_listener('import_begin', self.setup)
def setup(self, session=None):
"""Create the `discogs_client` field. Authenticate if necessary.
"""
c_key = self.config['apikey'].get(unicode)
c_secret = self.config['apisecret'].get(unicode)
# Get the OAuth token from a file or log in.
try:
with open(self._tokenfile()) as f:
tokendata = json.load(f)
except IOError:
# No token yet. Generate one.
token, secret = self.authenticate(c_key, c_secret)
else:
token = tokendata['token']
secret = tokendata['secret']
self.discogs_client = Client(USER_AGENT, c_key, c_secret,
token, secret)
def reset_auth(self):
"""Delete toke file & redo the auth steps.
"""
os.remove(self._tokenfile())
self.setup()
def _tokenfile(self):
"""Get the path to the JSON file for storing the OAuth token.
"""
return self.config['tokenfile'].get(confit.Filename(in_app_dir=True))
def authenticate(self, c_key, c_secret):
# Get the link for the OAuth page.
auth_client = Client(USER_AGENT, c_key, c_secret)
try:
_, _, url = auth_client.get_authorize_url()
except CONNECTION_ERRORS as e:
self._log.debug(u'connection error: {0}', e)
raise beets.ui.UserError(u'communication with Discogs failed')
beets.ui.print_(u"To authenticate with Discogs, visit:")
beets.ui.print_(url)
# Ask for the code and validate it.
code = beets.ui.input_(u"Enter the code:")
try:
token, secret = auth_client.get_access_token(code)
except DiscogsAPIError:
raise beets.ui.UserError(u'Discogs authorization failed')
except CONNECTION_ERRORS as e:
self._log.debug(u'connection error: {0}', e)
raise beets.ui.UserError(u'Discogs token request failed')
# Save the token for later use.
self._log.debug(u'Discogs token {0}, secret {1}', token, secret)
with open(self._tokenfile(), 'w') as f:
json.dump({'token': token, 'secret': secret}, f)
return token, secret
def album_distance(self, items, album_info, mapping):
"""Returns the album distance.
"""
dist = Distance()
if album_info.data_source == 'Discogs':
dist.add('source', self.config['source_weight'].as_number())
return dist
def candidates(self, items, artist, album, va_likely):
"""Returns a list of AlbumInfo objects for discogs search results
matching an album and artist (if not various).
"""
if not self.discogs_client:
return
if va_likely:
query = album
else:
query = '%s %s' % (artist, album)
try:
return self.get_albums(query)
except DiscogsAPIError as e:
self._log.debug(u'API Error: {0} (query: {1})', e, query)
if e.status_code == 401:
self.reset_auth()
return self.candidates(items, artist, album, va_likely)
else:
return []
except CONNECTION_ERRORS:
self._log.debug(u'Connection error in album search', exc_info=True)
return []
def album_for_id(self, album_id):
"""Fetches an album by its Discogs ID and returns an AlbumInfo object
or None if the album is not found.
"""
if not self.discogs_client:
return
self._log.debug(u'Searching for release {0}', album_id)
# Discogs-IDs are simple integers. We only look for those at the end
# of an input string to avoid confusion with other metadata plugins.
# An optional bracket can follow the integer, as this is how discogs
# displays the release ID on its webpage.
match = re.search(r'(^|\[*r|discogs\.com/.+/release/)(\d+)($|\])',
album_id)
if not match:
return None
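# Inputs the pattern above accepts (assumed examples):
#     '2332950', 'r2332950', '[r2332950]', and URLs such as
#     'https://www.discogs.com/Rush-Moving-Pictures/release/2332950';
#     the trailing integer is captured as the release ID.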
result = Release(self.discogs_client, {'id': int(match.group(2))})
# Try to obtain title to verify that we indeed have a valid Release
try:
getattr(result, 'title')
except DiscogsAPIError as e:
if e.status_code != 404:
self._log.debug(u'API Error: {0} (query: {1})', e, result._uri)
if e.status_code == 401:
self.reset_auth()
return self.album_for_id(album_id)
return None
except CONNECTION_ERRORS:
self._log.debug(u'Connection error in album lookup', exc_info=True)
return None
return self.get_album_info(result)
def get_albums(self, query):
"""Returns a list of AlbumInfo objects for a discogs search query.
"""
# Strip non-word characters from query. Things like "!" and "-" can
# cause a query to return no results, even if they match the artist or
# album title. Use `re.UNICODE` flag to avoid stripping non-english
# word characters.
# TEMPORARY: Encode as ASCII to work around a bug:
# https://github.com/beetbox/beets/issues/1051
# When the library is fixed, we should encode as UTF-8.
query = re.sub(r'(?u)\W+', ' ', query).encode('ascii', "replace")
# Strip medium information from query. Things like "CD1" and "disk 1"
# can also negate an otherwise positive result.
query = re.sub(r'(?i)\b(CD|disc)\s*\d+', '', query)
try:
releases = self.discogs_client.search(query,
type='release').page(1)
except CONNECTION_ERRORS:
self._log.debug(u"Communication error while searching for {0!r}",
query, exc_info=True)
return []
return [self.get_album_info(release) for release in releases[:5]]
def get_album_info(self, result):
"""Returns an AlbumInfo object for a discogs Release object.
"""
artist, artist_id = self.get_artist([a.data for a in result.artists])
album = re.sub(r' +', ' ', result.title)
album_id = result.data['id']
# Use `.data` to access the tracklist directly instead of the
# convenient `.tracklist` property, which will strip out useful artist
# information and leave us with skeleton `Artist` objects that will
# each make an API call just to get the same data back.
tracks = self.get_tracks(result.data['tracklist'])
albumtype = ', '.join(
result.data['formats'][0].get('descriptions', [])) or None
va = result.data['artists'][0]['name'].lower() == 'various'
if va:
artist = config['va_name'].get(unicode)
year = result.data['year']
label = result.data['labels'][0]['name']
mediums = len(set(t.medium for t in tracks))
catalogno = result.data['labels'][0]['catno']
if catalogno == 'none':
catalogno = None
country = result.data.get('country')
media = result.data['formats'][0]['name']
data_url = result.data['uri']
return AlbumInfo(album, album_id, artist, artist_id, tracks, asin=None,
albumtype=albumtype, va=va, year=year, month=None,
day=None, label=label, mediums=mediums,
artist_sort=None, releasegroup_id=None,
catalognum=catalogno, script=None, language=None,
country=country, albumstatus=None, media=media,
albumdisambig=None, artist_credit=None,
original_year=None, original_month=None,
original_day=None, data_source='Discogs',
data_url=data_url)
def get_artist(self, artists):
"""Returns an artist string (all artists) and an artist_id (the main
artist) for a list of discogs album or track artists.
"""
artist_id = None
bits = []
for i, artist in enumerate(artists):
if not artist_id:
artist_id = artist['id']
name = artist['name']
# Strip disambiguation number.
name = re.sub(r' \(\d+\)$', '', name)
# Move articles to the front.
name = re.sub(r'(?i)^(.*?), (a|an|the)$', r'\2 \1', name)
bits.append(name)
if artist['join'] and i < len(artists) - 1:
bits.append(artist['join'])
artist = ' '.join(bits).replace(' ,', ',') or None
return artist, artist_id
def get_tracks(self, tracklist):
"""Returns a list of TrackInfo objects for a discogs tracklist.
"""
tracks = []
index_tracks = {}
index = 0
for track in tracklist:
# Only real tracks have `position`. Otherwise, it's an index track.
if track['position']:
index += 1
tracks.append(self.get_track_info(track, index))
else:
index_tracks[index + 1] = track['title']
# Fix up medium and medium_index for each track. Discogs position is
# unreliable, but tracks are in order.
medium = None
medium_count, index_count = 0, 0
for track in tracks:
# Handle special case where a different medium does not indicate a
# new disc, when there is no medium_index and the ordinal of medium
# is not sequential. For example, I, II, III, IV, V. Assume these
# are the track index, not the medium.
medium_is_index = track.medium and not track.medium_index and (
len(track.medium) != 1 or
ord(track.medium) - 64 != medium_count + 1
)
if not medium_is_index and medium != track.medium:
# Increment medium_count and reset index_count when medium
# changes.
medium = track.medium
medium_count += 1
index_count = 0
index_count += 1
track.medium, track.medium_index = medium_count, index_count
# Get `disctitle` from Discogs index tracks. Assume that an index track
# before the first track of each medium is a disc title.
for track in tracks:
if track.medium_index == 1:
if track.index in index_tracks:
disctitle = index_tracks[track.index]
else:
disctitle = None
track.disctitle = disctitle
return tracks
def get_track_info(self, track, index):
"""Returns a TrackInfo object for a discogs track.
"""
title = track['title']
track_id = None
medium, medium_index = self.get_track_index(track['position'])
artist, artist_id = self.get_artist(track.get('artists', []))
length = self.get_track_length(track['duration'])
return TrackInfo(title, track_id, artist, artist_id, length, index,
medium, medium_index, artist_sort=None,
disctitle=None, artist_credit=None)
def get_track_index(self, position):
"""Returns the medium and medium index for a discogs track position.
"""
# medium_index is a number at the end of position. medium is everything
# else. E.g. (A)(1), (Side A, Track )(1), (A)(), ()(1), etc.
match = re.match(r'^(.*?)(\d*)$', position.upper())
if match:
medium, index = match.groups()
else:
self._log.debug(u'Invalid position: {0}', position)
medium = index = None
return medium or None, index or None
def get_track_length(self, duration):
"""Returns the track length in seconds for a discogs duration.
"""
try:
length = time.strptime(duration, '%M:%S')
except ValueError:
return None
return length.tm_min * 60 + length.tm_sec
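# e.g. (assumed input) get_track_length('4:33') -> 273, while malformed
# durations such as '' return None.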

337
libs/beetsplug/duplicates.py Normal file
View file

@ -0,0 +1,337 @@
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Pedro Silva.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""List duplicate tracks or albums.
"""
from __future__ import division, absolute_import, print_function
import shlex
from collections import defaultdict
from beets.plugins import BeetsPlugin
from beets.ui import decargs, print_, vararg_callback, Subcommand, UserError
from beets.util import command_output, displayable_path, subprocess
from beets.library import Item, Album
PLUGIN = 'duplicates'
class DuplicatesPlugin(BeetsPlugin):
"""List duplicate tracks or albums
"""
def __init__(self):
super(DuplicatesPlugin, self).__init__()
self.config.add({
'album': False,
'checksum': '',
'copy': '',
'count': False,
'delete': False,
'format': '',
'full': False,
'keys': [],
'merge': False,
'move': '',
'path': False,
'tiebreak': {},
'strict': False,
'tag': '',
})
self._command = Subcommand('duplicates',
help=__doc__,
aliases=['dup'])
self._command.parser.add_option(
u'-c', u'--count', dest='count',
action='store_true',
help=u'show duplicate counts',
)
self._command.parser.add_option(
u'-C', u'--checksum', dest='checksum',
action='store', metavar='PROG',
help=u'report duplicates based on arbitrary command',
)
self._command.parser.add_option(
u'-d', u'--delete', dest='delete',
action='store_true',
help=u'delete items from library and disk',
)
self._command.parser.add_option(
u'-F', u'--full', dest='full',
action='store_true',
help=u'show all versions of duplicate tracks or albums',
)
self._command.parser.add_option(
u'-s', u'--strict', dest='strict',
action='store_true',
help=u'report duplicates only if all attributes are set',
)
self._command.parser.add_option(
u'-k', u'--keys', dest='keys',
action='callback', metavar='KEY1 KEY2',
callback=vararg_callback,
help=u'report duplicates based on keys',
)
self._command.parser.add_option(
u'-M', u'--merge', dest='merge',
action='store_true',
help=u'merge duplicate items',
)
self._command.parser.add_option(
u'-m', u'--move', dest='move',
action='store', metavar='DEST',
help=u'move items to dest',
)
self._command.parser.add_option(
u'-o', u'--copy', dest='copy',
action='store', metavar='DEST',
help=u'copy items to dest',
)
self._command.parser.add_option(
u'-t', u'--tag', dest='tag',
action='store',
help=u'tag matched items with \'k=v\' attribute',
)
self._command.parser.add_all_common_options()
def commands(self):
def _dup(lib, opts, args):
self.config.set_args(opts)
album = self.config['album'].get(bool)
checksum = self.config['checksum'].get(str)
copy = self.config['copy'].get(str)
count = self.config['count'].get(bool)
delete = self.config['delete'].get(bool)
fmt = self.config['format'].get(str)
full = self.config['full'].get(bool)
keys = self.config['keys'].get(list)
merge = self.config['merge'].get(bool)
move = self.config['move'].get(str)
path = self.config['path'].get(bool)
tiebreak = self.config['tiebreak'].get(dict)
strict = self.config['strict'].get(bool)
tag = self.config['tag'].get(str)
if album:
if not keys:
keys = ['mb_albumid']
items = lib.albums(decargs(args))
else:
if not keys:
keys = ['mb_trackid', 'mb_albumid']
items = lib.items(decargs(args))
if path:
fmt = '$path'
# Default format string for count mode.
if count and not fmt:
if album:
fmt = '$albumartist - $album'
else:
fmt = '$albumartist - $album - $title'
fmt += ': {0}'
if checksum:
for i in items:
k, _ = self._checksum(i, checksum)
keys = [k]
for obj_id, obj_count, objs in self._duplicates(items,
keys=keys,
full=full,
strict=strict,
tiebreak=tiebreak,
merge=merge):
if obj_id: # Skip empty IDs.
for o in objs:
self._process_item(o,
copy=copy,
move=move,
delete=delete,
tag=tag,
fmt=fmt.format(obj_count))
self._command.func = _dup
return [self._command]
def _process_item(self, item, copy=False, move=False, delete=False,
tag=False, fmt=''):
"""Process Item `item`.
"""
print_(format(item, fmt))
if copy:
item.move(basedir=copy, copy=True)
item.store()
if move:
item.move(basedir=move, copy=False)
item.store()
if delete:
item.remove(delete=True)
if tag:
try:
k, v = tag.split('=')
except ValueError:
raise UserError(
u"{}: can't parse k=v tag: {}".format(PLUGIN, tag)
)
setattr(item, k, v)
item.store()
def _checksum(self, item, prog):
"""Run external `prog` on file path associated with `item`, cache
output as flexattr on a key that is the name of the program, and
return the key, checksum tuple.
"""
args = [p.format(file=item.path) for p in shlex.split(prog)]
key = args[0]
checksum = getattr(item, key, False)
if not checksum:
self._log.debug(u'key {0} on item {1} not cached: '
u'computing checksum',
key, displayable_path(item.path))
try:
checksum = command_output(args)
setattr(item, key, checksum)
item.store()
self._log.debug(u'computed checksum for {0} using {1}',
item.title, key)
except subprocess.CalledProcessError as e:
self._log.debug(u'failed to checksum {0}: {1}',
displayable_path(item.path), e)
else:
self._log.debug(u'key {0} on item {1} cached: '
u'not computing checksum',
key, displayable_path(item.path))
return key, checksum
def _group_by(self, objs, keys, strict):
"""Return a dictionary with keys arbitrary concatenations of attributes
and values lists of objects (Albums or Items) with those keys.
If strict, all attributes must be defined for a duplicate match.
"""
counts = defaultdict(list)
for obj in objs:
values = [getattr(obj, k, None) for k in keys]
values = [v for v in values if v not in (None, '')]
if strict and len(values) < len(keys):
self._log.debug(u'some keys {0} on item {1} are null or empty:'
u' skipping',
keys, displayable_path(obj.path))
elif (not strict and not len(values)):
self._log.debug(u'all keys {0} on item {1} are null or empty:'
u' skipping',
keys, displayable_path(obj.path))
else:
key = tuple(values)
counts[key].append(obj)
return counts
def _order(self, objs, tiebreak=None):
"""Return the objects (Items or Albums) sorted by descending
order of priority.
If provided, the `tiebreak` dict indicates the field to use to
prioritize the objects. Otherwise, Items are placed in order of
"completeness" (objects with more non-null fields come first)
and Albums are ordered by their track count.
"""
if tiebreak:
kind = 'items' if all(isinstance(o, Item)
for o in objs) else 'albums'
key = lambda x: tuple(getattr(x, k) for k in tiebreak[kind])
else:
kind = Item if all(isinstance(o, Item) for o in objs) else Album
if kind is Item:
def truthy(v):
# Avoid a Unicode warning by avoiding comparison
# between a bytes object and the empty Unicode
# string ''.
return v is not None and \
(v != '' if isinstance(v, unicode) else True)
fields = kind.all_keys()
key = lambda x: sum(1 for f in fields if truthy(getattr(x, f)))
else:
key = lambda x: len(x.items())
return sorted(objs, key=key, reverse=True)
def _merge_items(self, objs):
"""Merge Item objs by copying missing fields from items in the tail to
the head item.
Return same number of items, with the head item modified.
"""
fields = Item.all_keys()
for f in fields:
for o in objs[1:]:
if getattr(objs[0], f, None) in (None, ''):
value = getattr(o, f, None)
if value:
self._log.debug(u'key {0} on item {1} is null '
u'or empty: setting from item {2}',
f, displayable_path(objs[0].path),
displayable_path(o.path))
setattr(objs[0], f, value)
objs[0].store()
break
return objs
def _merge_albums(self, objs):
"""Merge Album objs by copying missing items from albums in the tail
to the head album.
Return same number of albums, with the head album modified."""
ids = [i.mb_trackid for i in objs[0].items()]
for o in objs[1:]:
for i in o.items():
if i.mb_trackid not in ids:
missing = Item.from_path(i.path)
missing.album_id = objs[0].id
missing.add(i._db)
self._log.debug(u'item {0} missing from album {1}:'
u' merging from {2} into {3}',
missing,
objs[0],
displayable_path(o.path),
displayable_path(missing.destination()))
missing.move(copy=True)
return objs
def _merge(self, objs):
"""Merge duplicate items. See ``_merge_items`` and ``_merge_albums``
for the relevant strategies.
"""
kind = Item if all(isinstance(o, Item) for o in objs) else Album
if kind is Item:
objs = self._merge_items(objs)
else:
objs = self._merge_albums(objs)
return objs
def _duplicates(self, objs, keys, full, strict, tiebreak, merge):
"""Generate triples of keys, duplicate counts, and constituent objects.
"""
offset = 0 if full else 1
for k, objs in self._group_by(objs, keys, strict).iteritems():
if len(objs) > 1:
objs = self._order(objs, tiebreak)
if merge:
objs = self._merge(objs)
yield (k, len(objs) - offset, objs[offset:])
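# CLI sketch for the options wired above (query arguments are assumed):
#
#     beet duplicates                    # list duplicate tracks (by MBIDs)
#     beet dup -a -k album albumartist   # duplicate albums on custom keys
#     beet dup -C 'md5sum {file}' -d     # checksum-based; delete extra copies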

392
libs/beetsplug/edit.py Normal file
View file

@ -0,0 +1,392 @@
# This file is part of beets.
# Copyright 2016
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Open metadata information in a text editor to let the user edit it.
"""
from __future__ import division, absolute_import, print_function
from beets import plugins
from beets import util
from beets import ui
from beets.dbcore import types
from beets.importer import action
from beets.ui.commands import _do_query, PromptChoice
from copy import deepcopy
import subprocess
import yaml
from tempfile import NamedTemporaryFile
import os
# These "safe" types can avoid the format/parse cycle that most fields go
# through: they are safe to edit with native YAML types.
SAFE_TYPES = (types.Float, types.Integer, types.Boolean)
class ParseError(Exception):
"""The modified file is unreadable. The user should be offered a chance to
fix the error.
"""
def edit(filename, log):
"""Open `filename` in a text editor.
"""
cmd = util.shlex_split(util.editor_command())
cmd.append(filename)
log.debug(u'invoking editor command: {!r}', cmd)
try:
subprocess.call(cmd)
except OSError as exc:
raise ui.UserError(u'could not run editor command {!r}: {}'.format(
cmd[0], exc
))
def dump(arg):
"""Dump a sequence of dictionaries as YAML for editing.
"""
return yaml.safe_dump_all(
arg,
allow_unicode=True,
default_flow_style=False,
)
def load(s):
"""Read a sequence of YAML documents back to a list of dictionaries
with string keys.
Can raise a `ParseError`.
"""
try:
out = []
for d in yaml.safe_load_all(s):
if not isinstance(d, dict):
raise ParseError(
u'each entry must be a dictionary; found {}'.format(
type(d).__name__
)
)
# Convert all keys to strings. They started out as strings,
# but the user may have inadvertently messed this up.
out.append({unicode(k): v for k, v in d.items()})
except yaml.YAMLError as e:
raise ParseError(u'invalid YAML: {}'.format(e))
return out
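# Round-trip sketch (assumed literal input): load() accepts the
# multi-document YAML that dump() emits, e.g.
#     load(u'id: 1\ntitle: Foo\n---\nid: 2\ntitle: Bar\n')
#     -> [{u'id': 1, u'title': 'Foo'}, {u'id': 2, u'title': 'Bar'}]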
def _safe_value(obj, key, value):
"""Check whether the `value` is safe to represent in YAML and trust as
returned from parsed YAML.
This ensures that values do not change their type when the user edits their
YAML representation.
"""
typ = obj._type(key)
return isinstance(typ, SAFE_TYPES) and isinstance(value, typ.model_type)
def flatten(obj, fields):
"""Represent `obj`, a `dbcore.Model` object, as a dictionary for
serialization. Only include the given `fields` if provided;
otherwise, include everything.
The resulting dictionary's keys are strings and the values are
safely YAML-serializable types.
"""
# Format each value.
d = {}
for key in obj.keys():
value = obj[key]
if _safe_value(obj, key, value):
# A safe value that is faithfully representable in YAML.
d[key] = value
else:
# A value that should be edited as a string.
d[key] = obj.formatted()[key]
# Possibly filter field names.
if fields:
return {k: v for k, v in d.items() if k in fields}
else:
return d
def apply_(obj, data):
"""Set the fields of a `dbcore.Model` object according to a
dictionary.
This is the opposite of `flatten`. The `data` dictionary should have
strings as values.
"""
for key, value in data.items():
if _safe_value(obj, key, value):
# A safe value *stayed* represented as a safe type. Assign it
# directly.
obj[key] = value
else:
# Either the field was stringified originally or the user changed
# it from a safe type to an unsafe one. Parse it as a string.
obj.set_parse(key, unicode(value))
class EditPlugin(plugins.BeetsPlugin):
def __init__(self):
super(EditPlugin, self).__init__()
self.config.add({
# The default fields to edit.
'albumfields': 'album albumartist',
'itemfields': 'track title artist album',
# Silently ignore any changes to these fields.
'ignore_fields': 'id path',
})
self.register_listener('before_choose_candidate',
self.before_choose_candidate_listener)
def commands(self):
edit_command = ui.Subcommand(
'edit',
help=u'interactively edit metadata'
)
edit_command.parser.add_option(
u'-f', u'--field',
metavar='FIELD',
action='append',
help=u'edit this field also',
)
edit_command.parser.add_option(
u'--all',
action='store_true', dest='all',
help=u'edit all fields',
)
edit_command.parser.add_album_option()
edit_command.func = self._edit_command
return [edit_command]
def _edit_command(self, lib, opts, args):
"""The CLI command function for the `beet edit` command.
"""
# Get the objects to edit.
query = ui.decargs(args)
items, albums = _do_query(lib, query, opts.album, False)
objs = albums if opts.album else items
if not objs:
ui.print_(u'Nothing to edit.')
return
# Get the fields to edit.
if opts.all:
fields = None
else:
fields = self._get_fields(opts.album, opts.field)
self.edit(opts.album, objs, fields)
def _get_fields(self, album, extra):
"""Get the set of fields to edit.
"""
# Start with the configured base fields.
if album:
fields = self.config['albumfields'].as_str_seq()
else:
fields = self.config['itemfields'].as_str_seq()
# Add the requested extra fields.
if extra:
fields += extra
# Ensure we always have the `id` field for identification.
fields.append('id')
return set(fields)
def edit(self, album, objs, fields):
"""The core editor function.
- `album`: A flag indicating whether we're editing Items or Albums.
- `objs`: The `Item`s or `Album`s to edit.
- `fields`: The set of field names to edit (or None to edit
everything).
"""
# Present the YAML to the user and let her change it.
success = self.edit_objects(objs, fields)
# Save the new data.
if success:
self.save_changes(objs)
def edit_objects(self, objs, fields):
"""Dump a set of Model objects to a file as text, ask the user
to edit it, and apply any changes to the objects.
Return a boolean indicating whether the edit succeeded.
"""
# Get the content to edit as raw data structures.
old_data = [flatten(o, fields) for o in objs]
# Set up a temporary file with the initial data for editing.
new = NamedTemporaryFile(suffix='.yaml', delete=False)
old_str = dump(old_data)
new.write(old_str)
new.close()
# Loop until we have parseable data and the user confirms.
try:
while True:
# Ask the user to edit the data.
edit(new.name, self._log)
# Read the data back after editing and check whether anything
# changed.
with open(new.name) as f:
new_str = f.read()
if new_str == old_str:
ui.print_(u"No changes; aborting.")
return False
# Parse the updated data.
try:
new_data = load(new_str)
except ParseError as e:
ui.print_(u"Could not read data: {}".format(e))
if ui.input_yn(u"Edit again to fix? (Y/n)", True):
continue
else:
return False
# Show the changes.
# If the objects are not on the DB yet, we need a copy of their
# original state for show_model_changes.
objs_old = [deepcopy(obj) if not obj._db else None
for obj in objs]
self.apply_data(objs, old_data, new_data)
changed = False
for obj, obj_old in zip(objs, objs_old):
changed |= ui.show_model_changes(obj, obj_old)
if not changed:
ui.print_(u'No changes to apply.')
return False
# Confirm the changes.
choice = ui.input_options(
(u'continue Editing', u'apply', u'cancel')
)
if choice == u'a': # Apply.
return True
elif choice == u'c': # Cancel.
return False
elif choice == u'e': # Keep editing.
# Reset the temporary changes to the objects.
for obj in objs:
obj.read()
continue
# Remove the temporary file before returning.
finally:
os.remove(new.name)
def apply_data(self, objs, old_data, new_data):
"""Take potentially-updated data and apply it to a set of Model
objects.
The objects are not written back to the database, so the changes
are temporary.
"""
if len(old_data) != len(new_data):
self._log.warn(u'number of objects changed from {} to {}',
len(old_data), len(new_data))
obj_by_id = {o.id: o for o in objs}
ignore_fields = self.config['ignore_fields'].as_str_seq()
for old_dict, new_dict in zip(old_data, new_data):
# Prohibit any changes to forbidden fields to avoid
# clobbering `id` and such by mistake.
forbidden = False
for key in ignore_fields:
if old_dict.get(key) != new_dict.get(key):
self._log.warn(u'ignoring object whose {} changed', key)
forbidden = True
break
if forbidden:
continue
id_ = int(old_dict['id'])
apply_(obj_by_id[id_], new_dict)
def save_changes(self, objs):
"""Save a list of updated Model objects to the database.
"""
# Save to the database and possibly write tags.
for ob in objs:
if ob._dirty:
self._log.debug(u'saving changes to {}', ob)
ob.try_sync(ui.should_write(), ui.should_move())
# Methods for interactive importer execution.
def before_choose_candidate_listener(self, session, task):
"""Append an "Edit" choice and an "edit Candidates" choice (if
there are candidates) to the interactive importer prompt.
"""
choices = [PromptChoice('d', 'eDit', self.importer_edit)]
if task.candidates:
choices.append(PromptChoice('c', 'edit Candidates',
self.importer_edit_candidate))
return choices
def importer_edit(self, session, task):
"""Callback for invoking the functionality during an interactive
import session on the *original* item tags.
"""
# Assign temporary ids to the Items.
for i, obj in enumerate(task.items):
obj.id = i + 1
# Present the YAML to the user and let her change it.
fields = self._get_fields(album=False, extra=[])
success = self.edit_objects(task.items, fields)
# Remove temporary ids.
for obj in task.items:
obj.id = None
# Save the new data.
if success:
# Return action.RETAG, which makes the importer write the tags
# to the files if needed without re-applying metadata.
return action.RETAG
else:
# Edit cancelled / no edits made. Revert changes.
for obj in task.items:
obj.read()
def importer_edit_candidate(self, session, task):
"""Callback for invoking the functionality during an interactive
import session on a *candidate*. The candidate's metadata is
applied to the original items.
"""
# Prompt the user for a candidate.
sel = ui.input_options([], numrange=(1, len(task.candidates)))
# Force applying the candidate on the items.
task.match = task.candidates[sel - 1]
task.apply_metadata()
return self.importer_edit(session, task)

154
libs/beetsplug/embedart.py Normal file
View file

@ -0,0 +1,154 @@
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Allows beets to embed album art into file metadata."""
from __future__ import division, absolute_import, print_function
import os.path
from beets.plugins import BeetsPlugin
from beets import ui
from beets.ui import decargs
from beets.util import syspath, normpath, displayable_path, bytestring_path
from beets.util.artresizer import ArtResizer
from beets import config
from beets import art
class EmbedCoverArtPlugin(BeetsPlugin):
"""Allows albumart to be embedded into the actual files.
"""
def __init__(self):
super(EmbedCoverArtPlugin, self).__init__()
self.config.add({
'maxwidth': 0,
'auto': True,
'compare_threshold': 0,
'ifempty': False,
'remove_art_file': False
})
if self.config['maxwidth'].get(int) and not ArtResizer.shared.local:
self.config['maxwidth'] = 0
self._log.warning(u"ImageMagick or PIL not found; "
u"'maxwidth' option ignored")
if self.config['compare_threshold'].get(int) and not \
ArtResizer.shared.can_compare:
self.config['compare_threshold'] = 0
self._log.warning(u"ImageMagick 6.8.7 or higher not installed; "
u"'compare_threshold' option ignored")
self.register_listener('art_set', self.process_album)
def commands(self):
# Embed command.
embed_cmd = ui.Subcommand(
'embedart', help=u'embed image files into file metadata'
)
embed_cmd.parser.add_option(
u'-f', u'--file', metavar='PATH', help=u'the image file to embed'
)
maxwidth = self.config['maxwidth'].get(int)
compare_threshold = self.config['compare_threshold'].get(int)
ifempty = self.config['ifempty'].get(bool)
def embed_func(lib, opts, args):
if opts.file:
imagepath = normpath(opts.file)
if not os.path.isfile(syspath(imagepath)):
raise ui.UserError(u'image file {0} not found'.format(
displayable_path(imagepath)
))
for item in lib.items(decargs(args)):
art.embed_item(self._log, item, imagepath, maxwidth, None,
compare_threshold, ifempty)
else:
for album in lib.albums(decargs(args)):
art.embed_album(self._log, album, maxwidth, False,
compare_threshold, ifempty)
self.remove_artfile(album)
embed_cmd.func = embed_func
# Extract command.
extract_cmd = ui.Subcommand(
'extractart',
help=u'extract an image from file metadata',
)
extract_cmd.parser.add_option(
u'-o', dest='outpath',
help=u'image output file',
)
extract_cmd.parser.add_option(
u'-n', dest='filename',
help=u'image filename to create for all matched albums',
)
extract_cmd.parser.add_option(
'-a', dest='associate', action='store_true',
help='associate the extracted images with the album',
)
def extract_func(lib, opts, args):
if opts.outpath:
art.extract_first(self._log, normpath(opts.outpath),
lib.items(decargs(args)))
else:
filename = bytestring_path(opts.filename or
config['art_filename'].get())
if os.path.dirname(filename) != '':
self._log.error(
u"Only specify a name rather than a path for -n")
return
for album in lib.albums(decargs(args)):
artpath = normpath(os.path.join(album.path, filename))
artpath = art.extract_first(self._log, artpath,
album.items())
if artpath and opts.associate:
album.set_art(artpath)
album.store()
extract_cmd.func = extract_func
# Clear command.
clear_cmd = ui.Subcommand(
'clearart',
help=u'remove images from file metadata',
)
def clear_func(lib, opts, args):
art.clear(self._log, lib, decargs(args))
clear_cmd.func = clear_func
return [embed_cmd, extract_cmd, clear_cmd]
def process_album(self, album):
"""Automatically embed art after art has been set
"""
if self.config['auto'] and ui.should_write():
max_width = self.config['maxwidth'].get(int)
art.embed_album(self._log, album, max_width, True,
self.config['compare_threshold'].get(int),
self.config['ifempty'].get(bool))
self.remove_artfile(album)
def remove_artfile(self, album):
"""Possibly delete the album art file for an album (if the
appropriate configuration option is enabled).
"""
if self.config['remove_art_file'] and album.artpath:
if os.path.isfile(album.artpath):
self._log.debug(u'Removing album art file for {0}', album)
os.remove(album.artpath)
album.artpath = None
album.store()
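The embed command above is a thin wrapper over beets' art helpers. A minimal sketch of the same art.embed_item call made outside the plugin (not part of this diff; the library path, image path, and query are hypothetical):
import logging

from beets import art, library

log = logging.getLogger('beets')
lib = library.Library('/tmp/musiclibrary.db')  # hypothetical library

for item in lib.items('artist:Nirvana'):
    # Same positional arguments as embed_func above: image path, no
    # maxwidth, no itempath, compare_threshold 0, ifempty False.
    art.embed_item(log, item, b'/tmp/cover.jpg', 0, None, 0, False)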

135
libs/beetsplug/embyupdate.py Normal file
View file

@ -0,0 +1,135 @@
# -*- coding: utf-8 -*-
"""Updates the Emby Library whenever the beets library is changed.
emby:
host: localhost
port: 8096
username: user
password: password
"""
from __future__ import division, absolute_import, print_function
from beets import config
from beets.plugins import BeetsPlugin
from urllib import urlencode
from urlparse import urljoin, parse_qs, urlsplit, urlunsplit
import hashlib
import requests
def api_url(host, port, endpoint):
"""Returns a joined url.
"""
joined = urljoin('http://{0}:{1}'.format(host, port), endpoint)
scheme, netloc, path, query_string, fragment = urlsplit(joined)
query_params = parse_qs(query_string)
query_params['format'] = ['json']
new_query_string = urlencode(query_params, doseq=True)
return urlunsplit((scheme, netloc, path, new_query_string, fragment))
def password_data(username, password):
"""Returns a dict with username and its encoded password.
"""
return {
'username': username,
'password': hashlib.sha1(password).hexdigest(),
'passwordMd5': hashlib.md5(password).hexdigest()
}
def create_headers(user_id, token=None):
"""Return header dict that is needed to talk to the Emby API.
"""
headers = {
'Authorization': 'MediaBrowser',
'UserId': user_id,
'Client': 'other',
'Device': 'empy',
'DeviceId': 'beets',
'Version': '0.0.0'
}
if token:
headers['X-MediaBrowser-Token'] = token
return headers
def get_token(host, port, headers, auth_data):
"""Return token for a user.
"""
url = api_url(host, port, '/Users/AuthenticateByName')
r = requests.post(url, headers=headers, data=auth_data)
return r.json().get('AccessToken')
def get_user(host, port, username):
"""Return user dict from server or None if there is no user.
"""
url = api_url(host, port, '/Users/Public')
r = requests.get(url)
user = [i for i in r.json() if i['Name'] == username]
return user
class EmbyUpdate(BeetsPlugin):
def __init__(self):
super(EmbyUpdate, self).__init__()
# Adding defaults.
config['emby'].add({
u'host': u'localhost',
u'port': 8096
})
self.register_listener('database_change', self.listen_for_db_change)
def listen_for_db_change(self, lib, model):
"""Listens for beets db change and register the update for the end.
"""
self.register_listener('cli_exit', self.update)
def update(self, lib):
"""When the client exists try to send refresh request to Emby.
"""
self._log.info(u'Updating Emby library...')
host = config['emby']['host'].get()
port = config['emby']['port'].get()
username = config['emby']['username'].get()
password = config['emby']['password'].get()
# Get user information from the Emby API.
user = get_user(host, port, username)
if not user:
self._log.warning(u'User {0} could not be found.'.format(username))
return
# Create Authentication data and headers.
auth_data = password_data(username, password)
headers = create_headers(user[0]['Id'])
# Get authentication token.
token = get_token(host, port, headers, auth_data)
if not token:
self._log.warning(
u'Could not get token for user {0}', username
)
return
# Recreate headers with a token.
headers = create_headers(user[0]['Id'], token=token)
# Trigger the Update.
url = api_url(host, port, '/Library/Refresh')
r = requests.post(url, headers=headers)
if r.status_code != 204:
self._log.warning(u'Update could not be triggered')
else:
self._log.info(u'Update triggered.')
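Condensed, the update flow above is: look the user up, authenticate by name, then POST a library refresh with the returned token. A self-contained sketch of the same handshake with plain requests (not part of this diff; host and credentials are the sample values from the module docstring):
import hashlib

import requests

host, port = 'localhost', 8096
username, password = 'user', 'password'
base = 'http://{0}:{1}'.format(host, port)

# Look up the user id (mirrors get_user above).
users = requests.get(base + '/Users/Public?format=json').json()
user_id = [u for u in users if u['Name'] == username][0]['Id']

# Authenticate by name (mirrors password_data/create_headers/get_token).
headers = {'Authorization': 'MediaBrowser', 'UserId': user_id,
           'Client': 'other', 'Device': 'beets', 'DeviceId': 'beets',
           'Version': '0.0.0'}
auth = {'username': username,
        # Python 2: str arguments are bytes, so hashing works directly.
        'password': hashlib.sha1(password).hexdigest(),
        'passwordMd5': hashlib.md5(password).hexdigest()}
token = requests.post(base + '/Users/AuthenticateByName?format=json',
                      headers=headers, data=auth).json()['AccessToken']

# Trigger the refresh with the token (mirrors update above).
headers['X-MediaBrowser-Token'] = token
r = requests.post(base + '/Library/Refresh?format=json', headers=headers)
print(r.status_code)  # 204 on success, per the check in update()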

151
libs/beetsplug/export.py Normal file
View file

@ -0,0 +1,151 @@
# -*- coding: utf-8 -*-
# This file is part of beets.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Exports data from beets
"""
from __future__ import division, absolute_import, print_function
import sys
import json
import codecs
from datetime import datetime, date
from beets.plugins import BeetsPlugin
from beets import ui
from beets import mediafile
from beetsplug.info import make_key_filter, library_data, tag_data
class ExportEncoder(json.JSONEncoder):
"""Deals with dates because JSON doesn't have a standard"""
def default(self, o):
if isinstance(o, datetime) or isinstance(o, date):
return o.isoformat()
return json.JSONEncoder.default(self, o)
class ExportPlugin(BeetsPlugin):
def __init__(self):
super(ExportPlugin, self).__init__()
self.config.add({
'default_format': 'json',
'json': {
# json module formatting options
'formatting': {
'ensure_ascii': False,
'indent': 4,
'separators': (',', ': '),
'sort_keys': True
}
},
# TODO: Use something like the edit plugin
# 'item_fields': []
})
def commands(self):
# TODO: Add option to use albums
cmd = ui.Subcommand('export', help=u'export data from beets')
cmd.func = self.run
cmd.parser.add_option(
u'-l', u'--library', action='store_true',
help=u'show library fields instead of tags',
)
cmd.parser.add_option(
u'--append', action='store_true', default=False,
help=u'append data to the output file instead of overwriting it',
)
cmd.parser.add_option(
u'-i', u'--include-keys', default=[],
action='append', dest='included_keys',
help=u'comma separated list of keys to show',
)
cmd.parser.add_option(
u'-o', u'--output',
help=u'path for the output file. If not given, will print the data'
)
return [cmd]
def run(self, lib, opts, args):
file_path = opts.output
file_format = self.config['default_format'].get(str)
file_mode = 'a' if opts.append else 'w'
format_options = self.config[file_format]['formatting'].get(dict)
export_format = ExportFormat.factory(
file_format, **{
'file_path': file_path,
'file_mode': file_mode
}
)
items = []
data_collector = library_data if opts.library else tag_data
included_keys = []
for keys in opts.included_keys:
included_keys.extend(keys.split(','))
key_filter = make_key_filter(included_keys)
for data_emitter in data_collector(lib, ui.decargs(args)):
try:
data, item = data_emitter()
except (mediafile.UnreadableFileError, IOError) as ex:
self._log.error(u'cannot read file: {0}', ex)
continue
data = key_filter(data)
items += [data]
export_format.export(items, **format_options)
class ExportFormat(object):
"""The output format type"""
@classmethod
def factory(cls, type, **kwargs):
if type == "json":
if kwargs['file_path']:
return JsonFileFormat(**kwargs)
else:
return JsonPrintFormat()
raise NotImplementedError()
def export(self, data, **kwargs):
raise NotImplementedError()
class JsonPrintFormat(ExportFormat):
"""Outputs to the console"""
def export(self, data, **kwargs):
json.dump(data, sys.stdout, cls=ExportEncoder, **kwargs)
class JsonFileFormat(ExportFormat):
"""Saves in a json file"""
def __init__(self, file_path, file_mode=u'w', encoding=u'utf-8'):
self.path = file_path
self.mode = file_mode
self.encoding = encoding
def export(self, data, **kwargs):
with codecs.open(self.path, self.mode, self.encoding) as f:
json.dump(data, f, cls=ExportEncoder, **kwargs)
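The only thing ExportEncoder adds over the stdlib encoder is ISO 8601 serialization for datetime and date values. A self-contained demonstration (not part of this diff):
import json
from datetime import date, datetime

class ExportEncoder(json.JSONEncoder):  # mirrors the class above
    def default(self, o):
        if isinstance(o, (datetime, date)):
            return o.isoformat()
        return json.JSONEncoder.default(self, o)

print(json.dumps({'added': datetime(2016, 10, 22, 10, 14, 5)},
                 cls=ExportEncoder))
# {"added": "2016-10-22T10:14:05"}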

861
libs/beetsplug/fetchart.py Normal file
View file

@ -0,0 +1,861 @@
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Fetches album art.
"""
from __future__ import division, absolute_import, print_function
from contextlib import closing
import os
import re
from tempfile import NamedTemporaryFile
import requests
from beets import plugins
from beets import importer
from beets import ui
from beets import util
from beets import config
from beets.util.artresizer import ArtResizer
from beets.util import confit
try:
import itunes
HAVE_ITUNES = True
except ImportError:
HAVE_ITUNES = False
IMAGE_EXTENSIONS = ['png', 'jpg', 'jpeg']
CONTENT_TYPES = ('image/jpeg', 'image/png')
DOWNLOAD_EXTENSION = '.jpg'
class Candidate(object):
"""Holds information about a matching artwork, deals with validation of
dimension restrictions and resizing.
"""
CANDIDATE_BAD = 0
CANDIDATE_EXACT = 1
CANDIDATE_DOWNSCALE = 2
MATCH_EXACT = 0
MATCH_FALLBACK = 1
def __init__(self, log, path=None, url=None, source=u'',
match=None, size=None):
self._log = log
self.path = path
self.url = url
self.source = source
self.check = None
self.match = match
self.size = size
def _validate(self, extra):
"""Determine whether the candidate artwork is valid based on
its dimensions (width and ratio).
Return `CANDIDATE_BAD` if the file is unusable.
Return `CANDIDATE_EXACT` if the file is usable as-is.
Return `CANDIDATE_DOWNSCALE` if the file must be resized.
"""
if not self.path:
return self.CANDIDATE_BAD
if not (extra['enforce_ratio'] or
extra['minwidth'] or
extra['maxwidth']):
return self.CANDIDATE_EXACT
# get_size returns None if no local imaging backend is available
if not self.size:
self.size = ArtResizer.shared.get_size(self.path)
self._log.debug(u'image size: {}', self.size)
if not self.size:
self._log.warning(u'Could not get size of image (please see '
u'documentation for dependencies). '
u'The configuration options `minwidth` and '
u'`enforce_ratio` may be violated.')
return self.CANDIDATE_EXACT
short_edge = min(self.size)
long_edge = max(self.size)
# Check minimum size.
if extra['minwidth'] and self.size[0] < extra['minwidth']:
self._log.debug(u'image too small ({} < {})',
self.size[0], extra['minwidth'])
return self.CANDIDATE_BAD
# Check aspect ratio.
edge_diff = long_edge - short_edge
if extra['enforce_ratio']:
if extra['margin_px']:
if edge_diff > extra['margin_px']:
self._log.debug(u'image is not close enough to being '
u'square, ({} - {} > {})',
long_edge, short_edge, extra['margin_px'])
return self.CANDIDATE_BAD
elif extra['margin_percent']:
margin_px = extra['margin_percent'] * long_edge
if edge_diff > margin_px:
self._log.debug(u'image is not close enough to being '
u'square, ({} - {} > {})',
long_edge, short_edge, margin_px)
return self.CANDIDATE_BAD
elif edge_diff:
# also reached for margin_px == 0 and margin_percent == 0.0
self._log.debug(u'image is not square ({} != {})',
self.size[0], self.size[1])
return self.CANDIDATE_BAD
# Check maximum size.
if extra['maxwidth'] and self.size[0] > extra['maxwidth']:
self._log.debug(u'image needs resizing ({} > {})',
self.size[0], extra['maxwidth'])
return self.CANDIDATE_DOWNSCALE
return self.CANDIDATE_EXACT
def validate(self, extra):
self.check = self._validate(extra)
return self.check
def resize(self, extra):
if extra['maxwidth'] and self.check == self.CANDIDATE_DOWNSCALE:
self.path = ArtResizer.shared.resize(extra['maxwidth'], self.path)
def _logged_get(log, *args, **kwargs):
"""Like `requests.get`, but logs the effective URL to the specified
`log` at the `DEBUG` level.
Use the optional `message` parameter to specify what to log before
the URL. By default, the string is "getting URL".
Also sets the User-Agent header to indicate beets.
"""
# Use some arguments with the `send` call but most with the
# `Request` construction. This is a cheap, magic-filled way to
# emulate `requests.get` or, more pertinently,
# `requests.Session.request`.
req_kwargs = kwargs
send_kwargs = {}
for arg in ('stream', 'verify', 'proxies', 'cert', 'timeout'):
if arg in kwargs:
send_kwargs[arg] = req_kwargs.pop(arg)
# Our special logging message parameter.
if 'message' in kwargs:
message = kwargs.pop('message')
else:
message = 'getting URL'
req = requests.Request('GET', *args, **req_kwargs)
with requests.Session() as s:
s.headers = {'User-Agent': 'beets'}
prepped = s.prepare_request(req)
log.debug('{}: {}', message, prepped.url)
return s.send(prepped, **send_kwargs)
class RequestMixin(object):
"""Adds a Requests wrapper to the class that uses the logger, which
must be named `self._log`.
"""
def request(self, *args, **kwargs):
"""Like `requests.get`, but uses the logger `self._log`.
See also `_logged_get`.
"""
return _logged_get(self._log, *args, **kwargs)
# ART SOURCES ################################################################
class ArtSource(RequestMixin):
def __init__(self, log, config):
self._log = log
self._config = config
def get(self, album, extra):
raise NotImplementedError()
def _candidate(self, **kwargs):
return Candidate(source=self, log=self._log, **kwargs)
def fetch_image(self, candidate, extra):
raise NotImplementedError()
class LocalArtSource(ArtSource):
IS_LOCAL = True
LOC_STR = u'local'
def fetch_image(self, candidate, extra):
pass
class RemoteArtSource(ArtSource):
IS_LOCAL = False
LOC_STR = u'remote'
def fetch_image(self, candidate, extra):
"""Downloads an image from a URL and checks whether it seems to
actually be an image. If so, returns a path to the downloaded image.
Otherwise, returns None.
"""
if extra['maxwidth']:
candidate.url = ArtResizer.shared.proxy_url(extra['maxwidth'],
candidate.url)
try:
with closing(self.request(candidate.url, stream=True,
message=u'downloading image')) as resp:
if 'Content-Type' not in resp.headers \
or resp.headers['Content-Type'] not in CONTENT_TYPES:
self._log.debug(
u'not a supported image: {}',
resp.headers.get('Content-Type') or u'no content type',
)
candidate.path = None
return
# Generate a temporary file with the correct extension.
with NamedTemporaryFile(suffix=DOWNLOAD_EXTENSION,
delete=False) as fh:
for chunk in resp.iter_content(chunk_size=1024):
fh.write(chunk)
self._log.debug(u'downloaded art to: {0}',
util.displayable_path(fh.name))
candidate.path = fh.name
return
except (IOError, requests.RequestException, TypeError) as exc:
# Handling TypeError works around a urllib3 bug:
# https://github.com/shazow/urllib3/issues/556
self._log.debug(u'error fetching art: {}', exc)
candidate.path = None
return
class CoverArtArchive(RemoteArtSource):
NAME = u"Cover Art Archive"
URL = 'http://coverartarchive.org/release/{mbid}/front'
GROUP_URL = 'http://coverartarchive.org/release-group/{mbid}/front'
def get(self, album, extra):
"""Return the Cover Art Archive and Cover Art Archive release group URLs
using album MusicBrainz release ID and release group ID.
"""
if album.mb_albumid:
yield self._candidate(url=self.URL.format(mbid=album.mb_albumid),
match=Candidate.MATCH_EXACT)
if album.mb_releasegroupid:
yield self._candidate(
url=self.GROUP_URL.format(mbid=album.mb_releasegroupid),
match=Candidate.MATCH_FALLBACK)
class Amazon(RemoteArtSource):
NAME = u"Amazon"
URL = 'http://images.amazon.com/images/P/%s.%02i.LZZZZZZZ.jpg'
INDICES = (1, 2)
def get(self, album, extra):
"""Generate URLs using Amazon ID (ASIN) string.
"""
if album.asin:
for index in self.INDICES:
yield self._candidate(url=self.URL % (album.asin, index),
match=Candidate.MATCH_EXACT)
class AlbumArtOrg(RemoteArtSource):
NAME = u"AlbumArt.org scraper"
URL = 'http://www.albumart.org/index_detail.php'
PAT = r'href\s*=\s*"([^>"]*)"[^>]*title\s*=\s*"View larger image"'
def get(self, album, extra):
"""Return art URL from AlbumArt.org using album ASIN.
"""
if not album.asin:
return
# Get the page from albumart.org.
try:
resp = self.request(self.URL, params={'asin': album.asin})
self._log.debug(u'scraped art URL: {0}', resp.url)
except requests.RequestException:
self._log.debug(u'error scraping art page')
return
# Search the page for the image URL.
m = re.search(self.PAT, resp.text)
if m:
image_url = m.group(1)
yield self._candidate(url=image_url, match=Candidate.MATCH_EXACT)
else:
self._log.debug(u'no image found on page')
class GoogleImages(RemoteArtSource):
NAME = u"Google Images"
URL = u'https://www.googleapis.com/customsearch/v1'
def __init__(self, *args, **kwargs):
super(GoogleImages, self).__init__(*args, **kwargs)
self.key = self._config['google_key'].get()
self.cx = self._config['google_engine'].get()
def get(self, album, extra):
"""Return art URL from google custom search engine
given an album title and artist.
"""
if not (album.albumartist and album.album):
return
search_string = (album.albumartist + ',' + album.album).encode('utf-8')
response = self.request(self.URL, params={
'key': self.key,
'cx': self.cx,
'q': search_string,
'searchType': 'image'
})
# Get results using JSON.
try:
data = response.json()
except ValueError:
self._log.debug(u'google: error loading response: {}'
.format(response.text))
return
if 'error' in data:
reason = data['error']['errors'][0]['reason']
self._log.debug(u'google fetchart error: {0}', reason)
return
if 'items' in data.keys():
for item in data['items']:
yield self._candidate(url=item['link'],
match=Candidate.MATCH_EXACT)
class FanartTV(RemoteArtSource):
"""Art from fanart.tv requested using their API"""
NAME = u"fanart.tv"
API_URL = 'http://webservice.fanart.tv/v3/'
API_ALBUMS = API_URL + 'music/albums/'
PROJECT_KEY = '61a7d0ab4e67162b7a0c7c35915cd48e'
def __init__(self, *args, **kwargs):
super(FanartTV, self).__init__(*args, **kwargs)
self.client_key = self._config['fanarttv_key'].get()
def get(self, album, extra):
if not album.mb_releasegroupid:
return
response = self.request(
self.API_ALBUMS + album.mb_releasegroupid,
headers={'api-key': self.PROJECT_KEY,
'client-key': self.client_key})
try:
data = response.json()
except ValueError:
self._log.debug(u'fanart.tv: error loading response: {}',
response.text)
return
if u'status' in data and data[u'status'] == u'error':
if u'not found' in data[u'error message'].lower():
self._log.debug(u'fanart.tv: no image found')
elif u'api key' in data[u'error message'].lower():
self._log.warning(u'fanart.tv: Invalid API key given, please '
u'enter a valid one in your config file.')
else:
self._log.debug(u'fanart.tv: error on request: {}',
data[u'error message'])
return
matches = []
# can there be more than one releasegroupid per response?
for mbid, art in data.get(u'albums', dict()).items():
# there might be more art referenced, e.g. cdart, and an albumcover
# might not be present, even if the request was successful
if album.mb_releasegroupid == mbid and u'albumcover' in art:
matches.extend(art[u'albumcover'])
# can this actually occur?
else:
self._log.debug(u'fanart.tv: unexpected mb_releasegroupid in '
u'response!')
matches.sort(key=lambda x: x[u'likes'], reverse=True)
for item in matches:
# fanart.tv has a strict size requirement for album art to be
# uploaded
yield self._candidate(url=item[u'url'],
match=Candidate.MATCH_EXACT,
size=(1000, 1000))
class ITunesStore(RemoteArtSource):
NAME = u"iTunes Store"
def get(self, album, extra):
"""Return art URL from iTunes Store given an album title.
"""
if not (album.albumartist and album.album):
return
search_string = (album.albumartist + ' ' + album.album).encode('utf-8')
try:
# Isolate bugs in the iTunes library while searching.
try:
results = itunes.search_album(search_string)
except Exception as exc:
self._log.debug(u'iTunes search failed: {0}', exc)
return
# Get the first match.
if results:
itunes_album = results[0]
else:
self._log.debug(u'iTunes search for {!r} got no results',
search_string)
return
if itunes_album.get_artwork()['100']:
small_url = itunes_album.get_artwork()['100']
big_url = small_url.replace('100x100', '1200x1200')
yield self._candidate(url=big_url, match=Candidate.MATCH_EXACT)
else:
self._log.debug(u'album has no artwork in iTunes Store')
except IndexError:
self._log.debug(u'album not found in iTunes Store')
class Wikipedia(RemoteArtSource):
NAME = u"Wikipedia (queried through DBpedia)"
DBPEDIA_URL = 'http://dbpedia.org/sparql'
WIKIPEDIA_URL = 'http://en.wikipedia.org/w/api.php'
SPARQL_QUERY = u'''PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX dbpprop: <http://dbpedia.org/property/>
PREFIX owl: <http://dbpedia.org/ontology/>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX foaf: <http://xmlns.com/foaf/0.1/>
SELECT DISTINCT ?pageId ?coverFilename WHERE {{
?subject owl:wikiPageID ?pageId .
?subject dbpprop:name ?name .
?subject rdfs:label ?label .
{{ ?subject dbpprop:artist ?artist }}
UNION
{{ ?subject owl:artist ?artist }}
{{ ?artist foaf:name "{artist}"@en }}
UNION
{{ ?artist dbpprop:name "{artist}"@en }}
?subject rdf:type <http://dbpedia.org/ontology/Album> .
?subject dbpprop:cover ?coverFilename .
FILTER ( regex(?name, "{album}", "i") )
}}
Limit 1'''
def get(self, album, extra):
if not (album.albumartist and album.album):
return
# Find the name of the cover art filename on DBpedia
cover_filename, page_id = None, None
dbpedia_response = self.request(
self.DBPEDIA_URL,
params={
'format': 'application/sparql-results+json',
'timeout': 2500,
'query': self.SPARQL_QUERY.format(
artist=album.albumartist.title(), album=album.album)
},
headers={'content-type': 'application/json'},
)
try:
data = dbpedia_response.json()
results = data['results']['bindings']
if results:
cover_filename = 'File:' + results[0]['coverFilename']['value']
page_id = results[0]['pageId']['value']
else:
self._log.debug(u'wikipedia: album not found on dbpedia')
except (ValueError, KeyError, IndexError):
self._log.debug(u'wikipedia: error scraping dbpedia response: {}',
dbpedia_response.text)
# Ensure we have a filename before attempting to query wikipedia
if not (cover_filename and page_id):
return
# DBPedia sometimes provides an incomplete cover_filename, indicated
# by the filename having a space before the extension, e.g., 'foo .bar'
# An additional Wikipedia call can help to find the real filename.
# This may be removed once the DBPedia issue is resolved, see:
# https://github.com/dbpedia/extraction-framework/issues/396
if ' .' in cover_filename and \
'.' not in cover_filename.split(' .')[-1]:
self._log.debug(
u'wikipedia: dbpedia provided incomplete cover_filename'
)
lpart, rpart = cover_filename.rsplit(' .', 1)
# Query all the images in the page
wikipedia_response = self.request(
self.WIKIPEDIA_URL,
params={
'format': 'json',
'action': 'query',
'continue': '',
'prop': 'images',
'pageids': page_id,
},
headers={'content-type': 'application/json'},
)
# Try to see if one of the images on the pages matches our
# incomplete cover_filename
try:
data = wikipedia_response.json()
results = data['query']['pages'][page_id]['images']
for result in results:
if re.match(re.escape(lpart) + r'.*?\.' + re.escape(rpart),
result['title']):
cover_filename = result['title']
break
except (ValueError, KeyError):
self._log.debug(
u'wikipedia: failed to retrieve a cover_filename'
)
return
# Find the absolute url of the cover art on Wikipedia
wikipedia_response = self.request(
self.WIKIPEDIA_URL,
params={
'format': 'json',
'action': 'query',
'continue': '',
'prop': 'imageinfo',
'iiprop': 'url',
'titles': cover_filename.encode('utf-8'),
},
headers={'content-type': 'application/json'},
)
try:
data = wikipedia_response.json()
results = data['query']['pages']
for _, result in results.iteritems():
image_url = result['imageinfo'][0]['url']
yield self._candidate(url=image_url,
match=Candidate.MATCH_EXACT)
except (ValueError, KeyError, IndexError):
self._log.debug(u'wikipedia: error scraping imageinfo')
return
class FileSystem(LocalArtSource):
NAME = u"Filesystem"
@staticmethod
def filename_priority(filename, cover_names):
"""Sort order for image names.
Return the indexes of the cover names found in the image filename;
images matching more keywords, and lower-numbered keywords, are
given higher priority.
"""
return [idx for (idx, x) in enumerate(cover_names) if x in filename]
def get(self, album, extra):
"""Look for album art files in the specified directories.
"""
paths = extra['paths']
if not paths:
return
cover_names = extra['cover_names']
cover_pat = br"(\b|_)({0})(\b|_)".format(b'|'.join(cover_names))
cautious = extra['cautious']
for path in paths:
if not os.path.isdir(path):
continue
# Find all files that look like images in the directory.
images = []
for fn in os.listdir(path):
for ext in IMAGE_EXTENSIONS:
if fn.lower().endswith(b'.' + ext.encode('utf8')) and \
os.path.isfile(os.path.join(path, fn)):
images.append(fn)
# Look for "preferred" filenames.
images = sorted(images,
key=lambda x:
self.filename_priority(x, cover_names))
remaining = []
for fn in images:
if re.search(cover_pat, os.path.splitext(fn)[0], re.I):
self._log.debug(u'using well-named art file {0}',
util.displayable_path(fn))
yield self._candidate(path=os.path.join(path, fn),
match=Candidate.MATCH_EXACT)
else:
remaining.append(fn)
# Fall back to any image in the folder.
if remaining and not cautious:
self._log.debug(u'using fallback art file {0}',
util.displayable_path(remaining[0]))
yield self._candidate(path=os.path.join(path, remaining[0]),
match=Candidate.MATCH_FALLBACK)
# Try each source in turn.
SOURCES_ALL = [u'filesystem',
u'coverart', u'itunes', u'amazon', u'albumart',
u'wikipedia', u'google', u'fanarttv']
ART_SOURCES = {
u'filesystem': FileSystem,
u'coverart': CoverArtArchive,
u'itunes': ITunesStore,
u'albumart': AlbumArtOrg,
u'amazon': Amazon,
u'wikipedia': Wikipedia,
u'google': GoogleImages,
u'fanarttv': FanartTV,
}
SOURCE_NAMES = {v: k for k, v in ART_SOURCES.items()}
# PLUGIN LOGIC ###############################################################
class FetchArtPlugin(plugins.BeetsPlugin, RequestMixin):
PAT_PX = r"(0|[1-9][0-9]*)px"
PAT_PERCENT = r"(100(\.00?)?|[1-9]?[0-9](\.[0-9]{1,2})?)%"
def __init__(self):
super(FetchArtPlugin, self).__init__()
# Holds candidates corresponding to downloaded images between
# fetching them and placing them in the filesystem.
self.art_candidates = {}
self.config.add({
'auto': True,
'minwidth': 0,
'maxwidth': 0,
'enforce_ratio': False,
'cautious': False,
'cover_names': ['cover', 'front', 'art', 'album', 'folder'],
'sources': ['filesystem',
'coverart', 'itunes', 'amazon', 'albumart'],
'google_key': None,
'google_engine': u'001442825323518660753:hrh5ch1gjzm',
'fanarttv_key': None,
'store_source': False,
})
self.config['google_key'].redact = True
self.config['fanarttv_key'].redact = True
self.minwidth = self.config['minwidth'].get(int)
self.maxwidth = self.config['maxwidth'].get(int)
# allow both pixel and percentage-based margin specifications
self.enforce_ratio = self.config['enforce_ratio'].get(
confit.OneOf([bool,
confit.String(pattern=self.PAT_PX),
confit.String(pattern=self.PAT_PERCENT)]))
self.margin_px = None
self.margin_percent = None
if type(self.enforce_ratio) is unicode:
if self.enforce_ratio[-1] == u'%':
self.margin_percent = float(self.enforce_ratio[:-1]) / 100
elif self.enforce_ratio[-2:] == u'px':
self.margin_px = int(self.enforce_ratio[:-2])
else:
# shouldn't happen
raise confit.ConfigValueError()
self.enforce_ratio = True
cover_names = self.config['cover_names'].as_str_seq()
self.cover_names = map(util.bytestring_path, cover_names)
self.cautious = self.config['cautious'].get(bool)
self.store_source = self.config['store_source'].get(bool)
self.src_removed = (config['import']['delete'].get(bool) or
config['import']['move'].get(bool))
if self.config['auto']:
# Enable two import hooks when fetching is enabled.
self.import_stages = [self.fetch_art]
self.register_listener('import_task_files', self.assign_art)
available_sources = list(SOURCES_ALL)
if not HAVE_ITUNES and u'itunes' in available_sources:
available_sources.remove(u'itunes')
if not self.config['google_key'].get() and \
u'google' in available_sources:
available_sources.remove(u'google')
sources_name = plugins.sanitize_choices(
self.config['sources'].as_str_seq(), available_sources)
if 'remote_priority' in self.config:
self._log.warning(
u'The `fetch_art.remote_priority` configuration option has '
u'been deprecated, see the documentation.')
if self.config['remote_priority'].get(bool):
try:
sources_name.remove(u'filesystem')
sources_name.append(u'filesystem')
except ValueError:
pass
self.sources = [ART_SOURCES[s](self._log, self.config)
for s in sources_name]
# Asynchronous; after music is added to the library.
def fetch_art(self, session, task):
"""Find art for the album being imported."""
if task.is_album: # Only fetch art for full albums.
if task.album.artpath and os.path.isfile(task.album.artpath):
# Album already has art (probably a re-import); skip it.
return
if task.choice_flag == importer.action.ASIS:
# For as-is imports, don't search Web sources for art.
local = True
elif task.choice_flag == importer.action.APPLY:
# Search everywhere for art.
local = False
else:
# For any other choices (e.g., TRACKS), do nothing.
return
candidate = self.art_for_album(task.album, task.paths, local)
if candidate:
self.art_candidates[task] = candidate
def _set_art(self, album, candidate, delete=False):
album.set_art(candidate.path, delete)
if self.store_source:
# store the source of the chosen artwork in a flexible field
self._log.debug(
u"Storing art_source for {0.albumartist} - {0.album}",
album)
album.art_source = SOURCE_NAMES[type(candidate.source)]
album.store()
# Synchronous; after music files are put in place.
def assign_art(self, session, task):
"""Place the discovered art in the filesystem."""
if task in self.art_candidates:
candidate = self.art_candidates.pop(task)
self._set_art(task.album, candidate, not self.src_removed)
if self.src_removed:
task.prune(candidate.path)
# Manual album art fetching.
def commands(self):
cmd = ui.Subcommand('fetchart', help='download album art')
cmd.parser.add_option(
u'-f', u'--force', dest='force',
action='store_true', default=False,
help=u're-download art when already present'
)
def func(lib, opts, args):
self.batch_fetch_art(lib, lib.albums(ui.decargs(args)), opts.force)
cmd.func = func
return [cmd]
# Utilities converted from functions to methods on logging overhaul
def art_for_album(self, album, paths, local_only=False):
"""Given an Album object, returns a path to downloaded art for the
album (or None if no art is found). If `maxwidth`, then images are
resized to this maximum pixel size. If `local_only`, then only local
image files from the filesystem are returned; no network requests
are made.
"""
out = None
# all the information any of the sources might need
extra = {'paths': paths,
'cover_names': self.cover_names,
'cautious': self.cautious,
'enforce_ratio': self.enforce_ratio,
'margin_px': self.margin_px,
'margin_percent': self.margin_percent,
'minwidth': self.minwidth,
'maxwidth': self.maxwidth}
for source in self.sources:
if source.IS_LOCAL or not local_only:
self._log.debug(
u'trying source {0} for album {1.albumartist} - {1.album}',
SOURCE_NAMES[type(source)],
album,
)
# URLs might be invalid at this point, or the image may not
# fulfill the requirements
for candidate in source.get(album, extra):
source.fetch_image(candidate, extra)
if candidate.validate(extra):
out = candidate
self._log.debug(
u'using {0.LOC_STR} image {1}'.format(
source, util.displayable_path(out.path)))
break
if out:
break
if out:
out.resize(extra)
return out
def batch_fetch_art(self, lib, albums, force):
"""Fetch album art for each of the albums. This implements the manual
fetchart CLI command.
"""
for album in albums:
if album.artpath and not force and os.path.isfile(album.artpath):
message = ui.colorize('text_highlight_minor', u'has album art')
else:
# In ordinary invocations, look for images on the
# filesystem. When forcing, however, always go to the Web
# sources.
local_paths = None if force else [album.path]
candidate = self.art_for_album(album, local_paths)
if candidate:
self._set_art(album, candidate)
message = ui.colorize('text_success', u'found album art')
else:
message = ui.colorize('text_error', u'no art found')
self._log.info(u'{0}: {1}', album, message)
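The enforce_ratio option above accepts a bool, a pixel margin like '10px', or a percentage like '0.5%'. A standalone sketch of that parsing (not part of this diff; simplified to plain Python types):
def parse_enforce_ratio(value):
    """Mirror the margin parsing in FetchArtPlugin.__init__ above."""
    margin_px = margin_percent = None
    if isinstance(value, str):  # `unicode` in the Python 2 code above
        if value.endswith('%'):
            margin_percent = float(value[:-1]) / 100
        elif value.endswith('px'):
            margin_px = int(value[:-2])
        value = True  # a margin string implies the check is enabled
    return value, margin_px, margin_percent

print(parse_enforce_ratio('0.5%'))  # (True, None, 0.005)
print(parse_enforce_ratio('10px'))  # (True, 10, None)
print(parse_enforce_ratio(False))   # (False, None, None)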

78
libs/beetsplug/filefilter.py Normal file
View file

@ -0,0 +1,78 @@
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Malte Ried.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Filter imported files using a regular expression.
"""
from __future__ import division, absolute_import, print_function
import re
from beets import config
from beets.plugins import BeetsPlugin
from beets.importer import SingletonImportTask
class FileFilterPlugin(BeetsPlugin):
def __init__(self):
super(FileFilterPlugin, self).__init__()
self.register_listener('import_task_created',
self.import_task_created_event)
self.config.add({
'path': '.*'
})
self.path_album_regex = \
self.path_singleton_regex = \
re.compile(self.config['path'].get())
if 'album_path' in self.config:
self.path_album_regex = re.compile(self.config['album_path'].get())
if 'singleton_path' in self.config:
self.path_singleton_regex = re.compile(
self.config['singleton_path'].get())
def import_task_created_event(self, session, task):
if task.items and len(task.items) > 0:
items_to_import = []
for item in task.items:
if self.file_filter(item['path']):
items_to_import.append(item)
if len(items_to_import) > 0:
task.items = items_to_import
else:
# Returning an empty list of tasks from the handler
# drops the task from the rest of the importer pipeline.
return []
elif isinstance(task, SingletonImportTask):
if not self.file_filter(task.item['path']):
return []
# If not filtered, return the original task unchanged.
return [task]
def file_filter(self, full_path):
"""Checks if the configured regular expressions allow the import
of the file given in full_path.
"""
import_config = dict(config['import'])
if 'singletons' not in import_config or not import_config[
'singletons']:
# Album
return self.path_album_regex.match(full_path) is not None
else:
# Singleton
return self.path_singleton_regex.match(full_path) is not None
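Note that file_filter above applies re.match to the full path, so the configured expression must match from the first character of the path. A small sketch (not part of this diff; the pattern and paths are hypothetical):
import re

path_regex = re.compile(r'.*\.flac$')  # hypothetical 'path' config value
for full_path in ['/music/a.flac', '/music/b.mp3']:
    print(full_path, bool(path_regex.match(full_path)))
# /music/a.flac True
# /music/b.mp3 False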

37
libs/beetsplug/freedesktop.py Normal file
View file

@ -0,0 +1,37 @@
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Matt Lichtenberg.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Creates freedesktop.org-compliant .directory files on an album level.
"""
from __future__ import division, absolute_import, print_function
from beets.plugins import BeetsPlugin
from beets import ui
class FreedesktopPlugin(BeetsPlugin):
def commands(self):
deprecated = ui.Subcommand(
"freedesktop",
help=u"Print a message to redirect to thumbnails --dolphin")
deprecated.func = self.deprecation_message
return [deprecated]
def deprecation_message(self, lib, opts, args):
ui.print_(u"This plugin is deprecated. Its functionality is "
u"superseded by the 'thumbnails' plugin")
ui.print_(u"'thumbnails --dolphin' replaces freedesktop. See doc & "
u"changelog for more information")

173
libs/beetsplug/fromfilename.py Normal file
View file

@ -0,0 +1,173 @@
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Jan-Erik Dahlin
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""If the title is empty, try to extract track and title from the
filename.
"""
from __future__ import division, absolute_import, print_function
from beets import plugins
from beets.util import displayable_path
import os
import re
# Filename field extraction patterns.
PATTERNS = [
# "01 - Track 01" and "01": do nothing
r'^(\d+)\s*-\s*track\s*\d$',
r'^\d+$',
# Useful patterns.
r'^(?P<artist>.+)-(?P<title>.+)-(?P<tag>.*)$',
r'^(?P<track>\d+)\s*-(?P<artist>.+)-(?P<title>.+)-(?P<tag>.*)$',
r'^(?P<track>\d+)\s(?P<artist>.+)-(?P<title>.+)-(?P<tag>.*)$',
r'^(?P<artist>.+)-(?P<title>.+)$',
r'^(?P<track>\d+)\.\s*(?P<artist>.+)-(?P<title>.+)$',
r'^(?P<track>\d+)\s*-\s*(?P<artist>.+)-(?P<title>.+)$',
r'^(?P<track>\d+)\s*-(?P<artist>.+)-(?P<title>.+)$',
r'^(?P<track>\d+)\s(?P<artist>.+)-(?P<title>.+)$',
r'^(?P<title>.+)$',
r'^(?P<track>\d+)\.\s*(?P<title>.+)$',
r'^(?P<track>\d+)\s*-\s*(?P<title>.+)$',
r'^(?P<track>\d+)\s(?P<title>.+)$',
r'^(?P<title>.+) by (?P<artist>.+)$',
]
# Titles considered "empty" and in need of replacement.
BAD_TITLE_PATTERNS = [
r'^$',
r'\d+?\s?-?\s*track\s*\d+',
]
def equal(seq):
"""Determine whether a sequence holds identical elements.
"""
return len(set(seq)) <= 1
def equal_fields(matchdict, field):
"""Do all items in `matchdict`, whose values are dictionaries, have
the same value for `field`? (If they do, the field is probably not
the title.)
"""
return equal(m[field] for m in matchdict.values())
def all_matches(names, pattern):
"""If all the filenames in the item/filename mapping match the
pattern, return a dictionary mapping the items to dictionaries
giving the value for each named subpattern in the match. Otherwise,
return None.
"""
matches = {}
for item, name in names.items():
m = re.match(pattern, name, re.IGNORECASE)
if m and m.groupdict():
# Only yield a match when the regex applies *and* has
# capture groups. Otherwise, no information can be extracted
# from the filename.
matches[item] = m.groupdict()
else:
return None
return matches
def bad_title(title):
"""Determine whether a given title is "bad" (empty or otherwise
meaningless) and in need of replacement.
"""
for pat in BAD_TITLE_PATTERNS:
if re.match(pat, title, re.IGNORECASE):
return True
return False
def apply_matches(d):
"""Given a mapping from items to field dicts, apply the fields to
the objects.
"""
some_map = d.values()[0]
keys = some_map.keys()
# Only proceed if the "tag" field is equal across all filenames.
if 'tag' in keys and not equal_fields(d, 'tag'):
return
# Given both an "artist" and "title" field, assume that one is
# *actually* the artist, which must be uniform, and use the other
# for the title. This, of course, won't work for VA albums.
if 'artist' in keys:
if equal_fields(d, 'artist'):
artist = some_map['artist']
title_field = 'title'
elif equal_fields(d, 'title'):
artist = some_map['title']
title_field = 'artist'
else:
# Both vary. Abort.
return
for item in d:
if not item.artist:
item.artist = artist
# No artist field: remaining field is the title.
else:
title_field = 'title'
# Apply the title and track.
for item in d:
if bad_title(item.title):
item.title = unicode(d[item][title_field])
if 'track' in d[item] and item.track == 0:
item.track = int(d[item]['track'])
# Plugin structure and hook into import process.
class FromFilenamePlugin(plugins.BeetsPlugin):
def __init__(self):
super(FromFilenamePlugin, self).__init__()
self.register_listener('import_task_start', filename_task)
def filename_task(task, session):
"""Examine each item in the task to see if we can extract a title
from the filename. Try to match all filenames to a number of
regexps, starting with the most complex patterns and successively
trying less complex patterns. As soon as all filenames match the
same regex we can make an educated guess of which part of the
regex that contains the title.
"""
items = task.items if task.is_album else [task.item]
# Look for suspicious (empty or meaningless) titles.
missing_titles = sum(bad_title(i.title) for i in items)
if missing_titles:
# Get the base filenames (no path or extension).
names = {}
for item in items:
path = displayable_path(item.path)
name, _ = os.path.splitext(os.path.basename(path))
names[item] = name
# Look for useful information in the filenames.
for pattern in PATTERNS:
d = all_matches(names, pattern)
if d:
apply_matches(d)
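Each entry in PATTERNS is applied with re.match and re.IGNORECASE, and only the named groups are used. A standalone sketch with one of the patterns above (not part of this diff; the filename is hypothetical):
import re

# One of the PATTERNS above: leading track number, separator, title.
pattern = r'^(?P<track>\d+)\s*-\s*(?P<title>.+)$'
m = re.match(pattern, '01 - Smells Like Teen Spirit', re.IGNORECASE)
print(m.groupdict())  # track: '01', title: 'Smells Like Teen Spirit'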

169
libs/beetsplug/ftintitle.py Normal file
View file

@ -0,0 +1,169 @@
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Verrus, <github.com/Verrus/beets-plugin-featInTitle>
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Moves "featured" artists to the title from the artist field.
"""
from __future__ import division, absolute_import, print_function
import re
from beets import plugins
from beets import ui
from beets.util import displayable_path
def split_on_feat(artist):
"""Given an artist string, split the "main" artist from any artist
on the right-hand side of a string like "feat". Return the main
artist, which is always a string, and the featuring artist, which
may be a string or None if none is present.
"""
# split on the first "feat".
regex = re.compile(plugins.feat_tokens(), re.IGNORECASE)
parts = [s.strip() for s in regex.split(artist, 1)]
if len(parts) == 1:
return parts[0], None
else:
return tuple(parts)
def contains_feat(title):
"""Determine whether the title contains a "featured" marker.
"""
return bool(re.search(plugins.feat_tokens(), title, flags=re.IGNORECASE))
def find_feat_part(artist, albumartist):
"""Attempt to find featured artists in the item's artist fields and
return the results. Return None if no featured artist is found.
"""
feat_part = None
# Look for the album artist in the artist field. If it's not
# present, give up.
albumartist_split = artist.split(albumartist, 1)
if len(albumartist_split) <= 1:
return feat_part
# If the last element of the split (the right-hand side of the
# album artist) is nonempty, then it probably contains the
# featured artist.
elif albumartist_split[-1] != '':
# Extract the featured artist from the right-hand side.
_, feat_part = split_on_feat(albumartist_split[-1])
# Otherwise, if there's nothing on the right-hand side, look for a
# featuring artist on the left-hand side.
else:
lhs, rhs = split_on_feat(albumartist_split[0])
if lhs:
feat_part = lhs
return feat_part
class FtInTitlePlugin(plugins.BeetsPlugin):
def __init__(self):
super(FtInTitlePlugin, self).__init__()
self.config.add({
'auto': True,
'drop': False,
'format': u'feat. {0}',
})
self._command = ui.Subcommand(
'ftintitle',
help=u'move featured artists to the title field')
self._command.parser.add_option(
u'-d', u'--drop', dest='drop',
action='store_true', default=False,
help=u'drop featuring from artists and ignore title update')
if self.config['auto']:
self.import_stages = [self.imported]
def commands(self):
def func(lib, opts, args):
self.config.set_args(opts)
drop_feat = self.config['drop'].get(bool)
write = ui.should_write()
for item in lib.items(ui.decargs(args)):
self.ft_in_title(item, drop_feat)
item.store()
if write:
item.try_write()
self._command.func = func
return [self._command]
def imported(self, session, task):
"""Import hook for moving featuring artist automatically.
"""
drop_feat = self.config['drop'].get(bool)
for item in task.imported_items():
self.ft_in_title(item, drop_feat)
item.store()
def update_metadata(self, item, feat_part, drop_feat):
"""Choose how to add new artists to the title and set the new
metadata. Also, print out messages about any changes that are made.
If `drop_feat` is set, then do not add the artist to the title; just
remove it from the artist field.
"""
# In all cases, update the artist fields.
self._log.info(u'artist: {0} -> {1}', item.artist, item.albumartist)
item.artist = item.albumartist
if item.artist_sort:
# Just strip the featured artist from the sort name.
item.artist_sort, _ = split_on_feat(item.artist_sort)
# Only update the title if it does not already contain a featured
# artist and if we do not drop featuring information.
if not drop_feat and not contains_feat(item.title):
feat_format = self.config['format'].get(unicode)
new_format = feat_format.format(feat_part)
new_title = u"{0} {1}".format(item.title, new_format)
self._log.info(u'title: {0} -> {1}', item.title, new_title)
item.title = new_title
def ft_in_title(self, item, drop_feat):
"""Look for featured artists in the item's artist fields and move
them to the title.
"""
artist = item.artist.strip()
albumartist = item.albumartist.strip()
# Check whether there is a featured artist on this track and the
# artist field does not exactly match the album artist field. In
# that case, we attempt to move the featured artist to the title.
_, featured = split_on_feat(artist)
if featured and albumartist != artist and albumartist:
self._log.info('{}', displayable_path(item.path))
feat_part = None
# Attempt to find the featured artist.
feat_part = find_feat_part(artist, albumartist)
# If we have a featuring artist, move it to the title.
if feat_part:
self.update_metadata(item, feat_part, drop_feat)
else:
self._log.info(u'no featuring artists found')
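split_on_feat above delegates the separator regex to plugins.feat_tokens(). A simplified, self-contained sketch of the same split (not part of this diff; the regex here is a reduced stand-in for feat_tokens, which covers more spellings and bracketed forms):
import re

FEAT = re.compile(r'\s+(?:feat\.?|featuring|ft\.?)\s+', re.IGNORECASE)

def split_on_feat(artist):
    parts = [s.strip() for s in FEAT.split(artist, 1)]
    return (parts[0], None) if len(parts) == 1 else tuple(parts)

print(split_on_feat('Alice feat. Bob'))  # ('Alice', 'Bob')
print(split_on_feat('Alice'))            # ('Alice', None)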

48
libs/beetsplug/fuzzy.py Normal file
View file

@ -0,0 +1,48 @@
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Philippe Mongeau.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Provides a fuzzy matching query.
"""
from __future__ import division, absolute_import, print_function
from beets.plugins import BeetsPlugin
from beets.dbcore.query import StringFieldQuery
from beets import config
import difflib
class FuzzyQuery(StringFieldQuery):
@classmethod
def string_match(cls, pattern, val):
# smartcase
if pattern.islower():
val = val.lower()
query_matcher = difflib.SequenceMatcher(None, pattern, val)
threshold = config['fuzzy']['threshold'].as_number()
return query_matcher.quick_ratio() >= threshold
class FuzzyPlugin(BeetsPlugin):
def __init__(self):
super(FuzzyPlugin, self).__init__()
self.config.add({
'prefix': '~',
'threshold': 0.7,
})
def queries(self):
prefix = self.config['prefix'].get(basestring)
return {prefix: FuzzyQuery}
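The matching primitive behind FuzzyQuery is difflib's quick_ratio compared against the configured threshold (0.7 by default), with the smartcase lowering shown above. Demonstrated in isolation (not part of this diff):
import difflib

pattern, val = 'nirvana', 'Nirvana'
if pattern.islower():  # smartcase, as in FuzzyQuery.string_match
    val = val.lower()
ratio = difflib.SequenceMatcher(None, pattern, val).quick_ratio()
print(ratio, ratio >= 0.7)  # 1.0 True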

108
libs/beetsplug/hook.py Normal file
View file

@ -0,0 +1,108 @@
# This file is part of beets.
# Copyright 2015, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Allows custom commands to be run when an event is emitted by beets"""
from __future__ import division, absolute_import, print_function
import string
import subprocess
from beets.plugins import BeetsPlugin
from beets.ui import _arg_encoding
from beets.util import shlex_split
class CodingFormatter(string.Formatter):
"""A custom string formatter that decodes the format string and it's
fields.
"""
def __init__(self, coding):
"""Creates a new coding formatter with the provided coding."""
self._coding = coding
def format(self, format_string, *args, **kwargs):
"""Formats the provided string using the provided arguments and keyword
arguments.
This method decodes the format string using the formatter's coding.
See str.format and string.Formatter.format.
"""
try:
format_string = format_string.decode(self._coding)
except UnicodeEncodeError:
pass
return super(CodingFormatter, self).format(format_string, *args,
**kwargs)
def convert_field(self, value, conversion):
"""Converts the provided value given a conversion type.
This method decodes the converted value using the formatter's coding.
See string.Formatter.convert_field.
"""
converted = super(CodingFormatter, self).convert_field(value,
conversion)
try:
converted = converted.decode(self._coding)
except UnicodeEncodeError:
pass
return converted
class HookPlugin(BeetsPlugin):
"""Allows custom commands to be run when an event is emitted by beets"""
def __init__(self):
super(HookPlugin, self).__init__()
self.config.add({
'hooks': []
})
hooks = self.config['hooks'].get(list)
for hook_index in range(len(hooks)):
hook = self.config['hooks'][hook_index]
hook_event = hook['event'].get(unicode)
hook_command = hook['command'].get(unicode)
self.create_and_register_hook(hook_event, hook_command)
def create_and_register_hook(self, event, command):
def hook_function(**kwargs):
if command is None or len(command) == 0:
self._log.error('invalid command "{0}"', command)
return
formatter = CodingFormatter(_arg_encoding())
command_pieces = shlex_split(command)
for i, piece in enumerate(command_pieces):
command_pieces[i] = formatter.format(piece, event=event,
**kwargs)
self._log.debug(u'running command "{0}" for event {1}',
u' '.join(command_pieces), event)
try:
subprocess.Popen(command_pieces).wait()
except OSError as exc:
self._log.error(u'hook for {0} failed: {1}', event, exc)
self.register_listener(event, hook_function)
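Each piece of the hook command is shell-split and then str.format-ed with the event name plus the event's keyword arguments. A standalone sketch (not part of this diff; the command and payload are hypothetical, and the plugin itself uses beets' unicode-safe shlex_split and CodingFormatter):
import shlex

command = 'echo {event}: {path}'      # hypothetical hook command
kwargs = {'path': '/music/new.flac'}  # hypothetical event payload

pieces = shlex.split(command)
print([p.format(event='import', **kwargs) for p in pieces])
# ['echo', 'import:', '/music/new.flac']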

Some files were not shown because too many files have changed in this diff.