Merge pull request #1428 from clinton-hall/libs/requirements

Update requirements
Labrys of Knossos, 2018-12-16 11:52:04 -05:00, committed via GitHub
commit 8dbb1a2451
75 changed files with 2171 additions and 3848 deletions

File: concurrent/__init__.py (new)

@@ -0,0 +1 @@
+__path__ = __import__('pkgutil').extend_path(__path__, __name__)

File: concurrent/futures/__init__.py

@@ -14,5 +14,10 @@ from concurrent.futures._base import (FIRST_COMPLETED,
                                       Executor,
                                       wait,
                                       as_completed)
-from concurrent.futures.process import ProcessPoolExecutor
 from concurrent.futures.thread import ThreadPoolExecutor
+
+try:
+    from concurrent.futures.process import ProcessPoolExecutor
+except ImportError:
+    # some platforms don't have multiprocessing
+    pass
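
The guarded import means ProcessPoolExecutor can simply be absent on platforms without multiprocessing. A minimal sketch of what that implies for callers (the thread fallback is illustrative, not part of this change):

    from concurrent import futures

    try:
        # Missing entirely when multiprocessing could not be imported.
        pool = futures.ProcessPoolExecutor(max_workers=2)
    except AttributeError:
        # Fall back to threads on such platforms.
        pool = futures.ThreadPoolExecutor(max_workers=2)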

File: concurrent/futures/_base.py

@@ -1,15 +1,12 @@
 # Copyright 2009 Brian Quinlan. All Rights Reserved.
 # Licensed to PSF under a Contributor Agreement.
 
-from __future__ import with_statement
+import collections
 import logging
 import threading
+import itertools
 import time
-
-try:
-    from collections import namedtuple
-except ImportError:
-    from concurrent.futures._compat import namedtuple
+import types
 
 __author__ = 'Brian Quinlan (brian@sweetapp.com)'
@@ -175,6 +172,29 @@ def _create_and_install_waiters(fs, return_when):
     return waiter
 
+
+def _yield_finished_futures(fs, waiter, ref_collect):
+    """
+    Iterate on the list *fs*, yielding finished futures one by one in
+    reverse order.
+    Before yielding a future, *waiter* is removed from its waiters
+    and the future is removed from each set in the collection of sets
+    *ref_collect*.
+
+    The aim of this function is to avoid keeping stale references after
+    the future is yielded and before the iterator resumes.
+    """
+    while fs:
+        f = fs[-1]
+        for futures_set in ref_collect:
+            futures_set.remove(f)
+        with f._condition:
+            f._waiters.remove(waiter)
+        del f
+        # Careful not to keep a reference to the popped value
+        yield fs.pop()
+
 def as_completed(fs, timeout=None):
     """An iterator over the given futures that yields each as it completes.
@@ -186,7 +206,8 @@ def as_completed(fs, timeout=None):
     Returns:
         An iterator that yields the given Futures as they complete (finished or
-        cancelled).
+        cancelled). If any given Futures are duplicated, they will be returned
+        once.
 
     Raises:
         TimeoutError: If the entire result iterator could not be generated
@@ -195,16 +216,20 @@ def as_completed(fs, timeout=None):
     if timeout is not None:
         end_time = timeout + time.time()
 
+    fs = set(fs)
+    total_futures = len(fs)
     with _AcquireFutures(fs):
         finished = set(
                 f for f in fs
                 if f._state in [CANCELLED_AND_NOTIFIED, FINISHED])
-        pending = set(fs) - finished
+        pending = fs - finished
         waiter = _create_and_install_waiters(fs, _AS_COMPLETED)
-
+    finished = list(finished)
     try:
-        for future in finished:
-            yield future
+        for f in _yield_finished_futures(finished, waiter,
+                                         ref_collect=(fs,)):
+            f = [f]
+            yield f.pop()
 
         while pending:
             if timeout is None:
@@ -214,7 +239,7 @@ def as_completed(fs, timeout=None):
                 if wait_timeout < 0:
                     raise TimeoutError(
                             '%d (of %d) futures unfinished' % (
-                            len(pending), len(fs)))
+                            len(pending), total_futures))
 
             waiter.event.wait(wait_timeout)
@@ -223,15 +248,20 @@ def as_completed(fs, timeout=None):
                 waiter.finished_futures = []
                 waiter.event.clear()
 
-            for future in finished:
-                yield future
-                pending.remove(future)
+            # reverse to keep finishing order
+            finished.reverse()
+            for f in _yield_finished_futures(finished, waiter,
+                                             ref_collect=(fs, pending)):
+                f = [f]
+                yield f.pop()
 
     finally:
+        # Remove waiter from unfinished futures
         for f in fs:
-            f._waiters.remove(waiter)
+            with f._condition:
+                f._waiters.remove(waiter)
 
-DoneAndNotDoneFutures = namedtuple(
+DoneAndNotDoneFutures = collections.namedtuple(
         'DoneAndNotDoneFutures', 'done not_done')
 
 def wait(fs, timeout=None, return_when=ALL_COMPLETED):
     """Wait for the futures in the given sequence to complete.
@@ -276,7 +306,8 @@ def wait(fs, timeout=None, return_when=ALL_COMPLETED):
     waiter.event.wait(timeout)
     for f in fs:
-        f._waiters.remove(waiter)
+        with f._condition:
+            f._waiters.remove(waiter)
 
     done.update(waiter.finished_futures)
     return DoneAndNotDoneFutures(done, set(fs) - done)
@@ -290,6 +321,7 @@ class Future(object):
         self._state = PENDING
         self._result = None
         self._exception = None
+        self._traceback = None
         self._waiters = []
         self._done_callbacks = []
@@ -299,22 +331,41 @@ class Future(object):
                 callback(self)
             except Exception:
                 LOGGER.exception('exception calling callback for %r', self)
+            except BaseException:
+                # Explicitly let all other new-style exceptions through so
+                # that we can catch all old-style exceptions with a simple
+                # "except:" clause below.
+                #
+                # All old-style exception objects are instances of
+                # types.InstanceType, but "except types.InstanceType:" does
+                # not catch old-style exceptions for some reason. Thus, the
+                # only way to catch all old-style exceptions without catching
+                # any new-style exceptions is to filter out the new-style
+                # exceptions, which all derive from BaseException.
+                raise
+            except:
+                # Because of the BaseException clause above, this handler only
+                # executes for old-style exception objects.
+                LOGGER.exception('exception calling callback for %r', self)
 
     def __repr__(self):
         with self._condition:
             if self._state == FINISHED:
                 if self._exception:
-                    return '<Future at %s state=%s raised %s>' % (
-                        hex(id(self)),
+                    return '<%s at %#x state=%s raised %s>' % (
+                        self.__class__.__name__,
+                        id(self),
                         _STATE_TO_DESCRIPTION_MAP[self._state],
                         self._exception.__class__.__name__)
                 else:
-                    return '<Future at %s state=%s returned %s>' % (
-                        hex(id(self)),
+                    return '<%s at %#x state=%s returned %s>' % (
+                        self.__class__.__name__,
+                        id(self),
                         _STATE_TO_DESCRIPTION_MAP[self._state],
                         self._result.__class__.__name__)
-            return '<Future at %s state=%s>' % (
-                    hex(id(self)),
+            return '<%s at %#x state=%s>' % (
+                    self.__class__.__name__,
+                    id(self),
                     _STATE_TO_DESCRIPTION_MAP[self._state])
 
     def cancel(self):
@@ -337,7 +388,7 @@ class Future(object):
             return True
 
     def cancelled(self):
-        """Return True if the future has cancelled."""
+        """Return True if the future was cancelled."""
         with self._condition:
             return self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]
@@ -353,7 +404,14 @@ class Future(object):
     def __get_result(self):
         if self._exception:
-            raise self._exception
+            if isinstance(self._exception, types.InstanceType):
+                # The exception is an instance of an old-style class, which
+                # means type(self._exception) returns types.ClassType instead
+                # of the exception's actual class type.
+                exception_type = self._exception.__class__
+            else:
+                exception_type = type(self._exception)
+            raise exception_type, self._exception, self._traceback
         else:
             return self._result
@@ -405,6 +463,39 @@ class Future(object):
         else:
             raise TimeoutError()
 
+    def exception_info(self, timeout=None):
+        """Return a tuple of (exception, traceback) raised by the call that the
+        future represents.
+
+        Args:
+            timeout: The number of seconds to wait for the exception if the
+                future isn't done. If None, then there is no limit on the wait
+                time.
+
+        Returns:
+            The exception raised by the call that the future represents or None
+            if the call completed without raising.
+
+        Raises:
+            CancelledError: If the future was cancelled.
+            TimeoutError: If the future didn't finish executing before the given
+                timeout.
+        """
+        with self._condition:
+            if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
+                raise CancelledError()
+            elif self._state == FINISHED:
+                return self._exception, self._traceback
+
+            self._condition.wait(timeout)
+
+            if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
+                raise CancelledError()
+            elif self._state == FINISHED:
+                return self._exception, self._traceback
+            else:
+                raise TimeoutError()
+
     def exception(self, timeout=None):
         """Return the exception raised by the call that the future represents.
@@ -422,21 +513,7 @@ class Future(object):
             TimeoutError: If the future didn't finish executing before the given
                 timeout.
         """
-        with self._condition:
-            if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
-                raise CancelledError()
-            elif self._state == FINISHED:
-                return self._exception
-
-            self._condition.wait(timeout)
-
-            if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
-                raise CancelledError()
-            elif self._state == FINISHED:
-                return self._exception
-            else:
-                raise TimeoutError()
+        return self.exception_info(timeout)[0]
 
     # The following methods should only be used by Executors and in tests.
     def set_running_or_notify_cancel(self):
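
exception_info() is specific to this Python 2 backport; it exposes the traceback alongside the exception, which Python 2 cannot attach to the exception object itself. A short usage sketch:

    import concurrent.futures

    def fail():
        raise ValueError('boom')

    executor = concurrent.futures.ThreadPoolExecutor(max_workers=1)
    future = executor.submit(fail)
    exc, tb = future.exception_info()  # blocks until the call finishes
    print(exc)  # the ValueError raised by fail()
    executor.shutdown(wait=True)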
@@ -475,8 +552,8 @@ class Future(object):
             return True
         else:
             LOGGER.critical('Future %s in unexpected state: %s',
-                            id(self.future),
-                            self.future._state)
+                            id(self),
+                            self._state)
             raise RuntimeError('Future in unexpected state')
 
     def set_result(self, result):
@@ -492,19 +569,28 @@ class Future(object):
             self._condition.notify_all()
         self._invoke_callbacks()
 
-    def set_exception(self, exception):
-        """Sets the result of the future as being the given exception.
+    def set_exception_info(self, exception, traceback):
+        """Sets the result of the future as being the given exception
+        and traceback.
 
         Should only be used by Executor implementations and unit tests.
         """
         with self._condition:
             self._exception = exception
+            self._traceback = traceback
             self._state = FINISHED
             for waiter in self._waiters:
                 waiter.add_exception(self)
             self._condition.notify_all()
         self._invoke_callbacks()
 
+    def set_exception(self, exception):
+        """Sets the result of the future as being the given exception.
+
+        Should only be used by Executor implementations and unit tests.
+        """
+        self.set_exception_info(exception, None)
+
 class Executor(object):
     """This is an abstract base class for concrete asynchronous executors."""
@@ -520,7 +606,7 @@ class Executor(object):
         raise NotImplementedError()
 
     def map(self, fn, *iterables, **kwargs):
-        """Returns a iterator equivalent to map(fn, iter).
+        """Returns an iterator equivalent to map(fn, iter).
 
         Args:
             fn: A callable that will take as many arguments as there are
@@ -541,17 +627,24 @@ class Executor(object):
         if timeout is not None:
             end_time = timeout + time.time()
 
-        fs = [self.submit(fn, *args) for args in zip(*iterables)]
+        fs = [self.submit(fn, *args) for args in itertools.izip(*iterables)]
 
-        try:
-            for future in fs:
-                if timeout is None:
-                    yield future.result()
-                else:
-                    yield future.result(end_time - time.time())
-        finally:
-            for future in fs:
-                future.cancel()
+        # Yield must be hidden in closure so that the futures are submitted
+        # before the first iterator value is required.
+        def result_iterator():
+            try:
+                # reverse to keep finishing order
+                fs.reverse()
+                while fs:
+                    # Careful not to keep a reference to the popped future
+                    if timeout is None:
+                        yield fs.pop().result()
+                    else:
+                        yield fs.pop().result(end_time - time.time())
+            finally:
+                for future in fs:
+                    future.cancel()
+        return result_iterator()
 
     def shutdown(self, wait=True):
         """Clean-up the resources associated with the Executor.

File: concurrent/futures/_compat.py (deleted)

@@ -1,101 +0,0 @@
from keyword import iskeyword as _iskeyword
from operator import itemgetter as _itemgetter
import sys as _sys


def namedtuple(typename, field_names):
    """Returns a new subclass of tuple with named fields.

    >>> Point = namedtuple('Point', 'x y')
    >>> Point.__doc__           # docstring for the new class
    'Point(x, y)'
    >>> p = Point(11, y=22)     # instantiate with positional args or keywords
    >>> p[0] + p[1]             # indexable like a plain tuple
    33
    >>> x, y = p                # unpack like a regular tuple
    >>> x, y
    (11, 22)
    >>> p.x + p.y               # fields also accessable by name
    33
    >>> d = p._asdict()         # convert to a dictionary
    >>> d['x']
    11
    >>> Point(**d)              # convert from a dictionary
    Point(x=11, y=22)
    >>> p._replace(x=100)       # _replace() is like str.replace() but targets named fields
    Point(x=100, y=22)
    """

    # Parse and validate the field names. Validation serves two purposes,
    # generating informative error messages and preventing template injection attacks.
    if isinstance(field_names, basestring):
        field_names = field_names.replace(',', ' ').split() # names separated by whitespace and/or commas
    field_names = tuple(map(str, field_names))
    for name in (typename,) + field_names:
        if not all(c.isalnum() or c=='_' for c in name):
            raise ValueError('Type names and field names can only contain alphanumeric characters and underscores: %r' % name)
        if _iskeyword(name):
            raise ValueError('Type names and field names cannot be a keyword: %r' % name)
        if name[0].isdigit():
            raise ValueError('Type names and field names cannot start with a number: %r' % name)
    seen_names = set()
    for name in field_names:
        if name.startswith('_'):
            raise ValueError('Field names cannot start with an underscore: %r' % name)
        if name in seen_names:
            raise ValueError('Encountered duplicate field name: %r' % name)
        seen_names.add(name)

    # Create and fill-in the class template
    numfields = len(field_names)
    argtxt = repr(field_names).replace("'", "")[1:-1]   # tuple repr without parens or quotes
    reprtxt = ', '.join('%s=%%r' % name for name in field_names)
    dicttxt = ', '.join('%r: t[%d]' % (name, pos) for pos, name in enumerate(field_names))
    template = '''class %(typename)s(tuple):
        '%(typename)s(%(argtxt)s)' \n
        __slots__ = () \n
        _fields = %(field_names)r \n
        def __new__(_cls, %(argtxt)s):
            return _tuple.__new__(_cls, (%(argtxt)s)) \n
        @classmethod
        def _make(cls, iterable, new=tuple.__new__, len=len):
            'Make a new %(typename)s object from a sequence or iterable'
            result = new(cls, iterable)
            if len(result) != %(numfields)d:
                raise TypeError('Expected %(numfields)d arguments, got %%d' %% len(result))
            return result \n
        def __repr__(self):
            return '%(typename)s(%(reprtxt)s)' %% self \n
        def _asdict(t):
            'Return a new dict which maps field names to their values'
            return {%(dicttxt)s} \n
        def _replace(_self, **kwds):
            'Return a new %(typename)s object replacing specified fields with new values'
            result = _self._make(map(kwds.pop, %(field_names)r, _self))
            if kwds:
                raise ValueError('Got unexpected field names: %%r' %% kwds.keys())
            return result \n
        def __getnewargs__(self):
            return tuple(self) \n\n''' % locals()
    for i, name in enumerate(field_names):
        template += '        %s = _property(_itemgetter(%d))\n' % (name, i)

    # Execute the template string in a temporary namespace and
    # support tracing utilities by setting a value for frame.f_globals['__name__']
    namespace = dict(_itemgetter=_itemgetter, __name__='namedtuple_%s' % typename,
                     _property=property, _tuple=tuple)
    try:
        exec(template, namespace)
    except SyntaxError:
        e = _sys.exc_info()[1]
        raise SyntaxError(e.message + ':\n' + template)
    result = namespace[typename]

    # For pickling to work, the __module__ variable needs to be set to the frame
    # where the named tuple is created. Bypass this step in enviroments where
    # sys._getframe is not defined (Jython for example).
    if hasattr(_sys, '_getframe'):
        result.__module__ = _sys._getframe(1).f_globals.get('__name__', '__main__')

    return result

File: concurrent/futures/process.py

@@ -43,20 +43,14 @@ Process #1..n:
     _ResultItems in "Request Q"
 """
 
-from __future__ import with_statement
 import atexit
+from concurrent.futures import _base
+import Queue as queue
 import multiprocessing
 import threading
 import weakref
 import sys
 
-from concurrent.futures import _base
-
-try:
-    import queue
-except ImportError:
-    import Queue as queue
-
 __author__ = 'Brian Quinlan (brian@sweetapp.com)'
# Workers are created as daemon threads and processes. This is done to allow the # Workers are created as daemon threads and processes. This is done to allow the
@@ -79,11 +73,11 @@ _shutdown = False
 def _python_exit():
     global _shutdown
     _shutdown = True
-    items = list(_threads_queues.items())
+    items = list(_threads_queues.items()) if _threads_queues else ()
     for t, q in items:
         q.put(None)
     for t, q in items:
-        t.join()
+        t.join(sys.maxint)
 
 # Controls how many more calls than processes will be queued in the call queue.
 # A smaller number will mean that processes spend more time idle waiting for
@@ -132,7 +126,7 @@ def _process_worker(call_queue, result_queue):
             return
         try:
             r = call_item.fn(*call_item.args, **call_item.kwargs)
-        except BaseException:
+        except:
             e = sys.exc_info()[1]
             result_queue.put(_ResultItem(call_item.work_id,
                                          exception=e))
@@ -220,6 +214,8 @@ def _queue_management_worker(executor_reference,
                 work_item.future.set_exception(result_item.exception)
             else:
                 work_item.future.set_result(result_item.result)
+            # Delete references to object. See issue16284
+            del work_item
         # Check whether we should start shutting down.
         executor = executor_reference()
         # No more work items can be added if:
@@ -266,6 +262,7 @@ def _check_system_limits():
     _system_limited = "system provides too few semaphores (%d available, 256 necessary)" % nsems_max
     raise NotImplementedError(_system_limited)
 
+
 class ProcessPoolExecutor(_base.Executor):
     def __init__(self, max_workers=None):
         """Initializes a new ProcessPoolExecutor instance.
@@ -280,6 +277,9 @@ class ProcessPoolExecutor(_base.Executor):
         if max_workers is None:
             self._max_workers = multiprocessing.cpu_count()
         else:
+            if max_workers <= 0:
+                raise ValueError("max_workers must be greater than 0")
+
             self._max_workers = max_workers
 
         # Make the call queue slightly larger than the number of processes to
@@ -351,7 +351,7 @@ class ProcessPoolExecutor(_base.Executor):
             # Wake up queue management thread
             self._result_queue.put(None)
             if wait:
-                self._queue_management_thread.join()
+                self._queue_management_thread.join(sys.maxint)
         # To reduce the risk of openning too many files, remove references to
         # objects that use file descriptors.
         self._queue_management_thread = None

File: concurrent/futures/thread.py

@@ -3,18 +3,20 @@
 """Implements ThreadPoolExecutor."""
 
-from __future__ import with_statement
 import atexit
+from concurrent.futures import _base
+import itertools
+import Queue as queue
 import threading
 import weakref
 import sys
 
-from concurrent.futures import _base
-
 try:
-    import queue
+    from multiprocessing import cpu_count
 except ImportError:
-    import Queue as queue
+    # some platforms don't have multiprocessing
+    def cpu_count():
+        return None
 
 __author__ = 'Brian Quinlan (brian@sweetapp.com)'
@@ -38,11 +40,11 @@ _shutdown = False
 def _python_exit():
     global _shutdown
     _shutdown = True
-    items = list(_threads_queues.items())
+    items = list(_threads_queues.items()) if _threads_queues else ()
     for t, q in items:
         q.put(None)
     for t, q in items:
-        t.join()
+        t.join(sys.maxint)
 
 atexit.register(_python_exit)
@@ -59,9 +61,9 @@ class _WorkItem(object):
         try:
             result = self.fn(*self.args, **self.kwargs)
-        except BaseException:
-            e = sys.exc_info()[1]
-            self.future.set_exception(e)
+        except:
+            e, tb = sys.exc_info()[1:]
+            self.future.set_exception_info(e, tb)
         else:
             self.future.set_result(result)
@@ -71,6 +73,8 @@ def _worker(executor_reference, work_queue):
             work_item = work_queue.get(block=True)
             if work_item is not None:
                 work_item.run()
+                # Delete references to object. See issue16284
+                del work_item
                 continue
             executor = executor_reference()
             # Exit if:
@@ -82,22 +86,37 @@ def _worker(executor_reference, work_queue):
                 work_queue.put(None)
                 return
             del executor
-    except BaseException:
+    except:
         _base.LOGGER.critical('Exception in worker', exc_info=True)
 
 
 class ThreadPoolExecutor(_base.Executor):
-    def __init__(self, max_workers):
+
+    # Used to assign unique thread names when thread_name_prefix is not supplied.
+    _counter = itertools.count().next
+
+    def __init__(self, max_workers=None, thread_name_prefix=''):
         """Initializes a new ThreadPoolExecutor instance.
 
         Args:
             max_workers: The maximum number of threads that can be used to
                 execute the given calls.
+            thread_name_prefix: An optional name prefix to give our threads.
         """
+        if max_workers is None:
+            # Use this number because ThreadPoolExecutor is often
+            # used to overlap I/O instead of CPU work.
+            max_workers = (cpu_count() or 1) * 5
+        if max_workers <= 0:
+            raise ValueError("max_workers must be greater than 0")
+
         self._max_workers = max_workers
         self._work_queue = queue.Queue()
         self._threads = set()
         self._shutdown = False
         self._shutdown_lock = threading.Lock()
+        self._thread_name_prefix = (thread_name_prefix or
+                                    ("ThreadPoolExecutor-%d" % self._counter()))
 
     def submit(self, fn, *args, **kwargs):
         with self._shutdown_lock:
@@ -119,8 +138,11 @@ class ThreadPoolExecutor(_base.Executor):
             q.put(None)
         # TODO(bquinlan): Should avoid creating new threads if there are more
         # idle threads than items in the work queue.
-        if len(self._threads) < self._max_workers:
-            t = threading.Thread(target=_worker,
+        num_threads = len(self._threads)
+        if num_threads < self._max_workers:
+            thread_name = '%s_%d' % (self._thread_name_prefix or self,
+                                     num_threads)
+            t = threading.Thread(name=thread_name, target=_worker,
                                  args=(weakref.ref(self, weakref_cb),
                                        self._work_queue))
             t.daemon = True
@@ -134,5 +156,5 @@ class ThreadPoolExecutor(_base.Executor):
             self._work_queue.put(None)
         if wait:
             for t in self._threads:
-                t.join()
+                t.join(sys.maxint)
     shutdown.__doc__ = _base.Executor.shutdown.__doc__
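
A small sketch of the new thread_name_prefix argument (the prefix string here is arbitrary):

    import threading
    from concurrent.futures import ThreadPoolExecutor

    pool = ThreadPoolExecutor(max_workers=2, thread_name_prefix='worker')
    name = pool.submit(lambda: threading.current_thread().name).result()
    print(name)  # e.g. 'worker_0'
    pool.shutdown(wait=True)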

File: rarfile/LICENSE (deleted)

@@ -1,15 +0,0 @@
Copyright (c) 2005-2016 Marko Kreen <markokr@gmail.com>
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

File: rarfile/MANIFEST.in (deleted)

@@ -1,3 +0,0 @@
include README.rst Makefile MANIFEST.in LICENSE dumprar.py
include doc/*.rst doc/Makefile doc/conf.py doc/make.bat
include test/Makefile test/*.sh test/files/*.rar test/files/*.exp

File: rarfile/Makefile (deleted)

@@ -1,31 +0,0 @@
prefix = /usr/local

all:
	python setup.py build

install:
	python setup.py install --prefix=$(prefix)

tgz: clean
	python setup.py sdist

clean:
	rm -rf __pycache__ build dist
	rm -f *.pyc MANIFEST *.orig *.rej *.html *.class
	rm -rf doc/_build doc/_static doc/_templates
	make -C test clean

html:
	rst2html README.rst > README.html
	make -C doc html

lint:
	pylint -E rarfile.py

rbuild:
	curl -X POST https://readthedocs.org/build/6715

upload:
	python setup.py sdist upload

File: rarfile/PKG-INFO (deleted)

@@ -1,56 +0,0 @@
Metadata-Version: 1.1
Name: rarfile
Version: 2.8
Summary: RAR archive reader for Python
Home-page: https://github.com/markokr/rarfile
Author: Marko Kreen
Author-email: markokr@gmail.com
License: ISC
Description: rarfile - RAR archive reader for Python
=======================================
This is Python module for RAR_ archive reading. The interface
is made as zipfile_ like as possible. Licensed under ISC_
license.
Features:
- Supports both RAR2 and RAR3 archives (WinRAR 2.x .. WinRAR 4.x).
- Supports multi volume archives.
- Supports Unicode filenames.
- Supports password-protected archives.
- Supports archive and file comments.
- Archive parsing and non-compressed files are handled in pure Python code.
- Compressed files are extracted by executing external tool: either ``unrar``
  from RARLAB_ or ``bsdtar`` from libarchive_.
- Works with both Python 2.7 and 3.x.
Notes:
- Does not support the RAR5 format introduced in WinRAR 5.0.
- ``bsdtar`` does not support all RAR3 features.
Links:
- `Documentation`_
- `Downloads`_
- `Git`_ repo
.. _RAR: https://en.wikipedia.org/wiki/RAR_%28file_format%29
.. _zipfile: https://docs.python.org/2/library/zipfile.html
.. _ISC: https://en.wikipedia.org/wiki/ISC_license
.. _Git: https://github.com/markokr/rarfile
.. _Downloads: https://pypi.python.org/pypi/rarfile
.. _Documentation: https://rarfile.readthedocs.io/
.. _libarchive: https://github.com/libarchive/libarchive
.. _RARLAB: http://www.rarlab.com/
Keywords: rar,unrar,archive
Platform: UNKNOWN
Classifier: Development Status :: 5 - Production/Stable
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: ISC License (ISCL)
Classifier: Operating System :: OS Independent
Classifier: Programming Language :: Python :: 2
Classifier: Programming Language :: Python :: 3
Classifier: Topic :: Software Development :: Libraries :: Python Modules
Classifier: Topic :: System :: Archiving :: Compression

File: rarfile/README.rst (deleted)

@@ -1,39 +0,0 @@
rarfile - RAR archive reader for Python
=======================================
This is Python module for RAR_ archive reading. The interface
is made as zipfile_ like as possible. Licensed under ISC_
license.
Features:
- Supports both RAR2 and RAR3 archives (WinRAR 2.x .. WinRAR 4.x).
- Supports multi volume archives.
- Supports Unicode filenames.
- Supports password-protected archives.
- Supports archive and file comments.
- Archive parsing and non-compressed files are handled in pure Python code.
- Compressed files are extracted by executing external tool: either ``unrar``
  from RARLAB_ or ``bsdtar`` from libarchive_.
- Works with both Python 2.7 and 3.x.
Notes:
- Does not support the RAR5 format introduced in WinRAR 5.0.
- ``bsdtar`` does not support all RAR3 features.
Links:
- `Documentation`_
- `Downloads`_
- `Git`_ repo
.. _RAR: https://en.wikipedia.org/wiki/RAR_%28file_format%29
.. _zipfile: https://docs.python.org/2/library/zipfile.html
.. _ISC: https://en.wikipedia.org/wiki/ISC_license
.. _Git: https://github.com/markokr/rarfile
.. _Downloads: https://pypi.python.org/pypi/rarfile
.. _Documentation: https://rarfile.readthedocs.io/
.. _libarchive: https://github.com/libarchive/libarchive
.. _RARLAB: http://www.rarlab.com/

File: rarfile/doc/Makefile (deleted)

@@ -1,153 +0,0 @@
# Makefile for Sphinx documentation
#

# You can set these variables from the command line.
SPHINXOPTS =
SPHINXBUILD = sphinx-build
PAPER =
BUILDDIR = _build

# Internal variables.
PAPEROPT_a4 = -D latex_paper_size=a4
PAPEROPT_letter = -D latex_paper_size=letter
ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
# the i18n builder cannot share the environment and doctrees with the others
I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .

.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext

help:
	@echo "Please use \`make <target>' where <target> is one of"
	@echo " html to make standalone HTML files"
	@echo " dirhtml to make HTML files named index.html in directories"
	@echo " singlehtml to make a single large HTML file"
	@echo " pickle to make pickle files"
	@echo " json to make JSON files"
	@echo " htmlhelp to make HTML files and a HTML help project"
	@echo " qthelp to make HTML files and a qthelp project"
	@echo " devhelp to make HTML files and a Devhelp project"
	@echo " epub to make an epub"
	@echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
	@echo " latexpdf to make LaTeX files and run them through pdflatex"
	@echo " text to make text files"
	@echo " man to make manual pages"
	@echo " texinfo to make Texinfo files"
	@echo " info to make Texinfo files and run them through makeinfo"
	@echo " gettext to make PO message catalogs"
	@echo " changes to make an overview of all changed/added/deprecated items"
	@echo " linkcheck to check all external links for integrity"
	@echo " doctest to run all doctests embedded in the documentation (if enabled)"

clean:
	-rm -rf $(BUILDDIR)/*

html:
	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."

dirhtml:
	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."

singlehtml:
	$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
	@echo
	@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."

pickle:
	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
	@echo
	@echo "Build finished; now you can process the pickle files."

json:
	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
	@echo
	@echo "Build finished; now you can process the JSON files."

htmlhelp:
	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
	@echo
	@echo "Build finished; now you can run HTML Help Workshop with the" \
	      ".hhp project file in $(BUILDDIR)/htmlhelp."

qthelp:
	$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
	@echo
	@echo "Build finished; now you can run "qcollectiongenerator" with the" \
	      ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
	@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/RarFile.qhcp"
	@echo "To view the help file:"
	@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/RarFile.qhc"

devhelp:
	$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
	@echo
	@echo "Build finished."
	@echo "To view the help file:"
	@echo "# mkdir -p $$HOME/.local/share/devhelp/RarFile"
	@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/RarFile"
	@echo "# devhelp"

epub:
	$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
	@echo
	@echo "Build finished. The epub file is in $(BUILDDIR)/epub."

latex:
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo
	@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
	@echo "Run \`make' in that directory to run these through (pdf)latex" \
	      "(use \`make latexpdf' here to do that automatically)."

latexpdf:
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo "Running LaTeX files through pdflatex..."
	$(MAKE) -C $(BUILDDIR)/latex all-pdf
	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."

text:
	$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
	@echo
	@echo "Build finished. The text files are in $(BUILDDIR)/text."

man:
	$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
	@echo
	@echo "Build finished. The manual pages are in $(BUILDDIR)/man."

texinfo:
	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
	@echo
	@echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
	@echo "Run \`make' in that directory to run these through makeinfo" \
	      "(use \`make info' here to do that automatically)."

info:
	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
	@echo "Running Texinfo files through makeinfo..."
	make -C $(BUILDDIR)/texinfo info
	@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."

gettext:
	$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
	@echo
	@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."

changes:
	$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
	@echo
	@echo "The overview file is in $(BUILDDIR)/changes."

linkcheck:
	$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
	@echo
	@echo "Link check complete; look for any errors in the above output " \
	      "or in $(BUILDDIR)/linkcheck/output.txt."

doctest:
	$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
	@echo "Testing of doctests in the sources finished, look at the " \
	      "results in $(BUILDDIR)/doctest/output.txt."

File: rarfile/doc/api.rst (deleted)

@@ -1,111 +0,0 @@
rarfile API documentation
=========================

.. contents:: Table Of Contents

Introduction
------------

.. automodule:: rarfile

RarFile class
-------------

.. autoclass:: RarFile
   :members:
   :inherited-members:

RarInfo class
-------------

.. autoclass:: RarInfo
   :members:
   :inherited-members:

RarExtFile class
----------------

.. autoclass:: RarExtFile
   :members:
   :inherited-members:

Functions
---------

.. autofunction:: is_rarfile

Module Configuration
--------------------

.. autodata:: UNRAR_TOOL
.. autodata:: DEFAULT_CHARSET
.. autodata:: TRY_ENCODINGS
.. autodata:: USE_DATETIME
.. autodata:: PATH_SEP
.. autodata:: NEED_COMMENTS
.. autodata:: UNICODE_COMMENTS
.. autodata:: USE_EXTRACT_HACK
.. autodata:: HACK_SIZE_LIMIT

Constants
---------

.. py:data:: RAR_M0

   No compression.

.. py:data:: RAR_M1

   Compression level `-m1` - Fastest compression.

.. py:data:: RAR_M2

   Compression level `-m2`.

.. py:data:: RAR_M3

   Compression level `-m3`.

.. py:data:: RAR_M4

   Compression level `-m4`.

.. py:data:: RAR_M5

   Compression level `-m5` - Maximum compression.

.. py:data:: RAR_OS_MSDOS
.. py:data:: RAR_OS_OS2
.. py:data:: RAR_OS_WIN32
.. py:data:: RAR_OS_UNIX
.. py:data:: RAR_OS_MACOS
.. py:data:: RAR_OS_BEOS

Exceptions
----------

.. autoclass:: Error
.. autoclass:: BadRarFile
.. autoclass:: NotRarFile
.. autoclass:: BadRarName
.. autoclass:: NoRarEntry
.. autoclass:: PasswordRequired
.. autoclass:: NeedFirstVolume
.. autoclass:: NoCrypto
.. autoclass:: RarExecError
.. autoclass:: RarWarning
.. autoclass:: RarFatalError
.. autoclass:: RarCRCError
.. autoclass:: RarLockedArchiveError
.. autoclass:: RarWriteError
.. autoclass:: RarOpenError
.. autoclass:: RarUserError
.. autoclass:: RarMemoryError
.. autoclass:: RarCreateError
.. autoclass:: RarNoFilesError
.. autoclass:: RarUserBreak
.. autoclass:: RarUnknownError
.. autoclass:: RarSignalExit

File: rarfile/doc/conf.py (deleted)

@@ -1,249 +0,0 @@
# -*- coding: utf-8 -*-
#
# RarFile documentation build configuration file, created by
# sphinx-quickstart on Sun Mar 24 13:29:46 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os, os.path
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
import rarfile
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx']
autodoc_member_order = 'bysource'
autoclass_content = 'both'
autodoc_default_flags = ['show-inheritance']
intersphinx_mapping = {'python': ('http://docs.python.org/2', None)}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'RarFile'
copyright = u'2005-2016, Marko Kreen'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = rarfile.__version__
# The full version, including alpha/beta/rc tags.
release = rarfile.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
#htmlhelp_basename = 'RarFiledoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'RarFile.tex', u'RarFile Documentation',
u'Marko Kreen', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
#man_pages = [
# ('index', 'rarfile', u'RarFile Documentation',
# [u'Marko Kreen'], 1)
#]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'RarFile', u'RarFile Documentation',
u'Marko Kreen', 'RarFile', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'

File: rarfile/doc/faq.rst (deleted)

@@ -1,87 +0,0 @@
rarfile FAQ
===========
.. contents:: Table of Contents
What are the dependencies?
--------------------------
It depends on ``unrar`` command-line utility to do the actual decompression.
Note that by default it expect it to be in ``PATH``. If unrar
launching fails, you need to fix this.
Alternatively, :mod:`rarfile` can use bsdtar_ from libarchive_ as
decompression backend, but that is a bit problematic as bsdtar_ does not support
all RAR features.
.. _bsdtar: https://github.com/libarchive/libarchive/wiki/ManPageBsdtar1
.. _libarchive: http://www.libarchive.org/
It depends on cryptography_ or PyCrypto_ modules to process
archives with password-protected headers.
.. _cryptography: https://pypi.python.org/pypi/cryptography
.. _PyCrypto: https://pypi.python.org/pypi/pycrypto
Does it parse ``unrar`` output to get archive contents?
-------------------------------------------------------
No, :mod:`rarfile` parses RAR structure in Python code. Also it can
read uncompressed files from archive without external utility.
Will rarfile support wrapping unrarlib/unrar.dll/unrar.so in the future?
------------------------------------------------------------------------
No. The current architecture - parsing in Python and decompression with
command line tools work well across all interesting operating systems
(Windows/Linux/MacOS), wrapping a library does not bring any advantages.
Simple execution of command-line tools is also legally simpler situation
than linking with external library.
How can I get it work on Windows?
---------------------------------
On Windows the ``unrar.exe`` is not in ``PATH`` so simple ``Popen("unrar ..")`` does not work.
It can be solved several ways:
1. Add location of ``unrar.exe`` to PATH.
2. Set :data:`rarfile.UNRAR_TOOL` to full path of ``unrar.exe`` (see the sketch after this list).
3. Copy ``unrar.exe`` to your program directory.
4. Copy ``unrar.exe`` to system directory that is in PATH, eg. ``C:\Windows``.
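
Option 2 can look like the following sketch; the install path is an assumption, adjust it to the local unrar location:

    import rarfile

    # Hypothetical install path; point this at the real unrar.exe.
    rarfile.UNRAR_TOOL = r"C:\Program Files\WinRAR\UnRAR.exe"

    with rarfile.RarFile("archive.rar") as rf:
        rf.extractall()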
How to avoid the need for user to manually install rarfile/unrar?
-----------------------------------------------------------------
Include ``rarfile.py`` and/or ``unrar`` with your application.
Will it support creating RAR archives?
--------------------------------------
No. RARLAB_ is not interested in RAR becoming open format
and specifically discourages writing RAR creation software.
In the meantime use either Zip_ (better compatibility) or 7z_ (better compression)
format for your own archives.
.. _RARLAB: http://www.rarlab.com/
.. _Zip: https://en.wikipedia.org/wiki/ZIP_%28file_format%29
.. _7z: https://en.wikipedia.org/wiki/7z
What is the USE_EXTRACT_HACK?
-----------------------------
RarFile uses ``unrar`` to extract compressed files. But when extracting
single file from archive containing many entries, ``unrar`` needs to parse
whole archive until it finds the right entry. This makes random-access
to entries slow. To avoid that, RarFile remembers location of compressed
data for each entry and on read it copies it to temporary archive containing
only data for that one file, thus making ``unrar`` fast.
The logic is only activated for entries smaller than :data:`rarfile.HACK_SIZE_LIMIT`
(20M by default). Bigger files are accessed directly from RAR.
Note - it only works for non-solid archives. So if you care about
random access to files in your archive, do not create solid archives.
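
Both knobs mentioned here are plain module-level settings; a sketch with illustrative values:

    import rarfile

    # Raise the size cutoff for the extract hack from the default 20M to 50M,
    # or set USE_EXTRACT_HACK to 0 to disable the optimization entirely.
    rarfile.HACK_SIZE_LIMIT = 50 * 1024 * 1024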

File: rarfile/doc/index.rst (deleted)

@@ -1,42 +0,0 @@
rarfile - RAR archive reader for Python
=======================================
This is Python module for RAR_ archive reading. The interface
is made as zipfile_ like as possible. Licensed under ISC_
license.
.. _RAR: http://en.wikipedia.org/wiki/RAR
.. _zipfile: http://docs.python.org/library/zipfile.html
.. _ISC: http://en.wikipedia.org/wiki/ISC_license
Features:
- Supports both RAR 2.x and 3.x archives.
- Supports multi volume archives.
- Supports Unicode filenames.
- Supports password-protected archives.
- Supports archive and file comments.
- Archive parsing and non-compressed files are handled in pure Python code.
- For compressed files runs ``unrar`` utility.
- Works with both Python 2.x and 3.x.
Documentation:
.. toctree::
   :maxdepth: 1

   Module Documentation <api>
   FAQs <faq>
   Release News <news>
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`

File: rarfile/doc/make.bat (deleted)

@@ -1,190 +0,0 @@
@ECHO OFF

REM Command file for Sphinx documentation

if "%SPHINXBUILD%" == "" (
	set SPHINXBUILD=sphinx-build
)
set BUILDDIR=_build
set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% .
set I18NSPHINXOPTS=%SPHINXOPTS% .
if NOT "%PAPER%" == "" (
	set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS%
	set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS%
)

if "%1" == "" goto help

if "%1" == "help" (
	:help
	echo.Please use `make ^<target^>` where ^<target^> is one of
	echo. html to make standalone HTML files
	echo. dirhtml to make HTML files named index.html in directories
	echo. singlehtml to make a single large HTML file
	echo. pickle to make pickle files
	echo. json to make JSON files
	echo. htmlhelp to make HTML files and a HTML help project
	echo. qthelp to make HTML files and a qthelp project
	echo. devhelp to make HTML files and a Devhelp project
	echo. epub to make an epub
	echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter
	echo. text to make text files
	echo. man to make manual pages
	echo. texinfo to make Texinfo files
	echo. gettext to make PO message catalogs
	echo. changes to make an overview over all changed/added/deprecated items
	echo. linkcheck to check all external links for integrity
	echo. doctest to run all doctests embedded in the documentation if enabled
	goto end
)

if "%1" == "clean" (
	for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i
	del /q /s %BUILDDIR%\*
	goto end
)

if "%1" == "html" (
	%SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished. The HTML pages are in %BUILDDIR%/html.
	goto end
)

if "%1" == "dirhtml" (
	%SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml.
	goto end
)

if "%1" == "singlehtml" (
	%SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml.
	goto end
)

if "%1" == "pickle" (
	%SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished; now you can process the pickle files.
	goto end
)

if "%1" == "json" (
	%SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished; now you can process the JSON files.
	goto end
)

if "%1" == "htmlhelp" (
	%SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished; now you can run HTML Help Workshop with the ^
	.hhp project file in %BUILDDIR%/htmlhelp.
	goto end
)

if "%1" == "qthelp" (
	%SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished; now you can run "qcollectiongenerator" with the ^
	.qhcp project file in %BUILDDIR%/qthelp, like this:
	echo.^> qcollectiongenerator %BUILDDIR%\qthelp\RarFile.qhcp
	echo.To view the help file:
	echo.^> assistant -collectionFile %BUILDDIR%\qthelp\RarFile.ghc
	goto end
)

if "%1" == "devhelp" (
	%SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished.
	goto end
)

if "%1" == "epub" (
	%SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished. The epub file is in %BUILDDIR%/epub.
	goto end
)

if "%1" == "latex" (
	%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished; the LaTeX files are in %BUILDDIR%/latex.
	goto end
)

if "%1" == "text" (
	%SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished. The text files are in %BUILDDIR%/text.
	goto end
)

if "%1" == "man" (
	%SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished. The manual pages are in %BUILDDIR%/man.
	goto end
)

if "%1" == "texinfo" (
	%SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo.
	goto end
)

if "%1" == "gettext" (
	%SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished. The message catalogs are in %BUILDDIR%/locale.
	goto end
)

if "%1" == "changes" (
	%SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes
	if errorlevel 1 exit /b 1
	echo.
	echo.The overview file is in %BUILDDIR%/changes.
	goto end
)

if "%1" == "linkcheck" (
	%SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck
	if errorlevel 1 exit /b 1
	echo.
	echo.Link check complete; look for any errors in the above output ^
	or in %BUILDDIR%/linkcheck/output.txt.
	goto end
)

if "%1" == "doctest" (
	%SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest
	if errorlevel 1 exit /b 1
	echo.
	echo.Testing of doctests in the sources finished, look at the ^
	results in %BUILDDIR%/doctest/output.txt.
	goto end
)

:end

View file

@ -1,243 +0,0 @@
rarfile history
===============
.. py:currentmodule:: rarfile
Version 2.8 (2016-06-07)
------------------------
* Fix: support solid archives from in-memory file object.
Full archive will be written out to temp file.
[`#21 <https://github.com/markokr/rarfile/issues/21>`_]
* Fix: ask unrar stop switches scanning,
to handle archive names starting with "-".
(Alexander Shadchin)
[`#12 <https://github.com/markokr/rarfile/pull/12>`_]
* Fix: add missing _parse_error variable to RarFile object.
(Gregory Mazzola)
[`#20 <https://github.com/markokr/rarfile/pull/20>`_]
* Fix: return proper boolean from :meth:`RarInfo.needs_password`.
[`#22 <https://github.com/markokr/rarfile/issues/22>`_]
* Fix: do not insert non-string rarfile into exception string.
(Tim Muller)
[`#23 <https://github.com/markokr/rarfile/pull/23>`_]
* Fix: make :meth:`RarFile.extract` and :meth:`RarFile.testrar`
support in-memory archives.
* Use cryptography_ module as preferred crypto backend.
PyCrypto_ will be used as fallback.
* Cleanup: remove compat code for Python 2.4/2.5/2.6.
.. _cryptography: https://pypi.python.org/pypi/cryptography
.. _PyCrypto: https://pypi.python.org/pypi/pycrypto
Version 2.7 (2014-11-23)
------------------------
* Allow use of bsdtar_ as decompression backend. It sits
on top of libarchive_, which has support for reading RAR archives.
Limitations of ``libarchive`` RAR backend:
- Does not support solid archives.
- Does not support password-protected archives.
- Does not support "parsing filters" used for audio/image/executable data,
so few non-solid, non-encrypted archives also fail.
Now :mod:`rarfile` checks if ``unrar`` is available, and if not, tries ``bsdtar``.
If that works, it keeps using it. If not, the configuration stays
with ``unrar``, which will then appear in error messages.
.. _bsdtar: https://github.com/libarchive/libarchive/wiki/ManPageBsdtar1
.. _libarchive: http://www.libarchive.org/
* Both :class:`RarFile` and :func:`is_rarfile` now accept file-like
objects, e.g. :class:`io.BytesIO`. The only requirement is that the object
must be seekable. This mirrors similar functionality in zipfile.
Based on patch by Chase Zhang. (See the sketch after this list.)
* Uniform error handling. :class:`RarFile` accepts an ``errors="strict"``
argument.
Allow the user to tune whether parsing and missing-file errors raise an
exception. If no exception is raised, the error string can be queried
with the :meth:`RarFile.strerror` method.
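A minimal sketch of the file-like and error-handling behavior (the archive
name and the ``errors`` value are assumptions, not part of this changelog)::

    import io
    import rarfile

    with open('archive.rar', 'rb') as f:
        buf = io.BytesIO(f.read())   # any seekable file-like object works

    if rarfile.is_rarfile(buf):
        rf = rarfile.RarFile(buf, errors='stop')  # 'strict' would raise instead
        if rf.strerror():
            print(rf.strerror())     # non-fatal parsing problem, if any
        print(rf.namelist())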
Version 2.6 (2013-04-10)
------------------------
* Add context manager support for :class:`RarFile` class.
Both :class:`RarFile` and :class:`RarExtFile` now support
the :keyword:`with` statement, as sketched after this list.
(Wentao Han)
* :meth:`RarFile.volumelist` method, returns filenames of archive volumes.
* Re-throw clearer error in case ``unrar`` is not found in ``PATH``.
* Sync new unrar4.x error code from ``rar.txt``.
* Use Sphinx for documentation, push docs to rtfd.org_
.. _rtfd.org: https://rarfile.readthedocs.org/
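A sketch of the new context-manager usage (archive and member names are
assumptions)::

    import rarfile

    with rarfile.RarFile('archive.rar') as rf:
        # first entry, assumed here to be a regular file
        with rf.open(rf.namelist()[0]) as f:
            data = f.read()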
Version 2.5 (2012-01-19)
------------------------
Fixes:
* :meth:`RarExtFile.read` and :meth:`RarExtFile.readinto` now do a looping read
to work properly on short reads. This is important for Python 3.2+, where a read
from a pipe can return a short result even on a blocking file descriptor.
* Proper error reporting in :meth:`RarFile.extract`, :meth:`RarFile.extractall`
and :meth:`RarFile.testrar`.
* :meth:`RarExtFile.read` from unrar pipe: prefer to return the unrar error code;
if that's not available, do our own error checks.
* Avoid string addition in :meth:`RarExtFile.read`; instead always use list+join to
merge multi-part reads.
* dumprar: don't re-encode byte strings (Python 2.x). This avoids
unnecessary failures when printing invalid unicode.
Version 2.4 (2011-11-05)
------------------------
Fixes:
* :data:`USE_DATETIME`: survive bad values from RAR
* Fix bug in corrupt unicode filename handling
* dumprar: make unicode chars work with both pipe and console
Version 2.3 (2011-07-03)
------------------------
Features:
* Support .seek() method on file streams. (Kristian Larsson)
* Support .readinto() method on file streams. An optimized implementation
is available on Python 2.6+, where :class:`memoryview` is available.
* Support file comments - :attr:`RarInfo.comment` contains decompressed data if available.
* File objects returned by :meth:`RarFile.open()` are :class:`io.RawIOBase`-compatible.
They can be further wrapped with :class:`io.BufferedReader` and :class:`io.TextIOWrapper`.
* Now .getinfo() uses a dict lookup instead of a sequential scan when
searching for an archive entry. This speeds up processing for archives that
have many entries.
* Option :data:`UNICODE_COMMENTS` to decode both archive and file comments to unicode.
It uses :data:`TRY_ENCODINGS` for the list of encodings to try. If off, comments are
left as byte strings. Default: 0
* Option :data:`PATH_SEP` to change the path separator. Default: ``r'\'``;
set ``rarfile.PATH_SEP='/'`` to be compatible with zipfile. (See the sketch
after this list.)
* Option :data:`USE_DATETIME` to convert timestamps to datetime objects.
Default: 0, timestamps are tuples.
* Option :data:`TRY_ENCODINGS` to allow tuning the attempted encoding list.
* Reorder :class:`RarInfo` fields to better show zipfile-compatible fields.
* Standard regtests to make sure various features work
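A sketch of configuring these module-level options (the values shown are just
illustrations)::

    import rarfile

    rarfile.PATH_SEP = '/'        # zipfile-compatible path separator
    rarfile.USE_DATETIME = 1      # timestamps become datetime objects
    rarfile.UNICODE_COMMENTS = 1  # decode comments using TRY_ENCODINGS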
Compatibility:
* Drop :attr:`RarInfo.unicode_filename`; plain :attr:`RarInfo.filename` has been unicode since 2.0.
* .read(-1) now reads until EOF. Previously it returned an empty buffer.
Fixes:
* Make encrypted headers work with Python 3.x bytes() and with old 2.x 'sha' module.
* Simplify :class:`subprocess.Popen` usage when launching ``unrar``. Previously
it tried to optimize and work around OS/Python bugs, but this is not
maintainable.
* Use temp rar file hack on multi-volume archives too.
* Always .wait() on unrar, to avoid zombies
* Convert struct.error to BadRarFile
* Plug some fd leaks. Affected: Jython, PyPy.
* Broken archives are handled more robustly.
Version 2.2 (2010-08-19)
------------------------
Fixes:
* Relaxed volume naming. Now it just calculates the new volume name by finding
the number in the old one and increasing it, without any expectations about what
that number should be.
* Files with 4G of compressed data in one volume were handled wrongly. Fixed.
* DOS timestamp seconds need to be multiplied by 2.
* Correct EXTTIME parsing.
Cleanups:
* Compressed size is per-volume; sum them together so that the user sees the
complete compressed size for files split over several volumes.
* dumprar: Show unknown bits.
* Use :class:`struct.Struct` to cache unpack formats.
* Support missing :data:`os.devnull`. (Python 2.3)
Version 2.1 (2010-07-31)
------------------------
Features:
* Minimal implementation for :meth:`RarFile.extract`, :meth:`RarFile.extractall`, :meth:`RarFile.testrar`.
They are simple shortcuts to ``unrar`` invocation.
* Accept :class:`RarInfo` object where filename is expected.
* Include ``dumprar.py`` in .tgz. It can be used to visualize RAR structure
and to test the module.
* Support for encrypted file headers.
Fixes:
* Don't read past ENDARC; there could be non-RAR data there.
* RAR 2.x: it does not write ENDARC, but our volume code expected it. Fixed.
* RAR 2.x: Support more than 200 old-style volumes.
Cleanups:
* Load comment only when requested.
* Cleanup of internal config variables. They should now have final names.
* :meth:`RarFile.open`: add ``mode='r'`` argument to match zipfile.
* Doc and comments cleanup, minimize duplication.
* Common wrappers for both compressed and uncompressed files,
now :meth:`RarFile.open` also does CRC-checking.
Version 2.0 (2010-04-29)
------------------------
Features:
* Python 3 support. Still works with 2.x.
* Parses extended time fields. (.mtime, .ctime, .atime)
* :meth:`RarFile.open` method. This makes it possible to process large
entries that do not fit into memory, as sketched after this list.
* Supports password-protected archives.
* Supports archive comments.
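A sketch of streaming a large entry without loading it fully into memory
(archive and member names are assumptions)::

    import rarfile

    rf = rarfile.RarFile('big.rar')
    f = rf.open('big-entry.bin')   # streamed, not read into memory at once
    while True:
        chunk = f.read(65536)
        if not chunk:
            break
        # process chunk incrementally here
    f.close()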
Cleanups:
* Uses :mod:`subprocess` module to launch ``unrar``.
* .filename is always a Unicode string; .unicode_filename is now deprecated.
* .CRC is unsigned again, as python3 crc32() is unsigned.
Version 1.1 (2008-08-31)
------------------------
Fixes:
* Replace :func:`os.tempnam` with :func:`tempfile.mkstemp`. (Jason Moiron)
* Fix infinite loop in _extract_hack on unexpected EOF
* :attr:`RarInfo.CRC` is now signed value to match crc32()
* :meth:`RarFile.read` now checks file crc
Cleanups:
* more docstrings
* throw proper exceptions (subclasses of :exc:`rarfile.Error`)
* RarInfo has fields pre-initialized, so they appear in help()
* rename RarInfo.data to RarInfo.header_data
* dont use "print" when header parsing fails
* use try/finally to delete temp rar
Version 1.0 (2005-08-08)
------------------------
* First release.

View file

@ -1,361 +0,0 @@
#! /usr/bin/env python
"""Dump archive contents, test extraction."""
import io
import sys
import rarfile as rf
from binascii import crc32, hexlify
from datetime import datetime
try:
bytearray
except NameError:
import array
def bytearray(v):
return array.array('B', v)
rf.UNICODE_COMMENTS = 1
rf.USE_DATETIME = 1
usage = """
dumprar [switches] [ARC1 ARC2 ...] [@ARCLIST]
switches:
@file read archive names from file
-pPSW set password
-Ccharset set fallback charset
-v increase verbosity
-t attempt to read all files
-x write read files out
-c show archive comment
-h show usage
-- stop switch parsing
""".strip()
os_list = ['DOS', 'OS2', 'WIN', 'UNIX', 'MACOS', 'BEOS']
block_strs = ['MARK', 'MAIN', 'FILE', 'OLD_COMMENT', 'OLD_EXTRA',
'OLD_SUB', 'OLD_RECOVERY', 'OLD_AUTH', 'SUB', 'ENDARC']
def rarType(type):
if type < rf.RAR_BLOCK_MARK or type > rf.RAR_BLOCK_ENDARC:
return "*UNKNOWN*"
return block_strs[type - rf.RAR_BLOCK_MARK]
main_bits = (
(rf.RAR_MAIN_VOLUME, "VOL"),
(rf.RAR_MAIN_COMMENT, "COMMENT"),
(rf.RAR_MAIN_LOCK, "LOCK"),
(rf.RAR_MAIN_SOLID, "SOLID"),
(rf.RAR_MAIN_NEWNUMBERING, "NEWNR"),
(rf.RAR_MAIN_AUTH, "AUTH"),
(rf.RAR_MAIN_RECOVERY, "RECOVERY"),
(rf.RAR_MAIN_PASSWORD, "PASSWORD"),
(rf.RAR_MAIN_FIRSTVOLUME, "FIRSTVOL"),
(rf.RAR_SKIP_IF_UNKNOWN, "SKIP"),
(rf.RAR_LONG_BLOCK, "LONG"),
)
endarc_bits = (
(rf.RAR_ENDARC_NEXT_VOLUME, "NEXTVOL"),
(rf.RAR_ENDARC_DATACRC, "DATACRC"),
(rf.RAR_ENDARC_REVSPACE, "REVSPACE"),
(rf.RAR_ENDARC_VOLNR, "VOLNR"),
(rf.RAR_SKIP_IF_UNKNOWN, "SKIP"),
(rf.RAR_LONG_BLOCK, "LONG"),
)
file_bits = (
(rf.RAR_FILE_SPLIT_BEFORE, "SPLIT_BEFORE"),
(rf.RAR_FILE_SPLIT_AFTER, "SPLIT_AFTER"),
(rf.RAR_FILE_PASSWORD, "PASSWORD"),
(rf.RAR_FILE_COMMENT, "COMMENT"),
(rf.RAR_FILE_SOLID, "SOLID"),
(rf.RAR_FILE_LARGE, "LARGE"),
(rf.RAR_FILE_UNICODE, "UNICODE"),
(rf.RAR_FILE_SALT, "SALT"),
(rf.RAR_FILE_VERSION, "VERSION"),
(rf.RAR_FILE_EXTTIME, "EXTTIME"),
(rf.RAR_FILE_EXTFLAGS, "EXTFLAGS"),
(rf.RAR_SKIP_IF_UNKNOWN, "SKIP"),
(rf.RAR_LONG_BLOCK, "LONG"),
)
generic_bits = (
(rf.RAR_SKIP_IF_UNKNOWN, "SKIP"),
(rf.RAR_LONG_BLOCK, "LONG"),
)
file_parms = ("D64", "D128", "D256", "D512",
"D1024", "D2048", "D4096", "DIR")
def xprint(m, *args):
if sys.hexversion < 0x3000000:
m = m.decode('utf8')
if args:
m = m % args
if sys.hexversion < 0x3000000:
m = m.encode('utf8')
sys.stdout.write(m)
sys.stdout.write('\n')
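# render_flags() maps each set bit in `flags` to its symbolic name from
# bit_list, and reports any leftover unknown bits as UNK_xxxx markers.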
def render_flags(flags, bit_list):
res = []
known = 0
for bit in bit_list:
known = known | bit[0]
if flags & bit[0]:
res.append(bit[1])
unknown = flags & ~known
n = 0
while unknown:
if unknown & 1:
res.append("UNK_%04x" % (1 << n))
unknown = unknown >> 1
n += 1
return ",".join(res)
def get_file_flags(flags):
res = render_flags(flags & ~rf.RAR_FILE_DICTMASK, file_bits)
xf = (flags & rf.RAR_FILE_DICTMASK) >> 5
res += "," + file_parms[xf]
return res
def get_main_flags(flags):
return render_flags(flags, main_bits)
def get_endarc_flags(flags):
return render_flags(flags, endarc_bits)
def get_generic_flags(flags):
return render_flags(flags, generic_bits)
def fmt_time(t):
if isinstance(t, datetime):
return t.isoformat(' ')
return "%04d-%02d-%02d %02d:%02d:%02d" % t
def show_item(h):
st = rarType(h.type)
unknown = h.header_size - h.header_base
xprint("%s: hdrlen=%d datlen=%d hdr_unknown=%d", st, h.header_size,
h.add_size, unknown)
if unknown > 0 and cf_verbose > 1:
dat = h.header_data[h.header_base : ]
xprint(" unknown: %s", hexlify(dat))
if h.type in (rf.RAR_BLOCK_FILE, rf.RAR_BLOCK_SUB):
if h.host_os == rf.RAR_OS_UNIX:
s_mode = "0%o" % h.mode
else:
s_mode = "0x%x" % h.mode
xprint(" flags=0x%04x:%s", h.flags, get_file_flags(h.flags))
if h.host_os >= 0 and h.host_os < len(os_list):
s_os = os_list[h.host_os]
else:
s_os = "?"
xprint(" os=%d:%s ver=%d mode=%s meth=%c cmp=%d dec=%d vol=%d",
h.host_os, s_os,
h.extract_version, s_mode, h.compress_type,
h.compress_size, h.file_size, h.volume)
ucrc = (h.CRC + (1 << 32)) & ((1 << 32) - 1)
xprint(" crc=0x%08x (%d) time=%s", ucrc, h.CRC, fmt_time(h.date_time))
xprint(" name=%s", h.filename)
if h.mtime:
xprint(" mtime=%s", fmt_time(h.mtime))
if h.ctime:
xprint(" ctime=%s", fmt_time(h.ctime))
if h.atime:
xprint(" atime=%s", fmt_time(h.atime))
if h.arctime:
xprint(" arctime=%s", fmt_time(h.arctime))
elif h.type == rf.RAR_BLOCK_MAIN:
xprint(" flags=0x%04x:%s", h.flags, get_main_flags(h.flags))
elif h.type == rf.RAR_BLOCK_ENDARC:
xprint(" flags=0x%04x:%s", h.flags, get_endarc_flags(h.flags))
elif h.type == rf.RAR_BLOCK_MARK:
xprint(" flags=0x%04x:", h.flags)
else:
xprint(" flags=0x%04x:%s", h.flags, get_generic_flags(h.flags))
if h.comment is not None:
cm = repr(h.comment)
if cm[0] == 'u':
cm = cm[1:]
xprint(" comment=%s", cm)
cf_show_comment = 0
cf_verbose = 0
cf_charset = None
cf_extract = 0
cf_test_read = 0
cf_test_unrar = 0
cf_test_memory = 0
def check_crc(f, inf):
ucrc = f.CRC
if ucrc < 0:
# normalize a signed CRC to unsigned; 1 << 32 works on both Python 2 and 3,
# unlike the Python 2-only long()
ucrc += 1 << 32
if ucrc != inf.CRC:
print('crc error')
def test_read_long(r, inf):
f = r.open(inf.filename)
total = 0
while 1:
data = f.read(8192)
if not data:
break
total += len(data)
if total != inf.file_size:
xprint("\n *** %s has corrupt file: %s ***", r.rarfile, inf.filename)
xprint(" *** short read: got=%d, need=%d ***\n", total, inf.file_size)
check_crc(f, inf)
# test .seek() & .readinto()
if cf_test_read > 1:
f.seek(0,0)
# hack: re-enable crc calc
f.crc_check = 1
f.CRC = 0
total = 0
buf = bytearray(rf.ZERO*4096)
while 1:
res = f.readinto(buf)
if not res:
break
total += res
if inf.file_size != total:
xprint(" *** readinto failed: got=%d, need=%d ***\n", total, inf.file_size)
check_crc(f, inf)
f.close()
def test_read(r, inf):
test_read_long(r, inf)
def test_real(fn, psw):
xprint("Archive: %s", fn)
cb = None
if cf_verbose > 1:
cb = show_item
rfarg = fn
if cf_test_memory:
rfarg = io.BytesIO(open(fn, 'rb').read())
# check if rar
if not rf.is_rarfile(rfarg):
xprint(" --- %s is not a RAR file ---", fn)
return
# open
r = rf.RarFile(rfarg, charset = cf_charset, info_callback = cb)
# set password
if r.needs_password():
if psw:
r.setpassword(psw)
else:
xprint(" --- %s requires password ---", fn)
return
# show comment
if cf_show_comment and r.comment:
for ln in r.comment.split('\n'):
xprint(" %s", ln)
elif cf_verbose == 1 and r.comment:
cm = repr(r.comment)
if cm[0] == 'u':
cm = cm[1:]
xprint(" comment=%s", cm)
# process
for n in r.namelist():
inf = r.getinfo(n)
if inf.isdir():
continue
if cf_verbose == 1:
show_item(inf)
if cf_test_read:
test_read(r, inf)
if cf_extract:
r.extractall()
for inf in r.infolist():
r.extract(inf)
if cf_test_unrar:
r.testrar()
def test(fn, psw):
try:
test_real(fn, psw)
except rf.NeedFirstVolume:
xprint(" --- %s is middle part of multi-vol archive ---", fn)
except rf.Error:
exc, msg, tb = sys.exc_info()
xprint("\n *** %s: %s ***\n", exc.__name__, msg)
del tb
except IOError:
exc, msg, tb = sys.exc_info()
xprint("\n *** %s: %s ***\n", exc.__name__, msg)
del tb
def main():
global cf_verbose, cf_show_comment, cf_charset
global cf_extract, cf_test_read, cf_test_unrar
global cf_test_memory
# parse args
args = []
psw = None
noswitch = False
for a in sys.argv[1:]:
if noswitch:
args.append(a)
elif a[0] == "@":
for ln in open(a[1:], 'r'):
fn = ln[:-1]
args.append(fn)
elif a[0] != '-':
args.append(a)
elif a[1] == 'p':
psw = a[2:]
elif a == '--':
noswitch = True
elif a == '-h':
xprint(usage)
return
elif a == '-v':
cf_verbose += 1
elif a == '-c':
cf_show_comment = 1
elif a == '-x':
cf_extract = 1
elif a == '-t':
cf_test_read += 1
elif a == '-T':
cf_test_unrar = 1
elif a == '-M':
cf_test_memory = 1
elif a[1] == 'C':
cf_charset = a[2:]
else:
raise Exception("unknown switch: "+a)
if not args:
xprint(usage)
for fn in args:
test(fn, psw)
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
pass

View file

@ -1,33 +0,0 @@
#! /usr/bin/env python
from distutils.core import setup
import rarfile
ver = rarfile.__version__
ldesc = open("README.rst").read().strip()
sdesc = ldesc.split('\n')[0].split(' - ')[1].strip()
setup(
name = "rarfile",
version = ver,
description = sdesc,
long_description = ldesc,
author = "Marko Kreen",
license = "ISC",
author_email = "markokr@gmail.com",
url = "https://github.com/markokr/rarfile",
py_modules = ['rarfile'],
keywords = ['rar', 'unrar', 'archive'],
classifiers = [
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: ISC License (ISCL)",
"Operating System :: OS Independent",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 3",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: System :: Archiving :: Compression",
]
)

View file

@ -1,9 +0,0 @@
test:
./test1.sh
./test2.sh
clean:
rm -rf __pycache__
rm -f files/*.rar.[pj]* *.pyc *.class *.diffs
rm -f rarfile.py

Binary file not shown.

View file

@ -1,7 +0,0 @@
Archive: files/ctime0.rar
FILE: hdrlen=46 datlen=0 hdr_unknown=0
flags=0x9020:EXTTIME,LONG,D128
os=2:WIN ver=29 mode=0x20 meth=0 cmp=0 dec=0 vol=0
crc=0x00000000 (0) time=2011-05-10 21:28:47.899345
name=afile.txt
mtime=2011-05-10 21:28:47.899345

Binary file not shown.

View file

@ -1,8 +0,0 @@
Archive: files/ctime1.rar
FILE: hdrlen=50 datlen=0 hdr_unknown=0
flags=0x9020:EXTTIME,LONG,D128
os=2:WIN ver=29 mode=0x20 meth=0 cmp=0 dec=0 vol=0
crc=0x00000000 (0) time=2011-05-10 21:28:47.899345
name=afile.txt
mtime=2011-05-10 21:28:47.899345
ctime=2011-05-10 21:28:47

Binary file not shown.

View file

@ -1,8 +0,0 @@
Archive: files/ctime2.rar
FILE: hdrlen=51 datlen=0 hdr_unknown=0
flags=0x9020:EXTTIME,LONG,D128
os=2:WIN ver=29 mode=0x20 meth=0 cmp=0 dec=0 vol=0
crc=0x00000000 (0) time=2011-05-10 21:28:47.899345
name=afile.txt
mtime=2011-05-10 21:28:47.899345
ctime=2011-05-10 21:28:47.897843

Binary file not shown.

View file

@ -1,8 +0,0 @@
Archive: files/ctime3.rar
FILE: hdrlen=52 datlen=0 hdr_unknown=0
flags=0x9020:EXTTIME,LONG,D128
os=2:WIN ver=29 mode=0x20 meth=0 cmp=0 dec=0 vol=0
crc=0x00000000 (0) time=2011-05-10 21:28:47.899345
name=afile.txt
mtime=2011-05-10 21:28:47.899345
ctime=2011-05-10 21:28:47.899327

Binary file not shown.

View file

@ -1,8 +0,0 @@
Archive: files/ctime4.rar
FILE: hdrlen=53 datlen=0 hdr_unknown=0
flags=0x9020:EXTTIME,LONG,D128
os=2:WIN ver=29 mode=0x20 meth=0 cmp=0 dec=0 vol=0
crc=0x00000000 (0) time=2011-05-10 21:28:47.899345
name=afile.txt
mtime=2011-05-10 21:28:47.899345
ctime=2011-05-10 21:28:47.899345

View file

@ -1,14 +0,0 @@
Archive: files/rar15-comment-lock.rar
comment='RARcomment -----'
FILE: hdrlen=72 datlen=7 hdr_unknown=31
flags=0x8008:COMMENT,LONG,D64
os=0:DOS ver=15 mode=0x20 meth=3 cmp=7 dec=7 vol=0
crc=0xe27f07a9 (3799975849) time=2010-11-03 19:49:32
name=FILE1.TXT
comment='file1comment -----'
FILE: hdrlen=72 datlen=8 hdr_unknown=31
flags=0x8008:COMMENT,LONG,D64
os=0:DOS ver=15 mode=0x20 meth=0 cmp=8 dec=8 vol=0
crc=0x3c4306f7 (1011025655) time=2010-11-03 19:49:38
name=FILE2.TXT
comment='file2comment -----'

View file

@ -1,14 +0,0 @@
Archive: files/rar15-comment.rar
comment='RARcomment -----'
FILE: hdrlen=72 datlen=7 hdr_unknown=31
flags=0x8008:COMMENT,LONG,D64
os=0:DOS ver=15 mode=0x20 meth=3 cmp=7 dec=7 vol=0
crc=0xe27f07a9 (3799975849) time=2010-11-03 19:49:32
name=FILE1.TXT
comment='file1comment -----'
FILE: hdrlen=72 datlen=8 hdr_unknown=31
flags=0x8008:COMMENT,LONG,D64
os=0:DOS ver=15 mode=0x20 meth=0 cmp=8 dec=8 vol=0
crc=0x3c4306f7 (1011025655) time=2010-11-03 19:49:38
name=FILE2.TXT
comment='file2comment -----'

View file

@ -1,14 +0,0 @@
Archive: files/rar202-comment-nopsw.rar
comment='RARcomment'
FILE: hdrlen=66 datlen=7 hdr_unknown=25
flags=0x8008:COMMENT,LONG,D64
os=0:DOS ver=20 mode=0x20 meth=0 cmp=7 dec=7 vol=0
crc=0x7a197dba (2048490938) time=2010-11-03 00:27:28
name=FILE1.TXT
comment='file1comment'
FILE: hdrlen=66 datlen=7 hdr_unknown=25
flags=0x8008:COMMENT,LONG,D64
os=0:DOS ver=20 mode=0x20 meth=0 cmp=7 dec=7 vol=0
crc=0x785fc3e3 (2019541987) time=2010-11-03 00:27:34
name=FILE2.TXT
comment='file2comment'

View file

@ -1,14 +0,0 @@
Archive: files/rar202-comment-psw.rar
comment='RARcomment'
FILE: hdrlen=66 datlen=32 hdr_unknown=25
flags=0x800c:PASSWORD,COMMENT,LONG,D64
os=0:DOS ver=20 mode=0x20 meth=3 cmp=32 dec=7 vol=0
crc=0x7a197dba (2048490938) time=2010-11-03 00:27:28
name=FILE1.TXT
comment='file1comment'
FILE: hdrlen=66 datlen=32 hdr_unknown=25
flags=0x800c:PASSWORD,COMMENT,LONG,D64
os=0:DOS ver=20 mode=0x20 meth=3 cmp=32 dec=7 vol=0
crc=0x785fc3e3 (2019541987) time=2010-11-03 00:27:34
name=FILE2.TXT
comment='file2comment'

View file

@ -1,16 +0,0 @@
Archive: files/rar3-comment-hpsw.rar
comment='RARcomment\n'
FILE: hdrlen=51 datlen=16 hdr_unknown=0
flags=0x9424:PASSWORD,SALT,EXTTIME,LONG,D128
os=3:UNIX ver=29 mode=0100644 meth=3 cmp=16 dec=0 vol=0
crc=0x00000000 (0) time=2010-11-02 10:03:25
name=file1.txt
mtime=2010-11-02 10:03:25
comment='Comment1v2\n'
FILE: hdrlen=51 datlen=16 hdr_unknown=0
flags=0x9424:PASSWORD,SALT,EXTTIME,LONG,D128
os=3:UNIX ver=29 mode=0100644 meth=3 cmp=16 dec=0 vol=0
crc=0x00000000 (0) time=2010-11-02 10:03:25
name=file2.txt
mtime=2010-11-02 10:03:25
comment='Comment2v2\n'

View file

@ -1,16 +0,0 @@
Archive: files/rar3-comment-plain.rar
comment='RARcomment\n'
FILE: hdrlen=43 datlen=8 hdr_unknown=0
flags=0x9020:EXTTIME,LONG,D128
os=3:UNIX ver=29 mode=0100644 meth=3 cmp=8 dec=0 vol=0
crc=0x00000000 (0) time=2010-11-02 10:03:25
name=file1.txt
mtime=2010-11-02 10:03:25
comment='Comment1v2\n'
FILE: hdrlen=43 datlen=8 hdr_unknown=0
flags=0x9020:EXTTIME,LONG,D128
os=3:UNIX ver=29 mode=0100644 meth=3 cmp=8 dec=0 vol=0
crc=0x00000000 (0) time=2010-11-02 10:03:25
name=file2.txt
mtime=2010-11-02 10:03:25
comment='Comment2v2\n'

View file

@ -1,16 +0,0 @@
Archive: files/rar3-comment-psw.rar
comment='RARcomment\n'
FILE: hdrlen=51 datlen=16 hdr_unknown=0
flags=0x9424:PASSWORD,SALT,EXTTIME,LONG,D128
os=3:UNIX ver=29 mode=0100644 meth=3 cmp=16 dec=0 vol=0
crc=0x00000000 (0) time=2010-11-02 10:03:25
name=file1.txt
mtime=2010-11-02 10:03:25
comment='Comment1v2\n'
FILE: hdrlen=51 datlen=16 hdr_unknown=0
flags=0x9424:PASSWORD,SALT,EXTTIME,LONG,D128
os=3:UNIX ver=29 mode=0100644 meth=3 cmp=16 dec=0 vol=0
crc=0x00000000 (0) time=2010-11-02 10:03:25
name=file2.txt
mtime=2010-11-02 10:03:25
comment='Comment2v2\n'

View file

@ -1,13 +0,0 @@
Archive: files/seektest.rar
FILE: hdrlen=44 datlen=90 hdr_unknown=0
flags=0x9020:EXTTIME,LONG,D128
os=3:UNIX ver=29 mode=0100644 meth=5 cmp=90 dec=2048 vol=0
crc=0xc5b7e6a2 (3317163682) time=2011-06-12 12:53:33
name=stest1.txt
mtime=2011-06-12 12:53:33
FILE: hdrlen=44 datlen=2048 hdr_unknown=0
flags=0x9020:EXTTIME,LONG,D128
os=3:UNIX ver=20 mode=0100644 meth=0 cmp=2048 dec=2048 vol=0
crc=0xc5b7e6a2 (3317163682) time=2011-06-12 12:53:33
name=stest2.txt
mtime=2011-06-12 12:53:33

Binary file not shown.

View file

@ -1,11 +0,0 @@
Archive: files/unicode.rar
FILE: hdrlen=54 datlen=17 hdr_unknown=0
flags=0x8080:LONG,D1024
os=3:UNIX ver=29 mode=0100644 meth=5 cmp=17 dec=2 vol=0
crc=0x6751fc53 (1733426259) time=2011-07-06 16:48:04
name=уииоотивл.txt
FILE: hdrlen=52 datlen=13 hdr_unknown=0
flags=0x8090:SOLID,LONG,D1024
os=3:UNIX ver=29 mode=0100644 meth=5 cmp=13 dec=2 vol=0
crc=0x6751fc53 (1733426259) time=2011-07-06 16:48:04
name=𝐀𝐁𝐁𝐂.txt

View file

@ -1,32 +0,0 @@
#! /bin/sh
PYTHONPATH=..:$PYTHONPATH
export PYTHONPATH
JAVA_OPTIONS="-Dpython.path=`pwd`/.."
export JAVA_OPTIONS
plist="python2.7 python3.2 python3.3 python3.4 python3.5 python3.6 pypy jython jython2.7"
rm -f test.diffs
for py in $plist; do
if which $py > /dev/null; then
for f in files/*.rar; do
printf "%s -> %-30s .. " $py $f
$py ../dumprar.py -t -t -v -ppassword $f > $f.$py
if diff -uw $f.exp $f.$py > /dev/null; then
echo "ok"
else
echo "FAIL"
echo "#### $py ####" >> test.diffs
diff -uw $f.exp $f.$py >> test.diffs
fi
done
echo ""
else
echo $py not available
echo ""
fi
done

View file

@ -1,19 +0,0 @@
#! /bin/sh
cp ../rarfile.py .
#ulimit -n 16
plist="python2.7 python3.2 python3.3 python3.4 python3.5 python3.6 pypy jython jython2.7"
for py in $plist; do
if which $py > /dev/null; then
echo "== $py =="
$py ./testseek.py
$py ./testio.py
$py ./testcorrupt.py --quick
fi
done
rm -f rarfile.py

View file

@ -1,85 +0,0 @@
#! /usr/bin/env python
import rarfile
import sys, os, time
import tempfile
def progress():
sys.stdout.write('.')
sys.stdout.flush()
def try_read(tmpfn):
#progress()
try:
rf = rarfile.RarFile(tmpfn)
if rf.needs_password():
rf.setpassword('password')
except rarfile.Error:
return
for fn in rf.namelist():
try:
data = rf.read(fn)
pass
except rarfile.Error:
pass
def test_rar(rarfn):
data = open(rarfn, "rb").read()
fd, tmpfn = tempfile.mkstemp('.rar')
os.close(fd)
print('testcorrupt 1')
for n in range(len(data)):
bad = data[:n]
f = open(tmpfn, 'wb')
f.write(bad)
f.close()
try_read(tmpfn)
print('testcorrupt 2')
crap = rarfile.RAR_ID
for n in range(1, len(data)):
for i in range(len(crap)):
c = crap[i:i+1]
bad = data[:n - 1] + c + data[n:]
f = open(tmpfn, 'wb')
f.write(bad)
f.close()
try_read(tmpfn)
os.unlink(tmpfn)
test_rar_list = [
"files/ctime0.rar",
"files/ctime1.rar",
"files/ctime2.rar",
"files/ctime3.rar",
"files/ctime4.rar",
"files/seektest.rar",
"files/rar15-comment-lock.rar",
"files/rar15-comment.rar",
"files/rar202-comment-nopsw.rar",
"files/rar202-comment-psw.rar",
"files/rar3-comment-hpsw.rar",
"files/rar3-comment-plain.rar",
"files/rar3-comment-psw.rar",
"files/unicode.rar",
]
def main():
if sys.argv[-1] == '--quick':
test_rar("files/rar3-comment-plain.rar")
return
for rar in test_rar_list:
print(rar)
test_rar(rar)
if __name__ == '__main__':
try:
main()
except OSError:
print('OSError: pid = %d' % os.getpid())
time.sleep(80000)

View file

@ -1,35 +0,0 @@
#! /usr/bin/env python
import rarfile, os, os.path, time, sys
try:
from io import BufferedReader, TextIOWrapper
except ImportError:
print('no io module')
sys.exit(0)
def BufferedReader(x): return x
def TextIOWrapper(x): return x
def test_readline(rf, fn):
f = rf.open(fn)
tr = TextIOWrapper(BufferedReader(f))
while 1:
ln = tr.readline()
if not ln:
break
tr.close()
def main():
files = ['stest1.txt', 'stest2.txt']
arc = 'files/seektest.rar'
rf = rarfile.RarFile(arc, crc_check=0)
for fn in files:
sys.stdout.write('test/readline: %s .. ' % fn)
sys.stdout.flush()
test_readline(rf, fn)
print('ok')
if __name__ == '__main__':
main()

View file

@ -1,103 +0,0 @@
#! /usr/bin/env python
import rarfile, os, os.path, time, sys
def show_fds():
fdir = "/proc/%d/fd" % os.getpid()
if os.path.isdir(fdir):
os.system('printf "fds = "; ls -l %s | wc -l' % fdir)
def do_seek(f, pos, lim):
ofs = pos*4
fsize = lim*4
if ofs < 0:
exp = 0
elif ofs > fsize:
exp = fsize
else:
exp = ofs
f.seek(ofs)
got = f.tell()
if got != exp:
raise Exception('seek failed (got=%d, exp=%d)' % (got, exp))
ln = f.read(4)
if got == fsize and ln:
raise Exception('unexpected read')
if not ln and got < fsize:
raise Exception('unexpected read failure')
if ln:
spos = int(ln)
if spos*4 != got:
raise Exception('unexpected pos: spos=%d pos=%d' % (spos, pos))
def test_seek(rf, fn):
inf = rf.getinfo(fn)
cnt = int(inf.file_size / 4)
f = rf.open(fn)
do_seek(f, int(cnt/2), cnt)
do_seek(f, 0, cnt)
for i in range(int(cnt/2)):
do_seek(f, i*2, cnt)
for i in range(cnt):
do_seek(f, i*2 - int(cnt / 2), cnt)
for i in range(cnt + 10):
do_seek(f, cnt - i - 5, cnt)
f.close()
print('OK')
def test_arc(arc, desc):
files = ['stest1.txt', 'stest2.txt']
rf = rarfile.RarFile(arc, crc_check=0)
for fn in files:
sys.stdout.write('%s | test/seek %s .. ' % (desc, fn))
sys.stdout.flush()
test_seek(rf, fn)
def main():
arc = 'files/seektest.rar'
data = open(arc, 'rb').read()
# filename
test_arc(arc, "fn")
# filelike: cStringIO
try:
import cStringIO
test_arc(cStringIO.StringIO(data), "cStringIO")
except ImportError:
pass
# filelike: io.BytesIO, io.open()
try:
import io
test_arc(io.BytesIO(data), "io.BytesIO")
test_arc(io.open(arc, 'rb'), "io.open")
except ImportError:
pass
# filelike: StringIO
try:
import StringIO
test_arc(StringIO.StringIO(data), "StringIO")
except ImportError:
pass
# filelike: file()
test_arc(open(arc, 'rb'), "file")
time.sleep(1)
show_fds()
if __name__ == '__main__':
main()

View file

@ -2,25 +2,25 @@
# Copyright (c) 2008-2013 Erik Svensson <erik.public@gmail.com>
# Licensed under the MIT license.

import re, time, operator, warnings, os
import base64
import json

from transmissionrpc.constants import DEFAULT_PORT, DEFAULT_TIMEOUT
from transmissionrpc.error import TransmissionError, HTTPHandlerError
from transmissionrpc.utils import LOGGER, get_arguments, make_rpc_name, argument_value_convert, rpc_bool
from transmissionrpc.httphandler import DefaultHTTPHandler
from transmissionrpc.torrent import Torrent
from transmissionrpc.session import Session

from six import PY3, integer_types, string_types, iteritems

if PY3:
    from urllib.parse import urlparse
    from urllib.request import urlopen
else:
    from urlparse import urlparse
    from urllib2 import urlopen
def debug_httperror(error):
    """
@ -45,7 +45,6 @@ def debug_httperror(error):
        )
    )

def parse_torrent_id(arg):
    """Parse a torrent id or torrent hashString."""
    torrent_id = None
@ -59,7 +58,7 @@ def parse_torrent_id(arg):
    elif isinstance(arg, string_types):
        try:
            torrent_id = int(arg)
            if torrent_id >= 2**31:
                torrent_id = None
        except (ValueError, TypeError):
            pass
@ -72,7 +71,6 @@ def parse_torrent_id(arg):
                pass
    return torrent_id

def parse_torrent_ids(args):
    """
    Take things and make them valid torrent identifiers
@ -100,20 +98,19 @@ def parse_torrent_ids(args):
                except ValueError:
                    pass
            if not addition:
                raise ValueError('Invalid torrent id, \"%s\"' % item)
            ids.extend(addition)
    elif isinstance(args, (list, tuple)):
        for item in args:
            ids.extend(parse_torrent_ids(item))
    else:
        torrent_id = parse_torrent_id(args)
        if torrent_id == None:
            raise ValueError('Invalid torrent id')
        else:
            ids = [torrent_id]
    return ids
""" """
Torrent ids Torrent ids
@ -128,27 +125,26 @@ possible to provide a argument called ``timeout``. Timeout is only effective
when using Python 2.6 or later and the default timeout is 30 seconds. when using Python 2.6 or later and the default timeout is 30 seconds.
""" """
class Client(object):
    """
    Client is the class handling the Transmission JSON-RPC client protocol.
    """

    def __init__(self, address='localhost', port=DEFAULT_PORT, user=None, password=None, http_handler=None, timeout=None):
        if isinstance(timeout, (integer_types, float)):
            self._query_timeout = float(timeout)
        else:
            self._query_timeout = DEFAULT_TIMEOUT
        urlo = urlparse(address)
        if urlo.scheme == '':
            base_url = 'http://' + address + ':' + str(port)
            self.url = base_url + '/transmission/rpc'
        else:
            if urlo.port:
                self.url = urlo.scheme + '://' + urlo.hostname + ':' + str(urlo.port) + urlo.path
            else:
                self.url = urlo.scheme + '://' + urlo.hostname + urlo.path
            LOGGER.info('Using custom URL "' + self.url + '".')
            if urlo.username and urlo.password:
                user = urlo.username
                password = urlo.password
@ -204,8 +200,7 @@ class Client(object):
        if timeout is None:
            timeout = self._query_timeout
        while True:
            LOGGER.debug(json.dumps({'url': self.url, 'headers': headers, 'query': query, 'timeout': timeout}, indent=2))
            try:
                result = self.http_handler.request(self.url, query, headers, timeout)
                break
@ -245,25 +240,26 @@ class Client(object):
        elif require_ids:
            raise ValueError('request require ids')

        query = json.dumps({'tag': self._sequence, 'method': method
                            , 'arguments': arguments})
        self._sequence += 1
        start = time.time()
        http_data = self._http_query(query, timeout)
        elapsed = time.time() - start
        LOGGER.info('http request took %.3f s' % (elapsed))

        try:
            data = json.loads(http_data)
        except ValueError as error:
            LOGGER.error('Error: ' + str(error))
            LOGGER.error('Request: \"%s\"' % (query))
            LOGGER.error('HTTP data: \"%s\"' % (http_data))
            raise
        LOGGER.debug(json.dumps(data, indent=2))

        if 'result' in data:
            if data['result'] != 'success':
                raise TransmissionError('Query failed with result \"%s\".' % (data['result']))
        else:
            raise TransmissionError('Query failed without result.')
@ -347,9 +343,8 @@ class Client(object):
        Add a warning to the log if the Transmission RPC version is lower than the provided version.
        """
        if self.rpc_version < version:
            LOGGER.warning('Using feature not supported by server. RPC version for server %d, feature introduced in %d.'
                % (self.rpc_version, version))

    def add_torrent(self, torrent, timeout=None, **kwargs):
        """
@ -409,8 +404,11 @@ class Client(object):
                pass
        if might_be_base64:
            torrent_data = torrent
        args = {}
        if torrent_data:
            args = {'metainfo': torrent_data}
        else:
            args = {'filename': torrent}
        for key, value in iteritems(kwargs):
            argument = make_rpc_name(key)
            (arg, val) = argument_value_convert('torrent-add', argument, value, self.rpc_version)
@ -474,7 +472,7 @@ class Client(object):
""" """
self._rpc_version_warning(3) self._rpc_version_warning(3)
self._request('torrent-remove', self._request('torrent-remove',
{'delete-local-data': rpc_bool(delete_data)}, ids, True, timeout=timeout) {'delete-local-data':rpc_bool(delete_data)}, ids, True, timeout=timeout)
def remove(self, ids, delete_data=False, timeout=None): def remove(self, ids, delete_data=False, timeout=None):
""" """
@ -604,34 +602,34 @@ class Client(object):
        the new methods. list returns a dictionary indexed by torrent id.
        """
        warnings.warn('list has been deprecated, please use get_torrent or get_torrents instead.', DeprecationWarning)
        fields = ['id', 'hashString', 'name', 'sizeWhenDone', 'leftUntilDone'
            , 'eta', 'status', 'rateUpload', 'rateDownload', 'uploadedEver'
            , 'downloadedEver', 'uploadRatio', 'queuePosition']
        return self._request('torrent-get', {'fields': fields}, timeout=timeout)

    def get_files(self, ids=None, timeout=None):
        """
        Get list of files for provided torrent id(s). If ids is empty,
        information for all torrents are fetched. This function returns a dictionary
        for each requested torrent id holding the information about the files.

        ::

            {
                <torrent id>: {
                    <file id>: {
                        'name': <file name>,
                        'size': <file size in bytes>,
                        'completed': <bytes completed>,
                        'priority': <priority ('high'|'normal'|'low')>,
                        'selected': <selected for download (True|False)>
                    }
                    ...
                }
                ...
            }
        """
        fields = ['id', 'name', 'hashString', 'files', 'priorities', 'wanted']
        request_result = self._request('torrent-get', {'fields': fields}, ids, timeout=timeout)
@ -643,22 +641,22 @@ class Client(object):
    def set_files(self, items, timeout=None):
        """
        Set file properties. Takes a dictionary with similar contents as the result
        of `get_files`.

        ::

            {
                <torrent id>: {
                    <file id>: {
                        'priority': <priority ('high'|'normal'|'low')>,
                        'selected': <selected for download (True|False)>
                    }
                    ...
                }
                ...
            }
        """
        if not isinstance(items, dict):
            raise ValueError('Invalid file description')
@ -701,8 +699,8 @@ class Client(object):
    def change_torrent(self, ids, timeout=None, **kwargs):
        """
        Change torrent parameters for the torrent(s) with the supplied id's. The
        parameters are:

        ============================ ===== =============== =======================================================================================
        Argument                     RPC   Replaced by     Description
@ -734,13 +732,13 @@ class Client(object):
        ``uploadLimited``            5     -               Enable upload speed limiter.
        ============================ ===== =============== =======================================================================================

        .. NOTE::
           transmissionrpc will try to automatically fix argument errors.
        """
        args = {}
        for key, value in iteritems(kwargs):
            argument = make_rpc_name(key)
            (arg, val) = argument_value_convert('torrent-set' , argument, value, self.rpc_version)
            args[arg] = val
        if len(args) > 0:
@ -801,7 +799,7 @@ class Client(object):
raise ValueError("Target name cannot contain a path delimiter") raise ValueError("Target name cannot contain a path delimiter")
args = {'path': location, 'name': name} args = {'path': location, 'name': name}
result = self._request('torrent-rename-path', args, torrent_id, True, timeout=timeout) result = self._request('torrent-rename-path', args, torrent_id, True, timeout=timeout)
return result['path'], result['name'] return (result['path'], result['name'])
def queue_top(self, ids, timeout=None): def queue_top(self, ids, timeout=None):
"""Move transfer to the top of the queue.""" """Move transfer to the top of the queue."""
@ -812,7 +810,7 @@ class Client(object):
"""Move transfer to the bottom of the queue.""" """Move transfer to the bottom of the queue."""
self._rpc_version_warning(14) self._rpc_version_warning(14)
self._request('queue-move-bottom', ids=ids, require_ids=True, timeout=timeout) self._request('queue-move-bottom', ids=ids, require_ids=True, timeout=timeout)
def queue_up(self, ids, timeout=None): def queue_up(self, ids, timeout=None):
"""Move transfer up in the queue.""" """Move transfer up in the queue."""
self._rpc_version_warning(14) self._rpc_version_warning(14)
@ -886,14 +884,14 @@ class Client(object):
        ================================ ===== ================= ==========================================================================================================================

        .. NOTE::
           transmissionrpc will try to automatically fix argument errors.
        """
        args = {}
        for key, value in iteritems(kwargs):
            if key == 'encryption' and value not in ['required', 'preferred', 'tolerated']:
                raise ValueError('Invalid encryption value')
            argument = make_rpc_name(key)
            (arg, val) = argument_value_convert('session-set' , argument, value, self.rpc_version)
            args[arg] = val
        if len(args) > 0:
            self._request('session-set', args, timeout=timeout)
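# For context, a minimal sketch of driving this client end-to-end; the host,
# port and printed fields below are assumptions, not part of this module:
#
#     import transmissionrpc
#     client = transmissionrpc.Client('localhost', port=9091)
#     for torrent in client.get_torrents():
#         print(torrent.id, torrent.name)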

View file

@ -3,13 +3,11 @@
# Licensed under the MIT license.

import logging

from six import iteritems

LOGGER = logging.getLogger('transmissionrpc')
LOGGER.setLevel(logging.ERROR)

def mirror_dict(source):
    """
    Creates a dictionary with all values as keys and all keys as values.
@ -17,39 +15,38 @@ def mirror_dict(source):
    source.update(dict((value, key) for key, value in iteritems(source)))
    return source
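# Illustration (hypothetical values): mirror_dict({'low': -1, 'high': 1})
# returns {'low': -1, 'high': 1, -1: 'low', 1: 'high'} -- the keys and
# values are mirrored in place in the same dict.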
DEFAULT_PORT = 9091

DEFAULT_TIMEOUT = 30.0

TR_PRI_LOW = -1
TR_PRI_NORMAL = 0
TR_PRI_HIGH = 1

PRIORITY = mirror_dict({
    'low' : TR_PRI_LOW,
    'normal' : TR_PRI_NORMAL,
    'high' : TR_PRI_HIGH
})

TR_RATIOLIMIT_GLOBAL = 0    # follow the global settings
TR_RATIOLIMIT_SINGLE = 1    # override the global settings, seeding until a certain ratio
TR_RATIOLIMIT_UNLIMITED = 2 # override the global settings, seeding regardless of ratio

RATIO_LIMIT = mirror_dict({
    'global' : TR_RATIOLIMIT_GLOBAL,
    'single' : TR_RATIOLIMIT_SINGLE,
    'unlimited' : TR_RATIOLIMIT_UNLIMITED
})

TR_IDLELIMIT_GLOBAL = 0     # follow the global settings
TR_IDLELIMIT_SINGLE = 1     # override the global settings, seeding until a certain idle time
TR_IDLELIMIT_UNLIMITED = 2  # override the global settings, seeding regardless of activity

IDLE_LIMIT = mirror_dict({
    'global' : TR_RATIOLIMIT_GLOBAL,
    'single' : TR_RATIOLIMIT_SINGLE,
    'unlimited' : TR_RATIOLIMIT_UNLIMITED
})

# A note on argument maps
@ -63,266 +60,236 @@ IDLE_LIMIT = mirror_dict({
# Arguments for torrent methods
TORRENT_ARGS = {
    'get' : {
        'activityDate': ('number', 1, None, None, None, 'Last time of upload or download activity.'),
        'addedDate': ('number', 1, None, None, None, 'The date when this torrent was first added.'),
        'announceResponse': ('string', 1, 7, None, None, 'The announce message from the tracker.'),
        'announceURL': ('string', 1, 7, None, None, 'Current announce URL.'),
        'bandwidthPriority': ('number', 5, None, None, None, 'Bandwidth priority. Low (-1), Normal (0) or High (1).'),
        'comment': ('string', 1, None, None, None, 'Torrent comment.'),
        'corruptEver': ('number', 1, None, None, None, 'Number of bytes of corrupt data downloaded.'),
        'creator': ('string', 1, None, None, None, 'Torrent creator.'),
        'dateCreated': ('number', 1, None, None, None, 'Torrent creation date.'),
        'desiredAvailable': ('number', 1, None, None, None, 'Number of bytes avalable and left to be downloaded.'),
        'doneDate': ('number', 1, None, None, None, 'The date when the torrent finished downloading.'),
        'downloadDir': ('string', 4, None, None, None, 'The directory path where the torrent is downloaded to.'),
        'downloadedEver': ('number', 1, None, None, None, 'Number of bytes of good data downloaded.'),
        'downloaders': ('number', 4, 7, None, None, 'Number of downloaders.'),
        'downloadLimit': ('number', 1, None, None, None, 'Download limit in Kbps.'),
        'downloadLimited': ('boolean', 5, None, None, None, 'Download limit is enabled'),
        'downloadLimitMode': ('number', 1, 5, None, None, 'Download limit mode. 0 means global, 1 means signle, 2 unlimited.'),
        'error': ('number', 1, None, None, None, 'Kind of error. 0 means OK, 1 means tracker warning, 2 means tracker error, 3 means local error.'),
        'errorString': ('number', 1, None, None, None, 'Error message.'),
        'eta': ('number', 1, None, None, None, 'Estimated number of seconds left when downloading or seeding. -1 means not available and -2 means unknown.'),
        'etaIdle': ('number', 15, None, None, None, 'Estimated number of seconds left until the idle time limit is reached. -1 means not available and -2 means unknown.'),
        'files': ('array', 1, None, None, None, 'Array of file object containing key, bytesCompleted, length and name.'),
        'fileStats': ('array', 5, None, None, None, 'Aray of file statistics containing bytesCompleted, wanted and priority.'),
        'hashString': ('string', 1, None, None, None, 'Hashstring unique for the torrent even between sessions.'),
        'haveUnchecked': ('number', 1, None, None, None, 'Number of bytes of partial pieces.'),
        'haveValid': ('number', 1, None, None, None, 'Number of bytes of checksum verified data.'),
        'honorsSessionLimits': ('boolean', 5, None, None, None, 'True if session upload limits are honored'),
        'id': ('number', 1, None, None, None, 'Session unique torrent id.'),
        'isFinished': ('boolean', 9, None, None, None, 'True if the torrent is finished. Downloaded and seeded.'),
        'isPrivate': ('boolean', 1, None, None, None, 'True if the torrent is private.'),
        'isStalled': ('boolean', 14, None, None, None, 'True if the torrent has stalled (been idle for a long time).'),
        'lastAnnounceTime': ('number', 1, 7, None, None, 'The time of the last announcement.'),
        'lastScrapeTime': ('number', 1, 7, None, None, 'The time af the last successful scrape.'),
        'leechers': ('number', 1, 7, None, None, 'Number of leechers.'),
        'leftUntilDone': ('number', 1, None, None, None, 'Number of bytes left until the download is done.'),
        'magnetLink': ('string', 7, None, None, None, 'The magnet link for this torrent.'),
        'manualAnnounceTime': ('number', 1, None, None, None, 'The time until you manually ask for more peers.'),
        'maxConnectedPeers': ('number', 1, None, None, None, 'Maximum of connected peers.'),
        'metadataPercentComplete': ('number', 7, None, None, None, 'Download progress of metadata. 0.0 to 1.0.'),
        'name': ('string', 1, None, None, None, 'Torrent name.'),
        'nextAnnounceTime': ('number', 1, 7, None, None, 'Next announce time.'),
        'nextScrapeTime': ('number', 1, 7, None, None, 'Next scrape time.'),
        'peer-limit': ('number', 5, None, None, None, 'Maximum number of peers.'),
        'peers': ('array', 2, None, None, None, 'Array of peer objects.'),
        'peersConnected': ('number', 1, None, None, None, 'Number of peers we are connected to.'),
        'peersFrom': ('object', 1, None, None, None, 'Object containing download peers counts for different peer types.'),
        'peersGettingFromUs': ('number', 1, None, None, None, 'Number of peers we are sending data to.'),
        'peersKnown': ('number', 1, 13, None, None, 'Number of peers that the tracker knows.'),
        'peersSendingToUs': ('number', 1, None, None, None, 'Number of peers sending to us'),
        'percentDone': ('double', 5, None, None, None, 'Download progress of selected files. 0.0 to 1.0.'),
        'pieces': ('string', 5, None, None, None, 'String with base64 encoded bitfield indicating finished pieces.'),
        'pieceCount': ('number', 1, None, None, None, 'Number of pieces.'),
        'pieceSize': ('number', 1, None, None, None, 'Number of bytes in a piece.'),
        'priorities': ('array', 1, None, None, None, 'Array of file priorities.'),
        'queuePosition': ('number', 14, None, None, None, 'The queue position.'),
        'rateDownload': ('number', 1, None, None, None, 'Download rate in bps.'),
        'rateUpload': ('number', 1, None, None, None, 'Upload rate in bps.'),
        'recheckProgress': ('double', 1, None, None, None, 'Progress of recheck. 0.0 to 1.0.'),
        'secondsDownloading': ('number', 15, None, None, None, ''),
        'secondsSeeding': ('number', 15, None, None, None, ''),
        'scrapeResponse': ('string', 1, 7, None, None, 'Scrape response message.'),
        'scrapeURL': ('string', 1, 7, None, None, 'Current scrape URL'),
        'seeders': ('number', 1, 7, None, None, 'Number of seeders reported by the tracker.'),
        'seedIdleLimit': ('number', 10, None, None, None, 'Idle limit in minutes.'),
        'seedIdleMode': ('number', 10, None, None, None, 'Use global (0), torrent (1), or unlimited (2) limit.'),
        'seedRatioLimit': ('double', 5, None, None, None, 'Seed ratio limit.'),
        'seedRatioMode': ('number', 5, None, None, None, 'Use global (0), torrent (1), or unlimited (2) limit.'),
        'sizeWhenDone': ('number', 1, None, None, None, 'Size of the torrent download in bytes.'),
        'startDate': ('number', 1, None, None, None, 'The date when the torrent was last started.'),
        'status': ('number', 1, None, None, None, 'Current status, see source'),
        'swarmSpeed': ('number', 1, 7, None, None, 'Estimated speed in Kbps in the swarm.'),
        'timesCompleted': ('number', 1, 7, None, None, 'Number of successful downloads reported by the tracker.'),
        'trackers': ('array', 1, None, None, None, 'Array of tracker objects.'),
        'trackerStats': ('object', 7, None, None, None, 'Array of object containing tracker statistics.'),
        'totalSize': ('number', 1, None, None, None, 'Total size of the torrent in bytes'),
        'torrentFile': ('string', 5, None, None, None, 'Path to .torrent file.'),
        'uploadedEver': ('number', 1, None, None, None, 'Number of bytes uploaded, ever.'),
        'uploadLimit': ('number', 1, None, None, None, 'Upload limit in Kbps'),
        'uploadLimitMode': ('number', 1, 5, None, None, 'Upload limit mode. 0 means global, 1 means signle, 2 unlimited.'),
        'uploadLimited': ('boolean', 5, None, None, None, 'Upload limit enabled.'),
        'uploadRatio': ('double', 1, None, None, None, 'Seed ratio.'),
        'wanted': ('array', 1, None, None, None, 'Array of booleans indicated wanted files.'),
        'webseeds': ('array', 1, None, None, None, 'Array of webseeds objects'),
        'webseedsSendingToUs': ('number', 1, None, None, None, 'Number of webseeds seeding to us.'),
'webseeds': ('array', 1, None, None, None, 'Array of webseeds objects'),
'webseedsSendingToUs': ('number', 1, None, None, None, 'Number of webseeds seeding to us.'),
    },
    'set': {
        'bandwidthPriority': ('number', 5, None, None, None, 'Priority for this transfer.'),
        'downloadLimit': ('number', 5, None, 'speed-limit-down', None, 'Set the speed limit for download in Kib/s.'),
        'downloadLimited': ('boolean', 5, None, 'speed-limit-down-enabled', None, 'Enable download speed limiter.'),
        'files-wanted': ('array', 1, None, None, None, "A list of file id's that should be downloaded."),
        'files-unwanted': ('array', 1, None, None, None, "A list of file id's that shouldn't be downloaded."),
        'honorsSessionLimits': ('boolean', 5, None, None, None, 'Whether the transfer honours the upload limit set in the session.'),
        'location': ('array', 1, None, None, None, 'Local download location.'),
        'peer-limit': ('number', 1, None, None, None, 'The peer limit for the torrents.'),
        'priority-high': ('array', 1, None, None, None, "A list of file id's that should have high priority."),
        'priority-low': ('array', 1, None, None, None, "A list of file id's that should have low priority."),
        'priority-normal': ('array', 1, None, None, None, "A list of file id's that should have normal priority."),
        'queuePosition': ('number', 14, None, None, None, 'Position of this transfer in its queue.'),
        'seedIdleLimit': ('number', 10, None, None, None, 'Seed inactivity limit in minutes.'),
        'seedIdleMode': ('number', 10, None, None, None, 'Seed inactivity mode. 0 = Use session limit, 1 = Use transfer limit, 2 = Disable limit.'),
        'seedRatioLimit': ('double', 5, None, None, None, 'Seeding ratio.'),
        'seedRatioMode': ('number', 5, None, None, None, 'Which ratio to use. 0 = Use session limit, 1 = Use transfer limit, 2 = Disable limit.'),
        'speed-limit-down': ('number', 1, 5, None, 'downloadLimit', 'Set the speed limit for download in Kib/s.'),
        'speed-limit-down-enabled': ('boolean', 1, 5, None, 'downloadLimited', 'Enable download speed limiter.'),
        'speed-limit-up': ('number', 1, 5, None, 'uploadLimit', 'Set the speed limit for upload in Kib/s.'),
        'speed-limit-up-enabled': ('boolean', 1, 5, None, 'uploadLimited', 'Enable upload speed limiter.'),
        'trackerAdd': ('array', 10, None, None, None, 'Array of strings with announce URLs to add.'),
        'trackerRemove': ('array', 10, None, None, None, 'Array of ids of trackers to remove.'),
        'trackerReplace': ('array', 10, None, None, None, 'Array of (id, url) tuples where the announce URL should be replaced.'),
        'uploadLimit': ('number', 5, None, 'speed-limit-up', None, 'Set the speed limit for upload in Kib/s.'),
        'uploadLimited': ('boolean', 5, None, 'speed-limit-up-enabled', None, 'Enable upload speed limiter.'),
    },
    'add': {
        'bandwidthPriority': ('number', 8, None, None, None, 'Priority for this transfer.'),
        'download-dir': ('string', 1, None, None, None, 'The directory where the downloaded contents will be saved.'),
        'cookies': ('string', 13, None, None, None, 'One or more HTTP cookie(s).'),
        'filename': ('string', 1, None, None, None, "A file path or URL to a torrent file or a magnet link."),
        'files-wanted': ('array', 1, None, None, None, "A list of file id's that should be downloaded."),
        'files-unwanted': ('array', 1, None, None, None, "A list of file id's that shouldn't be downloaded."),
        'metainfo': ('string', 1, None, None, None, 'The content of a torrent file, base64 encoded.'),
        'paused': ('boolean', 1, None, None, None, 'If True, does not start the transfer when added.'),
        'peer-limit': ('number', 1, None, None, None, 'Maximum number of peers allowed.'),
        'priority-high': ('array', 1, None, None, None, "A list of file id's that should have high priority."),
        'priority-low': ('array', 1, None, None, None, "A list of file id's that should have low priority."),
        'priority-normal': ('array', 1, None, None, None, "A list of file id's that should have normal priority."),
    }
}
# Arguments for session methods
SESSION_ARGS = {
    'get': {
        "alt-speed-down": ('number', 5, None, None, None, 'Alternate session download speed limit (in Kib/s).'),
        "alt-speed-enabled": ('boolean', 5, None, None, None, 'True if alternate global download speed limiter is enabled.'),
        "alt-speed-time-begin": ('number', 5, None, None, None, 'Time when alternate speeds should be enabled. Minutes after midnight.'),
        "alt-speed-time-enabled": ('boolean', 5, None, None, None, 'True if alternate speeds scheduling is enabled.'),
        "alt-speed-time-end": ('number', 5, None, None, None, 'Time when alternate speeds should be disabled. Minutes after midnight.'),
        "alt-speed-time-day": ('number', 5, None, None, None, 'Days alternate speeds scheduling is enabled.'),
        "alt-speed-up": ('number', 5, None, None, None, 'Alternate session upload speed limit (in Kib/s).'),
        "blocklist-enabled": ('boolean', 5, None, None, None, 'True when blocklist is enabled.'),
        "blocklist-size": ('number', 5, None, None, None, 'Number of rules in the blocklist.'),
        "blocklist-url": ('string', 11, None, None, None, 'Location of the block list. Updated with blocklist-update.'),
        "cache-size-mb": ('number', 10, None, None, None, 'The maximum size of the disk cache in MB.'),
        "config-dir": ('string', 8, None, None, None, "Location of Transmission's configuration directory."),
        "dht-enabled": ('boolean', 6, None, None, None, 'True if DHT is enabled.'),
        "download-dir": ('string', 1, None, None, None, 'The download directory.'),
        "download-dir-free-space": ('number', 12, None, None, None, 'Free space in the download directory, in bytes.'),
        "download-queue-size": ('number', 14, None, None, None, 'Number of slots in the download queue.'),
        "download-queue-enabled": ('boolean', 14, None, None, None, 'True if the download queue is enabled.'),
        "encryption": ('string', 1, None, None, None, 'Encryption mode, one of ``required``, ``preferred`` or ``tolerated``.'),
        "idle-seeding-limit": ('number', 10, None, None, None, 'Seed inactivity limit in minutes.'),
        "idle-seeding-limit-enabled": ('boolean', 10, None, None, None, 'True if the seed activity limit is enabled.'),
        "incomplete-dir": ('string', 7, None, None, None, 'The path to the directory for incomplete torrent transfer data.'),
        "incomplete-dir-enabled": ('boolean', 7, None, None, None, 'True if the incomplete dir is enabled.'),
        "lpd-enabled": ('boolean', 9, None, None, None, 'True if local peer discovery is enabled.'),
        "peer-limit": ('number', 1, 5, None, 'peer-limit-global', 'Maximum number of peers.'),
        "peer-limit-global": ('number', 5, None, 'peer-limit', None, 'Maximum number of peers.'),
        "peer-limit-per-torrent": ('number', 5, None, None, None, 'Maximum number of peers per transfer.'),
        "pex-allowed": ('boolean', 1, 5, None, 'pex-enabled', 'True if PEX is allowed.'),
        "pex-enabled": ('boolean', 5, None, 'pex-allowed', None, 'True if PEX is enabled.'),
        "port": ('number', 1, 5, None, 'peer-port', 'Peer port.'),
        "peer-port": ('number', 5, None, 'port', None, 'Peer port.'),
        "peer-port-random-on-start": ('boolean', 5, None, None, None, 'Enables randomized peer port on start of Transmission.'),
        "port-forwarding-enabled": ('boolean', 1, None, None, None, 'True if port forwarding is enabled.'),
        "queue-stalled-minutes": ('number', 14, None, None, None, 'Number of minutes of idle that marks a transfer as stalled.'),
        "queue-stalled-enabled": ('boolean', 14, None, None, None, 'True if stalled tracking of transfers is enabled.'),
        "rename-partial-files": ('boolean', 8, None, None, None, 'True if ".part" is appended to incomplete files.'),
        "rpc-version": ('number', 4, None, None, None, 'Transmission RPC API Version.'),
        "rpc-version-minimum": ('number', 4, None, None, None, 'Minimum accepted RPC API Version.'),
        "script-torrent-done-enabled": ('boolean', 9, None, None, None, 'True if the done script is enabled.'),
        "script-torrent-done-filename": ('string', 9, None, None, None, 'Filename of the script to run when the transfer is done.'),
        "seedRatioLimit": ('double', 5, None, None, None, 'Seed ratio limit. 1.0 means 1:1 download and upload ratio.'),
        "seedRatioLimited": ('boolean', 5, None, None, None, 'True if seed ratio limit is enabled.'),
        "seed-queue-size": ('number', 14, None, None, None, 'Number of slots in the upload queue.'),
        "seed-queue-enabled": ('boolean', 14, None, None, None, 'True if upload queue is enabled.'),
        "speed-limit-down": ('number', 1, None, None, None, 'Download speed limit (in Kib/s).'),
        "speed-limit-down-enabled": ('boolean', 1, None, None, None, 'True if the download speed is limited.'),
        "speed-limit-up": ('number', 1, None, None, None, 'Upload speed limit (in Kib/s).'),
        "speed-limit-up-enabled": ('boolean', 1, None, None, None, 'True if the upload speed is limited.'),
        "start-added-torrents": ('boolean', 9, None, None, None, 'When true uploaded torrents will start right away.'),
        "trash-original-torrent-files": ('boolean', 9, None, None, None, 'When true added .torrent files will be deleted.'),
        'units': ('object', 10, None, None, None, 'An object containing units for size and speed.'),
        'utp-enabled': ('boolean', 13, None, None, None, 'True if Micro Transport Protocol (UTP) is enabled.'),
        "version": ('string', 3, None, None, None, 'Transmission version.'),
    },
    'set': {
        "alt-speed-down": ('number', 5, None, None, None, 'Alternate session download speed limit (in Kib/s).'),
        "alt-speed-enabled": ('boolean', 5, None, None, None, 'Enables alternate global download speed limiter.'),
        "alt-speed-time-begin": ('number', 5, None, None, None, 'Time when alternate speeds should be enabled. Minutes after midnight.'),
        "alt-speed-time-enabled": ('boolean', 5, None, None, None, 'Enables alternate speeds scheduling.'),
        "alt-speed-time-end": ('number', 5, None, None, None, 'Time when alternate speeds should be disabled. Minutes after midnight.'),
        "alt-speed-time-day": ('number', 5, None, None, None, 'Enables alternate speeds scheduling these days.'),
        "alt-speed-up": ('number', 5, None, None, None, 'Alternate session upload speed limit (in Kib/s).'),
        "blocklist-enabled": ('boolean', 5, None, None, None, 'Enables the block list.'),
        "blocklist-url": ('string', 11, None, None, None, 'Location of the block list. Updated with blocklist-update.'),
        "cache-size-mb": ('number', 10, None, None, None, 'The maximum size of the disk cache in MB.'),
        "dht-enabled": ('boolean', 6, None, None, None, 'Enables DHT.'),
        "download-dir": ('string', 1, None, None, None, 'Set the session download directory.'),
        "download-queue-size": ('number', 14, None, None, None, 'Number of slots in the download queue.'),
        "download-queue-enabled": ('boolean', 14, None, None, None, 'Enables download queue.'),
        "encryption": ('string', 1, None, None, None, 'Set the session encryption mode, one of ``required``, ``preferred`` or ``tolerated``.'),
        "idle-seeding-limit": ('number', 10, None, None, None, 'The default seed inactivity limit in minutes.'),
        "idle-seeding-limit-enabled": ('boolean', 10, None, None, None, 'Enables the default seed inactivity limit.'),
        "incomplete-dir": ('string', 7, None, None, None, 'The path to the directory of incomplete transfer data.'),
        "incomplete-dir-enabled": ('boolean', 7, None, None, None, 'Enables the incomplete transfer data directory. Otherwise data for incomplete transfers are stored in the download target.'),
        "lpd-enabled": ('boolean', 9, None, None, None, 'Enables local peer discovery for public torrents.'),
        "peer-limit": ('number', 1, 5, None, 'peer-limit-global', 'Maximum number of peers.'),
        "peer-limit-global": ('number', 5, None, 'peer-limit', None, 'Maximum number of peers.'),
        "peer-limit-per-torrent": ('number', 5, None, None, None, 'Maximum number of peers per transfer.'),
        "pex-allowed": ('boolean', 1, 5, None, 'pex-enabled', 'Allowing PEX in public torrents.'),
        "pex-enabled": ('boolean', 5, None, 'pex-allowed', None, 'Allowing PEX in public torrents.'),
        "port": ('number', 1, 5, None, 'peer-port', 'Peer port.'),
        "peer-port": ('number', 5, None, 'port', None, 'Peer port.'),
        "peer-port-random-on-start": ('boolean', 5, None, None, None, 'Enables randomized peer port on start of Transmission.'),
        "port-forwarding-enabled": ('boolean', 1, None, None, None, 'Enables port forwarding.'),
        "rename-partial-files": ('boolean', 8, None, None, None, 'Appends ".part" to incomplete files.'),
        "queue-stalled-minutes": ('number', 14, None, None, None, 'Number of minutes of idle that marks a transfer as stalled.'),
        "queue-stalled-enabled": ('boolean', 14, None, None, None, 'Enable tracking of stalled transfers.'),
        "script-torrent-done-enabled": ('boolean', 9, None, None, None, 'Whether or not to call the "done" script.'),
        "script-torrent-done-filename": ('string', 9, None, None, None, 'Filename of the script to run when the transfer is done.'),
        "seed-queue-size": ('number', 14, None, None, None, 'Number of slots in the upload queue.'),
        "seed-queue-enabled": ('boolean', 14, None, None, None, 'Enables upload queue.'),
        "seedRatioLimit": ('double', 5, None, None, None, 'Seed ratio limit. 1.0 means 1:1 download and upload ratio.'),
        "seedRatioLimited": ('boolean', 5, None, None, None, 'Enables seed ratio limit.'),
        "speed-limit-down": ('number', 1, None, None, None, 'Download speed limit (in Kib/s).'),
        "speed-limit-down-enabled": ('boolean', 1, None, None, None, 'Enables download speed limiting.'),
        "speed-limit-up": ('number', 1, None, None, None, 'Upload speed limit (in Kib/s).'),
        "speed-limit-up-enabled": ('boolean', 1, None, None, None, 'Enables upload speed limiting.'),
        "start-added-torrents": ('boolean', 9, None, None, None, 'Added torrents will be started right away.'),
        "trash-original-torrent-files": ('boolean', 9, None, None, None, 'The .torrent file of added torrents will be deleted.'),
        'utp-enabled': ('boolean', 13, None, None, None, 'Enables Micro Transport Protocol (UTP).'),
    },
}
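Each argument above is described by a six-field tuple. The sketch below, which is not part of the library, shows how one specification can be read; the field meanings are inferred from how utils.argument_value_convert() consumes these tuples, so treat the names as assumptions:

    # Assumed tuple layout:
    # (type, added in RPC version, removed in RPC version,
    #  previous argument name, replacement argument name, description)
    spec = SESSION_ARGS['get']['peer-limit']
    arg_type, added, removed, previous, replacement, description = spec
    print(arg_type)        # 'number'
    print(added, removed)  # 1 5 -- valid from RPC v1, gone in v5
    print(replacement)     # 'peer-limit-global' -- the newer name to use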

View file

@ -2,15 +2,13 @@
 # Copyright (c) 2008-2013 Erik Svensson <erik.public@gmail.com>
 # Licensed under the MIT license.

-from six import integer_types, string_types
+from six import string_types, integer_types


 class TransmissionError(Exception):
     """
     This exception is raised when there has occurred an error related to
     communication with Transmission. It is a subclass of Exception.
     """

     def __init__(self, message='', original=None):
         Exception.__init__(self)
         self.message = message
@ -19,17 +17,15 @@ class TransmissionError(Exception):
     def __str__(self):
         if self.original:
             original_name = type(self.original).__name__
-            return '{0} Original exception: {1}, "{2}"'.format(self.message, original_name, str(self.original))
+            return '%s Original exception: %s, "%s"' % (self.message, original_name, str(self.original))
         else:
             return self.message


 class HTTPHandlerError(Exception):
     """
     This exception is raised when there has occurred an error related to
     the HTTP handler. It is a subclass of Exception.
     """

     def __init__(self, httpurl=None, httpcode=None, httpmsg=None, httpheaders=None, httpdata=None):
         Exception.__init__(self)
         self.url = ''
@ -49,10 +45,10 @@ class HTTPHandlerError(Exception):
         self.data = httpdata

     def __repr__(self):
-        return '<HTTPHandlerError {0:d}, {1}>'.format(self.code, self.message)
+        return '<HTTPHandlerError %d, %s>' % (self.code, self.message)

     def __str__(self):
-        return 'HTTPHandlerError {0:d}: {1}'.format(self.code, self.message)
+        return 'HTTPHandlerError %d: %s' % (self.code, self.message)

     def __unicode__(self):
-        return 'HTTPHandlerError {0:d}: {1}'.format(self.code, self.message)
+        return 'HTTPHandlerError %d: %s' % (self.code, self.message)
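A brief usage sketch for the exceptions above; transmissionrpc.Client and its constructor arguments come from the wider library and are not shown in this diff:

    import transmissionrpc

    try:
        client = transmissionrpc.Client('localhost', port=9091)
    except transmissionrpc.TransmissionError as error:
        # __str__ above appends the original exception, e.g. an HTTPHandlerError
        print('Failed to connect: %s' % error)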

View file

@ -4,24 +4,25 @@
 import sys

-from six.moves.http_client import BadStatusLine
-from six.moves.urllib_error import HTTPError, URLError
-from six.moves.urllib_request import (
-    HTTPBasicAuthHandler,
-    HTTPDigestAuthHandler,
-    HTTPPasswordMgrWithDefaultRealm,
-    Request,
-    build_opener,
-)
-
-from .error import HTTPHandlerError
+from transmissionrpc.error import HTTPHandlerError
+
+from six import PY3
+if PY3:
+    from urllib.request import Request, build_opener, \
+        HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler, HTTPDigestAuthHandler
+    from urllib.error import HTTPError, URLError
+    from http.client import BadStatusLine
+else:
+    from urllib2 import Request, build_opener, \
+        HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler, HTTPDigestAuthHandler
+    from urllib2 import HTTPError, URLError
+    from httplib import BadStatusLine


 class HTTPHandler(object):
     """
     Prototype for HTTP handling.
     """

     def set_authentication(self, uri, login, password):
         """
         Transmission uses basic authentication in earlier versions and digest
@ -44,12 +45,10 @@ class HTTPHandler(object):
         """
         raise NotImplementedError("Bad HTTPHandler, failed to implement request.")


 class DefaultHTTPHandler(HTTPHandler):
     """
     The default HTTP handler provided with transmissionrpc.
     """

     def __init__(self):
         HTTPHandler.__init__(self)
         self.http_opener = build_opener()
@ -77,7 +76,7 @@ class DefaultHTTPHandler(HTTPHandler):
             if hasattr(error.reason, 'args') and isinstance(error.reason.args, tuple) and len(error.reason.args) == 2:
                 raise HTTPHandlerError(httpcode=error.reason.args[0], httpmsg=error.reason.args[1])
             else:
-                raise HTTPHandlerError(httpmsg='urllib2.URLError: {error.reason}'.format(error=error))
+                raise HTTPHandlerError(httpmsg='urllib2.URLError: %s' % (error.reason))
         except BadStatusLine as error:
-            raise HTTPHandlerError(httpmsg='httplib.BadStatusLine: {error.line}'.format(error=error))
+            raise HTTPHandlerError(httpmsg='httplib.BadStatusLine: %s' % (error.line))
         return response.read().decode('utf-8')
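As a rough sketch, a custom handler only needs to implement the two methods of the prototype above. The requests dependency, the exact request() signature, and the http_handler keyword of Client are assumptions not shown in this diff:

    import requests
    from transmissionrpc.httphandler import HTTPHandler

    class RequestsHTTPHandler(HTTPHandler):
        def __init__(self):
            HTTPHandler.__init__(self)
            self.auth = None

        def set_authentication(self, uri, login, password):
            self.auth = (login, password)

        def request(self, url, query, headers, timeout):
            # POST the JSON-RPC query and hand the body back as text
            response = requests.post(url, data=query, headers=headers,
                                     auth=self.auth, timeout=timeout)
            return response.text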

View file

@ -2,10 +2,9 @@
 # Copyright (c) 2008-2013 Erik Svensson <erik.public@gmail.com>
 # Licensed under the MIT license.

-from six import integer_types, iteritems
-
-from .utils import Field
+from transmissionrpc.utils import Field
+
+from six import iteritems, integer_types


 class Session(object):
     """
@ -27,12 +26,12 @@ class Session(object):
         try:
             return self._fields[name].value
         except KeyError:
-            raise AttributeError('No attribute {0}'.format(name))
+            raise AttributeError('No attribute %s' % name)

     def __str__(self):
         text = ''
         for key in sorted(self._fields.keys()):
-            text += "{0:32}: {1}\n".format(key[-32:], self._fields[key].value)
+            text += "% 32s: %s\n" % (key[-32:], self._fields[key].value)
         return text

     def _update_fields(self, other):
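A short sketch of the attribute access implemented by __getattr__ above; the client object and its get_session() call are assumed from the wider library:

    session = client.get_session()
    print(session.version)        # looked up in session._fields['version']
    print(session.download_dir)   # RPC key 'download-dir', stored with '_' for '-'
    session.no_such_field         # raises AttributeError('No attribute no_such_field')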

View file

@ -2,27 +2,25 @@
 # Copyright (c) 2008-2013 Erik Svensson <erik.public@gmail.com>
 # Licensed under the MIT license.

-import datetime
-import sys
+import sys, datetime

-from six import integer_types, iteritems, string_types, text_type
-
-from .constants import IDLE_LIMIT, PRIORITY, RATIO_LIMIT
-from .utils import Field, format_timedelta
+from transmissionrpc.constants import PRIORITY, RATIO_LIMIT, IDLE_LIMIT
+from transmissionrpc.utils import Field, format_timedelta
+
+from six import integer_types, string_types, text_type, iteritems


 def get_status_old(code):
     """Get the torrent status using old status codes"""
     mapping = {
-        (1 << 0): 'check pending',
-        (1 << 1): 'checking',
-        (1 << 2): 'downloading',
-        (1 << 3): 'seeding',
-        (1 << 4): 'stopped',
+        (1<<0): 'check pending',
+        (1<<1): 'checking',
+        (1<<2): 'downloading',
+        (1<<3): 'seeding',
+        (1<<4): 'stopped',
     }
     return mapping[code]


 def get_status_new(code):
     """Get the torrent status using new status codes"""
     mapping = {
@ -36,7 +34,6 @@ def get_status_new(code):
     }
     return mapping[code]


 class Torrent(object):
     """
     Torrent is a class holding the data received from Transmission regarding a bittorrent transfer.
@ -74,14 +71,14 @@ class Torrent(object):
         tid = self._fields['id'].value
         name = self._get_name_string()
         if isinstance(name, str):
-            return '<Torrent {0:d} \"{1}\">'.format(tid, name)
+            return '<Torrent %d \"%s\">' % (tid, name)
         else:
-            return '<Torrent {0:d}>'.format(tid)
+            return '<Torrent %d>' % (tid)

     def __str__(self):
         name = self._get_name_string()
         if isinstance(name, str):
-            return 'Torrent \"{0}\"'.format(name)
+            return 'Torrent \"%s\"' % (name)
         else:
             return 'Torrent'
@ -92,7 +89,7 @@ class Torrent(object):
         try:
             return self._fields[name].value
         except KeyError:
-            raise AttributeError('No attribute {0}'.format(name))
+            raise AttributeError('No attribute %s' % name)

     def _rpc_version(self):
         """Get the Transmission RPC API version."""
@ -102,9 +99,8 @@ class Torrent(object):
     def _dirty_fields(self):
         """Enumerate changed fields"""
-        outgoing_keys = ['bandwidthPriority', 'downloadLimit', 'downloadLimited', 'peer_limit', 'queuePosition',
-                         'seedIdleLimit', 'seedIdleMode', 'seedRatioLimit', 'seedRatioMode', 'uploadLimit',
-                         'uploadLimited']
+        outgoing_keys = ['bandwidthPriority', 'downloadLimit', 'downloadLimited', 'peer_limit', 'queuePosition'
+            , 'seedIdleLimit', 'seedIdleMode', 'seedRatioLimit', 'seedRatioMode', 'uploadLimit', 'uploadLimited']
         fields = []
         for key in outgoing_keys:
             if key in self._fields and self._fields[key].dirty:
@ -125,6 +121,7 @@ class Torrent(object):
         """
         Update the torrent data from a Transmission JSON-RPC arguments dictionary
         """
+        fields = None
         if isinstance(other, dict):
             for key, value in iteritems(other):
                 self._fields[key.replace('-', '_')] = Field(value, False)
@ -134,7 +131,7 @@ class Torrent(object):
         else:
             raise ValueError('Cannot update with supplied data')
         self._incoming_pending = False

     def _status(self):
         """Get the torrent status"""
         code = self._fields['status'].value
@ -267,14 +264,13 @@ class Torrent(object):
             self._fields['downloadLimited'] = Field(True, True)
             self._fields['downloadLimit'] = Field(limit, True)
             self._push()
-        elif limit is None:
+        elif limit == None:
             self._fields['downloadLimited'] = Field(False, True)
             self._push()
         else:
             raise ValueError("Not a valid limit")

-    download_limit = property(_get_download_limit, _set_download_limit, None,
-                              "Download limit in Kbps or None. This is a mutator.")
+    download_limit = property(_get_download_limit, _set_download_limit, None, "Download limit in Kbps or None. This is a mutator.")

     def _get_peer_limit(self):
         """
@ -311,7 +307,7 @@ class Torrent(object):
         self._push()

     priority = property(_get_priority, _set_priority, None
         , "Bandwidth priority as string. Can be one of 'low', 'normal', 'high'. This is a mutator.")

     def _get_seed_idle_limit(self):
         """
@ -330,7 +326,7 @@ class Torrent(object):
             raise ValueError("Not a valid limit")

     seed_idle_limit = property(_get_seed_idle_limit, _set_seed_idle_limit, None
         , "Torrent seed idle limit in minutes. Also see seed_idle_mode. This is a mutator.")

     def _get_seed_idle_mode(self):
         """
@ -349,7 +345,7 @@ class Torrent(object):
             raise ValueError("Not a valid limit")

     seed_idle_mode = property(_get_seed_idle_mode, _set_seed_idle_mode, None,
         """
         Seed idle mode as string. Can be one of 'global', 'single' or 'unlimited'.

          * global, use session seed idle limit.
@ -358,7 +354,7 @@ class Torrent(object):
         This is a mutator.
         """
     )

     def _get_seed_ratio_limit(self):
         """
@ -377,7 +373,7 @@ class Torrent(object):
             raise ValueError("Not a valid limit")

     seed_ratio_limit = property(_get_seed_ratio_limit, _set_seed_ratio_limit, None
         , "Torrent seed ratio limit as float. Also see seed_ratio_mode. This is a mutator.")

     def _get_seed_ratio_mode(self):
         """
@ -396,7 +392,7 @@ class Torrent(object):
             raise ValueError("Not a valid limit")

     seed_ratio_mode = property(_get_seed_ratio_mode, _set_seed_ratio_mode, None,
         """
         Seed ratio mode as string. Can be one of 'global', 'single' or 'unlimited'.

          * global, use session seed ratio limit.
@ -405,7 +401,7 @@ class Torrent(object):
         This is a mutator.
         """
     )

     def _get_upload_limit(self):
         """
@ -426,14 +422,13 @@ class Torrent(object):
             self._fields['uploadLimited'] = Field(True, True)
             self._fields['uploadLimit'] = Field(limit, True)
             self._push()
-        elif limit is None:
+        elif limit == None:
             self._fields['uploadLimited'] = Field(False, True)
             self._push()
         else:
             raise ValueError("Not a valid limit")

-    upload_limit = property(_get_upload_limit, _set_upload_limit, None,
-                            "Upload limit in Kbps or None. This is a mutator.")
+    upload_limit = property(_get_upload_limit, _set_upload_limit, None, "Upload limit in Kbps or None. This is a mutator.")

     def _get_queue_position(self):
         """Get the queue position for this torrent."""

View file

@ -2,19 +2,15 @@
 # Copyright (c) 2008-2013 Erik Svensson <erik.public@gmail.com>
 # Licensed under the MIT license.

-import datetime
-import logging
-import socket
+import socket, datetime, logging
 from collections import namedtuple

-from six import iteritems, string_types
-
-from . import constants
-from .constants import LOGGER
+import transmissionrpc.constants as constants
+from transmissionrpc.constants import LOGGER
+
+from six import string_types, iteritems

 UNITS = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB']


 def format_size(size):
     """
     Format byte size into IEC prefixes, B, KiB, MiB ...
@ -24,16 +20,14 @@ def format_size(size):
     while size >= 1024.0 and i < len(UNITS):
         i += 1
         size /= 1024.0
-    return size, UNITS[i]
+    return (size, UNITS[i])


 def format_speed(size):
     """
     Format bytes per second speed into IEC prefixes, B/s, KiB/s, MiB/s ...
     """
     (size, unit) = format_size(size)
-    return size, '{unit}/s'.format(unit=unit)
+    return (size, unit + '/s')


 def format_timedelta(delta):
     """
@ -41,8 +35,7 @@ def format_timedelta(delta):
     """
     minutes, seconds = divmod(delta.seconds, 60)
     hours, minutes = divmod(minutes, 60)
-    return '{0:d} {1:02d}:{2:02d}:{3:02d}'.format(delta.days, hours, minutes, seconds)
+    return '%d %02d:%02d:%02d' % (delta.days, hours, minutes, seconds)


 def format_timestamp(timestamp, utc=False):
     """
@ -57,14 +50,12 @@ def format_timestamp(timestamp, utc=False):
     else:
         return '-'


 class INetAddressError(Exception):
     """
     Error parsing / generating an internet address.
     """
     pass


 def inet_address(address, default_port, default_address='localhost'):
     """
     Parse internet address.
@ -81,19 +72,18 @@ def inet_address(address, default_port, default_address='localhost'):
         try:
             port = int(addr[1])
         except ValueError:
-            raise INetAddressError('Invalid address "{0}".'.format(address))
+            raise INetAddressError('Invalid address "%s".' % address)
         if len(addr[0]) == 0:
             addr = default_address
         else:
             addr = addr[0]
     else:
-        raise INetAddressError('Invalid address "{0}".'.format(address))
+        raise INetAddressError('Invalid address "%s".' % address)
     try:
         socket.getaddrinfo(addr, port, socket.AF_INET, socket.SOCK_STREAM)
     except socket.gaierror:
-        raise INetAddressError('Cannot look up address "{0}".'.format(address))
-    return addr, port
+        raise INetAddressError('Cannot look up address "%s".' % address)
+    return (addr, port)


 def rpc_bool(arg):
     """
@ -106,31 +96,27 @@ def rpc_bool(arg):
         arg = arg.lower() in ['true', 'yes']
     return 1 if bool(arg) else 0


 TR_TYPE_MAP = {
-    'number': int,
-    'string': str,
+    'number' : int,
+    'string' : str,
     'double': float,
-    'boolean': rpc_bool,
+    'boolean' : rpc_bool,
     'array': list,
     'object': dict
 }


 def make_python_name(name):
     """
     Convert Transmission RPC name to python compatible name.
     """
     return name.replace('-', '_')


 def make_rpc_name(name):
     """
     Convert python compatible name to Transmission RPC name.
     """
     return name.replace('_', '-')


 def argument_value_convert(method, argument, value, rpc_version):
     """
     Check and fix Transmission RPC issues with regards to methods, arguments and values.
@ -140,7 +126,7 @@ def argument_value_convert(method, argument, value, rpc_version):
     elif method in ('session-get', 'session-set'):
         args = constants.SESSION_ARGS[method[-3:]]
     else:
-        return ValueError('Method "{0}" not supported'.format(method))
+        raise ValueError('Method "%s" not supported' % (method))
     if argument in args:
         info = args[argument]
         invalid_version = True
@ -156,18 +142,19 @@ def argument_value_convert(method, argument, value, rpc_version):
         if invalid_version:
             if replacement:
                 LOGGER.warning(
-                    'Replacing requested argument "{0}" with "{1}".'.format(argument, replacement))
+                    'Replacing requested argument "%s" with "%s".'
+                    % (argument, replacement))
                 argument = replacement
                 info = args[argument]
             else:
                 raise ValueError(
-                    'Method "{0}" Argument "{1}" does not exist in version {2:d}.'.format(method, argument, rpc_version))
-        return argument, TR_TYPE_MAP[info[0]](value)
+                    'Method "%s" Argument "%s" does not exist in version %d.'
+                    % (method, argument, rpc_version))
+        return (argument, TR_TYPE_MAP[info[0]](value))
     else:
         raise ValueError('Argument "%s" does not exist for method "%s".' %
                          (argument, method))


 def get_arguments(method, rpc_version):
     """
     Get arguments for method in specified Transmission RPC version.
@ -177,7 +164,7 @@ def get_arguments(method, rpc_version):
     elif method in ('session-get', 'session-set'):
         args = constants.SESSION_ARGS[method[-3:]]
     else:
-        return ValueError('Method "{0}" not supported'.format(method))
+        raise ValueError('Method "%s" not supported' % (method))
     accessible = []
     for argument, info in iteritems(args):
         valid_version = True
@ -189,7 +176,6 @@ def get_arguments(method, rpc_version):
             accessible.append(argument)
     return accessible


 def add_stdout_logger(level='debug'):
     """
     Add a stdout target for the transmissionrpc logging.
@ -204,7 +190,6 @@ def add_stdout_logger(level='debug'):
     loghandler.setLevel(loglevel)
     trpc_logger.addHandler(loghandler)


 def add_file_logger(filepath, level='debug'):
     """
     Add a file target for the transmissionrpc logging.
@ -219,5 +204,4 @@ def add_file_logger(filepath, level='debug'):
     loghandler.setLevel(loglevel)
     trpc_logger.addHandler(loghandler)


 Field = namedtuple('Field', ['value', 'dirty'])
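A few example calls for the helpers above (values chosen for illustration):

    import datetime
    from transmissionrpc.utils import format_size, format_speed, format_timedelta, inet_address

    format_size(2048)                     # (2.0, 'KiB')
    format_speed(1536)                    # (1.5, 'KiB/s')
    format_timedelta(datetime.timedelta(days=1, seconds=3725))  # '1 01:02:05'
    inet_address('localhost:9091', 9091)  # ('localhost', 9091)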

View file

@ -25,7 +25,7 @@ Typical usage:
 Note: see the rox.Options module for a higher-level API for managing options.
 """

-import os
+import os, stat

 _home = os.path.expanduser('~')
 xdg_data_home = os.environ.get('XDG_DATA_HOME') or \
@ -131,15 +131,30 @@ def get_runtime_dir(strict=True):
     import getpass
     fallback = '/tmp/pyxdg-runtime-dir-fallback-' + getpass.getuser()
+    create = False
+
     try:
-        os.mkdir(fallback, 0o700)
+        # This must be a real directory, not a symlink, so attackers can't
+        # point it elsewhere. So we use lstat to check it.
+        st = os.lstat(fallback)
     except OSError as e:
         import errno
-        if e.errno == errno.EEXIST:
-            # Already exists - set 700 permissions again.
-            import stat
-            os.chmod(fallback, stat.S_IRUSR|stat.S_IWUSR|stat.S_IXUSR)
-        else: # pragma: no cover
+        if e.errno == errno.ENOENT:
+            create = True
+        else:
             raise
+    else:
+        # The fallback must be a directory
+        if not stat.S_ISDIR(st.st_mode):
+            os.unlink(fallback)
+            create = True
+        # Must be owned by the user and not accessible by anyone else
+        elif (st.st_uid != os.getuid()) \
+            or (st.st_mode & (stat.S_IRWXG | stat.S_IRWXO)):
+            os.rmdir(fallback)
+            create = True
+
+    if create:
+        os.mkdir(fallback, 0o700)

     return fallback
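Usage is unchanged by the hardening above; a minimal sketch:

    from xdg.BaseDirectory import get_runtime_dir

    # Returns $XDG_RUNTIME_DIR, or with strict=False falls back to the
    # per-user /tmp directory created by the checks above.
    run_dir = get_runtime_dir(strict=False)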

View file

@ -1,5 +1,5 @@
""" """
Complete implementation of the XDG Desktop Entry Specification Version 0.9.4 Complete implementation of the XDG Desktop Entry Specification
http://standards.freedesktop.org/desktop-entry-spec/ http://standards.freedesktop.org/desktop-entry-spec/
Not supported: Not supported:
@ -13,6 +13,7 @@ Not supported:
from xdg.IniFile import IniFile, is_ascii from xdg.IniFile import IniFile, is_ascii
import xdg.Locale import xdg.Locale
from xdg.Exceptions import ParsingError from xdg.Exceptions import ParsingError
from xdg.util import which
import os.path import os.path
import re import re
import warnings import warnings
@ -23,7 +24,7 @@ class DesktopEntry(IniFile):
defaultGroup = 'Desktop Entry' defaultGroup = 'Desktop Entry'
def __init__(self, filename=None): def __init__(self, filename=None):
"""Create a new DesktopEntry """Create a new DesktopEntry.
If filename exists, it will be parsed as a desktop entry file. If not, If filename exists, it will be parsed as a desktop entry file. If not,
or if filename is None, a blank DesktopEntry is created. or if filename is None, a blank DesktopEntry is created.
@ -38,8 +39,22 @@ class DesktopEntry(IniFile):
return self.getName() return self.getName()
def parse(self, file): def parse(self, file):
"""Parse a desktop entry file.""" """Parse a desktop entry file.
This can raise :class:`~xdg.Exceptions.ParsingError`,
:class:`~xdg.Exceptions.DuplicateGroupError` or
:class:`~xdg.Exceptions.DuplicateKeyError`.
"""
IniFile.parse(self, file, ["Desktop Entry", "KDE Desktop Entry"]) IniFile.parse(self, file, ["Desktop Entry", "KDE Desktop Entry"])
def findTryExec(self):
"""Looks in the PATH for the executable given in the TryExec field.
Returns the full path to the executable if it is found, None if not.
Raises :class:`~xdg.Exceptions.NoKeyError` if TryExec is not present.
"""
tryexec = self.get('TryExec', strict=True)
return which(tryexec)
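A minimal usage sketch for findTryExec(); the .desktop path here is hypothetical, and any entry with a TryExec key behaves the same way:

from xdg.DesktopEntry import DesktopEntry
from xdg.Exceptions import NoKeyError

entry = DesktopEntry('/usr/share/applications/firefox.desktop')  # hypothetical path
try:
    exe = entry.findTryExec()   # e.g. '/usr/bin/firefox', or None if not in PATH
except NoKeyError:
    exe = None                  # the entry defines no TryExec key at all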
# start standard keys # start standard keys
def getType(self): def getType(self):
@@ -140,10 +155,11 @@ class DesktopEntry(IniFile):
# desktop entry edit stuff # desktop entry edit stuff
def new(self, filename): def new(self, filename):
"""Make this instance into a new desktop entry. """Make this instance into a new, blank desktop entry.
If filename has a .desktop extension, Type is set to Application. If it If filename has a .desktop extension, Type is set to Application. If it
has a .directory extension, Type is Directory. has a .directory extension, Type is Directory. Other extensions will
cause :class:`~xdg.Exceptions.ParsingError` to be raised.
""" """
if os.path.splitext(filename)[1] == ".desktop": if os.path.splitext(filename)[1] == ".desktop":
type = "Application" type = "Application"
@@ -185,7 +201,7 @@ class DesktopEntry(IniFile):
def checkGroup(self, group): def checkGroup(self, group):
# check if group header is valid # check if group header is valid
if not (group == self.defaultGroup \ if not (group == self.defaultGroup \
or re.match("^Desktop Action [a-zA-Z0-9\-]+$", group) \ or re.match("^Desktop Action [a-zA-Z0-9-]+$", group) \
or (re.match("^X-", group) and is_ascii(group))): or (re.match("^X-", group) and is_ascii(group))):
self.errors.append("Invalid Group name: %s" % group) self.errors.append("Invalid Group name: %s" % group)
else: else:

View file

@@ -5,6 +5,7 @@ Exception Classes for the xdg package
debug = False debug = False
class Error(Exception): class Error(Exception):
"""Base class for exceptions defined here."""
def __init__(self, msg): def __init__(self, msg):
self.msg = msg self.msg = msg
Exception.__init__(self, msg) Exception.__init__(self, msg)
@@ -12,40 +13,72 @@ class Error(Exception):
return self.msg return self.msg
class ValidationError(Error): class ValidationError(Error):
"""Raised when a file fails to validate.
The filename is the .file attribute.
"""
def __init__(self, msg, file): def __init__(self, msg, file):
self.msg = msg self.msg = msg
self.file = file self.file = file
Error.__init__(self, "ValidationError in file '%s': %s " % (file, msg)) Error.__init__(self, "ValidationError in file '%s': %s " % (file, msg))
class ParsingError(Error): class ParsingError(Error):
"""Raised when a file cannot be parsed.
The filename is the .file attribute.
"""
def __init__(self, msg, file): def __init__(self, msg, file):
self.msg = msg self.msg = msg
self.file = file self.file = file
Error.__init__(self, "ParsingError in file '%s', %s" % (file, msg)) Error.__init__(self, "ParsingError in file '%s', %s" % (file, msg))
class NoKeyError(Error): class NoKeyError(Error):
"""Raised when trying to access a nonexistant key in an INI-style file.
Attributes are .key, .group and .file.
"""
def __init__(self, key, group, file): def __init__(self, key, group, file):
Error.__init__(self, "No key '%s' in group %s of file %s" % (key, group, file)) Error.__init__(self, "No key '%s' in group %s of file %s" % (key, group, file))
self.key = key self.key = key
self.group = group self.group = group
self.file = file
class DuplicateKeyError(Error): class DuplicateKeyError(Error):
"""Raised when the same key occurs twice in an INI-style file.
Attributes are .key, .group and .file.
"""
def __init__(self, key, group, file): def __init__(self, key, group, file):
Error.__init__(self, "Duplicate key '%s' in group %s of file %s" % (key, group, file)) Error.__init__(self, "Duplicate key '%s' in group %s of file %s" % (key, group, file))
self.key = key self.key = key
self.group = group self.group = group
self.file = file
class NoGroupError(Error): class NoGroupError(Error):
"""Raised when trying to access a nonexistant group in an INI-style file.
Attributes are .group and .file.
"""
def __init__(self, group, file): def __init__(self, group, file):
Error.__init__(self, "No group: %s in file %s" % (group, file)) Error.__init__(self, "No group: %s in file %s" % (group, file))
self.group = group self.group = group
self.file = file
class DuplicateGroupError(Error): class DuplicateGroupError(Error):
"""Raised when the same key occurs twice in an INI-style file.
Attributes are .group and .file.
"""
def __init__(self, group, file): def __init__(self, group, file):
Error.__init__(self, "Duplicate group: %s in file %s" % (group, file)) Error.__init__(self, "Duplicate group: %s in file %s" % (group, file))
self.group = group self.group = group
self.file = file
class NoThemeError(Error): class NoThemeError(Error):
"""Raised when trying to access a nonexistant icon theme.
The name of the theme is the .theme attribute.
"""
def __init__(self, theme): def __init__(self, theme):
Error.__init__(self, "No such icon-theme: %s" % theme) Error.__init__(self, "No such icon-theme: %s" % theme)
self.theme = theme self.theme = theme

View file

@@ -1,5 +1,5 @@
""" """
Complete implementation of the XDG Icon Spec Version 0.8 Complete implementation of the XDG Icon Spec
http://standards.freedesktop.org/icon-theme-spec/ http://standards.freedesktop.org/icon-theme-spec/
""" """
@@ -37,6 +37,8 @@ class IconTheme(IniFile):
return self.get('Inherits', list=True) return self.get('Inherits', list=True)
def getDirectories(self): def getDirectories(self):
return self.get('Directories', list=True) return self.get('Directories', list=True)
def getScaledDirectories(self):
return self.get('ScaledDirectories', list=True)
def getHidden(self): def getHidden(self):
return self.get('Hidden', type="boolean") return self.get('Hidden', type="boolean")
def getExample(self): def getExample(self):
@@ -72,6 +74,10 @@ class IconTheme(IniFile):
else: else:
return 2 return 2
def getScale(self, directory):
value = self.get('Scale', type="integer", group=directory)
return value or 1
# validation stuff # validation stuff
def checkExtras(self): def checkExtras(self):
# header # header
@@ -125,7 +131,7 @@ class IconTheme(IniFile):
self.name = self.content[group]["Size"] self.name = self.content[group]["Size"]
except KeyError: except KeyError:
self.errors.append("Key 'Size' in Group '%s' is missing" % group) self.errors.append("Key 'Size' in Group '%s' is missing" % group)
elif not (re.match("^\[X-", group) and is_ascii(group)): elif not (re.match(r"^\[X-", group) and is_ascii(group)):
self.errors.append("Invalid Group name: %s" % group) self.errors.append("Invalid Group name: %s" % group)
def checkKey(self, key, value, group): def checkKey(self, key, value, group):
@@ -139,6 +145,8 @@ class IconTheme(IniFile):
self.checkValue(key, value, list=True) self.checkValue(key, value, list=True)
elif key == "Directories": elif key == "Directories":
self.checkValue(key, value, list=True) self.checkValue(key, value, list=True)
elif key == "ScaledDirectories":
self.checkValue(key, value, list=True)
elif key == "Hidden": elif key == "Hidden":
self.checkValue(key, value, type="boolean") self.checkValue(key, value, type="boolean")
elif key == "Example": elif key == "Example":
@@ -168,6 +176,8 @@ class IconTheme(IniFile):
self.checkValue(key, value, type="integer") self.checkValue(key, value, type="integer")
if self.type != "Threshold": if self.type != "Threshold":
self.errors.append("Key 'Threshold' give, but Type is %s" % self.type) self.errors.append("Key 'Threshold' give, but Type is %s" % self.type)
elif key == "Scale":
self.checkValue(key, value, type="integer")
elif re.match("^X-[a-zA-Z0-9-]+", key): elif re.match("^X-[a-zA-Z0-9-]+", key):
pass pass
else: else:
@@ -211,7 +221,7 @@ class IconData(IniFile):
def checkGroup(self, group): def checkGroup(self, group):
# check if group header is valid # check if group header is valid
if not (group == self.defaultGroup \ if not (group == self.defaultGroup \
or (re.match("^\[X-", group) and is_ascii(group))): or (re.match(r"^\[X-", group) and is_ascii(group))):
self.errors.append("Invalid Group name: %s" % group.encode("ascii", "replace")) self.errors.append("Invalid Group name: %s" % group.encode("ascii", "replace"))
def checkKey(self, key, value, group): def checkKey(self, key, value, group):

View file

@@ -102,7 +102,7 @@ class IniFile:
raise ParsingError("[%s]-Header missing" % headers[0], filename) raise ParsingError("[%s]-Header missing" % headers[0], filename)
# start stuff to access the keys # start stuff to access the keys
def get(self, key, group=None, locale=False, type="string", list=False): def get(self, key, group=None, locale=False, type="string", list=False, strict=False):
# set default group # set default group
if not group: if not group:
group = self.defaultGroup group = self.defaultGroup
@@ -114,7 +114,7 @@ class IniFile:
else: else:
value = self.content[group][key] value = self.content[group][key]
else: else:
if debug: if strict or debug:
if group not in self.content: if group not in self.content:
raise NoGroupError(group, self.filename) raise NoGroupError(group, self.filename)
elif key not in self.content[group]: elif key not in self.content[group]:
@@ -192,8 +192,8 @@ class IniFile:
# start validation stuff # start validation stuff
def validate(self, report="All"): def validate(self, report="All"):
"""Validate the contents, raising ``ValidationError`` if there """Validate the contents, raising :class:`~xdg.Exceptions.ValidationError`
is anything amiss. if there is anything amiss.
report can be 'All' / 'Warnings' / 'Errors' report can be 'All' / 'Warnings' / 'Errors'
""" """

View file

@@ -9,7 +9,7 @@ http://cvs.sourceforge.net/viewcvs.py/rox/ROX-Lib2/python/rox/i18n.py?rev=1.3&vi
import os import os
from locale import normalize from locale import normalize
regex = "(\[([a-zA-Z]+)(_[a-zA-Z]+)?(\.[a-zA-Z\-0-9]+)?(@[a-zA-Z]+)?\])?" regex = r"(\[([a-zA-Z]+)(_[a-zA-Z]+)?(\.[a-zA-Z0-9-]+)?(@[a-zA-Z]+)?\])?"
def _expand_lang(locale): def _expand_lang(locale):
locale = normalize(locale) locale = normalize(locale)

File diff suppressed because it is too large

View file

@@ -1,14 +1,14 @@
""" CLass to edit XDG Menus """ """ CLass to edit XDG Menus """
from xdg.Menu import *
from xdg.BaseDirectory import *
from xdg.Exceptions import *
from xdg.DesktopEntry import *
from xdg.Config import *
import xml.dom.minidom
import os import os
import re try:
import xml.etree.cElementTree as etree
except ImportError:
import xml.etree.ElementTree as etree
from xdg.Menu import Menu, MenuEntry, Layout, Separator, XMLMenuBuilder
from xdg.BaseDirectory import xdg_config_dirs, xdg_data_dirs
from xdg.Exceptions import ParsingError
from xdg.Config import setRootMode
# XML-Cleanups: Move / Exclude # XML-Cleanups: Move / Exclude
# FIXME: proper revert/delete # FIXME: proper revert/delete
@@ -20,28 +20,31 @@ import re
# FIXME: Advanced MenuEditing Stuff: LegacyDir/MergeFile # FIXME: Advanced MenuEditing Stuff: LegacyDir/MergeFile
# Complex Rules/Deleted/OnlyAllocated/AppDirs/DirectoryDirs # Complex Rules/Deleted/OnlyAllocated/AppDirs/DirectoryDirs
class MenuEditor:
class MenuEditor(object):
def __init__(self, menu=None, filename=None, root=False): def __init__(self, menu=None, filename=None, root=False):
self.menu = None self.menu = None
self.filename = None self.filename = None
self.doc = None self.tree = None
self.parser = XMLMenuBuilder()
self.parse(menu, filename, root) self.parse(menu, filename, root)
# fix for creating two menus with the same name on the fly # fix for creating two menus with the same name on the fly
self.filenames = [] self.filenames = []
def parse(self, menu=None, filename=None, root=False): def parse(self, menu=None, filename=None, root=False):
if root == True: if root:
setRootMode(True) setRootMode(True)
if isinstance(menu, Menu): if isinstance(menu, Menu):
self.menu = menu self.menu = menu
elif menu: elif menu:
self.menu = parse(menu) self.menu = self.parser.parse(menu)
else: else:
self.menu = parse() self.menu = self.parser.parse()
if root == True: if root:
self.filename = self.menu.Filename self.filename = self.menu.Filename
elif filename: elif filename:
self.filename = filename self.filename = filename
@@ -49,13 +52,21 @@ class MenuEditor:
self.filename = os.path.join(xdg_config_dirs[0], "menus", os.path.split(self.menu.Filename)[1]) self.filename = os.path.join(xdg_config_dirs[0], "menus", os.path.split(self.menu.Filename)[1])
try: try:
self.doc = xml.dom.minidom.parse(self.filename) self.tree = etree.parse(self.filename)
except IOError: except IOError:
self.doc = xml.dom.minidom.parseString('<!DOCTYPE Menu PUBLIC "-//freedesktop//DTD Menu 1.0//EN" "http://standards.freedesktop.org/menu-spec/menu-1.0.dtd"><Menu><Name>Applications</Name><MergeFile type="parent">'+self.menu.Filename+'</MergeFile></Menu>') root = etree.fromstring("""
except xml.parsers.expat.ExpatError: <!DOCTYPE Menu PUBLIC "-//freedesktop//DTD Menu 1.0//EN" "http://standards.freedesktop.org/menu-spec/menu-1.0.dtd">
<Menu>
<Name>Applications</Name>
<MergeFile type="parent">%s</MergeFile>
</Menu>
""" % self.menu.Filename)
self.tree = etree.ElementTree(root)
except etree.ParseError:
raise ParsingError('Not a valid .menu file', self.filename) raise ParsingError('Not a valid .menu file', self.filename)
self.__remove_whilespace_nodes(self.doc) #FIXME: is this needed with etree ?
self.__remove_whitespace_nodes(self.tree.getroot())
def save(self): def save(self):
self.__saveEntries(self.menu) self.__saveEntries(self.menu)
@@ -67,7 +78,7 @@ class MenuEditor:
self.__addEntry(parent, menuentry, after, before) self.__addEntry(parent, menuentry, after, before)
sort(self.menu) self.menu.sort()
return menuentry return menuentry
@@ -83,7 +94,7 @@ class MenuEditor:
self.__addEntry(parent, menu, after, before) self.__addEntry(parent, menu, after, before)
sort(self.menu) self.menu.sort()
return menu return menu
@@ -92,7 +103,7 @@ class MenuEditor:
self.__addEntry(parent, separator, after, before) self.__addEntry(parent, separator, after, before)
sort(self.menu) self.menu.sort()
return separator return separator
@@ -100,7 +111,7 @@ class MenuEditor:
self.__deleteEntry(oldparent, menuentry, after, before) self.__deleteEntry(oldparent, menuentry, after, before)
self.__addEntry(newparent, menuentry, after, before) self.__addEntry(newparent, menuentry, after, before)
sort(self.menu) self.menu.sort()
return menuentry return menuentry
@@ -112,7 +123,7 @@ class MenuEditor:
if oldparent.getPath(True) != newparent.getPath(True): if oldparent.getPath(True) != newparent.getPath(True):
self.__addXmlMove(root_menu, os.path.join(oldparent.getPath(True), menu.Name), os.path.join(newparent.getPath(True), menu.Name)) self.__addXmlMove(root_menu, os.path.join(oldparent.getPath(True), menu.Name), os.path.join(newparent.getPath(True), menu.Name))
sort(self.menu) self.menu.sort()
return menu return menu
@@ -120,14 +131,14 @@ class MenuEditor:
self.__deleteEntry(parent, separator, after, before) self.__deleteEntry(parent, separator, after, before)
self.__addEntry(parent, separator, after, before) self.__addEntry(parent, separator, after, before)
sort(self.menu) self.menu.sort()
return separator return separator
def copyMenuEntry(self, menuentry, oldparent, newparent, after=None, before=None): def copyMenuEntry(self, menuentry, oldparent, newparent, after=None, before=None):
self.__addEntry(newparent, menuentry, after, before) self.__addEntry(newparent, menuentry, after, before)
sort(self.menu) self.menu.sort()
return menuentry return menuentry
@@ -137,39 +148,39 @@ class MenuEditor:
if name: if name:
if not deskentry.hasKey("Name"): if not deskentry.hasKey("Name"):
deskentry.set("Name", name) deskentry.set("Name", name)
deskentry.set("Name", name, locale = True) deskentry.set("Name", name, locale=True)
if comment: if comment:
if not deskentry.hasKey("Comment"): if not deskentry.hasKey("Comment"):
deskentry.set("Comment", comment) deskentry.set("Comment", comment)
deskentry.set("Comment", comment, locale = True) deskentry.set("Comment", comment, locale=True)
if genericname: if genericname:
if not deskentry.hasKey("GnericNe"): if not deskentry.hasKey("GenericName"):
deskentry.set("GenericName", genericname) deskentry.set("GenericName", genericname)
deskentry.set("GenericName", genericname, locale = True) deskentry.set("GenericName", genericname, locale=True)
if command: if command:
deskentry.set("Exec", command) deskentry.set("Exec", command)
if icon: if icon:
deskentry.set("Icon", icon) deskentry.set("Icon", icon)
if terminal == True: if terminal is True:
deskentry.set("Terminal", "true") deskentry.set("Terminal", "true")
elif terminal == False: elif terminal is False:
deskentry.set("Terminal", "false") deskentry.set("Terminal", "false")
if nodisplay == True: if nodisplay is True:
deskentry.set("NoDisplay", "true") deskentry.set("NoDisplay", "true")
elif nodisplay == False: elif nodisplay is False:
deskentry.set("NoDisplay", "false") deskentry.set("NoDisplay", "false")
if hidden == True: if hidden is True:
deskentry.set("Hidden", "true") deskentry.set("Hidden", "true")
elif hidden == False: elif hidden is False:
deskentry.set("Hidden", "false") deskentry.set("Hidden", "false")
menuentry.updateAttributes() menuentry.updateAttributes()
if len(menuentry.Parents) > 0: if len(menuentry.Parents) > 0:
sort(self.menu) self.menu.sort()
return menuentry return menuentry
@@ -195,56 +206,58 @@ class MenuEditor:
if name: if name:
if not deskentry.hasKey("Name"): if not deskentry.hasKey("Name"):
deskentry.set("Name", name) deskentry.set("Name", name)
deskentry.set("Name", name, locale = True) deskentry.set("Name", name, locale=True)
if genericname: if genericname:
if not deskentry.hasKey("GenericName"): if not deskentry.hasKey("GenericName"):
deskentry.set("GenericName", genericname) deskentry.set("GenericName", genericname)
deskentry.set("GenericName", genericname, locale = True) deskentry.set("GenericName", genericname, locale=True)
if comment: if comment:
if not deskentry.hasKey("Comment"): if not deskentry.hasKey("Comment"):
deskentry.set("Comment", comment) deskentry.set("Comment", comment)
deskentry.set("Comment", comment, locale = True) deskentry.set("Comment", comment, locale=True)
if icon: if icon:
deskentry.set("Icon", icon) deskentry.set("Icon", icon)
if nodisplay == True: if nodisplay is True:
deskentry.set("NoDisplay", "true") deskentry.set("NoDisplay", "true")
elif nodisplay == False: elif nodisplay is False:
deskentry.set("NoDisplay", "false") deskentry.set("NoDisplay", "false")
if hidden == True: if hidden is True:
deskentry.set("Hidden", "true") deskentry.set("Hidden", "true")
elif hidden == False: elif hidden is False:
deskentry.set("Hidden", "false") deskentry.set("Hidden", "false")
menu.Directory.updateAttributes() menu.Directory.updateAttributes()
if isinstance(menu.Parent, Menu): if isinstance(menu.Parent, Menu):
sort(self.menu) self.menu.sort()
return menu return menu
def hideMenuEntry(self, menuentry): def hideMenuEntry(self, menuentry):
self.editMenuEntry(menuentry, nodisplay = True) self.editMenuEntry(menuentry, nodisplay=True)
def unhideMenuEntry(self, menuentry): def unhideMenuEntry(self, menuentry):
self.editMenuEntry(menuentry, nodisplay = False, hidden = False) self.editMenuEntry(menuentry, nodisplay=False, hidden=False)
def hideMenu(self, menu): def hideMenu(self, menu):
self.editMenu(menu, nodisplay = True) self.editMenu(menu, nodisplay=True)
def unhideMenu(self, menu): def unhideMenu(self, menu):
self.editMenu(menu, nodisplay = False, hidden = False) self.editMenu(menu, nodisplay=False, hidden=False)
xml_menu = self.__getXmlMenu(menu.getPath(True,True), False) xml_menu = self.__getXmlMenu(menu.getPath(True, True), False)
for node in self.__getXmlNodesByName(["Deleted", "NotDeleted"], xml_menu): deleted = xml_menu.findall('Deleted')
node.parentNode.removeChild(node) not_deleted = xml_menu.findall('NotDeleted')
for node in deleted + not_deleted:
xml_menu.remove(node)
def deleteMenuEntry(self, menuentry): def deleteMenuEntry(self, menuentry):
if self.getAction(menuentry) == "delete": if self.getAction(menuentry) == "delete":
self.__deleteFile(menuentry.DesktopEntry.filename) self.__deleteFile(menuentry.DesktopEntry.filename)
for parent in menuentry.Parents: for parent in menuentry.Parents:
self.__deleteEntry(parent, menuentry) self.__deleteEntry(parent, menuentry)
sort(self.menu) self.menu.sort()
return menuentry return menuentry
def revertMenuEntry(self, menuentry): def revertMenuEntry(self, menuentry):
@@ -257,7 +270,7 @@ class MenuEditor:
index = parent.MenuEntries.index(menuentry) index = parent.MenuEntries.index(menuentry)
parent.MenuEntries[index] = menuentry.Original parent.MenuEntries[index] = menuentry.Original
menuentry.Original.Parents.append(parent) menuentry.Original.Parents.append(parent)
sort(self.menu) self.menu.sort()
return menuentry return menuentry
def deleteMenu(self, menu): def deleteMenu(self, menu):
@@ -265,21 +278,22 @@ class MenuEditor:
self.__deleteFile(menu.Directory.DesktopEntry.filename) self.__deleteFile(menu.Directory.DesktopEntry.filename)
self.__deleteEntry(menu.Parent, menu) self.__deleteEntry(menu.Parent, menu)
xml_menu = self.__getXmlMenu(menu.getPath(True, True)) xml_menu = self.__getXmlMenu(menu.getPath(True, True))
xml_menu.parentNode.removeChild(xml_menu) parent = self.__get_parent_node(xml_menu)
sort(self.menu) parent.remove(xml_menu)
self.menu.sort()
return menu return menu
def revertMenu(self, menu): def revertMenu(self, menu):
if self.getAction(menu) == "revert": if self.getAction(menu) == "revert":
self.__deleteFile(menu.Directory.DesktopEntry.filename) self.__deleteFile(menu.Directory.DesktopEntry.filename)
menu.Directory = menu.Directory.Original menu.Directory = menu.Directory.Original
sort(self.menu) self.menu.sort()
return menu return menu
def deleteSeparator(self, separator): def deleteSeparator(self, separator):
self.__deleteEntry(separator.Parent, separator, after=True) self.__deleteEntry(separator.Parent, separator, after=True)
sort(self.menu) self.menu.sort()
return separator return separator
@@ -290,8 +304,9 @@ class MenuEditor:
return "none" return "none"
elif entry.Directory.getType() == "Both": elif entry.Directory.getType() == "Both":
return "revert" return "revert"
elif entry.Directory.getType() == "User" \ elif entry.Directory.getType() == "User" and (
and (len(entry.Submenus) + len(entry.MenuEntries)) == 0: len(entry.Submenus) + len(entry.MenuEntries)
) == 0:
return "delete" return "delete"
elif isinstance(entry, MenuEntry): elif isinstance(entry, MenuEntry):
@@ -318,9 +333,7 @@ class MenuEditor:
def __saveMenu(self): def __saveMenu(self):
if not os.path.isdir(os.path.dirname(self.filename)): if not os.path.isdir(os.path.dirname(self.filename)):
os.makedirs(os.path.dirname(self.filename)) os.makedirs(os.path.dirname(self.filename))
fd = open(self.filename, 'w') self.tree.write(self.filename, encoding='utf-8')
fd.write(re.sub("\n[\s]*([^\n<]*)\n[\s]*</", "\\1</", self.doc.toprettyxml().replace('<?xml version="1.0" ?>\n', '')))
fd.close()
def __getFileName(self, name, extension): def __getFileName(self, name, extension):
postfix = 0 postfix = 0
@@ -333,8 +346,9 @@ class MenuEditor:
dir = "applications" dir = "applications"
elif extension == ".directory": elif extension == ".directory":
dir = "desktop-directories" dir = "desktop-directories"
if not filename in self.filenames and not \ if filename not in self.filenames and not os.path.isfile(
os.path.isfile(os.path.join(xdg_data_dirs[0], dir, filename)): os.path.join(xdg_data_dirs[0], dir, filename)
):
self.filenames.append(filename) self.filenames.append(filename)
break break
else: else:
@@ -343,8 +357,11 @@ class MenuEditor:
return filename return filename
def __getXmlMenu(self, path, create=True, element=None): def __getXmlMenu(self, path, create=True, element=None):
# FIXME: we should also return the menu's parent,
# to avoid looking for it later on
# @see Element.getiterator()
if not element: if not element:
element = self.doc element = self.tree
if "/" in path: if "/" in path:
(name, path) = path.split("/", 1) (name, path) = path.split("/", 1)
@@ -353,17 +370,16 @@ class MenuEditor:
path = "" path = ""
found = None found = None
for node in self.__getXmlNodesByName("Menu", element): for node in element.findall("Menu"):
for child in self.__getXmlNodesByName("Name", node): name_node = node.find('Name')
if child.childNodes[0].nodeValue == name: if name_node is not None and name_node.text == name:
if path: if path:
found = self.__getXmlMenu(path, create, node) found = self.__getXmlMenu(path, create, node)
else: else:
found = node found = node
break
if found: if found:
break break
if not found and create == True: if not found and create:
node = self.__addXmlMenuElement(element, name) node = self.__addXmlMenuElement(element, name)
if path: if path:
found = self.__getXmlMenu(path, create, node) found = self.__getXmlMenu(path, create, node)
@@ -373,58 +389,62 @@ class MenuEditor:
return found return found
def __addXmlMenuElement(self, element, name): def __addXmlMenuElement(self, element, name):
node = self.doc.createElement('Menu') menu_node = etree.SubElement(element, 'Menu')
self.__addXmlTextElement(node, 'Name', name) name_node = etree.SubElement(menu_node, 'Name')
return element.appendChild(node) name_node.text = name
return menu_node
def __addXmlTextElement(self, element, name, text): def __addXmlTextElement(self, element, name, text):
node = self.doc.createElement(name) node = etree.SubElement(element, name)
text = self.doc.createTextNode(text) node.text = text
node.appendChild(text) return node
return element.appendChild(node)
def __addXmlFilename(self, element, filename, type = "Include"): def __addXmlFilename(self, element, filename, type_="Include"):
# remove old filenames # remove old filenames
for node in self.__getXmlNodesByName(["Include", "Exclude"], element): includes = element.findall('Include')
if node.childNodes[0].nodeName == "Filename" and node.childNodes[0].childNodes[0].nodeValue == filename: excludes = element.findall('Exclude')
element.removeChild(node) rules = includes + excludes
for rule in rules:
#FIXME: this finds only Rules whose FIRST child is a Filename element
if rule[0].tag == "Filename" and rule[0].text == filename:
element.remove(rule)
# shouldn't it remove all occurrences, like the following:
#filename_nodes = rule.findall('.//Filename'):
#for fn in filename_nodes:
#if fn.text == filename:
##element.remove(rule)
#parent = self.__get_parent_node(fn)
#parent.remove(fn)
# add new filename # add new filename
node = self.doc.createElement(type) node = etree.SubElement(element, type_)
node.appendChild(self.__addXmlTextElement(node, 'Filename', filename)) self.__addXmlTextElement(node, 'Filename', filename)
return element.appendChild(node) return node
def __addXmlMove(self, element, old, new): def __addXmlMove(self, element, old, new):
node = self.doc.createElement("Move") node = etree.SubElement("Move", element)
node.appendChild(self.__addXmlTextElement(node, 'Old', old)) self.__addXmlTextElement(node, 'Old', old)
node.appendChild(self.__addXmlTextElement(node, 'New', new)) self.__addXmlTextElement(node, 'New', new)
return element.appendChild(node) return node
def __addXmlLayout(self, element, layout): def __addXmlLayout(self, element, layout):
# remove old layout # remove old layout
for node in self.__getXmlNodesByName("Layout", element): for node in element.findall("Layout"):
element.removeChild(node) element.remove(node)
# add new layout # add new layout
node = self.doc.createElement("Layout") node = etree.SubElement("Layout", element)
for order in layout.order: for order in layout.order:
if order[0] == "Separator": if order[0] == "Separator":
child = self.doc.createElement("Separator") child = etree.SubElement("Separator", node)
node.appendChild(child)
elif order[0] == "Filename": elif order[0] == "Filename":
child = self.__addXmlTextElement(node, "Filename", order[1]) child = self.__addXmlTextElement(node, "Filename", order[1])
elif order[0] == "Menuname": elif order[0] == "Menuname":
child = self.__addXmlTextElement(node, "Menuname", order[1]) child = self.__addXmlTextElement(node, "Menuname", order[1])
elif order[0] == "Merge": elif order[0] == "Merge":
child = self.doc.createElement("Merge") child = etree.SubElement("Merge", node)
child.setAttribute("type", order[1]) child.attrib["type"] = order[1]
node.appendChild(child) return node
return element.appendChild(node)
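For reference, the stdlib signature is etree.SubElement(parent, tag) with the parent first; a standalone sketch of the pattern the helpers above rely on:

import xml.etree.ElementTree as etree

menu = etree.Element('Menu')
name = etree.SubElement(menu, 'Name')   # SubElement(parent, tag)
name.text = 'Applications'
print(etree.tostring(menu))             # b'<Menu><Name>Applications</Name></Menu>'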
def __getXmlNodesByName(self, name, element):
for child in element.childNodes:
if child.nodeType == xml.dom.Node.ELEMENT_NODE and child.nodeName in name:
yield child
def __addLayout(self, parent): def __addLayout(self, parent):
layout = Layout() layout = Layout()
@@ -498,14 +518,24 @@ class MenuEditor:
except ValueError: except ValueError:
pass pass
def __remove_whilespace_nodes(self, node): def __remove_whitespace_nodes(self, node):
remove_list = [] for child in node:
for child in node.childNodes: text = (child.text or '').strip()
if child.nodeType == xml.dom.minidom.Node.TEXT_NODE: if not text:
child.data = child.data.strip() child.text = ''
if not child.data.strip(): tail = (child.tail or '').strip()
remove_list.append(child) if not tail:
elif child.hasChildNodes(): child.tail = ''
if len(child):
self.__remove_whilespace_nodes(child) self.__remove_whitespace_nodes(child)
for node in remove_list:
node.parentNode.removeChild(node) def __get_parent_node(self, node):
# elements in ElementTree don't hold a reference to their parent
for parent, child in self.__iter_parent():
if child is node:
return parent
def __iter_parent(self):
for parent in self.tree.getiterator():
for child in parent:
yield parent, child

View file

@@ -20,6 +20,7 @@ information about the format of these files.
""" """
import os import os
import re
import stat import stat
import sys import sys
import fnmatch import fnmatch
@@ -46,25 +47,42 @@ def _get_node_data(node):
return ''.join([n.nodeValue for n in node.childNodes]).strip() return ''.join([n.nodeValue for n in node.childNodes]).strip()
def lookup(media, subtype = None): def lookup(media, subtype = None):
"""Get the MIMEtype object for this type, creating a new one if needed. """Get the MIMEtype object for the given type.
This remains for backwards compatibility; calling MIMEtype now does
the same thing.
The name can either be passed as one part ('text/plain'), or as two The name can either be passed as one part ('text/plain'), or as two
('text', 'plain'). ('text', 'plain').
""" """
if subtype is None and '/' in media: return MIMEtype(media, subtype)
media, subtype = media.split('/', 1)
if (media, subtype) not in types:
types[(media, subtype)] = MIMEtype(media, subtype)
return types[(media, subtype)]
class MIMEtype: class MIMEtype(object):
"""Type holding data about a MIME type""" """Class holding data about a MIME type.
def __init__(self, media, subtype):
"Don't use this constructor directly; use mime.lookup() instead." Calling the class will return a cached instance, so there is only one
assert media and '/' not in media instance for each MIME type. The name can either be passed as one part
assert subtype and '/' not in subtype ('text/plain'), or as two ('text', 'plain').
assert (media, subtype) not in types """
def __new__(cls, media, subtype=None):
if subtype is None and '/' in media:
media, subtype = media.split('/', 1)
assert '/' not in subtype
media = media.lower()
subtype = subtype.lower()
try:
return types[(media, subtype)]
except KeyError:
mtype = super(MIMEtype, cls).__new__(cls)
mtype._init(media, subtype)
types[(media, subtype)] = mtype
return mtype
# If this is done in __init__, it is automatically called again each time
# the MIMEtype is returned by __new__, which we don't want. So we call it
# explicitly only when we construct a new instance.
def _init(self, media, subtype):
self.media = media self.media = media
self.subtype = subtype self.subtype = subtype
self._comment = None self._comment = None
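Constructing a MIMEtype is thus an interning operation; a quick sketch:

from xdg.Mime import MIMEtype, lookup

a = MIMEtype('text/plain')      # one-part form
b = MIMEtype('text', 'plain')   # two-part form
c = lookup('text/plain')        # backwards-compatible helper
assert a is b is c              # a single cached instance per type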
@@ -109,100 +127,106 @@ class MIMEtype:
return self.media + '/' + self.subtype return self.media + '/' + self.subtype
def __repr__(self): def __repr__(self):
return '<%s: %s>' % (self, self._comment or '(comment not loaded)') return 'MIMEtype(%r, %r)' % (self.media, self.subtype)
def __hash__(self):
return hash(self.media) ^ hash(self.subtype)
class UnknownMagicRuleFormat(ValueError):
pass
class DiscardMagicRules(Exception):
"Raised when __NOMAGIC__ is found, and caught to discard previous rules."
pass
class MagicRule: class MagicRule:
def __init__(self, f): also = None
self.next=None
self.prev=None def __init__(self, start, value, mask, word, range):
self.start = start
self.value = value
self.mask = mask
self.word = word
self.range = range
rule_ending_re = re.compile(br'(?:~(\d+))?(?:\+(\d+))?\n$')
@classmethod
def from_file(cls, f):
"""Read a rule from the binary magics file. Returns a 2-tuple of
the nesting depth and the MagicRule."""
line = f.readline()
#print line #print line
ind=b''
while True:
c=f.read(1)
if c == b'>':
break
ind+=c
if not ind:
self.nest=0
else:
self.nest=int(ind.decode('ascii'))
start = b''
while True:
c = f.read(1)
if c == b'=':
break
start += c
self.start = int(start.decode('ascii'))
hb=f.read(1) # [indent] '>'
lb=f.read(1) nest_depth, line = line.split(b'>', 1)
self.lenvalue = ord(lb)+(ord(hb)<<8) nest_depth = int(nest_depth) if nest_depth else 0
self.value = f.read(self.lenvalue) # start-offset '='
start, line = line.split(b'=', 1)
c = f.read(1) start = int(start)
if c == b'&':
self.mask = f.read(self.lenvalue)
c = f.read(1)
else:
self.mask=None
if c == b'~':
w = b''
while c!=b'+' and c!=b'\n':
c=f.read(1)
if c==b'+' or c==b'\n':
break
w+=c
self.word=int(w.decode('ascii'))
else:
self.word=1
if c==b'+':
r=b''
while c!=b'\n':
c=f.read(1)
if c==b'\n':
break
r+=c
#print r
self.range = int(r.decode('ascii'))
else:
self.range = 1
if c != b'\n':
raise ValueError('Malformed MIME magic line')
def getLength(self):
return self.start+self.lenvalue+self.range
def appendRule(self, rule):
if self.nest<rule.nest:
self.next=rule
rule.prev=self
elif self.prev:
self.prev.appendRule(rule)
if line == b'__NOMAGIC__\n':
raise DiscardMagicRules
# value length (2 bytes, big endian)
if sys.version_info[0] >= 3:
lenvalue = int.from_bytes(line[:2], byteorder='big')
else:
lenvalue = (ord(line[0])<<8)+ord(line[1])
line = line[2:]
# value
# This can contain newlines, so we may need to read more lines
while len(line) <= lenvalue:
line += f.readline()
value, line = line[:lenvalue], line[lenvalue:]
# ['&' mask]
if line.startswith(b'&'):
# This can contain newlines, so we may need to read more lines
while len(line) <= lenvalue:
line += f.readline()
mask, line = line[1:lenvalue+1], line[lenvalue+1:]
else:
mask = None
# ['~' word-size] ['+' range-length]
ending = cls.rule_ending_re.match(line)
if not ending:
# Per the spec, this will be caught and ignored, to allow
# for future extensions.
raise UnknownMagicRuleFormat(repr(line))
word, range = ending.groups()
word = int(word) if (word is not None) else 1
range = int(range) if (range is not None) else 1
return nest_depth, cls(start, value, mask, word, range)
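A worked example of the rule line format parsed above — [indent] '>' start '=' <2-byte big-endian length> value ['&' mask] ['~' word] ['+' range] '\n' — using a made-up rule for the PDF signature:

import io
from xdg.Mime import MagicRule

raw = b'>0=\x00\x04%PDF\n'   # hypothetical rule: depth 0, offset 0, 4-byte value
depth, rule = MagicRule.from_file(io.BytesIO(raw))
assert depth == 0 and rule.start == 0 and rule.value == b'%PDF'
assert rule.mask is None and rule.word == 1 and rule.range == 1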
def maxlen(self):
l = self.start + len(self.value) + self.range
if self.also:
return max(l, self.also.maxlen())
return l
def match(self, buffer): def match(self, buffer):
if self.match0(buffer): if self.match0(buffer):
if self.next: if self.also:
return self.next.match(buffer) return self.also.match(buffer)
return True return True
def match0(self, buffer): def match0(self, buffer):
l=len(buffer) l=len(buffer)
lenvalue = len(self.value)
for o in range(self.range): for o in range(self.range):
s=self.start+o s=self.start+o
e=s+self.lenvalue e=s+lenvalue
if l<e: if l<e:
return False return False
if self.mask: if self.mask:
test='' test=''
for i in range(self.lenvalue): for i in range(lenvalue):
if PY3: if PY3:
c = buffer[s+i] & self.mask[i] c = buffer[s+i] & self.mask[i]
else: else:
@@ -215,46 +239,81 @@ class MagicRule:
return True return True
def __repr__(self): def __repr__(self):
return '<MagicRule %d>%d=[%d]%r&%r~%d+%d>' % (self.nest, return 'MagicRule(start=%r, value=%r, mask=%r, word=%r, range=%r)' %(
self.start, self.start,
self.lenvalue,
self.value, self.value,
self.mask, self.mask,
self.word, self.word,
self.range) self.range)
class MagicType:
def __init__(self, mtype):
self.mtype=mtype
self.top_rules=[]
self.last_rule=None
def getLine(self, f):
nrule=MagicRule(f)
if nrule.nest and self.last_rule:
self.last_rule.appendRule(nrule)
else:
self.top_rules.append(nrule)
self.last_rule=nrule
return nrule
class MagicMatchAny(object):
"""Match any of a set of magic rules.
This has a similar interface to MagicRule objects (i.e. its match() and
maxlen() methods), to allow for duck typing.
"""
def __init__(self, rules):
self.rules = rules
def match(self, buffer): def match(self, buffer):
for rule in self.top_rules: return any(r.match(buffer) for r in self.rules)
if rule.match(buffer):
return self.mtype def maxlen(self):
return max(r.maxlen() for r in self.rules)
def __repr__(self):
return '<MagicType %s>' % self.mtype @classmethod
def from_file(cls, f):
"""Read a set of rules from the binary magic file."""
c=f.read(1)
f.seek(-1, 1)
depths_rules = []
while c and c != b'[':
try:
depths_rules.append(MagicRule.from_file(f))
except UnknownMagicRuleFormat:
# Ignored to allow for extensions to the rule format.
pass
c=f.read(1)
if c:
f.seek(-1, 1)
# Build the rule tree
tree = [] # (rule, [(subrule,[subsubrule,...]), ...])
insert_points = {0:tree}
for depth, rule in depths_rules:
subrules = []
insert_points[depth].append((rule, subrules))
insert_points[depth+1] = subrules
return cls.from_rule_tree(tree)
@classmethod
def from_rule_tree(cls, tree):
"""From a nested list of (rule, subrules) pairs, build a MagicMatchAny
instance, recursing down the tree.
Where there's only one top-level rule, this is returned directly,
to simplify the nested structure. Returns None if no rules were read.
"""
rules = []
for rule, subrules in tree:
if subrules:
rule.also = cls.from_rule_tree(subrules)
rules.append(rule)
if len(rules)==0:
return None
if len(rules)==1:
return rules[0]
return cls(rules)
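A sketch of how the depth bookkeeping composes rules (made-up RIFF/WAVE-style rules; a depth-0 parent followed by two depth-1 children means parent AND (child OR child)), assuming the usual byte-slice comparison in match0():

from xdg.Mime import MagicRule, MagicMatchAny

parent = MagicRule(start=0, value=b'RIFF', mask=None, word=1, range=1)
wave = MagicRule(start=8, value=b'WAVE', mask=None, word=1, range=1)
avi = MagicRule(start=8, value=b'AVI ', mask=None, word=1, range=1)
rule = MagicMatchAny.from_rule_tree([(parent, [(wave, []), (avi, [])])])
assert rule is parent                          # one top-level rule is returned directly
assert isinstance(parent.also, MagicMatchAny)  # children become the 'also' branch
assert rule.match(b'RIFF\x00\x00\x00\x00WAVE')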
class MagicDB: class MagicDB:
def __init__(self): def __init__(self):
self.types={} # Indexed by priority, each entry is a list of type rules self.bytype = defaultdict(list) # mimetype -> [(priority, rule), ...]
self.maxlen=0
def mergeFile(self, fname): def merge_file(self, fname):
"""Read a magic binary file, and add its rules to this MagicDB."""
with open(fname, 'rb') as f: with open(fname, 'rb') as f:
line = f.readline() line = f.readline()
if line != b'MIME-Magic\0\n': if line != b'MIME-Magic\0\n':
@@ -262,68 +321,210 @@ class MagicDB:
while True: while True:
shead = f.readline().decode('ascii') shead = f.readline().decode('ascii')
#print shead #print(shead)
if not shead: if not shead:
break break
if shead[0] != '[' or shead[-2:] != ']\n': if shead[0] != '[' or shead[-2:] != ']\n':
raise ValueError('Malformed section heading') raise ValueError('Malformed section heading', shead)
pri, tname = shead[1:-2].split(':') pri, tname = shead[1:-2].split(':')
#print shead[1:-2] #print shead[1:-2]
pri = int(pri) pri = int(pri)
mtype = lookup(tname) mtype = lookup(tname)
try: try:
ents = self.types[pri] rule = MagicMatchAny.from_file(f)
except: except DiscardMagicRules:
ents = [] self.bytype.pop(mtype, None)
self.types[pri] = ents rule = MagicMatchAny.from_file(f)
if rule is None:
continue
#print rule
magictype = MagicType(mtype) self.bytype[mtype].append((pri, rule))
#print tname
#rline=f.readline() def finalise(self):
c=f.read(1) """Prepare the MagicDB for matching.
f.seek(-1, 1)
while c and c != b'[': This should be called after all rules have been merged into it.
rule=magictype.getLine(f) """
#print rule maxlen = 0
if rule and rule.getLength() > self.maxlen: self.alltypes = [] # (priority, mimetype, rule)
self.maxlen = rule.getLength()
c = f.read(1) for mtype, rules in self.bytype.items():
f.seek(-1, 1) for pri, rule in rules:
self.alltypes.append((pri, mtype, rule))
maxlen = max(maxlen, rule.maxlen())
ents.append(magictype) self.maxlen = maxlen # Number of bytes to read from files
#self.types[pri]=ents self.alltypes.sort(key=lambda x: x[0], reverse=True)
if not c:
break
def match_data(self, data, max_pri=100, min_pri=0): def match_data(self, data, max_pri=100, min_pri=0, possible=None):
for priority in sorted(self.types.keys(), reverse=True): """Do magic sniffing on some bytes.
max_pri & min_pri can be used to specify the maximum & minimum priority
rules to look for. possible can be a list of mimetypes to check, or None
(the default) to check all mimetypes until one matches.
Returns the MIMEtype found, or None if no entries match.
"""
if possible is not None:
types = []
for mt in possible:
for pri, rule in self.bytype[mt]:
types.append((pri, mt, rule))
types.sort(key=lambda x: x[0], reverse=True)  # highest priority first, matching alltypes
else:
types = self.alltypes
for priority, mimetype, rule in types:
#print priority, max_pri, min_pri #print priority, max_pri, min_pri
if priority > max_pri: if priority > max_pri:
continue continue
if priority < min_pri: if priority < min_pri:
break break
for type in self.types[priority]:
m=type.match(data) if rule.match(data):
if m: return mimetype
return m
def match(self, path, max_pri=100, min_pri=0): def match(self, path, max_pri=100, min_pri=0, possible=None):
try: """Read data from the file and do magic sniffing on it.
with open(path, 'rb') as f:
buf = f.read(self.maxlen) max_pri & min_pri can be used to specify the maximum & minimum priority
return self.match_data(buf, max_pri, min_pri) rules to look for. possible can be a list of mimetypes to check, or None
except: (the default) to check all mimetypes until one matches.
pass
Returns the MIMEtype found, or None if no entries match. Raises IOError
if the file can't be opened.
"""
with open(path, 'rb') as f:
buf = f.read(self.maxlen)
return self.match_data(buf, max_pri, min_pri, possible)
def __repr__(self): def __repr__(self):
return '<MagicDB %s>' % self.types return '<MagicDB (%d types)>' % len(self.alltypes)
class GlobDB(object):
def __init__(self):
"""Prepare the GlobDB. It can't actually be used until .finalise() is
called, but merge_file() can be used to add data before that.
"""
# Maps mimetype to {(weight, glob, flags), ...}
self.allglobs = defaultdict(set)
def merge_file(self, path):
"""Loads name matching information from a globs2 file."""#
allglobs = self.allglobs
with open(path) as f:
for line in f:
if line.startswith('#'): continue # Comment
fields = line[:-1].split(':')
weight, type_name, pattern = fields[:3]
weight = int(weight)
mtype = lookup(type_name)
if len(fields) > 3:
flags = fields[3].split(',')
else:
flags = ()
if pattern == '__NOGLOBS__':
# This signals to discard any previous globs
allglobs.pop(mtype, None)
continue
allglobs[mtype].add((weight, pattern, tuple(flags)))
def finalise(self):
"""Prepare the GlobDB for matching.
This should be called after all files have been merged into it.
"""
self.exts = defaultdict(list) # Maps extensions to [(type, weight),...]
self.cased_exts = defaultdict(list)
self.globs = [] # List of (regex, type, weight) triplets
self.literals = {} # Maps literal names to (type, weight)
self.cased_literals = {}
for mtype, globs in self.allglobs.items():
mtype = mtype.canonical()
for weight, pattern, flags in globs:
cased = 'cs' in flags
if pattern.startswith('*.'):
# *.foo -- extension pattern
rest = pattern[2:]
if not ('*' in rest or '[' in rest or '?' in rest):
if cased:
self.cased_exts[rest].append((mtype, weight))
else:
self.exts[rest.lower()].append((mtype, weight))
continue
if ('*' in pattern or '[' in pattern or '?' in pattern):
# Translate the glob pattern to a regex & compile it
re_flags = 0 if cased else re.I
pattern = re.compile(fnmatch.translate(pattern), flags=re_flags)
self.globs.append((pattern, mtype, weight))
else:
# No wildcards - literal pattern
if cased:
self.cased_literals[pattern] = (mtype, weight)
else:
self.literals[pattern.lower()] = (mtype, weight)
# Sort globs by weight & length
self.globs.sort(reverse=True, key=lambda x: (x[2], len(x[0].pattern)) )
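A sketch of how finalise() buckets patterns; the globs here are made up and fed in directly rather than read from a globs2 file:

from xdg.Mime import GlobDB, lookup

db = GlobDB()
db.allglobs[lookup('text/x-python')].add((50, '*.py', ()))        # plain extension
db.allglobs[lookup('text/x-makefile')].add((50, 'Makefile', ()))  # literal name
db.allglobs[lookup('text/x-readme')].add((10, 'README*', ()))     # true glob -> regex
db.finalise()
assert 'py' in db.exts and 'makefile' in db.literals and db.globs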
def first_match(self, path):
"""Return the first match found for a given path, or None if no match
is found."""
try:
return next(self._match_path(path))[0]
except StopIteration:
return None
def all_matches(self, path):
"""Return a list of (MIMEtype, glob weight) pairs for the path."""
return list(self._match_path(path))
def _match_path(self, path):
"""Yields pairs of (mimetype, glob weight)."""
leaf = os.path.basename(path)
# Literals (no wildcards)
if leaf in self.cased_literals:
yield self.cased_literals[leaf]
lleaf = leaf.lower()
if lleaf in self.literals:
yield self.literals[lleaf]
# Extensions
ext = leaf
while 1:
p = ext.find('.')
if p < 0: break
ext = ext[p + 1:]
if ext in self.cased_exts:
for res in self.cased_exts[ext]:
yield res
ext = lleaf
while 1:
p = ext.find('.')
if p < 0: break
ext = ext[p+1:]
if ext in self.exts:
for res in self.exts[ext]:
yield res
# Other globs
for (regex, mime_type, weight) in self.globs:
if regex.match(leaf):
yield (mime_type, weight)
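The two extension loops above strip one leading component per iteration, so compound suffixes are tried before shorter ones; the walk, extracted as a standalone sketch:

leaf = 'archive.tar.gz'
suffixes, ext = [], leaf
while True:
    p = ext.find('.')
    if p < 0:
        break
    ext = ext[p + 1:]
    suffixes.append(ext)
assert suffixes == ['tar.gz', 'gz']   # '*.tar.gz' entries are tried before '*.gz'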
# Some well-known types # Some well-known types
text = lookup('text', 'plain') text = lookup('text', 'plain')
octet_stream = lookup('application', 'octet-stream')
inode_block = lookup('inode', 'blockdevice') inode_block = lookup('inode', 'blockdevice')
inode_char = lookup('inode', 'chardevice') inode_char = lookup('inode', 'chardevice')
inode_dir = lookup('inode', 'directory') inode_dir = lookup('inode', 'directory')
@@ -336,44 +537,12 @@ app_exe = lookup('application', 'executable')
_cache_uptodate = False _cache_uptodate = False
def _cache_database(): def _cache_database():
global exts, globs, literals, magic, aliases, inheritance, _cache_uptodate global globs, magic, aliases, inheritance, _cache_uptodate
_cache_uptodate = True _cache_uptodate = True
exts = {} # Maps extensions to types
globs = [] # List of (glob, type) pairs
literals = {} # Maps literal names to types
aliases = {} # Maps alias Mime types to canonical names aliases = {} # Maps alias Mime types to canonical names
inheritance = defaultdict(set) # Maps to sets of parent mime types. inheritance = defaultdict(set) # Maps to sets of parent mime types.
magic = MagicDB()
def _import_glob_file(path):
"""Loads name matching information from a MIME directory."""
with open(path) as f:
for line in f:
if line.startswith('#'): continue
line = line[:-1]
type_name, pattern = line.split(':', 1)
mtype = lookup(type_name)
if pattern.startswith('*.'):
rest = pattern[2:]
if not ('*' in rest or '[' in rest or '?' in rest):
exts[rest] = mtype
continue
if '*' in pattern or '[' in pattern or '?' in pattern:
globs.append((pattern, mtype))
else:
literals[pattern] = mtype
for path in BaseDirectory.load_data_paths(os.path.join('mime', 'globs')):
_import_glob_file(path)
for path in BaseDirectory.load_data_paths(os.path.join('mime', 'magic')):
magic.mergeFile(path)
# Sort globs by length
globs.sort(key=lambda x: len(x[0]) )
# Load aliases # Load aliases
for path in BaseDirectory.load_data_paths(os.path.join('mime', 'aliases')): for path in BaseDirectory.load_data_paths(os.path.join('mime', 'aliases')):
@@ -382,6 +551,18 @@ def _cache_database():
alias, canonical = line.strip().split(None, 1) alias, canonical = line.strip().split(None, 1)
aliases[alias] = canonical aliases[alias] = canonical
# Load filename patterns (globs)
globs = GlobDB()
for path in BaseDirectory.load_data_paths(os.path.join('mime', 'globs2')):
globs.merge_file(path)
globs.finalise()
# Load magic sniffing data
magic = MagicDB()
for path in BaseDirectory.load_data_paths(os.path.join('mime', 'magic')):
magic.merge_file(path)
magic.finalise()
# Load subclasses # Load subclasses
for path in BaseDirectory.load_data_paths(os.path.join('mime', 'subclasses')): for path in BaseDirectory.load_data_paths(os.path.join('mime', 'subclasses')):
with open(path, 'r') as f: with open(path, 'r') as f:
@@ -396,35 +577,7 @@ def update_cache():
def get_type_by_name(path): def get_type_by_name(path):
"""Returns type of file by its name, or None if not known""" """Returns type of file by its name, or None if not known"""
update_cache() update_cache()
return globs.first_match(path)
leaf = os.path.basename(path)
if leaf in literals:
return literals[leaf]
lleaf = leaf.lower()
if lleaf in literals:
return literals[lleaf]
ext = leaf
while 1:
p = ext.find('.')
if p < 0: break
ext = ext[p + 1:]
if ext in exts:
return exts[ext]
ext = lleaf
while 1:
p = ext.find('.')
if p < 0: break
ext = ext[p+1:]
if ext in exts:
return exts[ext]
for (glob, mime_type) in globs:
if fnmatch.fnmatch(leaf, glob):
return mime_type
if fnmatch.fnmatch(lleaf, glob):
return mime_type
return None
def get_type_by_contents(path, max_pri=100, min_pri=0): def get_type_by_contents(path, max_pri=100, min_pri=0):
"""Returns type of file by its contents, or None if not known""" """Returns type of file by its contents, or None if not known"""
@@ -438,15 +591,24 @@ def get_type_by_data(data, max_pri=100, min_pri=0):
return magic.match_data(data, max_pri, min_pri) return magic.match_data(data, max_pri, min_pri)
def _get_type_by_stat(st_mode):
"""Match special filesystem objects to Mimetypes."""
if stat.S_ISDIR(st_mode): return inode_dir
elif stat.S_ISCHR(st_mode): return inode_char
elif stat.S_ISBLK(st_mode): return inode_block
elif stat.S_ISFIFO(st_mode): return inode_fifo
elif stat.S_ISLNK(st_mode): return inode_symlink
elif stat.S_ISSOCK(st_mode): return inode_socket
return inode_door
def get_type(path, follow=True, name_pri=100): def get_type(path, follow=True, name_pri=100):
"""Returns type of file indicated by path. """Returns type of file indicated by path.
path : This function is *deprecated* - :func:`get_type2` is more accurate.
pathname to check (need not exist)
follow : :param path: pathname to check (need not exist)
when reading file, follow symbolic links :param follow: when reading file, follow symbolic links
name_pri : :param name_pri: Priority to do name matches. 100=override magic
Priority to do name matches. 100=override magic
This tries to use the contents of the file, and falls back to the name. It This tries to use the contents of the file, and falls back to the name. It
can also handle special filesystem objects like directories and sockets. can also handle special filesystem objects like directories and sockets.
@@ -463,6 +625,7 @@ def get_type(path, follow=True, name_pri=100):
return t or text return t or text
if stat.S_ISREG(st.st_mode): if stat.S_ISREG(st.st_mode):
# Regular file
t = get_type_by_contents(path, min_pri=name_pri) t = get_type_by_contents(path, min_pri=name_pri)
if not t: t = get_type_by_name(path) if not t: t = get_type_by_name(path)
if not t: t = get_type_by_contents(path, max_pri=name_pri) if not t: t = get_type_by_contents(path, max_pri=name_pri)
@@ -472,13 +635,112 @@ def get_type(path, follow=True, name_pri=100):
else: else:
return text return text
return t return t
elif stat.S_ISDIR(st.st_mode): return inode_dir else:
elif stat.S_ISCHR(st.st_mode): return inode_char return _get_type_by_stat(st.st_mode)
elif stat.S_ISBLK(st.st_mode): return inode_block
elif stat.S_ISFIFO(st.st_mode): return inode_fifo def get_type2(path, follow=True):
elif stat.S_ISLNK(st.st_mode): return inode_symlink """Find the MIMEtype of a file using the XDG recommended checking order.
elif stat.S_ISSOCK(st.st_mode): return inode_socket
return inode_door This first checks the filename, then uses file contents if the name doesn't
give an unambiguous MIMEtype. It can also handle special filesystem objects
like directories and sockets.
:param path: file path to examine (need not exist)
:param follow: whether to follow symlinks
:rtype: :class:`MIMEtype`
.. versionadded:: 1.0
"""
update_cache()
try:
st = os.stat(path) if follow else os.lstat(path)
except OSError:
return get_type_by_name(path) or octet_stream
if not stat.S_ISREG(st.st_mode):
# Special filesystem objects
return _get_type_by_stat(st.st_mode)
mtypes = sorted(globs.all_matches(path), key=(lambda x: x[1]), reverse=True)
if mtypes:
max_weight = mtypes[0][1]
i = 1
for mt, w in mtypes[1:]:
if w < max_weight:
break
i += 1
mtypes = mtypes[:i]
if len(mtypes) == 1:
return mtypes[0][0]
possible = [mt for mt,w in mtypes]
else:
possible = None # Try all magic matches
try:
t = magic.match(path, possible=possible)
except IOError:
t = None
if t:
return t
elif mtypes:
return mtypes[0][0]
elif stat.S_IMODE(st.st_mode) & 0o111:
return app_exe
else:
return text if is_text_file(path) else octet_stream
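Typical use of the new API, assuming a POSIX system with the shared-mime-info database installed:

from xdg import Mime

print(Mime.get_type2('setup.py'))     # text/x-python, from the glob match
print(Mime.get_type2('/tmp'))         # inode/directory, a special filesystem object
print(Mime.get_type2('unknown.bin'))  # magic sniffing, else one of the fallbacks above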
def is_text_file(path):
"""Guess whether a file contains text or binary data.
Heuristic: binary if the first 32 bytes include ASCII control characters.
This rule may change in future versions.
.. versionadded:: 1.0
"""
try:
f = open(path, 'rb')
except IOError:
return False
with f:
return _is_text(f.read(32))
if PY3:
def _is_text(data):
return not any(b <= 0x8 or 0xe <= b < 0x20 or b == 0x7f for b in data)
else:
def _is_text(data):
return not any(b <= '\x08' or '\x0e' <= b < '\x20' or b == '\x7f' \
for b in data)
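The helper treats bytes 0x00-0x08, 0x0E-0x1F and 0x7F as binary markers, while tab through carriage return (0x09-0x0D) stay legal; a quick check of the private helper:

assert _is_text(b'hello, world\n')         # plain ASCII
assert _is_text(b'tabs\tand\r\nnewlines')  # 0x09-0x0d are allowed
assert not _is_text(b'\x00\x01binary')     # NUL and friends flag binary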
_mime2ext_cache = None
_mime2ext_cache_uptodate = False
def get_extensions(mimetype):
"""Retrieve the set of filename extensions matching a given MIMEtype.
Extensions are returned without a leading dot, e.g. 'py'. If no extensions
are registered for the MIMEtype, returns an empty set.
The extensions are stored in a cache the first time this is called.
.. versionadded:: 1.0
"""
global _mime2ext_cache, _mime2ext_cache_uptodate
update_cache()
if not _mime2ext_cache_uptodate:
_mime2ext_cache = defaultdict(set)
for ext, mtypes in globs.exts.items():
for mtype, prio in mtypes:
_mime2ext_cache[mtype].add(ext)
_mime2ext_cache_uptodate = True
return _mime2ext_cache[mimetype]
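Example use; the exact result depends on the locally installed shared-mime-info data:

from xdg import Mime

exts = Mime.get_extensions(Mime.lookup('text/x-python'))
print(exts)   # typically includes 'py'; an empty set if the type has no globs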
def install_mime_info(application, package_file): def install_mime_info(application, package_file):
"""Copy 'package_file' as ``~/.local/share/mime/packages/<application>.xml.`` """Copy 'package_file' as ``~/.local/share/mime/packages/<application>.xml.``

View file

@@ -1,5 +1,5 @@
""" """
Implementation of the XDG Recent File Storage Specification Version 0.2 Implementation of the XDG Recent File Storage Specification
http://standards.freedesktop.org/recent-file-spec http://standards.freedesktop.org/recent-file-spec
""" """

View file

@@ -1,3 +1,3 @@
__all__ = [ "BaseDirectory", "DesktopEntry", "Menu", "Exceptions", "IniFile", "IconTheme", "Locale", "Config", "Mime", "RecentFiles", "MenuEditor" ] __all__ = [ "BaseDirectory", "DesktopEntry", "Menu", "Exceptions", "IniFile", "IconTheme", "Locale", "Config", "Mime", "RecentFiles", "MenuEditor" ]
__version__ = "0.25" __version__ = "0.26"

View file

@@ -9,3 +9,67 @@ else:
# Unicode-like literals # Unicode-like literals
def u(s): def u(s):
return s.decode('utf-8') return s.decode('utf-8')
try:
# which() is available from Python 3.3
from shutil import which
except ImportError:
import os
# This is a copy of which() from Python 3.3
def which(cmd, mode=os.F_OK | os.X_OK, path=None):
"""Given a command, mode, and a PATH string, return the path which
conforms to the given mode on the PATH, or None if there is no such
file.
`mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result
of os.environ.get("PATH"), or can be overridden with a custom search
path.
"""
# Check that a given file can be accessed with the correct mode.
# Additionally check that `file` is not a directory, as on Windows
# directories pass the os.access check.
def _access_check(fn, mode):
return (os.path.exists(fn) and os.access(fn, mode)
and not os.path.isdir(fn))
# If we're given a path with a directory part, look it up directly rather
# than referring to PATH directories. This includes checking relative to the
# current directory, e.g. ./script
if os.path.dirname(cmd):
if _access_check(cmd, mode):
return cmd
return None
path = (path or os.environ.get("PATH", os.defpath)).split(os.pathsep)
if sys.platform == "win32":
# The current directory takes precedence on Windows.
if not os.curdir in path:
path.insert(0, os.curdir)
# PATHEXT is necessary to check on Windows.
pathext = os.environ.get("PATHEXT", "").split(os.pathsep)
# See if the given file matches any of the expected path extensions.
# This will allow us to short circuit when given "python.exe".
# If it does match, only test that one, otherwise we have to try
# others.
if any(cmd.lower().endswith(ext.lower()) for ext in pathext):
files = [cmd]
else:
files = [cmd + ext for ext in pathext]
else:
# On other platforms you don't have things like PATHEXT to tell you
# what file suffixes are executable, so just pass on cmd as-is.
files = [cmd]
seen = set()
for dir in path:
normdir = os.path.normcase(dir)
if not normdir in seen:
seen.add(normdir)
for thefile in files:
name = os.path.join(dir, thefile)
if _access_check(name, mode):
return name
return None
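Usage matches shutil.which() from Python 3.3; a quick sketch on a POSIX system:

from xdg.util import which

print(which('sh'))               # e.g. '/bin/sh'
print(which('./script'))         # a directory part means a direct check, no PATH walk
print(which('no-such-command'))  # None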

requirements-py2.txt Normal file
View file

@@ -0,0 +1,3 @@
backports.functools-lru-cache
enum34
futures

View file

@@ -4,6 +4,7 @@ configobj
guessit guessit
linktastic linktastic
python-qbittorrent python-qbittorrent
pyxdg
rencode rencode
requests requests
setuptools setuptools