diff --git a/libs/backports.functools_lru_cache-1.2.1-py3.5-nspkg.pth b/libs/backports.functools_lru_cache-1.2.1-py3.5-nspkg.pth
new file mode 100644
index 00000000..0b1f79dd
--- /dev/null
+++ b/libs/backports.functools_lru_cache-1.2.1-py3.5-nspkg.pth
@@ -0,0 +1 @@
+import sys, types, os;p = os.path.join(sys._getframe(1).f_locals['sitedir'], *('backports',));ie = os.path.exists(os.path.join(p,'__init__.py'));m = not ie and sys.modules.setdefault('backports', types.ModuleType('backports'));mp = (m or []) and m.__dict__.setdefault('__path__',[]);(p not in mp) and mp.append(p)
diff --git a/libs/backports/functools_lru_cache.py b/libs/backports/functools_lru_cache.py
new file mode 100644
index 00000000..707c6c76
--- /dev/null
+++ b/libs/backports/functools_lru_cache.py
@@ -0,0 +1,184 @@
+from __future__ import absolute_import
+
+import functools
+from collections import namedtuple
+from threading import RLock
+
+_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])
+
+
+@functools.wraps(functools.update_wrapper)
+def update_wrapper(wrapper,
+ wrapped,
+ assigned = functools.WRAPPER_ASSIGNMENTS,
+ updated = functools.WRAPPER_UPDATES):
+ """
+ Patch two bugs in functools.update_wrapper.
+ """
+ # workaround for http://bugs.python.org/issue3445
+ assigned = tuple(attr for attr in assigned if hasattr(wrapped, attr))
+ wrapper = functools.update_wrapper(wrapper, wrapped, assigned, updated)
+ # workaround for https://bugs.python.org/issue17482
+ wrapper.__wrapped__ = wrapped
+ return wrapper
+
+
+class _HashedSeq(list):
+ __slots__ = 'hashvalue'
+
+ def __init__(self, tup, hash=hash):
+ self[:] = tup
+ self.hashvalue = hash(tup)
+
+ def __hash__(self):
+ return self.hashvalue
+
+
+def _make_key(args, kwds, typed,
+ kwd_mark=(object(),),
+ fasttypes=set([int, str, frozenset, type(None)]),
+ sorted=sorted, tuple=tuple, type=type, len=len):
+ 'Make a cache key from optionally typed positional and keyword arguments'
+ key = args
+ if kwds:
+ sorted_items = sorted(kwds.items())
+ key += kwd_mark
+ for item in sorted_items:
+ key += item
+ if typed:
+ key += tuple(type(v) for v in args)
+ if kwds:
+ key += tuple(type(v) for k, v in sorted_items)
+ elif len(key) == 1 and type(key[0]) in fasttypes:
+ return key[0]
+ return _HashedSeq(key)
+
+
+def lru_cache(maxsize=100, typed=False):
+ """Least-recently-used cache decorator.
+
+ If *maxsize* is set to None, the LRU features are disabled and the cache
+ can grow without bound.
+
+ If *typed* is True, arguments of different types will be cached separately.
+ For example, f(3.0) and f(3) will be treated as distinct calls with
+ distinct results.
+
+ Arguments to the cached function must be hashable.
+
+ View the cache statistics named tuple (hits, misses, maxsize, currsize) with
+ f.cache_info(). Clear the cache and statistics with f.cache_clear().
+ Access the underlying function with f.__wrapped__.
+
+ See: http://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used
+
+ """
+
+ # Users should only access the lru_cache through its public API:
+ # cache_info, cache_clear, and f.__wrapped__
+ # The internals of the lru_cache are encapsulated for thread safety and
+ # to allow the implementation to change (including a possible C version).
+
+ def decorating_function(user_function):
+
+ cache = dict()
+ stats = [0, 0] # make statistics updateable non-locally
+ HITS, MISSES = 0, 1 # names for the stats fields
+ make_key = _make_key
+ cache_get = cache.get # bound method to lookup key or return None
+ _len = len # localize the global len() function
+ lock = RLock() # because linkedlist updates aren't threadsafe
+ root = [] # root of the circular doubly linked list
+ root[:] = [root, root, None, None] # initialize by pointing to self
+ nonlocal_root = [root] # make updateable non-locally
+ PREV, NEXT, KEY, RESULT = 0, 1, 2, 3 # names for the link fields
+
+ if maxsize == 0:
+
+ def wrapper(*args, **kwds):
+ # no caching, just do a statistics update after a successful call
+ result = user_function(*args, **kwds)
+ stats[MISSES] += 1
+ return result
+
+ elif maxsize is None:
+
+ def wrapper(*args, **kwds):
+ # simple caching without ordering or size limit
+ key = make_key(args, kwds, typed)
+ result = cache_get(key, root) # root used here as a unique not-found sentinel
+ if result is not root:
+ stats[HITS] += 1
+ return result
+ result = user_function(*args, **kwds)
+ cache[key] = result
+ stats[MISSES] += 1
+ return result
+
+ else:
+
+ def wrapper(*args, **kwds):
+ # size limited caching that tracks accesses by recency
+ key = make_key(args, kwds, typed) if kwds or typed else args
+ with lock:
+ link = cache_get(key)
+ if link is not None:
+ # record recent use of the key by moving it to the front of the list
+ root, = nonlocal_root
+ link_prev, link_next, key, result = link
+ link_prev[NEXT] = link_next
+ link_next[PREV] = link_prev
+ last = root[PREV]
+ last[NEXT] = root[PREV] = link
+ link[PREV] = last
+ link[NEXT] = root
+ stats[HITS] += 1
+ return result
+ result = user_function(*args, **kwds)
+ with lock:
+ root, = nonlocal_root
+ if key in cache:
+ # getting here means that this same key was added to the
+ # cache while the lock was released. since the link
+ # update is already done, we need only return the
+ # computed result and update the count of misses.
+ pass
+ elif _len(cache) >= maxsize:
+ # use the old root to store the new key and result
+ oldroot = root
+ oldroot[KEY] = key
+ oldroot[RESULT] = result
+ # empty the oldest link and make it the new root
+ root = nonlocal_root[0] = oldroot[NEXT]
+ oldkey = root[KEY]
+ root[KEY] = root[RESULT] = None
+ # now update the cache dictionary for the new links
+ del cache[oldkey]
+ cache[key] = oldroot
+ else:
+ # put result in a new link at the front of the list
+ last = root[PREV]
+ link = [last, root, key, result]
+ last[NEXT] = root[PREV] = cache[key] = link
+ stats[MISSES] += 1
+ return result
+
+ def cache_info():
+ """Report cache statistics"""
+ with lock:
+ return _CacheInfo(stats[HITS], stats[MISSES], maxsize, len(cache))
+
+ def cache_clear():
+ """Clear the cache and cache statistics"""
+ with lock:
+ cache.clear()
+ root = nonlocal_root[0]
+ root[:] = [root, root, None, None]
+ stats[:] = [0, 0]
+
+ wrapper.__wrapped__ = user_function
+ wrapper.cache_info = cache_info
+ wrapper.cache_clear = cache_clear
+ return update_wrapper(wrapper, user_function)
+
+ return decorating_function
diff --git a/libs/jaraco.collections-1.3.2-py3.5-nspkg.pth b/libs/jaraco.collections-1.3.2-py3.5-nspkg.pth
new file mode 100644
index 00000000..c8127a57
--- /dev/null
+++ b/libs/jaraco.collections-1.3.2-py3.5-nspkg.pth
@@ -0,0 +1 @@
+import sys, types, os;p = os.path.join(sys._getframe(1).f_locals['sitedir'], *('jaraco',));ie = os.path.exists(os.path.join(p,'__init__.py'));m = not ie and sys.modules.setdefault('jaraco', types.ModuleType('jaraco'));mp = (m or []) and m.__dict__.setdefault('__path__',[]);(p not in mp) and mp.append(p)
diff --git a/libs/jaraco.functools-1.11-py2.7-nspkg.pth b/libs/jaraco.functools-1.11-py2.7-nspkg.pth
new file mode 100644
index 00000000..c8127a57
--- /dev/null
+++ b/libs/jaraco.functools-1.11-py2.7-nspkg.pth
@@ -0,0 +1 @@
+import sys, types, os;p = os.path.join(sys._getframe(1).f_locals['sitedir'], *('jaraco',));ie = os.path.exists(os.path.join(p,'__init__.py'));m = not ie and sys.modules.setdefault('jaraco', types.ModuleType('jaraco'));mp = (m or []) and m.__dict__.setdefault('__path__',[]);(p not in mp) and mp.append(p)
diff --git a/libs/jaraco.text-1.7-py3.5-nspkg.pth b/libs/jaraco.text-1.7-py3.5-nspkg.pth
new file mode 100644
index 00000000..c8127a57
--- /dev/null
+++ b/libs/jaraco.text-1.7-py3.5-nspkg.pth
@@ -0,0 +1 @@
+import sys, types, os;p = os.path.join(sys._getframe(1).f_locals['sitedir'], *('jaraco',));ie = os.path.exists(os.path.join(p,'__init__.py'));m = not ie and sys.modules.setdefault('jaraco', types.ModuleType('jaraco'));mp = (m or []) and m.__dict__.setdefault('__path__',[]);(p not in mp) and mp.append(p)
diff --git a/libs/jaraco.windows-3.6-py3.5-nspkg.pth b/libs/jaraco.windows-3.6-py3.5-nspkg.pth
new file mode 100644
index 00000000..c8127a57
--- /dev/null
+++ b/libs/jaraco.windows-3.6-py3.5-nspkg.pth
@@ -0,0 +1 @@
+import sys, types, os;p = os.path.join(sys._getframe(1).f_locals['sitedir'], *('jaraco',));ie = os.path.exists(os.path.join(p,'__init__.py'));m = not ie and sys.modules.setdefault('jaraco', types.ModuleType('jaraco'));mp = (m or []) and m.__dict__.setdefault('__path__',[]);(p not in mp) and mp.append(p)
diff --git a/libs/jaraco/__init__.py b/libs/jaraco/__init__.py
index 1b2910c2..5284146e 100644
--- a/libs/jaraco/__init__.py
+++ b/libs/jaraco/__init__.py
@@ -1,10 +1 @@
-# this is a namespace package
-__import__('pkg_resources').declare_namespace(__name__)
-
-try:
- # py2exe support (http://www.py2exe.org/index.cgi/ExeWithEggs)
- import modulefinder
- for p in __path__:
- modulefinder.AddPackagePath(__name__, p)
-except ImportError:
- pass
+__import__("pkg_resources").declare_namespace(__name__)
diff --git a/libs/jaraco/classes/__init__.py b/libs/jaraco/classes/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/libs/jaraco/classes/ancestry.py b/libs/jaraco/classes/ancestry.py
new file mode 100644
index 00000000..905c18fd
--- /dev/null
+++ b/libs/jaraco/classes/ancestry.py
@@ -0,0 +1,67 @@
+"""
+Routines for obtaining the class names
+of an object and its parent classes.
+"""
+
+from __future__ import unicode_literals
+
+def all_bases(c):
+ """
+ return a tuple of all base classes the class c has as a parent.
+ >>> object in all_bases(list)
+ True
+ """
+ return c.mro()[1:]
+
+def all_classes(c):
+ """
+ return a tuple of all classes to which c belongs
+ >>> list in all_classes(list)
+ True
+ """
+ return c.mro()
+
+# borrowed from http://code.activestate.com/recipes/576949-find-all-subclasses-of-a-given-class/
+def iter_subclasses(cls, _seen=None):
+ """
+ Generator over all subclasses of a given class, in depth-first order.
+
+ >>> bool in list(iter_subclasses(int))
+ True
+ >>> class A(object): pass
+ >>> class B(A): pass
+ >>> class C(A): pass
+ >>> class D(B,C): pass
+ >>> class E(D): pass
+ >>>
+ >>> for cls in iter_subclasses(A):
+ ... print(cls.__name__)
+ B
+ D
+ E
+ C
+ >>> # get ALL (new-style) classes currently defined
+ >>> res = [cls.__name__ for cls in iter_subclasses(object)]
+ >>> 'type' in res
+ True
+ >>> 'tuple' in res
+ True
+ >>> len(res) > 100
+ True
+ """
+
+ if not isinstance(cls, type):
+ raise TypeError('iter_subclasses must be called with '
+ 'new-style classes, not %.100r' % cls)
+ if _seen is None: _seen = set()
+ try:
+ subs = cls.__subclasses__()
+ except TypeError: # fails only when cls is type
+ subs = cls.__subclasses__(cls)
+ for sub in subs:
+ if sub in _seen:
+ continue
+ _seen.add(sub)
+ yield sub
+ for sub in iter_subclasses(sub, _seen):
+ yield sub
diff --git a/libs/jaraco/classes/meta.py b/libs/jaraco/classes/meta.py
new file mode 100644
index 00000000..cdb744d7
--- /dev/null
+++ b/libs/jaraco/classes/meta.py
@@ -0,0 +1,40 @@
+"""
+meta.py
+
+Some useful metaclasses.
+"""
+
+from __future__ import unicode_literals
+
+class LeafClassesMeta(type):
+ """
+ A metaclass for classes that keeps track of all of them that
+ aren't base classes.
+ """
+
+ _leaf_classes = set()
+
+ def __init__(cls, name, bases, attrs):
+ if not hasattr(cls, '_leaf_classes'):
+ cls._leaf_classes = set()
+ leaf_classes = getattr(cls, '_leaf_classes')
+ leaf_classes.add(cls)
+ # remove any base classes
+ leaf_classes -= set(bases)
+
+
+class TagRegistered(type):
+ """
+ As classes of this metaclass are created, they keep a registry in the
+ base class of all classes by a class attribute, indicated by attr_name.
+ """
+ attr_name = 'tag'
+
+ def __init__(cls, name, bases, namespace):
+ super(TagRegistered, cls).__init__(name, bases, namespace)
+ if not hasattr(cls, '_registry'):
+ cls._registry = {}
+ meta = cls.__class__
+ attr = getattr(cls, meta.attr_name, None)
+ if attr:
+ cls._registry[attr] = cls
diff --git a/libs/jaraco/classes/properties.py b/libs/jaraco/classes/properties.py
new file mode 100644
index 00000000..d64262a3
--- /dev/null
+++ b/libs/jaraco/classes/properties.py
@@ -0,0 +1,65 @@
+from __future__ import unicode_literals
+
+import six
+
+
+class NonDataProperty(object):
+ """Much like the property builtin, but only implements __get__,
+ making it a non-data property, and can be subsequently reset.
+
+ See http://users.rcn.com/python/download/Descriptor.htm for more
+ information.
+
+ >>> class X(object):
+ ... @NonDataProperty
+ ... def foo(self):
+ ... return 3
+ >>> x = X()
+ >>> x.foo
+ 3
+ >>> x.foo = 4
+ >>> x.foo
+ 4
+ """
+
+ def __init__(self, fget):
+ assert fget is not None, "fget cannot be none"
+ assert six.callable(fget), "fget must be callable"
+ self.fget = fget
+
+ def __get__(self, obj, objtype=None):
+ if obj is None:
+ return self
+ return self.fget(obj)
+
+
+# from http://stackoverflow.com/a/5191224
+class ClassPropertyDescriptor(object):
+
+ def __init__(self, fget, fset=None):
+ self.fget = fget
+ self.fset = fset
+
+ def __get__(self, obj, klass=None):
+ if klass is None:
+ klass = type(obj)
+ return self.fget.__get__(obj, klass)()
+
+ def __set__(self, obj, value):
+ if not self.fset:
+ raise AttributeError("can't set attribute")
+ type_ = type(obj)
+ return self.fset.__get__(obj, type_)(value)
+
+ def setter(self, func):
+ if not isinstance(func, (classmethod, staticmethod)):
+ func = classmethod(func)
+ self.fset = func
+ return self
+
+
+def classproperty(func):
+ if not isinstance(func, (classmethod, staticmethod)):
+ func = classmethod(func)
+
+ return ClassPropertyDescriptor(func)
diff --git a/libs/jaraco/collections.py b/libs/jaraco/collections.py
new file mode 100644
index 00000000..6af6ad45
--- /dev/null
+++ b/libs/jaraco/collections.py
@@ -0,0 +1,773 @@
+# -*- coding: utf-8 -*-
+
+from __future__ import absolute_import, unicode_literals, division
+
+import re
+import operator
+import collections
+import itertools
+import copy
+
+import six
+from jaraco.classes.properties import NonDataProperty
+import jaraco.text
+
+
+class DictFilter(object):
+ """
+ Takes a dict, and simulates a sub-dict based on the keys.
+
+ >>> sample = {'a': 1, 'b': 2, 'c': 3}
+ >>> filtered = DictFilter(sample, ['a', 'c'])
+ >>> filtered == {'a': 1, 'c': 3}
+ True
+
+ One can also filter by a regular expression pattern
+
+ >>> sample['d'] = 4
+ >>> sample['ef'] = 5
+
+ Here we filter for only single-character keys
+
+ >>> filtered = DictFilter(sample, include_pattern='.$')
+ >>> filtered == {'a': 1, 'b': 2, 'c': 3, 'd': 4}
+ True
+
+ Also note that DictFilter keeps a reference to the original dict, so
+ if you modify the original dict, that could modify the filtered dict.
+
+ >>> del sample['d']
+ >>> del sample['a']
+ >>> filtered == {'b': 2, 'c': 3}
+ True
+
+ """
+ def __init__(self, dict, include_keys=[], include_pattern=None):
+ self.dict = dict
+ self.specified_keys = set(include_keys)
+ if include_pattern is not None:
+ self.include_pattern = re.compile(include_pattern)
+ else:
+ # for performance, replace the pattern_keys property
+ self.pattern_keys = set()
+
+ def get_pattern_keys(self):
+ #key_matches = lambda k, v: self.include_pattern.match(k)
+ keys = filter(self.include_pattern.match, self.dict.keys())
+ return set(keys)
+ pattern_keys = NonDataProperty(get_pattern_keys)
+
+ @property
+ def include_keys(self):
+ return self.specified_keys.union(self.pattern_keys)
+
+ def keys(self):
+ return self.include_keys.intersection(self.dict.keys())
+
+ def values(self):
+ keys = self.keys()
+ values = map(self.dict.get, keys)
+ return values
+
+ def __getitem__(self, i):
+        if i not in self.include_keys:
+            raise KeyError(i)
+ return self.dict[i]
+
+ def items(self):
+ keys = self.keys()
+ values = map(self.dict.get, keys)
+ return zip(keys, values)
+
+ def __eq__(self, other):
+ return dict(self) == other
+
+ def __ne__(self, other):
+ return dict(self) != other
+
+
+def dict_map(function, dictionary):
+ """
+ dict_map is much like the built-in function map. It takes a dictionary
+    and applies a function to the values of that dictionary, returning a
+ new dictionary with the mapped values in the original keys.
+
+ >>> d = dict_map(lambda x:x+1, dict(a=1, b=2))
+ >>> d == dict(a=2,b=3)
+ True
+ """
+ return dict((key, function(value)) for key, value in dictionary.items())
+
+
+class RangeMap(dict):
+ """
+ A dictionary-like object that uses the keys as bounds for a range.
+ Inclusion of the value for that range is determined by the
+ key_match_comparator, which defaults to less-than-or-equal.
+ A value is returned for a key if it is the first key that matches in
+ the sorted list of keys.
+
+ One may supply keyword parameters to be passed to the sort function used
+ to sort keys (i.e. cmp [python 2 only], keys, reverse) as sort_params.
+
+ Let's create a map that maps 1-3 -> 'a', 4-6 -> 'b'
+
+ >>> r = RangeMap({3: 'a', 6: 'b'}) # boy, that was easy
+ >>> r[1], r[2], r[3], r[4], r[5], r[6]
+ ('a', 'a', 'a', 'b', 'b', 'b')
+
+ Even float values should work so long as the comparison operator
+ supports it.
+
+ >>> r[4.5]
+ 'b'
+
+ But you'll notice that the way rangemap is defined, it must be open-ended
+ on one side.
+
+ >>> r[0]
+ 'a'
+ >>> r[-1]
+ 'a'
+
+ One can close the open-end of the RangeMap by using undefined_value
+
+ >>> r = RangeMap({0: RangeMap.undefined_value, 3: 'a', 6: 'b'})
+ >>> r[0]
+ Traceback (most recent call last):
+ ...
+ KeyError: 0
+
+ One can get the first or last elements in the range by using RangeMap.Item
+
+ >>> last_item = RangeMap.Item(-1)
+ >>> r[last_item]
+ 'b'
+
+ .last_item is a shortcut for Item(-1)
+
+ >>> r[RangeMap.last_item]
+ 'b'
+
+ Sometimes it's useful to find the bounds for a RangeMap
+
+ >>> r.bounds()
+ (0, 6)
+
+ RangeMap supports .get(key, default)
+
+ >>> r.get(0, 'not found')
+ 'not found'
+
+ >>> r.get(7, 'not found')
+ 'not found'
+ """
+ def __init__(self, source, sort_params = {}, key_match_comparator = operator.le):
+ dict.__init__(self, source)
+ self.sort_params = sort_params
+ self.match = key_match_comparator
+
+ def __getitem__(self, item):
+ sorted_keys = sorted(self.keys(), **self.sort_params)
+ if isinstance(item, RangeMap.Item):
+ result = self.__getitem__(sorted_keys[item])
+ else:
+ key = self._find_first_match_(sorted_keys, item)
+ result = dict.__getitem__(self, key)
+ if result is RangeMap.undefined_value:
+ raise KeyError(key)
+ return result
+
+ def get(self, key, default=None):
+ """
+ Return the value for key if key is in the dictionary, else default.
+ If default is not given, it defaults to None, so that this method
+ never raises a KeyError.
+ """
+ try:
+ return self[key]
+ except KeyError:
+ return default
+
+ def _find_first_match_(self, keys, item):
+ is_match = lambda k: self.match(item, k)
+ matches = list(filter(is_match, keys))
+ if matches:
+ return matches[0]
+ raise KeyError(item)
+
+ def bounds(self):
+ sorted_keys = sorted(self.keys(), **self.sort_params)
+ return (
+ sorted_keys[RangeMap.first_item],
+ sorted_keys[RangeMap.last_item],
+ )
+
+ # some special values for the RangeMap
+ undefined_value = type(str('RangeValueUndefined'), (object,), {})()
+ class Item(int): pass
+ first_item = Item(0)
+ last_item = Item(-1)
+
+
+__identity = lambda x: x
+
+
+def sorted_items(d, key=__identity, reverse=False):
+ """
+ Return the items of the dictionary sorted by the keys
+
+ >>> sample = dict(foo=20, bar=42, baz=10)
+ >>> tuple(sorted_items(sample))
+ (('bar', 42), ('baz', 10), ('foo', 20))
+
+ >>> reverse_string = lambda s: ''.join(reversed(s))
+ >>> tuple(sorted_items(sample, key=reverse_string))
+ (('foo', 20), ('bar', 42), ('baz', 10))
+
+ >>> tuple(sorted_items(sample, reverse=True))
+ (('foo', 20), ('baz', 10), ('bar', 42))
+ """
+ # wrap the key func so it operates on the first element of each item
+ pairkey_key = lambda item: key(item[0])
+ return sorted(d.items(), key=pairkey_key, reverse=reverse)
+
+
+class KeyTransformingDict(dict):
+ """
+ A dict subclass that transforms the keys before they're used.
+ Subclasses may override the default transform_key to customize behavior.
+ """
+ @staticmethod
+ def transform_key(key):
+ return key
+
+ def __init__(self, *args, **kargs):
+ super(KeyTransformingDict, self).__init__()
+ # build a dictionary using the default constructs
+ d = dict(*args, **kargs)
+ # build this dictionary using transformed keys.
+ for item in d.items():
+ self.__setitem__(*item)
+
+ def __setitem__(self, key, val):
+ key = self.transform_key(key)
+ super(KeyTransformingDict, self).__setitem__(key, val)
+
+ def __getitem__(self, key):
+ key = self.transform_key(key)
+ return super(KeyTransformingDict, self).__getitem__(key)
+
+ def __contains__(self, key):
+ key = self.transform_key(key)
+ return super(KeyTransformingDict, self).__contains__(key)
+
+ def __delitem__(self, key):
+ key = self.transform_key(key)
+ return super(KeyTransformingDict, self).__delitem__(key)
+
+ def get(self, key, *args, **kwargs):
+ key = self.transform_key(key)
+ return super(KeyTransformingDict, self).get(key, *args, **kwargs)
+
+ def setdefault(self, key, *args, **kwargs):
+ key = self.transform_key(key)
+ return super(KeyTransformingDict, self).setdefault(key, *args, **kwargs)
+
+ def pop(self, key, *args, **kwargs):
+ key = self.transform_key(key)
+ return super(KeyTransformingDict, self).pop(key, *args, **kwargs)
+
+ def matching_key_for(self, key):
+ """
+ Given a key, return the actual key stored in self that matches.
+ Raise KeyError if the key isn't found.
+ """
+ try:
+ return next(e_key for e_key in self.keys() if e_key == key)
+ except StopIteration:
+ raise KeyError(key)
+
+
+class FoldedCaseKeyedDict(KeyTransformingDict):
+ """
+ A case-insensitive dictionary (keys are compared as insensitive
+ if they are strings).
+
+ >>> d = FoldedCaseKeyedDict()
+ >>> d['heLlo'] = 'world'
+ >>> list(d.keys()) == ['heLlo']
+ True
+ >>> list(d.values()) == ['world']
+ True
+ >>> d['hello'] == 'world'
+ True
+ >>> 'hello' in d
+ True
+ >>> 'HELLO' in d
+ True
+ >>> print(repr(FoldedCaseKeyedDict({'heLlo': 'world'})).replace("u'", "'"))
+ {'heLlo': 'world'}
+ >>> d = FoldedCaseKeyedDict({'heLlo': 'world'})
+ >>> print(d['hello'])
+ world
+ >>> print(d['Hello'])
+ world
+ >>> list(d.keys())
+ ['heLlo']
+ >>> d = FoldedCaseKeyedDict({'heLlo': 'world', 'Hello': 'world'})
+ >>> list(d.values())
+ ['world']
+ >>> key, = d.keys()
+ >>> key in ['heLlo', 'Hello']
+ True
+ >>> del d['HELLO']
+ >>> d
+ {}
+
+ get should work
+
+ >>> d['Sumthin'] = 'else'
+ >>> d.get('SUMTHIN')
+ 'else'
+ >>> d.get('OTHER', 'thing')
+ 'thing'
+ >>> del d['sumthin']
+
+ setdefault should also work
+
+ >>> d['This'] = 'that'
+ >>> print(d.setdefault('this', 'other'))
+ that
+ >>> len(d)
+ 1
+ >>> print(d['this'])
+ that
+ >>> print(d.setdefault('That', 'other'))
+ other
+ >>> print(d['THAT'])
+ other
+
+ Make it pop!
+
+ >>> print(d.pop('THAT'))
+ other
+
+ To retrieve the key in its originally-supplied form, use matching_key_for
+
+ >>> print(d.matching_key_for('this'))
+ This
+ """
+ @staticmethod
+ def transform_key(key):
+ return jaraco.text.FoldedCase(key)
+
+
+class DictAdapter(object):
+ """
+ Provide a getitem interface for attributes of an object.
+
+ Let's say you want to get at the string.lowercase property in a formatted
+ string. It's easy with DictAdapter.
+
+ >>> import string
+ >>> print("lowercase is %(ascii_lowercase)s" % DictAdapter(string))
+ lowercase is abcdefghijklmnopqrstuvwxyz
+ """
+ def __init__(self, wrapped_ob):
+ self.object = wrapped_ob
+
+ def __getitem__(self, name):
+ return getattr(self.object, name)
+
+
+class ItemsAsAttributes(object):
+ """
+ Mix-in class to enable a mapping object to provide items as
+ attributes.
+
+ >>> C = type(str('C'), (dict, ItemsAsAttributes), dict())
+ >>> i = C()
+ >>> i['foo'] = 'bar'
+ >>> i.foo
+ 'bar'
+
+ Natural attribute access takes precedence
+
+ >>> i.foo = 'henry'
+ >>> i.foo
+ 'henry'
+
+ But as you might expect, the mapping functionality is preserved.
+
+ >>> i['foo']
+ 'bar'
+
+ A normal attribute error should be raised if an attribute is
+ requested that doesn't exist.
+
+ >>> i.missing
+ Traceback (most recent call last):
+ ...
+ AttributeError: 'C' object has no attribute 'missing'
+
+ It also works on dicts that customize __getitem__
+
+ >>> missing_func = lambda self, key: 'missing item'
+ >>> C = type(str('C'), (dict, ItemsAsAttributes), dict(__missing__ = missing_func))
+ >>> i = C()
+ >>> i.missing
+ 'missing item'
+ >>> i.foo
+ 'missing item'
+ """
+ def __getattr__(self, key):
+ try:
+ return getattr(super(ItemsAsAttributes, self), key)
+ except AttributeError as e:
+ # attempt to get the value from the mapping (return self[key])
+ # but be careful not to lose the original exception context.
+ noval = object()
+ def _safe_getitem(cont, key, missing_result):
+ try:
+ return cont[key]
+ except KeyError:
+ return missing_result
+ result = _safe_getitem(self, key, noval)
+ if result is not noval:
+ return result
+ # raise the original exception, but use the original class
+ # name, not 'super'.
+ message, = e.args
+ message = message.replace('super', self.__class__.__name__, 1)
+ e.args = message,
+ raise
+
+
+def invert_map(map):
+ """
+ Given a dictionary, return another dictionary with keys and values
+ switched. If any of the values resolve to the same key, raises
+ a ValueError.
+
+ >>> numbers = dict(a=1, b=2, c=3)
+ >>> letters = invert_map(numbers)
+ >>> letters[1]
+ 'a'
+ >>> numbers['d'] = 3
+ >>> invert_map(numbers)
+ Traceback (most recent call last):
+ ...
+ ValueError: Key conflict in inverted mapping
+ """
+ res = dict((v,k) for k, v in map.items())
+ if not len(res) == len(map):
+ raise ValueError('Key conflict in inverted mapping')
+ return res
+
+
+class IdentityOverrideMap(dict):
+ """
+ A dictionary that by default maps each key to itself, but otherwise
+ acts like a normal dictionary.
+
+ >>> d = IdentityOverrideMap()
+ >>> d[42]
+ 42
+ >>> d['speed'] = 'speedo'
+ >>> print(d['speed'])
+ speedo
+ """
+
+ def __missing__(self, key):
+ return key
+
+
+class DictStack(list, collections.Mapping):
+ """
+ A stack of dictionaries that behaves as a view on those dictionaries,
+ giving preference to the last.
+
+ >>> stack = DictStack([dict(a=1, c=2), dict(b=2, a=2)])
+ >>> stack['a']
+ 2
+ >>> stack['b']
+ 2
+ >>> stack['c']
+ 2
+ >>> stack.push(dict(a=3))
+ >>> stack['a']
+ 3
+ >>> set(stack.keys()) == set(['a', 'b', 'c'])
+ True
+ >>> d = stack.pop()
+ >>> stack['a']
+ 2
+ >>> d = stack.pop()
+ >>> stack['a']
+ 1
+ """
+
+ def keys(self):
+ return list(set(itertools.chain.from_iterable(c.keys() for c in self)))
+
+ def __getitem__(self, key):
+ for scope in reversed(self):
+ if key in scope: return scope[key]
+ raise KeyError(key)
+
+ push = list.append
+
+
+class BijectiveMap(dict):
+ """
+ A Bijective Map (two-way mapping).
+
+ Implemented as a simple dictionary of 2x the size, mapping values back
+ to keys.
+
+ Note, this implementation may be incomplete. If there's not a test for
+ your use case below, it's likely to fail, so please test and send pull
+ requests or patches for additional functionality needed.
+
+
+ >>> m = BijectiveMap()
+ >>> m['a'] = 'b'
+ >>> m == {'a': 'b', 'b': 'a'}
+ True
+ >>> print(m['b'])
+ a
+
+ >>> m['c'] = 'd'
+ >>> len(m)
+ 2
+
+ Some weird things happen if you map an item to itself or overwrite a
+ single key of a pair, so it's disallowed.
+
+ >>> m['e'] = 'e'
+ Traceback (most recent call last):
+ ValueError: Key cannot map to itself
+
+ >>> m['d'] = 'e'
+ Traceback (most recent call last):
+ ValueError: Key/Value pairs may not overlap
+
+ >>> print(m.pop('d'))
+ c
+
+ >>> 'c' in m
+ False
+
+ >>> m = BijectiveMap(dict(a='b'))
+ >>> len(m)
+ 1
+ >>> print(m['b'])
+ a
+
+ >>> m = BijectiveMap()
+ >>> m.update(a='b')
+ >>> m['b']
+ 'a'
+
+ >>> del m['b']
+ >>> len(m)
+ 0
+ >>> 'a' in m
+ False
+ """
+ def __init__(self, *args, **kwargs):
+ super(BijectiveMap, self).__init__()
+ self.update(*args, **kwargs)
+
+ def __setitem__(self, item, value):
+ if item == value:
+ raise ValueError("Key cannot map to itself")
+        if self.get(item, value) != value or self.get(value, item) != item:
+ raise ValueError("Key/Value pairs may not overlap")
+ super(BijectiveMap, self).__setitem__(item, value)
+ super(BijectiveMap, self).__setitem__(value, item)
+
+ def __delitem__(self, item):
+ self.pop(item)
+
+ def __len__(self):
+ return super(BijectiveMap, self).__len__() // 2
+
+ def pop(self, key, *args, **kwargs):
+ mirror = self[key]
+ super(BijectiveMap, self).__delitem__(mirror)
+ return super(BijectiveMap, self).pop(key, *args, **kwargs)
+
+ def update(self, *args, **kwargs):
+ # build a dictionary using the default constructs
+ d = dict(*args, **kwargs)
+ # build this dictionary using transformed keys.
+ for item in d.items():
+ self.__setitem__(*item)
+
+
+class FrozenDict(collections.Mapping, collections.Hashable):
+ """
+ An immutable mapping.
+
+ >>> a = FrozenDict(a=1, b=2)
+ >>> b = FrozenDict(a=1, b=2)
+ >>> a == b
+ True
+
+ >>> a == dict(a=1, b=2)
+ True
+ >>> dict(a=1, b=2) == a
+ True
+
+ >>> a['c'] = 3
+ Traceback (most recent call last):
+ ...
+ TypeError: 'FrozenDict' object does not support item assignment
+
+ >>> a.update(y=3)
+ Traceback (most recent call last):
+ ...
+ AttributeError: 'FrozenDict' object has no attribute 'update'
+
+ Copies should compare equal
+
+ >>> copy.copy(a) == a
+ True
+
+ Copies should be the same type
+
+ >>> isinstance(copy.copy(a), FrozenDict)
+ True
+
+ FrozenDict supplies .copy(), even though collections.Mapping doesn't
+ demand it.
+
+ >>> a.copy() == a
+ True
+ >>> a.copy() is not a
+ True
+ """
+ __slots__ = ['__data']
+
+ def __new__(cls, *args, **kwargs):
+ self = super(FrozenDict, cls).__new__(cls)
+ self.__data = dict(*args, **kwargs)
+ return self
+
+ # Container
+ def __contains__(self, key):
+ return key in self.__data
+
+ # Hashable
+ def __hash__(self):
+        return hash(tuple(sorted(self.__data.items())))
+
+ # Mapping
+ def __iter__(self):
+ return iter(self.__data)
+
+ def __len__(self):
+ return len(self.__data)
+
+ def __getitem__(self, key):
+ return self.__data[key]
+
+ # override get for efficiency provided by dict
+ def get(self, *args, **kwargs):
+ return self.__data.get(*args, **kwargs)
+
+ # override eq to recognize underlying implementation
+ def __eq__(self, other):
+ if isinstance(other, FrozenDict):
+ other = other.__data
+ return self.__data.__eq__(other)
+
+ def copy(self):
+ "Return a shallow copy of self"
+ return copy.copy(self)
+
+
+class Enumeration(ItemsAsAttributes, BijectiveMap):
+ """
+ A convenient way to provide enumerated values
+
+ >>> e = Enumeration('a b c')
+ >>> e['a']
+ 0
+
+ >>> e.a
+ 0
+
+ >>> e[1]
+ 'b'
+
+ >>> set(e.names) == set('abc')
+ True
+
+ >>> set(e.codes) == set(range(3))
+ True
+
+ >>> e.get('d') is None
+ True
+
+ Codes need not start with 0
+
+ >>> e = Enumeration('a b c', range(1, 4))
+ >>> e['a']
+ 1
+
+ >>> e[3]
+ 'c'
+ """
+ def __init__(self, names, codes=None):
+ if isinstance(names, six.string_types):
+ names = names.split()
+ if codes is None:
+ codes = itertools.count()
+ super(Enumeration, self).__init__(zip(names, codes))
+
+ @property
+ def names(self):
+ return (key for key in self if isinstance(key, six.string_types))
+
+ @property
+ def codes(self):
+ return (self[name] for name in self.names)
+
+
+class Everything(object):
+ """
+ A collection "containing" every possible thing.
+
+ >>> 'foo' in Everything()
+ True
+
+ >>> import random
+ >>> random.randint(1, 999) in Everything()
+ True
+ """
+ def __contains__(self, other):
+ return True
+
+
+class InstrumentedDict(six.moves.UserDict):
+ """
+ Instrument an existing dictionary with additional
+ functionality, but always reference and mutate
+ the original dictionary.
+
+ >>> orig = {'a': 1, 'b': 2}
+ >>> inst = InstrumentedDict(orig)
+ >>> inst['a']
+ 1
+ >>> inst['c'] = 3
+ >>> orig['c']
+ 3
+ >>> inst.keys() == orig.keys()
+ True
+ """
+ def __init__(self, data):
+ six.moves.UserDict.__init__(self)
+ self.data = data
diff --git a/libs/jaraco/functools.py b/libs/jaraco/functools.py
new file mode 100644
index 00000000..d9ccf3a6
--- /dev/null
+++ b/libs/jaraco/functools.py
@@ -0,0 +1,268 @@
+from __future__ import absolute_import, unicode_literals, print_function, division
+
+import functools
+import time
+import warnings
+
+try:
+ from functools import lru_cache
+except ImportError:
+ try:
+ from backports.functools_lru_cache import lru_cache
+ except ImportError:
+ try:
+ from functools32 import lru_cache
+ except ImportError:
+ warnings.warn("No lru_cache available")
+
+
def compose(*funcs):
    """
    Compose any number of unary functions into a single unary function.

    Functions are applied right-to-left: ``compose(f, g)(x) == f(g(x))``.
    The innermost (last) function may take arbitrary arguments.

    >>> import textwrap
    >>> from six import text_type
    >>> text_type.strip(textwrap.dedent(compose.__doc__)) == compose(text_type.strip, textwrap.dedent)(compose.__doc__)
    True

    >>> round_three = lambda x: round(x, ndigits=3)
    >>> f = compose(round_three, int.__truediv__)
    >>> [f(3*x, x+1) for x in range(1,10)]
    [1.5, 2.0, 2.25, 2.4, 2.5, 2.571, 2.625, 2.667, 2.7]
    """
    def pairwise(outer, inner):
        # Compose two callables: feed inner's result to outer.
        def composed(*args, **kwargs):
            return outer(inner(*args, **kwargs))
        return composed
    return functools.reduce(pairwise, funcs)
+
+
def method_caller(method_name, *args, **kwargs):
    """
    Return a function that will call a named method on the
    target object with optional positional and keyword
    arguments.

    >>> lower = method_caller('lower')
    >>> lower('MyString')
    'mystring'
    """
    def invoke(target):
        # Resolve the method at call time so any object supplying the
        # attribute works.
        return getattr(target, method_name)(*args, **kwargs)
    return invoke
+
+
def once(func):
    """
    Decorate func so it's only ever called the first time.

    This decorator can ensure that an expensive or non-idempotent function
    will not be expensive on subsequent calls and is idempotent.

    >>> func = once(lambda a: a+3)
    >>> func(3)
    6
    >>> func(9)
    6
    >>> func('12')
    6
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        # The first call computes and stores the result on the wrapped
        # function itself; every later call returns that stored value,
        # regardless of arguments.
        try:
            return func.always_returns
        except AttributeError:
            func.always_returns = func(*args, **kwargs)
            return func.always_returns
    return wrapper
+
+
def method_cache(method, cache_wrapper=None):
    """
    Wrap lru_cache to support storing the cache data in the object instances.

    Abstracts the common paradigm where the method explicitly saves an
    underscore-prefixed protected property on first call and returns that
    subsequently.

    >>> class MyClass:
    ...     calls = 0
    ...
    ...     @method_cache
    ...     def method(self, value):
    ...         self.calls += 1
    ...         return value

    >>> a = MyClass()
    >>> a.method(3)
    3
    >>> for x in range(75):
    ...     res = a.method(x)
    >>> a.calls
    75

    Note that the apparent behavior will be exactly like that of lru_cache
    except that the cache is stored on each instance, so values in one
    instance will not flush values from another, and when an instance is
    deleted, so are the cached values for that instance.

    >>> b = MyClass()
    >>> for x in range(35):
    ...     res = b.method(x)
    >>> b.calls
    35
    >>> a.method(0)
    0
    >>> a.calls
    75

    Note that if method had been decorated with ``functools.lru_cache()``,
    a.calls would have been 76 (due to the cached value of 0 having been
    flushed by the 'b' instance).

    Clear the cache with ``.cache_clear()``

    >>> a.method.cache_clear()

    Another cache wrapper may be supplied:

    >>> cache = lru_cache(maxsize=2)
    >>> MyClass.method2 = method_cache(lambda self: 3, cache_wrapper=cache)
    >>> a = MyClass()
    >>> a.method2()
    3

    See also
    http://code.activestate.com/recipes/577452-a-memoize-decorator-for-instance-methods/
    for another implementation and additional justification.
    """
    # Default to an unbounded lru_cache when no wrapper is supplied.
    cache_wrapper = cache_wrapper or lru_cache()
    def wrapper(self, *args, **kwargs):
        # it's the first call, replace the method with a cached, bound method
        bound_method = functools.partial(method, self)
        cached_method = cache_wrapper(bound_method)
        # Shadow the class attribute with the per-instance cached callable;
        # subsequent attribute lookups bypass this wrapper entirely.
        setattr(self, method.__name__, cached_method)
        return cached_method(*args, **kwargs)
    # Special (dunder) methods bypass instance attributes during lookup,
    # so they need the proxy strategy from _special_method_cache instead.
    return _special_method_cache(method, cache_wrapper) or wrapper
+
+
def _special_method_cache(method, cache_wrapper):
    """
    Because Python treats special methods differently, it's not
    possible to use instance attributes to implement the cached
    methods.

    Instead, install the wrapper method under a different name
    and return a simple proxy to that wrapper.

    https://github.com/jaraco/jaraco.functools/issues/5

    Returns None for any method that is not one of the supported
    special names.
    """
    name = method.__name__
    # Only these dunders get the proxy treatment.
    special_names = '__getattr__', '__getitem__'
    if name not in special_names:
        return

    # Per-instance attribute under which the cached callable is stored.
    wrapper_name = '__cached' + name

    def proxy(self, *args, **kwargs):
        # Lazily build the per-instance cache on first use.
        if wrapper_name not in vars(self):
            bound = functools.partial(method, self)
            cache = cache_wrapper(bound)
            setattr(self, wrapper_name, cache)
        else:
            cache = getattr(self, wrapper_name)
        return cache(*args, **kwargs)

    return proxy
+
+
def apply(transform):
    """
    Decorate a function with a transform function that is
    invoked on results returned from the decorated function.

    >>> @apply(reversed)
    ... def get_numbers(start):
    ...     return range(start, start+3)
    >>> list(get_numbers(4))
    [6, 5, 4]
    """
    def wrap(func):
        # Pass every call through, transforming only the return value.
        def transformed(*args, **kwargs):
            return transform(func(*args, **kwargs))
        return transformed
    return wrap
+
+
+def call_aside(f, *args, **kwargs):
+ """
+ Call a function for its side effect after initialization.
+
+ >>> @call_aside
+ ... def func(): print("called")
+ called
+ >>> func()
+ called
+
+ Use functools.partial to pass parameters to the initial call
+
+ >>> @functools.partial(call_aside, name='bingo')
+ ... def func(name): print("called with", name)
+ called with bingo
+ """
+ f(*args, **kwargs)
+ return f
+
+
class Throttler(object):
    """
    Rate-limit a function (or other callable)
    """
    def __init__(self, func, max_rate=float('Inf')):
        # Unwrap an already-throttled callable so rates do not stack.
        if isinstance(func, Throttler):
            func = func.func
        self.func = func
        self.max_rate = max_rate
        self.reset()

    def reset(self):
        # Epoch 0 guarantees the very first call is never delayed.
        self.last_called = 0

    def __call__(self, *args, **kwargs):
        self._wait()
        return self.func(*args, **kwargs)

    def _wait(self):
        "ensure at least 1/max_rate seconds from last call"
        since_last = time.time() - self.last_called
        time.sleep(max(0, 1 / self.max_rate - since_last))
        self.last_called = time.time()

    def __get__(self, obj, type=None):
        # Support use as a method decorator: throttle, then call bound.
        return first_invoke(self._wait, functools.partial(self.func, obj))
+
+
def first_invoke(func1, func2):
    """
    Return a function that when invoked will invoke func1 without
    any parameters (for its side-effect) and then invoke func2
    with whatever parameters were passed, returning its result.
    """
    def combined(*args, **kwargs):
        # func1's return value is deliberately discarded.
        func1()
        return func2(*args, **kwargs)
    return combined
+
+
def retry_call(func, cleanup=lambda: None, retries=0, trap=()):
    """
    Given a callable func, trap the indicated exceptions
    for up to 'retries' times, invoking cleanup on the
    exception. On the final attempt, allow any exceptions
    to propagate.
    """
    # Each loop pass is one trapped attempt: success returns immediately,
    # a trapped exception triggers cleanup and another try.
    for _attempt in range(retries):
        try:
            return func()
        except trap:
            cleanup()
    # The last attempt runs outside the loop so exceptions propagate.
    return func()
diff --git a/libs/jaraco/structures/__init__.py b/libs/jaraco/structures/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/libs/jaraco/structures/binary.py b/libs/jaraco/structures/binary.py
new file mode 100644
index 00000000..e4db2c65
--- /dev/null
+++ b/libs/jaraco/structures/binary.py
@@ -0,0 +1,130 @@
+from __future__ import absolute_import, unicode_literals
+
+from functools import reduce
+
+
def get_bit_values(number, size=32):
    """
    Get bit values as a list for a given number

    >>> get_bit_values(1) == [0]*31 + [1]
    True

    >>> get_bit_values(0xDEADBEEF)
    [1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1]

    You may override the default word size of 32-bits to match your actual
    application.

    >>> get_bit_values(0x3, 2)
    [1, 1]

    >>> get_bit_values(0x3, 4)
    [0, 0, 1, 1]
    """
    # Shift each bit position down to the ones place, most significant
    # first; values outside the word width simply truncate.
    return [(number >> position) & 1 for position in range(size - 1, -1, -1)]
+
def gen_bit_values(number):
    """
    Return a zero or one for each bit of a numeric value up to the most
    significant 1 bit, beginning with the least significant bit.

    >>> list(gen_bit_values(16))
    [0, 0, 0, 0, 1]
    """
    # bin() renders most-significant first; walk the digits backwards.
    binary_digits = bin(number)[2:]
    return (int(digit) for digit in reversed(binary_digits))
+
def coalesce(bits):
    """
    Take a sequence of bits, most significant first, and
    coalesce them into a number.

    >>> coalesce([1,0,1])
    5
    """
    # Fold each bit in from the right: the accumulator shifts left and
    # the incoming bit fills the vacated ones place.
    def fold(accumulated, bit):
        return (accumulated << 1) | bit
    return reduce(fold, bits)
+
class Flags(object):
    """
    Subclasses should define _names, a list of flag names beginning
    with the least-significant bit.

    >>> class MyFlags(Flags):
    ...     _names = 'a', 'b', 'c'
    >>> mf = MyFlags.from_number(5)
    >>> mf['a']
    1
    >>> mf['b']
    0
    >>> mf['c'] == mf[2]
    True
    >>> mf['b'] = 1
    >>> mf['a'] = 0
    >>> mf.number
    6
    """
    def __init__(self, values):
        self._values = list(values)
        # Pad with zeros so every declared flag name has a backing bit.
        if hasattr(self, '_names'):
            shortfall = len(self._names) - len(self._values)
            self._values.extend([0] * shortfall)

    @classmethod
    def from_number(cls, number):
        # Build from an integer's bits, least-significant first.
        return cls(gen_bit_values(number))

    @property
    def number(self):
        # Reassemble the integer: values are LSB-first, coalesce wants
        # the most-significant bit first.
        return coalesce(reversed(self._values))

    def __setitem__(self, key, value):
        # Integer (or slice) keys index directly; any key that raises
        # TypeError is resolved as a flag name instead.
        try:
            self._values[key] = value
        except TypeError:
            self._values[self._names.index(key)] = value

    def __getitem__(self, key):
        try:
            return self._values[key]
        except TypeError:
            return self._values[self._names.index(key)]
+
class BitMask(type):
    """
    A metaclass to create a bitmask with attributes. Subclass an int and
    set this as the metaclass to use.

    Here's how to create such a class on Python 3:

        class MyBits(int, metaclass=BitMask):
            a = 0x1
            b = 0x4
            c = 0x3

    For testing purposes, construct explicitly to support Python 2

    >>> ns = dict(a=0x1, b=0x4, c=0x3)
    >>> MyBits = BitMask(str('MyBits'), (int,), ns)

    >>> b1 = MyBits(3)
    >>> b1.a, b1.b, b1.c
    (True, False, True)
    >>> b2 = MyBits(8)
    >>> any([b2.a, b2.b, b2.c])
    False
    """

    def __new__(cls, name, bases, attrs):
        # Replace every public attribute with a read-only property that
        # tests the instance's bits against that attribute's mask. The
        # factory binds each mask value at definition time.
        def make_property(mask):
            return property(lambda self: bool(self & mask))
        masked_attrs = {
            attr: make_property(value)
            for attr, value in attrs.items()
            if not attr.startswith('_')
        }
        return type.__new__(cls, name, bases, masked_attrs)
diff --git a/libs/jaraco/text.py b/libs/jaraco/text.py
new file mode 100644
index 00000000..c459e6e0
--- /dev/null
+++ b/libs/jaraco/text.py
@@ -0,0 +1,371 @@
+from __future__ import absolute_import, unicode_literals, print_function
+
+import sys
+import re
+import inspect
+import itertools
+import textwrap
+import functools
+
+import six
+
+import jaraco.collections
+from jaraco.functools import compose
+
+
def substitution(old, new):
    """
    Return a function that will perform a substitution on a string
    """
    def substitute(s):
        return s.replace(old, new)
    return substitute
+
+
def multi_substitution(*substitutions):
    """
    Take a sequence of pairs specifying substitutions, and create
    a function that performs those substitutions.

    >>> multi_substitution(('foo', 'bar'), ('bar', 'baz'))('foo')
    'baz'
    """
    # compose applies its last argument first, so the substitution
    # functions are reversed to apply them in the order supplied.
    replacers = tuple(itertools.starmap(substitution, substitutions))
    return compose(*reversed(replacers))
+
+
class FoldedCase(six.text_type):
    """
    A case insensitive string class; behaves just like str
    except compares equal when the only variation is case.
    >>> s = FoldedCase('hello world')

    >>> s == 'Hello World'
    True

    >>> 'Hello World' == s
    True

    >>> s.index('O')
    4

    >>> s.split('O')
    ['hell', ' w', 'rld']

    >>> sorted(map(FoldedCase, ['GAMMA', 'alpha', 'Beta']))
    ['alpha', 'Beta', 'GAMMA']
    """
    def __lt__(self, other):
        return self.lower() < other.lower()

    def __gt__(self, other):
        return self.lower() > other.lower()

    def __eq__(self, other):
        # NOTE(review): __ne__ is not defined; on Python 2, ``!=`` will
        # not be case-insensitive — confirm whether callers rely on it.
        return self.lower() == other.lower()

    def __hash__(self):
        return hash(self.lower())

    # cache lower since it's likely to be called frequently.
    def lower(self):
        # Memoize by shadowing the bound method with a closure that
        # returns the precomputed value on subsequent calls.
        self._lower = super(FoldedCase, self).lower()
        self.lower = lambda: self._lower
        return self._lower

    def index(self, sub):
        # Case-insensitive index of sub within self.
        return self.lower().index(sub.lower())

    def split(self, splitter=' ', maxsplit=0):
        # Case-insensitive split on the literal (regex-escaped) splitter.
        pattern = re.compile(re.escape(splitter), re.I)
        return pattern.split(self, maxsplit)
+
+
def local_format(string):
    """
    format the string using variables in the caller's local namespace.

    >>> a = 3
    >>> local_format("{a:5}")
    '    3'
    """
    # One frame back (f_back) is the caller; its locals supply the values.
    caller_locals = inspect.currentframe().f_back.f_locals
    if sys.version_info >= (3, 2):
        return string.format_map(caller_locals)
    # str.format_map is unavailable before 3.2; fall back to **-expansion.
    return string.format(**caller_locals)
+
+
def global_format(string):
    """
    format the string using variables in the caller's global namespace.

    >>> a = 3
    >>> fmt = "The func name: {global_format.__name__}"
    >>> global_format(fmt)
    'The func name: global_format'
    """
    # One frame back (f_back) is the caller; use its module globals.
    context = inspect.currentframe().f_back.f_globals
    # str.format_map was added in Python 3.2; fall back to ** expansion.
    if sys.version_info < (3, 2):
        return string.format(**context)
    return string.format_map(context)
+
+
def namespace_format(string):
    """
    Format the string using variable in the caller's scope (locals + globals).

    >>> a = 3
    >>> fmt = "A is {a} and this func is {namespace_format.__name__}"
    >>> namespace_format(fmt)
    'A is 3 and this func is namespace_format'
    """
    # Layer the caller's locals over its globals so local names win;
    # DictStack is assumed to resolve lookups from the last push first —
    # TODO confirm against jaraco.collections.DictStack.
    context = jaraco.collections.DictStack()
    context.push(inspect.currentframe().f_back.f_globals)
    context.push(inspect.currentframe().f_back.f_locals)
    # str.format_map was added in Python 3.2; fall back to ** expansion.
    if sys.version_info < (3, 2):
        return string.format(**context)
    return string.format_map(context)
+
+
def is_decodable(value):
    r"""
    Return True if the supplied value is decodable (using the default
    encoding).

    >>> is_decodable(b'\xff')
    False
    >>> is_decodable(b'\x32')
    True
    """
    # TODO: This code could be expressed more consisely and directly
    # with a jaraco.context.ExceptionTrap, but that adds an unfortunate
    # long dependency tree, so for now, use boolean literals.
    try:
        value.decode()
    except UnicodeDecodeError:
        return False
    else:
        return True
+
def is_binary(value):
    """
    Return True if the value appears to be binary (that is, it's a byte
    string and isn't decodable).
    """
    # Non-bytes values are never considered binary.
    if not isinstance(value, bytes):
        return False
    return not is_decodable(value)
+
def trim(s):
    r"""
    Trim something like a docstring to remove the whitespace that
    is common due to indentation and formatting.

    >>> trim("\n\tfoo = bar\n\t\tbar = baz\n")
    'foo = bar\n\tbar = baz'
    """
    # Drop the common leading indentation, then surrounding blank space.
    dedented = textwrap.dedent(s)
    return dedented.strip()
+
class Splitter(object):
    """object that will split a string with the given arguments for each call
    >>> s = Splitter(',')
    >>> s('hello, world, this is your, master calling')
    ['hello', ' world', ' this is your', ' master calling']
    """
    def __init__(self, *args):
        # Remember the str.split arguments for reuse on every call.
        self.args = args

    def __call__(self, text):
        return text.split(*self.args)
+
def indent(string, prefix=' ' * 4):
    # Prepend the prefix (four spaces by default) to the string.
    return ''.join((prefix, string))
+
class WordSet(tuple):
    """
    Given a Python identifier, return the words that identifier represents,
    whether in camel case, underscore-separated, etc.

    >>> WordSet.parse("camelCase")
    ('camel', 'Case')

    >>> WordSet.parse("under_sep")
    ('under', 'sep')

    Acronyms should be retained

    >>> WordSet.parse("firstSNL")
    ('first', 'SNL')

    >>> WordSet.parse("you_and_I")
    ('you', 'and', 'I')

    >>> WordSet.parse("A simple test")
    ('A', 'simple', 'test')

    Multiple caps should not interfere with the first cap of another word.

    >>> WordSet.parse("myABCClass")
    ('my', 'ABC', 'Class')

    The result is a WordSet, so you can get the form you need.

    >>> WordSet.parse("myABCClass").underscore_separated()
    'my_ABC_Class'

    >>> WordSet.parse('a-command').camel_case()
    'ACommand'

    >>> WordSet.parse('someIdentifier').lowered().space_separated()
    'some identifier'

    >>> WordSet.parse('someIdentifier').headless_camel_case()
    'someIdentifier'

    Slices of the result should return another WordSet.

    >>> WordSet.parse('taken-out-of-context')[1:].underscore_separated()
    'out_of_context'

    >>> WordSet.from_class_name(WordSet()).lowered().space_separated()
    'word set'
    """
    # A capitalized-or-lowercase word, or a run of capitals not followed
    # by a lowercase letter (an acronym).
    _pattern = re.compile('([A-Z]?[a-z]+)|([A-Z]+(?![a-z]))')

    def capitalized(self):
        return WordSet(word.capitalize() for word in self)

    def lowered(self):
        return WordSet(word.lower() for word in self)

    def camel_case(self):
        return ''.join(self.capitalized())

    def headless_camel_case(self):
        words = iter(self)
        first = next(words).lower()
        # Join so the result is a string like the other *_case methods
        # (previously the raw itertools.chain iterator escaped).
        return ''.join(itertools.chain((first,), WordSet(words).camel_case()))

    def underscore_separated(self):
        return '_'.join(self)

    def dash_separated(self):
        return '-'.join(self)

    def space_separated(self):
        return ' '.join(self)

    def __getitem__(self, item):
        result = super(WordSet, self).__getitem__(item)
        # Keep slices in the WordSet type so chained calls still work.
        if isinstance(item, slice):
            result = WordSet(result)
        return result

    # for compatibility with Python 2
    def __getslice__(self, i, j):
        return self.__getitem__(slice(i, j))

    @classmethod
    def parse(cls, identifier):
        """Split identifier into the WordSet of its component words."""
        matches = cls._pattern.finditer(identifier)
        return WordSet(match.group(0) for match in matches)

    @classmethod
    def from_class_name(cls, subject):
        """Parse the name of subject's class into its component words."""
        return cls.parse(subject.__class__.__name__)
+
+# for backward compatibility
+words = WordSet.parse
+
+
def simple_html_strip(s):
    r"""
    Remove HTML from the string `s`.

    >>> str(simple_html_strip(''))
    ''

    >>> print(simple_html_strip('A <bold>stormy</bold> day in paradise'))
    A stormy day in paradise

    >>> print(simple_html_strip('Somebody <!-- do not --> tell the truth.'))
    Somebody  tell the truth.

    >>> print(simple_html_strip('What about<br/>\nmultiple lines?'))
    What about
    multiple lines?
    """
    # Match, in order of preference: HTML comments, tags, and runs of
    # plain text; only the text (group 3) is kept. The comment
    # alternative must come first so '<!--' is not consumed as a tag.
    # (The previous pattern's first alternative was an empty group,
    # which matched zero-width at every position and stripped all text.)
    html_stripper = re.compile('(<!--.*?-->)|(<[^>]*>)|([^<]+)', re.DOTALL)
    texts = (
        match.group(3) or ''
        for match
        in html_stripper.finditer(s)
    )
    return ''.join(texts)
+
+
class SeparatedValues(six.text_type):
    """
    A string separated by a separator. Overrides __iter__ for getting
    the values.

    >>> list(SeparatedValues('a,b,c'))
    ['a', 'b', 'c']

    Whitespace is stripped and empty values are discarded.

    >>> list(SeparatedValues(' a, b , c, '))
    ['a', 'b', 'c']
    """
    # Class attribute so subclasses can choose a different separator.
    separator = ','

    def __iter__(self):
        # Strip each piece, then discard any that are empty afterwards.
        stripped = (piece.strip() for piece in self.split(self.separator))
        return six.moves.filter(None, stripped)
+
class Stripper:
    r"""
    Given a series of lines, find the common prefix and strip it from them.

    >>> lines = [
    ...     'abcdefg\n',
    ...     'abc\n',
    ...     'abcde\n',
    ... ]
    >>> res = Stripper.strip_prefix(lines)
    >>> res.prefix
    'abc'
    >>> list(res.lines)
    ['defg\n', '\n', 'de\n']

    If no prefix is common, nothing should be stripped.

    >>> lines = [
    ...     'abcd\n',
    ...     '1234\n',
    ... ]
    >>> res = Stripper.strip_prefix(lines)
    >>> res.prefix
    ''
    >>> list(res.lines)
    ['abcd\n', '1234\n']
    """
    def __init__(self, prefix, lines):
        self.prefix = prefix
        # Strip the prefix lazily from each line as it is consumed.
        self.lines = map(self, lines)

    @classmethod
    def strip_prefix(cls, lines):
        """
        Find the common prefix of ``lines`` and return a Stripper whose
        ``lines`` yield the inputs with that prefix removed.
        """
        prefix_lines, lines = itertools.tee(lines)
        prefix = functools.reduce(cls.common_prefix, prefix_lines)
        return cls(prefix, lines)

    def __call__(self, line):
        """Return ``line`` with the common prefix removed."""
        # str.partition rejects an empty separator, and an empty prefix
        # means there is nothing to strip anyway.
        if not self.prefix:
            return line
        null, prefix, rest = line.partition(self.prefix)
        return rest

    @staticmethod
    def common_prefix(s1, s2):
        """
        Return the common prefix of two lines.
        """
        # Shrink the candidate length until both strings agree; reaches
        # the empty string when nothing is shared.
        index = min(len(s1), len(s2))
        while s1[:index] != s2[:index]:
            index -= 1
        return s1[:index]
diff --git a/libs/jaraco/ui/__init__.py b/libs/jaraco/ui/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/libs/jaraco/ui/cmdline.py b/libs/jaraco/ui/cmdline.py
new file mode 100644
index 00000000..0634f21d
--- /dev/null
+++ b/libs/jaraco/ui/cmdline.py
@@ -0,0 +1,62 @@
+import argparse
+
+import six
+from jaraco.classes import meta
+from jaraco import text
+
+
@six.add_metaclass(meta.LeafClassesMeta)
class Command(object):
    """
    A general-purpose base class for creating commands for a command-line
    program using argparse. Each subclass of Command represents a separate
    sub-command of a program.

    For example, one might use Command subclasses to implement the Mercurial
    command set::

        class Commit(Command):
            @staticmethod
            def add_arguments(cls, parser):
                parser.add_argument('-m', '--message')

            @classmethod
            def run(cls, args):
                "Run the 'commit' command with args (parsed)"

        class Merge(Command): pass
        class Pull(Command): pass
        ...

    Then one could create an entry point for Mercurial like so::

        def hg_command():
            Command.invoke()
    """

    @classmethod
    def add_subparsers(cls, parser):
        # Register a sub-command parser for every leaf subclass.
        # _leaf_classes is presumably maintained by the LeafClassesMeta
        # metaclass — TODO confirm against jaraco.classes.meta.
        subparsers = parser.add_subparsers()
        [cmd_class.add_parser(subparsers) for cmd_class in cls._leaf_classes]

    @classmethod
    def add_parser(cls, subparsers):
        # Derive the sub-command name from the class name,
        # e.g. 'MyCommand' -> 'my-command'.
        cmd_string = text.words(cls.__name__).lowered().dash_separated()
        parser = subparsers.add_parser(cmd_string)
        # Record which Command subclass handles this sub-command so
        # invoke() can dispatch to it.
        parser.set_defaults(action=cls)
        cls.add_arguments(parser)
        return parser

    @classmethod
    def add_arguments(cls, parser):
        # Hook for subclasses to declare their own arguments.
        pass

    @classmethod
    def invoke(cls):
        """
        Invoke the command using ArgumentParser
        """
        parser = argparse.ArgumentParser()
        cls.add_subparsers(parser)
        args = parser.parse_args()
        args.action.run(args)
diff --git a/libs/jaraco/ui/editor.py b/libs/jaraco/ui/editor.py
new file mode 100644
index 00000000..b37c759d
--- /dev/null
+++ b/libs/jaraco/ui/editor.py
@@ -0,0 +1,108 @@
+from __future__ import unicode_literals, absolute_import
+
+import tempfile
+import os
+import sys
+import subprocess
+import mimetypes
+import collections
+import io
+import difflib
+
+import six
+
class EditProcessException(RuntimeError):
    """Raised when the editor subprocess exits with a non-zero status."""
+
class EditableFile(object):
    """
    EditableFile saves some data to a temporary file, launches a
    platform editor for interactive editing, and then reloads the data,
    setting .changed to True if the data was edited.

    e.g.::

        x = EditableFile('foo')
        x.edit()

        if x.changed:
            print(x.data)

    The EDITOR environment variable can define which executable to use
    (also XML_EDITOR if the content-type to edit includes 'xml'). If no
    EDITOR is defined, defaults to 'notepad' on Windows and 'edit' on
    other platforms.
    """
    # Fallback editors keyed by sys.platform; unknown platforms get 'edit'.
    platform_default_editors = collections.defaultdict(
        lambda: 'edit',
        win32 = 'notepad',
        linux2 = 'vi',
    )
    # Encoding used for round-tripping the data through the temp file.
    encoding = 'utf-8'

    def __init__(self, data='', content_type='text/plain'):
        self.data = six.text_type(data)
        self.content_type = content_type

    def __enter__(self):
        # Write the data to a temp file whose extension matches the
        # content type, so the editor can apply appropriate handling.
        extension = mimetypes.guess_extension(self.content_type) or ''
        fobj, self.name = tempfile.mkstemp(extension)
        os.write(fobj, self.data.encode(self.encoding))
        os.close(fobj)
        return self

    def read(self):
        # Read the (possibly edited) temp file back and decode to text.
        with open(self.name, 'rb') as f:
            return f.read().decode(self.encoding)

    def __exit__(self, *tb_info):
        # Always clean up the temp file created in __enter__.
        os.remove(self.name)

    def edit(self):
        """
        Edit the file
        """
        self.changed = False
        with self:
            editor = self.get_editor()
            cmd = [editor, self.name]
            try:
                res = subprocess.call(cmd)
            except Exception as e:
                # Best-effort: report the failure and leave data untouched.
                print("Error launching editor %(editor)s" % locals())
                print(e)
                return
            if res != 0:
                msg = '%(editor)s returned error status %(res)d' % locals()
                raise EditProcessException(msg)
            new_data = self.read()
            if new_data != self.data:
                # Record a diff of the edit and mark the data as changed.
                self.changed = self._save_diff(self.data, new_data)
                self.data = new_data

    @staticmethod
    def _search_env(keys):
        """
        Search the environment for the supplied keys, returning the first
        one found or None if none was found.
        """
        matches = (os.environ[key] for key in keys if key in os.environ)
        return next(matches, None)

    def get_editor(self):
        """
        Give preference to an XML_EDITOR or EDITOR defined in the
        environment. Otherwise use a default editor based on platform.
        """
        env_search = ['EDITOR']
        if 'xml' in self.content_type:
            env_search.insert(0, 'XML_EDITOR')
        default_editor = self.platform_default_editors[sys.platform]
        return self._search_env(env_search) or default_editor

    @staticmethod
    def _save_diff(*versions):
        # Build a context diff between the versions; the tuple is truthy
        # whenever the versions differ, so it doubles as the .changed flag.
        def get_lines(content):
            return list(io.StringIO(content))
        lines = map(get_lines, versions)
        diff = difflib.context_diff(*lines)
        return tuple(diff)
diff --git a/libs/jaraco/ui/input.py b/libs/jaraco/ui/input.py
new file mode 100644
index 00000000..3d108fc0
--- /dev/null
+++ b/libs/jaraco/ui/input.py
@@ -0,0 +1,26 @@
+"""
+This module currently provides a cross-platform getch function
+"""
+
try:
    # Windows
    from msvcrt import getch
except ImportError:
    pass

try:
    # Unix
    import sys
    import tty
    import termios

    def getch():
        # Read a single character from stdin without waiting for newline.
        fd = sys.stdin.fileno()
        old = termios.tcgetattr(fd)
        try:
            # Raw mode delivers each keystroke immediately, without echo.
            tty.setraw(fd)
            return sys.stdin.read(1)
        finally:
            # Always restore the prior terminal settings, even on error.
            termios.tcsetattr(fd, termios.TCSADRAIN, old)
except ImportError:
    pass
diff --git a/libs/jaraco/ui/menu.py b/libs/jaraco/ui/menu.py
new file mode 100644
index 00000000..aede93b3
--- /dev/null
+++ b/libs/jaraco/ui/menu.py
@@ -0,0 +1,34 @@
+from __future__ import print_function, absolute_import, unicode_literals
+
+import itertools
+
+import six
+
class Menu(object):
    """
    A simple command-line based menu
    """
    def __init__(self, choices=None, formatter=str):
        self.choices = choices or list()
        # formatter renders each choice for display.
        self.formatter = formatter

    def get_choice(self, prompt="> "):
        # Print the numbered (1-based) choices, prompt for a number, and
        # return the chosen item; None on invalid input or Ctrl-C.
        n = len(self.choices)
        # Width of the largest item number, plus one for padding.
        number_width = len(str(n)) + 1
        menu_fmt = '{number:{number_width}}) {choice}'
        formatted_choices = map(self.formatter, self.choices)
        for number, choice in zip(itertools.count(1), formatted_choices):
            print(menu_fmt.format(**locals()))
        print()
        try:
            answer = int(six.moves.input(prompt))
            # NOTE(review): a negative answer indexes from the end of
            # choices rather than being rejected — confirm intended.
            result = self.choices[answer - 1]
        except ValueError:
            # Input was not an integer.
            print('invalid selection')
            result = None
        except IndexError:
            # Number out of range.
            print('invalid selection')
            result = None
        except KeyboardInterrupt:
            result = None
        return result
diff --git a/libs/jaraco/ui/progress.py b/libs/jaraco/ui/progress.py
new file mode 100644
index 00000000..a00adf47
--- /dev/null
+++ b/libs/jaraco/ui/progress.py
@@ -0,0 +1,150 @@
+from __future__ import (print_function, absolute_import, unicode_literals,
+ division)
+
+import time
+import sys
+import itertools
+import abc
+import datetime
+
+import six
+
+
@six.add_metaclass(abc.ABCMeta)
class AbstractProgressBar(object):
    def __init__(self, unit='', size=70):
        """
        Size is the nominal size in characters
        """
        self.unit = unit
        self.size = size

    def report(self, amt):
        # Redraw the bar in place using a carriage return (no newline).
        sys.stdout.write('\r%s' % self.get_bar(amt))
        sys.stdout.flush()

    @abc.abstractmethod
    def get_bar(self, amt):
        "Return the string to be printed. Should be size >= self.size"

    def summary(self, str):
        # NOTE(review): the parameter name shadows the builtin ``str``.
        return ' (' + self.unit_str(str) + ')'

    def unit_str(self, str):
        # Append the unit label, if one was configured.
        if self.unit:
            str += ' ' + self.unit
        return str

    def finish(self):
        # Terminate the in-place progress line with a newline.
        print()

    def __enter__(self):
        # Draw the initial (zero) state on entry.
        self.report(0)
        return self

    def __exit__(self, exc, exc_val, tb):
        # On success draw the final state; on error just end the line.
        if exc is None:
            self.finish()
        else:
            print()

    def iterate(self, iterable):
        """
        Report the status as the iterable is consumed.
        """
        with self:
            for n, item in enumerate(iterable, 1):
                self.report(n)
                yield item
+
class SimpleProgressBar(AbstractProgressBar):

    # An endless spinner cycling through these glyphs on each report.
    # NOTE(review): the cycle is shared at class level, so all instances
    # advance the same spinner — confirm intended.
    _PROG_DISPGLYPH = itertools.cycle(['|', '/', '-', '\\'])

    def get_bar(self, amt):
        bar = next(self._PROG_DISPGLYPH)
        template = ' [{bar:^{bar_len}}]'
        summary = self.summary('{amt}')
        template += summary
        # Render once with an empty bar to measure the fixed overhead,
        # then size the bar to fill the remaining width.
        empty = template.format(
            bar='',
            bar_len=0,
            amt=amt,
        )
        bar_len = self.size - len(empty)
        return template.format(**locals())

    @classmethod
    def demo(cls):
        # Interactive demonstration; writes to stdout with small delays.
        bar3 = cls(unit='cubes', size=30)
        with bar3:
            for x in six.moves.range(1, 759):
                bar3.report(x)
                time.sleep(0.01)
+
+
class TargetProgressBar(AbstractProgressBar):
    def __init__(self, total=None, unit='', size=70):
        """
        Size is the nominal size in characters
        """
        # total is the amount that represents 100% completion.
        self.total = total
        super(TargetProgressBar, self).__init__(unit, size)

    def get_bar(self, amt):
        template = ' [{bar:<{bar_len}}]'
        completed = amt / self.total
        percent = int(completed * 100)
        percent_str = ' {percent:3}%'
        template += percent_str
        summary = self.summary('{amt}/{total}')
        template += summary
        # Render once with an empty bar to measure the fixed overhead,
        # then size the '=' fill to the remaining width.
        empty = template.format(
            total=self.total,
            bar='',
            bar_len=0,
            **locals()
        )
        bar_len = self.size - len(empty)
        bar = '=' * int(completed * bar_len)
        return template.format(total=self.total, **locals())

    @classmethod
    def demo(cls):
        # Interactive demonstration; writes to stdout with small delays.
        bar1 = cls(100, 'blocks')
        with bar1:
            for x in six.moves.range(1, 101):
                bar1.report(x)
                time.sleep(0.05)

        bar2 = cls(758, size=50)
        with bar2:
            for x in six.moves.range(1, 759):
                bar2.report(x)
                time.sleep(0.01)

    def finish(self):
        # Draw the fully-complete bar before ending the line.
        self.report(self.total)
        super(TargetProgressBar, self).finish()
+
+
def countdown(template, duration=datetime.timedelta(seconds=5)):
    """
    Do a countdown for duration, printing the template (which may accept one
    positional argument). Template should be something like
    ``countdown complete in {} seconds.``
    """
    now = datetime.datetime.now()
    deadline = now + duration
    remaining = deadline - datetime.datetime.now()
    # Loop until the clamped remaining time is exactly zero
    # (timedelta() is falsy; any nonzero timedelta is truthy).
    while remaining:
        remaining = deadline - datetime.datetime.now()
        # Clamp so an overshoot never displays a negative time.
        remaining = max(datetime.timedelta(), remaining)
        msg = template.format(remaining.total_seconds())
        # Pad with spaces to overwrite any longer previous message.
        print(msg, end=' '*10)
        sys.stdout.flush()
        time.sleep(.1)
        # Backspace over the line so the next message redraws in place.
        print('\b'*80, end='')
        sys.stdout.flush()
    print()
diff --git a/libs/more_itertools/__init__.py b/libs/more_itertools/__init__.py
new file mode 100644
index 00000000..5a3467fe
--- /dev/null
+++ b/libs/more_itertools/__init__.py
@@ -0,0 +1,2 @@
+from more_itertools.more import *
+from more_itertools.recipes import *
diff --git a/libs/more_itertools/more.py b/libs/more_itertools/more.py
new file mode 100644
index 00000000..56512ce4
--- /dev/null
+++ b/libs/more_itertools/more.py
@@ -0,0 +1,237 @@
+from functools import partial, wraps
+from itertools import izip_longest
+from recipes import *
+
+__all__ = ['chunked', 'first', 'peekable', 'collate', 'consumer', 'ilen',
+ 'iterate', 'with_iter']
+
+
+_marker = object()
+
+
+def chunked(iterable, n):
+ """Break an iterable into lists of a given length::
+
+ >>> list(chunked([1, 2, 3, 4, 5, 6, 7], 3))
+ [[1, 2, 3], [4, 5, 6], [7]]
+
+ If the length of ``iterable`` is not evenly divisible by ``n``, the last
+ returned list will be shorter.
+
+ This is useful for splitting up a computation on a large number of keys
+ into batches, to be pickled and sent off to worker processes. One example
+ is operations on rows in MySQL, which does not implement server-side
+ cursors properly and would otherwise load the entire dataset into RAM on
+ the client.
+
+ """
+ # Doesn't seem to run into any number-of-args limits.
+ for group in (list(g) for g in izip_longest(*[iter(iterable)] * n,
+ fillvalue=_marker)):
+ if group[-1] is _marker:
+ # If this is the last group, shuck off the padding:
+ del group[group.index(_marker):]
+ yield group
+
+
+def first(iterable, default=_marker):
+ """Return the first item of an iterable, ``default`` if there is none.
+
+ >>> first(xrange(4))
+ 0
+ >>> first(xrange(0), 'some default')
+ 'some default'
+
+ If ``default`` is not provided and there are no items in the iterable,
+ raise ``ValueError``.
+
+ ``first()`` is useful when you have a generator of expensive-to-retrieve
+ values and want any arbitrary one. It is marginally shorter than
+ ``next(iter(...))`` but saves you an entire ``try``/``except`` when you
+ want to provide a fallback value.
+
+ """
+ try:
+ return next(iter(iterable))
+ except StopIteration:
+ # I'm on the edge about raising ValueError instead of StopIteration. At
+ # the moment, ValueError wins, because the caller could conceivably
+ # want to do something different with flow control when I raise the
+ # exception, and it's weird to explicitly catch StopIteration.
+ if default is _marker:
+ raise ValueError('first() was called on an empty iterable, and no '
+ 'default value was provided.')
+ return default
+
+
+class peekable(object):
+ """Wrapper for an iterator to allow 1-item lookahead
+
+ Call ``peek()`` on the result to get the value that will next pop out of
+ ``next()``, without advancing the iterator:
+
+ >>> p = peekable(xrange(2))
+ >>> p.peek()
+ 0
+ >>> p.next()
+ 0
+ >>> p.peek()
+ 1
+ >>> p.next()
+ 1
+
+ Pass ``peek()`` a default value, and it will be returned in the case where
+ the iterator is exhausted:
+
+ >>> p = peekable([])
+ >>> p.peek('hi')
+ 'hi'
+
+ If no default is provided, ``peek()`` raises ``StopIteration`` when there
+ are no items left.
+
+ To test whether there are more items in the iterator, examine the
+ peekable's truth value. If it is truthy, there are more items.
+
+ >>> assert peekable(xrange(1))
+ >>> assert not peekable([])
+
+ """
+ # Lowercase to blend in with itertools. The fact that it's a class is an
+ # implementation detail.
+
+ def __init__(self, iterable):
+ self._it = iter(iterable)
+
+ def __iter__(self):
+ return self
+
+ def __nonzero__(self):
+ try:
+ self.peek()
+ except StopIteration:
+ return False
+ return True
+
+ def peek(self, default=_marker):
+ """Return the item that will be next returned from ``next()``.
+
+ Return ``default`` if there are no items left. If ``default`` is not
+ provided, raise ``StopIteration``.
+
+ """
+ if not hasattr(self, '_peek'):
+ try:
+ self._peek = self._it.next()
+ except StopIteration:
+ if default is _marker:
+ raise
+ return default
+ return self._peek
+
+ def next(self):
+ ret = self.peek()
+ del self._peek
+ return ret
+
+
+def collate(*iterables, **kwargs):
+ """Return a sorted merge of the items from each of several already-sorted
+ ``iterables``.
+
+ >>> list(collate('ACDZ', 'AZ', 'JKL'))
+ ['A', 'A', 'C', 'D', 'J', 'K', 'L', 'Z', 'Z']
+
+ Works lazily, keeping only the next value from each iterable in memory. Use
+    ``collate()`` to, for example, perform an n-way mergesort of items that
+ don't fit in memory.
+
+ :arg key: A function that returns a comparison value for an item. Defaults
+ to the identity function.
+ :arg reverse: If ``reverse=True``, yield results in descending order
+ rather than ascending. ``iterables`` must also yield their elements in
+ descending order.
+
+ If the elements of the passed-in iterables are out of order, you might get
+ unexpected results.
+
+ """
+ key = kwargs.pop('key', lambda a: a)
+ reverse = kwargs.pop('reverse', False)
+
+ min_or_max = partial(max if reverse else min, key=lambda (a, b): a)
+ peekables = [peekable(it) for it in iterables]
+ peekables = [p for p in peekables if p] # Kill empties.
+ while peekables:
+ _, p = min_or_max((key(p.peek()), p) for p in peekables)
+ yield p.next()
+ peekables = [p for p in peekables if p]
+
+
+def consumer(func):
+ """Decorator that automatically advances a PEP-342-style "reverse iterator"
+ to its first yield point so you don't have to call ``next()`` on it
+ manually.
+
+ >>> @consumer
+ ... def tally():
+ ... i = 0
+ ... while True:
+ ... print 'Thing number %s is %s.' % (i, (yield))
+ ... i += 1
+ ...
+ >>> t = tally()
+ >>> t.send('red')
+ Thing number 0 is red.
+ >>> t.send('fish')
+ Thing number 1 is fish.
+
+ Without the decorator, you would have to call ``t.next()`` before
+ ``t.send()`` could be used.
+
+ """
+ @wraps(func)
+ def wrapper(*args, **kwargs):
+ gen = func(*args, **kwargs)
+ gen.next()
+ return gen
+ return wrapper
+
+
+def ilen(iterable):
+ """Return the number of items in ``iterable``.
+
+ >>> from itertools import ifilter
+ >>> ilen(ifilter(lambda x: x % 3 == 0, xrange(1000000)))
+ 333334
+
+ This does, of course, consume the iterable, so handle it with care.
+
+ """
+ return sum(1 for _ in iterable)
+
+
+def iterate(func, start):
+ """Return ``start``, ``func(start)``, ``func(func(start))``, ...
+
+ >>> from itertools import islice
+ >>> list(islice(iterate(lambda x: 2*x, 1), 10))
+ [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
+
+ """
+ while True:
+ yield start
+ start = func(start)
+
+
+def with_iter(context_manager):
+ """Wrap an iterable in a ``with`` statement, so it closes once exhausted.
+
+ Example::
+
+ upper_lines = (line.upper() for line in with_iter(open('foo')))
+
+ """
+ with context_manager as iterable:
+ for item in iterable:
+ yield item
diff --git a/libs/more_itertools/recipes.py b/libs/more_itertools/recipes.py
new file mode 100644
index 00000000..c92373c6
--- /dev/null
+++ b/libs/more_itertools/recipes.py
@@ -0,0 +1,331 @@
+"""Imported from the recipes section of the itertools documentation.
+
+All functions taken from the recipes section of the itertools library docs
+[1]_.
+Some backward-compatible usability improvements have been made.
+
+.. [1] http://docs.python.org/library/itertools.html#recipes
+
+"""
+from collections import deque
+from itertools import chain, combinations, count, cycle, groupby, ifilterfalse, imap, islice, izip, izip_longest, repeat, starmap, tee # Wrapping breaks 2to3.
+import operator
+from random import randrange, sample, choice
+
+
+__all__ = ['take', 'tabulate', 'consume', 'nth', 'quantify', 'padnone',
+ 'ncycles', 'dotproduct', 'flatten', 'repeatfunc', 'pairwise',
+ 'grouper', 'roundrobin', 'powerset', 'unique_everseen',
+ 'unique_justseen', 'iter_except', 'random_product',
+ 'random_permutation', 'random_combination',
+ 'random_combination_with_replacement']
+
+
+def take(n, iterable):
+ """Return first n items of the iterable as a list
+
+ >>> take(3, range(10))
+ [0, 1, 2]
+ >>> take(5, range(3))
+ [0, 1, 2]
+
+ Effectively a short replacement for ``next`` based iterator consumption
+ when you want more than one item, but less than the whole iterator.
+
+ """
+ return list(islice(iterable, n))
+
+
+def tabulate(function, start=0):
+ """Return an iterator mapping the function over linear input.
+
+    Successive integers, beginning at ``start`` and increasing by 1 each
+    time the iterator is advanced, are fed into the function.
+
+ >>> t = tabulate(lambda x: x**2, -3)
+ >>> take(3, t)
+ [9, 4, 1]
+
+ """
+ return imap(function, count(start))
+
+
+def consume(iterator, n=None):
+    """Advance the iterator n-steps ahead. If n is None, consume entirely.
+
+ Efficiently exhausts an iterator without returning values. Defaults to
+ consuming the whole iterator, but an optional second argument may be
+ provided to limit consumption.
+
+ >>> i = (x for x in range(10))
+ >>> next(i)
+ 0
+ >>> consume(i, 3)
+ >>> next(i)
+ 4
+ >>> consume(i)
+ >>> next(i)
+ Traceback (most recent call last):
+ File "", line 1, in
+ StopIteration
+
+ If the iterator has fewer items remaining than the provided limit, the
+ whole iterator will be consumed.
+
+ >>> i = (x for x in range(3))
+ >>> consume(i, 5)
+ >>> next(i)
+ Traceback (most recent call last):
+ File "", line 1, in
+ StopIteration
+
+ """
+ # Use functions that consume iterators at C speed.
+ if n is None:
+ # feed the entire iterator into a zero-length deque
+ deque(iterator, maxlen=0)
+ else:
+ # advance to the empty slice starting at position n
+ next(islice(iterator, n, n), None)
+
+
+def nth(iterable, n, default=None):
+ """Returns the nth item or a default value
+
+ >>> l = range(10)
+ >>> nth(l, 3)
+ 3
+ >>> nth(l, 20, "zebra")
+ 'zebra'
+
+ """
+ return next(islice(iterable, n, None), default)
+
+
+def quantify(iterable, pred=bool):
+    """Return how many times the predicate is true
+
+ >>> quantify([True, False, True])
+ 2
+
+ """
+ return sum(imap(pred, iterable))
+
+
+def padnone(iterable):
+ """Returns the sequence of elements and then returns None indefinitely.
+
+ >>> take(5, padnone(range(3)))
+ [0, 1, 2, None, None]
+
+ Useful for emulating the behavior of the built-in map() function.
+
+ """
+ return chain(iterable, repeat(None))
+
+
+def ncycles(iterable, n):
+ """Returns the sequence elements n times
+
+ >>> list(ncycles(["a", "b"], 3))
+ ['a', 'b', 'a', 'b', 'a', 'b']
+
+ """
+ return chain.from_iterable(repeat(tuple(iterable), n))
+
+
+def dotproduct(vec1, vec2):
+ """Returns the dot product of the two iterables
+
+ >>> dotproduct([10, 10], [20, 20])
+ 400
+
+ """
+ return sum(imap(operator.mul, vec1, vec2))
+
+
+def flatten(listOfLists):
+ """Return an iterator flattening one level of nesting in a list of lists
+
+ >>> list(flatten([[0, 1], [2, 3]]))
+ [0, 1, 2, 3]
+
+ """
+ return chain.from_iterable(listOfLists)
+
+
+def repeatfunc(func, times=None, *args):
+ """Repeat calls to func with specified arguments.
+
+ >>> list(repeatfunc(lambda: 5, 3))
+ [5, 5, 5]
+ >>> list(repeatfunc(lambda x: x ** 2, 3, 3))
+ [9, 9, 9]
+
+ """
+ if times is None:
+ return starmap(func, repeat(args))
+ return starmap(func, repeat(args, times))
+
+
+def pairwise(iterable):
+ """Returns an iterator of paired items, overlapping, from the original
+
+ >>> take(4, pairwise(count()))
+ [(0, 1), (1, 2), (2, 3), (3, 4)]
+
+ """
+ a, b = tee(iterable)
+ next(b, None)
+ return izip(a, b)
+
+
+def grouper(n, iterable, fillvalue=None):
+ """Collect data into fixed-length chunks or blocks
+
+ >>> list(grouper(3, 'ABCDEFG', 'x'))
+ [('A', 'B', 'C'), ('D', 'E', 'F'), ('G', 'x', 'x')]
+
+ """
+ args = [iter(iterable)] * n
+ return izip_longest(fillvalue=fillvalue, *args)
+
+
+def roundrobin(*iterables):
+ """Yields an item from each iterable, alternating between them
+
+ >>> list(roundrobin('ABC', 'D', 'EF'))
+ ['A', 'D', 'E', 'B', 'F', 'C']
+
+ """
+ # Recipe credited to George Sakkis
+ pending = len(iterables)
+ nexts = cycle(iter(it).next for it in iterables)
+ while pending:
+ try:
+ for next in nexts:
+ yield next()
+ except StopIteration:
+ pending -= 1
+ nexts = cycle(islice(nexts, pending))
+
+
+def powerset(iterable):
+ """Yields all possible subsets of the iterable
+
+ >>> list(powerset([1,2,3]))
+ [(), (1,), (2,), (3,), (1, 2), (1, 3), (2, 3), (1, 2, 3)]
+
+ """
+ s = list(iterable)
+ return chain.from_iterable(combinations(s, r) for r in range(len(s)+1))
+
+
+def unique_everseen(iterable, key=None):
+ """Yield unique elements, preserving order.
+
+ >>> list(unique_everseen('AAAABBBCCDAABBB'))
+ ['A', 'B', 'C', 'D']
+ >>> list(unique_everseen('ABBCcAD', str.lower))
+ ['A', 'B', 'C', 'D']
+
+ """
+ seen = set()
+ seen_add = seen.add
+ if key is None:
+ for element in ifilterfalse(seen.__contains__, iterable):
+ seen_add(element)
+ yield element
+ else:
+ for element in iterable:
+ k = key(element)
+ if k not in seen:
+ seen_add(k)
+ yield element
+
+
+def unique_justseen(iterable, key=None):
+ """Yields elements in order, ignoring serial duplicates
+
+ >>> list(unique_justseen('AAAABBBCCDAABBB'))
+ ['A', 'B', 'C', 'D', 'A', 'B']
+ >>> list(unique_justseen('ABBCcAD', str.lower))
+ ['A', 'B', 'C', 'A', 'D']
+
+ """
+ return imap(next, imap(operator.itemgetter(1), groupby(iterable, key)))
+
+
+def iter_except(func, exception, first=None):
+ """Yields results from a function repeatedly until an exception is raised.
+
+ Converts a call-until-exception interface to an iterator interface.
+ Like __builtin__.iter(func, sentinel) but uses an exception instead
+ of a sentinel to end the loop.
+
+ >>> l = range(3)
+ >>> list(iter_except(l.pop, IndexError))
+ [2, 1, 0]
+
+ """
+ try:
+ if first is not None:
+ yield first()
+ while 1:
+ yield func()
+ except exception:
+ pass
+
+
+def random_product(*args, **kwds):
+ """Returns a random pairing of items from each iterable argument
+
+    If `repeat` is provided as a kwarg, its value will be used to indicate
+ how many pairings should be chosen.
+
+ >>> random_product(['a', 'b', 'c'], [1, 2], repeat=2) # doctest:+SKIP
+ ('b', '2', 'c', '2')
+
+ """
+ pools = map(tuple, args) * kwds.get('repeat', 1)
+ return tuple(choice(pool) for pool in pools)
+
+
+def random_permutation(iterable, r=None):
+ """Returns a random permutation.
+
+ If r is provided, the permutation is truncated to length r.
+
+ >>> random_permutation(range(5)) # doctest:+SKIP
+ (3, 4, 0, 1, 2)
+
+ """
+ pool = tuple(iterable)
+ r = len(pool) if r is None else r
+ return tuple(sample(pool, r))
+
+
+def random_combination(iterable, r):
+ """Returns a random combination of length r, chosen without replacement.
+
+ >>> random_combination(range(5), 3) # doctest:+SKIP
+ (2, 3, 4)
+
+ """
+ pool = tuple(iterable)
+ n = len(pool)
+ indices = sorted(sample(xrange(n), r))
+ return tuple(pool[i] for i in indices)
+
+
+def random_combination_with_replacement(iterable, r):
+ """Returns a random combination of length r, chosen with replacement.
+
+    >>> random_combination_with_replacement(range(3), 5) # doctest:+SKIP
+ (0, 0, 1, 2, 2)
+
+ """
+ pool = tuple(iterable)
+ n = len(pool)
+ indices = sorted(randrange(n) for i in xrange(r))
+ return tuple(pool[i] for i in indices)
diff --git a/libs/more_itertools/tests/__init__.py b/libs/more_itertools/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/libs/more_itertools/tests/test_more.py b/libs/more_itertools/tests/test_more.py
new file mode 100644
index 00000000..53b10618
--- /dev/null
+++ b/libs/more_itertools/tests/test_more.py
@@ -0,0 +1,143 @@
+from contextlib import closing
+from itertools import islice, ifilter
+from StringIO import StringIO
+from unittest import TestCase
+
+from nose.tools import eq_, assert_raises
+
+from more_itertools import * # Test all the symbols are in __all__.
+
+
+class CollateTests(TestCase):
+ """Unit tests for ``collate()``"""
+ # Also accidentally tests peekable, though that could use its own tests
+
+ def test_default(self):
+ """Test with the default `key` function."""
+ iterables = [xrange(4), xrange(7), xrange(3, 6)]
+ eq_(sorted(reduce(list.__add__, [list(it) for it in iterables])),
+ list(collate(*iterables)))
+
+ def test_key(self):
+ """Test using a custom `key` function."""
+ iterables = [xrange(5, 0, -1), xrange(4, 0, -1)]
+ eq_(list(sorted(reduce(list.__add__,
+ [list(it) for it in iterables]),
+ reverse=True)),
+ list(collate(*iterables, key=lambda x: -x)))
+
+ def test_empty(self):
+ """Be nice if passed an empty list of iterables."""
+ eq_([], list(collate()))
+
+ def test_one(self):
+ """Work when only 1 iterable is passed."""
+ eq_([0, 1], list(collate(xrange(2))))
+
+ def test_reverse(self):
+ """Test the `reverse` kwarg."""
+ iterables = [xrange(4, 0, -1), xrange(7, 0, -1), xrange(3, 6, -1)]
+ eq_(sorted(reduce(list.__add__, [list(it) for it in iterables]),
+ reverse=True),
+ list(collate(*iterables, reverse=True)))
+
+
+class ChunkedTests(TestCase):
+ """Tests for ``chunked()``"""
+
+ def test_even(self):
+ """Test when ``n`` divides evenly into the length of the iterable."""
+ eq_(list(chunked('ABCDEF', 3)), [['A', 'B', 'C'], ['D', 'E', 'F']])
+
+ def test_odd(self):
+ """Test when ``n`` does not divide evenly into the length of the
+ iterable.
+
+ """
+ eq_(list(chunked('ABCDE', 3)), [['A', 'B', 'C'], ['D', 'E']])
+
+
+class FirstTests(TestCase):
+ """Tests for ``first()``"""
+
+ def test_many(self):
+ """Test that it works on many-item iterables."""
+ # Also try it on a generator expression to make sure it works on
+ # whatever those return, across Python versions.
+ eq_(first(x for x in xrange(4)), 0)
+
+ def test_one(self):
+ """Test that it doesn't raise StopIteration prematurely."""
+ eq_(first([3]), 3)
+
+ def test_empty_stop_iteration(self):
+ """It should raise StopIteration for empty iterables."""
+ assert_raises(ValueError, first, [])
+
+ def test_default(self):
+ """It should return the provided default arg for empty iterables."""
+ eq_(first([], 'boo'), 'boo')
+
+
+class PeekableTests(TestCase):
+    """Tests for ``peekable()`` behavior not incidentally covered by testing
+ ``collate()``
+
+ """
+ def test_peek_default(self):
+ """Make sure passing a default into ``peek()`` works."""
+ p = peekable([])
+ eq_(p.peek(7), 7)
+
+ def test_truthiness(self):
+ """Make sure a ``peekable`` tests true iff there are items remaining in
+ the iterable.
+
+ """
+ p = peekable([])
+ self.failIf(p)
+ p = peekable(xrange(3))
+ self.failUnless(p)
+
+ def test_simple_peeking(self):
+ """Make sure ``next`` and ``peek`` advance and don't advance the
+ iterator, respectively.
+
+ """
+ p = peekable(xrange(10))
+ eq_(p.next(), 0)
+ eq_(p.peek(), 1)
+ eq_(p.next(), 1)
+
+
+class ConsumerTests(TestCase):
+ """Tests for ``consumer()``"""
+
+ def test_consumer(self):
+ @consumer
+ def eater():
+ while True:
+ x = yield
+
+ e = eater()
+ e.send('hi') # without @consumer, would raise TypeError
+
+
+def test_ilen():
+ """Sanity-check ``ilen()``."""
+ eq_(ilen(ifilter(lambda x: x % 10 == 0, range(101))), 11)
+
+
+def test_with_iter():
+ """Make sure ``with_iter`` iterates over and closes things correctly."""
+ s = StringIO('One fish\nTwo fish')
+ initial_words = [line.split()[0] for line in with_iter(closing(s))]
+ eq_(initial_words, ['One', 'Two'])
+
+ # Make sure closing happened:
+ try:
+ list(s)
+ except ValueError: # "I/O operation on closed file"
+ pass
+ else:
+ raise AssertionError('StringIO object was not closed.')
diff --git a/libs/more_itertools/tests/test_recipes.py b/libs/more_itertools/tests/test_recipes.py
new file mode 100644
index 00000000..485d9d30
--- /dev/null
+++ b/libs/more_itertools/tests/test_recipes.py
@@ -0,0 +1,433 @@
+from random import seed
+from unittest import TestCase
+
+from nose.tools import eq_, assert_raises, ok_
+
+from more_itertools import *
+
+
+def setup_module():
+ seed(1337)
+
+
+class TakeTests(TestCase):
+ """Tests for ``take()``"""
+
+ def test_simple_take(self):
+ """Test basic usage"""
+ t = take(5, xrange(10))
+ eq_(t, [0, 1, 2, 3, 4])
+
+ def test_null_take(self):
+ """Check the null case"""
+ t = take(0, xrange(10))
+ eq_(t, [])
+
+ def test_negative_take(self):
+ """Make sure taking negative items results in a ValueError"""
+ assert_raises(ValueError, take, -3, xrange(10))
+
+ def test_take_too_much(self):
+ """Taking more than an iterator has remaining should return what the
+ iterator has remaining.
+
+ """
+ t = take(10, xrange(5))
+ eq_(t, [0, 1, 2, 3, 4])
+
+
+class TabulateTests(TestCase):
+ """Tests for ``tabulate()``"""
+
+ def test_simple_tabulate(self):
+ """Test the happy path"""
+ t = tabulate(lambda x: x)
+ f = tuple([next(t) for _ in range(3)])
+ eq_(f, (0, 1, 2))
+
+ def test_count(self):
+ """Ensure tabulate accepts specific count"""
+ t = tabulate(lambda x: 2 * x, -1)
+ f = (next(t), next(t), next(t))
+ eq_(f, (-2, 0, 2))
+
+
+class ConsumeTests(TestCase):
+ """Tests for ``consume()``"""
+
+ def test_sanity(self):
+ """Test basic functionality"""
+ r = (x for x in range(10))
+ consume(r, 3)
+ eq_(3, next(r))
+
+ def test_null_consume(self):
+ """Check the null case"""
+ r = (x for x in range(10))
+ consume(r, 0)
+ eq_(0, next(r))
+
+ def test_negative_consume(self):
+        """Check that negative consumption throws an error"""
+ r = (x for x in range(10))
+ assert_raises(ValueError, consume, r, -1)
+
+ def test_total_consume(self):
+ """Check that iterator is totally consumed by default"""
+ r = (x for x in range(10))
+ consume(r)
+ assert_raises(StopIteration, next, r)
+
+
+class NthTests(TestCase):
+ """Tests for ``nth()``"""
+
+ def test_basic(self):
+ """Make sure the nth item is returned"""
+ l = range(10)
+ for i, v in enumerate(l):
+ eq_(nth(l, i), v)
+
+ def test_default(self):
+ """Ensure a default value is returned when nth item not found"""
+ l = range(3)
+ eq_(nth(l, 100, "zebra"), "zebra")
+
+ def test_negative_item_raises(self):
+ """Ensure asking for a negative item raises an exception"""
+ assert_raises(ValueError, nth, range(10), -3)
+
+
+class QuantifyTests(TestCase):
+ """Tests for ``quantify()``"""
+
+ def test_happy_path(self):
+ """Make sure True count is returned"""
+ q = [True, False, True]
+ eq_(quantify(q), 2)
+
+ def test_custom_predicate(self):
+ """Ensure non-default predicates return as expected"""
+ q = range(10)
+ eq_(quantify(q, lambda x: x % 2 == 0), 5)
+
+
+class PadnoneTests(TestCase):
+ """Tests for ``padnone()``"""
+
+ def test_happy_path(self):
+ """wrapper iterator should return None indefinitely"""
+ r = range(2)
+ p = padnone(r)
+ eq_([0, 1, None, None], [next(p) for _ in range(4)])
+
+
+class NcyclesTests(TestCase):
+    """Tests for ``ncycles()``"""
+
+ def test_happy_path(self):
+ """cycle a sequence three times"""
+ r = ["a", "b", "c"]
+ n = ncycles(r, 3)
+ eq_(["a", "b", "c", "a", "b", "c", "a", "b", "c"],
+ list(n))
+
+ def test_null_case(self):
+ """asking for 0 cycles should return an empty iterator"""
+ n = ncycles(range(100), 0)
+ assert_raises(StopIteration, next, n)
+
+ def test_pathalogical_case(self):
+ """asking for negative cycles should return an empty iterator"""
+ n = ncycles(range(100), -10)
+ assert_raises(StopIteration, next, n)
+
+
+class DotproductTests(TestCase):
+    """Tests for ``dotproduct()``"""
+
+ def test_happy_path(self):
+ """simple dotproduct example"""
+ eq_(400, dotproduct([10, 10], [20, 20]))
+
+
+class FlattenTests(TestCase):
+ """Tests for ``flatten()``"""
+
+ def test_basic_usage(self):
+ """ensure list of lists is flattened one level"""
+ f = [[0, 1, 2], [3, 4, 5]]
+ eq_(range(6), list(flatten(f)))
+
+ def test_single_level(self):
+ """ensure list of lists is flattened only one level"""
+ f = [[0, [1, 2]], [[3, 4], 5]]
+ eq_([0, [1, 2], [3, 4], 5], list(flatten(f)))
+
+
+class RepeatfuncTests(TestCase):
+ """Tests for ``repeatfunc()``"""
+
+ def test_simple_repeat(self):
+ """test simple repeated functions"""
+ r = repeatfunc(lambda: 5)
+ eq_([5, 5, 5, 5, 5], [next(r) for _ in range(5)])
+
+ def test_finite_repeat(self):
+ """ensure limited repeat when times is provided"""
+ r = repeatfunc(lambda: 5, times=5)
+ eq_([5, 5, 5, 5, 5], list(r))
+
+ def test_added_arguments(self):
+ """ensure arguments are applied to the function"""
+ r = repeatfunc(lambda x: x, 2, 3)
+ eq_([3, 3], list(r))
+
+ def test_null_times(self):
+ """repeat 0 should return an empty iterator"""
+ r = repeatfunc(range, 0, 3)
+ assert_raises(StopIteration, next, r)
+
+
+class PairwiseTests(TestCase):
+ """Tests for ``pairwise()``"""
+
+ def test_base_case(self):
+ """ensure an iterable will return pairwise"""
+ p = pairwise([1, 2, 3])
+ eq_([(1, 2), (2, 3)], list(p))
+
+ def test_short_case(self):
+ """ensure an empty iterator if there's not enough values to pair"""
+ p = pairwise("a")
+ assert_raises(StopIteration, next, p)
+
+
+class GrouperTests(TestCase):
+ """Tests for ``grouper()``"""
+
+ def test_even(self):
+ """Test when group size divides evenly into the length of
+ the iterable.
+
+ """
+ eq_(list(grouper(3, 'ABCDEF')), [('A', 'B', 'C'), ('D', 'E', 'F')])
+
+ def test_odd(self):
+ """Test when group size does not divide evenly into the length of the
+ iterable.
+
+ """
+ eq_(list(grouper(3, 'ABCDE')), [('A', 'B', 'C'), ('D', 'E', None)])
+
+ def test_fill_value(self):
+ """Test that the fill value is used to pad the final group"""
+ eq_(list(grouper(3, 'ABCDE', 'x')), [('A', 'B', 'C'), ('D', 'E', 'x')])
+
+
+class RoundrobinTests(TestCase):
+ """Tests for ``roundrobin()``"""
+
+ def test_even_groups(self):
+ """Ensure ordered output from evenly populated iterables"""
+ eq_(list(roundrobin('ABC', [1, 2, 3], range(3))),
+ ['A', 1, 0, 'B', 2, 1, 'C', 3, 2])
+
+ def test_uneven_groups(self):
+ """Ensure ordered output from unevenly populated iterables"""
+ eq_(list(roundrobin('ABCD', [1, 2], range(0))),
+ ['A', 1, 'B', 2, 'C', 'D'])
+
+
+class PowersetTests(TestCase):
+ """Tests for ``powerset()``"""
+
+ def test_combinatorics(self):
+ """Ensure a proper enumeration"""
+ p = powerset([1, 2, 3])
+ eq_(list(p),
+ [(), (1,), (2,), (3,), (1, 2), (1, 3), (2, 3), (1, 2, 3)])
+
+
+class UniqueEverseenTests(TestCase):
+ """Tests for ``unique_everseen()``"""
+
+ def test_everseen(self):
+ """ensure duplicate elements are ignored"""
+ u = unique_everseen('AAAABBBBCCDAABBB')
+ eq_(['A', 'B', 'C', 'D'],
+ list(u))
+
+ def test_custom_key(self):
+ """ensure the custom key comparison works"""
+ u = unique_everseen('aAbACCc', key=str.lower)
+ eq_(list('abC'), list(u))
+
+
+class UniqueJustseenTests(TestCase):
+ """Tests for ``unique_justseen()``"""
+
+ def test_justseen(self):
+ """ensure only last item is remembered"""
+ u = unique_justseen('AAAABBBCCDABB')
+ eq_(list('ABCDAB'), list(u))
+
+ def test_custom_key(self):
+ """ensure the custom key comparison works"""
+ u = unique_justseen('AABCcAD', str.lower)
+ eq_(list('ABCAD'), list(u))
+
+
+class IterExceptTests(TestCase):
+ """Tests for ``iter_except()``"""
+
+ def test_exact_exception(self):
+ """ensure the exact specified exception is caught"""
+ l = [1, 2, 3]
+ i = iter_except(l.pop, IndexError)
+ eq_(list(i), [3, 2, 1])
+
+ def test_generic_exception(self):
+ """ensure the generic exception can be caught"""
+ l = [1, 2]
+ i = iter_except(l.pop, Exception)
+ eq_(list(i), [2, 1])
+
+ def test_uncaught_exception_is_raised(self):
+ """ensure a non-specified exception is raised"""
+ l = [1, 2, 3]
+ i = iter_except(l.pop, KeyError)
+ assert_raises(IndexError, list, i)
+
+ def test_first(self):
+ """ensure first is run before the function"""
+ l = [1, 2, 3]
+ f = lambda: 25
+ i = iter_except(l.pop, IndexError, f)
+ eq_(list(i), [25, 3, 2, 1])
+
+
+class RandomProductTests(TestCase):
+ """Tests for ``random_product()``
+
+ Since random.choice() has different results with the same seed across
+    python versions 2.x and 3.x, these tests use highly probable events to
+ create predictable outcomes across platforms.
+ """
+
+ def test_simple_lists(self):
+ """Ensure that one item is chosen from each list in each pair.
+ Also ensure that each item from each list eventually appears in
+ the chosen combinations.
+
+ Odds are roughly 1 in 7.1 * 10e16 that one item from either list will
+ not be chosen after 100 samplings of one item from each list. Just to
+ be safe, better use a known random seed, too.
+
+ """
+ nums = [1, 2, 3]
+ lets = ['a', 'b', 'c']
+ n, m = zip(*[random_product(nums, lets) for _ in range(100)])
+ n, m = set(n), set(m)
+ eq_(n, set(nums))
+ eq_(m, set(lets))
+ eq_(len(n), len(nums))
+ eq_(len(m), len(lets))
+
+ def test_list_with_repeat(self):
+ """ensure multiple items are chosen, and that they appear to be chosen
+ from one list then the next, in proper order.
+
+ """
+ nums = [1, 2, 3]
+ lets = ['a', 'b', 'c']
+ r = list(random_product(nums, lets, repeat=100))
+ eq_(2 * 100, len(r))
+ n, m = set(r[::2]), set(r[1::2])
+ eq_(n, set(nums))
+ eq_(m, set(lets))
+ eq_(len(n), len(nums))
+ eq_(len(m), len(lets))
+
+
+class RandomPermutationTests(TestCase):
+ """Tests for ``random_permutation()``"""
+
+ def test_full_permutation(self):
+ """ensure every item from the iterable is returned in a new ordering
+
+ 15 elements have a 1 in 1.3 * 10e12 of appearing in sorted order, so
+ we fix a seed value just to be sure.
+
+ """
+ i = range(15)
+ r = random_permutation(i)
+ eq_(set(i), set(r))
+ if i == r:
+ raise AssertionError("Values were not permuted")
+
+ def test_partial_permutation(self):
+ """ensure all returned items are from the iterable, that the returned
+ permutation is of the desired length, and that all items eventually
+ get returned.
+
+ Sampling 100 permutations of length 5 from a set of 15 leaves a
+ (2/3)^100 chance that an item will not be chosen. Multiplied by 15
+ items, there is a 1 in 2.6e16 chance that at least 1 item will not
+ show up in the resulting output. Using a random seed will fix that.
+
+ """
+ items = range(15)
+ item_set = set(items)
+ all_items = set()
+ for _ in xrange(100):
+ permutation = random_permutation(items, 5)
+ eq_(len(permutation), 5)
+ permutation_set = set(permutation)
+ ok_(permutation_set <= item_set)
+ all_items |= permutation_set
+ eq_(all_items, item_set)
+
+
+class RandomCombinationTests(TestCase):
+ """Tests for ``random_combination()``"""
+
+ def test_psuedorandomness(self):
+ """ensure different subsets of the iterable get returned over many
+ samplings of random combinations"""
+ items = range(15)
+ all_items = set()
+ for _ in xrange(50):
+ combination = random_combination(items, 5)
+ all_items |= set(combination)
+ eq_(all_items, set(items))
+
+ def test_no_replacement(self):
+ """ensure that elements are sampled without replacement"""
+ items = range(15)
+ for _ in xrange(50):
+ combination = random_combination(items, len(items))
+ eq_(len(combination), len(set(combination)))
+ assert_raises(ValueError, random_combination, items, len(items) + 1)
+
+
+class RandomCombinationWithReplacementTests(TestCase):
+ """Tests for ``random_combination_with_replacement()``"""
+
+ def test_replacement(self):
+ """ensure that elements are sampled with replacement"""
+ items = range(5)
+ combo = random_combination_with_replacement(items, len(items) * 2)
+ eq_(2 * len(items), len(combo))
+ if len(set(combo)) == len(combo):
+ raise AssertionError("Combination contained no duplicates")
+
+ def test_psuedorandomness(self):
+ """ensure different subsets of the iterable get returned over many
+ samplings of random combinations"""
+ items = range(15)
+ all_items = set()
+ for _ in xrange(50):
+ combination = random_combination_with_replacement(items, 5)
+ all_items |= set(combination)
+ eq_(all_items, set(items))
diff --git a/libs/path.py b/libs/path.py
new file mode 100644
index 00000000..1e92a490
--- /dev/null
+++ b/libs/path.py
@@ -0,0 +1,1722 @@
+#
+# Copyright (c) 2010 Mikhail Gusarov
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+#
+
+"""
+path.py - An object representing a path to a file or directory.
+
+https://github.com/jaraco/path.py
+
+Example::
+
+ from path import Path
+ d = Path('/home/guido/bin')
+ for f in d.files('*.py'):
+ f.chmod(0o755)
+"""
+
+from __future__ import unicode_literals
+
+import sys
+import warnings
+import os
+import fnmatch
+import glob
+import shutil
+import codecs
+import hashlib
+import errno
+import tempfile
+import functools
+import operator
+import re
+import contextlib
+import io
+from distutils import dir_util
+import importlib
+
+try:
+ import win32security
+except ImportError:
+ pass
+
+try:
+ import pwd
+except ImportError:
+ pass
+
+try:
+ import grp
+except ImportError:
+ pass
+
+##############################################################################
+# Python 2/3 support
+PY3 = sys.version_info >= (3,)
+PY2 = not PY3
+
+string_types = str,
+text_type = str
+getcwdu = os.getcwd
+
def surrogate_escape(error):
    """
    Simulate the Python 3 ``surrogateescape`` handler, but for Python 2 only.
    """
    # Only registered (and therefore only called) under PY2, after the
    # `import __builtin__` below has run — so the global reference here
    # is safe even though it appears before the import.
    chars = error.object[error.start:error.end]
    # The handler is invoked one undecodable byte at a time.
    assert len(chars) == 1
    # Map the byte into the low surrogate range U+DC00..U+DCFF, mirroring
    # PEP 383 behavior.
    val = ord(chars)
    val += 0xdc00
    return __builtin__.unichr(val), error.end
+
+if PY2:
+ import __builtin__
+ string_types = __builtin__.basestring,
+ text_type = __builtin__.unicode
+ getcwdu = os.getcwdu
+ codecs.register_error('surrogateescape', surrogate_escape)
+
@contextlib.contextmanager
def io_error_compat():
    """
    Translate ``IOError`` raised in the managed block into ``OSError``.

    On Python 2, :func:`io.open` raises ``IOError``; re-raising it as an
    ``OSError`` with the same args and ``filename`` matches the Python 3
    exception hierarchy.
    """
    try:
        yield
    except IOError as io_err:
        translated = OSError(*io_err.args)
        translated.filename = getattr(io_err, 'filename', None)
        raise translated
+
+##############################################################################
+
+__all__ = ['Path', 'CaseInsensitivePattern']
+
+
+LINESEPS = ['\r\n', '\r', '\n']
+U_LINESEPS = LINESEPS + ['\u0085', '\u2028', '\u2029']
+NEWLINE = re.compile('|'.join(LINESEPS))
+U_NEWLINE = re.compile('|'.join(U_LINESEPS))
+NL_END = re.compile(r'(?:{0})$'.format(NEWLINE.pattern))
+U_NL_END = re.compile(r'(?:{0})$'.format(U_NEWLINE.pattern))
+
+
+try:
+ import pkg_resources
+ __version__ = pkg_resources.require('path.py')[0].version
+except Exception:
+ __version__ = 'unknown'
+
+
class TreeWalkWarning(Warning):
    # Emitted by Path.walk/walkdirs/walkfiles when errors='warn' and a
    # directory entry cannot be listed or accessed.
    pass
+
+
+# from jaraco.functools
def compose(*funcs):
    """
    Compose callables right-to-left: ``compose(f, g)(x) == f(g(x))``.
    """
    def pairwise(outer, inner):
        def composed(*args, **kwargs):
            return outer(inner(*args, **kwargs))
        return composed
    return functools.reduce(pairwise, funcs)
+
+
def simple_cache(func):
    """
    Memoize `func(cls, module)` results, keyed by `module`.

    Used by the :meth:`Path.using_module` classmethod; once Python 3.2+
    can be assumed, :func:`functools.lru_cache` supersedes this.
    """
    cache = {}

    def wrapper(cls, module):
        # EAFP: a hit is the common case after the first call.
        try:
            return cache[module]
        except KeyError:
            result = cache[module] = func(cls, module)
            return result
    return wrapper
+
+
class ClassProperty(property):
    """A property evaluated against the class rather than an instance."""

    def __get__(self, cls, owner):
        # `self.fget` is expected to be a classmethod object; binding it to
        # the owner class and calling it yields the property value.
        return self.fget.__get__(None, owner)()
+
+
class multimethod(object):
    """
    Descriptor that behaves like a classmethod when accessed on the class
    and like an instance method (also receiving the class) when accessed
    on an instance.
    """

    def __init__(self, func):
        self.func = func

    def __get__(self, instance, owner):
        # Class access: bind only the owner class.
        if instance is None:
            return functools.partial(self.func, owner)
        # Instance access: bind the owner class and the instance.
        return functools.partial(self.func, owner, instance)
+
+
+class Path(text_type):
+ """
+ Represents a filesystem path.
+
+ For documentation on individual methods, consult their
+ counterparts in :mod:`os.path`.
+
+ Some methods are additionally included from :mod:`shutil`.
+ The functions are linked directly into the class namespace
+ such that they will be bound to the Path instance. For example,
+ ``Path(src).copy(target)`` is equivalent to
+ ``shutil.copy(src, target)``. Therefore, when referencing
+ the docs for these methods, assume `src` references `self`,
+ the Path instance.
+ """
+
+ module = os.path
+ """ The path module to use for path operations.
+
+ .. seealso:: :mod:`os.path`
+ """
+
    def __init__(self, other=''):
        # Path subclasses str/unicode, so the value is fixed in __new__;
        # this hook only rejects an explicit None with a clear message.
        if other is None:
            raise TypeError("Invalid initial value for path: None")
+
    @classmethod
    @simple_cache
    def using_module(cls, module):
        """Return (and cache, via simple_cache) a Path subclass whose path
        operations delegate to `module` (e.g. posixpath or ntpath)."""
        subclass_name = cls.__name__ + '_' + module.__name__
        if PY2:
            # type() requires a native (byte) str name on Python 2.
            subclass_name = str(subclass_name)
        bases = (cls,)
        ns = {'module': module}
        return type(subclass_name, bases, ns)
+
+ @ClassProperty
+ @classmethod
+ def _next_class(cls):
+ """
+ What class should be used to construct new instances from this class
+ """
+ return cls
+
    @classmethod
    def _always_unicode(cls, path):
        """
        Ensure the path as retrieved from a Python API, such as :func:`os.listdir`,
        is a proper Unicode string.
        """
        if PY3 or isinstance(path, text_type):
            # Python 3 os APIs already return str; nothing to convert.
            return path
        # Python 2: decode bytes with the filesystem encoding, using the
        # surrogateescape handler registered at module import.
        return path.decode(sys.getfilesystemencoding(), 'surrogateescape')
+
+ # --- Special Python methods.
+
+ def __repr__(self):
+ return '%s(%s)' % (type(self).__name__, super(Path, self).__repr__())
+
+ # Adding a Path and a string yields a Path.
+ def __add__(self, more):
+ try:
+ return self._next_class(super(Path, self).__add__(more))
+ except TypeError: # Python bug
+ return NotImplemented
+
+ def __radd__(self, other):
+ if not isinstance(other, string_types):
+ return NotImplemented
+ return self._next_class(other.__add__(self))
+
+ # The / operator joins Paths.
+ def __div__(self, rel):
+ """ fp.__div__(rel) == fp / rel == fp.joinpath(rel)
+
+ Join two path components, adding a separator character if
+ needed.
+
+ .. seealso:: :func:`os.path.join`
+ """
+ return self._next_class(self.module.join(self, rel))
+
+ # Make the / operator work even when true division is enabled.
+ __truediv__ = __div__
+
+ # The / operator joins Paths the other way around
+ def __rdiv__(self, rel):
+ """ fp.__rdiv__(rel) == rel / fp
+
+ Join two path components, adding a separator character if
+ needed.
+
+ .. seealso:: :func:`os.path.join`
+ """
+ return self._next_class(self.module.join(rel, self))
+
+ # Make the / operator work even when true division is enabled.
+ __rtruediv__ = __rdiv__
+
+ def __enter__(self):
+ self._old_dir = self.getcwd()
+ os.chdir(self)
+ return self
+
+ def __exit__(self, *_):
+ os.chdir(self._old_dir)
+
+ @classmethod
+ def getcwd(cls):
+ """ Return the current working directory as a path object.
+
+ .. seealso:: :func:`os.getcwdu`
+ """
+ return cls(getcwdu())
+
+ #
+ # --- Operations on Path strings.
+
+ def abspath(self):
+ """ .. seealso:: :func:`os.path.abspath` """
+ return self._next_class(self.module.abspath(self))
+
+ def normcase(self):
+ """ .. seealso:: :func:`os.path.normcase` """
+ return self._next_class(self.module.normcase(self))
+
+ def normpath(self):
+ """ .. seealso:: :func:`os.path.normpath` """
+ return self._next_class(self.module.normpath(self))
+
+ def realpath(self):
+ """ .. seealso:: :func:`os.path.realpath` """
+ return self._next_class(self.module.realpath(self))
+
+ def expanduser(self):
+ """ .. seealso:: :func:`os.path.expanduser` """
+ return self._next_class(self.module.expanduser(self))
+
+ def expandvars(self):
+ """ .. seealso:: :func:`os.path.expandvars` """
+ return self._next_class(self.module.expandvars(self))
+
+ def dirname(self):
+ """ .. seealso:: :attr:`parent`, :func:`os.path.dirname` """
+ return self._next_class(self.module.dirname(self))
+
+ def basename(self):
+ """ .. seealso:: :attr:`name`, :func:`os.path.basename` """
+ return self._next_class(self.module.basename(self))
+
+ def expand(self):
+ """ Clean up a filename by calling :meth:`expandvars()`,
+ :meth:`expanduser()`, and :meth:`normpath()` on it.
+
+ This is commonly everything needed to clean up a filename
+ read from a configuration file, for example.
+ """
+ return self.expandvars().expanduser().normpath()
+
+ @property
+ def namebase(self):
+ """ The same as :meth:`name`, but with one file extension stripped off.
+
+ For example,
+ ``Path('/home/guido/python.tar.gz').name == 'python.tar.gz'``,
+ but
+ ``Path('/home/guido/python.tar.gz').namebase == 'python.tar'``.
+ """
+ base, ext = self.module.splitext(self.name)
+ return base
+
+ @property
+ def ext(self):
+ """ The file extension, for example ``'.py'``. """
+ f, ext = self.module.splitext(self)
+ return ext
+
+ @property
+ def drive(self):
+ """ The drive specifier, for example ``'C:'``.
+
+ This is always empty on systems that don't use drive specifiers.
+ """
+ drive, r = self.module.splitdrive(self)
+ return self._next_class(drive)
+
+ parent = property(
+ dirname, None, None,
+ """ This path's parent directory, as a new Path object.
+
+ For example,
+ ``Path('/usr/local/lib/libpython.so').parent ==
+ Path('/usr/local/lib')``
+
+ .. seealso:: :meth:`dirname`, :func:`os.path.dirname`
+ """)
+
+ name = property(
+ basename, None, None,
+ """ The name of this file or directory without the full path.
+
+ For example,
+ ``Path('/usr/local/lib/libpython.so').name == 'libpython.so'``
+
+ .. seealso:: :meth:`basename`, :func:`os.path.basename`
+ """)
+
+ def splitpath(self):
+ """ p.splitpath() -> Return ``(p.parent, p.name)``.
+
+ .. seealso:: :attr:`parent`, :attr:`name`, :func:`os.path.split`
+ """
+ parent, child = self.module.split(self)
+ return self._next_class(parent), child
+
    def splitdrive(self):
        """ p.splitdrive() -> Return ``(p.drive, rest)``.

        Split the drive specifier from this path.  If there is
        no drive specifier, :samp:`{p.drive}` is empty, so the return value
        is simply ``(Path(''), p)``.  This is always the case on Unix.

        .. seealso:: :func:`os.path.splitdrive`
        """
        drive, rel = self.module.splitdrive(self)
        # Only the drive is re-wrapped as a Path; the remainder is returned
        # as the plain string produced by the path module.
        return self._next_class(drive), rel
+
+ def splitext(self):
+ """ p.splitext() -> Return ``(p.stripext(), p.ext)``.
+
+ Split the filename extension from this path and return
+ the two parts. Either part may be empty.
+
+ The extension is everything from ``'.'`` to the end of the
+ last path segment. This has the property that if
+ ``(a, b) == p.splitext()``, then ``a + b == p``.
+
+ .. seealso:: :func:`os.path.splitext`
+ """
+ filename, ext = self.module.splitext(self)
+ return self._next_class(filename), ext
+
+ def stripext(self):
+ """ p.stripext() -> Remove one file extension from the path.
+
+ For example, ``Path('/home/guido/python.tar.gz').stripext()``
+ returns ``Path('/home/guido/python.tar')``.
+ """
+ return self.splitext()[0]
+
+ def splitunc(self):
+ """ .. seealso:: :func:`os.path.splitunc` """
+ unc, rest = self.module.splitunc(self)
+ return self._next_class(unc), rest
+
+ @property
+ def uncshare(self):
+ """
+ The UNC mount point for this path.
+ This is empty for paths on local drives.
+ """
+ unc, r = self.module.splitunc(self)
+ return self._next_class(unc)
+
+ @multimethod
+ def joinpath(cls, first, *others):
+ """
+ Join first to zero or more :class:`Path` components, adding a separator
+ character (:samp:`{first}.module.sep`) if needed. Returns a new instance of
+ :samp:`{first}._next_class`.
+
+ .. seealso:: :func:`os.path.join`
+ """
+ if not isinstance(first, cls):
+ first = cls(first)
+ return first._next_class(first.module.join(first, *others))
+
    def splitall(self):
        r""" Return a list of the path components in this path.

        The first item in the list will be a Path.  Its value will be
        either :data:`os.curdir`, :data:`os.pardir`, empty, or the root
        directory of this path (for example, ``'/'`` or ``'C:\\'``).  The
        other items in the list will be strings.

        ``path.Path.joinpath(*result)`` will yield the original path.
        """
        parts = []
        loc = self
        # Peel one component off the tail at a time until the head stops
        # changing (the root) or becomes a bare curdir/pardir.
        while loc != os.curdir and loc != os.pardir:
            prev = loc
            loc, child = prev.splitpath()
            if loc == prev:
                # splitpath() reached a fixed point: `loc` is the root.
                break
            parts.append(child)
        parts.append(loc)
        # Components were collected tail-first; restore head-to-tail order.
        parts.reverse()
        return parts
+
+ def relpath(self, start='.'):
+ """ Return this path as a relative path,
+ based from `start`, which defaults to the current working directory.
+ """
+ cwd = self._next_class(start)
+ return cwd.relpathto(self)
+
    def relpathto(self, dest):
        """ Return a relative path from `self` to `dest`.

        If there is no relative path from `self` to `dest`, for example if
        they reside on different drives in Windows, then this returns
        ``dest.abspath()``.
        """
        # Work on absolute paths so component comparison is well defined.
        origin = self.abspath()
        dest = self._next_class(dest).abspath()

        orig_list = origin.normcase().splitall()
        # Don't normcase dest!  We want to preserve the case.
        dest_list = dest.splitall()

        if orig_list[0] != self.module.normcase(dest_list[0]):
            # Can't get here from there.
            return dest

        # Find the location where the two paths start to differ.
        # Comparison is case-normalized on both sides; only orig_list was
        # pre-normalized above, so normcase each dest segment here.
        i = 0
        for start_seg, dest_seg in zip(orig_list, dest_list):
            if start_seg != self.module.normcase(dest_seg):
                break
            i += 1

        # Now i is the point where the two paths diverge.
        # Need a certain number of "os.pardir"s to work up
        # from the origin to the point of divergence.
        segments = [os.pardir] * (len(orig_list) - i)
        # Need to add the diverging part of dest_list.
        segments += dest_list[i:]
        if len(segments) == 0:
            # If they happen to be identical, use os.curdir.
            relpath = os.curdir
        else:
            relpath = self.module.join(*segments)
        return self._next_class(relpath)
+
+ # --- Listing, searching, walking, and matching
+
+ def listdir(self, pattern=None):
+ """ D.listdir() -> List of items in this directory.
+
+ Use :meth:`files` or :meth:`dirs` instead if you want a listing
+ of just files or just subdirectories.
+
+ The elements of the list are Path objects.
+
+ With the optional `pattern` argument, this only lists
+ items whose names match the given pattern.
+
+ .. seealso:: :meth:`files`, :meth:`dirs`
+ """
+ if pattern is None:
+ pattern = '*'
+ return [
+ self / child
+ for child in map(self._always_unicode, os.listdir(self))
+ if self._next_class(child).fnmatch(pattern)
+ ]
+
    def dirs(self, pattern=None):
        """ D.dirs() -> List of this directory's subdirectories.

        The elements of the list are Path objects.
        This does not walk recursively into subdirectories
        (but see :meth:`walkdirs`).

        With the optional `pattern` argument, this only lists
        directories whose names match the given pattern.  For
        example, ``d.dirs('build-*')``.
        """
        # listdir() already applies the pattern; keep only directories.
        return [p for p in self.listdir(pattern) if p.isdir()]
+
+ def files(self, pattern=None):
+ """ D.files() -> List of the files in this directory.
+
+ The elements of the list are Path objects.
+ This does not walk into subdirectories (see :meth:`walkfiles`).
+
+ With the optional `pattern` argument, this only lists files
+ whose names match the given pattern. For example,
+ ``d.files('*.pyc')``.
+ """
+
+ return [p for p in self.listdir(pattern) if p.isfile()]
+
    def walk(self, pattern=None, errors='strict'):
        """ D.walk() -> iterator over files and subdirs, recursively.

        The iterator yields Path objects naming each child item of
        this directory and its descendants.  This requires that
        ``D.isdir()``.

        This performs a depth-first traversal of the directory tree.
        Each directory is returned just before all its children.

        The `errors=` keyword argument controls behavior when an
        error occurs.  The default is ``'strict'``, which causes an
        exception.  Other allowed values are ``'warn'`` (which
        reports the error via :func:`warnings.warn()`), and ``'ignore'``.
        `errors` may also be an arbitrary callable taking a msg parameter.
        """
        # Namespace mapping policy names to handler callables; a custom
        # callable passed as `errors` is used as-is.
        class Handlers:
            def strict(msg):
                # Bare raise re-raises the exception currently being
                # handled; walk() only calls handlers inside `except`.
                raise

            def warn(msg):
                warnings.warn(msg, TreeWalkWarning)

            def ignore(msg):
                pass

        if not callable(errors) and errors not in vars(Handlers):
            raise ValueError("invalid errors parameter")
        errors = vars(Handlers).get(errors, errors)

        try:
            childList = self.listdir()
        except Exception:
            exc = sys.exc_info()[1]
            tmpl = "Unable to list directory '%(self)s': %(exc)s"
            msg = tmpl % locals()
            errors(msg)
            return

        for child in childList:
            if pattern is None or child.fnmatch(pattern):
                yield child
            try:
                isdir = child.isdir()
            except Exception:
                exc = sys.exc_info()[1]
                tmpl = "Unable to access '%(child)s': %(exc)s"
                msg = tmpl % locals()
                errors(msg)
                # Treat unreadable entries as non-directories so traversal
                # continues with the remaining siblings.
                isdir = False

            if isdir:
                # Depth-first: recurse into the subdirectory before moving
                # on to the next sibling.
                for item in child.walk(pattern, errors):
                    yield item
+
+ def walkdirs(self, pattern=None, errors='strict'):
+ """ D.walkdirs() -> iterator over subdirs, recursively.
+
+ With the optional `pattern` argument, this yields only
+ directories whose names match the given pattern. For
+ example, ``mydir.walkdirs('*test')`` yields only directories
+ with names ending in ``'test'``.
+
+ The `errors=` keyword argument controls behavior when an
+ error occurs. The default is ``'strict'``, which causes an
+ exception. The other allowed values are ``'warn'`` (which
+ reports the error via :func:`warnings.warn()`), and ``'ignore'``.
+ """
+ if errors not in ('strict', 'warn', 'ignore'):
+ raise ValueError("invalid errors parameter")
+
+ try:
+ dirs = self.dirs()
+ except Exception:
+ if errors == 'ignore':
+ return
+ elif errors == 'warn':
+ warnings.warn(
+ "Unable to list directory '%s': %s"
+ % (self, sys.exc_info()[1]),
+ TreeWalkWarning)
+ return
+ else:
+ raise
+
+ for child in dirs:
+ if pattern is None or child.fnmatch(pattern):
+ yield child
+ for subsubdir in child.walkdirs(pattern, errors):
+ yield subsubdir
+
+ def walkfiles(self, pattern=None, errors='strict'):
+ """ D.walkfiles() -> iterator over files in D, recursively.
+
+ The optional argument `pattern` limits the results to files
+ with names that match the pattern. For example,
+ ``mydir.walkfiles('*.tmp')`` yields only files with the ``.tmp``
+ extension.
+ """
+ if errors not in ('strict', 'warn', 'ignore'):
+ raise ValueError("invalid errors parameter")
+
+ try:
+ childList = self.listdir()
+ except Exception:
+ if errors == 'ignore':
+ return
+ elif errors == 'warn':
+ warnings.warn(
+ "Unable to list directory '%s': %s"
+ % (self, sys.exc_info()[1]),
+ TreeWalkWarning)
+ return
+ else:
+ raise
+
+ for child in childList:
+ try:
+ isfile = child.isfile()
+ isdir = not isfile and child.isdir()
+ except:
+ if errors == 'ignore':
+ continue
+ elif errors == 'warn':
+ warnings.warn(
+ "Unable to access '%s': %s"
+ % (self, sys.exc_info()[1]),
+ TreeWalkWarning)
+ continue
+ else:
+ raise
+
+ if isfile:
+ if pattern is None or child.fnmatch(pattern):
+ yield child
+ elif isdir:
+ for f in child.walkfiles(pattern, errors):
+ yield f
+
+ def fnmatch(self, pattern, normcase=None):
+ """ Return ``True`` if `self.name` matches the given `pattern`.
+
+ `pattern` - A filename pattern with wildcards,
+ for example ``'*.py'``. If the pattern contains a `normcase`
+ attribute, it is applied to the name and path prior to comparison.
+
+ `normcase` - (optional) A function used to normalize the pattern and
+ filename before matching. Defaults to :meth:`self.module`, which defaults
+ to :meth:`os.path.normcase`.
+
+ .. seealso:: :func:`fnmatch.fnmatch`
+ """
+ default_normcase = getattr(pattern, 'normcase', self.module.normcase)
+ normcase = normcase or default_normcase
+ name = normcase(self.name)
+ pattern = normcase(pattern)
+ return fnmatch.fnmatchcase(name, pattern)
+
+ def glob(self, pattern):
+ """ Return a list of Path objects that match the pattern.
+
+ `pattern` - a path relative to this directory, with wildcards.
+
+ For example, ``Path('/users').glob('*/bin/*')`` returns a list
+ of all the files users have in their :file:`bin` directories.
+
+ .. seealso:: :func:`glob.glob`
+ """
+ cls = self._next_class
+ return [cls(s) for s in glob.glob(self / pattern)]
+
+ #
+ # --- Reading or writing an entire file at once.
+
+ def open(self, *args, **kwargs):
+ """ Open this file and return a corresponding :class:`file` object.
+
+ Keyword arguments work as in :func:`io.open`. If the file cannot be
+ opened, an :class:`~exceptions.OSError` is raised.
+ """
+ with io_error_compat():
+ return io.open(self, *args, **kwargs)
+
+ def bytes(self):
+ """ Open this file, read all bytes, return them as a string. """
+ with self.open('rb') as f:
+ return f.read()
+
+ def chunks(self, size, *args, **kwargs):
+ """ Returns a generator yielding chunks of the file, so it can
+ be read piece by piece with a simple for loop.
+
+ Any argument you pass after `size` will be passed to :meth:`open`.
+
+ :example:
+
+ >>> hash = hashlib.md5()
+ >>> for chunk in Path("path.py").chunks(8192, mode='rb'):
+ ... hash.update(chunk)
+
+ This will read the file by chunks of 8192 bytes.
+ """
+ with self.open(*args, **kwargs) as f:
+ for chunk in iter(lambda: f.read(size) or None, None):
+ yield chunk
+
+ def write_bytes(self, bytes, append=False):
+ """ Open this file and write the given bytes to it.
+
+ Default behavior is to overwrite any existing file.
+ Call ``p.write_bytes(bytes, append=True)`` to append instead.
+ """
+ if append:
+ mode = 'ab'
+ else:
+ mode = 'wb'
+ with self.open(mode) as f:
+ f.write(bytes)
+
+ def text(self, encoding=None, errors='strict'):
+ r""" Open this file, read it in, return the content as a string.
+
+ All newline sequences are converted to ``'\n'``. Keyword arguments
+ will be passed to :meth:`open`.
+
+ .. seealso:: :meth:`lines`
+ """
+ with self.open(mode='r', encoding=encoding, errors=errors) as f:
+ return U_NEWLINE.sub('\n', f.read())
+
+ def write_text(self, text, encoding=None, errors='strict',
+ linesep=os.linesep, append=False):
+ r""" Write the given text to this file.
+
+ The default behavior is to overwrite any existing file;
+ to append instead, use the `append=True` keyword argument.
+
+ There are two differences between :meth:`write_text` and
+ :meth:`write_bytes`: newline handling and Unicode handling.
+ See below.
+
+ Parameters:
+
+ `text` - str/unicode - The text to be written.
+
+ `encoding` - str - The Unicode encoding that will be used.
+ This is ignored if `text` isn't a Unicode string.
+
+ `errors` - str - How to handle Unicode encoding errors.
+ Default is ``'strict'``. See ``help(unicode.encode)`` for the
+ options. This is ignored if `text` isn't a Unicode
+ string.
+
+ `linesep` - keyword argument - str/unicode - The sequence of
+ characters to be used to mark end-of-line. The default is
+ :data:`os.linesep`. You can also specify ``None`` to
+ leave all newlines as they are in `text`.
+
+ `append` - keyword argument - bool - Specifies what to do if
+ the file already exists (``True``: append to the end of it;
+ ``False``: overwrite it.) The default is ``False``.
+
+
+ --- Newline handling.
+
+ ``write_text()`` converts all standard end-of-line sequences
+ (``'\n'``, ``'\r'``, and ``'\r\n'``) to your platform's default
+ end-of-line sequence (see :data:`os.linesep`; on Windows, for example,
+ the end-of-line marker is ``'\r\n'``).
+
+ If you don't like your platform's default, you can override it
+ using the `linesep=` keyword argument. If you specifically want
+ ``write_text()`` to preserve the newlines as-is, use ``linesep=None``.
+
+ This applies to Unicode text the same as to 8-bit text, except
+ there are three additional standard Unicode end-of-line sequences:
+ ``u'\x85'``, ``u'\r\x85'``, and ``u'\u2028'``.
+
+ (This is slightly different from when you open a file for
+ writing with ``fopen(filename, "w")`` in C or ``open(filename, 'w')``
+ in Python.)
+
+
+ --- Unicode
+
+ If `text` isn't Unicode, then apart from newline handling, the
+ bytes are written verbatim to the file. The `encoding` and
+ `errors` arguments are not used and must be omitted.
+
+ If `text` is Unicode, it is first converted to :func:`bytes` using the
+ specified `encoding` (or the default encoding if `encoding`
+ isn't specified). The `errors` argument applies only to this
+ conversion.
+
+ """
+ if isinstance(text, text_type):
+ if linesep is not None:
+ text = U_NEWLINE.sub(linesep, text)
+ text = text.encode(encoding or sys.getdefaultencoding(), errors)
+ else:
+ assert encoding is None
+ text = NEWLINE.sub(linesep, text)
+ self.write_bytes(text, append=append)
+
+ def lines(self, encoding=None, errors='strict', retain=True):
+ r""" Open this file, read all lines, return them in a list.
+
+ Optional arguments:
+ `encoding` - The Unicode encoding (or character set) of
+ the file. The default is ``None``, meaning the content
+ of the file is read as 8-bit characters and returned
+ as a list of (non-Unicode) str objects.
+ `errors` - How to handle Unicode errors; see help(str.decode)
+ for the options. Default is ``'strict'``.
+ `retain` - If ``True``, retain newline characters; but all newline
+ character combinations (``'\r'``, ``'\n'``, ``'\r\n'``) are
+ translated to ``'\n'``. If ``False``, newline characters are
+ stripped off. Default is ``True``.
+
+ This uses ``'U'`` mode.
+
+ .. seealso:: :meth:`text`
+ """
+ if encoding is None and retain:
+ with self.open('U') as f:
+ return f.readlines()
+ else:
+ return self.text(encoding, errors).splitlines(retain)
+
+ def write_lines(self, lines, encoding=None, errors='strict',
+ linesep=os.linesep, append=False):
+ r""" Write the given lines of text to this file.
+
+ By default this overwrites any existing file at this path.
+
+ This puts a platform-specific newline sequence on every line.
+ See `linesep` below.
+
+ `lines` - A list of strings.
+
+ `encoding` - A Unicode encoding to use. This applies only if
+ `lines` contains any Unicode strings.
+
+ `errors` - How to handle errors in Unicode encoding. This
+ also applies only to Unicode strings.
+
+ linesep - The desired line-ending. This line-ending is
+ applied to every line. If a line already has any
+ standard line ending (``'\r'``, ``'\n'``, ``'\r\n'``,
+ ``u'\x85'``, ``u'\r\x85'``, ``u'\u2028'``), that will
+ be stripped off and this will be used instead. The
+ default is os.linesep, which is platform-dependent
+ (``'\r\n'`` on Windows, ``'\n'`` on Unix, etc.).
+ Specify ``None`` to write the lines as-is, like
+ :meth:`file.writelines`.
+
+ Use the keyword argument ``append=True`` to append lines to the
+ file. The default is to overwrite the file.
+
+ .. warning ::
+
+ When you use this with Unicode data, if the encoding of the
+ existing data in the file is different from the encoding
+ you specify with the `encoding=` parameter, the result is
+ mixed-encoding data, which can really confuse someone trying
+ to read the file later.
+ """
+ with self.open('ab' if append else 'wb') as f:
+ for l in lines:
+ isUnicode = isinstance(l, text_type)
+ if linesep is not None:
+ pattern = U_NL_END if isUnicode else NL_END
+ l = pattern.sub('', l) + linesep
+ if isUnicode:
+ l = l.encode(encoding or sys.getdefaultencoding(), errors)
+ f.write(l)
+
+ def read_md5(self):
+ """ Calculate the md5 hash for this file.
+
+ This reads through the entire file.
+
+ .. seealso:: :meth:`read_hash`
+ """
+ return self.read_hash('md5')
+
+ def _hash(self, hash_name):
+ """ Returns a hash object for the file at the current path.
+
+ `hash_name` should be a hash algo name (such as ``'md5'`` or ``'sha1'``)
+ that's available in the :mod:`hashlib` module.
+ """
+ m = hashlib.new(hash_name)
+ for chunk in self.chunks(8192, mode="rb"):
+ m.update(chunk)
+ return m
+
+ def read_hash(self, hash_name):
+ """ Calculate given hash for this file.
+
+ List of supported hashes can be obtained from :mod:`hashlib` package.
+ This reads the entire file.
+
+ .. seealso:: :meth:`hashlib.hash.digest`
+ """
+ return self._hash(hash_name).digest()
+
+ def read_hexhash(self, hash_name):
+ """ Calculate given hash for this file, returning hexdigest.
+
+ List of supported hashes can be obtained from :mod:`hashlib` package.
+ This reads the entire file.
+
+ .. seealso:: :meth:`hashlib.hash.hexdigest`
+ """
+ return self._hash(hash_name).hexdigest()
+
+ # --- Methods for querying the filesystem.
+ # N.B. On some platforms, the os.path functions may be implemented in C
+ # (e.g. isdir on Windows, Python 3.2.2), and compiled functions don't get
+ # bound. Playing it safe and wrapping them all in method calls.
+
+ def isabs(self):
+ """ .. seealso:: :func:`os.path.isabs` """
+ return self.module.isabs(self)
+
+ def exists(self):
+ """ .. seealso:: :func:`os.path.exists` """
+ return self.module.exists(self)
+
+ def isdir(self):
+ """ .. seealso:: :func:`os.path.isdir` """
+ return self.module.isdir(self)
+
+ def isfile(self):
+ """ .. seealso:: :func:`os.path.isfile` """
+ return self.module.isfile(self)
+
+ def islink(self):
+ """ .. seealso:: :func:`os.path.islink` """
+ return self.module.islink(self)
+
+ def ismount(self):
+ """ .. seealso:: :func:`os.path.ismount` """
+ return self.module.ismount(self)
+
    def samefile(self, other):
        """ .. seealso:: :func:`os.path.samefile` """
        if not hasattr(self.module, 'samefile'):
            # Fallback for path modules lacking samefile (e.g. ntpath on
            # older Pythons): compare fully normalized real paths instead
            # of device/inode pairs.
            other = Path(other).realpath().normpath().normcase()
            return self.realpath().normpath().normcase() == other
        return self.module.samefile(self, other)
+
+ def getatime(self):
+ """ .. seealso:: :attr:`atime`, :func:`os.path.getatime` """
+ return self.module.getatime(self)
+
+ atime = property(
+ getatime, None, None,
+ """ Last access time of the file.
+
+ .. seealso:: :meth:`getatime`, :func:`os.path.getatime`
+ """)
+
+ def getmtime(self):
+ """ .. seealso:: :attr:`mtime`, :func:`os.path.getmtime` """
+ return self.module.getmtime(self)
+
+ mtime = property(
+ getmtime, None, None,
+ """ Last-modified time of the file.
+
+ .. seealso:: :meth:`getmtime`, :func:`os.path.getmtime`
+ """)
+
+ def getctime(self):
+ """ .. seealso:: :attr:`ctime`, :func:`os.path.getctime` """
+ return self.module.getctime(self)
+
+ ctime = property(
+ getctime, None, None,
+ """ Creation time of the file.
+
+ .. seealso:: :meth:`getctime`, :func:`os.path.getctime`
+ """)
+
+ def getsize(self):
+ """ .. seealso:: :attr:`size`, :func:`os.path.getsize` """
+ return self.module.getsize(self)
+
+ size = property(
+ getsize, None, None,
+ """ Size of the file, in bytes.
+
+ .. seealso:: :meth:`getsize`, :func:`os.path.getsize`
+ """)
+
+ if hasattr(os, 'access'):
+ def access(self, mode):
+ """ Return ``True`` if current user has access to this path.
+
+ mode - One of the constants :data:`os.F_OK`, :data:`os.R_OK`,
+ :data:`os.W_OK`, :data:`os.X_OK`
+
+ .. seealso:: :func:`os.access`
+ """
+ return os.access(self, mode)
+
+ def stat(self):
+ """ Perform a ``stat()`` system call on this path.
+
+ .. seealso:: :meth:`lstat`, :func:`os.stat`
+ """
+ return os.stat(self)
+
+ def lstat(self):
+ """ Like :meth:`stat`, but do not follow symbolic links.
+
+ .. seealso:: :meth:`stat`, :func:`os.lstat`
+ """
+ return os.lstat(self)
+
    def __get_owner_windows(self):
        """
        Return the name of the owner of this file or directory. Follow
        symbolic links.

        Return a name of the form ``r'DOMAIN\\User Name'``; may be a group.

        .. seealso:: :attr:`owner`
        """
        # Fetch the security descriptor's owner SID, then resolve it to an
        # account name (None = look up on the local machine).
        desc = win32security.GetFileSecurity(
            self, win32security.OWNER_SECURITY_INFORMATION)
        sid = desc.GetSecurityDescriptorOwner()
        account, domain, typecode = win32security.LookupAccountSid(None, sid)
        return domain + '\\' + account

    def __get_owner_unix(self):
        """
        Return the name of the owner of this file or directory. Follow
        symbolic links.

        .. seealso:: :attr:`owner`
        """
        # stat() follows symlinks; map the numeric uid to a login name.
        st = self.stat()
        return pwd.getpwuid(st.st_uid).pw_name

    def __get_owner_not_implemented(self):
        # Fallback when neither win32security nor pwd is available.
        raise NotImplementedError("Ownership not available on this platform.")

    # Select the implementation once, at class-definition time, based on
    # which optional modules were importable (presumably guarded imports
    # earlier in the file -- confirm against the module header).
    if 'win32security' in globals():
        get_owner = __get_owner_windows
    elif 'pwd' in globals():
        get_owner = __get_owner_unix
    else:
        get_owner = __get_owner_not_implemented

    owner = property(
        get_owner, None, None,
        """ Name of the owner of this file or directory.

        .. seealso:: :meth:`get_owner`""")
+
    # Both calls below exist only on platforms whose os module provides them.
    if hasattr(os, 'statvfs'):
        def statvfs(self):
            """ Perform a ``statvfs()`` system call on this path.

            .. seealso:: :func:`os.statvfs`
            """
            return os.statvfs(self)

    if hasattr(os, 'pathconf'):
        def pathconf(self, name):
            """ Return the configuration value `name` for this path.

            .. seealso:: :func:`os.pathconf` """
            return os.pathconf(self, name)
+
    #
    # --- Modifying operations on files and directories

    def utime(self, times):
        """ Set the access and modified times of this file.

        `times` is passed straight through to :func:`os.utime`: either
        ``None`` (use the current time) or an ``(atime, mtime)`` pair.

        .. seealso:: :func:`os.utime`
        """
        os.utime(self, times)
        return self

    def chmod(self, mode):
        """
        Set the mode. May be the new mode (os.chmod behavior) or a symbolic
        mode string such as ``'u+x'`` or ``'a=r,u+w'`` (parsed by
        :func:`_multi_permission_mask`).

        .. seealso:: :func:`os.chmod`
        """
        if isinstance(mode, string_types):
            # Symbolic modes are applied relative to the current mode bits.
            mask = _multi_permission_mask(mode)
            mode = mask(self.stat().st_mode)
        os.chmod(self, mode)
        return self
+
+ def chown(self, uid=-1, gid=-1):
+ """
+ Change the owner and group by names rather than the uid or gid numbers.
+
+ .. seealso:: :func:`os.chown`
+ """
+ if hasattr(os, 'chown'):
+ if 'pwd' in globals() and isinstance(uid, string_types):
+ uid = pwd.getpwnam(uid).pw_uid
+ if 'grp' in globals() and isinstance(gid, string_types):
+ gid = grp.getgrnam(gid).gr_gid
+ os.chown(self, uid, gid)
+ else:
+ raise NotImplementedError("Ownership not available on this platform.")
+ return self
+
    def rename(self, new):
        """ Rename this path to `new` and return a path object for the new
        name (``self`` no longer refers to an existing file).

        .. seealso:: :func:`os.rename` """
        os.rename(self, new)
        return self._next_class(new)

    def renames(self, new):
        """ Like :meth:`rename`, but creates intermediate directories and
        prunes emptied ones.

        .. seealso:: :func:`os.renames` """
        os.renames(self, new)
        return self._next_class(new)

    #
    # --- Create/delete operations on directories

    def mkdir(self, mode=0o777):
        """ .. seealso:: :func:`os.mkdir` """
        os.mkdir(self, mode)
        return self

    def mkdir_p(self, mode=0o777):
        """ Like :meth:`mkdir`, but does not raise an exception if the
        directory already exists. """
        try:
            self.mkdir(mode)
        except OSError:
            # Python 2/3 compatible access to the in-flight exception.
            _, e, _ = sys.exc_info()
            # NOTE(review): EEXIST is also raised when a non-directory file
            # occupies this path; that case is silently swallowed here --
            # confirm that is intended.
            if e.errno != errno.EEXIST:
                raise
        return self

    def makedirs(self, mode=0o777):
        """ .. seealso:: :func:`os.makedirs` """
        os.makedirs(self, mode)
        return self

    def makedirs_p(self, mode=0o777):
        """ Like :meth:`makedirs`, but does not raise an exception if the
        directory already exists. """
        try:
            self.makedirs(mode)
        except OSError:
            _, e, _ = sys.exc_info()
            if e.errno != errno.EEXIST:
                raise
        return self
+
    def rmdir(self):
        """ Remove this (empty) directory.

        .. seealso:: :func:`os.rmdir` """
        os.rmdir(self)
        return self
+
+ def rmdir_p(self):
+ """ Like :meth:`rmdir`, but does not raise an exception if the
+ directory is not empty or does not exist. """
+ try:
+ self.rmdir()
+ except OSError:
+ _, e, _ = sys.exc_info()
+ if e.errno != errno.ENOTEMPTY and e.errno != errno.EEXIST:
+ raise
+ return self
+
    def removedirs(self):
        """ Remove this directory, then prune now-empty ancestors.

        .. seealso:: :func:`os.removedirs` """
        os.removedirs(self)
        return self
+
+ def removedirs_p(self):
+ """ Like :meth:`removedirs`, but does not raise an exception if the
+ directory is not empty or does not exist. """
+ try:
+ self.removedirs()
+ except OSError:
+ _, e, _ = sys.exc_info()
+ if e.errno != errno.ENOTEMPTY and e.errno != errno.EEXIST:
+ raise
+ return self
+
    # --- Modifying operations on files

    def touch(self):
        """ Set the access/modified times of this file to the current time.
        Create the file if it does not exist.
        """
        # O_CREAT without O_TRUNC: creates if missing, never clobbers data.
        fd = os.open(self, os.O_WRONLY | os.O_CREAT, 0o666)
        os.close(fd)
        # None means "now" for both atime and mtime.
        os.utime(self, None)
        return self

    def remove(self):
        """ .. seealso:: :func:`os.remove` """
        os.remove(self)
        return self

    def remove_p(self):
        """ Like :meth:`remove`, but does not raise an exception if the
        file does not exist. """
        try:
            self.unlink()
        except OSError:
            # Python 2/3 compatible access to the in-flight exception.
            _, e, _ = sys.exc_info()
            if e.errno != errno.ENOENT:
                raise
        return self

    def unlink(self):
        """ .. seealso:: :func:`os.unlink` """
        os.unlink(self)
        return self

    def unlink_p(self):
        """ Like :meth:`unlink`, but does not raise an exception if the
        file does not exist.  Equivalent to :meth:`remove_p`. """
        self.remove_p()
        return self
+
    # --- Links
    # Each method exists only where the platform's os module supports it.

    if hasattr(os, 'link'):
        def link(self, newpath):
            """ Create a hard link at `newpath`, pointing to this file.

            .. seealso:: :func:`os.link`
            """
            os.link(self, newpath)
            return self._next_class(newpath)

    if hasattr(os, 'symlink'):
        def symlink(self, newlink):
            """ Create a symbolic link at `newlink`, pointing here.

            Note the direction: ``self`` is the link *target*; `newlink`
            is the link that gets created.

            .. seealso:: :func:`os.symlink`
            """
            os.symlink(self, newlink)
            return self._next_class(newlink)

    if hasattr(os, 'readlink'):
        def readlink(self):
            """ Return the path to which this symbolic link points.

            The result may be an absolute or a relative path.

            .. seealso:: :meth:`readlinkabs`, :func:`os.readlink`
            """
            return self._next_class(os.readlink(self))

        def readlinkabs(self):
            """ Return the path to which this symbolic link points.

            The result is always an absolute path.

            .. seealso:: :meth:`readlink`, :func:`os.readlink`
            """
            p = self.readlink()
            if p.isabs():
                return p
            else:
                # A relative target is resolved against the link's directory.
                return (self.parent / p).abspath()
+
    # High-level functions from shutil
    # These functions will be bound to the instance such that
    # Path(name).copy(target) will invoke shutil.copy(name, target),
    # i.e. the path instance becomes the first positional argument.

    copyfile = shutil.copyfile
    copymode = shutil.copymode
    copystat = shutil.copystat
    copy = shutil.copy
    copy2 = shutil.copy2
    copytree = shutil.copytree
    if hasattr(shutil, 'move'):
        move = shutil.move
    rmtree = shutil.rmtree

    def rmtree_p(self):
        """ Like :meth:`rmtree`, but does not raise an exception if the
        directory does not exist. """
        try:
            self.rmtree()
        except OSError:
            # Python 2/3 compatible access to the in-flight exception.
            _, e, _ = sys.exc_info()
            if e.errno != errno.ENOENT:
                raise
        return self

    def chdir(self):
        """ Make this directory the process's working directory.

        .. seealso:: :func:`os.chdir` """
        os.chdir(self)

    # Short alias, matching the shell command.
    cd = chdir
+
    def merge_tree(self, dst, symlinks=False, *args, **kwargs):
        """
        Copy entire contents of self to dst, overwriting existing
        contents in dst with those in self.

        If the additional keyword `update` is True, each
        `src` will only be copied if `dst` does not exist,
        or `src` is newer than `dst`.

        Note that the technique employed stages the files in a temporary
        directory first, so this function is not suitable for merging
        trees with large files, especially if the temporary directory
        is not capable of storing a copy of the entire source tree.
        """
        # `update` belongs to dir_util.copy_tree; everything else is
        # forwarded to copytree, so pop it before delegating.
        update = kwargs.pop('update', False)
        with tempdir() as _temp_dir:
            # first copy the tree to a stage directory to support
            # the parameters and behavior of copytree.
            # str(hash(self)) gives a collision-unlikely name inside the
            # fresh temporary directory.
            stage = _temp_dir / str(hash(self))
            self.copytree(stage, symlinks, *args, **kwargs)
            # now copy everything from the stage directory using
            # the semantics of dir_util.copy_tree
            dir_util.copy_tree(stage, dst, preserve_symlinks=symlinks,
                update=update)
+
    #
    # --- Special stuff from os

    if hasattr(os, 'chroot'):
        def chroot(self):
            """ .. seealso:: :func:`os.chroot` """
            # NOTE(review): unlike most mutators in this class this does not
            # return self -- confirm whether chaining was intended.
            os.chroot(self)

    if hasattr(os, 'startfile'):
        def startfile(self):
            """ Open this file with its associated application (Windows only;
            ``os.startfile`` exists only there).

            .. seealso:: :func:`os.startfile` """
            os.startfile(self)
            return self
+
    # in-place re-writing, courtesy of Martijn Pieters
    # http://www.zopatista.com/python/2013/11/26/inplace-file-rewriting/
    @contextlib.contextmanager
    def in_place(self, mode='r', buffering=-1, encoding=None, errors=None,
                 newline=None, backup_extension=None):
        """
        A context in which a file may be re-written in-place with new content.

        Yields a tuple of :samp:`({readable}, {writable})` file objects, where `writable`
        replaces `readable`.

        If an exception occurs, the old file is restored, removing the
        written data.

        Mode *must not* use ``'w'``, ``'a'``, or ``'+'``; only read-only-modes are
        allowed. A :exc:`ValueError` is raised on invalid modes.

        For example, to add line numbers to a file::

            p = Path(filename)
            assert p.isfile()
            with p.in_place() as (reader, writer):
                for number, line in enumerate(reader, 1):
                    writer.write('{0:3}: '.format(number))
                    writer.write(line)

        Thereafter, the file at `filename` will have line numbers in it.
        """
        import io

        if set(mode).intersection('wa+'):
            raise ValueError('Only read-only file modes can be used')

        # move existing file to backup, create new file with same permissions
        # borrowed extensively from the fileinput module
        backup_fn = self + (backup_extension or os.extsep + 'bak')
        # A stale backup from a previous run would make os.rename fail on
        # some platforms; remove it best-effort.
        try:
            os.unlink(backup_fn)
        except os.error:
            pass
        os.rename(self, backup_fn)
        readable = io.open(backup_fn, mode, buffering=buffering,
                           encoding=encoding, errors=errors, newline=newline)
        try:
            perm = os.fstat(readable.fileno()).st_mode
        except OSError:
            # fstat unavailable: fall back to a plain open without trying
            # to replicate the original permissions.
            writable = open(self, 'w' + mode.replace('r', ''),
                            buffering=buffering, encoding=encoding, errors=errors,
                            newline=newline)
        else:
            # Recreate the target with the original file's permission bits.
            os_mode = os.O_CREAT | os.O_WRONLY | os.O_TRUNC
            if hasattr(os, 'O_BINARY'):
                os_mode |= os.O_BINARY
            fd = os.open(self, os_mode, perm)
            writable = io.open(fd, "w" + mode.replace('r', ''),
                               buffering=buffering, encoding=encoding, errors=errors,
                               newline=newline)
            try:
                if hasattr(os, 'chmod'):
                    # os.open honors the umask; force the exact mode bits.
                    os.chmod(self, perm)
            except OSError:
                pass
        try:
            yield readable, writable
        except Exception:
            # move backup back
            readable.close()
            writable.close()
            try:
                os.unlink(self)
            except os.error:
                pass
            os.rename(backup_fn, self)
            raise
        else:
            readable.close()
            writable.close()
        finally:
            # On success the backup is obsolete; on failure it was already
            # renamed back into place, so the unlink fails harmlessly.
            try:
                os.unlink(backup_fn)
            except os.error:
                pass
+
    @ClassProperty
    @classmethod
    def special(cls):
        """
        Return a partial constructor for a :class:`SpecialResolver` bound to
        this class, suitable for referencing a suitable directory for the
        relevant platform for the given type of content.

        For example, to get a user config directory, invoke:

            dir = Path.special().user.config

        Uses the `appdirs
        `_ to resolve
        the paths in a platform-friendly way.

        To create a config directory for 'My App', consider:

            dir = Path.special("My App").user.config.makedirs_p()

        If the ``appdirs`` module is not installed, invocation
        of special will raise an ImportError.
        """
        return functools.partial(SpecialResolver, cls)
+
+
class SpecialResolver(object):
    """
    Resolve platform-appropriate special directories (config, data, cache,
    ...) via the third-party ``appdirs`` package, wrapping each result in
    the configured Path class.
    """
    class ResolverScope:
        # Captures a scope (e.g. 'user' or 'site' -- see appdirs) so that
        # attribute access picks the content class: resolver.user.config.
        def __init__(self, paths, scope):
            self.paths = paths
            self.scope = scope

        def __getattr__(self, class_):
            return self.paths.get_dir(self.scope, class_)

    def __init__(self, path_class, *args, **kwargs):
        # Imported lazily so the module itself has no hard appdirs
        # dependency; ImportError surfaces here if appdirs is absent.
        appdirs = importlib.import_module('appdirs')

        # let appname default to None until
        # https://github.com/ActiveState/appdirs/issues/55 is solved.
        not args and kwargs.setdefault('appname', None)

        # Assign through vars() so the write does not interact with this
        # class's attribute protocol (presumably deliberate -- confirm).
        vars(self).update(
            path_class=path_class,
            wrapper=appdirs.AppDirs(*args, **kwargs),
        )

    def __getattr__(self, scope):
        return self.ResolverScope(self, scope)

    def get_dir(self, scope, class_):
        """
        Return the callable function from appdirs, but with the
        result wrapped in self.path_class
        """
        # e.g. scope='user', class_='config' -> appdirs 'user_config_dir'.
        prop_name = '{scope}_{class_}_dir'.format(**locals())
        value = getattr(self.wrapper, prop_name)
        MultiPath = Multi.for_class(self.path_class)
        return MultiPath.detect(value)
+
+
class Multi:
    """
    A mix-in for a Path which may contain multiple Path separated by pathsep.
    """
    @classmethod
    def for_class(cls, path_cls):
        # Build a 'Multi<PathName>' class mixing this class into the given
        # Path class.
        name = 'Multi' + path_cls.__name__
        if PY2:
            # type() requires a native (byte) string name on Python 2.
            name = str(name)
        return type(name, (cls, path_cls), {})

    @classmethod
    def detect(cls, input):
        """ Wrap `input` in a Multi path only if it actually contains
        ``os.pathsep``; otherwise return a plain single path. """
        if os.pathsep not in input:
            cls = cls._next_class
        return cls(input)

    def __iter__(self):
        # Yield the component paths, each as the plain (non-Multi) class.
        return iter(map(self._next_class, self.split(os.pathsep)))

    @ClassProperty
    @classmethod
    def _next_class(cls):
        """
        Multi-subclasses should use the parent class
        """
        # First class in the MRO that is not Multi-derived, i.e. the
        # concrete Path class this Multi was mixed into.
        return next(
            class_
            for class_ in cls.__mro__
            if not issubclass(class_, Multi)
        )
+
+
class tempdir(Path):
    """
    A temporary directory via :func:`tempfile.mkdtemp`, and constructed with the
    same parameters that you can use as a context manager.

    Example:

        with tempdir() as d:
            # do stuff with the Path object "d"

        # here the directory is deleted automatically

    .. seealso:: :func:`tempfile.mkdtemp`
    """

    @ClassProperty
    @classmethod
    def _next_class(cls):
        # Derived paths (joins, parents, ...) should be plain Paths, not
        # new tempdirs.
        return Path

    def __new__(cls, *args, **kwargs):
        # The directory is created here, at construction time; arguments
        # are those of tempfile.mkdtemp.
        dirname = tempfile.mkdtemp(*args, **kwargs)
        return super(tempdir, cls).__new__(cls, dirname)

    def __init__(self, *args, **kwargs):
        # Swallow the mkdtemp arguments consumed by __new__ so they are not
        # forwarded to the string-based initializer.
        pass

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # Clean up only on success; on error the tree is left in place
        # (presumably so the failure can be inspected -- confirm).
        if not exc_value:
            self.rmtree()
+
+
def _multi_permission_mask(mode):
    """
    Support multiple, comma-separated Unix chmod symbolic modes.

    Returns a function applying each component mask in order, left to
    right, exactly as the composed form would.

    >>> _multi_permission_mask('a=r,u+w')(0) == 0o644
    True
    """
    component_masks = [_permission_mask(spec) for spec in mode.split(',')]

    def apply_all(current):
        for component in component_masks:
            current = component(current)
        return current

    return apply_all
+
+
+def _permission_mask(mode):
+ """
+ Convert a Unix chmod symbolic mode like ``'ugo+rwx'`` to a function
+ suitable for applying to a mask to affect that change.
+
+ >>> mask = _permission_mask('ugo+rwx')
+ >>> mask(0o554) == 0o777
+ True
+
+ >>> _permission_mask('go-x')(0o777) == 0o766
+ True
+
+ >>> _permission_mask('o-x')(0o445) == 0o444
+ True
+
+ >>> _permission_mask('a+x')(0) == 0o111
+ True
+
+ >>> _permission_mask('a=rw')(0o057) == 0o666
+ True
+
+ >>> _permission_mask('u=x')(0o666) == 0o166
+ True
+
+ >>> _permission_mask('g=')(0o157) == 0o107
+ True
+ """
+ # parse the symbolic mode
+ parsed = re.match('(?P[ugoa]+)(?P[-+=])(?P[rwx]*)$', mode)
+ if not parsed:
+ raise ValueError("Unrecognized symbolic mode", mode)
+
+ # generate a mask representing the specified permission
+ spec_map = dict(r=4, w=2, x=1)
+ specs = (spec_map[perm] for perm in parsed.group('what'))
+ spec = functools.reduce(operator.or_, specs, 0)
+
+ # now apply spec to each subject in who
+ shift_map = dict(u=6, g=3, o=0)
+ who = parsed.group('who').replace('a', 'ugo')
+ masks = (spec << shift_map[subj] for subj in who)
+ mask = functools.reduce(operator.or_, masks)
+
+ op = parsed.group('op')
+
+ # if op is -, invert the mask
+ if op == '-':
+ mask ^= 0o777
+
+ # if op is =, retain extant values for unreferenced subjects
+ if op == '=':
+ masks = (0o7 << shift_map[subj] for subj in who)
+ retain = functools.reduce(operator.or_, masks) ^ 0o777
+
+ op_map = {
+ '+': operator.or_,
+ '-': operator.and_,
+ '=': lambda mask, target: target & retain ^ mask,
+ }
+ return functools.partial(op_map[op], mask)
+
+
class CaseInsensitivePattern(text_type):
    """
    A string with a ``'normcase'`` property, suitable for passing to
    :meth:`listdir`, :meth:`dirs`, :meth:`files`, :meth:`walk`,
    :meth:`walkdirs`, or :meth:`walkfiles` to match case-insensitive.

    For example, to get all files ending in .py, .Py, .pY, or .PY in the
    current directory::

        from path import Path, CaseInsensitivePattern as ci
        Path('.').files(ci('*.py'))
    """

    @property
    def normcase(self):
        # ntpath.normcase folds case on every platform (posixpath's is a
        # no-op), so it is imported explicitly regardless of host OS.
        return __import__('ntpath').normcase
+
########################
# Backward-compatibility
class path(Path):
    """ Deprecated alias for :class:`Path`, kept for backward compatibility
    only. """
    def __new__(cls, *args, **kwargs):
        msg = "path is deprecated. Use Path instead."
        # stacklevel=2 attributes the warning to the caller's line, which
        # is where the deprecated usage actually is.
        warnings.warn(msg, DeprecationWarning, stacklevel=2)
        return Path.__new__(cls, *args, **kwargs)


__all__ += ['path']
########################
diff --git a/libs/test_path.py b/libs/test_path.py
new file mode 100644
index 00000000..f6aa1b67
--- /dev/null
+++ b/libs/test_path.py
@@ -0,0 +1,1119 @@
+# -*- coding: utf-8 -*-
+
+"""
+Tests for the path module.
+
+This suite runs on Linux, OS X, and Windows right now. To extend the
+platform support, just add appropriate pathnames for your
+platform (os.name) in each place where the p() function is called.
+Then report the result. If you can't get the test to run at all on
+your platform, there's probably a bug in path.py -- please report the issue
+in the issue tracker at https://github.com/jaraco/path.py.
+
+TestScratchDir.test_touch() takes a while to run. It sleeps a few
+seconds to allow some time to pass between calls to check the modify
+time on files.
+"""
+
+from __future__ import unicode_literals, absolute_import, print_function
+
+import codecs
+import os
+import sys
+import shutil
+import time
+import ntpath
+import posixpath
+import textwrap
+import platform
+import importlib
+
+import pytest
+
+from path import Path, tempdir
+from path import CaseInsensitivePattern as ci
+from path import SpecialResolver
+from path import Multi
+
+
def p(**choices):
    """ Choose a value from several possible values, based on os.name """
    platform_key = os.name
    return choices[platform_key]
+
+
+class TestBasics:
+ def test_relpath(self):
+ root = Path(p(nt='C:\\', posix='/'))
+ foo = root / 'foo'
+ quux = foo / 'quux'
+ bar = foo / 'bar'
+ boz = bar / 'Baz' / 'Boz'
+ up = Path(os.pardir)
+
+ # basics
+ assert root.relpathto(boz) == Path('foo')/'bar'/'Baz'/'Boz'
+ assert bar.relpathto(boz) == Path('Baz')/'Boz'
+ assert quux.relpathto(boz) == up/'bar'/'Baz'/'Boz'
+ assert boz.relpathto(quux) == up/up/up/'quux'
+ assert boz.relpathto(bar) == up/up
+
+ # Path is not the first element in concatenation
+ assert root.relpathto(boz) == 'foo'/Path('bar')/'Baz'/'Boz'
+
+ # x.relpathto(x) == curdir
+ assert root.relpathto(root) == os.curdir
+ assert boz.relpathto(boz) == os.curdir
+ # Make sure case is properly noted (or ignored)
+ assert boz.relpathto(boz.normcase()) == os.curdir
+
+ # relpath()
+ cwd = Path(os.getcwd())
+ assert boz.relpath() == cwd.relpathto(boz)
+
+ if os.name == 'nt':
+ # Check relpath across drives.
+ d = Path('D:\\')
+ assert d.relpathto(boz) == boz
+
+ def test_construction_from_none(self):
+ """
+
+ """
+ try:
+ Path(None)
+ except TypeError:
+ pass
+ else:
+ raise Exception("DID NOT RAISE")
+
+ def test_construction_from_int(self):
+ """
+ Path class will construct a path as a string of the number
+ """
+ assert Path(1) == '1'
+
+ def test_string_compatibility(self):
+ """ Test compatibility with ordinary strings. """
+ x = Path('xyzzy')
+ assert x == 'xyzzy'
+ assert x == str('xyzzy')
+
+ # sorting
+ items = [Path('fhj'),
+ Path('fgh'),
+ 'E',
+ Path('d'),
+ 'A',
+ Path('B'),
+ 'c']
+ items.sort()
+ assert items == ['A', 'B', 'E', 'c', 'd', 'fgh', 'fhj']
+
+ # Test p1/p1.
+ p1 = Path("foo")
+ p2 = Path("bar")
+ assert p1/p2 == p(nt='foo\\bar', posix='foo/bar')
+
+ def test_properties(self):
+ # Create sample path object.
+ f = p(nt='C:\\Program Files\\Python\\Lib\\xyzzy.py',
+ posix='/usr/local/python/lib/xyzzy.py')
+ f = Path(f)
+
+ # .parent
+ nt_lib = 'C:\\Program Files\\Python\\Lib'
+ posix_lib = '/usr/local/python/lib'
+ expected = p(nt=nt_lib, posix=posix_lib)
+ assert f.parent == expected
+
+ # .name
+ assert f.name == 'xyzzy.py'
+ assert f.parent.name == p(nt='Lib', posix='lib')
+
+ # .ext
+ assert f.ext == '.py'
+ assert f.parent.ext == ''
+
+ # .drive
+ assert f.drive == p(nt='C:', posix='')
+
+ def test_methods(self):
+ # .abspath()
+ assert Path(os.curdir).abspath() == os.getcwd()
+
+ # .getcwd()
+ cwd = Path.getcwd()
+ assert isinstance(cwd, Path)
+ assert cwd == os.getcwd()
+
+ def test_UNC(self):
+ if hasattr(os.path, 'splitunc'):
+ p = Path(r'\\python1\share1\dir1\file1.txt')
+ assert p.uncshare == r'\\python1\share1'
+ assert p.splitunc() == os.path.splitunc(str(p))
+
+ def test_explicit_module(self):
+ """
+ The user may specify an explicit path module to use.
+ """
+ nt_ok = Path.using_module(ntpath)(r'foo\bar\baz')
+ posix_ok = Path.using_module(posixpath)(r'foo/bar/baz')
+ posix_wrong = Path.using_module(posixpath)(r'foo\bar\baz')
+
+ assert nt_ok.dirname() == r'foo\bar'
+ assert posix_ok.dirname() == r'foo/bar'
+ assert posix_wrong.dirname() == ''
+
+ assert nt_ok / 'quux' == r'foo\bar\baz\quux'
+ assert posix_ok / 'quux' == r'foo/bar/baz/quux'
+
+ def test_explicit_module_classes(self):
+ """
+ Multiple calls to path.using_module should produce the same class.
+ """
+ nt_path = Path.using_module(ntpath)
+ assert nt_path is Path.using_module(ntpath)
+ assert nt_path.__name__ == 'Path_ntpath'
+
+ def test_joinpath_on_instance(self):
+ res = Path('foo')
+ foo_bar = res.joinpath('bar')
+ assert foo_bar == p(nt='foo\\bar', posix='foo/bar')
+
+ def test_joinpath_to_nothing(self):
+ res = Path('foo')
+ assert res.joinpath() == res
+
+ def test_joinpath_on_class(self):
+ "Construct a path from a series of strings"
+ foo_bar = Path.joinpath('foo', 'bar')
+ assert foo_bar == p(nt='foo\\bar', posix='foo/bar')
+
+ def test_joinpath_fails_on_empty(self):
+ "It doesn't make sense to join nothing at all"
+ try:
+ Path.joinpath()
+ except TypeError:
+ pass
+ else:
+ raise Exception("did not raise")
+
+ def test_joinpath_returns_same_type(self):
+ path_posix = Path.using_module(posixpath)
+ res = path_posix.joinpath('foo')
+ assert isinstance(res, path_posix)
+ res2 = res.joinpath('bar')
+ assert isinstance(res2, path_posix)
+ assert res2 == 'foo/bar'
+
+
+class TestSelfReturn:
+ """
+ Some methods don't necessarily return any value (e.g. makedirs,
+ makedirs_p, rename, mkdir, touch, chroot). These methods should return
+ self anyhow to allow methods to be chained.
+ """
+ def test_makedirs_p(self, tmpdir):
+ """
+ Path('foo').makedirs_p() == Path('foo')
+ """
+ p = Path(tmpdir) / "newpath"
+ ret = p.makedirs_p()
+ assert p == ret
+
+ def test_makedirs_p_extant(self, tmpdir):
+ p = Path(tmpdir)
+ ret = p.makedirs_p()
+ assert p == ret
+
+ def test_rename(self, tmpdir):
+ p = Path(tmpdir) / "somefile"
+ p.touch()
+ target = Path(tmpdir) / "otherfile"
+ ret = p.rename(target)
+ assert target == ret
+
+ def test_mkdir(self, tmpdir):
+ p = Path(tmpdir) / "newdir"
+ ret = p.mkdir()
+ assert p == ret
+
+ def test_touch(self, tmpdir):
+ p = Path(tmpdir) / "empty file"
+ ret = p.touch()
+ assert p == ret
+
+
+class TestScratchDir:
+ """
+ Tests that run in a temporary directory (does not test tempdir class)
+ """
+ def test_context_manager(self, tmpdir):
+ """Can be used as context manager for chdir."""
+ d = Path(tmpdir)
+ subdir = d / 'subdir'
+ subdir.makedirs()
+ old_dir = os.getcwd()
+ with subdir:
+ assert os.getcwd() == os.path.realpath(subdir)
+ assert os.getcwd() == old_dir
+
+ def test_touch(self, tmpdir):
+ # NOTE: This test takes a long time to run (~10 seconds).
+ # It sleeps several seconds because on Windows, the resolution
+ # of a file's mtime and ctime is about 2 seconds.
+ #
+ # atime isn't tested because on Windows the resolution of atime
+ # is something like 24 hours.
+
+ threshold = 1
+
+ d = Path(tmpdir)
+ f = d / 'test.txt'
+ t0 = time.time() - threshold
+ f.touch()
+ t1 = time.time() + threshold
+
+ assert f.exists()
+ assert f.isfile()
+ assert f.size == 0
+ assert t0 <= f.mtime <= t1
+ if hasattr(os.path, 'getctime'):
+ ct = f.ctime
+ assert t0 <= ct <= t1
+
+ time.sleep(threshold*2)
+ fobj = open(f, 'ab')
+ fobj.write('some bytes'.encode('utf-8'))
+ fobj.close()
+
+ time.sleep(threshold*2)
+ t2 = time.time() - threshold
+ f.touch()
+ t3 = time.time() + threshold
+
+ assert t0 <= t1 < t2 <= t3 # sanity check
+
+ assert f.exists()
+ assert f.isfile()
+ assert f.size == 10
+ assert t2 <= f.mtime <= t3
+ if hasattr(os.path, 'getctime'):
+ ct2 = f.ctime
+ if os.name == 'nt':
+ # On Windows, "ctime" is CREATION time
+ assert ct == ct2
+ assert ct2 < t2
+ else:
+ # On other systems, it might be the CHANGE time
+ # (especially on Unix, time of inode changes)
+ assert ct == ct2 or ct2 == f.mtime
+
+ def test_listing(self, tmpdir):
+ d = Path(tmpdir)
+ assert d.listdir() == []
+
+ f = 'testfile.txt'
+ af = d / f
+ assert af == os.path.join(d, f)
+ af.touch()
+ try:
+ assert af.exists()
+
+ assert d.listdir() == [af]
+
+ # .glob()
+ assert d.glob('testfile.txt') == [af]
+ assert d.glob('test*.txt') == [af]
+ assert d.glob('*.txt') == [af]
+ assert d.glob('*txt') == [af]
+ assert d.glob('*') == [af]
+ assert d.glob('*.html') == []
+ assert d.glob('testfile') == []
+ finally:
+ af.remove()
+
+ # Try a test with 20 files
+ files = [d / ('%d.txt' % i) for i in range(20)]
+ for f in files:
+ fobj = open(f, 'w')
+ fobj.write('some text\n')
+ fobj.close()
+ try:
+ files2 = d.listdir()
+ files.sort()
+ files2.sort()
+ assert files == files2
+ finally:
+ for f in files:
+ try:
+ f.remove()
+ except:
+ pass
+
+ def test_listdir_other_encoding(self, tmpdir):
+ """
+ Some filesystems allow non-character sequences in path names.
+ ``.listdir`` should still function in this case.
+ See issue #61 for details.
+ """
+ assert Path(tmpdir).listdir() == []
+ tmpdir_bytes = str(tmpdir).encode('ascii')
+
+ filename = 'r\xe9\xf1emi'.encode('latin-1')
+ pathname = os.path.join(tmpdir_bytes, filename)
+ with open(pathname, 'wb'):
+ pass
+ # first demonstrate that os.listdir works
+ assert os.listdir(tmpdir_bytes)
+
+ # now try with path.py
+ results = Path(tmpdir).listdir()
+ assert len(results) == 1
+ res, = results
+ assert isinstance(res, Path)
+ # OS X seems to encode the bytes in the filename as %XX characters.
+ if platform.system() == 'Darwin':
+ assert res.basename() == 'r%E9%F1emi'
+ return
+ assert len(res.basename()) == len(filename)
+
+ def test_makedirs(self, tmpdir):
+ d = Path(tmpdir)
+
+ # Placeholder file so that when removedirs() is called,
+ # it doesn't remove the temporary directory itself.
+ tempf = d / 'temp.txt'
+ tempf.touch()
+ try:
+ foo = d / 'foo'
+ boz = foo / 'bar' / 'baz' / 'boz'
+ boz.makedirs()
+ try:
+ assert boz.isdir()
+ finally:
+ boz.removedirs()
+ assert not foo.exists()
+ assert d.exists()
+
+ foo.mkdir(0o750)
+ boz.makedirs(0o700)
+ try:
+ assert boz.isdir()
+ finally:
+ boz.removedirs()
+ assert not foo.exists()
+ assert d.exists()
+ finally:
+ os.remove(tempf)
+
+ def assertSetsEqual(self, a, b):
+ ad = {}
+
+ for i in a:
+ ad[i] = None
+
+ bd = {}
+
+ for i in b:
+ bd[i] = None
+
+ assert ad == bd
+
+ def test_shutil(self, tmpdir):
+ # Note: This only tests the methods exist and do roughly what
+ # they should, neglecting the details as they are shutil's
+ # responsibility.
+
+ d = Path(tmpdir)
+ testDir = d / 'testdir'
+ testFile = testDir / 'testfile.txt'
+ testA = testDir / 'A'
+ testCopy = testA / 'testcopy.txt'
+ testLink = testA / 'testlink.txt'
+ testB = testDir / 'B'
+ testC = testB / 'C'
+ testCopyOfLink = testC / testA.relpathto(testLink)
+
+ # Create test dirs and a file
+ testDir.mkdir()
+ testA.mkdir()
+ testB.mkdir()
+
+ f = open(testFile, 'w')
+ f.write('x' * 10000)
+ f.close()
+
+ # Test simple file copying.
+ testFile.copyfile(testCopy)
+ assert testCopy.isfile()
+ assert testFile.bytes() == testCopy.bytes()
+
+ # Test copying into a directory.
+ testCopy2 = testA / testFile.name
+ testFile.copy(testA)
+ assert testCopy2.isfile()
+ assert testFile.bytes() == testCopy2.bytes()
+
+ # Make a link for the next test to use.
+ if hasattr(os, 'symlink'):
+ testFile.symlink(testLink)
+ else:
+ testFile.copy(testLink) # fallback
+
+ # Test copying directory tree.
+ testA.copytree(testC)
+ assert testC.isdir()
+ self.assertSetsEqual(
+ testC.listdir(),
+ [testC / testCopy.name,
+ testC / testFile.name,
+ testCopyOfLink])
+ assert not testCopyOfLink.islink()
+
+ # Clean up for another try.
+ testC.rmtree()
+ assert not testC.exists()
+
+ # Copy again, preserving symlinks.
+ testA.copytree(testC, True)
+ assert testC.isdir()
+ self.assertSetsEqual(
+ testC.listdir(),
+ [testC / testCopy.name,
+ testC / testFile.name,
+ testCopyOfLink])
+ if hasattr(os, 'symlink'):
+ assert testCopyOfLink.islink()
+ assert testCopyOfLink.readlink() == testFile
+
+ # Clean up.
+ testDir.rmtree()
+ assert not testDir.exists()
+ self.assertList(d.listdir(), [])
+
+ def assertList(self, listing, expected):
+ assert sorted(listing) == sorted(expected)
+
+ def test_patterns(self, tmpdir):
+ d = Path(tmpdir)
+ names = ['x.tmp', 'x.xtmp', 'x2g', 'x22', 'x.txt']
+ dirs = [d, d/'xdir', d/'xdir.tmp', d/'xdir.tmp'/'xsubdir']
+
+ for e in dirs:
+ if not e.isdir():
+ e.makedirs()
+
+ for name in names:
+ (e/name).touch()
+ self.assertList(d.listdir('*.tmp'), [d/'x.tmp', d/'xdir.tmp'])
+ self.assertList(d.files('*.tmp'), [d/'x.tmp'])
+ self.assertList(d.dirs('*.tmp'), [d/'xdir.tmp'])
+ self.assertList(d.walk(), [e for e in dirs
+ if e != d] + [e/n for e in dirs
+ for n in names])
+ self.assertList(d.walk('*.tmp'),
+ [e/'x.tmp' for e in dirs] + [d/'xdir.tmp'])
+ self.assertList(d.walkfiles('*.tmp'), [e/'x.tmp' for e in dirs])
+ self.assertList(d.walkdirs('*.tmp'), [d/'xdir.tmp'])
+
+ def test_unicode(self, tmpdir):
+ d = Path(tmpdir)
+ p = d/'unicode.txt'
+
+ def test(enc):
+ """ Test that path works with the specified encoding,
+ which must be capable of representing the entire range of
+ Unicode codepoints.
+ """
+
+ given = ('Hello world\n'
+ '\u0d0a\u0a0d\u0d15\u0a15\r\n'
+ '\u0d0a\u0a0d\u0d15\u0a15\x85'
+ '\u0d0a\u0a0d\u0d15\u0a15\u2028'
+ '\r'
+ 'hanging')
+ clean = ('Hello world\n'
+ '\u0d0a\u0a0d\u0d15\u0a15\n'
+ '\u0d0a\u0a0d\u0d15\u0a15\n'
+ '\u0d0a\u0a0d\u0d15\u0a15\n'
+ '\n'
+ 'hanging')
+ givenLines = [
+ ('Hello world\n'),
+ ('\u0d0a\u0a0d\u0d15\u0a15\r\n'),
+ ('\u0d0a\u0a0d\u0d15\u0a15\x85'),
+ ('\u0d0a\u0a0d\u0d15\u0a15\u2028'),
+ ('\r'),
+ ('hanging')]
+ expectedLines = [
+ ('Hello world\n'),
+ ('\u0d0a\u0a0d\u0d15\u0a15\n'),
+ ('\u0d0a\u0a0d\u0d15\u0a15\n'),
+ ('\u0d0a\u0a0d\u0d15\u0a15\n'),
+ ('\n'),
+ ('hanging')]
+ expectedLines2 = [
+ ('Hello world'),
+ ('\u0d0a\u0a0d\u0d15\u0a15'),
+ ('\u0d0a\u0a0d\u0d15\u0a15'),
+ ('\u0d0a\u0a0d\u0d15\u0a15'),
+ (''),
+ ('hanging')]
+
+ # write bytes manually to file
+ f = codecs.open(p, 'w', enc)
+ f.write(given)
+ f.close()
+
+ # test all 3 path read-fully functions, including
+ # path.lines() in unicode mode.
+ assert p.bytes() == given.encode(enc)
+ assert p.text(enc) == clean
+ assert p.lines(enc) == expectedLines
+ assert p.lines(enc, retain=False) == expectedLines2
+
+ # If this is UTF-16, that's enough.
+ # The rest of these will unfortunately fail because append=True
+ # mode causes an extra BOM to be written in the middle of the file.
+ # UTF-16 is the only encoding that has this problem.
+ if enc == 'UTF-16':
+ return
+
+ # Write Unicode to file using path.write_text().
+ cleanNoHanging = clean + '\n' # This test doesn't work with a
+ # hanging line.
+ p.write_text(cleanNoHanging, enc)
+ p.write_text(cleanNoHanging, enc, append=True)
+ # Check the result.
+ expectedBytes = 2 * cleanNoHanging.replace('\n',
+ os.linesep).encode(enc)
+ expectedLinesNoHanging = expectedLines[:]
+ expectedLinesNoHanging[-1] += '\n'
+ assert p.bytes() == expectedBytes
+ assert p.text(enc) == 2 * cleanNoHanging
+ assert p.lines(enc) == 2 * expectedLinesNoHanging
+ assert p.lines(enc, retain=False) == 2 * expectedLines2
+
+ # Write Unicode to file using path.write_lines().
+ # The output in the file should be exactly the same as last time.
+ p.write_lines(expectedLines, enc)
+ p.write_lines(expectedLines2, enc, append=True)
+ # Check the result.
+ assert p.bytes() == expectedBytes
+
+ # Now: same test, but using various newline sequences.
+ # If linesep is being properly applied, these will be converted
+ # to the platform standard newline sequence.
+ p.write_lines(givenLines, enc)
+ p.write_lines(givenLines, enc, append=True)
+ # Check the result.
+ assert p.bytes() == expectedBytes
+
+ # Same test, using newline sequences that are different
+ # from the platform default.
+ def testLinesep(eol):
+ p.write_lines(givenLines, enc, linesep=eol)
+ p.write_lines(givenLines, enc, linesep=eol, append=True)
+ expected = 2 * cleanNoHanging.replace('\n', eol).encode(enc)
+ assert p.bytes() == expected
+
+ testLinesep('\n')
+ testLinesep('\r')
+ testLinesep('\r\n')
+ testLinesep('\x0d\x85')
+
+ # Again, but with linesep=None.
+ p.write_lines(givenLines, enc, linesep=None)
+ p.write_lines(givenLines, enc, linesep=None, append=True)
+ # Check the result.
+ expectedBytes = 2 * given.encode(enc)
+ assert p.bytes() == expectedBytes
+ assert p.text(enc) == 2 * clean
+ expectedResultLines = expectedLines[:]
+ expectedResultLines[-1] += expectedLines[0]
+ expectedResultLines += expectedLines[1:]
+ assert p.lines(enc) == expectedResultLines
+
+ test('UTF-8')
+ test('UTF-16BE')
+ test('UTF-16LE')
+ test('UTF-16')
+
+ def test_chunks(self, tmpdir):
+ p = (tempdir() / 'test.txt').touch()
+ txt = "0123456789"
+ size = 5
+ p.write_text(txt)
+ for i, chunk in enumerate(p.chunks(size)):
+ assert chunk == txt[i * size:i * size + size]
+
+ assert i == len(txt) / size - 1
+
+ @pytest.mark.skipif(not hasattr(os.path, 'samefile'),
+ reason="samefile not present")
+ def test_samefile(self, tmpdir):
+ f1 = (tempdir() / '1.txt').touch()
+ f1.write_text('foo')
+ f2 = (tempdir() / '2.txt').touch()
+ f1.write_text('foo')
+ f3 = (tempdir() / '3.txt').touch()
+ f1.write_text('bar')
+ f4 = (tempdir() / '4.txt')
+ f1.copyfile(f4)
+
+ assert os.path.samefile(f1, f2) == f1.samefile(f2)
+ assert os.path.samefile(f1, f3) == f1.samefile(f3)
+ assert os.path.samefile(f1, f4) == f1.samefile(f4)
+ assert os.path.samefile(f1, f1) == f1.samefile(f1)
+
+ def test_rmtree_p(self, tmpdir):
+ d = Path(tmpdir)
+ sub = d / 'subfolder'
+ sub.mkdir()
+ (sub / 'afile').write_text('something')
+ sub.rmtree_p()
+ assert not sub.exists()
+ try:
+ sub.rmtree_p()
+ except OSError:
+ self.fail("Calling `rmtree_p` on non-existent directory "
+ "should not raise an exception.")
+
+
class TestMergeTree:
    """Exercise Path.merge_tree against a small two-directory fixture."""

    @pytest.fixture(autouse=True)
    def testing_structure(self, tmpdir):
        """Create A/ (a file plus a link to it) and an empty B/."""
        self.test_dir = Path(tmpdir)
        self.subdir_a = self.test_dir / 'A'
        self.test_file = self.subdir_a / 'testfile.txt'
        self.test_link = self.subdir_a / 'testlink.txt'
        self.subdir_b = self.test_dir / 'B'

        self.subdir_a.mkdir()
        self.subdir_b.mkdir()

        with open(self.test_file, 'w') as f:
            f.write('x' * 10000)

        # Prefer a real symlink; fall back to a plain copy on
        # platforms lacking os.symlink.
        if hasattr(os, 'symlink'):
            self.test_file.symlink(self.test_link)
        else:
            self.test_file.copy(self.test_link)

    def test_with_nonexisting_dst_kwargs(self):
        self.subdir_a.merge_tree(self.subdir_b, symlinks=True)
        assert self.subdir_b.isdir()
        expected = {
            self.subdir_b / self.test_file.name,
            self.subdir_b / self.test_link.name,
        }
        assert set(self.subdir_b.listdir()) == expected
        assert Path(self.subdir_b / self.test_link.name).islink()

    def test_with_nonexisting_dst_args(self):
        # Same as above, but passing symlinks positionally.
        self.subdir_a.merge_tree(self.subdir_b, True)
        assert self.subdir_b.isdir()
        expected = {
            self.subdir_b / self.test_file.name,
            self.subdir_b / self.test_link.name,
        }
        assert set(self.subdir_b.listdir()) == expected
        assert Path(self.subdir_b / self.test_link.name).islink()

    def test_with_existing_dst(self):
        # Start from a full copy of A in B, then mutate A and re-merge.
        self.subdir_b.rmtree()
        self.subdir_a.copytree(self.subdir_b, True)

        self.test_link.remove()
        test_new = self.subdir_a / 'newfile.txt'
        test_new.touch()
        with open(self.test_file, 'w') as f:
            f.write('x' * 5000)

        self.subdir_a.merge_tree(self.subdir_b, True)

        assert self.subdir_b.isdir()
        expected = {
            self.subdir_b / self.test_file.name,
            self.subdir_b / self.test_link.name,
            self.subdir_b / test_new.name,
        }
        assert set(self.subdir_b.listdir()) == expected
        # The stale link in B survives the merge; the file is updated.
        assert Path(self.subdir_b / self.test_link.name).islink()
        assert len(Path(self.subdir_b / self.test_file.name).bytes()) == 5000

    def test_copytree_parameters(self):
        """
        merge_tree should accept parameters to copytree, such as 'ignore'
        """
        ignore = shutil.ignore_patterns('testlink*')
        self.subdir_a.merge_tree(self.subdir_b, ignore=ignore)

        assert self.subdir_b.isdir()
        assert self.subdir_b.listdir() == [self.subdir_b / self.test_file.name]
+
+
class TestChdir:
    def test_chdir_or_cd(self, tmpdir):
        """chdir (and its alias cd) should change the process cwd."""
        target = Path(str(tmpdir))
        original_cwd = target.getcwd()

        # Sanity check: we do not already sit inside the temp dir.
        assert str(target) != str(original_cwd)

        # Change into the temp dir and confirm the process cwd moved.
        target.chdir()
        assert str(target.getcwd()) == str(tmpdir)

        # Rebinding the Path object does not affect the process cwd.
        target = Path(original_cwd)
        assert str(target.getcwd()) == str(tmpdir)

        # The `cd` alias moves us back to the original directory.
        target.cd()
        assert str(target.getcwd()) == str(original_cwd)
        assert str(target.getcwd()) != str(tmpdir)
+
+
class TestSubclass:
    class PathSubclass(Path):
        pass

    def test_subclass_produces_same_class(self):
        """
        When operations are invoked on a subclass, they should produce another
        instance of that subclass.
        """
        base = self.PathSubclass('/foo')
        child = base / 'bar'
        assert isinstance(child, self.PathSubclass)
+
+
class TestTempDir:

    def test_constructor(self):
        """Constructing a tempdir yields an existing directory Path."""
        temp = tempdir()
        assert isinstance(temp, Path)
        assert temp.exists()
        assert temp.isdir()
        temp.rmdir()
        assert not temp.exists()

    def test_next_class(self):
        """Operations on a tempdir should produce plain Path objects."""
        temp = tempdir()
        child = temp / 'subdir'
        assert isinstance(child, Path)
        temp.rmdir()

    def test_context_manager(self):
        """
        Entering a tempdir as a context yields the tempdir itself;
        exiting cleanly removes the directory and its contents.
        """
        temp = tempdir()
        entered = temp.__enter__()
        assert entered is temp
        (temp / 'somefile.txt').touch()
        assert not isinstance(temp / 'somefile.txt', tempdir)
        temp.__exit__(None, None, None)
        assert not temp.exists()

    def test_context_manager_exception(self):
        """If the context exits with an exception, nothing is removed."""
        temp = tempdir()
        temp.__enter__()
        (temp / 'somefile.txt').touch()
        assert not isinstance(temp / 'somefile.txt', tempdir)
        temp.__exit__(TypeError, TypeError('foo'), None)
        assert temp.exists()

    def test_context_manager_using_with(self):
        """
        The `with` statement works too: the temporary directory exists
        inside the block and is deleted once it is left.
        """
        with tempdir() as temp:
            assert temp.isdir()
        assert not temp.isdir()
+
+
class TestUnicode:
    @pytest.fixture(autouse=True)
    def unicode_name_in_tmpdir(self, tmpdir):
        # Create a directory whose name is non-ASCII (a snowman)
        # inside the temporary directory.
        Path(tmpdir).joinpath('☃').mkdir()

    def test_walkdirs_with_unicode_name(self, tmpdir):
        # Smoke test: walking a tree that contains a unicode-named
        # directory must not raise.
        for _ in Path(tmpdir).walkdirs():
            pass
+
+
class TestPatternMatching:
    def test_fnmatch_simple(self):
        path = Path('FooBar')
        assert path.fnmatch('Foo*')
        assert path.fnmatch('Foo[ABC]ar')

    def test_fnmatch_custom_mod(self):
        # ntpath normalizes case, so lowercase patterns match too.
        path = Path('FooBar')
        path.module = ntpath
        assert path.fnmatch('foobar')
        assert path.fnmatch('FOO[ABC]AR')

    def test_fnmatch_custom_normcase(self):
        # A caller-supplied normcase function overrides the module's.
        def upper(p):
            return p.upper()
        path = Path('FooBar')
        assert path.fnmatch('foobar', normcase=upper)
        assert path.fnmatch('FOO[ABC]AR', normcase=upper)

    def test_listdir_simple(self):
        here = Path('.')
        assert len(here.listdir()) == len(os.listdir('.'))

    def test_listdir_empty_pattern(self):
        assert Path('.').listdir('') == []

    def test_listdir_patterns(self, tmpdir):
        root = Path(tmpdir)
        (root / 'sub').mkdir()
        (root / 'File').touch()
        assert root.listdir('s*') == [root / 'sub']
        assert len(root.listdir('*')) == 2

    def test_listdir_custom_module(self, tmpdir):
        """
        Listdir patterns should honor the case sensitivity of the path module
        used by that Path class.
        """
        always_unix = Path.using_module(posixpath)
        unix_root = always_unix(tmpdir)
        (unix_root / 'sub').mkdir()
        (unix_root / 'File').touch()
        # posixpath is case-sensitive: 'S*' matches nothing.
        assert unix_root.listdir('S*') == []

        always_win = Path.using_module(ntpath)
        win_root = always_win(tmpdir)
        # ntpath is case-insensitive: both patterns match.
        assert win_root.listdir('S*') == [win_root / 'sub']
        assert win_root.listdir('f*') == [win_root / 'File']

    def test_listdir_case_insensitive(self, tmpdir):
        """
        ci() patterns force case-insensitive matching regardless of
        the platform path module.
        """
        root = Path(tmpdir)
        (root / 'sub').mkdir()
        (root / 'File').touch()
        assert root.listdir(ci('S*')) == [root / 'sub']
        assert root.listdir(ci('f*')) == [root / 'File']
        assert root.files(ci('S*')) == []
        assert root.dirs(ci('f*')) == []

    def test_walk_case_insensitive(self, tmpdir):
        root = Path(tmpdir)
        for sub in ('sub1', 'sub2'):
            (root / sub / 'foo').makedirs_p()
        (root / 'sub1' / 'foo' / 'bar.Txt').touch()
        (root / 'sub2' / 'foo' / 'bar.TXT').touch()
        (root / 'sub2' / 'foo' / 'bar.txt.bz2').touch()
        # ci('*.txt') matches both .Txt and .TXT, but not .txt.bz2.
        matches = list(root.walkfiles(ci('*.txt')))
        assert len(matches) == 2
        assert root / 'sub2' / 'foo' / 'bar.TXT' in matches
        assert root / 'sub1' / 'foo' / 'bar.Txt' in matches
+
@pytest.mark.skipif(sys.version_info < (2, 6),
                    reason="in_place requires io module in Python 2.6")
class TestInPlace:
    """Tests for Path.in_place, the read/rewrite-in-place context."""

    reference_content = textwrap.dedent("""
        The quick brown fox jumped over the lazy dog.
        """.lstrip())
    reversed_content = textwrap.dedent("""
        .god yzal eht revo depmuj xof nworb kciuq ehT
        """.lstrip())
    alternate_content = textwrap.dedent("""
        Lorem ipsum dolor sit amet, consectetur adipisicing elit,
        sed do eiusmod tempor incididunt ut labore et dolore magna
        aliqua. Ut enim ad minim veniam, quis nostrud exercitation
        ullamco laboris nisi ut aliquip ex ea commodo consequat.
        Duis aute irure dolor in reprehenderit in voluptate velit
        esse cillum dolore eu fugiat nulla pariatur. Excepteur
        sint occaecat cupidatat non proident, sunt in culpa qui
        officia deserunt mollit anim id est laborum.
        """.lstrip())

    @classmethod
    def create_reference(cls, tmpdir):
        """Write the reference document into tmpdir and return its Path."""
        p = Path(tmpdir)/'document'
        with p.open('w') as stream:
            stream.write(cls.reference_content)
        return p

    def test_line_by_line_rewrite(self, tmpdir):
        doc = self.create_reference(tmpdir)
        # reverse all the text in the document, line by line
        with doc.in_place() as (reader, writer):
            for line in reader:
                r_line = ''.join(reversed(line.strip())) + '\n'
                writer.write(r_line)
        with doc.open() as stream:
            data = stream.read()
        assert data == self.reversed_content

    def test_exception_in_context(self, tmpdir):
        """
        An exception raised inside the context aborts the rewrite:
        the original content survives and nothing new is committed.
        """
        doc = self.create_reference(tmpdir)
        with pytest.raises(RuntimeError) as exc:
            with doc.in_place() as (reader, writer):
                writer.write(self.alternate_content)
                raise RuntimeError("some error")
        # Inspect the exception itself (exc.value); str(exc) is the
        # ExceptionInfo representation, not the exception message.
        assert "some error" in str(exc.value)
        with doc.open() as stream:
            data = stream.read()
        assert 'Lorem' not in data
        assert 'lazy dog' in data
+
+
class TestSpecialPaths:
    """Tests for SpecialResolver / Path.special (appdirs integration)."""

    @pytest.fixture(autouse=True, scope='class')
    def appdirs_installed(cls):
        # Skip the whole class when appdirs is not available.
        pytest.importorskip('appdirs')

    @pytest.fixture
    def feign_linux(self, monkeypatch):
        monkeypatch.setattr("platform.system", lambda: "Linux")
        monkeypatch.setattr("sys.platform", "linux")
        monkeypatch.setattr("os.pathsep", ":")
        # remove any existing import of appdirs, as it sets up some
        # state during import. Pass a default so this does not raise
        # KeyError when appdirs has not been imported yet.
        sys.modules.pop('appdirs', None)

    def test_basic_paths(self):
        appdirs = importlib.import_module('appdirs')

        expected = appdirs.user_config_dir()
        assert SpecialResolver(Path).user.config == expected

        expected = appdirs.site_config_dir()
        assert SpecialResolver(Path).site.config == expected

        expected = appdirs.user_config_dir('My App', 'Me')
        assert SpecialResolver(Path, 'My App', 'Me').user.config == expected

    def test_unix_paths(self, tmpdir, monkeypatch, feign_linux):
        "XDG_CONFIG_HOME should override the user config dir on Linux."
        fake_config = tmpdir / '_config'
        monkeypatch.setitem(os.environ, 'XDG_CONFIG_HOME', str(fake_config))
        expected = str(tmpdir / '_config')
        assert SpecialResolver(Path).user.config == expected

    def test_unix_paths_fallback(self, tmpdir, monkeypatch, feign_linux):
        "Without XDG_CONFIG_HOME set, ~/.config should be used."
        fake_home = tmpdir / '_home'
        monkeypatch.setitem(os.environ, 'HOME', str(fake_home))
        expected = str(tmpdir / '_home' / '.config')
        assert SpecialResolver(Path).user.config == expected

    def test_property(self):
        "Path.special() accessors should produce Path instances."
        assert isinstance(Path.special().user.config, Path)
        assert isinstance(Path.special().user.data, Path)
        assert isinstance(Path.special().user.cache, Path)

    def test_other_parameters(self):
        """
        Other parameters should be passed through to appdirs function.
        """
        res = Path.special(version="1.0", multipath=True).site.config
        assert isinstance(res, Path)

    def test_multipath(self, feign_linux, monkeypatch, tmpdir):
        """
        If multipath is provided, on Linux return the XDG_CONFIG_DIRS
        """
        fake_config_1 = str(tmpdir / '_config1')
        fake_config_2 = str(tmpdir / '_config2')
        config_dirs = os.pathsep.join([fake_config_1, fake_config_2])
        monkeypatch.setitem(os.environ, 'XDG_CONFIG_DIRS', config_dirs)
        res = Path.special(multipath=True).site.config
        assert isinstance(res, Multi)
        assert fake_config_1 in res
        assert fake_config_2 in res
        assert '_config1' in str(res)

    def test_reused_SpecialResolver(self):
        """
        Passing additional args and kwargs to SpecialResolver should be
        passed through to each invocation of the function in appdirs.
        """
        appdirs = importlib.import_module('appdirs')

        adp = SpecialResolver(Path, version="1.0")
        res = adp.user.config

        expected = appdirs.user_config_dir(version="1.0")
        assert res == expected
+
+
class TestMultiPath:
    """Tests for the Multi mix-in (pathsep-joined multi-paths)."""

    def test_for_class(self):
        """
        Multi.for_class should return a subclass of the Path class provided.
        """
        cls = Multi.for_class(Path)
        assert issubclass(cls, Path)
        assert issubclass(cls, Multi)
        assert cls.__name__ == 'MultiPath'

    def test_detect_no_pathsep(self):
        """
        If no pathsep is provided, multipath detect should return an instance
        of the parent class with no Multi mix-in.
        """
        path = Multi.for_class(Path).detect('/foo/bar')
        assert isinstance(path, Path)
        assert not isinstance(path, Multi)

    def test_detect_with_pathsep(self):
        """
        If a pathsep appears in the input, detect should return an instance
        of a Path with the Multi mix-in.
        """
        inputs = '/foo/bar', '/baz/bing'
        # `joined` rather than `input`: avoid shadowing the builtin.
        joined = os.pathsep.join(inputs)
        path = Multi.for_class(Path).detect(joined)

        assert isinstance(path, Multi)

    def test_iteration(self):
        """
        Iterating over a MultiPath should yield instances of the
        parent class.
        """
        inputs = '/foo/bar', '/baz/bing'
        joined = os.pathsep.join(inputs)
        path = Multi.for_class(Path).detect(joined)

        items = iter(path)
        first = next(items)
        assert first == '/foo/bar'
        assert isinstance(first, Path)
        assert not isinstance(first, Multi)
        assert next(items) == '/baz/bing'
        # The MultiPath itself still compares equal to the raw string.
        assert path == joined
+
+
# Allow running this test module directly by delegating to pytest's runner.
if __name__ == '__main__':
    pytest.main()