Cleanup unused modules and imports

* Ran code through PyFlakes
This commit is contained in:
JonnyWong16 2016-05-15 11:32:11 -07:00
parent 325fa19e46
commit cb8a5504f6
290 changed files with 45 additions and 78157 deletions

View file

@ -1,88 +0,0 @@
#!/usr/bin/python
####
# 06/2010 Nic Wolfe <nic@wolfeden.ca>
# 02/2006 Will Holcomb <wholcomb@gmail.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
import urllib
import urllib2
import mimetools, mimetypes
import os, sys
# Controls how sequences are encoded by urllib.urlencode().  If true, elements
# may be given multiple values by assigning a sequence.
doseq = 1
class MultipartPostHandler(urllib2.BaseHandler):
    """urllib2 handler that encodes non-string POST data as multipart/form-data.

    Mapping values that are open file objects (or (name, data) tuples/lists)
    are sent as file uploads; all other values become plain form fields.
    String request data is passed through untouched, so ordinary urlencoded
    POSTs keep working.
    """

    # Must run before urllib2's HTTPHandler so the body and Content-Type
    # header are rewritten before the request goes out.
    handler_order = urllib2.HTTPHandler.handler_order - 10 # needs to run first

    def http_request(self, request):
        """Rewrite *request* in place, multipart-encoding mapping data.

        Raises TypeError if the data is neither a string nor a mapping.
        Returns the (possibly modified) request, as urllib2 expects.
        """
        data = request.get_data()
        if data is not None and type(data) != str:
            v_files = []  # (key, file-or-(name, data)) pairs -> file parts
            v_vars = []   # (key, value) pairs -> plain form fields
            try:
                for(key, value) in data.items():
                    if type(value) in (file, list, tuple):
                        v_files.append((key, value))
                    else:
                        v_vars.append((key, value))
            except TypeError:
                # data has no .items(): not a mapping we can encode.
                systype, value, traceback = sys.exc_info()
                raise TypeError, "not a valid non-string sequence or mapping object", traceback

            if len(v_files) == 0:
                # No uploads: fall back to a normal urlencoded body.
                data = urllib.urlencode(v_vars, doseq)
            else:
                boundary, data = MultipartPostHandler.multipart_encode(v_vars, v_files)
                contenttype = 'multipart/form-data; boundary=%s' % boundary
                # Warn when clobbering an explicit, non-multipart Content-Type.
                if(request.has_header('Content-Type')
                   and request.get_header('Content-Type').find('multipart/form-data') != 0):
                    print "Replacing %s with %s" % (request.get_header('content-type'), 'multipart/form-data')
                request.add_unredirected_header('Content-Type', contenttype)

            request.add_data(data)
        return request

    @staticmethod
    def multipart_encode(vars, files, boundary = None, buffer = None):
        """Build a multipart/form-data request body.

        vars:   iterable of (key, value) plain form fields.
        files:  iterable of (key, fd) where fd is an open file object or a
                (name, data) tuple/list.
        Returns (boundary, body_string).
        """
        if boundary is None:
            boundary = mimetools.choose_boundary()
        if buffer is None:
            buffer = ''
        for(key, value) in vars:
            buffer += '--%s\r\n' % boundary
            buffer += 'Content-Disposition: form-data; name="%s"' % key
            buffer += '\r\n\r\n' + value + '\r\n'
        for(key, fd) in files:
            # allow them to pass in a file or a tuple with name & data
            if type(fd) == file:
                name_in = fd.name
                fd.seek(0)
                data_in = fd.read()
            elif type(fd) in (tuple, list):
                name_in, data_in = fd
            # NOTE(review): any other fd type leaves name_in/data_in stale from
            # a previous iteration (or raises NameError on the first) — confirm
            # callers only ever pass files or (name, data) pairs.
            filename = os.path.basename(name_in)
            contenttype = mimetypes.guess_type(filename)[0] or 'application/octet-stream'
            buffer += '--%s\r\n' % boundary
            buffer += 'Content-Disposition: form-data; name="%s"; filename="%s"\r\n' % (key, filename)
            buffer += 'Content-Type: %s\r\n' % contenttype
            # buffer += 'Content-Length: %s\r\n' % file_size
            buffer += '\r\n' + data_in + '\r\n'
        buffer += '--%s--\r\n\r\n' % boundary
        return boundary, buffer

    # HTTPS requests are encoded exactly the same way.
    https_request = http_request

View file

@ -1,690 +0,0 @@
# Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Abstract Base Classes (ABCs) for collections, according to PEP 3119.
DON'T USE THIS MODULE DIRECTLY! The classes here should be imported
via collections; they are defined here only to alleviate certain
bootstrapping issues. Unit tests are in test_collections.
"""
from abc import ABCMeta, abstractmethod
import sys
# Public API re-exported through the collections module.
__all__ = ["Hashable", "Iterable", "Iterator",
           "Sized", "Container", "Callable",
           "Set", "MutableSet",
           "Mapping", "MutableMapping",
           "MappingView", "KeysView", "ItemsView", "ValuesView",
           "Sequence", "MutableSequence",
           ]
### ONE-TRICK PONIES ###
def _hasattr(C, attr):
try:
return any(attr in B.__dict__ for B in C.__mro__)
except AttributeError:
# Old-style class
return hasattr(C, attr)
class Hashable:
    """ABC for objects usable as dict keys / set members (define __hash__)."""
    __metaclass__ = ABCMeta

    @abstractmethod
    def __hash__(self):
        return 0

    @classmethod
    def __subclasshook__(cls, C):
        # A class counts as Hashable if the first class in its MRO that
        # mentions __hash__ sets it to a true value (a false value marks
        # the class as explicitly unhashable, so we stop looking).
        if cls is Hashable:
            try:
                for B in C.__mro__:
                    if "__hash__" in B.__dict__:
                        if B.__dict__["__hash__"]:
                            return True
                        break
            except AttributeError:
                # Old-style class
                if getattr(C, "__hash__", None):
                    return True
        return NotImplemented
class Iterable:
    """ABC for objects that support iteration (define __iter__)."""
    __metaclass__ = ABCMeta

    @abstractmethod
    def __iter__(self):
        # Abstract stub: a generator that yields nothing.
        while False:
            yield None

    @classmethod
    def __subclasshook__(cls, C):
        if cls is Iterable:
            if _hasattr(C, "__iter__"):
                return True
        return NotImplemented

# Python 2 str has no __iter__ (it iterates via __getitem__), so it must
# be registered explicitly.
Iterable.register(str)
class Iterator(Iterable):
    """ABC for iterators: define next(); __iter__ returns self."""

    @abstractmethod
    def next(self):
        'Return the next item from the iterator. When exhausted, raise StopIteration'
        raise StopIteration

    def __iter__(self):
        return self

    @classmethod
    def __subclasshook__(cls, C):
        # Python 2 iterator protocol: both next() and __iter__ required.
        if cls is Iterator:
            if _hasattr(C, "next") and _hasattr(C, "__iter__"):
                return True
        return NotImplemented
class Sized:
    """ABC for objects with a length (define __len__)."""
    __metaclass__ = ABCMeta

    @abstractmethod
    def __len__(self):
        return 0

    @classmethod
    def __subclasshook__(cls, C):
        if cls is Sized:
            if _hasattr(C, "__len__"):
                return True
        return NotImplemented
class Container:
    """ABC for objects supporting membership tests (define __contains__)."""
    __metaclass__ = ABCMeta

    @abstractmethod
    def __contains__(self, x):
        return False

    @classmethod
    def __subclasshook__(cls, C):
        if cls is Container:
            if _hasattr(C, "__contains__"):
                return True
        return NotImplemented
class Callable:
    """ABC for callable objects (define __call__)."""
    __metaclass__ = ABCMeta

    @abstractmethod
    def __call__(self, *args, **kwds):
        return False

    @classmethod
    def __subclasshook__(cls, C):
        if cls is Callable:
            if _hasattr(C, "__call__"):
                return True
        return NotImplemented
### SETS ###
class Set(Sized, Iterable, Container):
    """A set is a finite, iterable container.

    This class provides concrete generic implementations of all
    methods except for __contains__, __iter__ and __len__.

    To override the comparisons (presumably for speed, as the
    semantics are fixed), redefine __le__ and __ge__,
    then the other operations will automatically follow suit.
    """

    def __le__(self, other):
        # Subset test: every element of self appears in other.
        if not isinstance(other, Set):
            return NotImplemented
        if len(self) > len(other):
            return False
        for elem in self:
            if elem not in other:
                return False
        return True

    def __lt__(self, other):
        # Proper subset: strictly smaller AND a subset.
        if not isinstance(other, Set):
            return NotImplemented
        return len(self) < len(other) and self.__le__(other)

    def __gt__(self, other):
        # Proper superset: strictly larger AND a superset.
        if not isinstance(other, Set):
            return NotImplemented
        return len(self) > len(other) and self.__ge__(other)

    def __ge__(self, other):
        # Superset test: every element of other appears in self.
        if not isinstance(other, Set):
            return NotImplemented
        if len(self) < len(other):
            return False
        for elem in other:
            if elem not in self:
                return False
        return True

    def __eq__(self, other):
        # Equal sizes plus subset relation implies equality.
        if not isinstance(other, Set):
            return NotImplemented
        return len(self) == len(other) and self.__le__(other)

    def __ne__(self, other):
        return not (self == other)

    @classmethod
    def _from_iterable(cls, it):
        '''Construct an instance of the class from any iterable input.

        Must override this method if the class constructor signature
        does not accept an iterable for an input.
        '''
        return cls(it)

    def __and__(self, other):
        # Intersection; accepts any iterable on the right.
        if not isinstance(other, Iterable):
            return NotImplemented
        return self._from_iterable(value for value in other if value in self)

    __rand__ = __and__

    def isdisjoint(self, other):
        'Return True if two sets have a null intersection.'
        for value in other:
            if value in self:
                return False
        return True

    def __or__(self, other):
        # Union; accepts any iterable on the right.
        if not isinstance(other, Iterable):
            return NotImplemented
        chain = (e for s in (self, other) for e in s)
        return self._from_iterable(chain)

    __ror__ = __or__

    def __sub__(self, other):
        if not isinstance(other, Set):
            if not isinstance(other, Iterable):
                return NotImplemented
            # Coerce a plain iterable so the 'in' tests below are efficient
            # and well-defined.
            other = self._from_iterable(other)
        return self._from_iterable(value for value in self
                                   if value not in other)

    def __rsub__(self, other):
        if not isinstance(other, Set):
            if not isinstance(other, Iterable):
                return NotImplemented
            other = self._from_iterable(other)
        return self._from_iterable(value for value in other
                                   if value not in self)

    def __xor__(self, other):
        # Symmetric difference, built from the two one-sided differences.
        if not isinstance(other, Set):
            if not isinstance(other, Iterable):
                return NotImplemented
            other = self._from_iterable(other)
        return (self - other) | (other - self)

    __rxor__ = __xor__

    # Sets are not hashable by default, but subclasses can change this
    __hash__ = None

    def _hash(self):
        """Compute the hash value of a set.

        Note that we don't define __hash__: not all sets are hashable.
        But if you define a hashable set type, its __hash__ should
        call this function.

        This must be compatible __eq__.

        All sets ought to compare equal if they contain the same
        elements, regardless of how they are implemented, and
        regardless of the order of the elements; so there's not much
        freedom for __eq__ or __hash__.  We match the algorithm used
        by the built-in frozenset type.
        """
        # Order-independent hash folded into the platform word size
        # (sys.maxint is Python-2-only).
        MAX = sys.maxint
        MASK = 2 * MAX + 1
        n = len(self)
        h = 1927868237 * (n + 1)
        h &= MASK
        for x in self:
            hx = hash(x)
            h ^= (hx ^ (hx << 16) ^ 89869747) * 3644798167
            h &= MASK
        h = h * 69069 + 907133923
        h &= MASK
        if h > MAX:
            h -= MASK + 1
        if h == -1:
            # -1 is reserved by CPython as an error return for hash().
            h = 590923713
        return h

Set.register(frozenset)
class MutableSet(Set):
    """A mutable set is a finite, iterable container.

    This class provides concrete generic implementations of all
    methods except for __contains__, __iter__, __len__,
    add(), and discard().

    To override the comparisons (presumably for speed, as the
    semantics are fixed), all you have to do is redefine __le__ and
    then the other operations will automatically follow suit.
    """

    @abstractmethod
    def add(self, value):
        """Add an element."""
        raise NotImplementedError

    @abstractmethod
    def discard(self, value):
        """Remove an element.  Do not raise an exception if absent."""
        raise NotImplementedError

    def remove(self, value):
        """Remove an element. If not a member, raise a KeyError."""
        if value not in self:
            raise KeyError(value)
        self.discard(value)

    def pop(self):
        """Return the popped value.  Raise KeyError if empty."""
        it = iter(self)
        try:
            value = next(it)
        except StopIteration:
            raise KeyError
        self.discard(value)
        return value

    def clear(self):
        """This is slow (creates N new iterators!) but effective."""
        try:
            while True:
                self.pop()
        except KeyError:
            pass

    def __ior__(self, it):
        # In-place union.
        for value in it:
            self.add(value)
        return self

    def __iand__(self, it):
        # In-place intersection: drop everything not also in `it`.
        for value in (self - it):
            self.discard(value)
        return self

    def __ixor__(self, it):
        # In-place symmetric difference; `it is self` empties the set.
        if it is self:
            self.clear()
        else:
            if not isinstance(it, Set):
                it = self._from_iterable(it)
            for value in it:
                if value in self:
                    self.discard(value)
                else:
                    self.add(value)
        return self

    def __isub__(self, it):
        # In-place difference; `it is self` empties the set.
        if it is self:
            self.clear()
        else:
            for value in it:
                self.discard(value)
        return self

MutableSet.register(set)
### MAPPINGS ###
class Mapping(Sized, Iterable, Container):
    """A Mapping is a generic container for associating key/value
    pairs.

    This class provides concrete generic implementations of all
    methods except for __getitem__, __iter__, and __len__.
    """

    @abstractmethod
    def __getitem__(self, key):
        raise KeyError

    def get(self, key, default=None):
        'D.get(k[,d]) -> D[k] if k in D, else d.  d defaults to None.'
        try:
            return self[key]
        except KeyError:
            return default

    def __contains__(self, key):
        # Membership via lookup (EAFP), so subclasses only implement
        # __getitem__.
        try:
            self[key]
        except KeyError:
            return False
        else:
            return True

    def iterkeys(self):
        'D.iterkeys() -> an iterator over the keys of D'
        return iter(self)

    def itervalues(self):
        'D.itervalues() -> an iterator over the values of D'
        for key in self:
            yield self[key]

    def iteritems(self):
        'D.iteritems() -> an iterator over the (key, value) items of D'
        for key in self:
            yield (key, self[key])

    def keys(self):
        "D.keys() -> list of D's keys"
        return list(self)

    def items(self):
        "D.items() -> list of D's (key, value) pairs, as 2-tuples"
        return [(key, self[key]) for key in self]

    def values(self):
        "D.values() -> list of D's values"
        return [self[key] for key in self]

    # Mappings are not hashable by default, but subclasses can change this
    __hash__ = None

    def __eq__(self, other):
        # Order-insensitive comparison via dict snapshots.
        if not isinstance(other, Mapping):
            return NotImplemented
        return dict(self.items()) == dict(other.items())

    def __ne__(self, other):
        return not (self == other)
class MappingView(Sized):
    """Base class for the Keys/Items/Values views: holds a reference to
    the underlying mapping and mirrors its size."""

    def __init__(self, mapping):
        self._mapping = mapping

    def __len__(self):
        return len(self._mapping)

    def __repr__(self):
        return '{0.__class__.__name__}({0._mapping!r})'.format(self)
class KeysView(MappingView, Set):
    """Set-like view of a mapping's keys."""

    @classmethod
    def _from_iterable(self, it):
        # Note: first arg is the class (classmethod) despite the 'self' name.
        return set(it)

    def __contains__(self, key):
        return key in self._mapping

    def __iter__(self):
        for key in self._mapping:
            yield key
class ItemsView(MappingView, Set):
    """Set-like view of a mapping's (key, value) pairs."""

    @classmethod
    def _from_iterable(self, it):
        # Note: first arg is the class (classmethod) despite the 'self' name.
        return set(it)

    def __contains__(self, item):
        # Membership means the key exists AND maps to an equal value.
        key, value = item
        try:
            v = self._mapping[key]
        except KeyError:
            return False
        else:
            return v == value

    def __iter__(self):
        for key in self._mapping:
            yield (key, self._mapping[key])
class ValuesView(MappingView):
    """View of a mapping's values.

    Unlike keys/items this view is not set-like: values may repeat and
    need not be hashable, so membership is a linear scan.
    """

    def __contains__(self, value):
        mapping = self._mapping
        return any(value == mapping[key] for key in mapping)

    def __iter__(self):
        mapping = self._mapping
        for key in mapping:
            yield mapping[key]
class MutableMapping(Mapping):
    """A MutableMapping is a generic container for associating
    key/value pairs.

    This class provides concrete generic implementations of all
    methods except for __getitem__, __setitem__, __delitem__,
    __iter__, and __len__.
    """

    @abstractmethod
    def __setitem__(self, key, value):
        raise KeyError

    @abstractmethod
    def __delitem__(self, key):
        raise KeyError

    # Unique sentinel so callers may pass default=None explicitly to pop().
    __marker = object()

    def pop(self, key, default=__marker):
        '''D.pop(k[,d]) -> v, remove specified key and return the corresponding value.
        If key is not found, d is returned if given, otherwise KeyError is raised.
        '''
        try:
            value = self[key]
        except KeyError:
            if default is self.__marker:
                raise
            return default
        else:
            del self[key]
            return value

    def popitem(self):
        '''D.popitem() -> (k, v), remove and return some (key, value) pair
        as a 2-tuple; but raise KeyError if D is empty.
        '''
        try:
            key = next(iter(self))
        except StopIteration:
            raise KeyError
        value = self[key]
        del self[key]
        return key, value

    def clear(self):
        'D.clear() -> None. Remove all items from D.'
        # popitem() raises KeyError exactly when the mapping is empty.
        try:
            while True:
                self.popitem()
        except KeyError:
            pass

    def update(*args, **kwds):
        ''' D.update([E, ]**F) -> None. Update D from mapping/iterable E and F.
        If E present and has a .keys() method, does: for k in E: D[k] = E[k]
        If E present and lacks .keys() method, does: for (k, v) in E: D[k] = v
        In either case, this is followed by: for k, v in F.items(): D[k] = v
        '''
        # self is pulled from *args so that 'self' and 'other' stay usable
        # as keyword names for keys being updated.
        if len(args) > 2:
            raise TypeError("update() takes at most 2 positional "
                            "arguments ({args} given)".format(args=len(args)))
        elif not args:
            raise TypeError("update() takes at least 1 argument (0 given)")
        self = args[0]
        other = args[1] if len(args) >= 2 else ()
        if isinstance(other, Mapping):
            for key in other:
                self[key] = other[key]
        elif hasattr(other, "keys"):
            for key in other.keys():
                self[key] = other[key]
        else:
            for key, value in other:
                self[key] = value
        for key, value in kwds.items():
            self[key] = value

    def setdefault(self, key, default=None):
        'D.setdefault(k[,d]) -> D.get(k,d), also set D[k]=d if k not in D'
        try:
            return self[key]
        except KeyError:
            self[key] = default
        return default

MutableMapping.register(dict)
### SEQUENCES ###
class Sequence(Sized, Iterable, Container):
    """All the operations on a read-only sequence.

    Concrete subclasses must override __new__ or __init__,
    __getitem__, and __len__.
    """

    @abstractmethod
    def __getitem__(self, index):
        raise IndexError

    def __iter__(self):
        # Generic iteration: integer indexing from 0 until IndexError.
        i = 0
        try:
            while True:
                v = self[i]
                yield v
                i += 1
        except IndexError:
            return

    def __contains__(self, value):
        for v in self:
            if v == value:
                return True
        return False

    def __reversed__(self):
        for i in reversed(range(len(self))):
            yield self[i]

    def index(self, value):
        '''S.index(value) -> integer -- return first index of value.
        Raises ValueError if the value is not present.
        '''
        for i, v in enumerate(self):
            if v == value:
                return i
        raise ValueError

    def count(self, value):
        'S.count(value) -> integer -- return number of occurrences of value'
        return sum(1 for v in self if v == value)

# basestring, buffer and xrange exist only in Python 2.
Sequence.register(tuple)
Sequence.register(basestring)
Sequence.register(buffer)
Sequence.register(xrange)
class MutableSequence(Sequence):
    """All the operations on a read-only sequence.

    Concrete subclasses must provide __new__ or __init__,
    __getitem__, __setitem__, __delitem__, __len__, and insert().
    """

    @abstractmethod
    def __setitem__(self, index, value):
        raise IndexError

    @abstractmethod
    def __delitem__(self, index):
        raise IndexError

    @abstractmethod
    def insert(self, index, value):
        'S.insert(index, object) -- insert object before index'
        raise IndexError

    def append(self, value):
        'S.append(object) -- append object to the end of the sequence'
        self.insert(len(self), value)

    def reverse(self):
        'S.reverse() -- reverse *IN PLACE*'
        # Swap symmetric pairs; the middle element (odd n) stays put.
        n = len(self)
        for i in range(n//2):
            self[i], self[n-i-1] = self[n-i-1], self[i]

    def extend(self, values):
        'S.extend(iterable) -- extend sequence by appending elements from the iterable'
        for v in values:
            self.append(v)

    def pop(self, index=-1):
        '''S.pop([index]) -> item -- remove and return item at index (default last).
        Raise IndexError if list is empty or index is out of range.
        '''
        v = self[index]
        del self[index]
        return v

    def remove(self, value):
        '''S.remove(value) -- remove first occurrence of value.
        Raise ValueError if the value is not present.
        '''
        del self[self.index(value)]

    def __iadd__(self, values):
        self.extend(values)
        return self

MutableSequence.register(list)

View file

@ -1,732 +0,0 @@
__all__ = ['Counter', 'deque', 'defaultdict', 'namedtuple', 'OrderedDict']
# For bootstrapping reasons, the collection ABCs are defined in _abcoll.py.
# They should however be considered an integral part of collections.py.
from backport_abcoll import *
import backport_abcoll
__all__ += backport_abcoll.__all__

from _collections import defaultdict
from collections import deque as _deque
from operator import itemgetter as _itemgetter, eq as _eq
from keyword import iskeyword as _iskeyword
import sys as _sys
import heapq as _heapq
from itertools import repeat as _repeat, chain as _chain, starmap as _starmap
from itertools import imap as _imap

try:
    # Thread identity is used by OrderedDict.__repr__ for recursion detection.
    from thread import get_ident as _get_ident
except ImportError:
    from dummy_thread import get_ident as _get_ident

if _sys.version_info >= (2, 7):
    # The stock collections module has all of this from 2.7 onwards.
    import warnings as _warnings
    _warnings.warn('Use the stock collections modules instead.', DeprecationWarning)
################################################################################
### OrderedDict
################################################################################
class OrderedDict(dict):
    'Dictionary that remembers insertion order'
    # An inherited dict maps keys to values.
    # The inherited dict provides __getitem__, __len__, __contains__, and get.
    # The remaining methods are order-aware.
    # Big-O running times for all methods are the same as regular dictionaries.

    # The internal self.__map dict maps keys to links in a doubly linked list.
    # The circular doubly linked list starts and ends with a sentinel element.
    # The sentinel element never gets deleted (this simplifies the algorithm).
    # Each link is stored as a list of length three:  [PREV, NEXT, KEY].

    def __init__(self, *args, **kwds):
        '''Initialize an ordered dictionary.  The signature is the same as
        regular dictionaries, but keyword arguments are not recommended because
        their insertion order is arbitrary.
        '''
        if len(args) > 1:
            raise TypeError('expected at most 1 arguments, got %d' % len(args))
        try:
            # Re-initialization of an existing instance must not reset order.
            self.__root
        except AttributeError:
            self.__root = root = []                     # sentinel node
            root[:] = [root, root, None]
            self.__map = {}
        self.__update(*args, **kwds)

    def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
        'od.__setitem__(i, y) <==> od[i]=y'
        # Setting a new item creates a new link at the end of the linked list,
        # and the inherited dictionary is updated with the new key/value pair.
        if key not in self:
            root = self.__root
            last = root[0]
            last[1] = root[0] = self.__map[key] = [last, root, key]
        return dict_setitem(self, key, value)

    def __delitem__(self, key, dict_delitem=dict.__delitem__):
        'od.__delitem__(y) <==> del od[y]'
        # Deleting an existing item uses self.__map to find the link which gets
        # removed by updating the links in the predecessor and successor nodes.
        dict_delitem(self, key)
        link_prev, link_next, _ = self.__map.pop(key)
        link_prev[1] = link_next                        # update link_prev[NEXT]
        link_next[0] = link_prev                        # update link_next[PREV]

    def __iter__(self):
        'od.__iter__() <==> iter(od)'
        # Traverse the linked list in order.
        root = self.__root
        curr = root[1]                                  # start at the first node
        while curr is not root:
            yield curr[2]                               # yield the curr[KEY]
            curr = curr[1]                              # move to next node

    def __reversed__(self):
        'od.__reversed__() <==> reversed(od)'
        # Traverse the linked list in reverse order.
        root = self.__root
        curr = root[0]                                  # start at the last node
        while curr is not root:
            yield curr[2]                               # yield the curr[KEY]
            curr = curr[0]                              # move to previous node

    def clear(self):
        'od.clear() -> None.  Remove all items from od.'
        root = self.__root
        root[:] = [root, root, None]
        self.__map.clear()
        dict.clear(self)

    # -- the following methods do not depend on the internal structure --

    def keys(self):
        'od.keys() -> list of keys in od'
        return list(self)

    def values(self):
        'od.values() -> list of values in od'
        return [self[key] for key in self]

    def items(self):
        'od.items() -> list of (key, value) pairs in od'
        return [(key, self[key]) for key in self]

    def iterkeys(self):
        'od.iterkeys() -> an iterator over the keys in od'
        return iter(self)

    def itervalues(self):
        'od.itervalues -> an iterator over the values in od'
        for k in self:
            yield self[k]

    def iteritems(self):
        'od.iteritems -> an iterator over the (key, value) pairs in od'
        for k in self:
            yield (k, self[k])

    update = MutableMapping.update

    __update = update # let subclasses override update without breaking __init__

    __marker = object()

    def pop(self, key, default=__marker):
        '''od.pop(k[,d]) -> v, remove specified key and return the corresponding
        value.  If key is not found, d is returned if given, otherwise KeyError
        is raised.
        '''
        if key in self:
            result = self[key]
            del self[key]
            return result
        if default is self.__marker:
            raise KeyError(key)
        return default

    def setdefault(self, key, default=None):
        'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
        if key in self:
            return self[key]
        self[key] = default
        return default

    def popitem(self, last=True):
        '''od.popitem() -> (k, v), return and remove a (key, value) pair.
        Pairs are returned in LIFO order if last is true or FIFO order if false.
        '''
        if not self:
            raise KeyError('dictionary is empty')
        key = next(reversed(self) if last else iter(self))
        value = self.pop(key)
        return key, value

    def __repr__(self, _repr_running={}):
        'od.__repr__() <==> repr(od)'
        # _repr_running guards against infinite recursion when an od
        # contains itself; keyed by (id, thread) so it is thread-safe.
        call_key = id(self), _get_ident()
        if call_key in _repr_running:
            return '...'
        _repr_running[call_key] = 1
        try:
            if not self:
                return '%s()' % (self.__class__.__name__,)
            return '%s(%r)' % (self.__class__.__name__, self.items())
        finally:
            del _repr_running[call_key]

    def __reduce__(self):
        'Return state information for pickling'
        items = [[k, self[k]] for k in self]
        inst_dict = vars(self).copy()
        # Strip the internal bookkeeping attributes from the pickled state.
        for k in vars(OrderedDict()):
            inst_dict.pop(k, None)
        if inst_dict:
            return (self.__class__, (items,), inst_dict)
        return self.__class__, (items,)

    def copy(self):
        'od.copy() -> a shallow copy of od'
        return self.__class__(self)

    @classmethod
    def fromkeys(cls, iterable, value=None):
        '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S.
        If not specified, the value defaults to None.
        '''
        self = cls()
        for key in iterable:
            self[key] = value
        return self

    def __eq__(self, other):
        '''od.__eq__(y) <==> od==y.  Comparison to another OD is order-sensitive
        while comparison to a regular mapping is order-insensitive.
        '''
        if isinstance(other, OrderedDict):
            return dict.__eq__(self, other) and all(_imap(_eq, self, other))
        return dict.__eq__(self, other)

    def __ne__(self, other):
        'od.__ne__(y) <==> od!=y'
        return not self == other

    # -- the following methods support python 3.x style dictionary views --

    def viewkeys(self):
        "od.viewkeys() -> a set-like object providing a view on od's keys"
        return KeysView(self)

    def viewvalues(self):
        "od.viewvalues() -> an object providing a view on od's values"
        return ValuesView(self)

    def viewitems(self):
        "od.viewitems() -> a set-like object providing a view on od's items"
        return ItemsView(self)
################################################################################
### namedtuple
################################################################################
_class_template = '''\
class {typename}(tuple):
'{typename}({arg_list})'
__slots__ = ()
_fields = {field_names!r}
def __new__(_cls, {arg_list}):
'Create new instance of {typename}({arg_list})'
return _tuple.__new__(_cls, ({arg_list}))
@classmethod
def _make(cls, iterable, new=tuple.__new__, len=len):
'Make a new {typename} object from a sequence or iterable'
result = new(cls, iterable)
if len(result) != {num_fields:d}:
raise TypeError('Expected {num_fields:d} arguments, got %d' % len(result))
return result
def __repr__(self):
'Return a nicely formatted representation string'
return '{typename}({repr_fmt})' % self
def _asdict(self):
'Return a new OrderedDict which maps field names to their values'
return OrderedDict(zip(self._fields, self))
def _replace(_self, **kwds):
'Return a new {typename} object replacing specified fields with new values'
result = _self._make(map(kwds.pop, {field_names!r}, _self))
if kwds:
raise ValueError('Got unexpected field names: %r' % kwds.keys())
return result
def __getnewargs__(self):
'Return self as a plain tuple. Used by copy and pickle.'
return tuple(self)
__dict__ = _property(_asdict)
def __getstate__(self):
'Exclude the OrderedDict from pickling'
pass
{field_defs}
'''
_repr_template = '{name}=%r'
_field_template = '''\
{name} = _property(_itemgetter({index:d}), doc='Alias for field number {index:d}')
'''
def namedtuple(typename, field_names, verbose=False, rename=False):
    """Returns a new subclass of tuple with named fields.

    >>> Point = namedtuple('Point', ['x', 'y'])
    >>> Point.__doc__                   # docstring for the new class
    'Point(x, y)'
    >>> p = Point(11, y=22)             # instantiate with positional args or keywords
    >>> p[0] + p[1]                     # indexable like a plain tuple
    33
    >>> x, y = p                        # unpack like a regular tuple
    >>> x, y
    (11, 22)
    >>> p.x + p.y                       # fields also accessible by name
    33
    >>> d = p._asdict()                 # convert to a dictionary
    >>> d['x']
    11
    >>> Point(**d)                      # convert from a dictionary
    Point(x=11, y=22)
    >>> p._replace(x=100)               # _replace() is like str.replace() but targets named fields
    Point(x=100, y=22)

    """

    # Validate the field names.  At the user's option, either generate an error
    # message or automatically replace the field name with a valid name.
    if isinstance(field_names, basestring):
        # Accept a single space/comma separated string of names.
        field_names = field_names.replace(',', ' ').split()
    field_names = map(str, field_names)
    typename = str(typename)
    if rename:
        seen = set()
        for index, name in enumerate(field_names):
            # Invalid or duplicate names are replaced by positional _0, _1, ...
            if (not all(c.isalnum() or c=='_' for c in name)
                or _iskeyword(name)
                or not name
                or name[0].isdigit()
                or name.startswith('_')
                or name in seen):
                field_names[index] = '_%d' % index
            seen.add(name)
    for name in [typename] + field_names:
        if type(name) != str:
            raise TypeError('Type names and field names must be strings')
        if not all(c.isalnum() or c=='_' for c in name):
            raise ValueError('Type names and field names can only contain '
                             'alphanumeric characters and underscores: %r' % name)
        if _iskeyword(name):
            raise ValueError('Type names and field names cannot be a '
                             'keyword: %r' % name)
        if name[0].isdigit():
            raise ValueError('Type names and field names cannot start with '
                             'a number: %r' % name)
    seen = set()
    for name in field_names:
        if name.startswith('_') and not rename:
            raise ValueError('Field names cannot start with an underscore: '
                             '%r' % name)
        if name in seen:
            raise ValueError('Encountered duplicate field name: %r' % name)
        seen.add(name)

    # Fill-in the class template
    class_definition = _class_template.format(
        typename = typename,
        field_names = tuple(field_names),
        num_fields = len(field_names),
        # repr() of the tuple minus quotes/parens gives 'a, b, c'.
        arg_list = repr(tuple(field_names)).replace("'", "")[1:-1],
        repr_fmt = ', '.join(_repr_template.format(name=name)
                             for name in field_names),
        field_defs = '\n'.join(_field_template.format(index=index, name=name)
                               for index, name in enumerate(field_names))
    )
    if verbose:
        print class_definition

    # Execute the template string in a temporary namespace and support
    # tracing utilities by setting a value for frame.f_globals['__name__']
    namespace = dict(_itemgetter=_itemgetter, __name__='namedtuple_%s' % typename,
                     OrderedDict=OrderedDict, _property=property, _tuple=tuple)
    try:
        exec class_definition in namespace
    except SyntaxError as e:
        raise SyntaxError(e.message + ':\n' + class_definition)
    result = namespace[typename]

    # For pickling to work, the __module__ variable needs to be set to the frame
    # where the named tuple is created.  Bypass this step in environments where
    # sys._getframe is not defined (Jython for example) or sys._getframe is not
    # defined for arguments greater than 0 (IronPython).
    try:
        result.__module__ = _sys._getframe(1).f_globals.get('__name__', '__main__')
    except (AttributeError, ValueError):
        pass

    return result
########################################################################
### Counter
########################################################################
class Counter(dict):
'''Dict subclass for counting hashable items. Sometimes called a bag
or multiset. Elements are stored as dictionary keys and their counts
are stored as dictionary values.
>>> c = Counter('abcdeabcdabcaba') # count elements from a string
>>> c.most_common(3) # three most common elements
[('a', 5), ('b', 4), ('c', 3)]
>>> sorted(c) # list all unique elements
['a', 'b', 'c', 'd', 'e']
>>> ''.join(sorted(c.elements())) # list elements with repetitions
'aaaaabbbbcccdde'
>>> sum(c.values()) # total of all counts
15
>>> c['a'] # count of letter 'a'
5
>>> for elem in 'shazam': # update counts from an iterable
... c[elem] += 1 # by adding 1 to each element's count
>>> c['a'] # now there are seven 'a'
7
>>> del c['b'] # remove all 'b'
>>> c['b'] # now there are zero 'b'
0
>>> d = Counter('simsalabim') # make another counter
>>> c.update(d) # add in the second counter
>>> c['a'] # now there are nine 'a'
9
>>> c.clear() # empty the counter
>>> c
Counter()
Note: If a count is set to zero or reduced to zero, it will remain
in the counter until the entry is deleted or the counter is cleared:
>>> c = Counter('aaabbc')
>>> c['b'] -= 2 # reduce the count of 'b' by two
>>> c.most_common() # 'b' is still in, but its count is zero
[('a', 3), ('c', 1), ('b', 0)]
'''
# References:
# http://en.wikipedia.org/wiki/Multiset
# http://www.gnu.org/software/smalltalk/manual-base/html_node/Bag.html
# http://www.demo2s.com/Tutorial/Cpp/0380__set-multiset/Catalog0380__set-multiset.htm
# http://code.activestate.com/recipes/259174/
# Knuth, TAOCP Vol. II section 4.6.3
def __init__(self, iterable=None, **kwds):
'''Create a new, empty Counter object. And if given, count elements
from an input iterable. Or, initialize the count from another mapping
of elements to their counts.
>>> c = Counter() # a new, empty counter
>>> c = Counter('gallahad') # a new counter from an iterable
>>> c = Counter({'a': 4, 'b': 2}) # a new counter from a mapping
>>> c = Counter(a=4, b=2) # a new counter from keyword args
'''
super(Counter, self).__init__()
self.update(iterable, **kwds)
def __missing__(self, key):
'The count of elements not in the Counter is zero.'
# Needed so that self[missing_item] does not raise KeyError
return 0
def most_common(self, n=None):
'''List the n most common elements and their counts from the most
common to the least. If n is None, then list all element counts.
>>> Counter('abcdeabcdabcaba').most_common(3)
[('a', 5), ('b', 4), ('c', 3)]
'''
# Emulate Bag.sortedByCount from Smalltalk
if n is None:
return sorted(self.iteritems(), key=_itemgetter(1), reverse=True)
return _heapq.nlargest(n, self.iteritems(), key=_itemgetter(1))
def elements(self):
    '''Iterator over elements repeating each as many times as its count.

    >>> c = Counter('ABCABC')
    >>> sorted(c.elements())
    ['A', 'A', 'B', 'B', 'C', 'C']

    # Knuth's example for prime factors of 1836:  2**2 * 3**3 * 17**1
    >>> prime_factors = Counter({2: 2, 3: 3, 17: 1})
    >>> product = 1
    >>> for factor in prime_factors.elements():     # loop over factors
    ...     product *= factor                       # and multiply them
    >>> product
    1836

    Note, if an element's count has been set to zero or is a negative
    number, elements() will ignore it.
    '''
    # Equivalent to Bag.do from Smalltalk and Multiset.begin from C++;
    # _repeat ignores zero/negative counts naturally.
    return _chain.from_iterable(
        _repeat(elem, count) for elem, count in self.iteritems())
# Override dict methods where necessary
@classmethod
def fromkeys(cls, iterable, v=None):
    # Deliberately disabled dict classmethod.
    # There is no equivalent method for counters because setting v=1
    # means that no element can have a count greater than one.
    raise NotImplementedError(
        'Counter.fromkeys() is undefined. Use Counter(iterable) instead.')
def update(self, iterable=None, **kwds):
    '''Like dict.update() but add counts instead of replacing them.

    Source can be an iterable, a dictionary, or another Counter instance.

    >>> c = Counter('which')
    >>> c.update('witch')           # add elements from another iterable
    >>> d = Counter('watch')
    >>> c.update(d)                 # add elements from another counter
    >>> c['h']                      # four 'h' in which, witch, and watch
    4
    '''
    # The regular dict.update() operation makes no sense here because the
    # replace behavior results in the some of original untouched counts
    # being mixed-in with all of the other counts for a mismash that
    # doesn't have a straight-forward interpretation in most counting
    # contexts.  Instead, we implement straight-addition.  Both the inputs
    # and outputs are allowed to contain zero and negative counts.
    if iterable is not None:
        if isinstance(iterable, Mapping):
            if self:
                # General case: add the mapping's counts onto ours.
                self_get = self.get  # hoist the bound method for the loop
                for elem, count in iterable.iteritems():
                    self[elem] = self_get(elem, 0) + count
            else:
                super(Counter, self).update(iterable) # fast path when counter is empty
        else:
            # Plain iterable of elements: each occurrence adds one.
            self_get = self.get
            for elem in iterable:
                self[elem] = self_get(elem, 0) + 1
    if kwds:
        self.update(kwds)
def subtract(self, iterable=None, **kwds):
    '''Like dict.update() but subtracts counts instead of replacing them.

    Counts can be reduced below zero.  Both the inputs and outputs are
    allowed to contain zero and negative counts.
    Source can be an iterable, a dictionary, or another Counter instance.

    >>> c = Counter('which')
    >>> c.subtract('witch')             # subtract elements from another iterable
    >>> c.subtract(Counter('watch'))    # subtract elements from another counter
    >>> c['h']                          # 2 in which, minus 1 in witch, minus 1 in watch
    0
    >>> c['w']                          # 1 in which, minus 1 in witch, minus 1 in watch
    -1
    '''
    if iterable is not None:
        get_count = self.get  # hoist the bound method for the loop
        if isinstance(iterable, Mapping):
            # Mapping source: subtract each stored count.
            for elem, count in iterable.items():
                self[elem] = get_count(elem, 0) - count
        else:
            # Plain iterable: each occurrence subtracts one.
            for elem in iterable:
                self[elem] = get_count(elem, 0) - 1
    if kwds:
        self.subtract(kwds)
def copy(self):
    'Return a shallow copy.'
    # Rebuilding through the constructor preserves the subclass type.
    return self.__class__(self)
def __reduce__(self):
    # Pickle support: rebuild from a plain dict of the current counts.
    return self.__class__, (dict(self),)
def __delitem__(self, elem):
    'Like dict.__delitem__() but does not raise KeyError for missing values.'
    # Guard clause: silently ignore deletion of absent elements.
    if elem not in self:
        return
    super(Counter, self).__delitem__(elem)
def __repr__(self):
    # Render as e.g. Counter({'a': 3, 'b': 1}), most common first.
    name = self.__class__.__name__
    if not self:
        return '%s()' % name
    body = ', '.join('%r: %r' % pair for pair in self.most_common())
    return '%s({%s})' % (name, body)
# Multiset-style mathematical operations discussed in:
# Knuth TAOCP Volume II section 4.6.3 exercise 19
# and at http://en.wikipedia.org/wiki/Multiset
#
# Outputs guaranteed to only include positive counts.
#
# To strip negative and zero counts, add-in an empty counter:
# c += Counter()
def __add__(self, other):
    '''Add counts from two counters.

    >>> Counter('abbb') + Counter('bcc')
    Counter({'b': 4, 'c': 2, 'a': 1})
    '''
    if not isinstance(other, Counter):
        return NotImplemented
    total = Counter()
    # Elements present in self: sum the two counts, keep positives only.
    for elem, cnt in self.items():
        combined = cnt + other[elem]
        if combined > 0:
            total[elem] = combined
    # Elements only in other keep their own (positive) count.
    for elem, cnt in other.items():
        if elem not in self and cnt > 0:
            total[elem] = cnt
    return total
def __sub__(self, other):
    ''' Subtract count, but keep only results with positive counts.

    >>> Counter('abbbc') - Counter('bccd')
    Counter({'b': 2, 'a': 1})
    '''
    if not isinstance(other, Counter):
        return NotImplemented
    remaining = Counter()
    # Elements of self reduced by other's counts; keep positives only.
    for elem, cnt in self.items():
        diff = cnt - other[elem]
        if diff > 0:
            remaining[elem] = diff
    # Elements only in other contribute when their count is negative
    # (subtracting a negative yields a positive result).
    for elem, cnt in other.items():
        if elem not in self and cnt < 0:
            remaining[elem] = -cnt
    return remaining
def __or__(self, other):
    '''Union is the maximum of value in either of the input counters.

    >>> Counter('abbb') | Counter('bcc')
    Counter({'b': 3, 'c': 2, 'a': 1})
    '''
    if not isinstance(other, Counter):
        return NotImplemented
    union = Counter()
    # Shared elements take the larger of the two counts.
    for elem, cnt in self.items():
        biggest = max(cnt, other[elem])
        if biggest > 0:
            union[elem] = biggest
    # Elements only in other keep their (positive) count.
    for elem, cnt in other.items():
        if elem not in self and cnt > 0:
            union[elem] = cnt
    return union
def __and__(self, other):
    ''' Intersection is the minimum of corresponding counts.

    >>> Counter('abbb') & Counter('bcc')
    Counter({'b': 1})
    '''
    if not isinstance(other, Counter):
        return NotImplemented
    common = Counter()
    # Only elements of self can appear; take the smaller count, keep
    # positives only.
    for elem, cnt in self.items():
        smallest = min(cnt, other[elem])
        if smallest > 0:
            common[elem] = smallest
    return common
if __name__ == '__main__':
    # Smoke tests / demos for the namedtuple backport (Python 2 only:
    # uses cPickle and print statements).
    # verify that instances can be pickled
    from cPickle import loads, dumps
    Point = namedtuple('Point', 'x, y', True)
    p = Point(x=10, y=20)
    assert p == loads(dumps(p))

    # test and demonstrate ability to override methods
    class Point(namedtuple('Point', 'x y')):
        __slots__ = ()
        @property
        def hypot(self):
            return (self.x ** 2 + self.y ** 2) ** 0.5
        def __str__(self):
            return 'Point: x=%6.3f y=%6.3f hypot=%6.3f' % (self.x, self.y, self.hypot)

    for p in Point(3, 4), Point(14, 5/7.):
        print p

    # Variant with faster _make()/_replace() that skip error checking.
    class Point(namedtuple('Point', 'x y')):
        'Point class with optimized _make() and _replace() without error-checking'
        __slots__ = ()
        _make = classmethod(tuple.__new__)
        def _replace(self, _map=map, **kwds):
            return self._make(_map(kwds.get, ('x', 'y'), self))

    print Point(11, 22)._replace(x=100)

    # Extending an existing namedtuple with an extra field.
    Point3D = namedtuple('Point3D', Point._fields + ('z',))
    print Point3D.__doc__

    # Run the module's doctests and report the results.
    import doctest
    TestResults = namedtuple('TestResults', 'failed attempted')
    print TestResults(*doctest.testmod())
########################################################################
### deque
########################################################################
class deque(_deque):
    """Extension of deque to support Python 2.7's operations.

    Adds a readable ``maxlen`` attribute plus ``reverse()`` and
    ``count()`` for deques from older Python versions.
    """

    def __init__(self, iterable=(), maxlen=None):
        # NOTE(fix): the default was a shared mutable list ([]); an empty
        # tuple is equivalent for iteration and cannot be mutated.
        _deque.__init__(self, iterable, maxlen)
        self._maxlen = maxlen

    @property
    def maxlen(self):
        """Maximum size of the deque, or None if unbounded (read-only)."""
        return self._maxlen

    def reverse(self):
        """Reverse the deque in place."""
        # Popping from the right drains the elements in reversed order.
        data = []
        while self:
            data.append(self.pop())
        self.extend(data)

    def count(self, value):
        """Return the number of elements equal to *value*."""
        return sum(1 for element in self if element == value)

View file

@ -1,131 +0,0 @@
# Got this from here: https://gist.github.com/1126793
# The contents of this file are subject to the Python Software Foundation
# License Version 2.3 (the License). You may not copy or use this file, in
# either source code or executable form, except in compliance with the License.
# You may obtain a copy of the License at http://www.python.org/license.
#
# Software distributed under the License is distributed on an AS IS basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
# Written by Petru Paler
# Minor modifications made by Andrew Resch to replace the BTFailure errors with Exceptions
def decode_int(x, f):
    """Decode a bencoded integer ("i<digits>e") starting at index f.

    Returns (value, index just past the terminating 'e').
    Raises ValueError for the illegal encodings "-0" and leading zeros.
    """
    f += 1
    end = x.index('e', f)
    n = int(x[f:end])
    if x[f] == '-':
        # Negative zero is forbidden by the spec.
        if x[f + 1] == '0':
            raise ValueError
    elif x[f] == '0' and end != f + 1:
        # Leading zeros are forbidden by the spec.
        raise ValueError
    return (n, end + 1)
def decode_string(x, f):
    """Decode a bencoded string ("<len>:<bytes>") starting at index f.

    Returns (string, index just past the string).  Raises ValueError for
    lengths with leading zeros.
    """
    sep = x.index(':', f)
    length = int(x[f:sep])
    if x[f] == '0' and sep != f + 1:
        raise ValueError
    start = sep + 1
    return (x[start:start + length], start + length)
def decode_list(x, f):
    """Decode a bencoded list ("l...e") starting at index f."""
    values = []
    f += 1
    # Consume values until the terminating 'e', dispatching on the first
    # character of each element.
    while x[f] != 'e':
        item, f = decode_func[x[f]](x, f)
        values.append(item)
    return (values, f + 1)
def decode_dict(x, f):
    """Decode a bencoded dictionary ("d...e") starting at index f.

    Keys are always bencoded strings; values may be any bencoded type.
    """
    mapping = {}
    f += 1
    while x[f] != 'e':
        key, f = decode_string(x, f)
        mapping[key], f = decode_func[x[f]](x, f)
    return (mapping, f + 1)
# Dispatch table: first character of a bencoded value -> decoder function.
decode_func = {
    'l': decode_list,
    'd': decode_dict,
    'i': decode_int,
}
# Any leading digit introduces a length-prefixed string.
decode_func.update((digit, decode_string) for digit in '0123456789')
def bdecode(x):
    """Decode a complete bencoded string.

    Raises Exception("not a valid bencoded string") when the input is
    empty, truncated, or malformed.
    """
    try:
        # NOTE(fix): the trailing index was bound to an unused local `l`
        # (flagged by PyFlakes); discard it explicitly instead.
        r, _ = decode_func[x[0]](x, 0)
    except (IndexError, KeyError, ValueError):
        raise Exception("not a valid bencoded string")
    return r
from types import StringType, IntType, LongType, DictType, ListType, TupleType
class Bencached(object):
    # Wrapper marking a string as already-bencoded output;
    # encode_bencached() emits the payload verbatim.
    __slots__ = ['bencoded']

    def __init__(self, s):
        # s: a pre-encoded bencode string.
        self.bencoded = s
def encode_bencached(x,r):
    # x is a Bencached wrapper; its payload is already encoded.
    r.append(x.bencoded)
def encode_int(x, r):
    # Integers are encoded as "i<value>e".
    r += ['i', str(x), 'e']
def encode_bool(x, r):
    # Bencode has no boolean type; booleans serialise as integer 1 or 0.
    encode_int(1 if x else 0, r)
def encode_string(x, r):
    # Strings are encoded as "<length>:<bytes>".
    r += [str(len(x)), ':', x]
def encode_list(x, r):
    # Lists are encoded as "l<items>e", dispatching per item type.
    r.append('l')
    for item in x:
        encode_func[type(item)](item, r)
    r.append('e')
def encode_dict(x, r):
    """Encode a dict as "d<key><value>...e" with keys in sorted order.

    Keys must be strings, as required by the bencode spec.
    """
    r.append('d')
    # NOTE(fix): replaced the items()/list.sort() pair with sorted(),
    # which is identical on Python 2 and also works on Python 3 views.
    for k, v in sorted(x.items()):
        r.extend((str(len(k)), ':', k))
        encode_func[type(v)](v, r)
    r.append('e')
# Dispatch table mapping Python types to their encoder functions.
# Uses the Python 2 `types` aliases imported above.
encode_func = {}
encode_func[Bencached] = encode_bencached
encode_func[IntType] = encode_int
encode_func[LongType] = encode_int
encode_func[StringType] = encode_string
encode_func[ListType] = encode_list
encode_func[TupleType] = encode_list
encode_func[DictType] = encode_dict

try:
    # BooleanType is missing from very old Python versions.
    from types import BooleanType
    encode_func[BooleanType] = encode_bool
except ImportError:
    pass
def bencode(x):
    """Return the bencoded representation of x."""
    pieces = []
    encode_func[type(x)](x, pieces)
    return ''.join(pieces)

View file

@ -1,803 +0,0 @@
"""biplist -- a library for reading and writing binary property list files.
Binary Property List (plist) files provide a faster and smaller serialization
format for property lists on OS X. This is a library for generating binary
plists which can be read by OS X, iOS, or other clients.
The API models the plistlib API, and will call through to plistlib when
XML serialization or deserialization is required.
To generate plists with UID values, wrap the values with the Uid object. The
value must be an int.
To generate plists with NSData/CFData values, wrap the values with the
Data object. The value must be a string.
Date values can only be datetime.datetime objects.
The exceptions InvalidPlistException and NotBinaryPlistException may be
thrown to indicate that the data cannot be serialized or deserialized as
a binary plist.
Plist generation example:
from biplist import *
from datetime import datetime
plist = {'aKey':'aValue',
'0':1.322,
'now':datetime.now(),
'list':[1,2,3],
'tuple':('a','b','c')
}
try:
writePlist(plist, "example.plist")
except (InvalidPlistException, NotBinaryPlistException), e:
print "Something bad happened:", e
Plist parsing example:
from biplist import *
try:
plist = readPlist("example.plist")
print plist
except (InvalidPlistException, NotBinaryPlistException), e:
print "Not a plist:", e
"""
# Standard-library imports, grouped and sorted.
# NOTE(fix): a duplicate "import sys" (flagged by PyFlakes) was removed;
# sys remains imported exactly once.
import datetime
import io
import math
import plistlib
import sys
import time
from collections import namedtuple
from struct import error as struct_error
from struct import pack, unpack
# Python 2/3 compatibility shims.
try:
    # Python 2: the unicode builtin exists.
    unicode
    unicodeEmpty = r''
except NameError:
    # Python 3: str is the unicode type.
    unicode = str
    unicodeEmpty = ''
try:
    long
except NameError:
    # Python 3 unified int and long.
    long = int
try:
    # Python 2: prefer the iterator form of dict item access.
    {}.iteritems
    iteritems = lambda x: x.iteritems()
except AttributeError:
    iteritems = lambda x: x.items()
# Public API of the module.
__all__ = [
    'Uid', 'Data', 'readPlist', 'writePlist', 'readPlistFromString',
    'writePlistToString', 'InvalidPlistException', 'NotBinaryPlistException'
]

# Apple uses Jan 1, 2001 as a base for all plist date/times.
# (978307200 seconds after the Unix epoch.)
apple_reference_date = datetime.datetime.utcfromtimestamp(978307200)
class Uid(int):
    """Wrapper around integers for representing UID values. This
    is used in keyed archiving."""

    def __repr__(self):
        return "Uid(%d)" % self
class Data(bytes):
    """Wrapper around str/bytes marking a value as NSData/CFData."""
    pass
class InvalidPlistException(Exception):
    """Raised when the plist is incorrectly formatted."""
    pass
class NotBinaryPlistException(Exception):
    """Raised when a binary plist was expected but not encountered."""
    pass
def readPlist(pathOrFile):
    """Read a plist (binary or XML) from a path or a file-like object.

    Tries the binary parser first; on NotBinaryPlistException falls back
    to plistlib's XML parser.  XML-parse failures are converted to
    InvalidPlistException.  Files opened here are always closed.
    """
    didOpen = False
    result = None
    if isinstance(pathOrFile, (bytes, unicode)):
        pathOrFile = open(pathOrFile, 'rb')
        didOpen = True
    try:
        reader = PlistReader(pathOrFile)
        result = reader.parse()
    # NOTE(fix): the exception instance was bound (`as e`) but never
    # used, which PyFlakes flags; the binding has been removed.
    except NotBinaryPlistException:
        try:
            pathOrFile.seek(0)
            result = None
            if hasattr(plistlib, 'loads'):
                # Python 3.4+ plistlib API.
                contents = None
                if isinstance(pathOrFile, (bytes, unicode)):
                    with open(pathOrFile, 'rb') as f:
                        contents = f.read()
                else:
                    contents = pathOrFile.read()
                result = plistlib.loads(contents)
            else:
                result = plistlib.readPlist(pathOrFile)
            result = wrapDataObject(result, for_binary=True)
        except Exception as e:
            raise InvalidPlistException(e)
    finally:
        if didOpen:
            pathOrFile.close()
    return result
def wrapDataObject(o, for_binary=False):
    # Recursively convert between biplist.Data and plistlib.Data wrappers.
    # The order of the isinstance checks matters: Data is a bytes subclass,
    # so it must be tested before the generic bytes/plistlib.Data branch.
    if isinstance(o, Data) and not for_binary:
        v = sys.version_info
        # plistlib.Data was the XML-side wrapper before Python 3.4.
        if not (v[0] >= 3 and v[1] >= 4):
            o = plistlib.Data(o)
    elif isinstance(o, (bytes, plistlib.Data)) and for_binary:
        if hasattr(o, 'data'):
            o = Data(o.data)
    elif isinstance(o, tuple):
        # Tuples are immutable: rebuild via a list, then convert back.
        o = wrapDataObject(list(o), for_binary)
        o = tuple(o)
    elif isinstance(o, list):
        # Lists and dicts are rewritten in place.
        for i in range(len(o)):
            o[i] = wrapDataObject(o[i], for_binary)
    elif isinstance(o, dict):
        for k in o:
            o[k] = wrapDataObject(o[k], for_binary)
    return o
def writePlist(rootObject, pathOrFile, binary=True):
    # Write rootObject to pathOrFile (a path string or a writable file
    # object).  binary=True emits the binary plist format via PlistWriter;
    # binary=False delegates to plistlib's XML serialisation.
    if not binary:
        rootObject = wrapDataObject(rootObject, binary)
        if hasattr(plistlib, "dump"):
            # Python 3.4+ plistlib API.
            if isinstance(pathOrFile, (bytes, unicode)):
                with open(pathOrFile, 'wb') as f:
                    return plistlib.dump(rootObject, f)
            else:
                return plistlib.dump(rootObject, pathOrFile)
        else:
            return plistlib.writePlist(rootObject, pathOrFile)
    else:
        didOpen = False
        if isinstance(pathOrFile, (bytes, unicode)):
            pathOrFile = open(pathOrFile, 'wb')
            didOpen = True
        writer = PlistWriter(pathOrFile)
        result = writer.writeRoot(rootObject)
        # Only close files this function opened itself.
        if didOpen:
            pathOrFile.close()
        return result
def readPlistFromString(data):
    """Parse a plist from a byte string."""
    return readPlist(io.BytesIO(data))
def writePlistToString(rootObject, binary=True):
    """Serialise rootObject to a plist byte string (binary by default)."""
    if binary:
        # Binary path: serialise into an in-memory stream.
        stream = io.BytesIO()
        writer = PlistWriter(stream)
        writer.writeRoot(rootObject)
        return stream.getvalue()
    # XML path: delegate to whichever plistlib API this Python provides.
    rootObject = wrapDataObject(rootObject, binary)
    if hasattr(plistlib, "dumps"):
        return plistlib.dumps(rootObject)
    if hasattr(plistlib, "writePlistToBytes"):
        return plistlib.writePlistToBytes(rootObject)
    return plistlib.writePlistToString(rootObject)
def is_stream_binary_plist(stream):
    """Return True if the stream starts with the binary-plist magic.

    Rewinds the stream and checks the first 7 bytes for b'bplist0'.
    """
    stream.seek(0)
    header = stream.read(7)
    # NOTE(fix): return the comparison directly instead of an if/else
    # that returns the literals True/False.
    return header == b'bplist0'
# Layout of the 32-byte trailer at the end of every binary plist.
PlistTrailer = namedtuple('PlistTrailer', 'offsetSize, objectRefSize, offsetCount, topLevelObjectNumber, offsetTableOffset')
# Running byte totals per object category, used when sizing the output.
PlistByteCounts = namedtuple('PlistByteCounts', 'nullBytes, boolBytes, intBytes, realBytes, dateBytes, dataBytes, stringBytes, uidBytes, arrayBytes, setBytes, dictBytes')
class PlistReader(object):
    """Parses a binary plist from a seekable file-like object.

    Typical use: PlistReader(stream).parse().  parse() raises
    NotBinaryPlistException when the magic header is absent and
    InvalidPlistException when the contents are malformed.
    """
    # Per-parse state; reset() reinitialises these for each parse.
    file = None
    contents = ''
    offsets = None
    trailer = None
    currentOffset = 0

    def __init__(self, fileOrStream):
        """Raises NotBinaryPlistException."""
        self.reset()
        self.file = fileOrStream

    def parse(self):
        return self.readRoot()

    def reset(self):
        # Forget everything from any previous parse.
        self.trailer = None
        self.contents = ''
        self.offsets = []
        self.currentOffset = 0

    def readRoot(self):
        # Parse the trailer first, then the offset table, then walk the
        # object graph starting at the top-level object.
        result = None
        self.reset()
        # Get the header, make sure it's a valid file.
        if not is_stream_binary_plist(self.file):
            raise NotBinaryPlistException()
        self.file.seek(0)
        self.contents = self.file.read()
        if len(self.contents) < 32:
            raise InvalidPlistException("File is too short.")
        trailerContents = self.contents[-32:]
        try:
            # Trailer layout: 6 pad bytes, offsetSize, objectRefSize, then
            # three big-endian 64-bit fields.
            self.trailer = PlistTrailer._make(unpack("!xxxxxxBBQQQ", trailerContents))
            offset_size = self.trailer.offsetSize * self.trailer.offsetCount
            offset = self.trailer.offsetTableOffset
            offset_contents = self.contents[offset:offset+offset_size]
            offset_i = 0
            while offset_i < self.trailer.offsetCount:
                begin = self.trailer.offsetSize*offset_i
                tmp_contents = offset_contents[begin:begin+self.trailer.offsetSize]
                tmp_sized = self.getSizedInteger(tmp_contents, self.trailer.offsetSize)
                self.offsets.append(tmp_sized)
                offset_i += 1
            self.setCurrentOffsetToObjectNumber(self.trailer.topLevelObjectNumber)
            result = self.readObject()
        except TypeError as e:
            raise InvalidPlistException(e)
        return result

    def setCurrentOffsetToObjectNumber(self, objectNumber):
        # Seek the parser to the byte offset of the given object number.
        self.currentOffset = self.offsets[objectNumber]

    def readObject(self):
        # Each object starts with a marker byte: high nibble = type,
        # low nibble = length/extra info.
        result = None
        tmp_byte = self.contents[self.currentOffset:self.currentOffset+1]
        marker_byte = unpack("!B", tmp_byte)[0]
        format = (marker_byte >> 4) & 0x0f
        extra = marker_byte & 0x0f
        self.currentOffset += 1

        def proc_extra(extra):
            # An extra of 0b1111 means the real length follows as an
            # encoded integer object.
            if extra == 0b1111:
                #self.currentOffset += 1
                extra = self.readObject()
            return extra

        # bool, null, or fill byte
        if format == 0b0000:
            if extra == 0b0000:
                result = None
            elif extra == 0b1000:
                result = False
            elif extra == 0b1001:
                result = True
            elif extra == 0b1111:
                pass # fill byte
            else:
                raise InvalidPlistException("Invalid object found at offset: %d" % (self.currentOffset - 1))
        # int
        elif format == 0b0001:
            extra = proc_extra(extra)
            # extra is log2 of the byte size.
            result = self.readInteger(pow(2, extra))
        # real
        elif format == 0b0010:
            extra = proc_extra(extra)
            result = self.readReal(extra)
        # date
        elif format == 0b0011 and extra == 0b0011:
            result = self.readDate()
        # data
        elif format == 0b0100:
            extra = proc_extra(extra)
            result = self.readData(extra)
        # ascii string
        elif format == 0b0101:
            extra = proc_extra(extra)
            result = self.readAsciiString(extra)
        # Unicode string
        elif format == 0b0110:
            extra = proc_extra(extra)
            result = self.readUnicode(extra)
        # uid
        elif format == 0b1000:
            result = self.readUid(extra)
        # array
        elif format == 0b1010:
            extra = proc_extra(extra)
            result = self.readArray(extra)
        # set
        elif format == 0b1100:
            extra = proc_extra(extra)
            result = set(self.readArray(extra))
        # dict
        elif format == 0b1101:
            extra = proc_extra(extra)
            result = self.readDict(extra)
        else:
            raise InvalidPlistException("Invalid object found: {format: %s, extra: %s}" % (bin(format), bin(extra)))
        return result

    def readInteger(self, byteSize):
        # 8-byte integers are read as signed numbers (as_number=True).
        result = 0
        original_offset = self.currentOffset
        data = self.contents[self.currentOffset:self.currentOffset + byteSize]
        result = self.getSizedInteger(data, byteSize, as_number=True)
        self.currentOffset = original_offset + byteSize
        return result

    def readReal(self, length):
        # length is log2 of the byte size: 2 -> 4-byte float, 3 -> double.
        result = 0.0
        to_read = pow(2, length)
        data = self.contents[self.currentOffset:self.currentOffset+to_read]
        if length == 2: # 4 bytes
            result = unpack('>f', data)[0]
        elif length == 3: # 8 bytes
            result = unpack('>d', data)[0]
        else:
            raise InvalidPlistException("Unknown real of length %d bytes" % to_read)
        return result

    def readRefs(self, count):
        # Read `count` object references, each trailer.objectRefSize bytes.
        refs = []
        i = 0
        while i < count:
            fragment = self.contents[self.currentOffset:self.currentOffset+self.trailer.objectRefSize]
            ref = self.getSizedInteger(fragment, len(fragment))
            refs.append(ref)
            self.currentOffset += self.trailer.objectRefSize
            i += 1
        return refs

    def readArray(self, count):
        # An array is a list of object references; resolve each in turn.
        result = []
        values = self.readRefs(count)
        i = 0
        while i < len(values):
            self.setCurrentOffsetToObjectNumber(values[i])
            value = self.readObject()
            result.append(value)
            i += 1
        return result

    def readDict(self, count):
        # Keys and values are stored as two parallel reference lists.
        result = {}
        keys = self.readRefs(count)
        values = self.readRefs(count)
        i = 0
        while i < len(keys):
            self.setCurrentOffsetToObjectNumber(keys[i])
            key = self.readObject()
            self.setCurrentOffsetToObjectNumber(values[i])
            value = self.readObject()
            result[key] = value
            i += 1
        return result

    def readAsciiString(self, length):
        result = unpack("!%ds" % length, self.contents[self.currentOffset:self.currentOffset+length])[0]
        self.currentOffset += length
        return result

    def readUnicode(self, length):
        # length counts UTF-16 code units, i.e. 2 bytes each.
        actual_length = length*2
        data = self.contents[self.currentOffset:self.currentOffset+actual_length]
        # unpack not needed?!! data = unpack(">%ds" % (actual_length), data)[0]
        self.currentOffset += actual_length
        return data.decode('utf_16_be')

    def readDate(self):
        # Dates are doubles counting seconds from the Apple reference date.
        result = unpack(">d", self.contents[self.currentOffset:self.currentOffset+8])[0]
        # Use timedelta to workaround time_t size limitation on 32-bit python.
        result = datetime.timedelta(seconds=result) + apple_reference_date
        self.currentOffset += 8
        return result

    def readData(self, length):
        result = self.contents[self.currentOffset:self.currentOffset+length]
        self.currentOffset += length
        return Data(result)

    def readUid(self, length):
        # The marker's low nibble stores (byte count - 1).
        return Uid(self.readInteger(length+1))

    def getSizedInteger(self, data, byteSize, as_number=False):
        """Numbers of 8 bytes are signed integers when they refer to numbers, but unsigned otherwise."""
        result = 0
        # 1, 2, and 4 byte integers are unsigned
        if byteSize == 1:
            result = unpack('>B', data)[0]
        elif byteSize == 2:
            result = unpack('>H', data)[0]
        elif byteSize == 4:
            result = unpack('>L', data)[0]
        elif byteSize == 8:
            if as_number:
                result = unpack('>q', data)[0]
            else:
                result = unpack('>Q', data)[0]
        elif byteSize <= 16:
            # Handle odd-sized or integers larger than 8 bytes
            # Don't naively go over 16 bytes, in order to prevent infinite loops.
            result = 0
            if hasattr(int, 'from_bytes'):
                result = int.from_bytes(data, 'big')
            else:
                for byte in data:
                    result = (result << 8) | unpack('>B', byte)[0]
        else:
            raise InvalidPlistException("Encountered integer longer than 16 bytes.")
        return result
class HashableWrapper(object):
    # Wraps an unhashable container so it can be keyed by identity in the
    # writer's uniquing tables.
    def __init__(self, value):
        self.value = value
    def __repr__(self):
        return "<HashableWrapper: %s>" % [self.value]
class BoolWrapper(object):
    # Distinguishes True/False from the integers 1/0 during serialisation
    # (bool is an int subclass and would otherwise collide).
    def __init__(self, value):
        self.value = value
    def __repr__(self):
        return "<BoolWrapper: %s>" % self.value
class FloatWrapper(object):
    """Interning wrapper around floats used during serialisation."""
    _instances = {}

    def __new__(cls, value):
        # Ensure FloatWrapper(x) for a given float x is always the same
        # object, so the writer's uniquing tables work by identity.
        cached = cls._instances.get(value)
        if cached is None:
            cached = object.__new__(cls)
            cached.value = value
            cls._instances[value] = cached
        return cached

    def __repr__(self):
        return "<FloatWrapper: %s>" % self.value
class PlistWriter(object):
header = b'bplist00bybiplist1.0'
file = None
byteCounts = None
trailer = None
computedUniques = None
writtenReferences = None
referencePositions = None
wrappedTrue = None
wrappedFalse = None
def __init__(self, file):
self.reset()
self.file = file
self.wrappedTrue = BoolWrapper(True)
self.wrappedFalse = BoolWrapper(False)
def reset(self):
self.byteCounts = PlistByteCounts(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
self.trailer = PlistTrailer(0, 0, 0, 0, 0)
# A set of all the uniques which have been computed.
self.computedUniques = set()
# A list of all the uniques which have been written.
self.writtenReferences = {}
# A dict of the positions of the written uniques.
self.referencePositions = {}
def positionOfObjectReference(self, obj):
"""If the given object has been written already, return its
position in the offset table. Otherwise, return None."""
return self.writtenReferences.get(obj)
def writeRoot(self, root):
"""
Strategy is:
- write header
- wrap root object so everything is hashable
- compute size of objects which will be written
- need to do this in order to know how large the object refs
will be in the list/dict/set reference lists
- write objects
- keep objects in writtenReferences
- keep positions of object references in referencePositions
- write object references with the length computed previously
- computer object reference length
- write object reference positions
- write trailer
"""
output = self.header
wrapped_root = self.wrapRoot(root)
should_reference_root = True#not isinstance(wrapped_root, HashableWrapper)
self.computeOffsets(wrapped_root, asReference=should_reference_root, isRoot=True)
self.trailer = self.trailer._replace(**{'objectRefSize':self.intSize(len(self.computedUniques))})
(_, output) = self.writeObjectReference(wrapped_root, output)
output = self.writeObject(wrapped_root, output, setReferencePosition=True)
# output size at this point is an upper bound on how big the
# object reference offsets need to be.
self.trailer = self.trailer._replace(**{
'offsetSize':self.intSize(len(output)),
'offsetCount':len(self.computedUniques),
'offsetTableOffset':len(output),
'topLevelObjectNumber':0
})
output = self.writeOffsetTable(output)
output += pack('!xxxxxxBBQQQ', *self.trailer)
self.file.write(output)
def wrapRoot(self, root):
if isinstance(root, bool):
if root is True:
return self.wrappedTrue
else:
return self.wrappedFalse
elif isinstance(root, float):
return FloatWrapper(root)
elif isinstance(root, set):
n = set()
for value in root:
n.add(self.wrapRoot(value))
return HashableWrapper(n)
elif isinstance(root, dict):
n = {}
for key, value in iteritems(root):
n[self.wrapRoot(key)] = self.wrapRoot(value)
return HashableWrapper(n)
elif isinstance(root, list):
n = []
for value in root:
n.append(self.wrapRoot(value))
return HashableWrapper(n)
elif isinstance(root, tuple):
n = tuple([self.wrapRoot(value) for value in root])
return HashableWrapper(n)
else:
return root
def incrementByteCount(self, field, incr=1):
self.byteCounts = self.byteCounts._replace(**{field:self.byteCounts.__getattribute__(field) + incr})
def computeOffsets(self, obj, asReference=False, isRoot=False):
def check_key(key):
if key is None:
raise InvalidPlistException('Dictionary keys cannot be null in plists.')
elif isinstance(key, Data):
raise InvalidPlistException('Data cannot be dictionary keys in plists.')
elif not isinstance(key, (bytes, unicode)):
raise InvalidPlistException('Keys must be strings.')
def proc_size(size):
if size > 0b1110:
size += self.intSize(size)
return size
# If this should be a reference, then we keep a record of it in the
# uniques table.
if asReference:
if obj in self.computedUniques:
return
else:
self.computedUniques.add(obj)
if obj is None:
self.incrementByteCount('nullBytes')
elif isinstance(obj, BoolWrapper):
self.incrementByteCount('boolBytes')
elif isinstance(obj, Uid):
size = self.intSize(obj)
self.incrementByteCount('uidBytes', incr=1+size)
elif isinstance(obj, (int, long)):
size = self.intSize(obj)
self.incrementByteCount('intBytes', incr=1+size)
elif isinstance(obj, FloatWrapper):
size = self.realSize(obj)
self.incrementByteCount('realBytes', incr=1+size)
elif isinstance(obj, datetime.datetime):
self.incrementByteCount('dateBytes', incr=2)
elif isinstance(obj, Data):
size = proc_size(len(obj))
self.incrementByteCount('dataBytes', incr=1+size)
elif isinstance(obj, (unicode, bytes)):
size = proc_size(len(obj))
self.incrementByteCount('stringBytes', incr=1+size)
elif isinstance(obj, HashableWrapper):
obj = obj.value
if isinstance(obj, set):
size = proc_size(len(obj))
self.incrementByteCount('setBytes', incr=1+size)
for value in obj:
self.computeOffsets(value, asReference=True)
elif isinstance(obj, (list, tuple)):
size = proc_size(len(obj))
self.incrementByteCount('arrayBytes', incr=1+size)
for value in obj:
asRef = True
self.computeOffsets(value, asReference=True)
elif isinstance(obj, dict):
size = proc_size(len(obj))
self.incrementByteCount('dictBytes', incr=1+size)
for key, value in iteritems(obj):
check_key(key)
self.computeOffsets(key, asReference=True)
self.computeOffsets(value, asReference=True)
else:
raise InvalidPlistException("Unknown object type.")
def writeObjectReference(self, obj, output):
"""Tries to write an object reference, adding it to the references
table. Does not write the actual object bytes or set the reference
position. Returns a tuple of whether the object was a new reference
(True if it was, False if it already was in the reference table)
and the new output.
"""
position = self.positionOfObjectReference(obj)
if position is None:
self.writtenReferences[obj] = len(self.writtenReferences)
output += self.binaryInt(len(self.writtenReferences) - 1, byteSize=self.trailer.objectRefSize)
return (True, output)
else:
output += self.binaryInt(position, byteSize=self.trailer.objectRefSize)
return (False, output)
def writeObject(self, obj, output, setReferencePosition=False):
"""Serializes the given object to the output. Returns output.
If setReferencePosition is True, will set the position the
object was written.
"""
def proc_variable_length(format, length):
result = b''
if length > 0b1110:
result += pack('!B', (format << 4) | 0b1111)
result = self.writeObject(length, result)
else:
result += pack('!B', (format << 4) | length)
return result
if isinstance(obj, (str, unicode)) and obj == unicodeEmpty:
# The Apple Plist decoder can't decode a zero length Unicode string.
obj = b''
if setReferencePosition:
self.referencePositions[obj] = len(output)
if obj is None:
output += pack('!B', 0b00000000)
elif isinstance(obj, BoolWrapper):
if obj.value is False:
output += pack('!B', 0b00001000)
else:
output += pack('!B', 0b00001001)
elif isinstance(obj, Uid):
size = self.intSize(obj)
output += pack('!B', (0b1000 << 4) | size - 1)
output += self.binaryInt(obj)
elif isinstance(obj, (int, long)):
byteSize = self.intSize(obj)
root = math.log(byteSize, 2)
output += pack('!B', (0b0001 << 4) | int(root))
output += self.binaryInt(obj, as_number=True)
elif isinstance(obj, FloatWrapper):
# just use doubles
output += pack('!B', (0b0010 << 4) | 3)
output += self.binaryReal(obj)
elif isinstance(obj, datetime.datetime):
timestamp = (obj - apple_reference_date).total_seconds()
output += pack('!B', 0b00110011)
output += pack('!d', float(timestamp))
elif isinstance(obj, Data):
output += proc_variable_length(0b0100, len(obj))
output += obj
elif isinstance(obj, unicode):
byteData = obj.encode('utf_16_be')
output += proc_variable_length(0b0110, len(byteData)//2)
output += byteData
elif isinstance(obj, bytes):
output += proc_variable_length(0b0101, len(obj))
output += obj
elif isinstance(obj, HashableWrapper):
obj = obj.value
if isinstance(obj, (set, list, tuple)):
if isinstance(obj, set):
output += proc_variable_length(0b1100, len(obj))
else:
output += proc_variable_length(0b1010, len(obj))
objectsToWrite = []
for objRef in obj:
(isNew, output) = self.writeObjectReference(objRef, output)
if isNew:
objectsToWrite.append(objRef)
for objRef in objectsToWrite:
output = self.writeObject(objRef, output, setReferencePosition=True)
elif isinstance(obj, dict):
output += proc_variable_length(0b1101, len(obj))
keys = []
values = []
objectsToWrite = []
for key, value in iteritems(obj):
keys.append(key)
values.append(value)
for key in keys:
(isNew, output) = self.writeObjectReference(key, output)
if isNew:
objectsToWrite.append(key)
for value in values:
(isNew, output) = self.writeObjectReference(value, output)
if isNew:
objectsToWrite.append(value)
for objRef in objectsToWrite:
output = self.writeObject(objRef, output, setReferencePosition=True)
return output
def writeOffsetTable(self, output):
"""Writes all of the object reference offsets."""
all_positions = []
writtenReferences = list(self.writtenReferences.items())
writtenReferences.sort(key=lambda x: x[1])
for obj,order in writtenReferences:
# Porting note: Elsewhere we deliberately replace empty unicdoe strings
# with empty binary strings, but the empty unicode string
# goes into writtenReferences. This isn't an issue in Py2
# because u'' and b'' have the same hash; but it is in
# Py3, where they don't.
if bytes != str and obj == unicodeEmpty:
obj = b''
position = self.referencePositions.get(obj)
if position is None:
raise InvalidPlistException("Error while writing offsets table. Object not found. %s" % obj)
output += self.binaryInt(position, self.trailer.offsetSize)
all_positions.append(position)
return output
def binaryReal(self, obj):
# just use doubles
result = pack('>d', obj.value)
return result
def binaryInt(self, obj, byteSize=None, as_number=False):
    """Encode integer `obj` as a big-endian binary string.

    byteSize: width in bytes; when None it is derived via self.intSize().
    as_number: for the 8-byte width, pack as signed ('>q') rather than
               unsigned ('>Q').

    Widths of 1, 2, 4 and 8 use the matching struct format; anything up
    to 16 bytes is emitted as a zero-padded pair of 8-byte words; larger
    widths raise InvalidPlistException.
    """
    if byteSize is None:
        byteSize = self.intSize(obj)
    if byteSize == 1:
        return pack('>B', obj)
    if byteSize == 2:
        return pack('>H', obj)
    if byteSize == 4:
        return pack('>L', obj)
    if byteSize == 8:
        return pack('>q' if as_number else '>Q', obj)
    if byteSize <= 16:
        try:
            return pack('>Q', 0) + pack('>Q', obj)
        except struct_error as e:
            raise InvalidPlistException("Unable to pack integer %d: %s" % (obj, e))
    raise InvalidPlistException("Core Foundation can't handle integers with size greater than 16 bytes.")
def intSize(self, obj):
    """Returns the number of bytes necessary to store the given integer.

    Negative values always take 8 bytes (signed). Non-negative values use
    the smallest unsigned width up to 4 bytes, then 8 bytes while the
    value still fits in a signed 64-bit integer, then 16 bytes for the
    remaining unsigned 64-bit range.
    """
    # Any negative integer is stored as a signed 8-byte quantity.
    if obj < 0:
        return 8
    # Smallest width whose (signed/unsigned) ceiling accommodates obj.
    for width, ceiling in ((1, 0xFF),
                           (2, 0xFFFF),
                           (4, 0xFFFFFFFF),
                           (8, 0x7FFFFFFFFFFFFFFF),
                           (16, 0xffffffffffffffff)):
        if obj <= ceiling:
            return width
    raise InvalidPlistException("Core Foundation can't handle integers with size greater than 8 bytes.")
def realSize(self, obj):
    """All reals are serialized as 8-byte doubles (see binaryReal)."""
    return 8

View file

@ -1,32 +0,0 @@
Copyright (c) 2013, Ethan Furman.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
Redistributions of source code must retain the above
copyright notice, this list of conditions and the
following disclaimer.
Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials
provided with the distribution.
Neither the name Ethan Furman nor the names of any
contributors may be used to endorse or promote products
derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

View file

@ -1,2 +0,0 @@
enum34 is the new Python stdlib enum module available in Python 3.4
backported for previous versions of Python from 2.4 to 3.3.

View file

@ -1,790 +0,0 @@
"""Python Enumerations"""
import sys as _sys
__all__ = ['Enum', 'IntEnum', 'unique']
version = 1, 0, 4
pyver = float('%s.%s' % _sys.version_info[:2])
try:
any
except NameError:
def any(iterable):
for element in iterable:
if element:
return True
return False
try:
from collections import OrderedDict
except ImportError:
OrderedDict = None
try:
basestring
except NameError:
# In Python 2 basestring is the ancestor of both str and unicode
# in Python 3 it's just str, but was missing in 3.1
basestring = str
try:
unicode
except NameError:
# In Python 3 unicode no longer exists (it's just str)
unicode = str
class _RouteClassAttributeToGetattr(object):
"""Route attribute access on a class to __getattr__.
This is a descriptor, used to define attributes that act differently when
accessed through an instance and through a class. Instance access remains
normal, but access to an attribute through a class will be routed to the
class's __getattr__ method; this is done by raising AttributeError.
"""
def __init__(self, fget=None):
self.fget = fget
def __get__(self, instance, ownerclass=None):
if instance is None:
raise AttributeError()
return self.fget(instance)
def __set__(self, instance, value):
raise AttributeError("can't set attribute")
def __delete__(self, instance):
raise AttributeError("can't delete attribute")
def _is_descriptor(obj):
"""Returns True if obj is a descriptor, False otherwise."""
return (
hasattr(obj, '__get__') or
hasattr(obj, '__set__') or
hasattr(obj, '__delete__'))
def _is_dunder(name):
"""Returns True if a __dunder__ name, False otherwise."""
return (name[:2] == name[-2:] == '__' and
name[2:3] != '_' and
name[-3:-2] != '_' and
len(name) > 4)
def _is_sunder(name):
"""Returns True if a _sunder_ name, False otherwise."""
return (name[0] == name[-1] == '_' and
name[1:2] != '_' and
name[-2:-1] != '_' and
len(name) > 2)
def _make_class_unpicklable(cls):
"""Make the given class un-picklable."""
def _break_on_call_reduce(self, protocol=None):
raise TypeError('%r cannot be pickled' % self)
cls.__reduce_ex__ = _break_on_call_reduce
cls.__module__ = '<unknown>'
class _EnumDict(dict):
    """Track enum member order and ensure member names are not reused.

    EnumMeta will use the names found in self._member_names as the
    enumeration member names.
    """
    def __init__(self):
        super(_EnumDict, self).__init__()
        # Candidate member names, in the order they were assigned.
        self._member_names = []
    def __setitem__(self, key, value):
        """Changes anything not dundered or not a descriptor.

        If a descriptor is added with the same name as an enum member, the name
        is removed from _member_names (this may leave a hole in the numerical
        sequence of values).

        If an enum member name is used twice, an error is raised; duplicate
        values are not checked for.

        Single underscore (sunder) names are reserved.

        Note: in 3.x __order__ is simply discarded as a not necessary piece
        leftover from 2.x
        """
        # Py3 preserves definition order natively, so __order__ is ignored.
        if pyver >= 3.0 and key == '__order__':
            return
        if _is_sunder(key):
            raise ValueError('_names_ are reserved for future Enum use')
        elif _is_dunder(key):
            pass
        elif key in self._member_names:
            # descriptor overwriting an enum?
            raise TypeError('Attempted to reuse key: %r' % key)
        elif not _is_descriptor(value):
            if key in self:
                # enum overwriting a descriptor?
                raise TypeError('Key already defined as: %r' % self[key])
            self._member_names.append(key)
        super(_EnumDict, self).__setitem__(key, value)
# Dummy value for Enum as EnumMeta explicitly checks for it, but of course until
# EnumMeta finishes running the first time the Enum class doesn't exist.  This
# is also why there are checks in EnumMeta like `if Enum is not None`
Enum = None
class EnumMeta(type):
    """Metaclass for Enum"""
    @classmethod
    def __prepare__(metacls, cls, bases):
        # Py3 hook: collect the class body into an _EnumDict so definition
        # order is kept and duplicate/reserved names are rejected.
        return _EnumDict()
    def __new__(metacls, cls, bases, classdict):
        # an Enum class is final once enumeration items have been defined; it
        # cannot be mixed with other types (int, float, etc.) if it has an
        # inherited __new__ unless a new __new__ is defined (or the resulting
        # class will fail).
        if type(classdict) is dict:
            # Py2 path (no __prepare__): replay the raw namespace through an
            # _EnumDict to get the same name checks as Py3.
            original_dict = classdict
            classdict = _EnumDict()
            for k, v in original_dict.items():
                classdict[k] = v
        member_type, first_enum = metacls._get_mixins_(bases)
        __new__, save_new, use_args = metacls._find_new_(classdict, member_type,
                first_enum)
        # save enum items into separate mapping so they don't get baked into
        # the new class
        members = dict((k, classdict[k]) for k in classdict._member_names)
        for name in classdict._member_names:
            del classdict[name]
        # py2 support for definition order
        __order__ = classdict.get('__order__')
        if __order__ is None:
            if pyver < 3.0:
                # No explicit __order__ on Py2: sort members by value, and
                # by name when the values are mutually unorderable.
                try:
                    __order__ = [name for (name, value) in sorted(members.items(), key=lambda item: item[1])]
                except TypeError:
                    __order__ = [name for name in sorted(members.keys())]
            else:
                __order__ = classdict._member_names
        else:
            del classdict['__order__']
            if pyver < 3.0:
                __order__ = __order__.replace(',', ' ').split()
                # Names omitted from __order__ (aliases) go at the end.
                aliases = [name for name in members if name not in __order__]
                __order__ += aliases
        # check for illegal enum names (any others?)
        invalid_names = set(members) & set(['mro'])
        if invalid_names:
            raise ValueError('Invalid enum member name(s): %s' % (
                ', '.join(invalid_names), ))
        # create our new Enum type
        enum_class = super(EnumMeta, metacls).__new__(metacls, cls, bases, classdict)
        enum_class._member_names_ = []               # names in random order
        if OrderedDict is not None:
            enum_class._member_map_ = OrderedDict()
        else:
            enum_class._member_map_ = {}             # name->value map
        enum_class._member_type_ = member_type
        # Reverse value->name map for hashable values.
        enum_class._value2member_map_ = {}
        # instantiate them, checking for duplicates as we go
        # we instantiate first instead of checking for duplicates first in case
        # a custom __new__ is doing something funky with the values -- such as
        # auto-numbering ;)
        if __new__ is None:
            __new__ = enum_class.__new__
        for member_name in __order__:
            value = members[member_name]
            if not isinstance(value, tuple):
                args = (value, )
            else:
                args = value
            if member_type is tuple:   # special case for tuple enums
                args = (args, )        # wrap it one more time
            if not use_args or not args:
                enum_member = __new__(enum_class)
                if not hasattr(enum_member, '_value_'):
                    enum_member._value_ = value
            else:
                enum_member = __new__(enum_class, *args)
                if not hasattr(enum_member, '_value_'):
                    enum_member._value_ = member_type(*args)
            value = enum_member._value_
            enum_member._name_ = member_name
            enum_member.__objclass__ = enum_class
            enum_member.__init__(*args)
            # If another member with the same value was already defined, the
            # new member becomes an alias to the existing one.
            for name, canonical_member in enum_class._member_map_.items():
                if canonical_member.value == enum_member._value_:
                    enum_member = canonical_member
                    break
            else:
                # Aliases don't appear in member names (only in __members__).
                enum_class._member_names_.append(member_name)
            enum_class._member_map_[member_name] = enum_member
            try:
                # This may fail if value is not hashable. We can't add the value
                # to the map, and by-value lookups for this value will be
                # linear.
                enum_class._value2member_map_[value] = enum_member
            except TypeError:
                pass
        # If a custom type is mixed into the Enum, and it does not know how
        # to pickle itself, pickle.dumps will succeed but pickle.loads will
        # fail.  Rather than have the error show up later and possibly far
        # from the source, sabotage the pickle protocol for this class so
        # that pickle.dumps also fails.
        #
        # However, if the new class implements its own __reduce_ex__, do not
        # sabotage -- it's on them to make sure it works correctly.  We use
        # __reduce_ex__ instead of any of the others as it is preferred by
        # pickle over __reduce__, and it handles all pickle protocols.
        unpicklable = False
        if '__reduce_ex__' not in classdict:
            if member_type is not object:
                methods = ('__getnewargs_ex__', '__getnewargs__',
                        '__reduce_ex__', '__reduce__')
                if not any(m in member_type.__dict__ for m in methods):
                    _make_class_unpicklable(enum_class)
                    unpicklable = True
        # double check that repr and friends are not the mixin's or various
        # things break (such as pickle)
        for name in ('__repr__', '__str__', '__format__', '__reduce_ex__'):
            class_method = getattr(enum_class, name)
            obj_method = getattr(member_type, name, None)
            enum_method = getattr(first_enum, name, None)
            if name not in classdict and class_method is not enum_method:
                if name == '__reduce_ex__' and unpicklable:
                    continue
                setattr(enum_class, name, enum_method)
        # method resolution and int's are not playing nice
        # Pythons earlier than 2.6 use __cmp__ instead of the rich comparisons
        if pyver < 2.6:
            if issubclass(enum_class, int):
                setattr(enum_class, '__cmp__', getattr(int, '__cmp__'))
        elif pyver < 3.0:
            if issubclass(enum_class, int):
                for method in (
                        '__le__',
                        '__lt__',
                        '__gt__',
                        '__ge__',
                        '__eq__',
                        '__ne__',
                        '__hash__',
                        ):
                    setattr(enum_class, method, getattr(int, method))
        # replace any other __new__ with our own (as long as Enum is not None,
        # anyway) -- again, this is to support pickle
        if Enum is not None:
            # if the user defined their own __new__, save it before it gets
            # clobbered in case they subclass later
            if save_new:
                setattr(enum_class, '__member_new__', enum_class.__dict__['__new__'])
            setattr(enum_class, '__new__', Enum.__dict__['__new__'])
        return enum_class
    def __call__(cls, value, names=None, module=None, type=None):
        """Either returns an existing member, or creates a new enum class.

        This method is used both when an enum class is given a value to match
        to an enumeration member (i.e. Color(3)) and for the functional API
        (i.e. Color = Enum('Color', names='red green blue')).

        When used for the functional API: `module`, if set, will be stored in
        the new class' __module__ attribute; `type`, if set, will be mixed in
        as the first base class.

        Note: if `module` is not set this routine will attempt to discover the
        calling module by walking the frame stack; if this is unsuccessful
        the resulting class will not be pickleable.
        """
        if names is None:  # simple value lookup
            return cls.__new__(cls, value)
        # otherwise, functional API: we're creating a new Enum type
        return cls._create_(value, names, module=module, type=type)
    def __contains__(cls, member):
        # Membership means "is an actual member of this enumeration", not
        # merely value equality.
        return isinstance(member, cls) and member.name in cls._member_map_
    def __delattr__(cls, attr):
        # nicer error message when someone tries to delete an attribute
        # (see issue19025).
        if attr in cls._member_map_:
            raise AttributeError(
                    "%s: cannot delete Enum member." % cls.__name__)
        super(EnumMeta, cls).__delattr__(attr)
    def __dir__(self):
        return (['__class__', '__doc__', '__members__', '__module__'] +
                self._member_names_)
    @property
    def __members__(cls):
        """Returns a mapping of member name->value.

        This mapping lists all enum members, including aliases.  Note that
        this is a copy of the internal mapping.
        """
        return cls._member_map_.copy()
    def __getattr__(cls, name):
        """Return the enum member matching `name`

        We use __getattr__ instead of descriptors or inserting into the enum
        class' __dict__ in order to support `name` and `value` being both
        properties for enum members (which live in the class' __dict__) and
        enum members themselves.
        """
        if _is_dunder(name):
            raise AttributeError(name)
        try:
            return cls._member_map_[name]
        except KeyError:
            raise AttributeError(name)
    def __getitem__(cls, name):
        # Item access looks members up by name, including aliases.
        return cls._member_map_[name]
    def __iter__(cls):
        # Iteration yields canonical members only (no aliases).
        return (cls._member_map_[name] for name in cls._member_names_)
    def __reversed__(cls):
        return (cls._member_map_[name] for name in reversed(cls._member_names_))
    def __len__(cls):
        return len(cls._member_names_)
    def __repr__(cls):
        return "<enum %r>" % cls.__name__
    def __setattr__(cls, name, value):
        """Block attempts to reassign Enum members.

        A simple assignment to the class namespace only changes one of the
        several possible ways to get an Enum member from the Enum class,
        resulting in an inconsistent Enumeration.
        """
        member_map = cls.__dict__.get('_member_map_', {})
        if name in member_map:
            raise AttributeError('Cannot reassign members.')
        super(EnumMeta, cls).__setattr__(name, value)
    def _create_(cls, class_name, names=None, module=None, type=None):
        """Convenience method to create a new Enum class.

        `names` can be:

        * A string containing member names, separated either with spaces or
          commas.  Values are auto-numbered from 1.
        * An iterable of member names.  Values are auto-numbered from 1.
        * An iterable of (member name, value) pairs.
        * A mapping of member name -> value.
        """
        if pyver < 3.0:
            # if class_name is unicode, attempt a conversion to ASCII
            if isinstance(class_name, unicode):
                try:
                    class_name = class_name.encode('ascii')
                except UnicodeEncodeError:
                    raise TypeError('%r is not representable in ASCII' % class_name)
        metacls = cls.__class__
        if type is None:
            bases = (cls, )
        else:
            bases = (type, cls)
        classdict = metacls.__prepare__(class_name, bases)
        __order__ = []
        # special processing needed for names?
        if isinstance(names, basestring):
            names = names.replace(',', ' ').split()
        if isinstance(names, (tuple, list)) and isinstance(names[0], basestring):
            names = [(e, i+1) for (i, e) in enumerate(names)]
        # Here, names is either an iterable of (name, value) or a mapping.
        for item in names:
            if isinstance(item, basestring):
                member_name, member_value = item, names[item]
            else:
                member_name, member_value = item
            classdict[member_name] = member_value
            __order__.append(member_name)
        # only set __order__ in classdict if name/value was not from a mapping
        if not isinstance(item, basestring):
            classdict['__order__'] = ' '.join(__order__)
        enum_class = metacls.__new__(metacls, class_name, bases, classdict)
        # TODO: replace the frame hack if a blessed way to know the calling
        # module is ever developed
        if module is None:
            try:
                module = _sys._getframe(2).f_globals['__name__']
            except (AttributeError, ValueError):
                pass
        if module is None:
            _make_class_unpicklable(enum_class)
        else:
            enum_class.__module__ = module
        return enum_class
    @staticmethod
    def _get_mixins_(bases):
        """Returns the type for creating enum members, and the first inherited
        enum class.

        bases: the tuple of bases that was given to __new__
        """
        if not bases or Enum is None:
            return object, Enum
        # double check that we are not subclassing a class with existing
        # enumeration members; while we're at it, see if any other data
        # type has been mixed in so we can use the correct __new__
        member_type = first_enum = None
        for base in bases:
            if (base is not Enum and
                    issubclass(base, Enum) and
                    base._member_names_):
                raise TypeError("Cannot extend enumerations")
        # base is now the last base in bases
        if not issubclass(base, Enum):
            raise TypeError("new enumerations must be created as "
                    "`ClassName([mixin_type,] enum_type)`")
        # get correct mix-in type (either mix-in type of Enum subclass, or
        # first base if last base is Enum)
        if not issubclass(bases[0], Enum):
            member_type = bases[0]  # first data type
            first_enum = bases[-1]  # enum type
        else:
            for base in bases[0].__mro__:
                # most common: (IntEnum, int, Enum, object)
                # possible:    (<Enum 'AutoIntEnum'>, <Enum 'IntEnum'>,
                #               <class 'int'>, <Enum 'Enum'>,
                #               <class 'object'>)
                if issubclass(base, Enum):
                    if first_enum is None:
                        first_enum = base
                else:
                    if member_type is None:
                        member_type = base
        return member_type, first_enum
    # The Py2 and Py3 variants of _find_new_ differ because only Py2 has
    # unbound methods and because dict lookup semantics for __new__ differ.
    if pyver < 3.0:
        @staticmethod
        def _find_new_(classdict, member_type, first_enum):
            """Returns the __new__ to be used for creating the enum members.

            classdict: the class dictionary given to __new__
            member_type: the data type whose __new__ will be used by default
            first_enum: enumeration to check for an overriding __new__
            """
            # now find the correct __new__, checking to see of one was defined
            # by the user; also check earlier enum classes in case a __new__ was
            # saved as __member_new__
            __new__ = classdict.get('__new__', None)
            if __new__:
                return None, True, True      # __new__, save_new, use_args
            N__new__ = getattr(None, '__new__')
            O__new__ = getattr(object, '__new__')
            if Enum is None:
                E__new__ = N__new__
            else:
                E__new__ = Enum.__dict__['__new__']
            # check all possibles for __member_new__ before falling back to
            # __new__
            for method in ('__member_new__', '__new__'):
                for possible in (member_type, first_enum):
                    try:
                        target = possible.__dict__[method]
                    except (AttributeError, KeyError):
                        target = getattr(possible, method, None)
                    if target not in [
                            None,
                            N__new__,
                            O__new__,
                            E__new__,
                            ]:
                        if method == '__member_new__':
                            classdict['__new__'] = target
                            return None, False, True
                        if isinstance(target, staticmethod):
                            target = target.__get__(member_type)
                        __new__ = target
                        break
                if __new__ is not None:
                    break
            else:
                __new__ = object.__new__
            # if a non-object.__new__ is used then whatever value/tuple was
            # assigned to the enum member name will be passed to __new__ and to the
            # new enum member's __init__
            if __new__ is object.__new__:
                use_args = False
            else:
                use_args = True
            return __new__, False, use_args
    else:
        @staticmethod
        def _find_new_(classdict, member_type, first_enum):
            """Returns the __new__ to be used for creating the enum members.

            classdict: the class dictionary given to __new__
            member_type: the data type whose __new__ will be used by default
            first_enum: enumeration to check for an overriding __new__
            """
            # now find the correct __new__, checking to see of one was defined
            # by the user; also check earlier enum classes in case a __new__ was
            # saved as __member_new__
            __new__ = classdict.get('__new__', None)
            # should __new__ be saved as __member_new__ later?
            save_new = __new__ is not None
            if __new__ is None:
                # check all possibles for __member_new__ before falling back to
                # __new__
                for method in ('__member_new__', '__new__'):
                    for possible in (member_type, first_enum):
                        target = getattr(possible, method, None)
                        if target not in (
                                None,
                                None.__new__,
                                object.__new__,
                                Enum.__new__,
                                ):
                            __new__ = target
                            break
                    if __new__ is not None:
                        break
                else:
                    __new__ = object.__new__
            # if a non-object.__new__ is used then whatever value/tuple was
            # assigned to the enum member name will be passed to __new__ and to the
            # new enum member's __init__
            if __new__ is object.__new__:
                use_args = False
            else:
                use_args = True
            return __new__, save_new, use_args
########################################################
# In order to support Python 2 and 3 with a single
# codebase we have to create the Enum methods separately
# and then use the `type(name, bases, dict)` method to
# create the class.
########################################################
temp_enum_dict = {}
temp_enum_dict['__doc__'] = "Generic enumeration.\n\n    Derive from this class to define new enumerations.\n\n"
def __new__(cls, value):
    # all enum instances are actually created during class construction
    # without calling this method; this method is called by the metaclass'
    # __call__ (i.e. Color(3) ), and by pickle
    if type(value) is cls:
        # For lookups like Color(Color.red)
        value = value.value
        #return value
    # by-value search for a matching enum member
    # see if it's in the reverse mapping (for hashable values)
    try:
        if value in cls._value2member_map_:
            return cls._value2member_map_[value]
    except TypeError:
        # not there, now do long search -- O(n) behavior
        for member in cls._member_map_.values():
            if member.value == value:
                return member
    raise ValueError("%s is not a valid %s" % (value, cls.__name__))
temp_enum_dict['__new__'] = __new__
del __new__
def __repr__(self):
    return "<%s.%s: %r>" % (
            self.__class__.__name__, self._name_, self._value_)
temp_enum_dict['__repr__'] = __repr__
del __repr__
def __str__(self):
    return "%s.%s" % (self.__class__.__name__, self._name_)
temp_enum_dict['__str__'] = __str__
del __str__
def __dir__(self):
    # Expose any extra behavior (methods) defined on the enumeration,
    # skipping private/dunder names.
    added_behavior = [
            m
            for cls in self.__class__.mro()
            for m in cls.__dict__
            if m[0] != '_'
            ]
    return (['__class__', '__doc__', '__module__', ] + added_behavior)
temp_enum_dict['__dir__'] = __dir__
del __dir__
def __format__(self, format_spec):
    # mixed-in Enums should use the mixed-in type's __format__, otherwise
    # we can get strange results with the Enum name showing up instead of
    # the value
    # pure Enum branch
    if self._member_type_ is object:
        cls = str
        val = str(self)
    # mix-in branch
    else:
        cls = self._member_type_
        val = self.value
    return cls.__format__(val, format_spec)
temp_enum_dict['__format__'] = __format__
del __format__
####################################
# Pythons earlier than 2.6 use __cmp__ instead of the rich comparisons
if pyver < 2.6:
    def __cmp__(self, other):
        if type(other) is self.__class__:
            if self is other:
                return 0
            return -1
        return NotImplemented
        # NOTE(review): the raise below is unreachable (it follows a return);
        # kept as-is from upstream enum34.
        raise TypeError("unorderable types: %s() and %s()" % (self.__class__.__name__, other.__class__.__name__))
    temp_enum_dict['__cmp__'] = __cmp__
    del __cmp__
else:
    # Rich comparisons: ordering between enum members is deliberately an error.
    def __le__(self, other):
        raise TypeError("unorderable types: %s() <= %s()" % (self.__class__.__name__, other.__class__.__name__))
    temp_enum_dict['__le__'] = __le__
    del __le__
    def __lt__(self, other):
        raise TypeError("unorderable types: %s() < %s()" % (self.__class__.__name__, other.__class__.__name__))
    temp_enum_dict['__lt__'] = __lt__
    del __lt__
    def __ge__(self, other):
        raise TypeError("unorderable types: %s() >= %s()" % (self.__class__.__name__, other.__class__.__name__))
    temp_enum_dict['__ge__'] = __ge__
    del __ge__
    def __gt__(self, other):
        raise TypeError("unorderable types: %s() > %s()" % (self.__class__.__name__, other.__class__.__name__))
    temp_enum_dict['__gt__'] = __gt__
    del __gt__
def __eq__(self, other):
    # Members compare equal only by identity.
    if type(other) is self.__class__:
        return self is other
    return NotImplemented
temp_enum_dict['__eq__'] = __eq__
del __eq__
def __ne__(self, other):
    if type(other) is self.__class__:
        return self is not other
    return NotImplemented
temp_enum_dict['__ne__'] = __ne__
del __ne__
def __hash__(self):
    return hash(self._name_)
temp_enum_dict['__hash__'] = __hash__
del __hash__
def __reduce_ex__(self, proto):
    # Pickle by value; unpickling goes through EnumMeta.__call__.
    return self.__class__, (self._value_, )
temp_enum_dict['__reduce_ex__'] = __reduce_ex__
del __reduce_ex__
# _RouteClassAttributeToGetattr is used to provide access to the `name`
# and `value` properties of enum members while keeping some measure of
# protection from modification, while still allowing for an enumeration
# to have members named `name` and `value`.  This works because enumeration
# members are not set directly on the enum class -- __getattr__ is
# used to look them up.
@_RouteClassAttributeToGetattr
def name(self):
    return self._name_
temp_enum_dict['name'] = name
del name
@_RouteClassAttributeToGetattr
def value(self):
    return self._value_
temp_enum_dict['value'] = value
del value
# Finally build Enum itself from the accumulated namespace.
Enum = EnumMeta('Enum', (object, ), temp_enum_dict)
del temp_enum_dict
# Enum has now been created
###########################
class IntEnum(int, Enum):
    """Enum where members are also (and must be) ints"""
    # No body needed: the int/Enum mixin machinery in EnumMeta does the work.
def unique(enumeration):
    """Class decorator that ensures only unique members exist in an enumeration."""
    # An alias is any __members__ entry whose key differs from the
    # canonical member name it maps to.
    duplicates = [(alias, member.name)
                  for alias, member in enumeration.__members__.items()
                  if alias != member.name]
    if duplicates:
        duplicate_names = ', '.join("%s -> %s" % pair for pair in duplicates)
        raise ValueError('duplicate names found in %r: %s' %
                (enumeration, duplicate_names)
                )
    return enumeration

View file

@ -1,725 +0,0 @@
``enum`` --- support for enumerations
========================================
.. :synopsis: enumerations are sets of symbolic names bound to unique, constant
values.
.. :moduleauthor:: Ethan Furman <ethan@stoneleaf.us>
.. :sectionauthor:: Barry Warsaw <barry@python.org>,
.. :sectionauthor:: Eli Bendersky <eliben@gmail.com>,
.. :sectionauthor:: Ethan Furman <ethan@stoneleaf.us>
----------------
An enumeration is a set of symbolic names (members) bound to unique, constant
values. Within an enumeration, the members can be compared by identity, and
the enumeration itself can be iterated over.
Module Contents
---------------
This module defines two enumeration classes that can be used to define unique
sets of names and values: ``Enum`` and ``IntEnum``. It also defines
one decorator, ``unique``.
``Enum``
Base class for creating enumerated constants. See section `Functional API`_
for an alternate construction syntax.
``IntEnum``
Base class for creating enumerated constants that are also subclasses of ``int``.
``unique``
Enum class decorator that ensures only one name is bound to any one value.
Creating an Enum
----------------
Enumerations are created using the ``class`` syntax, which makes them
easy to read and write. An alternative creation method is described in
`Functional API`_. To define an enumeration, subclass ``Enum`` as
follows::
>>> from enum import Enum
>>> class Color(Enum):
... red = 1
... green = 2
... blue = 3
Note: Nomenclature
- The class ``Color`` is an *enumeration* (or *enum*)
- The attributes ``Color.red``, ``Color.green``, etc., are
*enumeration members* (or *enum members*).
- The enum members have *names* and *values* (the name of
``Color.red`` is ``red``, the value of ``Color.blue`` is
``3``, etc.)
Note:
Even though we use the ``class`` syntax to create Enums, Enums
are not normal Python classes. See `How are Enums different?`_ for
more details.
Enumeration members have human readable string representations::
>>> print(Color.red)
Color.red
...while their ``repr`` has more information::
>>> print(repr(Color.red))
<Color.red: 1>
The *type* of an enumeration member is the enumeration it belongs to::
>>> type(Color.red)
<enum 'Color'>
>>> isinstance(Color.green, Color)
True
>>>
Enum members also have a property that contains just their item name::
>>> print(Color.red.name)
red
Enumerations support iteration. In Python 3.x definition order is used; in
Python 2.x the definition order is not available, but class attribute
``__order__`` is supported; otherwise, value order is used::
>>> class Shake(Enum):
... __order__ = 'vanilla chocolate cookies mint' # only needed in 2.x
... vanilla = 7
... chocolate = 4
... cookies = 9
... mint = 3
...
>>> for shake in Shake:
... print(shake)
...
Shake.vanilla
Shake.chocolate
Shake.cookies
Shake.mint
The ``__order__`` attribute is always removed, and in 3.x it is also ignored
(order is definition order); however, in the stdlib version it will be ignored
but not removed.
Enumeration members are hashable, so they can be used in dictionaries and sets::
>>> apples = {}
>>> apples[Color.red] = 'red delicious'
>>> apples[Color.green] = 'granny smith'
>>> apples == {Color.red: 'red delicious', Color.green: 'granny smith'}
True
Programmatic access to enumeration members and their attributes
---------------------------------------------------------------
Sometimes it's useful to access members in enumerations programmatically (i.e.
situations where ``Color.red`` won't do because the exact color is not known
at program-writing time). ``Enum`` allows such access::
>>> Color(1)
<Color.red: 1>
>>> Color(3)
<Color.blue: 3>
If you want to access enum members by *name*, use item access::
>>> Color['red']
<Color.red: 1>
>>> Color['green']
<Color.green: 2>
If you have an enum member and need its ``name`` or ``value``::
>>> member = Color.red
>>> member.name
'red'
>>> member.value
1
Duplicating enum members and values
-----------------------------------
Having two enum members (or any other attribute) with the same name is invalid;
in Python 3.x this would raise an error, but in Python 2.x the second member
simply overwrites the first::
>>> # python 2.x
>>> class Shape(Enum):
... square = 2
... square = 3
...
>>> Shape.square
<Shape.square: 3>
>>> # python 3.x
>>> class Shape(Enum):
... square = 2
... square = 3
Traceback (most recent call last):
...
TypeError: Attempted to reuse key: 'square'
However, two enum members are allowed to have the same value. Given two members
A and B with the same value (and A defined first), B is an alias to A. By-value
lookup of the value of A and B will return A. By-name lookup of B will also
return A::
>>> class Shape(Enum):
... __order__ = 'square diamond circle alias_for_square' # only needed in 2.x
... square = 2
... diamond = 1
... circle = 3
... alias_for_square = 2
...
>>> Shape.square
<Shape.square: 2>
>>> Shape.alias_for_square
<Shape.square: 2>
>>> Shape(2)
<Shape.square: 2>
Allowing aliases is not always desirable. ``unique`` can be used to ensure
that none exist in a particular enumeration::
>>> from enum import unique
>>> @unique
... class Mistake(Enum):
... __order__ = 'one two three four' # only needed in 2.x
... one = 1
... two = 2
... three = 3
... four = 3
Traceback (most recent call last):
...
ValueError: duplicate names found in <enum 'Mistake'>: four -> three
Iterating over the members of an enum does not provide the aliases::
>>> list(Shape)
[<Shape.square: 2>, <Shape.diamond: 1>, <Shape.circle: 3>]
The special attribute ``__members__`` is a dictionary mapping names to members.
It includes all names defined in the enumeration, including the aliases::
>>> for name, member in sorted(Shape.__members__.items()):
... name, member
...
('alias_for_square', <Shape.square: 2>)
('circle', <Shape.circle: 3>)
('diamond', <Shape.diamond: 1>)
('square', <Shape.square: 2>)
The ``__members__`` attribute can be used for detailed programmatic access to
the enumeration members. For example, finding all the aliases::
>>> [name for name, member in Shape.__members__.items() if member.name != name]
['alias_for_square']
Comparisons
-----------
Enumeration members are compared by identity::
>>> Color.red is Color.red
True
>>> Color.red is Color.blue
False
>>> Color.red is not Color.blue
True
Ordered comparisons between enumeration values are *not* supported. Enum
members are not integers (but see `IntEnum`_ below)::
>>> Color.red < Color.blue
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: unorderable types: Color() < Color()
.. warning::
In Python 2 *everything* is ordered, even though the ordering may not
make sense. If you want your enumerations to have a sensible ordering
check out the `OrderedEnum`_ recipe below.
Equality comparisons are defined though::
>>> Color.blue == Color.red
False
>>> Color.blue != Color.red
True
>>> Color.blue == Color.blue
True
Comparisons against non-enumeration values will always compare not equal
(again, ``IntEnum`` was explicitly designed to behave differently, see
below)::
>>> Color.blue == 2
False
Allowed members and attributes of enumerations
----------------------------------------------
The examples above use integers for enumeration values. Using integers is
short and handy (and provided by default by the `Functional API`_), but not
strictly enforced. In the vast majority of use-cases, one doesn't care what
the actual value of an enumeration is. But if the value *is* important,
enumerations can have arbitrary values.
Enumerations are Python classes, and can have methods and special methods as
usual. If we have this enumeration::
>>> class Mood(Enum):
... funky = 1
... happy = 3
...
... def describe(self):
... # self is the member here
... return self.name, self.value
...
... def __str__(self):
... return 'my custom str! {0}'.format(self.value)
...
... @classmethod
... def favorite_mood(cls):
... # cls here is the enumeration
... return cls.happy
Then::
>>> Mood.favorite_mood()
<Mood.happy: 3>
>>> Mood.happy.describe()
('happy', 3)
>>> str(Mood.funky)
'my custom str! 1'
The rules for what is allowed are as follows: _sunder_ names (starting and
ending with a single underscore) are reserved by enum and cannot be used;
all other attributes defined within an enumeration will become members of this
enumeration, with the exception of *__dunder__* names and descriptors (methods
are also descriptors).
Note:
If your enumeration defines ``__new__`` and/or ``__init__`` then
whatever value(s) were given to the enum member will be passed into
those methods. See `Planet`_ for an example.
Restricted subclassing of enumerations
--------------------------------------
Subclassing an enumeration is allowed only if the enumeration does not define
any members. So this is forbidden::
>>> class MoreColor(Color):
... pink = 17
Traceback (most recent call last):
...
TypeError: Cannot extend enumerations
But this is allowed::
>>> class Foo(Enum):
... def some_behavior(self):
... pass
...
>>> class Bar(Foo):
... happy = 1
... sad = 2
...
Allowing subclassing of enums that define members would lead to a violation of
some important invariants of types and instances. On the other hand, it makes
sense to allow sharing some common behavior between a group of enumerations.
(See `OrderedEnum`_ for an example.)
Pickling
--------
Enumerations can be pickled and unpickled::
>>> from enum.test_enum import Fruit
>>> from pickle import dumps, loads
>>> Fruit.tomato is loads(dumps(Fruit.tomato, 2))
True
The usual restrictions for pickling apply: picklable enums must be defined in
the top level of a module, since unpickling requires them to be importable
from that module.
Note:
With pickle protocol version 4 (introduced in Python 3.4) it is possible
to easily pickle enums nested in other classes.
Functional API
--------------
The ``Enum`` class is callable, providing the following functional API::
>>> Animal = Enum('Animal', 'ant bee cat dog')
>>> Animal
<enum 'Animal'>
>>> Animal.ant
<Animal.ant: 1>
>>> Animal.ant.value
1
>>> list(Animal)
[<Animal.ant: 1>, <Animal.bee: 2>, <Animal.cat: 3>, <Animal.dog: 4>]
The semantics of this API resemble ``namedtuple``. The first argument
of the call to ``Enum`` is the name of the enumeration.
The second argument is the *source* of enumeration member names. It can be a
whitespace-separated string of names, a sequence of names, a sequence of
2-tuples with key/value pairs, or a mapping (e.g. dictionary) of names to
values. The last two options enable assigning arbitrary values to
enumerations; the others auto-assign increasing integers starting with 1. A
new class derived from ``Enum`` is returned. In other words, the above
assignment to ``Animal`` is equivalent to::
>>> class Animals(Enum):
... ant = 1
... bee = 2
... cat = 3
... dog = 4
Pickling enums created with the functional API can be tricky as frame stack
implementation details are used to try and figure out which module the
enumeration is being created in (e.g. it will fail if you use a utility
function in a separate module, and also may not work on IronPython or Jython).
The solution is to specify the module name explicitly as follows::
>>> Animals = Enum('Animals', 'ant bee cat dog', module=__name__)
Derived Enumerations
--------------------
IntEnum
^^^^^^^
A variation of ``Enum`` is provided which is also a subclass of
``int``. Members of an ``IntEnum`` can be compared to integers;
by extension, integer enumerations of different types can also be compared
to each other::
>>> from enum import IntEnum
>>> class Shape(IntEnum):
... circle = 1
... square = 2
...
>>> class Request(IntEnum):
... post = 1
... get = 2
...
>>> Shape == 1
False
>>> Shape.circle == 1
True
>>> Shape.circle == Request.post
True
However, they still can't be compared to standard ``Enum`` enumerations::
>>> class Shape(IntEnum):
... circle = 1
... square = 2
...
>>> class Color(Enum):
... red = 1
... green = 2
...
>>> Shape.circle == Color.red
False
``IntEnum`` values behave like integers in other ways you'd expect::
>>> int(Shape.circle)
1
>>> ['a', 'b', 'c'][Shape.circle]
'b'
>>> [i for i in range(Shape.square)]
[0, 1]
For the vast majority of code, ``Enum`` is strongly recommended,
since ``IntEnum`` breaks some semantic promises of an enumeration (by
being comparable to integers, and thus by transitivity to other
unrelated enumerations). It should be used only in special cases where
there's no other choice; for example, when integer constants are
replaced with enumerations and backwards compatibility is required with code
that still expects integers.
Others
^^^^^^
While ``IntEnum`` is part of the ``enum`` module, it would be very
simple to implement independently::
class IntEnum(int, Enum):
pass
This demonstrates how similar derived enumerations can be defined; for example
a ``StrEnum`` that mixes in ``str`` instead of ``int``.
Some rules:
1. When subclassing ``Enum``, mix-in types must appear before
``Enum`` itself in the sequence of bases, as in the ``IntEnum``
example above.
2. While ``Enum`` can have members of any type, once you mix in an
additional type, all the members must have values of that type, e.g.
``int`` above. This restriction does not apply to mix-ins which only
add methods and don't specify another data type such as ``int`` or
``str``.
3. When another data type is mixed in, the ``value`` attribute is *not the
same* as the enum member itself, although it is equivalent and will compare
equal.
4. %-style formatting: ``%s`` and ``%r`` call ``Enum``'s ``__str__`` and
``__repr__`` respectively; other codes (such as ``%i`` or ``%h`` for
IntEnum) treat the enum member as its mixed-in type.
Note: Prior to Python 3.4 there is a bug in ``str``'s %-formatting: ``int``
subclasses are printed as strings and not numbers when the ``%d``, ``%i``,
or ``%u`` codes are used.
5. ``str.__format__`` (or ``format``) will use the mixed-in
type's ``__format__``. If the ``Enum``'s ``str`` or
``repr`` is desired use the ``!s`` or ``!r`` ``str`` format codes.
Decorators
----------
unique
^^^^^^
A ``class`` decorator specifically for enumerations. It searches an
enumeration's ``__members__`` gathering any aliases it finds; if any are
found ``ValueError`` is raised with the details::
>>> @unique
... class NoDupes(Enum):
... first = 'one'
... second = 'two'
... third = 'two'
Traceback (most recent call last):
...
ValueError: duplicate names found in <enum 'NoDupes'>: third -> second
Interesting examples
--------------------
While ``Enum`` and ``IntEnum`` are expected to cover the majority of
use-cases, they cannot cover them all. Here are recipes for some different
types of enumerations that can be used directly, or as examples for creating
one's own.
AutoNumber
^^^^^^^^^^
Avoids having to specify the value for each enumeration member::
>>> class AutoNumber(Enum):
... def __new__(cls):
... value = len(cls.__members__) + 1
... obj = object.__new__(cls)
... obj._value_ = value
... return obj
...
>>> class Color(AutoNumber):
... __order__ = "red green blue" # only needed in 2.x
... red = ()
... green = ()
... blue = ()
...
>>> Color.green.value == 2
True
Note:
The `__new__` method, if defined, is used during creation of the Enum
members; it is then replaced by Enum's `__new__` which is used after
class creation for lookup of existing members. Due to the way Enums are
supposed to behave, there is no way to customize Enum's `__new__`.
UniqueEnum
^^^^^^^^^^
Raises an error if a duplicate member name is found instead of creating an
alias::
>>> class UniqueEnum(Enum):
... def __init__(self, *args):
... cls = self.__class__
... if any(self.value == e.value for e in cls):
... a = self.name
... e = cls(self.value).name
... raise ValueError(
... "aliases not allowed in UniqueEnum: %r --> %r"
... % (a, e))
...
>>> class Color(UniqueEnum):
... red = 1
... green = 2
... blue = 3
... grene = 2
Traceback (most recent call last):
...
ValueError: aliases not allowed in UniqueEnum: 'grene' --> 'green'
OrderedEnum
^^^^^^^^^^^
An ordered enumeration that is not based on ``IntEnum`` and so maintains
the normal ``Enum`` invariants (such as not being comparable to other
enumerations)::
>>> class OrderedEnum(Enum):
... def __ge__(self, other):
... if self.__class__ is other.__class__:
... return self._value_ >= other._value_
... return NotImplemented
... def __gt__(self, other):
... if self.__class__ is other.__class__:
... return self._value_ > other._value_
... return NotImplemented
... def __le__(self, other):
... if self.__class__ is other.__class__:
... return self._value_ <= other._value_
... return NotImplemented
... def __lt__(self, other):
... if self.__class__ is other.__class__:
... return self._value_ < other._value_
... return NotImplemented
...
>>> class Grade(OrderedEnum):
... __ordered__ = 'A B C D F'
... A = 5
... B = 4
... C = 3
... D = 2
... F = 1
...
>>> Grade.C < Grade.A
True
Planet
^^^^^^
If ``__new__`` or ``__init__`` is defined the value of the enum member
will be passed to those methods::
>>> class Planet(Enum):
... MERCURY = (3.303e+23, 2.4397e6)
... VENUS = (4.869e+24, 6.0518e6)
... EARTH = (5.976e+24, 6.37814e6)
... MARS = (6.421e+23, 3.3972e6)
... JUPITER = (1.9e+27, 7.1492e7)
... SATURN = (5.688e+26, 6.0268e7)
... URANUS = (8.686e+25, 2.5559e7)
... NEPTUNE = (1.024e+26, 2.4746e7)
... def __init__(self, mass, radius):
... self.mass = mass # in kilograms
... self.radius = radius # in meters
... @property
... def surface_gravity(self):
... # universal gravitational constant (m3 kg-1 s-2)
... G = 6.67300E-11
... return G * self.mass / (self.radius * self.radius)
...
>>> Planet.EARTH.value
(5.976e+24, 6378140.0)
>>> Planet.EARTH.surface_gravity
9.802652743337129
How are Enums different?
------------------------
Enums have a custom metaclass that affects many aspects of both derived Enum
classes and their instances (members).
Enum Classes
^^^^^^^^^^^^
The ``EnumMeta`` metaclass is responsible for providing the
``__contains__``, ``__dir__``, ``__iter__`` and other methods that
allow one to do things with an ``Enum`` class that fail on a typical
class, such as ``list(Color)`` or ``some_var in Color``. ``EnumMeta`` is
responsible for ensuring that various other methods on the final ``Enum``
class are correct (such as ``__new__``, ``__getnewargs__``,
``__str__`` and ``__repr__``)
Enum Members (aka instances)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The most interesting thing about Enum members is that they are singletons.
``EnumMeta`` creates them all while it is creating the ``Enum``
class itself, and then puts a custom ``__new__`` in place to ensure
that no new ones are ever instantiated by returning only the existing
member instances.
Finer Points
^^^^^^^^^^^^
Enum members are instances of an Enum class, and even though they are
accessible as ``EnumClass.member``, they are not accessible directly from
the member::
>>> Color.red
<Color.red: 1>
>>> Color.red.blue
Traceback (most recent call last):
...
AttributeError: 'Color' object has no attribute 'blue'
Likewise, ``__members__`` is only available on the class.
In Python 3.x ``__members__`` is always an ``OrderedDict``, with the order being
the definition order. In Python 2.7 ``__members__`` is an ``OrderedDict`` if
``__order__`` was specified, and a plain ``dict`` otherwise. In all other Python
2.x versions ``__members__`` is a plain ``dict`` even if ``__order__`` was specified
as the ``OrderedDict`` type didn't exist yet.
If you give your ``Enum`` subclass extra methods, like the `Planet`_
class above, those methods will show up in a `dir` of the member,
but not of the class::
>>> dir(Planet)
['EARTH', 'JUPITER', 'MARS', 'MERCURY', 'NEPTUNE', 'SATURN', 'URANUS',
'VENUS', '__class__', '__doc__', '__members__', '__module__']
>>> dir(Planet.EARTH)
['__class__', '__doc__', '__module__', 'name', 'surface_gravity', 'value']
A ``__new__`` method will only be used for the creation of the
``Enum`` members -- after that it is replaced. This means if you wish to
change how ``Enum`` members are looked up you either have to write a
helper function or a ``classmethod``.

View file

@ -1,790 +0,0 @@
"""Python Enumerations"""
import sys as _sys
__all__ = ['Enum', 'IntEnum', 'unique']
version = 1, 0, 4
pyver = float('%s.%s' % _sys.version_info[:2])
try:
any
except NameError:
def any(iterable):
for element in iterable:
if element:
return True
return False
try:
from collections import OrderedDict
except ImportError:
OrderedDict = None
try:
basestring
except NameError:
# In Python 2 basestring is the ancestor of both str and unicode
# in Python 3 it's just str, but was missing in 3.1
basestring = str
try:
unicode
except NameError:
# In Python 3 unicode no longer exists (it's just str)
unicode = str
class _RouteClassAttributeToGetattr(object):
"""Route attribute access on a class to __getattr__.
This is a descriptor, used to define attributes that act differently when
accessed through an instance and through a class. Instance access remains
normal, but access to an attribute through a class will be routed to the
class's __getattr__ method; this is done by raising AttributeError.
"""
def __init__(self, fget=None):
self.fget = fget
def __get__(self, instance, ownerclass=None):
if instance is None:
raise AttributeError()
return self.fget(instance)
def __set__(self, instance, value):
raise AttributeError("can't set attribute")
def __delete__(self, instance):
raise AttributeError("can't delete attribute")
def _is_descriptor(obj):
"""Returns True if obj is a descriptor, False otherwise."""
return (
hasattr(obj, '__get__') or
hasattr(obj, '__set__') or
hasattr(obj, '__delete__'))
def _is_dunder(name):
"""Returns True if a __dunder__ name, False otherwise."""
return (name[:2] == name[-2:] == '__' and
name[2:3] != '_' and
name[-3:-2] != '_' and
len(name) > 4)
def _is_sunder(name):
"""Returns True if a _sunder_ name, False otherwise."""
return (name[0] == name[-1] == '_' and
name[1:2] != '_' and
name[-2:-1] != '_' and
len(name) > 2)
def _make_class_unpicklable(cls):
"""Make the given class un-picklable."""
def _break_on_call_reduce(self, protocol=None):
raise TypeError('%r cannot be pickled' % self)
cls.__reduce_ex__ = _break_on_call_reduce
cls.__module__ = '<unknown>'
class _EnumDict(dict):
    """Track enum member order and ensure member names are not reused.

    EnumMeta will use the names found in self._member_names as the
    enumeration member names.
    """
    def __init__(self):
        super(_EnumDict, self).__init__()
        # Candidate member names, in definition order (insertion order on
        # Python 3 via __prepare__; rebuilt from a plain dict on Python 2).
        self._member_names = []

    def __setitem__(self, key, value):
        """Changes anything not dundered or not a descriptor.

        If a descriptor is added with the same name as an enum member, the name
        is removed from _member_names (this may leave a hole in the numerical
        sequence of values).

        If an enum member name is used twice, an error is raised; duplicate
        values are not checked for.

        Single underscore (sunder) names are reserved.

        Note:   in 3.x __order__ is simply discarded as a not necessary piece
                leftover from 2.x
        """
        if pyver >= 3.0 and key == '__order__':
            # __prepare__ already preserves definition order on 3.x.
            return
        if _is_sunder(key):
            raise ValueError('_names_ are reserved for future Enum use')
        elif _is_dunder(key):
            # dunders (e.g. __doc__, __module__) pass through untracked
            pass
        elif key in self._member_names:
            # descriptor overwriting an enum?
            raise TypeError('Attempted to reuse key: %r' % key)
        elif not _is_descriptor(value):
            if key in self:
                # enum overwriting a descriptor?
                raise TypeError('Key already defined as: %r' % self[key])
            # a plain value: record it as an enumeration member name
            self._member_names.append(key)
        super(_EnumDict, self).__setitem__(key, value)
# Dummy value for Enum as EnumMeta explicity checks for it, but of course until
# EnumMeta finishes running the first time the Enum class doesn't exist.  This
# is also why there are checks in EnumMeta like `if Enum is not None`
Enum = None
class EnumMeta(type):
    """Metaclass for Enum.

    Responsible for turning a class body of name = value assignments into a
    finished enumeration: it instantiates every member exactly once, builds
    the name->member and value->member maps, resolves aliases, and wires up
    repr/str/format/pickle support so that members behave as singletons.
    """
    @classmethod
    def __prepare__(metacls, cls, bases):
        # Python 3 only: use _EnumDict as the class namespace so definition
        # order is captured and duplicate member names are rejected early.
        return _EnumDict()

    def __new__(metacls, cls, bases, classdict):
        # an Enum class is final once enumeration items have been defined; it
        # cannot be mixed with other types (int, float, etc.) if it has an
        # inherited __new__ unless a new __new__ is defined (or the resulting
        # class will fail).
        if type(classdict) is dict:
            # Python 2 path: __prepare__ was never called, so replay the
            # plain dict through _EnumDict to get the same validation.
            original_dict = classdict
            classdict = _EnumDict()
            for k, v in original_dict.items():
                classdict[k] = v

        member_type, first_enum = metacls._get_mixins_(bases)
        __new__, save_new, use_args = metacls._find_new_(classdict, member_type,
                                                        first_enum)
        # save enum items into separate mapping so they don't get baked into
        # the new class
        members = dict((k, classdict[k]) for k in classdict._member_names)
        for name in classdict._member_names:
            del classdict[name]

        # py2 support for definition order (derived from __order__ or, failing
        # that, from sorting the values)
        __order__ = classdict.get('__order__')
        if __order__ is None:
            if pyver < 3.0:
                try:
                    __order__ = [name for (name, value) in sorted(members.items(), key=lambda item: item[1])]
                except TypeError:
                    # values not orderable; fall back to sorting by name
                    __order__ = [name for name in sorted(members.keys())]
            else:
                __order__ = classdict._member_names
        else:
            del classdict['__order__']
            if pyver < 3.0:
                __order__ = __order__.replace(',', ' ').split()
                # any member not listed in __order__ is treated as an alias
                # and processed after the canonical members
                aliases = [name for name in members if name not in __order__]
                __order__ += aliases

        # check for illegal enum names (any others?)
        invalid_names = set(members) & set(['mro'])
        if invalid_names:
            raise ValueError('Invalid enum member name(s): %s' % (
                ', '.join(invalid_names), ))

        # create our new Enum type
        enum_class = super(EnumMeta, metacls).__new__(metacls, cls, bases, classdict)
        enum_class._member_names_ = []               # names in definition order (no aliases)
        if OrderedDict is not None:
            enum_class._member_map_ = OrderedDict()
        else:
            enum_class._member_map_ = {}             # name->member map (incl. aliases)
        enum_class._member_type_ = member_type

        # Reverse value->name map for hashable values.
        enum_class._value2member_map_ = {}

        # instantiate them, checking for duplicates as we go
        # we instantiate first instead of checking for duplicates first in case
        # a custom __new__ is doing something funky with the values -- such as
        # auto-numbering ;)
        if __new__ is None:
            __new__ = enum_class.__new__
        for member_name in __order__:
            value = members[member_name]
            if not isinstance(value, tuple):
                args = (value, )
            else:
                args = value
            if member_type is tuple:   # special case for tuple enums
                args = (args, )        # wrap it one more time
            if not use_args or not args:
                enum_member = __new__(enum_class)
                if not hasattr(enum_member, '_value_'):
                    enum_member._value_ = value
            else:
                enum_member = __new__(enum_class, *args)
                if not hasattr(enum_member, '_value_'):
                    enum_member._value_ = member_type(*args)
            value = enum_member._value_
            enum_member._name_ = member_name
            enum_member.__objclass__ = enum_class
            enum_member.__init__(*args)
            # If another member with the same value was already defined, the
            # new member becomes an alias to the existing one.
            for name, canonical_member in enum_class._member_map_.items():
                if canonical_member.value == enum_member._value_:
                    enum_member = canonical_member
                    break
            else:
                # Aliases don't appear in member names (only in __members__).
                enum_class._member_names_.append(member_name)
            enum_class._member_map_[member_name] = enum_member
            try:
                # This may fail if value is not hashable. We can't add the value
                # to the map, and by-value lookups for this value will be
                # linear.
                enum_class._value2member_map_[value] = enum_member
            except TypeError:
                pass

        # If a custom type is mixed into the Enum, and it does not know how
        # to pickle itself, pickle.dumps will succeed but pickle.loads will
        # fail.  Rather than have the error show up later and possibly far
        # from the source, sabotage the pickle protocol for this class so
        # that pickle.dumps also fails.
        #
        # However, if the new class implements its own __reduce_ex__, do not
        # sabotage -- it's on them to make sure it works correctly.  We use
        # __reduce_ex__ instead of any of the others as it is preferred by
        # pickle over __reduce__, and it handles all pickle protocols.
        unpicklable = False
        if '__reduce_ex__' not in classdict:
            if member_type is not object:
                methods = ('__getnewargs_ex__', '__getnewargs__',
                        '__reduce_ex__', '__reduce__')
                if not any(m in member_type.__dict__ for m in methods):
                    _make_class_unpicklable(enum_class)
                    unpicklable = True

        # double check that repr and friends are not the mixin's or various
        # things break (such as pickle)
        for name in ('__repr__', '__str__', '__format__', '__reduce_ex__'):
            class_method = getattr(enum_class, name)
            obj_method = getattr(member_type, name, None)
            enum_method = getattr(first_enum, name, None)
            if name not in classdict and class_method is not enum_method:
                if name == '__reduce_ex__' and unpicklable:
                    # keep the sabotaged __reduce_ex__ installed above
                    continue
                setattr(enum_class, name, enum_method)

        # method resolution and int's are not playing nice
        # Python's less than 2.6 use __cmp__
        if pyver < 2.6:
            if issubclass(enum_class, int):
                setattr(enum_class, '__cmp__', getattr(int, '__cmp__'))
        elif pyver < 3.0:
            if issubclass(enum_class, int):
                # restore int's rich comparisons so IntEnum compares as int
                for method in (
                        '__le__',
                        '__lt__',
                        '__gt__',
                        '__ge__',
                        '__eq__',
                        '__ne__',
                        '__hash__',
                        ):
                    setattr(enum_class, method, getattr(int, method))

        # replace any other __new__ with our own (as long as Enum is not None,
        # anyway) -- again, this is to support pickle
        if Enum is not None:
            # if the user defined their own __new__, save it before it gets
            # clobbered in case they subclass later
            if save_new:
                setattr(enum_class, '__member_new__', enum_class.__dict__['__new__'])
            setattr(enum_class, '__new__', Enum.__dict__['__new__'])
        return enum_class

    def __call__(cls, value, names=None, module=None, type=None):
        """Either returns an existing member, or creates a new enum class.

        This method is used both when an enum class is given a value to match
        to an enumeration member (i.e. Color(3)) and for the functional API
        (i.e. Color = Enum('Color', names='red green blue')).

        When used for the functional API: `module`, if set, will be stored in
        the new class' __module__ attribute; `type`, if set, will be mixed in
        as the first base class.

        Note: if `module` is not set this routine will attempt to discover the
        calling module by walking the frame stack; if this is unsuccessful
        the resulting class will not be pickleable.
        """
        if names is None:  # simple value lookup
            return cls.__new__(cls, value)
        # otherwise, functional API: we're creating a new Enum type
        return cls._create_(value, names, module=module, type=type)

    def __contains__(cls, member):
        # Membership means "is an actual member instance of this enum class".
        return isinstance(member, cls) and member.name in cls._member_map_

    def __delattr__(cls, attr):
        # nicer error message when someone tries to delete an attribute
        # (see issue19025).
        if attr in cls._member_map_:
            raise AttributeError(
                    "%s: cannot delete Enum member." % cls.__name__)
        super(EnumMeta, cls).__delattr__(attr)

    def __dir__(self):
        # Expose only the public surface: dunder basics plus member names.
        return (['__class__', '__doc__', '__members__', '__module__'] +
                self._member_names_)

    @property
    def __members__(cls):
        """Returns a mapping of member name->value.

        This mapping lists all enum members, including aliases. Note that this
        is a copy of the internal mapping.
        """
        return cls._member_map_.copy()

    def __getattr__(cls, name):
        """Return the enum member matching `name`

        We use __getattr__ instead of descriptors or inserting into the enum
        class' __dict__ in order to support `name` and `value` being both
        properties for enum members (which live in the class' __dict__) and
        enum members themselves.
        """
        if _is_dunder(name):
            raise AttributeError(name)
        try:
            return cls._member_map_[name]
        except KeyError:
            raise AttributeError(name)

    def __getitem__(cls, name):
        # Color['red'] -> member (raises KeyError for unknown names)
        return cls._member_map_[name]

    def __iter__(cls):
        # Iterate canonical members in definition order (aliases excluded).
        return (cls._member_map_[name] for name in cls._member_names_)

    def __reversed__(cls):
        return (cls._member_map_[name] for name in reversed(cls._member_names_))

    def __len__(cls):
        # Number of canonical members (aliases excluded).
        return len(cls._member_names_)

    def __repr__(cls):
        return "<enum %r>" % cls.__name__

    def __setattr__(cls, name, value):
        """Block attempts to reassign Enum members.

        A simple assignment to the class namespace only changes one of the
        several possible ways to get an Enum member from the Enum class,
        resulting in an inconsistent Enumeration.
        """
        member_map = cls.__dict__.get('_member_map_', {})
        if name in member_map:
            raise AttributeError('Cannot reassign members.')
        super(EnumMeta, cls).__setattr__(name, value)

    def _create_(cls, class_name, names=None, module=None, type=None):
        """Convenience method to create a new Enum class.

        `names` can be:

        * A string containing member names, separated either with spaces or
          commas.  Values are auto-numbered from 1.
        * An iterable of member names.  Values are auto-numbered from 1.
        * An iterable of (member name, value) pairs.
        * A mapping of member name -> value.
        """
        if pyver < 3.0:
            # if class_name is unicode, attempt a conversion to ASCII
            if isinstance(class_name, unicode):
                try:
                    class_name = class_name.encode('ascii')
                except UnicodeEncodeError:
                    raise TypeError('%r is not representable in ASCII' % class_name)
        metacls = cls.__class__
        if type is None:
            bases = (cls, )
        else:
            bases = (type, cls)
        classdict = metacls.__prepare__(class_name, bases)
        __order__ = []

        # special processing needed for names?
        if isinstance(names, basestring):
            names = names.replace(',', ' ').split()
        if isinstance(names, (tuple, list)) and isinstance(names[0], basestring):
            # bare names: auto-number values starting from 1
            names = [(e, i+1) for (i, e) in enumerate(names)]

        # Here, names is either an iterable of (name, value) or a mapping.
        for item in names:
            if isinstance(item, basestring):
                member_name, member_value = item, names[item]
            else:
                member_name, member_value = item
            classdict[member_name] = member_value
            __order__.append(member_name)
        # only set __order__ in classdict if name/value was not from a mapping
        if not isinstance(item, basestring):
            classdict['__order__'] = ' '.join(__order__)

        enum_class = metacls.__new__(metacls, class_name, bases, classdict)

        # TODO: replace the frame hack if a blessed way to know the calling
        # module is ever developed
        if module is None:
            try:
                module = _sys._getframe(2).f_globals['__name__']
            except (AttributeError, ValueError):
                pass
        if module is None:
            # cannot determine the calling module: fail pickling loudly
            _make_class_unpicklable(enum_class)
        else:
            enum_class.__module__ = module

        return enum_class

    @staticmethod
    def _get_mixins_(bases):
        """Returns the type for creating enum members, and the first inherited
        enum class.

        bases: the tuple of bases that was given to __new__
        """
        if not bases or Enum is None:
            # bootstrapping: the Enum base class itself is being created
            return object, Enum

        # double check that we are not subclassing a class with existing
        # enumeration members; while we're at it, see if any other data
        # type has been mixed in so we can use the correct __new__
        member_type = first_enum = None
        for base in bases:
            if (base is not Enum and
                    issubclass(base, Enum) and
                    base._member_names_):
                raise TypeError("Cannot extend enumerations")
        # base is now the last base in bases
        if not issubclass(base, Enum):
            raise TypeError("new enumerations must be created as "
                    "`ClassName([mixin_type,] enum_type)`")

        # get correct mix-in type (either mix-in type of Enum subclass, or
        # first base if last base is Enum)
        if not issubclass(bases[0], Enum):
            member_type = bases[0]  # first data type
            first_enum = bases[-1]  # enum type
        else:
            for base in bases[0].__mro__:
                # most common: (IntEnum, int, Enum, object)
                # possible:    (<Enum 'AutoIntEnum'>, <Enum 'IntEnum'>,
                #               <class 'int'>, <Enum 'Enum'>,
                #               <class 'object'>)
                if issubclass(base, Enum):
                    if first_enum is None:
                        first_enum = base
                else:
                    if member_type is None:
                        member_type = base

        return member_type, first_enum

    if pyver < 3.0:
        @staticmethod
        def _find_new_(classdict, member_type, first_enum):
            """Returns the __new__ to be used for creating the enum members.

            classdict: the class dictionary given to __new__
            member_type: the data type whose __new__ will be used by default
            first_enum: enumeration to check for an overriding __new__
            """
            # now find the correct __new__, checking to see if one was defined
            # by the user; also check earlier enum classes in case a __new__ was
            # saved as __member_new__
            __new__ = classdict.get('__new__', None)
            if __new__:
                return None, True, True      # __new__, save_new, use_args

            N__new__ = getattr(None, '__new__')
            O__new__ = getattr(object, '__new__')
            if Enum is None:
                E__new__ = N__new__
            else:
                E__new__ = Enum.__dict__['__new__']
            # check all possibles for __member_new__ before falling back to
            # __new__
            for method in ('__member_new__', '__new__'):
                for possible in (member_type, first_enum):
                    try:
                        target = possible.__dict__[method]
                    except (AttributeError, KeyError):
                        target = getattr(possible, method, None)
                    if target not in [
                            None,
                            N__new__,
                            O__new__,
                            E__new__,
                            ]:
                        if method == '__member_new__':
                            classdict['__new__'] = target
                            return None, False, True
                        if isinstance(target, staticmethod):
                            target = target.__get__(member_type)
                        __new__ = target
                        break
                if __new__ is not None:
                    break
            else:
                __new__ = object.__new__

            # if a non-object.__new__ is used then whatever value/tuple was
            # assigned to the enum member name will be passed to __new__ and to
            # the new enum member's __init__
            if __new__ is object.__new__:
                use_args = False
            else:
                use_args = True

            return __new__, False, use_args
    else:
        @staticmethod
        def _find_new_(classdict, member_type, first_enum):
            """Returns the __new__ to be used for creating the enum members.

            classdict: the class dictionary given to __new__
            member_type: the data type whose __new__ will be used by default
            first_enum: enumeration to check for an overriding __new__
            """
            # now find the correct __new__, checking to see if one was defined
            # by the user; also check earlier enum classes in case a __new__ was
            # saved as __member_new__
            __new__ = classdict.get('__new__', None)

            # should __new__ be saved as __member_new__ later?
            save_new = __new__ is not None

            if __new__ is None:
                # check all possibles for __member_new__ before falling back to
                # __new__
                for method in ('__member_new__', '__new__'):
                    for possible in (member_type, first_enum):
                        target = getattr(possible, method, None)
                        if target not in (
                                None,
                                None.__new__,
                                object.__new__,
                                Enum.__new__,
                                ):
                            __new__ = target
                            break
                    if __new__ is not None:
                        break
                else:
                    __new__ = object.__new__

            # if a non-object.__new__ is used then whatever value/tuple was
            # assigned to the enum member name will be passed to __new__ and to
            # the new enum member's __init__
            if __new__ is object.__new__:
                use_args = False
            else:
                use_args = True

            return __new__, save_new, use_args
########################################################
# In order to support Python 2 and 3 with a single
# codebase we have to create the Enum methods separately
# and then use the `type(name, bases, dict)` method to
# create the class.
########################################################

# Namespace that will become the Enum class body; each method below is
# defined at module level, stored here, then deleted from the module.
temp_enum_dict = {}
temp_enum_dict['__doc__'] = "Generic enumeration.\n\n Derive from this class to define new enumerations.\n\n"
def __new__(cls, value):
    """Look up an existing member by value (e.g. Color(3)).

    Raises ValueError if no member has the given value.
    """
    # all enum instances are actually created during class construction
    # without calling this method; this method is called by the metaclass'
    # __call__ (i.e. Color(3) ), and by pickle
    if type(value) is cls:
        # For lookups like Color(Color.red)
        value = value.value
        #return value
    # by-value search for a matching enum member
    # see if it's in the reverse mapping (for hashable values)
    try:
        if value in cls._value2member_map_:
            return cls._value2member_map_[value]
    except TypeError:
        # not there, now do long search -- O(n) behavior
        for member in cls._member_map_.values():
            if member.value == value:
                return member
    raise ValueError("%s is not a valid %s" % (value, cls.__name__))
temp_enum_dict['__new__'] = __new__
del __new__
def __repr__(self):
    # e.g. <Color.red: 1>
    return "<%s.%s: %r>" % (
            self.__class__.__name__, self._name_, self._value_)
temp_enum_dict['__repr__'] = __repr__
del __repr__

def __str__(self):
    # e.g. Color.red
    return "%s.%s" % (self.__class__.__name__, self._name_)
temp_enum_dict['__str__'] = __str__
del __str__
def __dir__(self):
    # Include any user-defined (non-underscore) methods/properties from the
    # whole MRO alongside the standard dunder attributes.
    added_behavior = [
            m
            for cls in self.__class__.mro()
            for m in cls.__dict__
            if m[0] != '_'
            ]
    return (['__class__', '__doc__', '__module__', ] + added_behavior)
temp_enum_dict['__dir__'] = __dir__
del __dir__
def __format__(self, format_spec):
    # mixed-in Enums should use the mixed-in type's __format__, otherwise
    # we can get strange results with the Enum name showing up instead of
    # the value
    # pure Enum branch
    if self._member_type_ is object:
        cls = str
        val = str(self)
    # mix-in branch
    else:
        cls = self._member_type_
        val = self.value
    return cls.__format__(val, format_spec)
temp_enum_dict['__format__'] = __format__
del __format__
####################################
# Python's less than 2.6 use __cmp__

if pyver < 2.6:

    def __cmp__(self, other):
        # Same-class members compare equal only on identity; anything else
        # is unorderable.
        if type(other) is self.__class__:
            if self is other:
                return 0
            return -1
        return NotImplemented
        # NOTE(review): the line below is unreachable (kept verbatim from
        # the upstream enum34 source).
        raise TypeError("unorderable types: %s() and %s()" % (self.__class__.__name__, other.__class__.__name__))
    temp_enum_dict['__cmp__'] = __cmp__
    del __cmp__

else:
    # Rich comparisons: plain Enum members are deliberately unorderable.

    def __le__(self, other):
        raise TypeError("unorderable types: %s() <= %s()" % (self.__class__.__name__, other.__class__.__name__))
    temp_enum_dict['__le__'] = __le__
    del __le__

    def __lt__(self, other):
        raise TypeError("unorderable types: %s() < %s()" % (self.__class__.__name__, other.__class__.__name__))
    temp_enum_dict['__lt__'] = __lt__
    del __lt__

    def __ge__(self, other):
        raise TypeError("unorderable types: %s() >= %s()" % (self.__class__.__name__, other.__class__.__name__))
    temp_enum_dict['__ge__'] = __ge__
    del __ge__

    def __gt__(self, other):
        raise TypeError("unorderable types: %s() > %s()" % (self.__class__.__name__, other.__class__.__name__))
    temp_enum_dict['__gt__'] = __gt__
    del __gt__
def __eq__(self, other):
    # Members are singletons, so equality is identity within a class.
    if type(other) is self.__class__:
        return self is other
    return NotImplemented
temp_enum_dict['__eq__'] = __eq__
del __eq__

def __ne__(self, other):
    if type(other) is self.__class__:
        return self is not other
    return NotImplemented
temp_enum_dict['__ne__'] = __ne__
del __ne__

def __hash__(self):
    # Hash by member name (stable even for unhashable values).
    return hash(self._name_)
temp_enum_dict['__hash__'] = __hash__
del __hash__

def __reduce_ex__(self, proto):
    # Pickle as a by-value lookup: Color(1) -> the existing singleton.
    return self.__class__, (self._value_, )
temp_enum_dict['__reduce_ex__'] = __reduce_ex__
del __reduce_ex__
# _RouteClassAttributeToGetattr is used to provide access to the `name`
# and `value` properties of enum members while keeping some measure of
# protection from modification, while still allowing for an enumeration
# to have members named `name` and `value`.  This works because enumeration
# members are not set directly on the enum class -- __getattr__ is
# used to look them up.
@_RouteClassAttributeToGetattr
def name(self):
    """The member's name (read-only)."""
    return self._name_
temp_enum_dict['name'] = name
del name
@_RouteClassAttributeToGetattr
def value(self):
    """The member's value (read-only)."""
    return self._value_
temp_enum_dict['value'] = value
del value
# Instantiate EnumMeta directly so Enum itself is built from the method
# dictionary accumulated above; the scratch dict is then discarded.
Enum = EnumMeta('Enum', (object, ), temp_enum_dict)
del temp_enum_dict

# Enum has now been created
###########################
class IntEnum(int, Enum):
    """Enum where members are also (and must be) ints"""
    # Mixing in int lets members be used anywhere a plain int is expected.
def unique(enumeration):
    """Class decorator that ensures only unique members exist in an enumeration.

    Returns the enumeration unchanged when every key is a canonical member
    name; raises ValueError listing each alias otherwise.
    """
    aliases = [(key, member.name)
               for key, member in enumeration.__members__.items()
               if key != member.name]
    if aliases:
        listing = ', '.join(["%s -> %s" % pair for pair in aliases])
        raise ValueError('duplicate names found in %r: %s' %
                         (enumeration, listing)
                         )
    return enumeration

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

View file

@ -1,110 +0,0 @@
"""
iri2uri
Converts an IRI to a URI.
"""
__author__ = "Joe Gregorio (joe@bitworking.org)"
__copyright__ = "Copyright 2006, Joe Gregorio"
__contributors__ = []
__version__ = "1.0.0"
__license__ = "MIT"
__history__ = """
"""
import urlparse
# Convert an IRI to a URI following the rules in RFC 3987
#
# The characters we need to encode and escape are defined in the spec:
#
# iprivate = %xE000-F8FF / %xF0000-FFFFD / %x100000-10FFFD
# ucschar = %xA0-D7FF / %xF900-FDCF / %xFDF0-FFEF
# / %x10000-1FFFD / %x20000-2FFFD / %x30000-3FFFD
# / %x40000-4FFFD / %x50000-5FFFD / %x60000-6FFFD
# / %x70000-7FFFD / %x80000-8FFFD / %x90000-9FFFD
# / %xA0000-AFFFD / %xB0000-BFFFD / %xC0000-CFFFD
# / %xD0000-DFFFD / %xE1000-EFFFD
# Code-point ranges (RFC 3987 ucschar / iprivate productions) whose
# characters must be %-encoded when converting an IRI to a URI.  The
# ranges are sorted in ascending order.
escape_range = [
    (0xA0, 0xD7FF),
    (0xE000, 0xF8FF),
    (0xF900, 0xFDCF),
    (0xFDF0, 0xFFEF),
    (0x10000, 0x1FFFD),
    (0x20000, 0x2FFFD),
    (0x30000, 0x3FFFD),
    (0x40000, 0x4FFFD),
    (0x50000, 0x5FFFD),
    (0x60000, 0x6FFFD),
    (0x70000, 0x7FFFD),
    (0x80000, 0x8FFFD),
    (0x90000, 0x9FFFD),
    (0xA0000, 0xAFFFD),
    (0xB0000, 0xBFFFD),
    (0xC0000, 0xCFFFD),
    (0xD0000, 0xDFFFD),
    (0xE1000, 0xEFFFD),
    (0xF0000, 0xFFFFD),
    (0x100000, 0x10FFFD)
]


def encode(c):
    """Return character `c` with each UTF-8 octet %-escaped if its code
    point falls inside `escape_range`; otherwise return it unchanged."""
    codepoint = ord(c)
    for low, high in escape_range:
        if codepoint < low:
            # Ranges are ascending, so no later range can match either.
            break
        if codepoint <= high:
            return "".join(["%%%2X" % ord(octet) for octet in c.encode('utf-8')])
    return c
def iri2uri(uri):
    """Convert an IRI to a URI.

    The IRI must be passed as a unicode string -- do not utf-8 encode it
    before calling.  Non-unicode values are returned untouched.
    """
    if not isinstance(uri, unicode):
        return uri
    scheme, authority, path, query, fragment = urlparse.urlsplit(uri)
    # Hostnames are converted via IDNA rather than %-escaping.
    idna_authority = authority.encode('idna')
    recombined = urlparse.urlunsplit(
        (scheme, idna_authority, path, query, fragment))
    # Each character in 'ucschar' or 'iprivate' is utf-8 encoded and every
    # octet of that encoding is then %-escaped.
    return "".join([encode(c) for c in recombined])
if __name__ == "__main__":
    # Self-test: run the embedded unittest suite when executed directly.
    import unittest

    class Test(unittest.TestCase):

        def test_uris(self):
            """Test that URIs are invariant under the transformation."""
            invariant = [
                u"ftp://ftp.is.co.za/rfc/rfc1808.txt",
                u"http://www.ietf.org/rfc/rfc2396.txt",
                u"ldap://[2001:db8::7]/c=GB?objectClass?one",
                u"mailto:John.Doe@example.com",
                u"news:comp.infosystems.www.servers.unix",
                u"tel:+1-816-555-1212",
                u"telnet://192.0.2.16:80/",
                u"urn:oasis:names:specification:docbook:dtd:xml:4.1.2" ]
            for uri in invariant:
                self.assertEqual(uri, iri2uri(uri))

        def test_iri(self):
            """ Test that the right type of escaping is done for each part of the URI."""
            self.assertEqual("http://xn--o3h.com/%E2%98%84", iri2uri(u"http://\N{COMET}.com/\N{COMET}"))
            self.assertEqual("http://bitworking.org/?fred=%E2%98%84", iri2uri(u"http://bitworking.org/?fred=\N{COMET}"))
            self.assertEqual("http://bitworking.org/#%E2%98%84", iri2uri(u"http://bitworking.org/#\N{COMET}"))
            self.assertEqual("#%E2%98%84", iri2uri(u"#\N{COMET}"))
            self.assertEqual("/fred?bar=%E2%98%9A#%E2%98%84", iri2uri(u"/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}"))
            self.assertEqual("/fred?bar=%E2%98%9A#%E2%98%84", iri2uri(iri2uri(u"/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}")))
            self.assertNotEqual("/fred?bar=%E2%98%9A#%E2%98%84", iri2uri(u"/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}".encode('utf-8')))

    unittest.main()

View file

@ -1,786 +0,0 @@
#!/usr/bin/env python
# -*- coding: iso-8859-1 -*-
# Documentation is intended to be processed by Epydoc.
"""
Introduction
============
The Munkres module provides an implementation of the Munkres algorithm
(also called the Hungarian algorithm or the Kuhn-Munkres algorithm),
useful for solving the Assignment Problem.
Assignment Problem
==================
Let *C* be an *n*\ x\ *n* matrix representing the costs of each of *n* workers
to perform any of *n* jobs. The assignment problem is to assign jobs to
workers in a way that minimizes the total cost. Since each worker can perform
only one job and each job can be assigned to only one worker the assignments
represent an independent set of the matrix *C*.
One way to generate the optimal set is to create all permutations of
the indexes necessary to traverse the matrix so that no row and column
are used more than once. For instance, given this matrix (expressed in
Python)::
matrix = [[5, 9, 1],
[10, 3, 2],
[8, 7, 4]]
You could use this code to generate the traversal indexes::
def permute(a, results):
if len(a) == 1:
results.insert(len(results), a)
else:
for i in range(0, len(a)):
element = a[i]
a_copy = [a[j] for j in range(0, len(a)) if j != i]
subresults = []
permute(a_copy, subresults)
for subresult in subresults:
result = [element] + subresult
results.insert(len(results), result)
results = []
permute(range(len(matrix)), results) # [0, 1, 2] for a 3x3 matrix
After the call to permute(), the results matrix would look like this::
[[0, 1, 2],
[0, 2, 1],
[1, 0, 2],
[1, 2, 0],
[2, 0, 1],
[2, 1, 0]]
You could then use that index matrix to loop over the original cost matrix
and calculate the smallest cost of the combinations::
n = len(matrix)
minval = sys.maxsize
for row in range(n):
cost = 0
for col in range(n):
cost += matrix[row][col]
minval = min(cost, minval)
print minval
While this approach works fine for small matrices, it does not scale. It
executes in O(*n*!) time: Calculating the permutations for an *n*\ x\ *n*
matrix requires *n*! operations. For a 12x12 matrix, that's 479,001,600
traversals. Even if you could manage to perform each traversal in just one
millisecond, it would still take more than 133 hours to perform the entire
traversal. A 20x20 matrix would take 2,432,902,008,176,640,000 operations. At
an optimistic millisecond per operation, that's more than 77 million years.
The Munkres algorithm runs in O(*n*\ ^3) time, rather than O(*n*!). This
package provides an implementation of that algorithm.
This version is based on
http://www.public.iastate.edu/~ddoty/HungarianAlgorithm.html.
This version was written for Python by Brian Clapper from the (Ada) algorithm
at the above web site. (The ``Algorithm::Munkres`` Perl version, in CPAN, was
clearly adapted from the same web site.)
Usage
=====
Construct a Munkres object::
from munkres import Munkres
m = Munkres()
Then use it to compute the lowest cost assignment from a cost matrix. Here's
a sample program::
from munkres import Munkres, print_matrix
matrix = [[5, 9, 1],
[10, 3, 2],
[8, 7, 4]]
m = Munkres()
indexes = m.compute(matrix)
print_matrix(matrix, msg='Lowest cost through this matrix:')
total = 0
for row, column in indexes:
value = matrix[row][column]
total += value
print '(%d, %d) -> %d' % (row, column, value)
print 'total cost: %d' % total
Running that program produces::
Lowest cost through this matrix:
[5, 9, 1]
[10, 3, 2]
[8, 7, 4]
(0, 0) -> 5
(1, 1) -> 3
(2, 2) -> 4
    total cost: 12
The instantiated Munkres object can be used multiple times on different
matrices.
Non-square Cost Matrices
========================
The Munkres algorithm assumes that the cost matrix is square. However, it's
possible to use a rectangular matrix if you first pad it with 0 values to make
it square. This module automatically pads rectangular cost matrices to make
them square.
Notes:
- The module operates on a *copy* of the caller's matrix, so any padding will
not be seen by the caller.
- The cost matrix must be rectangular or square. An irregular matrix will
*not* work.
Calculating Profit, Rather than Cost
====================================
The cost matrix is just that: A cost matrix. The Munkres algorithm finds
the combination of elements (one from each row and column) that results in
the smallest cost. It's also possible to use the algorithm to maximize
profit. To do that, however, you have to convert your profit matrix to a
cost matrix. The simplest way to do that is to subtract all elements from a
large value. For example::
from munkres import Munkres, print_matrix
matrix = [[5, 9, 1],
[10, 3, 2],
[8, 7, 4]]
cost_matrix = []
for row in matrix:
cost_row = []
for col in row:
cost_row += [sys.maxsize - col]
cost_matrix += [cost_row]
m = Munkres()
indexes = m.compute(cost_matrix)
print_matrix(matrix, msg='Highest profit through this matrix:')
total = 0
for row, column in indexes:
value = matrix[row][column]
total += value
print '(%d, %d) -> %d' % (row, column, value)
print 'total profit=%d' % total
Running that program produces::
Highest profit through this matrix:
[5, 9, 1]
[10, 3, 2]
[8, 7, 4]
(0, 1) -> 9
(1, 0) -> 10
(2, 2) -> 4
total profit=23
The ``munkres`` module provides a convenience method for creating a cost
matrix from a profit matrix. Since it doesn't know whether the matrix contains
floating point numbers, decimals, or integers, you have to provide the
conversion function; but the convenience method takes care of the actual
creation of the cost matrix::
import munkres
cost_matrix = munkres.make_cost_matrix(matrix,
lambda cost: sys.maxsize - cost)
So, the above profit-calculation program can be recast as::
from munkres import Munkres, print_matrix, make_cost_matrix
matrix = [[5, 9, 1],
[10, 3, 2],
[8, 7, 4]]
cost_matrix = make_cost_matrix(matrix, lambda cost: sys.maxsize - cost)
m = Munkres()
indexes = m.compute(cost_matrix)
print_matrix(matrix, msg='Lowest cost through this matrix:')
total = 0
for row, column in indexes:
value = matrix[row][column]
total += value
print '(%d, %d) -> %d' % (row, column, value)
print 'total profit=%d' % total
References
==========
1. http://www.public.iastate.edu/~ddoty/HungarianAlgorithm.html
2. Harold W. Kuhn. The Hungarian Method for the assignment problem.
*Naval Research Logistics Quarterly*, 2:83-97, 1955.
3. Harold W. Kuhn. Variants of the Hungarian method for assignment
problems. *Naval Research Logistics Quarterly*, 3: 253-258, 1956.
4. Munkres, J. Algorithms for the Assignment and Transportation Problems.
*Journal of the Society of Industrial and Applied Mathematics*,
5(1):32-38, March, 1957.
5. http://en.wikipedia.org/wiki/Hungarian_algorithm
Copyright and License
=====================
This software is released under a BSD license, adapted from
<http://opensource.org/licenses/bsd-license.php>
Copyright (c) 2008 Brian M. Clapper
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name "clapper.org" nor the names of its contributors may be
used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
__docformat__ = 'restructuredtext'
# ---------------------------------------------------------------------------
# Imports
# ---------------------------------------------------------------------------
import sys
import copy
# ---------------------------------------------------------------------------
# Exports
# ---------------------------------------------------------------------------
__all__ = ['Munkres', 'make_cost_matrix']
# ---------------------------------------------------------------------------
# Globals
# ---------------------------------------------------------------------------
# Info about the module
__version__ = "1.0.6"
__author__ = "Brian Clapper, bmc@clapper.org"
__url__ = "http://software.clapper.org/munkres/"
__copyright__ = "(c) 2008 Brian M. Clapper"
__license__ = "BSD-style license"
# ---------------------------------------------------------------------------
# Classes
# ---------------------------------------------------------------------------
class Munkres:
    """
    Calculate the Munkres solution to the classical assignment problem.
    See the module documentation for usage.
    """

    def __init__(self):
        """Create a new instance"""
        self.C = None              # working copy of the (padded) cost matrix
        self.row_covered = []
        self.col_covered = []
        self.n = 0                 # dimension of the padded, square matrix
        self.Z0_r = 0
        self.Z0_c = 0
        self.marked = None         # 0 = unmarked, 1 = starred, 2 = primed
        self.path = None

    def make_cost_matrix(profit_matrix, inversion_function):
        """
        **DEPRECATED**

        Please use the module function ``make_cost_matrix()``.
        """
        import munkres
        return munkres.make_cost_matrix(profit_matrix, inversion_function)

    make_cost_matrix = staticmethod(make_cost_matrix)

    def pad_matrix(self, matrix, pad_value=0):
        """
        Pad a possibly non-square matrix to make it square.

        :Parameters:
            matrix : list of lists
                matrix to pad

            pad_value : int
                value to use to pad the matrix

        :rtype: list of lists
        :return: a new, possibly padded, matrix
        """
        max_columns = 0
        total_rows = len(matrix)

        for row in matrix:
            max_columns = max(max_columns, len(row))

        total_rows = max(max_columns, total_rows)

        new_matrix = []
        for row in matrix:
            row_len = len(row)
            new_row = row[:]
            if total_rows > row_len:
                # Row too short. Pad it.
                # BUG FIX: the original padded with the literal 0, silently
                # ignoring the pad_value parameter.
                new_row += [pad_value] * (total_rows - row_len)
            new_matrix += [new_row]

        while len(new_matrix) < total_rows:
            new_matrix += [[pad_value] * total_rows]

        return new_matrix

    def compute(self, cost_matrix):
        """
        Compute the indexes for the lowest-cost pairings between rows and
        columns in the database. Returns a list of (row, column) tuples
        that can be used to traverse the matrix.

        :Parameters:
            cost_matrix : list of lists
                The cost matrix. If this cost matrix is not square, it
                will be padded with zeros, via a call to ``pad_matrix()``.
                (This method does *not* modify the caller's matrix. It
                operates on a copy of the matrix.)

                **WARNING**: This code handles square and rectangular
                matrices. It does *not* handle irregular matrices.

        :rtype: list
        :return: A list of ``(row, column)`` tuples that describe the lowest
                 cost path through the matrix
        """
        self.C = self.pad_matrix(cost_matrix)
        self.n = len(self.C)
        self.original_length = len(cost_matrix)
        self.original_width = len(cost_matrix[0])
        self.row_covered = [False for i in range(self.n)]
        self.col_covered = [False for i in range(self.n)]
        self.Z0_r = 0
        self.Z0_c = 0
        self.path = self.__make_matrix(self.n * 2, 0)
        self.marked = self.__make_matrix(self.n, 0)

        done = False
        step = 1

        steps = { 1 : self.__step1,
                  2 : self.__step2,
                  3 : self.__step3,
                  4 : self.__step4,
                  5 : self.__step5,
                  6 : self.__step6 }

        while not done:
            try:
                func = steps[step]
                step = func()
            except KeyError:
                # Step 7 ("done") has no entry in the dispatch table.
                done = True

        # Look for the starred columns
        results = []
        for i in range(self.original_length):
            for j in range(self.original_width):
                if self.marked[i][j] == 1:
                    results += [(i, j)]

        return results

    def __copy_matrix(self, matrix):
        """Return an exact copy of the supplied matrix"""
        return copy.deepcopy(matrix)

    def __make_matrix(self, n, val):
        """Create an *n*x*n* matrix, populating it with the specific value."""
        matrix = []
        for i in range(n):
            matrix += [[val for j in range(n)]]
        return matrix

    def __step1(self):
        """
        For each row of the matrix, find the smallest element and
        subtract it from every element in its row. Go to Step 2.
        """
        n = self.n
        for i in range(n):
            minval = min(self.C[i])
            # Find the minimum value for this row and subtract that minimum
            # from every element in the row.
            for j in range(n):
                self.C[i][j] -= minval

        return 2

    def __step2(self):
        """
        Find a zero (Z) in the resulting matrix. If there is no starred
        zero in its row or column, star Z. Repeat for each element in the
        matrix. Go to Step 3.
        """
        n = self.n
        for i in range(n):
            for j in range(n):
                if (self.C[i][j] == 0) and \
                   (not self.col_covered[j]) and \
                   (not self.row_covered[i]):
                    self.marked[i][j] = 1
                    self.col_covered[j] = True
                    self.row_covered[i] = True

        self.__clear_covers()
        return 3

    def __step3(self):
        """
        Cover each column containing a starred zero. If K columns are
        covered, the starred zeros describe a complete set of unique
        assignments. In this case, Go to DONE, otherwise, Go to Step 4.
        """
        n = self.n
        count = 0
        for i in range(n):
            for j in range(n):
                if self.marked[i][j] == 1:
                    self.col_covered[j] = True
                    count += 1

        if count >= n:
            step = 7 # done
        else:
            step = 4

        return step

    def __step4(self):
        """
        Find a noncovered zero and prime it. If there is no starred zero
        in the row containing this primed zero, Go to Step 5. Otherwise,
        cover this row and uncover the column containing the starred
        zero. Continue in this manner until there are no uncovered zeros
        left. Save the smallest uncovered value and Go to Step 6.
        """
        step = 0
        done = False
        row = -1
        col = -1
        star_col = -1
        while not done:
            (row, col) = self.__find_a_zero()
            if row < 0:
                done = True
                step = 6
            else:
                self.marked[row][col] = 2
                star_col = self.__find_star_in_row(row)
                if star_col >= 0:
                    col = star_col
                    self.row_covered[row] = True
                    self.col_covered[col] = False
                else:
                    done = True
                    self.Z0_r = row
                    self.Z0_c = col
                    step = 5

        return step

    def __step5(self):
        """
        Construct a series of alternating primed and starred zeros as
        follows. Let Z0 represent the uncovered primed zero found in Step 4.
        Let Z1 denote the starred zero in the column of Z0 (if any).
        Let Z2 denote the primed zero in the row of Z1 (there will always
        be one). Continue until the series terminates at a primed zero
        that has no starred zero in its column. Unstar each starred zero
        of the series, star each primed zero of the series, erase all
        primes and uncover every line in the matrix. Return to Step 3
        """
        count = 0
        path = self.path
        path[count][0] = self.Z0_r
        path[count][1] = self.Z0_c
        done = False
        while not done:
            row = self.__find_star_in_col(path[count][1])
            if row >= 0:
                count += 1
                path[count][0] = row
                path[count][1] = path[count-1][1]
            else:
                done = True

            if not done:
                col = self.__find_prime_in_row(path[count][0])
                count += 1
                path[count][0] = path[count-1][0]
                path[count][1] = col

        self.__convert_path(path, count)
        self.__clear_covers()
        self.__erase_primes()
        return 3

    def __step6(self):
        """
        Add the value found in Step 4 to every element of each covered
        row, and subtract it from every element of each uncovered column.
        Return to Step 4 without altering any stars, primes, or covered
        lines.
        """
        minval = self.__find_smallest()
        for i in range(self.n):
            for j in range(self.n):
                if self.row_covered[i]:
                    self.C[i][j] += minval
                if not self.col_covered[j]:
                    self.C[i][j] -= minval
        return 4

    def __find_smallest(self):
        """Find the smallest uncovered value in the matrix."""
        minval = sys.maxsize
        for i in range(self.n):
            for j in range(self.n):
                if (not self.row_covered[i]) and (not self.col_covered[j]):
                    if minval > self.C[i][j]:
                        minval = self.C[i][j]
        return minval

    def __find_a_zero(self):
        """Find the first uncovered element with value 0"""
        row = -1
        col = -1
        i = 0
        n = self.n
        done = False

        while not done:
            j = 0
            while True:
                if (self.C[i][j] == 0) and \
                   (not self.row_covered[i]) and \
                   (not self.col_covered[j]):
                    row = i
                    col = j
                    done = True
                j += 1
                if j >= n:
                    break
            i += 1
            if i >= n:
                done = True

        return (row, col)

    def __find_star_in_row(self, row):
        """
        Find the first starred element in the specified row. Returns
        the column index, or -1 if no starred element was found.
        """
        col = -1
        for j in range(self.n):
            if self.marked[row][j] == 1:
                col = j
                break

        return col

    def __find_star_in_col(self, col):
        """
        Find the first starred element in the specified column. Returns
        the row index, or -1 if no starred element was found.
        """
        row = -1
        for i in range(self.n):
            if self.marked[i][col] == 1:
                row = i
                break

        return row

    def __find_prime_in_row(self, row):
        """
        Find the first primed element in the specified row. Returns
        the column index, or -1 if no primed element was found.
        """
        col = -1
        for j in range(self.n):
            if self.marked[row][j] == 2:
                col = j
                break

        return col

    def __convert_path(self, path, count):
        # Toggle stars along the alternating path built in step 5.
        for i in range(count+1):
            if self.marked[path[i][0]][path[i][1]] == 1:
                self.marked[path[i][0]][path[i][1]] = 0
            else:
                self.marked[path[i][0]][path[i][1]] = 1

    def __clear_covers(self):
        """Clear all covered matrix cells"""
        for i in range(self.n):
            self.row_covered[i] = False
            self.col_covered[i] = False

    def __erase_primes(self):
        """Erase all prime markings"""
        for i in range(self.n):
            for j in range(self.n):
                if self.marked[i][j] == 2:
                    self.marked[i][j] = 0
# ---------------------------------------------------------------------------
# Functions
# ---------------------------------------------------------------------------
def make_cost_matrix(profit_matrix, inversion_function):
    """
    Create a cost matrix from a profit matrix by calling
    'inversion_function' to invert each value. The inversion
    function must take one numeric argument (of any type) and return
    another numeric argument which is presumed to be the cost inverse
    of the original profit.

    This is a static method. Call it like this:

    .. python::

        cost_matrix = Munkres.make_cost_matrix(matrix, inversion_func)

    For example:

    .. python::

        cost_matrix = Munkres.make_cost_matrix(matrix, lambda x : sys.maxsize - x)

    :Parameters:
        profit_matrix : list of lists
            The matrix to convert from a profit to a cost matrix

        inversion_function : function
            The function to use to invert each entry in the profit matrix

    :rtype: list of lists
    :return: The converted matrix
    """
    # Invert every entry; one inner list per row of the profit matrix.
    return [[inversion_function(value) for value in row]
            for row in profit_matrix]
def print_matrix(matrix, msg=None):
    """
    Convenience function: Displays the contents of a matrix of integers.

    :Parameters:
        matrix : list of lists
            Matrix to print

        msg : str
            Optional message to print before displaying the matrix
    """
    if msg is not None:
        print(msg)

    # Calculate the appropriate format width.  Using the string length of
    # each value is equivalent to int(log10(val)) + 1 for positive values,
    # and (unlike math.log10) does not raise on zero or negative entries.
    width = 0
    for row in matrix:
        for val in row:
            width = max(width, len(str(val)))

    # Make the format string
    format = '%%%dd' % width

    # Print the matrix
    for row in matrix:
        sep = '['
        for val in row:
            sys.stdout.write(sep + format % val)
            sep = ', '
        sys.stdout.write(']\n')
# ---------------------------------------------------------------------------
# Main
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    # Smoke test: each entry pairs a cost matrix with its known optimal
    # total assignment cost.
    matrices = [
                # Square
                ([[400, 150, 400],
                  [400, 450, 600],
                  [300, 225, 300]],
                 850),  # expected cost

                # Rectangular variant
                ([[400, 150, 400, 1],
                  [400, 450, 600, 2],
                  [300, 225, 300, 3]],
                 452),  # expected cost

                # Square
                ([[10, 10, 8],
                  [9, 8, 1],
                  [9, 7, 4]],
                 18),

                # Rectangular variant
                ([[10, 10, 8, 11],
                  [9, 8, 1, 1],
                  [9, 7, 4, 10]],
                 15)]

    m = Munkres()
    for cost_matrix, expected_total in matrices:
        print_matrix(cost_matrix, msg='cost matrix')
        indexes = m.compute(cost_matrix)
        total_cost = 0
        for r, c in indexes:
            x = cost_matrix[r][c]
            total_cost += x
            print('(%d, %d) -> %d' % (r, c, x))
        print('lowest cost=%d' % total_cost)
        # Fail loudly if the algorithm ever regresses.
        assert expected_total == total_cost

View file

@ -1,58 +0,0 @@
Mutagen
=======
Mutagen is a Python module to handle audio metadata. It supports ASF, FLAC,
M4A, Monkey's Audio, MP3, Musepack, Ogg Opus, Ogg FLAC, Ogg Speex, Ogg
Theora, Ogg Vorbis, True Audio, WavPack, OptimFROG, and AIFF audio files.
All versions of ID3v2 are supported, and all standard ID3v2.4 frames are
parsed. It can read Xing headers to accurately calculate the bitrate and
length of MP3s. ID3 and APEv2 tags can be edited regardless of audio
format. It can also manipulate Ogg streams on an individual packet/page
level.
Mutagen works on Python 2.6, 2.7, 3.3, 3.4 (CPython and PyPy) and has no
dependencies outside the Python standard library.
Installing
----------
$ ./setup.py build
$ su -c "./setup.py install"
Documentation
-------------
The primary documentation for Mutagen is the doc strings found in
the source code and the sphinx documentation in the docs/ directory.
To build the docs (needs sphinx):
$ ./setup.py build_sphinx
The tools/ directory contains several useful examples.
The docs are also hosted on readthedocs.org:
http://mutagen.readthedocs.org
Testing the Module
------------------
To test Mutagen's MP3 reading support, run
$ tools/mutagen-pony <your top-level MP3 directory here>
Mutagen will try to load all of them, and report any errors.
To look at the tags in files, run
$ tools/mutagen-inspect filename ...
To run our test suite,
$ ./setup.py test
Compatibility/Bugs
------------------
See docs/bugs.rst

View file

@ -1,41 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright (C) 2005 Michael Urman
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of version 2 of the GNU General Public License as
# published by the Free Software Foundation.
"""Mutagen aims to be an all purpose multimedia tagging library.
::
import mutagen.[format]
metadata = mutagen.[format].Open(filename)
`metadata` acts like a dictionary of tags in the file. Tags are generally a
list of string-like values, but may have additional methods available
depending on tag or format. They may also be entirely different objects
for certain keys, again depending on format.
"""
from mutagen._util import MutagenError
from mutagen._file import FileType, StreamInfo, File
from mutagen._tags import Metadata
# Library version as a tuple of ints.
version = (1, 27)
"""Version tuple."""

# Dotted-string form of `version`.
version_string = ".".join(map(str, version))
"""Version string."""

# Bare references keep the re-exported names above "used", so linters
# (e.g. pyflakes) do not flag the imports as unused.
MutagenError
FileType
StreamInfo
File
Metadata

View file

@ -1,84 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright (C) 2013 Christoph Reiter
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of version 2 of the GNU General Public License as
# published by the Free Software Foundation.
import sys
# True on Python 2, False on Python 3; selects which set of shims below
# is defined at import time.
PY2 = sys.version_info[0] == 2
PY3 = not PY2

if PY2:
    from StringIO import StringIO
    BytesIO = StringIO
    from cStringIO import StringIO as cBytesIO

    long_ = long
    integer_types = (int, long)
    string_types = (str, unicode)
    text_type = unicode

    xrange = xrange
    cmp = cmp
    chr_ = chr

    def endswith(text, end):
        return text.endswith(end)

    iteritems = lambda d: d.iteritems()
    itervalues = lambda d: d.itervalues()
    iterkeys = lambda d: d.iterkeys()
    iterbytes = lambda b: iter(b)

    # `raise tp, value, tb` is a syntax error on Python 3, so it must be
    # hidden inside exec() to keep this module importable there.
    exec("def reraise(tp, value, tb):\n raise tp, value, tb")

    def swap_to_string(cls):
        # Remap Python-3-style __str__/__bytes__ definitions onto the
        # Python 2 __unicode__/__str__ slots.
        if "__str__" in cls.__dict__:
            cls.__unicode__ = cls.__str__

        if "__bytes__" in cls.__dict__:
            cls.__str__ = cls.__bytes__

        return cls

elif PY3:
    from io import StringIO
    StringIO = StringIO
    from io import BytesIO
    cBytesIO = BytesIO

    long_ = int
    integer_types = (int,)
    string_types = (str,)
    text_type = str

    xrange = range
    cmp = lambda a, b: (a > b) - (a < b)
    chr_ = lambda x: bytes([x])

    def endswith(text, end):
        # useful for paths which can be both, str and bytes
        if isinstance(text, str):
            if not isinstance(end, str):
                end = end.decode("ascii")
        else:
            if not isinstance(end, bytes):
                end = end.encode("ascii")
        return text.endswith(end)

    iteritems = lambda d: iter(d.items())
    itervalues = lambda d: iter(d.values())
    iterkeys = lambda d: iter(d.keys())
    iterbytes = lambda b: (bytes([v]) for v in b)

    def reraise(tp, value, tb):
        raise tp(value).with_traceback(tb)

    def swap_to_string(cls):
        # Python 3 classes already use __str__/__bytes__ natively.
        return cls

View file

@ -1,199 +0,0 @@
# -*- coding: utf-8 -*-
"""Constants used by Mutagen."""
# The ID3v1 genre table.  The position of each entry is significant: an
# ID3v1 genre is stored as a numeric index into this list (0 = "Blues"),
# so entries must never be reordered or removed, only appended.
GENRES = [
    u"Blues",
    u"Classic Rock",
    u"Country",
    u"Dance",
    u"Disco",
    u"Funk",
    u"Grunge",
    u"Hip-Hop",
    u"Jazz",
    u"Metal",
    u"New Age",
    u"Oldies",
    u"Other",
    u"Pop",
    u"R&B",
    u"Rap",
    u"Reggae",
    u"Rock",
    u"Techno",
    u"Industrial",
    u"Alternative",
    u"Ska",
    u"Death Metal",
    u"Pranks",
    u"Soundtrack",
    u"Euro-Techno",
    u"Ambient",
    u"Trip-Hop",
    u"Vocal",
    u"Jazz+Funk",
    u"Fusion",
    u"Trance",
    u"Classical",
    u"Instrumental",
    u"Acid",
    u"House",
    u"Game",
    u"Sound Clip",
    u"Gospel",
    u"Noise",
    u"Alt. Rock",
    u"Bass",
    u"Soul",
    u"Punk",
    u"Space",
    u"Meditative",
    u"Instrumental Pop",
    u"Instrumental Rock",
    u"Ethnic",
    u"Gothic",
    u"Darkwave",
    u"Techno-Industrial",
    u"Electronic",
    u"Pop-Folk",
    u"Eurodance",
    u"Dream",
    u"Southern Rock",
    u"Comedy",
    u"Cult",
    u"Gangsta Rap",
    u"Top 40",
    u"Christian Rap",
    u"Pop/Funk",
    u"Jungle",
    u"Native American",
    u"Cabaret",
    u"New Wave",
    u"Psychedelic",
    u"Rave",
    u"Showtunes",
    u"Trailer",
    u"Lo-Fi",
    u"Tribal",
    u"Acid Punk",
    u"Acid Jazz",
    u"Polka",
    u"Retro",
    u"Musical",
    u"Rock & Roll",
    u"Hard Rock",
    u"Folk",
    u"Folk-Rock",
    u"National Folk",
    u"Swing",
    u"Fast-Fusion",
    u"Bebop",
    u"Latin",
    u"Revival",
    u"Celtic",
    u"Bluegrass",
    u"Avantgarde",
    u"Gothic Rock",
    u"Progressive Rock",
    u"Psychedelic Rock",
    u"Symphonic Rock",
    u"Slow Rock",
    u"Big Band",
    u"Chorus",
    u"Easy Listening",
    u"Acoustic",
    u"Humour",
    u"Speech",
    u"Chanson",
    u"Opera",
    u"Chamber Music",
    u"Sonata",
    u"Symphony",
    u"Booty Bass",
    u"Primus",
    u"Porn Groove",
    u"Satire",
    u"Slow Jam",
    u"Club",
    u"Tango",
    u"Samba",
    u"Folklore",
    u"Ballad",
    u"Power Ballad",
    u"Rhythmic Soul",
    u"Freestyle",
    u"Duet",
    u"Punk Rock",
    u"Drum Solo",
    u"A Cappella",
    u"Euro-House",
    u"Dance Hall",
    u"Goa",
    u"Drum & Bass",
    u"Club-House",
    u"Hardcore",
    u"Terror",
    u"Indie",
    u"BritPop",
    u"Afro-Punk",
    u"Polsk Punk",
    u"Beat",
    u"Christian Gangsta Rap",
    u"Heavy Metal",
    u"Black Metal",
    u"Crossover",
    u"Contemporary Christian",
    u"Christian Rock",
    u"Merengue",
    u"Salsa",
    u"Thrash Metal",
    u"Anime",
    u"JPop",
    u"Synthpop",
    u"Abstract",
    u"Art Rock",
    u"Baroque",
    u"Bhangra",
    u"Big Beat",
    u"Breakbeat",
    u"Chillout",
    u"Downtempo",
    u"Dub",
    u"EBM",
    u"Eclectic",
    u"Electro",
    u"Electroclash",
    u"Emo",
    u"Experimental",
    u"Garage",
    u"Global",
    u"IDM",
    u"Illbient",
    u"Industro-Goth",
    u"Jam Band",
    u"Krautrock",
    u"Leftfield",
    u"Lounge",
    u"Math Rock",
    u"New Romantic",
    u"Nu-Breakz",
    u"Post-Punk",
    u"Post-Rock",
    u"Psytrance",
    u"Shoegaze",
    u"Space Rock",
    u"Trop Rock",
    u"World Music",
    u"Neoclassical",
    u"Audiobook",
    u"Audio Theatre",
    u"Neue Deutsche Welle",
    u"Podcast",
    u"Indie Rock",
    u"G-Funk",
    u"Dubstep",
    u"Garage Rock",
    u"Psybient",
]
"""The ID3v1 genre list."""

View file

@ -1,237 +0,0 @@
# Copyright (C) 2005 Michael Urman
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of version 2 of the GNU General Public License as
# published by the Free Software Foundation.
import warnings
from mutagen._util import DictMixin
class FileType(DictMixin):
    """An abstract object wrapping tags and audio stream information.
    Attributes:
    * info -- stream information (length, bitrate, sample rate)
    * tags -- metadata tags, if any
    Each file format has different potential tags and stream
    information.
    FileTypes implement an interface very similar to Metadata; the
    dict interface, save, load, and delete calls on a FileType call
    the appropriate methods on its tag data.
    """
    # Exposed publicly as part of the top-level "mutagen" namespace.
    __module__ = "mutagen"
    # Stream information object; set by a subclass's load().
    info = None
    # Tag object, or None when the file has no tags.
    tags = None
    # Path the file was loaded from; set by a subclass's load().
    filename = None
    # Fallback mime type; subclasses contribute their own (see `mime`).
    _mimes = ["application/octet-stream"]
    def __init__(self, filename=None, *args, **kwargs):
        # Constructing without a filename is deprecated but tolerated.
        if filename is None:
            warnings.warn("FileType constructor requires a filename",
                          DeprecationWarning)
        else:
            self.load(filename, *args, **kwargs)
    def load(self, filename, *args, **kwargs):
        # Abstract: subclasses parse the file and set info/tags/filename.
        raise NotImplementedError
    def __getitem__(self, key):
        """Look up a metadata tag key.
        If the file has no tags at all, a KeyError is raised.
        """
        if self.tags is None:
            raise KeyError(key)
        else:
            return self.tags[key]
    def __setitem__(self, key, value):
        """Set a metadata tag.
        If the file has no tags, an appropriate format is added (but
        not written until save is called).
        """
        if self.tags is None:
            # Lazily create a tag object of the format's native kind.
            self.add_tags()
        self.tags[key] = value
    def __delitem__(self, key):
        """Delete a metadata tag key.
        If the file has no tags at all, a KeyError is raised.
        """
        if self.tags is None:
            raise KeyError(key)
        else:
            del(self.tags[key])
    def keys(self):
        """Return a list of keys in the metadata tag.
        If the file has no tags at all, an empty list is returned.
        """
        if self.tags is None:
            return []
        else:
            return self.tags.keys()
    def delete(self, filename=None):
        """Remove tags from a file."""
        # No-op (returns None) when the file has no tags at all.
        if self.tags is not None:
            if filename is None:
                filename = self.filename
            else:
                # Passing an explicit filename is deprecated.
                warnings.warn(
                    "delete(filename=...) is deprecated, reload the file",
                    DeprecationWarning)
            return self.tags.delete(filename)
    def save(self, filename=None, **kwargs):
        """Save metadata tags."""
        if filename is None:
            filename = self.filename
        else:
            # Passing an explicit filename is deprecated.
            warnings.warn(
                "save(filename=...) is deprecated, reload the file",
                DeprecationWarning)
        if self.tags is not None:
            return self.tags.save(filename, **kwargs)
        else:
            raise ValueError("no tags in file")
    def pprint(self):
        """Print stream information and comment key=value pairs."""
        stream = "%s (%s)" % (self.info.pprint(), self.mime[0])
        try:
            tags = self.tags.pprint()
        except AttributeError:
            # self.tags is None: show stream info only.
            return stream
        else:
            return stream + ((tags and "\n" + tags) or "")
    def add_tags(self):
        """Adds new tags to the file.
        Raises if tags already exist.
        """
        raise NotImplementedError
    @property
    def mime(self):
        """A list of mime types"""
        # Collect _mimes along the MRO, most specific first, deduplicated.
        mimes = []
        for Kind in type(self).__mro__:
            for mime in getattr(Kind, '_mimes', []):
                if mime not in mimes:
                    mimes.append(mime)
        return mimes
    @staticmethod
    def score(filename, fileobj, header):
        # Abstract: return a likelihood score that this type can read the
        # file; File() opens with the highest-scoring type.
        raise NotImplementedError
class StreamInfo(object):
    """Base class for per-format audio stream information.

    Concrete subclasses expose attributes such as length, bitrate and
    sample rate; see each format's implementation for the exact set.
    """

    # Exposed publicly as part of the top-level "mutagen" namespace.
    __module__ = "mutagen"

    def pprint(self):
        """Return a human-readable description of the stream.

        Abstract; every subclass must override this.
        """
        raise NotImplementedError
def File(filename, options=None, easy=False):
    """Guess the type of the file and try to open it.
    The file type is decided by several things, such as the first 128
    bytes (which usually contains a file type identifier), the
    filename extension, and the presence of existing tags.
    If no appropriate type could be found, None is returned.
    :param options: Sequence of :class:`FileType` implementations, defaults to
                    all included ones.
    :param easy: If the easy wrappers should be returned if available.
                 For example :class:`EasyMP3 <mp3.EasyMP3>` instead
                 of :class:`MP3 <mp3.MP3>`.
    """
    if options is None:
        # Imports are local to avoid circular imports at module load time
        # and to keep startup cheap when File() is never used.
        from mutagen.asf import ASF
        from mutagen.apev2 import APEv2File
        from mutagen.flac import FLAC
        if easy:
            from mutagen.easyid3 import EasyID3FileType as ID3FileType
        else:
            from mutagen.id3 import ID3FileType
        if easy:
            from mutagen.mp3 import EasyMP3 as MP3
        else:
            from mutagen.mp3 import MP3
        from mutagen.oggflac import OggFLAC
        from mutagen.oggspeex import OggSpeex
        from mutagen.oggtheora import OggTheora
        from mutagen.oggvorbis import OggVorbis
        from mutagen.oggopus import OggOpus
        if easy:
            from mutagen.trueaudio import EasyTrueAudio as TrueAudio
        else:
            from mutagen.trueaudio import TrueAudio
        from mutagen.wavpack import WavPack
        if easy:
            from mutagen.easymp4 import EasyMP4 as MP4
        else:
            from mutagen.mp4 import MP4
        from mutagen.musepack import Musepack
        from mutagen.monkeysaudio import MonkeysAudio
        from mutagen.optimfrog import OptimFROG
        from mutagen.aiff import AIFF
        from mutagen.aac import AAC
        options = [MP3, TrueAudio, OggTheora, OggSpeex, OggVorbis, OggFLAC,
                   FLAC, AIFF, APEv2File, MP4, ID3FileType, WavPack,
                   Musepack, MonkeysAudio, OptimFROG, ASF, OggOpus, AAC]
    if not options:
        return None
    fileobj = open(filename, "rb")
    try:
        # 128 bytes is enough for every score() implementation to sniff.
        header = fileobj.read(128)
        # Sort by name after score. Otherwise import order affects
        # Kind sort order, which affects treatment of things with
        # equals scores.
        results = [(Kind.score(filename, fileobj, header), Kind.__name__)
                   for Kind in options]
    finally:
        fileobj.close()
    results = list(zip(results, options))
    results.sort()
    (score, name), Kind = results[-1]
    # A positive score means at least one type recognized the file.
    if score > 0:
        return Kind(filename)
    else:
        return None

View file

@ -1,31 +0,0 @@
# Copyright (C) 2005 Michael Urman
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of version 2 of the GNU General Public License as
# published by the Free Software Foundation.
class Metadata(object):
    """Abstract base class for Mutagen's tag objects.

    Subclasses provide dict-like access to tag data and implement the
    load/save/delete trio; this base only wires the constructor through
    to load().
    """

    # Exposed publicly as part of the top-level "mutagen" namespace.
    __module__ = "mutagen"

    def __init__(self, *args, **kwargs):
        # Constructing with any arguments at all is shorthand for load(...).
        if args or kwargs:
            self.load(*args, **kwargs)

    def load(self, *args, **kwargs):
        # Abstract: subclasses parse tag data from a file or stream.
        raise NotImplementedError

    def save(self, filename=None):
        """Save changes to a file."""
        raise NotImplementedError

    def delete(self, filename=None):
        """Remove tags from a file."""
        raise NotImplementedError

View file

@ -1,603 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright (C) 2006 Joe Wreschnig
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
"""Utility classes for Mutagen.
You should not rely on the interfaces here being stable. They are
intended for internal use in Mutagen only.
"""
import struct
import codecs
from fnmatch import fnmatchcase
from ._compat import chr_, text_type, PY2, iteritems, iterbytes, \
integer_types, xrange
class MutagenError(Exception):
    """Root of the exception hierarchy for every error mutagen raises.

    .. versionadded:: 1.25
    """
def total_ordering(cls):
    """Class decorator deriving __le__/__gt__/__ge__/__ne__ from the
    class's own __eq__ and __lt__.

    The decorated class must define both __eq__ and __lt__ itself.
    """
    assert "__eq__" in cls.__dict__
    assert "__lt__" in cls.__dict__

    def le(self, other):
        return self == other or self < other

    def gt(self, other):
        return not (self == other or self < other)

    def ge(self, other):
        return not self < other

    def ne(self, other):
        return not self.__eq__(other)

    cls.__le__ = le
    cls.__gt__ = gt
    cls.__ge__ = ge
    cls.__ne__ = ne
    return cls
def hashable(cls):
    """Class decorator asserting hashability and deriving __ne__ from __eq__.

    The class must define its own __eq__ and a non-None __hash__.
    """
    # __hash__ defined directly on the class (py2 requirement) ...
    assert "__hash__" in cls.__dict__
    # ... and not disabled via ``__hash__ = None`` (py3 semantics).
    assert cls.__dict__["__hash__"] is not None
    assert "__eq__" in cls.__dict__

    def ne(self, other):
        return not self.__eq__(other)

    cls.__ne__ = ne
    return cls
def enum(cls):
    """Class decorator turning ALL-CAPS int attributes into enum members.

    Returns a new int-derived type with the same name; each uppercase
    integer attribute becomes an instance of that type, and repr() of a
    known value prints as ``TypeName.MEMBER``.
    """
    assert cls.__bases__ == (object,)
    attrs = dict(cls.__dict__)
    int_cls = type(cls.__name__, (int,), attrs)
    int_cls.__module__ = cls.__module__
    names_by_value = {}
    for name, value in iteritems(attrs):
        # Only ALL-CAPS integer attributes become enum members.
        if name.upper() == name and isinstance(value, integer_types):
            setattr(int_cls, name, int_cls(value))
            names_by_value[value] = name

    def _repr(self):
        # Known values print as "Type.NAME"; unknown ones as "Type(value)".
        if self in names_by_value:
            return "%s.%s" % (type(self).__name__, names_by_value[self])
        return "%s(%s)" % (type(self).__name__, self)

    int_cls.__repr__ = _repr
    return int_cls
@total_ordering
class DictMixin(object):
    """Implement the dict API using keys() and __*item__ methods.
    Similar to UserDict.DictMixin, this takes a class that defines
    __getitem__, __setitem__, __delitem__, and keys(), and turns it
    into a full dict-like object.
    UserDict.DictMixin is not suitable for this purpose because it's
    an old-style class.
    This class is not optimized for very large dictionaries; many
    functions have linear memory requirements. I recommend you
    override some of these functions if speed is required.
    """
    def __iter__(self):
        return iter(self.keys())
    def __has_key(self, key):
        # Membership via lookup; backs both has_key (py2) and __contains__.
        try:
            self[key]
        except KeyError:
            return False
        else:
            return True
    if PY2:
        has_key = __has_key
    __contains__ = __has_key
    if PY2:
        iterkeys = lambda self: iter(self.keys())
    def values(self):
        return [self[k] for k in self.keys()]
    if PY2:
        itervalues = lambda self: iter(self.values())
    def items(self):
        return list(zip(self.keys(), self.values()))
    if PY2:
        iteritems = lambda s: iter(s.items())
    def clear(self):
        # Materialize the key list first: we mutate while deleting.
        for key in list(self.keys()):
            self.__delitem__(key)
    def pop(self, key, *args):
        if len(args) > 1:
            raise TypeError("pop takes at most two arguments")
        try:
            value = self[key]
        except KeyError:
            if args:
                # A default was supplied; return it instead of raising.
                return args[0]
            else:
                raise
        del(self[key])
        return value
    def popitem(self):
        # Grab an arbitrary key (the first yielded); fail when empty.
        for key in self.keys():
            break
        else:
            raise KeyError("dictionary is empty")
        return key, self.pop(key)
    def update(self, other=None, **kwargs):
        if other is None:
            self.update(kwargs)
            other = {}
        try:
            # Mapping-style argument.
            for key, value in other.items():
                self.__setitem__(key, value)
        except AttributeError:
            # Sequence-of-pairs argument.
            for key, value in other:
                self[key] = value
    def setdefault(self, key, default=None):
        try:
            return self[key]
        except KeyError:
            self[key] = default
            return default
    def get(self, key, default=None):
        try:
            return self[key]
        except KeyError:
            return default
    def __repr__(self):
        return repr(dict(self.items()))
    def __eq__(self, other):
        return dict(self.items()) == other
    def __lt__(self, other):
        return dict(self.items()) < other
    __hash__ = object.__hash__
    def __len__(self):
        return len(self.keys())
class DictProxy(DictMixin):
    """A DictMixin backed by a real dict.

    Keeps all items in a private dictionary and supplies the four
    primitive operations DictMixin builds the rest of the dict API from.
    """

    def __init__(self, *args, **kwargs):
        # The backing store; name-mangled so subclasses cannot collide.
        self.__dict = {}
        super(DictProxy, self).__init__(*args, **kwargs)

    def __getitem__(self, key):
        return self.__dict[key]

    def __setitem__(self, key, value):
        self.__dict[key] = value

    def __delitem__(self, key):
        del self.__dict[key]

    def keys(self):
        return self.__dict.keys()
def _fill_cdata(cls):
    """Add struct pack/unpack helpers to *cls* as static methods.

    For each C type size/signedness/endianness this generates three
    callables -- ``<name>(data)``, ``<name>_from(data, offset=0)`` and
    ``to_<name>(num)`` -- registered both under the C type name (e.g.
    ``ushort_le``) and a bit-width alias (e.g. ``uint16_le``).
    """
    funcs = {}
    for key, name in [("b", "char"), ("h", "short"),
                      ("i", "int"), ("q", "longlong")]:
        for echar, esuffix in [("<", "le"), (">", "be")]:
            esuffix = "_" + esuffix
            for unsigned in [True, False]:
                s = struct.Struct(echar + (key.upper() if unsigned else key))
                # Closure factories bind the current Struct so each
                # generated function keeps its own instance.
                get_wrapper = lambda f: lambda *a, **k: f(*a, **k)[0]
                unpack = get_wrapper(s.unpack)

                def get_unpack_from(s):
                    def unpack_from(data, offset=0):
                        return s.unpack_from(data, offset)[0], offset + s.size
                    return unpack_from

                # NOTE: the old dead `unpack_from = get_wrapper(s.unpack_from)`
                # assignment (immediately overwritten below) was removed.
                unpack_from = get_unpack_from(s)
                pack = s.pack
                prefix = "u" if unsigned else ""
                if s.size == 1:
                    # Single-byte types have no endianness variants.
                    esuffix = ""
                bits = str(s.size * 8)
                funcs["%s%s%s" % (prefix, name, esuffix)] = unpack
                funcs["%sint%s%s" % (prefix, bits, esuffix)] = unpack
                funcs["%s%s%s_from" % (prefix, name, esuffix)] = unpack_from
                funcs["%sint%s%s_from" % (prefix, bits, esuffix)] = unpack_from
                funcs["to_%s%s%s" % (prefix, name, esuffix)] = pack
                funcs["to_%sint%s%s" % (prefix, bits, esuffix)] = pack
    for key, func in iteritems(funcs):
        setattr(cls, key, staticmethod(func))
class cdata(object):
    """C character buffer to Python numeric type conversions.
    For each size/sign/endianness:
    uint32_le(data)/to_uint32_le(num)/uint32_le_from(data, offset=0)
    """
    # Re-export struct.error so callers can catch cdata.error.
    from struct import error
    error = error
    # 256-entry translation table mapping each byte to its bit-reversed value.
    bitswap = b''.join(
        chr_(sum(((val >> i) & 1) << (7 - i) for i in range(8)))
        for val in range(256))
    # test_bit(value, n) -> True if bit n of value is set.
    test_bit = staticmethod(lambda value, n: bool((value >> n) & 1))
# Populate cdata with the generated pack/unpack static methods.
_fill_cdata(cdata)
def lock(fileobj):
    """Take a best-effort exclusive lock on *fileobj*.

    Returns True when the lock was taken, False when the platform has
    no fcntl or the filesystem refuses to lock.  Blocks until the lock
    is available.  May still raise in extreme circumstances (full lock
    table, invalid file).
    """
    try:
        import fcntl
    except ImportError:
        # No fcntl (e.g. Windows): treat as "couldn't lock", not an error.
        return False
    try:
        fcntl.lockf(fileobj, fcntl.LOCK_EX)
    except IOError:
        # FIXME: EACCES vs EAGAIN may deserve more nuanced handling.
        return False
    return True
def unlock(fileobj):
    """Release a lock previously obtained via lock().

    Only call this when lock() returned True.  A mismatched
    lock/unlock pair is a programming error, so failures here are
    deliberately not swallowed.
    """
    import fcntl
    fcntl.lockf(fileobj, fcntl.LOCK_UN)
def insert_bytes(fobj, size, offset, BUFFER_SIZE=2 ** 16):
    """Insert size bytes of empty space starting at offset.
    fobj must be an open file object, open rb+ or
    equivalent. Mutagen tries to use mmap to resize the file, but
    falls back to a significantly slower method if mmap fails.
    """
    assert 0 < size
    assert 0 <= offset
    locked = False
    fobj.seek(0, 2)
    filesize = fobj.tell()
    movesize = filesize - offset
    # Grow the file by `size` zero bytes at the end first.
    fobj.write(b'\x00' * size)
    fobj.flush()
    try:
        try:
            import mmap
            file_map = mmap.mmap(fobj.fileno(), filesize + size)
            try:
                # Shift everything after `offset` right by `size` in one move.
                file_map.move(offset + size, offset, movesize)
            finally:
                file_map.close()
        except (ValueError, EnvironmentError, ImportError):
            # handle broken mmap scenarios
            locked = lock(fobj)
            # Undo the initial grow and redo the padding in chunks.
            fobj.truncate(filesize)
            fobj.seek(0, 2)
            padsize = size
            # Don't generate an enormous string if we need to pad
            # the file out several megs.
            while padsize:
                addsize = min(BUFFER_SIZE, padsize)
                fobj.write(b"\x00" * addsize)
                padsize -= addsize
            # Move the tail right, copying from the end backwards so
            # source and destination chunks never overlap.
            fobj.seek(filesize, 0)
            while movesize:
                # At the start of this loop, fobj is pointing at the end
                # of the data we need to move, which is of movesize length.
                thismove = min(BUFFER_SIZE, movesize)
                # Seek back however much we're going to read this frame.
                fobj.seek(-thismove, 1)
                nextpos = fobj.tell()
                # Read it, so we're back at the end.
                data = fobj.read(thismove)
                # Seek back to where we need to write it.
                fobj.seek(-thismove + size, 1)
                # Write it.
                fobj.write(data)
                # And seek back to the end of the unmoved data.
                fobj.seek(nextpos)
                movesize -= thismove
            fobj.flush()
    finally:
        if locked:
            unlock(fobj)
def delete_bytes(fobj, size, offset, BUFFER_SIZE=2 ** 16):
    """Remove ``size`` bytes starting at ``offset`` from an open file.

    ``fobj`` must be opened rb+ or equivalent.  The trailing data is
    shifted left with mmap when possible; otherwise a slower buffered
    copy loop is used (taking a best-effort lock while the file is in
    an inconsistent state).
    """
    locked = False
    assert 0 < size
    assert 0 <= offset
    fobj.seek(0, 2)
    total = fobj.tell()
    # Number of bytes after the region being deleted.
    tail = total - offset - size
    assert 0 <= tail
    try:
        if tail > 0:
            fobj.flush()
            try:
                import mmap
                mapping = mmap.mmap(fobj.fileno(), total)
                try:
                    # Shift the tail left over the deleted region in one move.
                    mapping.move(offset, offset + size, tail)
                finally:
                    mapping.close()
            except (ValueError, EnvironmentError, ImportError):
                # mmap unavailable or failed: copy the tail forward in chunks.
                locked = lock(fobj)
                while True:
                    fobj.seek(offset + size)
                    chunk = fobj.read(BUFFER_SIZE)
                    if not chunk:
                        break
                    fobj.seek(offset)
                    fobj.write(chunk)
                    offset += len(chunk)
        fobj.truncate(total - size)
        fobj.flush()
    finally:
        if locked:
            unlock(fobj)
def dict_match(d, key, default=None):
    """Look up *key* in *d*, treating the dict's keys as fnmatch patterns.

    An exact hit wins, unless the key itself contains "[" (which would
    be ambiguous with pattern syntax); otherwise the first pattern key
    matching *key* supplies the value, falling back to *default*.
    """
    if "[" not in key and key in d:
        return d[key]
    for pattern, value in iteritems(d):
        if fnmatchcase(key, pattern):
            return value
    return default
def decode_terminated(data, encoding, strict=True):
    """Split *data* at its first NULL terminator and decode the head.

    Returns ``(text, rest)`` where *rest* is everything after the
    terminator.  Raises UnicodeError for undecodable data, LookupError
    for an unknown encoding, and ValueError when no terminator exists --
    unless *strict* is False, in which case the whole decoded string is
    returned with ``b""`` as the remainder.
    """
    codec_info = codecs.lookup(encoding)
    # Compare by the codec's canonical name, not the caller's spelling.
    encoding = codec_info.name
    if encoding in ("utf-8", "iso8859-1"):
        # Encodings where a NULL byte can only be a NULL code point:
        # a plain byte scan finds the terminator.
        null_at = data.find(b"\x00")
        if null_at == -1:
            # Decode first so UnicodeError still wins over ValueError.
            decoded = data.decode(encoding), b""
            if strict:
                raise ValueError("not null terminated")
            return decoded
        return data[:null_at].decode(encoding), data[null_at + 1:]
    # General case: feed an incremental decoder one byte at a time and
    # stop at the first decoded NULL code point.
    decoder = codec_info.incrementaldecoder()
    chars = []
    for index, byte in enumerate(iterbytes(data)):
        ch = decoder.decode(byte)
        if ch == u"\x00":
            return u"".join(chars), data[index + 1:]
        chars.append(ch)
    # Flush the decoder; raises if the input ended mid-character.
    chars.append(decoder.decode(b"", True))
    if strict:
        raise ValueError("not null terminated")
    return u"".join(chars), b""
def split_escape(string, sep, maxsplit=None, escape_char="\\"):
    """Like str/bytes.split, but a separator preceded by *escape_char*
    is treated as a literal character.

    Works on both text and bytes (bytes in -> list of bytes out).  An
    escaped separator or escape char is emitted literally; an escape in
    front of any other character is kept verbatim.
    """
    assert len(sep) == 1
    assert len(escape_char) == 1
    if isinstance(string, bytes):
        if isinstance(escape_char, text_type):
            escape_char = escape_char.encode("ascii")
        char_iter = iterbytes
    else:
        char_iter = iter
    if maxsplit is None:
        maxsplit = len(string)
    empty = string[:0]
    parts = []
    token = empty
    pending_escape = False
    for ch in char_iter(string):
        if pending_escape:
            # Only sep/escape_char are escapable; anything else keeps
            # the escape character literally.
            if ch != escape_char and ch != sep:
                token += escape_char
            token += ch
            pending_escape = False
        elif ch == escape_char:
            pending_escape = True
        elif ch == sep and len(parts) < maxsplit:
            parts.append(token)
            token = empty
        else:
            token += ch
    parts.append(token)
    return parts
class BitReaderError(Exception):
    """Raised by BitReader when the underlying stream runs out of data."""
class BitReader(object):
    """MSB-first bit-level reader over a binary file object."""

    def __init__(self, fileobj):
        self._fileobj = fileobj
        # Bits not yet consumed, right-aligned in an int accumulator.
        self._buffer = 0
        self._bits = 0
        self._pos = fileobj.tell()

    def bits(self, count):
        """Read `count` bits, most significant first, as an unsigned int.

        Raises BitReaderError when not enough data could be read, and
        may propagate IOError from the underlying file object.
        """
        if count < 0:
            raise ValueError
        if count > self._bits:
            # Top up the accumulator with just enough whole bytes.
            missing = (count - self._bits + 7) // 8
            chunk = self._fileobj.read(missing)
            if len(chunk) != missing:
                raise BitReaderError("not enough data")
            for byte in bytearray(chunk):
                self._buffer = (self._buffer << 8) | byte
            self._bits += missing * 8
        self._bits -= count
        value = self._buffer >> self._bits
        self._buffer &= (1 << self._bits) - 1
        assert self._bits < 8
        return value

    def bytes(self, count):
        """Return `count` bytes of data. Works unaligned."""
        if count < 0:
            raise ValueError
        if self._bits == 0:
            # Aligned: read straight from the file.
            data = self._fileobj.read(count)
            if len(data) != count:
                raise BitReaderError("not enough data")
            return data
        return bytes(bytearray(self.bits(8) for _ in xrange(count)))

    def skip(self, count):
        """Skip `count` bits.

        May raise BitReaderError when there isn't enough data, but can
        also defer that failure to the next bits() call.
        """
        if count < 0:
            raise ValueError
        if count <= self._bits:
            self.bits(count)
        else:
            count -= self.align()
            whole_bytes, count = divmod(count, 8)
            self._fileobj.seek(whole_bytes, 1)
            self.bits(count)

    def get_position(self):
        """Total bits read or skipped since construction."""
        return (self._fileobj.tell() - self._pos) * 8 - self._bits

    def align(self):
        """Drop buffered bits to reach a byte boundary.

        Returns the number of bits skipped.
        """
        dropped = self._bits
        self._buffer = 0
        self._bits = 0
        return dropped

    def is_aligned(self):
        """True when byte-aligned with nothing buffered."""
        return self._bits == 0

View file

@ -1,327 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright (C) 2005-2006 Joe Wreschnig
# 2013 Christoph Reiter
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of version 2 of the GNU General Public License as
# published by the Free Software Foundation.
"""Read and write Vorbis comment data.
Vorbis comments are freeform key/value pairs; keys are
case-insensitive ASCII and values are Unicode strings. A key may have
multiple values.
The specification is at http://www.xiph.org/vorbis/doc/v-comment.html.
"""
import sys
import mutagen
from ._compat import reraise, BytesIO, text_type, xrange, PY3, PY2
from mutagen._util import DictMixin, cdata
def is_valid_key(key):
    """Return True if *key* may be used as a Vorbis comment key.

    Valid keys are non-empty and consist only of printable ASCII from
    0x20 (space) through 0x7D ('}'), excluding '='.  Takes str/unicode
    in Python 2, unicode in Python 3.
    """
    if PY3 and isinstance(key, bytes):
        raise TypeError("needs to be str not bytes")
    if not key:
        return False
    return all(" " <= c <= "}" and c != "=" for c in key)
istag = is_valid_key
class error(IOError):
    """Base error type for Vorbis comment handling."""


class VorbisUnsetFrameError(error):
    """Raised when a required framing bit is missing."""


class VorbisEncodingError(error):
    """Raised for comment data that cannot be decoded or encoded."""
class VComment(mutagen.Metadata, list):
    """A Vorbis comment parser, accessor, and renderer.
    All comment ordering is preserved. A VComment is a list of
    key/value pairs, and so any Python list method can be used on it.
    Vorbis comments are always wrapped in something like an Ogg Vorbis
    bitstream or a FLAC metadata block, so this loads string data or a
    file-like object, not a filename.
    Attributes:
    * vendor -- the stream 'vendor' (i.e. writer); default 'Mutagen'
    """
    vendor = u"Mutagen " + mutagen.version_string
    def __init__(self, data=None, *args, **kwargs):
        # Collect the args to pass to load, this lets child classes
        # override just load and get equivalent magic for the
        # constructor.
        if data is not None:
            if isinstance(data, bytes):
                data = BytesIO(data)
            elif not hasattr(data, 'read'):
                raise TypeError("VComment requires bytes or a file-like")
            self.load(data, *args, **kwargs)
    def load(self, fileobj, errors='replace', framing=True):
        """Parse a Vorbis comment from a file-like object.
        Keyword arguments:
        * errors:
            'strict', 'replace', or 'ignore'. This affects Unicode decoding
            and how other malformed content is interpreted.
        * framing -- if true, fail if a framing bit is not present
        Framing bits are required by the Vorbis comment specification,
        but are not used in FLAC Vorbis comment blocks.
        """
        try:
            # Wire layout: vendor length + vendor string, then an entry
            # count, then length-prefixed "key=value" entries (all
            # little-endian uint32 lengths, UTF-8 payloads).
            vendor_length = cdata.uint_le(fileobj.read(4))
            self.vendor = fileobj.read(vendor_length).decode('utf-8', errors)
            count = cdata.uint_le(fileobj.read(4))
            for i in xrange(count):
                length = cdata.uint_le(fileobj.read(4))
                try:
                    string = fileobj.read(length).decode('utf-8', errors)
                except (OverflowError, MemoryError):
                    raise error("cannot read %d bytes, too large" % length)
                try:
                    tag, value = string.split('=', 1)
                except ValueError as err:
                    if errors == "ignore":
                        continue
                    elif errors == "replace":
                        # Keep the malformed entry under a synthetic key.
                        tag, value = u"unknown%d" % i, string
                    else:
                        reraise(VorbisEncodingError, err, sys.exc_info()[2])
                try:
                    tag = tag.encode('ascii', errors)
                except UnicodeEncodeError:
                    raise VorbisEncodingError("invalid tag name %r" % tag)
                else:
                    # string keys in py3k
                    if PY3:
                        tag = tag.decode("ascii")
                    if is_valid_key(tag):
                        self.append((tag, value))
            if framing and not bytearray(fileobj.read(1))[0] & 0x01:
                raise VorbisUnsetFrameError("framing bit was unset")
        except (cdata.error, TypeError):
            raise error("file is not a valid Vorbis comment")
    def validate(self):
        """Validate keys and values.
        Check to make sure every key used is a valid Vorbis key, and
        that every value used is a valid Unicode or UTF-8 string. If
        any invalid keys or values are found, a ValueError is raised.
        In Python 3 all keys and values have to be a string.
        """
        if not isinstance(self.vendor, text_type):
            if PY3:
                raise ValueError("vendor needs to be str")
            # py2: a bytes vendor is fine as long as it is valid UTF-8.
            try:
                self.vendor.decode('utf-8')
            except UnicodeDecodeError:
                raise ValueError
        for key, value in self:
            try:
                if not is_valid_key(key):
                    raise ValueError
            except TypeError:
                raise ValueError("%r is not a valid key" % key)
            if not isinstance(value, text_type):
                if PY3:
                    raise ValueError("%r needs to be str" % key)
                try:
                    value.decode("utf-8")
                except:
                    raise ValueError("%r is not a valid value" % value)
        return True
    def clear(self):
        """Clear all keys from the comment."""
        # Iterate over a copy; remove() mutates the list in place.
        for i in list(self):
            self.remove(i)
    def write(self, framing=True):
        """Return a string representation of the data.
        Validation is always performed, so calling this function on
        invalid data may raise a ValueError.
        Keyword arguments:
        * framing -- if true, append a framing bit (see load)
        """
        self.validate()
        def _encode(value):
            # Serialize text as UTF-8; pass already-encoded bytes through.
            if not isinstance(value, bytes):
                return value.encode('utf-8')
            return value
        f = BytesIO()
        vendor = _encode(self.vendor)
        f.write(cdata.to_uint_le(len(vendor)))
        f.write(vendor)
        f.write(cdata.to_uint_le(len(self)))
        for tag, value in self:
            tag = _encode(tag)
            value = _encode(value)
            comment = tag + b"=" + value
            f.write(cdata.to_uint_le(len(comment)))
            f.write(comment)
        if framing:
            f.write(b"\x01")
        return f.getvalue()
    def pprint(self):
        """Return the comment as "key=value" lines, decoding leniently."""
        def _decode(value):
            if not isinstance(value, text_type):
                return value.decode('utf-8', 'replace')
            return value
        tags = [u"%s=%s" % (_decode(k), _decode(v)) for k, v in self]
        return u"\n".join(tags)
class VCommentDict(VComment, DictMixin):
    """A VComment that looks like a dictionary.
    This object differs from a dictionary in two ways. First,
    len(comment) will still return the number of values, not the
    number of keys. Secondly, iterating through the object will
    iterate over (key, value) pairs, not keys. Since a key may have
    multiple values, the same value may appear multiple times while
    iterating.
    Since Vorbis comment keys are case-insensitive, all keys are
    normalized to lowercase ASCII.
    """
    def __getitem__(self, key):
        """A list of values for the key.
        This is a copy, so comment['title'].append('a title') will not
        work.
        """
        # PY3 only
        if isinstance(key, slice):
            # Slices fall through to plain list behavior.
            return VComment.__getitem__(self, key)
        if not is_valid_key(key):
            raise ValueError
        key = key.lower()
        # Collect all values whose key matches case-insensitively.
        values = [value for (k, value) in self if k.lower() == key]
        if not values:
            raise KeyError(key)
        else:
            return values
    def __delitem__(self, key):
        """Delete all values associated with the key."""
        # PY3 only
        if isinstance(key, slice):
            return VComment.__delitem__(self, key)
        if not is_valid_key(key):
            raise ValueError
        key = key.lower()
        to_delete = [x for x in self if x[0].lower() == key]
        if not to_delete:
            raise KeyError(key)
        else:
            for item in to_delete:
                self.remove(item)
    def __contains__(self, key):
        """Return true if the key has any values."""
        if not is_valid_key(key):
            raise ValueError
        key = key.lower()
        for k, value in self:
            if k.lower() == key:
                return True
        else:
            return False
    def __setitem__(self, key, values):
        """Set a key's value or values.
        Setting a value overwrites all old ones. The value may be a
        list of Unicode or UTF-8 strings, or a single Unicode or UTF-8
        string.
        """
        # PY3 only
        if isinstance(key, slice):
            return VComment.__setitem__(self, key, values)
        if not is_valid_key(key):
            raise ValueError
        if not isinstance(values, list):
            values = [values]
        try:
            # Replace semantics: drop any existing values first.
            del(self[key])
        except KeyError:
            pass
        if PY2:
            # py2 stores keys as ASCII bytes.
            key = key.encode('ascii')
        for value in values:
            self.append((key, value))
    def keys(self):
        """Return all keys in the comment."""
        return list(set([k.lower() for k, v in self]))
    def as_dict(self):
        """Return a copy of the comment data in a real dict."""
        return dict([(key, self[key]) for key in self.keys()])

View file

@ -1,407 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright (C) 2014 Christoph Reiter
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of version 2 of the GNU General Public License as
# published by the Free Software Foundation.
"""
* ADTS - Audio Data Transport Stream
* ADIF - Audio Data Interchange Format
* See ISO/IEC 13818-7 / 14496-03
"""
from mutagen import StreamInfo
from mutagen._file import FileType
from mutagen._util import BitReader, BitReaderError, MutagenError
from mutagen._compat import endswith, xrange
_FREQS = [
96000, 88200, 64000, 48000,
44100, 32000, 24000, 22050,
16000, 12000, 11025, 8000,
7350,
]
class _ADTSStream(object):
    """Represents a series of frames belonging to the same stream"""
    parsed_frames = 0
    """Number of successfully parsed frames"""
    offset = 0
    """offset in bytes at which the stream starts (the first sync word)"""
    @classmethod
    def find_stream(cls, fileobj, max_bytes):
        """Returns a possibly valid _ADTSStream or None.
        Args:
            max_bytes (int): maximum bytes to read
        """
        r = BitReader(fileobj)
        stream = cls(r)
        if stream.sync(max_bytes):
            # sync() consumed the 12-bit sync word; the stream starts there.
            stream.offset = (r.get_position() - 12) // 8
            return stream
    def sync(self, max_bytes):
        """Find the next sync.
        Returns True if found."""
        # at least 2 bytes for the sync
        max_bytes = max(max_bytes, 2)
        r = self._r
        r.align()
        while max_bytes > 0:
            try:
                b = r.bytes(1)
                if b == b"\xff":
                    # Candidate: check the remaining 4 sync bits (0xFFF).
                    if r.bits(4) == 0xf:
                        return True
                    r.align()
                    max_bytes -= 2
                else:
                    max_bytes -= 1
            except BitReaderError:
                return False
        return False
    def __init__(self, r):
        """Use _ADTSStream.find_stream to create a stream"""
        self._fixed_header_key = None
        self._r = r
        self.offset = -1
        self.parsed_frames = 0
        self._samples = 0
        self._payload = 0
        self._start = r.get_position() / 8
        self._last = self._start
    @property
    def bitrate(self):
        """Bitrate of the raw aac blocks, excluding framing/crc"""
        assert self.parsed_frames, "no frame parsed yet"
        if self._samples == 0:
            return 0
        return (8 * self._payload * self.frequency) // self._samples
    @property
    def samples(self):
        """samples so far"""
        assert self.parsed_frames, "no frame parsed yet"
        return self._samples
    @property
    def size(self):
        """bytes read in the stream so far (including framing)"""
        assert self.parsed_frames, "no frame parsed yet"
        return self._last - self._start
    @property
    def channels(self):
        """0 means unknown"""
        assert self.parsed_frames, "no frame parsed yet"
        # channel_configuration from the fixed header (index 6).
        b_index = self._fixed_header_key[6]
        if b_index == 7:
            # Configuration 7 maps to 8 channels (7.1).
            return 8
        elif b_index > 7:
            return 0
        else:
            return b_index
    @property
    def frequency(self):
        """0 means unknown"""
        assert self.parsed_frames, "no frame parsed yet"
        # sampling_frequency_index from the fixed header (index 4).
        f_index = self._fixed_header_key[4]
        try:
            return _FREQS[f_index]
        except IndexError:
            return 0
    def parse_frame(self):
        """True if parsing was successful.
        Fails either because the frame wasn't valid or the stream ended.
        """
        try:
            return self._parse_frame()
        except BitReaderError:
            return False
    def _parse_frame(self):
        r = self._r
        # start == position of sync word
        start = r.get_position() - 12
        # adts_fixed_header
        id_ = r.bits(1)
        layer = r.bits(2)
        protection_absent = r.bits(1)
        profile = r.bits(2)
        sampling_frequency_index = r.bits(4)
        private_bit = r.bits(1)
        # TODO: if 0 we could parse program_config_element()
        channel_configuration = r.bits(3)
        original_copy = r.bits(1)
        home = r.bits(1)
        # the fixed header has to be the same for every frame in the stream
        fixed_header_key = (
            id_, layer, protection_absent, profile, sampling_frequency_index,
            private_bit, channel_configuration, original_copy, home,
        )
        if self._fixed_header_key is None:
            self._fixed_header_key = fixed_header_key
        else:
            if self._fixed_header_key != fixed_header_key:
                return False
        # adts_variable_header
        r.skip(2)  # copyright_identification_bit/start
        frame_length = r.bits(13)
        r.skip(11)  # adts_buffer_fullness
        nordbif = r.bits(2)
        # adts_variable_header end
        crc_overhead = 0
        if not protection_absent:
            # One 16-bit CRC per raw data block; doubled for multiple blocks.
            crc_overhead += (nordbif + 1) * 16
            if nordbif != 0:
                crc_overhead *= 2
        # Remaining payload bits in this frame after the headers.
        left = (frame_length * 8) - (r.get_position() - start)
        if left < 0:
            return False
        r.skip(left)
        assert r.is_aligned()
        self._payload += (left - crc_overhead) / 8
        # Each raw data block carries 1024 samples.
        self._samples += (nordbif + 1) * 1024
        self._last = r.get_position() / 8
        self.parsed_frames += 1
        return True
class ProgramConfigElement(object):
    # Fields parsed from the bitstream; None until __init__ runs.
    element_instance_tag = None
    object_type = None
    sampling_frequency_index = None
    channels = None
    def __init__(self, r):
        """Reads the program_config_element()
        Raises BitReaderError
        """
        self.element_instance_tag = r.bits(4)
        self.object_type = r.bits(2)
        self.sampling_frequency_index = r.bits(4)
        num_front_channel_elements = r.bits(4)
        num_side_channel_elements = r.bits(4)
        num_back_channel_elements = r.bits(4)
        num_lfe_channel_elements = r.bits(2)
        num_assoc_data_elements = r.bits(3)
        num_valid_cc_elements = r.bits(4)
        mono_mixdown_present = r.bits(1)
        if mono_mixdown_present == 1:
            r.skip(4)
        stereo_mixdown_present = r.bits(1)
        if stereo_mixdown_present == 1:
            r.skip(4)
        matrix_mixdown_idx_present = r.bits(1)
        if matrix_mixdown_idx_present == 1:
            r.skip(3)
        elms = num_front_channel_elements + num_side_channel_elements + \
            num_back_channel_elements
        channels = 0
        for i in xrange(elms):
            # Each element is one channel, or two if it is a channel
            # pair element (CPE).
            channels += 1
            element_is_cpe = r.bits(1)
            if element_is_cpe:
                channels += 1
            r.skip(4)
        channels += num_lfe_channel_elements
        self.channels = channels
        # Skip the element tag selects for LFE/assoc-data/CC elements.
        r.skip(4 * num_lfe_channel_elements)
        r.skip(4 * num_assoc_data_elements)
        r.skip(5 * num_valid_cc_elements)
        r.align()
        comment_field_bytes = r.bits(8)
        r.skip(8 * comment_field_bytes)
class AACError(MutagenError):
    """Raised on malformed or unrecognized AAC streams."""

    pass
class AACInfo(StreamInfo):
    """AAC stream information.

    Attributes:

    * channels -- number of audio channels
    * length -- file length in seconds, as a float
    * sample_rate -- audio sampling rate in Hz
    * bitrate -- audio bitrate, in bits per second

    The length of the stream is just a guess and might not be correct.
    """

    channels = 0
    length = 0
    sample_rate = 0
    bitrate = 0

    def __init__(self, fileobj):
        """Parse either an ADIF or ADTS stream from the file object.

        Raises AACError if no valid stream is found.
        """

        # skip id3v2 header
        start_offset = 0
        header = fileobj.read(10)
        from mutagen.id3 import BitPaddedInt
        if header.startswith(b"ID3"):
            # ID3v2 sizes are 4 sync-safe (7-bit) bytes
            size = BitPaddedInt(header[6:])
            start_offset = size + 10

        fileobj.seek(start_offset)
        adif = fileobj.read(4)
        if adif == b"ADIF":
            self._parse_adif(fileobj)
            self._type = "ADIF"
        else:
            self._parse_adts(fileobj, start_offset)
            self._type = "ADTS"

    def _parse_adif(self, fileobj):
        # ADIF carries the bitrate in its header; length is estimated
        # from the remaining file size afterwards.
        r = BitReader(fileobj)
        try:
            copyright_id_present = r.bits(1)
            if copyright_id_present:
                r.skip(72)  # copyright_id
            r.skip(1 + 1)  # original_copy, home
            bitstream_type = r.bits(1)
            self.bitrate = r.bits(23)

            # program config elements
            npce = r.bits(4)
            if bitstream_type == 0:
                r.skip(20)  # adif_buffer_fullness

            pce = ProgramConfigElement(r)
            try:
                self.sample_rate = _FREQS[pce.sampling_frequency_index]
            except IndexError:
                pass
            self.channels = pce.channels

            # other pces..
            for i in xrange(npce):
                ProgramConfigElement(r)
            r.align()
        except BitReaderError as e:
            raise AACError(e)

        # use bitrate + data size to guess length
        start = fileobj.tell()
        fileobj.seek(0, 2)
        length = fileobj.tell() - start
        if self.bitrate != 0:
            self.length = (8.0 * length) / self.bitrate

    def _parse_adts(self, fileobj, start_offset):
        max_initial_read = 512
        max_resync_read = 10
        max_sync_tries = 10

        frames_max = 100
        frames_needed = 3

        # Try up to X times to find a sync word and read up to Y frames.
        # If more than Z frames are valid we assume a valid stream
        offset = start_offset
        for i in xrange(max_sync_tries):
            fileobj.seek(offset)
            s = _ADTSStream.find_stream(fileobj, max_initial_read)
            if s is None:
                raise AACError("sync not found")
            # start right after the last found offset
            offset += s.offset + 1

            for i in xrange(frames_max):
                if not s.parse_frame():
                    break
                if not s.sync(max_resync_read):
                    break

            if s.parsed_frames >= frames_needed:
                break
        else:
            raise AACError(
                "no valid stream found (only %d frames)" % s.parsed_frames)

        self.sample_rate = s.frequency
        self.channels = s.channels
        self.bitrate = s.bitrate

        # size from stream start to end of file
        fileobj.seek(0, 2)
        stream_size = fileobj.tell() - (offset + s.offset)
        # approx
        self.length = float(s.samples * stream_size) / (s.size * s.frequency)

    def pprint(self):
        return "AAC (%s), %d Hz, %.2f seconds, %d channel(s), %d bps" % (
            self._type, self.sample_rate, self.length, self.channels,
            self.bitrate)
class AAC(FileType):
    """Load ADTS or ADIF streams containing AAC.

    Tagging is not supported.
    Use the ID3/APEv2 classes directly instead.
    """

    _mimes = ["audio/x-aac"]

    def load(self, filename):
        """Read stream information from the given file."""

        self.filename = filename
        with open(filename, "rb") as fileobj:
            self.info = AACInfo(fileobj)

    @staticmethod
    def score(filename, fileobj, header):
        # One point for a known extension, one more for an ADIF marker.
        filename = filename.lower()
        has_ext = endswith(filename, ".aac")
        has_ext = has_ext or endswith(filename, ".adts")
        has_ext = has_ext or endswith(filename, ".adif")
        return has_ext + (b"ADIF" in header)
# Module-level aliases used by the generic mutagen loader API.
Open = AAC
error = AACError

__all__ = ["AAC", "Open"]

View file

@ -1,362 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright (C) 2014 Evan Purkhiser
# 2014 Ben Ockmore
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of version 2 of the GNU General Public License as
# published by the Free Software Foundation.
"""AIFF audio stream information and tags."""
# NOTE from Ben Ockmore - according to the Py3k migration guidelines, AIFF
# chunk keys should be unicode in Py3k, and unicode or bytes in Py2k (ASCII).
# To make this easier, chunk keys should be stored internally as unicode.
import struct
from struct import pack
from ._compat import endswith, text_type, PY3
from mutagen import StreamInfo, FileType
from mutagen.id3 import ID3
from mutagen.id3._util import error as ID3Error
from mutagen._util import insert_bytes, delete_bytes, MutagenError
__all__ = ["AIFF", "Open", "delete"]
class error(MutagenError, RuntimeError):
    """Base error for the AIFF module."""

    pass
class InvalidChunk(error, IOError):
    """Raised when an IFF chunk header is truncated or malformed."""

    pass
# based on stdlib's aifc
# Largest IEEE 754 double; used by read_float() as a stand-in for the
# infinity/NaN encodings of the 80-bit extended float format.
_HUGE_VAL = 1.79769313486231e+308
def is_valid_chunk_id(id):
    """Return True if the value is a usable IFF chunk identifier.

    A valid ID is 1-4 printable ASCII characters (0x20-0x7E).  Accepts
    unicode; on Python 2 also accepts bytes (non-ASCII bytes are invalid).

    Raises TypeError on Python 3 if a non-str value is passed.
    """

    if not isinstance(id, text_type):
        if PY3:
            raise TypeError("AIFF chunk must be unicode")
        try:
            id = id.decode('ascii')
        except UnicodeDecodeError:
            return False

    # min()/max() raise ValueError on an empty sequence, so an empty ID
    # must be rejected before the character-range checks.
    if not (1 <= len(id) <= 4):
        return False
    return (min(id) >= u' ') and (max(id) <= u'~')
def read_float(data):  # 10 bytes
    """Decode a big-endian 80-bit extended-precision float.

    This is the sample-rate encoding used by the AIFF COMM chunk; the
    logic follows the stdlib ``aifc`` module.  Infinity/NaN encodings
    are clamped to a huge finite value.
    """

    exponent, hi_mantissa, lo_mantissa = struct.unpack('>hLL', data)
    sign = -1 if exponent < 0 else 1
    if sign < 0:
        exponent += 0x8000

    if exponent == hi_mantissa == lo_mantissa == 0:
        magnitude = 0.0
    elif exponent == 0x7FFF:
        # all-ones exponent marks infinity/NaN
        magnitude = _HUGE_VAL
    else:
        mantissa = hi_mantissa * 0x100000000 + lo_mantissa
        magnitude = mantissa * pow(2.0, (exponent - 16383) - 63)
    return sign * magnitude
class IFFChunk(object):
    """Representation of a single IFF chunk"""

    # Chunk headers are 8 bytes long (4 for ID and 4 for the size)
    HEADER_SIZE = 8

    def __init__(self, fileobj, parent_chunk=None):
        """Read a chunk header at the file's current position.

        Raises InvalidChunk if the header is truncated or the ID is
        not a valid chunk identifier.
        """

        self.__fileobj = fileobj
        self.parent_chunk = parent_chunk
        self.offset = fileobj.tell()
        header = fileobj.read(self.HEADER_SIZE)
        if len(header) < self.HEADER_SIZE:
            raise InvalidChunk()
        self.id, self.data_size = struct.unpack('>4si', header)
        if not isinstance(self.id, text_type):
            self.id = self.id.decode('ascii')
        if not is_valid_chunk_id(self.id):
            raise InvalidChunk()
        self.size = self.HEADER_SIZE + self.data_size
        self.data_offset = fileobj.tell()
        # payload is loaded lazily by read()
        self.data = None

    def read(self):
        """Read the chunks data"""

        self.__fileobj.seek(self.data_offset)
        self.data = self.__fileobj.read(self.data_size)

    def delete(self):
        """Removes the chunk from the file"""

        delete_bytes(self.__fileobj, self.size, self.offset)
        if self.parent_chunk is not None:
            # shrink the enclosing chunk accordingly
            self.parent_chunk.resize(self.parent_chunk.data_size - self.size)

    def resize(self, data_size):
        """Update the size of the chunk"""

        # rewrite only the 4-byte size field that follows the chunk ID
        self.__fileobj.seek(self.offset + 4)
        self.__fileobj.write(pack('>I', data_size))
        if self.parent_chunk is not None:
            size_diff = self.data_size - data_size
            self.parent_chunk.resize(self.parent_chunk.data_size - size_diff)
        self.data_size = data_size
        self.size = data_size + self.HEADER_SIZE
class IFFFile(object):
    """Representation of a IFF file"""

    def __init__(self, fileobj):
        self.__fileobj = fileobj
        # mapping of chunk ID (stripped of padding) -> IFFChunk
        self.__chunks = {}

        # AIFF Files always start with the FORM chunk which contains a 4 byte
        # ID before the start of other chunks
        fileobj.seek(0)
        self.__chunks[u'FORM'] = IFFChunk(fileobj)

        # Skip past the 4 byte FORM id
        fileobj.seek(IFFChunk.HEADER_SIZE + 4)

        # Where the next chunk can be located. We need to keep track of this
        # since the size indicated in the FORM header may not match up with the
        # offset determined from the size of the last chunk in the file
        self.__next_offset = fileobj.tell()

        # Load all of the chunks
        while True:
            try:
                chunk = IFFChunk(fileobj, self[u'FORM'])
            except InvalidChunk:
                break
            self.__chunks[chunk.id.strip()] = chunk

            # Calculate the location of the next chunk,
            # considering the pad byte
            self.__next_offset = chunk.offset + chunk.size
            self.__next_offset += self.__next_offset % 2
            fileobj.seek(self.__next_offset)

    def __contains__(self, id_):
        """Check if the IFF file contains a specific chunk"""

        if not isinstance(id_, text_type):
            id_ = id_.decode('ascii')
        if not is_valid_chunk_id(id_):
            raise KeyError("AIFF key must be four ASCII characters.")
        return id_ in self.__chunks

    def __getitem__(self, id_):
        """Get a chunk from the IFF file"""

        if not isinstance(id_, text_type):
            id_ = id_.decode('ascii')
        if not is_valid_chunk_id(id_):
            raise KeyError("AIFF key must be four ASCII characters.")
        try:
            return self.__chunks[id_]
        except KeyError:
            raise KeyError(
                "%r has no %r chunk" % (self.__fileobj.name, id_))

    def __delitem__(self, id_):
        """Remove a chunk from the IFF file"""

        if not isinstance(id_, text_type):
            id_ = id_.decode('ascii')
        if not is_valid_chunk_id(id_):
            raise KeyError("AIFF key must be four ASCII characters.")
        # removes the chunk's bytes from the file, not just the mapping
        self.__chunks.pop(id_).delete()

    def insert_chunk(self, id_):
        """Insert a new chunk at the end of the IFF file"""

        if not isinstance(id_, text_type):
            id_ = id_.decode('ascii')
        if not is_valid_chunk_id(id_):
            raise KeyError("AIFF key must be four ASCII characters.")

        # write an empty chunk header at the next free offset, then
        # re-read it so bookkeeping matches the on-disk state
        self.__fileobj.seek(self.__next_offset)
        self.__fileobj.write(pack('>4si', id_.ljust(4).encode('ascii'), 0))
        self.__fileobj.seek(self.__next_offset)
        chunk = IFFChunk(self.__fileobj, self[u'FORM'])
        self[u'FORM'].resize(self[u'FORM'].data_size + chunk.size)

        self.__chunks[id_] = chunk
        self.__next_offset = chunk.offset + chunk.size
class AIFFInfo(StreamInfo):
    """AIFF audio stream information.

    Information is parsed from the COMM chunk of the AIFF file

    Useful attributes:

    * length -- audio length, in seconds
    * bitrate -- audio bitrate, in bits per second
    * channels -- The number of audio channels
    * sample_rate -- audio sample rate, in Hz
    * sample_size -- The audio sample size
    """

    length = 0
    bitrate = 0
    channels = 0
    sample_rate = 0

    def __init__(self, fileobj):
        """Parse the COMM chunk; raises error if it is missing."""

        iff = IFFFile(fileobj)
        try:
            common_chunk = iff[u'COMM']
        except KeyError as e:
            raise error(str(e))
        common_chunk.read()
        # COMM layout: int16 channels, uint32 frame count, int16 sample
        # size in bits, then an 80-bit extended-float sample rate
        info = struct.unpack('>hLh10s', common_chunk.data[:18])
        channels, frame_count, sample_size, sample_rate = info
        self.sample_rate = int(read_float(sample_rate))
        self.sample_size = sample_size
        self.channels = channels
        self.bitrate = channels * sample_size * self.sample_rate
        self.length = frame_count / float(self.sample_rate)

    def pprint(self):
        return "%d channel AIFF @ %d bps, %s Hz, %.2f seconds" % (
            self.channels, self.bitrate, self.sample_rate, self.length)
class _IFFID3(ID3):
    """A AIFF file with ID3v2 tags"""

    def _load_header(self):
        # Position the shared file object at the ID3 chunk payload
        # before delegating to the normal ID3 header parsing.
        try:
            self._fileobj.seek(IFFFile(self._fileobj)[u'ID3'].data_offset)
        except (InvalidChunk, KeyError):
            raise ID3Error()
        super(_IFFID3, self)._load_header()

    def save(self, filename=None, v2_version=4, v23_sep='/'):
        """Save ID3v2 data to the AIFF file"""

        framedata = self._prepare_framedata(v2_version, v23_sep)
        framesize = len(framedata)
        if filename is None:
            filename = self.filename
        # Unlike the parent ID3.save method, we won't save to a blank file
        # since we would have to construct a empty AIFF file
        fileobj = open(filename, 'rb+')
        iff_file = IFFFile(fileobj)
        try:
            if u'ID3' not in iff_file:
                iff_file.insert_chunk(u'ID3')
            chunk = iff_file[u'ID3']
            fileobj.seek(chunk.data_offset)
            header = fileobj.read(10)
            header = self._prepare_id3_header(header, framesize, v2_version)
            header, new_size, _ = header
            # pad the frame data up to the allocated tag size
            data = header + framedata + (b'\x00' * (new_size - framesize))
            # Include ID3 header size in 'new_size' calculation
            new_size += 10
            # Expand the chunk if necessary, including pad byte
            if new_size > chunk.size:
                insert_at = chunk.offset + chunk.size
                insert_size = new_size - chunk.size + new_size % 2
                insert_bytes(fileobj, insert_size, insert_at)
                chunk.resize(new_size)
            fileobj.seek(chunk.data_offset)
            fileobj.write(data)
        finally:
            fileobj.close()

    def delete(self, filename=None):
        """Completely removes the ID3 chunk from the AIFF file"""

        if filename is None:
            filename = self.filename
        # delegates to the module-level delete()
        delete(filename)
        self.clear()
def delete(filename):
    """Completely removes the ID3 chunk from the AIFF file"""

    with open(filename, "rb+") as fileobj:
        iff = IFFFile(fileobj)
        try:
            del iff[u'ID3']
        except KeyError:
            # no ID3 chunk present; nothing to remove
            pass
class AIFF(FileType):
    """An AIFF audio file.

    :ivar info: :class:`AIFFInfo`
    :ivar tags: :class:`ID3`
    """

    _mimes = ["audio/aiff", "audio/x-aiff"]

    @staticmethod
    def score(filename, fileobj, header):
        # Use text suffixes to match the lowercased text filename; the
        # bytes suffixes previously used here fail under Python 3 and
        # were inconsistent with the other format modules.
        filename = filename.lower()
        return (header.startswith(b"FORM") * 2 + endswith(filename, ".aif") +
                endswith(filename, ".aiff") + endswith(filename, ".aifc"))

    def add_tags(self):
        """Add an empty ID3 tag to the file."""

        if self.tags is None:
            self.tags = _IFFID3()
        else:
            raise error("an ID3 tag already exists")

    def load(self, filename, **kwargs):
        """Load stream and tag information from a file."""

        self.filename = filename
        try:
            self.tags = _IFFID3(filename, **kwargs)
        except ID3Error:
            # no ID3 chunk; the file simply has no tags
            self.tags = None
        # 'with' guarantees the file is closed and avoids the previous
        # bug where a failed open() raised NameError from the finally
        # clause instead of the real IOError.
        with open(filename, "rb") as fileobj:
            self.info = AIFFInfo(fileobj)
Open = AIFF

View file

@ -1,714 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright (C) 2005 Joe Wreschnig
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
"""APEv2 reading and writing.
The APEv2 format is most commonly used with Musepack files, but is
also the format of choice for WavPack and other formats. Some MP3s
also have APEv2 tags, but this can cause problems with many MP3
decoders and taggers.
APEv2 tags, like Vorbis comments, are freeform key=value pairs. APEv2
keys can be any ASCII string with characters from 0x20 to 0x7E,
between 2 and 255 characters long. Keys are case-sensitive, but
readers are recommended to be case insensitive, and it is forbidden to
multiple keys which differ only in case. Keys are usually stored
title-cased (e.g. 'Artist' rather than 'artist').
APEv2 values are slightly more structured than Vorbis comments; values
are flagged as one of text, binary, or an external reference (usually
a URI).
Based off the format specification found at
http://wiki.hydrogenaudio.org/index.php?title=APEv2_specification.
"""
__all__ = ["APEv2", "APEv2File", "Open", "delete"]
import sys
import struct
from collections import MutableSequence
from ._compat import (cBytesIO, PY3, text_type, PY2, reraise, swap_to_string,
xrange)
from mutagen import Metadata, FileType, StreamInfo
from mutagen._util import (DictMixin, cdata, delete_bytes, total_ordering,
MutagenError)
def is_valid_apev2_key(key):
    """Return True if ``key`` may be used as an APEv2 tag key.

    Valid keys are 2-255 printable ASCII characters (0x20-0x7E) that do
    not collide with the reserved markers OggS/TAG/ID3/MP+.
    """

    if not isinstance(key, text_type):
        if PY3:
            raise TypeError("APEv2 key must be str")
        try:
            key = key.decode('ascii')
        except UnicodeDecodeError:
            return False

    # PY26 - Change to set literal syntax (since set is faster than list here)
    if not (2 <= len(key) <= 255):
        return False
    if min(key) < u' ' or max(key) > u'~':
        return False
    return key not in [u"OggS", u"TAG", u"ID3", u"MP+"]
# There are three different kinds of APE tag values.
# "0: Item contains text information coded in UTF-8
#  1: Item contains binary information
#  2: Item is a locator of external stored information [e.g. URL]
#  3: reserved"
TEXT, BINARY, EXTERNAL = range(3)

# Bit positions in the 32-bit flags field of the tag header/footer.
HAS_HEADER = 1 << 31
HAS_NO_FOOTER = 1 << 30
IS_HEADER = 1 << 29
class error(IOError, MutagenError):
    """Base error for the APEv2 module."""

    pass
class APENoHeaderError(error, ValueError):
    """Raised when a file has no APEv2 tag."""

    pass
class APEUnsupportedVersionError(error, ValueError):
    """Raised for APE tag versions this module cannot handle."""

    pass
class APEBadItemError(error, ValueError):
    """Raised when a tag item is malformed (bad type or encoding)."""

    pass
class _APEv2Data(object):
    """Locates and reads the raw APEv2 tag inside a file.

    All offsets are absolute file positions, or None if the part was
    not found.
    """

    # Store offsets of the important parts of the file.
    start = header = data = footer = end = None

    # Footer or header; seek here and read 32 to get version/size/items/flags
    metadata = None

    # Actual tag data
    tag = None

    version = None
    size = None
    items = None
    flags = 0

    # The tag is at the start rather than the end. A tag at both
    # the start and end of the file (i.e. the tag is the whole file)
    # is not considered to be at the start.
    is_at_start = False

    def __init__(self, fileobj):
        self.__find_metadata(fileobj)
        if self.header is None:
            self.metadata = self.footer
        elif self.footer is None:
            self.metadata = self.header
        else:
            self.metadata = max(self.header, self.footer)
        if self.metadata is None:
            return
        self.__fill_missing(fileobj)
        self.__fix_brokenness(fileobj)
        if self.data is not None:
            fileobj.seek(self.data)
            self.tag = fileobj.read(self.size)

    def __find_metadata(self, fileobj):
        # Try to find a header or footer.

        # Check for a simple footer.
        try:
            fileobj.seek(-32, 2)
        except IOError:
            fileobj.seek(0, 2)
            return
        if fileobj.read(8) == b"APETAGEX":
            fileobj.seek(-8, 1)
            self.footer = self.metadata = fileobj.tell()
            return

        # Check for an APEv2 tag followed by an ID3v1 tag at the end.
        try:
            fileobj.seek(-128, 2)
            if fileobj.read(3) == b"TAG":
                fileobj.seek(-35, 1)  # "TAG" + header length
                if fileobj.read(8) == b"APETAGEX":
                    fileobj.seek(-8, 1)
                    self.footer = fileobj.tell()
                    return
                # ID3v1 tag at the end, maybe preceded by Lyrics3v2.
                # (http://www.id3.org/lyrics3200.html)
                # (header length - "APETAGEX") - "LYRICS200"
                fileobj.seek(15, 1)
                if fileobj.read(9) == b'LYRICS200':
                    fileobj.seek(-15, 1)  # "LYRICS200" + size tag
                    try:
                        offset = int(fileobj.read(6))
                    except ValueError:
                        raise IOError
                    fileobj.seek(-32 - offset - 6, 1)
                    if fileobj.read(8) == b"APETAGEX":
                        fileobj.seek(-8, 1)
                        self.footer = fileobj.tell()
                        return
        except IOError:
            pass

        # Check for a tag at the start.
        fileobj.seek(0, 0)
        if fileobj.read(8) == b"APETAGEX":
            self.is_at_start = True
            self.header = 0

    def __fill_missing(self, fileobj):
        # Parse version/size/items/flags from whichever of header/footer
        # was found, then derive the remaining offsets.
        fileobj.seek(self.metadata + 8)
        self.version = fileobj.read(4)
        self.size = cdata.uint_le(fileobj.read(4))
        self.items = cdata.uint_le(fileobj.read(4))
        self.flags = cdata.uint_le(fileobj.read(4))
        if self.header is not None:
            self.data = self.header + 32
            # If we're reading the header, the size is the header
            # offset + the size, which includes the footer.
            self.end = self.data + self.size
            fileobj.seek(self.end - 32, 0)
            if fileobj.read(8) == b"APETAGEX":
                self.footer = self.end - 32
        elif self.footer is not None:
            self.end = self.footer + 32
            self.data = self.end - self.size
            if self.flags & HAS_HEADER:
                self.header = self.data - 32
            else:
                self.header = self.data
        else:
            raise APENoHeaderError("No APE tag found")

        # exclude the footer from size
        if self.footer is not None:
            self.size -= 32

    def __fix_brokenness(self, fileobj):
        # Fix broken tags written with PyMusepack.
        if self.header is not None:
            start = self.header
        else:
            start = self.data
        fileobj.seek(start)

        while start > 0:
            # Clean up broken writing from pre-Mutagen PyMusepack.
            # It didn't remove the first 24 bytes of header.
            try:
                fileobj.seek(-24, 1)
            except IOError:
                break
            else:
                if fileobj.read(8) == b"APETAGEX":
                    fileobj.seek(-8, 1)
                    start = fileobj.tell()
                else:
                    break
        self.start = start
class _CIDictProxy(DictMixin):
    """Case-insensitive dict proxy.

    Keys are stored lowercased internally; the casing a key was last
    set with is remembered and reported by keys().  This matches the
    APEv2 standard: keys are case-sensitive in storage, but readers
    should be case-insensitive.
    """

    def __init__(self, *args, **kwargs):
        self.__casemap = {}
        self.__dict = {}
        super(_CIDictProxy, self).__init__(*args, **kwargs)

    def __getitem__(self, key):
        return self.__dict[key.lower()]

    def __setitem__(self, key, value):
        folded = key.lower()
        self.__casemap[folded] = key
        self.__dict[folded] = value

    def __delitem__(self, key):
        folded = key.lower()
        del self.__casemap[folded]
        del self.__dict[folded]

    def keys(self):
        return [self.__casemap.get(k, k) for k in self.__dict.keys()]
class APEv2(_CIDictProxy, Metadata):
    """A file with an APEv2 tag.

    ID3v1 tags are silently ignored and overwritten.
    """

    filename = None

    def pprint(self):
        """Return tag key=value pairs in a human-readable format."""

        items = sorted(self.items())
        return u"\n".join(u"%s=%s" % (k, v.pprint()) for k, v in items)

    def load(self, filename):
        """Load tags from a filename."""

        self.filename = filename
        fileobj = open(filename, "rb")
        try:
            data = _APEv2Data(fileobj)
        finally:
            fileobj.close()
        if data.tag:
            self.clear()
            self.__parse_tag(data.tag, data.items)
        else:
            raise APENoHeaderError("No APE tag found")

    def __parse_tag(self, tag, count):
        # Parse 'count' items from the raw tag data.
        fileobj = cBytesIO(tag)
        for i in xrange(count):
            size_data = fileobj.read(4)
            # someone writes wrong item counts
            if not size_data:
                break
            size = cdata.uint_le(size_data)
            flags = cdata.uint_le(fileobj.read(4))
            # Bits 1 and 2 bits are flags, 0-3
            # Bit 0 is read/write flag, ignored
            kind = (flags & 6) >> 1
            if kind == 3:
                raise APEBadItemError("value type must be 0, 1, or 2")
            # the key is NUL-terminated; read it a byte at a time
            key = value = fileobj.read(1)
            while key[-1:] != b'\x00' and value:
                value = fileobj.read(1)
                key += value
            if key[-1:] == b"\x00":
                key = key[:-1]
            if PY3:
                try:
                    key = key.decode("ascii")
                except UnicodeError as err:
                    reraise(APEBadItemError, err, sys.exc_info()[2])
            value = fileobj.read(size)
            value = _get_value_type(kind)._new(value)
            self[key] = value

    def __getitem__(self, key):
        if not is_valid_apev2_key(key):
            raise KeyError("%r is not a valid APEv2 key" % key)
        if PY2:
            key = key.encode('ascii')
        return super(APEv2, self).__getitem__(key)

    def __delitem__(self, key):
        if not is_valid_apev2_key(key):
            raise KeyError("%r is not a valid APEv2 key" % key)
        if PY2:
            key = key.encode('ascii')
        super(APEv2, self).__delitem__(key)

    def __setitem__(self, key, value):
        """'Magic' value setter.

        This function tries to guess at what kind of value you want to
        store. If you pass in a valid UTF-8 or Unicode string, it
        treats it as a text value. If you pass in a list, it treats it
        as a list of string/Unicode values. If you pass in a string
        that is not valid UTF-8, it assumes it is a binary value.

        Python 3: all bytes will be assumed to be a byte value, even
        if they are valid utf-8.

        If you need to force a specific type of value (e.g. binary
        data that also happens to be valid UTF-8, or an external
        reference), use the APEValue factory and set the value to the
        result of that::

            from mutagen.apev2 import APEValue, EXTERNAL
            tag['Website'] = APEValue('http://example.org', EXTERNAL)
        """

        if not is_valid_apev2_key(key):
            raise KeyError("%r is not a valid APEv2 key" % key)
        if PY2:
            key = key.encode('ascii')
        if not isinstance(value, _APEValue):
            # let's guess at the content if we're not already a value...
            if isinstance(value, text_type):
                # unicode? we've got to be text.
                value = APEValue(value, TEXT)
            elif isinstance(value, list):
                items = []
                for v in value:
                    if not isinstance(v, text_type):
                        if PY3:
                            raise TypeError("item in list not str")
                        v = v.decode("utf-8")
                    items.append(v)
                # list? text.
                value = APEValue(u"\0".join(items), TEXT)
            else:
                if PY3:
                    value = APEValue(value, BINARY)
                else:
                    try:
                        value.decode("utf-8")
                    except UnicodeError:
                        # invalid UTF8 text, probably binary
                        value = APEValue(value, BINARY)
                    else:
                        # valid UTF8, probably text
                        value = APEValue(value, TEXT)
        super(APEv2, self).__setitem__(key, value)

    def save(self, filename=None):
        """Save changes to a file.

        If no filename is given, the one most recently loaded is used.

        Tags are always written at the end of the file, and include
        a header and a footer.
        """

        filename = filename or self.filename
        try:
            fileobj = open(filename, "r+b")
        except IOError:
            fileobj = open(filename, "w+b")
        data = _APEv2Data(fileobj)

        if data.is_at_start:
            delete_bytes(fileobj, data.end - data.start, data.start)
        elif data.start is not None:
            fileobj.seek(data.start)
            # Delete an ID3v1 tag if present, too.
            fileobj.truncate()
        fileobj.seek(0, 2)

        tags = []
        for key, value in self.items():
            # Packed format for an item:
            # 4B: Value length
            # 4B: Value type
            # Key name
            # 1B: Null
            # Key value
            value_data = value._write()
            if not isinstance(key, bytes):
                key = key.encode("utf-8")
            tag_data = bytearray()
            tag_data += struct.pack("<2I", len(value_data), value.kind << 1)
            tag_data += key + b"\0" + value_data
            tags.append(bytes(tag_data))

        # "APE tags items should be sorted ascending by size... This is
        # not a MUST, but STRONGLY recommended. Actually the items should
        # be sorted by importance/byte, but this is not feasible."
        tags.sort(key=len)
        num_tags = len(tags)
        tags = b"".join(tags)

        header = bytearray(b"APETAGEX")
        # version, tag size, item count, flags
        header += struct.pack("<4I", 2000, len(tags) + 32, num_tags,
                              HAS_HEADER | IS_HEADER)
        header += b"\0" * 8
        fileobj.write(header)
        fileobj.write(tags)

        footer = bytearray(b"APETAGEX")
        footer += struct.pack("<4I", 2000, len(tags) + 32, num_tags,
                              HAS_HEADER)
        footer += b"\0" * 8
        fileobj.write(footer)
        fileobj.close()

    def delete(self, filename=None):
        """Remove tags from a file."""

        filename = filename or self.filename
        fileobj = open(filename, "r+b")
        try:
            data = _APEv2Data(fileobj)
            if data.start is not None and data.size is not None:
                delete_bytes(fileobj, data.end - data.start, data.start)
        finally:
            fileobj.close()
        self.clear()
Open = APEv2
def delete(filename):
    """Remove tags from a file."""

    # A file without an APEv2 tag is already in the desired state.
    try:
        APEv2(filename).delete()
    except APENoHeaderError:
        pass
def _get_value_type(kind):
    """Returns a _APEValue subclass or raises ValueError"""

    try:
        return {TEXT: APETextValue,
                BINARY: APEBinaryValue,
                EXTERNAL: APEExtValue}[kind]
    except KeyError:
        raise ValueError("unknown kind %r" % kind)
def APEValue(value, kind):
    """APEv2 tag value factory.

    Use this if you need to specify the value's type manually.  Binary
    and text data are automatically detected by APEv2.__setitem__.
    """

    try:
        cls = _get_value_type(kind)
    except ValueError:
        raise ValueError("kind must be TEXT, BINARY, or EXTERNAL")
    return cls(value)
class _APEValue(object):
    """Base class for APEv2 tag values.

    Subclasses set ``kind`` and implement the _parse/_write/_validate
    hooks for their wire format.
    """

    kind = None
    value = None

    def __init__(self, value, kind=None):
        # kind kwarg is for backwards compat
        if kind is not None and kind != self.kind:
            raise ValueError
        self.value = self._validate(value)

    @classmethod
    def _new(cls, data):
        # Construct from raw tag data, bypassing _validate.
        instance = cls.__new__(cls)
        instance._parse(data)
        return instance

    def _parse(self, data):
        """Sets value or raises APEBadItemError"""

        raise NotImplementedError

    def _write(self):
        """Returns bytes"""

        raise NotImplementedError

    def _validate(self, value):
        """Returns validated value or raises TypeError/ValueError"""

        raise NotImplementedError

    def __repr__(self):
        return "%s(%r, %d)" % (type(self).__name__, self.value, self.kind)
@swap_to_string
@total_ordering
class _APEUtf8Value(_APEValue):
    """Base class for APEv2 values stored as UTF-8 text."""

    def _parse(self, data):
        try:
            self.value = data.decode("utf-8")
        except UnicodeDecodeError as e:
            reraise(APEBadItemError, e, sys.exc_info()[2])

    def _validate(self, value):
        # Accept str (unicode); on Python 2 also accept UTF-8 bytes.
        if not isinstance(value, text_type):
            if PY3:
                raise TypeError("value not str")
            else:
                value = value.decode("utf-8")
        return value

    def _write(self):
        return self.value.encode("utf-8")

    def __len__(self):
        return len(self.value)

    def __bytes__(self):
        return self._write()

    def __eq__(self, other):
        return self.value == other

    def __lt__(self, other):
        return self.value < other

    def __str__(self):
        return self.value
class APETextValue(_APEUtf8Value, MutableSequence):
    """An APEv2 text value.

    Text values are Unicode/UTF-8 strings. They can be accessed like
    strings (with a null separating the values), or arrays of strings.
    """

    kind = TEXT

    @staticmethod
    def _coerce_text(value):
        # str (unicode) required; on Python 2, UTF-8 bytes are accepted.
        if not isinstance(value, text_type):
            if PY3:
                raise TypeError("value not str")
            value = value.decode("utf-8")
        return value

    def __iter__(self):
        """Iterate over the strings of the value (not the characters)"""

        return iter(self.value.split(u"\0"))

    def __getitem__(self, index):
        return self.value.split(u"\0")[index]

    def __len__(self):
        return self.value.count(u"\0") + 1

    def __setitem__(self, index, value):
        parts = list(self)
        parts[index] = self._coerce_text(value)
        self.value = u"\0".join(parts)

    def insert(self, index, value):
        parts = list(self)
        parts.insert(index, self._coerce_text(value))
        self.value = u"\0".join(parts)

    def __delitem__(self, index):
        parts = list(self)
        del parts[index]
        self.value = u"\0".join(parts)

    def pprint(self):
        return u" / ".join(self)
@swap_to_string
@total_ordering
class APEBinaryValue(_APEValue):
    """An APEv2 binary value."""

    kind = BINARY

    def _parse(self, data):
        # binary data is stored verbatim
        self.value = data

    def _write(self):
        return self.value

    def _validate(self, value):
        if not isinstance(value, bytes):
            raise TypeError("value not bytes")
        return bytes(value)

    def __len__(self):
        return len(self.value)

    def __bytes__(self):
        return self._write()

    def __eq__(self, other):
        return self.value == other

    def __lt__(self, other):
        return self.value < other

    def pprint(self):
        return u"[%d bytes]" % len(self)
class APEExtValue(_APEUtf8Value):
    """An APEv2 external value.

    External values are usually URI or IRI strings.
    """

    kind = EXTERNAL

    def pprint(self):
        return u"[External] %s" % self.value
class APEv2File(FileType):
    """Generic wrapper for files whose only known tag is APEv2."""

    class _Info(StreamInfo):
        # No stream information is available for an unknown format.
        length = 0
        bitrate = 0

        def __init__(self, fileobj):
            pass

        @staticmethod
        def pprint():
            return u"Unknown format with APEv2 tag."

    def load(self, filename):
        self.filename = filename
        self.info = self._Info(open(filename, "rb"))
        try:
            self.tags = APEv2(filename)
        except APENoHeaderError:
            self.tags = None

    def add_tags(self):
        if self.tags is None:
            self.tags = APEv2()
        else:
            raise ValueError("%r already has tags: %r" % (self, self.tags))

    @staticmethod
    def score(filename, fileobj, header):
        # An APEv2 footer near the end scores up; an ID3 header scores
        # down so the ID3-aware types win for ID3-tagged files.
        try:
            fileobj.seek(-160, 2)
        except IOError:
            fileobj.seek(0)
        footer = fileobj.read()
        return ((b"APETAGEX" in footer) - header.startswith(b"ID3"))

View file

@ -1,862 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright (C) 2005-2006 Joe Wreschnig
# Copyright (C) 2006-2007 Lukas Lalinsky
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
"""Read and write ASF (Window Media Audio) files."""
__all__ = ["ASF", "Open"]
import sys
import struct
from mutagen import FileType, Metadata, StreamInfo
from mutagen._util import (insert_bytes, delete_bytes, DictMixin,
total_ordering, MutagenError)
from ._compat import swap_to_string, text_type, PY2, string_types, reraise, \
xrange, long_, PY3
class error(IOError, MutagenError):
    """Base error for the ASF module."""

    pass
class ASFError(error):
    """Generic ASF parsing/encoding error."""

    pass
class ASFHeaderError(error):
    """Raised when the ASF header is missing or malformed."""

    pass
class ASFInfo(StreamInfo):
    """ASF stream information."""

    def __init__(self):
        # Filled in while the ASF header objects are parsed.
        self.length = 0.0
        self.sample_rate = 0
        self.bitrate = 0
        self.channels = 0

    def pprint(self):
        return ("Windows Media Audio %d bps, %s Hz, %d channels, "
                "%.2f seconds" % (self.bitrate, self.sample_rate,
                                  self.channels, self.length))
class ASFTags(list, DictMixin, Metadata):
    """Dictionary containing ASF attributes.

    Stored internally as a list of (key, attribute) pairs so multiple
    values per key keep their order; the dict-style accessors operate
    on all values for a key at once.
    """

    def pprint(self):
        return "\n".join("%s=%s" % (k, v) for k, v in self)

    def __getitem__(self, key):
        """A list of values for the key.

        This is a copy, so comment['title'].append('a title') will not
        work.
        """

        # PY3 only
        if isinstance(key, slice):
            return list.__getitem__(self, key)
        values = [value for (k, value) in self if k == key]
        if not values:
            raise KeyError(key)
        else:
            return values

    def __delitem__(self, key):
        """Delete all values associated with the key."""

        # PY3 only
        if isinstance(key, slice):
            return list.__delitem__(self, key)
        to_delete = [x for x in self if x[0] == key]
        if not to_delete:
            raise KeyError(key)
        else:
            for k in to_delete:
                self.remove(k)

    def __contains__(self, key):
        """Return true if the key has any values."""

        for k, value in self:
            if k == key:
                return True
        else:
            return False

    def __setitem__(self, key, values):
        """Set a key's value or values.

        Setting a value overwrites all old ones. The value may be a
        list of Unicode or UTF-8 strings, or a single Unicode or UTF-8
        string.
        """

        # PY3 only
        if isinstance(key, slice):
            return list.__setitem__(self, key, values)
        if not isinstance(values, list):
            values = [values]
        to_append = []
        for value in values:
            if not isinstance(value, ASFBaseAttribute):
                # Coerce plain Python values to matching attribute types.
                # bool is checked before int since bool subclasses int.
                if isinstance(value, string_types):
                    value = ASFUnicodeAttribute(value)
                elif PY3 and isinstance(value, bytes):
                    value = ASFByteArrayAttribute(value)
                elif isinstance(value, bool):
                    value = ASFBoolAttribute(value)
                elif isinstance(value, int):
                    value = ASFDWordAttribute(value)
                elif isinstance(value, long_):
                    value = ASFQWordAttribute(value)
                else:
                    raise TypeError("Invalid type %r" % type(value))
            to_append.append((key, value))
        try:
            del(self[key])
        except KeyError:
            pass
        self.extend(to_append)

    def keys(self):
        """Return all keys in the comment."""

        return self and set(next(iter(zip(*self))))

    def as_dict(self):
        """Return a copy of the comment data in a real dict."""

        d = {}
        for key, value in self:
            d.setdefault(key, []).append(value)
        return d
class ASFBaseAttribute(object):
    """Generic attribute."""

    # Numeric attribute type id; set by each subclass.
    TYPE = None

    def __init__(self, value=None, data=None, language=None,
                 stream=None, **kwargs):
        self.language = language
        self.stream = stream
        if data:
            # construct from serialized bytes
            self.value = self.parse(data, **kwargs)
        else:
            if value is None:
                # we used to support not passing any args and instead assign
                # them later, keep that working..
                self.value = None
            else:
                self.value = self._validate(value)

    def _validate(self, value):
        """Raises TypeError or ValueError in case the user supplied value
        isn't valid.
        """

        return value

    def data_size(self):
        raise NotImplementedError

    def __repr__(self):
        name = "%s(%r" % (type(self).__name__, self.value)
        if self.language:
            name += ", language=%d" % self.language
        if self.stream:
            name += ", stream=%d" % self.stream
        name += ")"
        return name

    def render(self, name):
        # Packs: name length, UTF-16-LE name + NUL, type, data length, data.
        name = name.encode("utf-16-le") + b"\x00\x00"
        data = self._render()
        return (struct.pack("<H", len(name)) + name +
                struct.pack("<HH", self.TYPE, len(data)) + data)

    def render_m(self, name):
        # Variant layout with a leading reserved field and stream number.
        name = name.encode("utf-16-le") + b"\x00\x00"
        if self.TYPE == 2:
            data = self._render(dword=False)
        else:
            data = self._render()
        return (struct.pack("<HHHHI", 0, self.stream or 0, len(name),
                            self.TYPE, len(data)) + name + data)

    def render_ml(self, name):
        # Variant layout that also carries the language index.
        name = name.encode("utf-16-le") + b"\x00\x00"
        if self.TYPE == 2:
            data = self._render(dword=False)
        else:
            data = self._render()
        return (struct.pack("<HHHHI", self.language or 0, self.stream or 0,
                            len(name), self.TYPE, len(data)) + name + data)
@swap_to_string
@total_ordering
class ASFUnicodeAttribute(ASFBaseAttribute):
    """Unicode string attribute.

    Stored on disk as NUL-terminated UTF-16-LE text.
    """
    TYPE = 0x0000
    def parse(self, data):
        try:
            return data.decode("utf-16-le").strip("\x00")
        except UnicodeDecodeError as e:
            # Surface malformed UTF-16 as an ASFError, keeping the original
            # traceback for context.
            reraise(ASFError, e, sys.exc_info()[2])
    def _validate(self, value):
        if not isinstance(value, text_type):
            if PY2:
                # Python 2: accept UTF-8 encoded byte strings as well.
                return value.decode("utf-8")
            else:
                raise TypeError("%r not str" % value)
        return value
    def _render(self):
        return self.value.encode("utf-16-le") + b"\x00\x00"
    def data_size(self):
        return len(self._render())
    def __bytes__(self):
        return self.value.encode("utf-16-le")
    def __str__(self):
        return self.value
    def __eq__(self, other):
        return text_type(self) == other
    def __lt__(self, other):
        return text_type(self) < other
    # Defining __eq__ sets __hash__ to None on Python 3; restore the base's.
    __hash__ = ASFBaseAttribute.__hash__
@swap_to_string
@total_ordering
class ASFByteArrayAttribute(ASFBaseAttribute):
    """Byte array attribute.

    Raw binary data; stored and rendered verbatim.
    """
    TYPE = 0x0001
    def parse(self, data):
        assert isinstance(data, bytes)
        return data
    def _render(self):
        assert isinstance(self.value, bytes)
        return self.value
    def _validate(self, value):
        if not isinstance(value, bytes):
            raise TypeError("must be bytes/str: %r" % value)
        return value
    def data_size(self):
        return len(self.value)
    def __bytes__(self):
        return self.value
    def __str__(self):
        # Binary data is rarely printable; show a short summary instead.
        return "[binary data (%d bytes)]" % len(self.value)
    def __eq__(self, other):
        return self.value == other
    def __lt__(self, other):
        return self.value < other
    # Defining __eq__ sets __hash__ to None on Python 3; restore the base's.
    __hash__ = ASFBaseAttribute.__hash__
@swap_to_string
@total_ordering
class ASFBoolAttribute(ASFBaseAttribute):
    """Bool attribute.

    Serialized as a 4-byte DWORD in the Extended Content Description
    object, but as a 2-byte WORD (dword=False) in the Metadata and
    Metadata Library objects.
    """
    TYPE = 0x0002
    def parse(self, data, dword=True):
        if dword:
            return struct.unpack("<I", data)[0] == 1
        else:
            return struct.unpack("<H", data)[0] == 1
    def _render(self, dword=True):
        if dword:
            return struct.pack("<I", bool(self.value))
        else:
            return struct.pack("<H", bool(self.value))
    def _validate(self, value):
        return bool(value)
    def data_size(self):
        # Reported size assumes the (common) 4-byte DWORD encoding.
        return 4
    def __bool__(self):
        return bool(self.value)
    def __bytes__(self):
        return text_type(self.value).encode('utf-8')
    def __str__(self):
        return text_type(self.value)
    def __eq__(self, other):
        return bool(self.value) == other
    def __lt__(self, other):
        return bool(self.value) < other
    # Defining __eq__ sets __hash__ to None on Python 3; restore the base's.
    __hash__ = ASFBaseAttribute.__hash__
@swap_to_string
@total_ordering
class ASFDWordAttribute(ASFBaseAttribute):
    """DWORD attribute.

    Unsigned 32-bit little-endian integer.
    """
    TYPE = 0x0003
    def parse(self, data):
        return struct.unpack("<L", data)[0]
    def _render(self):
        return struct.pack("<L", self.value)
    def _validate(self, value):
        value = int(value)
        # Must fit in an unsigned 32-bit integer.
        if not 0 <= value <= 2 ** 32 - 1:
            raise ValueError("Out of range")
        return value
    def data_size(self):
        return 4
    def __int__(self):
        return self.value
    def __bytes__(self):
        return text_type(self.value).encode('utf-8')
    def __str__(self):
        return text_type(self.value)
    def __eq__(self, other):
        return int(self.value) == other
    def __lt__(self, other):
        return int(self.value) < other
    # Defining __eq__ sets __hash__ to None on Python 3; restore the base's.
    __hash__ = ASFBaseAttribute.__hash__
@swap_to_string
@total_ordering
class ASFQWordAttribute(ASFBaseAttribute):
    """QWORD attribute.

    Unsigned 64-bit little-endian integer.
    """
    TYPE = 0x0004
    def parse(self, data):
        return struct.unpack("<Q", data)[0]
    def _render(self):
        return struct.pack("<Q", self.value)
    def _validate(self, value):
        value = int(value)
        # Must fit in an unsigned 64-bit integer.
        if not 0 <= value <= 2 ** 64 - 1:
            raise ValueError("Out of range")
        return value
    def data_size(self):
        return 8
    def __int__(self):
        return self.value
    def __bytes__(self):
        return text_type(self.value).encode('utf-8')
    def __str__(self):
        return text_type(self.value)
    def __eq__(self, other):
        return int(self.value) == other
    def __lt__(self, other):
        return int(self.value) < other
    # Defining __eq__ sets __hash__ to None on Python 3; restore the base's.
    __hash__ = ASFBaseAttribute.__hash__
@swap_to_string
@total_ordering
class ASFWordAttribute(ASFBaseAttribute):
    """WORD attribute.

    Unsigned 16-bit little-endian integer.
    """
    TYPE = 0x0005
    def parse(self, data):
        return struct.unpack("<H", data)[0]
    def _render(self):
        return struct.pack("<H", self.value)
    def _validate(self, value):
        value = int(value)
        # Must fit in an unsigned 16-bit integer.
        if not 0 <= value <= 2 ** 16 - 1:
            raise ValueError("Out of range")
        return value
    def data_size(self):
        return 2
    def __int__(self):
        return self.value
    def __bytes__(self):
        return text_type(self.value).encode('utf-8')
    def __str__(self):
        return text_type(self.value)
    def __eq__(self, other):
        return int(self.value) == other
    def __lt__(self, other):
        return int(self.value) < other
    # Defining __eq__ sets __hash__ to None on Python 3; restore the base's.
    __hash__ = ASFBaseAttribute.__hash__
@swap_to_string
@total_ordering
class ASFGUIDAttribute(ASFBaseAttribute):
    """GUID attribute.

    16-byte GUID value, kept as raw bytes.
    """
    TYPE = 0x0006
    def parse(self, data):
        assert isinstance(data, bytes)
        return data
    def _render(self):
        assert isinstance(self.value, bytes)
        return self.value
    def _validate(self, value):
        if not isinstance(value, bytes):
            raise TypeError("must be bytes/str: %r" % value)
        return value
    def data_size(self):
        return len(self.value)
    def __bytes__(self):
        return self.value
    def __str__(self):
        # GUID bytes are not printable text; show their repr.
        return repr(self.value)
    def __eq__(self, other):
        return self.value == other
    def __lt__(self, other):
        return self.value < other
    # Defining __eq__ sets __hash__ to None on Python 3; restore the base's.
    __hash__ = ASFBaseAttribute.__hash__
# Convenience aliases for the ASF attribute type codes, usable as the
# `kind` argument to ASFValue().
UNICODE = ASFUnicodeAttribute.TYPE
BYTEARRAY = ASFByteArrayAttribute.TYPE
BOOL = ASFBoolAttribute.TYPE
DWORD = ASFDWordAttribute.TYPE
QWORD = ASFQWordAttribute.TYPE
WORD = ASFWordAttribute.TYPE
GUID = ASFGUIDAttribute.TYPE
def ASFValue(value, kind, **kwargs):
    """Create an ASF attribute of the given type code `kind`.

    Raises ValueError for an unknown type code.
    """
    if kind not in _attribute_types:
        raise ValueError("Unknown value type %r" % kind)
    return _attribute_types[kind](value=value, **kwargs)
# Maps ASF value type codes to the attribute classes that handle them.
_attribute_types = {
    ASFUnicodeAttribute.TYPE: ASFUnicodeAttribute,
    ASFByteArrayAttribute.TYPE: ASFByteArrayAttribute,
    ASFBoolAttribute.TYPE: ASFBoolAttribute,
    ASFDWordAttribute.TYPE: ASFDWordAttribute,
    ASFQWordAttribute.TYPE: ASFQWordAttribute,
    ASFWordAttribute.TYPE: ASFWordAttribute,
    ASFGUIDAttribute.TYPE: ASFGUIDAttribute,
}
class BaseObject(object):
    """Base ASF object.

    Keeps an object's raw payload and renders it back with the standard
    24-byte object header (16-byte GUID + 8-byte little-endian size).
    """
    GUID = None
    def parse(self, asf, data, fileobj, size):
        self.data = data
    def render(self, asf):
        header = self.GUID + struct.pack("<Q", 24 + len(self.data))
        return header + self.data
class UnknownObject(BaseObject):
    """Unknown ASF object.

    Remembers its raw GUID so the object round-trips unchanged.
    """
    def __init__(self, guid):
        assert isinstance(guid, bytes)
        self.GUID = guid
class HeaderObject(object):
    """ASF header."""
    # ASF_Header_Object GUID -- the magic at the start of every ASF file.
    GUID = b"\x30\x26\xB2\x75\x8E\x66\xCF\x11\xA6\xD9\x00\xAA\x00\x62\xCE\x6C"
class ContentDescriptionObject(BaseObject):
    """Content description."""
    GUID = b"\x33\x26\xB2\x75\x8E\x66\xCF\x11\xA6\xD9\x00\xAA\x00\x62\xCE\x6C"
    # Field order is fixed by the Content Description object layout.
    NAMES = [
        u"Title",
        u"Author",
        u"Copyright",
        u"Description",
        u"Rating",
    ]
    def parse(self, asf, data, fileobj, size):
        super(ContentDescriptionObject, self).parse(asf, data, fileobj, size)
        asf.content_description_obj = self
        # Five 16-bit lengths, then the five UTF-16-LE strings back to back.
        lengths = struct.unpack("<HHHHH", data[:10])
        texts = []
        pos = 10
        for length in lengths:
            end = pos + length
            if length > 0:
                texts.append(data[pos:end].decode("utf-16-le").strip(u"\x00"))
            else:
                # Zero length means the field is absent.
                texts.append(None)
            pos = end
        for key, value in zip(self.NAMES, texts):
            if value is not None:
                value = ASFUnicodeAttribute(value=value)
                asf._tags.setdefault(self.GUID, []).append((key, value))
    def render(self, asf):
        def render_text(name):
            # Absent fields render as empty strings (length 0).
            value = asf.to_content_description.get(name)
            if value is not None:
                return text_type(value).encode("utf-16-le") + b"\x00\x00"
            else:
                return b""
        texts = [render_text(x) for x in self.NAMES]
        # Lengths header followed by the concatenated strings.
        data = struct.pack("<HHHHH", *map(len, texts)) + b"".join(texts)
        return self.GUID + struct.pack("<Q", 24 + len(data)) + data
class ExtendedContentDescriptionObject(BaseObject):
    """Extended content description."""
    GUID = b"\x40\xA4\xD0\xD2\x07\xE3\xD2\x11\x97\xF0\x00\xA0\xC9\x5E\xA8\x50"
    def parse(self, asf, data, fileobj, size):
        super(ExtendedContentDescriptionObject, self).parse(
            asf, data, fileobj, size)
        asf.extended_content_description_obj = self
        # Attribute count, then for each: name length, UTF-16-LE name,
        # value type, value length, value bytes.
        num_attributes, = struct.unpack("<H", data[0:2])
        pos = 2
        for i in xrange(num_attributes):
            name_length, = struct.unpack("<H", data[pos:pos + 2])
            pos += 2
            name = data[pos:pos + name_length]
            name = name.decode("utf-16-le").strip("\x00")
            pos += name_length
            value_type, value_length = struct.unpack("<HH", data[pos:pos + 4])
            pos += 4
            value = data[pos:pos + value_length]
            pos += value_length
            attr = _attribute_types[value_type](data=value)
            asf._tags.setdefault(self.GUID, []).append((name, attr))
    def render(self, asf):
        attrs = asf.to_extended_content_description.items()
        data = b"".join(attr.render(name) for (name, attr) in attrs)
        # 26 = 24-byte object header + 2-byte attribute count.
        data = struct.pack("<QH", 26 + len(data), len(attrs)) + data
        return self.GUID + data
class FilePropertiesObject(BaseObject):
    """File properties."""
    GUID = b"\xA1\xDC\xAB\x8C\x47\xA9\xCF\x11\x8E\xE4\x00\xC0\x0C\x20\x53\x65"
    def parse(self, asf, data, fileobj, size):
        super(FilePropertiesObject, self).parse(asf, data, fileobj, size)
        # Play duration is in 100-nanosecond units and includes the preroll
        # (in milliseconds), hence the two divisions to get seconds.
        length, _, preroll = struct.unpack("<QQQ", data[40:64])
        asf.info.length = (length / 10000000.0) - (preroll / 1000.0)
class StreamPropertiesObject(BaseObject):
    """Stream properties."""
    GUID = b"\x91\x07\xDC\xB7\xB7\xA9\xCF\x11\x8E\xE6\x00\xC0\x0C\x20\x53\x65"
    def parse(self, asf, data, fileobj, size):
        super(StreamPropertiesObject, self).parse(asf, data, fileobj, size)
        # Audio type-specific data at offset 56: channel count, sample rate,
        # and average byte rate (converted to bits per second below).
        channels, sample_rate, bitrate = struct.unpack("<HII", data[56:66])
        asf.info.channels = channels
        asf.info.sample_rate = sample_rate
        asf.info.bitrate = bitrate * 8
class HeaderExtensionObject(BaseObject):
    """Header extension.

    Holds nested ASF objects (e.g. Metadata / Metadata Library).
    """
    GUID = b"\xb5\x03\xbf_.\xa9\xcf\x11\x8e\xe3\x00\xc0\x0c Se"
    def parse(self, asf, data, fileobj, size):
        super(HeaderExtensionObject, self).parse(asf, data, fileobj, size)
        asf.header_extension_obj = self
        # Extension data size lives at offset 18; the nested objects start
        # at offset 22, each with its own 24-byte GUID+size header.
        datasize, = struct.unpack("<I", data[18:22])
        datapos = 0
        self.objects = []
        while datapos < datasize:
            guid, size = struct.unpack(
                "<16sQ", data[22 + datapos:22 + datapos + 24])
            if guid in _object_types:
                obj = _object_types[guid]()
            else:
                obj = UnknownObject(guid)
            obj.parse(asf, data[22 + datapos + 24:22 + datapos + size],
                      fileobj, size)
            self.objects.append(obj)
            datapos += size
    def render(self, asf):
        data = b"".join(obj.render(asf) for obj in self.objects)
        # Header: GUID + size, a fixed 16-byte reserved GUID, a reserved
        # WORD (0x0006), then the extension data size and nested objects.
        return (self.GUID + struct.pack("<Q", 24 + 16 + 6 + len(data)) +
                b"\x11\xD2\xD3\xAB\xBA\xA9\xcf\x11" +
                b"\x8E\xE6\x00\xC0\x0C\x20\x53\x65" +
                b"\x06\x00" + struct.pack("<I", len(data)) + data)
class MetadataObject(BaseObject):
    """Metadata description.

    Like the extended content description, but each attribute also
    carries a stream number.
    """
    GUID = b"\xea\xcb\xf8\xc5\xaf[wH\x84g\xaa\x8cD\xfaL\xca"
    def parse(self, asf, data, fileobj, size):
        super(MetadataObject, self).parse(asf, data, fileobj, size)
        asf.metadata_obj = self
        num_attributes, = struct.unpack("<H", data[0:2])
        pos = 2
        for i in xrange(num_attributes):
            # Per-attribute header: reserved, stream number, name length,
            # value type, value length.
            (reserved, stream, name_length, value_type,
             value_length) = struct.unpack("<HHHHI", data[pos:pos + 12])
            pos += 12
            name = data[pos:pos + name_length]
            name = name.decode("utf-16-le").strip("\x00")
            pos += name_length
            value = data[pos:pos + value_length]
            pos += value_length
            args = {'data': value, 'stream': stream}
            if value_type == 2:
                # Bools are stored as WORDs in this object.
                args['dword'] = False
            attr = _attribute_types[value_type](**args)
            asf._tags.setdefault(self.GUID, []).append((name, attr))
    def render(self, asf):
        attrs = asf.to_metadata.items()
        data = b"".join([attr.render_m(name) for (name, attr) in attrs])
        # 26 = 24-byte object header + 2-byte attribute count.
        return (self.GUID + struct.pack("<QH", 26 + len(data), len(attrs)) +
                data)
class MetadataLibraryObject(BaseObject):
    """Metadata library description.

    Like the metadata object, but attributes also carry a language index
    and may repeat, so they are kept as a list rather than a dict.
    """
    GUID = b"\x94\x1c#D\x98\x94\xd1I\xa1A\x1d\x13NEpT"
    def parse(self, asf, data, fileobj, size):
        super(MetadataLibraryObject, self).parse(asf, data, fileobj, size)
        asf.metadata_library_obj = self
        num_attributes, = struct.unpack("<H", data[0:2])
        pos = 2
        for i in xrange(num_attributes):
            # Per-attribute header: language index, stream number, name
            # length, value type, value length.
            (language, stream, name_length, value_type,
             value_length) = struct.unpack("<HHHHI", data[pos:pos + 12])
            pos += 12
            name = data[pos:pos + name_length]
            name = name.decode("utf-16-le").strip("\x00")
            pos += name_length
            value = data[pos:pos + value_length]
            pos += value_length
            args = {'data': value, 'language': language, 'stream': stream}
            if value_type == 2:
                # Bools are stored as WORDs in this object.
                args['dword'] = False
            attr = _attribute_types[value_type](**args)
            asf._tags.setdefault(self.GUID, []).append((name, attr))
    def render(self, asf):
        # to_metadata_library is a list of (name, attr) pairs, not a dict.
        attrs = asf.to_metadata_library
        data = b"".join([attr.render_ml(name) for (name, attr) in attrs])
        # 26 = 24-byte object header + 2-byte attribute count.
        return (self.GUID + struct.pack("<QH", 26 + len(data), len(attrs)) +
                data)
# Maps known ASF object GUIDs to their handler classes; anything else is
# wrapped in UnknownObject and passed through untouched.
_object_types = {
    ExtendedContentDescriptionObject.GUID: ExtendedContentDescriptionObject,
    ContentDescriptionObject.GUID: ContentDescriptionObject,
    FilePropertiesObject.GUID: FilePropertiesObject,
    StreamPropertiesObject.GUID: StreamPropertiesObject,
    HeaderExtensionObject.GUID: HeaderExtensionObject,
    MetadataLibraryObject.GUID: MetadataLibraryObject,
    MetadataObject.GUID: MetadataObject,
}
class ASF(FileType):
    """An ASF file, probably containing WMA or WMV."""
    _mimes = ["audio/x-ms-wma", "audio/x-ms-wmv", "video/x-ms-asf",
              "audio/x-wma", "video/x-wmv"]
    def load(self, filename):
        """Parse the header of `filename`, populating info and tags."""
        self.filename = filename
        with open(filename, "rb") as fileobj:
            self.size = 0
            # NOTE(review): size1/size2/offset1/offset2 are initialized but
            # never read anywhere in this class -- presumably leftovers.
            self.size1 = 0
            self.size2 = 0
            self.offset1 = 0
            self.offset2 = 0
            self.num_objects = 0
            self.info = ASFInfo()
            self.tags = ASFTags()
            self.__read_file(fileobj)
    def save(self):
        """Re-render the header objects and write them back to the file,
        growing or shrinking the on-disk header region as needed.
        """
        # Move attributes to the right objects
        self.to_content_description = {}
        self.to_extended_content_description = {}
        self.to_metadata = {}
        self.to_metadata_library = []
        for name, value in self.tags:
            # Values too large for a 16-bit length field, GUIDs, and
            # language-tagged values can only go in the metadata library.
            library_only = (value.data_size() > 0xFFFF or value.TYPE == GUID)
            can_cont_desc = value.TYPE == UNICODE
            if library_only or value.language is not None:
                self.to_metadata_library.append((name, value))
            elif value.stream is not None:
                if name not in self.to_metadata:
                    self.to_metadata[name] = value
                else:
                    # Duplicate names overflow into the metadata library.
                    self.to_metadata_library.append((name, value))
            elif name in ContentDescriptionObject.NAMES:
                if name not in self.to_content_description and can_cont_desc:
                    self.to_content_description[name] = value
                else:
                    self.to_metadata_library.append((name, value))
            else:
                if name not in self.to_extended_content_description:
                    self.to_extended_content_description[name] = value
                else:
                    self.to_metadata_library.append((name, value))
        # Add missing objects
        if not self.content_description_obj:
            self.content_description_obj = \
                ContentDescriptionObject()
            self.objects.append(self.content_description_obj)
        if not self.extended_content_description_obj:
            self.extended_content_description_obj = \
                ExtendedContentDescriptionObject()
            self.objects.append(self.extended_content_description_obj)
        if not self.header_extension_obj:
            self.header_extension_obj = \
                HeaderExtensionObject()
            self.objects.append(self.header_extension_obj)
        # Metadata and metadata library objects nest inside the header
        # extension object, not the top-level header.
        if not self.metadata_obj:
            self.metadata_obj = \
                MetadataObject()
            self.header_extension_obj.objects.append(self.metadata_obj)
        if not self.metadata_library_obj:
            self.metadata_library_obj = \
                MetadataLibraryObject()
            self.header_extension_obj.objects.append(self.metadata_library_obj)
        # Render the header
        data = b"".join([obj.render(self) for obj in self.objects])
        data = (HeaderObject.GUID +
                struct.pack("<QL", len(data) + 30, len(self.objects)) +
                b"\x01\x02" + data)
        with open(self.filename, "rb+") as fileobj:
            size = len(data)
            # Resize the header region in place before overwriting it.
            if size > self.size:
                insert_bytes(fileobj, size - self.size, self.size)
            if size < self.size:
                delete_bytes(fileobj, self.size - size, 0)
            fileobj.seek(0)
            fileobj.write(data)
            self.size = size
            self.num_objects = len(self.objects)
    def __read_file(self, fileobj):
        # 30-byte header object: 16-byte GUID, 8-byte size, 4-byte object
        # count, 2 reserved bytes.
        header = fileobj.read(30)
        if len(header) != 30 or header[:16] != HeaderObject.GUID:
            raise ASFHeaderError("Not an ASF file.")
        self.extended_content_description_obj = None
        self.content_description_obj = None
        self.header_extension_obj = None
        self.metadata_obj = None
        self.metadata_library_obj = None
        self.size, self.num_objects = struct.unpack("<QL", header[16:28])
        self.objects = []
        self._tags = {}
        for i in xrange(self.num_objects):
            self.__read_object(fileobj)
        # Collect parsed tags in a fixed, deterministic object order.
        for guid in [ContentDescriptionObject.GUID,
                     ExtendedContentDescriptionObject.GUID, MetadataObject.GUID,
                     MetadataLibraryObject.GUID]:
            self.tags.extend(self._tags.pop(guid, []))
        assert not self._tags
    def __read_object(self, fileobj):
        guid, size = struct.unpack("<16sQ", fileobj.read(24))
        if guid in _object_types:
            obj = _object_types[guid]()
        else:
            obj = UnknownObject(guid)
        # `size` includes the 24-byte object header already consumed.
        data = fileobj.read(size - 24)
        obj.parse(self, data, fileobj, size)
        self.objects.append(obj)
    @staticmethod
    def score(filename, fileobj, header):
        return header.startswith(HeaderObject.GUID) * 2
Open = ASF

View file

@ -1,534 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright (C) 2006 Joe Wreschnig
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of version 2 of the GNU General Public License as
# published by the Free Software Foundation.
"""Easier access to ID3 tags.
EasyID3 is a wrapper around mutagen.id3.ID3 to make ID3 tags appear
more like Vorbis or APEv2 tags.
"""
import mutagen.id3
from ._compat import iteritems, text_type, PY2
from mutagen import Metadata
from mutagen._util import DictMixin, dict_match
from mutagen.id3 import ID3, error, delete, ID3FileType
__all__ = ['EasyID3', 'Open', 'delete']
class EasyID3KeyError(KeyError, ValueError, error):
    """Raised when trying to get/set an invalid key.
    Subclasses both KeyError and ValueError for API compatibility,
    catching KeyError is preferred.
    """
class EasyID3(DictMixin, Metadata):
    """A file with an ID3 tag.

    Like Vorbis comments, EasyID3 keys are case-insensitive ASCII
    strings. Only a subset of ID3 frames are supported by default. Use
    EasyID3.RegisterKey and its wrappers to support more.

    You can also set the GetFallback, SetFallback, and DeleteFallback
    to generic key getter/setter/deleter functions, which are called
    if no specific handler is registered for a key. Additionally,
    ListFallback can be used to supply an arbitrary list of extra
    keys. These can be set on EasyID3 or on individual instances after
    creation.

    To use an EasyID3 class with mutagen.mp3.MP3::

        from mutagen.mp3 import EasyMP3 as MP3
        MP3(filename)

    Because many of the attributes are constructed on the fly, things
    like the following will not work::

        ezid3["performer"].append("Joe")

    Instead, you must do::

        values = ezid3["performer"]
        values.append("Joe")
        ezid3["performer"] = values
    """
    # Class-level registries mapping key names (or glob patterns) to
    # getter/setter/deleter/lister callables; shared by all instances.
    Set = {}
    Get = {}
    Delete = {}
    List = {}
    # For compatibility.
    valid_keys = Get
    # Optional generic handlers used when no specific key handler matches.
    GetFallback = None
    SetFallback = None
    DeleteFallback = None
    ListFallback = None
    @classmethod
    def RegisterKey(cls, key,
                    getter=None, setter=None, deleter=None, lister=None):
        """Register a new key mapping.

        A key mapping is four functions, a getter, setter, deleter,
        and lister. The key may be either a string or a glob pattern.
        The getter, deleted, and lister receive an ID3 instance and
        the requested key name. The setter also receives the desired
        value, which will be a list of strings.

        The getter, setter, and deleter are used to implement __getitem__,
        __setitem__, and __delitem__.

        The lister is used to implement keys(). It should return a
        list of keys that are actually in the ID3 instance, provided
        by its associated getter.
        """
        key = key.lower()
        if getter is not None:
            cls.Get[key] = getter
        if setter is not None:
            cls.Set[key] = setter
        if deleter is not None:
            cls.Delete[key] = deleter
        if lister is not None:
            cls.List[key] = lister
    @classmethod
    def RegisterTextKey(cls, key, frameid):
        """Register a text key.

        If the key you need to register is a simple one-to-one mapping
        of ID3 frame name to EasyID3 key, then you can use this
        function::

            EasyID3.RegisterTextKey("title", "TIT2")
        """
        def getter(id3, key):
            return list(id3[frameid])
        def setter(id3, key, value):
            try:
                frame = id3[frameid]
            except KeyError:
                # encoding=3 is UTF-8.
                id3.add(mutagen.id3.Frames[frameid](encoding=3, text=value))
            else:
                frame.encoding = 3
                frame.text = value
        def deleter(id3, key):
            del(id3[frameid])
        cls.RegisterKey(key, getter, setter, deleter)
    @classmethod
    def RegisterTXXXKey(cls, key, desc):
        """Register a user-defined text frame key.

        Some ID3 tags are stored in TXXX frames, which allow a
        freeform 'description' which acts as a subkey,
        e.g. TXXX:BARCODE.::

            EasyID3.RegisterTXXXKey('barcode', 'BARCODE').
        """
        frameid = "TXXX:" + desc
        def getter(id3, key):
            return list(id3[frameid])
        def setter(id3, key, value):
            try:
                frame = id3[frameid]
            except KeyError:
                enc = 0
                # Store 8859-1 if we can, per MusicBrainz spec.
                for v in value:
                    if v and max(v) > u'\x7f':
                        enc = 3
                        break
                id3.add(mutagen.id3.TXXX(encoding=enc, text=value, desc=desc))
            else:
                frame.text = value
        def deleter(id3, key):
            del(id3[frameid])
        cls.RegisterKey(key, getter, setter, deleter)
    def __init__(self, filename=None):
        # All real tag storage is delegated to a private ID3 instance.
        self.__id3 = ID3()
        if filename is not None:
            self.load(filename)
    load = property(lambda s: s.__id3.load,
                    lambda s, v: setattr(s.__id3, 'load', v))
    def save(self, *args, **kwargs):
        """Save changes to a file (delegates to the wrapped ID3 tag)."""
        # ignore v2_version until we support 2.3 here
        kwargs.pop("v2_version", None)
        self.__id3.save(*args, **kwargs)
    delete = property(lambda s: s.__id3.delete,
                      lambda s, v: setattr(s.__id3, 'delete', v))
    filename = property(lambda s: s.__id3.filename,
                        lambda s, fn: setattr(s.__id3, 'filename', fn))
    # Fix: the setter previously did setattr(s.__id3, 'size', s), storing
    # the EasyID3 instance itself instead of the new size value `fn`.
    size = property(lambda s: s.__id3.size,
                    lambda s, fn: setattr(s.__id3, 'size', fn))
    def __getitem__(self, key):
        key = key.lower()
        func = dict_match(self.Get, key, self.GetFallback)
        if func is not None:
            return func(self.__id3, key)
        else:
            raise EasyID3KeyError("%r is not a valid key" % key)
    def __setitem__(self, key, value):
        key = key.lower()
        # Single strings are normalized to one-element lists for setters.
        if PY2:
            if isinstance(value, basestring):
                value = [value]
        else:
            if isinstance(value, text_type):
                value = [value]
        func = dict_match(self.Set, key, self.SetFallback)
        if func is not None:
            return func(self.__id3, key, value)
        else:
            raise EasyID3KeyError("%r is not a valid key" % key)
    def __delitem__(self, key):
        key = key.lower()
        func = dict_match(self.Delete, key, self.DeleteFallback)
        if func is not None:
            return func(self.__id3, key)
        else:
            raise EasyID3KeyError("%r is not a valid key" % key)
    def keys(self):
        keys = []
        for key in self.Get.keys():
            if key in self.List:
                # Pattern keys expand to the concrete keys actually present.
                keys.extend(self.List[key](self.__id3, key))
            elif key in self:
                keys.append(key)
        if self.ListFallback is not None:
            keys.extend(self.ListFallback(self.__id3, ""))
        return keys
    def pprint(self):
        """Print tag key=value pairs."""
        strings = []
        for key in sorted(self.keys()):
            values = self[key]
            for value in values:
                strings.append("%s=%s" % (key, value))
        return "\n".join(strings)
Open = EasyID3
def genre_get(id3, key):
    # Expose the TCON frame's parsed genre list.
    return id3["TCON"].genres
def genre_set(id3, key, value):
    # Update the existing TCON frame in place, or add a new one (UTF-8).
    try:
        frame = id3["TCON"]
    except KeyError:
        id3.add(mutagen.id3.TCON(encoding=3, text=value))
    else:
        frame.encoding = 3
        frame.genres = value
def genre_delete(id3, key):
    # Remove the genre (TCON) frame entirely.
    del(id3["TCON"])
def date_get(id3, key):
    # TDRC holds timestamp objects; return their string (.text) forms.
    return [stamp.text for stamp in id3["TDRC"].text]
def date_set(id3, key, value):
    # Replace the recording-date (TDRC) frame, UTF-8 encoded.
    id3.add(mutagen.id3.TDRC(encoding=3, text=value))
def date_delete(id3, key):
    # Remove the recording-date (TDRC) frame.
    del(id3["TDRC"])
def original_date_get(id3, key):
    # TDOR holds timestamp objects; return their string (.text) forms.
    return [stamp.text for stamp in id3["TDOR"].text]
def original_date_set(id3, key, value):
    # Replace the original-release-date (TDOR) frame, UTF-8 encoded.
    id3.add(mutagen.id3.TDOR(encoding=3, text=value))
def original_date_delete(id3, key):
    # Remove the original-release-date (TDOR) frame.
    del(id3["TDOR"])
def performer_get(id3, key):
    """Return the people credited with the role in key "performer:<role>".

    Raises KeyError if no TMCL frame exists or no one has that role.
    """
    wanted_role = key.split(":", 1)[1]
    try:
        mcl = id3["TMCL"]
    except KeyError:
        raise KeyError(key)
    people = [person for role, person in mcl.people if role == wanted_role]
    if not people:
        raise KeyError(key)
    return people
def performer_set(id3, key, value):
    # key is "performer:<role>"; store `value` under that role in TMCL.
    wanted_role = key.split(":", 1)[1]
    try:
        mcl = id3["TMCL"]
    except KeyError:
        mcl = mutagen.id3.TMCL(encoding=3, people=[])
        id3.add(mcl)
    mcl.encoding = 3
    # Drop existing entries for this role, then append the new ones.
    people = [p for p in mcl.people if p[0] != wanted_role]
    for v in value:
        people.append((wanted_role, v))
    mcl.people = people
def performer_delete(id3, key):
    """Remove everyone credited with the role in key "performer:<role>".

    Raises KeyError if no TMCL frame exists or no one has that role;
    drops the whole TMCL frame when it would become empty.
    """
    wanted_role = key.split(":", 1)[1]
    try:
        mcl = id3["TMCL"]
    except KeyError:
        raise KeyError(key)
    remaining = [p for p in mcl.people if p[0] != wanted_role]
    if remaining == mcl.people:
        raise KeyError(key)
    if remaining:
        mcl.people = remaining
    else:
        del(id3["TMCL"])
def performer_list(id3, key):
    """List the distinct "performer:<role>" keys present in TMCL."""
    try:
        people = id3["TMCL"].people
    except KeyError:
        return []
    return list({"performer:" + role for role, person in people})
def musicbrainz_trackid_get(id3, key):
    # The MusicBrainz track ID lives in a UFID frame keyed by its owner URL.
    return [id3["UFID:http://musicbrainz.org"].data.decode('ascii')]
def musicbrainz_trackid_set(id3, key, value):
    # UFID frames hold a single binary identifier, so only one value is
    # allowed; it is stored ASCII-encoded.
    if len(value) != 1:
        raise ValueError("only one track ID may be set per song")
    value = value[0].encode('ascii')
    try:
        frame = id3["UFID:http://musicbrainz.org"]
    except KeyError:
        frame = mutagen.id3.UFID(owner="http://musicbrainz.org", data=value)
        id3.add(frame)
    else:
        frame.data = value
def musicbrainz_trackid_delete(id3, key):
    # Remove the MusicBrainz UFID frame.
    del(id3["UFID:http://musicbrainz.org"])
def website_get(id3, key):
    """Return all WOAR (official artist webpage) URLs.

    Raises EasyID3KeyError when no WOAR frames are present.
    """
    urls = [frame.url for frame in id3.getall("WOAR")]
    if not urls:
        raise EasyID3KeyError(key)
    return urls
def website_set(id3, key, value):
    # Replace all WOAR frames with one frame per URL.
    id3.delall("WOAR")
    for v in value:
        id3.add(mutagen.id3.WOAR(url=v))
def website_delete(id3, key):
    # Remove every WOAR frame.
    id3.delall("WOAR")
def gain_get(id3, key):
    # key is "replaygain_<channel>_gain"; key[11:-5] extracts <channel>,
    # which doubles as the RVA2 frame description.
    try:
        frame = id3["RVA2:" + key[11:-5]]
    except KeyError:
        raise EasyID3KeyError(key)
    else:
        return [u"%+f dB" % frame.gain]
def gain_set(id3, key, value):
    """Set the replaygain gain for key "replaygain_<channel>_gain".

    `value` must be a single-element list such as [u"-3.5 dB"]; only the
    leading number is used. Creates the RVA2 frame if missing.
    """
    if len(value) != 1:
        # Fix: the value used to be passed as a second exception argument,
        # so the %r placeholder was never interpolated into the message.
        raise ValueError(
            "there must be exactly one gain value, not %r." % value)
    gain = float(value[0].split()[0])
    try:
        # key[11:-5] extracts <channel> from "replaygain_<channel>_gain".
        frame = id3["RVA2:" + key[11:-5]]
    except KeyError:
        frame = mutagen.id3.RVA2(desc=key[11:-5], gain=0, peak=0, channel=1)
        id3.add(frame)
    frame.gain = gain
def gain_delete(id3, key):
    # key is "replaygain_<channel>_gain"; key[11:-5] extracts <channel>.
    try:
        frame = id3["RVA2:" + key[11:-5]]
    except KeyError:
        pass
    else:
        if frame.peak:
            # A peak value is still stored: just zero the gain.
            frame.gain = 0.0
        else:
            # Nothing left in the frame: drop it entirely.
            del(id3["RVA2:" + key[11:-5]])
def peak_get(id3, key):
    # key is "replaygain_<channel>_peak"; key[11:-5] extracts <channel>.
    try:
        frame = id3["RVA2:" + key[11:-5]]
    except KeyError:
        raise EasyID3KeyError(key)
    else:
        return [u"%f" % frame.peak]
def peak_set(id3, key, value):
    """Set the replaygain peak for key "replaygain_<channel>_peak".

    `value` must be a single-element list holding a float in [0, 2).
    Creates the RVA2 frame if missing.
    """
    if len(value) != 1:
        # Fix: the value used to be passed as a second exception argument,
        # so the %r placeholder was never interpolated into the message.
        raise ValueError(
            "there must be exactly one peak value, not %r." % value)
    peak = float(value[0])
    if peak >= 2 or peak < 0:
        # Fix: the message previously read "=> 0" instead of ">= 0".
        raise ValueError("peak must be >= 0 and < 2.")
    try:
        # key[11:-5] extracts <channel> from "replaygain_<channel>_peak".
        frame = id3["RVA2:" + key[11:-5]]
    except KeyError:
        frame = mutagen.id3.RVA2(desc=key[11:-5], gain=0, peak=0, channel=1)
        id3.add(frame)
    frame.peak = peak
def peak_delete(id3, key):
    # key is "replaygain_<channel>_peak"; key[11:-5] extracts <channel>.
    try:
        frame = id3["RVA2:" + key[11:-5]]
    except KeyError:
        pass
    else:
        if frame.gain:
            # A gain value is still stored: just zero the peak.
            frame.peak = 0.0
        else:
            # Nothing left in the frame: drop it entirely.
            del(id3["RVA2:" + key[11:-5]])
def peakgain_list(id3, key):
    """List the replaygain gain/peak keys for every RVA2 frame present."""
    return ["replaygain_%s_%s" % (frame.desc, suffix)
            for frame in id3.getall("RVA2")
            for suffix in ("gain", "peak")]
# Register the simple one-to-one ID3 frame <-> EasyID3 key mappings.
for frameid, key in iteritems({
        "TALB": "album",
        "TBPM": "bpm",
        "TCMP": "compilation",  # iTunes extension
        "TCOM": "composer",
        "TCOP": "copyright",
        "TENC": "encodedby",
        "TEXT": "lyricist",
        "TLEN": "length",
        "TMED": "media",
        "TMOO": "mood",
        "TIT2": "title",
        "TIT3": "version",
        "TPE1": "artist",
        "TPE2": "performer",
        "TPE3": "conductor",
        "TPE4": "arranger",
        "TPOS": "discnumber",
        "TPUB": "organization",
        "TRCK": "tracknumber",
        "TOLY": "author",
        "TSO2": "albumartistsort",  # iTunes extension
        "TSOA": "albumsort",
        "TSOC": "composersort",  # iTunes extension
        "TSOP": "artistsort",
        "TSOT": "titlesort",
        "TSRC": "isrc",
        "TSST": "discsubtitle",
        "TLAN": "language",
}):
    EasyID3.RegisterTextKey(key, frameid)
# Keys that need custom getter/setter/deleter logic.
EasyID3.RegisterKey("genre", genre_get, genre_set, genre_delete)
EasyID3.RegisterKey("date", date_get, date_set, date_delete)
EasyID3.RegisterKey("originaldate", original_date_get, original_date_set,
                    original_date_delete)
EasyID3.RegisterKey(
    "performer:*", performer_get, performer_set, performer_delete,
    performer_list)
EasyID3.RegisterKey("musicbrainz_trackid", musicbrainz_trackid_get,
                    musicbrainz_trackid_set, musicbrainz_trackid_delete)
EasyID3.RegisterKey("website", website_get, website_set, website_delete)
EasyID3.RegisterKey(
    "replaygain_*_gain", gain_get, gain_set, gain_delete, peakgain_list)
EasyID3.RegisterKey("replaygain_*_peak", peak_get, peak_set, peak_delete)
# At various times, information for this came from
# http://musicbrainz.org/docs/specs/metadata_tags.html
# http://bugs.musicbrainz.org/ticket/1383
# http://musicbrainz.org/doc/MusicBrainzTag
for desc, key in iteritems({
        u"MusicBrainz Artist Id": "musicbrainz_artistid",
        u"MusicBrainz Album Id": "musicbrainz_albumid",
        u"MusicBrainz Album Artist Id": "musicbrainz_albumartistid",
        u"MusicBrainz TRM Id": "musicbrainz_trmid",
        u"MusicIP PUID": "musicip_puid",
        u"MusicMagic Fingerprint": "musicip_fingerprint",
        u"MusicBrainz Album Status": "musicbrainz_albumstatus",
        u"MusicBrainz Album Type": "musicbrainz_albumtype",
        u"MusicBrainz Album Release Country": "releasecountry",
        u"MusicBrainz Disc Id": "musicbrainz_discid",
        u"ASIN": "asin",
        u"ALBUMARTISTSORT": "albumartistsort",
        u"BARCODE": "barcode",
        u"CATALOGNUMBER": "catalognumber",
        u"MusicBrainz Release Track Id": "musicbrainz_releasetrackid",
        u"MusicBrainz Release Group Id": "musicbrainz_releasegroupid",
        u"MusicBrainz Work Id": "musicbrainz_workid",
        u"Acoustid Fingerprint": "acoustid_fingerprint",
        u"Acoustid Id": "acoustid_id",
}):
    EasyID3.RegisterTXXXKey(key, desc)
class EasyID3FileType(ID3FileType):
    """Like ID3FileType, but uses EasyID3 for tags."""
    ID3 = EasyID3

View file

@ -1,284 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright (C) 2009 Joe Wreschnig
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of version 2 of the GNU General Public License as
# published by the Free Software Foundation.
from mutagen import Metadata
from mutagen._util import DictMixin, dict_match
from mutagen.mp4 import MP4, MP4Tags, error, delete
from ._compat import PY2, text_type, PY3
__all__ = ["EasyMP4Tags", "EasyMP4", "delete", "error"]
class EasyMP4KeyError(error, KeyError, ValueError):
    """Raised when trying to get/set an invalid EasyMP4 key.

    Subclasses KeyError and ValueError for API compatibility.
    """
    pass
class EasyMP4Tags(DictMixin, Metadata):
"""A file with MPEG-4 iTunes metadata.
Like Vorbis comments, EasyMP4Tags keys are case-insensitive ASCII
strings, and values are a list of Unicode strings (and these lists
are always of length 0 or 1).
If you need access to the full MP4 metadata feature set, you should use
MP4, not EasyMP4.
"""
Set = {}
Get = {}
Delete = {}
List = {}
def __init__(self, *args, **kwargs):
self.__mp4 = MP4Tags(*args, **kwargs)
self.load = self.__mp4.load
self.save = self.__mp4.save
self.delete = self.__mp4.delete
filename = property(lambda s: s.__mp4.filename,
lambda s, fn: setattr(s.__mp4, 'filename', fn))
@classmethod
def RegisterKey(cls, key,
getter=None, setter=None, deleter=None, lister=None):
"""Register a new key mapping.
A key mapping is four functions, a getter, setter, deleter,
and lister. The key may be either a string or a glob pattern.
The getter, deleted, and lister receive an MP4Tags instance
and the requested key name. The setter also receives the
desired value, which will be a list of strings.
The getter, setter, and deleter are used to implement __getitem__,
__setitem__, and __delitem__.
The lister is used to implement keys(). It should return a
list of keys that are actually in the MP4 instance, provided
by its associated getter.
"""
key = key.lower()
if getter is not None:
cls.Get[key] = getter
if setter is not None:
cls.Set[key] = setter
if deleter is not None:
cls.Delete[key] = deleter
if lister is not None:
cls.List[key] = lister
@classmethod
def RegisterTextKey(cls, key, atomid):
"""Register a text key.
If the key you need to register is a simple one-to-one mapping
of MP4 atom name to EasyMP4Tags key, then you can use this
function::
EasyMP4Tags.RegisterTextKey("artist", "\xa9ART")
"""
def getter(tags, key):
return tags[atomid]
def setter(tags, key, value):
tags[atomid] = value
def deleter(tags, key):
del(tags[atomid])
cls.RegisterKey(key, getter, setter, deleter)
@classmethod
def RegisterIntKey(cls, key, atomid, min_value=0, max_value=(2 ** 16) - 1):
    """Register a scalar integer key.

    Stored values are clamped into [min_value, max_value]; read
    values are presented as text.
    """
    def int_getter(tags, key):
        return [text_type(v) for v in tags[atomid]]

    def int_setter(tags, key, value):
        def clamp(x):
            return int(min(max(min_value, x), max_value))
        tags[atomid] = [clamp(int(v)) for v in value]

    def int_deleter(tags, key):
        del tags[atomid]

    cls.RegisterKey(key, int_getter, int_setter, int_deleter)
@classmethod
def RegisterIntPairKey(cls, key, atomid, min_value=0,
                       max_value=(2 ** 16) - 1):
    """Register a key stored as a pair of integers (e.g. trkn/disk).

    Values are presented as "part/total" strings, or just "part" when
    total is 0, and parsed back the same way on assignment.
    """
    def getter(tags, key):
        ret = []
        for (track, total) in tags[atomid]:
            if total:
                ret.append(u"%d/%d" % (track, total))
            else:
                ret.append(text_type(track))
        return ret

    def setter(tags, key, value):
        # Clamp both halves into [min_value, max_value].
        clamp = lambda x: int(min(max(min_value, x), max_value))
        data = []
        for v in value:
            try:
                tracks, total = v.split("/")
                tracks = clamp(int(tracks))
                total = clamp(int(total))
            except (ValueError, TypeError):
                # No "/" present (or not a string): treat the value as
                # a bare part number with an unset total.
                tracks = clamp(int(v))
                total = min_value
            data.append((tracks, total))
        tags[atomid] = data

    def deleter(tags, key):
        del(tags[atomid])

    cls.RegisterKey(key, getter, setter, deleter)
@classmethod
def RegisterFreeformKey(cls, key, name, mean="com.apple.iTunes"):
    """Register a freeform text key.

    Use this when the key is a simple one-to-one mapping of an MP4
    freeform atom (----) and name to an EasyMP4Tags key::

        EasyMP4Tags.RegisterFreeformKey(
            "musicbrainz_artistid", "MusicBrainz Artist Id")
    """
    atomid = "----:" + mean + ":" + name

    def freeform_getter(tags, key):
        return [raw.decode("utf-8", "replace") for raw in tags[atomid]]

    def freeform_setter(tags, key, value):
        encoded = []
        for item in value:
            if not isinstance(item, text_type):
                if PY3:
                    raise TypeError("%r not str" % item)
                item = item.decode("utf-8")
            encoded.append(item.encode("utf-8"))
        tags[atomid] = encoded

    def freeform_deleter(tags, key):
        del tags[atomid]

    cls.RegisterKey(key, freeform_getter, freeform_setter,
                    freeform_deleter)
def __getitem__(self, key):
    """Look up *key* (case-insensitive) through its registered getter."""
    key = key.lower()
    getter = dict_match(self.Get, key)
    if getter is None:
        raise EasyMP4KeyError("%r is not a valid key" % key)
    return getter(self.__mp4, key)
def __setitem__(self, key, value):
    """Store *value* under *key* via its registered setter.

    A bare string is promoted to a single-element list first.
    """
    key = key.lower()
    if PY2:
        string_type = basestring
    else:
        string_type = text_type
    if isinstance(value, string_type):
        value = [value]
    setter = dict_match(self.Set, key)
    if setter is None:
        raise EasyMP4KeyError("%r is not a valid key" % key)
    return setter(self.__mp4, key, value)
def __delitem__(self, key):
    """Delete *key* via its registered deleter."""
    key = key.lower()
    deleter = dict_match(self.Delete, key)
    if deleter is None:
        raise EasyMP4KeyError("%r is not a valid key" % key)
    return deleter(self.__mp4, key)
def keys(self):
    """Return the easy keys actually present in the file."""
    present = []
    for candidate in self.Get.keys():
        lister = self.List.get(candidate)
        if lister is not None:
            # Pattern keys enumerate their concrete matches via the
            # registered lister.
            present.extend(lister(self.__mp4, candidate))
        elif candidate in self:
            present.append(candidate)
    return present
def pprint(self):
    """Print tag key=value pairs."""
    lines = []
    for key in sorted(self.keys()):
        for value in self[key]:
            lines.append("%s=%s" % (key, value))
    return "\n".join(lines)
# Wire up the standard iTunes text atoms to easy keys.
for atomid, key in {
    '\xa9nam': 'title',
    '\xa9alb': 'album',
    '\xa9ART': 'artist',
    'aART': 'albumartist',
    '\xa9day': 'date',
    '\xa9cmt': 'comment',
    'desc': 'description',
    '\xa9grp': 'grouping',
    '\xa9gen': 'genre',
    'cprt': 'copyright',
    'soal': 'albumsort',
    'soaa': 'albumartistsort',
    'soar': 'artistsort',
    'sonm': 'titlesort',
    'soco': 'composersort',
}.items():
    EasyMP4Tags.RegisterTextKey(key, atomid)

# Freeform (----) atoms, mostly MusicBrainz identifiers.
for name, key in {
    'MusicBrainz Artist Id': 'musicbrainz_artistid',
    'MusicBrainz Track Id': 'musicbrainz_trackid',
    'MusicBrainz Album Id': 'musicbrainz_albumid',
    'MusicBrainz Album Artist Id': 'musicbrainz_albumartistid',
    'MusicIP PUID': 'musicip_puid',
    'MusicBrainz Album Status': 'musicbrainz_albumstatus',
    'MusicBrainz Album Type': 'musicbrainz_albumtype',
    'MusicBrainz Release Country': 'releasecountry',
}.items():
    EasyMP4Tags.RegisterFreeformKey(key, name)

# Scalar integer atoms.
for name, key in {
    "tmpo": "bpm",
}.items():
    EasyMP4Tags.RegisterIntKey(key, name)

# Integer-pair atoms (part/total).
for name, key in {
    "trkn": "tracknumber",
    "disk": "discnumber",
}.items():
    EasyMP4Tags.RegisterIntPairKey(key, name)
class EasyMP4(MP4):
    """Like :class:`MP4 <mutagen.mp4.MP4>`,
    but uses :class:`EasyMP4Tags` for tags.

    :ivar info: :class:`MP4Info <mutagen.mp4.MP4Info>`
    :ivar tags: :class:`EasyMP4Tags`
    """

    # Tag class used when loading/creating tags for this file type.
    MP4Tags = EasyMP4Tags

    # Re-export the EasyMP4Tags registries and registration helpers so
    # callers can register new keys through this class as well.
    Get = EasyMP4Tags.Get
    Set = EasyMP4Tags.Set
    Delete = EasyMP4Tags.Delete
    List = EasyMP4Tags.List
    RegisterTextKey = EasyMP4Tags.RegisterTextKey
    RegisterKey = EasyMP4Tags.RegisterKey

View file

@ -1,838 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright (C) 2005 Joe Wreschnig
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of version 2 of the GNU General Public License as
# published by the Free Software Foundation.
"""Read and write FLAC Vorbis comments and stream information.
Read more about FLAC at http://flac.sourceforge.net.
FLAC supports arbitrary metadata blocks. The two most interesting ones
are the FLAC stream information block, and the Vorbis comment block;
these are also the only ones Mutagen can currently read.
This module does not handle Ogg FLAC files.
Based off documentation available at
http://flac.sourceforge.net/format.html
"""
__all__ = ["FLAC", "Open", "delete"]
import struct
from ._vorbis import VCommentDict
import mutagen
from ._compat import cBytesIO, endswith, chr_
from mutagen._util import insert_bytes, MutagenError
from mutagen.id3 import BitPaddedInt
from functools import reduce
class error(IOError, MutagenError):
    """Base class for all FLAC errors raised by this module."""
    pass


class FLACNoHeaderError(error):
    """Raised when the fLaC stream marker cannot be found."""
    pass


class FLACVorbisError(ValueError, error):
    """Raised on Vorbis-comment problems (e.g. duplicate blocks)."""
    pass
def to_int_be(data):
    """Convert an arbitrarily-long string to a long using big-endian
    byte order."""
    result = 0
    for octet in bytearray(data):
        result = (result << 8) | octet
    return result
class StrictFileObject(object):
    """Wraps a file-like object and raises an exception if the requested
    amount of data to read isn't returned."""

    def __init__(self, fileobj):
        self._fileobj = fileobj
        # Forward the common file methods/attributes when the wrapped
        # object provides them.
        for name in ("close", "tell", "seek", "write", "name"):
            if hasattr(fileobj, name):
                setattr(self, name, getattr(fileobj, name))

    def read(self, size=-1):
        """Read exactly *size* bytes (or the rest when size < 0);
        raise ``error`` on a short read."""
        data = self._fileobj.read(size)
        if 0 <= size != len(data):
            raise error("file said %d bytes, read %d bytes" % (
                size, len(data)))
        return data

    def tryread(self, *args):
        """Like read(), but short reads are allowed."""
        return self._fileobj.read(*args)
class MetadataBlock(object):
    """A generic block of FLAC metadata.

    This class is extended by specific used as an ancestor for more specific
    blocks, and also as a container for data blobs of unknown blocks.

    Attributes:

    * data -- raw binary data for this block
    """

    # Subclasses whose on-disk length field cannot be trusted set this;
    # see FLAC.__read_metadata_block.
    _distrust_size = False

    def __init__(self, data):
        """Parse the given data string or file-like as a metadata block.
        The metadata header should not be included."""
        if data is not None:
            if not isinstance(data, StrictFileObject):
                if isinstance(data, bytes):
                    data = cBytesIO(data)
                elif not hasattr(data, 'read'):
                    raise TypeError(
                        "StreamInfo requires string data or a file-like")
                data = StrictFileObject(data)
            self.load(data)

    def load(self, data):
        # Default: keep the raw bytes so unknown block types round-trip.
        self.data = data.read()

    def write(self):
        return self.data

    @staticmethod
    def writeblocks(blocks):
        """Render metadata block as a byte string."""
        data = []
        codes = [[block.code, block.write()] for block in blocks]
        # The high bit of the final block's code byte marks "last block".
        codes[-1][0] |= 128
        for code, datum in codes:
            byte = chr_(code)
            if len(datum) > 2 ** 24:
                raise error("block is too long to write")
            # Block length is a 24-bit big-endian integer.
            length = struct.pack(">I", len(datum))[-3:]
            data.append(byte + length + datum)
        return b"".join(data)

    @staticmethod
    def group_padding(blocks):
        """Consolidate FLAC padding metadata blocks.

        The overall size of the rendered blocks does not change, so
        this adds several bytes of padding for each merged block.
        """
        paddings = [b for b in blocks if isinstance(b, Padding)]
        for p in paddings:
            blocks.remove(p)
        # total padding size is the sum of padding sizes plus 4 bytes
        # per removed header.
        size = sum(padding.length for padding in paddings)
        padding = Padding()
        padding.length = size + 4 * (len(paddings) - 1)
        blocks.append(padding)
class StreamInfo(MetadataBlock, mutagen.StreamInfo):
    """FLAC stream information.

    This contains information about the audio data in the FLAC file.
    Unlike most stream information objects in Mutagen, changes to this
    one will rewritten to the file when it is saved. Unless you are
    actually changing the audio stream itself, don't change any
    attributes of this block.

    Attributes:

    * min_blocksize -- minimum audio block size
    * max_blocksize -- maximum audio block size
    * sample_rate -- audio sample rate in Hz
    * channels -- audio channels (1 for mono, 2 for stereo)
    * bits_per_sample -- bits per sample
    * total_samples -- total samples in file
    * length -- audio length in seconds
    """

    code = 0

    def __eq__(self, other):
        # Fixed: this previously used a bare ``except:``, which also
        # swallowed KeyboardInterrupt/SystemExit.  Catch only the
        # comparison failures we expect, matching the __eq__ style of
        # SeekTable/CueSheet/Picture in this module.
        try:
            return (self.min_blocksize == other.min_blocksize and
                    self.max_blocksize == other.max_blocksize and
                    self.sample_rate == other.sample_rate and
                    self.channels == other.channels and
                    self.bits_per_sample == other.bits_per_sample and
                    self.total_samples == other.total_samples)
        except (AttributeError, TypeError):
            return False

    __hash__ = MetadataBlock.__hash__

    def load(self, data):
        """Parse the fixed-size STREAMINFO payload."""
        self.min_blocksize = int(to_int_be(data.read(2)))
        self.max_blocksize = int(to_int_be(data.read(2)))
        self.min_framesize = int(to_int_be(data.read(3)))
        self.max_framesize = int(to_int_be(data.read(3)))
        # first 16 bits of sample rate
        sample_first = to_int_be(data.read(2))
        # last 4 bits of sample rate, 3 of channels, first 1 of bits/sample
        sample_channels_bps = to_int_be(data.read(1))
        # last 4 of bits/sample, 36 of total samples
        bps_total = to_int_be(data.read(5))

        sample_tail = sample_channels_bps >> 4
        self.sample_rate = int((sample_first << 4) + sample_tail)
        if not self.sample_rate:
            raise error("A sample rate value of 0 is invalid")
        self.channels = int(((sample_channels_bps >> 1) & 7) + 1)
        bps_tail = bps_total >> 36
        bps_head = (sample_channels_bps & 1) << 4
        self.bits_per_sample = int(bps_head + bps_tail + 1)
        self.total_samples = bps_total & 0xFFFFFFFFF
        self.length = self.total_samples / float(self.sample_rate)

        self.md5_signature = to_int_be(data.read(16))

    def write(self):
        """Serialize back to the STREAMINFO payload (inverse of load)."""
        f = cBytesIO()
        f.write(struct.pack(">I", self.min_blocksize)[-2:])
        f.write(struct.pack(">I", self.max_blocksize)[-2:])
        f.write(struct.pack(">I", self.min_framesize)[-3:])
        f.write(struct.pack(">I", self.max_framesize)[-3:])

        # first 16 bits of sample rate
        f.write(struct.pack(">I", self.sample_rate >> 4)[-2:])
        # 4 bits sample, 3 channel, 1 bps
        byte = (self.sample_rate & 0xF) << 4
        byte += ((self.channels - 1) & 7) << 1
        byte += ((self.bits_per_sample - 1) >> 4) & 1
        f.write(chr_(byte))
        # 4 bits of bps, 4 of sample count
        byte = ((self.bits_per_sample - 1) & 0xF) << 4
        byte += (self.total_samples >> 32) & 0xF
        f.write(chr_(byte))
        # last 32 of sample count
        f.write(struct.pack(">I", self.total_samples & 0xFFFFFFFF))
        # MD5 signature
        sig = self.md5_signature
        f.write(struct.pack(
            ">4I", (sig >> 96) & 0xFFFFFFFF, (sig >> 64) & 0xFFFFFFFF,
            (sig >> 32) & 0xFFFFFFFF, sig & 0xFFFFFFFF))
        return f.getvalue()

    def pprint(self):
        return "FLAC, %.2f seconds, %d Hz" % (self.length, self.sample_rate)
class SeekPoint(tuple):
    """A single seek point in a FLAC file.

    Placeholder seek points have first_sample of 0xFFFFFFFFFFFFFFFFL,
    and byte_offset and num_samples undefined. Seek points must be
    sorted in ascending order by first_sample number. Seek points must
    be unique by first_sample number, except for placeholder
    points. Placeholder points must occur last in the table and there
    may be any number of them.

    Attributes:

    * first_sample -- sample number of first sample in the target frame
    * byte_offset -- offset from first frame to target frame
    * num_samples -- number of samples in target frame
    """

    def __new__(cls, first_sample, byte_offset, num_samples):
        # Fixed: the super() arguments were reversed
        # (``super(cls, SeekPoint)``), which raises TypeError for any
        # subclass; the correct order is super(SeekPoint, cls).
        return super(SeekPoint, cls).__new__(
            cls, (first_sample, byte_offset, num_samples))

    first_sample = property(lambda self: self[0])
    byte_offset = property(lambda self: self[1])
    num_samples = property(lambda self: self[2])
class SeekTable(MetadataBlock):
    """Read and write FLAC seek tables.

    Attributes:

    * seekpoints -- list of SeekPoint objects
    """

    # One seekpoint record: u64 first_sample, u64 byte_offset,
    # u16 num_samples, big-endian.
    __SEEKPOINT_FORMAT = '>QQH'
    __SEEKPOINT_SIZE = struct.calcsize(__SEEKPOINT_FORMAT)

    code = 3

    def __init__(self, data):
        self.seekpoints = []
        super(SeekTable, self).__init__(data)

    def __eq__(self, other):
        try:
            return (self.seekpoints == other.seekpoints)
        except (AttributeError, TypeError):
            return False

    __hash__ = MetadataBlock.__hash__

    def load(self, data):
        self.seekpoints = []
        # tryread tolerates a truncated trailing record: the loop just
        # stops when a full record is no longer available.
        sp = data.tryread(self.__SEEKPOINT_SIZE)
        while len(sp) == self.__SEEKPOINT_SIZE:
            self.seekpoints.append(SeekPoint(
                *struct.unpack(self.__SEEKPOINT_FORMAT, sp)))
            sp = data.tryread(self.__SEEKPOINT_SIZE)

    def write(self):
        f = cBytesIO()
        for seekpoint in self.seekpoints:
            packed = struct.pack(
                self.__SEEKPOINT_FORMAT,
                seekpoint.first_sample, seekpoint.byte_offset,
                seekpoint.num_samples)
            f.write(packed)
        return f.getvalue()

    def __repr__(self):
        return "<%s seekpoints=%r>" % (type(self).__name__, self.seekpoints)
class VCFLACDict(VCommentDict):
    """Read and write FLAC Vorbis comments.

    FLACs don't use the framing bit at the end of the comment block.
    So this extends VCommentDict to not use the framing bit.
    """

    code = 4
    # The declared block length cannot be trusted for comment blocks;
    # see FLAC.__read_metadata_block.
    _distrust_size = True

    def load(self, data, errors='replace', framing=False):
        super(VCFLACDict, self).load(data, errors=errors, framing=framing)

    def write(self, framing=False):
        return super(VCFLACDict, self).write(framing=framing)
class CueSheetTrackIndex(tuple):
    """Index for a track in a cuesheet.

    For CD-DA, an index_number of 0 corresponds to the track
    pre-gap. The first index in a track must have a number of 0 or 1,
    and subsequently, index_numbers must increase by 1. Index_numbers
    must be unique within a track. And index_offset must be evenly
    divisible by 588 samples.

    Attributes:

    * index_number -- index point number
    * index_offset -- offset in samples from track start
    """

    def __new__(cls, index_number, index_offset):
        # Fixed: the super() arguments were reversed
        # (``super(cls, CueSheetTrackIndex)``), which raises TypeError
        # for any subclass; the correct order is
        # super(CueSheetTrackIndex, cls).
        return super(CueSheetTrackIndex, cls).__new__(
            cls, (index_number, index_offset))

    index_number = property(lambda self: self[0])
    index_offset = property(lambda self: self[1])
class CueSheetTrack(object):
    """A track in a cuesheet.

    For CD-DA, track_numbers must be 1-99, or 170 for the
    lead-out. Track_numbers must be unique within a cue sheet. There
    must be atleast one index in every track except the lead-out track
    which must have none.

    Attributes:

    * track_number -- track number
    * start_offset -- track offset in samples from start of FLAC stream
    * isrc -- ISRC code
    * type -- 0 for audio, 1 for digital data
    * pre_emphasis -- true if the track is recorded with pre-emphasis
    * indexes -- list of CueSheetTrackIndex objects
    """

    def __init__(self, track_number, start_offset, isrc='', type_=0,
                 pre_emphasis=False):
        self.track_number = track_number
        self.start_offset = start_offset
        self.isrc = isrc
        self.type = type_
        self.pre_emphasis = pre_emphasis
        self.indexes = []

    def __eq__(self, other):
        # Compare all public attributes; anything without them is
        # simply not equal.
        compared = ("track_number", "start_offset", "isrc", "type",
                    "pre_emphasis", "indexes")
        try:
            return all(getattr(self, name) == getattr(other, name)
                       for name in compared)
        except (AttributeError, TypeError):
            return False

    __hash__ = object.__hash__

    def __repr__(self):
        return (("<%s number=%r, offset=%d, isrc=%r, type=%r, "
                 "pre_emphasis=%r, indexes=%r)>") %
                (type(self).__name__, self.track_number, self.start_offset,
                 self.isrc, self.type, self.pre_emphasis, self.indexes))
class CueSheet(MetadataBlock):
    """Read and write FLAC embedded cue sheets.

    Number of tracks should be from 1 to 100. There should always be
    exactly one lead-out track and that track must be the last track
    in the cue sheet.

    Attributes:

    * media_catalog_number -- media catalog number in ASCII
    * lead_in_samples -- number of lead-in samples
    * compact_disc -- true if the cuesheet corresponds to a compact disc
    * tracks -- list of CueSheetTrack objects
    * lead_out -- lead-out as CueSheetTrack or None if lead-out was not found
    """

    # Fixed-size big-endian record layouts (x bytes are reserved/zero).
    __CUESHEET_FORMAT = '>128sQB258xB'
    __CUESHEET_SIZE = struct.calcsize(__CUESHEET_FORMAT)
    __CUESHEET_TRACK_FORMAT = '>QB12sB13xB'
    __CUESHEET_TRACK_SIZE = struct.calcsize(__CUESHEET_TRACK_FORMAT)
    __CUESHEET_TRACKINDEX_FORMAT = '>QB3x'
    __CUESHEET_TRACKINDEX_SIZE = struct.calcsize(__CUESHEET_TRACKINDEX_FORMAT)

    code = 5

    media_catalog_number = b''
    lead_in_samples = 88200
    compact_disc = True

    def __init__(self, data):
        self.tracks = []
        super(CueSheet, self).__init__(data)

    def __eq__(self, other):
        try:
            return (self.media_catalog_number == other.media_catalog_number and
                    self.lead_in_samples == other.lead_in_samples and
                    self.compact_disc == other.compact_disc and
                    self.tracks == other.tracks)
        except (AttributeError, TypeError):
            return False

    __hash__ = MetadataBlock.__hash__

    def load(self, data):
        header = data.read(self.__CUESHEET_SIZE)
        media_catalog_number, lead_in_samples, flags, num_tracks = \
            struct.unpack(self.__CUESHEET_FORMAT, header)
        self.media_catalog_number = media_catalog_number.rstrip(b'\0')
        self.lead_in_samples = lead_in_samples
        # Bit 7 of the flags byte marks "compact disc".
        self.compact_disc = bool(flags & 0x80)
        self.tracks = []
        for i in range(num_tracks):
            track = data.read(self.__CUESHEET_TRACK_SIZE)
            start_offset, track_number, isrc_padded, flags, num_indexes = \
                struct.unpack(self.__CUESHEET_TRACK_FORMAT, track)
            isrc = isrc_padded.rstrip(b'\0')
            # Track flags: bit 7 = type (audio/data), bit 6 = pre-emphasis.
            type_ = (flags & 0x80) >> 7
            pre_emphasis = bool(flags & 0x40)
            val = CueSheetTrack(
                track_number, start_offset, isrc, type_, pre_emphasis)
            for j in range(num_indexes):
                index = data.read(self.__CUESHEET_TRACKINDEX_SIZE)
                index_offset, index_number = struct.unpack(
                    self.__CUESHEET_TRACKINDEX_FORMAT, index)
                val.indexes.append(
                    CueSheetTrackIndex(index_number, index_offset))
            self.tracks.append(val)

    def write(self):
        f = cBytesIO()
        flags = 0
        if self.compact_disc:
            flags |= 0x80
        packed = struct.pack(
            self.__CUESHEET_FORMAT, self.media_catalog_number,
            self.lead_in_samples, flags, len(self.tracks))
        f.write(packed)
        for track in self.tracks:
            track_flags = 0
            track_flags |= (track.type & 1) << 7
            if track.pre_emphasis:
                track_flags |= 0x40
            track_packed = struct.pack(
                self.__CUESHEET_TRACK_FORMAT, track.start_offset,
                track.track_number, track.isrc, track_flags,
                len(track.indexes))
            f.write(track_packed)
            for index in track.indexes:
                index_packed = struct.pack(
                    self.__CUESHEET_TRACKINDEX_FORMAT,
                    index.index_offset, index.index_number)
                f.write(index_packed)
        return f.getvalue()

    def __repr__(self):
        return (("<%s media_catalog_number=%r, lead_in=%r, compact_disc=%r, "
                 "tracks=%r>") %
                (type(self).__name__, self.media_catalog_number,
                 self.lead_in_samples, self.compact_disc, self.tracks))
class Picture(MetadataBlock):
    """Read and write FLAC embed pictures.

    Attributes:

    * type -- picture type (same as types for ID3 APIC frames)
    * mime -- MIME type of the picture
    * desc -- picture's description
    * width -- width in pixels
    * height -- height in pixels
    * depth -- color depth in bits-per-pixel
    * colors -- number of colors for indexed palettes (like GIF),
      0 for non-indexed
    * data -- picture data
    """

    code = 6
    # The declared block length cannot be trusted for picture blocks;
    # see FLAC.__read_metadata_block.
    _distrust_size = True

    def __init__(self, data=None):
        self.type = 0
        self.mime = u''
        self.desc = u''
        self.width = 0
        self.height = 0
        self.depth = 0
        self.colors = 0
        self.data = b''
        super(Picture, self).__init__(data)

    def __eq__(self, other):
        try:
            return (self.type == other.type and
                    self.mime == other.mime and
                    self.desc == other.desc and
                    self.width == other.width and
                    self.height == other.height and
                    self.depth == other.depth and
                    self.colors == other.colors and
                    self.data == other.data)
        except (AttributeError, TypeError):
            return False

    __hash__ = MetadataBlock.__hash__

    def load(self, data):
        # Layout: type, length-prefixed mime, length-prefixed desc,
        # width/height/depth/colors, then length-prefixed image data.
        self.type, length = struct.unpack('>2I', data.read(8))
        self.mime = data.read(length).decode('UTF-8', 'replace')
        length, = struct.unpack('>I', data.read(4))
        self.desc = data.read(length).decode('UTF-8', 'replace')
        (self.width, self.height, self.depth,
         self.colors, length) = struct.unpack('>5I', data.read(20))
        self.data = data.read(length)

    def write(self):
        f = cBytesIO()
        mime = self.mime.encode('UTF-8')
        f.write(struct.pack('>2I', self.type, len(mime)))
        f.write(mime)
        desc = self.desc.encode('UTF-8')
        f.write(struct.pack('>I', len(desc)))
        f.write(desc)
        f.write(struct.pack('>5I', self.width, self.height, self.depth,
                            self.colors, len(self.data)))
        f.write(self.data)
        return f.getvalue()

    def __repr__(self):
        return "<%s '%s' (%d bytes)>" % (type(self).__name__, self.mime,
                                         len(self.data))
class Padding(MetadataBlock):
    """Empty padding space for metadata blocks.

    To avoid rewriting the entire FLAC file when editing comments,
    metadata is often padded. Padding should occur at the end, and no
    more than one padding block should be in any FLAC file. Mutagen
    handles this with MetadataBlock.group_padding.
    """

    code = 1

    def __init__(self, data=b""):
        super(Padding, self).__init__(data)

    def load(self, data):
        # Only the size matters; the content is all zero bytes.
        self.length = len(data.read())

    def write(self):
        try:
            return b"\x00" * self.length
        # On some 64 bit platforms this won't generate a MemoryError
        # or OverflowError since you might have enough RAM, but it
        # still generates a ValueError. On other 64 bit platforms,
        # this will still succeed for extremely large values.
        # Those should never happen in the real world, and if they
        # do, writeblocks will catch it.
        except (OverflowError, ValueError, MemoryError):
            raise error("cannot write %d bytes" % self.length)

    def __eq__(self, other):
        return isinstance(other, Padding) and self.length == other.length

    __hash__ = MetadataBlock.__hash__

    def __repr__(self):
        return "<%s (%d bytes)>" % (type(self).__name__, self.length)
class FLAC(mutagen.FileType):
    """A FLAC audio file.

    Attributes:

    * info -- stream information (length, bitrate, sample rate)
    * tags -- metadata tags, if any
    * cuesheet -- CueSheet object, if any
    * seektable -- SeekTable object, if any
    * pictures -- list of embedded pictures
    """

    _mimes = ["audio/x-flac", "application/x-flac"]

    METADATA_BLOCKS = [StreamInfo, Padding, None, SeekTable, VCFLACDict,
                       CueSheet, Picture]
    """Known metadata block types, indexed by ID."""

    @staticmethod
    def score(filename, fileobj, header_data):
        # Used by mutagen.File for type detection: magic bytes plus a
        # filename-extension bonus.
        return (header_data.startswith(b"fLaC") +
                endswith(filename.lower(), ".flac") * 3)

    def __read_metadata_block(self, fileobj):
        """Read one metadata block; return False when it was the last."""
        # Block header: 1 bit "last block" flag, 7 bits type code,
        # 24-bit big-endian payload length.
        byte = ord(fileobj.read(1))
        size = to_int_be(fileobj.read(3))
        code = byte & 0x7F
        last_block = bool(byte & 0x80)
        try:
            block_type = self.METADATA_BLOCKS[code] or MetadataBlock
        except IndexError:
            block_type = MetadataBlock

        if block_type._distrust_size:
            # Some jackass is writing broken Metadata block length
            # for Vorbis comment blocks, and the FLAC reference
            # implementation can parse them (mostly by accident),
            # so we have to too. Instead of parsing the size
            # given, parse an actual Vorbis comment, leaving
            # fileobj in the right position.
            # http://code.google.com/p/mutagen/issues/detail?id=52
            # ..same for the Picture block:
            # http://code.google.com/p/mutagen/issues/detail?id=106
            block = block_type(fileobj)
        else:
            data = fileobj.read(size)
            block = block_type(data)
        block.code = code

        # Remember the blocks that must be unique per file, and reject
        # duplicates.
        if block.code == VCFLACDict.code:
            if self.tags is None:
                self.tags = block
            else:
                raise FLACVorbisError("> 1 Vorbis comment block found")
        elif block.code == CueSheet.code:
            if self.cuesheet is None:
                self.cuesheet = block
            else:
                raise error("> 1 CueSheet block found")
        elif block.code == SeekTable.code:
            if self.seektable is None:
                self.seektable = block
            else:
                raise error("> 1 SeekTable block found")
        self.metadata_blocks.append(block)
        return not last_block

    def add_tags(self):
        """Add a Vorbis comment block to the file."""
        if self.tags is None:
            self.tags = VCFLACDict()
            self.metadata_blocks.append(self.tags)
        else:
            raise FLACVorbisError("a Vorbis comment already exists")

    add_vorbiscomment = add_tags

    def delete(self, filename=None):
        """Remove Vorbis comments from a file.

        If no filename is given, the one most recently loaded is used.
        """
        if filename is None:
            filename = self.filename
        for s in list(self.metadata_blocks):
            if isinstance(s, VCFLACDict):
                self.metadata_blocks.remove(s)
                self.tags = None
                self.save()
                break

    vc = property(lambda s: s.tags, doc="Alias for tags; don't use this.")

    def load(self, filename):
        """Load file information from a filename."""
        self.metadata_blocks = []
        self.tags = None
        self.cuesheet = None
        self.seektable = None
        self.filename = filename
        fileobj = StrictFileObject(open(filename, "rb"))
        try:
            self.__check_header(fileobj)
            while self.__read_metadata_block(fileobj):
                pass
        finally:
            fileobj.close()

        # The first block must be STREAMINFO (it carries .length).
        try:
            self.metadata_blocks[0].length
        except (AttributeError, IndexError):
            raise FLACNoHeaderError("Stream info block not found")

    @property
    def info(self):
        return self.metadata_blocks[0]

    def add_picture(self, picture):
        """Add a new picture to the file."""
        self.metadata_blocks.append(picture)

    def clear_pictures(self):
        """Delete all pictures from the file."""
        blocks = [b for b in self.metadata_blocks if b.code != Picture.code]
        self.metadata_blocks = blocks

    @property
    def pictures(self):
        """List of embedded pictures"""
        return [b for b in self.metadata_blocks if b.code == Picture.code]

    def save(self, filename=None, deleteid3=False):
        """Save metadata blocks to a file.

        If no filename is given, the one most recently loaded is used.
        """
        if filename is None:
            filename = self.filename
        f = open(filename, 'rb+')
        try:
            # Ensure we've got padding at the end, and only at the end.
            # If adding makes it too large, we'll scale it down later.
            self.metadata_blocks.append(Padding(b'\x00' * 1020))
            MetadataBlock.group_padding(self.metadata_blocks)

            header = self.__check_header(f)
            # "fLaC" and maybe ID3
            available = self.__find_audio_offset(f) - header
            data = MetadataBlock.writeblocks(self.metadata_blocks)

            # Delete ID3v2
            if deleteid3 and header > 4:
                available += header - 4
                header = 4

            if len(data) > available:
                # If we have too much data, see if we can reduce padding.
                padding = self.metadata_blocks[-1]
                newlength = padding.length - (len(data) - available)
                if newlength > 0:
                    padding.length = newlength
                    data = MetadataBlock.writeblocks(self.metadata_blocks)
                    assert len(data) == available
            elif len(data) < available:
                # If we have too little data, increase padding.
                self.metadata_blocks[-1].length += (available - len(data))
                data = MetadataBlock.writeblocks(self.metadata_blocks)
                assert len(data) == available

            if len(data) != available:
                # We couldn't reduce the padding enough.
                diff = (len(data) - available)
                insert_bytes(f, diff, header)

            f.seek(header - 4)
            f.write(b"fLaC" + data)

            # Delete ID3v1
            if deleteid3:
                try:
                    f.seek(-128, 2)
                except IOError:
                    pass
                else:
                    if f.read(3) == b"TAG":
                        f.seek(-128, 2)
                        f.truncate()
        finally:
            f.close()

    def __find_audio_offset(self, fileobj):
        """Skip past all metadata blocks; return the audio start offset."""
        byte = 0x00
        while not (byte & 0x80):
            byte = ord(fileobj.read(1))
            size = to_int_be(fileobj.read(3))
            try:
                block_type = self.METADATA_BLOCKS[byte & 0x7F]
            except IndexError:
                block_type = None
            if block_type and block_type._distrust_size:
                # See comments in read_metadata_block; the size can't
                # be trusted for Vorbis comment blocks and Picture block
                block_type(fileobj)
            else:
                fileobj.read(size)
        return fileobj.tell()

    def __check_header(self, fileobj):
        """Verify the stream magic, skipping a leading ID3v2 tag.

        Returns the offset just past the "fLaC" marker, or raises
        FLACNoHeaderError.
        """
        size = 4
        header = fileobj.read(4)
        if header != b"fLaC":
            size = None
            if header[:3] == b"ID3":
                # 10-byte ID3v2 header with a syncsafe size field.
                size = 14 + BitPaddedInt(fileobj.read(6)[2:])
                fileobj.seek(size - 4)
                if fileobj.read(4) != b"fLaC":
                    size = None
        if size is None:
            raise FLACNoHeaderError(
                "%r is not a valid FLAC file" % fileobj.name)
        return size
Open = FLAC  # Alias matching mutagen's per-module convention.


def delete(filename):
    """Remove tags from a file."""
    FLAC(filename).delete()

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

View file

@ -1,543 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright (C) 2005 Michael Urman
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of version 2 of the GNU General Public License as
# published by the Free Software Foundation.
import struct
from struct import unpack, pack
from warnings import warn
from .._compat import text_type, chr_, PY3, swap_to_string, string_types
from .._util import total_ordering, decode_terminated, enum
from ._util import ID3JunkFrameError, ID3Warning, BitPaddedInt
class Spec(object):
    """One named field within an ID3 frame.

    Subclasses define the wire format via read/write/validate.
    """

    def __init__(self, name):
        self.name = name

    def __hash__(self):
        raise TypeError("Spec objects are unhashable")

    def _validate23(self, frame, value, **kwargs):
        """Return a possibly modified value which, if written,
        results in valid id3v2.3 data.
        """
        return value

    def read(self, frame, value):
        # Subclasses return (parsed_value, leftover_data).
        raise NotImplementedError

    def write(self, frame, value):
        raise NotImplementedError

    def validate(self, frame, value):
        """Returns the validated data or raises ValueError/TypeError"""
        raise NotImplementedError
class ByteSpec(Spec):
    """A single raw byte, exposed as an int."""

    def read(self, frame, data):
        # bytearray indexing yields an int on both py2 and py3.
        return bytearray(data)[0], data[1:]

    def write(self, frame, value):
        return chr_(value)

    def validate(self, frame, value):
        if value is not None:
            chr_(value)  # raises if not a valid byte value
        return value
class IntegerSpec(Spec):
    """An integer of arbitrary width, 8 bits per byte (not syncsafe)."""

    def read(self, frame, data):
        # Consumes all remaining data.
        return int(BitPaddedInt(data, bits=8)), b''

    def write(self, frame, value):
        return BitPaddedInt.to_str(value, bits=8, width=-1)

    def validate(self, frame, value):
        return value
class SizedIntegerSpec(Spec):
    """A big-endian integer of a fixed byte width."""

    def __init__(self, name, size):
        self.name, self.__sz = name, size

    def read(self, frame, data):
        return int(BitPaddedInt(data[:self.__sz], bits=8)), data[self.__sz:]

    def write(self, frame, value):
        return BitPaddedInt.to_str(value, bits=8, width=self.__sz)

    def validate(self, frame, value):
        return value
@enum
class Encoding(object):
    """Text encoding byte values defined by the ID3v2 specification."""
    LATIN1 = 0
    UTF16 = 1
    UTF16BE = 2
    UTF8 = 3
class EncodingSpec(ByteSpec):
    """The encoding byte at the start of most text frames."""

    def read(self, frame, data):
        enc, data = super(EncodingSpec, self).read(frame, data)
        if enc < 16:
            return enc, data
        else:
            # Not an encoding byte at all: push it back onto the frame
            # data and default to latin-1.
            return 0, chr_(enc) + data

    def validate(self, frame, value):
        if value is None:
            return None
        if 0 <= value <= 3:
            return value
        raise ValueError('Invalid Encoding: %r' % value)

    def _validate23(self, frame, value, **kwargs):
        # only 0, 1 are valid in v2.3, default to utf-16
        return min(1, value)
class StringSpec(Spec):
    """A fixed size ASCII only payload."""

    def __init__(self, name, length):
        super(StringSpec, self).__init__(name)
        self.len = length

    def read(self, frame, data):
        chunk = data[:self.len]
        try:
            decoded = chunk.decode("ascii")
        except UnicodeDecodeError:
            raise ID3JunkFrameError("not ascii")
        else:
            # On py3 the parsed value is str; on py2 it stays bytes.
            if PY3:
                chunk = decoded
        return chunk, data[self.len:]

    def write(self, frame, value):
        if value is None:
            return b'\x00' * self.len
        if PY3:
            value = value.encode("ascii")
        # Pad with NULs and truncate to exactly the fixed length.
        return (bytes(value) + b'\x00' * self.len)[:self.len]

    def validate(self, frame, value):
        if value is None:
            return None
        if PY3:
            if not isinstance(value, str):
                raise TypeError("%s has to be str" % self.name)
            value.encode("ascii")
        else:
            if not isinstance(value, bytes):
                value = value.encode("ascii")
        if len(value) == self.len:
            return value
        raise ValueError('Invalid StringSpec[%d] data: %r' % (self.len, value))
class BinaryDataSpec(Spec):
    """An opaque binary payload consuming the rest of the frame data."""

    def read(self, frame, data):
        # Everything remaining belongs to this spec.
        return data, b''

    def write(self, frame, value):
        if value is None:
            return b""
        if isinstance(value, bytes):
            return value
        # Fall back to the ASCII rendering of the value.
        return text_type(value).encode("ascii")

    def validate(self, frame, value):
        if value is None:
            return None
        if isinstance(value, bytes):
            return value
        if PY3:
            raise TypeError("%s has to be bytes" % self.name)
        return text_type(value).encode("ascii")
class EncodedTextSpec(Spec):
    """A terminated text string in the frame's declared encoding."""

    # Okay, seriously. This is private and defined explicitly and
    # completely by the ID3 specification. You can't just add
    # encodings here however you want.
    _encodings = (
        ('latin1', b'\x00'),
        ('utf16', b'\x00\x00'),
        ('utf_16_be', b'\x00\x00'),
        ('utf8', b'\x00')
    )

    def read(self, frame, data):
        enc, term = self._encodings[frame.encoding]
        try:
            # allow missing termination
            return decode_terminated(data, enc, strict=False)
        except ValueError:
            # utf-16 termination with missing BOM, or single NULL
            if not data[:len(term)].strip(b"\x00"):
                return u"", data[len(term):]
            # utf-16 data with single NULL, see issue 169
            try:
                return decode_terminated(data + b"\x00", enc)
            except ValueError:
                raise ID3JunkFrameError

    def write(self, frame, value):
        enc, term = self._encodings[frame.encoding]
        return value.encode(enc) + term

    def validate(self, frame, value):
        return text_type(value)
class MultiSpec(Spec):
    """A repeating group of sub-specs, read until the data runs out.

    With a single sub-spec, values form a flat list; otherwise each
    value is a record (list) with one entry per sub-spec.
    """

    def __init__(self, name, *specs, **kw):
        super(MultiSpec, self).__init__(name)
        self.specs = specs
        self.sep = kw.get('sep')

    def read(self, frame, data):
        values = []
        while data:
            record = []
            for spec in self.specs:
                value, data = spec.read(frame, data)
                record.append(value)
            if len(self.specs) != 1:
                values.append(record)
            else:
                values.append(record[0])
        return values, data

    def write(self, frame, value):
        data = []
        if len(self.specs) == 1:
            for v in value:
                data.append(self.specs[0].write(frame, v))
        else:
            for record in value:
                for v, s in zip(record, self.specs):
                    data.append(s.write(frame, v))
        return b''.join(data)

    def validate(self, frame, value):
        if value is None:
            return []
        if self.sep and isinstance(value, string_types):
            value = value.split(self.sep)
        if isinstance(value, list):
            if len(self.specs) == 1:
                return [self.specs[0].validate(frame, v) for v in value]
            else:
                return [
                    [s.validate(frame, v) for (v, s) in zip(val, self.specs)]
                    for val in value]
        raise ValueError('Invalid MultiSpec data: %r' % value)

    def _validate23(self, frame, value, **kwargs):
        if len(self.specs) != 1:
            return [[s._validate23(frame, v, **kwargs)
                     for (v, s) in zip(val, self.specs)]
                    for val in value]

        spec = self.specs[0]

        # Merge single text spec multispecs only.
        # (TimeStampSpec being the exception, but it's not a valid v2.3 frame)
        if not isinstance(spec, EncodedTextSpec) or \
                isinstance(spec, TimeStampSpec):
            return value

        value = [spec._validate23(frame, v, **kwargs) for v in value]
        if kwargs.get("sep") is not None:
            return [spec.validate(frame, kwargs["sep"].join(value))]
        return value
class EncodedNumericTextSpec(EncodedTextSpec):
    # Marker subclass: behaves exactly like EncodedTextSpec.
    pass
class EncodedNumericPartTextSpec(EncodedTextSpec):
    # Marker subclass: behaves exactly like EncodedTextSpec.
    pass
class Latin1TextSpec(EncodedTextSpec):
    """A latin-1, NULL-terminated text value.

    Unlike EncodedTextSpec, the encoding is fixed and does not depend
    on frame.encoding.
    """

    def read(self, frame, data):
        """Decode up to the first NULL byte; return (text, leftover)."""
        if b'\x00' in data:
            data, ret = data.split(b'\x00', 1)
        else:
            ret = b''
        return data.decode('latin1'), ret

    def write(self, frame, value):
        # Fix: the first parameter was misleadingly named `data`; it
        # receives the frame, like every other Spec.write() here.
        return value.encode('latin1') + b'\x00'

    def validate(self, frame, value):
        return text_type(value)
@swap_to_string
@total_ordering
class ID3TimeStamp(object):
    """A time stamp in ID3v2 format.

    This is a restricted form of the ISO 8601 standard; time stamps
    take the form of:
        YYYY-MM-DD HH:MM:SS
    Or some partial form (YYYY-MM-DD HH, YYYY, etc.).

    The 'text' attribute contains the raw text data of the time stamp.
    """

    import re

    def __init__(self, text):
        if isinstance(text, ID3TimeStamp):
            text = text.text
        elif not isinstance(text, text_type):
            if PY3:
                raise TypeError("not a str")
            text = text.decode("utf-8")
        self.text = text

    __formats = ['%04d'] + ['%02d'] * 5
    __seps = ['-', '-', ' ', ':', ':', 'x']

    def get_text(self):
        parts = [self.year, self.month, self.day,
                 self.hour, self.minute, self.second]
        pieces = []
        for i, part in enumerate(parts):
            if part is None:
                # Stop at the first missing component (partial stamp).
                break
            pieces.append(self.__formats[i] % part + self.__seps[i])
        return u''.join(pieces)[:-1]

    # Fix: the pattern must be a raw string -- '\s' is an invalid escape
    # sequence in a plain string literal (DeprecationWarning since 3.6,
    # SyntaxWarning/error in newer Pythons).
    def set_text(self, text, splitre=re.compile(r'[-T:/.]|\s+')):
        year, month, day, hour, minute, second = \
            splitre.split(text + ':::::')[:6]
        for a in 'year month day hour minute second'.split():
            try:
                v = int(locals()[a])
            except ValueError:
                v = None
            setattr(self, a, v)

    text = property(get_text, set_text, doc="ID3v2.4 date and time.")

    def __str__(self):
        return self.text

    def __bytes__(self):
        return self.text.encode("utf-8")

    def __repr__(self):
        return repr(self.text)

    def __eq__(self, other):
        return self.text == other.text

    def __lt__(self, other):
        return self.text < other.text

    __hash__ = object.__hash__

    def encode(self, *args):
        return self.text.encode(*args)
class TimeStampSpec(EncodedTextSpec):
    """Text spec whose values are ID3TimeStamp objects."""

    def read(self, frame, data):
        text, rest = super(TimeStampSpec, self).read(frame, data)
        # Promote the decoded text to an ID3TimeStamp before returning.
        return self.validate(frame, text), rest

    def write(self, frame, data):
        serialized = data.text.replace(' ', 'T')
        return super(TimeStampSpec, self).write(frame, serialized)

    def validate(self, frame, value):
        try:
            return ID3TimeStamp(value)
        except TypeError:
            raise ValueError("Invalid ID3TimeStamp: %r" % value)
class ChannelSpec(ByteSpec):
    """A byte-sized channel value; constants name the nine channel kinds."""
    (OTHER, MASTER, FRONTRIGHT, FRONTLEFT, BACKRIGHT, BACKLEFT, FRONTCENTRE,
        BACKCENTRE, SUBWOOFER) = range(9)
class VolumeAdjustmentSpec(Spec):
    """A signed 16-bit volume adjustment, scaled by 1/512."""

    def read(self, frame, data):
        raw = unpack('>h', data[0:2])[0]
        return raw / 512.0, data[2:]

    def write(self, frame, value):
        number = int(round(value * 512))
        # struct.pack only range-checks on 2.7; enforce it manually (2.6).
        if not -32768 <= number <= 32767:
            raise struct.error
        return pack('>h', number)

    def validate(self, frame, value):
        if value is None:
            return value
        try:
            self.write(frame, value)
        except struct.error:
            raise ValueError("out of range")
        return value
class VolumePeakSpec(Spec):
    """A variable-width peak volume value, normalized to a float."""
    def read(self, frame, data):
        # http://bugs.xmms.org/attachment.cgi?id=113&action=view
        peak = 0
        data_array = bytearray(data)
        # First byte gives the number of bits used to store the peak.
        bits = data_array[0]
        # At most 4 bytes of peak data are read.
        vol_bytes = min(4, (bits + 7) >> 3)
        # not enough frame data
        if vol_bytes + 1 > len(data):
            raise ID3JunkFrameError
        # Align the value as if it had been stored in a full 32 bits.
        shift = ((8 - (bits & 7)) & 7) + (4 - vol_bytes) * 8
        for i in range(1, vol_bytes + 1):
            peak *= 256
            peak += data_array[i]
        peak *= 2 ** shift
        return (float(peak) / (2 ** 31 - 1)), data[1 + vol_bytes:]
    def write(self, frame, value):
        number = int(round(value * 32768))
        # pack only fails in 2.7, do it manually in 2.6
        if not 0 <= number <= 65535:
            raise struct.error
        # always write as 16 bits for sanity.
        return b"\x10" + pack('>H', number)
    def validate(self, frame, value):
        if value is not None:
            try:
                self.write(frame, value)
            except struct.error:
                raise ValueError("out of range")
        return value
class SynchronizedTextSpec(EncodedTextSpec):
    """A sequence of (text, time) pairs with encoded, terminated text."""

    def read(self, frame, data):
        encoding, term = self._encodings[frame.encoding]
        texts = []
        while data:
            try:
                value, data = decode_terminated(data, encoding)
            except ValueError:
                raise ID3JunkFrameError
            # Each text entry is followed by a 4-byte big-endian time.
            if len(data) < 4:
                raise ID3JunkFrameError
            timestamp = struct.unpack(">I", data[:4])[0]
            texts.append((value, timestamp))
            data = data[4:]
        return texts, b""

    def write(self, frame, value):
        encoding, term = self._encodings[frame.encoding]
        chunks = []
        for text, time in value:
            encoded = text.encode(encoding) + term
            chunks.append(encoded + struct.pack(">I", time))
        return b"".join(chunks)

    def validate(self, frame, value):
        return value
class KeyEventSpec(Spec):
    """A list of events, each stored as a '>bI' (type byte, time) record."""

    def read(self, frame, data):
        events = []
        # Consume complete 5-byte records; leave any remainder untouched.
        while len(data) >= 5:
            events.append(struct.unpack(">bI", data[:5]))
            data = data[5:]
        return events, data

    def write(self, frame, value):
        chunks = [struct.pack(">bI", *event) for event in value]
        return b"".join(chunks)

    def validate(self, frame, value):
        return value
class VolumeAdjustmentsSpec(Spec):
    """A sorted list of (frequency, adjustment) pairs.

    Not to be confused with VolumeAdjustmentSpec.
    """

    def read(self, frame, data):
        seen = {}
        while len(data) >= 4:
            freq, adj = struct.unpack(">Hh", data[:4])
            data = data[4:]
            # Stored values are scaled: frequency by 2, adjustment by 512.
            seen[freq / 2.0] = adj / 512.0
        return sorted(seen.items()), data

    def write(self, frame, value):
        value.sort()
        packed = [struct.pack(">Hh", int(freq * 2), int(adj * 512))
                  for (freq, adj) in value]
        return b"".join(packed)

    def validate(self, frame, value):
        return value
class ASPIIndexSpec(Spec):
    """An index table of frame.N integers, each frame.b bits wide."""
    def read(self, frame, data):
        if frame.b == 16:
            format = "H"
            size = 2
        elif frame.b == 8:
            format = "B"
            size = 1
        else:
            # Unknown bit width: warn and return no indexes.
            warn("invalid bit count in ASPI (%d)" % frame.b, ID3Warning)
            return [], data
        indexes = data[:frame.N * size]
        data = data[frame.N * size:]
        return list(struct.unpack(">" + format * frame.N, indexes)), data
    def write(self, frame, values):
        if frame.b == 16:
            format = "H"
        elif frame.b == 8:
            format = "B"
        else:
            raise ValueError("frame.b must be 8 or 16")
        return struct.pack(">" + format * frame.N, *values)
    def validate(self, frame, values):
        return values

View file

@ -1,165 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright (C) 2005 Michael Urman
# 2013 Christoph Reiter
# 2014 Ben Ockmore
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of version 2 of the GNU General Public License as
# published by the Free Software Foundation.
from .._compat import long_, integer_types
from .._util import MutagenError
class error(MutagenError):
    """Base class for all ID3 errors."""
    pass
class ID3NoHeaderError(error, ValueError):
    """The file has no ID3 header."""
    pass
class ID3BadUnsynchData(error, ValueError):
    """Invalid unsynchronized data."""
    pass
class ID3BadCompressedData(error, ValueError):
    """Invalid compressed frame data."""
    pass
class ID3TagError(error, ValueError):
    """Invalid or unwritable tag data."""
    pass
class ID3UnsupportedVersionError(error, NotImplementedError):
    """The ID3 version is not supported."""
    pass
class ID3EncryptionUnsupportedError(error, NotImplementedError):
    """Encrypted frames cannot be handled."""
    pass
class ID3JunkFrameError(error, ValueError):
    """Frame data could not be parsed and is treated as junk."""
    pass
class ID3Warning(error, UserWarning):
    """Non-fatal problem encountered while parsing."""
    pass
class unsynch(object):
    """Helpers for ID3 unsynchronization (0xFF 0x00 byte stuffing)."""
    @staticmethod
    def decode(value):
        """Remove unsynchronization: drop the 0x00 stuffed after each 0xFF."""
        fragments = bytearray(value).split(b'\xff')
        if len(fragments) > 1 and not fragments[-1]:
            # A trailing 0xFF is not allowed in unsynchronized data.
            raise ValueError('string ended unsafe')
        for f in fragments[1:]:
            if (not f) or (f[0] >= 0xE0):
                raise ValueError('invalid sync-safe string')
            if f[0] == 0x00:
                # Mutates the fragment in place before re-joining below.
                del f[0]
        return bytes(bytearray(b'\xff').join(fragments))
    @staticmethod
    def encode(value):
        """Apply unsynchronization: stuff 0x00 after unsafe 0xFF bytes."""
        fragments = bytearray(value).split(b'\xff')
        for f in fragments[1:]:
            if (not f) or (f[0] >= 0xE0) or (f[0] == 0x00):
                f.insert(0, 0x00)
        return bytes(bytearray(b'\xff').join(fragments))
class _BitPaddedMixin(object):
    """Shared serialization helpers for bit-padded ("sync-safe") integers."""
    def as_str(self, width=4, minwidth=4):
        return self.to_str(self, self.bits, self.bigendian, width, minwidth)
    @staticmethod
    def to_str(value, bits=7, bigendian=True, width=4, minwidth=4):
        """Serialize `value` using `bits` payload bits per byte.

        width -- fixed output size in bytes, or -1 to grow as needed
        minwidth -- minimum output size when growing (width == -1)
        """
        mask = (1 << bits) - 1
        if width != -1:
            index = 0
            bytes_ = bytearray(width)
            try:
                while value:
                    bytes_[index] = value & mask
                    value >>= bits
                    index += 1
            except IndexError:
                raise ValueError('Value too wide (>%d bytes)' % width)
        else:
            # PCNT and POPM use growing integers
            # of at least 4 bytes (=minwidth) as counters.
            bytes_ = bytearray()
            append = bytes_.append
            while value:
                append(value & mask)
                value >>= bits
            bytes_ = bytes_.ljust(minwidth, b"\x00")
        if bigendian:
            bytes_.reverse()
        return bytes(bytes_)
    @staticmethod
    def has_valid_padding(value, bits=7):
        """Whether the padding bits are all zero"""
        assert bits <= 8
        # Mask selecting the top (8 - bits) padding bits of each byte.
        mask = (((1 << (8 - bits)) - 1) << bits)
        if isinstance(value, integer_types):
            while value:
                if value & mask:
                    return False
                value >>= 8
        elif isinstance(value, bytes):
            for byte in bytearray(value):
                if byte & mask:
                    return False
        else:
            raise TypeError
        return True
class BitPaddedInt(int, _BitPaddedMixin):
    """An int parsed from bytes (or an int) with `bits` payload bits per byte."""
    def __new__(cls, value, bits=7, bigendian=True):
        mask = (1 << (bits)) - 1
        numeric_value = 0
        shift = 0
        if isinstance(value, integer_types):
            # Take `bits` bits out of each byte of a plain integer.
            while value:
                numeric_value += (value & mask) << shift
                value >>= 8
                shift += bits
        elif isinstance(value, bytes):
            if bigendian:
                # Process least-significant byte first.
                value = reversed(value)
            for byte in bytearray(value):
                numeric_value += (byte & mask) << shift
                shift += bits
        else:
            raise TypeError
        if isinstance(numeric_value, int):
            self = int.__new__(BitPaddedInt, numeric_value)
        else:
            # Python 2: large values fall through to the long variant.
            self = long_.__new__(BitPaddedLong, numeric_value)
        self.bits = bits
        self.bigendian = bigendian
        return self
class BitPaddedLong(long_, _BitPaddedMixin):
    # Python 2 `long` counterpart of BitPaddedInt; `long_` presumably
    # aliases int on Python 3 -- see .._compat.
    pass

View file

@ -1,545 +0,0 @@
# Copyright 2006 Joe Wreschnig
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
import sys
# Guard: this legacy module only works under Python 2.
if sys.version_info[0] != 2:
    raise ImportError("No longer available with Python 3, use mutagen.mp4")
"""Read and write MPEG-4 audio files with iTunes metadata.
This module will read MPEG-4 audio information and metadata,
as found in Apple's M4A (aka MP4, M4B, M4P) files.
There is no official specification for this format. The source code
for TagLib, FAAD, and various MPEG specifications at
http://developer.apple.com/documentation/QuickTime/QTFF/,
http://www.geocities.com/xhelmboyx/quicktime/formats/mp4-layout.txt,
and http://wiki.multimedia.cx/index.php?title=Apple_QuickTime were all
consulted.
This module does not support 64 bit atom sizes, and so will not
work on metadata over 4GB.
"""
import struct
import sys
from cStringIO import StringIO
from ._compat import reraise
from mutagen import FileType, Metadata, StreamInfo
from mutagen._constants import GENRES
from mutagen._util import cdata, insert_bytes, delete_bytes, DictProxy, \
MutagenError
class error(IOError, MutagenError):
    """Base error for this module."""
    pass
class M4AMetadataError(error):
    """Problem reading or writing M4A tags."""
    pass
class M4AStreamInfoError(error):
    """Problem reading the M4A stream information."""
    pass
class M4AMetadataValueError(ValueError, M4AMetadataError):
    """A tag value is out of range or malformed."""
    pass
import warnings
warnings.warn(
    "mutagen.m4a is deprecated; use mutagen.mp4 instead.", DeprecationWarning)
# This is not an exhaustive list of container atoms, but just the
# ones this module needs to peek inside.
_CONTAINERS = ["moov", "udta", "trak", "mdia", "meta", "ilst",
               "stbl", "minf", "stsd"]
# Extra bytes to skip before reading a container's children
# (consumed in Atom.__init__).
_SKIP_SIZE = {"meta": 4}
__all__ = ['M4A', 'Open', 'delete', 'M4ACover']
class M4ACover(str):
    """A cover artwork.

    Attributes:
    imageformat -- format of the image (either FORMAT_JPEG or FORMAT_PNG)
    """
    FORMAT_JPEG = 0x0D
    FORMAT_PNG = 0x0E
    def __new__(cls, data, imageformat=None):
        # Python 2 only: the image data is the str payload itself.
        self = str.__new__(cls, data)
        if imageformat is None:
            imageformat = M4ACover.FORMAT_JPEG
        self.imageformat = imageformat
        try:
            self.format
        except AttributeError:
            # Keep a .format alias in sync -- presumably for backward
            # compatibility; confirm against older callers.
            self.format = imageformat
        return self
class Atom(object):
    """An individual atom.

    Attributes:
    children -- list child atoms (or None for non-container atoms)
    length -- length of this atom, including length and name
    name -- four byte name of the atom, as a str
    offset -- location in the constructor-given fileobj of this atom

    This structure should only be used internally by Mutagen.
    """
    children = None
    def __init__(self, fileobj):
        self.offset = fileobj.tell()
        self.length, self.name = struct.unpack(">I4s", fileobj.read(8))
        if self.length == 1:
            raise error("64 bit atom sizes are not supported")
        elif self.length < 8:
            # Header alone is 8 bytes; a shorter length is bogus data.
            return
        if self.name in _CONTAINERS:
            self.children = []
            # Some containers carry extra header bytes before children.
            fileobj.seek(_SKIP_SIZE.get(self.name, 0), 1)
            while fileobj.tell() < self.offset + self.length:
                self.children.append(Atom(fileobj))
        else:
            # Skip over this atom's payload entirely.
            fileobj.seek(self.offset + self.length, 0)
    @staticmethod
    def render(name, data):
        """Render raw atom data."""
        # this raises OverflowError if Py_ssize_t can't handle the atom data
        size = len(data) + 8
        if size <= 0xFFFFFFFF:
            return struct.pack(">I4s", size, name) + data
        else:
            return struct.pack(">I4sQ", 1, name, size + 8) + data
    def __getitem__(self, remaining):
        """Look up a child atom, potentially recursively.

        e.g. atom['udta', 'meta'] => <Atom name='meta' ...>
        """
        if not remaining:
            return self
        elif self.children is None:
            raise KeyError("%r is not a container" % self.name)
        for child in self.children:
            if child.name == remaining[0]:
                return child[remaining[1:]]
        else:
            raise KeyError("%r not found" % remaining[0])
    def __repr__(self):
        klass = self.__class__.__name__
        if self.children is None:
            return "<%s name=%r length=%r offset=%r>" % (
                klass, self.name, self.length, self.offset)
        else:
            children = "\n".join([" " + line for child in self.children
                                  for line in repr(child).splitlines()])
            return "<%s name=%r length=%r offset=%r\n%s>" % (
                klass, self.name, self.length, self.offset, children)
class Atoms(object):
    """Root atoms in a given file.

    Attributes:
    atoms -- a list of top-level atoms as Atom objects

    This structure should only be used internally by Mutagen.
    """
    def __init__(self, fileobj):
        self.atoms = []
        fileobj.seek(0, 2)
        end = fileobj.tell()
        fileobj.seek(0)
        while fileobj.tell() < end:
            self.atoms.append(Atom(fileobj))
    def path(self, *names):
        """Look up and return the complete path of an atom.

        For example, atoms.path('moov', 'udta', 'meta') will return a
        list of three atoms, corresponding to the moov, udta, and meta
        atoms.
        """
        path = [self]
        for name in names:
            path.append(path[-1][name, ])
        return path[1:]
    def __getitem__(self, names):
        """Look up a child atom.

        'names' may be a list of atoms (['moov', 'udta']) or a string
        specifying the complete path ('moov.udta').
        """
        if isinstance(names, basestring):
            names = names.split(".")
        for child in self.atoms:
            if child.name == names[0]:
                return child[names[1:]]
        else:
            raise KeyError("%s not found" % names[0])
    def __repr__(self):
        return "\n".join([repr(child) for child in self.atoms])
class M4ATags(DictProxy, Metadata):
    """Dictionary containing Apple iTunes metadata list key/values.

    Keys are four byte identifiers, except for freeform ('----')
    keys. Values are usually unicode strings, but some atoms have a
    special structure:

    cpil -- boolean
    trkn, disk -- tuple of 16 bit ints (current, total)
    tmpo -- 16 bit int
    covr -- list of M4ACover objects (which are tagged strs)
    gnre -- not supported. Use '\\xa9gen' instead.

    The freeform '----' frames use a key in the format '----:mean:name'
    where 'mean' is usually 'com.apple.iTunes' and 'name' is a unique
    identifier for this frame. The value is a str, but is probably
    text that can be decoded as UTF-8.

    M4A tag data cannot exist outside of the structure of an M4A file,
    so this class should not be manually instantiated.

    Unknown non-text tags are removed.
    """
    # NOTE: Python 2 only module (see the version guard at the top).
    def load(self, atoms, fileobj):
        try:
            ilst = atoms["moov.udta.meta.ilst"]
        except KeyError as key:
            raise M4AMetadataError(key)
        for atom in ilst.children:
            fileobj.seek(atom.offset + 8)
            data = fileobj.read(atom.length - 8)
            # Unknown atom names fall back to the plain text parser.
            parse = self.__atoms.get(atom.name, (M4ATags.__parse_text,))[0]
            parse(self, atom, data)
    @staticmethod
    def __key_sort(item1, item2):
        (key1, v1) = item1
        (key2, v2) = item2
        # iTunes always writes the tags in order of "relevance", try
        # to copy it as closely as possible.
        order = ["\xa9nam", "\xa9ART", "\xa9wrt", "\xa9alb",
                 "\xa9gen", "gnre", "trkn", "disk",
                 "\xa9day", "cpil", "tmpo", "\xa9too",
                 "----", "covr", "\xa9lyr"]
        order = dict(zip(order, range(len(order))))
        last = len(order)
        # If there's no key-based way to distinguish, order by length.
        # If there's still no way, go by string comparison on the
        # values, so we at least have something determinstic.
        return (cmp(order.get(key1[:4], last), order.get(key2[:4], last)) or
                cmp(len(v1), len(v2)) or cmp(v1, v2))
    def save(self, filename):
        """Save the metadata to the given filename."""
        values = []
        items = self.items()
        items.sort(self.__key_sort)
        for key, value in items:
            render = self.__atoms.get(
                key[:4], (None, M4ATags.__render_text))[1]
            values.append(render(self, key, value))
        data = Atom.render("ilst", "".join(values))
        # Find the old atoms.
        fileobj = open(filename, "rb+")
        try:
            atoms = Atoms(fileobj)
            moov = atoms["moov"]
            if moov != atoms.atoms[-1]:
                # "Free" the old moov block. Something in the mdat
                # block is not happy when its offset changes and it
                # won't play back. So, rather than try to figure that
                # out, just move the moov atom to the end of the file.
                offset = self.__move_moov(fileobj, moov)
            else:
                offset = 0
            try:
                path = atoms.path("moov", "udta", "meta", "ilst")
            except KeyError:
                self.__save_new(fileobj, atoms, data, offset)
            else:
                self.__save_existing(fileobj, atoms, path, data, offset)
        finally:
            fileobj.close()
    def __move_moov(self, fileobj, moov):
        # Copy moov to the end of the file and replace it with 'free'.
        fileobj.seek(moov.offset)
        data = fileobj.read(moov.length)
        fileobj.seek(moov.offset)
        free = Atom.render("free", "\x00" * (moov.length - 8))
        fileobj.write(free)
        fileobj.seek(0, 2)
        # Figure out how far we have to shift all our successive
        # seek calls, relative to what the atoms say.
        old_end = fileobj.tell()
        fileobj.write(data)
        return old_end - moov.offset
    def __save_new(self, fileobj, atoms, ilst, offset):
        # Create a brand-new meta/hdlr/ilst chain under moov.udta.
        hdlr = Atom.render("hdlr", "\x00" * 8 + "mdirappl" + "\x00" * 9)
        meta = Atom.render("meta", "\x00\x00\x00\x00" + hdlr + ilst)
        moov, udta = atoms.path("moov", "udta")
        insert_bytes(fileobj, len(meta), udta.offset + offset + 8)
        fileobj.seek(udta.offset + offset + 8)
        fileobj.write(meta)
        self.__update_parents(fileobj, [moov, udta], len(meta), offset)
    def __save_existing(self, fileobj, atoms, path, data, offset):
        # Replace the old ilst atom.
        ilst = path.pop()
        delta = len(data) - ilst.length
        fileobj.seek(ilst.offset + offset)
        if delta > 0:
            insert_bytes(fileobj, delta, ilst.offset + offset)
        elif delta < 0:
            delete_bytes(fileobj, -delta, ilst.offset + offset)
        fileobj.seek(ilst.offset + offset)
        fileobj.write(data)
        self.__update_parents(fileobj, path, delta, offset)
    def __update_parents(self, fileobj, path, delta, offset):
        # Update all parent atoms with the new size.
        for atom in path:
            fileobj.seek(atom.offset + offset)
            size = cdata.uint_be(fileobj.read(4)) + delta
            fileobj.seek(atom.offset + offset)
            fileobj.write(cdata.to_uint_be(size))
    def __render_data(self, key, flags, data):
        data = struct.pack(">2I", flags, 0) + data
        return Atom.render(key, Atom.render("data", data))
    def __parse_freeform(self, atom, data):
        try:
            fileobj = StringIO(data)
            mean_length = cdata.uint_be(fileobj.read(4))
            # skip over 8 bytes of atom name, flags
            mean = fileobj.read(mean_length - 4)[8:]
            name_length = cdata.uint_be(fileobj.read(4))
            name = fileobj.read(name_length - 4)[8:]
            value_length = cdata.uint_be(fileobj.read(4))
            # Name, flags, and reserved bytes
            value = fileobj.read(value_length - 4)[12:]
        except struct.error:
            # Some ---- atoms have no data atom, I have no clue why
            # they actually end up in the file.
            pass
        else:
            self["%s:%s:%s" % (atom.name, mean, name)] = value
    def __render_freeform(self, key, value):
        dummy, mean, name = key.split(":", 2)
        mean = struct.pack(">I4sI", len(mean) + 12, "mean", 0) + mean
        name = struct.pack(">I4sI", len(name) + 12, "name", 0) + name
        value = struct.pack(">I4s2I", len(value) + 16, "data", 0x1, 0) + value
        final = mean + name + value
        return Atom.render("----", final)
    def __parse_pair(self, atom, data):
        self[atom.name] = struct.unpack(">2H", data[18:22])
    def __render_pair(self, key, value):
        track, total = value
        if 0 <= track < 1 << 16 and 0 <= total < 1 << 16:
            data = struct.pack(">4H", 0, track, total, 0)
            return self.__render_data(key, 0, data)
        else:
            raise M4AMetadataValueError("invalid numeric pair %r" % (value,))
    def __render_pair_no_trailing(self, key, value):
        track, total = value
        if 0 <= track < 1 << 16 and 0 <= total < 1 << 16:
            data = struct.pack(">3H", 0, track, total)
            return self.__render_data(key, 0, data)
        else:
            raise M4AMetadataValueError("invalid numeric pair %r" % (value,))
    def __parse_genre(self, atom, data):
        # Translate to a freeform genre.
        genre = cdata.short_be(data[16:18])
        if "\xa9gen" not in self:
            try:
                self["\xa9gen"] = GENRES[genre - 1]
            except IndexError:
                pass
    def __parse_tempo(self, atom, data):
        self[atom.name] = cdata.short_be(data[16:18])
    def __render_tempo(self, key, value):
        if 0 <= value < 1 << 16:
            return self.__render_data(key, 0x15, cdata.to_ushort_be(value))
        else:
            raise M4AMetadataValueError("invalid short integer %r" % value)
    def __parse_compilation(self, atom, data):
        try:
            self[atom.name] = bool(ord(data[16:17]))
        except TypeError:
            # Empty payload: treat as "not a compilation".
            self[atom.name] = False
    def __render_compilation(self, key, value):
        return self.__render_data(key, 0x15, chr(bool(value)))
    def __parse_cover(self, atom, data):
        length, name, imageformat = struct.unpack(">I4sI", data[:12])
        if name != "data":
            raise M4AMetadataError(
                "unexpected atom %r inside 'covr'" % name)
        if imageformat not in (M4ACover.FORMAT_JPEG, M4ACover.FORMAT_PNG):
            imageformat = M4ACover.FORMAT_JPEG
        self[atom.name] = M4ACover(data[16:length], imageformat)
    def __render_cover(self, key, value):
        try:
            imageformat = value.imageformat
        except AttributeError:
            imageformat = M4ACover.FORMAT_JPEG
        data = Atom.render("data", struct.pack(">2I", imageformat, 0) + value)
        return Atom.render(key, data)
    def __parse_text(self, atom, data):
        flags = cdata.uint_be(data[8:12])
        # Only flag value 1 marks UTF-8 text; anything else is dropped.
        if flags == 1:
            self[atom.name] = data[16:].decode('utf-8', 'replace')
    def __render_text(self, key, value):
        return self.__render_data(key, 0x1, value.encode('utf-8'))
    def delete(self, filename):
        """Remove all tags by saving an empty tag list."""
        self.clear()
        self.save(filename)
    # Maps atom name -> (parser, renderer); None means unsupported.
    __atoms = {
        "----": (__parse_freeform, __render_freeform),
        "trkn": (__parse_pair, __render_pair),
        "disk": (__parse_pair, __render_pair_no_trailing),
        "gnre": (__parse_genre, None),
        "tmpo": (__parse_tempo, __render_tempo),
        "cpil": (__parse_compilation, __render_compilation),
        "covr": (__parse_cover, __render_cover),
    }
    def pprint(self):
        values = []
        for key, value in self.iteritems():
            key = key.decode('latin1')
            try:
                values.append("%s=%s" % (key, value))
            except UnicodeDecodeError:
                values.append("%s=[%d bytes of data]" % (key, len(value)))
        return "\n".join(values)
class M4AInfo(StreamInfo):
    """MPEG-4 stream information.

    Attributes:
    bitrate -- bitrate in bits per second, as an int
    length -- file length in seconds, as a float
    """
    bitrate = 0
    def __init__(self, atoms, fileobj):
        hdlr = atoms["moov.trak.mdia.hdlr"]
        fileobj.seek(hdlr.offset)
        if "soun" not in fileobj.read(hdlr.length):
            raise M4AStreamInfoError("track has no audio data")
        mdhd = atoms["moov.trak.mdia.mdhd"]
        fileobj.seek(mdhd.offset)
        data = fileobj.read(mdhd.length)
        # data[8] selects 32-bit vs 64-bit time fields -- presumably the
        # mdhd version byte; confirm against the QuickTime format spec.
        if ord(data[8]) == 0:
            offset = 20
            fmt = ">2I"
        else:
            offset = 28
            fmt = ">IQ"
        end = offset + struct.calcsize(fmt)
        unit, length = struct.unpack(fmt, data[offset:end])
        self.length = float(length) / unit
        try:
            atom = atoms["moov.trak.mdia.minf.stbl.stsd"]
            fileobj.seek(atom.offset)
            data = fileobj.read(atom.length)
            self.bitrate = cdata.uint_be(data[-17:-13])
        except (ValueError, KeyError):
            # Bitrate values are optional.
            pass
    def pprint(self):
        return "MPEG-4 audio, %.2f seconds, %d bps" % (
            self.length, self.bitrate)
class M4A(FileType):
    """An MPEG-4 audio file, probably containing AAC.

    If more than one track is present in the file, the first is used.
    Only audio ('soun') tracks will be read.
    """
    _mimes = ["audio/mp4", "audio/x-m4a", "audio/mpeg4", "audio/aac"]
    def load(self, filename):
        self.filename = filename
        fileobj = open(filename, "rb")
        try:
            atoms = Atoms(fileobj)
            try:
                self.info = M4AInfo(atoms, fileobj)
            # StandardError is Python 2 only (module is guarded above).
            except StandardError as err:
                reraise(M4AStreamInfoError, err, sys.exc_info()[2])
            try:
                self.tags = M4ATags(atoms, fileobj)
            except M4AMetadataError:
                # Missing tags are fine; leave them unset.
                self.tags = None
            except StandardError as err:
                reraise(M4AMetadataError, err, sys.exc_info()[2])
        finally:
            fileobj.close()
    def add_tags(self):
        self.tags = M4ATags()
    @staticmethod
    def score(filename, fileobj, header):
        # Score purely by header content.
        return ("ftyp" in header) + ("mp4" in header)
# Alias kept for the conventional module-level Open() entry point.
Open = M4A
def delete(filename):
    """Remove tags from a file."""
    M4A(filename).delete()

View file

@ -1,86 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright (C) 2006 Lukas Lalinsky
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
"""Monkey's Audio streams with APEv2 tags.
Monkey's Audio is a very efficient lossless audio compressor developed
by Matt Ashland.
For more information, see http://www.monkeysaudio.com/.
"""
__all__ = ["MonkeysAudio", "Open", "delete"]
import struct
from ._compat import endswith
from mutagen import StreamInfo
from mutagen.apev2 import APEv2File, error, delete
from mutagen._util import cdata
class MonkeysAudioHeaderError(error):
    """Raised when the 'MAC ' file header is missing or malformed."""
    pass
class MonkeysAudioInfo(StreamInfo):
    """Monkey's Audio stream information.

    Attributes:
    * channels -- number of audio channels
    * length -- file length in seconds, as a float
    * sample_rate -- audio sampling rate in Hz
    * bits_per_sample -- bits per sample
    * version -- Monkey's Audio stream version, as a float (eg: 3.99)
    """
    def __init__(self, fileobj):
        header = fileobj.read(76)
        if len(header) != 76 or not header.startswith(b"MAC "):
            raise MonkeysAudioHeaderError("not a Monkey's Audio file")
        self.version = cdata.ushort_le(header[4:6])
        if self.version >= 3980:
            # Newer (>= 3.98) headers store all fields in one block.
            (blocks_per_frame, final_frame_blocks, total_frames,
             self.bits_per_sample, self.channels,
             self.sample_rate) = struct.unpack("<IIIHHI", header[56:76])
        else:
            compression_level = cdata.ushort_le(header[6:8])
            self.channels, self.sample_rate = struct.unpack(
                "<HI", header[10:16])
            total_frames, final_frame_blocks = struct.unpack(
                "<II", header[24:32])
            # Blocks per frame depends on the stream version (and, for
            # 3.80-3.89, on the compression level).
            if self.version >= 3950:
                blocks_per_frame = 73728 * 4
            elif self.version >= 3900 or (self.version >= 3800 and
                                          compression_level == 4):
                blocks_per_frame = 73728
            else:
                blocks_per_frame = 9216
        # Header stores the version as an int, e.g. 3990 -> 3.99.
        self.version /= 1000.0
        self.length = 0.0
        if (self.sample_rate != 0) and (total_frames > 0):
            total_blocks = ((total_frames - 1) * blocks_per_frame +
                            final_frame_blocks)
            self.length = float(total_blocks) / self.sample_rate
    def pprint(self):
        return "Monkey's Audio %.2f, %.2f seconds, %d Hz" % (
            self.version, self.length, self.sample_rate)
class MonkeysAudio(APEv2File):
    """A Monkey's Audio file with APEv2 tags."""
    _Info = MonkeysAudioInfo
    _mimes = ["audio/ape", "audio/x-ape"]
    @staticmethod
    def score(filename, fileobj, header):
        # Header magic and file extension each contribute one point.
        return header.startswith(b"MAC ") + endswith(filename.lower(), ".ape")
Open = MonkeysAudio

View file

@ -1,289 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright (C) 2006 Joe Wreschnig
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of version 2 of the GNU General Public License as
# published by the Free Software Foundation.
"""MPEG audio stream information and tags."""
import os
import struct
from ._compat import endswith
from mutagen import StreamInfo
from mutagen._util import MutagenError
from mutagen.id3 import ID3FileType, BitPaddedInt, delete
__all__ = ["MP3", "Open", "delete", "MP3"]
class error(RuntimeError, MutagenError):
    """Base MP3 error."""
    pass
class HeaderNotFoundError(error, IOError):
    """No MPEG frame header could be located."""
    pass
class InvalidMPEGHeader(error, IOError):
    """An MPEG header is present but invalid."""
    pass
# Mode values.
STEREO, JOINTSTEREO, DUALCHANNEL, MONO = range(4)
class MPEGInfo(StreamInfo):
"""MPEG audio stream information
Parse information about an MPEG audio file. This also reads the
Xing VBR header format.
This code was implemented based on the format documentation at
http://mpgedit.org/mpgedit/mpeg_format/mpeghdr.htm.
Useful attributes:
* length -- audio length, in seconds
* bitrate -- audio bitrate, in bits per second
* sketchy -- if true, the file may not be valid MPEG audio
Useless attributes:
* version -- MPEG version (1, 2, 2.5)
* layer -- 1, 2, or 3
* mode -- One of STEREO, JOINTSTEREO, DUALCHANNEL, or MONO (0-3)
* protected -- whether or not the file is "protected"
* padding -- whether or not audio frames are padded
* sample_rate -- audio sample rate, in Hz
"""
# Map (version, layer) tuples to bitrates.
__BITRATE = {
(1, 1): [0, 32, 64, 96, 128, 160, 192, 224,
256, 288, 320, 352, 384, 416, 448],
(1, 2): [0, 32, 48, 56, 64, 80, 96, 112, 128,
160, 192, 224, 256, 320, 384],
(1, 3): [0, 32, 40, 48, 56, 64, 80, 96, 112,
128, 160, 192, 224, 256, 320],
(2, 1): [0, 32, 48, 56, 64, 80, 96, 112, 128,
144, 160, 176, 192, 224, 256],
(2, 2): [0, 8, 16, 24, 32, 40, 48, 56, 64,
80, 96, 112, 128, 144, 160],
}
__BITRATE[(2, 3)] = __BITRATE[(2, 2)]
for i in range(1, 4):
__BITRATE[(2.5, i)] = __BITRATE[(2, i)]
# Map version to sample rates.
__RATES = {
1: [44100, 48000, 32000],
2: [22050, 24000, 16000],
2.5: [11025, 12000, 8000]
}
sketchy = False
def __init__(self, fileobj, offset=None):
"""Parse MPEG stream information from a file-like object.
If an offset argument is given, it is used to start looking
for stream information and Xing headers; otherwise, ID3v2 tags
will be skipped automatically. A correct offset can make
loading files significantly faster.
"""
try:
size = os.path.getsize(fileobj.name)
except (IOError, OSError, AttributeError):
fileobj.seek(0, 2)
size = fileobj.tell()
# If we don't get an offset, try to skip an ID3v2 tag.
if offset is None:
fileobj.seek(0, 0)
idata = fileobj.read(10)
try:
id3, insize = struct.unpack('>3sxxx4s', idata)
except struct.error:
id3, insize = '', 0
insize = BitPaddedInt(insize)
if id3 == b'ID3' and insize > 0:
offset = insize + 10
else:
offset = 0
# Try to find two valid headers (meaning, very likely MPEG data)
# at the given offset, 30% through the file, 60% through the file,
# and 90% through the file.
for i in [offset, 0.3 * size, 0.6 * size, 0.9 * size]:
try:
self.__try(fileobj, int(i), size - offset)
except error:
pass
else:
break
# If we can't find any two consecutive frames, try to find just
# one frame back at the original offset given.
else:
self.__try(fileobj, offset, size - offset, False)
self.sketchy = True
def __try(self, fileobj, offset, real_size, check_second=True):
    """Parse one MPEG frame header at/after *offset* and fill in the
    stream attributes (version, layer, bitrate, sample_rate, length...).

    Raises HeaderNotFoundError if no plausible frame sync is found, or
    (when check_second is true) if no second frame follows the first.
    """
    # This is going to be one really long function; bear with it,
    # because there's not really a sane point to cut it up.
    fileobj.seek(offset, 0)

    # We "know" we have an MPEG file if we find two frames that look like
    # valid MPEG data. If we can't find them in 32k of reads, something
    # is horribly wrong (the longest frame can only be about 4k). This
    # is assuming the offset didn't lie.
    data = fileobj.read(32768)

    frame_1 = data.find(b"\xff")
    while 0 <= frame_1 <= (len(data) - 4):
        frame_data = struct.unpack(">I", data[frame_1:frame_1 + 4])[0]
        # A frame sync is 11 set bits; otherwise resync on the next 0xff.
        if ((frame_data >> 16) & 0xE0) != 0xE0:
            frame_1 = data.find(b"\xff", frame_1 + 2)
        else:
            # Unpack the header bit fields.
            version = (frame_data >> 19) & 0x3
            layer = (frame_data >> 17) & 0x3
            protection = (frame_data >> 16) & 0x1
            bitrate = (frame_data >> 12) & 0xF
            sample_rate = (frame_data >> 10) & 0x3
            padding = (frame_data >> 9) & 0x1
            # private = (frame_data >> 8) & 0x1
            self.mode = (frame_data >> 6) & 0x3
            # mode_extension = (frame_data >> 4) & 0x3
            # copyright = (frame_data >> 3) & 0x1
            # original = (frame_data >> 2) & 0x1
            # emphasis = (frame_data >> 0) & 0x3
            # Reject headers whose fields hold reserved/invalid values.
            if (version == 1 or layer == 0 or sample_rate == 0x3 or
                    bitrate == 0 or bitrate == 0xF):
                frame_1 = data.find(b"\xff", frame_1 + 2)
            else:
                break
    else:
        raise HeaderNotFoundError("can't sync to an MPEG frame")

    # There is a serious problem here, which is that many flags
    # in an MPEG header are backwards.
    self.version = [2.5, None, 2, 1][version]
    self.layer = 4 - layer
    self.protected = not protection
    self.padding = bool(padding)

    self.bitrate = self.__BITRATE[(self.version, self.layer)][bitrate]
    self.bitrate *= 1000
    self.sample_rate = self.__RATES[self.version][sample_rate]

    # Frame length/size depend on layer and version.
    if self.layer == 1:
        frame_length = (
            (12 * self.bitrate // self.sample_rate) + padding) * 4
        frame_size = 384
    elif self.version >= 2 and self.layer == 3:
        frame_length = (72 * self.bitrate // self.sample_rate) + padding
        frame_size = 576
    else:
        frame_length = (144 * self.bitrate // self.sample_rate) + padding
        frame_size = 1152

    if check_second:
        # The next frame should begin exactly frame_length bytes on.
        possible = int(frame_1 + frame_length)
        if possible > len(data) + 4:
            raise HeaderNotFoundError("can't sync to second MPEG frame")
        try:
            frame_data = struct.unpack(
                ">H", data[possible:possible + 2])[0]
        except struct.error:
            raise HeaderNotFoundError("can't sync to second MPEG frame")
        if (frame_data & 0xFFE0) != 0xFFE0:
            raise HeaderNotFoundError("can't sync to second MPEG frame")

    # Rough CBR length estimate; may be replaced by Xing/VBRI data below.
    self.length = 8 * real_size / float(self.bitrate)

    # Try to find/parse the Xing header, which trumps the above length
    # and bitrate calculation.
    fileobj.seek(offset, 0)
    data = fileobj.read(32768)
    try:
        xing = data[:-4].index(b"Xing")
    except ValueError:
        # Try to find/parse the VBRI header, which trumps the above length
        # calculation.
        try:
            vbri = data[:-24].index(b"VBRI")
        except ValueError:
            pass
        else:
            # If a VBRI header was found, this is definitely MPEG audio.
            self.sketchy = False
            vbri_version = struct.unpack('>H', data[vbri + 4:vbri + 6])[0]
            if vbri_version == 1:
                frame_count = struct.unpack(
                    '>I', data[vbri + 14:vbri + 18])[0]
                samples = float(frame_size * frame_count)
                self.length = (samples / self.sample_rate) or self.length
    else:
        # If a Xing header was found, this is definitely MPEG audio.
        self.sketchy = False
        flags = struct.unpack('>I', data[xing + 4:xing + 8])[0]
        # Flag 0x1: frame count present -> derive the real duration.
        if flags & 0x1:
            frame_count = struct.unpack('>I', data[xing + 8:xing + 12])[0]
            samples = float(frame_size * frame_count)
            self.length = (samples / self.sample_rate) or self.length
        # Flag 0x2: total byte count present -> derive average bitrate.
        if flags & 0x2:
            bitrate_data = struct.unpack(
                '>I', data[xing + 12:xing + 16])[0]
            self.bitrate = int((bitrate_data * 8) // self.length)
def pprint(self):
    """Return a one-line, human-readable summary of the stream."""
    suffix = " (sketchy)" if self.sketchy else ""
    return "MPEG %s layer %d, %d bps, %s Hz, %.2f seconds%s" % (
        self.version, self.layer, self.bitrate, self.sample_rate,
        self.length, suffix)
class MP3(ID3FileType):
    """An MPEG audio (usually MPEG-1 Layer 3) file.

    :ivar info: :class:`MPEGInfo`
    :ivar tags: :class:`ID3 <mutagen.id3.ID3>`
    """

    _Info = MPEGInfo

    _mimes = ["audio/mpeg", "audio/mpg", "audio/x-mpeg"]

    @property
    def mime(self):
        # Advertise the layer-specific MIME types before the generic ones.
        l = self.info.layer
        return ["audio/mp%d" % l, "audio/x-mp%d" % l] + super(MP3, self).mime

    @staticmethod
    def score(filename, fileobj, header_data):
        """Return a score for how likely this is an MP3 file."""
        filename = filename.lower()
        # BUG FIX: on Python 3 `filename` is usually a text string, and the
        # old code compared it against *bytes* suffixes, so the extension
        # checks could never match (or raised TypeError). Pick suffixes of
        # the same type as the filename.
        if isinstance(filename, bytes):
            suffixes = (b".mp3", b".mp2", b".mpg", b".mpeg")
        else:
            suffixes = (".mp3", ".mp2", ".mpg", ".mpeg")
        return (header_data.startswith(b"ID3") * 2 +
                sum(endswith(filename, suffix) for suffix in suffixes))
# Module-level alias: ``Open`` is the conventional name each mutagen
# file-type module exposes for its default file class.
Open = MP3
class EasyMP3(MP3):
    """Like MP3, but uses EasyID3 for tags.

    :ivar info: :class:`MPEGInfo`
    :ivar tags: :class:`EasyID3 <mutagen.easyid3.EasyID3>`
    """

    from mutagen.easyid3 import EasyID3 as ID3
    # Re-bind so ``ID3`` is an explicit class attribute (presumably the
    # tag class consumed by the ID3FileType machinery -- confirm).
    ID3 = ID3

View file

@ -1,965 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright (C) 2006 Joe Wreschnig
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
"""Read and write MPEG-4 audio files with iTunes metadata.
This module will read MPEG-4 audio information and metadata,
as found in Apple's MP4 (aka M4A, M4B, M4P) files.
There is no official specification for this format. The source code
for TagLib, FAAD, and various MPEG specifications at
* http://developer.apple.com/documentation/QuickTime/QTFF/
* http://www.geocities.com/xhelmboyx/quicktime/formats/mp4-layout.txt
* http://standards.iso.org/ittf/PubliclyAvailableStandards/\
c041828_ISO_IEC_14496-12_2005(E).zip
* http://wiki.multimedia.cx/index.php?title=Apple_QuickTime
were all consulted.
"""
import struct
import sys
from mutagen import FileType, Metadata, StreamInfo
from mutagen._constants import GENRES
from mutagen._util import (cdata, insert_bytes, DictProxy, MutagenError,
hashable, enum)
from mutagen._compat import (reraise, PY2, string_types, text_type, chr_,
iteritems, PY3, cBytesIO)
from ._atom import Atoms, Atom, AtomError
from ._util import parse_full_atom
from ._as_entry import AudioSampleEntry, ASEntryError
class error(IOError, MutagenError):
    """Base exception for this module (also subclasses IOError)."""
    pass
class MP4MetadataError(error):
    """Raised when MP4 tag data cannot be parsed or rendered."""
    pass
class MP4StreamInfoError(error):
    """Raised when the MP4 stream information cannot be parsed."""
    pass
class MP4MetadataValueError(ValueError, MP4MetadataError):
    """Raised when a tag value is invalid for its key (also a ValueError)."""
    pass
# Public API of this module.
__all__ = ['MP4', 'Open', 'delete', 'MP4Cover', 'MP4FreeForm', 'AtomDataType']
@enum
class AtomDataType(object):
    """Enum for `dataformat` attribute of MP4FreeForm.

    .. versionadded:: 1.25
    """
    # NOTE(review): the numeric values appear to mirror the iTunes "data"
    # atom type-indicator codes -- confirm against the MP4 metadata spec.

    IMPLICIT = 0
    """for use with tags for which no type needs to be indicated because
    only one type is allowed"""

    UTF8 = 1
    """without any count or null terminator"""

    UTF16 = 2
    """also known as UTF-16BE"""

    SJIS = 3
    """deprecated unless it is needed for special Japanese characters"""

    HTML = 6
    """the HTML file header specifies which HTML version"""

    XML = 7
    """the XML header must identify the DTD or schemas"""

    UUID = 8
    """also known as GUID; stored as 16 bytes in binary (valid as an ID)"""

    ISRC = 9
    """stored as UTF-8 text (valid as an ID)"""

    MI3P = 10
    """stored as UTF-8 text (valid as an ID)"""

    GIF = 12
    """(deprecated) a GIF image"""

    JPEG = 13
    """a JPEG image"""

    PNG = 14
    """PNG image"""

    URL = 15
    """absolute, in UTF-8 characters"""

    DURATION = 16
    """in milliseconds, 32-bit integer"""

    DATETIME = 17
    """in UTC, counting seconds since midnight, January 1, 1904;
    32 or 64-bits"""

    GENRES = 18
    """a list of enumerated values"""

    INTEGER = 21
    """a signed big-endian integer with length one of { 1,2,3,4,8 } bytes"""

    RIAA_PA = 24
    """RIAA parental advisory; { -1=no, 1=yes, 0=unspecified },
    8-bit ingteger"""

    UPC = 25
    """Universal Product Code, in text UTF-8 format (valid as an ID)"""

    BMP = 27
    """Windows bitmap image"""
@hashable
class MP4Cover(bytes):
    """A cover artwork.

    Attributes:

    * imageformat -- format of the image (either FORMAT_JPEG or FORMAT_PNG)
    """

    FORMAT_JPEG = AtomDataType.JPEG
    FORMAT_PNG = AtomDataType.PNG

    def __new__(cls, data, *args, **kwargs):
        # Extra args (imageformat) are consumed by __init__.
        return bytes.__new__(cls, data)

    def __init__(self, data, imageformat=FORMAT_JPEG):
        self.imageformat = imageformat

    __hash__ = bytes.__hash__

    def __eq__(self, other):
        # Covers are equal only if both the raw bytes and the image
        # format match.
        if not isinstance(other, MP4Cover):
            return NotImplemented

        if not bytes.__eq__(self, other):
            return False

        if self.imageformat != other.imageformat:
            return False

        return True

    def __ne__(self, other):
        # BUG FIX: the previous ``not self.__eq__(other)`` collapsed
        # NotImplemented to False (and truth-testing NotImplemented is an
        # error on recent Pythons). Propagate NotImplemented so Python can
        # try the reflected comparison instead.
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result

    def __repr__(self):
        return "%s(%r, %r)" % (
            type(self).__name__, bytes(self),
            AtomDataType(self.imageformat))
@hashable
class MP4FreeForm(bytes):
    """A freeform value.

    Attributes:

    * dataformat -- format of the data (see AtomDataType)
    """

    FORMAT_DATA = AtomDataType.IMPLICIT  # deprecated
    FORMAT_TEXT = AtomDataType.UTF8  # deprecated

    def __new__(cls, data, *args, **kwargs):
        # Extra args (dataformat, version) are consumed by __init__.
        return bytes.__new__(cls, data)

    def __init__(self, data, dataformat=AtomDataType.UTF8, version=0):
        self.dataformat = dataformat
        self.version = version

    __hash__ = bytes.__hash__

    def __eq__(self, other):
        # Values are equal only if bytes, dataformat and version all match.
        if not isinstance(other, MP4FreeForm):
            return NotImplemented

        if not bytes.__eq__(self, other):
            return False

        if self.dataformat != other.dataformat:
            return False

        if self.version != other.version:
            return False

        return True

    def __ne__(self, other):
        # BUG FIX: the previous ``not self.__eq__(other)`` collapsed
        # NotImplemented to False (and truth-testing NotImplemented is an
        # error on recent Pythons). Propagate NotImplemented so Python can
        # try the reflected comparison instead.
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result

    def __repr__(self):
        return "%s(%r, %r)" % (
            type(self).__name__, bytes(self),
            AtomDataType(self.dataformat))
def _name2key(name):
    """Translate a raw atom name (bytes) into a tag-dictionary key."""
    # Py2 keys stay raw byte strings; Py3 keys are text, decoded via
    # latin-1 (a lossless byte <-> codepoint mapping).
    return name if PY2 else name.decode("latin-1")
def _key2name(key):
    """Translate a tag-dictionary key back into a raw atom name (bytes)."""
    # Inverse of _name2key: latin-1 round-trips every byte value.
    return key if PY2 else key.encode("latin-1")
class MP4Tags(DictProxy, Metadata):
    r"""Dictionary containing Apple iTunes metadata list key/values.

    Keys are four byte identifiers, except for freeform ('----')
    keys. Values are usually unicode strings, but some atoms have a
    special structure:

    Text values (multiple values per key are supported):

    * '\\xa9nam' -- track title
    * '\\xa9alb' -- album
    * '\\xa9ART' -- artist
    * 'aART' -- album artist
    * '\\xa9wrt' -- composer
    * '\\xa9day' -- year
    * '\\xa9cmt' -- comment
    * 'desc' -- description (usually used in podcasts)
    * 'purd' -- purchase date
    * '\\xa9grp' -- grouping
    * '\\xa9gen' -- genre
    * '\\xa9lyr' -- lyrics
    * 'purl' -- podcast URL
    * 'egid' -- podcast episode GUID
    * 'catg' -- podcast category
    * 'keyw' -- podcast keywords
    * '\\xa9too' -- encoded by
    * 'cprt' -- copyright
    * 'soal' -- album sort order
    * 'soaa' -- album artist sort order
    * 'soar' -- artist sort order
    * 'sonm' -- title sort order
    * 'soco' -- composer sort order
    * 'sosn' -- show sort order
    * 'tvsh' -- show name

    Boolean values:

    * 'cpil' -- part of a compilation
    * 'pgap' -- part of a gapless album
    * 'pcst' -- podcast (iTunes reads this only on import)

    Tuples of ints (multiple values per key are supported):

    * 'trkn' -- track number, total tracks
    * 'disk' -- disc number, total discs

    Others:

    * 'tmpo' -- tempo/BPM, 16 bit int
    * 'covr' -- cover artwork, list of MP4Cover objects (which are
      tagged strs)
    * 'gnre' -- ID3v1 genre. Not supported, use '\\xa9gen' instead.

    The freeform '----' frames use a key in the format '----:mean:name'
    where 'mean' is usually 'com.apple.iTunes' and 'name' is a unique
    identifier for this frame. The value is a str, but is probably
    text that can be decoded as UTF-8. Multiple values per key are
    supported.

    MP4 tag data cannot exist outside of the structure of an MP4 file,
    so this class should not be manually instantiated.

    Unknown non-text tags and tags that failed to parse will be written
    back as is.
    """

    def __init__(self, *args, **kwargs):
        # Raw payloads of atoms that failed to parse, keyed by atom name,
        # kept so save() can write them back unchanged.
        self._failed_atoms = {}
        super(MP4Tags, self).__init__(*args, **kwargs)

    def load(self, atoms, fileobj):
        """Populate self from the moov.udta.meta.ilst atom tree."""
        try:
            ilst = atoms[b"moov.udta.meta.ilst"]
        except KeyError as key:
            raise MP4MetadataError(key)
        for atom in ilst.children:
            ok, data = atom.read(fileobj)
            if not ok:
                raise MP4MetadataError("Not enough data")
            try:
                if atom.name in self.__atoms:
                    # __atoms maps an atom name to (parse_func, render_func).
                    info = self.__atoms[atom.name]
                    info[0](self, atom, data)
                else:
                    # unknown atom, try as text
                    self.__parse_text(atom, data, implicit=False)
            except MP4MetadataError:
                # parsing failed, save them so we can write them back
                key = _name2key(atom.name)
                self._failed_atoms.setdefault(key, []).append(data)

    def __setitem__(self, key, value):
        if not isinstance(key, str):
            raise TypeError("key has to be str")
        super(MP4Tags, self).__setitem__(key, value)

    @classmethod
    def _can_load(cls, atoms):
        # True when the file already carries an iTunes metadata list.
        return b"moov.udta.meta.ilst" in atoms

    @staticmethod
    def __key_sort(item):
        """Sort key mimicking iTunes' atom write order."""
        (key, v) = item
        # iTunes always writes the tags in order of "relevance", try
        # to copy it as closely as possible.
        order = [b"\xa9nam", b"\xa9ART", b"\xa9wrt", b"\xa9alb",
                 b"\xa9gen", b"gnre", b"trkn", b"disk",
                 b"\xa9day", b"cpil", b"pgap", b"pcst", b"tmpo",
                 b"\xa9too", b"----", b"covr", b"\xa9lyr"]
        order = dict(zip(order, range(len(order))))
        last = len(order)
        # If there's no key-based way to distinguish, order by length.
        # If there's still no way, go by string comparison on the
        # values, so we at least have something deterministic.
        return (order.get(key[:4], last), len(repr(v)), repr(v))

    def save(self, filename):
        """Save the metadata to the given filename."""
        values = []
        items = sorted(self.items(), key=self.__key_sort)
        for key, value in items:
            atom_name = _key2name(key)[:4]
            if atom_name in self.__atoms:
                render_func = self.__atoms[atom_name][1]
            else:
                render_func = type(self).__render_text
            try:
                values.append(render_func(self, key, value))
            except (TypeError, ValueError) as s:
                # Re-raise as the module's error type, keeping the traceback.
                reraise(MP4MetadataValueError, s, sys.exc_info()[2])
        for atom_name, failed in iteritems(self._failed_atoms):
            # don't write atoms back if we have added a new one with
            # the same name, this excludes freeform which can have
            # multiple atoms with the same key (most parsers seem to be able
            # to handle that)
            if atom_name in self:
                assert atom_name != b"----"
                continue
            for data in failed:
                values.append(Atom.render(_key2name(atom_name), data))
        data = Atom.render(b"ilst", b"".join(values))
        # Find the old atoms.
        with open(filename, "rb+") as fileobj:
            try:
                atoms = Atoms(fileobj)
            except AtomError as err:
                reraise(error, err, sys.exc_info()[2])
            try:
                path = atoms.path(b"moov", b"udta", b"meta", b"ilst")
            except KeyError:
                self.__save_new(fileobj, atoms, data)
            else:
                self.__save_existing(fileobj, atoms, path, data)

    def __pad_ilst(self, data, length=None):
        """Render a 'free' padding atom; default pads to a 1k boundary."""
        if length is None:
            length = ((len(data) + 1023) & ~1023) - len(data)
        return Atom.render(b"free", b"\x00" * length)

    def __save_new(self, fileobj, atoms, ilst):
        """Insert a brand-new meta/ilst structure into the file."""
        hdlr = Atom.render(b"hdlr", b"\x00" * 8 + b"mdirappl" + b"\x00" * 9)
        meta = Atom.render(
            b"meta", b"\x00\x00\x00\x00" + hdlr + ilst + self.__pad_ilst(ilst))
        try:
            path = atoms.path(b"moov", b"udta")
        except KeyError:
            # moov.udta not found -- create one
            path = atoms.path(b"moov")
            meta = Atom.render(b"udta", meta)
        offset = path[-1].offset + 8
        insert_bytes(fileobj, len(meta), offset)
        fileobj.seek(offset)
        fileobj.write(meta)
        self.__update_parents(fileobj, path, len(meta))
        self.__update_offsets(fileobj, atoms, len(meta), offset)

    def __save_existing(self, fileobj, atoms, path, data):
        # Replace the old ilst atom.
        ilst = path.pop()
        offset = ilst.offset
        length = ilst.length

        # Check for padding "free" atoms
        meta = path[-1]
        index = meta.children.index(ilst)
        try:
            prev = meta.children[index - 1]
            if prev.name == b"free":
                # Absorb a preceding free atom into the replaced region.
                offset = prev.offset
                length += prev.length
        except IndexError:
            pass
        try:
            next = meta.children[index + 1]
            if next.name == b"free":
                # Absorb a following free atom as well.
                length += next.length
        except IndexError:
            pass

        delta = len(data) - length
        if delta > 0 or (delta < 0 and delta > -8):
            # Growing (or shrinking by less than an atom header): pad to a
            # boundary and physically insert the difference.
            data += self.__pad_ilst(data)
            delta = len(data) - length
            insert_bytes(fileobj, delta, offset)
        elif delta < 0:
            # Shrinking: fill the leftover space with an exact-size free atom.
            data += self.__pad_ilst(data, -delta - 8)
            delta = 0

        fileobj.seek(offset)
        fileobj.write(data)
        self.__update_parents(fileobj, path, delta)
        self.__update_offsets(fileobj, atoms, delta, offset)

    def __update_parents(self, fileobj, path, delta):
        """Update all parent atoms with the new size."""
        for atom in path:
            fileobj.seek(atom.offset)
            size = cdata.uint_be(fileobj.read(4))
            if size == 1:  # 64bit
                # skip name (4B) and read size (8B)
                size = cdata.ulonglong_be(fileobj.read(12)[4:])
                fileobj.seek(atom.offset + 8)
                fileobj.write(cdata.to_ulonglong_be(size + delta))
            else:  # 32bit
                fileobj.seek(atom.offset)
                fileobj.write(cdata.to_uint_be(size + delta))

    def __update_offset_table(self, fileobj, fmt, atom, delta, offset):
        """Update offset table in the specified atom."""
        if atom.offset > offset:
            atom.offset += delta
        fileobj.seek(atom.offset + 12)
        data = fileobj.read(atom.length - 12)
        # First 4 bytes hold the entry count; fmt gets its %d from it.
        fmt = fmt % cdata.uint_be(data[:4])
        offsets = struct.unpack(fmt, data[4:])
        # Shift only the offsets that point past the edited region.
        offsets = [o + (0, delta)[offset < o] for o in offsets]
        fileobj.seek(atom.offset + 16)
        fileobj.write(struct.pack(fmt, *offsets))

    def __update_tfhd(self, fileobj, atom, delta, offset):
        """Patch the base-data-offset of a 'tfhd' atom, if present."""
        if atom.offset > offset:
            atom.offset += delta
        fileobj.seek(atom.offset + 9)
        data = fileobj.read(atom.length - 9)
        flags = cdata.uint_be(b"\x00" + data[:3])
        # Flag bit 0: base-data-offset field present.
        if flags & 1:
            o = cdata.ulonglong_be(data[7:15])
            if o > offset:
                o += delta
            fileobj.seek(atom.offset + 16)
            fileobj.write(cdata.to_ulonglong_be(o))

    def __update_offsets(self, fileobj, atoms, delta, offset):
        """Update offset tables in all 'stco' and 'co64' atoms."""
        if delta == 0:
            return
        moov = atoms[b"moov"]
        for atom in moov.findall(b'stco', True):
            self.__update_offset_table(fileobj, ">%dI", atom, delta, offset)
        for atom in moov.findall(b'co64', True):
            self.__update_offset_table(fileobj, ">%dQ", atom, delta, offset)
        try:
            for atom in atoms[b"moof"].findall(b'tfhd', True):
                self.__update_tfhd(fileobj, atom, delta, offset)
        except KeyError:
            pass

    def __parse_data(self, atom, data):
        """Yield (version, flags, payload) for each child 'data' atom."""
        pos = 0
        while pos < atom.length - 8:
            head = data[pos:pos + 12]
            if len(head) != 12:
                raise MP4MetadataError("truncated atom % r" % atom.name)
            length, name = struct.unpack(">I4s", head[:8])
            version = ord(head[8:9])
            flags = struct.unpack(">I", b"\x00" + head[9:12])[0]
            if name != b"data":
                raise MP4MetadataError(
                    "unexpected atom %r inside %r" % (name, atom.name))
            chunk = data[pos + 16:pos + length]
            if len(chunk) != length - 16:
                raise MP4MetadataError("truncated atom % r" % atom.name)
            yield version, flags, chunk
            pos += length

    def __add(self, key, value, single=False):
        """Store a parsed value; extend the list unless *single*."""
        assert isinstance(key, str)

        if single:
            self[key] = value
        else:
            self.setdefault(key, []).extend(value)

    def __render_data(self, key, version, flags, value):
        """Render a tag atom wrapping each item in a 'data' child atom."""
        return Atom.render(_key2name(key), b"".join([
            Atom.render(
                b"data", struct.pack(">2I", version << 24 | flags, 0) + data)
            for data in value]))

    def __parse_freeform(self, atom, data):
        """Parse a '----' atom: mean + name headers, then data children."""
        length = cdata.uint_be(data[:4])
        mean = data[12:length]
        pos = length
        length = cdata.uint_be(data[pos:pos + 4])
        name = data[pos + 12:pos + length]
        pos += length
        value = []
        while pos < atom.length - 8:
            length, atom_name = struct.unpack(">I4s", data[pos:pos + 8])
            if atom_name != b"data":
                raise MP4MetadataError(
                    "unexpected atom %r inside %r" % (atom_name, atom.name))

            version = ord(data[pos + 8:pos + 8 + 1])
            flags = struct.unpack(">I", b"\x00" + data[pos + 9:pos + 12])[0]
            value.append(MP4FreeForm(data[pos + 16:pos + length],
                                     dataformat=flags, version=version))
            pos += length

        key = _name2key(atom.name + b":" + mean + b":" + name)
        self.__add(key, value)

    def __render_freeform(self, key, value):
        """Render a '----:mean:name' key back into a freeform atom."""
        if isinstance(value, bytes):
            value = [value]

        dummy, mean, name = _key2name(key).split(b":", 2)
        mean = struct.pack(">I4sI", len(mean) + 12, b"mean", 0) + mean
        name = struct.pack(">I4sI", len(name) + 12, b"name", 0) + name

        data = b""
        for v in value:
            flags = AtomDataType.UTF8
            version = 0
            if isinstance(v, MP4FreeForm):
                flags = v.dataformat
                version = v.version

            data += struct.pack(
                ">I4s2I", len(v) + 16, b"data", version << 24 | flags, 0)
            data += v

        return Atom.render(b"----", mean + name + data)

    def __parse_pair(self, atom, data):
        """Parse (number, total) pairs, e.g. 'trkn' / 'disk'."""
        key = _name2key(atom.name)
        values = [struct.unpack(">2H", d[2:6]) for
                  version, flags, d in self.__parse_data(atom, data)]
        self.__add(key, values)

    def __render_pair(self, key, value):
        data = []
        for (track, total) in value:
            if 0 <= track < 1 << 16 and 0 <= total < 1 << 16:
                data.append(struct.pack(">4H", 0, track, total, 0))
            else:
                raise MP4MetadataValueError(
                    "invalid numeric pair %r" % ((track, total),))
        return self.__render_data(key, 0, AtomDataType.IMPLICIT, data)

    def __render_pair_no_trailing(self, key, value):
        # Like __render_pair but without the trailing padding short
        # (the layout 'disk' uses).
        data = []
        for (track, total) in value:
            if 0 <= track < 1 << 16 and 0 <= total < 1 << 16:
                data.append(struct.pack(">3H", 0, track, total))
            else:
                raise MP4MetadataValueError(
                    "invalid numeric pair %r" % ((track, total),))
        return self.__render_data(key, 0, AtomDataType.IMPLICIT, data)

    def __parse_genre(self, atom, data):
        """Translate an ID3v1-style 'gnre' atom into a '\\xa9gen' value."""
        values = []
        for version, flags, data in self.__parse_data(atom, data):
            # version = 0, flags = 0
            if len(data) != 2:
                raise MP4MetadataValueError("invalid genre")
            genre = cdata.short_be(data)
            # Translate to a freeform genre.
            try:
                genre = GENRES[genre - 1]
            except IndexError:
                # this will make us write it back at least
                raise MP4MetadataValueError("unknown genre")
            values.append(genre)
        key = _name2key(b"\xa9gen")
        self.__add(key, values)

    def __parse_tempo(self, atom, data):
        values = []
        for version, flags, data in self.__parse_data(atom, data):
            # version = 0, flags = 0 or 21
            if len(data) != 2:
                raise MP4MetadataValueError("invalid tempo")
            values.append(cdata.ushort_be(data))
        key = _name2key(atom.name)
        self.__add(key, values)

    def __render_tempo(self, key, value):
        try:
            if len(value) == 0:
                return self.__render_data(key, 0, AtomDataType.INTEGER, b"")

            if (min(value) < 0) or (max(value) >= 2 ** 16):
                raise MP4MetadataValueError(
                    "invalid 16 bit integers: %r" % value)
        except TypeError:
            raise MP4MetadataValueError(
                "tmpo must be a list of 16 bit integers")

        values = [cdata.to_ushort_be(v) for v in value]
        return self.__render_data(key, 0, AtomDataType.INTEGER, values)

    def __parse_bool(self, atom, data):
        for version, flags, data in self.__parse_data(atom, data):
            if len(data) != 1:
                raise MP4MetadataValueError("invalid bool")

            value = bool(ord(data))
            key = _name2key(atom.name)
            self.__add(key, value, single=True)

    def __render_bool(self, key, value):
        return self.__render_data(
            key, 0, AtomDataType.INTEGER, [chr_(bool(value))])

    def __parse_cover(self, atom, data):
        """Parse 'covr' children into MP4Cover objects."""
        values = []
        pos = 0
        while pos < atom.length - 8:
            length, name, imageformat = struct.unpack(">I4sI",
                                                      data[pos:pos + 12])
            if name != b"data":
                if name == b"name":
                    # Skip optional 'name' child atoms.
                    pos += length
                    continue
                raise MP4MetadataError(
                    "unexpected atom %r inside 'covr'" % name)
            if imageformat not in (MP4Cover.FORMAT_JPEG, MP4Cover.FORMAT_PNG):
                # Sometimes AtomDataType.IMPLICIT or simply wrong.
                # In all cases it was jpeg, so default to it
                imageformat = MP4Cover.FORMAT_JPEG
            cover = MP4Cover(data[pos + 16:pos + length], imageformat)
            values.append(cover)
            pos += length

        key = _name2key(atom.name)
        self.__add(key, values)

    def __render_cover(self, key, value):
        atom_data = []
        for cover in value:
            try:
                imageformat = cover.imageformat
            except AttributeError:
                # Plain bytes (not MP4Cover): assume JPEG.
                imageformat = MP4Cover.FORMAT_JPEG
            atom_data.append(Atom.render(
                b"data", struct.pack(">2I", imageformat, 0) + cover))
        return Atom.render(_key2name(key), b"".join(atom_data))

    def __parse_text(self, atom, data, implicit=True):
        # implicit = False, for parsing unknown atoms only take utf8 ones.
        # For known ones we can assume the implicit are utf8 too.
        values = []
        for version, flags, atom_data in self.__parse_data(atom, data):
            if implicit:
                if flags not in (AtomDataType.IMPLICIT, AtomDataType.UTF8):
                    raise MP4MetadataError(
                        "Unknown atom type %r for %r" % (flags, atom.name))
            else:
                if flags != AtomDataType.UTF8:
                    raise MP4MetadataError(
                        "%r is not text, ignore" % atom.name)

            try:
                text = atom_data.decode("utf-8")
            except UnicodeDecodeError as e:
                raise MP4MetadataError("%s: %s" % (atom.name, e))

            values.append(text)

        key = _name2key(atom.name)
        self.__add(key, values)

    def __render_text(self, key, value, flags=AtomDataType.UTF8):
        if isinstance(value, string_types):
            value = [value]

        encoded = []
        for v in value:
            if not isinstance(v, text_type):
                if PY3:
                    raise TypeError("%r not str" % v)
                v = v.decode("utf-8")
            encoded.append(v.encode("utf-8"))

        return self.__render_data(key, 0, flags, encoded)

    def delete(self, filename):
        """Remove the metadata from the given filename."""
        self._failed_atoms.clear()
        self.clear()
        self.save(filename)

    # Dispatch table: atom name -> (parse function, render function).
    __atoms = {
        b"----": (__parse_freeform, __render_freeform),
        b"trkn": (__parse_pair, __render_pair),
        b"disk": (__parse_pair, __render_pair_no_trailing),
        b"gnre": (__parse_genre, None),
        b"tmpo": (__parse_tempo, __render_tempo),
        b"cpil": (__parse_bool, __render_bool),
        b"pgap": (__parse_bool, __render_bool),
        b"pcst": (__parse_bool, __render_bool),
        b"covr": (__parse_cover, __render_cover),
        b"purl": (__parse_text, __render_text),
        b"egid": (__parse_text, __render_text),
    }

    # these allow implicit flags and parse as text
    for name in [b"\xa9nam", b"\xa9alb", b"\xa9ART", b"aART", b"\xa9wrt",
                 b"\xa9day", b"\xa9cmt", b"desc", b"purd", b"\xa9grp",
                 b"\xa9gen", b"\xa9lyr", b"catg", b"keyw", b"\xa9too",
                 b"cprt", b"soal", b"soaa", b"soar", b"sonm", b"soco",
                 b"sosn", b"tvsh"]:
        __atoms[name] = (__parse_text, __render_text)

    def pprint(self):
        """Return a human-readable, newline-separated dump of the tags."""
        values = []
        for key, value in iteritems(self):
            if not isinstance(key, text_type):
                key = key.decode("latin-1")
            if key == "covr":
                values.append("%s=%s" % (key, ", ".join(
                    ["[%d bytes of data]" % len(data) for data in value])))
            elif isinstance(value, list):
                values.append("%s=%s" %
                              (key, " / ".join(map(text_type, value))))
            else:
                values.append("%s=%s" % (key, value))
        return "\n".join(values)
class MP4Info(StreamInfo):
    """MPEG-4 stream information.

    Attributes:

    * bitrate -- bitrate in bits per second, as an int
    * length -- file length in seconds, as a float
    * channels -- number of audio channels
    * sample_rate -- audio sampling rate in Hz
    * bits_per_sample -- bits per sample
    * codec (string):
        * if starting with ``"mp4a"`` uses an mp4a audio codec
          (see the codec parameter in rfc6381 for details e.g. ``"mp4a.40.2"``)
        * for everything else see a list of possible values at
          http://www.mp4ra.org/codecs.html

        e.g. ``"mp4a"``, ``"alac"``, ``"mp4a.40.2"``, ``"ac-3"`` etc.
    * codec_description (string):
        Name of the codec used (ALAC, AAC LC, AC-3...). Values might change in
        the future, use for display purposes only.
    """

    bitrate = 0
    channels = 0
    sample_rate = 0
    bits_per_sample = 0
    codec = u""
    codec_name = u""
    # BUG FIX: pprint() and _parse_stsd() use ``codec_description``, but the
    # class only provided a ``codec_name`` default; without an stsd atom,
    # pprint() raised AttributeError. Provide the matching default (the old
    # ``codec_name`` attribute is kept for backward compatibility).
    codec_description = u""

    def __init__(self, atoms, fileobj):
        try:
            moov = atoms[b"moov"]
        except KeyError:
            raise MP4StreamInfoError("not a MP4 file")

        # Find the first audio ('soun') track.
        for trak in moov.findall(b"trak"):
            hdlr = trak[b"mdia", b"hdlr"]
            ok, data = hdlr.read(fileobj)
            if not ok:
                raise MP4StreamInfoError("Not enough data")
            if data[8:12] == b"soun":
                break
        else:
            raise MP4StreamInfoError("track has no audio data")

        # The media header ('mdhd') carries timescale and duration.
        mdhd = trak[b"mdia", b"mdhd"]
        ok, data = mdhd.read(fileobj)
        if not ok:
            raise MP4StreamInfoError("Not enough data")

        try:
            version, flags, data = parse_full_atom(data)
        except ValueError as e:
            raise MP4StreamInfoError(e)

        if version == 0:
            offset = 8
            fmt = ">2I"
        elif version == 1:
            offset = 16
            fmt = ">IQ"
        else:
            raise MP4StreamInfoError("Unknown mdhd version %d" % version)

        end = offset + struct.calcsize(fmt)
        unit, length = struct.unpack(fmt, data[offset:end])
        try:
            # duration / timescale -> seconds.
            self.length = float(length) / unit
        except ZeroDivisionError:
            self.length = 0

        try:
            atom = trak[b"mdia", b"minf", b"stbl", b"stsd"]
        except KeyError:
            pass
        else:
            self._parse_stsd(atom, fileobj)

    def _parse_stsd(self, atom, fileobj):
        """Sets channels, bits_per_sample, sample_rate and optionally bitrate.

        Can raise MP4StreamInfoError.
        """
        assert atom.name == b"stsd"

        ok, data = atom.read(fileobj)
        if not ok:
            raise MP4StreamInfoError("Invalid stsd")

        try:
            version, flags, data = parse_full_atom(data)
        except ValueError as e:
            raise MP4StreamInfoError(e)

        if version != 0:
            raise MP4StreamInfoError("Unsupported stsd version")

        try:
            num_entries, offset = cdata.uint32_be_from(data, 0)
        except cdata.error as e:
            raise MP4StreamInfoError(e)

        if num_entries == 0:
            return

        # look at the first entry if there is one
        entry_fileobj = cBytesIO(data[offset:])
        try:
            entry_atom = Atom(entry_fileobj)
        except AtomError as e:
            raise MP4StreamInfoError(e)

        try:
            entry = AudioSampleEntry(entry_atom, entry_fileobj)
        except ASEntryError as e:
            raise MP4StreamInfoError(e)
        else:
            self.channels = entry.channels
            self.bits_per_sample = entry.sample_size
            self.sample_rate = entry.sample_rate
            self.bitrate = entry.bitrate
            self.codec = entry.codec
            self.codec_description = entry.codec_description

    def pprint(self):
        """Return a one-line, human-readable summary of the stream."""
        return "MPEG-4 audio (%s), %.2f seconds, %d bps" % (
            self.codec_description, self.length, self.bitrate)
class MP4(FileType):
    """An MPEG-4 audio file, probably containing AAC.

    If more than one track is present in the file, the first is used.
    Only audio ('soun') tracks will be read.

    :ivar info: :class:`MP4Info`
    :ivar tags: :class:`MP4Tags`
    """

    # Tag class to instantiate; subclasses may override.
    MP4Tags = MP4Tags

    _mimes = ["audio/mp4", "audio/x-m4a", "audio/mpeg4", "audio/aac"]

    def load(self, filename):
        """Parse stream info and (if present) tags from *filename*."""
        self.filename = filename
        with open(filename, "rb") as fileobj:
            try:
                atoms = Atoms(fileobj)
            except AtomError as err:
                reraise(error, err, sys.exc_info()[2])

            try:
                self.info = MP4Info(atoms, fileobj)
            except error:
                raise
            except Exception as err:
                # Wrap unexpected failures in the module's error type.
                reraise(MP4StreamInfoError, err, sys.exc_info()[2])

            if not MP4Tags._can_load(atoms):
                self.tags = None
            else:
                try:
                    self.tags = self.MP4Tags(atoms, fileobj)
                except error:
                    raise
                except Exception as err:
                    reraise(MP4MetadataError, err, sys.exc_info()[2])

    def add_tags(self):
        """Attach an empty tag object; error if one already exists."""
        if self.tags is None:
            self.tags = self.MP4Tags()
        else:
            raise error("an MP4 tag already exists")

    @staticmethod
    def score(filename, fileobj, header_data):
        # Likelihood score based on typical MP4 header markers.
        return (b"ftyp" in header_data) + (b"mp4" in header_data)
# Module-level alias: ``Open`` is the conventional name each mutagen
# file-type module exposes for its default file class.
Open = MP4
def delete(filename):
    """Remove tags from a file."""
    mp4_file = MP4(filename)
    mp4_file.delete()

View file

@ -1,541 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright (C) 2014 Christoph Reiter
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
from mutagen._compat import cBytesIO, xrange
from mutagen.aac import ProgramConfigElement
from mutagen._util import BitReader, BitReaderError, cdata, text_type
from ._util import parse_full_atom
from ._atom import Atom, AtomError
class ASEntryError(Exception):
    """Raised when an AudioSampleEntry (or one of its child atoms)
    cannot be parsed."""
    pass
class AudioSampleEntry(object):
"""Parses an AudioSampleEntry atom.
Private API.
Attrs:
channels (int): number of channels
sample_size (int): sample size in bits
sample_rate (int): sample rate in Hz
bitrate (int): bits per second (0 means unknown)
codec (string):
audio codec, either 'mp4a[.*][.*]' (rfc6381) or 'alac'
codec_description (string): descriptive codec name e.g. "AAC LC+SBR"
Can raise ASEntryError.
"""
channels = 0
sample_size = 0
sample_rate = 0
bitrate = 0
codec = None
codec_description = None
def __init__(self, atom, fileobj):
ok, data = atom.read(fileobj)
if not ok:
raise ASEntryError("too short %r atom" % atom.name)
fileobj = cBytesIO(data)
r = BitReader(fileobj)
try:
# SampleEntry
r.skip(6 * 8) # reserved
r.skip(2 * 8) # data_ref_index
# AudioSampleEntry
r.skip(8 * 8) # reserved
self.channels = r.bits(16)
self.sample_size = r.bits(16)
r.skip(2 * 8) # pre_defined
r.skip(2 * 8) # reserved
self.sample_rate = r.bits(32) >> 16
except BitReaderError as e:
raise ASEntryError(e)
assert r.is_aligned()
try:
extra = Atom(fileobj)
except AtomError as e:
raise ASEntryError(e)
self.codec = atom.name.decode("latin-1")
self.codec_description = None
if atom.name == b"mp4a" and extra.name == b"esds":
self._parse_esds(extra, fileobj)
elif atom.name == b"alac" and extra.name == b"alac":
self._parse_alac(extra, fileobj)
elif atom.name == b"ac-3" and extra.name == b"dac3":
self._parse_dac3(extra, fileobj)
if self.codec_description is None:
self.codec_description = self.codec.upper()
def _parse_dac3(self, atom, fileobj):
# ETSI TS 102 366
assert atom.name == b"dac3"
ok, data = atom.read(fileobj)
if not ok:
raise ASEntryError("truncated %s atom" % atom.name)
fileobj = cBytesIO(data)
r = BitReader(fileobj)
# sample_rate in AudioSampleEntry covers values in
# fscod2 and not just fscod, so ignore fscod here.
try:
r.skip(2 + 5 + 3) # fscod, bsid, bsmod
acmod = r.bits(3)
lfeon = r.bits(1)
bit_rate_code = r.bits(5)
r.skip(5) # reserved
except BitReaderError as e:
raise ASEntryError(e)
self.channels = [2, 1, 2, 3, 3, 4, 4, 5][acmod] + lfeon
try:
self.bitrate = [
32, 40, 48, 56, 64, 80, 96, 112, 128, 160, 192,
224, 256, 320, 384, 448, 512, 576, 640][bit_rate_code] * 1000
except IndexError:
pass
    def _parse_alac(self, atom, fileobj):
        """Fill in stream info from an ALAC magic cookie atom.

        Raises ASEntryError on truncated or unsupported data.
        """
        # https://alac.macosforge.org/trac/browser/trunk/
        # ALACMagicCookieDescription.txt
        assert atom.name == b"alac"
        ok, data = atom.read(fileobj)
        if not ok:
            raise ASEntryError("truncated %s atom" % atom.name)
        try:
            version, flags, data = parse_full_atom(data)
        except ValueError as e:
            raise ASEntryError(e)
        if version != 0:
            raise ASEntryError("Unsupported version %d" % version)
        fileobj = cBytesIO(data)
        r = BitReader(fileobj)
        try:
            # for some files the AudioSampleEntry values default to 44100/2chan
            # and the real info is in the alac cookie, so prefer it
            r.skip(32) # frameLength
            compatibleVersion = r.bits(8)
            if compatibleVersion != 0:
                # unknown cookie layout; keep the AudioSampleEntry values
                return
            self.sample_size = r.bits(8)
            r.skip(8 + 8 + 8) # three 8 bit fields we don't need
            self.channels = r.bits(8)
            r.skip(16 + 32) # 16 + 32 bits we don't need
            self.bitrate = r.bits(32)
            self.sample_rate = r.bits(32)
        except BitReaderError as e:
            raise ASEntryError(e)
    def _parse_esds(self, esds, fileobj):
        """Fill in bitrate/codec info from an esds (ES descriptor) atom.

        Raises ASEntryError on truncated or unsupported data.
        """
        assert esds.name == b"esds"
        ok, data = esds.read(fileobj)
        if not ok:
            raise ASEntryError("truncated %s atom" % esds.name)
        try:
            version, flags, data = parse_full_atom(data)
        except ValueError as e:
            raise ASEntryError(e)
        if version != 0:
            raise ASEntryError("Unsupported version %d" % version)
        fileobj = cBytesIO(data)
        r = BitReader(fileobj)
        try:
            # The payload must start with an ES_Descriptor tag byte.
            tag = r.bits(8)
            if tag != ES_Descriptor.TAG:
                raise ASEntryError("unexpected descriptor: %d" % tag)
            assert r.is_aligned()
        except BitReaderError as e:
            raise ASEntryError(e)
        try:
            decSpecificInfo = ES_Descriptor.parse(fileobj)
        except DescriptorError as e:
            raise ASEntryError(e)
        # Values from the DecoderConfigDescriptor...
        dec_conf_desc = decSpecificInfo.decConfigDescr
        self.bitrate = dec_conf_desc.avgBitrate
        self.codec += dec_conf_desc.codec_param
        self.codec_description = dec_conf_desc.codec_desc
        decSpecificInfo = dec_conf_desc.decSpecificInfo
        if decSpecificInfo is not None:
            # ...but prefer the DecoderSpecificInfo values where it
            # provides them (0 means unknown there).
            if decSpecificInfo.channels != 0:
                self.channels = decSpecificInfo.channels
            if decSpecificInfo.sample_rate != 0:
                self.sample_rate = decSpecificInfo.sample_rate
class DescriptorError(Exception):
    """Raised when parsing an MPEG-4 descriptor fails."""
class BaseDescriptor(object):
    """Base class for expandable descriptors with a variable length field."""

    TAG = None

    @classmethod
    def _parse_desc_length_file(cls, fileobj):
        """Read the expandable length field: up to 4 bytes, 7 payload
        bits each, high bit set on all but the last byte.

        May raise ValueError
        """
        length = 0
        for _ in xrange(4):
            try:
                byte = cdata.uint8(fileobj.read(1))
            except cdata.error as e:
                raise ValueError(e)
            length = (length << 7) | (byte & 0x7f)
            if not byte >> 7:
                # high bit clear -> this was the final length byte
                return length
        raise ValueError("invalid descriptor length")

    @classmethod
    def parse(cls, fileobj):
        """Returns a parsed instance of the called type.
        The file position is right after the descriptor after this returns.
        Raises DescriptorError
        """
        try:
            payload_len = cls._parse_desc_length_file(fileobj)
        except ValueError as e:
            raise DescriptorError(e)
        start = fileobj.tell()
        instance = cls(fileobj, payload_len)
        remaining = payload_len - (fileobj.tell() - start)
        if remaining < 0:
            raise DescriptorError("descriptor parsing read too much data")
        # Skip whatever part of the descriptor the subclass didn't consume.
        fileobj.seek(remaining, 1)
        return instance
class ES_Descriptor(BaseDescriptor):
    """An ES_Descriptor; parsing continues into the mandatory
    DecoderConfigDescriptor that follows it.
    """
    TAG = 0x3
    def __init__(self, fileobj, length):
        """Raises DescriptorError"""
        r = BitReader(fileobj)
        try:
            self.ES_ID = r.bits(16)
            self.streamDependenceFlag = r.bits(1)
            self.URL_Flag = r.bits(1)
            self.OCRstreamFlag = r.bits(1)
            self.streamPriority = r.bits(5)
            # Optional fields, present depending on the flags above:
            if self.streamDependenceFlag:
                self.dependsOn_ES_ID = r.bits(16)
            if self.URL_Flag:
                URLlength = r.bits(8)
                self.URLstring = r.bytes(URLlength)
            if self.OCRstreamFlag:
                self.OCR_ES_Id = r.bits(16)
            tag = r.bits(8)
        except BitReaderError as e:
            raise DescriptorError(e)
        if tag != DecoderConfigDescriptor.TAG:
            raise DescriptorError("unexpected DecoderConfigDescrTag %d" % tag)
        assert r.is_aligned()
        self.decConfigDescr = DecoderConfigDescriptor.parse(fileobj)
class DecoderConfigDescriptor(BaseDescriptor):
    """Exposes avgBitrate and codec parameter/description strings,
    optionally refined by an embedded DecoderSpecificInfo.
    """
    TAG = 0x4
    decSpecificInfo = None
    """A DecoderSpecificInfo, optional"""
    def __init__(self, fileobj, length):
        """Raises DescriptorError"""
        r = BitReader(fileobj)
        try:
            self.objectTypeIndication = r.bits(8)
            self.streamType = r.bits(6)
            self.upStream = r.bits(1)
            self.reserved = r.bits(1)
            self.bufferSizeDB = r.bits(24)
            self.maxBitrate = r.bits(32)
            self.avgBitrate = r.bits(32)
            # Only (0x40, 0x5) streams are parsed any further here.
            if (self.objectTypeIndication, self.streamType) != (0x40, 0x5):
                return
            # all from here is optional
            if length * 8 == r.get_position():
                return
            tag = r.bits(8)
        except BitReaderError as e:
            raise DescriptorError(e)
        if tag == DecoderSpecificInfo.TAG:
            assert r.is_aligned()
            self.decSpecificInfo = DecoderSpecificInfo.parse(fileobj)
    @property
    def codec_param(self):
        """string"""
        # ".<objectType hex>" plus ".<audioObjectType>" when known
        param = u".%X" % self.objectTypeIndication
        info = self.decSpecificInfo
        if info is not None:
            param += u".%d" % info.audioObjectType
        return param
    @property
    def codec_desc(self):
        """string or None"""
        info = self.decSpecificInfo
        desc = None
        if info is not None:
            desc = info.description
        return desc
class DecoderSpecificInfo(BaseDescriptor):
    """Parses the audio DecoderSpecificInfo, deriving the codec
    description, channel count and sample rate while accounting for
    explicit and implicit SBR/PS signaling.
    """
    TAG = 0x5
    # Codec names indexed by audioObjectType; None for entries this
    # module doesn't name.
    _TYPE_NAMES = [
        None, "AAC MAIN", "AAC LC", "AAC SSR", "AAC LTP", "SBR",
        "AAC scalable", "TwinVQ", "CELP", "HVXC", None, None, "TTSI",
        "Main synthetic", "Wavetable synthesis", "General MIDI",
        "Algorithmic Synthesis and Audio FX", "ER AAC LC", None, "ER AAC LTP",
        "ER AAC scalable", "ER Twin VQ", "ER BSAC", "ER AAC LD", "ER CELP",
        "ER HVXC", "ER HILN", "ER Parametric", "SSC", "PS", "MPEG Surround",
        None, "Layer-1", "Layer-2", "Layer-3", "DST", "ALS", "SLS",
        "SLS non-core", "ER AAC ELD", "SMR Simple", "SMR Main", "USAC",
        "SAOC", "LD MPEG Surround", "USAC"
    ]
    # Sample rates indexed by samplingFrequencyIndex.
    _FREQS = [
        96000, 88200, 64000, 48000, 44100, 32000, 24000, 22050, 16000,
        12000, 11025, 8000, 7350,
    ]
    @property
    def description(self):
        """string or None if unknown"""
        name = None
        try:
            name = self._TYPE_NAMES[self.audioObjectType]
        except IndexError:
            pass
        if name is None:
            return
        if self.sbrPresentFlag == 1:
            name += "+SBR"
        if self.psPresentFlag == 1:
            name += "+PS"
        return text_type(name)
    @property
    def sample_rate(self):
        """0 means unknown"""
        if self.sbrPresentFlag == 1:
            # With SBR the extension sampling frequency is the real one.
            return self.extensionSamplingFrequency
        elif self.sbrPresentFlag == 0:
            return self.samplingFrequency
        else:
            # sbrPresentFlag == -1: SBR presence unknown
            # these are all types that support SBR
            aot_can_sbr = (1, 2, 3, 4, 6, 17, 19, 20, 22)
            if self.audioObjectType not in aot_can_sbr:
                return self.samplingFrequency
            # there shouldn't be SBR for > 48KHz
            if self.samplingFrequency > 24000:
                return self.samplingFrequency
            # either samplingFrequency or samplingFrequency * 2
            return 0
    @property
    def channels(self):
        """channel count or 0 for unknown"""
        # from ProgramConfigElement()
        if hasattr(self, "pce_channels"):
            return self.pce_channels
        conf = getattr(
            self, "extensionChannelConfiguration", self.channelConfiguration)
        if conf == 1:
            if self.psPresentFlag == -1:
                return 0
            elif self.psPresentFlag == 1:
                # PS present -> stereo output from one configured channel
                return 2
            else:
                return 1
        elif conf == 7:
            return 8
        elif conf > 7:
            return 0
        else:
            return conf
    def _get_audio_object_type(self, r):
        """Raises BitReaderError"""
        audioObjectType = r.bits(5)
        if audioObjectType == 31:
            # escape value: the real type is 32 plus the next 6 bits
            audioObjectTypeExt = r.bits(6)
            audioObjectType = 32 + audioObjectTypeExt
        return audioObjectType
    def _get_sampling_freq(self, r):
        """Raises BitReaderError"""
        samplingFrequencyIndex = r.bits(4)
        if samplingFrequencyIndex == 0xf:
            # escape value: an explicit 24 bit frequency follows
            samplingFrequency = r.bits(24)
        else:
            try:
                samplingFrequency = self._FREQS[samplingFrequencyIndex]
            except IndexError:
                samplingFrequency = 0
        return samplingFrequency
    def __init__(self, fileobj, length):
        """Raises DescriptorError"""
        r = BitReader(fileobj)
        try:
            self._parse(r, length)
        except BitReaderError as e:
            raise DescriptorError(e)
    def _parse(self, r, length):
        """Raises BitReaderError"""
        def bits_left():
            return length * 8 - r.get_position()
        self.audioObjectType = self._get_audio_object_type(r)
        self.samplingFrequency = self._get_sampling_freq(r)
        self.channelConfiguration = r.bits(4)
        # -1 means "unknown"; possibly set below by explicit (type 5/29)
        # or implicit (syncExtension) signaling.
        self.sbrPresentFlag = -1
        self.psPresentFlag = -1
        if self.audioObjectType in (5, 29):
            # explicit SBR (5) / SBR+PS (29) signaling
            self.extensionAudioObjectType = 5
            self.sbrPresentFlag = 1
            if self.audioObjectType == 29:
                self.psPresentFlag = 1
            self.extensionSamplingFrequency = self._get_sampling_freq(r)
            self.audioObjectType = self._get_audio_object_type(r)
            if self.audioObjectType == 22:
                self.extensionChannelConfiguration = r.bits(4)
        else:
            self.extensionAudioObjectType = 0
        if self.audioObjectType in (1, 2, 3, 4, 6, 7, 17, 19, 20, 21, 22, 23):
            try:
                GASpecificConfig(r, self)
            except NotImplementedError:
                # unsupported, (warn?)
                return
        else:
            # unsupported
            return
        if self.audioObjectType in (
                17, 19, 20, 21, 22, 23, 24, 25, 26, 27, 39):
            epConfig = r.bits(2)
            if epConfig in (2, 3):
                # unsupported
                return
        if self.extensionAudioObjectType != 5 and bits_left() >= 16:
            # look for a trailing sync extension announcing implicit SBR/PS
            syncExtensionType = r.bits(11)
            if syncExtensionType == 0x2b7:
                self.extensionAudioObjectType = self._get_audio_object_type(r)
                if self.extensionAudioObjectType == 5:
                    self.sbrPresentFlag = r.bits(1)
                    if self.sbrPresentFlag == 1:
                        self.extensionSamplingFrequency = \
                            self._get_sampling_freq(r)
                        if bits_left() >= 12:
                            syncExtensionType = r.bits(11)
                            if syncExtensionType == 0x548:
                                self.psPresentFlag = r.bits(1)
                if self.extensionAudioObjectType == 22:
                    self.sbrPresentFlag = r.bits(1)
                    if self.sbrPresentFlag == 1:
                        self.extensionSamplingFrequency = \
                            self._get_sampling_freq(r)
                    self.extensionChannelConfiguration = r.bits(4)
def GASpecificConfig(r, info):
    """Reads GASpecificConfig which is needed to get the data after that
    (there is no length defined to skip it) and to read program_config_element
    which can contain channel counts.
    May raise BitReaderError on error or
    NotImplementedError if some reserved data was set.
    """
    assert isinstance(info, DecoderSpecificInfo)
    r.skip(1) # frameLengthFlag
    dependsOnCoreCoder = r.bits(1)
    if dependsOnCoreCoder:
        r.skip(14) # 14 bits we don't need
    extensionFlag = r.bits(1)
    if not info.channelConfiguration:
        # channel layout is described by an embedded program_config_element
        pce = ProgramConfigElement(r)
        info.pce_channels = pce.channels
    if info.audioObjectType == 6 or info.audioObjectType == 20:
        r.skip(3) # 3 bits we don't need
    if extensionFlag:
        if info.audioObjectType == 22:
            r.skip(5 + 11) # 16 bits we don't need
        if info.audioObjectType in (17, 19, 20, 23):
            r.skip(1 + 1 + 1) # 3 flag bits we don't need
        extensionFlag3 = r.bits(1)
        if extensionFlag3 != 0:
            raise NotImplementedError("extensionFlag3 set")

View file

@ -1,190 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright (C) 2006 Joe Wreschnig
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
import struct
from mutagen._compat import PY2
# This is not an exhaustive list of container atoms, but just the
# ones this module needs to peek inside.
_CONTAINERS = [b"moov", b"udta", b"trak", b"mdia", b"meta", b"ilst",
b"stbl", b"minf", b"moof", b"traf"]
_SKIP_SIZE = {b"meta": 4}
class AtomError(Exception):
    """Raised when an MP4 atom can't be parsed or rendered."""
class Atom(object):
    """An individual atom.
    Attributes:
    children -- list child atoms (or None for non-container atoms)
    length -- length of this atom, including length and name
    name -- four byte name of the atom, as a str
    offset -- location in the constructor-given fileobj of this atom
    This structure should only be used internally by Mutagen.
    """
    children = None
    def __init__(self, fileobj, level=0):
        """May raise AtomError"""
        self.offset = fileobj.tell()
        try:
            self.length, self.name = struct.unpack(">I4s", fileobj.read(8))
        except struct.error:
            raise AtomError("truncated data")
        self._dataoffset = self.offset + 8
        if self.length == 1:
            # A 32 bit size of 1 means a 64 bit size follows the name.
            try:
                self.length, = struct.unpack(">Q", fileobj.read(8))
            except struct.error:
                raise AtomError("truncated data")
            self._dataoffset += 8
            if self.length < 16:
                raise AtomError(
                    "64 bit atom length can only be 16 and higher")
        elif self.length == 0:
            if level != 0:
                raise AtomError(
                    "only a top-level atom can have zero length")
            # Only the last atom is supposed to have a zero-length, meaning it
            # extends to the end of file.
            fileobj.seek(0, 2)
            self.length = fileobj.tell() - self.offset
            fileobj.seek(self.offset + 8, 0)
        elif self.length < 8:
            raise AtomError(
                "atom length can only be 0, 1 or 8 and higher")
        if self.name in _CONTAINERS:
            # Recursively parse children; some containers carry a few
            # extra bytes before their first child (see _SKIP_SIZE).
            self.children = []
            fileobj.seek(_SKIP_SIZE.get(self.name, 0), 1)
            while fileobj.tell() < self.offset + self.length:
                self.children.append(Atom(fileobj, level + 1))
        else:
            # Skip over the payload to the start of the next sibling.
            fileobj.seek(self.offset + self.length, 0)
    def read(self, fileobj):
        """Return if all data could be read and the atom payload"""
        fileobj.seek(self._dataoffset, 0)
        length = self.length - (self._dataoffset - self.offset)
        data = fileobj.read(length)
        return len(data) == length, data
    @staticmethod
    def render(name, data):
        """Render raw atom data."""
        # this raises OverflowError if Py_ssize_t can't handle the atom data
        size = len(data) + 8
        if size <= 0xFFFFFFFF:
            return struct.pack(">I4s", size, name) + data
        else:
            # Too big for 32 bits: write the 64 bit form (size field = 1).
            return struct.pack(">I4sQ", 1, name, size + 8) + data
    def findall(self, name, recursive=False):
        """Recursively find all child atoms by specified name."""
        if self.children is not None:
            for child in self.children:
                if child.name == name:
                    yield child
                if recursive:
                    for atom in child.findall(name, True):
                        yield atom
    def __getitem__(self, remaining):
        """Look up a child atom, potentially recursively.
        e.g. atom['udta', 'meta'] => <Atom name='meta' ...>
        """
        if not remaining:
            return self
        elif self.children is None:
            raise KeyError("%r is not a container" % self.name)
        for child in self.children:
            if child.name == remaining[0]:
                return child[remaining[1:]]
        else:
            raise KeyError("%r not found" % remaining[0])
    def __repr__(self):
        cls = self.__class__.__name__
        if self.children is None:
            return "<%s name=%r length=%r offset=%r>" % (
                cls, self.name, self.length, self.offset)
        else:
            # Indent child reprs one space to show nesting.
            children = "\n".join([" " + line for child in self.children
                                  for line in repr(child).splitlines()])
            return "<%s name=%r length=%r offset=%r\n%s>" % (
                cls, self.name, self.length, self.offset, children)
class Atoms(object):
    """All root atoms in a given file.

    Attributes:
    atoms -- a list of top-level atoms as Atom objects

    This structure should only be used internally by Mutagen.
    """

    def __init__(self, fileobj):
        self.atoms = []
        fileobj.seek(0, 2)
        file_end = fileobj.tell()
        fileobj.seek(0)
        # An atom header needs 8 bytes; stop once fewer remain.
        while fileobj.tell() + 8 <= file_end:
            self.atoms.append(Atom(fileobj))

    def path(self, *names):
        """Look up and return the complete path of an atom.

        For example, atoms.path('moov', 'udta', 'meta') will return a
        list of three atoms, corresponding to the moov, udta, and meta
        atoms.
        """
        found = [self]
        for name in names:
            found.append(found[-1][name, ])
        return found[1:]

    def __contains__(self, names):
        try:
            self[names]
        except KeyError:
            return False
        else:
            return True

    def __getitem__(self, names):
        """Look up a child atom.

        'names' may be a list of atoms (['moov', 'udta']) or a string
        specifying the complete path ('moov.udta').
        """
        string_type = basestring if PY2 else bytes
        if isinstance(names, string_type):
            names = names.split(b".")
        for child in self.atoms:
            if child.name == names[0]:
                return child[names[1:]]
        raise KeyError("%s not found" % names[0])

    def __repr__(self):
        return "\n".join(repr(child) for child in self.atoms)

View file

@ -1,21 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright (C) 2014 Christoph Reiter
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
from mutagen._util import cdata
def parse_full_atom(data):
    """Split a versioned ("full") atom payload into (version, flags, payload).

    The first byte is the version, the following three bytes the
    big-endian flags, the rest the actual payload.

    Can raise ValueError.
    """
    if len(data) < 4:
        raise ValueError("not enough data")
    header, payload = data[:4], data[4:]
    version = ord(header[0:1])
    flags = cdata.uint_be(b"\x00" + header[1:4])
    return version, flags, payload

View file

@ -1,270 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright (C) 2006 Lukas Lalinsky
# Copyright (C) 2012 Christoph Reiter
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
"""Musepack audio streams with APEv2 tags.
Musepack is an audio format originally based on the MPEG-1 Layer-2
algorithms. Stream versions 4 through 7 are supported.
For more information, see http://www.musepack.net/.
"""
__all__ = ["Musepack", "Open", "delete"]
import struct
from ._compat import endswith, xrange
from mutagen import StreamInfo
from mutagen.apev2 import APEv2File, error, delete
from mutagen.id3 import BitPaddedInt
from mutagen._util import cdata
class MusepackHeaderError(error):
    """Raised when a Musepack stream header is missing or malformed."""
RATES = [44100, 48000, 37800, 32000]
def _parse_sv8_int(fileobj, limit=9):
    """Decode an SV8 variable-length integer from fileobj.

    Reads at most *limit* bytes; the 7 low bits of each byte are merged
    big-endian, and a clear MSB marks the final byte.

    Raises ValueError if no byte within *limit* terminates the number,
    or EOFError in case the file ended before that.

    Returns (parsed number, number of bytes read)
    """
    value = 0
    for count in xrange(1, limit + 1):
        raw = fileobj.read(1)
        if len(raw) != 1:
            raise EOFError
        byte = bytearray(raw)[0]
        value = (value << 7) | (byte & 0x7F)
        if not byte & 0x80:
            return value, count
    if limit > 0:
        raise ValueError
    return 0, 0
def _calc_sv8_gain(gain):
    """Convert a raw SV8 gain value into dB.

    The 64.82 reference constant is taken from mpcdec.
    """
    db_offset = gain / 256.0
    return 64.82 - db_offset
def _calc_sv8_peak(peak):
    """Convert a raw SV8 peak value into a [0..1]-scaled amplitude."""
    exponent = peak / (256.0 * 20.0)
    return (10 ** exponent) / 65535.0
class MusepackInfo(StreamInfo):
    """Musepack stream information.
    Attributes:
    * channels -- number of audio channels
    * length -- file length in seconds, as a float
    * sample_rate -- audio sampling rate in Hz
    * bitrate -- audio bitrate, in bits per second
    * version -- Musepack stream version
    Optional Attributes:
    * title_gain, title_peak -- Replay Gain and peak data for this song
    * album_gain, album_peak -- Replay Gain and peak data for this album
    These attributes are only available in stream version 7/8. The
    gains are a float, +/- some dB. The peaks are a percentage [0..1] of
    the maximum amplitude. This means to get a number comparable to
    VorbisGain, you must multiply the peak by 2.
    """
    def __init__(self, fileobj):
        """Raises MusepackHeaderError if fileobj is not a Musepack stream."""
        header = fileobj.read(4)
        if len(header) != 4:
            raise MusepackHeaderError("not a Musepack file")
        # Skip ID3v2 tags
        if header[:3] == b"ID3":
            header = fileobj.read(6)
            if len(header) != 6:
                raise MusepackHeaderError("not a Musepack file")
            size = 10 + BitPaddedInt(header[2:6])
            fileobj.seek(size)
            header = fileobj.read(4)
            if len(header) != 4:
                raise MusepackHeaderError("not a Musepack file")
        if header.startswith(b"MPCK"):
            self.__parse_sv8(fileobj)
        else:
            self.__parse_sv467(fileobj)
        if not self.bitrate and self.length != 0:
            # No bitrate in the header: derive the average from file size.
            fileobj.seek(0, 2)
            self.bitrate = int(round(fileobj.tell() * 8 / self.length))
    def __parse_sv8(self, fileobj):
        # SV8 http://trac.musepack.net/trac/wiki/SV8Specification
        key_size = 2
        mandatory_packets = [b"SH", b"RG"]
        def check_frame_key(key):
            # Validate the given packet key: exactly two uppercase ASCII
            # letters. (Validates its argument rather than relying on the
            # enclosing scope's frame_type variable.)
            if ((len(key) != key_size) or
                    (not b'AA' <= key <= b'ZZ')):
                raise MusepackHeaderError("Invalid frame key.")
        frame_type = fileobj.read(key_size)
        check_frame_key(frame_type)
        # Walk packets until the audio (AP) or stream-end (SE) packet, or
        # until both mandatory header packets have been consumed.
        while frame_type not in (b"AP", b"SE") and mandatory_packets:
            try:
                frame_size, slen = _parse_sv8_int(fileobj)
            except (EOFError, ValueError):
                raise MusepackHeaderError("Invalid packet size.")
            data_size = frame_size - key_size - slen
            # packets can be at maximum data_size big and are padded with zeros
            if frame_type == b"SH":
                mandatory_packets.remove(frame_type)
                self.__parse_stream_header(fileobj, data_size)
            elif frame_type == b"RG":
                mandatory_packets.remove(frame_type)
                self.__parse_replaygain_packet(fileobj, data_size)
            else:
                fileobj.seek(data_size, 1)
            frame_type = fileobj.read(key_size)
            check_frame_key(frame_type)
        if mandatory_packets:
            raise MusepackHeaderError("Missing mandatory packets: %s." %
                                      ", ".join(map(repr, mandatory_packets)))
        self.length = float(self.samples) / self.sample_rate
        self.bitrate = 0
    def __parse_stream_header(self, fileobj, data_size):
        """Parse the SH packet: version, sample count, rate, channels."""
        # skip CRC
        fileobj.seek(4, 1)
        remaining_size = data_size - 4
        try:
            self.version = bytearray(fileobj.read(1))[0]
        except TypeError:
            raise MusepackHeaderError("SH packet ended unexpectedly.")
        remaining_size -= 1
        try:
            samples, l1 = _parse_sv8_int(fileobj)
            samples_skip, l2 = _parse_sv8_int(fileobj)
        except (EOFError, ValueError):
            raise MusepackHeaderError(
                "SH packet: Invalid sample counts.")
        # Leading skipped samples don't count towards the stream length.
        self.samples = samples - samples_skip
        remaining_size -= l1 + l2
        data = fileobj.read(remaining_size)
        if len(data) != remaining_size:
            raise MusepackHeaderError("SH packet ended unexpectedly.")
        self.sample_rate = RATES[bytearray(data)[0] >> 5]
        self.channels = (bytearray(data)[1] >> 4) + 1
    def __parse_replaygain_packet(self, fileobj, data_size):
        """Parse the RG packet: title/album gains and peaks."""
        data = fileobj.read(data_size)
        if data_size < 9:
            raise MusepackHeaderError("Invalid RG packet size.")
        if len(data) != data_size:
            raise MusepackHeaderError("RG packet ended unexpectedly.")
        title_gain = cdata.short_be(data[1:3])
        title_peak = cdata.short_be(data[3:5])
        album_gain = cdata.short_be(data[5:7])
        album_peak = cdata.short_be(data[7:9])
        # Zero means "not stored"; only set attributes for real values.
        if title_gain:
            self.title_gain = _calc_sv8_gain(title_gain)
        if title_peak:
            self.title_peak = _calc_sv8_peak(title_peak)
        if album_gain:
            self.album_gain = _calc_sv8_gain(album_gain)
        if album_peak:
            self.album_peak = _calc_sv8_peak(album_peak)
    def __parse_sv467(self, fileobj):
        """Parse SV4-SV7 headers (pre-SV8 stream versions)."""
        # Rewind over the 4 magic bytes already consumed by __init__.
        fileobj.seek(-4, 1)
        header = fileobj.read(32)
        if len(header) != 32:
            raise MusepackHeaderError("not a Musepack file")
        # SV7
        if header.startswith(b"MP+"):
            self.version = bytearray(header)[3] & 0xF
            if self.version < 7:
                raise MusepackHeaderError("not a Musepack file")
            frames = cdata.uint_le(header[4:8])
            flags = cdata.uint_le(header[8:12])
            self.title_peak, self.title_gain = struct.unpack(
                "<Hh", header[12:16])
            self.album_peak, self.album_gain = struct.unpack(
                "<Hh", header[16:20])
            # Gains are stored in 1/100 dB units, peaks as 16 bit fractions.
            self.title_gain /= 100.0
            self.album_gain /= 100.0
            self.title_peak /= 65535.0
            self.album_peak /= 65535.0
            self.sample_rate = RATES[(flags >> 16) & 0x0003]
            self.bitrate = 0
        # SV4-SV6
        else:
            header_dword = cdata.uint_le(header[0:4])
            self.version = (header_dword >> 11) & 0x03FF
            if self.version < 4 or self.version > 6:
                raise MusepackHeaderError("not a Musepack file")
            self.bitrate = (header_dword >> 23) & 0x01FF
            self.sample_rate = 44100
            if self.version >= 5:
                frames = cdata.uint_le(header[4:8])
            else:
                frames = cdata.ushort_le(header[6:8])
            if self.version < 6:
                frames -= 1
        self.channels = 2
        # 1152 samples per frame; the last frame is 576 samples short.
        self.length = float(frames * 1152 - 576) / self.sample_rate
    def pprint(self):
        """Return a human-readable one-line description of the stream."""
        rg_data = []
        if hasattr(self, "title_gain"):
            rg_data.append("%+0.2f (title)" % self.title_gain)
        if hasattr(self, "album_gain"):
            rg_data.append("%+0.2f (album)" % self.album_gain)
        rg_data = (rg_data and ", Gain: " + ", ".join(rg_data)) or ""
        return "Musepack SV%d, %.2f seconds, %d Hz, %d bps%s" % (
            self.version, self.length, self.sample_rate, self.bitrate, rg_data)
class Musepack(APEv2File):
    """A Musepack audio file, carrying APEv2 tags."""
    _Info = MusepackInfo
    _mimes = ["audio/x-musepack", "audio/x-mpc"]
    @staticmethod
    def score(filename, fileobj, header):
        # Higher score -> more likely to be a Musepack file.
        # NOTE(review): the extension check compares against the bytes
        # literal b".mpc"; if callers pass a text (unicode) filename on
        # Python 3 this can never match -- confirm the expected filename
        # type with the callers.
        filename = filename.lower()
        return (header.startswith(b"MP+") + header.startswith(b"MPCK") +
                endswith(filename, b".mpc"))
Open = Musepack

View file

@ -1,508 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright (C) 2006 Joe Wreschnig
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
"""Read and write Ogg bitstreams and pages.
This module reads and writes a subset of the Ogg bitstream format
version 0. It does *not* read or write Ogg Vorbis files! For that,
you should use mutagen.oggvorbis.
This implementation is based on the RFC 3533 standard found at
http://www.xiph.org/ogg/doc/rfc3533.txt.
"""
import struct
import sys
import zlib
from mutagen import FileType
from mutagen._util import cdata, insert_bytes, delete_bytes, MutagenError
from ._compat import cBytesIO, reraise, chr_
class error(IOError, MutagenError):
    """Ogg stream parsing errors."""
class OggPage(object):
"""A single Ogg page (not necessarily a single encoded packet).
A page is a header of 26 bytes, followed by the length of the
data, followed by the data.
The constructor is givin a file-like object pointing to the start
of an Ogg page. After the constructor is finished it is pointing
to the start of the next page.
Attributes:
* version -- stream structure version (currently always 0)
* position -- absolute stream position (default -1)
* serial -- logical stream serial number (default 0)
* sequence -- page sequence number within logical stream (default 0)
* offset -- offset this page was read from (default None)
* complete -- if the last packet on this page is complete (default True)
* packets -- list of raw packet data (default [])
Note that if 'complete' is false, the next page's 'continued'
property must be true (so set both when constructing pages).
If a file-like object is supplied to the constructor, the above
attributes will be filled in based on it.
"""
version = 0
__type_flags = 0
position = 0
serial = 0
sequence = 0
offset = None
complete = True
    def __init__(self, fileobj=None):
        """Parse one page from *fileobj*, or create an empty page if None.

        Raises EOFError at end of stream and `error` for malformed pages.
        """
        self.packets = []
        if fileobj is None:
            return
        self.offset = fileobj.tell()
        # Fixed 27 byte header, then the lacing (segment) table.
        header = fileobj.read(27)
        if len(header) == 0:
            raise EOFError
        try:
            (oggs, self.version, self.__type_flags,
             self.position, self.serial, self.sequence,
             crc, segments) = struct.unpack("<4sBBqIIiB", header)
        except struct.error:
            raise error("unable to read full header; got %r" % header)
        if oggs != b"OggS":
            raise error("read %r, expected %r, at 0x%x" % (
                oggs, b"OggS", fileobj.tell() - 27))
        if self.version != 0:
            raise error("version %r unsupported" % self.version)
        total = 0
        lacings = []
        lacing_bytes = fileobj.read(segments)
        if len(lacing_bytes) != segments:
            raise error("unable to read %r lacing bytes" % segments)
        # A lacing value < 255 terminates a packet; a packet whose last
        # lacing value is 255 continues on the next page.
        for c in bytearray(lacing_bytes):
            total += c
            if c < 255:
                lacings.append(total)
                total = 0
        if total:
            lacings.append(total)
            self.complete = False
        self.packets = [fileobj.read(l) for l in lacings]
        if [len(p) for p in self.packets] != lacings:
            raise error("unable to read full data")
    def __eq__(self, other):
        """Two Ogg pages are the same if they write the same data."""
        try:
            return (self.write() == other.write())
        except AttributeError:
            # 'other' is not a page-like object with write().
            return False
    # Identity-based hash: equality compares serialized content, so two
    # equal pages may still hash differently.
    __hash__ = object.__hash__
def __repr__(self):
attrs = ['version', 'position', 'serial', 'sequence', 'offset',
'complete', 'continued', 'first', 'last']
values = ["%s=%r" % (attr, getattr(self, attr)) for attr in attrs]
return "<%s %s, %d bytes in %d packets>" % (
type(self).__name__, " ".join(values), sum(map(len, self.packets)),
len(self.packets))
    def write(self):
        """Return a string encoding of the page header and data.
        A ValueError is raised if the data is too big to fit in a
        single page.
        """
        # Header with a zero CRC placeholder; patched in below.
        data = [
            struct.pack("<4sBBqIIi", b"OggS", self.version, self.__type_flags,
                        self.position, self.serial, self.sequence, 0)
        ]
        lacing_data = []
        for datum in self.packets:
            quot, rem = divmod(len(datum), 255)
            lacing_data.append(b"\xff" * quot + chr_(rem))
        lacing_data = b"".join(lacing_data)
        if not self.complete and lacing_data.endswith(b"\x00"):
            # The final packet continues on the next page: drop its
            # terminating lacing value.
            lacing_data = lacing_data[:-1]
        data.append(chr_(len(lacing_data)))
        data.append(lacing_data)
        data.extend(self.packets)
        data = b"".join(data)
        # Python's CRC is swapped relative to Ogg's needs.
        # crc32 returns uint prior to py2.6 on some platforms, so force uint
        crc = (~zlib.crc32(data.translate(cdata.bitswap), -1)) & 0xffffffff
        # Although we're using to_uint_be, this actually makes the CRC
        # a proper le integer, since Python's CRC is byteswapped.
        crc = cdata.to_uint_be(crc).translate(cdata.bitswap)
        # Splice the CRC into bytes 22-25 of the header.
        data = data[:22] + crc + data[26:]
        return data
@property
def size(self):
"""Total frame size."""
size = 27 # Initial header size
for datum in self.packets:
quot, rem = divmod(len(datum), 255)
size += quot + 1
if not self.complete and rem == 0:
# Packet contains a multiple of 255 bytes and is not
# terminated, so we don't have a \x00 at the end.
size -= 1
size += sum(map(len, self.packets))
return size
def __set_flag(self, bit, val):
mask = 1 << bit
if val:
self.__type_flags |= mask
else:
self.__type_flags &= ~mask
continued = property(
lambda self: cdata.test_bit(self.__type_flags, 0),
lambda self, v: self.__set_flag(0, v),
doc="The first packet is continued from the previous page.")
first = property(
lambda self: cdata.test_bit(self.__type_flags, 1),
lambda self, v: self.__set_flag(1, v),
doc="This is the first page of a logical bitstream.")
last = property(
lambda self: cdata.test_bit(self.__type_flags, 2),
lambda self, v: self.__set_flag(2, v),
doc="This is the last page of a logical bitstream.")
    @staticmethod
    def renumber(fileobj, serial, start):
        """Renumber pages belonging to a specified logical stream.
        fileobj must be opened with mode r+b or w+b.
        Starting at page number 'start', renumber all pages belonging
        to logical stream 'serial'. Other pages will be ignored.
        fileobj must point to the start of a valid Ogg page; any
        occuring after it and part of the specified logical stream
        will be numbered. No adjustment will be made to the data in
        the pages nor the granule position; only the page number, and
        so also the CRC.
        If an error occurs (e.g. non-Ogg data is found), fileobj will
        be left pointing to the place in the stream the error occured,
        but the invalid data will be left intact (since this function
        does not change the total file size).
        """
        number = start
        while True:
            try:
                page = OggPage(fileobj)
            except EOFError:
                break
            else:
                if page.serial != serial:
                    # Wrong stream, skip this page.
                    continue
                # Changing the number can't change the page size,
                # so seeking back based on the current size is safe.
                fileobj.seek(-page.size, 1)
            page.sequence = number
            # Rewriting the page also recomputes its CRC.
            fileobj.write(page.write())
            fileobj.seek(page.offset + page.size, 0)
            number += 1
    @staticmethod
    def to_packets(pages, strict=False):
        """Construct a list of packet data from a list of Ogg pages.
        If strict is true, the first page must start a new packet,
        and the last page must end the last packet.
        """
        serial = pages[0].serial
        sequence = pages[0].sequence
        packets = []
        if strict:
            if pages[0].continued:
                raise ValueError("first packet is continued")
            if not pages[-1].complete:
                raise ValueError("last packet does not complete")
        elif pages and pages[0].continued:
            # Collect the tail of a packet begun before these pages.
            packets.append([b""])
        for page in pages:
            # All pages must come from one stream, in sequence order.
            if serial != page.serial:
                raise ValueError("invalid serial number in %r" % page)
            elif sequence != page.sequence:
                raise ValueError("bad sequence number in %r" % page)
            else:
                sequence += 1
            if page.continued:
                # First segment continues the previous page's last packet.
                packets[-1].append(page.packets[0])
            else:
                packets.append([page.packets[0]])
            packets.extend([p] for p in page.packets[1:])
        return [b"".join(p) for p in packets]
    @staticmethod
    def from_packets(packets, sequence=0, default_size=4096,
                     wiggle_room=2048):
        """Construct a list of Ogg pages from a list of packet data.
        The algorithm will generate pages of approximately
        default_size in size (rounded down to the nearest multiple of
        255). However, it will also allow pages to increase to
        approximately default_size + wiggle_room if allowing the
        wiggle room would finish a packet (only one packet will be
        finished in this way per page; if the next packet would fit
        into the wiggle room, it still starts on a new page).
        This method reduces packet fragmentation when packet sizes are
        slightly larger than the default page size, while still
        ensuring most pages are of the average size.
        Pages are numbered started at 'sequence'; other information is
        uninitialized.
        """
        chunk_size = (default_size // 255) * 255
        pages = []
        page = OggPage()
        page.sequence = sequence
        for packet in packets:
            page.packets.append(b"")
            while packet:
                data, packet = packet[:chunk_size], packet[chunk_size:]
                if page.size < default_size and len(page.packets) < 255:
                    page.packets[-1] += data
                else:
                    # If we've put any packet data into this page yet,
                    # we need to mark it incomplete. However, we can
                    # also have just started this packet on an already
                    # full page, in which case, just start the new
                    # page with this packet.
                    if page.packets[-1]:
                        page.complete = False
                        if len(page.packets) == 1:
                            page.position = -1
                    else:
                        page.packets.pop(-1)
                    pages.append(page)
                    page = OggPage()
                    page.continued = not pages[-1].complete
                    page.sequence = pages[-1].sequence + 1
                    page.packets.append(data)
                if len(packet) < wiggle_room:
                    # Close enough to done: finish the packet on this page.
                    page.packets[-1] += packet
                    packet = b""
        if page.packets:
            pages.append(page)
        return pages
    @classmethod
    def replace(cls, fileobj, old_pages, new_pages):
        """Replace old_pages with new_pages within fileobj.
        old_pages must have come from reading fileobj originally.
        new_pages are assumed to have the 'same' data as old_pages,
        and so the serial and sequence numbers will be copied, as will
        the flags for the first and last pages.
        fileobj will be resized and pages renumbered as necessary. As
        such, it must be opened r+b or w+b.
        """
        # Number the new pages starting from the first old page.
        first = old_pages[0].sequence
        for page, seq in zip(new_pages, range(first, first + len(new_pages))):
            page.sequence = seq
            page.serial = old_pages[0].serial
        new_pages[0].first = old_pages[0].first
        new_pages[0].last = old_pages[0].last
        new_pages[0].continued = old_pages[0].continued
        new_pages[-1].first = old_pages[-1].first
        new_pages[-1].last = old_pages[-1].last
        new_pages[-1].complete = old_pages[-1].complete
        if not new_pages[-1].complete and len(new_pages[-1].packets) == 1:
            # A single unfinished packet: no granule position applies.
            new_pages[-1].position = -1
        new_data = b"".join(cls.write(p) for p in new_pages)
        # Make room in the file for the new data.
        delta = len(new_data)
        fileobj.seek(old_pages[0].offset, 0)
        insert_bytes(fileobj, delta, old_pages[0].offset)
        fileobj.seek(old_pages[0].offset, 0)
        fileobj.write(new_data)
        new_data_end = old_pages[0].offset + delta
        # Go through the old pages and delete them. Since we shifted
        # the data down the file, we need to adjust their offsets. We
        # also need to go backwards, so we don't adjust the deltas of
        # the other pages.
        old_pages.reverse()
        for old_page in old_pages:
            adj_offset = old_page.offset + delta
            delete_bytes(fileobj, old_page.size, adj_offset)
        # Finally, if there's any discrepency in length, we need to
        # renumber the pages for the logical stream.
        if len(old_pages) != len(new_pages):
            fileobj.seek(new_data_end, 0)
            serial = new_pages[-1].serial
            sequence = new_pages[-1].sequence + 1
            cls.renumber(fileobj, serial, sequence)
@staticmethod
def find_last(fileobj, serial):
    """Find the last page of the stream 'serial'.

    If the file is not multiplexed this function is fast. If it is,
    it must read the whole the stream.

    This finds the last page in the actual file object, or the last
    page in the stream (with eos set), whichever comes first.
    """
    # For non-muxed streams, look at the last page.
    try:
        # Ogg pages are at most 64k, so the final page must start
        # within the last 64k of the file.
        fileobj.seek(-256 * 256, 2)
    except IOError:
        # The file is less than 64k in length.
        fileobj.seek(0)
    data = fileobj.read()
    try:
        index = data.rindex(b"OggS")
    except ValueError:
        raise error("unable to find final Ogg header")
    bytesobj = cBytesIO(data[index:])
    best_page = None
    try:
        page = OggPage(bytesobj)
    except error:
        # The trailing capture pattern didn't parse; fall through to
        # the slow full scan below.
        pass
    else:
        if page.serial == serial:
            if page.last:
                return page
            else:
                best_page = page
        else:
            best_page = None
    # The stream is muxed, so use the slow way.
    fileobj.seek(0)
    try:
        page = OggPage(fileobj)
        while not page.last:
            page = OggPage(fileobj)
            # Skip pages belonging to other logical streams.
            while page.serial != serial:
                page = OggPage(fileobj)
            best_page = page
        return page
    except error:
        return best_page
    except EOFError:
        # Ran off the end of the file; return the last matching page
        # seen, if any.
        return best_page
class OggFileType(FileType):
    """A generic Ogg file."""

    # Subclasses fill these in: the stream-info class, the tag class,
    # and the error type raised for malformed files.
    _Info = None
    _Tags = None
    _Error = None
    _mimes = ["application/ogg", "application/x-ogg"]

    def load(self, filename):
        """Load file information from a filename."""
        self.filename = filename
        fileobj = open(filename, "rb")
        try:
            try:
                self.info = self._Info(fileobj)
                self.tags = self._Tags(fileobj, self.info)
                # Let the info object finish computing stream length now
                # that the tag headers have been consumed.
                self.info._post_tags(fileobj)
            except error as e:
                # Re-raise as the format-specific error, preserving the
                # original traceback.
                reraise(self._Error, e, sys.exc_info()[2])
            except EOFError:
                raise self._Error("no appropriate stream found")
        finally:
            fileobj.close()

    def delete(self, filename=None):
        """Remove tags from a file.

        If no filename is given, the one most recently loaded is used.
        """
        if filename is None:
            filename = self.filename
        # Clearing and re-injecting writes an empty comment packet.
        self.tags.clear()
        fileobj = open(filename, "rb+")
        try:
            try:
                self.tags._inject(fileobj)
            except error as e:
                reraise(self._Error, e, sys.exc_info()[2])
            except EOFError:
                raise self._Error("no appropriate stream found")
        finally:
            fileobj.close()

    def save(self, filename=None):
        """Save a tag to a file.

        If no filename is given, the one most recently loaded is used.
        """
        if filename is None:
            filename = self.filename
        fileobj = open(filename, "rb+")
        try:
            try:
                self.tags._inject(fileobj)
            except error as e:
                reraise(self._Error, e, sys.exc_info()[2])
            except EOFError:
                raise self._Error("no appropriate stream found")
        finally:
            fileobj.close()

View file

@ -1,147 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright (C) 2006 Joe Wreschnig
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
"""Read and write Ogg FLAC comments.
This module handles FLAC files wrapped in an Ogg bitstream. The first
FLAC stream found is used. For 'naked' FLACs, see mutagen.flac.
This module is based off the specification at
http://flac.sourceforge.net/ogg_mapping.html.
"""
__all__ = ["OggFLAC", "Open", "delete"]
import struct
from ._compat import cBytesIO
from mutagen.flac import StreamInfo, VCFLACDict, StrictFileObject
from mutagen.ogg import OggPage, OggFileType, error as OggError
class error(OggError):
    """Base exception for Ogg FLAC-specific errors."""
    pass
class OggFLACHeaderError(error):
    """Raised when the Ogg FLAC header is missing or malformed."""
    pass
class OggFLACStreamInfo(StreamInfo):
    """Ogg FLAC general header and stream info.

    This encompasses the Ogg wrapper for the FLAC STREAMINFO metadata
    block, as well as the Ogg codec setup that precedes it.

    Attributes (in addition to StreamInfo's):

    * packets -- number of metadata packets
    * serial -- Ogg logical stream serial number
    """

    packets = 0
    serial = 0

    def load(self, data):
        # Ogg expects file objects that don't raise on read
        if isinstance(data, StrictFileObject):
            data = data._fileobj
        page = OggPage(data)
        # Scan forward to the Ogg FLAC identification packet.
        while not page.packets[0].startswith(b"\x7FFLAC"):
            page = OggPage(data)
        major, minor, self.packets, flac = struct.unpack(
            ">BBH4s", page.packets[0][5:13])
        if flac != b"fLaC":
            raise OggFLACHeaderError("invalid FLAC marker (%r)" % flac)
        elif (major, minor) != (1, 0):
            raise OggFLACHeaderError(
                "unknown mapping version: %d.%d" % (major, minor))
        self.serial = page.serial
        # Skip over the block header.
        stringobj = StrictFileObject(cBytesIO(page.packets[0][17:]))
        super(OggFLACStreamInfo, self).load(stringobj)

    def _post_tags(self, fileobj):
        # Derive length from the final granule position unless the
        # STREAMINFO block already supplied one.
        if self.length:
            return
        page = OggPage.find_last(fileobj, self.serial)
        self.length = page.position / float(self.sample_rate)

    def pprint(self):
        return u"Ogg " + super(OggFLACStreamInfo, self).pprint()
class OggFLACVComment(VCFLACDict):
    """FLAC Vorbis comment block read from an Ogg bitstream."""

    def load(self, data, info, errors='replace'):
        # data should be pointing at the start of an Ogg page, after
        # the first FLAC page.
        pages = []
        complete = False
        while not complete:
            page = OggPage(data)
            if page.serial == info.serial:
                pages.append(page)
                # A finished page or a second packet means the comment
                # packet has ended.
                complete = page.complete or (len(page.packets) > 1)
        # Strip the 4-byte FLAC metadata block header.
        comment = cBytesIO(OggPage.to_packets(pages)[0][4:])
        super(OggFLACVComment, self).load(comment, errors=errors)

    def _inject(self, fileobj):
        """Write tag data into the FLAC Vorbis comment packet/page."""
        # Ogg FLAC has no convenient data marker like Vorbis, but the
        # second packet - and second page - must be the comment data.
        fileobj.seek(0)
        page = OggPage(fileobj)
        while not page.packets[0].startswith(b"\x7FFLAC"):
            page = OggPage(fileobj)
        first_page = page
        # Sequence 1 of the same logical stream starts the comments.
        while not (page.sequence == 1 and page.serial == first_page.serial):
            page = OggPage(fileobj)
        old_pages = [page]
        while not (old_pages[-1].complete or len(old_pages[-1].packets) > 1):
            page = OggPage(fileobj)
            if page.serial == first_page.serial:
                old_pages.append(page)
        packets = OggPage.to_packets(old_pages, strict=False)
        # Set the new comment block.
        data = self.write()
        # Keep the original block-type byte and store the new length as
        # a 24-bit big-endian value.
        data = packets[0][:1] + struct.pack(">I", len(data))[-3:] + data
        packets[0] = data
        new_pages = OggPage.from_packets(packets, old_pages[0].sequence)
        OggPage.replace(fileobj, old_pages, new_pages)
class OggFLAC(OggFileType):
    """An Ogg FLAC file."""

    _Info = OggFLACStreamInfo
    _Tags = OggFLACVComment
    _Error = OggFLACHeaderError
    _mimes = ["audio/x-oggflac"]

    @staticmethod
    def score(filename, fileobj, header):
        """Score the likelihood that *header* starts an Ogg FLAC file."""
        if not header.startswith(b"OggS"):
            return 0
        return (b"FLAC" in header) + (b"fLaC" in header)
# Alias used by mutagen's generic file opener.
Open = OggFLAC


def delete(filename):
    """Remove all tags from the given Ogg FLAC file."""
    audio = OggFLAC(filename)
    audio.delete()

View file

@ -1,138 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright (C) 2012, 2013 Christoph Reiter
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
"""Read and write Ogg Opus comments.
This module handles Opus files wrapped in an Ogg bitstream. The
first Opus stream found is used.
Based on http://tools.ietf.org/html/draft-terriberry-oggopus-01
"""
__all__ = ["OggOpus", "Open", "delete"]
import struct
from mutagen import StreamInfo
from mutagen._compat import BytesIO
from mutagen._vorbis import VCommentDict
from mutagen.ogg import OggPage, OggFileType, error as OggError
class error(OggError):
    """Base exception for Ogg Opus-specific errors."""
    pass
class OggOpusHeaderError(error):
    """Raised when the Opus identification header is missing or bad."""
    pass
class OggOpusInfo(StreamInfo):
    """Ogg Opus stream information.

    Attributes:

    * length - file length in seconds, as a float
    * channels - number of channels
    """

    length = 0

    def __init__(self, fileobj):
        page = OggPage(fileobj)
        # Scan forward to the OpusHead identification packet.
        while not page.packets[0].startswith(b"OpusHead"):
            page = OggPage(fileobj)
        self.serial = page.serial
        if not page.first:
            raise OggOpusHeaderError(
                "page has ID header, but doesn't start a stream")
        (version, self.channels, pre_skip, orig_sample_rate, output_gain,
         channel_map) = struct.unpack("<BBHIhB", page.packets[0][8:19])
        # Samples to discard from the start; needed for length below.
        self.__pre_skip = pre_skip
        # only the higher 4 bits change on incompatible changes
        major = version >> 4
        if major != 0:
            raise OggOpusHeaderError("version %r unsupported" % major)

    def _post_tags(self, fileobj):
        page = OggPage.find_last(fileobj, self.serial)
        # Opus granule positions always count 48 kHz samples.
        self.length = (page.position - self.__pre_skip) / float(48000)

    def pprint(self):
        return u"Ogg Opus, %.2f seconds" % (self.length)
class OggOpusVComment(VCommentDict):
    """Opus comments embedded in an Ogg bitstream."""

    def __get_comment_pages(self, fileobj, info):
        # find the first tags page with the right serial
        page = OggPage(fileobj)
        while ((info.serial != page.serial) or
                not page.packets[0].startswith(b"OpusTags")):
            page = OggPage(fileobj)
        # get all comment pages
        pages = [page]
        while not (pages[-1].complete or len(pages[-1].packets) > 1):
            page = OggPage(fileobj)
            if page.serial == pages[0].serial:
                pages.append(page)
        return pages

    def __init__(self, fileobj, info):
        pages = self.__get_comment_pages(fileobj, info)
        data = OggPage.to_packets(pages)[0][8:]  # Strip OpusTags
        fileobj = BytesIO(data)
        super(OggOpusVComment, self).__init__(fileobj, framing=False)
        # in case the LSB of the first byte after v-comment is 1, preserve the
        # following data
        padding_flag = fileobj.read(1)
        if padding_flag and ord(padding_flag) & 0x1:
            self._pad_data = padding_flag + fileobj.read()
        else:
            self._pad_data = b""

    def _inject(self, fileobj):
        # Rewrite the comment packet, re-attaching any preserved
        # padding data that followed the original comments.
        fileobj.seek(0)
        info = OggOpusInfo(fileobj)
        old_pages = self.__get_comment_pages(fileobj, info)
        packets = OggPage.to_packets(old_pages)
        packets[0] = b"OpusTags" + self.write(framing=False) + self._pad_data
        new_pages = OggPage.from_packets(packets, old_pages[0].sequence)
        OggPage.replace(fileobj, old_pages, new_pages)
class OggOpus(OggFileType):
    """An Ogg Opus file."""

    _Info = OggOpusInfo
    _Tags = OggOpusVComment
    _Error = OggOpusHeaderError
    _mimes = ["audio/ogg", "audio/ogg; codecs=opus"]

    @staticmethod
    def score(filename, fileobj, header):
        """Score the likelihood that *header* starts an Ogg Opus file."""
        if not header.startswith(b"OggS"):
            return 0
        return int(b"OpusHead" in header)
# Alias used by mutagen's generic file opener.
Open = OggOpus


def delete(filename):
    """Remove all tags from the given Ogg Opus file."""
    audio = OggOpus(filename)
    audio.delete()

View file

@ -1,138 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2006 Joe Wreschnig
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
"""Read and write Ogg Speex comments.
This module handles Speex files wrapped in an Ogg bitstream. The
first Speex stream found is used.
Read more about Ogg Speex at http://www.speex.org/. This module is
based on the specification at http://www.speex.org/manual2/node7.html
and clarifications after personal communication with Jean-Marc,
http://lists.xiph.org/pipermail/speex-dev/2006-July/004676.html.
"""
__all__ = ["OggSpeex", "Open", "delete"]
from mutagen import StreamInfo
from mutagen._vorbis import VCommentDict
from mutagen.ogg import OggPage, OggFileType, error as OggError
from mutagen._util import cdata
class error(OggError):
    """Base exception for Ogg Speex-specific errors."""
    pass
class OggSpeexHeaderError(error):
    """Raised when the Speex identification header is missing or bad."""
    pass
class OggSpeexInfo(StreamInfo):
    """Ogg Speex stream information.

    Attributes:

    * bitrate - nominal bitrate in bits per second
    * channels - number of channels
    * length - file length in seconds, as a float

    The reference encoder does not set the bitrate; in this case,
    the bitrate will be 0.
    """

    length = 0

    def __init__(self, fileobj):
        page = OggPage(fileobj)
        # Scan forward to the Speex identification packet.
        while not page.packets[0].startswith(b"Speex "):
            page = OggPage(fileobj)
        if not page.first:
            raise OggSpeexHeaderError(
                "page has ID header, but doesn't start a stream")
        # Fixed byte offsets into the Speex ID header packet.
        self.sample_rate = cdata.uint_le(page.packets[0][36:40])
        self.channels = cdata.uint_le(page.packets[0][48:52])
        # A negative stored bitrate means "unknown"; clamp to 0.
        self.bitrate = max(0, cdata.int_le(page.packets[0][52:56]))
        self.serial = page.serial

    def _post_tags(self, fileobj):
        # Length comes from the final granule position (in samples).
        page = OggPage.find_last(fileobj, self.serial)
        self.length = page.position / float(self.sample_rate)

    def pprint(self):
        return u"Ogg Speex, %.2f seconds" % self.length
class OggSpeexVComment(VCommentDict):
    """Speex comments embedded in an Ogg bitstream."""

    def __init__(self, fileobj, info):
        pages = []
        complete = False
        while not complete:
            page = OggPage(fileobj)
            if page.serial == info.serial:
                pages.append(page)
                # A finished page or a second packet ends the comments.
                complete = page.complete or (len(page.packets) > 1)
        # Append a framing byte, which Speex comments lack on disk.
        data = OggPage.to_packets(pages)[0] + b"\x01"
        super(OggSpeexVComment, self).__init__(data, framing=False)

    def _inject(self, fileobj):
        """Write tag data into the Speex comment packet/page."""
        fileobj.seek(0)
        # Find the first header page, with the stream info.
        # Use it to get the serial number.
        page = OggPage(fileobj)
        while not page.packets[0].startswith(b"Speex "):
            page = OggPage(fileobj)
        # Look for the next page with that serial number, it'll start
        # the comment packet.
        serial = page.serial
        page = OggPage(fileobj)
        while page.serial != serial:
            page = OggPage(fileobj)
        # Then find all the pages with the comment packet.
        old_pages = [page]
        while not (old_pages[-1].complete or len(old_pages[-1].packets) > 1):
            page = OggPage(fileobj)
            if page.serial == old_pages[0].serial:
                old_pages.append(page)
        packets = OggPage.to_packets(old_pages, strict=False)
        # Set the new comment packet.
        packets[0] = self.write(framing=False)
        new_pages = OggPage.from_packets(packets, old_pages[0].sequence)
        OggPage.replace(fileobj, old_pages, new_pages)
class OggSpeex(OggFileType):
    """An Ogg Speex file."""

    _Info = OggSpeexInfo
    _Tags = OggSpeexVComment
    _Error = OggSpeexHeaderError
    _mimes = ["audio/x-speex"]

    @staticmethod
    def score(filename, fileobj, header):
        """Score the likelihood that *header* starts an Ogg Speex file."""
        if not header.startswith(b"OggS"):
            return 0
        return int(b"Speex " in header)
# Alias used by mutagen's generic file opener.
Open = OggSpeex


def delete(filename):
    """Remove all tags from the given Ogg Speex file."""
    audio = OggSpeex(filename)
    audio.delete()

View file

@ -1,131 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2006 Joe Wreschnig
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
"""Read and write Ogg Theora comments.
This module handles Theora files wrapped in an Ogg bitstream. The
first Theora stream found is used.
Based on the specification at http://theora.org/doc/Theora_I_spec.pdf.
"""
__all__ = ["OggTheora", "Open", "delete"]
import struct
from mutagen import StreamInfo
from mutagen._vorbis import VCommentDict
from mutagen._util import cdata
from mutagen.ogg import OggPage, OggFileType, error as OggError
class error(OggError):
    """Base exception for Ogg Theora-specific errors."""
    pass
class OggTheoraHeaderError(error):
    """Raised when the Theora identification header is missing or bad."""
    pass
class OggTheoraInfo(StreamInfo):
    """Ogg Theora stream information.

    Attributes:

    * length - file length in seconds, as a float
    * fps - video frames per second, as a float
    """

    length = 0

    def __init__(self, fileobj):
        page = OggPage(fileobj)
        # Scan forward to the Theora identification packet.
        while not page.packets[0].startswith(b"\x80theora"):
            page = OggPage(fileobj)
        if not page.first:
            raise OggTheoraHeaderError(
                "page has ID header, but doesn't start a stream")
        data = page.packets[0]
        vmaj, vmin = struct.unpack("2B", data[7:9])
        if (vmaj, vmin) != (3, 2):
            raise OggTheoraHeaderError(
                "found Theora version %d.%d != 3.2" % (vmaj, vmin))
        fps_num, fps_den = struct.unpack(">2I", data[22:30])
        self.fps = fps_num / float(fps_den)
        # 24-bit big-endian bitrate field, zero-extended to 32 bits.
        self.bitrate = cdata.uint_be(b"\x00" + data[37:40])
        # Number of granule-position bits holding the sub-frame count.
        self.granule_shift = (cdata.ushort_be(data[40:42]) >> 5) & 0x1F
        self.serial = page.serial

    def _post_tags(self, fileobj):
        page = OggPage.find_last(fileobj, self.serial)
        position = page.position
        # The granule position packs two counts; combine them into an
        # absolute frame number before converting to seconds.
        mask = (1 << self.granule_shift) - 1
        frames = (position >> self.granule_shift) + (position & mask)
        self.length = frames / float(self.fps)

    def pprint(self):
        return "Ogg Theora, %.2f seconds, %d bps" % (self.length, self.bitrate)
class OggTheoraCommentDict(VCommentDict):
    """Theora comments embedded in an Ogg bitstream."""

    def __init__(self, fileobj, info):
        pages = []
        complete = False
        while not complete:
            page = OggPage(fileobj)
            if page.serial == info.serial:
                pages.append(page)
                # A finished page or a second packet ends the comments.
                complete = page.complete or (len(page.packets) > 1)
        # Strip the "\x81theora" marker; append a framing byte.
        data = OggPage.to_packets(pages)[0][7:]
        super(OggTheoraCommentDict, self).__init__(data + b"\x01")

    def _inject(self, fileobj):
        """Write tag data into the Theora comment packet/page."""
        fileobj.seek(0)
        page = OggPage(fileobj)
        while not page.packets[0].startswith(b"\x81theora"):
            page = OggPage(fileobj)
        old_pages = [page]
        while not (old_pages[-1].complete or len(old_pages[-1].packets) > 1):
            page = OggPage(fileobj)
            if page.serial == old_pages[0].serial:
                old_pages.append(page)
        packets = OggPage.to_packets(old_pages, strict=False)
        packets[0] = b"\x81theora" + self.write(framing=False)
        new_pages = OggPage.from_packets(packets, old_pages[0].sequence)
        OggPage.replace(fileobj, old_pages, new_pages)
class OggTheora(OggFileType):
    """An Ogg Theora file."""

    _Info = OggTheoraInfo
    _Tags = OggTheoraCommentDict
    _Error = OggTheoraHeaderError
    _mimes = ["video/x-theora"]

    @staticmethod
    def score(filename, fileobj, header):
        """Score the likelihood that *header* starts an Ogg Theora file."""
        if not header.startswith(b"OggS"):
            return 0
        markers = (b"\x80theora" in header) + (b"\x81theora" in header)
        return markers * 2
# Alias used by mutagen's generic file opener.
Open = OggTheora


def delete(filename):
    """Remove all tags from the given Ogg Theora file."""
    audio = OggTheora(filename)
    audio.delete()

View file

@ -1,139 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2006 Joe Wreschnig
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
"""Read and write Ogg Vorbis comments.
This module handles Vorbis files wrapped in an Ogg bitstream. The
first Vorbis stream found is used.
Read more about Ogg Vorbis at http://vorbis.com/. This module is based
on the specification at http://www.xiph.org/vorbis/doc/Vorbis_I_spec.html.
"""
__all__ = ["OggVorbis", "Open", "delete"]
import struct
from mutagen import StreamInfo
from mutagen._vorbis import VCommentDict
from mutagen.ogg import OggPage, OggFileType, error as OggError
class error(OggError):
    """Base exception for Ogg Vorbis-specific errors."""
    pass
class OggVorbisHeaderError(error):
    """Raised when the Vorbis identification header is missing or bad."""
    pass
class OggVorbisInfo(StreamInfo):
    """Ogg Vorbis stream information.

    Attributes:

    * length - file length in seconds, as a float
    * bitrate - nominal ('average') bitrate in bits per second, as an int
    """

    length = 0

    def __init__(self, fileobj):
        page = OggPage(fileobj)
        # Scan forward to the Vorbis identification packet.
        while not page.packets[0].startswith(b"\x01vorbis"):
            page = OggPage(fileobj)
        if not page.first:
            raise OggVorbisHeaderError(
                "page has ID header, but doesn't start a stream")
        (self.channels, self.sample_rate, max_bitrate, nominal_bitrate,
         min_bitrate) = struct.unpack("<B4i", page.packets[0][11:28])
        self.serial = page.serial
        # Negative bitrate fields mean "unset"; clamp them to zero.
        max_bitrate = max(0, max_bitrate)
        min_bitrate = max(0, min_bitrate)
        nominal_bitrate = max(0, nominal_bitrate)
        if nominal_bitrate == 0:
            self.bitrate = (max_bitrate + min_bitrate) // 2
        elif max_bitrate and max_bitrate < nominal_bitrate:
            # If the max bitrate is less than the nominal, we know
            # the nominal is wrong.
            self.bitrate = max_bitrate
        elif min_bitrate > nominal_bitrate:
            self.bitrate = min_bitrate
        else:
            self.bitrate = nominal_bitrate

    def _post_tags(self, fileobj):
        # Length comes from the final granule position (in samples).
        page = OggPage.find_last(fileobj, self.serial)
        self.length = page.position / float(self.sample_rate)

    def pprint(self):
        return u"Ogg Vorbis, %.2f seconds, %d bps" % (
            self.length, self.bitrate)
class OggVCommentDict(VCommentDict):
    """Vorbis comments embedded in an Ogg bitstream."""

    def __init__(self, fileobj, info):
        pages = []
        complete = False
        while not complete:
            page = OggPage(fileobj)
            if page.serial == info.serial:
                pages.append(page)
                # A finished page or a second packet ends the comments.
                complete = page.complete or (len(page.packets) > 1)
        data = OggPage.to_packets(pages)[0][7:]  # Strip off "\x03vorbis".
        super(OggVCommentDict, self).__init__(data)

    def _inject(self, fileobj):
        """Write tag data into the Vorbis comment packet/page."""
        # Find the old pages in the file; we'll need to remove them,
        # plus grab any stray setup packet data out of them.
        fileobj.seek(0)
        page = OggPage(fileobj)
        while not page.packets[0].startswith(b"\x03vorbis"):
            page = OggPage(fileobj)
        old_pages = [page]
        while not (old_pages[-1].complete or len(old_pages[-1].packets) > 1):
            page = OggPage(fileobj)
            if page.serial == old_pages[0].serial:
                old_pages.append(page)
        packets = OggPage.to_packets(old_pages, strict=False)
        # Set the new comment packet.
        packets[0] = b"\x03vorbis" + self.write()
        new_pages = OggPage.from_packets(packets, old_pages[0].sequence)
        OggPage.replace(fileobj, old_pages, new_pages)
class OggVorbis(OggFileType):
    """An Ogg Vorbis file."""

    _Info = OggVorbisInfo
    _Tags = OggVCommentDict
    _Error = OggVorbisHeaderError
    _mimes = ["audio/vorbis", "audio/x-vorbis"]

    @staticmethod
    def score(filename, fileobj, header):
        """Score the likelihood that *header* starts an Ogg Vorbis file."""
        if not header.startswith(b"OggS"):
            return 0
        return int(b"\x01vorbis" in header)
# Alias used by mutagen's generic file opener.
Open = OggVorbis


def delete(filename):
    """Remove all tags from the given Ogg Vorbis file."""
    audio = OggVorbis(filename)
    audio.delete()

View file

@ -1,74 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright (C) 2006 Lukas Lalinsky
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
"""OptimFROG audio streams with APEv2 tags.
OptimFROG is a lossless audio compression program. Its main goal is to
reduce at maximum the size of audio files, while permitting bit
identical restoration for all input. It is similar with the ZIP
compression, but it is highly specialized to compress audio data.
Only versions 4.5 and higher are supported.
For more information, see http://www.losslessaudio.org/
"""
__all__ = ["OptimFROG", "Open", "delete"]
import struct
from ._compat import endswith
from mutagen import StreamInfo
from mutagen.apev2 import APEv2File, error, delete
class OptimFROGHeaderError(error):
    """Raised when the OptimFROG file header is missing or malformed."""
    pass
class OptimFROGInfo(StreamInfo):
    """OptimFROG stream information.

    Attributes:

    * channels - number of audio channels
    * length - file length in seconds, as a float
    * sample_rate - audio sampling rate in Hz
    """

    def __init__(self, fileobj):
        header = fileobj.read(76)
        # Valid files start with "OFR " and declare one of the two
        # recognized header payload sizes (12 or 15).
        if (len(header) != 76 or not header.startswith(b"OFR ") or
                struct.unpack("<I", header[4:8])[0] not in [12, 15]):
            raise OptimFROGHeaderError("not an OptimFROG file")
        (total_samples, total_samples_high, sample_type, self.channels,
         self.sample_rate) = struct.unpack("<IHBBI", header[8:20])
        # The sample count is stored as a 48-bit value split into a
        # 32-bit low part and a 16-bit high part.
        total_samples += total_samples_high << 32
        self.channels += 1
        if self.sample_rate:
            self.length = float(total_samples) / (self.channels *
                                                  self.sample_rate)
        else:
            # Avoid division by zero on a bogus sample rate.
            self.length = 0.0

    def pprint(self):
        return "OptimFROG, %.2f seconds, %d Hz" % (self.length,
                                                   self.sample_rate)
class OptimFROG(APEv2File):
    """An OptimFROG file carrying APEv2 tags."""

    _Info = OptimFROGInfo

    @staticmethod
    def score(filename, fileobj, header):
        """Score by the magic bytes and the .ofr/.ofs file extensions."""
        filename = filename.lower()
        checks = (header.startswith(b"OFR"),
                  endswith(filename, b".ofr"),
                  endswith(filename, b".ofs"))
        return sum(checks)


# Alias used by mutagen's generic file opener.
Open = OptimFROG

View file

@ -1,84 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright (C) 2006 Joe Wreschnig
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of version 2 of the GNU General Public License as
# published by the Free Software Foundation.
"""True Audio audio stream information and tags.
True Audio is a lossless format designed for real-time encoding and
decoding. This module is based on the documentation at
http://www.true-audio.com/TTA_Lossless_Audio_Codec\_-_Format_Description
True Audio files use ID3 tags.
"""
__all__ = ["TrueAudio", "Open", "delete", "EasyTrueAudio"]
from ._compat import endswith
from mutagen import StreamInfo
from mutagen.id3 import ID3FileType, delete
from mutagen._util import cdata, MutagenError
class error(RuntimeError, MutagenError):
    """Base exception for True Audio-specific errors."""
    pass
class TrueAudioHeaderError(error, IOError):
    """Raised when the TTA header is missing or malformed."""
    pass
class TrueAudioInfo(StreamInfo):
    """True Audio stream information.

    Attributes:

    * length - audio length, in seconds
    * sample_rate - audio sample rate, in Hz
    """

    def __init__(self, fileobj, offset):
        # offset is where the audio data starts (after any leading ID3
        # tag); None is treated as the start of the file.
        fileobj.seek(offset or 0)
        header = fileobj.read(18)
        if len(header) != 18 or not header.startswith(b"TTA"):
            raise TrueAudioHeaderError("TTA header not found")
        self.sample_rate = cdata.int_le(header[10:14])
        samples = cdata.uint_le(header[14:18])
        self.length = float(samples) / self.sample_rate

    def pprint(self):
        return "True Audio, %.2f seconds, %d Hz." % (
            self.length, self.sample_rate)
class TrueAudio(ID3FileType):
    """A True Audio file.

    :ivar info: :class:`TrueAudioInfo`
    :ivar tags: :class:`ID3 <mutagen.id3.ID3>`
    """

    _Info = TrueAudioInfo
    _mimes = ["audio/x-tta"]

    @staticmethod
    def score(filename, fileobj, header):
        """Magic bytes score one point each; a .tta extension scores two."""
        points = header.startswith(b"ID3") + header.startswith(b"TTA")
        points += endswith(filename.lower(), b".tta") * 2
        return points


# Alias used by mutagen's generic file opener.
Open = TrueAudio
class EasyTrueAudio(TrueAudio):
    """Like MP3, but uses EasyID3 for tags.

    :ivar info: :class:`TrueAudioInfo`
    :ivar tags: :class:`EasyID3 <mutagen.easyid3.EasyID3>`
    """

    # Class-scope import so ID3FileType uses EasyID3 as its tag class;
    # the self-assignment rebinds the imported name as an explicit
    # class attribute.
    from mutagen.easyid3 import EasyID3 as ID3
    ID3 = ID3

View file

@ -1,124 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2006 Joe Wreschnig
# 2014 Christoph Reiter
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
"""WavPack reading and writing.
WavPack is a lossless format that uses APEv2 tags. Read
* http://www.wavpack.com/
* http://www.wavpack.com/file_format.txt
for more information.
"""
__all__ = ["WavPack", "Open", "delete"]
from mutagen import StreamInfo
from mutagen.apev2 import APEv2File, error, delete
from mutagen._util import cdata
class WavPackHeaderError(error):
    """Raised when a WavPack block header is missing or malformed."""
    pass
# Sampling rates in Hz, indexed by the 4-bit rate field of the block
# header flags ((flags >> 23) & 0xF) — see WavPackInfo.__init__.
RATES = [6000, 8000, 9600, 11025, 12000, 16000, 22050, 24000, 32000, 44100,
         48000, 64000, 88200, 96000, 192000]
class _WavPackHeader(object):
def __init__(self, block_size, version, track_no, index_no, total_samples,
block_index, block_samples, flags, crc):
self.block_size = block_size
self.version = version
self.track_no = track_no
self.index_no = index_no
self.total_samples = total_samples
self.block_index = block_index
self.block_samples = block_samples
self.flags = flags
self.crc = crc
@classmethod
def from_fileobj(cls, fileobj):
"""A new _WavPackHeader or raises WavPackHeaderError"""
header = fileobj.read(32)
if len(header) != 32 or not header.startswith(b"wvpk"):
raise WavPackHeaderError("not a WavPack header: %r" % header)
block_size = cdata.uint_le(header[4:8])
version = cdata.ushort_le(header[8:10])
track_no = ord(header[10:11])
index_no = ord(header[11:12])
samples = cdata.uint_le(header[12:16])
if samples == 2 ** 32 - 1:
samples = -1
block_index = cdata.uint_le(header[16:20])
block_samples = cdata.uint_le(header[20:24])
flags = cdata.uint_le(header[24:28])
crc = cdata.uint_le(header[28:32])
return _WavPackHeader(block_size, version, track_no, index_no,
samples, block_index, block_samples, flags, crc)
class WavPackInfo(StreamInfo):
    """WavPack stream information.

    Attributes:

    * channels - number of audio channels (1 or 2)
    * length - file length in seconds, as a float
    * sample_rate - audio sampling rate in Hz
    * version - WavPack stream version
    """

    def __init__(self, fileobj):
        try:
            header = _WavPackHeader.from_fileobj(fileobj)
        except WavPackHeaderError:
            raise WavPackHeaderError("not a WavPack file")
        self.version = header.version
        # Mono flag is bit 2; True acts as 1 arithmetically, else stereo.
        self.channels = bool(header.flags & 4) or 2
        self.sample_rate = RATES[(header.flags >> 23) & 0xF]
        if header.total_samples == -1 or header.block_index != 0:
            # TODO: we could make this faster by using the tag size
            # and search backwards for the last block, then do
            # last.block_index + last.block_samples - initial.block_index
            samples = header.block_samples
            while 1:
                # block_size counts from offset 8, and 32 header bytes
                # were already consumed, hence the -32 + 8 adjustment.
                fileobj.seek(header.block_size - 32 + 8, 1)
                try:
                    header = _WavPackHeader.from_fileobj(fileobj)
                except WavPackHeaderError:
                    break
                samples += header.block_samples
        else:
            samples = header.total_samples
        self.length = float(samples) / self.sample_rate

    def pprint(self):
        return "WavPack, %.2f seconds, %d Hz" % (self.length, self.sample_rate)
class WavPack(APEv2File):
    """A WavPack file carrying APEv2 tags."""

    _Info = WavPackInfo
    _mimes = ["audio/x-wavpack"]

    @staticmethod
    def score(filename, fileobj, header):
        """Only the 'wvpk' magic matters; it is weighted double."""
        return 2 if header.startswith(b"wvpk") else 0


# Alias used by mutagen's generic file opener.
Open = WavPack

View file

@ -1,419 +0,0 @@
#!/usr/bin/env python
#
# PyGazelle - https://github.com/cohena/pygazelle
# A Python implementation of the What.cd Gazelle JSON API
#
# Loosely based on the API implementation from 'whatbetter', by Zachary Denton
# See https://github.com/zacharydenton/whatbetter
from HTMLParser import HTMLParser
import sys
import json
import time
import requests as requests
from .user import User
from .artist import Artist
from .tag import Tag
from .request import Request
from .torrent_group import TorrentGroup
from .torrent import Torrent
from .category import Category
from .inbox import Mailbox
class LoginException(Exception):
    """Raised when logging in to the Gazelle site fails or expires."""
    pass
class RequestException(Exception):
    """Raised when an AJAX API call fails or returns a non-success status."""
    pass
class GazelleAPI(object):
last_request = time.time() # share amongst all api objects
default_headers = {
'Connection': 'keep-alive',
'Cache-Control': 'max-age=0',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3)'\
'AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.79'\
'Safari/535.11',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9'\
',*/*;q=0.8',
'Accept-Encoding': 'gzip,deflate,sdch',
'Accept-Language': 'en-US,en;q=0.8',
'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3'}
def __init__(self, username=None, password=None):
    # Sets up the HTTP session and per-type caches. No network I/O
    # happens here; authentication is performed lazily by request().
    self.session = requests.session()
    self.session.headers = self.default_headers
    self.username = username
    self.password = password
    # Filled in by _login() from the 'index' API response.
    self.authkey = None
    self.passkey = None
    self.userid = None
    self.logged_in_user = None
    self.default_timeout = 30
    # Object caches keyed by id, one dict per API entity type.
    self.cached_users = {}
    self.cached_artists = {}
    self.cached_tags = {}
    self.cached_torrent_groups = {}
    self.cached_torrents = {}
    self.cached_requests = {}
    self.cached_categories = {}
    self.site = "https://what.cd/"
    # Timestamps of recent requests, used for client-side rate limiting.
    self.past_request_timestamps = []
def wait_for_rate_limit(self):
    """Block until another request is allowed (at most 5 per 10 seconds)."""
    # maximum is 5 requests within 10 secs
    time_frame = 10
    max_reqs = 5
    slice_point = 0
    while len(self.past_request_timestamps) >= max_reqs:
        # Count how many recorded timestamps have aged out of the
        # window so they can be dropped.
        for i, timestamp in enumerate(self.past_request_timestamps):
            if timestamp < time.time() - time_frame:
                slice_point = i + 1
            else:
                break
        if slice_point:
            self.past_request_timestamps = self.past_request_timestamps[slice_point:]
        else:
            # Window is still full of fresh requests; wait and retry.
            time.sleep(0.1)
def logged_in(self):
    """Return True when the cached user matches the authenticated id."""
    user = self.logged_in_user
    return user is not None and user.id == self.userid
def _login(self):
    """
    Private method.
    Logs in user and gets authkey from server.

    Raises LoginException when the login page rejects the credentials
    or the follow-up 'index' call does not identify a user.
    """
    if self.logged_in():
        return
    self.wait_for_rate_limit()
    loginpage = 'https://what.cd/login.php'
    data = {'username': self.username,
            'password': self.password,
            'keeplogged': '1'}
    r = self.session.post(loginpage, data=data, timeout=self.default_timeout, headers=self.default_headers)
    self.past_request_timestamps.append(time.time())
    if r.status_code != 200:
        raise LoginException("Login returned status code %s" % r.status_code)
    try:
        # autologin=False prevents recursing back into _login().
        accountinfo = self.request('index', autologin=False)
    except RequestException:
        # Fix: exception was bound to an unused variable (pyflakes).
        raise LoginException("Login probably incorrect")
    if not accountinfo or 'id' not in accountinfo:
        raise LoginException("Login probably incorrect")
    self.userid = accountinfo['id']
    self.authkey = accountinfo['authkey']
    self.passkey = accountinfo['passkey']
    # Pre-populate the logged-in user from the index response.
    self.logged_in_user = User(self.userid, self)
    self.logged_in_user.set_index_data(accountinfo)
def request(self, action, autologin=True, **kwargs):
"""
Makes an AJAX request at a given action.
Pass an action and relevant arguments for that action.
"""
def make_request(action, **kwargs):
ajaxpage = 'ajax.php'
content = self.unparsed_request(ajaxpage, action, **kwargs)
try:
if not isinstance(content, text_type):
content = content.decode('utf-8')
parsed = json.loads(content)
if parsed['status'] != 'success':
raise RequestException
return parsed['response']
except ValueError:
raise RequestException
try:
return make_request(action, **kwargs)
except Exception as e:
if autologin and not self.logged_in():
self._login()
return make_request(action, **kwargs)
else:
raise e
    def unparsed_request(self, sitepage, action, **kwargs):
        """
        Makes a generic HTTP request at a given page with a given action.
        Also pass relevant arguments for that action.

        Returns the raw response body; callers are expected to parse it.
        Raises LoginException when the site redirects to login.php
        (expired session).
        """
        self.wait_for_rate_limit()
        url = "%s%s" % (self.site, sitepage)
        params = {'action': action}
        if self.authkey:
            # Most authenticated actions require the session authkey.
            params['auth'] = self.authkey
        params.update(kwargs)
        r = self.session.get(url, params=params, allow_redirects=False, timeout=self.default_timeout)
        if r.status_code == 302 and r.raw.headers['location'] == 'login.php':
            # A redirect to login.php means the session cookie is no longer valid.
            self.logged_in_user = None
            raise LoginException("User login expired")
        self.past_request_timestamps.append(time.time())
        return r.content
def get_user(self, id):
"""
Returns a User for the passed ID, associated with this API object. If the ID references the currently logged in
user, the user returned will be pre-populated with the information from an 'index' API call. Otherwise, you'll
need to call User.update_user_data(). This is done on demand to reduce unnecessary API calls.
"""
id = int(id)
if id == self.userid:
return self.logged_in_user
elif id in self.cached_users.keys():
return self.cached_users[id]
else:
return User(id, self)
def search_users(self, search_query):
"""
Returns a list of users returned for the search query. You can search by name, part of name, and ID number. If
one of the returned users is the currently logged-in user, that user object will be pre-populated with the
information from an 'index' API call. Otherwise only the limited info returned by the search will be pre-pop'd.
You can query more information with User.update_user_data(). This is done on demand to reduce unnecessary API calls.
"""
response = self.request(action='usersearch', search=search_query)
results = response['results']
found_users = []
for result in results:
user = self.get_user(result['userId'])
user.set_search_result_data(result)
found_users.append(user)
return found_users
    def get_inbox(self, page='1', sort='unread'):
        """
        Returns the inbox Mailbox for the logged in user.

        page -- page number as a string (1-based).
        sort -- sort order passed through to the API (default 'unread').
        The Mailbox is returned unloaded; call its update_mbox_data() to fetch.
        """
        return Mailbox(self, 'inbox', page, sort)
    def get_sentbox(self, page='1', sort='unread'):
        """
        Returns the sentbox Mailbox for the logged in user.

        page -- page number as a string (1-based).
        sort -- sort order passed through to the API (default 'unread').
        The Mailbox is returned unloaded; call its update_mbox_data() to fetch.
        """
        return Mailbox(self, 'sentbox', page, sort)
def get_artist(self, id=None, name=None):
"""
Returns an Artist for the passed ID, associated with this API object. You'll need to call Artist.update_data()
if the artist hasn't already been cached. This is done on demand to reduce unnecessary API calls.
"""
if id:
id = int(id)
if id in self.cached_artists.keys():
artist = self.cached_artists[id]
else:
artist = Artist(id, self)
if name:
artist.name = HTMLParser().unescape(name)
elif name:
artist = Artist(-1, self)
artist.name = HTMLParser().unescape(name)
else:
raise Exception("You must specify either an ID or a Name to get an artist.")
return artist
def get_tag(self, name):
"""
Returns a Tag for the passed name, associated with this API object. If you know the count value for this tag,
pass it to update the object. There is no way to query the count directly from the API, but it can be retrieved
from other calls such as 'artist', however.
"""
if name in self.cached_tags.keys():
return self.cached_tags[name]
else:
return Tag(name, self)
def get_request(self, id):
"""
Returns a Request for the passed ID, associated with this API object. You'll need to call Request.update_data()
if the request hasn't already been cached. This is done on demand to reduce unnecessary API calls.
"""
id = int(id)
if id in self.cached_requests.keys():
return self.cached_requests[id]
else:
return Request(id, self)
def get_torrent_group(self, id):
"""
Returns a TorrentGroup for the passed ID, associated with this API object.
"""
id = int(id)
if id in self.cached_torrent_groups.keys():
return self.cached_torrent_groups[id]
else:
return TorrentGroup(id, self)
def get_torrent(self, id):
"""
Returns a Torrent for the passed ID, associated with this API object.
"""
id = int(id)
if id in self.cached_torrents.keys():
return self.cached_torrents[id]
else:
return Torrent(id, self)
    def get_torrent_from_info_hash(self, info_hash):
        """
        Returns a Torrent for the passed info hash (if one exists), associated with this API object.

        Returns None when the API reports no torrent for that hash. On success
        the Torrent is pre-populated with the full 'torrent' response data.
        """
        try:
            # The API expects the info hash in upper case.
            response = self.request(action='torrent', hash=info_hash.upper())
        except RequestException:
            return None
        id = int(response['torrent']['id'])
        if id in self.cached_torrents.keys():
            torrent = self.cached_torrents[id]
        else:
            torrent = Torrent(id, self)
        torrent.set_torrent_complete_data(response)
        return torrent
def get_category(self, id, name=None):
"""
Returns a Category for the passed ID, associated with this API object.
"""
id = int(id)
if id in self.cached_categories.keys():
cat = self.cached_categories[id]
else:
cat = Category(id, self)
if name:
cat.name = name
return cat
    def get_top_10(self, type="torrents", limit=25):
        """
        Lists the top <limit> items of <type>. Type can be "torrents", "tags", or "users". Limit MUST be
        10, 25, or 100...it can't just be an arbitrary number (unfortunately). Results are organized into a list of hashes.
        Each hash contains the results for a specific time frame, like 'day', or 'week'. In the hash, the 'results' key
        contains a list of objects appropriate to the passed <type>.

        Raises RequestException on an empty response.
        """
        # NOTE: 'type' shadows the builtin, but it is part of the public kwarg
        # interface of this method and cannot be renamed safely.
        response = self.request(action='top10', type=type, limit=limit)
        top_items = []
        if not response:
            raise RequestException
        for category in response:
            # Each 'category' is one time frame (day/week/...) of results.
            results = []
            if type == "torrents":
                for item in category['results']:
                    torrent = self.get_torrent(item['torrentId'])
                    torrent.set_torrent_top_10_data(item)
                    results.append(torrent)
            elif type == "tags":
                for item in category['results']:
                    tag = self.get_tag(item['name'])
                    results.append(tag)
            elif type == "users":
                for item in category['results']:
                    user = self.get_user(item['id'])
                    results.append(user)
            else:
                raise Exception("%s is an invalid type argument for GazelleAPI.get_top_ten()" % type)
            top_items.append({
                "caption": category['caption'],
                "tag": category['tag'],
                "limit": category['limit'],
                "results": results
            })
        return top_items
    def search_torrents(self, **kwargs):
        """
        Searches based on the args you pass and returns torrent groups filled with torrents.
        Pass strings unless otherwise specified.
        Valid search args:
            searchstr (any arbitrary string to search for)
            page (page to display -- default: 1)
            artistname (self explanatory)
            groupname (torrent group name, equivalent to album)
            recordlabel (self explanatory)
            cataloguenumber (self explanatory)
            year (self explanatory)
            remastertitle (self explanatory)
            remasteryear (self explanatory)
            remasterrecordlabel (self explanatory)
            remastercataloguenumber (self explanatory)
            filelist (can search for filenames found in torrent...unsure of formatting for multiple files)
            encoding (use constants in pygazelle.Encoding module)
            format (use constants in pygazelle.Format module)
            media (use constants in pygazelle.Media module)
            releasetype (use constants in pygazelle.ReleaseType module)
            haslog (int 1 or 0 to represent boolean, 100 for 100% only, -1 for < 100% / unscored)
            hascue (int 1 or 0 to represent boolean)
            scene (int 1 or 0 to represent boolean)
            vanityhouse (int 1 or 0 to represent boolean)
            freetorrent (int 1 or 0 to represent boolean)
            taglist (comma separated tag names)
            tags_type (0 for 'any' matching, 1 for 'all' matching)
            order_by (use constants in pygazelle.order module that start with by_ in their name)
            order_way (use way_ascending or way_descending constants in pygazelle.order)
            filter_cat (for each category you want to search, the param name must be filter_cat[catnum] and the value 1)
                ex. filter_cat[1]=1 turns on Music.
                    filter_cat[1]=1, filter_cat[2]=1 turns on music and applications. (two separate params and vals!)
                Category object ids return the correct int value for these. (verify?)

        Returns a dict containing keys 'curr_page', 'pages', and 'results'. Results contains a matching list of Torrents
        (they have a reference to their parent TorrentGroup).
        """
        response = self.request(action='browse', **kwargs)
        results = response['results']
        if len(results):
            curr_page = response['currentPage']
            pages = response['pages']
        else:
            # Empty result sets omit paging info; report a single empty page.
            curr_page = 1
            pages = 1
        matching_torrents = []
        for torrent_group_dict in results:
            torrent_group = self.get_torrent_group(torrent_group_dict['groupId'])
            torrent_group.set_torrent_search_data(torrent_group_dict)
            for torrent_dict in torrent_group_dict['torrents']:
                # Inject the group id so each torrent can link back to its group.
                torrent_dict['groupId'] = torrent_group.id
                torrent = self.get_torrent(torrent_dict['torrentId'])
                torrent.set_torrent_search_data(torrent_dict)
                matching_torrents.append(torrent)
        return {'curr_page': curr_page, 'pages': pages, 'results': matching_torrents}
def generate_torrent_link(self, id):
url = "%storrents.php?action=download&id=%s&authkey=%s&torrent_pass=%s" %\
(self.site, id, self.logged_in_user.authkey, self.logged_in_user.passkey)
return url
def save_torrent_file(self, id, dest):
file_data = self.unparsed_request("torrents.php", 'download',
id=id, authkey=self.logged_in_user.authkey, torrent_pass=self.logged_in_user.passkey)
with open(dest, 'w+') as dest_file:
dest_file.write(file_data)
# Py2/Py3 compatibility alias for the native textual string type.
try:
    text_type = unicode  # Python 2
except NameError:
    text_type = str  # Python 3

View file

@ -1,85 +0,0 @@
from HTMLParser import HTMLParser
class InvalidArtistException(Exception):
    """Raised when artist data cannot be resolved or an ID mismatch is detected."""
class Artist(object):
    """
    This class represents an Artist. It is created knowing only its ID. To reduce API accesses, load information using
    Artist.update_data() only as needed.
    """
    def __init__(self, id, parent_api):
        # Only the ID and the owning API object are known up front; every other
        # field stays None/empty until update_data()/set_data() is called.
        self.id = id
        self.parent_api = parent_api
        self.name = None
        self.notifications_enabled = None
        self.has_bookmarked = None
        self.image = None
        self.body = None
        self.vanity_house = None
        self.tags = []
        self.similar_artists_and_score = {}
        self.statistics = None
        self.torrent_groups = []
        self.requests = []
        self.parent_api.cached_artists[self.id] = self # add self to cache of known Artist objects
    def update_data(self):
        """Fetch this artist's full record via the 'artist' API action and apply it."""
        if self.id > 0:
            response = self.parent_api.request(action='artist', id=self.id)
        elif self.name:
            self.name = HTMLParser().unescape(self.name)
            try:
                response = self.parent_api.request(action='artist', artistname=self.name)
            except Exception:
                # Fall back to the first artist of an "A & B" collaboration-style name.
                self.name = self.name.split(" & ")[0]
                response = self.parent_api.request(action='artist', artistname=self.name)
        else:
            raise InvalidArtistException("Neither ID or Artist Name is valid, can't update data.")
        self.set_data(response)
    def set_data(self, artist_json_response):
        """
        Apply a parsed 'artist' API response to this object.

        Also populates/updates the parent API's tag, torrent-group and request
        caches via the get_*() accessors. Raises InvalidArtistException if the
        payload carries a different artist ID than ours.
        """
        if self.id > 0 and self.id != artist_json_response['id']:
            raise InvalidArtistException("Tried to update an artists's information from an 'artist' API call with a different id." +
                                         " Should be %s, got %s" % (self.id, artist_json_response['id']) )
        elif self.name:
            # Name-based placeholder (id <= 0): adopt the server's real ID and
            # (re)register this object in the cache under it.
            self.id = artist_json_response['id']
            self.parent_api.cached_artists[self.id] = self
        self.name = HTMLParser().unescape(artist_json_response['name'])
        self.notifications_enabled = artist_json_response['notificationsEnabled']
        self.has_bookmarked = artist_json_response['hasBookmarked']
        self.image = artist_json_response['image']
        self.body = artist_json_response['body']
        self.vanity_house = artist_json_response['vanityHouse']
        self.tags = []
        for tag_dict in artist_json_response['tags']:
            tag = self.parent_api.get_tag(tag_dict['name'])
            tag.set_artist_count(self, tag_dict['count'])
            self.tags.append(tag)
        self.similar_artists_and_score = {}
        for similar_artist_dict in artist_json_response['similarArtists']:
            similar_artist = self.parent_api.get_artist(similar_artist_dict['artistId'])
            similar_artist.name = similar_artist_dict['name']
            self.similar_artists_and_score[similar_artist] = similar_artist_dict['score']
        self.statistics = artist_json_response['statistics']
        self.torrent_groups = []
        for torrent_group_item in artist_json_response['torrentgroup']:
            torrent_group = self.parent_api.get_torrent_group(torrent_group_item['groupId'])
            torrent_group.set_artist_group_data(torrent_group_item)
            self.torrent_groups.append(torrent_group)
        self.requests = []
        for request_json_item in artist_json_response['requests']:
            request = self.parent_api.get_request(request_json_item['requestId'])
            request.set_data(request_json_item)
            self.requests.append(request)
    def __repr__(self):
        return "Artist: %s - ID: %s" % (self.name, self.id)

View file

@ -1,13 +0,0 @@
class InvalidCategoryException(Exception):
    """Error type for Category-related failures."""
class Category(object):
    """A torrent category, known by ID; its name is filled in lazily."""
    def __init__(self, id, parent_api):
        self.id = id
        self.parent_api = parent_api
        self.name = None
        # register in the parent API's cache of known Category objects
        parent_api.cached_categories[id] = self
    def __repr__(self):
        return "Category: %s - id: %s" % (self.name, self.id)

View file

@ -1,13 +0,0 @@
# Bitrate/encoding labels, spelled exactly as the Gazelle API expects them
# (e.g. for the 'encoding' search filter).
C192 = "192"
APS = "APS (VBR)"
V2 = "V2 (VBR)"
V1 = "V1 (VBR)"
C256 = "256"
APX = "APX (VBR)"
V0 = "V0 (VBR)"
C320 = "320"
LOSSLESS = "Lossless"
LOSSLESS_24 = "24bit Lossless"
V8 = "V8 (VBR)"
# Every known encoding label.
ALL_ENCODINGS = [C192, APS, V2, V1, C256, APX, V0, C320, LOSSLESS, LOSSLESS_24, V8]

View file

@ -1,8 +0,0 @@
# Audio format labels, spelled exactly as the Gazelle API expects them
# (e.g. for the 'format' search filter).
MP3 = "MP3"
FLAC = "FLAC"
AAC = "AAC"
AC3 = "AC3"
DTS = "DTS"
OGG_VORBIS = "Ogg Vorbis"
# Every known format label.
ALL_FORMATS = [MP3, FLAC, AAC, AC3, DTS, OGG_VORBIS]

View file

@ -1,107 +0,0 @@
class MailboxMessage(object):
    """One inbox/sentbox listing entry, built from an 'inbox' JSON item."""
    def __init__(self, api, message):
        self.id = message['convId']
        # Unloaded conversation handle for this message's thread.
        self.conv = Conversation(api, self.id)
        for attr, key in (('subject', 'subject'),
                          ('unread', 'unread'),
                          ('sticky', 'sticky'),
                          ('fwd_id', 'forwardedId'),
                          ('fwd_name', 'forwardedName'),
                          ('sender_id', 'senderId'),
                          ('username', 'username'),
                          ('donor', 'donor'),
                          ('warned', 'warned'),
                          ('enabled', 'enabled'),
                          ('date', 'date')):
            setattr(self, attr, message[key])
    def __repr__(self):
        return "MailboxMessage ID %s - %s %s %s" % (self.id, self.subject, self.sender_id, self.username)
class ConversationMessage(object):
    """One message inside a Conversation, built from a 'viewconv' JSON item."""
    def __init__(self, msg_resp):
        for attr, key in (('id', 'messageId'),
                          ('sender_id', 'senderId'),
                          ('sender_name', 'senderName'),
                          ('sent_date', 'sentDate'),
                          ('bb_body', 'bbBody'),
                          ('body', 'body')):
            setattr(self, attr, msg_resp[key])
    def __repr__(self):
        return "ConversationMessage ID %s - %s %s" % (self.id, self.sender_name, self.sent_date)
class Conversation(object):
    """A single private-message thread; created unloaded, filled by update_conv_data()."""
    def __init__(self, api, conv_id):
        self.id = conv_id
        self.parent_api = api
        self.subject = None
        self.sticky = None
        self.messages = []
    def __repr__(self):
        return "Conversation ID %s - %s" % (self.id, self.subject)
    def set_conv_data(self, conv_resp):
        """Apply a parsed 'viewconv' JSON payload to this conversation."""
        assert self.id == conv_resp['convId']
        self.subject = conv_resp['subject']
        self.sticky = conv_resp['sticky']
        self.messages = [ConversationMessage(raw) for raw in conv_resp['messages']]
    def update_conv_data(self):
        """Fetch this conversation's messages from the API and apply them."""
        self.set_conv_data(
            self.parent_api.request(action='inbox', type='viewconv', id=self.id))
class Mailbox(object):
    """
    This class represents the logged in user's inbox/sentbox
    """
    def __init__(self, parent_api, boxtype='inbox', page='1', sort='unread'):
        """Create an unloaded mailbox view; call update_mbox_data() to populate it."""
        self.parent_api = parent_api
        self.boxtype = boxtype
        self.sort = sort
        self.current_page = page
        self.total_pages = None
        self.messages = None
    def set_mbox_data(self, mbox_resp):
        """
        Takes parsed JSON response from 'inbox' action on api
        and updates the available subset of mailbox information.
        """
        self.current_page = mbox_resp['currentPage']
        self.total_pages = mbox_resp['pages']
        self.messages = [MailboxMessage(self.parent_api, raw)
                         for raw in mbox_resp['messages']]
    def update_mbox_data(self):
        """Fetch the current page of this mailbox from the API and apply it."""
        response = self.parent_api.request(
            action='inbox', type=self.boxtype,
            page=self.current_page, sort=self.sort)
        self.set_mbox_data(response)
    def _page_ints(self):
        # Page counters may arrive as strings or ints; normalize for comparison.
        if not self.total_pages:
            raise ValueError("call update_mbox_data() first")
        return int(self.total_pages), int(self.current_page)
    def next_page(self):
        """Return a Mailbox for the following page; ValueError at the last page."""
        total, cur = self._page_ints()
        if cur < total:
            return Mailbox(self.parent_api, self.boxtype, str(cur + 1), self.sort)
        raise ValueError("Already at page %d/%d" % (cur, total))
    def prev_page(self):
        """Return a Mailbox for the preceding page; ValueError at the first page."""
        total, cur = self._page_ints()
        if cur > 1:
            return Mailbox(self.parent_api, self.boxtype, str(cur - 1), self.sort)
        raise ValueError("Already at page %d/%d" % (cur, total))
    def __repr__(self):
        return "Mailbox: %s %s Page %s/%s" \
            % (self.boxtype, self.sort,
               self.current_page, self.total_pages)

View file

@ -1,11 +0,0 @@
# Media labels, spelled exactly as the Gazelle API expects them
# (e.g. for the 'media' search filter).
CD = "CD"
DVD = "DVD"
VINYL = "Vinyl"
SOUNDBOARD = "Soundboard"
SACD = "SACD"
DAT = "DAT"
# NOTE(review): "Casette" matches the site's spelling; do not "fix" the typo
# without confirming against the API, since the value is matched verbatim.
CASETTE = "Casette"
WEB = "WEB"
BLU_RAY = "Blu-ray"
# Every known media label.
ALL_MEDIAS = [CD, DVD, VINYL, SOUNDBOARD, SACD, DAT, CASETTE, WEB, BLU_RAY]

View file

@ -1,19 +0,0 @@
# Release-type labels, spelled exactly as the Gazelle API expects them.
ALBUM = "Album"
SOUNDTRACK = "Soundtrack"
EP = "EP"
ANTHOLOGY = "Anthology"
COMPILATION = "Compilation"
DJ_MIX = "DJ Mix"
SINGLE = "Single"
LIVE_ALBUM = "Live album"
REMIX = "Remix"
BOOTLEG = "Bootleg"
INTERVIEW = "Interview"
MIXTAPE = "Mixtape"
UNKNOWN = "Unknown"
# Order matters: position (1-based) is the site's numeric release-type code.
ALL_RELEASE_TYPES = [ALBUM, SOUNDTRACK, EP, ANTHOLOGY, COMPILATION, DJ_MIX, SINGLE, LIVE_ALBUM, REMIX, BOOTLEG,
                     INTERVIEW, MIXTAPE, UNKNOWN]
def get_int_val(release_type):
    """Return the 1-based numeric code for the given release-type label."""
    position = ALL_RELEASE_TYPES.index(release_type)
    return position + 1

View file

@ -1,29 +0,0 @@
class InvalidRequestException(Exception):
    """Raised when a Request is updated with data carrying a mismatched ID."""
class Request(object):
    """
    A torrent request, known by ID. Fields stay None until set_data() is fed a
    request JSON item (e.g. from an 'artist' API response).
    """
    def __init__(self, id, parent_api):
        self.id = id
        self.parent_api = parent_api
        for field in ('category', 'title', 'year', 'time_added', 'votes', 'bounty'):
            setattr(self, field, None)
        # register in the parent API's cache of known Request objects
        parent_api.cached_requests[id] = self
    def set_data(self, request_item_json_data):
        """Apply a request JSON item; raises InvalidRequestException on an ID mismatch."""
        data = request_item_json_data
        if self.id != data['requestId']:
            raise InvalidRequestException("Tried to update a Request's information from a request JSON item with a different id." +
                                          " Should be %s, got %s" % (self.id, data['requestId']) )
        self.category = self.parent_api.get_category(data['categoryId'])
        self.title = data['title']
        self.year = data['year']
        self.time_added = data['timeAdded']
        self.votes = data['votes']
        self.bounty = data['bounty']
    def __repr__(self):
        return "Request: %s - ID: %s" % (self.title, self.id)

View file

@ -1,17 +0,0 @@
class Tag(object):
    """A site tag, keyed by name, tracking how often each artist carries it."""
    def __init__(self, name, parent_api):
        self.name = name
        self.artist_counts = {}
        self.parent_api = parent_api
        # register in the parent API's cache of known Tag objects
        parent_api.cached_tags[name] = self
    def set_artist_count(self, artist, count):
        """
        Adds an artist to the known list of artists tagged with this tag (if necessary), and sets the count of times
        that that artist has been known to be tagged with this tag.
        """
        self.artist_counts[artist] = count
    def __repr__(self):
        return "Tag: %s" % self.name

View file

@ -1,183 +0,0 @@
from HTMLParser import HTMLParser
import re
class InvalidTorrentException(Exception):
    """Raised when a Torrent is updated with data carrying a mismatched ID."""
class Torrent(object):
    """
    A single torrent, known by ID. Created empty; populate the fields with one
    of the set_torrent_*_data() methods, each of which maps a different API
    payload shape onto the same attribute set.
    """
    def __init__(self, id, parent_api):
        self.id = id
        self.parent_api = parent_api
        self.group = None
        self.media = None
        self.format = None
        self.encoding = None
        self.remaster_year = None
        self.remastered = None
        self.remaster_title = None
        self.remaster_record_label = None
        self.remaster_catalogue_number = None
        self.scene = None
        self.has_log = None
        self.has_cue = None
        self.log_score = None
        self.file_count = None
        self.free_torrent = None
        self.size = None
        self.leechers = None
        self.seeders = None
        self.snatched = None
        self.time = None
        self.has_file = None
        self.description = None
        self.file_list = []
        self.file_path = None
        self.user = None
        # register in the parent API's cache of known Torrent objects
        self.parent_api.cached_torrents[self.id] = self
    def set_torrent_complete_data(self, torrent_json_response):
        """Apply a full 'torrent' action response (data nested under 'torrent'/'group')."""
        if self.id != torrent_json_response['torrent']['id']:
            # Bug fix: the mismatch message previously read torrent_json_response['id'],
            # a key that does not exist at the top level, so reporting the conflict
            # itself raised KeyError. It also mislabeled the action as 'artist'.
            raise InvalidTorrentException("Tried to update a Torrent's information from a 'torrent' API call with a different id." +
                " Should be %s, got %s" % (self.id, torrent_json_response['torrent']['id']) )
        self.group = self.parent_api.get_torrent_group(torrent_json_response['group']['id'])
        # set_group_data() would mark the group's torrent list as complete, but
        # this payload only carries a single torrent — preserve the old flag.
        had_complete_list = self.group.has_complete_torrent_list
        self.group.set_group_data(torrent_json_response)
        self.group.has_complete_torrent_list = had_complete_list
        self.media = torrent_json_response['torrent']['media']
        self.format = torrent_json_response['torrent']['format']
        self.encoding = torrent_json_response['torrent']['encoding']
        self.remaster_year = torrent_json_response['torrent']['remasterYear']
        self.remastered = torrent_json_response['torrent']['remastered']
        self.remaster_title = torrent_json_response['torrent']['remasterTitle']
        self.remaster_record_label = torrent_json_response['torrent']['remasterRecordLabel']
        self.scene = torrent_json_response['torrent']['scene']
        self.has_log = torrent_json_response['torrent']['hasLog']
        self.has_cue = torrent_json_response['torrent']['hasCue']
        self.log_score = torrent_json_response['torrent']['logScore']
        self.file_count = torrent_json_response['torrent']['fileCount']
        self.free_torrent = torrent_json_response['torrent']['freeTorrent']
        self.size = torrent_json_response['torrent']['size']
        self.leechers = torrent_json_response['torrent']['leechers']
        self.seeders = torrent_json_response['torrent']['seeders']
        self.snatched = torrent_json_response['torrent']['snatched']
        self.time = torrent_json_response['torrent']['time']
        self.description = torrent_json_response['torrent']['description']
        # Raw string for the regex: "\d" in a plain literal is a deprecated escape.
        self.file_list = [ re.match(r"(.+){{{(\d+)}}}", item).groups()
                           for item in torrent_json_response['torrent']['fileList'].split("|||") ] # tuple ( filename, filesize )
        self.file_path = torrent_json_response['torrent']['filePath']
        self.user = self.parent_api.get_user(torrent_json_response['torrent']['userId'])
    def set_torrent_artist_data(self, artist_torrent_json_response):
        """Apply one torrent item from an 'artist' action response."""
        if self.id != artist_torrent_json_response['id']:
            raise InvalidTorrentException("Tried to update a Torrent's information from an 'artist' API call with a different id." +
                " Should be %s, got %s" % (self.id, artist_torrent_json_response['id']) )
        self.group = self.parent_api.get_torrent_group(artist_torrent_json_response['groupId'])
        self.media = artist_torrent_json_response['media']
        self.format = artist_torrent_json_response['format']
        self.encoding = artist_torrent_json_response['encoding']
        self.remaster_year = artist_torrent_json_response['remasterYear']
        self.remastered = artist_torrent_json_response['remastered']
        self.remaster_title = artist_torrent_json_response['remasterTitle']
        self.remaster_record_label = artist_torrent_json_response['remasterRecordLabel']
        self.scene = artist_torrent_json_response['scene']
        self.has_log = artist_torrent_json_response['hasLog']
        self.has_cue = artist_torrent_json_response['hasCue']
        self.log_score = artist_torrent_json_response['logScore']
        self.file_count = artist_torrent_json_response['fileCount']
        self.free_torrent = artist_torrent_json_response['freeTorrent']
        self.size = artist_torrent_json_response['size']
        self.leechers = artist_torrent_json_response['leechers']
        self.seeders = artist_torrent_json_response['seeders']
        self.snatched = artist_torrent_json_response['snatched']
        self.time = artist_torrent_json_response['time']
        self.has_file = artist_torrent_json_response['hasFile']
    def set_torrent_group_data(self, group_torrent_json_response):
        """Apply one torrent item from a 'torrentgroup' action response."""
        if self.id != group_torrent_json_response['id']:
            raise InvalidTorrentException("Tried to update a Torrent's information from a 'torrentgroup' API call with a different id." +
                " Should be %s, got %s" % (self.id, group_torrent_json_response['id']) )
        self.group = self.parent_api.get_torrent_group(group_torrent_json_response['groupId'])
        self.media = group_torrent_json_response['media']
        self.format = group_torrent_json_response['format']
        self.encoding = group_torrent_json_response['encoding']
        self.remastered = group_torrent_json_response['remastered']
        self.remaster_year = group_torrent_json_response['remasterYear']
        self.remaster_title = group_torrent_json_response['remasterTitle']
        self.remaster_record_label = group_torrent_json_response['remasterRecordLabel']
        self.remaster_catalogue_number = group_torrent_json_response['remasterCatalogueNumber']
        self.scene = group_torrent_json_response['scene']
        self.has_log = group_torrent_json_response['hasLog']
        self.has_cue = group_torrent_json_response['hasCue']
        self.log_score = group_torrent_json_response['logScore']
        self.file_count = group_torrent_json_response['fileCount']
        self.size = group_torrent_json_response['size']
        self.seeders = group_torrent_json_response['seeders']
        self.leechers = group_torrent_json_response['leechers']
        self.snatched = group_torrent_json_response['snatched']
        self.free_torrent = group_torrent_json_response['freeTorrent']
        self.time = group_torrent_json_response['time']
        self.description = group_torrent_json_response['description']
        # Raw string for the regex: "\d" in a plain literal is a deprecated escape.
        self.file_list = [ re.match(r"(.+){{{(\d+)}}}", item).groups()
                           for item in group_torrent_json_response['fileList'].split("|||") ] # tuple ( filename, filesize )
        self.file_path = group_torrent_json_response['filePath']
        self.user = self.parent_api.get_user(group_torrent_json_response['userId'])
    def set_torrent_search_data(self, search_torrent_json_response):
        """Apply one torrent item from a 'browse' (search) action response."""
        if self.id != search_torrent_json_response['torrentId']:
            raise InvalidTorrentException("Tried to update a Torrent's information from a 'browse'/search API call with a different id." +
                " Should be %s, got %s" % (self.id, search_torrent_json_response['torrentId']) )
        # TODO: Add conditionals to handle torrents that aren't music
        self.group = self.parent_api.get_torrent_group(search_torrent_json_response['groupId'])
        self.remastered = search_torrent_json_response['remastered']
        self.remaster_year = search_torrent_json_response['remasterYear']
        self.remaster_title = search_torrent_json_response['remasterTitle']
        self.remaster_catalogue_number = search_torrent_json_response['remasterCatalogueNumber']
        self.media = search_torrent_json_response['media']
        self.format = search_torrent_json_response['format']
        self.encoding = search_torrent_json_response['encoding']
        self.has_log = search_torrent_json_response['hasLog']
        self.has_cue = search_torrent_json_response['hasCue']
        self.log_score = search_torrent_json_response['logScore']
        self.scene = search_torrent_json_response['scene']
        self.file_count = search_torrent_json_response['fileCount']
        self.size = search_torrent_json_response['size']
        self.seeders = search_torrent_json_response['seeders']
        self.leechers = search_torrent_json_response['leechers']
        self.snatched = search_torrent_json_response['snatches']
        # Either site-wide or personal freeleech counts as free.
        self.free_torrent = search_torrent_json_response['isFreeleech'] or search_torrent_json_response['isPersonalFreeleech']
        self.time = search_torrent_json_response['time']
    def set_torrent_top_10_data(self, top_10_json_response):
        """Apply one torrent item from a 'top10' action response."""
        if self.id != top_10_json_response['torrentId']:
            raise InvalidTorrentException("Tried to update a Torrent's information from a 'browse'/search API call with a different id." +
                " Should be %s, got %s" % (self.id, top_10_json_response['torrentId']) )
        # TODO: Add conditionals to handle torrents that aren't music
        self.group = self.parent_api.get_torrent_group(top_10_json_response['groupId'])
        self.group.name = top_10_json_response['groupName']
        if not self.group.music_info and top_10_json_response['artist']:
            # Top-10 payloads only carry a single artist name; build minimal music_info.
            self.group.music_info = {'artists': [self.parent_api.get_artist(name=HTMLParser().unescape(top_10_json_response['artist']))]}
        self.remaster_title = top_10_json_response['remasterTitle']
        self.media = top_10_json_response['media']
        self.format = top_10_json_response['format']
        self.encoding = top_10_json_response['encoding']
        self.has_log = top_10_json_response['hasLog']
        self.has_cue = top_10_json_response['hasCue']
        self.scene = top_10_json_response['scene']
        self.seeders = top_10_json_response['seeders']
        self.leechers = top_10_json_response['leechers']
        self.snatched = top_10_json_response['snatched']
    def __repr__(self):
        if self.group:
            groupname = self.group.name
        else:
            groupname = "Unknown Group"
        return "Torrent: %s - %s - ID: %s" % (groupname, self.encoding, self.id)

View file

@ -1,139 +0,0 @@
from .torrent import Torrent
class InvalidTorrentGroupException(Exception):
    """Raised when a TorrentGroup is updated with data carrying a mismatched ID."""
class TorrentGroup(object):
"""
Represents a Torrent Group (usually an album). Note that TorrentGroup.torrents may not be comprehensive if you
haven't called TorrentGroup.update_group_data()...it may have only been populated with filtered search results.
Check TorrentGroup.has_complete_torrent_list (boolean) to be sure.
"""
    def __init__(self, id, parent_api):
        # Only the ID is known up front; every other field stays None/empty
        # until one of the set_*_data() methods is fed an API payload.
        self.id = id
        self.parent_api = parent_api
        self.name = None
        self.wiki_body = None
        self.wiki_image = None
        self.year = None
        self.record_label = None
        self.catalogue_number = None
        self.tags = []
        self.release_type = None
        self.vanity_house = None
        self.has_bookmarked = None
        self.category = None
        self.time = None
        self.music_info = None
        self.torrents = []
        # True only after a payload carrying the full torrent list was applied.
        self.has_complete_torrent_list = False
        # register in the parent API's cache of known TorrentGroup objects
        self.parent_api.cached_torrent_groups[self.id] = self
    def update_group_data(self):
        """Fetch this group's full record via the 'torrentgroup' API action and apply it."""
        response = self.parent_api.request(action='torrentgroup', id=self.id)
        self.set_group_data(response)
def set_group_data(self, torrent_group_json_response):
"""
Takes parsed JSON response from 'torrentgroup' action on api, and updates relevant information.
To avoid problems, only pass in data from an API call that used this torrentgroup's ID as an argument.
"""
if self.id != torrent_group_json_response['group']['id']:
raise InvalidTorrentGroupException("Tried to update a TorrentGroup's information from an 'artist' API call with a different id." +
" Should be %s, got %s" % (self.id, torrent_group_json_response['group']['groupId']) )
self.name = torrent_group_json_response['group']['name']
self.year = torrent_group_json_response['group']['year']
self.wiki_body = torrent_group_json_response['group']['wikiBody']
self.wiki_image = torrent_group_json_response['group']['wikiImage']
self.record_label = torrent_group_json_response['group']['recordLabel']
self.catalogue_number = torrent_group_json_response['group']['catalogueNumber']
self.release_type = torrent_group_json_response['group']['releaseType']
self.category = self.parent_api.get_category(torrent_group_json_response['group']['categoryId'],
torrent_group_json_response['group']['categoryName'])
self.time = torrent_group_json_response['group']['time']
self.vanity_house = torrent_group_json_response['group']['vanityHouse']
self.music_info = torrent_group_json_response['group']['musicInfo']
self.music_info['artists'] = [ self.parent_api.get_artist(artist['id'], artist['name'])
for artist in self.music_info['artists'] ]
self.music_info['with'] = [ self.parent_api.get_artist(artist['id'], artist['name'])
for artist in self.music_info['with'] ]
if 'torrents' in torrent_group_json_response:
self.torrents = []
for torrent_dict in torrent_group_json_response['torrents']:
torrent_dict['groupId'] = self.id
torrent = self.parent_api.get_torrent(torrent_dict['id'])
torrent.set_torrent_group_data(torrent_dict)
self.torrents.append(torrent)
self.has_complete_torrent_list = True
elif 'torrent' in torrent_group_json_response:
torrent = self.parent_api.get_torrent(torrent_group_json_response['torrent']['id'])
self.torrents.append(torrent)
def set_artist_group_data(self, artist_group_json_response):
    """
    Update this TorrentGroup from the torrentgroup section of a parsed 'artist' API response.

    Only pass in data from an API call made for this group's artist; raises
    InvalidTorrentGroupException when the response describes a different group.
    """
    data = artist_group_json_response
    if self.id != data['groupId']:
        raise InvalidTorrentGroupException("Tried to update a TorrentGroup's information from an 'artist' API call with a different id." +
                                           " Should be %s, got %s" % (self.id, data['groupId']) )
    self.name = data['groupName']
    self.year = data['groupYear']
    self.record_label = data['groupRecordLabel']
    self.catalogue_number = data['groupCatalogueNumber']
    # Resolve tag names through the parent API so shared Tag objects are reused.
    self.tags = [self.parent_api.get_tag(tag_name) for tag_name in data['tags']]
    self.release_type = data['releaseType']
    self.has_bookmarked = data['hasBookmarked']
    self.torrents = []
    for torrent_data in data['torrent']:
        cached_torrent = self.parent_api.get_torrent(torrent_data['id'])
        cached_torrent.set_torrent_artist_data(torrent_data)
        self.torrents.append(cached_torrent)
    # The 'artist' response lists every torrent in the group, so the list is complete.
    self.has_complete_torrent_list = True
def set_torrent_search_data(self, search_json_response):
    """
    Takes a single group result from a 'browse'/search API call and updates relevant information.

    To avoid problems, only pass in data from an API call that used this
    torrent group's ID as an argument.  Raises InvalidTorrentGroupException
    when the IDs do not match.
    """
    if self.id != search_json_response['groupId']:
        raise InvalidTorrentGroupException("Tried to update a TorrentGroup's information from an 'browse'/search API call with a different id." +
                                           " Should be %s, got %s" % (self.id, search_json_response['groupId']) )
    self.name = search_json_response['groupName']
    # purposefully ignoring search_json_response['artist']...the other data updates don't include it, would just get confusing
    self.tags = []
    for tag_name in search_json_response['tags']:
        tag = self.parent_api.get_tag(tag_name)
        self.tags.append(tag)
    # Some categories (e.g. comics) omit several of the keys below, so check
    # membership before reading each one.  ("key in dict" is the idiomatic
    # form of the original "key in dict.keys()".)
    if 'bookmarked' in search_json_response:
        self.has_bookmarked = search_json_response['bookmarked']
    if 'vanityHouse' in search_json_response:
        self.vanity_house = search_json_response['vanityHouse']
    if 'groupYear' in search_json_response:
        self.year = search_json_response['groupYear']
    if 'releaseType' in search_json_response:
        self.release_type = search_json_response['releaseType']
    if 'groupTime' in search_json_response:
        # Guarded like the keys above: previously an unconditional read that
        # raised KeyError for categories that omit 'groupTime'.
        self.time = search_json_response['groupTime']
    if 'torrentId' in search_json_response:
        # Single-torrent result: normalize to the multi-torrent shape below.
        search_json_response['torrents'] = [{'torrentId': search_json_response['torrentId']}]
    new_torrents = []
    for torrent_dict in search_json_response['torrents']:
        torrent_dict['groupId'] = self.id
        torrent = self.parent_api.get_torrent(torrent_dict['torrentId'])
        new_torrents.append(torrent)
    # torrent information gets populated in API search call, no need to duplicate that here
    self.torrents = self.torrents + new_torrents
def __repr__(self):
    # Concise identity string for logging/debugging, e.g. "TorrentGroup: Some Album - ID: 42".
    return "TorrentGroup: %s - ID: %s" % (self.name, self.id)

View file

@ -1,108 +0,0 @@
class InvalidUserException(Exception):
    """Raised when API user data does not match the User object being updated."""
    pass
class User(object):
    """
    This class represents a User, whether your own or someone else's. It is created knowing only its ID. To reduce
    API accesses, load information using User.update_index_data() or User.update_user_data() only as needed.
    """
    def __init__(self, id, parent_api):
        self.id = id
        self.parent_api = parent_api
        self.username = None
        self.authkey = None
        self.passkey = None
        self.avatar = None
        self.is_friend = None
        self.profile_text = None
        self.notifications = None
        self.stats = None
        self.ranks = None
        self.personal = None
        self.community = None
        self.parent_api.cached_users[self.id] = self  # add self to cache of known User objects

    def update_index_data(self):
        """
        Calls 'index' API action, then updates this User object's information with it.

        NOTE: Only call if this user is the logged-in user...throws InvalidUserException otherwise.
        """
        response = self.parent_api.request(action='index')
        self.set_index_data(response)

    def set_index_data(self, index_json_response):
        """
        Takes parsed JSON response from 'index' action on api, and updates the available subset of user information.
        ONLY callable if this User object represents the currently logged in user. Throws InvalidUserException otherwise.
        """
        if self.id != index_json_response['id']:
            raise InvalidUserException("Tried to update non-logged-in User's information from 'index' API call." +
                                       " Should be %s, got %s" % (self.id, index_json_response['id']) )
        self.username = index_json_response['username']
        self.authkey = index_json_response['authkey']
        self.passkey = index_json_response['passkey']
        self.notifications = index_json_response['notifications']
        if self.stats:
            # Merge new stats over old ones (new values win).  Building a fresh
            # dict replaces the Python-2-only "dict(a.items() + b.items())",
            # which raises TypeError on Python 3.
            merged_stats = dict(self.stats)
            merged_stats.update(index_json_response['userstats'])
            self.stats = merged_stats
        else:
            self.stats = index_json_response['userstats']
        # cross pollinate some data that is located in multiple locations in API
        if self.personal:
            self.personal['class'] = self.stats['class']
            self.personal['passkey'] = self.passkey

    def update_user_data(self):
        """Calls 'user' API action with this user's ID, then updates this User object with the response."""
        response = self.parent_api.request(action='user', id=self.id)
        self.set_user_data(response)

    def set_user_data(self, user_json_response):
        """
        Takes parsed JSON response from 'user' action on api, and updates relevant user information.
        To avoid problems, only pass in user data from an API call that used this user's ID as an argument.
        """
        if self.username and self.username != user_json_response['username']:
            raise InvalidUserException("Tried to update a user's information from a 'user' API call with a different username." +
                                       " Should be %s, got %s" % (self.username, user_json_response['username']) )
        self.username = user_json_response['username']
        self.avatar = user_json_response['avatar']
        self.is_friend = user_json_response['isFriend']
        self.profile_text = user_json_response['profileText']
        if self.stats:
            # Same Python-2/3-compatible merge as in set_index_data.
            merged_stats = dict(self.stats)
            merged_stats.update(user_json_response['stats'])
            self.stats = merged_stats
        else:
            self.stats = user_json_response['stats']
        self.ranks = user_json_response['ranks']
        self.personal = user_json_response['personal']
        self.community = user_json_response['community']
        # cross pollinate some data that is located in multiple locations in API
        self.stats['class'] = self.personal['class']
        self.passkey = self.personal['passkey']

    def set_search_result_data(self, search_result_item):
        """
        Takes a single user result item from a 'usersearch' API call and updates user info.
        """
        if self.id != search_result_item['userId']:
            raise InvalidUserException("Tried to update existing user with another user's search result data (IDs don't match).")
        self.username = search_result_item['username']
        if not self.personal:
            self.personal = {}
        self.personal['donor'] = search_result_item['donor']
        self.personal['warned'] = search_result_item['warned']
        self.personal['enabled'] = search_result_item['enabled']
        self.personal['class'] = search_result_item['class']

    def __repr__(self):
        # Concise identity string for logging/debugging.
        return "User: %s - ID: %s" % (self.username, self.id)

View file

@ -1,339 +0,0 @@
GNU GENERAL PUBLIC LICENSE
Version 2, June 1991
Copyright (C) 1989, 1991 Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The licenses for most software are designed to take away your
freedom to share and change it. By contrast, the GNU General Public
License is intended to guarantee your freedom to share and change free
software--to make sure the software is free for all its users. This
General Public License applies to most of the Free Software
Foundation's software and to any other program whose authors commit to
using it. (Some other Free Software Foundation software is covered by
the GNU Lesser General Public License instead.) You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
this service if you wish), that you receive source code or can get it
if you want it, that you can change the software or use pieces of it
in new free programs; and that you know you can do these things.
To protect your rights, we need to make restrictions that forbid
anyone to deny you these rights or to ask you to surrender the rights.
These restrictions translate to certain responsibilities for you if you
distribute copies of the software, or if you modify it.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must give the recipients all the rights that
you have. You must make sure that they, too, receive or can get the
source code. And you must show them these terms so they know their
rights.
We protect your rights with two steps: (1) copyright the software, and
(2) offer you this license which gives you legal permission to copy,
distribute and/or modify the software.
Also, for each author's protection and ours, we want to make certain
that everyone understands that there is no warranty for this free
software. If the software is modified by someone else and passed on, we
want its recipients to know that what they have is not the original, so
that any problems introduced by others will not reflect on the original
authors' reputations.
Finally, any free program is threatened constantly by software
patents. We wish to avoid the danger that redistributors of a free
program will individually obtain patent licenses, in effect making the
program proprietary. To prevent this, we have made it clear that any
patent must be licensed for everyone's free use or not licensed at all.
The precise terms and conditions for copying, distribution and
modification follow.
GNU GENERAL PUBLIC LICENSE
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
0. This License applies to any program or other work which contains
a notice placed by the copyright holder saying it may be distributed
under the terms of this General Public License. The "Program", below,
refers to any such program or work, and a "work based on the Program"
means either the Program or any derivative work under copyright law:
that is to say, a work containing the Program or a portion of it,
either verbatim or with modifications and/or translated into another
language. (Hereinafter, translation is included without limitation in
the term "modification".) Each licensee is addressed as "you".
Activities other than copying, distribution and modification are not
covered by this License; they are outside its scope. The act of
running the Program is not restricted, and the output from the Program
is covered only if its contents constitute a work based on the
Program (independent of having been made by running the Program).
Whether that is true depends on what the Program does.
1. You may copy and distribute verbatim copies of the Program's
source code as you receive it, in any medium, provided that you
conspicuously and appropriately publish on each copy an appropriate
copyright notice and disclaimer of warranty; keep intact all the
notices that refer to this License and to the absence of any warranty;
and give any other recipients of the Program a copy of this License
along with the Program.
You may charge a fee for the physical act of transferring a copy, and
you may at your option offer warranty protection in exchange for a fee.
2. You may modify your copy or copies of the Program or any portion
of it, thus forming a work based on the Program, and copy and
distribute such modifications or work under the terms of Section 1
above, provided that you also meet all of these conditions:
a) You must cause the modified files to carry prominent notices
stating that you changed the files and the date of any change.
b) You must cause any work that you distribute or publish, that in
whole or in part contains or is derived from the Program or any
part thereof, to be licensed as a whole at no charge to all third
parties under the terms of this License.
c) If the modified program normally reads commands interactively
when run, you must cause it, when started running for such
interactive use in the most ordinary way, to print or display an
announcement including an appropriate copyright notice and a
notice that there is no warranty (or else, saying that you provide
a warranty) and that users may redistribute the program under
these conditions, and telling the user how to view a copy of this
License. (Exception: if the Program itself is interactive but
does not normally print such an announcement, your work based on
the Program is not required to print an announcement.)
These requirements apply to the modified work as a whole. If
identifiable sections of that work are not derived from the Program,
and can be reasonably considered independent and separate works in
themselves, then this License, and its terms, do not apply to those
sections when you distribute them as separate works. But when you
distribute the same sections as part of a whole which is a work based
on the Program, the distribution of the whole must be on the terms of
this License, whose permissions for other licensees extend to the
entire whole, and thus to each and every part regardless of who wrote it.
Thus, it is not the intent of this section to claim rights or contest
your rights to work written entirely by you; rather, the intent is to
exercise the right to control the distribution of derivative or
collective works based on the Program.
In addition, mere aggregation of another work not based on the Program
with the Program (or with a work based on the Program) on a volume of
a storage or distribution medium does not bring the other work under
the scope of this License.
3. You may copy and distribute the Program (or a work based on it,
under Section 2) in object code or executable form under the terms of
Sections 1 and 2 above provided that you also do one of the following:
a) Accompany it with the complete corresponding machine-readable
source code, which must be distributed under the terms of Sections
1 and 2 above on a medium customarily used for software interchange; or,
b) Accompany it with a written offer, valid for at least three
years, to give any third party, for a charge no more than your
cost of physically performing source distribution, a complete
machine-readable copy of the corresponding source code, to be
distributed under the terms of Sections 1 and 2 above on a medium
customarily used for software interchange; or,
c) Accompany it with the information you received as to the offer
to distribute corresponding source code. (This alternative is
allowed only for noncommercial distribution and only if you
received the program in object code or executable form with such
an offer, in accord with Subsection b above.)
The source code for a work means the preferred form of the work for
making modifications to it. For an executable work, complete source
code means all the source code for all modules it contains, plus any
associated interface definition files, plus the scripts used to
control compilation and installation of the executable. However, as a
special exception, the source code distributed need not include
anything that is normally distributed (in either source or binary
form) with the major components (compiler, kernel, and so on) of the
operating system on which the executable runs, unless that component
itself accompanies the executable.
If distribution of executable or object code is made by offering
access to copy from a designated place, then offering equivalent
access to copy the source code from the same place counts as
distribution of the source code, even though third parties are not
compelled to copy the source along with the object code.
4. You may not copy, modify, sublicense, or distribute the Program
except as expressly provided under this License. Any attempt
otherwise to copy, modify, sublicense or distribute the Program is
void, and will automatically terminate your rights under this License.
However, parties who have received copies, or rights, from you under
this License will not have their licenses terminated so long as such
parties remain in full compliance.
5. You are not required to accept this License, since you have not
signed it. However, nothing else grants you permission to modify or
distribute the Program or its derivative works. These actions are
prohibited by law if you do not accept this License. Therefore, by
modifying or distributing the Program (or any work based on the
Program), you indicate your acceptance of this License to do so, and
all its terms and conditions for copying, distributing or modifying
the Program or works based on it.
6. Each time you redistribute the Program (or any work based on the
Program), the recipient automatically receives a license from the
original licensor to copy, distribute or modify the Program subject to
these terms and conditions. You may not impose any further
restrictions on the recipients' exercise of the rights granted herein.
You are not responsible for enforcing compliance by third parties to
this License.
7. If, as a consequence of a court judgment or allegation of patent
infringement or for any other reason (not limited to patent issues),
conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot
distribute so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you
may not distribute the Program at all. For example, if a patent
license would not permit royalty-free redistribution of the Program by
all those who receive copies directly or indirectly through you, then
the only way you could satisfy both it and this License would be to
refrain entirely from distribution of the Program.
If any portion of this section is held invalid or unenforceable under
any particular circumstance, the balance of the section is intended to
apply and the section as a whole is intended to apply in other
circumstances.
It is not the purpose of this section to induce you to infringe any
patents or other property right claims or to contest validity of any
such claims; this section has the sole purpose of protecting the
integrity of the free software distribution system, which is
implemented by public license practices. Many people have made
generous contributions to the wide range of software distributed
through that system in reliance on consistent application of that
system; it is up to the author/donor to decide if he or she is willing
to distribute software through any other system and a licensee cannot
impose that choice.
This section is intended to make thoroughly clear what is believed to
be a consequence of the rest of this License.
8. If the distribution and/or use of the Program is restricted in
certain countries either by patents or by copyrighted interfaces, the
original copyright holder who places the Program under this License
may add an explicit geographical distribution limitation excluding
those countries, so that distribution is permitted only in or among
countries not thus excluded. In such case, this License incorporates
the limitation as if written in the body of this License.
9. The Free Software Foundation may publish revised and/or new versions
of the General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the Program
specifies a version number of this License which applies to it and "any
later version", you have the option of following the terms and conditions
either of that version or of any later version published by the Free
Software Foundation. If the Program does not specify a version number of
this License, you may choose any version ever published by the Free Software
Foundation.
10. If you wish to incorporate parts of the Program into other free
programs whose distribution conditions are different, write to the author
to ask for permission. For software which is copyrighted by the Free
Software Foundation, write to the Free Software Foundation; we sometimes
make exceptions for this. Our decision will be guided by the two goals
of preserving the free status of all derivatives of our free software and
of promoting the sharing and reuse of software generally.
NO WARRANTY
11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
REPAIR OR CORRECTION.
12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
POSSIBILITY OF SUCH DAMAGES.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
convey the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
Also add information on how to contact you by electronic and paper mail.
If the program is interactive, make it output a short notice like this
when it starts in an interactive mode:
Gnomovision version 69, Copyright (C) year name of author
Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.
The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, the commands you use may
be called something other than `show w' and `show c'; they could even be
mouse-clicks or menu items--whatever suits your program.
You should also get your employer (if you work as a programmer) or your
school, if any, to sign a "copyright disclaimer" for the program, if
necessary. Here is a sample; alter the names:
Yoyodyne, Inc., hereby disclaims all copyright interest in the program
`Gnomovision' (which makes passes at compilers) written by James Hacker.
<signature of Ty Coon>, 1 April 1989
Ty Coon, President of Vice
This General Public License does not permit incorporating your program into
proprietary programs. If your program is a subroutine library, you may
consider it more useful to permit linking proprietary applications with the
library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License.

View file

@ -1,134 +0,0 @@
Unidecode, lossy ASCII transliterations of Unicode text
=======================================================
It often happens that you have text data in Unicode, but you need to
represent it in ASCII. For example when integrating with legacy code that
doesn't support Unicode, or for ease of entry of non-Roman names on a US
keyboard, or when constructing ASCII machine identifiers from
human-readable Unicode strings that should still be somewhat intelligible
(a popular example of this is when making an URL slug from an article
title).
In most of these examples you could represent Unicode characters as
"???" or "\\15BA\\15A0\\1610", to mention two extreme cases. But that's
nearly useless to someone who actually wants to read what the text says.
What Unidecode provides is a middle road: function unidecode() takes
Unicode data and tries to represent it in ASCII characters (i.e., the
universally displayable characters between 0x00 and 0x7F), where the
compromises taken when mapping between two character sets are chosen to be
near what a human with a US keyboard would choose.
The quality of resulting ASCII representation varies. For languages of
western origin it should be between perfect and good. On the other hand
transliteration (i.e., conveying, in Roman letters, the pronunciation
expressed by the text in some other writing system) of languages like
Chinese, Japanese or Korean is a very complex issue and this library does
not even attempt to address it. It draws the line at context-free
character-by-character mapping. So a good rule of thumb is that the further
the script you are transliterating is from Latin alphabet, the worse the
transliteration will be.
Note that this module generally produces better results than simply
stripping accents from characters (which can be done in Python with
built-in functions). It is based on hand-tuned character mappings that for
example also contain ASCII approximations for symbols and non-Latin
alphabets.
This is a Python port of Text::Unidecode Perl module by
Sean M. Burke <sburke@cpan.org>.
Module content
--------------
The module exports a single function that takes an Unicode object (Python
2.x) or string (Python 3.x) and returns a string (that can be encoded to
ASCII bytes in Python 3.x)::
>>> from unidecode import unidecode
>>> unidecode(u'ko\u017eu\u0161\u010dek')
'kozuscek'
>>> unidecode(u'30 \U0001d5c4\U0001d5c6/\U0001d5c1')
'30 km/h'
>>> unidecode(u"\u5317\u4EB0")
'Bei Jing '
Requirements
------------
Nothing except Python itself.
You need a Python build with "wide" Unicode characters (also called "UCS-4
build") in order for unidecode to work correctly with characters outside of
Basic Multilingual Plane (BMP). Common characters outside BMP are bold, italic,
script, etc. variants of the Latin alphabet intended for mathematical notation.
Surrogate pair encoding of "narrow" builds is not supported in unidecode.
If your Python build supports "wide" Unicode the following expression will
return True::
>>> import sys
>>> sys.maxunicode > 0xffff
True
See PEP 261 for details regarding support for "wide" Unicode characters in
Python.
Installation
------------
You install Unidecode, as you would install any Python module, by running
these commands::
python setup.py install
python setup.py test
Source
------
You can get the latest development version of Unidecode with::
git clone https://www.tablix.org/~avian/git/unidecode.git
Support
-------
Questions, bug reports, useful code bits, and suggestions for Unidecode
should be sent to tomaz.solc@tablix.org
Copyright
---------
Original character transliteration tables:
Copyright 2001, Sean M. Burke <sburke@cpan.org>, all rights reserved.
Python code and later additions:
Copyright 2014, Tomaz Solc <tomaz.solc@tablix.org>
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2 of the License, or (at your option)
any later version.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc., 51
Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. The programs and
documentation in this dist are distributed in the hope that they will be
useful, but without any warranty; without even the implied warranty of
merchantability or fitness for a particular purpose.
..
vim: set filetype=rst:

View file

@ -1,69 +0,0 @@
# -*- coding: utf-8 -*-
# vi:tabstop=4:expandtab:sw=4
"""Transliterate Unicode text into plain 7-bit ASCII.
Example usage:
>>> from unidecode import unidecode
>>> unidecode(u"\u5317\u4EB0")
"Bei Jing "
The transliteration uses a straightforward map, and doesn't have alternatives
for the same character based on language, position, or anything else.
In Python 3, a standard string object will be returned. If you need bytes, use:
>>> unidecode("Κνωσός").encode("ascii")
b'Knosos'
"""
import warnings
from sys import version_info
Cache = {}
def unidecode(string):
"""Transliterate an Unicode object into an ASCII string
>>> unidecode(u"\u5317\u4EB0")
"Bei Jing "
"""
if version_info[0] < 3 and not isinstance(string, unicode):
warnings.warn( "Argument %r is not an unicode object. "
"Passing an encoded string will likely have "
"unexpected results." % (type(string),),
RuntimeWarning, 2)
retval = []
for char in string:
codepoint = ord(char)
if codepoint < 0x80: # Basic ASCII
retval.append(str(char))
continue
if codepoint > 0xeffff:
continue # Characters in Private Use Area and above are ignored
if 0xd800 <= codepoint <= 0xdfff:
warnings.warn( "Surrogate character %r will be ignored. "
"You might be using a narrow Python build." % (char,),
RuntimeWarning, 2)
section = codepoint >> 8 # Chop off the last two hex digits
position = codepoint % 256 # Last two hex digits
try:
table = Cache[section]
except KeyError:
try:
mod = __import__('unidecode.x%03x'%(section), globals(), locals(), ['data'])
except ImportError:
Cache[section] = None
continue # No match: ignore this character and carry on.
Cache[section] = table = mod.data
if table and len(table) > position:
retval.append( table[position] )
return ''.join(retval)

View file

@ -1,165 +0,0 @@
data = (
# Code points u+007f and below are equivalent to ASCII and are handled by a
# special case in the code. Hence they are not present in this table.
'', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '',
'', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '',
'', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '',
'', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '',
'', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '',
'', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '',
'', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '',
'', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '',
'', # 0x80
'', # 0x81
'', # 0x82
'', # 0x83
'', # 0x84
'', # 0x85
'', # 0x86
'', # 0x87
'', # 0x88
'', # 0x89
'', # 0x8a
'', # 0x8b
'', # 0x8c
'', # 0x8d
'', # 0x8e
'', # 0x8f
'', # 0x90
'', # 0x91
'', # 0x92
'', # 0x93
'', # 0x94
'', # 0x95
'', # 0x96
'', # 0x97
'', # 0x98
'', # 0x99
'', # 0x9a
'', # 0x9b
'', # 0x9c
'', # 0x9d
'', # 0x9e
'', # 0x9f
' ', # 0xa0
'!', # 0xa1
'C/', # 0xa2
# Not "GBP" - Pound Sign is used for more than just British Pounds.
'PS', # 0xa3
'$?', # 0xa4
'Y=', # 0xa5
'|', # 0xa6
'SS', # 0xa7
'"', # 0xa8
'(c)', # 0xa9
'a', # 0xaa
'<<', # 0xab
'!', # 0xac
'', # 0xad
'(r)', # 0xae
'-', # 0xaf
'deg', # 0xb0
'+-', # 0xb1
# These might be combined with other superscript digits (u+2070 - u+2079)
'2', # 0xb2
'3', # 0xb3
'\'', # 0xb4
'u', # 0xb5
'P', # 0xb6
'*', # 0xb7
',', # 0xb8
'1', # 0xb9
'o', # 0xba
'>>', # 0xbb
'1/4', # 0xbc
'1/2', # 0xbd
'3/4', # 0xbe
'?', # 0xbf
'A', # 0xc0
'A', # 0xc1
'A', # 0xc2
'A', # 0xc3
# Not "AE" - used in languages other than German
'A', # 0xc4
'A', # 0xc5
'AE', # 0xc6
'C', # 0xc7
'E', # 0xc8
'E', # 0xc9
'E', # 0xca
'E', # 0xcb
'I', # 0xcc
'I', # 0xcd
'I', # 0xce
'I', # 0xcf
'D', # 0xd0
'N', # 0xd1
'O', # 0xd2
'O', # 0xd3
'O', # 0xd4
'O', # 0xd5
# Not "OE" - used in languages other than German
'O', # 0xd6
'x', # 0xd7
'O', # 0xd8
'U', # 0xd9
'U', # 0xda
'U', # 0xdb
# Not "UE" - used in languages other than German
'U', # 0xdc
'Y', # 0xdd
'Th', # 0xde
'ss', # 0xdf
'a', # 0xe0
'a', # 0xe1
'a', # 0xe2
'a', # 0xe3
# Not "ae" - used in languages other than German
'a', # 0xe4
'a', # 0xe5
'ae', # 0xe6
'c', # 0xe7
'e', # 0xe8
'e', # 0xe9
'e', # 0xea
'e', # 0xeb
'i', # 0xec
'i', # 0xed
'i', # 0xee
'i', # 0xef
'd', # 0xf0
'n', # 0xf1
'o', # 0xf2
'o', # 0xf3
'o', # 0xf4
'o', # 0xf5
# Not "oe" - used in languages other than German
'o', # 0xf6
'/', # 0xf7
'o', # 0xf8
'u', # 0xf9
'u', # 0xfa
'u', # 0xfb
# Not "ue" - used in languages other than German
'u', # 0xfc
'y', # 0xfd
'th', # 0xfe
'y', # 0xff
)

View file

@ -1,258 +0,0 @@
data = (
'A', # 0x00
'a', # 0x01
'A', # 0x02
'a', # 0x03
'A', # 0x04
'a', # 0x05
'C', # 0x06
'c', # 0x07
'C', # 0x08
'c', # 0x09
'C', # 0x0a
'c', # 0x0b
'C', # 0x0c
'c', # 0x0d
'D', # 0x0e
'd', # 0x0f
'D', # 0x10
'd', # 0x11
'E', # 0x12
'e', # 0x13
'E', # 0x14
'e', # 0x15
'E', # 0x16
'e', # 0x17
'E', # 0x18
'e', # 0x19
'E', # 0x1a
'e', # 0x1b
'G', # 0x1c
'g', # 0x1d
'G', # 0x1e
'g', # 0x1f
'G', # 0x20
'g', # 0x21
'G', # 0x22
'g', # 0x23
'H', # 0x24
'h', # 0x25
'H', # 0x26
'h', # 0x27
'I', # 0x28
'i', # 0x29
'I', # 0x2a
'i', # 0x2b
'I', # 0x2c
'i', # 0x2d
'I', # 0x2e
'i', # 0x2f
'I', # 0x30
'i', # 0x31
'IJ', # 0x32
'ij', # 0x33
'J', # 0x34
'j', # 0x35
'K', # 0x36
'k', # 0x37
'k', # 0x38
'L', # 0x39
'l', # 0x3a
'L', # 0x3b
'l', # 0x3c
'L', # 0x3d
'l', # 0x3e
'L', # 0x3f
'l', # 0x40
'L', # 0x41
'l', # 0x42
'N', # 0x43
'n', # 0x44
'N', # 0x45
'n', # 0x46
'N', # 0x47
'n', # 0x48
'\'n', # 0x49
'ng', # 0x4a
'NG', # 0x4b
'O', # 0x4c
'o', # 0x4d
'O', # 0x4e
'o', # 0x4f
'O', # 0x50
'o', # 0x51
'OE', # 0x52
'oe', # 0x53
'R', # 0x54
'r', # 0x55
'R', # 0x56
'r', # 0x57
'R', # 0x58
'r', # 0x59
'S', # 0x5a
's', # 0x5b
'S', # 0x5c
's', # 0x5d
'S', # 0x5e
's', # 0x5f
'S', # 0x60
's', # 0x61
'T', # 0x62
't', # 0x63
'T', # 0x64
't', # 0x65
'T', # 0x66
't', # 0x67
'U', # 0x68
'u', # 0x69
'U', # 0x6a
'u', # 0x6b
'U', # 0x6c
'u', # 0x6d
'U', # 0x6e
'u', # 0x6f
'U', # 0x70
'u', # 0x71
'U', # 0x72
'u', # 0x73
'W', # 0x74
'w', # 0x75
'Y', # 0x76
'y', # 0x77
'Y', # 0x78
'Z', # 0x79
'z', # 0x7a
'Z', # 0x7b
'z', # 0x7c
'Z', # 0x7d
'z', # 0x7e
's', # 0x7f
'b', # 0x80
'B', # 0x81
'B', # 0x82
'b', # 0x83
'6', # 0x84
'6', # 0x85
'O', # 0x86
'C', # 0x87
'c', # 0x88
'D', # 0x89
'D', # 0x8a
'D', # 0x8b
'd', # 0x8c
'd', # 0x8d
'3', # 0x8e
'@', # 0x8f
'E', # 0x90
'F', # 0x91
'f', # 0x92
'G', # 0x93
'G', # 0x94
'hv', # 0x95
'I', # 0x96
'I', # 0x97
'K', # 0x98
'k', # 0x99
'l', # 0x9a
'l', # 0x9b
'W', # 0x9c
'N', # 0x9d
'n', # 0x9e
'O', # 0x9f
'O', # 0xa0
'o', # 0xa1
'OI', # 0xa2
'oi', # 0xa3
'P', # 0xa4
'p', # 0xa5
'YR', # 0xa6
'2', # 0xa7
'2', # 0xa8
'SH', # 0xa9
'sh', # 0xaa
't', # 0xab
'T', # 0xac
't', # 0xad
'T', # 0xae
'U', # 0xaf
'u', # 0xb0
'Y', # 0xb1
'V', # 0xb2
'Y', # 0xb3
'y', # 0xb4
'Z', # 0xb5
'z', # 0xb6
'ZH', # 0xb7
'ZH', # 0xb8
'zh', # 0xb9
'zh', # 0xba
'2', # 0xbb
'5', # 0xbc
'5', # 0xbd
'ts', # 0xbe
'w', # 0xbf
'|', # 0xc0
'||', # 0xc1
'|=', # 0xc2
'!', # 0xc3
'DZ', # 0xc4
'Dz', # 0xc5
'dz', # 0xc6
'LJ', # 0xc7
'Lj', # 0xc8
'lj', # 0xc9
'NJ', # 0xca
'Nj', # 0xcb
'nj', # 0xcc
'A', # 0xcd
'a', # 0xce
'I', # 0xcf
'i', # 0xd0
'O', # 0xd1
'o', # 0xd2
'U', # 0xd3
'u', # 0xd4
'U', # 0xd5
'u', # 0xd6
'U', # 0xd7
'u', # 0xd8
'U', # 0xd9
'u', # 0xda
'U', # 0xdb
'u', # 0xdc
'@', # 0xdd
'A', # 0xde
'a', # 0xdf
'A', # 0xe0
'a', # 0xe1
'AE', # 0xe2
'ae', # 0xe3
'G', # 0xe4
'g', # 0xe5
'G', # 0xe6
'g', # 0xe7
'K', # 0xe8
'k', # 0xe9
'O', # 0xea
'o', # 0xeb
'O', # 0xec
'o', # 0xed
'ZH', # 0xee
'zh', # 0xef
'j', # 0xf0
'DZ', # 0xf1
'Dz', # 0xf2
'dz', # 0xf3
'G', # 0xf4
'g', # 0xf5
'HV', # 0xf6
'W', # 0xf7
'N', # 0xf8
'n', # 0xf9
'A', # 0xfa
'a', # 0xfb
'AE', # 0xfc
'ae', # 0xfd
'O', # 0xfe
'o', # 0xff
)

View file

@ -1,257 +0,0 @@
data = (
'A', # 0x00
'a', # 0x01
'A', # 0x02
'a', # 0x03
'E', # 0x04
'e', # 0x05
'E', # 0x06
'e', # 0x07
'I', # 0x08
'i', # 0x09
'I', # 0x0a
'i', # 0x0b
'O', # 0x0c
'o', # 0x0d
'O', # 0x0e
'o', # 0x0f
'R', # 0x10
'r', # 0x11
'R', # 0x12
'r', # 0x13
'U', # 0x14
'u', # 0x15
'U', # 0x16
'u', # 0x17
'S', # 0x18
's', # 0x19
'T', # 0x1a
't', # 0x1b
'Y', # 0x1c
'y', # 0x1d
'H', # 0x1e
'h', # 0x1f
'N', # 0x20
'd', # 0x21
'OU', # 0x22
'ou', # 0x23
'Z', # 0x24
'z', # 0x25
'A', # 0x26
'a', # 0x27
'E', # 0x28
'e', # 0x29
'O', # 0x2a
'o', # 0x2b
'O', # 0x2c
'o', # 0x2d
'O', # 0x2e
'o', # 0x2f
'O', # 0x30
'o', # 0x31
'Y', # 0x32
'y', # 0x33
'l', # 0x34
'n', # 0x35
't', # 0x36
'j', # 0x37
'db', # 0x38
'qp', # 0x39
'A', # 0x3a
'C', # 0x3b
'c', # 0x3c
'L', # 0x3d
'T', # 0x3e
's', # 0x3f
'z', # 0x40
'[?]', # 0x41
'[?]', # 0x42
'B', # 0x43
'U', # 0x44
'^', # 0x45
'E', # 0x46
'e', # 0x47
'J', # 0x48
'j', # 0x49
'q', # 0x4a
'q', # 0x4b
'R', # 0x4c
'r', # 0x4d
'Y', # 0x4e
'y', # 0x4f
'a', # 0x50
'a', # 0x51
'a', # 0x52
'b', # 0x53
'o', # 0x54
'c', # 0x55
'd', # 0x56
'd', # 0x57
'e', # 0x58
'@', # 0x59
'@', # 0x5a
'e', # 0x5b
'e', # 0x5c
'e', # 0x5d
'e', # 0x5e
'j', # 0x5f
'g', # 0x60
'g', # 0x61
'g', # 0x62
'g', # 0x63
'u', # 0x64
'Y', # 0x65
'h', # 0x66
'h', # 0x67
'i', # 0x68
'i', # 0x69
'I', # 0x6a
'l', # 0x6b
'l', # 0x6c
'l', # 0x6d
'lZ', # 0x6e
'W', # 0x6f
'W', # 0x70
'm', # 0x71
'n', # 0x72
'n', # 0x73
'n', # 0x74
'o', # 0x75
'OE', # 0x76
'O', # 0x77
'F', # 0x78
'r', # 0x79
'r', # 0x7a
'r', # 0x7b
'r', # 0x7c
'r', # 0x7d
'r', # 0x7e
'r', # 0x7f
'R', # 0x80
'R', # 0x81
's', # 0x82
'S', # 0x83
'j', # 0x84
'S', # 0x85
'S', # 0x86
't', # 0x87
't', # 0x88
'u', # 0x89
'U', # 0x8a
'v', # 0x8b
'^', # 0x8c
'w', # 0x8d
'y', # 0x8e
'Y', # 0x8f
'z', # 0x90
'z', # 0x91
'Z', # 0x92
'Z', # 0x93
'?', # 0x94
'?', # 0x95
'?', # 0x96
'C', # 0x97
'@', # 0x98
'B', # 0x99
'E', # 0x9a
'G', # 0x9b
'H', # 0x9c
'j', # 0x9d
'k', # 0x9e
'L', # 0x9f
'q', # 0xa0
'?', # 0xa1
'?', # 0xa2
'dz', # 0xa3
'dZ', # 0xa4
'dz', # 0xa5
'ts', # 0xa6
'tS', # 0xa7
'tC', # 0xa8
'fN', # 0xa9
'ls', # 0xaa
'lz', # 0xab
'WW', # 0xac
']]', # 0xad
'h', # 0xae
'h', # 0xaf
'k', # 0xb0
'h', # 0xb1
'j', # 0xb2
'r', # 0xb3
'r', # 0xb4
'r', # 0xb5
'r', # 0xb6
'w', # 0xb7
'y', # 0xb8
'\'', # 0xb9
'"', # 0xba
'`', # 0xbb
'\'', # 0xbc
'`', # 0xbd
'`', # 0xbe
'\'', # 0xbf
'?', # 0xc0
'?', # 0xc1
'<', # 0xc2
'>', # 0xc3
'^', # 0xc4
'V', # 0xc5
'^', # 0xc6
'V', # 0xc7
'\'', # 0xc8
'-', # 0xc9
'/', # 0xca
'\\', # 0xcb
',', # 0xcc
'_', # 0xcd
'\\', # 0xce
'/', # 0xcf
':', # 0xd0
'.', # 0xd1
'`', # 0xd2
'\'', # 0xd3
'^', # 0xd4
'V', # 0xd5
'+', # 0xd6
'-', # 0xd7
'V', # 0xd8
'.', # 0xd9
'@', # 0xda
',', # 0xdb
'~', # 0xdc
'"', # 0xdd
'R', # 0xde
'X', # 0xdf
'G', # 0xe0
'l', # 0xe1
's', # 0xe2
'x', # 0xe3
'?', # 0xe4
'', # 0xe5
'', # 0xe6
'', # 0xe7
'', # 0xe8
'', # 0xe9
'', # 0xea
'', # 0xeb
'V', # 0xec
'=', # 0xed
'"', # 0xee
'[?]', # 0xef
'[?]', # 0xf0
'[?]', # 0xf1
'[?]', # 0xf2
'[?]', # 0xf3
'[?]', # 0xf4
'[?]', # 0xf5
'[?]', # 0xf6
'[?]', # 0xf7
'[?]', # 0xf8
'[?]', # 0xf9
'[?]', # 0xfa
'[?]', # 0xfb
'[?]', # 0xfc
'[?]', # 0xfd
'[?]', # 0xfe
)

View file

@ -1,257 +0,0 @@
data = (
'', # 0x00
'', # 0x01
'', # 0x02
'', # 0x03
'', # 0x04
'', # 0x05
'', # 0x06
'', # 0x07
'', # 0x08
'', # 0x09
'', # 0x0a
'', # 0x0b
'', # 0x0c
'', # 0x0d
'', # 0x0e
'', # 0x0f
'', # 0x10
'', # 0x11
'', # 0x12
'', # 0x13
'', # 0x14
'', # 0x15
'', # 0x16
'', # 0x17
'', # 0x18
'', # 0x19
'', # 0x1a
'', # 0x1b
'', # 0x1c
'', # 0x1d
'', # 0x1e
'', # 0x1f
'', # 0x20
'', # 0x21
'', # 0x22
'', # 0x23
'', # 0x24
'', # 0x25
'', # 0x26
'', # 0x27
'', # 0x28
'', # 0x29
'', # 0x2a
'', # 0x2b
'', # 0x2c
'', # 0x2d
'', # 0x2e
'', # 0x2f
'', # 0x30
'', # 0x31
'', # 0x32
'', # 0x33
'', # 0x34
'', # 0x35
'', # 0x36
'', # 0x37
'', # 0x38
'', # 0x39
'', # 0x3a
'', # 0x3b
'', # 0x3c
'', # 0x3d
'', # 0x3e
'', # 0x3f
'', # 0x40
'', # 0x41
'', # 0x42
'', # 0x43
'', # 0x44
'', # 0x45
'', # 0x46
'', # 0x47
'', # 0x48
'', # 0x49
'', # 0x4a
'', # 0x4b
'', # 0x4c
'', # 0x4d
'', # 0x4e
'[?]', # 0x4f
'[?]', # 0x50
'[?]', # 0x51
'[?]', # 0x52
'[?]', # 0x53
'[?]', # 0x54
'[?]', # 0x55
'[?]', # 0x56
'[?]', # 0x57
'[?]', # 0x58
'[?]', # 0x59
'[?]', # 0x5a
'[?]', # 0x5b
'[?]', # 0x5c
'[?]', # 0x5d
'[?]', # 0x5e
'[?]', # 0x5f
'', # 0x60
'', # 0x61
'', # 0x62
'a', # 0x63
'e', # 0x64
'i', # 0x65
'o', # 0x66
'u', # 0x67
'c', # 0x68
'd', # 0x69
'h', # 0x6a
'm', # 0x6b
'r', # 0x6c
't', # 0x6d
'v', # 0x6e
'x', # 0x6f
'[?]', # 0x70
'[?]', # 0x71
'[?]', # 0x72
'[?]', # 0x73
'\'', # 0x74
',', # 0x75
'[?]', # 0x76
'[?]', # 0x77
'[?]', # 0x78
'[?]', # 0x79
'', # 0x7a
'[?]', # 0x7b
'[?]', # 0x7c
'[?]', # 0x7d
'?', # 0x7e
'[?]', # 0x7f
'[?]', # 0x80
'[?]', # 0x81
'[?]', # 0x82
'[?]', # 0x83
'', # 0x84
'', # 0x85
'A', # 0x86
';', # 0x87
'E', # 0x88
'E', # 0x89
'I', # 0x8a
'[?]', # 0x8b
'O', # 0x8c
'[?]', # 0x8d
'U', # 0x8e
'O', # 0x8f
'I', # 0x90
'A', # 0x91
'B', # 0x92
'G', # 0x93
'D', # 0x94
'E', # 0x95
'Z', # 0x96
'E', # 0x97
'Th', # 0x98
'I', # 0x99
'K', # 0x9a
'L', # 0x9b
'M', # 0x9c
'N', # 0x9d
'Ks', # 0x9e
'O', # 0x9f
'P', # 0xa0
'R', # 0xa1
'[?]', # 0xa2
'S', # 0xa3
'T', # 0xa4
'U', # 0xa5
'Ph', # 0xa6
'Kh', # 0xa7
'Ps', # 0xa8
'O', # 0xa9
'I', # 0xaa
'U', # 0xab
'a', # 0xac
'e', # 0xad
'e', # 0xae
'i', # 0xaf
'u', # 0xb0
'a', # 0xb1
'b', # 0xb2
'g', # 0xb3
'd', # 0xb4
'e', # 0xb5
'z', # 0xb6
'e', # 0xb7
'th', # 0xb8
'i', # 0xb9
'k', # 0xba
'l', # 0xbb
'm', # 0xbc
'n', # 0xbd
'x', # 0xbe
'o', # 0xbf
'p', # 0xc0
'r', # 0xc1
's', # 0xc2
's', # 0xc3
't', # 0xc4
'u', # 0xc5
'ph', # 0xc6
'kh', # 0xc7
'ps', # 0xc8
'o', # 0xc9
'i', # 0xca
'u', # 0xcb
'o', # 0xcc
'u', # 0xcd
'o', # 0xce
'[?]', # 0xcf
'b', # 0xd0
'th', # 0xd1
'U', # 0xd2
'U', # 0xd3
'U', # 0xd4
'ph', # 0xd5
'p', # 0xd6
'&', # 0xd7
'[?]', # 0xd8
'[?]', # 0xd9
'St', # 0xda
'st', # 0xdb
'W', # 0xdc
'w', # 0xdd
'Q', # 0xde
'q', # 0xdf
'Sp', # 0xe0
'sp', # 0xe1
'Sh', # 0xe2
'sh', # 0xe3
'F', # 0xe4
'f', # 0xe5
'Kh', # 0xe6
'kh', # 0xe7
'H', # 0xe8
'h', # 0xe9
'G', # 0xea
'g', # 0xeb
'CH', # 0xec
'ch', # 0xed
'Ti', # 0xee
'ti', # 0xef
'k', # 0xf0
'r', # 0xf1
'c', # 0xf2
'j', # 0xf3
'[?]', # 0xf4
'[?]', # 0xf5
'[?]', # 0xf6
'[?]', # 0xf7
'[?]', # 0xf8
'[?]', # 0xf9
'[?]', # 0xfa
'[?]', # 0xfb
'[?]', # 0xfc
'[?]', # 0xfd
'[?]', # 0xfe
)

View file

@ -1,257 +0,0 @@
data = (
'Ie', # 0x00
'Io', # 0x01
'Dj', # 0x02
'Gj', # 0x03
'Ie', # 0x04
'Dz', # 0x05
'I', # 0x06
'Yi', # 0x07
'J', # 0x08
'Lj', # 0x09
'Nj', # 0x0a
'Tsh', # 0x0b
'Kj', # 0x0c
'I', # 0x0d
'U', # 0x0e
'Dzh', # 0x0f
'A', # 0x10
'B', # 0x11
'V', # 0x12
'G', # 0x13
'D', # 0x14
'E', # 0x15
'Zh', # 0x16
'Z', # 0x17
'I', # 0x18
'I', # 0x19
'K', # 0x1a
'L', # 0x1b
'M', # 0x1c
'N', # 0x1d
'O', # 0x1e
'P', # 0x1f
'R', # 0x20
'S', # 0x21
'T', # 0x22
'U', # 0x23
'F', # 0x24
'Kh', # 0x25
'Ts', # 0x26
'Ch', # 0x27
'Sh', # 0x28
'Shch', # 0x29
'\'', # 0x2a
'Y', # 0x2b
'\'', # 0x2c
'E', # 0x2d
'Iu', # 0x2e
'Ia', # 0x2f
'a', # 0x30
'b', # 0x31
'v', # 0x32
'g', # 0x33
'd', # 0x34
'e', # 0x35
'zh', # 0x36
'z', # 0x37
'i', # 0x38
'i', # 0x39
'k', # 0x3a
'l', # 0x3b
'm', # 0x3c
'n', # 0x3d
'o', # 0x3e
'p', # 0x3f
'r', # 0x40
's', # 0x41
't', # 0x42
'u', # 0x43
'f', # 0x44
'kh', # 0x45
'ts', # 0x46
'ch', # 0x47
'sh', # 0x48
'shch', # 0x49
'\'', # 0x4a
'y', # 0x4b
'\'', # 0x4c
'e', # 0x4d
'iu', # 0x4e
'ia', # 0x4f
'ie', # 0x50
'io', # 0x51
'dj', # 0x52
'gj', # 0x53
'ie', # 0x54
'dz', # 0x55
'i', # 0x56
'yi', # 0x57
'j', # 0x58
'lj', # 0x59
'nj', # 0x5a
'tsh', # 0x5b
'kj', # 0x5c
'i', # 0x5d
'u', # 0x5e
'dzh', # 0x5f
'O', # 0x60
'o', # 0x61
'E', # 0x62
'e', # 0x63
'Ie', # 0x64
'ie', # 0x65
'E', # 0x66
'e', # 0x67
'Ie', # 0x68
'ie', # 0x69
'O', # 0x6a
'o', # 0x6b
'Io', # 0x6c
'io', # 0x6d
'Ks', # 0x6e
'ks', # 0x6f
'Ps', # 0x70
'ps', # 0x71
'F', # 0x72
'f', # 0x73
'Y', # 0x74
'y', # 0x75
'Y', # 0x76
'y', # 0x77
'u', # 0x78
'u', # 0x79
'O', # 0x7a
'o', # 0x7b
'O', # 0x7c
'o', # 0x7d
'Ot', # 0x7e
'ot', # 0x7f
'Q', # 0x80
'q', # 0x81
'*1000*', # 0x82
'', # 0x83
'', # 0x84
'', # 0x85
'', # 0x86
'[?]', # 0x87
'*100.000*', # 0x88
'*1.000.000*', # 0x89
'[?]', # 0x8a
'[?]', # 0x8b
'"', # 0x8c
'"', # 0x8d
'R\'', # 0x8e
'r\'', # 0x8f
'G\'', # 0x90
'g\'', # 0x91
'G\'', # 0x92
'g\'', # 0x93
'G\'', # 0x94
'g\'', # 0x95
'Zh\'', # 0x96
'zh\'', # 0x97
'Z\'', # 0x98
'z\'', # 0x99
'K\'', # 0x9a
'k\'', # 0x9b
'K\'', # 0x9c
'k\'', # 0x9d
'K\'', # 0x9e
'k\'', # 0x9f
'K\'', # 0xa0
'k\'', # 0xa1
'N\'', # 0xa2
'n\'', # 0xa3
'Ng', # 0xa4
'ng', # 0xa5
'P\'', # 0xa6
'p\'', # 0xa7
'Kh', # 0xa8
'kh', # 0xa9
'S\'', # 0xaa
's\'', # 0xab
'T\'', # 0xac
't\'', # 0xad
'U', # 0xae
'u', # 0xaf
'U\'', # 0xb0
'u\'', # 0xb1
'Kh\'', # 0xb2
'kh\'', # 0xb3
'Tts', # 0xb4
'tts', # 0xb5
'Ch\'', # 0xb6
'ch\'', # 0xb7
'Ch\'', # 0xb8
'ch\'', # 0xb9
'H', # 0xba
'h', # 0xbb
'Ch', # 0xbc
'ch', # 0xbd
'Ch\'', # 0xbe
'ch\'', # 0xbf
'`', # 0xc0
'Zh', # 0xc1
'zh', # 0xc2
'K\'', # 0xc3
'k\'', # 0xc4
'[?]', # 0xc5
'[?]', # 0xc6
'N\'', # 0xc7
'n\'', # 0xc8
'[?]', # 0xc9
'[?]', # 0xca
'Ch', # 0xcb
'ch', # 0xcc
'[?]', # 0xcd
'[?]', # 0xce
'[?]', # 0xcf
'a', # 0xd0
'a', # 0xd1
'A', # 0xd2
'a', # 0xd3
'Ae', # 0xd4
'ae', # 0xd5
'Ie', # 0xd6
'ie', # 0xd7
'@', # 0xd8
'@', # 0xd9
'@', # 0xda
'@', # 0xdb
'Zh', # 0xdc
'zh', # 0xdd
'Z', # 0xde
'z', # 0xdf
'Dz', # 0xe0
'dz', # 0xe1
'I', # 0xe2
'i', # 0xe3
'I', # 0xe4
'i', # 0xe5
'O', # 0xe6
'o', # 0xe7
'O', # 0xe8
'o', # 0xe9
'O', # 0xea
'o', # 0xeb
'E', # 0xec
'e', # 0xed
'U', # 0xee
'u', # 0xef
'U', # 0xf0
'u', # 0xf1
'U', # 0xf2
'u', # 0xf3
'Ch', # 0xf4
'ch', # 0xf5
'[?]', # 0xf6
'[?]', # 0xf7
'Y', # 0xf8
'y', # 0xf9
'[?]', # 0xfa
'[?]', # 0xfb
'[?]', # 0xfc
'[?]', # 0xfd
'[?]', # 0xfe
)

View file

@ -1,257 +0,0 @@
data = (
'[?]', # 0x00
'[?]', # 0x01
'[?]', # 0x02
'[?]', # 0x03
'[?]', # 0x04
'[?]', # 0x05
'[?]', # 0x06
'[?]', # 0x07
'[?]', # 0x08
'[?]', # 0x09
'[?]', # 0x0a
'[?]', # 0x0b
'[?]', # 0x0c
'[?]', # 0x0d
'[?]', # 0x0e
'[?]', # 0x0f
'[?]', # 0x10
'[?]', # 0x11
'[?]', # 0x12
'[?]', # 0x13
'[?]', # 0x14
'[?]', # 0x15
'[?]', # 0x16
'[?]', # 0x17
'[?]', # 0x18
'[?]', # 0x19
'[?]', # 0x1a
'[?]', # 0x1b
'[?]', # 0x1c
'[?]', # 0x1d
'[?]', # 0x1e
'[?]', # 0x1f
'[?]', # 0x20
'[?]', # 0x21
'[?]', # 0x22
'[?]', # 0x23
'[?]', # 0x24
'[?]', # 0x25
'[?]', # 0x26
'[?]', # 0x27
'[?]', # 0x28
'[?]', # 0x29
'[?]', # 0x2a
'[?]', # 0x2b
'[?]', # 0x2c
'[?]', # 0x2d
'[?]', # 0x2e
'[?]', # 0x2f
'[?]', # 0x30
'A', # 0x31
'B', # 0x32
'G', # 0x33
'D', # 0x34
'E', # 0x35
'Z', # 0x36
'E', # 0x37
'E', # 0x38
'T`', # 0x39
'Zh', # 0x3a
'I', # 0x3b
'L', # 0x3c
'Kh', # 0x3d
'Ts', # 0x3e
'K', # 0x3f
'H', # 0x40
'Dz', # 0x41
'Gh', # 0x42
'Ch', # 0x43
'M', # 0x44
'Y', # 0x45
'N', # 0x46
'Sh', # 0x47
'O', # 0x48
'Ch`', # 0x49
'P', # 0x4a
'J', # 0x4b
'Rh', # 0x4c
'S', # 0x4d
'V', # 0x4e
'T', # 0x4f
'R', # 0x50
'Ts`', # 0x51
'W', # 0x52
'P`', # 0x53
'K`', # 0x54
'O', # 0x55
'F', # 0x56
'[?]', # 0x57
'[?]', # 0x58
'<', # 0x59
'\'', # 0x5a
'/', # 0x5b
'!', # 0x5c
',', # 0x5d
'?', # 0x5e
'.', # 0x5f
'[?]', # 0x60
'a', # 0x61
'b', # 0x62
'g', # 0x63
'd', # 0x64
'e', # 0x65
'z', # 0x66
'e', # 0x67
'e', # 0x68
't`', # 0x69
'zh', # 0x6a
'i', # 0x6b
'l', # 0x6c
'kh', # 0x6d
'ts', # 0x6e
'k', # 0x6f
'h', # 0x70
'dz', # 0x71
'gh', # 0x72
'ch', # 0x73
'm', # 0x74
'y', # 0x75
'n', # 0x76
'sh', # 0x77
'o', # 0x78
'ch`', # 0x79
'p', # 0x7a
'j', # 0x7b
'rh', # 0x7c
's', # 0x7d
'v', # 0x7e
't', # 0x7f
'r', # 0x80
'ts`', # 0x81
'w', # 0x82
'p`', # 0x83
'k`', # 0x84
'o', # 0x85
'f', # 0x86
'ew', # 0x87
'[?]', # 0x88
':', # 0x89
'-', # 0x8a
'[?]', # 0x8b
'[?]', # 0x8c
'[?]', # 0x8d
'[?]', # 0x8e
'[?]', # 0x8f
'[?]', # 0x90
'', # 0x91
'', # 0x92
'', # 0x93
'', # 0x94
'', # 0x95
'', # 0x96
'', # 0x97
'', # 0x98
'', # 0x99
'', # 0x9a
'', # 0x9b
'', # 0x9c
'', # 0x9d
'', # 0x9e
'', # 0x9f
'', # 0xa0
'', # 0xa1
'[?]', # 0xa2
'', # 0xa3
'', # 0xa4
'', # 0xa5
'', # 0xa6
'', # 0xa7
'', # 0xa8
'', # 0xa9
'', # 0xaa
'', # 0xab
'', # 0xac
'', # 0xad
'', # 0xae
'', # 0xaf
'@', # 0xb0
'e', # 0xb1
'a', # 0xb2
'o', # 0xb3
'i', # 0xb4
'e', # 0xb5
'e', # 0xb6
'a', # 0xb7
'a', # 0xb8
'o', # 0xb9
'[?]', # 0xba
'u', # 0xbb
'\'', # 0xbc
'', # 0xbd
'', # 0xbe
'', # 0xbf
'|', # 0xc0
'', # 0xc1
'', # 0xc2
':', # 0xc3
'', # 0xc4
'[?]', # 0xc5
'[?]', # 0xc6
'[?]', # 0xc7
'[?]', # 0xc8
'[?]', # 0xc9
'[?]', # 0xca
'[?]', # 0xcb
'[?]', # 0xcc
'[?]', # 0xcd
'[?]', # 0xce
'[?]', # 0xcf
'', # 0xd0
'b', # 0xd1
'g', # 0xd2
'd', # 0xd3
'h', # 0xd4
'v', # 0xd5
'z', # 0xd6
'kh', # 0xd7
't', # 0xd8
'y', # 0xd9
'k', # 0xda
'k', # 0xdb
'l', # 0xdc
'm', # 0xdd
'm', # 0xde
'n', # 0xdf
'n', # 0xe0
's', # 0xe1
'`', # 0xe2
'p', # 0xe3
'p', # 0xe4
'ts', # 0xe5
'ts', # 0xe6
'q', # 0xe7
'r', # 0xe8
'sh', # 0xe9
't', # 0xea
'[?]', # 0xeb
'[?]', # 0xec
'[?]', # 0xed
'[?]', # 0xee
'[?]', # 0xef
'V', # 0xf0
'oy', # 0xf1
'i', # 0xf2
'\'', # 0xf3
'"', # 0xf4
'[?]', # 0xf5
'[?]', # 0xf6
'[?]', # 0xf7
'[?]', # 0xf8
'[?]', # 0xf9
'[?]', # 0xfa
'[?]', # 0xfb
'[?]', # 0xfc
'[?]', # 0xfd
'[?]', # 0xfe
)

View file

@ -1,257 +0,0 @@
data = (
'[?]', # 0x00
'[?]', # 0x01
'[?]', # 0x02
'[?]', # 0x03
'[?]', # 0x04
'[?]', # 0x05
'[?]', # 0x06
'[?]', # 0x07
'[?]', # 0x08
'[?]', # 0x09
'[?]', # 0x0a
'[?]', # 0x0b
',', # 0x0c
'[?]', # 0x0d
'[?]', # 0x0e
'[?]', # 0x0f
'[?]', # 0x10
'[?]', # 0x11
'[?]', # 0x12
'[?]', # 0x13
'[?]', # 0x14
'[?]', # 0x15
'[?]', # 0x16
'[?]', # 0x17
'[?]', # 0x18
'[?]', # 0x19
'[?]', # 0x1a
';', # 0x1b
'[?]', # 0x1c
'[?]', # 0x1d
'[?]', # 0x1e
'?', # 0x1f
'[?]', # 0x20
'', # 0x21
'a', # 0x22
'\'', # 0x23
'w\'', # 0x24
'', # 0x25
'y\'', # 0x26
'', # 0x27
'b', # 0x28
'@', # 0x29
't', # 0x2a
'th', # 0x2b
'j', # 0x2c
'H', # 0x2d
'kh', # 0x2e
'd', # 0x2f
'dh', # 0x30
'r', # 0x31
'z', # 0x32
's', # 0x33
'sh', # 0x34
'S', # 0x35
'D', # 0x36
'T', # 0x37
'Z', # 0x38
'`', # 0x39
'G', # 0x3a
'[?]', # 0x3b
'[?]', # 0x3c
'[?]', # 0x3d
'[?]', # 0x3e
'[?]', # 0x3f
'', # 0x40
'f', # 0x41
'q', # 0x42
'k', # 0x43
'l', # 0x44
'm', # 0x45
'n', # 0x46
'h', # 0x47
'w', # 0x48
'~', # 0x49
'y', # 0x4a
'an', # 0x4b
'un', # 0x4c
'in', # 0x4d
'a', # 0x4e
'u', # 0x4f
'i', # 0x50
'W', # 0x51
'', # 0x52
'', # 0x53
'\'', # 0x54
'\'', # 0x55
'[?]', # 0x56
'[?]', # 0x57
'[?]', # 0x58
'[?]', # 0x59
'[?]', # 0x5a
'[?]', # 0x5b
'[?]', # 0x5c
'[?]', # 0x5d
'[?]', # 0x5e
'[?]', # 0x5f
'0', # 0x60
'1', # 0x61
'2', # 0x62
'3', # 0x63
'4', # 0x64
'5', # 0x65
'6', # 0x66
'7', # 0x67
'8', # 0x68
'9', # 0x69
'%', # 0x6a
'.', # 0x6b
',', # 0x6c
'*', # 0x6d
'[?]', # 0x6e
'[?]', # 0x6f
'', # 0x70
'\'', # 0x71
'\'', # 0x72
'\'', # 0x73
'', # 0x74
'\'', # 0x75
'\'w', # 0x76
'\'u', # 0x77
'\'y', # 0x78
'tt', # 0x79
'tth', # 0x7a
'b', # 0x7b
't', # 0x7c
'T', # 0x7d
'p', # 0x7e
'th', # 0x7f
'bh', # 0x80
'\'h', # 0x81
'H', # 0x82
'ny', # 0x83
'dy', # 0x84
'H', # 0x85
'ch', # 0x86
'cch', # 0x87
'dd', # 0x88
'D', # 0x89
'D', # 0x8a
'Dt', # 0x8b
'dh', # 0x8c
'ddh', # 0x8d
'd', # 0x8e
'D', # 0x8f
'D', # 0x90
'rr', # 0x91
'R', # 0x92
'R', # 0x93
'R', # 0x94
'R', # 0x95
'R', # 0x96
'R', # 0x97
'j', # 0x98
'R', # 0x99
'S', # 0x9a
'S', # 0x9b
'S', # 0x9c
'S', # 0x9d
'S', # 0x9e
'T', # 0x9f
'GH', # 0xa0
'F', # 0xa1
'F', # 0xa2
'F', # 0xa3
'v', # 0xa4
'f', # 0xa5
'ph', # 0xa6
'Q', # 0xa7
'Q', # 0xa8
'kh', # 0xa9
'k', # 0xaa
'K', # 0xab
'K', # 0xac
'ng', # 0xad
'K', # 0xae
'g', # 0xaf
'G', # 0xb0
'N', # 0xb1
'G', # 0xb2
'G', # 0xb3
'G', # 0xb4
'L', # 0xb5
'L', # 0xb6
'L', # 0xb7
'L', # 0xb8
'N', # 0xb9
'N', # 0xba
'N', # 0xbb
'N', # 0xbc
'N', # 0xbd
'h', # 0xbe
'Ch', # 0xbf
'hy', # 0xc0
'h', # 0xc1
'H', # 0xc2
'@', # 0xc3
'W', # 0xc4
'oe', # 0xc5
'oe', # 0xc6
'u', # 0xc7
'yu', # 0xc8
'yu', # 0xc9
'W', # 0xca
'v', # 0xcb
'y', # 0xcc
'Y', # 0xcd
'Y', # 0xce
'W', # 0xcf
'', # 0xd0
'', # 0xd1
'y', # 0xd2
'y\'', # 0xd3
'.', # 0xd4
'ae', # 0xd5
'', # 0xd6
'', # 0xd7
'', # 0xd8
'', # 0xd9
'', # 0xda
'', # 0xdb
'', # 0xdc
'@', # 0xdd
'#', # 0xde
'', # 0xdf
'', # 0xe0
'', # 0xe1
'', # 0xe2
'', # 0xe3
'', # 0xe4
'', # 0xe5
'', # 0xe6
'', # 0xe7
'', # 0xe8
'^', # 0xe9
'', # 0xea
'', # 0xeb
'', # 0xec
'', # 0xed
'[?]', # 0xee
'[?]', # 0xef
'0', # 0xf0
'1', # 0xf1
'2', # 0xf2
'3', # 0xf3
'4', # 0xf4
'5', # 0xf5
'6', # 0xf6
'7', # 0xf7
'8', # 0xf8
'9', # 0xf9
'Sh', # 0xfa
'D', # 0xfb
'Gh', # 0xfc
'&', # 0xfd
'+m', # 0xfe
)

View file

@ -1,257 +0,0 @@
data = (
'//', # 0x00
'/', # 0x01
',', # 0x02
'!', # 0x03
'!', # 0x04
'-', # 0x05
',', # 0x06
',', # 0x07
';', # 0x08
'?', # 0x09
'~', # 0x0a
'{', # 0x0b
'}', # 0x0c
'*', # 0x0d
'[?]', # 0x0e
'', # 0x0f
'\'', # 0x10
'', # 0x11
'b', # 0x12
'g', # 0x13
'g', # 0x14
'd', # 0x15
'd', # 0x16
'h', # 0x17
'w', # 0x18
'z', # 0x19
'H', # 0x1a
't', # 0x1b
't', # 0x1c
'y', # 0x1d
'yh', # 0x1e
'k', # 0x1f
'l', # 0x20
'm', # 0x21
'n', # 0x22
's', # 0x23
's', # 0x24
'`', # 0x25
'p', # 0x26
'p', # 0x27
'S', # 0x28
'q', # 0x29
'r', # 0x2a
'sh', # 0x2b
't', # 0x2c
'[?]', # 0x2d
'[?]', # 0x2e
'[?]', # 0x2f
'a', # 0x30
'a', # 0x31
'a', # 0x32
'A', # 0x33
'A', # 0x34
'A', # 0x35
'e', # 0x36
'e', # 0x37
'e', # 0x38
'E', # 0x39
'i', # 0x3a
'i', # 0x3b
'u', # 0x3c
'u', # 0x3d
'u', # 0x3e
'o', # 0x3f
'', # 0x40
'`', # 0x41
'\'', # 0x42
'', # 0x43
'', # 0x44
'X', # 0x45
'Q', # 0x46
'@', # 0x47
'@', # 0x48
'|', # 0x49
'+', # 0x4a
'[?]', # 0x4b
'[?]', # 0x4c
'[?]', # 0x4d
'[?]', # 0x4e
'[?]', # 0x4f
'[?]', # 0x50
'[?]', # 0x51
'[?]', # 0x52
'[?]', # 0x53
'[?]', # 0x54
'[?]', # 0x55
'[?]', # 0x56
'[?]', # 0x57
'[?]', # 0x58
'[?]', # 0x59
'[?]', # 0x5a
'[?]', # 0x5b
'[?]', # 0x5c
'[?]', # 0x5d
'[?]', # 0x5e
'[?]', # 0x5f
'[?]', # 0x60
'[?]', # 0x61
'[?]', # 0x62
'[?]', # 0x63
'[?]', # 0x64
'[?]', # 0x65
'[?]', # 0x66
'[?]', # 0x67
'[?]', # 0x68
'[?]', # 0x69
'[?]', # 0x6a
'[?]', # 0x6b
'[?]', # 0x6c
'[?]', # 0x6d
'[?]', # 0x6e
'[?]', # 0x6f
'[?]', # 0x70
'[?]', # 0x71
'[?]', # 0x72
'[?]', # 0x73
'[?]', # 0x74
'[?]', # 0x75
'[?]', # 0x76
'[?]', # 0x77
'[?]', # 0x78
'[?]', # 0x79
'[?]', # 0x7a
'[?]', # 0x7b
'[?]', # 0x7c
'[?]', # 0x7d
'[?]', # 0x7e
'[?]', # 0x7f
'h', # 0x80
'sh', # 0x81
'n', # 0x82
'r', # 0x83
'b', # 0x84
'L', # 0x85
'k', # 0x86
'\'', # 0x87
'v', # 0x88
'm', # 0x89
'f', # 0x8a
'dh', # 0x8b
'th', # 0x8c
'l', # 0x8d
'g', # 0x8e
'ny', # 0x8f
's', # 0x90
'd', # 0x91
'z', # 0x92
't', # 0x93
'y', # 0x94
'p', # 0x95
'j', # 0x96
'ch', # 0x97
'tt', # 0x98
'hh', # 0x99
'kh', # 0x9a
'th', # 0x9b
'z', # 0x9c
'sh', # 0x9d
's', # 0x9e
'd', # 0x9f
't', # 0xa0
'z', # 0xa1
'`', # 0xa2
'gh', # 0xa3
'q', # 0xa4
'w', # 0xa5
'a', # 0xa6
'aa', # 0xa7
'i', # 0xa8
'ee', # 0xa9
'u', # 0xaa
'oo', # 0xab
'e', # 0xac
'ey', # 0xad
'o', # 0xae
'oa', # 0xaf
'', # 0xb0
'[?]', # 0xb1
'[?]', # 0xb2
'[?]', # 0xb3
'[?]', # 0xb4
'[?]', # 0xb5
'[?]', # 0xb6
'[?]', # 0xb7
'[?]', # 0xb8
'[?]', # 0xb9
'[?]', # 0xba
'[?]', # 0xbb
'[?]', # 0xbc
'[?]', # 0xbd
'[?]', # 0xbe
'[?]', # 0xbf
'[?]', # 0xc0
'[?]', # 0xc1
'[?]', # 0xc2
'[?]', # 0xc3
'[?]', # 0xc4
'[?]', # 0xc5
'[?]', # 0xc6
'[?]', # 0xc7
'[?]', # 0xc8
'[?]', # 0xc9
'[?]', # 0xca
'[?]', # 0xcb
'[?]', # 0xcc
'[?]', # 0xcd
'[?]', # 0xce
'[?]', # 0xcf
'[?]', # 0xd0
'[?]', # 0xd1
'[?]', # 0xd2
'[?]', # 0xd3
'[?]', # 0xd4
'[?]', # 0xd5
'[?]', # 0xd6
'[?]', # 0xd7
'[?]', # 0xd8
'[?]', # 0xd9
'[?]', # 0xda
'[?]', # 0xdb
'[?]', # 0xdc
'[?]', # 0xdd
'[?]', # 0xde
'[?]', # 0xdf
'[?]', # 0xe0
'[?]', # 0xe1
'[?]', # 0xe2
'[?]', # 0xe3
'[?]', # 0xe4
'[?]', # 0xe5
'[?]', # 0xe6
'[?]', # 0xe7
'[?]', # 0xe8
'[?]', # 0xe9
'[?]', # 0xea
'[?]', # 0xeb
'[?]', # 0xec
'[?]', # 0xed
'[?]', # 0xee
'[?]', # 0xef
'[?]', # 0xf0
'[?]', # 0xf1
'[?]', # 0xf2
'[?]', # 0xf3
'[?]', # 0xf4
'[?]', # 0xf5
'[?]', # 0xf6
'[?]', # 0xf7
'[?]', # 0xf8
'[?]', # 0xf9
'[?]', # 0xfa
'[?]', # 0xfb
'[?]', # 0xfc
'[?]', # 0xfd
'[?]', # 0xfe
)

View file

@ -1,257 +0,0 @@
data = (
'[?]', # 0x00
'N', # 0x01
'N', # 0x02
'H', # 0x03
'[?]', # 0x04
'a', # 0x05
'aa', # 0x06
'i', # 0x07
'ii', # 0x08
'u', # 0x09
'uu', # 0x0a
'R', # 0x0b
'L', # 0x0c
'eN', # 0x0d
'e', # 0x0e
'e', # 0x0f
'ai', # 0x10
'oN', # 0x11
'o', # 0x12
'o', # 0x13
'au', # 0x14
'k', # 0x15
'kh', # 0x16
'g', # 0x17
'gh', # 0x18
'ng', # 0x19
'c', # 0x1a
'ch', # 0x1b
'j', # 0x1c
'jh', # 0x1d
'ny', # 0x1e
'tt', # 0x1f
'tth', # 0x20
'dd', # 0x21
'ddh', # 0x22
'nn', # 0x23
't', # 0x24
'th', # 0x25
'd', # 0x26
'dh', # 0x27
'n', # 0x28
'nnn', # 0x29
'p', # 0x2a
'ph', # 0x2b
'b', # 0x2c
'bh', # 0x2d
'm', # 0x2e
'y', # 0x2f
'r', # 0x30
'rr', # 0x31
'l', # 0x32
'l', # 0x33
'lll', # 0x34
'v', # 0x35
'sh', # 0x36
'ss', # 0x37
's', # 0x38
'h', # 0x39
'[?]', # 0x3a
'[?]', # 0x3b
'\'', # 0x3c
'\'', # 0x3d
'aa', # 0x3e
'i', # 0x3f
'ii', # 0x40
'u', # 0x41
'uu', # 0x42
'R', # 0x43
'RR', # 0x44
'eN', # 0x45
'e', # 0x46
'e', # 0x47
'ai', # 0x48
'oN', # 0x49
'o', # 0x4a
'o', # 0x4b
'au', # 0x4c
'', # 0x4d
'[?]', # 0x4e
'[?]', # 0x4f
'AUM', # 0x50
'\'', # 0x51
'\'', # 0x52
'`', # 0x53
'\'', # 0x54
'[?]', # 0x55
'[?]', # 0x56
'[?]', # 0x57
'q', # 0x58
'khh', # 0x59
'ghh', # 0x5a
'z', # 0x5b
'dddh', # 0x5c
'rh', # 0x5d
'f', # 0x5e
'yy', # 0x5f
'RR', # 0x60
'LL', # 0x61
'L', # 0x62
'LL', # 0x63
' / ', # 0x64
' // ', # 0x65
'0', # 0x66
'1', # 0x67
'2', # 0x68
'3', # 0x69
'4', # 0x6a
'5', # 0x6b
'6', # 0x6c
'7', # 0x6d
'8', # 0x6e
'9', # 0x6f
'.', # 0x70
'[?]', # 0x71
'[?]', # 0x72
'[?]', # 0x73
'[?]', # 0x74
'[?]', # 0x75
'[?]', # 0x76
'[?]', # 0x77
'[?]', # 0x78
'[?]', # 0x79
'[?]', # 0x7a
'[?]', # 0x7b
'[?]', # 0x7c
'[?]', # 0x7d
'[?]', # 0x7e
'[?]', # 0x7f
'[?]', # 0x80
'N', # 0x81
'N', # 0x82
'H', # 0x83
'[?]', # 0x84
'a', # 0x85
'aa', # 0x86
'i', # 0x87
'ii', # 0x88
'u', # 0x89
'uu', # 0x8a
'R', # 0x8b
'RR', # 0x8c
'[?]', # 0x8d
'[?]', # 0x8e
'e', # 0x8f
'ai', # 0x90
'[?]', # 0x91
'[?]', # 0x92
'o', # 0x93
'au', # 0x94
'k', # 0x95
'kh', # 0x96
'g', # 0x97
'gh', # 0x98
'ng', # 0x99
'c', # 0x9a
'ch', # 0x9b
'j', # 0x9c
'jh', # 0x9d
'ny', # 0x9e
'tt', # 0x9f
'tth', # 0xa0
'dd', # 0xa1
'ddh', # 0xa2
'nn', # 0xa3
't', # 0xa4
'th', # 0xa5
'd', # 0xa6
'dh', # 0xa7
'n', # 0xa8
'[?]', # 0xa9
'p', # 0xaa
'ph', # 0xab
'b', # 0xac
'bh', # 0xad
'm', # 0xae
'y', # 0xaf
'r', # 0xb0
'[?]', # 0xb1
'l', # 0xb2
'[?]', # 0xb3
'[?]', # 0xb4
'[?]', # 0xb5
'sh', # 0xb6
'ss', # 0xb7
's', # 0xb8
'h', # 0xb9
'[?]', # 0xba
'[?]', # 0xbb
'\'', # 0xbc
'[?]', # 0xbd
'aa', # 0xbe
'i', # 0xbf
'ii', # 0xc0
'u', # 0xc1
'uu', # 0xc2
'R', # 0xc3
'RR', # 0xc4
'[?]', # 0xc5
'[?]', # 0xc6
'e', # 0xc7
'ai', # 0xc8
'[?]', # 0xc9
'[?]', # 0xca
'o', # 0xcb
'au', # 0xcc
'', # 0xcd
'[?]', # 0xce
'[?]', # 0xcf
'[?]', # 0xd0
'[?]', # 0xd1
'[?]', # 0xd2
'[?]', # 0xd3
'[?]', # 0xd4
'[?]', # 0xd5
'[?]', # 0xd6
'+', # 0xd7
'[?]', # 0xd8
'[?]', # 0xd9
'[?]', # 0xda
'[?]', # 0xdb
'rr', # 0xdc
'rh', # 0xdd
'[?]', # 0xde
'yy', # 0xdf
'RR', # 0xe0
'LL', # 0xe1
'L', # 0xe2
'LL', # 0xe3
'[?]', # 0xe4
'[?]', # 0xe5
'0', # 0xe6
'1', # 0xe7
'2', # 0xe8
'3', # 0xe9
'4', # 0xea
'5', # 0xeb
'6', # 0xec
'7', # 0xed
'8', # 0xee
'9', # 0xef
'r\'', # 0xf0
'r`', # 0xf1
'Rs', # 0xf2
'Rs', # 0xf3
'1/', # 0xf4
'2/', # 0xf5
'3/', # 0xf6
'4/', # 0xf7
' 1 - 1/', # 0xf8
'/16', # 0xf9
'', # 0xfa
'[?]', # 0xfb
'[?]', # 0xfc
'[?]', # 0xfd
'[?]', # 0xfe
)

View file

@ -1,257 +0,0 @@
data = (
'[?]', # 0x00
'[?]', # 0x01
'N', # 0x02
'[?]', # 0x03
'[?]', # 0x04
'a', # 0x05
'aa', # 0x06
'i', # 0x07
'ii', # 0x08
'u', # 0x09
'uu', # 0x0a
'[?]', # 0x0b
'[?]', # 0x0c
'[?]', # 0x0d
'[?]', # 0x0e
'ee', # 0x0f
'ai', # 0x10
'[?]', # 0x11
'[?]', # 0x12
'oo', # 0x13
'au', # 0x14
'k', # 0x15
'kh', # 0x16
'g', # 0x17
'gh', # 0x18
'ng', # 0x19
'c', # 0x1a
'ch', # 0x1b
'j', # 0x1c
'jh', # 0x1d
'ny', # 0x1e
'tt', # 0x1f
'tth', # 0x20
'dd', # 0x21
'ddh', # 0x22
'nn', # 0x23
't', # 0x24
'th', # 0x25
'd', # 0x26
'dh', # 0x27
'n', # 0x28
'[?]', # 0x29
'p', # 0x2a
'ph', # 0x2b
'b', # 0x2c
'bb', # 0x2d
'm', # 0x2e
'y', # 0x2f
'r', # 0x30
'[?]', # 0x31
'l', # 0x32
'll', # 0x33
'[?]', # 0x34
'v', # 0x35
'sh', # 0x36
'[?]', # 0x37
's', # 0x38
'h', # 0x39
'[?]', # 0x3a
'[?]', # 0x3b
'\'', # 0x3c
'[?]', # 0x3d
'aa', # 0x3e
'i', # 0x3f
'ii', # 0x40
'u', # 0x41
'uu', # 0x42
'[?]', # 0x43
'[?]', # 0x44
'[?]', # 0x45
'[?]', # 0x46
'ee', # 0x47
'ai', # 0x48
'[?]', # 0x49
'[?]', # 0x4a
'oo', # 0x4b
'au', # 0x4c
'', # 0x4d
'[?]', # 0x4e
'[?]', # 0x4f
'[?]', # 0x50
'[?]', # 0x51
'[?]', # 0x52
'[?]', # 0x53
'[?]', # 0x54
'[?]', # 0x55
'[?]', # 0x56
'[?]', # 0x57
'[?]', # 0x58
'khh', # 0x59
'ghh', # 0x5a
'z', # 0x5b
'rr', # 0x5c
'[?]', # 0x5d
'f', # 0x5e
'[?]', # 0x5f
'[?]', # 0x60
'[?]', # 0x61
'[?]', # 0x62
'[?]', # 0x63
'[?]', # 0x64
'[?]', # 0x65
'0', # 0x66
'1', # 0x67
'2', # 0x68
'3', # 0x69
'4', # 0x6a
'5', # 0x6b
'6', # 0x6c
'7', # 0x6d
'8', # 0x6e
'9', # 0x6f
'N', # 0x70
'H', # 0x71
'', # 0x72
'', # 0x73
'G.E.O.', # 0x74
'[?]', # 0x75
'[?]', # 0x76
'[?]', # 0x77
'[?]', # 0x78
'[?]', # 0x79
'[?]', # 0x7a
'[?]', # 0x7b
'[?]', # 0x7c
'[?]', # 0x7d
'[?]', # 0x7e
'[?]', # 0x7f
'[?]', # 0x80
'N', # 0x81
'N', # 0x82
'H', # 0x83
'[?]', # 0x84
'a', # 0x85
'aa', # 0x86
'i', # 0x87
'ii', # 0x88
'u', # 0x89
'uu', # 0x8a
'R', # 0x8b
'[?]', # 0x8c
'eN', # 0x8d
'[?]', # 0x8e
'e', # 0x8f
'ai', # 0x90
'oN', # 0x91
'[?]', # 0x92
'o', # 0x93
'au', # 0x94
'k', # 0x95
'kh', # 0x96
'g', # 0x97
'gh', # 0x98
'ng', # 0x99
'c', # 0x9a
'ch', # 0x9b
'j', # 0x9c
'jh', # 0x9d
'ny', # 0x9e
'tt', # 0x9f
'tth', # 0xa0
'dd', # 0xa1
'ddh', # 0xa2
'nn', # 0xa3
't', # 0xa4
'th', # 0xa5
'd', # 0xa6
'dh', # 0xa7
'n', # 0xa8
'[?]', # 0xa9
'p', # 0xaa
'ph', # 0xab
'b', # 0xac
'bh', # 0xad
'm', # 0xae
'ya', # 0xaf
'r', # 0xb0
'[?]', # 0xb1
'l', # 0xb2
'll', # 0xb3
'[?]', # 0xb4
'v', # 0xb5
'sh', # 0xb6
'ss', # 0xb7
's', # 0xb8
'h', # 0xb9
'[?]', # 0xba
'[?]', # 0xbb
'\'', # 0xbc
'\'', # 0xbd
'aa', # 0xbe
'i', # 0xbf
'ii', # 0xc0
'u', # 0xc1
'uu', # 0xc2
'R', # 0xc3
'RR', # 0xc4
'eN', # 0xc5
'[?]', # 0xc6
'e', # 0xc7
'ai', # 0xc8
'oN', # 0xc9
'[?]', # 0xca
'o', # 0xcb
'au', # 0xcc
'', # 0xcd
'[?]', # 0xce
'[?]', # 0xcf
'AUM', # 0xd0
'[?]', # 0xd1
'[?]', # 0xd2
'[?]', # 0xd3
'[?]', # 0xd4
'[?]', # 0xd5
'[?]', # 0xd6
'[?]', # 0xd7
'[?]', # 0xd8
'[?]', # 0xd9
'[?]', # 0xda
'[?]', # 0xdb
'[?]', # 0xdc
'[?]', # 0xdd
'[?]', # 0xde
'[?]', # 0xdf
'RR', # 0xe0
'[?]', # 0xe1
'[?]', # 0xe2
'[?]', # 0xe3
'[?]', # 0xe4
'[?]', # 0xe5
'0', # 0xe6
'1', # 0xe7
'2', # 0xe8
'3', # 0xe9
'4', # 0xea
'5', # 0xeb
'6', # 0xec
'7', # 0xed
'8', # 0xee
'9', # 0xef
'[?]', # 0xf0
'[?]', # 0xf1
'[?]', # 0xf2
'[?]', # 0xf3
'[?]', # 0xf4
'[?]', # 0xf5
'[?]', # 0xf6
'[?]', # 0xf7
'[?]', # 0xf8
'[?]', # 0xf9
'[?]', # 0xfa
'[?]', # 0xfb
'[?]', # 0xfc
'[?]', # 0xfd
'[?]', # 0xfe
)

View file

@ -1,257 +0,0 @@
data = (
'[?]', # 0x00
'N', # 0x01
'N', # 0x02
'H', # 0x03
'[?]', # 0x04
'a', # 0x05
'aa', # 0x06
'i', # 0x07
'ii', # 0x08
'u', # 0x09
'uu', # 0x0a
'R', # 0x0b
'L', # 0x0c
'[?]', # 0x0d
'[?]', # 0x0e
'e', # 0x0f
'ai', # 0x10
'[?]', # 0x11
'[?]', # 0x12
'o', # 0x13
'au', # 0x14
'k', # 0x15
'kh', # 0x16
'g', # 0x17
'gh', # 0x18
'ng', # 0x19
'c', # 0x1a
'ch', # 0x1b
'j', # 0x1c
'jh', # 0x1d
'ny', # 0x1e
'tt', # 0x1f
'tth', # 0x20
'dd', # 0x21
'ddh', # 0x22
'nn', # 0x23
't', # 0x24
'th', # 0x25
'd', # 0x26
'dh', # 0x27
'n', # 0x28
'[?]', # 0x29
'p', # 0x2a
'ph', # 0x2b
'b', # 0x2c
'bh', # 0x2d
'm', # 0x2e
'y', # 0x2f
'r', # 0x30
'[?]', # 0x31
'l', # 0x32
'll', # 0x33
'[?]', # 0x34
'', # 0x35
'sh', # 0x36
'ss', # 0x37
's', # 0x38
'h', # 0x39
'[?]', # 0x3a
'[?]', # 0x3b
'\'', # 0x3c
'\'', # 0x3d
'aa', # 0x3e
'i', # 0x3f
'ii', # 0x40
'u', # 0x41
'uu', # 0x42
'R', # 0x43
'[?]', # 0x44
'[?]', # 0x45
'[?]', # 0x46
'e', # 0x47
'ai', # 0x48
'[?]', # 0x49
'[?]', # 0x4a
'o', # 0x4b
'au', # 0x4c
'', # 0x4d
'[?]', # 0x4e
'[?]', # 0x4f
'[?]', # 0x50
'[?]', # 0x51
'[?]', # 0x52
'[?]', # 0x53
'[?]', # 0x54
'[?]', # 0x55
'+', # 0x56
'+', # 0x57
'[?]', # 0x58
'[?]', # 0x59
'[?]', # 0x5a
'[?]', # 0x5b
'rr', # 0x5c
'rh', # 0x5d
'[?]', # 0x5e
'yy', # 0x5f
'RR', # 0x60
'LL', # 0x61
'[?]', # 0x62
'[?]', # 0x63
'[?]', # 0x64
'[?]', # 0x65
'0', # 0x66
'1', # 0x67
'2', # 0x68
'3', # 0x69
'4', # 0x6a
'5', # 0x6b
'6', # 0x6c
'7', # 0x6d
'8', # 0x6e
'9', # 0x6f
'', # 0x70
'[?]', # 0x71
'[?]', # 0x72
'[?]', # 0x73
'[?]', # 0x74
'[?]', # 0x75
'[?]', # 0x76
'[?]', # 0x77
'[?]', # 0x78
'[?]', # 0x79
'[?]', # 0x7a
'[?]', # 0x7b
'[?]', # 0x7c
'[?]', # 0x7d
'[?]', # 0x7e
'[?]', # 0x7f
'[?]', # 0x80
'[?]', # 0x81
'N', # 0x82
'H', # 0x83
'[?]', # 0x84
'a', # 0x85
'aa', # 0x86
'i', # 0x87
'ii', # 0x88
'u', # 0x89
'uu', # 0x8a
'[?]', # 0x8b
'[?]', # 0x8c
'[?]', # 0x8d
'e', # 0x8e
'ee', # 0x8f
'ai', # 0x90
'[?]', # 0x91
'o', # 0x92
'oo', # 0x93
'au', # 0x94
'k', # 0x95
'[?]', # 0x96
'[?]', # 0x97
'[?]', # 0x98
'ng', # 0x99
'c', # 0x9a
'[?]', # 0x9b
'j', # 0x9c
'[?]', # 0x9d
'ny', # 0x9e
'tt', # 0x9f
'[?]', # 0xa0
'[?]', # 0xa1
'[?]', # 0xa2
'nn', # 0xa3
't', # 0xa4
'[?]', # 0xa5
'[?]', # 0xa6
'[?]', # 0xa7
'n', # 0xa8
'nnn', # 0xa9
'p', # 0xaa
'[?]', # 0xab
'[?]', # 0xac
'[?]', # 0xad
'm', # 0xae
'y', # 0xaf
'r', # 0xb0
'rr', # 0xb1
'l', # 0xb2
'll', # 0xb3
'lll', # 0xb4
'v', # 0xb5
'[?]', # 0xb6
'ss', # 0xb7
's', # 0xb8
'h', # 0xb9
'[?]', # 0xba
'[?]', # 0xbb
'[?]', # 0xbc
'[?]', # 0xbd
'aa', # 0xbe
'i', # 0xbf
'ii', # 0xc0
'u', # 0xc1
'uu', # 0xc2
'[?]', # 0xc3
'[?]', # 0xc4
'[?]', # 0xc5
'e', # 0xc6
'ee', # 0xc7
'ai', # 0xc8
'[?]', # 0xc9
'o', # 0xca
'oo', # 0xcb
'au', # 0xcc
'', # 0xcd
'[?]', # 0xce
'[?]', # 0xcf
'[?]', # 0xd0
'[?]', # 0xd1
'[?]', # 0xd2
'[?]', # 0xd3
'[?]', # 0xd4
'[?]', # 0xd5
'[?]', # 0xd6
'+', # 0xd7
'[?]', # 0xd8
'[?]', # 0xd9
'[?]', # 0xda
'[?]', # 0xdb
'[?]', # 0xdc
'[?]', # 0xdd
'[?]', # 0xde
'[?]', # 0xdf
'[?]', # 0xe0
'[?]', # 0xe1
'[?]', # 0xe2
'[?]', # 0xe3
'[?]', # 0xe4
'[?]', # 0xe5
'0', # 0xe6
'1', # 0xe7
'2', # 0xe8
'3', # 0xe9
'4', # 0xea
'5', # 0xeb
'6', # 0xec
'7', # 0xed
'8', # 0xee
'9', # 0xef
'+10+', # 0xf0
'+100+', # 0xf1
'+1000+', # 0xf2
'[?]', # 0xf3
'[?]', # 0xf4
'[?]', # 0xf5
'[?]', # 0xf6
'[?]', # 0xf7
'[?]', # 0xf8
'[?]', # 0xf9
'[?]', # 0xfa
'[?]', # 0xfb
'[?]', # 0xfc
'[?]', # 0xfd
'[?]', # 0xfe
)

View file

@ -1,257 +0,0 @@
data = (
'[?]', # 0x00
'N', # 0x01
'N', # 0x02
'H', # 0x03
'[?]', # 0x04
'a', # 0x05
'aa', # 0x06
'i', # 0x07
'ii', # 0x08
'u', # 0x09
'uu', # 0x0a
'R', # 0x0b
'L', # 0x0c
'[?]', # 0x0d
'e', # 0x0e
'ee', # 0x0f
'ai', # 0x10
'[?]', # 0x11
'o', # 0x12
'oo', # 0x13
'au', # 0x14
'k', # 0x15
'kh', # 0x16
'g', # 0x17
'gh', # 0x18
'ng', # 0x19
'c', # 0x1a
'ch', # 0x1b
'j', # 0x1c
'jh', # 0x1d
'ny', # 0x1e
'tt', # 0x1f
'tth', # 0x20
'dd', # 0x21
'ddh', # 0x22
'nn', # 0x23
't', # 0x24
'th', # 0x25
'd', # 0x26
'dh', # 0x27
'n', # 0x28
'[?]', # 0x29
'p', # 0x2a
'ph', # 0x2b
'b', # 0x2c
'bh', # 0x2d
'm', # 0x2e
'y', # 0x2f
'r', # 0x30
'rr', # 0x31
'l', # 0x32
'll', # 0x33
'[?]', # 0x34
'v', # 0x35
'sh', # 0x36
'ss', # 0x37
's', # 0x38
'h', # 0x39
'[?]', # 0x3a
'[?]', # 0x3b
'[?]', # 0x3c
'[?]', # 0x3d
'aa', # 0x3e
'i', # 0x3f
'ii', # 0x40
'u', # 0x41
'uu', # 0x42
'R', # 0x43
'RR', # 0x44
'[?]', # 0x45
'e', # 0x46
'ee', # 0x47
'ai', # 0x48
'[?]', # 0x49
'o', # 0x4a
'oo', # 0x4b
'au', # 0x4c
'', # 0x4d
'[?]', # 0x4e
'[?]', # 0x4f
'[?]', # 0x50
'[?]', # 0x51
'[?]', # 0x52
'[?]', # 0x53
'[?]', # 0x54
'+', # 0x55
'+', # 0x56
'[?]', # 0x57
'[?]', # 0x58
'[?]', # 0x59
'[?]', # 0x5a
'[?]', # 0x5b
'[?]', # 0x5c
'[?]', # 0x5d
'[?]', # 0x5e
'[?]', # 0x5f
'RR', # 0x60
'LL', # 0x61
'[?]', # 0x62
'[?]', # 0x63
'[?]', # 0x64
'[?]', # 0x65
'0', # 0x66
'1', # 0x67
'2', # 0x68
'3', # 0x69
'4', # 0x6a
'5', # 0x6b
'6', # 0x6c
'7', # 0x6d
'8', # 0x6e
'9', # 0x6f
'[?]', # 0x70
'[?]', # 0x71
'[?]', # 0x72
'[?]', # 0x73
'[?]', # 0x74
'[?]', # 0x75
'[?]', # 0x76
'[?]', # 0x77
'[?]', # 0x78
'[?]', # 0x79
'[?]', # 0x7a
'[?]', # 0x7b
'[?]', # 0x7c
'[?]', # 0x7d
'[?]', # 0x7e
'[?]', # 0x7f
'[?]', # 0x80
'[?]', # 0x81
'N', # 0x82
'H', # 0x83
'[?]', # 0x84
'a', # 0x85
'aa', # 0x86
'i', # 0x87
'ii', # 0x88
'u', # 0x89
'uu', # 0x8a
'R', # 0x8b
'L', # 0x8c
'[?]', # 0x8d
'e', # 0x8e
'ee', # 0x8f
'ai', # 0x90
'[?]', # 0x91
'o', # 0x92
'oo', # 0x93
'au', # 0x94
'k', # 0x95
'kh', # 0x96
'g', # 0x97
'gh', # 0x98
'ng', # 0x99
'c', # 0x9a
'ch', # 0x9b
'j', # 0x9c
'jh', # 0x9d
'ny', # 0x9e
'tt', # 0x9f
'tth', # 0xa0
'dd', # 0xa1
'ddh', # 0xa2
'nn', # 0xa3
't', # 0xa4
'th', # 0xa5
'd', # 0xa6
'dh', # 0xa7
'n', # 0xa8
'[?]', # 0xa9
'p', # 0xaa
'ph', # 0xab
'b', # 0xac
'bh', # 0xad
'm', # 0xae
'y', # 0xaf
'r', # 0xb0
'rr', # 0xb1
'l', # 0xb2
'll', # 0xb3
'[?]', # 0xb4
'v', # 0xb5
'sh', # 0xb6
'ss', # 0xb7
's', # 0xb8
'h', # 0xb9
'[?]', # 0xba
'[?]', # 0xbb
'[?]', # 0xbc
'[?]', # 0xbd
'aa', # 0xbe
'i', # 0xbf
'ii', # 0xc0
'u', # 0xc1
'uu', # 0xc2
'R', # 0xc3
'RR', # 0xc4
'[?]', # 0xc5
'e', # 0xc6
'ee', # 0xc7
'ai', # 0xc8
'[?]', # 0xc9
'o', # 0xca
'oo', # 0xcb
'au', # 0xcc
'', # 0xcd
'[?]', # 0xce
'[?]', # 0xcf
'[?]', # 0xd0
'[?]', # 0xd1
'[?]', # 0xd2
'[?]', # 0xd3
'[?]', # 0xd4
'+', # 0xd5
'+', # 0xd6
'[?]', # 0xd7
'[?]', # 0xd8
'[?]', # 0xd9
'[?]', # 0xda
'[?]', # 0xdb
'[?]', # 0xdc
'[?]', # 0xdd
'lll', # 0xde
'[?]', # 0xdf
'RR', # 0xe0
'LL', # 0xe1
'[?]', # 0xe2
'[?]', # 0xe3
'[?]', # 0xe4
'[?]', # 0xe5
'0', # 0xe6
'1', # 0xe7
'2', # 0xe8
'3', # 0xe9
'4', # 0xea
'5', # 0xeb
'6', # 0xec
'7', # 0xed
'8', # 0xee
'9', # 0xef
'[?]', # 0xf0
'[?]', # 0xf1
'[?]', # 0xf2
'[?]', # 0xf3
'[?]', # 0xf4
'[?]', # 0xf5
'[?]', # 0xf6
'[?]', # 0xf7
'[?]', # 0xf8
'[?]', # 0xf9
'[?]', # 0xfa
'[?]', # 0xfb
'[?]', # 0xfc
'[?]', # 0xfd
'[?]', # 0xfe
)

View file

@ -1,257 +0,0 @@
data = (
'[?]', # 0x00
'[?]', # 0x01
'N', # 0x02
'H', # 0x03
'[?]', # 0x04
'a', # 0x05
'aa', # 0x06
'i', # 0x07
'ii', # 0x08
'u', # 0x09
'uu', # 0x0a
'R', # 0x0b
'L', # 0x0c
'[?]', # 0x0d
'e', # 0x0e
'ee', # 0x0f
'ai', # 0x10
'[?]', # 0x11
'o', # 0x12
'oo', # 0x13
'au', # 0x14
'k', # 0x15
'kh', # 0x16
'g', # 0x17
'gh', # 0x18
'ng', # 0x19
'c', # 0x1a
'ch', # 0x1b
'j', # 0x1c
'jh', # 0x1d
'ny', # 0x1e
'tt', # 0x1f
'tth', # 0x20
'dd', # 0x21
'ddh', # 0x22
'nn', # 0x23
't', # 0x24
'th', # 0x25
'd', # 0x26
'dh', # 0x27
'n', # 0x28
'[?]', # 0x29
'p', # 0x2a
'ph', # 0x2b
'b', # 0x2c
'bh', # 0x2d
'm', # 0x2e
'y', # 0x2f
'r', # 0x30
'rr', # 0x31
'l', # 0x32
'll', # 0x33
'lll', # 0x34
'v', # 0x35
'sh', # 0x36
'ss', # 0x37
's', # 0x38
'h', # 0x39
'[?]', # 0x3a
'[?]', # 0x3b
'[?]', # 0x3c
'[?]', # 0x3d
'aa', # 0x3e
'i', # 0x3f
'ii', # 0x40
'u', # 0x41
'uu', # 0x42
'R', # 0x43
'[?]', # 0x44
'[?]', # 0x45
'e', # 0x46
'ee', # 0x47
'ai', # 0x48
'', # 0x49
'o', # 0x4a
'oo', # 0x4b
'au', # 0x4c
'', # 0x4d
'[?]', # 0x4e
'[?]', # 0x4f
'[?]', # 0x50
'[?]', # 0x51
'[?]', # 0x52
'[?]', # 0x53
'[?]', # 0x54
'[?]', # 0x55
'[?]', # 0x56
'+', # 0x57
'[?]', # 0x58
'[?]', # 0x59
'[?]', # 0x5a
'[?]', # 0x5b
'[?]', # 0x5c
'[?]', # 0x5d
'[?]', # 0x5e
'[?]', # 0x5f
'RR', # 0x60
'LL', # 0x61
'[?]', # 0x62
'[?]', # 0x63
'[?]', # 0x64
'[?]', # 0x65
'0', # 0x66
'1', # 0x67
'2', # 0x68
'3', # 0x69
'4', # 0x6a
'5', # 0x6b
'6', # 0x6c
'7', # 0x6d
'8', # 0x6e
'9', # 0x6f
'[?]', # 0x70
'[?]', # 0x71
'[?]', # 0x72
'[?]', # 0x73
'[?]', # 0x74
'[?]', # 0x75
'[?]', # 0x76
'[?]', # 0x77
'[?]', # 0x78
'[?]', # 0x79
'[?]', # 0x7a
'[?]', # 0x7b
'[?]', # 0x7c
'[?]', # 0x7d
'[?]', # 0x7e
'[?]', # 0x7f
'[?]', # 0x80
'[?]', # 0x81
'N', # 0x82
'H', # 0x83
'[?]', # 0x84
'a', # 0x85
'aa', # 0x86
'ae', # 0x87
'aae', # 0x88
'i', # 0x89
'ii', # 0x8a
'u', # 0x8b
'uu', # 0x8c
'R', # 0x8d
'RR', # 0x8e
'L', # 0x8f
'LL', # 0x90
'e', # 0x91
'ee', # 0x92
'ai', # 0x93
'o', # 0x94
'oo', # 0x95
'au', # 0x96
'[?]', # 0x97
'[?]', # 0x98
'[?]', # 0x99
'k', # 0x9a
'kh', # 0x9b
'g', # 0x9c
'gh', # 0x9d
'ng', # 0x9e
'nng', # 0x9f
'c', # 0xa0
'ch', # 0xa1
'j', # 0xa2
'jh', # 0xa3
'ny', # 0xa4
'jny', # 0xa5
'nyj', # 0xa6
'tt', # 0xa7
'tth', # 0xa8
'dd', # 0xa9
'ddh', # 0xaa
'nn', # 0xab
'nndd', # 0xac
't', # 0xad
'th', # 0xae
'd', # 0xaf
'dh', # 0xb0
'n', # 0xb1
'[?]', # 0xb2
'nd', # 0xb3
'p', # 0xb4
'ph', # 0xb5
'b', # 0xb6
'bh', # 0xb7
'm', # 0xb8
'mb', # 0xb9
'y', # 0xba
'r', # 0xbb
'[?]', # 0xbc
'l', # 0xbd
'[?]', # 0xbe
'[?]', # 0xbf
'v', # 0xc0
'sh', # 0xc1
'ss', # 0xc2
's', # 0xc3
'h', # 0xc4
'll', # 0xc5
'f', # 0xc6
'[?]', # 0xc7
'[?]', # 0xc8
'[?]', # 0xc9
'', # 0xca
'[?]', # 0xcb
'[?]', # 0xcc
'[?]', # 0xcd
'[?]', # 0xce
'aa', # 0xcf
'ae', # 0xd0
'aae', # 0xd1
'i', # 0xd2
'ii', # 0xd3
'u', # 0xd4
'[?]', # 0xd5
'uu', # 0xd6
'[?]', # 0xd7
'R', # 0xd8
'e', # 0xd9
'ee', # 0xda
'ai', # 0xdb
'o', # 0xdc
'oo', # 0xdd
'au', # 0xde
'L', # 0xdf
'[?]', # 0xe0
'[?]', # 0xe1
'[?]', # 0xe2
'[?]', # 0xe3
'[?]', # 0xe4
'[?]', # 0xe5
'[?]', # 0xe6
'[?]', # 0xe7
'[?]', # 0xe8
'[?]', # 0xe9
'[?]', # 0xea
'[?]', # 0xeb
'[?]', # 0xec
'[?]', # 0xed
'[?]', # 0xee
'[?]', # 0xef
'[?]', # 0xf0
'[?]', # 0xf1
'RR', # 0xf2
'LL', # 0xf3
' . ', # 0xf4
'[?]', # 0xf5
'[?]', # 0xf6
'[?]', # 0xf7
'[?]', # 0xf8
'[?]', # 0xf9
'[?]', # 0xfa
'[?]', # 0xfb
'[?]', # 0xfc
'[?]', # 0xfd
'[?]', # 0xfe
)

View file

@ -1,257 +0,0 @@
data = (
'[?]', # 0x00
'k', # 0x01
'kh', # 0x02
'kh', # 0x03
'kh', # 0x04
'kh', # 0x05
'kh', # 0x06
'ng', # 0x07
'cch', # 0x08
'ch', # 0x09
'ch', # 0x0a
'ch', # 0x0b
'ch', # 0x0c
'y', # 0x0d
'd', # 0x0e
't', # 0x0f
'th', # 0x10
'th', # 0x11
'th', # 0x12
'n', # 0x13
'd', # 0x14
't', # 0x15
'th', # 0x16
'th', # 0x17
'th', # 0x18
'n', # 0x19
'b', # 0x1a
'p', # 0x1b
'ph', # 0x1c
'f', # 0x1d
'ph', # 0x1e
'f', # 0x1f
'ph', # 0x20
'm', # 0x21
'y', # 0x22
'r', # 0x23
'R', # 0x24
'l', # 0x25
'L', # 0x26
'w', # 0x27
's', # 0x28
's', # 0x29
's', # 0x2a
'h', # 0x2b
'l', # 0x2c
'`', # 0x2d
'h', # 0x2e
'~', # 0x2f
'a', # 0x30
'a', # 0x31
'aa', # 0x32
'am', # 0x33
'i', # 0x34
'ii', # 0x35
'ue', # 0x36
'uue', # 0x37
'u', # 0x38
'uu', # 0x39
'\'', # 0x3a
'[?]', # 0x3b
'[?]', # 0x3c
'[?]', # 0x3d
'[?]', # 0x3e
'Bh.', # 0x3f
'e', # 0x40
'ae', # 0x41
'o', # 0x42
'ai', # 0x43
'ai', # 0x44
'ao', # 0x45
'+', # 0x46
'', # 0x47
'', # 0x48
'', # 0x49
'', # 0x4a
'', # 0x4b
'', # 0x4c
'M', # 0x4d
'', # 0x4e
' * ', # 0x4f
'0', # 0x50
'1', # 0x51
'2', # 0x52
'3', # 0x53
'4', # 0x54
'5', # 0x55
'6', # 0x56
'7', # 0x57
'8', # 0x58
'9', # 0x59
' // ', # 0x5a
' /// ', # 0x5b
'[?]', # 0x5c
'[?]', # 0x5d
'[?]', # 0x5e
'[?]', # 0x5f
'[?]', # 0x60
'[?]', # 0x61
'[?]', # 0x62
'[?]', # 0x63
'[?]', # 0x64
'[?]', # 0x65
'[?]', # 0x66
'[?]', # 0x67
'[?]', # 0x68
'[?]', # 0x69
'[?]', # 0x6a
'[?]', # 0x6b
'[?]', # 0x6c
'[?]', # 0x6d
'[?]', # 0x6e
'[?]', # 0x6f
'[?]', # 0x70
'[?]', # 0x71
'[?]', # 0x72
'[?]', # 0x73
'[?]', # 0x74
'[?]', # 0x75
'[?]', # 0x76
'[?]', # 0x77
'[?]', # 0x78
'[?]', # 0x79
'[?]', # 0x7a
'[?]', # 0x7b
'[?]', # 0x7c
'[?]', # 0x7d
'[?]', # 0x7e
'[?]', # 0x7f
'[?]', # 0x80
'k', # 0x81
'kh', # 0x82
'[?]', # 0x83
'kh', # 0x84
'[?]', # 0x85
'[?]', # 0x86
'ng', # 0x87
'ch', # 0x88
'[?]', # 0x89
's', # 0x8a
'[?]', # 0x8b
'[?]', # 0x8c
'ny', # 0x8d
'[?]', # 0x8e
'[?]', # 0x8f
'[?]', # 0x90
'[?]', # 0x91
'[?]', # 0x92
'[?]', # 0x93
'd', # 0x94
'h', # 0x95
'th', # 0x96
'th', # 0x97
'[?]', # 0x98
'n', # 0x99
'b', # 0x9a
'p', # 0x9b
'ph', # 0x9c
'f', # 0x9d
'ph', # 0x9e
'f', # 0x9f
'[?]', # 0xa0
'm', # 0xa1
'y', # 0xa2
'r', # 0xa3
'[?]', # 0xa4
'l', # 0xa5
'[?]', # 0xa6
'w', # 0xa7
'[?]', # 0xa8
'[?]', # 0xa9
's', # 0xaa
'h', # 0xab
'[?]', # 0xac
'`', # 0xad
'', # 0xae
'~', # 0xaf
'a', # 0xb0
'', # 0xb1
'aa', # 0xb2
'am', # 0xb3
'i', # 0xb4
'ii', # 0xb5
'y', # 0xb6
'yy', # 0xb7
'u', # 0xb8
'uu', # 0xb9
'[?]', # 0xba
'o', # 0xbb
'l', # 0xbc
'ny', # 0xbd
'[?]', # 0xbe
'[?]', # 0xbf
'e', # 0xc0
'ei', # 0xc1
'o', # 0xc2
'ay', # 0xc3
'ai', # 0xc4
'[?]', # 0xc5
'+', # 0xc6
'[?]', # 0xc7
'', # 0xc8
'', # 0xc9
'', # 0xca
'', # 0xcb
'', # 0xcc
'M', # 0xcd
'[?]', # 0xce
'[?]', # 0xcf
'0', # 0xd0
'1', # 0xd1
'2', # 0xd2
'3', # 0xd3
'4', # 0xd4
'5', # 0xd5
'6', # 0xd6
'7', # 0xd7
'8', # 0xd8
'9', # 0xd9
'[?]', # 0xda
'[?]', # 0xdb
'hn', # 0xdc
'hm', # 0xdd
'[?]', # 0xde
'[?]', # 0xdf
'[?]', # 0xe0
'[?]', # 0xe1
'[?]', # 0xe2
'[?]', # 0xe3
'[?]', # 0xe4
'[?]', # 0xe5
'[?]', # 0xe6
'[?]', # 0xe7
'[?]', # 0xe8
'[?]', # 0xe9
'[?]', # 0xea
'[?]', # 0xeb
'[?]', # 0xec
'[?]', # 0xed
'[?]', # 0xee
'[?]', # 0xef
'[?]', # 0xf0
'[?]', # 0xf1
'[?]', # 0xf2
'[?]', # 0xf3
'[?]', # 0xf4
'[?]', # 0xf5
'[?]', # 0xf6
'[?]', # 0xf7
'[?]', # 0xf8
'[?]', # 0xf9
'[?]', # 0xfa
'[?]', # 0xfb
'[?]', # 0xfc
'[?]', # 0xfd
'[?]', # 0xfe
)

View file

@ -1,257 +0,0 @@
data = (
'AUM', # 0x00
'', # 0x01
'', # 0x02
'', # 0x03
'', # 0x04
'', # 0x05
'', # 0x06
'', # 0x07
' // ', # 0x08
' * ', # 0x09
'', # 0x0a
'-', # 0x0b
' / ', # 0x0c
' / ', # 0x0d
' // ', # 0x0e
' -/ ', # 0x0f
' +/ ', # 0x10
' X/ ', # 0x11
' /XX/ ', # 0x12
' /X/ ', # 0x13
', ', # 0x14
'', # 0x15
'', # 0x16
'', # 0x17
'', # 0x18
'', # 0x19
'', # 0x1a
'', # 0x1b
'', # 0x1c
'', # 0x1d
'', # 0x1e
'', # 0x1f
'0', # 0x20
'1', # 0x21
'2', # 0x22
'3', # 0x23
'4', # 0x24
'5', # 0x25
'6', # 0x26
'7', # 0x27
'8', # 0x28
'9', # 0x29
'.5', # 0x2a
'1.5', # 0x2b
'2.5', # 0x2c
'3.5', # 0x2d
'4.5', # 0x2e
'5.5', # 0x2f
'6.5', # 0x30
'7.5', # 0x31
'8.5', # 0x32
'-.5', # 0x33
'+', # 0x34
'*', # 0x35
'^', # 0x36
'_', # 0x37
'', # 0x38
'~', # 0x39
'[?]', # 0x3a
']', # 0x3b
'[[', # 0x3c
']]', # 0x3d
'', # 0x3e
'', # 0x3f
'k', # 0x40
'kh', # 0x41
'g', # 0x42
'gh', # 0x43
'ng', # 0x44
'c', # 0x45
'ch', # 0x46
'j', # 0x47
'[?]', # 0x48
'ny', # 0x49
'tt', # 0x4a
'tth', # 0x4b
'dd', # 0x4c
'ddh', # 0x4d
'nn', # 0x4e
't', # 0x4f
'th', # 0x50
'd', # 0x51
'dh', # 0x52
'n', # 0x53
'p', # 0x54
'ph', # 0x55
'b', # 0x56
'bh', # 0x57
'm', # 0x58
'ts', # 0x59
'tsh', # 0x5a
'dz', # 0x5b
'dzh', # 0x5c
'w', # 0x5d
'zh', # 0x5e
'z', # 0x5f
'\'', # 0x60
'y', # 0x61
'r', # 0x62
'l', # 0x63
'sh', # 0x64
'ssh', # 0x65
's', # 0x66
'h', # 0x67
'a', # 0x68
'kss', # 0x69
'r', # 0x6a
'[?]', # 0x6b
'[?]', # 0x6c
'[?]', # 0x6d
'[?]', # 0x6e
'[?]', # 0x6f
'[?]', # 0x70
'aa', # 0x71
'i', # 0x72
'ii', # 0x73
'u', # 0x74
'uu', # 0x75
'R', # 0x76
'RR', # 0x77
'L', # 0x78
'LL', # 0x79
'e', # 0x7a
'ee', # 0x7b
'o', # 0x7c
'oo', # 0x7d
'M', # 0x7e
'H', # 0x7f
'i', # 0x80
'ii', # 0x81
'', # 0x82
'', # 0x83
'', # 0x84
'', # 0x85
'', # 0x86
'', # 0x87
'', # 0x88
'', # 0x89
'', # 0x8a
'', # 0x8b
'[?]', # 0x8c
'[?]', # 0x8d
'[?]', # 0x8e
'[?]', # 0x8f
'k', # 0x90
'kh', # 0x91
'g', # 0x92
'gh', # 0x93
'ng', # 0x94
'c', # 0x95
'ch', # 0x96
'j', # 0x97
'[?]', # 0x98
'ny', # 0x99
'tt', # 0x9a
'tth', # 0x9b
'dd', # 0x9c
'ddh', # 0x9d
'nn', # 0x9e
't', # 0x9f
'th', # 0xa0
'd', # 0xa1
'dh', # 0xa2
'n', # 0xa3
'p', # 0xa4
'ph', # 0xa5
'b', # 0xa6
'bh', # 0xa7
'm', # 0xa8
'ts', # 0xa9
'tsh', # 0xaa
'dz', # 0xab
'dzh', # 0xac
'w', # 0xad
'zh', # 0xae
'z', # 0xaf
'\'', # 0xb0
'y', # 0xb1
'r', # 0xb2
'l', # 0xb3
'sh', # 0xb4
'ss', # 0xb5
's', # 0xb6
'h', # 0xb7
'a', # 0xb8
'kss', # 0xb9
'w', # 0xba
'y', # 0xbb
'r', # 0xbc
'[?]', # 0xbd
'X', # 0xbe
' :X: ', # 0xbf
' /O/ ', # 0xc0
' /o/ ', # 0xc1
' \\o\\ ', # 0xc2
' (O) ', # 0xc3
'', # 0xc4
'', # 0xc5
'', # 0xc6
'', # 0xc7
'', # 0xc8
'', # 0xc9
'', # 0xca
'', # 0xcb
'', # 0xcc
'[?]', # 0xcd
'[?]', # 0xce
'', # 0xcf
'[?]', # 0xd0
'[?]', # 0xd1
'[?]', # 0xd2
'[?]', # 0xd3
'[?]', # 0xd4
'[?]', # 0xd5
'[?]', # 0xd6
'[?]', # 0xd7
'[?]', # 0xd8
'[?]', # 0xd9
'[?]', # 0xda
'[?]', # 0xdb
'[?]', # 0xdc
'[?]', # 0xdd
'[?]', # 0xde
'[?]', # 0xdf
'[?]', # 0xe0
'[?]', # 0xe1
'[?]', # 0xe2
'[?]', # 0xe3
'[?]', # 0xe4
'[?]', # 0xe5
'[?]', # 0xe6
'[?]', # 0xe7
'[?]', # 0xe8
'[?]', # 0xe9
'[?]', # 0xea
'[?]', # 0xeb
'[?]', # 0xec
'[?]', # 0xed
'[?]', # 0xee
'[?]', # 0xef
'[?]', # 0xf0
'[?]', # 0xf1
'[?]', # 0xf2
'[?]', # 0xf3
'[?]', # 0xf4
'[?]', # 0xf5
'[?]', # 0xf6
'[?]', # 0xf7
'[?]', # 0xf8
'[?]', # 0xf9
'[?]', # 0xfa
'[?]', # 0xfb
'[?]', # 0xfc
'[?]', # 0xfd
'[?]', # 0xfe
)

View file

@ -1,257 +0,0 @@
data = (
'k', # 0x00
'kh', # 0x01
'g', # 0x02
'gh', # 0x03
'ng', # 0x04
'c', # 0x05
'ch', # 0x06
'j', # 0x07
'jh', # 0x08
'ny', # 0x09
'nny', # 0x0a
'tt', # 0x0b
'tth', # 0x0c
'dd', # 0x0d
'ddh', # 0x0e
'nn', # 0x0f
'tt', # 0x10
'th', # 0x11
'd', # 0x12
'dh', # 0x13
'n', # 0x14
'p', # 0x15
'ph', # 0x16
'b', # 0x17
'bh', # 0x18
'm', # 0x19
'y', # 0x1a
'r', # 0x1b
'l', # 0x1c
'w', # 0x1d
's', # 0x1e
'h', # 0x1f
'll', # 0x20
'a', # 0x21
'[?]', # 0x22
'i', # 0x23
'ii', # 0x24
'u', # 0x25
'uu', # 0x26
'e', # 0x27
'[?]', # 0x28
'o', # 0x29
'au', # 0x2a
'[?]', # 0x2b
'aa', # 0x2c
'i', # 0x2d
'ii', # 0x2e
'u', # 0x2f
'uu', # 0x30
'e', # 0x31
'ai', # 0x32
'[?]', # 0x33
'[?]', # 0x34
'[?]', # 0x35
'N', # 0x36
'\'', # 0x37
':', # 0x38
'', # 0x39
'[?]', # 0x3a
'[?]', # 0x3b
'[?]', # 0x3c
'[?]', # 0x3d
'[?]', # 0x3e
'[?]', # 0x3f
'0', # 0x40
'1', # 0x41
'2', # 0x42
'3', # 0x43
'4', # 0x44
'5', # 0x45
'6', # 0x46
'7', # 0x47
'8', # 0x48
'9', # 0x49
' / ', # 0x4a
' // ', # 0x4b
'n*', # 0x4c
'r*', # 0x4d
'l*', # 0x4e
'e*', # 0x4f
'sh', # 0x50
'ss', # 0x51
'R', # 0x52
'RR', # 0x53
'L', # 0x54
'LL', # 0x55
'R', # 0x56
'RR', # 0x57
'L', # 0x58
'LL', # 0x59
'[?]', # 0x5a
'[?]', # 0x5b
'[?]', # 0x5c
'[?]', # 0x5d
'[?]', # 0x5e
'[?]', # 0x5f
'[?]', # 0x60
'[?]', # 0x61
'[?]', # 0x62
'[?]', # 0x63
'[?]', # 0x64
'[?]', # 0x65
'[?]', # 0x66
'[?]', # 0x67
'[?]', # 0x68
'[?]', # 0x69
'[?]', # 0x6a
'[?]', # 0x6b
'[?]', # 0x6c
'[?]', # 0x6d
'[?]', # 0x6e
'[?]', # 0x6f
'[?]', # 0x70
'[?]', # 0x71
'[?]', # 0x72
'[?]', # 0x73
'[?]', # 0x74
'[?]', # 0x75
'[?]', # 0x76
'[?]', # 0x77
'[?]', # 0x78
'[?]', # 0x79
'[?]', # 0x7a
'[?]', # 0x7b
'[?]', # 0x7c
'[?]', # 0x7d
'[?]', # 0x7e
'[?]', # 0x7f
'[?]', # 0x80
'[?]', # 0x81
'[?]', # 0x82
'[?]', # 0x83
'[?]', # 0x84
'[?]', # 0x85
'[?]', # 0x86
'[?]', # 0x87
'[?]', # 0x88
'[?]', # 0x89
'[?]', # 0x8a
'[?]', # 0x8b
'[?]', # 0x8c
'[?]', # 0x8d
'[?]', # 0x8e
'[?]', # 0x8f
'[?]', # 0x90
'[?]', # 0x91
'[?]', # 0x92
'[?]', # 0x93
'[?]', # 0x94
'[?]', # 0x95
'[?]', # 0x96
'[?]', # 0x97
'[?]', # 0x98
'[?]', # 0x99
'[?]', # 0x9a
'[?]', # 0x9b
'[?]', # 0x9c
'[?]', # 0x9d
'[?]', # 0x9e
'[?]', # 0x9f
'A', # 0xa0
'B', # 0xa1
'G', # 0xa2
'D', # 0xa3
'E', # 0xa4
'V', # 0xa5
'Z', # 0xa6
'T`', # 0xa7
'I', # 0xa8
'K', # 0xa9
'L', # 0xaa
'M', # 0xab
'N', # 0xac
'O', # 0xad
'P', # 0xae
'Zh', # 0xaf
'R', # 0xb0
'S', # 0xb1
'T', # 0xb2
'U', # 0xb3
'P`', # 0xb4
'K`', # 0xb5
'G\'', # 0xb6
'Q', # 0xb7
'Sh', # 0xb8
'Ch`', # 0xb9
'C`', # 0xba
'Z\'', # 0xbb
'C', # 0xbc
'Ch', # 0xbd
'X', # 0xbe
'J', # 0xbf
'H', # 0xc0
'E', # 0xc1
'Y', # 0xc2
'W', # 0xc3
'Xh', # 0xc4
'OE', # 0xc5
'[?]', # 0xc6
'[?]', # 0xc7
'[?]', # 0xc8
'[?]', # 0xc9
'[?]', # 0xca
'[?]', # 0xcb
'[?]', # 0xcc
'[?]', # 0xcd
'[?]', # 0xce
'[?]', # 0xcf
'a', # 0xd0
'b', # 0xd1
'g', # 0xd2
'd', # 0xd3
'e', # 0xd4
'v', # 0xd5
'z', # 0xd6
't`', # 0xd7
'i', # 0xd8
'k', # 0xd9
'l', # 0xda
'm', # 0xdb
'n', # 0xdc
'o', # 0xdd
'p', # 0xde
'zh', # 0xdf
'r', # 0xe0
's', # 0xe1
't', # 0xe2
'u', # 0xe3
'p`', # 0xe4
'k`', # 0xe5
'g\'', # 0xe6
'q', # 0xe7
'sh', # 0xe8
'ch`', # 0xe9
'c`', # 0xea
'z\'', # 0xeb
'c', # 0xec
'ch', # 0xed
'x', # 0xee
'j', # 0xef
'h', # 0xf0
'e', # 0xf1
'y', # 0xf2
'w', # 0xf3
'xh', # 0xf4
'oe', # 0xf5
'f', # 0xf6
'[?]', # 0xf7
'[?]', # 0xf8
'[?]', # 0xf9
'[?]', # 0xfa
' // ', # 0xfb
'[?]', # 0xfc
'[?]', # 0xfd
'[?]', # 0xfe
)

View file

@ -1,257 +0,0 @@
data = (
'g', # 0x00
'gg', # 0x01
'n', # 0x02
'd', # 0x03
'dd', # 0x04
'r', # 0x05
'm', # 0x06
'b', # 0x07
'bb', # 0x08
's', # 0x09
'ss', # 0x0a
'', # 0x0b
'j', # 0x0c
'jj', # 0x0d
'c', # 0x0e
'k', # 0x0f
't', # 0x10
'p', # 0x11
'h', # 0x12
'ng', # 0x13
'nn', # 0x14
'nd', # 0x15
'nb', # 0x16
'dg', # 0x17
'rn', # 0x18
'rr', # 0x19
'rh', # 0x1a
'rN', # 0x1b
'mb', # 0x1c
'mN', # 0x1d
'bg', # 0x1e
'bn', # 0x1f
'', # 0x20
'bs', # 0x21
'bsg', # 0x22
'bst', # 0x23
'bsb', # 0x24
'bss', # 0x25
'bsj', # 0x26
'bj', # 0x27
'bc', # 0x28
'bt', # 0x29
'bp', # 0x2a
'bN', # 0x2b
'bbN', # 0x2c
'sg', # 0x2d
'sn', # 0x2e
'sd', # 0x2f
'sr', # 0x30
'sm', # 0x31
'sb', # 0x32
'sbg', # 0x33
'sss', # 0x34
's', # 0x35
'sj', # 0x36
'sc', # 0x37
'sk', # 0x38
'st', # 0x39
'sp', # 0x3a
'sh', # 0x3b
'', # 0x3c
'', # 0x3d
'', # 0x3e
'', # 0x3f
'Z', # 0x40
'g', # 0x41
'd', # 0x42
'm', # 0x43
'b', # 0x44
's', # 0x45
'Z', # 0x46
'', # 0x47
'j', # 0x48
'c', # 0x49
't', # 0x4a
'p', # 0x4b
'N', # 0x4c
'j', # 0x4d
'', # 0x4e
'', # 0x4f
'', # 0x50
'', # 0x51
'ck', # 0x52
'ch', # 0x53
'', # 0x54
'', # 0x55
'pb', # 0x56
'pN', # 0x57
'hh', # 0x58
'Q', # 0x59
'[?]', # 0x5a
'[?]', # 0x5b
'[?]', # 0x5c
'[?]', # 0x5d
'[?]', # 0x5e
'', # 0x5f
'', # 0x60
'a', # 0x61
'ae', # 0x62
'ya', # 0x63
'yae', # 0x64
'eo', # 0x65
'e', # 0x66
'yeo', # 0x67
'ye', # 0x68
'o', # 0x69
'wa', # 0x6a
'wae', # 0x6b
'oe', # 0x6c
'yo', # 0x6d
'u', # 0x6e
'weo', # 0x6f
'we', # 0x70
'wi', # 0x71
'yu', # 0x72
'eu', # 0x73
'yi', # 0x74
'i', # 0x75
'a-o', # 0x76
'a-u', # 0x77
'ya-o', # 0x78
'ya-yo', # 0x79
'eo-o', # 0x7a
'eo-u', # 0x7b
'eo-eu', # 0x7c
'yeo-o', # 0x7d
'yeo-u', # 0x7e
'o-eo', # 0x7f
'o-e', # 0x80
'o-ye', # 0x81
'o-o', # 0x82
'o-u', # 0x83
'yo-ya', # 0x84
'yo-yae', # 0x85
'yo-yeo', # 0x86
'yo-o', # 0x87
'yo-i', # 0x88
'u-a', # 0x89
'u-ae', # 0x8a
'u-eo-eu', # 0x8b
'u-ye', # 0x8c
'u-u', # 0x8d
'yu-a', # 0x8e
'yu-eo', # 0x8f
'yu-e', # 0x90
'yu-yeo', # 0x91
'yu-ye', # 0x92
'yu-u', # 0x93
'yu-i', # 0x94
'eu-u', # 0x95
'eu-eu', # 0x96
'yi-u', # 0x97
'i-a', # 0x98
'i-ya', # 0x99
'i-o', # 0x9a
'i-u', # 0x9b
'i-eu', # 0x9c
'i-U', # 0x9d
'U', # 0x9e
'U-eo', # 0x9f
'U-u', # 0xa0
'U-i', # 0xa1
'UU', # 0xa2
'[?]', # 0xa3
'[?]', # 0xa4
'[?]', # 0xa5
'[?]', # 0xa6
'[?]', # 0xa7
'g', # 0xa8
'gg', # 0xa9
'gs', # 0xaa
'n', # 0xab
'nj', # 0xac
'nh', # 0xad
'd', # 0xae
'l', # 0xaf
'lg', # 0xb0
'lm', # 0xb1
'lb', # 0xb2
'ls', # 0xb3
'lt', # 0xb4
'lp', # 0xb5
'lh', # 0xb6
'm', # 0xb7
'b', # 0xb8
'bs', # 0xb9
's', # 0xba
'ss', # 0xbb
'ng', # 0xbc
'j', # 0xbd
'c', # 0xbe
'k', # 0xbf
't', # 0xc0
'p', # 0xc1
'h', # 0xc2
'gl', # 0xc3
'gsg', # 0xc4
'ng', # 0xc5
'nd', # 0xc6
'ns', # 0xc7
'nZ', # 0xc8
'nt', # 0xc9
'dg', # 0xca
'tl', # 0xcb
'lgs', # 0xcc
'ln', # 0xcd
'ld', # 0xce
'lth', # 0xcf
'll', # 0xd0
'lmg', # 0xd1
'lms', # 0xd2
'lbs', # 0xd3
'lbh', # 0xd4
'rNp', # 0xd5
'lss', # 0xd6
'lZ', # 0xd7
'lk', # 0xd8
'lQ', # 0xd9
'mg', # 0xda
'ml', # 0xdb
'mb', # 0xdc
'ms', # 0xdd
'mss', # 0xde
'mZ', # 0xdf
'mc', # 0xe0
'mh', # 0xe1
'mN', # 0xe2
'bl', # 0xe3
'bp', # 0xe4
'ph', # 0xe5
'pN', # 0xe6
'sg', # 0xe7
'sd', # 0xe8
'sl', # 0xe9
'sb', # 0xea
'Z', # 0xeb
'g', # 0xec
'ss', # 0xed
'', # 0xee
'kh', # 0xef
'N', # 0xf0
'Ns', # 0xf1
'NZ', # 0xf2
'pb', # 0xf3
'pN', # 0xf4
'hn', # 0xf5
'hl', # 0xf6
'hm', # 0xf7
'hb', # 0xf8
'Q', # 0xf9
'[?]', # 0xfa
'[?]', # 0xfb
'[?]', # 0xfc
'[?]', # 0xfd
'[?]', # 0xfe
)

View file

@ -1,258 +0,0 @@
data = (
'ha', # 0x00
'hu', # 0x01
'hi', # 0x02
'haa', # 0x03
'hee', # 0x04
'he', # 0x05
'ho', # 0x06
'[?]', # 0x07
'la', # 0x08
'lu', # 0x09
'li', # 0x0a
'laa', # 0x0b
'lee', # 0x0c
'le', # 0x0d
'lo', # 0x0e
'lwa', # 0x0f
'hha', # 0x10
'hhu', # 0x11
'hhi', # 0x12
'hhaa', # 0x13
'hhee', # 0x14
'hhe', # 0x15
'hho', # 0x16
'hhwa', # 0x17
'ma', # 0x18
'mu', # 0x19
'mi', # 0x1a
'maa', # 0x1b
'mee', # 0x1c
'me', # 0x1d
'mo', # 0x1e
'mwa', # 0x1f
'sza', # 0x20
'szu', # 0x21
'szi', # 0x22
'szaa', # 0x23
'szee', # 0x24
'sze', # 0x25
'szo', # 0x26
'szwa', # 0x27
'ra', # 0x28
'ru', # 0x29
'ri', # 0x2a
'raa', # 0x2b
'ree', # 0x2c
're', # 0x2d
'ro', # 0x2e
'rwa', # 0x2f
'sa', # 0x30
'su', # 0x31
'si', # 0x32
'saa', # 0x33
'see', # 0x34
'se', # 0x35
'so', # 0x36
'swa', # 0x37
'sha', # 0x38
'shu', # 0x39
'shi', # 0x3a
'shaa', # 0x3b
'shee', # 0x3c
'she', # 0x3d
'sho', # 0x3e
'shwa', # 0x3f
'qa', # 0x40
'qu', # 0x41
'qi', # 0x42
'qaa', # 0x43
'qee', # 0x44
'qe', # 0x45
'qo', # 0x46
'[?]', # 0x47
'qwa', # 0x48
'[?]', # 0x49
'qwi', # 0x4a
'qwaa', # 0x4b
'qwee', # 0x4c
'qwe', # 0x4d
'[?]', # 0x4e
'[?]', # 0x4f
'qha', # 0x50
'qhu', # 0x51
'qhi', # 0x52
'qhaa', # 0x53
'qhee', # 0x54
'qhe', # 0x55
'qho', # 0x56
'[?]', # 0x57
'qhwa', # 0x58
'[?]', # 0x59
'qhwi', # 0x5a
'qhwaa', # 0x5b
'qhwee', # 0x5c
'qhwe', # 0x5d
'[?]', # 0x5e
'[?]', # 0x5f
'ba', # 0x60
'bu', # 0x61
'bi', # 0x62
'baa', # 0x63
'bee', # 0x64
'be', # 0x65
'bo', # 0x66
'bwa', # 0x67
'va', # 0x68
'vu', # 0x69
'vi', # 0x6a
'vaa', # 0x6b
'vee', # 0x6c
've', # 0x6d
'vo', # 0x6e
'vwa', # 0x6f
'ta', # 0x70
'tu', # 0x71
'ti', # 0x72
'taa', # 0x73
'tee', # 0x74
'te', # 0x75
'to', # 0x76
'twa', # 0x77
'ca', # 0x78
'cu', # 0x79
'ci', # 0x7a
'caa', # 0x7b
'cee', # 0x7c
'ce', # 0x7d
'co', # 0x7e
'cwa', # 0x7f
'xa', # 0x80
'xu', # 0x81
'xi', # 0x82
'xaa', # 0x83
'xee', # 0x84
'xe', # 0x85
'xo', # 0x86
'[?]', # 0x87
'xwa', # 0x88
'[?]', # 0x89
'xwi', # 0x8a
'xwaa', # 0x8b
'xwee', # 0x8c
'xwe', # 0x8d
'[?]', # 0x8e
'[?]', # 0x8f
'na', # 0x90
'nu', # 0x91
'ni', # 0x92
'naa', # 0x93
'nee', # 0x94
'ne', # 0x95
'no', # 0x96
'nwa', # 0x97
'nya', # 0x98
'nyu', # 0x99
'nyi', # 0x9a
'nyaa', # 0x9b
'nyee', # 0x9c
'nye', # 0x9d
'nyo', # 0x9e
'nywa', # 0x9f
'\'a', # 0xa0
'\'u', # 0xa1
'[?]', # 0xa2
'\'aa', # 0xa3
'\'ee', # 0xa4
'\'e', # 0xa5
'\'o', # 0xa6
'\'wa', # 0xa7
'ka', # 0xa8
'ku', # 0xa9
'ki', # 0xaa
'kaa', # 0xab
'kee', # 0xac
'ke', # 0xad
'ko', # 0xae
'[?]', # 0xaf
'kwa', # 0xb0
'[?]', # 0xb1
'kwi', # 0xb2
'kwaa', # 0xb3
'kwee', # 0xb4
'kwe', # 0xb5
'[?]', # 0xb6
'[?]', # 0xb7
'kxa', # 0xb8
'kxu', # 0xb9
'kxi', # 0xba
'kxaa', # 0xbb
'kxee', # 0xbc
'kxe', # 0xbd
'kxo', # 0xbe
'[?]', # 0xbf
'kxwa', # 0xc0
'[?]', # 0xc1
'kxwi', # 0xc2
'kxwaa', # 0xc3
'kxwee', # 0xc4
'kxwe', # 0xc5
'[?]', # 0xc6
'[?]', # 0xc7
'wa', # 0xc8
'wu', # 0xc9
'wi', # 0xca
'waa', # 0xcb
'wee', # 0xcc
'we', # 0xcd
'wo', # 0xce
'[?]', # 0xcf
'`a', # 0xd0
'`u', # 0xd1
'`i', # 0xd2
'`aa', # 0xd3
'`ee', # 0xd4
'`e', # 0xd5
'`o', # 0xd6
'[?]', # 0xd7
'za', # 0xd8
'zu', # 0xd9
'zi', # 0xda
'zaa', # 0xdb
'zee', # 0xdc
'ze', # 0xdd
'zo', # 0xde
'zwa', # 0xdf
'zha', # 0xe0
'zhu', # 0xe1
'zhi', # 0xe2
'zhaa', # 0xe3
'zhee', # 0xe4
'zhe', # 0xe5
'zho', # 0xe6
'zhwa', # 0xe7
'ya', # 0xe8
'yu', # 0xe9
'yi', # 0xea
'yaa', # 0xeb
'yee', # 0xec
'ye', # 0xed
'yo', # 0xee
'[?]', # 0xef
'da', # 0xf0
'du', # 0xf1
'di', # 0xf2
'daa', # 0xf3
'dee', # 0xf4
'de', # 0xf5
'do', # 0xf6
'dwa', # 0xf7
'dda', # 0xf8
'ddu', # 0xf9
'ddi', # 0xfa
'ddaa', # 0xfb
'ddee', # 0xfc
'dde', # 0xfd
'ddo', # 0xfe
'ddwa', # 0xff
)

View file

@ -1,257 +0,0 @@
data = (
'ja', # 0x00
'ju', # 0x01
'ji', # 0x02
'jaa', # 0x03
'jee', # 0x04
'je', # 0x05
'jo', # 0x06
'jwa', # 0x07
'ga', # 0x08
'gu', # 0x09
'gi', # 0x0a
'gaa', # 0x0b
'gee', # 0x0c
'ge', # 0x0d
'go', # 0x0e
'[?]', # 0x0f
'gwa', # 0x10
'[?]', # 0x11
'gwi', # 0x12
'gwaa', # 0x13
'gwee', # 0x14
'gwe', # 0x15
'[?]', # 0x16
'[?]', # 0x17
'gga', # 0x18
'ggu', # 0x19
'ggi', # 0x1a
'ggaa', # 0x1b
'ggee', # 0x1c
'gge', # 0x1d
'ggo', # 0x1e
'[?]', # 0x1f
'tha', # 0x20
'thu', # 0x21
'thi', # 0x22
'thaa', # 0x23
'thee', # 0x24
'the', # 0x25
'tho', # 0x26
'thwa', # 0x27
'cha', # 0x28
'chu', # 0x29
'chi', # 0x2a
'chaa', # 0x2b
'chee', # 0x2c
'che', # 0x2d
'cho', # 0x2e
'chwa', # 0x2f
'pha', # 0x30
'phu', # 0x31
'phi', # 0x32
'phaa', # 0x33
'phee', # 0x34
'phe', # 0x35
'pho', # 0x36
'phwa', # 0x37
'tsa', # 0x38
'tsu', # 0x39
'tsi', # 0x3a
'tsaa', # 0x3b
'tsee', # 0x3c
'tse', # 0x3d
'tso', # 0x3e
'tswa', # 0x3f
'tza', # 0x40
'tzu', # 0x41
'tzi', # 0x42
'tzaa', # 0x43
'tzee', # 0x44
'tze', # 0x45
'tzo', # 0x46
'[?]', # 0x47
'fa', # 0x48
'fu', # 0x49
'fi', # 0x4a
'faa', # 0x4b
'fee', # 0x4c
'fe', # 0x4d
'fo', # 0x4e
'fwa', # 0x4f
'pa', # 0x50
'pu', # 0x51
'pi', # 0x52
'paa', # 0x53
'pee', # 0x54
'pe', # 0x55
'po', # 0x56
'pwa', # 0x57
'rya', # 0x58
'mya', # 0x59
'fya', # 0x5a
'[?]', # 0x5b
'[?]', # 0x5c
'[?]', # 0x5d
'[?]', # 0x5e
'[?]', # 0x5f
'[?]', # 0x60
' ', # 0x61
'.', # 0x62
',', # 0x63
';', # 0x64
':', # 0x65
':: ', # 0x66
'?', # 0x67
'//', # 0x68
'1', # 0x69
'2', # 0x6a
'3', # 0x6b
'4', # 0x6c
'5', # 0x6d
'6', # 0x6e
'7', # 0x6f
'8', # 0x70
'9', # 0x71
'10+', # 0x72
'20+', # 0x73
'30+', # 0x74
'40+', # 0x75
'50+', # 0x76
'60+', # 0x77
'70+', # 0x78
'80+', # 0x79
'90+', # 0x7a
'100+', # 0x7b
'10,000+', # 0x7c
'[?]', # 0x7d
'[?]', # 0x7e
'[?]', # 0x7f
'[?]', # 0x80
'[?]', # 0x81
'[?]', # 0x82
'[?]', # 0x83
'[?]', # 0x84
'[?]', # 0x85
'[?]', # 0x86
'[?]', # 0x87
'[?]', # 0x88
'[?]', # 0x89
'[?]', # 0x8a
'[?]', # 0x8b
'[?]', # 0x8c
'[?]', # 0x8d
'[?]', # 0x8e
'[?]', # 0x8f
'[?]', # 0x90
'[?]', # 0x91
'[?]', # 0x92
'[?]', # 0x93
'[?]', # 0x94
'[?]', # 0x95
'[?]', # 0x96
'[?]', # 0x97
'[?]', # 0x98
'[?]', # 0x99
'[?]', # 0x9a
'[?]', # 0x9b
'[?]', # 0x9c
'[?]', # 0x9d
'[?]', # 0x9e
'[?]', # 0x9f
'a', # 0xa0
'e', # 0xa1
'i', # 0xa2
'o', # 0xa3
'u', # 0xa4
'v', # 0xa5
'ga', # 0xa6
'ka', # 0xa7
'ge', # 0xa8
'gi', # 0xa9
'go', # 0xaa
'gu', # 0xab
'gv', # 0xac
'ha', # 0xad
'he', # 0xae
'hi', # 0xaf
'ho', # 0xb0
'hu', # 0xb1
'hv', # 0xb2
'la', # 0xb3
'le', # 0xb4
'li', # 0xb5
'lo', # 0xb6
'lu', # 0xb7
'lv', # 0xb8
'ma', # 0xb9
'me', # 0xba
'mi', # 0xbb
'mo', # 0xbc
'mu', # 0xbd
'na', # 0xbe
'hna', # 0xbf
'nah', # 0xc0
'ne', # 0xc1
'ni', # 0xc2
'no', # 0xc3
'nu', # 0xc4
'nv', # 0xc5
'qua', # 0xc6
'que', # 0xc7
'qui', # 0xc8
'quo', # 0xc9
'quu', # 0xca
'quv', # 0xcb
'sa', # 0xcc
's', # 0xcd
'se', # 0xce
'si', # 0xcf
'so', # 0xd0
'su', # 0xd1
'sv', # 0xd2
'da', # 0xd3
'ta', # 0xd4
'de', # 0xd5
'te', # 0xd6
'di', # 0xd7
'ti', # 0xd8
'do', # 0xd9
'du', # 0xda
'dv', # 0xdb
'dla', # 0xdc
'tla', # 0xdd
'tle', # 0xde
'tli', # 0xdf
'tlo', # 0xe0
'tlu', # 0xe1
'tlv', # 0xe2
'tsa', # 0xe3
'tse', # 0xe4
'tsi', # 0xe5
'tso', # 0xe6
'tsu', # 0xe7
'tsv', # 0xe8
'wa', # 0xe9
'we', # 0xea
'wi', # 0xeb
'wo', # 0xec
'wu', # 0xed
'wv', # 0xee
'ya', # 0xef
'ye', # 0xf0
'yi', # 0xf1
'yo', # 0xf2
'yu', # 0xf3
'yv', # 0xf4
'[?]', # 0xf5
'[?]', # 0xf6
'[?]', # 0xf7
'[?]', # 0xf8
'[?]', # 0xf9
'[?]', # 0xfa
'[?]', # 0xfb
'[?]', # 0xfc
'[?]', # 0xfd
'[?]', # 0xfe
)

View file

@ -1,258 +0,0 @@
data = (
'[?]', # 0x00
'e', # 0x01
'aai', # 0x02
'i', # 0x03
'ii', # 0x04
'o', # 0x05
'oo', # 0x06
'oo', # 0x07
'ee', # 0x08
'i', # 0x09
'a', # 0x0a
'aa', # 0x0b
'we', # 0x0c
'we', # 0x0d
'wi', # 0x0e
'wi', # 0x0f
'wii', # 0x10
'wii', # 0x11
'wo', # 0x12
'wo', # 0x13
'woo', # 0x14
'woo', # 0x15
'woo', # 0x16
'wa', # 0x17
'wa', # 0x18
'waa', # 0x19
'waa', # 0x1a
'waa', # 0x1b
'ai', # 0x1c
'w', # 0x1d
'\'', # 0x1e
't', # 0x1f
'k', # 0x20
'sh', # 0x21
's', # 0x22
'n', # 0x23
'w', # 0x24
'n', # 0x25
'[?]', # 0x26
'w', # 0x27
'c', # 0x28
'?', # 0x29
'l', # 0x2a
'en', # 0x2b
'in', # 0x2c
'on', # 0x2d
'an', # 0x2e
'pe', # 0x2f
'paai', # 0x30
'pi', # 0x31
'pii', # 0x32
'po', # 0x33
'poo', # 0x34
'poo', # 0x35
'hee', # 0x36
'hi', # 0x37
'pa', # 0x38
'paa', # 0x39
'pwe', # 0x3a
'pwe', # 0x3b
'pwi', # 0x3c
'pwi', # 0x3d
'pwii', # 0x3e
'pwii', # 0x3f
'pwo', # 0x40
'pwo', # 0x41
'pwoo', # 0x42
'pwoo', # 0x43
'pwa', # 0x44
'pwa', # 0x45
'pwaa', # 0x46
'pwaa', # 0x47
'pwaa', # 0x48
'p', # 0x49
'p', # 0x4a
'h', # 0x4b
'te', # 0x4c
'taai', # 0x4d
'ti', # 0x4e
'tii', # 0x4f
'to', # 0x50
'too', # 0x51
'too', # 0x52
'dee', # 0x53
'di', # 0x54
'ta', # 0x55
'taa', # 0x56
'twe', # 0x57
'twe', # 0x58
'twi', # 0x59
'twi', # 0x5a
'twii', # 0x5b
'twii', # 0x5c
'two', # 0x5d
'two', # 0x5e
'twoo', # 0x5f
'twoo', # 0x60
'twa', # 0x61
'twa', # 0x62
'twaa', # 0x63
'twaa', # 0x64
'twaa', # 0x65
't', # 0x66
'tte', # 0x67
'tti', # 0x68
'tto', # 0x69
'tta', # 0x6a
'ke', # 0x6b
'kaai', # 0x6c
'ki', # 0x6d
'kii', # 0x6e
'ko', # 0x6f
'koo', # 0x70
'koo', # 0x71
'ka', # 0x72
'kaa', # 0x73
'kwe', # 0x74
'kwe', # 0x75
'kwi', # 0x76
'kwi', # 0x77
'kwii', # 0x78
'kwii', # 0x79
'kwo', # 0x7a
'kwo', # 0x7b
'kwoo', # 0x7c
'kwoo', # 0x7d
'kwa', # 0x7e
'kwa', # 0x7f
'kwaa', # 0x80
'kwaa', # 0x81
'kwaa', # 0x82
'k', # 0x83
'kw', # 0x84
'keh', # 0x85
'kih', # 0x86
'koh', # 0x87
'kah', # 0x88
'ce', # 0x89
'caai', # 0x8a
'ci', # 0x8b
'cii', # 0x8c
'co', # 0x8d
'coo', # 0x8e
'coo', # 0x8f
'ca', # 0x90
'caa', # 0x91
'cwe', # 0x92
'cwe', # 0x93
'cwi', # 0x94
'cwi', # 0x95
'cwii', # 0x96
'cwii', # 0x97
'cwo', # 0x98
'cwo', # 0x99
'cwoo', # 0x9a
'cwoo', # 0x9b
'cwa', # 0x9c
'cwa', # 0x9d
'cwaa', # 0x9e
'cwaa', # 0x9f
'cwaa', # 0xa0
'c', # 0xa1
'th', # 0xa2
'me', # 0xa3
'maai', # 0xa4
'mi', # 0xa5
'mii', # 0xa6
'mo', # 0xa7
'moo', # 0xa8
'moo', # 0xa9
'ma', # 0xaa
'maa', # 0xab
'mwe', # 0xac
'mwe', # 0xad
'mwi', # 0xae
'mwi', # 0xaf
'mwii', # 0xb0
'mwii', # 0xb1
'mwo', # 0xb2
'mwo', # 0xb3
'mwoo', # 0xb4
'mwoo', # 0xb5
'mwa', # 0xb6
'mwa', # 0xb7
'mwaa', # 0xb8
'mwaa', # 0xb9
'mwaa', # 0xba
'm', # 0xbb
'm', # 0xbc
'mh', # 0xbd
'm', # 0xbe
'm', # 0xbf
'ne', # 0xc0
'naai', # 0xc1
'ni', # 0xc2
'nii', # 0xc3
'no', # 0xc4
'noo', # 0xc5
'noo', # 0xc6
'na', # 0xc7
'naa', # 0xc8
'nwe', # 0xc9
'nwe', # 0xca
'nwa', # 0xcb
'nwa', # 0xcc
'nwaa', # 0xcd
'nwaa', # 0xce
'nwaa', # 0xcf
'n', # 0xd0
'ng', # 0xd1
'nh', # 0xd2
'le', # 0xd3
'laai', # 0xd4
'li', # 0xd5
'lii', # 0xd6
'lo', # 0xd7
'loo', # 0xd8
'loo', # 0xd9
'la', # 0xda
'laa', # 0xdb
'lwe', # 0xdc
'lwe', # 0xdd
'lwi', # 0xde
'lwi', # 0xdf
'lwii', # 0xe0
'lwii', # 0xe1
'lwo', # 0xe2
'lwo', # 0xe3
'lwoo', # 0xe4
'lwoo', # 0xe5
'lwa', # 0xe6
'lwa', # 0xe7
'lwaa', # 0xe8
'lwaa', # 0xe9
'l', # 0xea
'l', # 0xeb
'l', # 0xec
'se', # 0xed
'saai', # 0xee
'si', # 0xef
'sii', # 0xf0
'so', # 0xf1
'soo', # 0xf2
'soo', # 0xf3
'sa', # 0xf4
'saa', # 0xf5
'swe', # 0xf6
'swe', # 0xf7
'swi', # 0xf8
'swi', # 0xf9
'swii', # 0xfa
'swii', # 0xfb
'swo', # 0xfc
'swo', # 0xfd
'swoo', # 0xfe
'swoo', # 0xff
)

View file

@ -1,258 +0,0 @@
data = (
'swa', # 0x00
'swa', # 0x01
'swaa', # 0x02
'swaa', # 0x03
'swaa', # 0x04
's', # 0x05
's', # 0x06
'sw', # 0x07
's', # 0x08
'sk', # 0x09
'skw', # 0x0a
'sW', # 0x0b
'spwa', # 0x0c
'stwa', # 0x0d
'skwa', # 0x0e
'scwa', # 0x0f
'she', # 0x10
'shi', # 0x11
'shii', # 0x12
'sho', # 0x13
'shoo', # 0x14
'sha', # 0x15
'shaa', # 0x16
'shwe', # 0x17
'shwe', # 0x18
'shwi', # 0x19
'shwi', # 0x1a
'shwii', # 0x1b
'shwii', # 0x1c
'shwo', # 0x1d
'shwo', # 0x1e
'shwoo', # 0x1f
'shwoo', # 0x20
'shwa', # 0x21
'shwa', # 0x22
'shwaa', # 0x23
'shwaa', # 0x24
'sh', # 0x25
'ye', # 0x26
'yaai', # 0x27
'yi', # 0x28
'yii', # 0x29
'yo', # 0x2a
'yoo', # 0x2b
'yoo', # 0x2c
'ya', # 0x2d
'yaa', # 0x2e
'ywe', # 0x2f
'ywe', # 0x30
'ywi', # 0x31
'ywi', # 0x32
'ywii', # 0x33
'ywii', # 0x34
'ywo', # 0x35
'ywo', # 0x36
'ywoo', # 0x37
'ywoo', # 0x38
'ywa', # 0x39
'ywa', # 0x3a
'ywaa', # 0x3b
'ywaa', # 0x3c
'ywaa', # 0x3d
'y', # 0x3e
'y', # 0x3f
'y', # 0x40
'yi', # 0x41
're', # 0x42
're', # 0x43
'le', # 0x44
'raai', # 0x45
'ri', # 0x46
'rii', # 0x47
'ro', # 0x48
'roo', # 0x49
'lo', # 0x4a
'ra', # 0x4b
'raa', # 0x4c
'la', # 0x4d
'rwaa', # 0x4e
'rwaa', # 0x4f
'r', # 0x50
'r', # 0x51
'r', # 0x52
'fe', # 0x53
'faai', # 0x54
'fi', # 0x55
'fii', # 0x56
'fo', # 0x57
'foo', # 0x58
'fa', # 0x59
'faa', # 0x5a
'fwaa', # 0x5b
'fwaa', # 0x5c
'f', # 0x5d
'the', # 0x5e
'the', # 0x5f
'thi', # 0x60
'thi', # 0x61
'thii', # 0x62
'thii', # 0x63
'tho', # 0x64
'thoo', # 0x65
'tha', # 0x66
'thaa', # 0x67
'thwaa', # 0x68
'thwaa', # 0x69
'th', # 0x6a
'tthe', # 0x6b
'tthi', # 0x6c
'ttho', # 0x6d
'ttha', # 0x6e
'tth', # 0x6f
'tye', # 0x70
'tyi', # 0x71
'tyo', # 0x72
'tya', # 0x73
'he', # 0x74
'hi', # 0x75
'hii', # 0x76
'ho', # 0x77
'hoo', # 0x78
'ha', # 0x79
'haa', # 0x7a
'h', # 0x7b
'h', # 0x7c
'hk', # 0x7d
'qaai', # 0x7e
'qi', # 0x7f
'qii', # 0x80
'qo', # 0x81
'qoo', # 0x82
'qa', # 0x83
'qaa', # 0x84
'q', # 0x85
'tlhe', # 0x86
'tlhi', # 0x87
'tlho', # 0x88
'tlha', # 0x89
're', # 0x8a
'ri', # 0x8b
'ro', # 0x8c
'ra', # 0x8d
'ngaai', # 0x8e
'ngi', # 0x8f
'ngii', # 0x90
'ngo', # 0x91
'ngoo', # 0x92
'nga', # 0x93
'ngaa', # 0x94
'ng', # 0x95
'nng', # 0x96
'she', # 0x97
'shi', # 0x98
'sho', # 0x99
'sha', # 0x9a
'the', # 0x9b
'thi', # 0x9c
'tho', # 0x9d
'tha', # 0x9e
'th', # 0x9f
'lhi', # 0xa0
'lhii', # 0xa1
'lho', # 0xa2
'lhoo', # 0xa3
'lha', # 0xa4
'lhaa', # 0xa5
'lh', # 0xa6
'the', # 0xa7
'thi', # 0xa8
'thii', # 0xa9
'tho', # 0xaa
'thoo', # 0xab
'tha', # 0xac
'thaa', # 0xad
'th', # 0xae
'b', # 0xaf
'e', # 0xb0
'i', # 0xb1
'o', # 0xb2
'a', # 0xb3
'we', # 0xb4
'wi', # 0xb5
'wo', # 0xb6
'wa', # 0xb7
'ne', # 0xb8
'ni', # 0xb9
'no', # 0xba
'na', # 0xbb
'ke', # 0xbc
'ki', # 0xbd
'ko', # 0xbe
'ka', # 0xbf
'he', # 0xc0
'hi', # 0xc1
'ho', # 0xc2
'ha', # 0xc3
'ghu', # 0xc4
'gho', # 0xc5
'ghe', # 0xc6
'ghee', # 0xc7
'ghi', # 0xc8
'gha', # 0xc9
'ru', # 0xca
'ro', # 0xcb
're', # 0xcc
'ree', # 0xcd
'ri', # 0xce
'ra', # 0xcf
'wu', # 0xd0
'wo', # 0xd1
'we', # 0xd2
'wee', # 0xd3
'wi', # 0xd4
'wa', # 0xd5
'hwu', # 0xd6
'hwo', # 0xd7
'hwe', # 0xd8
'hwee', # 0xd9
'hwi', # 0xda
'hwa', # 0xdb
'thu', # 0xdc
'tho', # 0xdd
'the', # 0xde
'thee', # 0xdf
'thi', # 0xe0
'tha', # 0xe1
'ttu', # 0xe2
'tto', # 0xe3
'tte', # 0xe4
'ttee', # 0xe5
'tti', # 0xe6
'tta', # 0xe7
'pu', # 0xe8
'po', # 0xe9
'pe', # 0xea
'pee', # 0xeb
'pi', # 0xec
'pa', # 0xed
'p', # 0xee
'gu', # 0xef
'go', # 0xf0
'ge', # 0xf1
'gee', # 0xf2
'gi', # 0xf3
'ga', # 0xf4
'khu', # 0xf5
'kho', # 0xf6
'khe', # 0xf7
'khee', # 0xf8
'khi', # 0xf9
'kha', # 0xfa
'kku', # 0xfb
'kko', # 0xfc
'kke', # 0xfd
'kkee', # 0xfe
'kki', # 0xff
)

View file

@ -1,257 +0,0 @@
data = (
'kka', # 0x00
'kk', # 0x01
'nu', # 0x02
'no', # 0x03
'ne', # 0x04
'nee', # 0x05
'ni', # 0x06
'na', # 0x07
'mu', # 0x08
'mo', # 0x09
'me', # 0x0a
'mee', # 0x0b
'mi', # 0x0c
'ma', # 0x0d
'yu', # 0x0e
'yo', # 0x0f
'ye', # 0x10
'yee', # 0x11
'yi', # 0x12
'ya', # 0x13
'ju', # 0x14
'ju', # 0x15
'jo', # 0x16
'je', # 0x17
'jee', # 0x18
'ji', # 0x19
'ji', # 0x1a
'ja', # 0x1b
'jju', # 0x1c
'jjo', # 0x1d
'jje', # 0x1e
'jjee', # 0x1f
'jji', # 0x20
'jja', # 0x21
'lu', # 0x22
'lo', # 0x23
'le', # 0x24
'lee', # 0x25
'li', # 0x26
'la', # 0x27
'dlu', # 0x28
'dlo', # 0x29
'dle', # 0x2a
'dlee', # 0x2b
'dli', # 0x2c
'dla', # 0x2d
'lhu', # 0x2e
'lho', # 0x2f
'lhe', # 0x30
'lhee', # 0x31
'lhi', # 0x32
'lha', # 0x33
'tlhu', # 0x34
'tlho', # 0x35
'tlhe', # 0x36
'tlhee', # 0x37
'tlhi', # 0x38
'tlha', # 0x39
'tlu', # 0x3a
'tlo', # 0x3b
'tle', # 0x3c
'tlee', # 0x3d
'tli', # 0x3e
'tla', # 0x3f
'zu', # 0x40
'zo', # 0x41
'ze', # 0x42
'zee', # 0x43
'zi', # 0x44
'za', # 0x45
'z', # 0x46
'z', # 0x47
'dzu', # 0x48
'dzo', # 0x49
'dze', # 0x4a
'dzee', # 0x4b
'dzi', # 0x4c
'dza', # 0x4d
'su', # 0x4e
'so', # 0x4f
'se', # 0x50
'see', # 0x51
'si', # 0x52
'sa', # 0x53
'shu', # 0x54
'sho', # 0x55
'she', # 0x56
'shee', # 0x57
'shi', # 0x58
'sha', # 0x59
'sh', # 0x5a
'tsu', # 0x5b
'tso', # 0x5c
'tse', # 0x5d
'tsee', # 0x5e
'tsi', # 0x5f
'tsa', # 0x60
'chu', # 0x61
'cho', # 0x62
'che', # 0x63
'chee', # 0x64
'chi', # 0x65
'cha', # 0x66
'ttsu', # 0x67
'ttso', # 0x68
'ttse', # 0x69
'ttsee', # 0x6a
'ttsi', # 0x6b
'ttsa', # 0x6c
'X', # 0x6d
'.', # 0x6e
'qai', # 0x6f
'ngai', # 0x70
'nngi', # 0x71
'nngii', # 0x72
'nngo', # 0x73
'nngoo', # 0x74
'nnga', # 0x75
'nngaa', # 0x76
'[?]', # 0x77
'[?]', # 0x78
'[?]', # 0x79
'[?]', # 0x7a
'[?]', # 0x7b
'[?]', # 0x7c
'[?]', # 0x7d
'[?]', # 0x7e
'[?]', # 0x7f
' ', # 0x80
'b', # 0x81
'l', # 0x82
'f', # 0x83
's', # 0x84
'n', # 0x85
'h', # 0x86
'd', # 0x87
't', # 0x88
'c', # 0x89
'q', # 0x8a
'm', # 0x8b
'g', # 0x8c
'ng', # 0x8d
'z', # 0x8e
'r', # 0x8f
'a', # 0x90
'o', # 0x91
'u', # 0x92
'e', # 0x93
'i', # 0x94
'ch', # 0x95
'th', # 0x96
'ph', # 0x97
'p', # 0x98
'x', # 0x99
'p', # 0x9a
'<', # 0x9b
'>', # 0x9c
'[?]', # 0x9d
'[?]', # 0x9e
'[?]', # 0x9f
'f', # 0xa0
'v', # 0xa1
'u', # 0xa2
'yr', # 0xa3
'y', # 0xa4
'w', # 0xa5
'th', # 0xa6
'th', # 0xa7
'a', # 0xa8
'o', # 0xa9
'ac', # 0xaa
'ae', # 0xab
'o', # 0xac
'o', # 0xad
'o', # 0xae
'oe', # 0xaf
'on', # 0xb0
'r', # 0xb1
'k', # 0xb2
'c', # 0xb3
'k', # 0xb4
'g', # 0xb5
'ng', # 0xb6
'g', # 0xb7
'g', # 0xb8
'w', # 0xb9
'h', # 0xba
'h', # 0xbb
'h', # 0xbc
'h', # 0xbd
'n', # 0xbe
'n', # 0xbf
'n', # 0xc0
'i', # 0xc1
'e', # 0xc2
'j', # 0xc3
'g', # 0xc4
'ae', # 0xc5
'a', # 0xc6
'eo', # 0xc7
'p', # 0xc8
'z', # 0xc9
's', # 0xca
's', # 0xcb
's', # 0xcc
'c', # 0xcd
'z', # 0xce
't', # 0xcf
't', # 0xd0
'd', # 0xd1
'b', # 0xd2
'b', # 0xd3
'p', # 0xd4
'p', # 0xd5
'e', # 0xd6
'm', # 0xd7
'm', # 0xd8
'm', # 0xd9
'l', # 0xda
'l', # 0xdb
'ng', # 0xdc
'ng', # 0xdd
'd', # 0xde
'o', # 0xdf
'ear', # 0xe0
'ior', # 0xe1
'qu', # 0xe2
'qu', # 0xe3
'qu', # 0xe4
's', # 0xe5
'yr', # 0xe6
'yr', # 0xe7
'yr', # 0xe8
'q', # 0xe9
'x', # 0xea
'.', # 0xeb
':', # 0xec
'+', # 0xed
'17', # 0xee
'18', # 0xef
'19', # 0xf0
'[?]', # 0xf1
'[?]', # 0xf2
'[?]', # 0xf3
'[?]', # 0xf4
'[?]', # 0xf5
'[?]', # 0xf6
'[?]', # 0xf7
'[?]', # 0xf8
'[?]', # 0xf9
'[?]', # 0xfa
'[?]', # 0xfb
'[?]', # 0xfc
'[?]', # 0xfd
'[?]', # 0xfe
)

View file

@ -1,257 +0,0 @@
data = (
'[?]', # 0x00
'[?]', # 0x01
'[?]', # 0x02
'[?]', # 0x03
'[?]', # 0x04
'[?]', # 0x05
'[?]', # 0x06
'[?]', # 0x07
'[?]', # 0x08
'[?]', # 0x09
'[?]', # 0x0a
'[?]', # 0x0b
'[?]', # 0x0c
'[?]', # 0x0d
'[?]', # 0x0e
'[?]', # 0x0f
'[?]', # 0x10
'[?]', # 0x11
'[?]', # 0x12
'[?]', # 0x13
'[?]', # 0x14
'[?]', # 0x15
'[?]', # 0x16
'[?]', # 0x17
'[?]', # 0x18
'[?]', # 0x19
'[?]', # 0x1a
'[?]', # 0x1b
'[?]', # 0x1c
'[?]', # 0x1d
'[?]', # 0x1e
'[?]', # 0x1f
'[?]', # 0x20
'[?]', # 0x21
'[?]', # 0x22
'[?]', # 0x23
'[?]', # 0x24
'[?]', # 0x25
'[?]', # 0x26
'[?]', # 0x27
'[?]', # 0x28
'[?]', # 0x29
'[?]', # 0x2a
'[?]', # 0x2b
'[?]', # 0x2c
'[?]', # 0x2d
'[?]', # 0x2e
'[?]', # 0x2f
'[?]', # 0x30
'[?]', # 0x31
'[?]', # 0x32
'[?]', # 0x33
'[?]', # 0x34
'[?]', # 0x35
'[?]', # 0x36
'[?]', # 0x37
'[?]', # 0x38
'[?]', # 0x39
'[?]', # 0x3a
'[?]', # 0x3b
'[?]', # 0x3c
'[?]', # 0x3d
'[?]', # 0x3e
'[?]', # 0x3f
'[?]', # 0x40
'[?]', # 0x41
'[?]', # 0x42
'[?]', # 0x43
'[?]', # 0x44
'[?]', # 0x45
'[?]', # 0x46
'[?]', # 0x47
'[?]', # 0x48
'[?]', # 0x49
'[?]', # 0x4a
'[?]', # 0x4b
'[?]', # 0x4c
'[?]', # 0x4d
'[?]', # 0x4e
'[?]', # 0x4f
'[?]', # 0x50
'[?]', # 0x51
'[?]', # 0x52
'[?]', # 0x53
'[?]', # 0x54
'[?]', # 0x55
'[?]', # 0x56
'[?]', # 0x57
'[?]', # 0x58
'[?]', # 0x59
'[?]', # 0x5a
'[?]', # 0x5b
'[?]', # 0x5c
'[?]', # 0x5d
'[?]', # 0x5e
'[?]', # 0x5f
'[?]', # 0x60
'[?]', # 0x61
'[?]', # 0x62
'[?]', # 0x63
'[?]', # 0x64
'[?]', # 0x65
'[?]', # 0x66
'[?]', # 0x67
'[?]', # 0x68
'[?]', # 0x69
'[?]', # 0x6a
'[?]', # 0x6b
'[?]', # 0x6c
'[?]', # 0x6d
'[?]', # 0x6e
'[?]', # 0x6f
'[?]', # 0x70
'[?]', # 0x71
'[?]', # 0x72
'[?]', # 0x73
'[?]', # 0x74
'[?]', # 0x75
'[?]', # 0x76
'[?]', # 0x77
'[?]', # 0x78
'[?]', # 0x79
'[?]', # 0x7a
'[?]', # 0x7b
'[?]', # 0x7c
'[?]', # 0x7d
'[?]', # 0x7e
'[?]', # 0x7f
'k', # 0x80
'kh', # 0x81
'g', # 0x82
'gh', # 0x83
'ng', # 0x84
'c', # 0x85
'ch', # 0x86
'j', # 0x87
'jh', # 0x88
'ny', # 0x89
't', # 0x8a
'tth', # 0x8b
'd', # 0x8c
'ddh', # 0x8d
'nn', # 0x8e
't', # 0x8f
'th', # 0x90
'd', # 0x91
'dh', # 0x92
'n', # 0x93
'p', # 0x94
'ph', # 0x95
'b', # 0x96
'bh', # 0x97
'm', # 0x98
'y', # 0x99
'r', # 0x9a
'l', # 0x9b
'v', # 0x9c
'sh', # 0x9d
'ss', # 0x9e
's', # 0x9f
'h', # 0xa0
'l', # 0xa1
'q', # 0xa2
'a', # 0xa3
'aa', # 0xa4
'i', # 0xa5
'ii', # 0xa6
'u', # 0xa7
'uk', # 0xa8
'uu', # 0xa9
'uuv', # 0xaa
'ry', # 0xab
'ryy', # 0xac
'ly', # 0xad
'lyy', # 0xae
'e', # 0xaf
'ai', # 0xb0
'oo', # 0xb1
'oo', # 0xb2
'au', # 0xb3
'a', # 0xb4
'aa', # 0xb5
'aa', # 0xb6
'i', # 0xb7
'ii', # 0xb8
'y', # 0xb9
'yy', # 0xba
'u', # 0xbb
'uu', # 0xbc
'ua', # 0xbd
'oe', # 0xbe
'ya', # 0xbf
'ie', # 0xc0
'e', # 0xc1
'ae', # 0xc2
'ai', # 0xc3
'oo', # 0xc4
'au', # 0xc5
'M', # 0xc6
'H', # 0xc7
'a`', # 0xc8
'', # 0xc9
'', # 0xca
'', # 0xcb
'r', # 0xcc
'', # 0xcd
'!', # 0xce
'', # 0xcf
'', # 0xd0
'', # 0xd1
'', # 0xd2
'', # 0xd3
'.', # 0xd4
' // ', # 0xd5
':', # 0xd6
'+', # 0xd7
'++', # 0xd8
' * ', # 0xd9
' /// ', # 0xda
'KR', # 0xdb
'\'', # 0xdc
'[?]', # 0xdd
'[?]', # 0xde
'[?]', # 0xdf
'0', # 0xe0
'1', # 0xe1
'2', # 0xe2
'3', # 0xe3
'4', # 0xe4
'5', # 0xe5
'6', # 0xe6
'7', # 0xe7
'8', # 0xe8
'9', # 0xe9
'[?]', # 0xea
'[?]', # 0xeb
'[?]', # 0xec
'[?]', # 0xed
'[?]', # 0xee
'[?]', # 0xef
'[?]', # 0xf0
'[?]', # 0xf1
'[?]', # 0xf2
'[?]', # 0xf3
'[?]', # 0xf4
'[?]', # 0xf5
'[?]', # 0xf6
'[?]', # 0xf7
'[?]', # 0xf8
'[?]', # 0xf9
'[?]', # 0xfa
'[?]', # 0xfb
'[?]', # 0xfc
'[?]', # 0xfd
'[?]', # 0xfe
)

View file

@ -1,257 +0,0 @@
data = (
' @ ', # 0x00
' ... ', # 0x01
', ', # 0x02
'. ', # 0x03
': ', # 0x04
' // ', # 0x05
'', # 0x06
'-', # 0x07
', ', # 0x08
'. ', # 0x09
'', # 0x0a
'', # 0x0b
'', # 0x0c
'', # 0x0d
'', # 0x0e
'[?]', # 0x0f
'0', # 0x10
'1', # 0x11
'2', # 0x12
'3', # 0x13
'4', # 0x14
'5', # 0x15
'6', # 0x16
'7', # 0x17
'8', # 0x18
'9', # 0x19
'[?]', # 0x1a
'[?]', # 0x1b
'[?]', # 0x1c
'[?]', # 0x1d
'[?]', # 0x1e
'[?]', # 0x1f
'a', # 0x20
'e', # 0x21
'i', # 0x22
'o', # 0x23
'u', # 0x24
'O', # 0x25
'U', # 0x26
'ee', # 0x27
'n', # 0x28
'ng', # 0x29
'b', # 0x2a
'p', # 0x2b
'q', # 0x2c
'g', # 0x2d
'm', # 0x2e
'l', # 0x2f
's', # 0x30
'sh', # 0x31
't', # 0x32
'd', # 0x33
'ch', # 0x34
'j', # 0x35
'y', # 0x36
'r', # 0x37
'w', # 0x38
'f', # 0x39
'k', # 0x3a
'kha', # 0x3b
'ts', # 0x3c
'z', # 0x3d
'h', # 0x3e
'zr', # 0x3f
'lh', # 0x40
'zh', # 0x41
'ch', # 0x42
'-', # 0x43
'e', # 0x44
'i', # 0x45
'o', # 0x46
'u', # 0x47
'O', # 0x48
'U', # 0x49
'ng', # 0x4a
'b', # 0x4b
'p', # 0x4c
'q', # 0x4d
'g', # 0x4e
'm', # 0x4f
't', # 0x50
'd', # 0x51
'ch', # 0x52
'j', # 0x53
'ts', # 0x54
'y', # 0x55
'w', # 0x56
'k', # 0x57
'g', # 0x58
'h', # 0x59
'jy', # 0x5a
'ny', # 0x5b
'dz', # 0x5c
'e', # 0x5d
'i', # 0x5e
'iy', # 0x5f
'U', # 0x60
'u', # 0x61
'ng', # 0x62
'k', # 0x63
'g', # 0x64
'h', # 0x65
'p', # 0x66
'sh', # 0x67
't', # 0x68
'd', # 0x69
'j', # 0x6a
'f', # 0x6b
'g', # 0x6c
'h', # 0x6d
'ts', # 0x6e
'z', # 0x6f
'r', # 0x70
'ch', # 0x71
'zh', # 0x72
'i', # 0x73
'k', # 0x74
'r', # 0x75
'f', # 0x76
'zh', # 0x77
'[?]', # 0x78
'[?]', # 0x79
'[?]', # 0x7a
'[?]', # 0x7b
'[?]', # 0x7c
'[?]', # 0x7d
'[?]', # 0x7e
'[?]', # 0x7f
'[?]', # 0x80
'H', # 0x81
'X', # 0x82
'W', # 0x83
'M', # 0x84
' 3 ', # 0x85
' 333 ', # 0x86
'a', # 0x87
'i', # 0x88
'k', # 0x89
'ng', # 0x8a
'c', # 0x8b
'tt', # 0x8c
'tth', # 0x8d
'dd', # 0x8e
'nn', # 0x8f
't', # 0x90
'd', # 0x91
'p', # 0x92
'ph', # 0x93
'ss', # 0x94
'zh', # 0x95
'z', # 0x96
'a', # 0x97
't', # 0x98
'zh', # 0x99
'gh', # 0x9a
'ng', # 0x9b
'c', # 0x9c
'jh', # 0x9d
'tta', # 0x9e
'ddh', # 0x9f
't', # 0xa0
'dh', # 0xa1
'ss', # 0xa2
'cy', # 0xa3
'zh', # 0xa4
'z', # 0xa5
'u', # 0xa6
'y', # 0xa7
'bh', # 0xa8
'\'', # 0xa9
'[?]', # 0xaa
'[?]', # 0xab
'[?]', # 0xac
'[?]', # 0xad
'[?]', # 0xae
'[?]', # 0xaf
'[?]', # 0xb0
'[?]', # 0xb1
'[?]', # 0xb2
'[?]', # 0xb3
'[?]', # 0xb4
'[?]', # 0xb5
'[?]', # 0xb6
'[?]', # 0xb7
'[?]', # 0xb8
'[?]', # 0xb9
'[?]', # 0xba
'[?]', # 0xbb
'[?]', # 0xbc
'[?]', # 0xbd
'[?]', # 0xbe
'[?]', # 0xbf
'[?]', # 0xc0
'[?]', # 0xc1
'[?]', # 0xc2
'[?]', # 0xc3
'[?]', # 0xc4
'[?]', # 0xc5
'[?]', # 0xc6
'[?]', # 0xc7
'[?]', # 0xc8
'[?]', # 0xc9
'[?]', # 0xca
'[?]', # 0xcb
'[?]', # 0xcc
'[?]', # 0xcd
'[?]', # 0xce
'[?]', # 0xcf
'[?]', # 0xd0
'[?]', # 0xd1
'[?]', # 0xd2
'[?]', # 0xd3
'[?]', # 0xd4
'[?]', # 0xd5
'[?]', # 0xd6
'[?]', # 0xd7
'[?]', # 0xd8
'[?]', # 0xd9
'[?]', # 0xda
'[?]', # 0xdb
'[?]', # 0xdc
'[?]', # 0xdd
'[?]', # 0xde
'[?]', # 0xdf
'[?]', # 0xe0
'[?]', # 0xe1
'[?]', # 0xe2
'[?]', # 0xe3
'[?]', # 0xe4
'[?]', # 0xe5
'[?]', # 0xe6
'[?]', # 0xe7
'[?]', # 0xe8
'[?]', # 0xe9
'[?]', # 0xea
'[?]', # 0xeb
'[?]', # 0xec
'[?]', # 0xed
'[?]', # 0xee
'[?]', # 0xef
'[?]', # 0xf0
'[?]', # 0xf1
'[?]', # 0xf2
'[?]', # 0xf3
'[?]', # 0xf4
'[?]', # 0xf5
'[?]', # 0xf6
'[?]', # 0xf7
'[?]', # 0xf8
'[?]', # 0xf9
'[?]', # 0xfa
'[?]', # 0xfb
'[?]', # 0xfc
'[?]', # 0xfd
'[?]', # 0xfe
)

View file

@ -1,257 +0,0 @@
data = (
'', # 0x00
'', # 0x01
'', # 0x02
'', # 0x03
'', # 0x04
'', # 0x05
'', # 0x06
'', # 0x07
'', # 0x08
'', # 0x09
'', # 0x0a
'', # 0x0b
'', # 0x0c
'', # 0x0d
'', # 0x0e
'', # 0x0f
'', # 0x10
'', # 0x11
'', # 0x12
'', # 0x13
'', # 0x14
'', # 0x15
'', # 0x16
'', # 0x17
'', # 0x18
'', # 0x19
'', # 0x1a
'', # 0x1b
'', # 0x1c
'', # 0x1d
'', # 0x1e
'', # 0x1f
'', # 0x20
'', # 0x21
'', # 0x22
'', # 0x23
'', # 0x24
'', # 0x25
'', # 0x26
'', # 0x27
'', # 0x28
'', # 0x29
'', # 0x2a
'', # 0x2b
'', # 0x2c
'', # 0x2d
'', # 0x2e
'', # 0x2f
'', # 0x30
'', # 0x31
'', # 0x32
'', # 0x33
'', # 0x34
'', # 0x35
'', # 0x36
'', # 0x37
'', # 0x38
'', # 0x39
'', # 0x3a
'', # 0x3b
'', # 0x3c
'', # 0x3d
'', # 0x3e
'', # 0x3f
'', # 0x40
'', # 0x41
'', # 0x42
'', # 0x43
'', # 0x44
'', # 0x45
'', # 0x46
'', # 0x47
'', # 0x48
'', # 0x49
'', # 0x4a
'', # 0x4b
'', # 0x4c
'', # 0x4d
'', # 0x4e
'', # 0x4f
'', # 0x50
'', # 0x51
'', # 0x52
'', # 0x53
'', # 0x54
'', # 0x55
'', # 0x56
'', # 0x57
'', # 0x58
'', # 0x59
'', # 0x5a
'', # 0x5b
'', # 0x5c
'', # 0x5d
'', # 0x5e
'', # 0x5f
'', # 0x60
'', # 0x61
'', # 0x62
'', # 0x63
'', # 0x64
'', # 0x65
'', # 0x66
'', # 0x67
'', # 0x68
'', # 0x69
'', # 0x6a
'', # 0x6b
'b', # 0x6c
'd', # 0x6d
'f', # 0x6e
'm', # 0x6f
'n', # 0x70
'p', # 0x71
'r', # 0x72
'r', # 0x73
's', # 0x74
't', # 0x75
'z', # 0x76
'g', # 0x77
'', # 0x78
'', # 0x79
'', # 0x7a
'', # 0x7b
'', # 0x7c
'p', # 0x7d
'', # 0x7e
'', # 0x7f
'b', # 0x80
'd', # 0x81
'f', # 0x82
'g', # 0x83
'k', # 0x84
'l', # 0x85
'm', # 0x86
'n', # 0x87
'p', # 0x88
'r', # 0x89
's', # 0x8a
'', # 0x8b
'v', # 0x8c
'x', # 0x8d
'z', # 0x8e
'', # 0x8f
'', # 0x90
'', # 0x91
'', # 0x92
'', # 0x93
'', # 0x94
'', # 0x95
'', # 0x96
'', # 0x97
'', # 0x98
'', # 0x99
'', # 0x9a
'', # 0x9b
'', # 0x9c
'', # 0x9d
'', # 0x9e
'', # 0x9f
'', # 0xa0
'', # 0xa1
'', # 0xa2
'', # 0xa3
'', # 0xa4
'', # 0xa5
'', # 0xa6
'', # 0xa7
'', # 0xa8
'', # 0xa9
'', # 0xaa
'', # 0xab
'', # 0xac
'', # 0xad
'', # 0xae
'', # 0xaf
'', # 0xb0
'', # 0xb1
'', # 0xb2
'', # 0xb3
'', # 0xb4
'', # 0xb5
'', # 0xb6
'', # 0xb7
'', # 0xb8
'', # 0xb9
'', # 0xba
'', # 0xbb
'', # 0xbc
'', # 0xbd
'', # 0xbe
'', # 0xbf
'', # 0xc0
'', # 0xc1
'', # 0xc2
'', # 0xc3
'', # 0xc4
'', # 0xc5
'', # 0xc6
'', # 0xc7
'', # 0xc8
'', # 0xc9
'', # 0xca
'', # 0xcb
'', # 0xcc
'', # 0xcd
'', # 0xce
'', # 0xcf
'', # 0xd0
'', # 0xd1
'', # 0xd2
'', # 0xd3
'', # 0xd4
'', # 0xd5
'', # 0xd6
'', # 0xd7
'', # 0xd8
'', # 0xd9
'', # 0xda
'', # 0xdb
'', # 0xdc
'', # 0xdd
'', # 0xde
'', # 0xdf
'', # 0xe0
'', # 0xe1
'', # 0xe2
'', # 0xe3
'', # 0xe4
'', # 0xe5
'', # 0xe6
'', # 0xe7
'', # 0xe8
'', # 0xe9
'', # 0xea
'', # 0xeb
'', # 0xec
'', # 0xed
'', # 0xee
'', # 0xef
'', # 0xf0
'', # 0xf1
'', # 0xf2
'', # 0xf3
'', # 0xf4
'', # 0xf5
'', # 0xf6
'', # 0xf7
'', # 0xf8
'', # 0xf9
'', # 0xfa
'', # 0xfb
'', # 0xfc
'', # 0xfd
'', # 0xfe
)

View file

@ -1,257 +0,0 @@
data = (
'A', # 0x00
'a', # 0x01
'B', # 0x02
'b', # 0x03
'B', # 0x04
'b', # 0x05
'B', # 0x06
'b', # 0x07
'C', # 0x08
'c', # 0x09
'D', # 0x0a
'd', # 0x0b
'D', # 0x0c
'd', # 0x0d
'D', # 0x0e
'd', # 0x0f
'D', # 0x10
'd', # 0x11
'D', # 0x12
'd', # 0x13
'E', # 0x14
'e', # 0x15
'E', # 0x16
'e', # 0x17
'E', # 0x18
'e', # 0x19
'E', # 0x1a
'e', # 0x1b
'E', # 0x1c
'e', # 0x1d
'F', # 0x1e
'f', # 0x1f
'G', # 0x20
'g', # 0x21
'H', # 0x22
'h', # 0x23
'H', # 0x24
'h', # 0x25
'H', # 0x26
'h', # 0x27
'H', # 0x28
'h', # 0x29
'H', # 0x2a
'h', # 0x2b
'I', # 0x2c
'i', # 0x2d
'I', # 0x2e
'i', # 0x2f
'K', # 0x30
'k', # 0x31
'K', # 0x32
'k', # 0x33
'K', # 0x34
'k', # 0x35
'L', # 0x36
'l', # 0x37
'L', # 0x38
'l', # 0x39
'L', # 0x3a
'l', # 0x3b
'L', # 0x3c
'l', # 0x3d
'M', # 0x3e
'm', # 0x3f
'M', # 0x40
'm', # 0x41
'M', # 0x42
'm', # 0x43
'N', # 0x44
'n', # 0x45
'N', # 0x46
'n', # 0x47
'N', # 0x48
'n', # 0x49
'N', # 0x4a
'n', # 0x4b
'O', # 0x4c
'o', # 0x4d
'O', # 0x4e
'o', # 0x4f
'O', # 0x50
'o', # 0x51
'O', # 0x52
'o', # 0x53
'P', # 0x54
'p', # 0x55
'P', # 0x56
'p', # 0x57
'R', # 0x58
'r', # 0x59
'R', # 0x5a
'r', # 0x5b
'R', # 0x5c
'r', # 0x5d
'R', # 0x5e
'r', # 0x5f
'S', # 0x60
's', # 0x61
'S', # 0x62
's', # 0x63
'S', # 0x64
's', # 0x65
'S', # 0x66
's', # 0x67
'S', # 0x68
's', # 0x69
'T', # 0x6a
't', # 0x6b
'T', # 0x6c
't', # 0x6d
'T', # 0x6e
't', # 0x6f
'T', # 0x70
't', # 0x71
'U', # 0x72
'u', # 0x73
'U', # 0x74
'u', # 0x75
'U', # 0x76
'u', # 0x77
'U', # 0x78
'u', # 0x79
'U', # 0x7a
'u', # 0x7b
'V', # 0x7c
'v', # 0x7d
'V', # 0x7e
'v', # 0x7f
'W', # 0x80
'w', # 0x81
'W', # 0x82
'w', # 0x83
'W', # 0x84
'w', # 0x85
'W', # 0x86
'w', # 0x87
'W', # 0x88
'w', # 0x89
'X', # 0x8a
'x', # 0x8b
'X', # 0x8c
'x', # 0x8d
'Y', # 0x8e
'y', # 0x8f
'Z', # 0x90
'z', # 0x91
'Z', # 0x92
'z', # 0x93
'Z', # 0x94
'z', # 0x95
'h', # 0x96
't', # 0x97
'w', # 0x98
'y', # 0x99
'a', # 0x9a
'S', # 0x9b
'[?]', # 0x9c
'[?]', # 0x9d
'Ss', # 0x9e
'[?]', # 0x9f
'A', # 0xa0
'a', # 0xa1
'A', # 0xa2
'a', # 0xa3
'A', # 0xa4
'a', # 0xa5
'A', # 0xa6
'a', # 0xa7
'A', # 0xa8
'a', # 0xa9
'A', # 0xaa
'a', # 0xab
'A', # 0xac
'a', # 0xad
'A', # 0xae
'a', # 0xaf
'A', # 0xb0
'a', # 0xb1
'A', # 0xb2
'a', # 0xb3
'A', # 0xb4
'a', # 0xb5
'A', # 0xb6
'a', # 0xb7
'E', # 0xb8
'e', # 0xb9
'E', # 0xba
'e', # 0xbb
'E', # 0xbc
'e', # 0xbd
'E', # 0xbe
'e', # 0xbf
'E', # 0xc0
'e', # 0xc1
'E', # 0xc2
'e', # 0xc3
'E', # 0xc4
'e', # 0xc5
'E', # 0xc6
'e', # 0xc7
'I', # 0xc8
'i', # 0xc9
'I', # 0xca
'i', # 0xcb
'O', # 0xcc
'o', # 0xcd
'O', # 0xce
'o', # 0xcf
'O', # 0xd0
'o', # 0xd1
'O', # 0xd2
'o', # 0xd3
'O', # 0xd4
'o', # 0xd5
'O', # 0xd6
'o', # 0xd7
'O', # 0xd8
'o', # 0xd9
'O', # 0xda
'o', # 0xdb
'O', # 0xdc
'o', # 0xdd
'O', # 0xde
'o', # 0xdf
'O', # 0xe0
'o', # 0xe1
'O', # 0xe2
'o', # 0xe3
'U', # 0xe4
'u', # 0xe5
'U', # 0xe6
'u', # 0xe7
'U', # 0xe8
'u', # 0xe9
'U', # 0xea
'u', # 0xeb
'U', # 0xec
'u', # 0xed
'U', # 0xee
'u', # 0xef
'U', # 0xf0
'u', # 0xf1
'Y', # 0xf2
'y', # 0xf3
'Y', # 0xf4
'y', # 0xf5
'Y', # 0xf6
'y', # 0xf7
'Y', # 0xf8
'y', # 0xf9
'[?]', # 0xfa
'[?]', # 0xfb
'[?]', # 0xfc
'[?]', # 0xfd
'[?]', # 0xfe
)

View file

@ -1,257 +0,0 @@
data = (
'a', # 0x00
'a', # 0x01
'a', # 0x02
'a', # 0x03
'a', # 0x04
'a', # 0x05
'a', # 0x06
'a', # 0x07
'A', # 0x08
'A', # 0x09
'A', # 0x0a
'A', # 0x0b
'A', # 0x0c
'A', # 0x0d
'A', # 0x0e
'A', # 0x0f
'e', # 0x10
'e', # 0x11
'e', # 0x12
'e', # 0x13
'e', # 0x14
'e', # 0x15
'[?]', # 0x16
'[?]', # 0x17
'E', # 0x18
'E', # 0x19
'E', # 0x1a
'E', # 0x1b
'E', # 0x1c
'E', # 0x1d
'[?]', # 0x1e
'[?]', # 0x1f
'e', # 0x20
'e', # 0x21
'e', # 0x22
'e', # 0x23
'e', # 0x24
'e', # 0x25
'e', # 0x26
'e', # 0x27
'E', # 0x28
'E', # 0x29
'E', # 0x2a
'E', # 0x2b
'E', # 0x2c
'E', # 0x2d
'E', # 0x2e
'E', # 0x2f
'i', # 0x30
'i', # 0x31
'i', # 0x32
'i', # 0x33
'i', # 0x34
'i', # 0x35
'i', # 0x36
'i', # 0x37
'I', # 0x38
'I', # 0x39
'I', # 0x3a
'I', # 0x3b
'I', # 0x3c
'I', # 0x3d
'I', # 0x3e
'I', # 0x3f
'o', # 0x40
'o', # 0x41
'o', # 0x42
'o', # 0x43
'o', # 0x44
'o', # 0x45
'[?]', # 0x46
'[?]', # 0x47
'O', # 0x48
'O', # 0x49
'O', # 0x4a
'O', # 0x4b
'O', # 0x4c
'O', # 0x4d
'[?]', # 0x4e
'[?]', # 0x4f
'u', # 0x50
'u', # 0x51
'u', # 0x52
'u', # 0x53
'u', # 0x54
'u', # 0x55
'u', # 0x56
'u', # 0x57
'[?]', # 0x58
'U', # 0x59
'[?]', # 0x5a
'U', # 0x5b
'[?]', # 0x5c
'U', # 0x5d
'[?]', # 0x5e
'U', # 0x5f
'o', # 0x60
'o', # 0x61
'o', # 0x62
'o', # 0x63
'o', # 0x64
'o', # 0x65
'o', # 0x66
'o', # 0x67
'O', # 0x68
'O', # 0x69
'O', # 0x6a
'O', # 0x6b
'O', # 0x6c
'O', # 0x6d
'O', # 0x6e
'O', # 0x6f
'a', # 0x70
'a', # 0x71
'e', # 0x72
'e', # 0x73
'e', # 0x74
'e', # 0x75
'i', # 0x76
'i', # 0x77
'o', # 0x78
'o', # 0x79
'u', # 0x7a
'u', # 0x7b
'o', # 0x7c
'o', # 0x7d
'[?]', # 0x7e
'[?]', # 0x7f
'a', # 0x80
'a', # 0x81
'a', # 0x82
'a', # 0x83
'a', # 0x84
'a', # 0x85
'a', # 0x86
'a', # 0x87
'A', # 0x88
'A', # 0x89
'A', # 0x8a
'A', # 0x8b
'A', # 0x8c
'A', # 0x8d
'A', # 0x8e
'A', # 0x8f
'e', # 0x90
'e', # 0x91
'e', # 0x92
'e', # 0x93
'e', # 0x94
'e', # 0x95
'e', # 0x96
'e', # 0x97
'E', # 0x98
'E', # 0x99
'E', # 0x9a
'E', # 0x9b
'E', # 0x9c
'E', # 0x9d
'E', # 0x9e
'E', # 0x9f
'o', # 0xa0
'o', # 0xa1
'o', # 0xa2
'o', # 0xa3
'o', # 0xa4
'o', # 0xa5
'o', # 0xa6
'o', # 0xa7
'O', # 0xa8
'O', # 0xa9
'O', # 0xaa
'O', # 0xab
'O', # 0xac
'O', # 0xad
'O', # 0xae
'O', # 0xaf
'a', # 0xb0
'a', # 0xb1
'a', # 0xb2
'a', # 0xb3
'a', # 0xb4
'[?]', # 0xb5
'a', # 0xb6
'a', # 0xb7
'A', # 0xb8
'A', # 0xb9
'A', # 0xba
'A', # 0xbb
'A', # 0xbc
'\'', # 0xbd
'i', # 0xbe
'\'', # 0xbf
'~', # 0xc0
'"~', # 0xc1
'e', # 0xc2
'e', # 0xc3
'e', # 0xc4
'[?]', # 0xc5
'e', # 0xc6
'e', # 0xc7
'E', # 0xc8
'E', # 0xc9
'E', # 0xca
'E', # 0xcb
'E', # 0xcc
'\'`', # 0xcd
'\'\'', # 0xce
'\'~', # 0xcf
'i', # 0xd0
'i', # 0xd1
'i', # 0xd2
'i', # 0xd3
'[?]', # 0xd4
'[?]', # 0xd5
'i', # 0xd6
'i', # 0xd7
'I', # 0xd8
'I', # 0xd9
'I', # 0xda
'I', # 0xdb
'[?]', # 0xdc
'`\'', # 0xdd
'`\'', # 0xde
'`~', # 0xdf
'u', # 0xe0
'u', # 0xe1
'u', # 0xe2
'u', # 0xe3
'R', # 0xe4
'R', # 0xe5
'u', # 0xe6
'u', # 0xe7
'U', # 0xe8
'U', # 0xe9
'U', # 0xea
'U', # 0xeb
'R', # 0xec
'"`', # 0xed
'"\'', # 0xee
'`', # 0xef
'[?]', # 0xf0
'[?]', # 0xf1
'o', # 0xf2
'o', # 0xf3
'o', # 0xf4
'[?]', # 0xf5
'o', # 0xf6
'o', # 0xf7
'O', # 0xf8
'O', # 0xf9
'O', # 0xfa
'O', # 0xfb
'O', # 0xfc
'\'', # 0xfd
'`', # 0xfe
)

View file

@ -1,257 +0,0 @@
data = (
' ', # 0x00
' ', # 0x01
' ', # 0x02
' ', # 0x03
' ', # 0x04
' ', # 0x05
' ', # 0x06
' ', # 0x07
' ', # 0x08
' ', # 0x09
' ', # 0x0a
' ', # 0x0b
'', # 0x0c
'', # 0x0d
'', # 0x0e
'', # 0x0f
'-', # 0x10
'-', # 0x11
'-', # 0x12
'-', # 0x13
'--', # 0x14
'--', # 0x15
'||', # 0x16
'_', # 0x17
'\'', # 0x18
'\'', # 0x19
',', # 0x1a
'\'', # 0x1b
'"', # 0x1c
'"', # 0x1d
',,', # 0x1e
'"', # 0x1f
'+', # 0x20
'++', # 0x21
'*', # 0x22
'*>', # 0x23
'.', # 0x24
'..', # 0x25
'...', # 0x26
'.', # 0x27
'\x0a', # 0x28
'\x0a\x0a', # 0x29
'', # 0x2a
'', # 0x2b
'', # 0x2c
'', # 0x2d
'', # 0x2e
' ', # 0x2f
'%0', # 0x30
'%00', # 0x31
'\'', # 0x32
'\'\'', # 0x33
'\'\'\'', # 0x34
'`', # 0x35
'``', # 0x36
'```', # 0x37
'^', # 0x38
'<', # 0x39
'>', # 0x3a
'*', # 0x3b
'!!', # 0x3c
'!?', # 0x3d
'-', # 0x3e
'_', # 0x3f
'-', # 0x40
'^', # 0x41
'***', # 0x42
'--', # 0x43
'/', # 0x44
'-[', # 0x45
']-', # 0x46
'??', # 0x47
'?!', # 0x48
'!?', # 0x49
'7', # 0x4a
'PP', # 0x4b
'(]', # 0x4c
'[)', # 0x4d
'*', # 0x4e
'[?]', # 0x4f
'[?]', # 0x50
'[?]', # 0x51
'%', # 0x52
'~', # 0x53
'[?]', # 0x54
'[?]', # 0x55
'[?]', # 0x56
"''''", # 0x57
'[?]', # 0x58
'[?]', # 0x59
'[?]', # 0x5a
'[?]', # 0x5b
'[?]', # 0x5c
'[?]', # 0x5d
'[?]', # 0x5e
'[?]', # 0x5f
'', # 0x60
'[?]', # 0x61
'[?]', # 0x62
'[?]', # 0x63
'[?]', # 0x64
'[?]', # 0x65
'[?]', # 0x66
'[?]', # 0x67
'[?]', # 0x68
'[?]', # 0x69
'', # 0x6a
'', # 0x6b
'', # 0x6c
'', # 0x6d
'', # 0x6e
'', # 0x6f
'0', # 0x70
'', # 0x71
'', # 0x72
'', # 0x73
'4', # 0x74
'5', # 0x75
'6', # 0x76
'7', # 0x77
'8', # 0x78
'9', # 0x79
'+', # 0x7a
'-', # 0x7b
'=', # 0x7c
'(', # 0x7d
')', # 0x7e
'n', # 0x7f
'0', # 0x80
'1', # 0x81
'2', # 0x82
'3', # 0x83
'4', # 0x84
'5', # 0x85
'6', # 0x86
'7', # 0x87
'8', # 0x88
'9', # 0x89
'+', # 0x8a
'-', # 0x8b
'=', # 0x8c
'(', # 0x8d
')', # 0x8e
'[?]', # 0x8f
'[?]', # 0x90
'[?]', # 0x91
'[?]', # 0x92
'[?]', # 0x93
'[?]', # 0x94
'[?]', # 0x95
'[?]', # 0x96
'[?]', # 0x97
'[?]', # 0x98
'[?]', # 0x99
'[?]', # 0x9a
'[?]', # 0x9b
'[?]', # 0x9c
'[?]', # 0x9d
'[?]', # 0x9e
'[?]', # 0x9f
'ECU', # 0xa0
'CL', # 0xa1
'Cr', # 0xa2
'FF', # 0xa3
'L', # 0xa4
'mil', # 0xa5
'N', # 0xa6
'Pts', # 0xa7
'Rs', # 0xa8
'W', # 0xa9
'NS', # 0xaa
'D', # 0xab
'EUR', # 0xac
'K', # 0xad
'T', # 0xae
'Dr', # 0xaf
'[?]', # 0xb0
'[?]', # 0xb1
'[?]', # 0xb2
'[?]', # 0xb3
'[?]', # 0xb4
'[?]', # 0xb5
'[?]', # 0xb6
'[?]', # 0xb7
'[?]', # 0xb8
'[?]', # 0xb9
'[?]', # 0xba
'[?]', # 0xbb
'[?]', # 0xbc
'[?]', # 0xbd
'[?]', # 0xbe
'[?]', # 0xbf
'[?]', # 0xc0
'[?]', # 0xc1
'[?]', # 0xc2
'[?]', # 0xc3
'[?]', # 0xc4
'[?]', # 0xc5
'[?]', # 0xc6
'[?]', # 0xc7
'[?]', # 0xc8
'[?]', # 0xc9
'[?]', # 0xca
'[?]', # 0xcb
'[?]', # 0xcc
'[?]', # 0xcd
'[?]', # 0xce
'[?]', # 0xcf
'', # 0xd0
'', # 0xd1
'', # 0xd2
'', # 0xd3
'', # 0xd4
'', # 0xd5
'', # 0xd6
'', # 0xd7
'', # 0xd8
'', # 0xd9
'', # 0xda
'', # 0xdb
'', # 0xdc
'', # 0xdd
'', # 0xde
'', # 0xdf
'', # 0xe0
'', # 0xe1
'', # 0xe2
'', # 0xe3
'[?]', # 0xe4
'', # 0xe5
'[?]', # 0xe6
'[?]', # 0xe7
'[?]', # 0xe8
'[?]', # 0xe9
'[?]', # 0xea
'[?]', # 0xeb
'[?]', # 0xec
'[?]', # 0xed
'[?]', # 0xee
'[?]', # 0xef
'[?]', # 0xf0
'[?]', # 0xf1
'[?]', # 0xf2
'[?]', # 0xf3
'[?]', # 0xf4
'[?]', # 0xf5
'[?]', # 0xf6
'[?]', # 0xf7
'[?]', # 0xf8
'[?]', # 0xf9
'[?]', # 0xfa
'[?]', # 0xfb
'[?]', # 0xfc
'[?]', # 0xfd
'[?]', # 0xfe
)

View file

@ -1,257 +0,0 @@
data = (
'', # 0x00
'', # 0x01
'C', # 0x02
'', # 0x03
'', # 0x04
'', # 0x05
'', # 0x06
'', # 0x07
'', # 0x08
'', # 0x09
'', # 0x0a
'', # 0x0b
'', # 0x0c
'H', # 0x0d
'', # 0x0e
'', # 0x0f
'', # 0x10
'', # 0x11
'', # 0x12
'', # 0x13
'', # 0x14
'N', # 0x15
'', # 0x16
'', # 0x17
'', # 0x18
'P', # 0x19
'Q', # 0x1a
'', # 0x1b
'', # 0x1c
'R', # 0x1d
'', # 0x1e
'', # 0x1f
'(sm)', # 0x20
'TEL', # 0x21
'(tm)', # 0x22
'', # 0x23
'Z', # 0x24
'', # 0x25
'', # 0x26
'', # 0x27
'', # 0x28
'', # 0x29
'K', # 0x2a
'A', # 0x2b
'', # 0x2c
'', # 0x2d
'e', # 0x2e
'e', # 0x2f
'E', # 0x30
'F', # 0x31
'F', # 0x32
'M', # 0x33
'', # 0x34
'', # 0x35
'', # 0x36
'', # 0x37
'', # 0x38
'', # 0x39
'', # 0x3a
'FAX', # 0x3b
'', # 0x3c
'', # 0x3d
'', # 0x3e
'', # 0x3f
'[?]', # 0x40
'[?]', # 0x41
'[?]', # 0x42
'[?]', # 0x43
'[?]', # 0x44
'D', # 0x45
'd', # 0x46
'e', # 0x47
'i', # 0x48
'j', # 0x49
'[?]', # 0x4a
'[?]', # 0x4b
'[?]', # 0x4c
'[?]', # 0x4d
'F', # 0x4e
'[?]', # 0x4f
'[?]', # 0x50
'[?]', # 0x51
'[?]', # 0x52
' 1/3 ', # 0x53
' 2/3 ', # 0x54
' 1/5 ', # 0x55
' 2/5 ', # 0x56
' 3/5 ', # 0x57
' 4/5 ', # 0x58
' 1/6 ', # 0x59
' 5/6 ', # 0x5a
' 1/8 ', # 0x5b
' 3/8 ', # 0x5c
' 5/8 ', # 0x5d
' 7/8 ', # 0x5e
' 1/', # 0x5f
'I', # 0x60
'II', # 0x61
'III', # 0x62
'IV', # 0x63
'V', # 0x64
'VI', # 0x65
'VII', # 0x66
'VIII', # 0x67
'IX', # 0x68
'X', # 0x69
'XI', # 0x6a
'XII', # 0x6b
'L', # 0x6c
'C', # 0x6d
'D', # 0x6e
'M', # 0x6f
'i', # 0x70
'ii', # 0x71
'iii', # 0x72
'iv', # 0x73
'v', # 0x74
'vi', # 0x75
'vii', # 0x76
'viii', # 0x77
'ix', # 0x78
'x', # 0x79
'xi', # 0x7a
'xii', # 0x7b
'l', # 0x7c
'c', # 0x7d
'd', # 0x7e
'm', # 0x7f
'(D', # 0x80
'D)', # 0x81
'((|))', # 0x82
')', # 0x83
'[?]', # 0x84
'[?]', # 0x85
'[?]', # 0x86
'[?]', # 0x87
'[?]', # 0x88
'[?]', # 0x89
'[?]', # 0x8a
'[?]', # 0x8b
'[?]', # 0x8c
'[?]', # 0x8d
'[?]', # 0x8e
'[?]', # 0x8f
'-', # 0x90
'|', # 0x91
'-', # 0x92
'|', # 0x93
'-', # 0x94
'|', # 0x95
'\\', # 0x96
'/', # 0x97
'\\', # 0x98
'/', # 0x99
'-', # 0x9a
'-', # 0x9b
'~', # 0x9c
'~', # 0x9d
'-', # 0x9e
'|', # 0x9f
'-', # 0xa0
'|', # 0xa1
'-', # 0xa2
'-', # 0xa3
'-', # 0xa4
'|', # 0xa5
'-', # 0xa6
'|', # 0xa7
'|', # 0xa8
'-', # 0xa9
'-', # 0xaa
'-', # 0xab
'-', # 0xac
'-', # 0xad
'-', # 0xae
'|', # 0xaf
'|', # 0xb0
'|', # 0xb1
'|', # 0xb2
'|', # 0xb3
'|', # 0xb4
'|', # 0xb5
'^', # 0xb6
'V', # 0xb7
'\\', # 0xb8
'=', # 0xb9
'V', # 0xba
'^', # 0xbb
'-', # 0xbc
'-', # 0xbd
'|', # 0xbe
'|', # 0xbf
'-', # 0xc0
'-', # 0xc1
'|', # 0xc2
'|', # 0xc3
'=', # 0xc4
'|', # 0xc5
'=', # 0xc6
'=', # 0xc7
'|', # 0xc8
'=', # 0xc9
'|', # 0xca
'=', # 0xcb
'=', # 0xcc
'=', # 0xcd
'=', # 0xce
'=', # 0xcf
'=', # 0xd0
'|', # 0xd1
'=', # 0xd2
'|', # 0xd3
'=', # 0xd4
'|', # 0xd5
'\\', # 0xd6
'/', # 0xd7
'\\', # 0xd8
'/', # 0xd9
'=', # 0xda
'=', # 0xdb
'~', # 0xdc
'~', # 0xdd
'|', # 0xde
'|', # 0xdf
'-', # 0xe0
'|', # 0xe1
'-', # 0xe2
'|', # 0xe3
'-', # 0xe4
'-', # 0xe5
'-', # 0xe6
'|', # 0xe7
'-', # 0xe8
'|', # 0xe9
'|', # 0xea
'|', # 0xeb
'|', # 0xec
'|', # 0xed
'|', # 0xee
'|', # 0xef
'-', # 0xf0
'\\', # 0xf1
'\\', # 0xf2
'|', # 0xf3
'[?]', # 0xf4
'[?]', # 0xf5
'[?]', # 0xf6
'[?]', # 0xf7
'[?]', # 0xf8
'[?]', # 0xf9
'[?]', # 0xfa
'[?]', # 0xfb
'[?]', # 0xfc
'[?]', # 0xfd
'[?]', # 0xfe
)

View file

@ -1,257 +0,0 @@
# Transliteration table (255 slots, indexed 0x00-0xfe).  Nearly every slot is
# the '[?]' placeholder used for characters with no ASCII approximation; the
# handful of mapped slots below carry simple ASCII stand-ins for what appear
# to be punctuation/operator characters (offsets are table indices, not
# Unicode code points -- the block offset is determined by the file name,
# which is not visible here).
_mapped = {
    0x12: '-',
    0x15: '/',
    0x16: '\\',
    0x17: '*',
    0x23: '|',
    0x36: ':',
    0x3c: '~',
    0x64: '<=',
    0x65: '>=',
    0x66: '<=',
    0x67: '>=',
}
data = tuple(_mapped.get(index, '[?]') for index in range(0xff))

View file

@ -1,257 +0,0 @@
# Transliteration table (255 slots, indexed 0x00-0xfe).  Only three slots map
# to ASCII text; every other character in this block is unmapped and falls
# back to the '[?]' placeholder.
_mapped = {
    0x03: '^',
    0x29: '<',
    0x2a: '> ',  # trailing space reproduced exactly from the original table
}
data = tuple(_mapped.get(index, '[?]') for index in range(0xff))

View file

@ -1,257 +0,0 @@
# Transliteration table (255 slots, indexed 0x00-0xfe) built from regular
# runs.  The mapped region reduces enclosed/annotated alphanumerics to plain
# ASCII: bare numbers, parenthesized numbers, numbers with a full stop,
# parenthesized letters, and two full a-z alphabets.  Empty-string slots
# transliterate to nothing; '[?]' marks unmapped slots.
_numbers = [str(n) for n in range(1, 21)]                      # '1' .. '20'
_letters = [chr(c) for c in range(ord('a'), ord('z') + 1)]     # 'a' .. 'z'
data = tuple(
    [''] * 0x27                            # 0x00-0x26: dropped on output
    + ['[?]'] * (0x40 - 0x27)              # 0x27-0x3f: unmapped
    + [''] * (0x4b - 0x40)                 # 0x40-0x4a: dropped on output
    + ['[?]'] * (0x60 - 0x4b)              # 0x4b-0x5f: unmapped
    + _numbers                             # 0x60-0x73: '1' .. '20'
    + ['(' + n + ')' for n in _numbers]    # 0x74-0x87: '(1)' .. '(20)'
    + [n + '.' for n in _numbers]          # 0x88-0x9b: '1.' .. '20.'
    + ['(' + a + ')' for a in _letters]    # 0x9c-0xb5: '(a)' .. '(z)'
    + _letters                             # 0xb6-0xcf: 'a' .. 'z'
    + _letters                             # 0xd0-0xe9: 'a' .. 'z' (second set)
    + ['0']                                # 0xea
    + ['[?]'] * (0xff - 0xeb)              # 0xeb-0xfe: unmapped
)

View file

@ -1,257 +0,0 @@
# Transliteration table (255 slots, indexed 0x00-0xfe), stored run-length
# encoded: each (text, count) pair expands to `count` consecutive entries.
# The ASCII stand-ins ('-', '|', '+', '#', '*', ...) suggest this covers
# line/box-drawing and geometric-shape characters, but the exact Unicode
# block depends on the (unseen) file name -- offsets below are table
# indices, not code points.
_runs = (
    ('-', 2), ('|', 2),                       # 0x00-0x03
    ('-', 2), ('|', 2),                       # 0x04-0x07
    ('-', 2), ('|', 2),                       # 0x08-0x0b
    ('+', 64),                                # 0x0c-0x4b
    ('-', 2), ('|', 2),                       # 0x4c-0x4f
    ('-', 1), ('|', 1),                       # 0x50-0x51
    ('+', 31),                                # 0x52-0x70
    ('/', 1), ('\\', 1), ('X', 1),            # 0x71-0x73
    ('-', 1), ('|', 1), ('-', 1), ('|', 1),   # 0x74-0x77
    ('-', 1), ('|', 1), ('-', 1), ('|', 1),   # 0x78-0x7b
    ('-', 1), ('|', 1), ('-', 1), ('|', 1),   # 0x7c-0x7f
    ('#', 20),                                # 0x80-0x93
    ('-', 1), ('|', 1),                       # 0x94-0x95
    ('[?]', 10),                              # 0x96-0x9f: unmapped
    ('#', 18),                                # 0xa0-0xb1
    ('^', 4), ('>', 6), ('V', 4), ('<', 6),   # 0xb2-0xc5
    ('*', 33),                                # 0xc6-0xe6
    ('#', 5), ('^', 3), ('O', 1),             # 0xe7-0xef
    ('#', 8),                                 # 0xf0-0xf7
    ('[?]', 7),                               # 0xf8-0xfe: unmapped
)
data = tuple(text for text, count in _runs for _ in range(count))

Some files were not shown because too many files have changed in this diff. Show more