Update vendored beets to 1.6.0

Updates colorama to 0.4.6
Adds confuse 1.7.0
Updates jellyfish to 0.9.0
Adds mediafile 0.10.1
Updates munkres to 1.1.4
Updates musicbrainzngs to 0.7.1
Updates mutagen to 1.46.0
Updates pyyaml to 6.0
Updates unidecode to 1.3.6
Labrys of Knossos 2022-11-28 18:02:40 -05:00
commit 56c6773c6b
385 changed files with 25143 additions and 18080 deletions


@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Copyright (C) 2005 Michael Urman
#
# This program is free software; you can redistribute it and/or modify
@@ -23,7 +22,7 @@ from mutagen._util import MutagenError
from mutagen._file import FileType, StreamInfo, File
from mutagen._tags import Tags, Metadata, PaddingInfo
version = (1, 41, 1)
version = (1, 46, 0)
"""Version tuple."""
version_string = ".".join(map(str, version))
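
The bumped version tuple is part of mutagen's public API; a minimal sketch (illustrative, not part of this diff) of checking the vendored copy at runtime:

    import mutagen

    # version is a tuple, version_string its dotted string form
    assert mutagen.version >= (1, 46, 0)
    print(mutagen.version_string)   # "1.46.0"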


@@ -1,92 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright (C) 2013 Christoph Reiter
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import sys
PY2 = sys.version_info[0] == 2
PY3 = not PY2
if PY2:
from StringIO import StringIO
BytesIO = StringIO
from cStringIO import StringIO as cBytesIO
from itertools import izip
long_ = long
integer_types = (int, long)
string_types = (str, unicode)
text_type = unicode
xrange = xrange
cmp = cmp
chr_ = chr
def endswith(text, end):
return text.endswith(end)
iteritems = lambda d: d.iteritems()
itervalues = lambda d: d.itervalues()
iterkeys = lambda d: d.iterkeys()
iterbytes = lambda b: iter(b)
exec("def reraise(tp, value, tb):\n raise tp, value, tb")
def swap_to_string(cls):
if "__str__" in cls.__dict__:
cls.__unicode__ = cls.__str__
if "__bytes__" in cls.__dict__:
cls.__str__ = cls.__bytes__
return cls
import __builtin__ as builtins
builtins
elif PY3:
from io import StringIO
StringIO = StringIO
from io import BytesIO
cBytesIO = BytesIO
long_ = int
integer_types = (int,)
string_types = (str,)
text_type = str
izip = zip
xrange = range
cmp = lambda a, b: (a > b) - (a < b)
chr_ = lambda x: bytes([x])
def endswith(text, end):
# usefull for paths which can be both, str and bytes
if isinstance(text, str):
if not isinstance(end, str):
end = end.decode("ascii")
else:
if not isinstance(end, bytes):
end = end.encode("ascii")
return text.endswith(end)
iteritems = lambda d: iter(d.items())
itervalues = lambda d: iter(d.values())
iterkeys = lambda d: iter(d.keys())
iterbytes = lambda b: (bytes([v]) for v in b)
def reraise(tp, value, tb):
raise tp(value).with_traceback(tb)
def swap_to_string(cls):
return cls
import builtins
builtins
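
The _compat shim deleted above is no longer needed now that the vendored mutagen is Python-3-only; a minimal sketch (illustrative only, not mutagen code) of the plain Python 3 idioms that replace its most-used helpers; mutagen itself keeps a reraise helper in mutagen._util, as the new _iff.py below shows:

    def reraise(tp, value, tb):
        # Python-3-only equivalent of the removed _compat.reraise
        raise tp(value).with_traceback(tb)

    # izip / iteritems / iterkeys / itervalues collapse to the builtins:
    tags = {"artist": "Someone", "title": "Something"}
    for key, value in tags.items():      # was iteritems(tags)
        print(key, value)
    for left, right in zip("ab", "cd"):  # was izip(...)
        print(left, right)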


@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by


@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Copyright (C) 2005 Michael Urman
#
# This program is free software; you can redistribute it and/or modify
@@ -9,7 +8,6 @@
import warnings
from mutagen._util import DictMixin, loadfile
from mutagen._compat import izip
class FileType(DictMixin):
@@ -97,7 +95,7 @@ class FileType(DictMixin):
return self.tags.keys()
@loadfile(writable=True)
def delete(self, filething):
def delete(self, filething=None):
"""delete(filething=None)
Remove tags from a file.
@@ -120,7 +118,7 @@ class FileType(DictMixin):
return self.tags.delete(filething)
@loadfile(writable=True)
def save(self, filething, **kwargs):
def save(self, filething=None, **kwargs):
"""save(filething=None, **kwargs)
Save metadata tags.
@@ -221,13 +219,13 @@ def File(filething, options=None, easy=False):
filething (filething)
options: Sequence of :class:`FileType` implementations,
defaults to all included ones.
easy (bool): If the easy wrappers should be returnd if available.
easy (bool): If the easy wrappers should be returned if available.
For example :class:`EasyMP3 <mp3.EasyMP3>` instead of
:class:`MP3 <mp3.MP3>`.
Returns:
FileType: A FileType instance for the detected type or `None` in case
the type couln't be determined.
the type couldn't be determined.
Raises:
MutagenError: in case the detected type fails to load the file.
@@ -264,12 +262,16 @@ def File(filething, options=None, easy=False):
from mutagen.optimfrog import OptimFROG
from mutagen.aiff import AIFF
from mutagen.aac import AAC
from mutagen.ac3 import AC3
from mutagen.smf import SMF
from mutagen.tak import TAK
from mutagen.dsf import DSF
from mutagen.dsdiff import DSDIFF
from mutagen.wave import WAVE
options = [MP3, TrueAudio, OggTheora, OggSpeex, OggVorbis, OggFLAC,
FLAC, AIFF, APEv2File, MP4, ID3FileType, WavPack,
Musepack, MonkeysAudio, OptimFROG, ASF, OggOpus, AAC,
SMF, DSF]
Musepack, MonkeysAudio, OptimFROG, ASF, OggOpus, AAC, AC3,
SMF, TAK, DSF, DSDIFF, WAVE]
if not options:
return None
@@ -287,7 +289,7 @@ def File(filething, options=None, easy=False):
results = [(Kind.score(filething.name, fileobj, header), Kind.__name__)
for Kind in options]
results = list(izip(results, options))
results = list(zip(results, options))
results.sort()
(score, name), Kind = results[-1]
if score > 0:
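
The updated dispatcher also probes AC3, TAK, DSDIFF and WAVE now; a minimal usage sketch, assuming a local file named example.mp3:

    import mutagen

    audio = mutagen.File("example.mp3", easy=True)   # None if the type is undetected
    if audio is None:
        raise SystemExit("could not determine the file type")
    print(type(audio).__name__)   # e.g. EasyMP3, because easy=True
    print(audio.pprint())         # stream info plus tags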

libs/common/mutagen/_iff.py (new file, 386 lines added)

@@ -0,0 +1,386 @@
# Copyright (C) 2014 Evan Purkhiser
# 2014 Ben Ockmore
# 2017 Borewit
# 2019-2020 Philipp Wolfer
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""Base classes for various IFF based formats (e.g. AIFF or RIFF)."""
import sys
from mutagen.id3 import ID3
from mutagen.id3._util import ID3NoHeaderError, error as ID3Error
from mutagen._util import (
MutagenError,
convert_error,
delete_bytes,
insert_bytes,
loadfile,
reraise,
resize_bytes,
)
class error(MutagenError):
pass
class InvalidChunk(error):
pass
class EmptyChunk(InvalidChunk):
pass
def is_valid_chunk_id(id):
""" is_valid_chunk_id(FOURCC)
Arguments:
id (FOURCC)
Returns:
true if valid; otherwise false
Check if argument id is valid FOURCC type.
"""
assert isinstance(id, str), \
'id is of type %s, must be str: %r' % (type(id), id)
return ((0 < len(id) <= 4) and (min(id) >= ' ') and
(max(id) <= '~'))
# Assert FOURCC formatted valid
def assert_valid_chunk_id(id):
if not is_valid_chunk_id(id):
raise ValueError("IFF chunk ID must be four ASCII characters.")
class IffChunk(object):
"""Generic representation of a single IFF chunk.
IFF chunks always consist of an ID followed by the chunk size. The exact
format varies between different IFF based formats, e.g. AIFF uses
big-endian while RIFF uses little-endian.
"""
# Chunk headers are usually 8 bytes long (4 for ID and 4 for the size)
HEADER_SIZE = 8
@classmethod
def parse_header(cls, header):
"""Read ID and data_size from the given header.
Must be implemented in subclasses."""
raise error("Not implemented")
def write_new_header(self, id_, size):
"""Write the chunk header with id_ and size to the file.
Must be implemented in subclasses. The data must be written
to the current position in self._fileobj."""
raise error("Not implemented")
def write_size(self):
"""Write self.data_size to the file.
Must be implemented in subclasses. The data must be written
to the current position in self._fileobj."""
raise error("Not implemented")
@classmethod
def get_class(cls, id):
"""Returns the class for a new chunk for a given ID.
Can be overridden in subclasses to implement specific chunk types."""
return cls
@classmethod
def parse(cls, fileobj, parent_chunk=None):
header = fileobj.read(cls.HEADER_SIZE)
if len(header) < cls.HEADER_SIZE:
raise EmptyChunk('Header size < %i' % cls.HEADER_SIZE)
id, data_size = cls.parse_header(header)
try:
id = id.decode('ascii').rstrip()
except UnicodeDecodeError as e:
raise InvalidChunk(e)
if not is_valid_chunk_id(id):
raise InvalidChunk('Invalid chunk ID %r' % id)
return cls.get_class(id)(fileobj, id, data_size, parent_chunk)
def __init__(self, fileobj, id, data_size, parent_chunk):
self._fileobj = fileobj
self.id = id
self.data_size = data_size
self.parent_chunk = parent_chunk
self.data_offset = fileobj.tell()
self.offset = self.data_offset - self.HEADER_SIZE
self._calculate_size()
def __repr__(self):
return ("<%s id=%s, offset=%i, size=%i, data_offset=%i, data_size=%i>"
% (type(self).__name__, self.id, self.offset, self.size,
self.data_offset, self.data_size))
def read(self):
"""Read the chunks data"""
self._fileobj.seek(self.data_offset)
return self._fileobj.read(self.data_size)
def write(self, data):
"""Write the chunk data"""
if len(data) > self.data_size:
raise ValueError
self._fileobj.seek(self.data_offset)
self._fileobj.write(data)
# Write the padding bytes
padding = self.padding()
if padding:
self._fileobj.seek(self.data_offset + self.data_size)
self._fileobj.write(b'\x00' * padding)
def delete(self):
"""Removes the chunk from the file"""
delete_bytes(self._fileobj, self.size, self.offset)
if self.parent_chunk is not None:
self.parent_chunk._remove_subchunk(self)
self._fileobj.flush()
def _update_size(self, size_diff, changed_subchunk=None):
"""Update the size of the chunk"""
old_size = self.size
self.data_size += size_diff
self._fileobj.seek(self.offset + 4)
self.write_size()
self._calculate_size()
if self.parent_chunk is not None:
self.parent_chunk._update_size(self.size - old_size, self)
if changed_subchunk:
self._update_sibling_offsets(
changed_subchunk, old_size - self.size)
def _calculate_size(self):
self.size = self.HEADER_SIZE + self.data_size + self.padding()
assert self.size % 2 == 0
def resize(self, new_data_size):
"""Resize the file and update the chunk sizes"""
padding = new_data_size % 2
resize_bytes(self._fileobj, self.data_size + self.padding(),
new_data_size + padding, self.data_offset)
size_diff = new_data_size - self.data_size
self._update_size(size_diff)
self._fileobj.flush()
def padding(self):
"""Returns the number of padding bytes (0 or 1).
IFF chunks are required to be a even number in total length. If
data_size is odd a padding byte will be added at the end.
"""
return self.data_size % 2
class IffContainerChunkMixin():
"""A IFF chunk containing other chunks.
A container chunk can have an additional name as the first 4 bytes of the
chunk data followed by an arbitrary number of subchunks. The root chunk of
the file is always a container chunk (e.g. the AIFF chunk or the FORM chunk
for RIFF) but there can be other types of container chunks (e.g. the LIST
chunks used in RIFF).
"""
def parse_next_subchunk(self):
""""""
raise error("Not implemented")
def init_container(self, name_size=4):
# Lists can store an additional name identifier before the subchunks
self.__name_size = name_size
if self.data_size < name_size:
raise InvalidChunk(
'Container chunk data size < %i' % name_size)
# Read the container name
if name_size > 0:
try:
self.name = self._fileobj.read(name_size).decode('ascii')
except UnicodeDecodeError as e:
raise error(e)
else:
self.name = None
# Load all IFF subchunks
self.__subchunks = []
def subchunks(self):
"""Returns a list of all subchunks.
The list is lazily loaded on first access.
"""
if not self.__subchunks:
next_offset = self.data_offset + self.__name_size
while next_offset < self.offset + self.size:
self._fileobj.seek(next_offset)
try:
chunk = self.parse_next_subchunk()
except EmptyChunk:
break
except InvalidChunk:
break
self.__subchunks.append(chunk)
# Calculate the location of the next chunk
next_offset = chunk.offset + chunk.size
return self.__subchunks
def insert_chunk(self, id_, data=None):
"""Insert a new chunk at the end of the container chunk"""
if not is_valid_chunk_id(id_):
raise KeyError("Invalid IFF key.")
next_offset = self.offset + self.size
size = self.HEADER_SIZE
data_size = 0
if data:
data_size = len(data)
padding = data_size % 2
size += data_size + padding
insert_bytes(self._fileobj, size, next_offset)
self._fileobj.seek(next_offset)
self.write_new_header(id_.ljust(4).encode('ascii'), data_size)
self._fileobj.seek(next_offset)
chunk = self.parse_next_subchunk()
self._update_size(chunk.size)
if data:
chunk.write(data)
self.subchunks().append(chunk)
self._fileobj.flush()
return chunk
def __contains__(self, id_):
"""Check if this chunk contains a specific subchunk."""
assert_valid_chunk_id(id_)
try:
self[id_]
return True
except KeyError:
return False
def __getitem__(self, id_):
"""Get a subchunk by ID."""
assert_valid_chunk_id(id_)
found_chunk = None
for chunk in self.subchunks():
if chunk.id == id_:
found_chunk = chunk
break
else:
raise KeyError("No %r chunk found" % id_)
return found_chunk
def __delitem__(self, id_):
"""Remove a chunk from the IFF file"""
assert_valid_chunk_id(id_)
self[id_].delete()
def _remove_subchunk(self, chunk):
assert chunk in self.__subchunks
self._update_size(-chunk.size, chunk)
self.__subchunks.remove(chunk)
def _update_sibling_offsets(self, changed_subchunk, size_diff):
"""Update the offsets of subchunks after `changed_subchunk`.
"""
index = self.__subchunks.index(changed_subchunk)
sibling_chunks = self.__subchunks[index + 1:len(self.__subchunks)]
for sibling in sibling_chunks:
sibling.offset -= size_diff
sibling.data_offset -= size_diff
class IffFile:
"""Representation of a IFF file"""
def __init__(self, chunk_cls, fileobj):
fileobj.seek(0)
self.root = chunk_cls.parse(fileobj)
def __contains__(self, id_):
"""Check if the IFF file contains a specific chunk"""
return id_ in self.root
def __getitem__(self, id_):
"""Get a chunk from the IFF file"""
return self.root[id_]
def __delitem__(self, id_):
"""Remove a chunk from the IFF file"""
self.delete_chunk(id_)
def delete_chunk(self, id_):
"""Remove a chunk from the IFF file"""
del self.root[id_]
def insert_chunk(self, id_, data=None):
"""Insert a new chunk at the end of the IFF file"""
return self.root.insert_chunk(id_, data)
class IffID3(ID3):
"""A generic IFF file with ID3v2 tags"""
def _load_file(self, fileobj):
raise error("Not implemented")
def _pre_load_header(self, fileobj):
try:
fileobj.seek(self._load_file(fileobj)['ID3'].data_offset)
except (InvalidChunk, KeyError):
raise ID3NoHeaderError("No ID3 chunk")
@convert_error(IOError, error)
@loadfile(writable=True)
def save(self, filething=None, v2_version=4, v23_sep='/', padding=None):
"""Save ID3v2 data to the IFF file"""
fileobj = filething.fileobj
iff_file = self._load_file(fileobj)
if 'ID3' not in iff_file:
iff_file.insert_chunk('ID3')
chunk = iff_file['ID3']
try:
data = self._prepare_data(
fileobj, chunk.data_offset, chunk.data_size, v2_version,
v23_sep, padding)
except ID3Error as e:
reraise(error, e, sys.exc_info()[2])
chunk.resize(len(data))
chunk.write(data)
@convert_error(IOError, error)
@loadfile(writable=True)
def delete(self, filething=None):
"""Completely removes the ID3 chunk from the IFF file"""
try:
iff_file = self._load_file(filething.fileobj)
del iff_file['ID3']
except KeyError:
pass
self.clear()
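
IffID3 above deliberately leaves _load_file() abstract; a hypothetical sketch (not mutagen's actual wave/aiff code) of how a format module is expected to wire it up, using the RiffFile class introduced in the next file:

    from mutagen._iff import IffID3
    from mutagen._riff import RiffFile

    class HypotheticalRiffID3(IffID3):
        """ID3 tags stored in an 'ID3' chunk of a RIFF container."""

        def _load_file(self, fileobj):
            # save()/delete() only need an IffFile-like object that can
            # look up, insert and resize the 'ID3' chunk.
            return RiffFile(fileobj)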


@@ -0,0 +1,69 @@
# Copyright (C) 2017 Borewit
# Copyright (C) 2019-2020 Philipp Wolfer
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""Resource Interchange File Format (RIFF)."""
import struct
from struct import pack
from mutagen._iff import (
IffChunk,
IffContainerChunkMixin,
IffFile,
InvalidChunk,
)
class RiffChunk(IffChunk):
"""Generic RIFF chunk"""
@classmethod
def parse_header(cls, header):
return struct.unpack('<4sI', header)
@classmethod
def get_class(cls, id):
if id in (u'LIST', u'RIFF'):
return RiffListChunk
else:
return cls
def write_new_header(self, id_, size):
self._fileobj.write(pack('<4sI', id_, size))
def write_size(self):
self._fileobj.write(pack('<I', self.data_size))
class RiffListChunk(RiffChunk, IffContainerChunkMixin):
"""A RIFF chunk containing other chunks.
This is either a 'LIST' or 'RIFF'
"""
def parse_next_subchunk(self):
return RiffChunk.parse(self._fileobj, self)
def __init__(self, fileobj, id, data_size, parent_chunk):
if id not in (u'RIFF', u'LIST'):
raise InvalidChunk('Expected RIFF or LIST chunk, got %s' % id)
RiffChunk.__init__(self, fileobj, id, data_size, parent_chunk)
self.init_container()
class RiffFile(IffFile):
"""Representation of a RIFF file"""
def __init__(self, fileobj):
super().__init__(RiffChunk, fileobj)
if self.root.id != u'RIFF':
raise InvalidChunk("Root chunk must be a RIFF chunk, got %s"
% self.root.id)
self.file_type = self.root.name
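
This appears to be mutagen's internal _riff module; a minimal read-only sketch of using it, assuming a local RIFF/WAVE file named example.wav (chunk IDs are stored right-stripped, so 'fmt ' is reported as 'fmt'):

    from mutagen._riff import RiffFile

    with open("example.wav", "rb") as f:
        riff = RiffFile(f)
        print(riff.file_type)                    # e.g. 'WAVE'
        for chunk in riff.root.subchunks():
            print(chunk.id, chunk.data_size)
        if 'ID3' in riff:
            print("ID3 chunk at offset", riff['ID3'].offset)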


@@ -1,91 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2016 Christoph Reiter
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import os
if os.name != "nt":
# make imports work
_winapi = object()
from ._fsnative import fsnative, path2fsn, fsn2text, fsn2bytes, \
bytes2fsn, uri2fsn, fsn2uri, text2fsn, fsn2norm
from ._print import print_, input_, supports_ansi_escape_codes
from ._stdlib import sep, pathsep, curdir, pardir, altsep, extsep, devnull, \
defpath, getcwd, expanduser, expandvars
from ._argv import argv
from ._environ import environ, getenv, unsetenv, putenv
from ._temp import mkstemp, gettempdir, gettempprefix, mkdtemp
fsnative, print_, getcwd, getenv, unsetenv, putenv, environ, expandvars, \
path2fsn, fsn2text, fsn2bytes, bytes2fsn, uri2fsn, fsn2uri, mkstemp, \
gettempdir, gettempprefix, mkdtemp, input_, expanduser, text2fsn, \
supports_ansi_escape_codes, fsn2norm
version = (1, 3, 4)
"""Tuple[`int`, `int`, `int`]: The version tuple (major, minor, micro)"""
version_string = ".".join(map(str, version))
"""`str`: A version string"""
argv = argv
"""List[`fsnative`]: Like `sys.argv` but contains unicode under
Windows + Python 2
"""
sep = sep
"""`fsnative`: Like `os.sep` but a `fsnative`"""
pathsep = pathsep
"""`fsnative`: Like `os.pathsep` but a `fsnative`"""
curdir = curdir
"""`fsnative`: Like `os.curdir` but a `fsnative`"""
pardir = pardir
"""`fsnative`: Like `os.pardir` but a fsnative"""
altsep = altsep
"""`fsnative` or `None`: Like `os.altsep` but a `fsnative` or `None`"""
extsep = extsep
"""`fsnative`: Like `os.extsep` but a `fsnative`"""
devnull = devnull
"""`fsnative`: Like `os.devnull` but a `fsnative`"""
defpath = defpath
"""`fsnative`: Like `os.defpath` but a `fsnative`"""
__all__ = []
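
The senf copy is deleted from the vendored tree in this commit; for context, a minimal sketch of the conversion helpers its docstrings describe, assuming the standalone senf package is installed:

    from senf import fsnative, path2fsn, fsn2text, fsn2bytes, bytes2fsn

    path = path2fsn("hypothetical/dir")   # any path-like value -> fsnative
    raw = fsn2bytes(path, "utf-8")        # the encoding only matters on Windows
    assert bytes2fsn(raw, "utf-8") == path
    print(fsn2text(path))                 # display text, lossy but never fails
    print(isinstance(path, fsnative))     # True: fsnative supports isinstance()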


@@ -1,117 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2016 Christoph Reiter
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import sys
import ctypes
import collections
from functools import total_ordering
from ._compat import PY2, string_types
from ._fsnative import is_win, _fsn2legacy, path2fsn
from . import _winapi as winapi
def _get_win_argv():
"""Returns a unicode argv under Windows and standard sys.argv otherwise
Returns:
List[`fsnative`]
"""
assert is_win
argc = ctypes.c_int()
try:
argv = winapi.CommandLineToArgvW(
winapi.GetCommandLineW(), ctypes.byref(argc))
except WindowsError:
return []
if not argv:
return []
res = argv[max(0, argc.value - len(sys.argv)):argc.value]
winapi.LocalFree(argv)
return res
@total_ordering
class Argv(collections.MutableSequence):
"""List[`fsnative`]: Like `sys.argv` but contains unicode
keys and values under Windows + Python 2.
Any changes made will be forwarded to `sys.argv`.
"""
def __init__(self):
if PY2 and is_win:
self._argv = _get_win_argv()
else:
self._argv = sys.argv
def __getitem__(self, index):
return self._argv[index]
def __setitem__(self, index, value):
if isinstance(value, string_types):
value = path2fsn(value)
self._argv[index] = value
if sys.argv is not self._argv:
try:
if isinstance(value, string_types):
sys.argv[index] = _fsn2legacy(value)
else:
sys.argv[index] = [_fsn2legacy(path2fsn(v)) for v in value]
except IndexError:
pass
def __delitem__(self, index):
del self._argv[index]
try:
del sys.argv[index]
except IndexError:
pass
def __eq__(self, other):
return self._argv == other
def __lt__(self, other):
return self._argv < other
def __len__(self):
return len(self._argv)
def __repr__(self):
return repr(self._argv)
def insert(self, index, value):
value = path2fsn(value)
self._argv.insert(index, value)
if sys.argv is not self._argv:
sys.argv.insert(index, _fsn2legacy(value))
argv = Argv()
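
A trivial sketch of the documented Argv behaviour, again assuming the standalone senf package:

    import sys
    from senf import argv

    print(argv[0])                          # fsnative mirror of sys.argv[0]
    argv.append("--hypothetical-flag")      # changes are forwarded to sys.argv
    assert sys.argv[-1] == "--hypothetical-flag"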


@@ -1,58 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2016 Christoph Reiter
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import sys
PY2 = sys.version_info[0] == 2
PY3 = not PY2
if PY2:
from urlparse import urlparse, urlunparse
urlparse, urlunparse
from urllib import quote, unquote
quote, unquote
from StringIO import StringIO
BytesIO = StringIO
from io import StringIO as TextIO
TextIO
string_types = (str, unicode)
text_type = unicode
iteritems = lambda d: d.iteritems()
elif PY3:
from urllib.parse import urlparse, quote, unquote, urlunparse
urlparse, quote, unquote, urlunparse
from io import StringIO
StringIO = StringIO
TextIO = StringIO
from io import BytesIO
BytesIO = BytesIO
string_types = (str,)
text_type = str
iteritems = lambda d: iter(d.items())


@@ -1,267 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2016 Christoph Reiter
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import os
import ctypes
import collections
from ._compat import text_type, PY2
from ._fsnative import path2fsn, is_win, _fsn2legacy, fsnative
from . import _winapi as winapi
def get_windows_env_var(key):
"""Get an env var.
Raises:
WindowsError
"""
if not isinstance(key, text_type):
raise TypeError("%r not of type %r" % (key, text_type))
buf = ctypes.create_unicode_buffer(32767)
stored = winapi.GetEnvironmentVariableW(key, buf, 32767)
if stored == 0:
raise ctypes.WinError()
return buf[:stored]
def set_windows_env_var(key, value):
"""Set an env var.
Raises:
WindowsError
"""
if not isinstance(key, text_type):
raise TypeError("%r not of type %r" % (key, text_type))
if not isinstance(value, text_type):
raise TypeError("%r not of type %r" % (value, text_type))
status = winapi.SetEnvironmentVariableW(key, value)
if status == 0:
raise ctypes.WinError()
def del_windows_env_var(key):
"""Delete an env var.
Raises:
WindowsError
"""
if not isinstance(key, text_type):
raise TypeError("%r not of type %r" % (key, text_type))
status = winapi.SetEnvironmentVariableW(key, None)
if status == 0:
raise ctypes.WinError()
def read_windows_environ():
"""Returns a unicode dict of the Windows environment.
Raises:
WindowsEnvironError
"""
res = winapi.GetEnvironmentStringsW()
if not res:
raise ctypes.WinError()
res = ctypes.cast(res, ctypes.POINTER(ctypes.c_wchar))
done = []
current = u""
i = 0
while 1:
c = res[i]
i += 1
if c == u"\x00":
if not current:
break
done.append(current)
current = u""
continue
current += c
dict_ = {}
for entry in done:
try:
key, value = entry.split(u"=", 1)
except ValueError:
continue
key = _norm_key(key)
dict_[key] = value
status = winapi.FreeEnvironmentStringsW(res)
if status == 0:
raise ctypes.WinError()
return dict_
def _norm_key(key):
assert isinstance(key, fsnative)
if is_win:
key = key.upper()
return key
class Environ(collections.MutableMapping):
"""Dict[`fsnative`, `fsnative`]: Like `os.environ` but contains unicode
keys and values under Windows + Python 2.
Any changes made will be forwarded to `os.environ`.
"""
def __init__(self):
if is_win and PY2:
try:
env = read_windows_environ()
except WindowsError:
env = {}
else:
env = os.environ
self._env = env
def __getitem__(self, key):
key = _norm_key(path2fsn(key))
return self._env[key]
def __setitem__(self, key, value):
key = _norm_key(path2fsn(key))
value = path2fsn(value)
if is_win and PY2:
# this calls putenv, so do it first and replace later
try:
os.environ[_fsn2legacy(key)] = _fsn2legacy(value)
except OSError:
raise ValueError
try:
set_windows_env_var(key, value)
except WindowsError:
# py3+win fails for invalid keys. try to do the same
raise ValueError
try:
self._env[key] = value
except OSError:
raise ValueError
def __delitem__(self, key):
key = _norm_key(path2fsn(key))
if is_win and PY2:
try:
del_windows_env_var(key)
except WindowsError:
pass
try:
del os.environ[_fsn2legacy(key)]
except KeyError:
pass
del self._env[key]
def __iter__(self):
return iter(self._env)
def __len__(self):
return len(self._env)
def __repr__(self):
return repr(self._env)
def copy(self):
return self._env.copy()
environ = Environ()
def getenv(key, value=None):
"""Like `os.getenv` but returns unicode under Windows + Python 2
Args:
key (pathlike): The env var to get
value (object): The value to return if the env var does not exist
Returns:
`fsnative` or `object`:
The env var or the passed value if it doesn't exist
"""
key = path2fsn(key)
if is_win and PY2:
return environ.get(key, value)
return os.getenv(key, value)
def unsetenv(key):
"""Like `os.unsetenv` but takes unicode under Windows + Python 2
Args:
key (pathlike): The env var to unset
"""
key = path2fsn(key)
if is_win:
# python 3 has no unsetenv under Windows -> use our ctypes one as well
try:
del_windows_env_var(key)
except WindowsError:
pass
else:
os.unsetenv(key)
def putenv(key, value):
"""Like `os.putenv` but takes unicode under Windows + Python 2
Args:
key (pathlike): The env var to get
value (pathlike): The value to set
Raises:
ValueError
"""
key = path2fsn(key)
value = path2fsn(value)
if is_win and PY2:
try:
set_windows_env_var(key, value)
except WindowsError:
# py3 + win fails here
raise ValueError
else:
try:
os.putenv(key, value)
except OSError:
# win + py3 raise here for invalid keys which is probably a bug.
# ValueError seems better
raise ValueError
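
A short sketch of the environment helpers documented above, using a made-up variable name and the standalone senf package:

    from senf import environ, getenv

    environ["MY_HYPOTHETICAL_VAR"] = "value"        # forwarded to os.environ
    print(getenv("MY_HYPOTHETICAL_VAR", "fallback"))
    del environ["MY_HYPOTHETICAL_VAR"]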


@@ -1,666 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2016 Christoph Reiter
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import os
import sys
import ctypes
import codecs
from . import _winapi as winapi
from ._compat import text_type, PY3, PY2, urlparse, quote, unquote, urlunparse
is_win = os.name == "nt"
is_unix = not is_win
is_darwin = sys.platform == "darwin"
_surrogatepass = "strict" if PY2 else "surrogatepass"
def _normalize_codec(codec, _cache={}):
"""Raises LookupError"""
try:
return _cache[codec]
except KeyError:
_cache[codec] = codecs.lookup(codec).name
return _cache[codec]
def _swap_bytes(data):
"""swaps bytes for 16 bit, leaves remaining trailing bytes alone"""
a, b = data[1::2], data[::2]
data = bytearray().join(bytearray(x) for x in zip(a, b))
if len(b) > len(a):
data += b[-1:]
return bytes(data)
def _codec_fails_on_encode_surrogates(codec, _cache={}):
"""Returns if a codec fails correctly when passing in surrogates with
a surrogatepass/surrogateescape error handler. Some codecs were broken
in Python <3.4
"""
try:
return _cache[codec]
except KeyError:
try:
u"\uD800\uDC01".encode(codec)
except UnicodeEncodeError:
_cache[codec] = True
else:
_cache[codec] = False
return _cache[codec]
def _codec_can_decode_with_surrogatepass(codec, _cache={}):
"""Returns if a codec supports the surrogatepass error handler when
decoding. Some codecs were broken in Python <3.4
"""
try:
return _cache[codec]
except KeyError:
try:
u"\ud83d".encode(
codec, _surrogatepass).decode(codec, _surrogatepass)
except UnicodeDecodeError:
_cache[codec] = False
else:
_cache[codec] = True
return _cache[codec]
def _decode_surrogatepass(data, codec):
"""Like data.decode(codec, 'surrogatepass') but makes utf-16-le/be work
on Python < 3.4 + Windows
https://bugs.python.org/issue27971
Raises UnicodeDecodeError, LookupError
"""
try:
return data.decode(codec, _surrogatepass)
except UnicodeDecodeError:
if not _codec_can_decode_with_surrogatepass(codec):
if _normalize_codec(codec) == "utf-16-be":
data = _swap_bytes(data)
codec = "utf-16-le"
if _normalize_codec(codec) == "utf-16-le":
buffer_ = ctypes.create_string_buffer(data + b"\x00\x00")
value = ctypes.wstring_at(buffer_, len(data) // 2)
if value.encode("utf-16-le", _surrogatepass) != data:
raise
return value
else:
raise
else:
raise
def _winpath2bytes_py3(text, codec):
"""Fallback implementation for text including surrogates"""
# merge surrogate codepoints
if _normalize_codec(codec).startswith("utf-16"):
# fast path, utf-16 merges anyway
return text.encode(codec, _surrogatepass)
return _decode_surrogatepass(
text.encode("utf-16-le", _surrogatepass),
"utf-16-le").encode(codec, _surrogatepass)
if PY2:
def _winpath2bytes(text, codec):
return text.encode(codec)
else:
def _winpath2bytes(text, codec):
if _codec_fails_on_encode_surrogates(codec):
try:
return text.encode(codec)
except UnicodeEncodeError:
return _winpath2bytes_py3(text, codec)
else:
return _winpath2bytes_py3(text, codec)
def fsn2norm(path):
"""
Args:
path (fsnative): The path to normalize
Returns:
`fsnative`
Normalizes an fsnative path.
The same underlying path can have multiple representations as fsnative
(due to surrogate pairs and variable length encodings). When concatenating
fsnative the result might be different than concatenating the serialized
form and then deserializing it.
This returns the normalized form i.e. the form which os.listdir() would
return. This is useful when you alter fsnative but require that the same
underlying path always maps to the same fsnative value.
All functions like :func:`bytes2fsn`, :func:`fsnative`, :func:`text2fsn`
and :func:`path2fsn` always return a normalized path, independent of their
input.
"""
native = _fsn2native(path)
if is_win:
return _decode_surrogatepass(
native.encode("utf-16-le", _surrogatepass),
"utf-16-le")
elif PY3:
return bytes2fsn(native, None)
else:
return path
def _fsn2legacy(path):
"""Takes a fsnative path and returns a path that can be put into os.environ
or sys.argv. Might result in a mangled path on Python2 + Windows.
Can't fail.
Args:
path (fsnative)
Returns:
str
"""
if PY2 and is_win:
return path.encode(_encoding, "replace")
return path
def _fsnative(text):
if not isinstance(text, text_type):
raise TypeError("%r needs to be a text type (%r)" % (text, text_type))
if is_unix:
# First we go to bytes so we can be sure we have a valid source.
# Theoretically we should fail here in case we have a non-unicode
# encoding. But this would make everything complicated and there is
# no good way to handle a failure from the user side. Instead
# fall back to utf-8 which is the most likely the right choice in
# a mis-configured environment
encoding = _encoding
try:
path = text.encode(encoding, _surrogatepass)
except UnicodeEncodeError:
path = text.encode("utf-8", _surrogatepass)
if b"\x00" in path:
path = path.replace(b"\x00", fsn2bytes(_fsnative(u"\uFFFD"), None))
if PY3:
return path.decode(_encoding, "surrogateescape")
return path
else:
if u"\x00" in text:
text = text.replace(u"\x00", u"\uFFFD")
text = fsn2norm(text)
return text
def _create_fsnative(type_):
# a bit of magic to make fsnative(u"foo") and isinstance(path, fsnative)
# work
class meta(type):
def __instancecheck__(self, instance):
return _typecheck_fsnative(instance)
def __subclasscheck__(self, subclass):
return issubclass(subclass, type_)
class impl(object):
"""fsnative(text=u"")
Args:
text (text): The text to convert to a path
Returns:
fsnative: The new path.
Raises:
TypeError: In case something other then `text` has been passed
This type is a virtual base class for the real path type.
Instantiating it returns an instance of the real path type and it
overrides instance and subclass checks so that `isinstance` and
`issubclass` checks work:
::
isinstance(fsnative(u"foo"), fsnative) == True
issubclass(type(fsnative(u"foo")), fsnative) == True
The real returned type is:
- **Python 2 + Windows:** :obj:`python:unicode`, with ``surrogates``,
without ``null``
- **Python 2 + Unix:** :obj:`python:str`, without ``null``
- **Python 3 + Windows:** :obj:`python3:str`, with ``surrogates``,
without ``null``
- **Python 3 + Unix:** :obj:`python3:str`, with ``surrogates``, without
``null``, without code points not encodable with the locale encoding
Constructing a `fsnative` can't fail.
Passing a `fsnative` to :func:`open` will never lead to `ValueError`
or `TypeError`.
Any operation on `fsnative` can also use the `str` type, as long as
the `str` only contains ASCII and no NULL.
"""
def __new__(cls, text=u""):
return _fsnative(text)
new_type = meta("fsnative", (object,), dict(impl.__dict__))
new_type.__module__ = "senf"
return new_type
fsnative_type = text_type if is_win or PY3 else bytes
fsnative = _create_fsnative(fsnative_type)
def _typecheck_fsnative(path):
"""
Args:
path (object)
Returns:
bool: if path is a fsnative
"""
if not isinstance(path, fsnative_type):
return False
if PY3 or is_win:
if u"\x00" in path:
return False
if is_unix:
try:
path.encode(_encoding, "surrogateescape")
except UnicodeEncodeError:
return False
elif b"\x00" in path:
return False
return True
def _fsn2native(path):
"""
Args:
path (fsnative)
Returns:
`text` on Windows, `bytes` on Unix
Raises:
TypeError: in case the type is wrong or the ´str` on Py3 + Unix
can't be converted to `bytes`
This helper allows to validate the type and content of a path.
To reduce overhead the encoded value for Py3 + Unix is returned so
it can be reused.
"""
if not isinstance(path, fsnative_type):
raise TypeError("path needs to be %s, not %s" % (
fsnative_type.__name__, type(path).__name__))
if is_unix:
if PY3:
try:
path = path.encode(_encoding, "surrogateescape")
except UnicodeEncodeError:
# This look more like ValueError, but raising only one error
# makes things simpler... also one could say str + surrogates
# is its own type
raise TypeError(
"path contained Unicode code points not valid in"
"the current path encoding. To create a valid "
"path from Unicode use text2fsn()")
if b"\x00" in path:
raise TypeError("fsnative can't contain nulls")
else:
if u"\x00" in path:
raise TypeError("fsnative can't contain nulls")
return path
def _get_encoding():
"""The encoding used for paths, argv, environ, stdout and stdin"""
encoding = sys.getfilesystemencoding()
if encoding is None:
if is_darwin:
encoding = "utf-8"
elif is_win:
encoding = "mbcs"
else:
encoding = "ascii"
encoding = _normalize_codec(encoding)
return encoding
_encoding = _get_encoding()
def path2fsn(path):
"""
Args:
path (pathlike): The path to convert
Returns:
`fsnative`
Raises:
TypeError: In case the type can't be converted to a `fsnative`
ValueError: In case conversion fails
Returns a `fsnative` path for a `pathlike`.
"""
# allow mbcs str on py2+win and bytes on py3
if PY2:
if is_win:
if isinstance(path, bytes):
path = path.decode(_encoding)
else:
if isinstance(path, text_type):
path = path.encode(_encoding)
if "\x00" in path:
raise ValueError("embedded null")
else:
path = getattr(os, "fspath", lambda x: x)(path)
if isinstance(path, bytes):
if b"\x00" in path:
raise ValueError("embedded null")
path = path.decode(_encoding, "surrogateescape")
elif is_unix and isinstance(path, str):
# make sure we can encode it and this is not just some random
# unicode string
data = path.encode(_encoding, "surrogateescape")
if b"\x00" in data:
raise ValueError("embedded null")
path = fsn2norm(path)
else:
if u"\x00" in path:
raise ValueError("embedded null")
path = fsn2norm(path)
if not isinstance(path, fsnative_type):
raise TypeError("path needs to be %s", fsnative_type.__name__)
return path
def fsn2text(path, strict=False):
"""
Args:
path (fsnative): The path to convert
strict (bool): Fail in case the conversion is not reversible
Returns:
`text`
Raises:
TypeError: In case no `fsnative` has been passed
ValueError: In case ``strict`` was True and the conversion failed
Converts a `fsnative` path to `text`.
Can be used to pass a path to some unicode API, like for example a GUI
toolkit.
If ``strict`` is True the conversion will fail in case it is not
reversible. This can be useful for converting program arguments that are
supposed to be text and erroring out in case they are not.
Encoding with a Unicode encoding will always succeed with the result.
"""
path = _fsn2native(path)
errors = "strict" if strict else "replace"
if is_win:
return path.encode("utf-16-le", _surrogatepass).decode("utf-16-le",
errors)
else:
return path.decode(_encoding, errors)
def text2fsn(text):
"""
Args:
text (text): The text to convert
Returns:
`fsnative`
Raises:
TypeError: In case no `text` has been passed
Takes `text` and converts it to a `fsnative`.
This operation is not reversible and can't fail.
"""
return fsnative(text)
def fsn2bytes(path, encoding="utf-8"):
"""
Args:
path (fsnative): The path to convert
encoding (`str`): encoding used for Windows
Returns:
`bytes`
Raises:
TypeError: If no `fsnative` path is passed
ValueError: If encoding fails or the encoding is invalid
Converts a `fsnative` path to `bytes`.
The passed *encoding* is only used on platforms where paths are not
associated with an encoding (Windows for example).
For Windows paths, lone surrogates will be encoded like normal code points
and surrogate pairs will be merged before encoding. In case of ``utf-8``
or ``utf-16-le`` this is equal to the `WTF-8 and WTF-16 encoding
<https://simonsapin.github.io/wtf-8/>`__.
"""
path = _fsn2native(path)
if is_win:
if encoding is None:
raise ValueError("invalid encoding %r" % encoding)
try:
return _winpath2bytes(path, encoding)
except LookupError:
raise ValueError("invalid encoding %r" % encoding)
else:
return path
def bytes2fsn(data, encoding="utf-8"):
"""
Args:
data (bytes): The data to convert
encoding (`str`): encoding used for Windows
Returns:
`fsnative`
Raises:
TypeError: If no `bytes` path is passed
ValueError: If decoding fails or the encoding is invalid
Turns `bytes` to a `fsnative` path.
The passed *encoding* is only used on platforms where paths are not
associated with an encoding (Windows for example).
For Windows paths ``WTF-8`` is accepted if ``utf-8`` is used and
``WTF-16`` accepted if ``utf-16-le`` is used.
"""
if not isinstance(data, bytes):
raise TypeError("data needs to be bytes")
if is_win:
if encoding is None:
raise ValueError("invalid encoding %r" % encoding)
try:
path = _decode_surrogatepass(data, encoding)
except LookupError:
raise ValueError("invalid encoding %r" % encoding)
if u"\x00" in path:
raise ValueError("contains nulls")
return path
else:
if b"\x00" in data:
raise ValueError("contains nulls")
if PY2:
return data
else:
return data.decode(_encoding, "surrogateescape")
def uri2fsn(uri):
"""
Args:
uri (`text` or :obj:`python:str`): A file URI
Returns:
`fsnative`
Raises:
TypeError: In case an invalid type is passed
ValueError: In case the URI isn't a valid file URI
Takes a file URI and returns a `fsnative` path
"""
if PY2:
if isinstance(uri, text_type):
uri = uri.encode("utf-8")
if not isinstance(uri, bytes):
raise TypeError("uri needs to be ascii str or unicode")
else:
if not isinstance(uri, str):
raise TypeError("uri needs to be str")
parsed = urlparse(uri)
scheme = parsed.scheme
netloc = parsed.netloc
path = parsed.path
if scheme != "file":
raise ValueError("Not a file URI: %r" % uri)
if not path:
raise ValueError("Invalid file URI: %r" % uri)
uri = urlunparse(parsed)[7:]
if is_win:
try:
drive, rest = uri.split(":", 1)
except ValueError:
path = ""
rest = uri.replace("/", "\\")
else:
path = drive[-1] + ":"
rest = rest.replace("/", "\\")
if PY2:
path += unquote(rest)
else:
path += unquote(rest, encoding="utf-8", errors="surrogatepass")
if netloc:
path = "\\\\" + path
if PY2:
path = path.decode("utf-8")
if u"\x00" in path:
raise ValueError("embedded null")
return path
else:
if PY2:
path = unquote(uri)
else:
path = unquote(uri, encoding=_encoding, errors="surrogateescape")
if "\x00" in path:
raise ValueError("embedded null")
return path
def fsn2uri(path):
"""
Args:
path (fsnative): The path to convert to an URI
Returns:
`text`: An ASCII only URI
Raises:
TypeError: If no `fsnative` was passed
ValueError: If the path can't be converted
Takes a `fsnative` path and returns a file URI.
On Windows non-ASCII characters will be encoded using utf-8 and then
percent encoded.
"""
path = _fsn2native(path)
def _quote_path(path):
# RFC 2396
path = quote(path, "/:@&=+$,")
if PY2:
path = path.decode("ascii")
return path
if is_win:
buf = ctypes.create_unicode_buffer(winapi.INTERNET_MAX_URL_LENGTH)
length = winapi.DWORD(winapi.INTERNET_MAX_URL_LENGTH)
flags = 0
try:
winapi.UrlCreateFromPathW(path, buf, ctypes.byref(length), flags)
except WindowsError as e:
raise ValueError(e)
uri = buf[:length.value]
# For some reason UrlCreateFromPathW escapes some chars outside of
# ASCII and some not. Unquote and re-quote with utf-8.
if PY3:
# latin-1 maps code points directly to bytes, which is what we want
uri = unquote(uri, "latin-1")
else:
# Python 2 does what we want by default
uri = unquote(uri)
return _quote_path(uri.encode("utf-8", _surrogatepass))
else:
return u"file://" + _quote_path(path)
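
A round-trip sketch of the URI helpers documented above, assuming a Unix system and the standalone senf package; the path is made up:

    from senf import fsnative, fsn2uri, uri2fsn

    path = fsnative(u"/tmp/hypothetical track.flac")
    uri = fsn2uri(path)     # 'file:///tmp/hypothetical%20track.flac'
    assert uri2fsn(uri) == path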


@@ -1,424 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2016 Christoph Reiter
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import sys
import os
import ctypes
import re
from ._fsnative import _encoding, is_win, is_unix, _surrogatepass, bytes2fsn
from ._compat import text_type, PY2, PY3
from ._winansi import AnsiState, ansi_split
from . import _winapi as winapi
def print_(*objects, **kwargs):
"""print_(*objects, sep=None, end=None, file=None, flush=False)
Args:
objects (object): zero or more objects to print
sep (str): Object separator to use, defaults to ``" "``
end (str): Trailing string to use, defaults to ``"\\n"``.
If end is ``"\\n"`` then `os.linesep` is used.
file (object): A file-like object, defaults to `sys.stdout`
flush (bool): If the file stream should be flushed
Raises:
EnvironmentError
Like print(), but:
* Supports printing filenames under Unix + Python 3 and Windows + Python 2
* Emulates ANSI escape sequence support under Windows
* Never fails due to encoding/decoding errors. Tries hard to get everything
on screen as is, but will fall back to "?" if all fails.
This does not conflict with ``colorama``, but will not use it on Windows.
"""
sep = kwargs.get("sep")
sep = sep if sep is not None else " "
end = kwargs.get("end")
end = end if end is not None else "\n"
file = kwargs.get("file")
file = file if file is not None else sys.stdout
flush = bool(kwargs.get("flush", False))
if is_win:
_print_windows(objects, sep, end, file, flush)
else:
_print_unix(objects, sep, end, file, flush)
def _print_unix(objects, sep, end, file, flush):
"""A print_() implementation which writes bytes"""
encoding = _encoding
if isinstance(sep, text_type):
sep = sep.encode(encoding, "replace")
if not isinstance(sep, bytes):
raise TypeError
if isinstance(end, text_type):
end = end.encode(encoding, "replace")
if not isinstance(end, bytes):
raise TypeError
if end == b"\n":
end = os.linesep
if PY3:
end = end.encode("ascii")
parts = []
for obj in objects:
if not isinstance(obj, text_type) and not isinstance(obj, bytes):
obj = text_type(obj)
if isinstance(obj, text_type):
if PY2:
obj = obj.encode(encoding, "replace")
else:
try:
obj = obj.encode(encoding, "surrogateescape")
except UnicodeEncodeError:
obj = obj.encode(encoding, "replace")
assert isinstance(obj, bytes)
parts.append(obj)
data = sep.join(parts) + end
assert isinstance(data, bytes)
file = getattr(file, "buffer", file)
try:
file.write(data)
except TypeError:
if PY3:
# For StringIO, first try with surrogates
surr_data = data.decode(encoding, "surrogateescape")
try:
file.write(surr_data)
except (TypeError, ValueError):
file.write(data.decode(encoding, "replace"))
else:
# for file like objects with don't support bytes
file.write(data.decode(encoding, "replace"))
if flush:
file.flush()
ansi_state = AnsiState()
def _print_windows(objects, sep, end, file, flush):
"""The windows implementation of print_()"""
h = winapi.INVALID_HANDLE_VALUE
try:
fileno = file.fileno()
except (EnvironmentError, AttributeError):
pass
else:
if fileno == 1:
h = winapi.GetStdHandle(winapi.STD_OUTPUT_HANDLE)
elif fileno == 2:
h = winapi.GetStdHandle(winapi.STD_ERROR_HANDLE)
encoding = _encoding
parts = []
for obj in objects:
if isinstance(obj, bytes):
obj = obj.decode(encoding, "replace")
if not isinstance(obj, text_type):
obj = text_type(obj)
parts.append(obj)
if isinstance(sep, bytes):
sep = sep.decode(encoding, "replace")
if not isinstance(sep, text_type):
raise TypeError
if isinstance(end, bytes):
end = end.decode(encoding, "replace")
if not isinstance(end, text_type):
raise TypeError
if end == u"\n":
end = os.linesep
text = sep.join(parts) + end
assert isinstance(text, text_type)
is_console = True
if h == winapi.INVALID_HANDLE_VALUE:
is_console = False
else:
# get the default value
info = winapi.CONSOLE_SCREEN_BUFFER_INFO()
if not winapi.GetConsoleScreenBufferInfo(h, ctypes.byref(info)):
is_console = False
if is_console:
# make sure we flush before we apply any console attributes
file.flush()
# try to force a utf-8 code page, use the output CP if that fails
cp = winapi.GetConsoleOutputCP()
try:
encoding = "utf-8"
if winapi.SetConsoleOutputCP(65001) == 0:
encoding = None
for is_ansi, part in ansi_split(text):
if is_ansi:
ansi_state.apply(h, part)
else:
if encoding is not None:
data = part.encode(encoding, _surrogatepass)
else:
data = _encode_codepage(cp, part)
os.write(fileno, data)
finally:
# reset the code page to what we had before
winapi.SetConsoleOutputCP(cp)
else:
# try writing bytes first, so in case of Python 2 StringIO we get
# the same type on all platforms
try:
file.write(text.encode("utf-8", _surrogatepass))
except (TypeError, ValueError):
file.write(text)
if flush:
file.flush()
def _readline_windows():
"""Raises OSError"""
try:
fileno = sys.stdin.fileno()
except (EnvironmentError, AttributeError):
fileno = -1
# In case stdin is replaced, read from that
if fileno != 0:
return _readline_windows_fallback()
h = winapi.GetStdHandle(winapi.STD_INPUT_HANDLE)
if h == winapi.INVALID_HANDLE_VALUE:
return _readline_windows_fallback()
buf_size = 1024
buf = ctypes.create_string_buffer(buf_size * ctypes.sizeof(winapi.WCHAR))
read = winapi.DWORD()
text = u""
while True:
if winapi.ReadConsoleW(
h, buf, buf_size, ctypes.byref(read), None) == 0:
if not text:
return _readline_windows_fallback()
raise ctypes.WinError()
data = buf[:read.value * ctypes.sizeof(winapi.WCHAR)]
text += data.decode("utf-16-le", _surrogatepass)
if text.endswith(u"\r\n"):
return text[:-2]
def _decode_codepage(codepage, data):
"""
Args:
codepage (int)
data (bytes)
Returns:
`text`
Decodes data using the given codepage. If some data can't be decoded
using the codepage it will not fail.
"""
assert isinstance(data, bytes)
if not data:
return u""
# get the required buffer length first
length = winapi.MultiByteToWideChar(codepage, 0, data, len(data), None, 0)
if length == 0:
raise ctypes.WinError()
# now decode
buf = ctypes.create_unicode_buffer(length)
length = winapi.MultiByteToWideChar(
codepage, 0, data, len(data), buf, length)
if length == 0:
raise ctypes.WinError()
return buf[:]
def _encode_codepage(codepage, text):
"""
Args:
codepage (int)
text (text)
Returns:
`bytes`
Encode text using the given code page. Will not fail if a char
can't be encoded using that codepage.
"""
assert isinstance(text, text_type)
if not text:
return b""
size = (len(text.encode("utf-16-le", _surrogatepass)) //
ctypes.sizeof(winapi.WCHAR))
# get the required buffer size
length = winapi.WideCharToMultiByte(
codepage, 0, text, size, None, 0, None, None)
if length == 0:
raise ctypes.WinError()
# decode to the buffer
buf = ctypes.create_string_buffer(length)
length = winapi.WideCharToMultiByte(
codepage, 0, text, size, buf, length, None, None)
if length == 0:
raise ctypes.WinError()
return buf[:length]
def _readline_windows_fallback():
# In case reading from the console failed (maybe we get piped data)
# we assume the input was generated according to the output encoding.
# Got any better ideas?
assert is_win
cp = winapi.GetConsoleOutputCP()
data = getattr(sys.stdin, "buffer", sys.stdin).readline().rstrip(b"\r\n")
return _decode_codepage(cp, data)
def _readline_default():
assert is_unix
data = getattr(sys.stdin, "buffer", sys.stdin).readline().rstrip(b"\r\n")
if PY3:
return data.decode(_encoding, "surrogateescape")
else:
return data
def _readline():
if is_win:
return _readline_windows()
else:
return _readline_default()
def input_(prompt=None):
"""
Args:
prompt (object): Prints the passed object to stdout without
adding a trailing newline
Returns:
`fsnative`
Raises:
EnvironmentError
Like :func:`python3:input` but returns a `fsnative` and allows printing
filenames as prompt to stdout.
Use :func:`fsn2text` on the result if you just want to deal with text.
"""
if prompt is not None:
print_(prompt, end="")
return _readline()
def _get_file_name_for_handle(handle):
"""(Windows only) Returns a file name for a file handle.
Args:
handle (winapi.HANDLE)
Returns:
`text` or `None` if no file name could be retrieved.
"""
assert is_win
assert handle != winapi.INVALID_HANDLE_VALUE
size = winapi.FILE_NAME_INFO.FileName.offset + \
winapi.MAX_PATH * ctypes.sizeof(winapi.WCHAR)
buf = ctypes.create_string_buffer(size)
if winapi.GetFileInformationByHandleEx is None:
# Windows XP
return None
status = winapi.GetFileInformationByHandleEx(
handle, winapi.FileNameInfo, buf, size)
if status == 0:
return None
name_info = ctypes.cast(
buf, ctypes.POINTER(winapi.FILE_NAME_INFO)).contents
offset = winapi.FILE_NAME_INFO.FileName.offset
data = buf[offset:offset + name_info.FileNameLength]
return bytes2fsn(data, "utf-16-le")
def supports_ansi_escape_codes(fd):
"""Returns whether the output device is capable of interpreting ANSI escape
codes when :func:`print_` is used.
Args:
fd (int): file descriptor (e.g. ``sys.stdout.fileno()``)
Returns:
`bool`
"""
if os.isatty(fd):
return True
if not is_win:
return False
# Check for cygwin/msys terminal
handle = winapi._get_osfhandle(fd)
if handle == winapi.INVALID_HANDLE_VALUE:
return False
if winapi.GetFileType(handle) != winapi.FILE_TYPE_PIPE:
return False
file_name = _get_file_name_for_handle(handle)
match = re.match(
"^\\\\(cygwin|msys)-[a-z0-9]+-pty[0-9]+-(from|to)-master$", file_name)
return match is not None
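
A minimal sketch of the printing helpers documented above, assuming the standalone senf package:

    import sys
    from senf import print_, supports_ansi_escape_codes

    # print_ accepts text, bytes and arbitrary objects, never fails on
    # undecodable filenames, and emulates ANSI escapes on Windows consoles.
    print_(u"scanning:", b"/music/\xff-odd-name", 42)
    if supports_ansi_escape_codes(sys.stdout.fileno()):
        print_(u"\033[32mdone\033[0m")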


@@ -1,154 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2016 Christoph Reiter
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import re
import os
from ._fsnative import path2fsn, fsnative, is_win
from ._compat import PY2
from ._environ import environ
sep = path2fsn(os.sep)
pathsep = path2fsn(os.pathsep)
curdir = path2fsn(os.curdir)
pardir = path2fsn(os.pardir)
altsep = path2fsn(os.altsep) if os.altsep is not None else None
extsep = path2fsn(os.extsep)
devnull = path2fsn(os.devnull)
defpath = path2fsn(os.defpath)
def getcwd():
"""Like `os.getcwd` but returns a `fsnative` path
Returns:
`fsnative`
"""
if is_win and PY2:
return os.getcwdu()
return os.getcwd()
def _get_userdir(user=None):
"""Returns the user dir or None"""
if user is not None and not isinstance(user, fsnative):
raise TypeError
if is_win:
if "HOME" in environ:
path = environ["HOME"]
elif "USERPROFILE" in environ:
path = environ["USERPROFILE"]
elif "HOMEPATH" in environ and "HOMEDRIVE" in environ:
path = os.path.join(environ["HOMEDRIVE"], environ["HOMEPATH"])
else:
return
if user is None:
return path
else:
return os.path.join(os.path.dirname(path), user)
else:
import pwd
if user is None:
if "HOME" in environ:
return environ["HOME"]
else:
try:
return path2fsn(pwd.getpwuid(os.getuid()).pw_dir)
except KeyError:
return
else:
try:
return path2fsn(pwd.getpwnam(user).pw_dir)
except KeyError:
return
def expanduser(path):
"""
Args:
path (pathlike): A path to expand
Returns:
`fsnative`
Like :func:`python:os.path.expanduser` but supports unicode home
directories under Windows + Python 2 and always returns a `fsnative`.
"""
path = path2fsn(path)
if path == "~":
return _get_userdir()
elif path.startswith("~" + sep) or (
altsep is not None and path.startswith("~" + altsep)):
userdir = _get_userdir()
if userdir is None:
return path
return userdir + path[1:]
elif path.startswith("~"):
sep_index = path.find(sep)
if altsep is not None:
alt_index = path.find(altsep)
if alt_index != -1 and alt_index < sep_index:
sep_index = alt_index
if sep_index == -1:
user = path[1:]
rest = ""
else:
user = path[1:sep_index]
rest = path[sep_index:]
userdir = _get_userdir(user)
if userdir is not None:
return userdir + rest
else:
return path
else:
return path
def expandvars(path):
"""
Args:
path (pathlike): A path to expand
Returns:
`fsnative`
Like :func:`python:os.path.expandvars` but supports unicode under Windows
+ Python 2 and always returns a `fsnative`.
"""
path = path2fsn(path)
def repl_func(match):
return environ.get(match.group(1), match.group(0))
path = re.compile(r"\$(\w+)", flags=re.UNICODE).sub(repl_func, path)
if os.name == "nt":
path = re.sub(r"%([^%]+)%", repl_func, path)
return re.sub(r"\$\{([^\}]+)\}", repl_func, path)
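# A short illustrative sketch: the two helpers compose, and both always
# return an fsnative value (the example values below are assumptions).
def _example_expand(path):
    # e.g. "~/Music/$ARTIST" -> "/home/user/Music/Nirvana"
    # when HOME=/home/user and ARTIST=Nirvana are set in the environment.
    return expandvars(expanduser(path))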

View file

@ -1,96 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2016 Christoph Reiter
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import tempfile
from ._fsnative import path2fsn, fsnative
def gettempdir():
"""
Returns:
`fsnative`
Like :func:`python3:tempfile.gettempdir`, but always returns a `fsnative`
path
"""
# FIXME: I don't want to reimplement all that logic, reading env vars etc.
# At least for the default it works.
return path2fsn(tempfile.gettempdir())
def gettempprefix():
"""
Returns:
`fsnative`
Like :func:`python3:tempfile.gettempprefix`, but always returns a
`fsnative` path
"""
return path2fsn(tempfile.gettempprefix())
def mkstemp(suffix=None, prefix=None, dir=None, text=False):
"""
Args:
suffix (`pathlike` or `None`): suffix or `None` to use the default
prefix (`pathlike` or `None`): prefix or `None` to use the default
dir (`pathlike` or `None`): temp dir or `None` to use the default
text (bool): if the file should be opened in text mode
Returns:
Tuple[`int`, `fsnative`]:
A tuple containing the file descriptor and the file path
Raises:
EnvironmentError
Like :func:`python3:tempfile.mkstemp` but always returns a `fsnative`
path.
"""
suffix = fsnative() if suffix is None else path2fsn(suffix)
prefix = gettempprefix() if prefix is None else path2fsn(prefix)
dir = gettempdir() if dir is None else path2fsn(dir)
return tempfile.mkstemp(suffix, prefix, dir, text)
def mkdtemp(suffix=None, prefix=None, dir=None):
"""
Args:
suffix (`pathlike` or `None`): suffix or `None` to use the default
prefix (`pathlike` or `None`): prefix or `None` to use the default
dir (`pathlike` or `None`): temp dir or `None` to use the default
Returns:
`fsnative`: A path to a directory
Raises:
EnvironmentError
Like :func:`python3:tempfile.mkdtemp` but always returns a `fsnative` path.
"""
suffix = fsnative() if suffix is None else path2fsn(suffix)
prefix = gettempprefix() if prefix is None else path2fsn(prefix)
dir = gettempdir() if dir is None else path2fsn(dir)
return tempfile.mkdtemp(suffix, prefix, dir)
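# A minimal usage sketch: both helpers hand back fsnative paths that can be
# passed straight to os functions (the suffix/prefix values are illustrative).
def _example_mkstemp():
    import os
    fd, path = mkstemp(suffix=".tmp", prefix="senf-")
    try:
        os.write(fd, b"data")
    finally:
        os.close(fd)
        os.remove(path)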

View file

@ -1,319 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2016 Christoph Reiter
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import ctypes
import re
import atexit
from . import _winapi as winapi
def ansi_parse(code):
"""Returns command, (args)"""
return code[-1:], tuple([int(v or "0") for v in code[2:-1].split(";")])
def ansi_split(text, _re=re.compile(u"(\x1b\\[(\\d*;?)*\\S)")):
"""Yields (is_ansi, text)"""
for part in _re.split(text):
if part:
yield (bool(_re.match(part)), part)
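# Rough illustration of the convention used by these two helpers:
#   ansi_parse(u"\x1b[31;1m") -> (u"m", (31, 1))   # command plus numeric args
#   ansi_parse(u"\x1b[H")     -> (u"H", (0,))      # missing args default to 0
# ansi_split() then yields (is_ansi, chunk) pairs so plain text and escape
# sequences can be handled separately.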
class AnsiCommand(object):
TEXT = "m"
MOVE_UP = "A"
MOVE_DOWN = "B"
MOVE_FORWARD = "C"
MOVE_BACKWARD = "D"
SET_POS = "H"
SET_POS_ALT = "f"
SAVE_POS = "s"
RESTORE_POS = "u"
class TextAction(object):
RESET_ALL = 0
SET_BOLD = 1
SET_DIM = 2
SET_ITALIC = 3
SET_UNDERLINE = 4
SET_BLINK = 5
SET_BLINK_FAST = 6
SET_REVERSE = 7
SET_HIDDEN = 8
RESET_BOLD = 21
RESET_DIM = 22
RESET_ITALIC = 23
RESET_UNDERLINE = 24
RESET_BLINK = 25
RESET_BLINK_FAST = 26
RESET_REVERSE = 27
RESET_HIDDEN = 28
FG_BLACK = 30
FG_RED = 31
FG_GREEN = 32
FG_YELLOW = 33
FG_BLUE = 34
FG_MAGENTA = 35
FG_CYAN = 36
FG_WHITE = 37
FG_DEFAULT = 39
FG_LIGHT_BLACK = 90
FG_LIGHT_RED = 91
FG_LIGHT_GREEN = 92
FG_LIGHT_YELLOW = 93
FG_LIGHT_BLUE = 94
FG_LIGHT_MAGENTA = 95
FG_LIGHT_CYAN = 96
FG_LIGHT_WHITE = 97
BG_BLACK = 40
BG_RED = 41
BG_GREEN = 42
BG_YELLOW = 43
BG_BLUE = 44
BG_MAGENTA = 45
BG_CYAN = 46
BG_WHITE = 47
BG_DEFAULT = 49
BG_LIGHT_BLACK = 100
BG_LIGHT_RED = 101
BG_LIGHT_GREEN = 102
BG_LIGHT_YELLOW = 103
BG_LIGHT_BLUE = 104
BG_LIGHT_MAGENTA = 105
BG_LIGHT_CYAN = 106
BG_LIGHT_WHITE = 107
class AnsiState(object):
def __init__(self):
self.default_attrs = None
self.bold = False
self.bg_light = False
self.fg_light = False
self.saved_pos = (0, 0)
def do_text_action(self, attrs, action):
# In case the external state has changed, apply it to ours.
# Mostly the first time this is called.
if attrs & winapi.FOREGROUND_INTENSITY and not self.fg_light \
and not self.bold:
self.fg_light = True
if attrs & winapi.BACKGROUND_INTENSITY and not self.bg_light:
self.bg_light = True
dark_fg = {
TextAction.FG_BLACK: 0,
TextAction.FG_RED: winapi.FOREGROUND_RED,
TextAction.FG_GREEN: winapi.FOREGROUND_GREEN,
TextAction.FG_YELLOW:
winapi.FOREGROUND_GREEN | winapi.FOREGROUND_RED,
TextAction.FG_BLUE: winapi.FOREGROUND_BLUE,
TextAction.FG_MAGENTA: winapi.FOREGROUND_BLUE |
winapi.FOREGROUND_RED,
TextAction.FG_CYAN:
winapi.FOREGROUND_BLUE | winapi.FOREGROUND_GREEN,
TextAction.FG_WHITE:
winapi.FOREGROUND_BLUE | winapi.FOREGROUND_GREEN |
winapi.FOREGROUND_RED,
}
dark_bg = {
TextAction.BG_BLACK: 0,
TextAction.BG_RED: winapi.BACKGROUND_RED,
TextAction.BG_GREEN: winapi.BACKGROUND_GREEN,
TextAction.BG_YELLOW:
winapi.BACKGROUND_GREEN | winapi.BACKGROUND_RED,
TextAction.BG_BLUE: winapi.BACKGROUND_BLUE,
TextAction.BG_MAGENTA:
winapi.BACKGROUND_BLUE | winapi.BACKGROUND_RED,
TextAction.BG_CYAN:
winapi.BACKGROUND_BLUE | winapi.BACKGROUND_GREEN,
TextAction.BG_WHITE:
winapi.BACKGROUND_BLUE | winapi.BACKGROUND_GREEN |
winapi.BACKGROUND_RED,
}
light_fg = {
TextAction.FG_LIGHT_BLACK: 0,
TextAction.FG_LIGHT_RED: winapi.FOREGROUND_RED,
TextAction.FG_LIGHT_GREEN: winapi.FOREGROUND_GREEN,
TextAction.FG_LIGHT_YELLOW:
winapi.FOREGROUND_GREEN | winapi.FOREGROUND_RED,
TextAction.FG_LIGHT_BLUE: winapi.FOREGROUND_BLUE,
TextAction.FG_LIGHT_MAGENTA:
winapi.FOREGROUND_BLUE | winapi.FOREGROUND_RED,
TextAction.FG_LIGHT_CYAN:
winapi.FOREGROUND_BLUE | winapi.FOREGROUND_GREEN,
TextAction.FG_LIGHT_WHITE:
winapi.FOREGROUND_BLUE | winapi.FOREGROUND_GREEN |
winapi.FOREGROUND_RED,
}
light_bg = {
TextAction.BG_LIGHT_BLACK: 0,
TextAction.BG_LIGHT_RED: winapi.BACKGROUND_RED,
TextAction.BG_LIGHT_GREEN: winapi.BACKGROUND_GREEN,
TextAction.BG_LIGHT_YELLOW:
winapi.BACKGROUND_GREEN | winapi.BACKGROUND_RED,
TextAction.BG_LIGHT_BLUE: winapi.BACKGROUND_BLUE,
TextAction.BG_LIGHT_MAGENTA:
winapi.BACKGROUND_BLUE | winapi.BACKGROUND_RED,
TextAction.BG_LIGHT_CYAN:
winapi.BACKGROUND_BLUE | winapi.BACKGROUND_GREEN,
TextAction.BG_LIGHT_WHITE:
winapi.BACKGROUND_BLUE | winapi.BACKGROUND_GREEN |
winapi.BACKGROUND_RED,
}
if action == TextAction.RESET_ALL:
attrs = self.default_attrs
self.bold = self.fg_light = self.bg_light = False
elif action == TextAction.SET_BOLD:
self.bold = True
elif action == TextAction.RESET_BOLD:
self.bold = False
elif action == TextAction.SET_DIM:
self.bold = False
elif action == TextAction.SET_REVERSE:
attrs |= winapi.COMMON_LVB_REVERSE_VIDEO
elif action == TextAction.RESET_REVERSE:
attrs &= ~winapi.COMMON_LVB_REVERSE_VIDEO
elif action == TextAction.SET_UNDERLINE:
attrs |= winapi.COMMON_LVB_UNDERSCORE
elif action == TextAction.RESET_UNDERLINE:
attrs &= ~winapi.COMMON_LVB_UNDERSCORE
elif action == TextAction.FG_DEFAULT:
attrs = (attrs & ~0xF) | (self.default_attrs & 0xF)
self.fg_light = False
elif action == TextAction.BG_DEFAULT:
attrs = (attrs & ~0xF0) | (self.default_attrs & 0xF0)
self.bg_light = False
elif action in dark_fg:
attrs = (attrs & ~0xF) | dark_fg[action]
self.fg_light = False
elif action in dark_bg:
attrs = (attrs & ~0xF0) | dark_bg[action]
self.bg_light = False
elif action in light_fg:
attrs = (attrs & ~0xF) | light_fg[action]
self.fg_light = True
elif action in light_bg:
attrs = (attrs & ~0xF0) | light_bg[action]
self.bg_light = True
if self.fg_light or self.bold:
attrs |= winapi.FOREGROUND_INTENSITY
else:
attrs &= ~winapi.FOREGROUND_INTENSITY
if self.bg_light:
attrs |= winapi.BACKGROUND_INTENSITY
else:
attrs &= ~winapi.BACKGROUND_INTENSITY
return attrs
def apply(self, handle, code):
buffer_info = winapi.CONSOLE_SCREEN_BUFFER_INFO()
if not winapi.GetConsoleScreenBufferInfo(handle,
ctypes.byref(buffer_info)):
return
attrs = buffer_info.wAttributes
# We take the first attrs we see as default
if self.default_attrs is None:
self.default_attrs = attrs
# Make sure that, as with Linux terminals, the program doesn't
# affect the prompt after it exits
atexit.register(
winapi.SetConsoleTextAttribute, handle, self.default_attrs)
cmd, args = ansi_parse(code)
if cmd == AnsiCommand.TEXT:
for action in args:
attrs = self.do_text_action(attrs, action)
winapi.SetConsoleTextAttribute(handle, attrs)
elif cmd in (AnsiCommand.MOVE_UP, AnsiCommand.MOVE_DOWN,
AnsiCommand.MOVE_FORWARD, AnsiCommand.MOVE_BACKWARD):
coord = buffer_info.dwCursorPosition
x, y = coord.X, coord.Y
amount = max(args[0], 1)
if cmd == AnsiCommand.MOVE_UP:
y -= amount
elif cmd == AnsiCommand.MOVE_DOWN:
y += amount
elif cmd == AnsiCommand.MOVE_FORWARD:
x += amount
elif cmd == AnsiCommand.MOVE_BACKWARD:
x -= amount
x = max(x, 0)
y = max(y, 0)
winapi.SetConsoleCursorPosition(handle, winapi.COORD(x, y))
elif cmd in (AnsiCommand.SET_POS, AnsiCommand.SET_POS_ALT):
args = list(args)
while len(args) < 2:
args.append(0)
x, y = args[:2]
win_rect = buffer_info.srWindow
x += win_rect.Left - 1
y += win_rect.Top - 1
x = max(x, 0)
y = max(y, 0)
winapi.SetConsoleCursorPosition(handle, winapi.COORD(x, y))
elif cmd == AnsiCommand.SAVE_POS:
win_rect = buffer_info.srWindow
coord = buffer_info.dwCursorPosition
x, y = coord.X, coord.Y
x -= win_rect.Left
y -= win_rect.Top
self.saved_pos = (x, y)
elif cmd == AnsiCommand.RESTORE_POS:
win_rect = buffer_info.srWindow
x, y = self.saved_pos
x += win_rect.Left
y += win_rect.Top
winapi.SetConsoleCursorPosition(handle, winapi.COORD(x, y))

View file

@ -1,222 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2016 Christoph Reiter
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import ctypes
from ctypes import WinDLL, CDLL, wintypes
shell32 = WinDLL("shell32")
kernel32 = WinDLL("kernel32")
shlwapi = WinDLL("shlwapi")
msvcrt = CDLL("msvcrt")
GetCommandLineW = kernel32.GetCommandLineW
GetCommandLineW.argtypes = []
GetCommandLineW.restype = wintypes.LPCWSTR
CommandLineToArgvW = shell32.CommandLineToArgvW
CommandLineToArgvW.argtypes = [
wintypes.LPCWSTR, ctypes.POINTER(ctypes.c_int)]
CommandLineToArgvW.restype = ctypes.POINTER(wintypes.LPWSTR)
LocalFree = kernel32.LocalFree
LocalFree.argtypes = [wintypes.HLOCAL]
LocalFree.restype = wintypes.HLOCAL
# https://msdn.microsoft.com/en-us/library/windows/desktop/aa383751.aspx
LPCTSTR = ctypes.c_wchar_p
LPWSTR = wintypes.LPWSTR
LPCWSTR = ctypes.c_wchar_p
LPTSTR = LPWSTR
PCWSTR = ctypes.c_wchar_p
PCTSTR = PCWSTR
PWSTR = ctypes.c_wchar_p
PTSTR = PWSTR
LPVOID = wintypes.LPVOID
WCHAR = wintypes.WCHAR
LPSTR = ctypes.c_char_p
BOOL = wintypes.BOOL
LPBOOL = ctypes.POINTER(BOOL)
UINT = wintypes.UINT
WORD = wintypes.WORD
DWORD = wintypes.DWORD
SHORT = wintypes.SHORT
HANDLE = wintypes.HANDLE
ULONG = wintypes.ULONG
LPCSTR = wintypes.LPCSTR
STD_INPUT_HANDLE = DWORD(-10)
STD_OUTPUT_HANDLE = DWORD(-11)
STD_ERROR_HANDLE = DWORD(-12)
INVALID_HANDLE_VALUE = wintypes.HANDLE(-1).value
INTERNET_MAX_SCHEME_LENGTH = 32
INTERNET_MAX_PATH_LENGTH = 2048
INTERNET_MAX_URL_LENGTH = (
INTERNET_MAX_SCHEME_LENGTH + len("://") + INTERNET_MAX_PATH_LENGTH)
FOREGROUND_BLUE = 0x0001
FOREGROUND_GREEN = 0x0002
FOREGROUND_RED = 0x0004
FOREGROUND_INTENSITY = 0x0008
BACKGROUND_BLUE = 0x0010
BACKGROUND_GREEN = 0x0020
BACKGROUND_RED = 0x0040
BACKGROUND_INTENSITY = 0x0080
COMMON_LVB_REVERSE_VIDEO = 0x4000
COMMON_LVB_UNDERSCORE = 0x8000
UrlCreateFromPathW = shlwapi.UrlCreateFromPathW
UrlCreateFromPathW.argtypes = [
PCTSTR, PTSTR, ctypes.POINTER(DWORD), DWORD]
UrlCreateFromPathW.restype = ctypes.HRESULT
SetEnvironmentVariableW = kernel32.SetEnvironmentVariableW
SetEnvironmentVariableW.argtypes = [LPCTSTR, LPCTSTR]
SetEnvironmentVariableW.restype = wintypes.BOOL
GetEnvironmentVariableW = kernel32.GetEnvironmentVariableW
GetEnvironmentVariableW.argtypes = [LPCTSTR, LPTSTR, DWORD]
GetEnvironmentVariableW.restype = DWORD
GetEnvironmentStringsW = kernel32.GetEnvironmentStringsW
GetEnvironmentStringsW.argtypes = []
GetEnvironmentStringsW.restype = ctypes.c_void_p
FreeEnvironmentStringsW = kernel32.FreeEnvironmentStringsW
FreeEnvironmentStringsW.argtypes = [ctypes.c_void_p]
FreeEnvironmentStringsW.restype = ctypes.c_bool
GetStdHandle = kernel32.GetStdHandle
GetStdHandle.argtypes = [DWORD]
GetStdHandle.restype = HANDLE
class COORD(ctypes.Structure):
_fields_ = [
("X", SHORT),
("Y", SHORT),
]
class SMALL_RECT(ctypes.Structure):
_fields_ = [
("Left", SHORT),
("Top", SHORT),
("Right", SHORT),
("Bottom", SHORT),
]
class CONSOLE_SCREEN_BUFFER_INFO(ctypes.Structure):
_fields_ = [
("dwSize", COORD),
("dwCursorPosition", COORD),
("wAttributes", WORD),
("srWindow", SMALL_RECT),
("dwMaximumWindowSize", COORD),
]
GetConsoleScreenBufferInfo = kernel32.GetConsoleScreenBufferInfo
GetConsoleScreenBufferInfo.argtypes = [
HANDLE, ctypes.POINTER(CONSOLE_SCREEN_BUFFER_INFO)]
GetConsoleScreenBufferInfo.restype = BOOL
GetConsoleOutputCP = kernel32.GetConsoleOutputCP
GetConsoleOutputCP.argtypes = []
GetConsoleOutputCP.restype = UINT
SetConsoleOutputCP = kernel32.SetConsoleOutputCP
SetConsoleOutputCP.argtypes = [UINT]
SetConsoleOutputCP.restype = BOOL
GetConsoleCP = kernel32.GetConsoleCP
GetConsoleCP.argtypes = []
GetConsoleCP.restype = UINT
SetConsoleCP = kernel32.SetConsoleCP
SetConsoleCP.argtypes = [UINT]
SetConsoleCP.restype = BOOL
SetConsoleTextAttribute = kernel32.SetConsoleTextAttribute
SetConsoleTextAttribute.argtypes = [HANDLE, WORD]
SetConsoleTextAttribute.restype = BOOL
SetConsoleCursorPosition = kernel32.SetConsoleCursorPosition
SetConsoleCursorPosition.argtypes = [HANDLE, COORD]
SetConsoleCursorPosition.restype = BOOL
ReadConsoleW = kernel32.ReadConsoleW
ReadConsoleW.argtypes = [HANDLE, LPVOID, DWORD, ctypes.POINTER(DWORD), LPVOID]
ReadConsoleW.restype = BOOL
MultiByteToWideChar = kernel32.MultiByteToWideChar
MultiByteToWideChar.argtypes = [
UINT, DWORD, LPCSTR, ctypes.c_int, LPWSTR, ctypes.c_int]
MultiByteToWideChar.restype = ctypes.c_int
WideCharToMultiByte = kernel32.WideCharToMultiByte
WideCharToMultiByte.argtypes = [
UINT, DWORD, LPCWSTR, ctypes.c_int, LPSTR, ctypes.c_int, LPCSTR, LPBOOL]
WideCharToMultiByte.restype = ctypes.c_int
MoveFileW = kernel32.MoveFileW
MoveFileW.argtypes = [LPCTSTR, LPCTSTR]
MoveFileW.restype = BOOL
if hasattr(kernel32, "GetFileInformationByHandleEx"):
GetFileInformationByHandleEx = kernel32.GetFileInformationByHandleEx
GetFileInformationByHandleEx.argtypes = [
HANDLE, ctypes.c_int, ctypes.c_void_p, DWORD]
GetFileInformationByHandleEx.restype = BOOL
else:
# Windows XP
GetFileInformationByHandleEx = None
MAX_PATH = 260
FileNameInfo = 2
class FILE_NAME_INFO(ctypes.Structure):
_fields_ = [
("FileNameLength", DWORD),
("FileName", WCHAR),
]
_get_osfhandle = msvcrt._get_osfhandle
_get_osfhandle.argtypes = [ctypes.c_int]
_get_osfhandle.restype = HANDLE
GetFileType = kernel32.GetFileType
GetFileType.argtypes = [HANDLE]
GetFileType.restype = DWORD
FILE_TYPE_PIPE = 0x0003

View file

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Copyright (C) 2005 Michael Urman
#
# This program is free software; you can redistribute it and/or modify
@ -115,7 +114,7 @@ class Metadata(Tags):
raise NotImplementedError
@loadfile(writable=False)
def save(self, filething, **kwargs):
def save(self, filething=None, **kwargs):
"""save(filething=None, **kwargs)
Save changes to a file.
@ -129,7 +128,7 @@ class Metadata(Tags):
raise NotImplementedError
@loadfile(writable=False)
def delete(self, filething):
def delete(self, filething=None):
"""delete(filething=None)
Remove tags from a file.

View file

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Copyright 2016 Christoph Reiter
#
# This program is free software; you can redistribute it and/or modify

View file

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Copyright 2015 Christoph Reiter
#
# This program is free software; you can redistribute it and/or modify
@ -11,8 +10,7 @@ import signal
import contextlib
import optparse
from mutagen._senf import print_
from mutagen._compat import text_type, iterbytes
from mutagen._util import iterbytes
def split_escape(string, sep, maxsplit=None, escape_char="\\"):
@ -25,7 +23,7 @@ def split_escape(string, sep, maxsplit=None, escape_char="\\"):
assert len(escape_char) == 1
if isinstance(string, bytes):
if isinstance(escape_char, text_type):
if isinstance(escape_char, str):
escape_char = escape_char.encode("ascii")
iter_ = iterbytes
else:
@ -88,8 +86,4 @@ class SignalHandler(object):
raise SystemExit("Aborted...")
class OptionParser(optparse.OptionParser):
"""OptionParser subclass which supports printing Unicode under Windows"""
def print_help(self, file=None):
print_(self.format_help(), file=file)
OptionParser = optparse.OptionParser

View file

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Copyright 2014 Marcus Sundman
#
# This program is free software; you can redistribute it and/or modify
@ -15,8 +14,6 @@ import os.path
import mutagen
import mutagen.id3
from mutagen._senf import print_, argv
from mutagen._compat import text_type
from ._util import SignalHandler, OptionParser
@ -25,11 +22,6 @@ VERSION = (0, 1)
_sig = SignalHandler()
def printerr(*args, **kwargs):
kwargs.setdefault("file", sys.stderr)
print_(*args, **kwargs)
class ID3OptionParser(OptionParser):
def __init__(self):
mutagen_version = mutagen.version_string
@ -52,15 +44,15 @@ def copy(src, dst, merge, write_v1=True, excluded_tags=None, verbose=False):
try:
id3 = mutagen.id3.ID3(src, translate=False)
except mutagen.id3.ID3NoHeaderError:
print_(u"No ID3 header found in ", src, file=sys.stderr)
print(u"No ID3 header found in ", src, file=sys.stderr)
return 1
except Exception as err:
print_(str(err), file=sys.stderr)
print(str(err), file=sys.stderr)
return 1
if verbose:
print_(u"File", src, u"contains:", file=sys.stderr)
print_(id3.pprint(), file=sys.stderr)
print(u"File", src, u"contains:", file=sys.stderr)
print(id3.pprint(), file=sys.stderr)
for tag in excluded_tags:
id3.delall(tag)
@ -72,7 +64,7 @@ def copy(src, dst, merge, write_v1=True, excluded_tags=None, verbose=False):
# no need to merge
pass
except Exception as err:
print_(str(err), file=sys.stderr)
print(str(err), file=sys.stderr)
return 1
else:
for frame in id3.values():
@ -91,12 +83,12 @@ def copy(src, dst, merge, write_v1=True, excluded_tags=None, verbose=False):
try:
id3.save(dst, v1=(2 if write_v1 else 0), v2_version=v2_version)
except Exception as err:
print_(u"Error saving", dst, u":\n%s" % text_type(err),
file=sys.stderr)
print(u"Error saving", dst, u":\n%s" % str(err),
file=sys.stderr)
return 1
else:
if verbose:
print_(u"Successfully saved", dst, file=sys.stderr)
print(u"Successfully saved", dst, file=sys.stderr)
return 0
@ -120,12 +112,12 @@ def main(argv):
(src, dst) = args
if not os.path.isfile(src):
print_(u"File not found:", src, file=sys.stderr)
print(u"File not found:", src, file=sys.stderr)
parser.print_help(file=sys.stderr)
return 1
if not os.path.isfile(dst):
printerr(u"File not found:", dst, file=sys.stderr)
print(u"File not found:", dst, file=sys.stderr)
parser.print_help(file=sys.stderr)
return 1
@ -139,4 +131,4 @@ def main(argv):
def entry_point():
_sig.init()
return main(argv)
return main(sys.argv)

View file

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Copyright 2006 Emfox Zhou <EmfoxZhou@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
@ -15,8 +14,6 @@ import locale
import mutagen
import mutagen.id3
from mutagen._senf import argv, print_, fsnative
from mutagen._compat import text_type
from ._util import SignalHandler, OptionParser
@ -75,7 +72,7 @@ def update(options, filenames):
for filename in filenames:
with _sig.block():
if verbose != "quiet":
print_(u"Updating", filename)
print(u"Updating", filename)
if has_id3v1(filename) and not noupdate and force_v1:
mutagen.id3.delete(filename, False, True)
@ -84,10 +81,10 @@ def update(options, filenames):
id3 = mutagen.id3.ID3(filename)
except mutagen.id3.ID3NoHeaderError:
if verbose != "quiet":
print_(u"No ID3 header found; skipping...")
print(u"No ID3 header found; skipping...")
continue
except Exception as err:
print_(text_type(err), file=sys.stderr)
print(str(err), file=sys.stderr)
continue
for tag in filter(lambda t: t.startswith(("T", "COMM")), id3):
@ -111,7 +108,7 @@ def update(options, filenames):
frame.encoding = 1
if verbose == "debug":
print_(id3.pprint())
print(id3.pprint())
if not noupdate:
if remove_v1:
@ -154,9 +151,9 @@ def main(argv):
for i, arg in enumerate(argv):
if arg == "-v1":
argv[i] = fsnative(u"--force-v1")
argv[i] = "--force-v1"
elif arg == "-removev1":
argv[i] = fsnative(u"--remove-v1")
argv[i] = "--remove-v1"
(options, args) = parser.parse_args(argv[1:])
@ -168,4 +165,4 @@ def main(argv):
def entry_point():
_sig.init()
return main(argv)
return main(sys.argv)

View file

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Copyright 2005 Joe Wreschnig
#
# This program is free software; you can redistribute it and/or modify
@ -8,18 +7,17 @@
"""Pretend to be /usr/bin/id3v2 from id3lib, sort of."""
import os
import sys
import codecs
import mimetypes
import warnings
from optparse import SUPPRESS_HELP
import mutagen
import mutagen.id3
from mutagen.id3 import Encoding, PictureType
from mutagen._senf import fsnative, print_, argv, fsn2text, fsn2bytes, \
bytes2fsn
from mutagen._compat import PY2, text_type
from ._util import split_escape, SignalHandler, OptionParser
@ -57,7 +55,7 @@ Any editing operation will cause the ID3 tag to be upgraded to ID3v2.4.
def list_frames(option, opt, value, parser):
items = mutagen.id3.Frames.items()
for name, frame in sorted(items):
print_(u" --%s %s" % (name, frame.__doc__.split("\n")[0]))
print(u" --%s %s" % (name, frame.__doc__.split("\n")[0]))
raise SystemExit
@ -65,13 +63,13 @@ def list_frames_2_2(option, opt, value, parser):
items = mutagen.id3.Frames_2_2.items()
items.sort()
for name, frame in items:
print_(u" --%s %s" % (name, frame.__doc__.split("\n")[0]))
print(u" --%s %s" % (name, frame.__doc__.split("\n")[0]))
raise SystemExit
def list_genres(option, opt, value, parser):
for i, genre in enumerate(mutagen.id3.TCON.GENRES):
print_(u"%3d: %s" % (i, genre))
print(u"%3d: %s" % (i, genre))
raise SystemExit
@ -79,7 +77,7 @@ def delete_tags(filenames, v1, v2):
for filename in filenames:
with _sig.block():
if verbose:
print_(u"deleting ID3 tag info in", filename, file=sys.stderr)
print(u"deleting ID3 tag info in", filename, file=sys.stderr)
mutagen.id3.delete(filename, v1, v2)
@ -88,22 +86,22 @@ def delete_frames(deletes, filenames):
try:
deletes = frame_from_fsnative(deletes)
except ValueError as err:
print_(text_type(err), file=sys.stderr)
print(str(err), file=sys.stderr)
frames = deletes.split(",")
for filename in filenames:
with _sig.block():
if verbose:
print_(u"deleting %s from" % deletes, filename,
file=sys.stderr)
print("deleting %s from" % deletes, filename,
file=sys.stderr)
try:
id3 = mutagen.id3.ID3(filename)
except mutagen.id3.ID3NoHeaderError:
if verbose:
print_(u"No ID3 header found; skipping.", file=sys.stderr)
print(u"No ID3 header found; skipping.", file=sys.stderr)
except Exception as err:
print_(text_type(err), file=sys.stderr)
print(str(err), file=sys.stderr)
raise SystemExit(1)
else:
for frame in frames:
@ -116,36 +114,32 @@ def frame_from_fsnative(arg):
or raises ValueError.
"""
assert isinstance(arg, fsnative)
text = fsn2text(arg, strict=True)
if PY2:
return text.encode("ascii")
else:
return text.encode("ascii").decode("ascii")
assert isinstance(arg, str)
return arg.encode("ascii").decode("ascii")
def value_from_fsnative(arg, escape):
"""Takes an item from argv and returns a text_type value without
"""Takes an item from argv and returns a str value without
surrogate escapes or raises ValueError.
"""
assert isinstance(arg, fsnative)
assert isinstance(arg, str)
if escape:
bytes_ = fsn2bytes(arg)
if PY2:
bytes_ = bytes_.decode("string_escape")
else:
bytes_ = os.fsencode(arg)
# With py3.7 this has started to warn for invalid escapes, but we
# don't control the input so ignore it.
with warnings.catch_warnings():
warnings.simplefilter("ignore")
bytes_ = codecs.escape_decode(bytes_)[0]
arg = bytes2fsn(bytes_)
arg = os.fsdecode(bytes_)
text = fsn2text(arg, strict=True)
text = arg.encode("utf-8").decode("utf-8")
return text
def error(*args):
print_(*args, file=sys.stderr)
print(*args, file=sys.stderr)
raise SystemExit(1)
@ -167,7 +161,7 @@ def write_files(edits, filenames, escape):
try:
frame = frame_from_fsnative(frame)
except ValueError as err:
print_(text_type(err), file=sys.stderr)
print(str(err), file=sys.stderr)
assert isinstance(frame, str)
@ -177,9 +171,9 @@ def write_files(edits, filenames, escape):
try:
value = value_from_fsnative(value, escape)
except ValueError as err:
error(u"%s: %s" % (frame, text_type(err)))
error(u"%s: %s" % (frame, str(err)))
assert isinstance(value, text_type)
assert isinstance(value, str)
encoded_edits.append((frame, value))
edits = encoded_edits
@ -205,16 +199,16 @@ def write_files(edits, filenames, escape):
for filename in filenames:
with _sig.block():
if verbose:
print_(u"Writing", filename, file=sys.stderr)
print(u"Writing", filename, file=sys.stderr)
try:
id3 = mutagen.id3.ID3(filename)
except mutagen.id3.ID3NoHeaderError:
if verbose:
print_(u"No ID3 header found; creating a new tag",
print(u"No ID3 header found; creating a new tag",
file=sys.stderr)
id3 = mutagen.id3.ID3()
except Exception as err:
print_(str(err), file=sys.stderr)
print(str(err), file=sys.stderr)
continue
for (frame, vlist) in edits.items():
if frame == "POPM":
@ -264,7 +258,7 @@ def write_files(edits, filenames, escape):
with open(fn, "rb") as h:
data = h.read()
except IOError as e:
error(text_type(e))
error(str(e))
frame = mutagen.id3.APIC(encoding=encoding, mime=mime,
desc=desc, type=picture_type, data=data)
@ -338,31 +332,31 @@ def write_files(edits, filenames, escape):
def list_tags(filenames):
for filename in filenames:
print_("IDv2 tag info for", filename)
print("IDv2 tag info for", filename)
try:
id3 = mutagen.id3.ID3(filename, translate=False)
except mutagen.id3.ID3NoHeaderError:
print_(u"No ID3 header found; skipping.")
print(u"No ID3 header found; skipping.")
except Exception as err:
print_(text_type(err), file=sys.stderr)
print(str(err), file=sys.stderr)
raise SystemExit(1)
else:
print_(id3.pprint())
print(id3.pprint())
def list_tags_raw(filenames):
for filename in filenames:
print_("Raw IDv2 tag info for", filename)
print("Raw IDv2 tag info for", filename)
try:
id3 = mutagen.id3.ID3(filename, translate=False)
except mutagen.id3.ID3NoHeaderError:
print_(u"No ID3 header found; skipping.")
print(u"No ID3 header found; skipping.")
except Exception as err:
print_(text_type(err), file=sys.stderr)
print(str(err), file=sys.stderr)
raise SystemExit(1)
else:
for frame in id3.values():
print_(text_type(repr(frame)))
print(str(repr(frame)))
def main(argv):
@ -411,43 +405,43 @@ def main(argv):
parser.add_option(
"-a", "--artist", metavar='"ARTIST"', action="callback",
help="Set the artist information", type="string",
callback=lambda *args: args[3].edits.append((fsnative(u"--TPE1"),
callback=lambda *args: args[3].edits.append(("--TPE1",
args[2])))
parser.add_option(
"-A", "--album", metavar='"ALBUM"', action="callback",
help="Set the album title information", type="string",
callback=lambda *args: args[3].edits.append((fsnative(u"--TALB"),
callback=lambda *args: args[3].edits.append(("--TALB",
args[2])))
parser.add_option(
"-t", "--song", metavar='"SONG"', action="callback",
help="Set the song title information", type="string",
callback=lambda *args: args[3].edits.append((fsnative(u"--TIT2"),
callback=lambda *args: args[3].edits.append(("--TIT2",
args[2])))
parser.add_option(
"-c", "--comment", metavar='"DESCRIPTION":"COMMENT":"LANGUAGE"',
action="callback", help="Set the comment information", type="string",
callback=lambda *args: args[3].edits.append((fsnative(u"--COMM"),
callback=lambda *args: args[3].edits.append(("--COMM",
args[2])))
parser.add_option(
"-p", "--picture",
metavar='"FILENAME":"DESCRIPTION":"IMAGE-TYPE":"MIME-TYPE"',
action="callback", help="Set the picture", type="string",
callback=lambda *args: args[3].edits.append((fsnative(u"--APIC"),
callback=lambda *args: args[3].edits.append(("--APIC",
args[2])))
parser.add_option(
"-g", "--genre", metavar='"GENRE"', action="callback",
help="Set the genre or genre number", type="string",
callback=lambda *args: args[3].edits.append((fsnative(u"--TCON"),
callback=lambda *args: args[3].edits.append(("--TCON",
args[2])))
parser.add_option(
"-y", "--year", "--date", metavar='YYYY[-MM-DD]', action="callback",
help="Set the year/date", type="string",
callback=lambda *args: args[3].edits.append((fsnative(u"--TDRC"),
callback=lambda *args: args[3].edits.append(("--TDRC",
args[2])))
parser.add_option(
"-T", "--track", metavar='"num/num"', action="callback",
help="Set the track number/(optional) total tracks", type="string",
callback=lambda *args: args[3].edits.append((fsnative(u"--TRCK"),
callback=lambda *args: args[3].edits.append(("--TRCK",
args[2])))
for key, frame in mutagen.id3.Frames.items():
@ -487,4 +481,4 @@ def main(argv):
def entry_point():
_sig.init()
return main(argv)
return main(sys.argv)

View file

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Copyright 2006 Joe Wreschnig
#
# This program is free software; you can redistribute it and/or modify
@ -9,9 +8,9 @@
"""Split a multiplex/chained Ogg file into its component parts."""
import os
import sys
import mutagen.ogg
from mutagen._senf import argv
from ._util import SignalHandler, OptionParser
@ -72,4 +71,4 @@ def main(argv):
def entry_point():
_sig.init()
return main(argv)
return main(sys.argv)

View file

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Copyright 2005 Joe Wreschnig
#
# This program is free software; you can redistribute it and/or modify
@ -8,8 +7,7 @@
"""Full tag list for any given file."""
from mutagen._senf import print_, argv
from mutagen._compat import text_type
import sys
from ._util import SignalHandler, OptionParser
@ -20,7 +18,7 @@ _sig = SignalHandler()
def main(argv):
from mutagen import File
parser = OptionParser()
parser = OptionParser(usage="usage: %prog [options] FILE [FILE...]")
parser.add_option("--no-flac", help="Compatibility; does nothing.")
parser.add_option("--no-mp3", help="Compatibility; does nothing.")
parser.add_option("--no-apev2", help="Compatibility; does nothing.")
@ -30,16 +28,16 @@ def main(argv):
raise SystemExit(parser.print_help() or 1)
for filename in args:
print_(u"--", filename)
print(u"--", filename)
try:
print_(u"-", File(filename).pprint())
print(u"-", File(filename).pprint())
except AttributeError:
print_(u"- Unknown file type")
print(u"- Unknown file type")
except Exception as err:
print_(text_type(err))
print_(u"")
print(str(err))
print(u"")
def entry_point():
_sig.init()
return main(argv)
return main(sys.argv)

View file

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Copyright 2005 Joe Wreschnig, Michael Urman
#
# This program is free software; you can redistribute it and/or modify
@ -10,8 +9,6 @@ import os
import sys
import traceback
from mutagen._senf import print_, argv
from ._util import SignalHandler
@ -83,7 +80,7 @@ def check_dir(path):
from mutagen.mp3 import MP3
rep = Report(path)
print_(u"Scanning", path)
print(u"Scanning", path)
for path, dirs, files in os.walk(path):
files.sort()
for fn in files:
@ -100,12 +97,12 @@ def check_dir(path):
else:
rep.success(mp3.tags)
print_(str(rep))
print(str(rep))
def main(argv):
if len(argv) == 1:
print_(u"Usage:", argv[0], u"directory ...")
print(u"Usage:", argv[0], u"directory ...")
else:
for path in argv[1:]:
check_dir(path)
@ -113,4 +110,4 @@ def main(argv):
def entry_point():
SignalHandler().init()
return main(argv)
return main(sys.argv)

View file

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Copyright (C) 2006 Joe Wreschnig
#
# This program is free software; you can redistribute it and/or modify
@ -19,20 +18,36 @@ import errno
import decimal
from io import BytesIO
try:
import mmap
except ImportError:
# Google App Engine has no mmap:
# https://github.com/quodlibet/mutagen/issues/286
mmap = None
from collections import namedtuple
from contextlib import contextmanager
from functools import wraps
from fnmatch import fnmatchcase
from ._compat import chr_, PY2, iteritems, iterbytes, integer_types, xrange, \
izip, text_type, reraise
_DEFAULT_BUFFER_SIZE = 2 ** 20
def endswith(text, end):
# useful for paths which can be either str or bytes
if isinstance(text, str):
if not isinstance(end, str):
end = end.decode("ascii")
else:
if not isinstance(end, bytes):
end = end.encode("ascii")
return text.endswith(end)
def reraise(tp, value, tb):
raise tp(value).with_traceback(tb)
def bchr(x):
return bytes([x])
def iterbytes(b):
return (bytes([v]) for v in b)
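# Quick illustration of these small Python 3 byte helpers:
#   bchr(65)                   -> b"A"
#   list(iterbytes(b"ab"))     -> [b"a", b"b"]
#   endswith(b"x.mp3", ".mp3") -> True   (mixed str/bytes arguments are normalised)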
def intround(value):
@ -50,7 +65,7 @@ def is_fileobj(fileobj):
file object
"""
return not (isinstance(fileobj, (text_type, bytes)) or
return not (isinstance(fileobj, (str, bytes)) or
hasattr(fileobj, "__fspath__"))
@ -105,8 +120,8 @@ def fileobj_name(fileobj):
"""
value = getattr(fileobj, "name", u"")
if not isinstance(value, (text_type, bytes)):
value = text_type(value)
if not isinstance(value, (str, bytes)):
value = str(value)
return value
@ -212,7 +227,7 @@ def _openfile(instance, filething, filename, fileobj, writable, create):
fileobj = filething
elif hasattr(filething, "__fspath__"):
filename = filething.__fspath__()
if not isinstance(filename, (bytes, text_type)):
if not isinstance(filename, (bytes, str)):
raise TypeError("expected __fspath__() to return a filename")
else:
filename = filething
@ -302,9 +317,6 @@ def hashable(cls):
Needs a working __eq__ and __hash__ and will add a __ne__.
"""
# py2
assert "__hash__" in cls.__dict__
# py3
assert cls.__dict__["__hash__"] is not None
assert "__eq__" in cls.__dict__
@ -340,8 +352,8 @@ def enum(cls):
new_type.__module__ = cls.__module__
map_ = {}
for key, value in iteritems(d):
if key.upper() == key and isinstance(value, integer_types):
for key, value in d.items():
if key.upper() == key and isinstance(value, int):
value_instance = new_type(value)
setattr(new_type, key, value_instance)
map_[value] = key
@ -389,8 +401,8 @@ def flags(cls):
new_type.__module__ = cls.__module__
map_ = {}
for key, value in iteritems(d):
if key.upper() == key and isinstance(value, integer_types):
for key, value in d.items():
if key.upper() == key and isinstance(value, int):
value_instance = new_type(value)
setattr(new_type, key, value_instance)
map_[value] = key
@ -403,7 +415,7 @@ def flags(cls):
matches.append("%s.%s" % (type(self).__name__, v))
value &= ~k
if value != 0 or not matches:
matches.append(text_type(value))
matches.append(str(value))
return " | ".join(matches)
@ -443,25 +455,13 @@ class DictMixin(object):
else:
return True
if PY2:
has_key = __has_key
__contains__ = __has_key
if PY2:
iterkeys = lambda self: iter(self.keys())
def values(self):
return [self[k] for k in self.keys()]
if PY2:
itervalues = lambda self: iter(self.values())
def items(self):
return list(izip(self.keys(), self.values()))
if PY2:
iteritems = lambda s: iter(s.items())
return list(zip(self.keys(), self.values()))
def clear(self):
for key in list(self.keys()):
@ -591,7 +591,7 @@ def _fill_cdata(cls):
funcs["to_%s%s%s" % (prefix, name, esuffix)] = pack
funcs["to_%sint%s%s" % (prefix, bits, esuffix)] = pack
for key, func in iteritems(funcs):
for key, func in funcs.items():
setattr(cls, key, staticmethod(func))
@ -602,12 +602,11 @@ class cdata(object):
uint32_le(data)/to_uint32_le(num)/uint32_le_from(data, offset=0)
"""
from struct import error
error = error
error = struct.error
bitswap = b''.join(
chr_(sum(((val >> i) & 1) << (7 - i) for i in xrange(8)))
for val in xrange(256))
bchr(sum(((val >> i) & 1) << (7 - i) for i in range(8)))
for val in range(256))
test_bit = staticmethod(lambda value, n: bool((value >> n) & 1))
@ -683,65 +682,7 @@ def seek_end(fileobj, offset):
fileobj.seek(-offset, 2)
def mmap_move(fileobj, dest, src, count):
"""Mmaps the file object if possible and moves 'count' data
from 'src' to 'dest'. All data has to be inside the file size
(enlarging the file through this function isn't possible)
Will adjust the file offset.
Args:
fileobj (fileobj)
dest (int): The destination offset
src (int): The source offset
count (int) The amount of data to move
Raises:
mmap.error: In case move failed
IOError: In case an operation on the fileobj fails
ValueError: In case invalid parameters were given
"""
assert mmap is not None, "no mmap support"
if dest < 0 or src < 0 or count < 0:
raise ValueError("Invalid parameters")
try:
fileno = fileobj.fileno()
except (AttributeError, IOError):
raise mmap.error(
"File object does not expose/support a file descriptor")
fileobj.seek(0, 2)
filesize = fileobj.tell()
length = max(dest, src) + count
if length > filesize:
raise ValueError("Not in file size boundary")
offset = ((min(dest, src) // mmap.ALLOCATIONGRANULARITY) *
mmap.ALLOCATIONGRANULARITY)
assert dest >= offset
assert src >= offset
assert offset % mmap.ALLOCATIONGRANULARITY == 0
# Windows doesn't handle empty mappings, add a fast path here instead
if count == 0:
return
# fast path
if src == dest:
return
fileobj.flush()
file_map = mmap.mmap(fileno, length - offset, offset=offset)
try:
file_map.move(dest - offset, src - offset, count)
finally:
file_map.close()
def resize_file(fobj, diff, BUFFER_SIZE=2 ** 16):
def resize_file(fobj, diff, BUFFER_SIZE=_DEFAULT_BUFFER_SIZE):
"""Resize a file by `diff`.
New space will be filled with zeros.
@ -778,7 +719,7 @@ def resize_file(fobj, diff, BUFFER_SIZE=2 ** 16):
raise
def fallback_move(fobj, dest, src, count, BUFFER_SIZE=2 ** 16):
def move_bytes(fobj, dest, src, count, BUFFER_SIZE=_DEFAULT_BUFFER_SIZE):
"""Moves data around using read()/write().
Args:
@ -821,12 +762,11 @@ def fallback_move(fobj, dest, src, count, BUFFER_SIZE=2 ** 16):
fobj.flush()
def insert_bytes(fobj, size, offset, BUFFER_SIZE=2 ** 16):
def insert_bytes(fobj, size, offset, BUFFER_SIZE=_DEFAULT_BUFFER_SIZE):
"""Insert size bytes of empty space starting at offset.
fobj must be an open file object, open rb+ or
equivalent. Mutagen tries to use mmap to resize the file, but
falls back to a significantly slower method if mmap fails.
equivalent.
Args:
fobj (fileobj)
@ -847,22 +787,14 @@ def insert_bytes(fobj, size, offset, BUFFER_SIZE=2 ** 16):
raise ValueError
resize_file(fobj, size, BUFFER_SIZE)
if mmap is not None:
try:
mmap_move(fobj, offset + size, offset, movesize)
except mmap.error:
fallback_move(fobj, offset + size, offset, movesize, BUFFER_SIZE)
else:
fallback_move(fobj, offset + size, offset, movesize, BUFFER_SIZE)
move_bytes(fobj, offset + size, offset, movesize, BUFFER_SIZE)
def delete_bytes(fobj, size, offset, BUFFER_SIZE=2 ** 16):
def delete_bytes(fobj, size, offset, BUFFER_SIZE=_DEFAULT_BUFFER_SIZE):
"""Delete size bytes of empty space starting at offset.
fobj must be an open file object, open rb+ or
equivalent. Mutagen tries to use mmap to resize the file, but
falls back to a significantly slower method if mmap fails.
equivalent.
Args:
fobj (fileobj)
@ -882,14 +814,7 @@ def delete_bytes(fobj, size, offset, BUFFER_SIZE=2 ** 16):
if movesize < 0:
raise ValueError
if mmap is not None:
try:
mmap_move(fobj, offset, offset + size, movesize)
except mmap.error:
fallback_move(fobj, offset, offset + size, movesize, BUFFER_SIZE)
else:
fallback_move(fobj, offset, offset + size, movesize, BUFFER_SIZE)
move_bytes(fobj, offset, offset + size, movesize, BUFFER_SIZE)
resize_file(fobj, -size, BUFFER_SIZE)
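# A minimal usage sketch (hypothetical file name); the file object has to be
# opened "rb+" or equivalent:
#
#   with open("song.mp3", "rb+") as f:
#       insert_bytes(f, 10, 0)   # make room for 10 zero bytes at the start
#       delete_bytes(f, 10, 0)   # and remove them again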
@ -933,7 +858,7 @@ def dict_match(d, key, default=None):
if key in d and "[" not in key:
return d[key]
else:
for pattern, value in iteritems(d):
for pattern, value in d.items():
if fnmatchcase(key, pattern):
return value
return default
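# For example, with d = {"abc": 1, "ab*": 2}:
#   dict_match(d, "abc")            -> 1   (exact key wins)
#   dict_match(d, "abx")            -> 2   (falls back to the fnmatch pattern)
#   dict_match(d, "xyz", default=0) -> 0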
@ -1075,7 +1000,7 @@ class BitReader(object):
raise BitReaderError("not enough data")
return data
return bytes(bytearray(self.bits(8) for _ in xrange(count)))
return bytes(bytearray(self.bits(8) for _ in range(count)))
def skip(self, count):
"""Skip `count` bits.

View file

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Copyright (C) 2005-2006 Joe Wreschnig
# 2013 Christoph Reiter
#
@ -17,10 +16,10 @@ The specification is at http://www.xiph.org/vorbis/doc/v-comment.html.
"""
import sys
from io import BytesIO
import mutagen
from ._compat import reraise, BytesIO, text_type, xrange, PY3, PY2
from mutagen._util import DictMixin, cdata, MutagenError
from mutagen._util import DictMixin, cdata, MutagenError, reraise
def is_valid_key(key):
@ -32,7 +31,7 @@ def is_valid_key(key):
Takes str/unicode in Python 2, unicode in Python 3
"""
if PY3 and isinstance(key, bytes):
if isinstance(key, bytes):
raise TypeError("needs to be str not bytes")
for c in key:
@ -104,7 +103,7 @@ class VComment(mutagen.Tags, list):
vendor_length = cdata.uint_le(fileobj.read(4))
self.vendor = fileobj.read(vendor_length).decode('utf-8', errors)
count = cdata.uint_le(fileobj.read(4))
for i in xrange(count):
for i in range(count):
length = cdata.uint_le(fileobj.read(4))
try:
string = fileobj.read(length).decode('utf-8', errors)
@ -124,9 +123,7 @@ class VComment(mutagen.Tags, list):
except UnicodeEncodeError:
raise VorbisEncodingError("invalid tag name %r" % tag)
else:
# string keys in py3k
if PY3:
tag = tag.decode("ascii")
tag = tag.decode("ascii")
if is_valid_key(tag):
self.append((tag, value))
@ -145,30 +142,19 @@ class VComment(mutagen.Tags, list):
In Python 3 all keys and values have to be a string.
"""
if not isinstance(self.vendor, text_type):
if PY3:
raise ValueError("vendor needs to be str")
try:
self.vendor.decode('utf-8')
except UnicodeDecodeError:
raise ValueError
if not isinstance(self.vendor, str):
raise ValueError("vendor needs to be str")
for key, value in self:
try:
if not is_valid_key(key):
raise ValueError
raise ValueError("%r is not a valid key" % key)
except TypeError:
raise ValueError("%r is not a valid key" % key)
if not isinstance(value, text_type):
if PY3:
raise ValueError("%r needs to be str" % key)
try:
value.decode("utf-8")
except Exception:
raise ValueError("%r is not a valid value" % value)
if not isinstance(value, str):
err = "%r needs to be str for key %r" % (value, key)
raise ValueError(err)
return True
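# For instance (illustrative values): appending ("TITLE", u"Song") keeps
# validate() returning True, while appending ("TITLE", b"Song") makes it
# raise ValueError, since values now have to be str.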
@ -213,7 +199,7 @@ class VComment(mutagen.Tags, list):
def pprint(self):
def _decode(value):
if not isinstance(value, text_type):
if not isinstance(value, str):
return value.decode('utf-8', 'replace')
return value
@ -221,7 +207,7 @@ class VComment(mutagen.Tags, list):
return u"\n".join(tags)
class VCommentDict(VComment, DictMixin):
class VCommentDict(VComment, DictMixin): # type: ignore
"""A VComment that looks like a dictionary.
This object differs from a dictionary in two ways. First,
@ -242,7 +228,6 @@ class VCommentDict(VComment, DictMixin):
work.
"""
# PY3 only
if isinstance(key, slice):
return VComment.__getitem__(self, key)
@ -260,7 +245,6 @@ class VCommentDict(VComment, DictMixin):
def __delitem__(self, key):
"""Delete all values associated with the key."""
# PY3 only
if isinstance(key, slice):
return VComment.__delitem__(self, key)
@ -296,7 +280,6 @@ class VCommentDict(VComment, DictMixin):
string.
"""
# PY3 only
if isinstance(key, slice):
return VComment.__setitem__(self, key, values)
@ -310,9 +293,6 @@ class VCommentDict(VComment, DictMixin):
except KeyError:
pass
if PY2:
key = key.encode('ascii')
for value in values:
self.append((key, value))

View file

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Copyright (C) 2014 Christoph Reiter
#
# This program is free software; you can redistribute it and/or modify
@ -15,9 +14,8 @@
from mutagen import StreamInfo
from mutagen._file import FileType
from mutagen._util import BitReader, BitReaderError, MutagenError, loadfile, \
convert_error
convert_error, endswith
from mutagen.id3._util import BitPaddedInt
from mutagen._compat import endswith, xrange
_FREQS = [
@ -243,7 +241,7 @@ class ProgramConfigElement(object):
elms = num_front_channel_elements + num_side_channel_elements + \
num_back_channel_elements
channels = 0
for i in xrange(elms):
for i in range(elms):
channels += 1
element_is_cpe = r.bits(1)
if element_is_cpe:
@ -323,7 +321,7 @@ class AACInfo(StreamInfo):
self.channels = pce.channels
# other pces..
for i in xrange(npce):
for i in range(npce):
ProgramConfigElement(r)
r.align()
except BitReaderError as e:
@ -347,7 +345,7 @@ class AACInfo(StreamInfo):
# Try up to X times to find a sync word and read up to Y frames.
# If more than Z frames are valid we assume a valid stream
offset = start_offset
for i in xrange(max_sync_tries):
for i in range(max_sync_tries):
fileobj.seek(offset)
s = _ADTSStream.find_stream(fileobj, max_initial_read)
if s is None:
@ -355,7 +353,7 @@ class AACInfo(StreamInfo):
# start right after the last found offset
offset += s.offset + 1
for i in xrange(frames_max):
for i in range(frames_max):
if not s.parse_frame():
break
if not s.sync(max_resync_read):
@ -375,7 +373,10 @@ class AACInfo(StreamInfo):
fileobj.seek(0, 2)
stream_size = fileobj.tell() - (offset + s.offset)
# approx
self.length = float(s.samples * stream_size) / (s.size * s.frequency)
self.length = 0.0
if s.frequency != 0:
self.length = \
float(s.samples * stream_size) / (s.size * s.frequency)
def pprint(self):
return u"AAC (%s), %d Hz, %.2f seconds, %d channel(s), %d bps" % (

329 libs/common/mutagen/ac3.py Normal file
View file

@ -0,0 +1,329 @@
# Copyright (C) 2019 Philipp Wolfer
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""Pure AC3 file information.
"""
__all__ = ["AC3", "Open"]
from mutagen import StreamInfo
from mutagen._file import FileType
from mutagen._util import (
BitReader,
BitReaderError,
MutagenError,
convert_error,
enum,
loadfile,
endswith,
)
@enum
class ChannelMode(object):
DUALMONO = 0
MONO = 1
STEREO = 2
C3F = 3
C2F1R = 4
C3F1R = 5
C2F2R = 6
C3F2R = 7
AC3_CHANNELS = {
ChannelMode.DUALMONO: 2,
ChannelMode.MONO: 1,
ChannelMode.STEREO: 2,
ChannelMode.C3F: 3,
ChannelMode.C2F1R: 3,
ChannelMode.C3F1R: 4,
ChannelMode.C2F2R: 4,
ChannelMode.C3F2R: 5
}
AC3_HEADER_SIZE = 7
AC3_SAMPLE_RATES = [48000, 44100, 32000]
AC3_BITRATES = [
32, 40, 48, 56, 64, 80, 96, 112, 128,
160, 192, 224, 256, 320, 384, 448, 512, 576, 640
]
@enum
class EAC3FrameType(object):
INDEPENDENT = 0
DEPENDENT = 1
AC3_CONVERT = 2
RESERVED = 3
EAC3_BLOCKS = [1, 2, 3, 6]
class AC3Error(MutagenError):
pass
class AC3Info(StreamInfo):
"""AC3 stream information.
The length of the stream is just a guess and might not be correct.
Attributes:
channels (`int`): number of audio channels
length (`float`): file length in seconds, as a float
sample_rate (`int`): audio sampling rate in Hz
bitrate (`int`): audio bitrate, in bits per second
codec (`str`): ac-3 or ec-3 (Enhanced AC-3)
"""
channels = 0
length = 0
sample_rate = 0
bitrate = 0
codec = 'ac-3'
@convert_error(IOError, AC3Error)
def __init__(self, fileobj):
"""Raises AC3Error"""
header = bytearray(fileobj.read(6))
if len(header) < 6:
raise AC3Error("not enough data")
if not header.startswith(b"\x0b\x77"):
raise AC3Error("not a AC3 file")
bitstream_id = header[5] >> 3
if bitstream_id > 16:
raise AC3Error("invalid bitstream_id %i" % bitstream_id)
fileobj.seek(2)
self._read_header(fileobj, bitstream_id)
def _read_header(self, fileobj, bitstream_id):
bitreader = BitReader(fileobj)
try:
# This is partially based on code from
# https://github.com/FFmpeg/FFmpeg/blob/master/libavcodec/ac3_parser.c
if bitstream_id <= 10: # Normal AC-3
self._read_header_normal(bitreader, bitstream_id)
else: # Enhanced AC-3
self._read_header_enhanced(bitreader)
except BitReaderError as e:
raise AC3Error(e)
self.length = self._guess_length(fileobj)
def _read_header_normal(self, bitreader, bitstream_id):
r = bitreader
r.skip(16) # 16 bit CRC
sr_code = r.bits(2)
if sr_code == 3:
raise AC3Error("invalid sample rate code %i" % sr_code)
frame_size_code = r.bits(6)
if frame_size_code > 37:
raise AC3Error("invalid frame size code %i" % frame_size_code)
r.skip(5) # bitstream ID, already read
r.skip(3) # bitstream mode, not needed
channel_mode = ChannelMode(r.bits(3))
r.skip(2) # dolby surround mode or surround mix level
lfe_on = r.bits(1)
sr_shift = max(bitstream_id, 8) - 8
try:
self.sample_rate = AC3_SAMPLE_RATES[sr_code] >> sr_shift
self.bitrate = (AC3_BITRATES[frame_size_code >> 1] * 1000
) >> sr_shift
except KeyError as e:
raise AC3Error(e)
self.channels = self._get_channels(channel_mode, lfe_on)
self._skip_unused_header_bits_normal(r, channel_mode)
def _read_header_enhanced(self, bitreader):
r = bitreader
self.codec = "ec-3"
frame_type = r.bits(2)
if frame_type == EAC3FrameType.RESERVED:
raise AC3Error("invalid frame type %i" % frame_type)
r.skip(3) # substream ID, not needed
frame_size = (r.bits(11) + 1) << 1
if frame_size < AC3_HEADER_SIZE:
raise AC3Error("invalid frame size %i" % frame_size)
sr_code = r.bits(2)
try:
if sr_code == 3:
sr_code2 = r.bits(2)
if sr_code2 == 3:
raise AC3Error("invalid sample rate code %i" % sr_code2)
numblocks_code = 3
self.sample_rate = AC3_SAMPLE_RATES[sr_code2] // 2
else:
numblocks_code = r.bits(2)
self.sample_rate = AC3_SAMPLE_RATES[sr_code]
channel_mode = ChannelMode(r.bits(3))
lfe_on = r.bits(1)
self.bitrate = 8 * frame_size * self.sample_rate // (
EAC3_BLOCKS[numblocks_code] * 256)
except KeyError as e:
raise AC3Error(e)
r.skip(5) # bitstream ID, already read
self.channels = self._get_channels(channel_mode, lfe_on)
self._skip_unused_header_bits_enhanced(
r, frame_type, channel_mode, sr_code, numblocks_code)
@staticmethod
def _skip_unused_header_bits_normal(bitreader, channel_mode):
r = bitreader
r.skip(5) # Dialogue Normalization
if r.bits(1): # Compression Gain Word Exists
r.skip(8) # Compression Gain Word
if r.bits(1): # Language Code Exists
r.skip(8) # Language Code
if r.bits(1): # Audio Production Information Exists
# Mixing Level, 5 Bits
# Room Type, 2 Bits
r.skip(7)
if channel_mode == ChannelMode.DUALMONO:
r.skip(5) # Dialogue Normalization, ch2
if r.bits(1): # Compression Gain Word Exists, ch2
r.skip(8) # Compression Gain Word, ch2
if r.bits(1): # Language Code Exists, ch2
r.skip(8) # Language Code, ch2
if r.bits(1): # Audio Production Information Exists, ch2
# Mixing Level, ch2, 5 Bits
# Room Type, ch2, 2 Bits
r.skip(7)
# Copyright Bit, 1 Bit
# Original Bit Stream, 1 Bit
r.skip(2)
timecod1e = r.bits(1) # Time Code First Half Exists
timecod2e = r.bits(1) # Time Code Second Half Exists
if timecod1e:
r.skip(14) # Time Code First Half
if timecod2e:
r.skip(14) # Time Code Second Half
if r.bits(1): # Additional Bit Stream Information Exists
addbsil = r.bits(6) # Additional Bit Stream Information Length
r.skip((addbsil + 1) * 8)
@staticmethod
def _skip_unused_header_bits_enhanced(bitreader, frame_type, channel_mode,
sr_code, numblocks_code):
r = bitreader
r.skip(5) # Dialogue Normalization
if r.bits(1): # Compression Gain Word Exists
r.skip(8) # Compression Gain Word
if channel_mode == ChannelMode.DUALMONO:
r.skip(5) # Dialogue Normalization, ch2
if r.bits(1): # Compression Gain Word Exists, ch2
r.skip(8) # Compression Gain Word, ch2
if frame_type == EAC3FrameType.DEPENDENT:
if r.bits(1): # chanmap exists
r.skip(16) # chanmap
if r.bits(1): # mixmdate, 1 Bit
# FIXME: Handle channel dependent fields
return
if r.bits(1): # Informational Metadata Exists
# bsmod, 3 Bits
# Copyright Bit, 1 Bit
# Original Bit Stream, 1 Bit
r.skip(5)
if channel_mode == ChannelMode.STEREO:
# dsurmod, 2 Bits
# dheadphonmod, 2 Bits
r.skip(4)
elif channel_mode >= ChannelMode.C2F2R:
r.skip(2) # dsurexmod
if r.bits(1): # Audio Production Information Exists
# Mixing Level, 5 Bits
# Room Type, 2 Bits
# adconvtyp, 1 Bit
r.skip(8)
if channel_mode == ChannelMode.DUALMONO:
if r.bits(1): # Audio Production Information Exists, ch2
# Mixing Level, ch2, 5 Bits
# Room Type, ch2, 2 Bits
# adconvtyp, ch2, 1 Bit
r.skip(8)
if sr_code < 3: # if not half sample rate
r.skip(1) # sourcefscod
if frame_type == EAC3FrameType.INDEPENDENT and numblocks_code == 3:
r.skip(1) # convsync
if frame_type == EAC3FrameType.AC3_CONVERT:
if numblocks_code != 3:
if r.bits(1): # blkid
r.skip(6) # frmsizecod
if r.bits(1): # Additional Bit Stream Information Exists
addbsil = r.bits(6) # Additional Bit Stream Information Length
r.skip((addbsil + 1) * 8)
@staticmethod
def _get_channels(channel_mode, lfe_on):
try:
return AC3_CHANNELS[channel_mode] + lfe_on
except KeyError as e:
raise AC3Error(e)
def _guess_length(self, fileobj):
# use bitrate + data size to guess length
if self.bitrate == 0:
return
start = fileobj.tell()
fileobj.seek(0, 2)
length = fileobj.tell() - start
return 8.0 * length / self.bitrate
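# Illustrative sanity check for the estimate above (not part of the vendored
# module; numbers are hypothetical): with self.bitrate = 384000 bps and
# 960000 bytes of frame data remaining after the header, the guessed length
# is 8.0 * 960000 / 384000 = 20.0 seconds.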
def pprint(self):
return u"%s, %d Hz, %.2f seconds, %d channel(s), %d bps" % (
self.codec, self.sample_rate, self.length, self.channels,
self.bitrate)
class AC3(FileType):
"""AC3(filething)
Arguments:
filething (filething)
Load AC3 or EAC3 files.
Tagging is not supported.
Use the ID3/APEv2 classes directly instead.
Attributes:
info (`AC3Info`)
"""
_mimes = ["audio/ac3"]
@loadfile()
def load(self, filething):
self.info = AC3Info(filething.fileobj)
def add_tags(self):
raise AC3Error("doesn't support tags")
@staticmethod
def score(filename, fileobj, header):
return header.startswith(b"\x0b\x77") * 2 \
+ (endswith(filename, ".ac3") or endswith(filename, ".eac3"))
Open = AC3
error = AC3Error
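# A minimal usage sketch (illustrative, not part of the vendored file); the
# path is hypothetical and the import assumes this module is exposed as
# mutagen.ac3 within the vendored package:
#
# from mutagen.ac3 import AC3
# f = AC3("example.ac3")
# print(f.info.pprint()) # e.g. "ac-3, 48000 Hz, 20.00 seconds, 6 channel(s), 384000 bps"
# print(f.info.codec, f.info.channels, f.info.sample_rate)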

View file

@ -1,6 +1,6 @@
# -*- coding: utf-8 -*-
# Copyright (C) 2014 Evan Purkhiser
# 2014 Ben Ockmore
# 2019-2020 Philipp Wolfer
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@ -9,26 +9,30 @@
"""AIFF audio stream information and tags."""
import sys
import struct
from struct import pack
from ._compat import endswith, text_type, reraise
from mutagen import StreamInfo, FileType
from mutagen.id3 import ID3
from mutagen.id3._util import ID3NoHeaderError, error as ID3Error
from mutagen._util import resize_bytes, delete_bytes, MutagenError, loadfile, \
convert_error
from mutagen._iff import (
IffChunk,
IffContainerChunkMixin,
IffFile,
IffID3,
InvalidChunk,
error as IffError,
)
from mutagen._util import (
convert_error,
loadfile,
endswith,
)
__all__ = ["AIFF", "Open", "delete"]
class error(MutagenError):
pass
class InvalidChunk(error):
class error(IffError):
pass
@ -36,22 +40,10 @@ class InvalidChunk(error):
_HUGE_VAL = 1.79769313486231e+308
def is_valid_chunk_id(id):
assert isinstance(id, text_type)
def read_float(data):
"""Raises OverflowError"""
return ((len(id) <= 4) and (min(id) >= u' ') and
(max(id) <= u'~'))
def assert_valid_chunk_id(id):
assert isinstance(id, text_type)
if not is_valid_chunk_id(id):
raise ValueError("AIFF key must be four ASCII characters.")
def read_float(data): # 10 bytes
assert len(data) == 10
expon, himant, lomant = struct.unpack('>hLL', data)
sign = 1
if expon < 0:
@ -60,156 +52,70 @@ def read_float(data): # 10 bytes
if expon == himant == lomant == 0:
f = 0.0
elif expon == 0x7FFF:
f = _HUGE_VAL
raise OverflowError("inf and nan not supported")
else:
expon = expon - 16383
# this can raise OverflowError too
f = (himant * 0x100000000 + lomant) * pow(2.0, expon - 63)
return sign * f
class IFFChunk(object):
class AIFFChunk(IffChunk):
"""Representation of a single IFF chunk"""
# Chunk headers are 8 bytes long (4 for ID and 4 for the size)
HEADER_SIZE = 8
@classmethod
def parse_header(cls, header):
return struct.unpack('>4sI', header)
def __init__(self, fileobj, parent_chunk=None):
self.__fileobj = fileobj
self.parent_chunk = parent_chunk
self.offset = fileobj.tell()
@classmethod
def get_class(cls, id):
if id == 'FORM':
return AIFFFormChunk
else:
return cls
header = fileobj.read(self.HEADER_SIZE)
if len(header) < self.HEADER_SIZE:
raise InvalidChunk()
def write_new_header(self, id_, size):
self._fileobj.write(pack('>4sI', id_, size))
self.id, self.data_size = struct.unpack('>4si', header)
try:
self.id = self.id.decode('ascii')
except UnicodeDecodeError:
raise InvalidChunk()
if not is_valid_chunk_id(self.id):
raise InvalidChunk()
self.size = self.HEADER_SIZE + self.data_size
self.data_offset = fileobj.tell()
def read(self):
"""Read the chunks data"""
self.__fileobj.seek(self.data_offset)
return self.__fileobj.read(self.data_size)
def write(self, data):
"""Write the chunk data"""
if len(data) > self.data_size:
raise ValueError
self.__fileobj.seek(self.data_offset)
self.__fileobj.write(data)
def delete(self):
"""Removes the chunk from the file"""
delete_bytes(self.__fileobj, self.size, self.offset)
if self.parent_chunk is not None:
self.parent_chunk._update_size(
self.parent_chunk.data_size - self.size)
def _update_size(self, data_size):
"""Update the size of the chunk"""
self.__fileobj.seek(self.offset + 4)
self.__fileobj.write(pack('>I', data_size))
if self.parent_chunk is not None:
size_diff = self.data_size - data_size
self.parent_chunk._update_size(
self.parent_chunk.data_size - size_diff)
self.data_size = data_size
self.size = data_size + self.HEADER_SIZE
def resize(self, new_data_size):
"""Resize the file and update the chunk sizes"""
resize_bytes(
self.__fileobj, self.data_size, new_data_size, self.data_offset)
self._update_size(new_data_size)
def write_size(self):
self._fileobj.write(pack('>I', self.data_size))
class IFFFile(object):
"""Representation of a IFF file"""
class AIFFFormChunk(AIFFChunk, IffContainerChunkMixin):
"""The AIFF root chunk."""
def parse_next_subchunk(self):
return AIFFChunk.parse(self._fileobj, self)
def __init__(self, fileobj, id, data_size, parent_chunk):
if id != u'FORM':
raise InvalidChunk('Expected FORM chunk, got %s' % id)
AIFFChunk.__init__(self, fileobj, id, data_size, parent_chunk)
self.init_container()
class AIFFFile(IffFile):
"""Representation of a AIFF file"""
def __init__(self, fileobj):
self.__fileobj = fileobj
self.__chunks = {}
# AIFF Files always start with the FORM chunk which contains a 4 byte
# ID before the start of other chunks
fileobj.seek(0)
self.__chunks[u'FORM'] = IFFChunk(fileobj)
super().__init__(AIFFChunk, fileobj)
# Skip past the 4 byte FORM id
fileobj.seek(IFFChunk.HEADER_SIZE + 4)
# Where the next chunk can be located. We need to keep track of this
# since the size indicated in the FORM header may not match up with the
# offset determined from the size of the last chunk in the file
self.__next_offset = fileobj.tell()
# Load all of the chunks
while True:
try:
chunk = IFFChunk(fileobj, self[u'FORM'])
except InvalidChunk:
break
self.__chunks[chunk.id.strip()] = chunk
# Calculate the location of the next chunk,
# considering the pad byte
self.__next_offset = chunk.offset + chunk.size
self.__next_offset += self.__next_offset % 2
fileobj.seek(self.__next_offset)
if self.root.id != u'FORM':
raise InvalidChunk("Root chunk must be a FORM chunk, got %s"
% self.root.id)
def __contains__(self, id_):
"""Check if the IFF file contains a specific chunk"""
assert_valid_chunk_id(id_)
return id_ in self.__chunks
if id_ == 'FORM': # For backwards compatibility
return True
return super().__contains__(id_)
def __getitem__(self, id_):
"""Get a chunk from the IFF file"""
assert_valid_chunk_id(id_)
try:
return self.__chunks[id_]
except KeyError:
raise KeyError(
"%r has no %r chunk" % (self.__fileobj, id_))
def __delitem__(self, id_):
"""Remove a chunk from the IFF file"""
assert_valid_chunk_id(id_)
self.__chunks.pop(id_).delete()
def insert_chunk(self, id_):
"""Insert a new chunk at the end of the IFF file"""
assert_valid_chunk_id(id_)
self.__fileobj.seek(self.__next_offset)
self.__fileobj.write(pack('>4si', id_.ljust(4).encode('ascii'), 0))
self.__fileobj.seek(self.__next_offset)
chunk = IFFChunk(self.__fileobj, self[u'FORM'])
self[u'FORM']._update_size(self[u'FORM'].data_size + chunk.size)
self.__chunks[id_] = chunk
self.__next_offset = chunk.offset + chunk.size
if id_ == 'FORM': # For backwards compatibility
return self.root
return super().__getitem__(id_)
class AIFFInfo(StreamInfo):
@ -224,7 +130,7 @@ class AIFFInfo(StreamInfo):
bitrate (`int`): audio bitrate, in bits per second
channels (`int`): The number of audio channels
sample_rate (`int`): audio sample rate, in Hz
sample_size (`int`): The audio sample size
bits_per_sample (`int`): The audio sample size
"""
length = 0
@ -236,7 +142,7 @@ class AIFFInfo(StreamInfo):
def __init__(self, fileobj):
"""Raises error"""
iff = IFFFile(fileobj)
iff = AIFFFile(fileobj)
try:
common_chunk = iff[u'COMM']
except KeyError as e:
@ -249,61 +155,30 @@ class AIFFInfo(StreamInfo):
info = struct.unpack('>hLh10s', data[:18])
channels, frame_count, sample_size, sample_rate = info
self.sample_rate = int(read_float(sample_rate))
self.sample_size = sample_size
try:
self.sample_rate = int(read_float(sample_rate))
except OverflowError:
raise error("Invalid sample rate")
if self.sample_rate < 0:
raise error("Invalid sample rate")
if self.sample_rate != 0:
self.length = frame_count / float(self.sample_rate)
self.bits_per_sample = sample_size
self.sample_size = sample_size # For backward compatibility
self.channels = channels
self.bitrate = channels * sample_size * self.sample_rate
self.length = frame_count / float(self.sample_rate)
def pprint(self):
return u"%d channel AIFF @ %d bps, %s Hz, %.2f seconds" % (
self.channels, self.bitrate, self.sample_rate, self.length)
class _IFFID3(ID3):
class _IFFID3(IffID3):
"""A AIFF file with ID3v2 tags"""
def _pre_load_header(self, fileobj):
try:
fileobj.seek(IFFFile(fileobj)[u'ID3'].data_offset)
except (InvalidChunk, KeyError):
raise ID3NoHeaderError("No ID3 chunk")
@convert_error(IOError, error)
@loadfile(writable=True)
def save(self, filething, v2_version=4, v23_sep='/', padding=None):
"""Save ID3v2 data to the AIFF file"""
fileobj = filething.fileobj
iff_file = IFFFile(fileobj)
if u'ID3' not in iff_file:
iff_file.insert_chunk(u'ID3')
chunk = iff_file[u'ID3']
try:
data = self._prepare_data(
fileobj, chunk.data_offset, chunk.data_size, v2_version,
v23_sep, padding)
except ID3Error as e:
reraise(error, e, sys.exc_info()[2])
new_size = len(data)
new_size += new_size % 2 # pad byte
assert new_size % 2 == 0
chunk.resize(new_size)
data += (new_size - len(data)) * b'\x00'
assert new_size == len(data)
chunk.write(data)
@loadfile(writable=True)
def delete(self, filething):
"""Completely removes the ID3 chunk from the AIFF file"""
delete(filething)
self.clear()
def _load_file(self, fileobj):
return AIFFFile(fileobj)
@convert_error(IOError, error)
@ -312,7 +187,7 @@ def delete(filething):
"""Completely removes the ID3 chunk from the AIFF file"""
try:
del IFFFile(filething.fileobj)[u'ID3']
del AIFFFile(filething.fileobj)[u'ID3']
except KeyError:
pass

View file

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Copyright (C) 2005 Joe Wreschnig
#
# This program is free software; you can redistribute it and/or modify
@ -32,24 +31,17 @@ __all__ = ["APEv2", "APEv2File", "Open", "delete"]
import sys
import struct
from collections import MutableSequence
from io import BytesIO
from collections.abc import MutableSequence
from ._compat import (cBytesIO, PY3, text_type, PY2, reraise, swap_to_string,
xrange)
from mutagen import Metadata, FileType, StreamInfo
from mutagen._util import DictMixin, cdata, delete_bytes, total_ordering, \
MutagenError, loadfile, convert_error, seek_end, get_size
MutagenError, loadfile, convert_error, seek_end, get_size, reraise
def is_valid_apev2_key(key):
if not isinstance(key, text_type):
if PY3:
raise TypeError("APEv2 key must be str")
try:
key = key.decode('ascii')
except UnicodeDecodeError:
return False
if not isinstance(key, str):
raise TypeError("APEv2 key must be str")
# PY26 - Change to set literal syntax (since set is faster than list here)
return ((2 <= len(key) <= 255) and (min(key) >= u' ') and
@ -61,7 +53,7 @@ def is_valid_apev2_key(key):
# 1: Item contains binary information
# 2: Item is a locator of external stored information [e.g. URL]
# 3: reserved"
TEXT, BINARY, EXTERNAL = xrange(3)
TEXT, BINARY, EXTERNAL = range(3)
HAS_HEADER = 1 << 31
HAS_NO_FOOTER = 1 << 30
@ -301,9 +293,9 @@ class APEv2(_CIDictProxy, Metadata):
def __parse_tag(self, tag, count):
"""Raises IOError and APEBadItemError"""
fileobj = cBytesIO(tag)
fileobj = BytesIO(tag)
for i in xrange(count):
for i in range(count):
tag_data = fileobj.read(8)
# someone writes wrong item counts
if not tag_data:
@ -330,11 +322,10 @@ class APEv2(_CIDictProxy, Metadata):
if key[-1:] == b"\x00":
key = key[:-1]
if PY3:
try:
key = key.decode("ascii")
except UnicodeError as err:
reraise(APEBadItemError, err, sys.exc_info()[2])
try:
key = key.decode("ascii")
except UnicodeError as err:
reraise(APEBadItemError, err, sys.exc_info()[2])
value = fileobj.read(size)
if len(value) != size:
raise APEBadItemError
@ -346,16 +337,12 @@ class APEv2(_CIDictProxy, Metadata):
def __getitem__(self, key):
if not is_valid_apev2_key(key):
raise KeyError("%r is not a valid APEv2 key" % key)
if PY2:
key = key.encode('ascii')
return super(APEv2, self).__getitem__(key)
def __delitem__(self, key):
if not is_valid_apev2_key(key):
raise KeyError("%r is not a valid APEv2 key" % key)
if PY2:
key = key.encode('ascii')
super(APEv2, self).__delitem__(key)
@ -383,43 +370,28 @@ class APEv2(_CIDictProxy, Metadata):
if not is_valid_apev2_key(key):
raise KeyError("%r is not a valid APEv2 key" % key)
if PY2:
key = key.encode('ascii')
if not isinstance(value, _APEValue):
# let's guess at the content if we're not already a value...
if isinstance(value, text_type):
if isinstance(value, str):
# unicode? we've got to be text.
value = APEValue(value, TEXT)
elif isinstance(value, list):
items = []
for v in value:
if not isinstance(v, text_type):
if PY3:
raise TypeError("item in list not str")
v = v.decode("utf-8")
if not isinstance(v, str):
raise TypeError("item in list not str")
items.append(v)
# list? text.
value = APEValue(u"\0".join(items), TEXT)
else:
if PY3:
value = APEValue(value, BINARY)
else:
try:
value.decode("utf-8")
except UnicodeError:
# invalid UTF8 text, probably binary
value = APEValue(value, BINARY)
else:
# valid UTF8, probably text
value = APEValue(value, TEXT)
value = APEValue(value, BINARY)
super(APEv2, self).__setitem__(key, value)
@convert_error(IOError, error)
@loadfile(writable=True, create=True)
def save(self, filething):
def save(self, filething=None):
"""Save changes to a file.
If no filename is given, the one most recently loaded is used.
@ -481,7 +453,7 @@ class APEv2(_CIDictProxy, Metadata):
@convert_error(IOError, error)
@loadfile(writable=True)
def delete(self, filething):
def delete(self, filething=None):
"""Remove tags from a file."""
fileobj = filething.fileobj
@ -544,7 +516,7 @@ def APEValue(value, kind):
class _APEValue(object):
kind = None
kind: int
value = None
def __init__(self, value, kind=None):
@ -578,7 +550,6 @@ class _APEValue(object):
return "%s(%r, %d)" % (type(self).__name__, self.value, self.kind)
@swap_to_string
@total_ordering
class _APEUtf8Value(_APEValue):
@ -589,11 +560,8 @@ class _APEUtf8Value(_APEValue):
reraise(APEBadItemError, e, sys.exc_info()[2])
def _validate(self, value):
if not isinstance(value, text_type):
if PY3:
raise TypeError("value not str")
else:
value = value.decode("utf-8")
if not isinstance(value, str):
raise TypeError("value not str")
return value
def _write(self):
@ -636,22 +604,16 @@ class APETextValue(_APEUtf8Value, MutableSequence):
return self.value.count(u"\0") + 1
def __setitem__(self, index, value):
if not isinstance(value, text_type):
if PY3:
raise TypeError("value not str")
else:
value = value.decode("utf-8")
if not isinstance(value, str):
raise TypeError("value not str")
values = list(self)
values[index] = value
self.value = u"\0".join(values)
def insert(self, index, value):
if not isinstance(value, text_type):
if PY3:
raise TypeError("value not str")
else:
value = value.decode("utf-8")
if not isinstance(value, str):
raise TypeError("value not str")
values = list(self)
values.insert(index, value)
@ -666,7 +628,6 @@ class APETextValue(_APEUtf8Value, MutableSequence):
return u" / ".join(self)
@swap_to_string
@total_ordering
class APEBinaryValue(_APEValue):
"""An APEv2 binary value."""

View file

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Copyright (C) 2005-2006 Joe Wreschnig
# Copyright (C) 2006-2007 Lukas Lalinsky
#
@ -13,7 +12,6 @@ __all__ = ["ASF", "Open"]
from mutagen import FileType, Tags, StreamInfo
from mutagen._util import resize_bytes, DictMixin, loadfile, convert_error
from mutagen._compat import string_types, long_, PY3, izip
from ._util import error, ASFError, ASFHeaderError
from ._objects import HeaderObject, MetadataLibraryObject, MetadataObject, \
@ -24,7 +22,7 @@ from ._attrs import ASFGUIDAttribute, ASFWordAttribute, ASFQWordAttribute, \
ASFUnicodeAttribute, ASFBaseAttribute, ASFValue
# pyflakes
# flake8
error, ASFError, ASFHeaderError, ASFValue
@ -75,7 +73,7 @@ class ASFInfo(StreamInfo):
return s
class ASFTags(list, DictMixin, Tags):
class ASFTags(list, DictMixin, Tags): # type: ignore
"""ASFTags()
Dictionary containing ASF attributes.
@ -89,7 +87,6 @@ class ASFTags(list, DictMixin, Tags):
"""
# PY3 only
if isinstance(key, slice):
return list.__getitem__(self, key)
@ -102,7 +99,6 @@ class ASFTags(list, DictMixin, Tags):
def __delitem__(self, key):
"""Delete all values associated with the key."""
# PY3 only
if isinstance(key, slice):
return list.__delitem__(self, key)
@ -129,7 +125,6 @@ class ASFTags(list, DictMixin, Tags):
string.
"""
# PY3 only
if isinstance(key, slice):
return list.__setitem__(self, key, values)
@ -139,16 +134,14 @@ class ASFTags(list, DictMixin, Tags):
to_append = []
for value in values:
if not isinstance(value, ASFBaseAttribute):
if isinstance(value, string_types):
if isinstance(value, str):
value = ASFUnicodeAttribute(value)
elif PY3 and isinstance(value, bytes):
elif isinstance(value, bytes):
value = ASFByteArrayAttribute(value)
elif isinstance(value, bool):
value = ASFBoolAttribute(value)
elif isinstance(value, int):
value = ASFDWordAttribute(value)
elif isinstance(value, long_):
value = ASFQWordAttribute(value)
else:
raise TypeError("Invalid type %r" % type(value))
to_append.append((key, value))
@ -163,7 +156,7 @@ class ASFTags(list, DictMixin, Tags):
def keys(self):
"""Return a sequence of all keys in the comment."""
return self and set(next(izip(*self)))
return self and set(next(zip(*self)))
def as_dict(self):
"""Return a copy of the comment data in a real dict."""
@ -252,7 +245,7 @@ class ASF(FileType):
@convert_error(IOError, error)
@loadfile(writable=True)
def save(self, filething, padding=None):
def save(self, filething=None, padding=None):
"""save(filething=None, padding=None)
Save tag changes back to the loaded file.
@ -319,7 +312,7 @@ class ASF(FileType):
raise ASFError
@loadfile(writable=True)
def delete(self, filething):
def delete(self, filething=None):
"""delete(filething=None)
Args:

View file

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Copyright (C) 2005-2006 Joe Wreschnig
# Copyright (C) 2006-2007 Lukas Lalinsky
#
@ -9,9 +8,9 @@
import sys
import struct
from typing import Dict, Type
from mutagen._compat import swap_to_string, text_type, PY2, reraise
from mutagen._util import total_ordering
from mutagen._util import total_ordering, reraise
from ._util import ASFError
@ -19,9 +18,9 @@ from ._util import ASFError
class ASFBaseAttribute(object):
"""Generic attribute."""
TYPE = None
TYPE: int
_TYPES = {}
_TYPES: "Dict[int, Type[ASFBaseAttribute]]" = {}
value = None
"""The Python value of this attribute (type depends on the class)"""
@ -103,7 +102,6 @@ class ASFBaseAttribute(object):
@ASFBaseAttribute._register
@swap_to_string
@total_ordering
class ASFUnicodeAttribute(ASFBaseAttribute):
"""Unicode string attribute.
@ -122,11 +120,8 @@ class ASFUnicodeAttribute(ASFBaseAttribute):
reraise(ASFError, e, sys.exc_info()[2])
def _validate(self, value):
if not isinstance(value, text_type):
if PY2:
return value.decode("utf-8")
else:
raise TypeError("%r not str" % value)
if not isinstance(value, str):
raise TypeError("%r not str" % value)
return value
def _render(self):
@ -142,16 +137,15 @@ class ASFUnicodeAttribute(ASFBaseAttribute):
return self.value
def __eq__(self, other):
return text_type(self) == other
return str(self) == other
def __lt__(self, other):
return text_type(self) < other
return str(self) < other
__hash__ = ASFBaseAttribute.__hash__
@ASFBaseAttribute._register
@swap_to_string
@total_ordering
class ASFByteArrayAttribute(ASFBaseAttribute):
"""Byte array attribute.
@ -194,7 +188,6 @@ class ASFByteArrayAttribute(ASFBaseAttribute):
@ASFBaseAttribute._register
@swap_to_string
@total_ordering
class ASFBoolAttribute(ASFBaseAttribute):
"""Bool attribute.
@ -228,10 +221,10 @@ class ASFBoolAttribute(ASFBaseAttribute):
return bool(self.value)
def __bytes__(self):
return text_type(self.value).encode('utf-8')
return str(self.value).encode('utf-8')
def __str__(self):
return text_type(self.value)
return str(self.value)
def __eq__(self, other):
return bool(self.value) == other
@ -243,7 +236,6 @@ class ASFBoolAttribute(ASFBaseAttribute):
@ASFBaseAttribute._register
@swap_to_string
@total_ordering
class ASFDWordAttribute(ASFBaseAttribute):
"""DWORD attribute.
@ -274,10 +266,10 @@ class ASFDWordAttribute(ASFBaseAttribute):
return self.value
def __bytes__(self):
return text_type(self.value).encode('utf-8')
return str(self.value).encode('utf-8')
def __str__(self):
return text_type(self.value)
return str(self.value)
def __eq__(self, other):
return int(self.value) == other
@ -289,7 +281,6 @@ class ASFDWordAttribute(ASFBaseAttribute):
@ASFBaseAttribute._register
@swap_to_string
@total_ordering
class ASFQWordAttribute(ASFBaseAttribute):
"""QWORD attribute.
@ -320,10 +311,10 @@ class ASFQWordAttribute(ASFBaseAttribute):
return self.value
def __bytes__(self):
return text_type(self.value).encode('utf-8')
return str(self.value).encode('utf-8')
def __str__(self):
return text_type(self.value)
return str(self.value)
def __eq__(self, other):
return int(self.value) == other
@ -335,7 +326,6 @@ class ASFQWordAttribute(ASFBaseAttribute):
@ASFBaseAttribute._register
@swap_to_string
@total_ordering
class ASFWordAttribute(ASFBaseAttribute):
"""WORD attribute.
@ -366,10 +356,10 @@ class ASFWordAttribute(ASFBaseAttribute):
return self.value
def __bytes__(self):
return text_type(self.value).encode('utf-8')
return str(self.value).encode('utf-8')
def __str__(self):
return text_type(self.value)
return str(self.value)
def __eq__(self, other):
return int(self.value) == other
@ -381,7 +371,6 @@ class ASFWordAttribute(ASFBaseAttribute):
@ASFBaseAttribute._register
@swap_to_string
@total_ordering
class ASFGUIDAttribute(ASFBaseAttribute):
"""GUID attribute."""

View file

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Copyright (C) 2005-2006 Joe Wreschnig
# Copyright (C) 2006-2007 Lukas Lalinsky
#
@ -8,9 +7,9 @@
# (at your option) any later version.
import struct
from typing import Dict, Type
from mutagen._util import cdata, get_size
from mutagen._compat import text_type, xrange, izip
from mutagen._tags import PaddingInfo
from ._util import guid2bytes, bytes2guid, CODECS, ASFError, ASFHeaderError
@ -20,8 +19,8 @@ from ._attrs import ASFBaseAttribute, ASFUnicodeAttribute
class BaseObject(object):
"""Base ASF object."""
GUID = None
_TYPES = {}
GUID: bytes
_TYPES: "Dict[bytes, Type[BaseObject]]" = {}
def __init__(self):
self.objects = []
@ -89,7 +88,7 @@ class HeaderObject(BaseObject):
remaining_header, num_objects = cls.parse_size(fileobj)
remaining_header -= 30
for i in xrange(num_objects):
for i in range(num_objects):
obj_header_size = 24
if remaining_header < obj_header_size:
raise ASFHeaderError("invalid header size")
@ -108,13 +107,16 @@ class HeaderObject(BaseObject):
try:
data = fileobj.read(payload_size)
except OverflowError:
except (OverflowError, MemoryError):
# read doesn't take 64bit values
raise ASFHeaderError("invalid header size")
if len(data) != payload_size:
raise ASFHeaderError("truncated")
obj.parse(asf, data)
try:
obj.parse(asf, data)
except struct.error:
raise ASFHeaderError("truncated")
header.objects.append(obj)
return header
@ -151,7 +153,8 @@ class HeaderObject(BaseObject):
# ask the user for padding adjustments
file_size = get_size(fileobj)
content_size = file_size - available
assert content_size >= 0
if content_size < 0:
raise ASFHeaderError("truncated content")
info = PaddingInfo(available - needed_size, content_size)
# add padding
@ -200,7 +203,7 @@ class ContentDescriptionObject(BaseObject):
texts.append(None)
pos = end
for key, value in izip(self.NAMES, texts):
for key, value in zip(self.NAMES, texts):
if value is not None:
value = ASFUnicodeAttribute(value=value)
asf._tags.setdefault(self.GUID, []).append((key, value))
@ -209,7 +212,7 @@ class ContentDescriptionObject(BaseObject):
def render_text(name):
value = asf.to_content_description.get(name)
if value is not None:
return text_type(value).encode("utf-16-le") + b"\x00\x00"
return str(value).encode("utf-16-le") + b"\x00\x00"
else:
return b""
@ -228,7 +231,7 @@ class ExtendedContentDescriptionObject(BaseObject):
super(ExtendedContentDescriptionObject, self).parse(asf, data)
num_attributes, = struct.unpack("<H", data[0:2])
pos = 2
for i in xrange(num_attributes):
for i in range(num_attributes):
name_length, = struct.unpack("<H", data[pos:pos + 2])
pos += 2
name = data[pos:pos + name_length]
@ -256,6 +259,8 @@ class FilePropertiesObject(BaseObject):
def parse(self, asf, data):
super(FilePropertiesObject, self).parse(asf, data)
if len(data) < 64:
raise ASFError("invalid field property entry")
length, _, preroll = struct.unpack("<QQQ", data[40:64])
# there are files where preroll is larger than length, limit to >= 0
asf.info.length = max((length / 10000000.0) - (preroll / 1000.0), 0.0)
@ -319,7 +324,7 @@ class CodecListObject(BaseObject):
offset = 16
count, offset = cdata.uint32_le_from(data, offset)
for i in xrange(count):
for i in range(count):
try:
offset, type_, name, desc, codec = \
self._parse_entry(data, offset)
@ -377,6 +382,8 @@ class HeaderExtensionObject(BaseObject):
while datapos < datasize:
guid, size = struct.unpack(
"<16sQ", data[22 + datapos:22 + datapos + 24])
if size < 1:
raise ASFHeaderError("invalid size in header extension")
obj = BaseObject._get_object(guid)
obj.parse(asf, data[22 + datapos + 24:22 + datapos + size])
self.objects.append(obj)
@ -407,7 +414,7 @@ class MetadataObject(BaseObject):
super(MetadataObject, self).parse(asf, data)
num_attributes, = struct.unpack("<H", data[0:2])
pos = 2
for i in xrange(num_attributes):
for i in range(num_attributes):
(reserved, stream, name_length, value_type,
value_length) = struct.unpack("<HHHHI", data[pos:pos + 12])
pos += 12
@ -439,7 +446,7 @@ class MetadataLibraryObject(BaseObject):
super(MetadataLibraryObject, self).parse(asf, data)
num_attributes, = struct.unpack("<H", data[0:2])
pos = 2
for i in xrange(num_attributes):
for i in range(num_attributes):
(language, stream, name_length, value_type,
value_length) = struct.unpack("<HHHHI", data[pos:pos + 12])
pos += 12

View file

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Copyright (C) 2005-2006 Joe Wreschnig
# Copyright (C) 2006-2007 Lukas Lalinsky
#

View file

@ -0,0 +1,266 @@
# Copyright (C) 2020 Philipp Wolfer
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""DSDIFF audio stream information and tags."""
import struct
from mutagen import StreamInfo
from mutagen._file import FileType
from mutagen._iff import (
IffChunk,
IffContainerChunkMixin,
IffID3,
IffFile,
InvalidChunk,
error as IffError,
)
from mutagen.id3._util import ID3NoHeaderError, error as ID3Error
from mutagen._util import (
convert_error,
loadfile,
endswith,
)
__all__ = ["DSDIFF", "Open", "delete"]
class error(IffError):
pass
# See
# https://dsd-guide.com/sites/default/files/white-papers/DSDIFF_1.5_Spec.pdf
class DSDIFFChunk(IffChunk):
"""Representation of a single DSDIFF chunk"""
HEADER_SIZE = 12
@classmethod
def parse_header(cls, header):
return struct.unpack('>4sQ', header)
@classmethod
def get_class(cls, id):
if id in DSDIFFListChunk.LIST_CHUNK_IDS:
return DSDIFFListChunk
elif id == 'DST':
return DSTChunk
else:
return cls
def write_new_header(self, id_, size):
self._fileobj.write(struct.pack('>4sQ', id_, size))
def write_size(self):
self._fileobj.write(struct.pack('>Q', self.data_size))
class DSDIFFListChunk(DSDIFFChunk, IffContainerChunkMixin):
"""A DSDIFF chunk containing other chunks.
"""
LIST_CHUNK_IDS = ['FRM8', 'PROP']
def parse_next_subchunk(self):
return DSDIFFChunk.parse(self._fileobj, self)
def __init__(self, fileobj, id, data_size, parent_chunk):
if id not in self.LIST_CHUNK_IDS:
raise InvalidChunk('Not a list chunk: %s' % id)
DSDIFFChunk.__init__(self, fileobj, id, data_size, parent_chunk)
self.init_container()
class DSTChunk(DSDIFFChunk, IffContainerChunkMixin):
"""A DSDIFF chunk containing other chunks.
"""
def parse_next_subchunk(self):
return DSDIFFChunk.parse(self._fileobj, self)
def __init__(self, fileobj, id, data_size, parent_chunk):
if id != 'DST':
raise InvalidChunk('Not a DST chunk: %s' % id)
DSDIFFChunk.__init__(self, fileobj, id, data_size, parent_chunk)
self.init_container(name_size=0)
class DSDIFFFile(IffFile):
"""Representation of a DSDIFF file"""
def __init__(self, fileobj):
super().__init__(DSDIFFChunk, fileobj)
if self.root.id != u'FRM8':
raise InvalidChunk("Root chunk must be a FRM8 chunk, got %r"
% self.root)
class DSDIFFInfo(StreamInfo):
"""DSDIFF stream information.
Attributes:
channels (`int`): number of audio channels
length (`float`): file length in seconds, as a float
sample_rate (`int`): audio sampling rate in Hz
bits_per_sample (`int`): audio sample size (for DSD this is always 1)
bitrate (`int`): audio bitrate, in bits per second
compression (`str`): DSD (uncompressed) or DST
"""
channels = 0
length = 0
sample_rate = 0
bits_per_sample = 1
bitrate = 0
compression = None
@convert_error(IOError, error)
def __init__(self, fileobj):
"""Raises error"""
iff = DSDIFFFile(fileobj)
try:
prop_chunk = iff['PROP']
except KeyError as e:
raise error(str(e))
if prop_chunk.name == 'SND ':
for chunk in prop_chunk.subchunks():
if chunk.id == 'FS' and chunk.data_size == 4:
data = chunk.read()
if len(data) < 4:
raise InvalidChunk("Not enough data in FS chunk")
self.sample_rate, = struct.unpack('>L', data[:4])
elif chunk.id == 'CHNL' and chunk.data_size >= 2:
data = chunk.read()
if len(data) < 2:
raise InvalidChunk("Not enough data in CHNL chunk")
self.channels, = struct.unpack('>H', data[:2])
elif chunk.id == 'CMPR' and chunk.data_size >= 4:
data = chunk.read()
if len(data) < 4:
raise InvalidChunk("Not enough data in CMPR chunk")
compression_id, = struct.unpack('>4s', data[:4])
self.compression = compression_id.decode('ascii').rstrip()
if self.sample_rate < 0:
raise error("Invalid sample rate")
if self.compression == 'DSD': # not compressed
try:
dsd_chunk = iff['DSD']
except KeyError as e:
raise error(str(e))
# DSD data has one bit per sample. Eight samples of a channel
# are clustered together for a channel byte. For multiple channels
# the channel bytes are interleaved (in the order specified in the
# CHNL chunk). See DSDIFF spec chapter 3.3.
sample_count = dsd_chunk.data_size * 8 / (self.channels or 1)
if self.sample_rate != 0:
self.length = sample_count / float(self.sample_rate)
self.bitrate = (self.channels * self.bits_per_sample
* self.sample_rate)
elif self.compression == 'DST':
try:
dst_frame = iff['DST']
dst_frame_info = dst_frame['FRTE']
except KeyError as e:
raise error(str(e))
if dst_frame_info.data_size >= 6:
data = dst_frame_info.read()
if len(data) < 6:
raise InvalidChunk("Not enough data in FRTE chunk")
frame_count, frame_rate = struct.unpack('>LH', data[:6])
if frame_rate:
self.length = frame_count / frame_rate
if frame_count:
dst_data_size = dst_frame.data_size - dst_frame_info.size
avg_frame_size = dst_data_size / frame_count
self.bitrate = avg_frame_size * 8 * frame_rate
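# Illustrative arithmetic for the computations above, with hypothetical
# numbers (not taken from a real file): a stereo DSD64 stream has
# sample_rate = 2822400, channels = 2 and bits_per_sample = 1, so
# bitrate = 2 * 1 * 2822400 = 5644800 bps; an uncompressed 'DSD' chunk of
# 70560000 bytes holds 70560000 * 8 / 2 = 282240000 samples per channel,
# giving length = 282240000 / 2822400 = 100.0 seconds.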
def pprint(self):
return u"%d channel DSDIFF (%s) @ %d bps, %s Hz, %.2f seconds" % (
self.channels, self.compression, self.bitrate, self.sample_rate,
self.length)
class _DSDIFFID3(IffID3):
"""A DSDIFF file with ID3v2 tags"""
def _load_file(self, fileobj):
return DSDIFFFile(fileobj)
@convert_error(IOError, error)
@loadfile(method=False, writable=True)
def delete(filething):
"""Completely removes the ID3 chunk from the DSDIFF file"""
try:
del DSDIFFFile(filething.fileobj)[u'ID3']
except KeyError:
pass
class DSDIFF(FileType):
"""DSDIFF(filething)
A DSDIFF audio file.
For tagging ID3v2 data is added to a chunk with the ID "ID3 ".
Arguments:
filething (filething)
Attributes:
tags (`mutagen.id3.ID3`)
info (`DSDIFFInfo`)
"""
_mimes = ["audio/x-dff"]
@convert_error(IOError, error)
@loadfile()
def load(self, filething, **kwargs):
fileobj = filething.fileobj
try:
self.tags = _DSDIFFID3(fileobj, **kwargs)
except ID3NoHeaderError:
self.tags = None
except ID3Error as e:
raise error(e)
else:
self.tags.filename = self.filename
fileobj.seek(0, 0)
self.info = DSDIFFInfo(fileobj)
def add_tags(self):
"""Add empty ID3 tags to the file."""
if self.tags is None:
self.tags = _DSDIFFID3()
else:
raise error("an ID3 tag already exists")
@staticmethod
def score(filename, fileobj, header):
return header.startswith(b"FRM8") * 2 + endswith(filename, ".dff")
Open = DSDIFF

View file

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Copyright (C) 2017 Boris Pruessmann
#
# This program is free software; you can redistribute it and/or modify
@ -11,11 +10,11 @@
import sys
import struct
from ._compat import cBytesIO, reraise, endswith
from io import BytesIO
from mutagen import FileType, StreamInfo
from mutagen._util import cdata, MutagenError, loadfile, convert_error
from mutagen._util import cdata, MutagenError, loadfile, \
convert_error, reraise, endswith
from mutagen.id3 import ID3
from mutagen.id3._util import ID3NoHeaderError, error as ID3Error
@ -80,7 +79,7 @@ class DSDChunk(DSFChunk):
self.offset_metdata_chunk = cdata.ulonglong_le(data[20:28])
def write(self):
f = cBytesIO()
f = BytesIO()
f.write(self.chunk_header)
f.write(struct.pack("<Q", DSDChunk.CHUNK_SIZE))
f.write(struct.pack("<Q", self.total_size))
@ -199,7 +198,7 @@ class _DSFID3(ID3):
@convert_error(IOError, error)
@loadfile(writable=True)
def save(self, filething, v2_version=4, v23_sep='/', padding=None):
def save(self, filething=None, v2_version=4, v23_sep='/', padding=None):
"""Save ID3v2 data to the DSF file"""
fileobj = filething.fileobj
@ -328,7 +327,7 @@ class DSF(FileType):
self.info = DSFInfo(dsf_file.fmt_chunk)
@loadfile(writable=True)
def delete(self, filething):
def delete(self, filething=None):
self.tags = None
delete(filething)

View file

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Copyright (C) 2006 Joe Wreschnig
#
# This program is free software; you can redistribute it and/or modify
@ -12,9 +11,10 @@ EasyID3 is a wrapper around mutagen.id3.ID3 to make ID3 tags appear
more like Vorbis or APEv2 tags.
"""
from typing import Callable, Dict
import mutagen.id3
from ._compat import iteritems, text_type, PY2
from mutagen import Metadata
from mutagen._util import DictMixin, dict_match, loadfile
from mutagen.id3 import ID3, error, delete, ID3FileType
@ -65,10 +65,10 @@ class EasyID3(DictMixin, Metadata):
"""
Set = {}
Get = {}
Delete = {}
List = {}
Set: Dict[str, Callable] = {}
Get: Dict[str, Callable] = {}
Delete: Dict[str, Callable] = {}
List: Dict[str, Callable] = {}
# For compatibility.
valid_keys = Get
@ -173,7 +173,8 @@ class EasyID3(DictMixin, Metadata):
lambda s, v: setattr(s.__id3, 'load', v))
@loadfile(writable=True, create=True)
def save(self, filething, v1=1, v2_version=4, v23_sep='/', padding=None):
def save(self, filething=None, v1=1, v2_version=4, v23_sep='/',
padding=None):
"""save(filething=None, v1=1, v2_version=4, v23_sep='/', padding=None)
Save changes to a file.
@ -215,12 +216,8 @@ class EasyID3(DictMixin, Metadata):
raise EasyID3KeyError("%r is not a valid key" % key)
def __setitem__(self, key, value):
if PY2:
if isinstance(value, basestring):
value = [value]
else:
if isinstance(value, text_type):
value = [value]
if isinstance(value, str):
value = [value]
func = dict_match(self.Set, key.lower(), self.SetFallback)
if func is not None:
return func(self.__id3, key, value)
@ -470,7 +467,7 @@ def peakgain_list(id3, key):
keys.append("replaygain_%s_peak" % frame.desc)
return keys
for frameid, key in iteritems({
for frameid, key in {
"TALB": "album",
"TBPM": "bpm",
"TCMP": "compilation", # iTunes extension
@ -481,6 +478,7 @@ for frameid, key in iteritems({
"TLEN": "length",
"TMED": "media",
"TMOO": "mood",
"TIT1": "grouping",
"TIT2": "title",
"TIT3": "version",
"TPE1": "artist",
@ -499,7 +497,7 @@ for frameid, key in iteritems({
"TSRC": "isrc",
"TSST": "discsubtitle",
"TLAN": "language",
}):
}.items():
EasyID3.RegisterTextKey(key, frameid)
EasyID3.RegisterKey("genre", genre_get, genre_set, genre_delete)
@ -520,7 +518,7 @@ EasyID3.RegisterKey("replaygain_*_peak", peak_get, peak_set, peak_delete)
# http://musicbrainz.org/docs/specs/metadata_tags.html
# http://bugs.musicbrainz.org/ticket/1383
# http://musicbrainz.org/doc/MusicBrainzTag
for desc, key in iteritems({
for desc, key in {
u"MusicBrainz Artist Id": "musicbrainz_artistid",
u"MusicBrainz Album Id": "musicbrainz_albumid",
u"MusicBrainz Album Artist Id": "musicbrainz_albumartistid",
@ -541,7 +539,7 @@ for desc, key in iteritems({
u"MusicBrainz Work Id": "musicbrainz_workid",
u"Acoustid Fingerprint": "acoustid_fingerprint",
u"Acoustid Id": "acoustid_id",
}):
}.items():
EasyID3.RegisterTXXXKey(key, desc)
@ -557,4 +555,4 @@ class EasyID3FileType(ID3FileType):
tags (`EasyID3`)
"""
ID3 = EasyID3
ID3 = EasyID3 # type: ignore

View file

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Copyright (C) 2009 Joe Wreschnig
#
# This program is free software; you can redistribute it and/or modify
@ -6,10 +5,11 @@
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
from typing import Dict, Callable
from mutagen import Tags
from mutagen._util import DictMixin, dict_match
from mutagen.mp4 import MP4, MP4Tags, error, delete
from ._compat import PY2, text_type, PY3
__all__ = ["EasyMP4Tags", "EasyMP4", "delete", "error"]
@ -32,10 +32,10 @@ class EasyMP4Tags(DictMixin, Tags):
MP4, not EasyMP4.
"""
Set = {}
Get = {}
Delete = {}
List = {}
Set: Dict[str, Callable] = {}
Get: Dict[str, Callable] = {}
Delete: Dict[str, Callable] = {}
List: Dict[str, Callable] = {}
def __init__(self, *args, **kwargs):
self.__mp4 = MP4Tags(*args, **kwargs)
@ -106,7 +106,7 @@ class EasyMP4Tags(DictMixin, Tags):
"""
def getter(tags, key):
return list(map(text_type, tags[atomid]))
return list(map(str, tags[atomid]))
def setter(tags, key, value):
clamp = lambda x: int(min(max(min_value, x), max_value))
@ -126,7 +126,7 @@ class EasyMP4Tags(DictMixin, Tags):
if total:
ret.append(u"%d/%d" % (track, total))
else:
ret.append(text_type(track))
ret.append(str(track))
return ret
def setter(tags, key, value):
@ -167,10 +167,8 @@ class EasyMP4Tags(DictMixin, Tags):
def setter(tags, key, value):
encoded = []
for v in value:
if not isinstance(v, text_type):
if PY3:
raise TypeError("%r not str" % v)
v = v.decode("utf-8")
if not isinstance(v, str):
raise TypeError("%r not str" % v)
encoded.append(v.encode("utf-8"))
tags[atomid] = encoded
@ -190,12 +188,8 @@ class EasyMP4Tags(DictMixin, Tags):
def __setitem__(self, key, value):
key = key.lower()
if PY2:
if isinstance(value, basestring):
value = [value]
else:
if isinstance(value, text_type):
value = [value]
if isinstance(value, str):
value = [value]
func = dict_match(self.Set, key)
if func is not None:
@ -283,7 +277,7 @@ class EasyMP4(MP4):
tags (`EasyMP4Tags`)
"""
MP4Tags = EasyMP4Tags
MP4Tags = EasyMP4Tags # type: ignore
Get = EasyMP4Tags.Get
Set = EasyMP4Tags.Set

View file

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Copyright (C) 2005 Joe Wreschnig
#
# This program is free software; you can redistribute it and/or modify
@ -23,12 +22,12 @@ http://flac.sourceforge.net/format.html
__all__ = ["FLAC", "Open", "delete"]
import struct
from io import BytesIO
from ._vorbis import VCommentDict
import mutagen
from ._compat import cBytesIO, endswith, chr_, xrange
from mutagen._util import resize_bytes, MutagenError, get_size, loadfile, \
convert_error
convert_error, bchr, endswith
from mutagen._tags import PaddingInfo
from mutagen.id3._util import BitPaddedInt
from functools import reduce
@ -101,7 +100,7 @@ class MetadataBlock(object):
if data is not None:
if not isinstance(data, StrictFileObject):
if isinstance(data, bytes):
data = cBytesIO(data)
data = BytesIO(data)
elif not hasattr(data, 'read'):
raise TypeError(
"StreamInfo requires string data or a file-like")
@ -232,7 +231,7 @@ class StreamInfo(MetadataBlock, mutagen.StreamInfo):
self.md5_signature = to_int_be(data.read(16))
def write(self):
f = cBytesIO()
f = BytesIO()
f.write(struct.pack(">I", self.min_blocksize)[-2:])
f.write(struct.pack(">I", self.max_blocksize)[-2:])
f.write(struct.pack(">I", self.min_framesize)[-3:])
@ -244,11 +243,11 @@ class StreamInfo(MetadataBlock, mutagen.StreamInfo):
byte = (self.sample_rate & 0xF) << 4
byte += ((self.channels - 1) & 7) << 1
byte += ((self.bits_per_sample - 1) >> 4) & 1
f.write(chr_(byte))
f.write(bchr(byte))
# 4 bits of bps, 4 of sample count
byte = ((self.bits_per_sample - 1) & 0xF) << 4
byte += (self.total_samples >> 32) & 0xF
f.write(chr_(byte))
f.write(bchr(byte))
# last 32 of sample count
f.write(struct.pack(">I", self.total_samples & 0xFFFFFFFF))
# MD5 signature
@ -284,6 +283,9 @@ class SeekPoint(tuple):
return super(SeekPoint, cls).__new__(
cls, (first_sample, byte_offset, num_samples))
def __getnewargs__(self):
return self.first_sample, self.byte_offset, self.num_samples
first_sample = property(lambda self: self[0])
byte_offset = property(lambda self: self[1])
num_samples = property(lambda self: self[2])
@ -322,7 +324,7 @@ class SeekTable(MetadataBlock):
sp = data.tryread(self.__SEEKPOINT_SIZE)
def write(self):
f = cBytesIO()
f = BytesIO()
for seekpoint in self.seekpoints:
packed = struct.pack(
self.__SEEKPOINT_FORMAT,
@ -371,7 +373,7 @@ class CueSheetTrackIndex(tuple):
"""
def __new__(cls, index_number, index_offset):
return super(cls, CueSheetTrackIndex).__new__(
return super(CueSheetTrackIndex, cls).__new__(
cls, (index_number, index_offset))
index_number = property(lambda self: self[0])
@ -394,7 +396,7 @@ class CueSheetTrack(object):
isrc (`mutagen.text`): ISRC code, exactly 12 characters
type (`int`): 0 for audio, 1 for digital data
pre_emphasis (`bool`): true if the track is recorded with pre-emphasis
indexes (List[`mutagen.flac.CueSheetTrackIndex`]):
indexes (list[CueSheetTrackIndex]):
list of CueSheetTrackIndex objects
"""
@ -442,9 +444,9 @@ class CueSheet(MetadataBlock):
lead_in_samples (`int`): number of lead-in samples
compact_disc (`bool`): true if the cuesheet corresponds to a
compact disc
tracks (List[`mutagen.flac.CueSheetTrack`]):
tracks (list[CueSheetTrack]):
list of CueSheetTrack objects
lead_out (`mutagen.flac.CueSheetTrack` or `None`):
lead_out (`CueSheetTrack` or `None`):
lead-out as CueSheetTrack or None if lead-out was not found
"""
@ -484,7 +486,7 @@ class CueSheet(MetadataBlock):
self.lead_in_samples = lead_in_samples
self.compact_disc = bool(flags & 0x80)
self.tracks = []
for i in xrange(num_tracks):
for i in range(num_tracks):
track = data.read(self.__CUESHEET_TRACK_SIZE)
start_offset, track_number, isrc_padded, flags, num_indexes = \
struct.unpack(self.__CUESHEET_TRACK_FORMAT, track)
@ -493,7 +495,7 @@ class CueSheet(MetadataBlock):
pre_emphasis = bool(flags & 0x40)
val = CueSheetTrack(
track_number, start_offset, isrc, type_, pre_emphasis)
for j in xrange(num_indexes):
for j in range(num_indexes):
index = data.read(self.__CUESHEET_TRACKINDEX_SIZE)
index_offset, index_number = struct.unpack(
self.__CUESHEET_TRACKINDEX_FORMAT, index)
@ -502,7 +504,7 @@ class CueSheet(MetadataBlock):
self.tracks.append(val)
def write(self):
f = cBytesIO()
f = BytesIO()
flags = 0
if self.compact_disc:
flags |= 0x80
@ -608,7 +610,7 @@ class Picture(MetadataBlock):
self.data = data.read(length)
def write(self):
f = cBytesIO()
f = BytesIO()
mime = self.mime.encode('UTF-8')
f.write(struct.pack('>2I', self.type, len(mime)))
f.write(mime)
@ -678,14 +680,13 @@ class FLAC(mutagen.FileType):
Attributes:
cuesheet (`CueSheet`): if any or `None`
seektable (`SeekTable`): if any or `None`
pictures (List[`Picture`]): list of embedded pictures
pictures (list[Picture]): list of embedded pictures
info (`StreamInfo`)
tags (`mutagen._vorbis.VCommentDict`)
"""
_mimes = ["audio/flac", "audio/x-flac", "application/x-flac"]
info = None
tags = None
METADATA_BLOCKS = [StreamInfo, Padding, None, SeekTable, VCFLACDict,
@ -711,7 +712,7 @@ class FLAC(mutagen.FileType):
if block_type._distrust_size:
# Some jackass is writing broken Metadata block length
# for Vorbis comment blocks, and the FLAC reference
# implementaton can parse them (mostly by accident),
# implementation can parse them (mostly by accident),
# so we have to too. Instead of parsing the size
# given, parse an actual Vorbis comment, leaving
# fileobj in the right position.
@ -732,7 +733,9 @@ class FLAC(mutagen.FileType):
if self.tags is None:
self.tags = block
else:
raise FLACVorbisError("> 1 Vorbis comment block found")
# https://github.com/quodlibet/mutagen/issues/377
# Something writes multiple and metaflac doesn't care
pass
elif block.code == CueSheet.code:
if self.cuesheet is None:
self.cuesheet = block
@ -756,19 +759,21 @@ class FLAC(mutagen.FileType):
add_vorbiscomment = add_tags
@convert_error(IOError, error)
@loadfile(writable=True)
def delete(self, filething):
def delete(self, filething=None):
"""Remove Vorbis comments from a file.
If no filename is given, the one most recently loaded is used.
"""
if self.tags is not None:
self.metadata_blocks.remove(self.tags)
try:
self.save(filething, padding=lambda x: 0)
finally:
self.metadata_blocks.append(self.tags)
temp_blocks = [
b for b in self.metadata_blocks if b.code != VCFLACDict.code]
self._save(filething, temp_blocks, False, padding=lambda x: 0)
self.metadata_blocks[:] = [
b for b in self.metadata_blocks
if b.code != VCFLACDict.code or b is self.tags]
self.tags.clear()
vc = property(lambda s: s.tags, doc="Alias for tags; don't use this.")
@ -791,7 +796,7 @@ class FLAC(mutagen.FileType):
pass
try:
self.metadata_blocks[0].length
self.info.length
except (AttributeError, IndexError):
raise FLACNoHeaderError("Stream info block not found")
@ -805,7 +810,11 @@ class FLAC(mutagen.FileType):
@property
def info(self):
return self.metadata_blocks[0]
streaminfo_blocks = [
block for block in self.metadata_blocks
if block.code == StreamInfo.code
]
return streaminfo_blocks[0]
def add_picture(self, picture):
"""Add a new picture to the file.
@ -823,16 +832,11 @@ class FLAC(mutagen.FileType):
@property
def pictures(self):
"""
Returns:
List[`Picture`]: List of embedded pictures
"""
return [b for b in self.metadata_blocks if b.code == Picture.code]
@convert_error(IOError, error)
@loadfile(writable=True)
def save(self, filething, deleteid3=False, padding=None):
def save(self, filething=None, deleteid3=False, padding=None):
"""Save metadata blocks to a file.
Args:
@ -843,6 +847,9 @@ class FLAC(mutagen.FileType):
If no filename is given, the one most recently loaded is used.
"""
self._save(filething, self.metadata_blocks, deleteid3, padding)
def _save(self, filething, metadata_blocks, deleteid3, padding):
f = StrictFileObject(filething.fileobj)
header = self.__check_header(f, filething.name)
audio_offset = self.__find_audio_offset(f)
@ -857,7 +864,7 @@ class FLAC(mutagen.FileType):
content_size = get_size(f) - audio_offset
assert content_size >= 0
data = MetadataBlock._writeblocks(
self.metadata_blocks, available, content_size, padding)
metadata_blocks, available, content_size, padding)
data_size = len(data)
resize_bytes(filething.fileobj, available, data_size, header)

View file

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Copyright (C) 2005 Michael Urman
# 2006 Lukas Lalinsky
# 2013 Christoph Reiter
@ -61,7 +60,7 @@ from ._util import ID3EncryptionUnsupportedError, ID3JunkFrameError, \
# support open(filename) as interface
Open = ID3
# pyflakes
# flake8
ID3, ID3FileType, delete, ID3v1SaveOptions, Encoding, PictureType, CTOCFlags,
ID3TimeStamp, Frames, Frames_2_2, Frame, TextFrame, UrlFrame, UrlFrameU,
TimeStampTextFrame, BinaryFrame, NumericPartTextFrame, NumericTextFrame,

View file

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Copyright (C) 2005 Michael Urman
# 2006 Lukas Lalinsky
# 2013 Christoph Reiter
@ -53,8 +52,8 @@ class ID3(ID3Tags, mutagen.Metadata):
filething (filething): or `None`
Attributes:
version (Tuple[int]): ID3 tag version as a tuple
unknown_frames (List[bytes]): raw frame data of any unknown frames
version (tuple[int]): ID3 tag version as a tuple
unknown_frames (list[bytes]): raw frame data of any unknown frames
found
size (int): the total size of the ID3 tag, including the header
"""
@ -78,8 +77,6 @@ class ID3(ID3Tags, mutagen.Metadata):
@property
def version(self):
"""`tuple`: ID3 tag version as a tuple (of the loaded file)"""
if self._header is not None:
return self._header.version
return self._version
@ -112,10 +109,9 @@ class ID3(ID3Tags, mutagen.Metadata):
@convert_error(IOError, error)
@loadfile()
def load(self, filething, known_frames=None, translate=True, v2_version=4):
"""load(filething, known_frames=None, translate=True, v2_version=4)
Load tags from a filename.
def load(self, filething, known_frames=None, translate=True, v2_version=4,
load_v1=True):
"""Load tags from a filename.
Args:
filename (filething): filename or file object to load tag data from
@ -126,6 +122,11 @@ class ID3(ID3Tags, mutagen.Metadata):
call update_to_v23() / update_to_v24() manually.
v2_version (int): if update_to_v23 or update_to_v24 get called
(3 or 4)
load_v1 (bool): Load tags from ID3v1 header if present. If both
ID3v1 and ID3v2 headers are present, combine the tags from
the two, with ID3v2 having precedence.
.. versionadded:: 1.42
Example of loading a custom frame::
@ -149,13 +150,17 @@ class ID3(ID3Tags, mutagen.Metadata):
try:
self._header = ID3Header(fileobj)
except (ID3NoHeaderError, ID3UnsupportedVersionError):
frames, offset = find_id3v1(fileobj)
if not load_v1:
raise
frames, offset = find_id3v1(fileobj, v2_version, known_frames)
if frames is None:
raise
self.version = ID3Header._V11
for v in frames.values():
self.add(v)
if len(self.getall(v.HashKey)) == 0:
self.add(v)
else:
# XXX: attach to the header object so we have it in spec parsing..
if known_frames is not None:
@ -165,6 +170,14 @@ class ID3(ID3Tags, mutagen.Metadata):
remaining_data = self._read(self._header, data)
self._padding = len(remaining_data)
if load_v1:
v1v2_ver = 4 if self.version[1] == 4 else 3
frames, offset = find_id3v1(fileobj, v1v2_ver, known_frames)
if frames:
for v in frames.values():
if len(self.getall(v.HashKey)) == 0:
self.add(v)
if translate:
if v2_version == 3:
self.update_to_v23()
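# Usage sketch for the load_v1 fallback described above (illustrative,
# hypothetical path): ID3("song.mp3") picks up ID3v1 frames only where no
# ID3v2 frame of the same kind exists, while ID3("song.mp3", load_v1=False)
# raises ID3NoHeaderError if the file carries only an ID3v1 tag.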
@ -204,13 +217,14 @@ class ID3(ID3Tags, mutagen.Metadata):
@convert_error(IOError, error)
@loadfile(writable=True, create=True)
def save(self, filething, v1=1, v2_version=4, v23_sep='/', padding=None):
def save(self, filething=None, v1=1, v2_version=4, v23_sep='/',
padding=None):
"""save(filething=None, v1=1, v2_version=4, v23_sep='/', padding=None)
Save changes to a file.
Args:
filename (fspath):
filething (filething):
Filename to save the tag to. If no filename is given,
the one most recently loaded is used.
v1 (ID3v1SaveOptions):
@ -268,7 +282,7 @@ class ID3(ID3Tags, mutagen.Metadata):
f.truncate()
@loadfile(writable=True)
def delete(self, filething, delete_v1=True, delete_v2=True):
def delete(self, filething=None, delete_v1=True, delete_v2=True):
"""delete(filething=None, delete_v1=True, delete_v2=True)
Remove tags from a file.

View file

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Copyright (C) 2005 Michael Urman
#
# This program is free software; you can redistribute it and/or modify
@ -8,6 +7,7 @@
import zlib
from struct import unpack
from typing import Sequence
from ._util import ID3JunkFrameError, ID3EncryptionUnsupportedError, unsynch, \
ID3SaveConfig, error
@ -17,9 +17,7 @@ from ._specs import BinaryDataSpec, StringSpec, Latin1TextSpec, \
VolumeAdjustmentSpec, ChannelSpec, MultiSpec, SynchronizedTextSpec, \
KeyEventSpec, TimeStampSpec, EncodedNumericPartTextSpec, \
EncodedNumericTextSpec, SpecError, PictureTypeSpec, ID3FramesSpec, \
Latin1TextListSpec, CTOCFlagsSpec, FrameIDSpec, RVASpec
from .._compat import text_type, string_types, swap_to_string, iteritems, \
izip, itervalues
Latin1TextListSpec, CTOCFlagsSpec, FrameIDSpec, RVASpec, Spec
def _bytes2key(b):
@ -51,8 +49,8 @@ class Frame(object):
FLAG24_UNSYNCH = 0x0002
FLAG24_DATALEN = 0x0001
_framespec = []
_optionalspec = []
_framespec: Sequence[Spec] = []
_optionalspec: Sequence[Spec] = []
def __init__(self, *args, **kwargs):
if len(args) == 1 and len(kwargs) == 0 and \
@ -61,7 +59,7 @@ class Frame(object):
# ask the sub class to fill in our data
other._to_other(self)
else:
for checker, val in izip(self._framespec, args):
for checker, val in zip(self._framespec, args):
setattr(self, checker.name, val)
for checker in self._framespec[len(args):]:
setattr(self, checker.name,
@ -277,6 +275,8 @@ class Frame(object):
elif header.version >= header._V23:
if tflags & Frame.FLAG23_COMPRESS:
if len(data) < 4:
raise ID3JunkFrameError('frame too small: %r' % data)
usize, = unpack('>L', data[:4])
data = data[4:]
if tflags & Frame.FLAG23_ENCRYPT:
@ -291,7 +291,7 @@ class Frame(object):
frame._readData(header, data)
return frame
def __hash__(self):
def __hash__(self: object):
raise TypeError("Frame objects are unhashable")
@ -330,7 +330,7 @@ class CHAP(Frame):
def _pprint(self):
frame_pprint = u""
for frame in itervalues(self.sub_frames):
for frame in self.sub_frames.values():
for line in frame.pprint().splitlines():
frame_pprint += "\n" + " " * 4 + line
return u"%s time=%d..%d offset=%d..%d%s" % (
@ -377,7 +377,6 @@ class CTOC(Frame):
u",".join(self.child_element_ids), frame_pprint)
@swap_to_string
class TextFrame(Frame):
"""Text strings.
@ -399,7 +398,7 @@ class TextFrame(Frame):
]
def __bytes__(self):
return text_type(self).encode('utf-8')
return str(self).encode('utf-8')
def __str__(self):
return u'\u0000'.join(self.text)
@ -407,8 +406,8 @@ class TextFrame(Frame):
def __eq__(self, other):
if isinstance(other, bytes):
return bytes(self) == other
elif isinstance(other, text_type):
return text_type(self) == other
elif isinstance(other, str):
return str(self) == other
return self.text == other
__hash__ = Frame.__hash__
@ -481,7 +480,6 @@ class NumericPartTextFrame(TextFrame):
return int(self.text[0].split("/")[0])
@swap_to_string
class TimeStampTextFrame(TextFrame):
"""A list of time stamps.
@ -495,7 +493,7 @@ class TimeStampTextFrame(TextFrame):
]
def __bytes__(self):
return text_type(self).encode('utf-8')
return str(self).encode('utf-8')
def __str__(self):
return u','.join([stamp.text for stamp in self.text])
@ -504,7 +502,6 @@ class TimeStampTextFrame(TextFrame):
return u" / ".join([stamp.text for stamp in self.text])
@swap_to_string
class UrlFrame(Frame):
"""A frame containing a URL string.
@ -517,7 +514,7 @@ class UrlFrame(Frame):
ASCII.
"""
_framespec = [
_framespec: Sequence[Spec] = [
Latin1TextSpec('url'),
]
@ -587,7 +584,7 @@ class TCON(TextFrame):
if genreid:
for gid in genreid[1:-1].split(")("):
if gid.isdigit() and int(gid) < len(self.GENRES):
gid = text_type(self.GENRES[int(gid)])
gid = str(self.GENRES[int(gid)])
newgenres.append(gid)
elif gid == "CR":
newgenres.append(u"Cover")
@ -608,7 +605,7 @@ class TCON(TextFrame):
return genres
def __set_genres(self, genres):
if isinstance(genres, string_types):
if isinstance(genres, str):
genres = [genres]
self.text = [self.__decode(g) for g in genres]
@ -1044,7 +1041,6 @@ class SYTC(Frame):
__hash__ = Frame.__hash__
@swap_to_string
class USLT(Frame):
"""Unsynchronised lyrics/text transcription.
@ -1078,7 +1074,6 @@ class USLT(Frame):
return "%s=%s=%s" % (self.desc, self.lang, self.text)
@swap_to_string
class SYLT(Frame):
"""Synchronised lyrics/text."""
@ -1095,16 +1090,21 @@ class SYLT(Frame):
def HashKey(self):
return '%s:%s:%s' % (self.FrameID, self.desc, self.lang)
def _pprint(self):
return str(self)
def __eq__(self, other):
return str(self) == other
__hash__ = Frame.__hash__
def __str__(self):
return u"".join(text for (text, time) in self.text)
unit = 'fr' if self.format == 1 else 'ms'
return u"\n".join("[{0}{1}]: {2}".format(time, unit, text)
for (text, time) in self.text)
def __bytes__(self):
return text_type(self).encode("utf-8")
return str(self).encode("utf-8")
class COMM(TextFrame):
@ -1279,7 +1279,7 @@ class APIC(Frame):
return other
def _pprint(self):
type_desc = text_type(self.type)
type_desc = str(self.type)
if hasattr(self.type, "_pprint"):
type_desc = self.type._pprint()
@ -1309,7 +1309,7 @@ class PCNT(Frame):
return self.count
def _pprint(self):
return text_type(self.count)
return str(self.count)
class PCST(Frame):
@ -1328,7 +1328,7 @@ class PCST(Frame):
return self.value
def _pprint(self):
return text_type(self.value)
return str(self.value)
class POPM(Frame):
@ -1432,7 +1432,6 @@ class RBUF(Frame):
return self.size
@swap_to_string
class AENC(Frame):
"""Audio encryption.
@ -1549,7 +1548,6 @@ class UFID(Frame):
return "%s=%r" % (self.owner, self.data)
@swap_to_string
class USER(Frame):
"""Terms of use.
@ -1585,7 +1583,6 @@ class USER(Frame):
return "%r=%s" % (self.lang, self.text)
@swap_to_string
class OWNE(Frame):
"""Ownership frame."""
@ -1636,7 +1633,6 @@ class COMR(Frame):
__hash__ = Frame.__hash__
@swap_to_string
class ENCR(Frame):
"""Encryption method registration.
@ -1663,7 +1659,6 @@ class ENCR(Frame):
__hash__ = Frame.__hash__
@swap_to_string
class GRID(Frame):
"""Group identification registration."""
@ -1692,7 +1687,6 @@ class GRID(Frame):
__hash__ = Frame.__hash__
@swap_to_string
class PRIV(Frame):
"""Private frame."""
@ -1718,7 +1712,6 @@ class PRIV(Frame):
__hash__ = Frame.__hash__
@swap_to_string
class SIGN(Frame):
"""Signature frame."""
@ -2130,7 +2123,7 @@ Frames_2_2 = {}
k, v = None, None
for k, v in iteritems(globals()):
for k, v in globals().items():
if isinstance(v, type) and issubclass(v, Frame):
v.__module__ = "mutagen.id3"

View file

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Copyright (C) 2005 Michael Urman
# 2006 Lukas Lalinsky
# 2013 Christoph Reiter
@ -11,22 +10,32 @@
import errno
from struct import error as StructError, unpack
from mutagen._util import chr_, text_type
from mutagen._util import bchr
from ._frames import TCON, TRCK, COMM, TDRC, TALB, TPE1, TIT2
from ._frames import TCON, TRCK, COMM, TDRC, TYER, TALB, TPE1, TIT2
def find_id3v1(fileobj):
def find_id3v1(fileobj, v2_version=4, known_frames=None):
"""Returns a tuple of (id3tag, offset_to_end) or (None, 0)
offset mainly because we used to write too short tags in some cases and
we need the offset to delete them.
v2_version: Decides whether ID3v2.3 or ID3v2.4 tags
should be returned. Must be 3 or 4.
known_frames (Dict[`mutagen.text`, `Frame`]): dict mapping frame
IDs to Frame objects
"""
if v2_version not in (3, 4):
raise ValueError("Only 3 and 4 possible for v2_version")
# id3v1 is always at the end (after apev2)
extra_read = b"APETAGEX".index(b"TAG")
old_pos = fileobj.tell()
try:
fileobj.seek(-128 - extra_read, 2)
except IOError as e:
@ -38,6 +47,7 @@ def find_id3v1(fileobj):
raise
data = fileobj.read(128 + extra_read)
fileobj.seek(old_pos, 0)
try:
idx = data.index(b"TAG")
except ValueError:
@ -53,7 +63,7 @@ def find_id3v1(fileobj):
if idx == ape_idx + extra_read:
return (None, 0)
tag = ParseID3v1(data[idx:])
tag = ParseID3v1(data[idx:], v2_version, known_frames)
if tag is None:
return (None, 0)
@ -62,12 +72,21 @@ def find_id3v1(fileobj):
# ID3v1.1 support.
def ParseID3v1(data):
"""Parse an ID3v1 tag, returning a list of ID3v2.4 frames.
def ParseID3v1(data, v2_version=4, known_frames=None):
"""Parse an ID3v1 tag, returning a list of ID3v2 frames
Returns a {frame_name: frame} dict or None.
v2_version: Decides whether ID3v2.3 or ID3v2.4 tags
should be returned. Must be 3 or 4.
known_frames (Dict[`mutagen.text`, `Frame`]): dict mapping frame
IDs to Frame objects
"""
if v2_version not in (3, 4):
raise ValueError("Only 3 and 4 possible for v2_version")
try:
data = data[data.index(b"TAG"):]
except ValueError:
@ -97,23 +116,45 @@ def ParseID3v1(data):
title, artist, album, year, comment = map(
fix, [title, artist, album, year, comment])
frame_class = {
"TIT2": TIT2,
"TPE1": TPE1,
"TALB": TALB,
"TYER": TYER,
"TDRC": TDRC,
"COMM": COMM,
"TRCK": TRCK,
"TCON": TCON,
}
for key in frame_class:
if known_frames is not None:
if key in known_frames:
frame_class[key] = known_frames[key]
else:
frame_class[key] = None
frames = {}
if title:
frames["TIT2"] = TIT2(encoding=0, text=title)
if artist:
frames["TPE1"] = TPE1(encoding=0, text=[artist])
if album:
frames["TALB"] = TALB(encoding=0, text=album)
if title and frame_class["TIT2"]:
frames["TIT2"] = frame_class["TIT2"](encoding=0, text=title)
if artist and frame_class["TPE1"]:
frames["TPE1"] = frame_class["TPE1"](encoding=0, text=[artist])
if album and frame_class["TALB"]:
frames["TALB"] = frame_class["TALB"](encoding=0, text=album)
if year:
frames["TDRC"] = TDRC(encoding=0, text=year)
if comment:
frames["COMM"] = COMM(
if v2_version == 3 and frame_class["TYER"]:
frames["TYER"] = frame_class["TYER"](encoding=0, text=year)
elif frame_class["TDRC"]:
frames["TDRC"] = frame_class["TDRC"](encoding=0, text=year)
if comment and frame_class["COMM"]:
frames["COMM"] = frame_class["COMM"](
encoding=0, lang="eng", desc="ID3v1 Comment", text=comment)
# Don't read a track number if it looks like the comment was
# padded with spaces instead of nulls (thanks, WinAmp).
if track and ((track != 32) or (data[-3] == b'\x00'[0])):
if (track and frame_class["TRCK"] and
((track != 32) or (data[-3] == b'\x00'[0]))):
frames["TRCK"] = TRCK(encoding=0, text=str(track))
if genre != 255:
if genre != 255 and frame_class["TCON"]:
frames["TCON"] = TCON(encoding=0, text=str(genre))
return frames
@ -139,7 +180,7 @@ def MakeID3v1(id3):
if "TRCK" in id3:
try:
v1["track"] = chr_(+id3["TRCK"])
v1["track"] = bchr(+id3["TRCK"])
except ValueError:
v1["track"] = b"\x00"
else:
@ -152,14 +193,14 @@ def MakeID3v1(id3):
pass
else:
if genre in TCON.GENRES:
v1["genre"] = chr_(TCON.GENRES.index(genre))
v1["genre"] = bchr(TCON.GENRES.index(genre))
if "genre" not in v1:
v1["genre"] = b"\xff"
if "TDRC" in id3:
year = text_type(id3["TDRC"]).encode('ascii')
year = str(id3["TDRC"]).encode('ascii')
elif "TYER" in id3:
year = text_type(id3["TYER"]).encode('ascii')
year = str(id3["TYER"]).encode('ascii')
else:
year = b""
v1["year"] = (year + b"\x00\x00\x00\x00")[:4]

View file

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Copyright (C) 2005 Michael Urman
#
# This program is free software; you can redistribute it and/or modify
@ -10,10 +9,8 @@ import struct
import codecs
from struct import unpack, pack
from .._compat import text_type, chr_, PY3, swap_to_string, string_types, \
xrange
from .._util import total_ordering, decode_terminated, enum, izip, flags, \
cdata, encode_endian, intround
from .._util import total_ordering, decode_terminated, enum, flags, \
cdata, encode_endian, intround, bchr
from ._util import BitPaddedInt, is_valid_frame_id
@ -87,7 +84,7 @@ class PictureType(object):
"""Publisher/Studio logotype"""
def _pprint(self):
return text_type(self).split(".", 1)[-1].lower().replace("_", " ")
return str(self).split(".", 1)[-1].lower().replace("_", " ")
@flags
@ -165,11 +162,11 @@ class ByteSpec(Spec):
return bytearray(data)[0], data[1:]
def write(self, config, frame, value):
return chr_(value)
return bchr(value)
def validate(self, frame, value):
if value is not None:
chr_(value)
bchr(value)
return value
@ -289,26 +286,22 @@ class StringSpec(Spec):
except UnicodeDecodeError:
raise SpecError("not ascii")
else:
if PY3:
chunk = ascii
chunk = ascii
return chunk, data[self.len:]
def write(self, config, frame, value):
if PY3:
value = value.encode("ascii")
value = value.encode("ascii")
return (bytes(value) + b'\x00' * self.len)[:self.len]
def validate(self, frame, value):
if value is None:
raise TypeError
if PY3:
if not isinstance(value, str):
raise TypeError("%s has to be str" % self.name)
value.encode("ascii")
else:
if not isinstance(value, bytes):
value = value.encode("ascii")
if not isinstance(value, str):
raise TypeError("%s has to be str" % self.name)
value.encode("ascii")
if len(value) == self.len:
return value
@ -424,7 +417,7 @@ class BinaryDataSpec(Spec):
def write(self, config, frame, value):
if isinstance(value, bytes):
return value
value = text_type(value).encode("ascii")
value = str(value).encode("ascii")
return value
def validate(self, frame, value):
@ -432,10 +425,10 @@ class BinaryDataSpec(Spec):
raise TypeError
if isinstance(value, bytes):
return value
elif PY3:
else:
raise TypeError("%s has to be bytes" % self.name)
value = text_type(value).encode("ascii")
value = str(value).encode("ascii")
return value
@ -493,7 +486,7 @@ class EncodedTextSpec(Spec):
raise SpecError(e)
def validate(self, frame, value):
return text_type(value)
return str(value)
class MultiSpec(Spec):
@ -522,26 +515,26 @@ class MultiSpec(Spec):
data.append(self.specs[0].write(config, frame, v))
else:
for record in value:
for v, s in izip(record, self.specs):
for v, s in zip(record, self.specs):
data.append(s.write(config, frame, v))
return b''.join(data)
def validate(self, frame, value):
if self.sep and isinstance(value, string_types):
if self.sep and isinstance(value, str):
value = value.split(self.sep)
if isinstance(value, list):
if len(self.specs) == 1:
return [self.specs[0].validate(frame, v) for v in value]
else:
return [
[s.validate(frame, v) for (v, s) in izip(val, self.specs)]
[s.validate(frame, v) for (v, s) in zip(val, self.specs)]
for val in value]
raise ValueError('Invalid MultiSpec data: %r' % value)
def _validate23(self, frame, value, **kwargs):
if len(self.specs) != 1:
return [[s._validate23(frame, v, **kwargs)
for (v, s) in izip(val, self.specs)]
for (v, s) in zip(val, self.specs)]
for val in value]
spec = self.specs[0]
@ -582,7 +575,7 @@ class Latin1TextSpec(Spec):
return value.encode('latin1') + b'\x00'
def validate(self, frame, value):
return text_type(value)
return str(value)
class ID3FramesSpec(Spec):
@ -632,7 +625,7 @@ class Latin1TextListSpec(Spec):
def read(self, header, frame, data):
count, data = self._bspec.read(header, frame, data)
entries = []
for i in xrange(count):
for i in range(count):
entry, data = self._lspec.read(header, frame, data)
entries.append(entry)
return entries, data
@ -647,7 +640,6 @@ class Latin1TextListSpec(Spec):
return [self._lspec.validate(frame, v) for v in value]
@swap_to_string
@total_ordering
class ID3TimeStamp(object):
"""A time stamp in ID3v2 format.
@ -665,10 +657,8 @@ class ID3TimeStamp(object):
def __init__(self, text):
if isinstance(text, ID3TimeStamp):
text = text.text
elif not isinstance(text, text_type):
if PY3:
raise TypeError("not a str")
text = text.decode("utf-8")
elif not isinstance(text, str):
raise TypeError("not a str")
self.text = text
@ -736,7 +726,7 @@ class TimeStampSpec(EncodedTextSpec):
class ChannelSpec(ByteSpec):
(OTHER, MASTER, FRONTRIGHT, FRONTLEFT, BACKRIGHT, BACKLEFT, FRONTCENTRE,
BACKCENTRE, SUBWOOFER) = xrange(9)
BACKCENTRE, SUBWOOFER) = range(9)
class VolumeAdjustmentSpec(Spec):
@ -771,7 +761,7 @@ class VolumePeakSpec(Spec):
if vol_bytes + 1 > len(data):
raise SpecError("not enough frame data")
shift = ((8 - (bits & 7)) & 7) + (4 - vol_bytes) * 8
for i in xrange(1, vol_bytes + 1):
for i in range(1, vol_bytes + 1):
peak *= 256
peak += data_array[i]
peak *= 2 ** shift

View file

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Copyright 2005 Michael Urman
# Copyright 2016 Christoph Reiter
#
@ -7,11 +6,12 @@
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import re
import struct
from itertools import zip_longest
from mutagen._tags import Tags
from mutagen._util import DictProxy, convert_error, read_full
from mutagen._compat import PY3, text_type, itervalues
from ._util import BitPaddedInt, unsynch, ID3JunkFrameError, \
ID3EncryptionUnsupportedError, is_valid_frame_id, error, \
@ -82,10 +82,7 @@ class ID3Header(object):
if self.f_extended:
extsize_data = read_full(fileobj, 4)
if PY3:
frame_id = extsize_data.decode("ascii", "replace")
else:
frame_id = extsize_data
frame_id = extsize_data.decode("ascii", "replace")
if frame_id in Frames:
# Some tagger sets the extended header flag but
@ -131,11 +128,10 @@ def determine_bpi(data, frames, EMPTY=b"\x00" * 10):
name, size, flags = struct.unpack('>4sLH', part)
size = BitPaddedInt(size)
o += 10 + size
if PY3:
try:
name = name.decode("ascii")
except UnicodeDecodeError:
continue
try:
name = name.decode("ascii")
except UnicodeDecodeError:
continue
if name in frames:
asbpi += 1
else:
@ -151,11 +147,10 @@ def determine_bpi(data, frames, EMPTY=b"\x00" * 10):
break
name, size, flags = struct.unpack('>4sLH', part)
o += 10 + size
if PY3:
try:
name = name.decode("ascii")
except UnicodeDecodeError:
continue
try:
name = name.decode("ascii")
except UnicodeDecodeError:
continue
if name in frames:
asint += 1
else:
@ -191,7 +186,7 @@ class ID3Tags(DictProxy, Tags):
order = ["TIT2", "TPE1", "TRCK", "TALB", "TPOS", "TDRC", "TCON"]
framedata = [
(f, save_frame(f, config=config)) for f in itervalues(self)]
(f, save_frame(f, config=config)) for f in self.values()]
def get_prio(frame):
try:
@ -243,7 +238,7 @@ class ID3Tags(DictProxy, Tags):
Args:
key (text): key for frames to delete
values (List[`Frame`]): frames to add
values (list[Frame]): frames to add
"""
self.delall(key)
@ -369,24 +364,23 @@ class ID3Tags(DictProxy, Tags):
self.__update_common()
# TDAT, TYER, and TIME have been turned into TDRC.
try:
date = text_type(self.get("TYER", ""))
if date.strip(u"\x00"):
self.pop("TYER")
dat = text_type(self.get("TDAT", ""))
if dat.strip("\x00"):
self.pop("TDAT")
date = "%s-%s-%s" % (date, dat[2:], dat[:2])
time = text_type(self.get("TIME", ""))
if time.strip("\x00"):
self.pop("TIME")
date += "T%s:%s:00" % (time[:2], time[2:])
if "TDRC" not in self:
self.add(TDRC(encoding=0, text=date))
except UnicodeDecodeError:
# Old ID3 tags have *lots* of Unicode problems, so if TYER
# is bad, just chuck the frames.
pass
timestamps = []
old_frames = [self.pop(n, []) for n in ["TYER", "TDAT", "TIME"]]
for y, d, t in zip_longest(*old_frames, fillvalue=u""):
ym = re.match(r"([0-9]+)\Z", y)
dm = re.match(r"([0-9]{2})([0-9]{2})\Z", d)
tm = re.match(r"([0-9]{2})([0-9]{2})\Z", t)
timestamp = ""
if ym:
timestamp += u"%s" % ym.groups()
if dm:
timestamp += u"-%s-%s" % dm.groups()[::-1]
if tm:
timestamp += u"T%s:%s:00" % tm.groups()
if timestamp:
timestamps.append(timestamp)
if timestamps and "TDRC" not in self:
self.add(TDRC(encoding=0, text=timestamps))
# TORY can be the first part of a TDOR.
if "TORY" in self:
@ -533,8 +527,7 @@ def save_frame(frame, name=None, config=None):
frame_name = name
else:
frame_name = type(frame).__name__
if PY3:
frame_name = frame_name.encode("ascii")
frame_name = frame_name.encode("ascii")
header = struct.pack('>4s4sH', frame_name, datasize, flags)
return header + framedata
@ -575,11 +568,10 @@ def read_frames(id3, data, frames):
if size == 0:
continue # drop empty frames
if PY3:
try:
name = name.decode('ascii')
except UnicodeDecodeError:
continue
try:
name = name.decode('ascii')
except UnicodeDecodeError:
continue
try:
# someone writes 2.3 frames with 2.2 names
@ -614,11 +606,10 @@ def read_frames(id3, data, frames):
if size == 0:
continue # drop empty frames
if PY3:
try:
name = name.decode('ascii')
except UnicodeDecodeError:
continue
try:
name = name.decode('ascii')
except UnicodeDecodeError:
continue
try:
tag = frames[name]
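The zip_longest rewrite above folds matching TYER, TDAT and TIME values into single ID3v2.4 TDRC timestamps during update_to_v24(). A worked sketch with invented values:

from mutagen.id3 import ID3, TYER, TDAT, TIME

tags = ID3()
tags.add(TYER(encoding=0, text=["1984"]))
tags.add(TDAT(encoding=0, text=["0712"]))  # DDMM
tags.add(TIME(encoding=0, text=["1230"]))  # HHMM
tags.update_to_v24()
print(tags["TDRC"])  # expected: 1984-12-07T12:30:00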

View file

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Copyright (C) 2005 Michael Urman
# 2013 Christoph Reiter
# 2014 Ben Ockmore
@ -8,7 +7,6 @@
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
from mutagen._compat import long_, integer_types, PY3
from mutagen._util import MutagenError
@ -110,7 +108,7 @@ class _BitPaddedMixin(object):
mask = (((1 << (8 - bits)) - 1) << bits)
if isinstance(value, integer_types):
if isinstance(value, int):
while value:
if value & mask:
return False
@ -133,7 +131,7 @@ class BitPaddedInt(int, _BitPaddedMixin):
numeric_value = 0
shift = 0
if isinstance(value, integer_types):
if isinstance(value, int):
if value < 0:
raise ValueError
while value:
@ -149,21 +147,12 @@ class BitPaddedInt(int, _BitPaddedMixin):
else:
raise TypeError
if isinstance(numeric_value, int):
self = int.__new__(BitPaddedInt, numeric_value)
else:
self = long_.__new__(BitPaddedLong, numeric_value)
self = int.__new__(BitPaddedInt, numeric_value)
self.bits = bits
self.bigendian = bigendian
return self
if PY3:
BitPaddedLong = BitPaddedInt
else:
class BitPaddedLong(long_, _BitPaddedMixin):
pass
class ID3BadUnsynchData(error, ValueError):
"""Deprecated"""

View file

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Copyright 2006 Joe Wreschnig
#
# This program is free software; you can redistribute it and/or modify

View file

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Copyright (C) 2006 Lukas Lalinsky
#
# This program is free software; you can redistribute it and/or modify
@ -18,10 +17,9 @@ __all__ = ["MonkeysAudio", "Open", "delete"]
import struct
from ._compat import endswith
from mutagen import StreamInfo
from mutagen.apev2 import APEv2File, error, delete
from mutagen._util import cdata, convert_error
from mutagen._util import cdata, convert_error, endswith
class MonkeysAudioHeaderError(error):

View file

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Copyright (C) 2006 Joe Wreschnig
#
# This program is free software; you can redistribute it and/or modify
@ -12,8 +11,7 @@ import struct
from mutagen import StreamInfo
from mutagen._util import MutagenError, enum, BitReader, BitReaderError, \
convert_error, intround
from mutagen._compat import endswith, xrange
convert_error, intround, endswith
from mutagen.id3 import ID3FileType, delete
from mutagen.id3._util import BitPaddedInt
@ -75,27 +73,27 @@ def _guess_xing_bitrate_mode(xing):
# Mode values.
STEREO, JOINTSTEREO, DUALCHANNEL, MONO = xrange(4)
STEREO, JOINTSTEREO, DUALCHANNEL, MONO = range(4)
class MPEGFrame(object):
# Map (version, layer) tuples to bitrates.
__BITRATE = {
(1, 1): [0, 32, 64, 96, 128, 160, 192, 224,
256, 288, 320, 352, 384, 416, 448],
(1, 2): [0, 32, 48, 56, 64, 80, 96, 112, 128,
160, 192, 224, 256, 320, 384],
(1, 3): [0, 32, 40, 48, 56, 64, 80, 96, 112,
128, 160, 192, 224, 256, 320],
(2, 1): [0, 32, 48, 56, 64, 80, 96, 112, 128,
144, 160, 176, 192, 224, 256],
(2, 2): [0, 8, 16, 24, 32, 40, 48, 56, 64,
80, 96, 112, 128, 144, 160],
}
__BITRATE[(2, 3)] = __BITRATE[(2, 2)]
for i in xrange(1, 4):
for i in range(1, 4):
__BITRATE[(2.5, i)] = __BITRATE[(2, i)]
# Map version to sample rates.
@ -306,7 +304,7 @@ class MPEGInfo(StreamInfo):
bitrate (`int`): audio bitrate, in bits per second.
In case :attr:`bitrate_mode` is :attr:`BitrateMode.UNKNOWN` the
bitrate is guessed based on the first frame.
sample_rate (`int`) audio sample rate, in Hz
sample_rate (`int`): audio sample rate, in Hz
encoder_info (`mutagen.text`): a string containing encoder name and
possibly version. In case a lame tag is present this will start
with ``"LAME "``, if unknown it is empty, otherwise the
@ -357,7 +355,7 @@ class MPEGInfo(StreamInfo):
# find a sync in the first 1024K, give up after some invalid syncs
max_read = 1024 * 1024
max_syncs = 1000
max_syncs = 1500
enough_frames = 4
min_frames = 2
@ -370,7 +368,7 @@ class MPEGInfo(StreamInfo):
if max_syncs <= 0:
break
for _ in xrange(enough_frames):
for _ in range(enough_frames):
try:
frame = MPEGFrame(fileobj)
except HeaderNotFoundError:
@ -480,4 +478,4 @@ class EasyMP3(MP3):
"""
from mutagen.easyid3 import EasyID3 as ID3
ID3 = ID3
ID3 = ID3 # type: ignore
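The MPEGInfo attributes documented above are the usual entry point for callers; a minimal read-only sketch, assuming a hypothetical local file:

from mutagen.mp3 import MP3

audio = MP3("example.mp3")  # hypothetical path
info = audio.info
print(info.length, info.bitrate, info.sample_rate)
print(info.bitrate_mode, info.encoder_info)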

View file

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Copyright 2015 Christoph Reiter
#
# This program is free software; you can redistribute it and/or modify
@ -13,9 +12,10 @@ http://wiki.hydrogenaud.io/index.php?title=MP3
from __future__ import division
from functools import partial
from io import BytesIO
from typing import List
from mutagen._util import cdata, BitReader
from mutagen._compat import xrange, iterbytes, cBytesIO
from mutagen._util import cdata, BitReader, iterbytes
class LAMEError(Exception):
@ -109,7 +109,7 @@ class LAMEHeader(object):
raise LAMEError("Not enough data")
# extended lame header
r = BitReader(cBytesIO(payload))
r = BitReader(BytesIO(payload))
revision = r.bits(4)
if revision != 0:
raise LAMEError("unsupported header revision %d" % revision)
@ -356,7 +356,7 @@ class XingHeader(object):
bytes = -1
"""Number of bytes, -1 if unknown"""
toc = []
toc: List[int] = []
"""List of 100 file offsets in percent encoded as 0-255. E.g. entry
50 contains the file offset in percent at 50% play time.
Empty if unknown.
@ -474,7 +474,7 @@ class VBRIHeader(object):
toc_frames = 0
"""Number of frames per table entry"""
toc = []
toc: List[int] = []
"""TOC"""
def __init__(self, fileobj):
@ -515,7 +515,7 @@ class VBRIHeader(object):
else:
raise VBRIHeaderError("Invalid TOC entry size")
self.toc = [unpack(i)[0] for i in xrange(0, toc_size, toc_entry_size)]
self.toc = [unpack(i)[0] for i in range(0, toc_size, toc_entry_size)]
@classmethod
def get_offset(cls, info):

View file

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Copyright (C) 2006 Joe Wreschnig
#
# This program is free software; you can redistribute it and/or modify
@ -25,13 +24,15 @@ were all consulted.
import struct
import sys
from io import BytesIO
from collections.abc import Sequence
from datetime import timedelta
from mutagen import FileType, Tags, StreamInfo, PaddingInfo
from mutagen._constants import GENRES
from mutagen._util import cdata, insert_bytes, DictProxy, MutagenError, \
hashable, enum, get_size, resize_bytes, loadfile, convert_error
from mutagen._compat import (reraise, PY2, string_types, text_type, chr_,
iteritems, PY3, cBytesIO, izip, xrange)
hashable, enum, get_size, resize_bytes, loadfile, convert_error, bchr, \
reraise
from ._atom import Atoms, Atom, AtomError
from ._util import parse_full_atom
from ._as_entry import AudioSampleEntry, ASEntryError
@ -205,14 +206,10 @@ class MP4FreeForm(bytes):
def _name2key(name):
if PY2:
return name
return name.decode("latin-1")
def _key2name(key):
if PY2:
return key
return key.encode("latin-1")
@ -246,7 +243,7 @@ def _item_sort_key(key, value):
"\xa9gen", "gnre", "trkn", "disk",
"\xa9day", "cpil", "pgap", "pcst", "tmpo",
"\xa9too", "----", "covr", "\xa9lyr"]
order = dict(izip(order, xrange(len(order))))
order = dict(zip(order, range(len(order))))
last = len(order)
# If there's no key-based way to distinguish, order by length.
# If there's still no way, go by string comparison on the
@ -311,6 +308,7 @@ class MP4Tags(DictProxy, Tags):
* '\\xa9mvi' -- Movement Index
* 'shwm' -- work/movement
* 'stik' -- Media Kind
* 'hdvd' -- HD Video
* 'rtng' -- Content Rating
* 'tves' -- TV Episode
* 'tvsn' -- TV Season
@ -366,8 +364,7 @@ class MP4Tags(DictProxy, Tags):
self.__parse_text(atom, data, implicit=False)
except MP4MetadataError:
# parsing failed, save them so we can write them back
key = _name2key(atom.name)
self._failed_atoms.setdefault(key, []).append(data)
self._failed_atoms.setdefault(_name2key(atom.name), []).append(data)
def __setitem__(self, key, value):
if not isinstance(key, str):
@ -392,7 +389,7 @@ class MP4Tags(DictProxy, Tags):
@convert_error(IOError, error)
@loadfile(writable=True)
def save(self, filething, padding=None):
def save(self, filething=None, padding=None):
values = []
items = sorted(self.items(), key=lambda kv: _item_sort_key(*kv))
@ -402,7 +399,7 @@ class MP4Tags(DictProxy, Tags):
except (TypeError, ValueError) as s:
reraise(MP4MetadataValueError, s, sys.exc_info()[2])
for key, failed in iteritems(self._failed_atoms):
for key, failed in self._failed_atoms.items():
# don't write atoms back if we have added a new one with
# the same name, this excludes freeform which can have
# multiple atoms with the same key (most parsers seem to be able
@ -562,6 +559,9 @@ class MP4Tags(DictProxy, Tags):
if len(head) != 12:
raise MP4MetadataError("truncated atom % r" % atom.name)
length, name = struct.unpack(">I4s", head[:8])
if length < 1:
raise MP4MetadataError(
"atom %r has a length of zero" % atom.name)
version = ord(head[8:9])
flags = struct.unpack(">I", b"\x00" + head[9:12])[0]
if name != b"data":
@ -601,7 +601,9 @@ class MP4Tags(DictProxy, Tags):
if atom_name != b"data":
raise MP4MetadataError(
"unexpected atom %r inside %r" % (atom_name, atom.name))
if length < 1:
raise MP4MetadataError(
"atom %r has a length of zero" % atom.name)
version = ord(data[pos + 8:pos + 8 + 1])
flags = struct.unpack(">I", b"\x00" + data[pos + 9:pos + 12])[0]
value.append(MP4FreeForm(data[pos + 16:pos + length],
@ -746,7 +748,7 @@ class MP4Tags(DictProxy, Tags):
def __render_bool(self, key, value):
return self.__render_data(
key, 0, AtomDataType.INTEGER, [chr_(bool(value))])
key, 0, AtomDataType.INTEGER, [bchr(bool(value))])
def __parse_cover(self, atom, data):
values = []
@ -760,6 +762,9 @@ class MP4Tags(DictProxy, Tags):
continue
raise MP4MetadataError(
"unexpected atom %r inside 'covr'" % name)
if length < 1:
raise MP4MetadataError(
"atom %r has a length of zero" % atom.name)
if imageformat not in (MP4Cover.FORMAT_JPEG, MP4Cover.FORMAT_PNG):
# Sometimes AtomDataType.IMPLICIT or simply wrong.
# In all cases it was jpeg, so default to it
@ -807,18 +812,14 @@ class MP4Tags(DictProxy, Tags):
self.__add(key, values)
def __render_text(self, key, value, flags=AtomDataType.UTF8):
if isinstance(value, string_types):
if isinstance(value, str):
value = [value]
encoded = []
for v in value:
if not isinstance(v, text_type):
if PY3:
raise TypeError("%r not str" % v)
try:
v = v.decode("utf-8")
except (AttributeError, UnicodeDecodeError) as e:
raise TypeError(e)
if not isinstance(v, str):
raise TypeError("%r not str" % v)
encoded.append(v.encode("utf-8"))
return self.__render_data(key, 0, flags, encoded)
@ -852,6 +853,7 @@ class MP4Tags(DictProxy, Tags):
b"pcst": (__parse_bool, __render_bool),
b"shwm": (__parse_integer, __render_integer, 1),
b"stik": (__parse_integer, __render_integer, 1),
b"hdvd": (__parse_integer, __render_integer, 1),
b"rtng": (__parse_integer, __render_integer, 1),
b"covr": (__parse_cover, __render_cover),
b"purl": (__parse_text, __render_text),
@ -869,14 +871,14 @@ class MP4Tags(DictProxy, Tags):
def pprint(self):
def to_line(key, value):
assert isinstance(key, text_type)
if isinstance(value, text_type):
assert isinstance(key, str)
if isinstance(value, str):
return u"%s=%s" % (key, value)
return u"%s=%r" % (key, value)
values = []
for key, value in sorted(iteritems(self)):
if not isinstance(key, text_type):
for key, value in sorted(self.items()):
if not isinstance(key, str):
key = key.decode("latin-1")
if key == "covr":
values.append(u"%s=%s" % (key, u", ".join(
@ -889,6 +891,123 @@ class MP4Tags(DictProxy, Tags):
return u"\n".join(values)
class Chapter(object):
"""Chapter()
Chapter information container
"""
def __init__(self, start, title):
self.start = start
self.title = title
class MP4Chapters(Sequence):
"""MP4Chapters()
MPEG-4 Chapter information.
Supports the 'moov.udta.chpl' box.
A sequence of Chapter objects with the following members:
start (`float`): position from the start of the file in seconds
title (`str`): title of the chapter
"""
def __init__(self, *args, **kwargs):
self._timescale = None
self._duration = None
self._chapters = []
super(MP4Chapters, self).__init__()
if args or kwargs:
self.load(*args, **kwargs)
def __len__(self):
return self._chapters.__len__()
def __getitem__(self, key):
return self._chapters.__getitem__(key)
def load(self, atoms, fileobj):
try:
mvhd = atoms.path(b"moov", b"mvhd")[-1]
except KeyError as key:
return MP4MetadataError(key)
self._parse_mvhd(mvhd, fileobj)
if not self._timescale:
raise MP4MetadataError("Unable to get timescale")
try:
chpl = atoms.path(b"moov", b"udta", b"chpl")[-1]
except KeyError as key:
return MP4MetadataError(key)
self._parse_chpl(chpl, fileobj)
@classmethod
def _can_load(cls, atoms):
return b"moov.udta.chpl" in atoms and b"moov.mvhd" in atoms
def _parse_mvhd(self, atom, fileobj):
assert atom.name == b"mvhd"
ok, data = atom.read(fileobj)
if not ok:
raise MP4StreamInfoError("Invalid mvhd")
version = data[0]
pos = 4
if version == 0:
pos += 8 # created, modified
self._timescale = struct.unpack(">l", data[pos:pos + 4])[0]
pos += 4
self._duration = struct.unpack(">l", data[pos:pos + 4])[0]
pos += 4
elif version == 1:
pos += 16 # created, modified
self._timescale = struct.unpack(">l", data[pos:pos + 4])[0]
pos += 4
self._duration = struct.unpack(">q", data[pos:pos + 8])[0]
pos += 8
def _parse_chpl(self, atom, fileobj):
assert atom.name == b"chpl"
ok, data = atom.read(fileobj)
if not ok:
raise MP4StreamInfoError("Invalid atom")
chapters = data[8]
pos = 9
for i in range(chapters):
start = struct.unpack(">Q", data[pos:pos + 8])[0] / 10000
pos += 8
title_len = data[pos]
pos += 1
try:
title = data[pos:pos + title_len].decode()
except UnicodeDecodeError as e:
raise MP4MetadataError("chapter %d title: %s" % (i, e))
pos += title_len
self._chapters.append(Chapter(start / self._timescale, title))
def pprint(self):
chapters = ["%s %s" % (timedelta(seconds=chapter.start), chapter.title)
for chapter in self._chapters]
return "chapters=%s" % '\n '.join(chapters)
class MP4Info(StreamInfo):
"""MP4Info()
@ -1004,7 +1123,7 @@ class MP4Info(StreamInfo):
return
# look at the first entry if there is one
entry_fileobj = cBytesIO(data[offset:])
entry_fileobj = BytesIO(data[offset:])
try:
entry_atom = Atom(entry_fileobj)
except AtomError as e:
@ -1044,6 +1163,7 @@ class MP4(FileType):
"""
MP4Tags = MP4Tags
MP4Chapters = MP4Chapters
_mimes = ["audio/mp4", "audio/x-m4a", "audio/mpeg4", "audio/aac"]
@ -1076,6 +1196,16 @@ class MP4(FileType):
except Exception as err:
reraise(MP4MetadataError, err, sys.exc_info()[2])
if not MP4Chapters._can_load(atoms):
self.chapters = None
else:
try:
self.chapters = self.MP4Chapters(atoms, fileobj)
except error:
raise
except Exception as err:
reraise(MP4MetadataError, err, sys.exc_info()[2])
@property
def _padding(self):
if self.tags is None:
@ -1088,6 +1218,28 @@ class MP4(FileType):
super(MP4, self).save(*args, **kwargs)
def pprint(self):
"""
Returns:
text: stream information, comment key=value pairs and chapters.
"""
stream = "%s (%s)" % (self.info.pprint(), self.mime[0])
try:
tags = self.tags.pprint()
except AttributeError:
pass
else:
stream += ((tags and "\n" + tags) or "")
try:
chapters = self.chapters.pprint()
except AttributeError:
pass
else:
stream += "\n" + chapters
return stream
def add_tags(self):
if self.tags is None:
self.tags = self.MP4Tags()
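The chapters attribute added above is either an MP4Chapters sequence or None when no 'chpl' box is present. A minimal reading sketch, assuming a hypothetical file:

from mutagen.mp4 import MP4

audio = MP4("audiobook.m4b")  # hypothetical path
if audio.chapters is not None:
    for chapter in audio.chapters:
        print("%10.2fs  %s" % (chapter.start, chapter.title))
else:
    print("no chapter information")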

View file

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Copyright (C) 2014 Christoph Reiter
#
# This program is free software; you can redistribute it and/or modify
@ -6,10 +5,10 @@
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
from mutagen._compat import cBytesIO, xrange
from io import BytesIO
from mutagen.aac import ProgramConfigElement
from mutagen._util import BitReader, BitReaderError, cdata
from mutagen._compat import text_type
from ._util import parse_full_atom
from ._atom import Atom, AtomError
@ -47,7 +46,7 @@ class AudioSampleEntry(object):
if not ok:
raise ASEntryError("too short %r atom" % atom.name)
fileobj = cBytesIO(data)
fileobj = BytesIO(data)
r = BitReader(fileobj)
try:
@ -93,7 +92,7 @@ class AudioSampleEntry(object):
ok, data = atom.read(fileobj)
if not ok:
raise ASEntryError("truncated %s atom" % atom.name)
fileobj = cBytesIO(data)
fileobj = BytesIO(data)
r = BitReader(fileobj)
# sample_rate in AudioSampleEntry covers values in
@ -134,7 +133,7 @@ class AudioSampleEntry(object):
if version != 0:
raise ASEntryError("Unsupported version %d" % version)
fileobj = cBytesIO(data)
fileobj = BytesIO(data)
r = BitReader(fileobj)
try:
@ -168,7 +167,7 @@ class AudioSampleEntry(object):
if version != 0:
raise ASEntryError("Unsupported version %d" % version)
fileobj = cBytesIO(data)
fileobj = BytesIO(data)
r = BitReader(fileobj)
try:
@ -204,14 +203,14 @@ class DescriptorError(Exception):
class BaseDescriptor(object):
TAG = None
TAG: int
@classmethod
def _parse_desc_length_file(cls, fileobj):
"""May raise ValueError"""
value = 0
for i in xrange(4):
for i in range(4):
try:
b = cdata.uint8(fileobj.read(1))
except cdata.error as e:
@ -239,9 +238,13 @@ class BaseDescriptor(object):
pos = fileobj.tell()
instance = cls(fileobj, length)
left = length - (fileobj.tell() - pos)
if left < 0:
raise DescriptorError("descriptor parsing read too much data")
fileobj.seek(left, 1)
if left > 0:
fileobj.seek(left, 1)
else:
# XXX: In case the instance length is shorter than the content
# assume the size is wrong and just continue parsing
# https://github.com/quodlibet/mutagen/issues/444
pass
return instance
@ -371,7 +374,7 @@ class DecoderSpecificInfo(BaseDescriptor):
name += "+SBR"
if self.psPresentFlag == 1:
name += "+PS"
return text_type(name)
return str(name)
@property
def sample_rate(self):
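The descriptor parsing above leans on the MPEG-4 "expandable" length coding read by _parse_desc_length_file: each byte carries 7 payload bits and the high bit flags a continuation, for at most four bytes. A standalone sketch of the same decoding:

def parse_desc_length(data):
    """Decode an MPEG-4 descriptor length from up to 4 bytes."""
    value = 0
    for b in data[:4]:
        value = (value << 7) | (b & 0x7F)
        if not b & 0x80:
            break
    return value

assert parse_desc_length(b"\x05") == 5
assert parse_desc_length(b"\x81\x23") == 0xA3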

View file

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Copyright (C) 2006 Joe Wreschnig
#
# This program is free software; you can redistribute it and/or modify
@ -8,7 +7,6 @@
import struct
from mutagen._compat import PY2
from mutagen._util import convert_error
# This is not an exhaustive list of container atoms, but just the
@ -180,12 +178,8 @@ class Atoms(object):
specifying the complete path ('moov.udta').
"""
if PY2:
if isinstance(names, basestring):
names = names.split(b".")
else:
if isinstance(names, bytes):
names = names.split(b".")
if isinstance(names, bytes):
names = names.split(b".")
for child in self.atoms:
if child.name == names[0]:

View file

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Copyright (C) 2014 Christoph Reiter
#
# This program is free software; you can redistribute it and/or modify

View file

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Copyright (C) 2006 Lukas Lalinsky
# Copyright (C) 2012 Christoph Reiter
#
@ -19,11 +18,10 @@ __all__ = ["Musepack", "Open", "delete"]
import struct
from ._compat import endswith, xrange
from mutagen import StreamInfo
from mutagen.apev2 import APEv2File, error, delete
from mutagen.id3._util import BitPaddedInt
from mutagen._util import cdata, convert_error, intround
from mutagen._util import cdata, convert_error, intround, endswith
class MusepackHeaderError(error):
@ -44,7 +42,7 @@ def _parse_sv8_int(fileobj, limit=9):
"""
num = 0
for i in xrange(limit):
for i in range(limit):
c = fileobj.read(1)
if len(c) != 1:
raise EOFError
@ -143,9 +141,13 @@ class MusepackInfo(StreamInfo):
# packets can be at maximum data_size big and are padded with zeros
if frame_type == b"SH":
if frame_type not in mandatory_packets:
raise MusepackHeaderError("Duplicate SH packet")
mandatory_packets.remove(frame_type)
self.__parse_stream_header(fileobj, data_size)
elif frame_type == b"RG":
if frame_type not in mandatory_packets:
raise MusepackHeaderError("Duplicate RG packet")
mandatory_packets.remove(frame_type)
self.__parse_replaygain_packet(fileobj, data_size)
else:
@ -184,9 +186,13 @@ class MusepackInfo(StreamInfo):
remaining_size -= l1 + l2
data = fileobj.read(remaining_size)
if len(data) != remaining_size:
if len(data) != remaining_size or len(data) < 2:
raise MusepackHeaderError("SH packet ended unexpectedly.")
self.sample_rate = RATES[bytearray(data)[0] >> 5]
rate_index = (bytearray(data)[0] >> 5)
try:
self.sample_rate = RATES[rate_index]
except IndexError:
raise MusepackHeaderError("Invalid sample rate")
self.channels = (bytearray(data)[1] >> 4) + 1
def __parse_replaygain_packet(self, fileobj, data_size):

View file

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Copyright (C) 2006 Joe Wreschnig
#
# This program is free software; you can redistribute it and/or modify
@ -19,10 +18,14 @@ http://www.xiph.org/ogg/doc/rfc3533.txt.
import struct
import sys
import zlib
from io import BytesIO
from typing import Type
from mutagen import FileType
from mutagen._util import cdata, resize_bytes, MutagenError, loadfile, seek_end
from ._compat import cBytesIO, reraise, chr_, izip, xrange
from mutagen._util import cdata, resize_bytes, MutagenError, loadfile, \
seek_end, bchr, reraise
from mutagen._file import StreamInfo
from mutagen._tags import Tags
class error(MutagenError):
@ -37,7 +40,7 @@ class OggPage(object):
A page is a header of 26 bytes, followed by the length of the
data, followed by the data.
The constructor is givin a file-like object pointing to the start
The constructor is given a file-like object pointing to the start
of an Ogg page. After the constructor is finished it is pointing
to the start of the next page.
@ -50,7 +53,7 @@ class OggPage(object):
offset (`int` or `None`): offset this page was read from (default None)
complete (`bool`): if the last packet on this page is complete
(default True)
packets (List[`bytes`]): list of raw packet data (default [])
packets (list[bytes]): list of raw packet data (default [])
Note that if 'complete' is false, the next page's 'continued'
property must be true (so set both when constructing pages).
@ -145,11 +148,11 @@ class OggPage(object):
lacing_data = []
for datum in self.packets:
quot, rem = divmod(len(datum), 255)
lacing_data.append(b"\xff" * quot + chr_(rem))
lacing_data.append(b"\xff" * quot + bchr(rem))
lacing_data = b"".join(lacing_data)
if not self.complete and lacing_data.endswith(b"\x00"):
lacing_data = lacing_data[:-1]
data.append(chr_(len(lacing_data)))
data.append(bchr(len(lacing_data)))
data.append(lacing_data)
data.extend(self.packets)
data = b"".join(data)
@ -210,13 +213,13 @@ class OggPage(object):
to logical stream 'serial'. Other pages will be ignored.
fileobj must point to the start of a valid Ogg page; any
occuring after it and part of the specified logical stream
occurring after it and part of the specified logical stream
will be numbered. No adjustment will be made to the data in
the pages nor the granule position; only the page number, and
so also the CRC.
If an error occurs (e.g. non-Ogg data is found), fileobj will
be left pointing to the place in the stream the error occured,
be left pointing to the place in the stream the error occurred,
but the invalid data will be left intact (since this function
does not change the total file size).
"""
@ -267,11 +270,12 @@ class OggPage(object):
else:
sequence += 1
if page.continued:
packets[-1].append(page.packets[0])
else:
packets.append([page.packets[0]])
packets.extend([p] for p in page.packets[1:])
if page.packets:
if page.continued:
packets[-1].append(page.packets[0])
else:
packets.append([page.packets[0]])
packets.extend([p] for p in page.packets[1:])
return [b"".join(p) for p in packets]
@ -387,8 +391,8 @@ class OggPage(object):
# Number the new pages starting from the first old page.
first = old_pages[0].sequence
for page, seq in izip(new_pages,
xrange(first, first + len(new_pages))):
for page, seq in zip(new_pages,
range(first, first + len(new_pages))):
page.sequence = seq
page.serial = old_pages[0].serial
@ -416,7 +420,7 @@ class OggPage(object):
offset_adjust = 0
new_data_end = None
assert len(old_pages) == len(new_data)
for old_page, data in izip(old_pages, new_data):
for old_page, data in zip(old_pages, new_data):
offset = old_page.offset + offset_adjust
data_size = len(data)
resize_bytes(fileobj, old_page.size, data_size, offset)
@ -460,7 +464,7 @@ class OggPage(object):
index = data.rindex(b"OggS")
except ValueError:
raise error("unable to find final Ogg header")
bytesobj = cBytesIO(data[index:])
bytesobj = BytesIO(data[index:])
def is_valid(page):
return not finishing or page.position != -1
@ -506,9 +510,9 @@ class OggFileType(FileType):
filething (filething)
"""
_Info = None
_Tags = None
_Error = None
_Info: Type[StreamInfo]
_Tags: Type[Tags]
_Error: Type[error]
_mimes = ["application/ogg", "application/x-ogg"]
@loadfile()
@ -535,7 +539,7 @@ class OggFileType(FileType):
raise self._Error("no appropriate stream found")
@loadfile(writable=True)
def delete(self, filething):
def delete(self, filething=None):
"""delete(filething=None)
Remove tags from a file.
@ -567,7 +571,7 @@ class OggFileType(FileType):
raise self._Error
@loadfile(writable=True)
def save(self, filething, padding=None):
def save(self, filething=None, padding=None):
"""save(filething=None, padding=None)
Save a tag to a file.
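OggPage, described above, consumes exactly one page (fixed header plus lacing values plus packet data) per construction. A small read-only sketch, assuming a hypothetical single-stream file:

from mutagen.ogg import OggPage

with open("example.ogg", "rb") as fileobj:  # hypothetical, one logical stream
    pages = []
    while True:
        try:
            pages.append(OggPage(fileobj))
        except EOFError:
            break
    packets = OggPage.to_packets(pages)
    print("%d pages, %d packets" % (len(pages), len(packets)))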

View file

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Copyright (C) 2006 Joe Wreschnig
#
# This program is free software; you can redistribute it and/or modify
@ -18,8 +17,7 @@ http://flac.sourceforge.net/ogg_mapping.html.
__all__ = ["OggFLAC", "Open", "delete"]
import struct
from ._compat import cBytesIO
from io import BytesIO
from mutagen import StreamInfo
from mutagen.flac import StreamInfo as FLACStreamInfo, error as FLACError
@ -65,7 +63,7 @@ class OggFLACStreamInfo(StreamInfo):
self.serial = page.serial
# Skip over the block header.
stringobj = cBytesIO(page.packets[0][17:])
stringobj = BytesIO(page.packets[0][17:])
try:
flac_info = FLACStreamInfo(stringobj)
@ -101,7 +99,7 @@ class OggFLACVComment(VCommentDict):
if page.serial == info.serial:
pages.append(page)
complete = page.complete or (len(page.packets) > 1)
comment = cBytesIO(OggPage.to_packets(pages)[0][4:])
comment = BytesIO(OggPage.to_packets(pages)[0][4:])
super(OggFLACVComment, self).__init__(comment, framing=False)
def _inject(self, fileobj, padding_func):

View file

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Copyright (C) 2012, 2013 Christoph Reiter
#
# This program is free software; you can redistribute it and/or modify
@ -17,9 +16,9 @@ Based on http://tools.ietf.org/html/draft-terriberry-oggopus-01
__all__ = ["OggOpus", "Open", "delete"]
import struct
from io import BytesIO
from mutagen import StreamInfo
from mutagen._compat import BytesIO
from mutagen._util import get_size, loadfile, convert_error
from mutagen._tags import PaddingInfo
from mutagen._vorbis import VCommentDict

View file

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Copyright 2006 Joe Wreschnig
#
# This program is free software; you can redistribute it and/or modify

View file

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Copyright 2006 Joe Wreschnig
#
# This program is free software; you can redistribute it and/or modify
@ -50,17 +49,22 @@ class OggTheoraInfo(StreamInfo):
def __init__(self, fileobj):
page = OggPage(fileobj)
while not page.packets[0].startswith(b"\x80theora"):
while not page.packets or \
not page.packets[0].startswith(b"\x80theora"):
page = OggPage(fileobj)
if not page.first:
raise OggTheoraHeaderError(
"page has ID header, but doesn't start a stream")
data = page.packets[0]
if len(data) < 42:
raise OggTheoraHeaderError("Truncated header")
vmaj, vmin = struct.unpack("2B", data[7:9])
if (vmaj, vmin) != (3, 2):
raise OggTheoraHeaderError(
"found Theora version %d.%d != 3.2" % (vmaj, vmin))
fps_num, fps_den = struct.unpack(">2I", data[22:30])
if not fps_den or not fps_num:
raise OggTheoraHeaderError("FRN or FRD is equal to zero")
self.fps = fps_num / float(fps_den)
self.bitrate = cdata.uint_be(b"\x00" + data[37:40])
self.granule_shift = (cdata.ushort_be(data[40:42]) >> 5) & 0x1F
@ -73,6 +77,7 @@ class OggTheoraInfo(StreamInfo):
position = page.position
mask = (1 << self.granule_shift) - 1
frames = (position >> self.granule_shift) + (position & mask)
assert self.fps
self.length = frames / float(self.fps)
def pprint(self):
@ -91,7 +96,10 @@ class OggTheoraCommentDict(VCommentDict):
if page.serial == info.serial:
pages.append(page)
complete = page.complete or (len(page.packets) > 1)
data = OggPage.to_packets(pages)[0][7:]
packets = OggPage.to_packets(pages)
if not packets:
raise error("Missing metadata packet")
data = packets[0][7:]
super(OggTheoraCommentDict, self).__init__(data, framing=False)
self._padding = len(data) - self._size
@ -100,7 +108,8 @@ class OggTheoraCommentDict(VCommentDict):
fileobj.seek(0)
page = OggPage(fileobj)
while not page.packets[0].startswith(b"\x81theora"):
while not page.packets or \
not page.packets[0].startswith(b"\x81theora"):
page = OggPage(fileobj)
old_pages = [page]

View file

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Copyright 2006 Joe Wreschnig
#
# This program is free software; you can redistribute it and/or modify
@ -43,7 +42,7 @@ class OggVorbisInfo(StreamInfo):
length (`float`): File length in seconds, as a float
channels (`int`): Number of channels
bitrate (`int`): Nominal ('average') bitrate in bits per second
sample_Rate (`int`): Sample rate in Hz
sample_rate (`int`): Sample rate in Hz
"""
@ -56,13 +55,20 @@ class OggVorbisInfo(StreamInfo):
"""Raises ogg.error, IOError"""
page = OggPage(fileobj)
if not page.packets:
raise OggVorbisHeaderError("page has not packets")
while not page.packets[0].startswith(b"\x01vorbis"):
page = OggPage(fileobj)
if not page.first:
raise OggVorbisHeaderError(
"page has ID header, but doesn't start a stream")
if len(page.packets[0]) < 28:
raise OggVorbisHeaderError(
"page contains a packet too short to be valid")
(self.channels, self.sample_rate, max_bitrate, nominal_bitrate,
min_bitrate) = struct.unpack("<B4i", page.packets[0][11:28])
min_bitrate) = struct.unpack("<BI3i", page.packets[0][11:28])
if self.sample_rate == 0:
raise OggVorbisHeaderError("sample rate can't be zero")
self.serial = page.serial
max_bitrate = max(0, max_bitrate)

View file

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Copyright (C) 2006 Lukas Lalinsky
#
# This program is free software; you can redistribute it and/or modify
@ -22,12 +21,23 @@ __all__ = ["OptimFROG", "Open", "delete"]
import struct
from ._compat import endswith
from ._util import convert_error
from ._util import convert_error, endswith
from mutagen import StreamInfo
from mutagen.apev2 import APEv2File, error, delete
SAMPLE_TYPE_BITS = {
0: 8,
1: 8,
2: 16,
3: 16,
4: 24,
5: 24,
6: 32,
7: 32,
}
class OptimFROGHeaderError(error):
pass
@ -41,6 +51,8 @@ class OptimFROGInfo(StreamInfo):
channels (`int`): number of audio channels
length (`float`): file length in seconds, as a float
sample_rate (`int`): audio sampling rate in Hz
bits_per_sample (`int`): the audio sample size
encoder_info (`mutagen.text`): encoder version, e.g. "5.100"
"""
@convert_error(IOError, OptimFROGHeaderError)
@ -48,18 +60,27 @@ class OptimFROGInfo(StreamInfo):
"""Raises OptimFROGHeaderError"""
header = fileobj.read(76)
if (len(header) != 76 or not header.startswith(b"OFR ") or
struct.unpack("<I", header[4:8])[0] not in [12, 15]):
if len(header) != 76 or not header.startswith(b"OFR "):
raise OptimFROGHeaderError("not an OptimFROG file")
data_size = struct.unpack("<I", header[4:8])[0]
if data_size != 12 and data_size < 15:
raise OptimFROGHeaderError("not an OptimFROG file")
(total_samples, total_samples_high, sample_type, self.channels,
self.sample_rate) = struct.unpack("<IHBBI", header[8:20])
total_samples += total_samples_high << 32
self.channels += 1
self.bits_per_sample = SAMPLE_TYPE_BITS.get(sample_type)
if self.sample_rate:
self.length = float(total_samples) / (self.channels *
self.sample_rate)
else:
self.length = 0.0
if data_size >= 15:
encoder_id = struct.unpack("<H", header[20:22])[0]
version = str((encoder_id >> 4) + 4500)
self.encoder_info = "%s.%s" % (version[0], version[1:])
else:
self.encoder_info = ""
def pprint(self):
return u"OptimFROG, %.2f seconds, %d Hz" % (self.length,

View file

View file

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Copyright 2015 Christoph Reiter
#
# This program is free software; you can redistribute it and/or modify
@ -12,8 +11,7 @@ import struct
from mutagen import StreamInfo, MutagenError
from mutagen._file import FileType
from mutagen._util import loadfile
from mutagen._compat import xrange, endswith
from mutagen._util import loadfile, endswith
class SMFError(MutagenError):
@ -123,7 +121,7 @@ def _read_midi_length(fileobj):
# get a list of events and tempo changes for each track
tracks = []
first_tempos = None
for tracknum in xrange(ntracks):
for tracknum in range(ntracks):
identifier, chunk = read_chunk(fileobj)
if identifier != b"MTrk":
continue

237
libs/common/mutagen/tak.py Normal file
View file

@ -0,0 +1,237 @@
# Copyright (C) 2008 Lukáš Lalinský
# Copyright (C) 2019 Philipp Wolfer
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""Tom's lossless Audio Kompressor (TAK) streams with APEv2 tags.
TAK is a lossless audio compressor developed by Thomas Becker.
For more information, see:
* http://www.thbeck.de/Tak/Tak.html
* http://wiki.hydrogenaudio.org/index.php?title=TAK
"""
__all__ = ["TAK", "Open", "delete"]
import struct
from mutagen import StreamInfo
from mutagen.apev2 import (
APEv2File,
delete,
error,
)
from mutagen._util import (
BitReader,
BitReaderError,
convert_error,
enum,
endswith,
)
@enum
class TAKMetadata(object):
END = 0
STREAM_INFO = 1
SEEK_TABLE = 2 # Removed in TAK 1.1.1
SIMPLE_WAVE_DATA = 3
ENCODER_INFO = 4
UNUSED_SPACE = 5 # New in TAK 1.0.3
MD5 = 6 # New in TAK 1.1.1
LAST_FRAME_INFO = 7 # New in TAK 1.1.1
CRC_SIZE = 3
ENCODER_INFO_CODEC_BITS = 6
ENCODER_INFO_PROFILE_BITS = 4
ENCODER_INFO_TOTAL_BITS = ENCODER_INFO_CODEC_BITS + ENCODER_INFO_PROFILE_BITS
SIZE_INFO_FRAME_DURATION_BITS = 4
SIZE_INFO_SAMPLE_NUM_BITS = 35
SIZE_INFO_TOTAL_BITS = (SIZE_INFO_FRAME_DURATION_BITS
+ SIZE_INFO_SAMPLE_NUM_BITS)
AUDIO_FORMAT_DATA_TYPE_BITS = 3
AUDIO_FORMAT_SAMPLE_RATE_BITS = 18
AUDIO_FORMAT_SAMPLE_BITS_BITS = 5
AUDIO_FORMAT_CHANNEL_NUM_BITS = 4
AUDIO_FORMAT_HAS_EXTENSION_BITS = 1
AUDIO_FORMAT_BITS_MIN = 31
AUDIO_FORMAT_BITS_MAX = 31 + 102
SAMPLE_RATE_MIN = 6000
SAMPLE_BITS_MIN = 8
CHANNEL_NUM_MIN = 1
STREAM_INFO_BITS_MIN = (ENCODER_INFO_TOTAL_BITS
+ SIZE_INFO_TOTAL_BITS
+ AUDIO_FORMAT_BITS_MIN)
STREAM_INFO_BITS_MAX = (ENCODER_INFO_TOTAL_BITS
+ SIZE_INFO_TOTAL_BITS
+ AUDIO_FORMAT_BITS_MAX)
STREAM_INFO_SIZE_MIN = (STREAM_INFO_BITS_MIN + 7) / 8
STREAM_INFO_SIZE_MAX = (STREAM_INFO_BITS_MAX + 7) / 8
class _LSBBitReader(BitReader):
"""BitReader implementation which reads bits starting at LSB in each byte.
"""
def _lsb(self, count):
value = self._buffer & 0xff >> (8 - count)
self._buffer = self._buffer >> count
self._bits -= count
return value
def bits(self, count):
"""Reads `count` bits and returns an uint, LSB read first.
May raise BitReaderError if not enough data could be read or
IOError by the underlying file object.
"""
if count < 0:
raise ValueError
value = 0
if count <= self._bits:
value = self._lsb(count)
else:
# First read all available bits
shift = 0
remaining = count
if self._bits > 0:
remaining -= self._bits
shift = self._bits
value = self._lsb(self._bits)
assert self._bits == 0
# Now add additional bytes
n_bytes = (remaining - self._bits + 7) // 8
data = self._fileobj.read(n_bytes)
if len(data) != n_bytes:
raise BitReaderError("not enough data")
for b in bytearray(data):
if remaining > 8: # Use full byte
remaining -= 8
value = (b << shift) | value
shift += 8
else:
self._buffer = b
self._bits = 8
b = self._lsb(remaining)
value = (b << shift) | value
assert 0 <= self._bits < 8
return value
class TAKHeaderError(error):
pass
class TAKInfo(StreamInfo):
"""TAK stream information.
Attributes:
channels (`int`): number of audio channels
length (`float`): file length in seconds, as a float
sample_rate (`int`): audio sampling rate in Hz
bits_per_sample (`int`): audio sample size
encoder_info (`mutagen.text`): encoder version
"""
channels = 0
length = 0
sample_rate = 0
bitrate = 0
encoder_info = ""
@convert_error(IOError, TAKHeaderError)
@convert_error(BitReaderError, TAKHeaderError)
def __init__(self, fileobj):
stream_id = fileobj.read(4)
if len(stream_id) != 4 or not stream_id == b"tBaK":
raise TAKHeaderError("not a TAK file")
bitreader = _LSBBitReader(fileobj)
while True:
type = TAKMetadata(bitreader.bits(7))
bitreader.skip(1) # Unused
size = struct.unpack("<I", bitreader.bytes(3) + b'\0')[0]
data_size = size - CRC_SIZE
pos = fileobj.tell()
if type == TAKMetadata.END:
break
elif type == TAKMetadata.STREAM_INFO:
self._parse_stream_info(bitreader, size)
elif type == TAKMetadata.ENCODER_INFO:
self._parse_encoder_info(bitreader, data_size)
assert bitreader.is_aligned()
fileobj.seek(pos + size)
if self.sample_rate > 0:
self.length = self.number_of_samples / float(self.sample_rate)
def _parse_stream_info(self, bitreader, size):
if size < STREAM_INFO_SIZE_MIN or size > STREAM_INFO_SIZE_MAX:
raise TAKHeaderError("stream info has invalid length")
# Encoder Info
bitreader.skip(ENCODER_INFO_CODEC_BITS)
bitreader.skip(ENCODER_INFO_PROFILE_BITS)
# Size Info
bitreader.skip(SIZE_INFO_FRAME_DURATION_BITS)
self.number_of_samples = bitreader.bits(SIZE_INFO_SAMPLE_NUM_BITS)
# Audio Format
bitreader.skip(AUDIO_FORMAT_DATA_TYPE_BITS)
self.sample_rate = (bitreader.bits(AUDIO_FORMAT_SAMPLE_RATE_BITS)
+ SAMPLE_RATE_MIN)
self.bits_per_sample = (bitreader.bits(AUDIO_FORMAT_SAMPLE_BITS_BITS)
+ SAMPLE_BITS_MIN)
self.channels = (bitreader.bits(AUDIO_FORMAT_CHANNEL_NUM_BITS)
+ CHANNEL_NUM_MIN)
bitreader.skip(AUDIO_FORMAT_HAS_EXTENSION_BITS)
def _parse_encoder_info(self, bitreader, size):
patch = bitreader.bits(8)
minor = bitreader.bits(8)
major = bitreader.bits(8)
self.encoder_info = "TAK %d.%d.%d" % (major, minor, patch)
def pprint(self):
return u"%s, %d Hz, %d bits, %.2f seconds, %d channel(s)" % (
self.encoder_info or "TAK", self.sample_rate, self.bits_per_sample,
self.length, self.channels)
class TAK(APEv2File):
"""TAK(filething)
Arguments:
filething (filething)
Attributes:
info (`TAKInfo`)
"""
_Info = TAKInfo
_mimes = ["audio/x-tak"]
@staticmethod
def score(filename, fileobj, header):
return header.startswith(b"tBaK") + endswith(filename.lower(), ".tak")
Open = TAK
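mutagen.tak is new in this update; TAK streams carry APEv2 tags, so the usual APEv2File interface applies. A minimal sketch, assuming a hypothetical file:

from mutagen.tak import TAK

audio = TAK("example.tak")  # hypothetical path
print(audio.info.pprint())  # encoder, sample rate, bits, length, channels
print(audio.tags)           # APEv2 tags, or None if the file has none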

View file

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Copyright (C) 2006 Joe Wreschnig
#
# This program is free software; you can redistribute it and/or modify
@ -17,10 +16,9 @@ True Audio files use ID3 tags.
__all__ = ["TrueAudio", "Open", "delete", "EasyTrueAudio"]
from ._compat import endswith
from mutagen import StreamInfo
from mutagen.id3 import ID3FileType, delete
from mutagen._util import cdata, MutagenError, convert_error
from mutagen._util import cdata, MutagenError, convert_error, endswith
class error(MutagenError):
@ -99,4 +97,4 @@ class EasyTrueAudio(TrueAudio):
"""
from mutagen.easyid3 import EasyID3 as ID3
ID3 = ID3
ID3 = ID3 # type: ignore

209
libs/common/mutagen/wave.py Normal file
View file

@ -0,0 +1,209 @@
# Copyright (C) 2017 Borewit
# Copyright (C) 2019-2020 Philipp Wolfer
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""Microsoft WAVE/RIFF audio file/stream information and tags."""
import sys
import struct
from mutagen import StreamInfo, FileType
from mutagen.id3 import ID3
from mutagen._riff import RiffFile, InvalidChunk
from mutagen._iff import error as IffError
from mutagen.id3._util import ID3NoHeaderError, error as ID3Error
from mutagen._util import (
convert_error,
endswith,
loadfile,
reraise,
)
__all__ = ["WAVE", "Open", "delete"]
class error(IffError):
"""WAVE stream parsing errors."""
class _WaveFile(RiffFile):
"""Representation of a RIFF/WAVE file"""
def __init__(self, fileobj):
RiffFile.__init__(self, fileobj)
if self.file_type != u'WAVE':
raise error("Expected RIFF/WAVE.")
# Normalize ID3v2-tag-chunk to lowercase
if u'ID3' in self:
self[u'ID3'].id = u'id3'
class WaveStreamInfo(StreamInfo):
"""WaveStreamInfo()
Microsoft WAVE file information.
Information is parsed from the 'fmt' & 'data' chunk of the RIFF/WAVE file
Attributes:
length (`float`): audio length, in seconds
bitrate (`int`): audio bitrate, in bits per second
channels (`int`): The number of audio channels
sample_rate (`int`): audio sample rate, in Hz
bits_per_sample (`int`): The audio sample size
"""
length = 0.0
bitrate = 0
channels = 0
sample_rate = 0
bits_per_sample = 0
SIZE = 16
@convert_error(IOError, error)
def __init__(self, fileobj):
"""Raises error"""
wave_file = _WaveFile(fileobj)
try:
format_chunk = wave_file[u'fmt']
except KeyError as e:
raise error(str(e))
data = format_chunk.read()
if len(data) < 16:
raise InvalidChunk()
# RIFF: http://soundfile.sapp.org/doc/WaveFormat/
# Python struct.unpack:
# https://docs.python.org/2/library/struct.html#byte-order-size-and-alignment
info = struct.unpack('<hhLLhh', data[:self.SIZE])
self.audio_format, self.channels, self.sample_rate, byte_rate, \
block_align, self.bits_per_sample = info
self.bitrate = self.channels * self.bits_per_sample * self.sample_rate
# Calculate duration
self._number_of_samples = 0
if block_align > 0:
try:
data_chunk = wave_file[u'data']
self._number_of_samples = data_chunk.data_size / block_align
except KeyError:
pass
if self.sample_rate > 0:
self.length = self._number_of_samples / self.sample_rate
def pprint(self):
return u"%d channel RIFF @ %d bps, %s Hz, %.2f seconds" % (
self.channels, self.bitrate, self.sample_rate, self.length)
class _WaveID3(ID3):
"""A Wave file with ID3v2 tags"""
def _pre_load_header(self, fileobj):
try:
fileobj.seek(_WaveFile(fileobj)[u'id3'].data_offset)
except (InvalidChunk, KeyError):
raise ID3NoHeaderError("No ID3 chunk")
@convert_error(IOError, error)
@loadfile(writable=True)
def save(self, filething, v1=1, v2_version=4, v23_sep='/', padding=None):
"""Save ID3v2 data to the Wave/RIFF file"""
fileobj = filething.fileobj
wave_file = _WaveFile(fileobj)
if u'id3' not in wave_file:
wave_file.insert_chunk(u'id3')
chunk = wave_file[u'id3']
try:
data = self._prepare_data(
fileobj, chunk.data_offset, chunk.data_size, v2_version,
v23_sep, padding)
except ID3Error as e:
reraise(error, e, sys.exc_info()[2])
chunk.resize(len(data))
chunk.write(data)
def delete(self, filething):
"""Completely removes the ID3 chunk from the RIFF/WAVE file"""
delete(filething)
self.clear()
@convert_error(IOError, error)
@loadfile(method=False, writable=True)
def delete(filething):
"""Completely removes the ID3 chunk from the RIFF/WAVE file"""
try:
_WaveFile(filething.fileobj).delete_chunk(u'id3')
except KeyError:
pass
class WAVE(FileType):
"""WAVE(filething)
A Waveform Audio File Format
(WAVE, or more commonly known as WAV due to its filename extension)
Arguments:
filething (filething)
Attributes:
tags (`mutagen.id3.ID3`)
info (`WaveStreamInfo`)
"""
_mimes = ["audio/wav", "audio/wave"]
@staticmethod
def score(filename, fileobj, header):
filename = filename.lower()
return (header.startswith(b"RIFF") + (header[8:12] == b'WAVE')
+ endswith(filename, b".wav") + endswith(filename, b".wave"))
def add_tags(self):
"""Add an empty ID3 tag to the file."""
if self.tags is None:
self.tags = _WaveID3()
else:
raise error("an ID3 tag already exists")
@convert_error(IOError, error)
@loadfile()
def load(self, filething, **kwargs):
"""Load stream and tag information from a file."""
fileobj = filething.fileobj
self.info = WaveStreamInfo(fileobj)
fileobj.seek(0, 0)
try:
self.tags = _WaveID3(fileobj, **kwargs)
except ID3NoHeaderError:
self.tags = None
except ID3Error as e:
raise error(e)
else:
self.tags.filename = self.filename
Open = WAVE
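mutagen.wave is also new; ID3v2 tags live in an id3 RIFF chunk and are exposed through the regular ID3 API. A minimal sketch, assuming a hypothetical file:

from mutagen.wave import WAVE
from mutagen.id3 import TIT2

audio = WAVE("example.wav")  # hypothetical path
print(audio.info.channels, audio.info.sample_rate,
      audio.info.bits_per_sample, audio.info.length)
if audio.tags is None:
    audio.add_tags()  # attach an empty ID3 tag
audio.tags.add(TIT2(encoding=3, text=["Example"]))
audio.save()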

View file

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Copyright 2006 Joe Wreschnig
# 2014 Christoph Reiter
#
@ -76,9 +75,10 @@ class WavPackInfo(StreamInfo):
Attributes:
channels (int): number of audio channels (1 or 2)
length (float: file length in seconds, as a float
length (float): file length in seconds, as a float
sample_rate (int): audio sampling rate in Hz
version (int) WavPack stream version
bits_per_sample (int): audio sample size
version (int): WavPack stream version
"""
def __init__(self, fileobj):
@ -90,6 +90,12 @@ class WavPackInfo(StreamInfo):
self.version = header.version
self.channels = bool(header.flags & 4) or 2
self.sample_rate = RATES[(header.flags >> 23) & 0xF]
self.bits_per_sample = ((header.flags & 3) + 1) * 8
# most common multiplier (DSD64)
if (header.flags >> 31) & 1:
self.sample_rate *= 4
self.bits_per_sample = 1
if header.total_samples == -1 or header.block_index != 0:
# TODO: we could make this faster by using the tag size
@ -114,6 +120,15 @@ class WavPackInfo(StreamInfo):
class WavPack(APEv2File):
"""WavPack(filething)
Arguments:
filething (filething)
Attributes:
info (`WavPackInfo`)
"""
_Info = WavPackInfo
_mimes = ["audio/x-wavpack"]