Merge branch 'nightly' into dependabot/pip/nightly/cloudinary-1.39.1

Commit 1f89e51f59 by JonnyWong16, 2024-03-24 15:27:35 -07:00, committed via GitHub.
GPG key ID B5690EEEBB952194 (no known key found for this signature in database).
931 changed files, 167,156 additions, 15,533 deletions.

@@ -47,7 +47,7 @@ jobs:
           version: latest
       - name: Cache Docker Layers
-        uses: actions/cache@v3
+        uses: actions/cache@v4
         with:
           path: /tmp/.buildx-cache
           key: ${{ runner.os }}-buildx-${{ github.sha }}

@@ -129,7 +129,7 @@ jobs:
           echo "$EOF" >> $GITHUB_OUTPUT
       - name: Create Release
-        uses: softprops/action-gh-release@v1
+        uses: softprops/action-gh-release@v2
         id: create_release
         env:
           GITHUB_TOKEN: ${{ secrets.GHACTIONS_TOKEN }}

lib/PyWin32.chm (new binary file, not shown)

lib/adodbapi/__init__.py (new file, 74 lines)

@@ -0,0 +1,74 @@
"""adodbapi - A python DB API 2.0 (PEP 249) interface to Microsoft ADO
Copyright (C) 2002 Henrik Ekelund, version 2.1 by Vernon Cole
* http://sourceforge.net/projects/adodbapi
"""
import sys
import time
from .adodbapi import Connection, Cursor, __version__, connect, dateconverter
from .apibase import (
BINARY,
DATETIME,
NUMBER,
ROWID,
STRING,
DatabaseError,
DataError,
Error,
FetchFailedError,
IntegrityError,
InterfaceError,
InternalError,
NotSupportedError,
OperationalError,
ProgrammingError,
Warning,
apilevel,
paramstyle,
threadsafety,
)
def Binary(aString):
"""This function constructs an object capable of holding a binary (long) string value."""
return bytes(aString)
def Date(year, month, day):
"This function constructs an object holding a date value."
return dateconverter.Date(year, month, day)
def Time(hour, minute, second):
"This function constructs an object holding a time value."
return dateconverter.Time(hour, minute, second)
def Timestamp(year, month, day, hour, minute, second):
"This function constructs an object holding a time stamp value."
return dateconverter.Timestamp(year, month, day, hour, minute, second)
def DateFromTicks(ticks):
"""This function constructs an object holding a date value from the given ticks value
(number of seconds since the epoch; see the documentation of the standard Python time module for details).
"""
return Date(*time.gmtime(ticks)[:3])
def TimeFromTicks(ticks):
"""This function constructs an object holding a time value from the given ticks value
(number of seconds since the epoch; see the documentation of the standard Python time module for details).
"""
return Time(*time.gmtime(ticks)[3:6])
def TimestampFromTicks(ticks):
"""This function constructs an object holding a time stamp value from the given
ticks value (number of seconds since the epoch;
see the documentation of the standard Python time module for details)."""
return Timestamp(*time.gmtime(ticks)[:6])
version = "adodbapi v" + __version__

lib/adodbapi/ado_consts.py (new file, 281 lines)

@@ -0,0 +1,281 @@
# ADO enumerated constants documented on MSDN:
# http://msdn.microsoft.com/en-us/library/ms678353(VS.85).aspx
# IsolationLevelEnum
adXactUnspecified = -1
adXactBrowse = 0x100
adXactChaos = 0x10
adXactCursorStability = 0x1000
adXactIsolated = 0x100000
adXactReadCommitted = 0x1000
adXactReadUncommitted = 0x100
adXactRepeatableRead = 0x10000
adXactSerializable = 0x100000
# CursorLocationEnum
adUseClient = 3
adUseServer = 2
# CursorTypeEnum
adOpenDynamic = 2
adOpenForwardOnly = 0
adOpenKeyset = 1
adOpenStatic = 3
adOpenUnspecified = -1
# CommandTypeEnum
adCmdText = 1
adCmdStoredProc = 4
adSchemaTables = 20
# ParameterDirectionEnum
adParamInput = 1
adParamInputOutput = 3
adParamOutput = 2
adParamReturnValue = 4
adParamUnknown = 0
directions = {
0: "Unknown",
1: "Input",
2: "Output",
3: "InputOutput",
4: "Return",
}
def ado_direction_name(ado_dir):
try:
return "adParam" + directions[ado_dir]
    except KeyError:
return "unknown direction (" + str(ado_dir) + ")"
# ObjectStateEnum
adStateClosed = 0
adStateOpen = 1
adStateConnecting = 2
adStateExecuting = 4
adStateFetching = 8
# FieldAttributeEnum
adFldMayBeNull = 0x40
# ConnectModeEnum
adModeUnknown = 0
adModeRead = 1
adModeWrite = 2
adModeReadWrite = 3
adModeShareDenyRead = 4
adModeShareDenyWrite = 8
adModeShareExclusive = 12
adModeShareDenyNone = 16
adModeRecursive = 0x400000
# XactAttributeEnum
adXactCommitRetaining = 131072
adXactAbortRetaining = 262144
ado_error_TIMEOUT = -2147217871
# DataTypeEnum - ADO Data types documented at:
# http://msdn2.microsoft.com/en-us/library/ms675318.aspx
adArray = 0x2000
adEmpty = 0x0
adBSTR = 0x8
adBigInt = 0x14
adBinary = 0x80
adBoolean = 0xB
adChapter = 0x88
adChar = 0x81
adCurrency = 0x6
adDBDate = 0x85
adDBTime = 0x86
adDBTimeStamp = 0x87
adDate = 0x7
adDecimal = 0xE
adDouble = 0x5
adError = 0xA
adFileTime = 0x40
adGUID = 0x48
adIDispatch = 0x9
adIUnknown = 0xD
adInteger = 0x3
adLongVarBinary = 0xCD
adLongVarChar = 0xC9
adLongVarWChar = 0xCB
adNumeric = 0x83
adPropVariant = 0x8A
adSingle = 0x4
adSmallInt = 0x2
adTinyInt = 0x10
adUnsignedBigInt = 0x15
adUnsignedInt = 0x13
adUnsignedSmallInt = 0x12
adUnsignedTinyInt = 0x11
adUserDefined = 0x84
adVarBinary = 0xCC
adVarChar = 0xC8
adVarNumeric = 0x8B
adVarWChar = 0xCA
adVariant = 0xC
adWChar = 0x82
# Additional constants used by introspection but not ADO itself
AUTO_FIELD_MARKER = -1000
adTypeNames = {
adBSTR: "adBSTR",
adBigInt: "adBigInt",
adBinary: "adBinary",
adBoolean: "adBoolean",
adChapter: "adChapter",
adChar: "adChar",
adCurrency: "adCurrency",
adDBDate: "adDBDate",
adDBTime: "adDBTime",
adDBTimeStamp: "adDBTimeStamp",
adDate: "adDate",
adDecimal: "adDecimal",
adDouble: "adDouble",
adEmpty: "adEmpty",
adError: "adError",
adFileTime: "adFileTime",
adGUID: "adGUID",
adIDispatch: "adIDispatch",
adIUnknown: "adIUnknown",
adInteger: "adInteger",
adLongVarBinary: "adLongVarBinary",
adLongVarChar: "adLongVarChar",
adLongVarWChar: "adLongVarWChar",
adNumeric: "adNumeric",
adPropVariant: "adPropVariant",
adSingle: "adSingle",
adSmallInt: "adSmallInt",
adTinyInt: "adTinyInt",
adUnsignedBigInt: "adUnsignedBigInt",
adUnsignedInt: "adUnsignedInt",
adUnsignedSmallInt: "adUnsignedSmallInt",
adUnsignedTinyInt: "adUnsignedTinyInt",
adUserDefined: "adUserDefined",
adVarBinary: "adVarBinary",
adVarChar: "adVarChar",
adVarNumeric: "adVarNumeric",
adVarWChar: "adVarWChar",
adVariant: "adVariant",
adWChar: "adWChar",
}
def ado_type_name(ado_type):
return adTypeNames.get(ado_type, "unknown type (" + str(ado_type) + ")")
# here in decimal, sorted by value
# adEmpty 0 Specifies no value (DBTYPE_EMPTY).
# adSmallInt 2 Indicates a two-byte signed integer (DBTYPE_I2).
# adInteger 3 Indicates a four-byte signed integer (DBTYPE_I4).
# adSingle 4 Indicates a single-precision floating-point value (DBTYPE_R4).
# adDouble 5 Indicates a double-precision floating-point value (DBTYPE_R8).
# adCurrency 6 Indicates a currency value (DBTYPE_CY). Currency is a fixed-point number
# with four digits to the right of the decimal point. It is stored in an eight-byte signed integer scaled by 10,000.
# adDate 7 Indicates a date value (DBTYPE_DATE). A date is stored as a double, the whole part of which is
# the number of days since December 30, 1899, and the fractional part of which is the fraction of a day.
# adBSTR 8 Indicates a null-terminated character string (Unicode) (DBTYPE_BSTR).
# adIDispatch 9 Indicates a pointer to an IDispatch interface on a COM object (DBTYPE_IDISPATCH).
# adError 10 Indicates a 32-bit error code (DBTYPE_ERROR).
# adBoolean 11 Indicates a boolean value (DBTYPE_BOOL).
# adVariant 12 Indicates an Automation Variant (DBTYPE_VARIANT).
# adIUnknown 13 Indicates a pointer to an IUnknown interface on a COM object (DBTYPE_IUNKNOWN).
# adDecimal 14 Indicates an exact numeric value with a fixed precision and scale (DBTYPE_DECIMAL).
# adTinyInt 16 Indicates a one-byte signed integer (DBTYPE_I1).
# adUnsignedTinyInt 17 Indicates a one-byte unsigned integer (DBTYPE_UI1).
# adUnsignedSmallInt 18 Indicates a two-byte unsigned integer (DBTYPE_UI2).
# adUnsignedInt 19 Indicates a four-byte unsigned integer (DBTYPE_UI4).
# adBigInt 20 Indicates an eight-byte signed integer (DBTYPE_I8).
# adUnsignedBigInt 21 Indicates an eight-byte unsigned integer (DBTYPE_UI8).
# adFileTime 64 Indicates a 64-bit value representing the number of 100-nanosecond intervals since
# January 1, 1601 (DBTYPE_FILETIME).
# adGUID 72 Indicates a globally unique identifier (GUID) (DBTYPE_GUID).
# adBinary 128 Indicates a binary value (DBTYPE_BYTES).
# adChar 129 Indicates a string value (DBTYPE_STR).
# adWChar 130 Indicates a null-terminated Unicode character string (DBTYPE_WSTR).
# adNumeric 131 Indicates an exact numeric value with a fixed precision and scale (DBTYPE_NUMERIC).
# adUserDefined 132 Indicates a user-defined variable (DBTYPE_UDT).
# adDBDate 133 Indicates a date value (yyyymmdd) (DBTYPE_DBDATE).
# adDBTime 134 Indicates a time value (hhmmss) (DBTYPE_DBTIME).
# adDBTimeStamp 135 Indicates a date/time stamp (yyyymmddhhmmss plus a fraction in billionths) (DBTYPE_DBTIMESTAMP).
# adChapter 136 Indicates a four-byte chapter value that identifies rows in a child rowset (DBTYPE_HCHAPTER).
# adPropVariant 138 Indicates an Automation PROPVARIANT (DBTYPE_PROP_VARIANT).
# adVarNumeric 139 Indicates a numeric value (Parameter object only).
# adVarChar 200 Indicates a string value (Parameter object only).
# adLongVarChar 201 Indicates a long string value (Parameter object only).
# adVarWChar 202 Indicates a null-terminated Unicode character string (Parameter object only).
# adLongVarWChar 203 Indicates a long null-terminated Unicode string value (Parameter object only).
# adVarBinary 204 Indicates a binary value (Parameter object only).
# adLongVarBinary 205 Indicates a long binary value (Parameter object only).
# adArray (Does not apply to ADOX.) 0x2000 A flag value, always combined with another data type constant,
# that indicates an array of that other data type.
# Error codes to names
adoErrors = {
0xE7B: "adErrBoundToCommand",
0xE94: "adErrCannotComplete",
0xEA4: "adErrCantChangeConnection",
0xC94: "adErrCantChangeProvider",
0xE8C: "adErrCantConvertvalue",
0xE8D: "adErrCantCreate",
0xEA3: "adErrCatalogNotSet",
0xE8E: "adErrColumnNotOnThisRow",
0xD5D: "adErrDataConversion",
0xE89: "adErrDataOverflow",
0xE9A: "adErrDelResOutOfScope",
0xEA6: "adErrDenyNotSupported",
0xEA7: "adErrDenyTypeNotSupported",
0xCB3: "adErrFeatureNotAvailable",
0xEA5: "adErrFieldsUpdateFailed",
0xC93: "adErrIllegalOperation",
0xCAE: "adErrInTransaction",
0xE87: "adErrIntegrityViolation",
0xBB9: "adErrInvalidArgument",
0xE7D: "adErrInvalidConnection",
0xE7C: "adErrInvalidParamInfo",
0xE82: "adErrInvalidTransaction",
0xE91: "adErrInvalidURL",
0xCC1: "adErrItemNotFound",
0xBCD: "adErrNoCurrentRecord",
0xE83: "adErrNotExecuting",
0xE7E: "adErrNotReentrant",
0xE78: "adErrObjectClosed",
0xD27: "adErrObjectInCollection",
0xD5C: "adErrObjectNotSet",
0xE79: "adErrObjectOpen",
0xBBA: "adErrOpeningFile",
0xE80: "adErrOperationCancelled",
0xE96: "adErrOutOfSpace",
0xE88: "adErrPermissionDenied",
0xE9E: "adErrPropConflicting",
0xE9B: "adErrPropInvalidColumn",
0xE9C: "adErrPropInvalidOption",
0xE9D: "adErrPropInvalidValue",
0xE9F: "adErrPropNotAllSettable",
0xEA0: "adErrPropNotSet",
0xEA1: "adErrPropNotSettable",
0xEA2: "adErrPropNotSupported",
0xBB8: "adErrProviderFailed",
0xE7A: "adErrProviderNotFound",
0xBBB: "adErrReadFile",
0xE93: "adErrResourceExists",
0xE92: "adErrResourceLocked",
0xE97: "adErrResourceOutOfScope",
0xE8A: "adErrSchemaViolation",
0xE8B: "adErrSignMismatch",
0xE81: "adErrStillConnecting",
0xE7F: "adErrStillExecuting",
0xE90: "adErrTreePermissionDenied",
0xE8F: "adErrURLDoesNotExist",
0xE99: "adErrURLNamedRowDoesNotExist",
0xE98: "adErrUnavailable",
0xE84: "adErrUnsafeOperation",
0xE95: "adErrVolumeNotFound",
0xBBC: "adErrWriteFile",
}
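Editor's note: these tables exist so diagnostics can render raw ADO numbers as readable names. A short sketch of typical lookups, assuming lib/ is on sys.path so the file imports as adodbapi.ado_consts:

from adodbapi import ado_consts as adc

print(adc.ado_type_name(adc.adVarWChar))         # -> adVarWChar
print(adc.ado_direction_name(adc.adParamInput))  # -> adParamInput
print(adc.adoErrors.get(0xE78, "unknown"))       # -> adErrObjectClosed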

lib/adodbapi/adodbapi.py (new file, 1223 lines)

File diff suppressed because it is too large.

lib/adodbapi/apibase.py (new file, 794 lines)

@@ -0,0 +1,794 @@
"""adodbapi.apibase - A python DB API 2.0 (PEP 249) interface to Microsoft ADO
Copyright (C) 2002 Henrik Ekelund, version 2.1 by Vernon Cole
* http://sourceforge.net/projects/pywin32
* http://sourceforge.net/projects/adodbapi
"""
import datetime
import decimal
import numbers
import sys
import time
# noinspection PyUnresolvedReferences
from . import ado_consts as adc
verbose = False # debugging flag
onIronPython = sys.platform == "cli"
if onIronPython: # we need type definitions for odd data we may need to convert
# noinspection PyUnresolvedReferences
from System import DateTime, DBNull
NullTypes = (type(None), DBNull)
else:
DateTime = type(NotImplemented) # should never be seen on win32
NullTypes = type(None)
# --- define objects to smooth out Python3 <-> Python 2.x differences
unicodeType = str
longType = int
StringTypes = str
makeByteBuffer = bytes
memoryViewType = memoryview
_BaseException = Exception
try: # jdhardy -- handle bytes under IronPython & Py3
bytes
except NameError:
bytes = str # define it for old Pythons
# ------- Error handlers ------
def standardErrorHandler(connection, cursor, errorclass, errorvalue):
err = (errorclass, errorvalue)
try:
connection.messages.append(err)
except:
pass
if cursor is not None:
try:
cursor.messages.append(err)
except:
pass
raise errorclass(errorvalue)
# Note: _BaseException is defined differently between Python 2.x and 3.x
class Error(_BaseException):
pass # Exception that is the base class of all other error
# exceptions. You can use this to catch all errors with one
# single 'except' statement. Warnings are not considered
# errors and thus should not use this class as base. It must
# be a subclass of the Python StandardError (defined in the
# module exceptions).
class Warning(_BaseException):
pass
class InterfaceError(Error):
pass
class DatabaseError(Error):
pass
class InternalError(DatabaseError):
pass
class OperationalError(DatabaseError):
pass
class ProgrammingError(DatabaseError):
pass
class IntegrityError(DatabaseError):
pass
class DataError(DatabaseError):
pass
class NotSupportedError(DatabaseError):
pass
class FetchFailedError(OperationalError):
"""
    Error is used by RawStoredProcedureQuerySet to determine when a fetch
    failed due to a connection being closed or because no record set was
    returned. (Non-standard, added especially for django)
"""
pass
# # # # # ----- Type Objects and Constructors ----- # # # # #
# Many databases need to have the input in a particular format for binding to an operation's input parameters.
# For example, if an input is destined for a DATE column, then it must be bound to the database in a particular
# string format. Similar problems exist for "Row ID" columns or large binary items (e.g. blobs or RAW columns).
# This presents problems for Python since the parameters to the executeXXX() method are untyped.
# When the database module sees a Python string object, it doesn't know if it should be bound as a simple CHAR
# column, as a raw BINARY item, or as a DATE.
#
# To overcome this problem, a module must provide the constructors defined below to create objects that can
# hold special values. When passed to the cursor methods, the module can then detect the proper type of
# the input parameter and bind it accordingly.
# A Cursor Object's description attribute returns information about each of the result columns of a query.
# The type_code must compare equal to one of Type Objects defined below. Type Objects may be equal to more than
# one type code (e.g. DATETIME could be equal to the type codes for date, time and timestamp columns;
# see the Implementation Hints below for details).
# SQL NULL values are represented by the Python None singleton on input and output.
# Note: Usage of Unix ticks for database interfacing can cause troubles because of the limited date range they cover.
# def Date(year,month,day):
# "This function constructs an object holding a date value. "
# return dateconverter.date(year,month,day) #dateconverter.Date(year,month,day)
#
# def Time(hour,minute,second):
# "This function constructs an object holding a time value. "
# return dateconverter.time(hour, minute, second) # dateconverter.Time(hour,minute,second)
#
# def Timestamp(year,month,day,hour,minute,second):
# "This function constructs an object holding a time stamp value. "
# return dateconverter.datetime(year,month,day,hour,minute,second)
#
# def DateFromTicks(ticks):
# """This function constructs an object holding a date value from the given ticks value
# (number of seconds since the epoch; see the documentation of the standard Python time module for details). """
# return Date(*time.gmtime(ticks)[:3])
#
# def TimeFromTicks(ticks):
# """This function constructs an object holding a time value from the given ticks value
# (number of seconds since the epoch; see the documentation of the standard Python time module for details). """
# return Time(*time.gmtime(ticks)[3:6])
#
# def TimestampFromTicks(ticks):
# """This function constructs an object holding a time stamp value from the given
# ticks value (number of seconds since the epoch;
# see the documentation of the standard Python time module for details). """
# return Timestamp(*time.gmtime(ticks)[:6])
#
# def Binary(aString):
# """This function constructs an object capable of holding a binary (long) string value. """
# b = makeByteBuffer(aString)
# return b
# ----- Time converters ----------------------------------------------
class TimeConverter(object): # this is a generic time converter skeleton
def __init__(self): # the details will be filled in by instances
self._ordinal_1899_12_31 = datetime.date(1899, 12, 31).toordinal() - 1
# Use cls.types to compare if an input parameter is a datetime
self.types = {
type(self.Date(2000, 1, 1)),
type(self.Time(12, 1, 1)),
type(self.Timestamp(2000, 1, 1, 12, 1, 1)),
datetime.datetime,
datetime.time,
datetime.date,
}
def COMDate(self, obj):
"""Returns a ComDate from a date-time"""
try: # most likely a datetime
tt = obj.timetuple()
try:
ms = obj.microsecond
except:
ms = 0
return self.ComDateFromTuple(tt, ms)
except: # might be a tuple
try:
return self.ComDateFromTuple(obj)
except: # try an mxdate
try:
return obj.COMDate()
except:
raise ValueError('Cannot convert "%s" to COMdate.' % repr(obj))
def ComDateFromTuple(self, t, microseconds=0):
d = datetime.date(t[0], t[1], t[2])
integerPart = d.toordinal() - self._ordinal_1899_12_31
ms = (t[3] * 3600 + t[4] * 60 + t[5]) * 1000000 + microseconds
fractPart = float(ms) / 86400000000.0
return integerPart + fractPart
def DateObjectFromCOMDate(self, comDate):
"Returns an object of the wanted type from a ComDate"
raise NotImplementedError # "Abstract class"
def Date(self, year, month, day):
"This function constructs an object holding a date value."
raise NotImplementedError # "Abstract class"
def Time(self, hour, minute, second):
"This function constructs an object holding a time value."
raise NotImplementedError # "Abstract class"
def Timestamp(self, year, month, day, hour, minute, second):
"This function constructs an object holding a time stamp value."
raise NotImplementedError # "Abstract class"
# all purpose date to ISO format converter
def DateObjectToIsoFormatString(self, obj):
"This function should return a string in the format 'YYYY-MM-dd HH:MM:SS:ms' (ms optional)"
try: # most likely, a datetime.datetime
s = obj.isoformat(" ")
except (TypeError, AttributeError):
if isinstance(obj, datetime.date):
s = obj.isoformat() + " 00:00:00" # return exact midnight
else:
try: # maybe it has a strftime method, like mx
s = obj.strftime("%Y-%m-%d %H:%M:%S")
except AttributeError:
try: # but may be time.struct_time
s = time.strftime("%Y-%m-%d %H:%M:%S", obj)
except:
raise ValueError('Cannot convert "%s" to isoformat' % repr(obj))
return s
# -- Optional: if mx extensions are installed you may use mxDateTime ----
try:
import mx.DateTime
mxDateTime = True
except ImportError:
mxDateTime = False
if mxDateTime:
class mxDateTimeConverter(TimeConverter): # used optionally if installed
def __init__(self):
TimeConverter.__init__(self)
self.types.add(type(mx.DateTime))
def DateObjectFromCOMDate(self, comDate):
return mx.DateTime.DateTimeFromCOMDate(comDate)
def Date(self, year, month, day):
return mx.DateTime.Date(year, month, day)
def Time(self, hour, minute, second):
return mx.DateTime.Time(hour, minute, second)
def Timestamp(self, year, month, day, hour, minute, second):
return mx.DateTime.Timestamp(year, month, day, hour, minute, second)
else:
class mxDateTimeConverter(TimeConverter):
pass # if no mx is installed
class pythonDateTimeConverter(TimeConverter): # standard since Python 2.3
def __init__(self):
TimeConverter.__init__(self)
def DateObjectFromCOMDate(self, comDate):
if isinstance(comDate, datetime.datetime):
odn = comDate.toordinal()
tim = comDate.time()
new = datetime.datetime.combine(datetime.datetime.fromordinal(odn), tim)
return new
# return comDate.replace(tzinfo=None) # make non aware
elif isinstance(comDate, DateTime):
fComDate = comDate.ToOADate() # ironPython clr Date/Time
else:
            fComDate = float(comDate)  # ComDate is days since the OLE epoch 1899-12-30
integerPart = int(fComDate)
floatpart = fComDate - integerPart
##if floatpart == 0.0:
## return datetime.date.fromordinal(integerPart + self._ordinal_1899_12_31)
dte = datetime.datetime.fromordinal(
integerPart + self._ordinal_1899_12_31
) + datetime.timedelta(milliseconds=floatpart * 86400000)
# millisecondsperday=86400000 # 24*60*60*1000
return dte
def Date(self, year, month, day):
return datetime.date(year, month, day)
def Time(self, hour, minute, second):
return datetime.time(hour, minute, second)
def Timestamp(self, year, month, day, hour, minute, second):
return datetime.datetime(year, month, day, hour, minute, second)
class pythonTimeConverter(TimeConverter): # the old, ?nix type date and time
    def __init__(self):  # caution: this class gets confused by timezones and DST
TimeConverter.__init__(self)
self.types.add(time.struct_time)
def DateObjectFromCOMDate(self, comDate):
"Returns ticks since 1970"
if isinstance(comDate, datetime.datetime):
return comDate.timetuple()
elif isinstance(comDate, DateTime): # ironPython clr date/time
fcomDate = comDate.ToOADate()
else:
fcomDate = float(comDate)
secondsperday = 86400 # 24*60*60
        # ComDate is days since the OLE epoch 1899-12-30; 1970-1-1 is 25569 days later
t = time.gmtime(secondsperday * (fcomDate - 25569.0))
return t # year,month,day,hour,minute,second,weekday,julianday,daylightsaving=t
def Date(self, year, month, day):
return self.Timestamp(year, month, day, 0, 0, 0)
def Time(self, hour, minute, second):
return time.gmtime((hour * 60 + minute) * 60 + second)
def Timestamp(self, year, month, day, hour, minute, second):
return time.localtime(
time.mktime((year, month, day, hour, minute, second, 0, 0, -1))
)
base_dateconverter = pythonDateTimeConverter()
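# Editor's worked example (not part of the original file): a COM date is a
# float counting days since the OLE epoch 1899-12-30, with the fraction
# carrying the time of day, so noon on 1900-01-01 is exactly 2.5:
#
#     >>> base_dateconverter.COMDate(datetime.datetime(1900, 1, 1, 12, 0, 0))
#     2.5
#     >>> base_dateconverter.DateObjectFromCOMDate(2.5)
#     datetime.datetime(1900, 1, 1, 12, 0)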
# ------ DB API required module attributes ---------------------
threadsafety = 1 # TODO -- find out whether this module is actually BETTER than 1.
apilevel = "2.0" # String constant stating the supported DB API level.
paramstyle = "qmark" # the default parameter style
# ------ control for an extension which may become part of DB API 3.0 ---
accepted_paramstyles = ("qmark", "named", "format", "pyformat", "dynamic")
# ------------------------------------------------------------------------------------------
# define similar types for generic conversion routines
adoIntegerTypes = (
adc.adInteger,
adc.adSmallInt,
adc.adTinyInt,
adc.adUnsignedInt,
adc.adUnsignedSmallInt,
adc.adUnsignedTinyInt,
adc.adBoolean,
adc.adError,
) # max 32 bits
adoRowIdTypes = (adc.adChapter,) # v2.1 Rose
adoLongTypes = (adc.adBigInt, adc.adFileTime, adc.adUnsignedBigInt)
adoExactNumericTypes = (
adc.adDecimal,
adc.adNumeric,
adc.adVarNumeric,
adc.adCurrency,
) # v2.3 Cole
adoApproximateNumericTypes = (adc.adDouble, adc.adSingle) # v2.1 Cole
adoStringTypes = (
adc.adBSTR,
adc.adChar,
adc.adLongVarChar,
adc.adLongVarWChar,
adc.adVarChar,
adc.adVarWChar,
adc.adWChar,
)
adoBinaryTypes = (adc.adBinary, adc.adLongVarBinary, adc.adVarBinary)
adoDateTimeTypes = (adc.adDBTime, adc.adDBTimeStamp, adc.adDate, adc.adDBDate)
adoRemainingTypes = (
adc.adEmpty,
adc.adIDispatch,
adc.adIUnknown,
adc.adPropVariant,
adc.adArray,
adc.adUserDefined,
adc.adVariant,
adc.adGUID,
)
# this class is a trick to determine whether a type is a member of a related group of types. see PEP notes
class DBAPITypeObject(object):
def __init__(self, valuesTuple):
self.values = frozenset(valuesTuple)
def __eq__(self, other):
return other in self.values
def __ne__(self, other):
return other not in self.values
"""This type object is used to describe columns in a database that are string-based (e.g. CHAR). """
STRING = DBAPITypeObject(adoStringTypes)
"""This type object is used to describe (long) binary columns in a database (e.g. LONG, RAW, BLOBs). """
BINARY = DBAPITypeObject(adoBinaryTypes)
"""This type object is used to describe numeric columns in a database. """
NUMBER = DBAPITypeObject(
adoIntegerTypes + adoLongTypes + adoExactNumericTypes + adoApproximateNumericTypes
)
"""This type object is used to describe date/time columns in a database. """
DATETIME = DBAPITypeObject(adoDateTimeTypes)
"""This type object is used to describe the "Row ID" column in a database. """
ROWID = DBAPITypeObject(adoRowIdTypes)
OTHER = DBAPITypeObject(adoRemainingTypes)
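# Editor's illustration (not part of the original file): because __eq__ and
# __ne__ test set membership, a raw ADO type code from cursor.description
# compares equal to its group singleton from either side:
#
#     >>> adc.adVarWChar == STRING
#     True
#     >>> adc.adBigInt == NUMBER
#     True
#     >>> STRING == adc.adBigInt
#     False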
# ------- utilities for translating python data types to ADO data types ---------------------------------
typeMap = {
memoryViewType: adc.adVarBinary,
float: adc.adDouble,
type(None): adc.adEmpty,
str: adc.adBSTR,
bool: adc.adBoolean, # v2.1 Cole
decimal.Decimal: adc.adDecimal,
int: adc.adBigInt,
bytes: adc.adVarBinary,
}
def pyTypeToADOType(d):
tp = type(d)
try:
return typeMap[tp]
except KeyError: # The type was not defined in the pre-computed Type table
from . import dateconverter
if (
tp in dateconverter.types
): # maybe it is one of our supported Date/Time types
return adc.adDate
# otherwise, attempt to discern the type by probing the data object itself -- to handle duck typing
if isinstance(d, StringTypes):
return adc.adBSTR
if isinstance(d, numbers.Integral):
return adc.adBigInt
if isinstance(d, numbers.Real):
return adc.adDouble
raise DataError('cannot convert "%s" (type=%s) to ADO' % (repr(d), tp))
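# Editor's illustration (not part of the original file): exact table hits
# resolve first, then date types, then duck-typed fallbacks for string-like
# and number-like data:
#
#     >>> pyTypeToADOType(3.14) == adc.adDouble
#     True
#     >>> pyTypeToADOType(b"blob") == adc.adVarBinary
#     True
#     >>> import fractions
#     >>> pyTypeToADOType(fractions.Fraction(1, 3)) == adc.adDouble
#     True  # not in typeMap, but it is a numbers.Real, so the fallback fires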
# # # # # # # # # # # # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# functions to convert database values to Python objects
# ------------------------------------------------------------------------
# variant type : function converting variant to Python value
def variantConvertDate(v):
from . import dateconverter # this function only called when adodbapi is running
return dateconverter.DateObjectFromCOMDate(v)
def cvtString(variant): # use to get old action of adodbapi v1 if desired
if onIronPython:
try:
return variant.ToString()
except:
pass
return str(variant)
def cvtDecimal(variant): # better name
return _convertNumberWithCulture(variant, decimal.Decimal)
def cvtNumeric(variant): # older name - don't break old code
return cvtDecimal(variant)
def cvtFloat(variant):
return _convertNumberWithCulture(variant, float)
def _convertNumberWithCulture(variant, f):
try:
return f(variant)
except (ValueError, TypeError, decimal.InvalidOperation):
try:
europeVsUS = str(variant).replace(",", ".")
return f(europeVsUS)
except (ValueError, TypeError, decimal.InvalidOperation):
pass
def cvtInt(variant):
return int(variant)
def cvtLong(variant): # only important in old versions where long and int differ
return int(variant)
def cvtBuffer(variant):
return bytes(variant)
def cvtUnicode(variant):
return str(variant)
def identity(x):
return x
def cvtUnusual(variant):
if verbose > 1:
sys.stderr.write("Conversion called for Unusual data=%s\n" % repr(variant))
if isinstance(variant, DateTime): # COMdate or System.Date
from .adodbapi import ( # this will only be called when adodbapi is in use, and very rarely
dateconverter,
)
return dateconverter.DateObjectFromCOMDate(variant)
return variant # cannot find conversion function -- just give the data to the user
def convert_to_python(variant, func): # convert DB value into Python value
if isinstance(variant, NullTypes): # IronPython Null or None
return None
return func(variant) # call the appropriate conversion function
class MultiMap(dict): # builds a dictionary from {(sequence,of,keys) : function}
"""A dictionary of ado.type : function -- but you can set multiple items by passing a sequence of keys"""
# useful for defining conversion functions for groups of similar data types.
def __init__(self, aDict):
for k, v in list(aDict.items()):
self[k] = v # we must call __setitem__
def __setitem__(self, adoType, cvtFn):
"set a single item, or a whole sequence of items"
try: # user passed us a sequence, set them individually
for type in adoType:
dict.__setitem__(self, type, cvtFn)
except TypeError: # a single value fails attempt to iterate
dict.__setitem__(self, adoType, cvtFn)
# initialize variantConversions dictionary used to convert SQL to Python
# this is the dictionary of default conversion functions, built by the class above.
# this becomes a class attribute for the Connection, and that attribute is used
# to build the list of column conversion functions for the Cursor
variantConversions = MultiMap(
{
adoDateTimeTypes: variantConvertDate,
adoApproximateNumericTypes: cvtFloat,
adoExactNumericTypes: cvtDecimal, # use to force decimal rather than unicode
adoLongTypes: cvtLong,
adoIntegerTypes: cvtInt,
adoRowIdTypes: cvtInt,
adoStringTypes: identity,
adoBinaryTypes: cvtBuffer,
adoRemainingTypes: cvtUnusual,
}
)
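# Editor's illustration (not part of the original file): assigning through a
# tuple key fans the value out to every member type, so one statement can
# retarget a whole family of ADO types:
#
#     >>> myconv = MultiMap({adoStringTypes: identity})
#     >>> myconv[adoExactNumericTypes] = cvtString  # force strings for decimals
#     >>> myconv[adc.adChar] is identity, myconv[adc.adCurrency] is cvtString
#     (True, True)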
# # # # # classes to emulate the result of cursor.fetchxxx() as a sequence of sequences # # # # #
# "an ENUM of how my low level records are laid out"
RS_WIN_32, RS_ARRAY, RS_REMOTE = list(range(1, 4))
class SQLrow(object): # a single database row
# class to emulate a sequence, so that a column may be retrieved by either number or name
def __init__(self, rows, index): # "rows" is an _SQLrows object, index is which row
self.rows = rows # parent 'fetch' container object
self.index = index # my row number within parent
def __getattr__(self, name): # used for row.columnName type of value access
try:
return self._getValue(self.rows.columnNames[name.lower()])
except KeyError:
raise AttributeError('Unknown column name "{}"'.format(name))
def _getValue(self, key): # key must be an integer
if (
self.rows.recordset_format == RS_ARRAY
): # retrieve from two-dimensional array
v = self.rows.ado_results[key, self.index]
elif self.rows.recordset_format == RS_REMOTE:
v = self.rows.ado_results[self.index][key]
else: # pywin32 - retrieve from tuple of tuples
v = self.rows.ado_results[key][self.index]
if self.rows.converters is NotImplemented:
return v
return convert_to_python(v, self.rows.converters[key])
def __len__(self):
return self.rows.numberOfColumns
def __getitem__(self, key): # used for row[key] type of value access
if isinstance(key, int): # normal row[1] designation
try:
return self._getValue(key)
except IndexError:
raise
if isinstance(key, slice):
indices = key.indices(self.rows.numberOfColumns)
vl = [self._getValue(i) for i in range(*indices)]
return tuple(vl)
try:
return self._getValue(
self.rows.columnNames[key.lower()]
) # extension row[columnName] designation
except (KeyError, TypeError):
er, st, tr = sys.exc_info()
raise er(
'No such key as "%s" in %s' % (repr(key), self.__repr__())
).with_traceback(tr)
def __iter__(self):
return iter(self.__next__())
def __next__(self):
for n in range(self.rows.numberOfColumns):
yield self._getValue(n)
def __repr__(self): # create a human readable representation
taglist = sorted(list(self.rows.columnNames.items()), key=lambda x: x[1])
s = "<SQLrow={"
for name, i in taglist:
s += name + ":" + repr(self._getValue(i)) + ", "
return s[:-2] + "}>"
def __str__(self): # create a pretty human readable representation
return str(
tuple(str(self._getValue(i)) for i in range(self.rows.numberOfColumns))
)
# TO-DO implement pickling an SQLrow directly
# def __getstate__(self): return self.__dict__
# def __setstate__(self, d): self.__dict__.update(d)
# which basically tell pickle to treat your class just like a normal one,
# taking self.__dict__ as representing the whole of the instance state,
# despite the existence of the __getattr__.
# # # #
class SQLrows(object):
# class to emulate a sequence for multiple rows using a container object
def __init__(self, ado_results, numberOfRows, cursor):
self.ado_results = ado_results # raw result of SQL get
try:
self.recordset_format = cursor.recordset_format
self.numberOfColumns = cursor.numberOfColumns
self.converters = cursor.converters
self.columnNames = cursor.columnNames
except AttributeError:
self.recordset_format = RS_ARRAY
self.numberOfColumns = 0
self.converters = []
self.columnNames = {}
self.numberOfRows = numberOfRows
def __len__(self):
return self.numberOfRows
def __getitem__(self, item): # used for row or row,column access
if not self.ado_results:
return []
if isinstance(item, slice): # will return a list of row objects
indices = item.indices(self.numberOfRows)
return [SQLrow(self, k) for k in range(*indices)]
elif isinstance(item, tuple) and len(item) == 2:
# d = some_rowsObject[i,j] will return a datum from a two-dimension address
i, j = item
if not isinstance(j, int):
try:
j = self.columnNames[j.lower()] # convert named column to numeric
except KeyError:
raise KeyError('adodbapi: no such column name as "%s"' % repr(j))
if self.recordset_format == RS_ARRAY: # retrieve from two-dimensional array
v = self.ado_results[j, i]
elif self.recordset_format == RS_REMOTE:
v = self.ado_results[i][j]
else: # pywin32 - retrieve from tuple of tuples
v = self.ado_results[j][i]
if self.converters is NotImplemented:
return v
return convert_to_python(v, self.converters[j])
else:
row = SQLrow(self, item) # new row descriptor
return row
def __iter__(self):
return iter(self.__next__())
def __next__(self):
for n in range(self.numberOfRows):
row = SQLrow(self, n)
yield row
# # # # #
# # # # # functions to re-format SQL requests to other paramstyle requirements # # # # # # # # # #
def changeNamedToQmark(
op,
): # convert from 'named' paramstyle to ADO required '?'mark parameters
outOp = ""
outparms = []
chunks = op.split(
"'"
) # quote all literals -- odd numbered list results are literals.
inQuotes = False
for chunk in chunks:
if inQuotes: # this is inside a quote
if chunk == "": # double apostrophe to quote one apostrophe
outOp = outOp[:-1] # so take one away
else:
outOp += "'" + chunk + "'" # else pass the quoted string as is.
else: # is SQL code -- look for a :namedParameter
while chunk: # some SQL string remains
sp = chunk.split(":", 1)
outOp += sp[0] # concat the part up to the :
s = ""
try:
chunk = sp[1]
except IndexError:
chunk = None
if chunk: # there was a parameter - parse it out
i = 0
c = chunk[0]
while c.isalnum() or c == "_":
i += 1
try:
c = chunk[i]
except IndexError:
break
s = chunk[:i]
chunk = chunk[i:]
if s:
outparms.append(s) # list the parameters in order
outOp += "?" # put in the Qmark
inQuotes = not inQuotes
return outOp, outparms
def changeFormatToQmark(
op,
): # convert from 'format' paramstyle to ADO required '?'mark parameters
outOp = ""
outparams = []
chunks = op.split(
"'"
) # quote all literals -- odd numbered list results are literals.
inQuotes = False
for chunk in chunks:
if inQuotes:
if (
outOp != "" and chunk == ""
): # he used a double apostrophe to quote one apostrophe
outOp = outOp[:-1] # so take one away
else:
outOp += "'" + chunk + "'" # else pass the quoted string as is.
else: # is SQL code -- look for a %s parameter
if "%(" in chunk: # ugh! pyformat!
while chunk: # some SQL string remains
sp = chunk.split("%(", 1)
outOp += sp[0] # concat the part up to the %
if len(sp) > 1:
try:
s, chunk = sp[1].split(")s", 1) # find the ')s'
except ValueError:
raise ProgrammingError(
'Pyformat SQL has incorrect format near "%s"' % chunk
)
outparams.append(s)
outOp += "?" # put in the Qmark
else:
chunk = None
else: # proper '%s' format
sp = chunk.split("%s") # make each %s
outOp += "?".join(sp) # into ?
inQuotes = not inQuotes # every other chunk is a quoted string
return outOp, outparams
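Editor's note: both rewriters above share the same quote-aware scan. Splitting the SQL on single quotes makes every odd-numbered chunk a string literal that passes through untouched, so parameter markers are only rewritten inside real SQL text. A sketch of the observable behavior (function names from the file above):

sql, names = changeNamedToQmark(
    "select * from t where a = :low and b = 'keep :this' and c = :high"
)
# sql   -> "select * from t where a = ? and b = 'keep :this' and c = ?"
# names -> ['low', 'high']

sql, params = changeFormatToQmark("insert into t values (%s, %s)")
# sql -> "insert into t values (?, ?)"    params -> []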

lib/adodbapi/examples/db_print.py (new file, 72 lines)

@@ -0,0 +1,72 @@
""" db_print.py -- a simple demo for ADO database reads."""
import sys
import adodbapi.ado_consts as adc
cmd_args = ("filename", "table_name")
if "help" in sys.argv:
print("possible settings keywords are:", cmd_args)
sys.exit()
kw_args = {} # pick up filename and proxy address from command line (optionally)
for arg in sys.argv:
s = arg.split("=")
if len(s) > 1:
if s[0] in cmd_args:
kw_args[s[0]] = s[1]
kw_args.setdefault(
"filename", "test.mdb"
) # assumes server is running from examples folder
kw_args.setdefault("table_name", "Products") # the name of the demo table
# the server needs to select the provider based on its Python installation
provider_switch = ["provider", "Microsoft.ACE.OLEDB.12.0", "Microsoft.Jet.OLEDB.4.0"]
# ------------------------ START HERE -------------------------------------
# create the connection
constr = "Provider=%(provider)s;Data Source=%(filename)s"
import adodbapi as db
con = db.connect(constr, kw_args, macro_is64bit=provider_switch)
if kw_args["table_name"] == "?":
print("The tables in your database are:")
for name in con.get_table_names():
print(name)
else:
# make a cursor on the connection
with con.cursor() as c:
# run an SQL statement on the cursor
sql = "select * from %s" % kw_args["table_name"]
print('performing query="%s"' % sql)
c.execute(sql)
# check the results
print(
'result rowcount shows as= %d. (Note: -1 means "not known")' % (c.rowcount,)
)
print("")
print("result data description is:")
print(" NAME Type DispSize IntrnlSz Prec Scale Null?")
for d in c.description:
print(
("%16s %-12s %8s %8d %4d %5d %s")
% (d[0], adc.adTypeNames[d[1]], d[2], d[3], d[4], d[5], bool(d[6]))
)
print("")
print("str() of first five records are...")
# get the results
db = c.fetchmany(5)
# print them
for rec in db:
print(rec)
print("")
print("repr() of next row is...")
print(repr(c.fetchone()))
print("")
con.close()

lib/adodbapi/examples/db_table_names.py (new file, 20 lines)

@@ -0,0 +1,20 @@
""" db_table_names.py -- a simple demo for ADO database table listing."""
import sys
import adodbapi
try:
databasename = sys.argv[1]
except IndexError:
databasename = "test.mdb"
provider = ["prv", "Microsoft.ACE.OLEDB.12.0", "Microsoft.Jet.OLEDB.4.0"]
constr = "Provider=%(prv)s;Data Source=%(db)s"
# create the connection
con = adodbapi.connect(constr, db=databasename, macro_is64bit=provider)
print("Table names in= %s" % databasename)
for table in con.get_table_names():
print(table)

lib/adodbapi/examples/xls_read.py (new file, 41 lines)

@@ -0,0 +1,41 @@
import sys
import adodbapi
try:
import adodbapi.is64bit as is64bit
is64 = is64bit.Python()
except ImportError:
is64 = False
if is64:
driver = "Microsoft.ACE.OLEDB.12.0"
else:
driver = "Microsoft.Jet.OLEDB.4.0"
extended = 'Extended Properties="Excel 8.0;HDR=Yes;IMEX=1;"'
try: # first command line argument will be xls file name -- default to the one written by xls_write.py
filename = sys.argv[1]
except IndexError:
filename = "xx.xls"
constr = "Provider=%s;Data Source=%s;%s" % (driver, filename, extended)
conn = adodbapi.connect(constr)
try: # second command line argument will be worksheet name -- default to first worksheet
sheet = sys.argv[2]
except IndexError:
# use ADO feature to get the name of the first worksheet
sheet = conn.get_table_names()[0]
print("Shreadsheet=%s Worksheet=%s" % (filename, sheet))
print("------------------------------------------------------------")
crsr = conn.cursor()
sql = "SELECT * from [%s]" % sheet
crsr.execute(sql)
for row in crsr.fetchmany(10):
print(repr(row))
crsr.close()
conn.close()

lib/adodbapi/examples/xls_write.py (new file, 41 lines)

@@ -0,0 +1,41 @@
import datetime
import adodbapi
try:
import adodbapi.is64bit as is64bit
is64 = is64bit.Python()
except ImportError:
is64 = False # in case the user has an old version of adodbapi
if is64:
driver = "Microsoft.ACE.OLEDB.12.0"
else:
driver = "Microsoft.Jet.OLEDB.4.0"
filename = "xx.xls" # file will be created if it does not exist
extended = 'Extended Properties="Excel 8.0;Readonly=False;"'
constr = "Provider=%s;Data Source=%s;%s" % (driver, filename, extended)
conn = adodbapi.connect(constr)
with conn: # will auto commit if no errors
with conn.cursor() as crsr:
try:
crsr.execute("drop table SheetOne")
except:
            pass  # just in case there is one already there
# create the sheet and the header row and set the types for the columns
crsr.execute(
"create table SheetOne (Name varchar, Rank varchar, SrvcNum integer, Weight float, Birth date)"
)
sql = "INSERT INTO SheetOne (name, rank , srvcnum, weight, birth) values (?,?,?,?,?)"
data = ("Mike Murphy", "SSG", 123456789, 167.8, datetime.date(1922, 12, 27))
crsr.execute(sql, data) # write the first row of data
crsr.execute(
sql, ["John Jones", "Pvt", 987654321, 140.0, datetime.date(1921, 7, 4)]
) # another row of data
conn.close()
print("Created spreadsheet=%s worksheet=%s" % (filename, "SheetOne"))

lib/adodbapi/is64bit.py (new file, 41 lines)

@@ -0,0 +1,41 @@
"""is64bit.Python() --> boolean value of detected Python word size. is64bit.os() --> os build version"""
import sys
def Python():
if sys.platform == "cli": # IronPython
import System
return System.IntPtr.Size == 8
else:
try:
return sys.maxsize > 2147483647
except AttributeError:
return sys.maxint > 2147483647
def os():
import platform
pm = platform.machine()
if pm != ".." and pm.endswith("64"): # recent Python (not Iron)
return True
else:
import os
if "PROCESSOR_ARCHITEW6432" in os.environ:
return True # 32 bit program running on 64 bit Windows
try:
return os.environ["PROCESSOR_ARCHITECTURE"].endswith(
"64"
) # 64 bit Windows 64 bit program
except (IndexError, KeyError):
pass # not Windows
try:
return "64" in platform.architecture()[0] # this often works in Linux
except:
return False # is an older version of Python, assume also an older os (best we can guess)
if __name__ == "__main__":
print("is64bit.Python() =", Python(), "is64bit.os() =", os())

lib/adodbapi/license.txt (new file, 506 lines)

@@ -0,0 +1,506 @@
GNU LESSER GENERAL PUBLIC LICENSE
Version 2.1, February 1999
Copyright (C) 1991, 1999 Free Software Foundation, Inc.
59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
[This is the first released version of the Lesser GPL. It also counts
as the successor of the GNU Library Public License, version 2, hence
the version number 2.1.]
Preamble
The licenses for most software are designed to take away your
freedom to share and change it. By contrast, the GNU General Public
Licenses are intended to guarantee your freedom to share and change
free software--to make sure the software is free for all its users.
This license, the Lesser General Public License, applies to some
specially designated software packages--typically libraries--of the
Free Software Foundation and other authors who decide to use it. You
can use it too, but we suggest you first think carefully about whether
this license or the ordinary General Public License is the better
strategy to use in any particular case, based on the explanations below.
When we speak of free software, we are referring to freedom of use,
not price. Our General Public Licenses are designed to make sure that
you have the freedom to distribute copies of free software (and charge
for this service if you wish); that you receive source code or can get
it if you want it; that you can change the software and use pieces of
it in new free programs; and that you are informed that you can do
these things.
To protect your rights, we need to make restrictions that forbid
distributors to deny you these rights or to ask you to surrender these
rights. These restrictions translate to certain responsibilities for
you if you distribute copies of the library or if you modify it.
For example, if you distribute copies of the library, whether gratis
or for a fee, you must give the recipients all the rights that we gave
you. You must make sure that they, too, receive or can get the source
code. If you link other code with the library, you must provide
complete object files to the recipients, so that they can relink them
with the library after making changes to the library and recompiling
it. And you must show them these terms so they know their rights.
We protect your rights with a two-step method: (1) we copyright the
library, and (2) we offer you this license, which gives you legal
permission to copy, distribute and/or modify the library.
To protect each distributor, we want to make it very clear that
there is no warranty for the free library. Also, if the library is
modified by someone else and passed on, the recipients should know
that what they have is not the original version, so that the original
author's reputation will not be affected by problems that might be
introduced by others.
Finally, software patents pose a constant threat to the existence of
any free program. We wish to make sure that a company cannot
effectively restrict the users of a free program by obtaining a
restrictive license from a patent holder. Therefore, we insist that
any patent license obtained for a version of the library must be
consistent with the full freedom of use specified in this license.
Most GNU software, including some libraries, is covered by the
ordinary GNU General Public License. This license, the GNU Lesser
General Public License, applies to certain designated libraries, and
is quite different from the ordinary General Public License. We use
this license for certain libraries in order to permit linking those
libraries into non-free programs.
When a program is linked with a library, whether statically or using
a shared library, the combination of the two is legally speaking a
combined work, a derivative of the original library. The ordinary
General Public License therefore permits such linking only if the
entire combination fits its criteria of freedom. The Lesser General
Public License permits more lax criteria for linking other code with
the library.
We call this license the "Lesser" General Public License because it
does Less to protect the user's freedom than the ordinary General
Public License. It also provides other free software developers Less
of an advantage over competing non-free programs. These disadvantages
are the reason we use the ordinary General Public License for many
libraries. However, the Lesser license provides advantages in certain
special circumstances.
For example, on rare occasions, there may be a special need to
encourage the widest possible use of a certain library, so that it becomes
a de-facto standard. To achieve this, non-free programs must be
allowed to use the library. A more frequent case is that a free
library does the same job as widely used non-free libraries. In this
case, there is little to gain by limiting the free library to free
software only, so we use the Lesser General Public License.
In other cases, permission to use a particular library in non-free
programs enables a greater number of people to use a large body of
free software. For example, permission to use the GNU C Library in
non-free programs enables many more people to use the whole GNU
operating system, as well as its variant, the GNU/Linux operating
system.
Although the Lesser General Public License is Less protective of the
users' freedom, it does ensure that the user of a program that is
linked with the Library has the freedom and the wherewithal to run
that program using a modified version of the Library.
The precise terms and conditions for copying, distribution and
modification follow. Pay close attention to the difference between a
"work based on the library" and a "work that uses the library". The
former contains code derived from the library, whereas the latter must
be combined with the library in order to run.
GNU LESSER GENERAL PUBLIC LICENSE
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
0. This License Agreement applies to any software library or other
program which contains a notice placed by the copyright holder or
other authorized party saying it may be distributed under the terms of
this Lesser General Public License (also called "this License").
Each licensee is addressed as "you".
A "library" means a collection of software functions and/or data
prepared so as to be conveniently linked with application programs
(which use some of those functions and data) to form executables.
The "Library", below, refers to any such software library or work
which has been distributed under these terms. A "work based on the
Library" means either the Library or any derivative work under
copyright law: that is to say, a work containing the Library or a
portion of it, either verbatim or with modifications and/or translated
straightforwardly into another language. (Hereinafter, translation is
included without limitation in the term "modification".)
"Source code" for a work means the preferred form of the work for
making modifications to it. For a library, complete source code means
all the source code for all modules it contains, plus any associated
interface definition files, plus the scripts used to control compilation
and installation of the library.
Activities other than copying, distribution and modification are not
covered by this License; they are outside its scope. The act of
running a program using the Library is not restricted, and output from
such a program is covered only if its contents constitute a work based
on the Library (independent of the use of the Library in a tool for
writing it). Whether that is true depends on what the Library does
and what the program that uses the Library does.
1. You may copy and distribute verbatim copies of the Library's
complete source code as you receive it, in any medium, provided that
you conspicuously and appropriately publish on each copy an
appropriate copyright notice and disclaimer of warranty; keep intact
all the notices that refer to this License and to the absence of any
warranty; and distribute a copy of this License along with the
Library.
You may charge a fee for the physical act of transferring a copy,
and you may at your option offer warranty protection in exchange for a
fee.
2. You may modify your copy or copies of the Library or any portion
of it, thus forming a work based on the Library, and copy and
distribute such modifications or work under the terms of Section 1
above, provided that you also meet all of these conditions:
a) The modified work must itself be a software library.
b) You must cause the files modified to carry prominent notices
stating that you changed the files and the date of any change.
c) You must cause the whole of the work to be licensed at no
charge to all third parties under the terms of this License.
d) If a facility in the modified Library refers to a function or a
table of data to be supplied by an application program that uses
the facility, other than as an argument passed when the facility
is invoked, then you must make a good faith effort to ensure that,
in the event an application does not supply such function or
table, the facility still operates, and performs whatever part of
its purpose remains meaningful.
(For example, a function in a library to compute square roots has
a purpose that is entirely well-defined independent of the
application. Therefore, Subsection 2d requires that any
application-supplied function or table used by this function must
be optional: if the application does not supply it, the square
root function must still compute square roots.)
These requirements apply to the modified work as a whole. If
identifiable sections of that work are not derived from the Library,
and can be reasonably considered independent and separate works in
themselves, then this License, and its terms, do not apply to those
sections when you distribute them as separate works. But when you
distribute the same sections as part of a whole which is a work based
on the Library, the distribution of the whole must be on the terms of
this License, whose permissions for other licensees extend to the
entire whole, and thus to each and every part regardless of who wrote
it.
Thus, it is not the intent of this section to claim rights or contest
your rights to work written entirely by you; rather, the intent is to
exercise the right to control the distribution of derivative or
collective works based on the Library.
In addition, mere aggregation of another work not based on the Library
with the Library (or with a work based on the Library) on a volume of
a storage or distribution medium does not bring the other work under
the scope of this License.
3. You may opt to apply the terms of the ordinary GNU General Public
License instead of this License to a given copy of the Library. To do
this, you must alter all the notices that refer to this License, so
that they refer to the ordinary GNU General Public License, version 2,
instead of to this License. (If a newer version than version 2 of the
ordinary GNU General Public License has appeared, then you can specify
that version instead if you wish.) Do not make any other change in
these notices.
Once this change is made in a given copy, it is irreversible for
that copy, so the ordinary GNU General Public License applies to all
subsequent copies and derivative works made from that copy.
This option is useful when you wish to copy part of the code of
the Library into a program that is not a library.
4. You may copy and distribute the Library (or a portion or
derivative of it, under Section 2) in object code or executable form
under the terms of Sections 1 and 2 above provided that you accompany
it with the complete corresponding machine-readable source code, which
must be distributed under the terms of Sections 1 and 2 above on a
medium customarily used for software interchange.
If distribution of object code is made by offering access to copy
from a designated place, then offering equivalent access to copy the
source code from the same place satisfies the requirement to
distribute the source code, even though third parties are not
compelled to copy the source along with the object code.
5. A program that contains no derivative of any portion of the
Library, but is designed to work with the Library by being compiled or
linked with it, is called a "work that uses the Library". Such a
work, in isolation, is not a derivative work of the Library, and
therefore falls outside the scope of this License.
However, linking a "work that uses the Library" with the Library
creates an executable that is a derivative of the Library (because it
contains portions of the Library), rather than a "work that uses the
library". The executable is therefore covered by this License.
Section 6 states terms for distribution of such executables.
When a "work that uses the Library" uses material from a header file
that is part of the Library, the object code for the work may be a
derivative work of the Library even though the source code is not.
Whether this is true is especially significant if the work can be
linked without the Library, or if the work is itself a library. The
threshold for this to be true is not precisely defined by law.
If such an object file uses only numerical parameters, data
structure layouts and accessors, and small macros and small inline
functions (ten lines or less in length), then the use of the object
file is unrestricted, regardless of whether it is legally a derivative
work. (Executables containing this object code plus portions of the
Library will still fall under Section 6.)
Otherwise, if the work is a derivative of the Library, you may
distribute the object code for the work under the terms of Section 6.
Any executables containing that work also fall under Section 6,
whether or not they are linked directly with the Library itself.
6. As an exception to the Sections above, you may also combine or
link a "work that uses the Library" with the Library to produce a
work containing portions of the Library, and distribute that work
under terms of your choice, provided that the terms permit
modification of the work for the customer's own use and reverse
engineering for debugging such modifications.
You must give prominent notice with each copy of the work that the
Library is used in it and that the Library and its use are covered by
this License. You must supply a copy of this License. If the work
during execution displays copyright notices, you must include the
copyright notice for the Library among them, as well as a reference
directing the user to the copy of this License. Also, you must do one
of these things:
a) Accompany the work with the complete corresponding
machine-readable source code for the Library including whatever
changes were used in the work (which must be distributed under
Sections 1 and 2 above); and, if the work is an executable linked
with the Library, with the complete machine-readable "work that
uses the Library", as object code and/or source code, so that the
user can modify the Library and then relink to produce a modified
executable containing the modified Library. (It is understood
that the user who changes the contents of definitions files in the
Library will not necessarily be able to recompile the application
to use the modified definitions.)
b) Use a suitable shared library mechanism for linking with the
Library. A suitable mechanism is one that (1) uses at run time a
copy of the library already present on the user's computer system,
rather than copying library functions into the executable, and (2)
will operate properly with a modified version of the library, if
the user installs one, as long as the modified version is
interface-compatible with the version that the work was made with.
c) Accompany the work with a written offer, valid for at
least three years, to give the same user the materials
specified in Subsection 6a, above, for a charge no more
than the cost of performing this distribution.
d) If distribution of the work is made by offering access to copy
from a designated place, offer equivalent access to copy the above
specified materials from the same place.
e) Verify that the user has already received a copy of these
materials or that you have already sent this user a copy.
For an executable, the required form of the "work that uses the
Library" must include any data and utility programs needed for
reproducing the executable from it. However, as a special exception,
the materials to be distributed need not include anything that is
normally distributed (in either source or binary form) with the major
components (compiler, kernel, and so on) of the operating system on
which the executable runs, unless that component itself accompanies
the executable.
It may happen that this requirement contradicts the license
restrictions of other proprietary libraries that do not normally
accompany the operating system. Such a contradiction means you cannot
use both them and the Library together in an executable that you
distribute.
7. You may place library facilities that are a work based on the
Library side-by-side in a single library together with other library
facilities not covered by this License, and distribute such a combined
library, provided that the separate distribution of the work based on
the Library and of the other library facilities is otherwise
permitted, and provided that you do these two things:
a) Accompany the combined library with a copy of the same work
based on the Library, uncombined with any other library
facilities. This must be distributed under the terms of the
Sections above.
b) Give prominent notice with the combined library of the fact
that part of it is a work based on the Library, and explaining
where to find the accompanying uncombined form of the same work.
8. You may not copy, modify, sublicense, link with, or distribute
the Library except as expressly provided under this License. Any
attempt otherwise to copy, modify, sublicense, link with, or
distribute the Library is void, and will automatically terminate your
rights under this License. However, parties who have received copies,
or rights, from you under this License will not have their licenses
terminated so long as such parties remain in full compliance.
9. You are not required to accept this License, since you have not
signed it. However, nothing else grants you permission to modify or
distribute the Library or its derivative works. These actions are
prohibited by law if you do not accept this License. Therefore, by
modifying or distributing the Library (or any work based on the
Library), you indicate your acceptance of this License to do so, and
all its terms and conditions for copying, distributing or modifying
the Library or works based on it.
10. Each time you redistribute the Library (or any work based on the
Library), the recipient automatically receives a license from the
original licensor to copy, distribute, link with or modify the Library
subject to these terms and conditions. You may not impose any further
restrictions on the recipients' exercise of the rights granted herein.
You are not responsible for enforcing compliance by third parties with
this License.
11. If, as a consequence of a court judgment or allegation of patent
infringement or for any other reason (not limited to patent issues),
conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot
distribute so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you
may not distribute the Library at all. For example, if a patent
license would not permit royalty-free redistribution of the Library by
all those who receive copies directly or indirectly through you, then
the only way you could satisfy both it and this License would be to
refrain entirely from distribution of the Library.
If any portion of this section is held invalid or unenforceable under any
particular circumstance, the balance of the section is intended to apply,
and the section as a whole is intended to apply in other circumstances.
It is not the purpose of this section to induce you to infringe any
patents or other property right claims or to contest validity of any
such claims; this section has the sole purpose of protecting the
integrity of the free software distribution system which is
implemented by public license practices. Many people have made
generous contributions to the wide range of software distributed
through that system in reliance on consistent application of that
system; it is up to the author/donor to decide if he or she is willing
to distribute software through any other system and a licensee cannot
impose that choice.
This section is intended to make thoroughly clear what is believed to
be a consequence of the rest of this License.
12. If the distribution and/or use of the Library is restricted in
certain countries either by patents or by copyrighted interfaces, the
original copyright holder who places the Library under this License may add
an explicit geographical distribution limitation excluding those countries,
so that distribution is permitted only in or among countries not thus
excluded. In such case, this License incorporates the limitation as if
written in the body of this License.
13. The Free Software Foundation may publish revised and/or new
versions of the Lesser General Public License from time to time.
Such new versions will be similar in spirit to the present version,
but may differ in detail to address new problems or concerns.
Each version is given a distinguishing version number. If the Library
specifies a version number of this License which applies to it and
"any later version", you have the option of following the terms and
conditions either of that version or of any later version published by
the Free Software Foundation. If the Library does not specify a
license version number, you may choose any version ever published by
the Free Software Foundation.
14. If you wish to incorporate parts of the Library into other free
programs whose distribution conditions are incompatible with these,
write to the author to ask for permission. For software which is
copyrighted by the Free Software Foundation, write to the Free
Software Foundation; we sometimes make exceptions for this. Our
decision will be guided by the two goals of preserving the free status
of all derivatives of our free software and of promoting the sharing
and reuse of software generally.
NO WARRANTY
15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO
WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW.
EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR
OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY
KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE
LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME
THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN
WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY
AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU
FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR
CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE
LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING
RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A
FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF
SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
DAMAGES.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Libraries
If you develop a new library, and you want it to be of the greatest
possible use to the public, we recommend making it free software that
everyone can redistribute and change. You can do so by permitting
redistribution under these terms (or, alternatively, under the terms of the
ordinary General Public License).
To apply these terms, attach the following notices to the library. It is
safest to attach them to the start of each source file to most effectively
convey the exclusion of warranty; and each file should have at least the
"copyright" line and a pointer to where the full notice is found.
<one line to give the library's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
Also add information on how to contact you by electronic and paper mail.
You should also get your employer (if you work as a programmer) or your
school, if any, to sign a "copyright disclaimer" for the library, if
necessary. Here is a sample; alter the names:
Yoyodyne, Inc., hereby disclaims all copyright interest in the
library `Frob' (a library for tweaking knobs) written by James Random Hacker.
<signature of Ty Coon>, 1 April 1990
Ty Coon, President of Vice
That's all there is to it!
lib/adodbapi/process_connect_string.py Normal file
@@ -0,0 +1,144 @@
""" a clumsy attempt at a macro language to let the programmer execute code on the server (ex: determine 64bit)"""
import os
from . import is64bit as is64bit
def macro_call(macro_name, args, kwargs):
"""allow the programmer to perform limited processing on the server by passing macro names and args
:new_key - the key name the macro will create
:args[0] - macro name
:args[1:] - any arguments
:code - the value of the keyword item
:kwargs - the connection keyword dictionary. ??key has been removed
--> the value to put in for kwargs['name'] = value
"""
    if isinstance(args, str):
        args = [
            args
        ]  # the user forgot to pass a sequence, so make a string into args[0]
new_key = args[0]
try:
if macro_name == "is64bit":
if is64bit.Python(): # if on 64 bit Python
return new_key, args[1] # return first argument
else:
try:
return new_key, args[2] # else return second argument (if defined)
except IndexError:
return new_key, "" # else return blank
elif (
macro_name == "getuser"
): # get the name of the user the server is logged in under
            if new_key not in kwargs:
import getpass
return new_key, getpass.getuser()
elif macro_name == "getnode": # get the name of the computer running the server
import platform
try:
return new_key, args[1] % platform.node()
except IndexError:
return new_key, platform.node()
elif macro_name == "getenv": # expand the server's environment variable args[1]
try:
dflt = args[2] # if not found, default from args[2]
except IndexError: # or blank
dflt = ""
return new_key, os.environ.get(args[1], dflt)
elif macro_name == "auto_security":
            if (
                "user" not in kwargs or not kwargs["user"]
            ):  # missing, blank, or Null username
return new_key, "Integrated Security=SSPI"
return new_key, "User ID=%(user)s; Password=%(password)s" % kwargs
elif (
macro_name == "find_temp_test_path"
): # helper function for testing ado operation -- undocumented
import os
import tempfile
return new_key, os.path.join(
tempfile.gettempdir(), "adodbapi_test", args[1]
)
raise ValueError("Unknown connect string macro=%s" % macro_name)
    except Exception:
raise ValueError("Error in macro processing %s %s" % (macro_name, repr(args)))
def process(
args, kwargs, expand_macros=False
): # --> connection string with keyword arguments processed.
"""attempts to inject arguments into a connection string using Python "%" operator for strings
co: adodbapi connection object
args: positional parameters from the .connect() call
kvargs: keyword arguments from the .connect() call
"""
try:
dsn = args[0]
except IndexError:
dsn = None
if isinstance(
dsn, dict
): # as a convenience the first argument may be django settings
kwargs.update(dsn)
elif (
dsn
): # the connection string is passed to the connection as part of the keyword dictionary
kwargs["connection_string"] = dsn
try:
a1 = args[1]
except IndexError:
a1 = None
# historically, the second positional argument might be a timeout value
if isinstance(a1, int):
kwargs["timeout"] = a1
# if the second positional argument is a string, then it is user
elif isinstance(a1, str):
kwargs["user"] = a1
# if the second positional argument is a dictionary, use it as keyword arguments, too
elif isinstance(a1, dict):
kwargs.update(a1)
try:
kwargs["password"] = args[2] # the third positional argument is password
kwargs["host"] = args[3] # the fourth positional argument is host name
kwargs["database"] = args[4] # the fifth positional argument is database name
except IndexError:
pass
# make sure connection string is defined somehow
if not "connection_string" in kwargs:
try: # perhaps 'dsn' was defined
kwargs["connection_string"] = kwargs["dsn"]
except KeyError:
try: # as a last effort, use the "host" keyword
kwargs["connection_string"] = kwargs["host"]
except KeyError:
raise TypeError("Must define 'connection_string' for ado connections")
if expand_macros:
for kwarg in list(kwargs.keys()):
if kwarg.startswith("macro_"): # If a key defines a macro
macro_name = kwarg[6:] # name without the "macro_"
macro_code = kwargs.pop(
kwarg
) # we remove the macro_key and get the code to execute
new_key, rslt = macro_call(
macro_name, macro_code, kwargs
) # run the code in the local context
kwargs[new_key] = rslt # put the result back in the keywords dict
# special processing for PyRO IPv6 host address
try:
s = kwargs["proxy_host"]
if ":" in s: # it is an IPv6 address
if s[0] != "[": # is not surrounded by brackets
kwargs["proxy_host"] = s.join(("[", "]")) # put it in brackets
except KeyError:
pass
return kwargs
lib/adodbapi/readme.txt Normal file
@@ -0,0 +1,92 @@
Project
-------
adodbapi
A Python DB-API 2.0 (PEP-249) module that makes it easy to use Microsoft ADO
for connecting with databases and other data sources
using either CPython or IronPython.
Home page: <http://sourceforge.net/projects/adodbapi>
Features:
* 100% DB-API 2.0 (PEP-249) compliant (including most extensions and recommendations).
* Includes pyunit testcases that describe how to use the module.
* Fully implemented in Python -- runs in Python 2.5+, Python 3.0+, and IronPython 2.6+
* Licensed under the LGPL license, which means that it can be used freely even in commercial programs subject to certain restrictions.
* The user can choose between paramstyles: 'qmark', 'named', 'format', 'pyformat', 'dynamic'
* Supports data retrieval by column name e.g.:
    for row in myCursor.execute("select name,age from students"):
        print("Student", row.name, "is", row.age, "years old.")
* Supports user-definable system-to-Python data conversion functions (selected by ADO data type, or by column)
Prerequisites:
* C Python 2.7 or 3.5 or higher
and pywin32 (Mark Hammond's python for windows extensions.)
or
Iron Python 2.7 or higher. (works in IPy2.0 for all data types except BUFFER)
Installation:
* (C-Python on Windows): Install pywin32 ("pip install pywin32") which includes adodbapi.
* (IronPython on Windows): Download adodbapi from http://sf.net/projects/adodbapi. Unpack the zip.
Open a command window as an administrator. CD to the folder containing the unzipped files.
Run "setup.py install" using the IronPython of your choice.
NOTE: ...........
If you do not like the new default operation of returning Numeric columns as decimal.Decimal,
you can select other options by the user defined conversion feature.
Try:
adodbapi.apibase.variantConversions[adodbapi.ado_consts.adNumeric] = adodbapi.apibase.cvtString
or:
adodbapi.apibase.variantConversions[adodbapi.ado_consts.adNumeric] = adodbapi.apibase.cvtFloat
or:
adodbapi.apibase.variantConversions[adodbapi.ado_consts.adNumeric] = write_your_own_conversion_function
............
notes for 2.6.2:
The definitive source has been moved to https://github.com/mhammond/pywin32/tree/master/adodbapi.
Remote has proven too hard to configure and test with Pyro4. I am moving it to unsupported status
until I can change to a different connection method.
What's new in version 2.6
A cursor.prepare() method and support for prepared SQL statements.
Lots of refactoring, especially of the Remote and Server modules (still to be treated as Beta code).
The quick start document 'quick_reference.odt' will export as a nice-looking pdf.
Added paramstyles 'pyformat' and 'dynamic'. If your 'paramstyle' is 'named' you _must_ pass a dictionary of
parameters to your .execute() method. If your 'paramstyle' is 'format', 'pyformat' or 'dynamic', you _may_
pass a dictionary of parameters -- provided your SQL operation string is formatted correctly.
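For example (a minimal sketch, assuming an open connection "conn" and paramstyle 'named'):
    cursor = conn.cursor()
    cursor.execute("select name from students where age = :age", {"age": 21})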
What's new in version 2.5
Remote module: (works on Linux!) allows a Windows computer to serve ADO databases via PyRO
Server module: PyRO server for ADO. Run using a command like: C:\>python -m adodbapi.server
(server has simple connection string macros: is64bit, getuser, sql_provider, auto_security)
Brief documentation included. See adodbapi/examples folder adodbapi.rtf
New connection method conn.get_table_names() --> list of names of tables in database
Vastly refactored. Data conversion things have been moved to the new adodbapi.apibase module.
Many former module-level attributes are now class attributes. (Should be more thread-safe)
Connection objects are now context managers for transactions and will commit or rollback.
Cursor objects are context managers and will automatically close themselves. (A sketch follows below.)
Autocommit can be switched on and off.
Keyword and positional arguments on the connect() method work as documented in PEP 249.
Keyword arguments from the connect call can be formatted into the connection string.
New keyword arguments defined, such as: autocommit, paramstyle, remote_proxy, remote_port.
*** Breaking change: variantConversion lookups are simplified: the following will raise KeyError:
oldconverter=adodbapi.variantConversions[adodbapi.adoStringTypes]
Refactor as: oldconverter=adodbapi.variantConversions[adodbapi.adoStringTypes[0]]
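A minimal sketch of the context-manager behavior (an illustration assuming an open
connection "conn" and the default 'qmark' paramstyle):
    with conn:  # commits on success, rolls back if an exception escapes
        with conn.cursor() as cursor:  # cursor closes itself on exit
            cursor.execute("insert into students (name, age) values (?, ?)", ["Pat", 21])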
License
-------
LGPL, see http://www.opensource.org/licenses/lgpl-license.php
Documentation
-------------
Look at adodbapi/quick_reference.md
http://www.python.org/topics/database/DatabaseAPI-2.0.html
read the examples in adodbapi/examples
and look at the test cases in adodbapi/test directory.
Mailing lists
-------------
The adodbapi mailing lists have been deactivated. Submit comments to the
pywin32 or IronPython mailing lists.
-- the bug tracker on sourceforge.net/projects/adodbapi may be checked (infrequently).
-- please use: https://github.com/mhammond/pywin32/issues
lib/adodbapi/remote.py Normal file
@@ -0,0 +1,634 @@
"""adodbapi.remote - A python DB API 2.0 (PEP 249) interface to Microsoft ADO
Copyright (C) 2002 Henrik Ekelund, version 2.1 by Vernon Cole
* http://sourceforge.net/projects/pywin32
* http://sourceforge.net/projects/adodbapi
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
django adaptations and refactoring thanks to Adam Vandenberg
DB-API 2.0 specification: http://www.python.org/dev/peps/pep-0249/
This module source should run correctly in CPython versions 2.5 and later,
or IronPython version 2.7 and later,
or, after running through 2to3.py, CPython 3.0 or later.
"""
__version__ = "2.6.0.4"
version = "adodbapi.remote v" + __version__
import array
import datetime
import os
import sys
import time
# Pyro4 is required for server and remote operation --> https://pypi.python.org/pypi/Pyro4/
try:
import Pyro4
except ImportError:
    print('* * * Sorry, server operation requires Pyro4. Please "pip install" it.')
exit(11)
import adodbapi
import adodbapi.apibase as api
import adodbapi.process_connect_string
from adodbapi.apibase import ProgrammingError
_BaseException = api._BaseException
sys.excepthook = Pyro4.util.excepthook
Pyro4.config.PREFER_IP_VERSION = 0 # allow system to prefer IPv6
Pyro4.config.COMMTIMEOUT = 40.0  # a bit longer than the default SQL server timeout
Pyro4.config.SERIALIZER = "pickle"
try:
verbose = int(os.environ["ADODBAPI_VERBOSE"])
except:
verbose = False
if verbose:
print(version)
# --- define objects to smooth out Python3 <-> Python 2.x differences
unicodeType = str  # Python 2 compatibility alias (was: unicode)
longType = int  # Python 2 compatibility alias (was: long)
StringTypes = str
makeByteBuffer = bytes
memoryViewType = memoryview
# -----------------------------------------------------------
# conversion functions mandated by PEP 249
Binary = makeByteBuffer # override the function from apibase.py
def Date(year, month, day):
return datetime.date(year, month, day) # dateconverter.Date(year,month,day)
def Time(hour, minute, second):
return datetime.time(hour, minute, second) # dateconverter.Time(hour,minute,second)
def Timestamp(year, month, day, hour, minute, second):
return datetime.datetime(year, month, day, hour, minute, second)
def DateFromTicks(ticks):
return Date(*time.gmtime(ticks)[:3])
def TimeFromTicks(ticks):
return Time(*time.gmtime(ticks)[3:6])
def TimestampFromTicks(ticks):
return Timestamp(*time.gmtime(ticks)[:6])
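# --- editor's note: the converters above follow PEP 249; for example,
#     DateFromTicks(0) --> datetime.date(1970, 1, 1)
# since ticks are seconds since the epoch, interpreted here via time.gmtime().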
def connect(*args, **kwargs): # --> a remote db-api connection object
"""Create and open a remote db-api database connection object"""
# process the argument list the programmer gave us
kwargs = adodbapi.process_connect_string.process(args, kwargs)
# the "proxy_xxx" keys tell us where to find the PyRO proxy server
kwargs.setdefault(
"pyro_connection", "PYRO:ado.connection@%(proxy_host)s:%(proxy_port)s"
)
if not "proxy_port" in kwargs:
try:
pport = os.environ["PROXY_PORT"]
except KeyError:
pport = 9099
kwargs["proxy_port"] = pport
if not "proxy_host" in kwargs or not kwargs["proxy_host"]:
try:
phost = os.environ["PROXY_HOST"]
except KeyError:
phost = "[::1]" # '127.0.0.1'
kwargs["proxy_host"] = phost
ado_uri = kwargs["pyro_connection"] % kwargs
    # ask PyRO to make us a remote connection object
auto_retry = 3
while auto_retry:
try:
dispatcher = Pyro4.Proxy(ado_uri)
if "comm_timeout" in kwargs:
dispatcher._pyroTimeout = float(kwargs["comm_timeout"])
uri = dispatcher.make_connection()
break
except Pyro4.core.errors.PyroError:
auto_retry -= 1
if auto_retry:
time.sleep(1)
else:
raise api.DatabaseError("Cannot create connection to=%s" % ado_uri)
conn_uri = fix_uri(uri, kwargs) # get a host connection from the proxy server
while auto_retry:
try:
host_conn = Pyro4.Proxy(
conn_uri
) # bring up an exclusive Pyro connection for my ADO connection
break
except Pyro4.core.errors.PyroError:
auto_retry -= 1
if auto_retry:
time.sleep(1)
else:
raise api.DatabaseError(
"Cannot create ADO connection object using=%s" % conn_uri
)
if "comm_timeout" in kwargs:
host_conn._pyroTimeout = float(kwargs["comm_timeout"])
# make a local clone
myConn = Connection()
while auto_retry:
try:
myConn.connect(
kwargs, host_conn
) # call my connect method -- hand him the host connection
break
except Pyro4.core.errors.PyroError:
auto_retry -= 1
if auto_retry:
time.sleep(1)
else:
raise api.DatabaseError(
"Pyro error creating connection to/thru=%s" % repr(kwargs)
)
except _BaseException as e:
raise api.DatabaseError(
"Error creating remote connection to=%s, e=%s, %s"
% (repr(kwargs), repr(e), sys.exc_info()[2])
)
return myConn
def fix_uri(uri, kwargs):
"""convert a generic pyro uri with '0.0.0.0' into the address we actually called"""
u = uri.asString()
s = u.split("[::0]") # IPv6 generic address
if len(s) == 1: # did not find one
s = u.split("0.0.0.0") # IPv4 generic address
if len(s) > 1: # found a generic
return kwargs["proxy_host"].join(s) # fill in our address for the host
return uri
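# --- illustrative sketch (editor's note): fix_uri() rewrites a generic listen
# address into the address the client actually dialed; e.g., with
# kwargs["proxy_host"] == "myhost":
#     "PYRO:ado.connection@0.0.0.0:9099" --> "PYRO:ado.connection@myhost:9099"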
# # # # # ----- the Class that defines a connection ----- # # # # #
class Connection(object):
# include connection attributes required by api definition.
Warning = api.Warning
Error = api.Error
InterfaceError = api.InterfaceError
DataError = api.DataError
DatabaseError = api.DatabaseError
OperationalError = api.OperationalError
IntegrityError = api.IntegrityError
InternalError = api.InternalError
NotSupportedError = api.NotSupportedError
ProgrammingError = api.ProgrammingError
# set up some class attributes
paramstyle = api.paramstyle
@property
def dbapi(self): # a proposed db-api version 3 extension.
"Return a reference to the DBAPI module for this Connection."
return api
def __init__(self):
self.proxy = None
self.kwargs = {}
self.errorhandler = None
self.supportsTransactions = False
self.paramstyle = api.paramstyle
self.timeout = 30
self.cursors = {}
def connect(self, kwargs, connection_maker):
self.kwargs = kwargs
if verbose:
print('%s attempting: "%s"' % (version, repr(kwargs)))
self.proxy = connection_maker
##try:
ret = self.proxy.connect(kwargs) # ask the server to hook us up
        ##except ImportError as e:  # Pyro is trying to import pywintypes.com_error
## self._raiseConnectionError(api.DatabaseError, 'Proxy cannot connect using=%s' % repr(kwargs))
if ret is not True:
self._raiseConnectionError(
api.OperationalError, "Proxy returns error message=%s" % repr(ret)
)
self.supportsTransactions = self.getIndexedValue("supportsTransactions")
self.paramstyle = self.getIndexedValue("paramstyle")
self.timeout = self.getIndexedValue("timeout")
if verbose:
print("adodbapi.remote New connection at %X" % id(self))
def _raiseConnectionError(self, errorclass, errorvalue):
eh = self.errorhandler
if eh is None:
eh = api.standardErrorHandler
eh(self, None, errorclass, errorvalue)
def close(self):
"""Close the connection now (rather than whenever __del__ is called).
The connection will be unusable from this point forward;
an Error (or subclass) exception will be raised if any operation is attempted with the connection.
The same applies to all cursor objects trying to use the connection.
"""
for crsr in list(self.cursors.values())[
:
]: # copy the list, then close each one
crsr.close()
try:
"""close the underlying remote Connection object"""
self.proxy.close()
if verbose:
print("adodbapi.remote Closed connection at %X" % id(self))
object.__delattr__(
self, "proxy"
) # future attempts to use closed cursor will be caught by __getattr__
except Exception:
pass
def __del__(self):
try:
self.proxy.close()
except:
pass
def commit(self):
"""Commit any pending transaction to the database.
Note that if the database supports an auto-commit feature,
this must be initially off. An interface method may be provided to turn it back on.
Database modules that do not support transactions should implement this method with void functionality.
"""
if not self.supportsTransactions:
return
result = self.proxy.commit()
if result:
self._raiseConnectionError(
api.OperationalError, "Error during commit: %s" % result
)
def _rollback(self):
"""In case a database does provide transactions this method causes the the database to roll back to
the start of any pending transaction. Closing a connection without committing the changes first will
cause an implicit rollback to be performed.
"""
result = self.proxy.rollback()
if result:
self._raiseConnectionError(
api.OperationalError, "Error during rollback: %s" % result
)
def __setattr__(self, name, value):
if name in ("paramstyle", "timeout", "autocommit"):
if self.proxy:
self.proxy.send_attribute_to_host(name, value)
object.__setattr__(self, name, value) # store attribute locally (too)
def __getattr__(self, item):
if (
item == "rollback"
): # the rollback method only appears if the database supports transactions
if self.supportsTransactions:
return (
self._rollback
) # return the rollback method so the caller can execute it.
else:
raise self.ProgrammingError(
"this data provider does not support Rollback"
)
elif item in (
"dbms_name",
"dbms_version",
"connection_string",
"autocommit",
): # 'messages' ):
return self.getIndexedValue(item)
elif item == "proxy":
raise self.ProgrammingError("Attempting to use closed connection")
else:
raise self.ProgrammingError('No remote access for attribute="%s"' % item)
def getIndexedValue(self, index):
r = self.proxy.get_attribute_for_remote(index)
return r
def cursor(self):
"Return a new Cursor Object using the connection."
myCursor = Cursor(self)
return myCursor
def _i_am_here(self, crsr):
"message from a new cursor proclaiming its existence"
self.cursors[crsr.id] = crsr
def _i_am_closing(self, crsr):
"message from a cursor giving connection a chance to clean up"
try:
del self.cursors[crsr.id]
except:
pass
def __enter__(self): # Connections are context managers
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type:
self._rollback() # automatic rollback on errors
else:
self.commit()
def get_table_names(self):
return self.proxy.get_table_names()
def fixpickle(x):
"""pickle barfs on buffer(x) so we pass as array.array(x) then restore to original form for .execute()"""
if x is None:
return None
if isinstance(x, dict):
# for 'named' paramstyle user will pass a mapping
newargs = {}
for arg, val in list(x.items()):
if isinstance(val, memoryViewType):
                newval = array.array("B")
                newval.frombytes(val)  # .fromstring() was removed in Python 3.9
newargs[arg] = newval
else:
newargs[arg] = val
return newargs
# if not a mapping, then a sequence
newargs = []
for arg in x:
if isinstance(arg, memoryViewType):
            newarg = array.array("B")
            newarg.frombytes(arg)  # .fromstring() was removed in Python 3.9
newargs.append(newarg)
else:
newargs.append(arg)
return newargs
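# --- illustrative sketch (editor's note): given parameters such as
#     fixpickle([memoryview(b"\x00\x01"), "x"])
# the memoryview is repacked, returning [array.array("B", [0, 1]), "x"],
# a form that the Pyro4 pickle serializer can transport.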
class Cursor(object):
def __init__(self, connection):
self.command = None
self.errorhandler = None ## was: connection.errorhandler
self.connection = connection
self.proxy = self.connection.proxy
self.rs = None # the fetchable data for this cursor
self.converters = NotImplemented
self.id = connection.proxy.build_cursor()
connection._i_am_here(self)
self.recordset_format = api.RS_REMOTE
if verbose:
print(
"%s New cursor at %X on conn %X"
% (version, id(self), id(self.connection))
)
def prepare(self, operation):
self.command = operation
try:
del self.description
except AttributeError:
pass
self.proxy.crsr_prepare(self.id, operation)
def __iter__(self): # [2.1 Zamarev]
return iter(self.fetchone, None) # [2.1 Zamarev]
def __next__(self):
r = self.fetchone()
if r:
return r
raise StopIteration
def __enter__(self):
"Allow database cursors to be used with context managers."
return self
def __exit__(self, exc_type, exc_val, exc_tb):
"Allow database cursors to be used with context managers."
self.close()
def __getattr__(self, key):
if key == "numberOfColumns":
try:
return len(self.rs[0])
except:
return 0
if key == "description":
try:
self.description = self.proxy.crsr_get_description(self.id)[:]
return self.description
except TypeError:
return None
if key == "columnNames":
try:
r = dict(
self.proxy.crsr_get_columnNames(self.id)
) # copy the remote columns
except TypeError:
r = {}
self.columnNames = r
return r
if key == "remote_cursor":
raise api.OperationalError
try:
return self.proxy.crsr_get_attribute_for_remote(self.id, key)
except AttributeError:
raise api.InternalError(
'Failure getting attribute "%s" from proxy cursor.' % key
)
def __setattr__(self, key, value):
if key == "arraysize":
self.proxy.crsr_set_arraysize(self.id, value)
if key == "paramstyle":
if value in api.accepted_paramstyles:
self.proxy.crsr_set_paramstyle(self.id, value)
else:
self._raiseCursorError(
api.ProgrammingError, 'invalid paramstyle ="%s"' % value
)
object.__setattr__(self, key, value)
def _raiseCursorError(self, errorclass, errorvalue):
eh = self.errorhandler
if eh is None:
eh = api.standardErrorHandler
eh(self.connection, self, errorclass, errorvalue)
def execute(self, operation, parameters=None):
if self.connection is None:
self._raiseCursorError(
ProgrammingError, "Attempted operation on closed cursor"
)
self.command = operation
try:
del self.description
except AttributeError:
pass
try:
del self.columnNames
except AttributeError:
pass
fp = fixpickle(parameters)
if verbose > 2:
print(
(
'%s executing "%s" with params=%s'
% (version, operation, repr(parameters))
)
)
result = self.proxy.crsr_execute(self.id, operation, fp)
if result: # an exception was triggered
self._raiseCursorError(result[0], result[1])
def executemany(self, operation, seq_of_parameters):
if self.connection is None:
self._raiseCursorError(
ProgrammingError, "Attempted operation on closed cursor"
)
self.command = operation
try:
del self.description
except AttributeError:
pass
try:
del self.columnNames
except AttributeError:
pass
sq = [fixpickle(x) for x in seq_of_parameters]
if verbose > 2:
print(
(
'%s executemany "%s" with params=%s'
% (version, operation, repr(seq_of_parameters))
)
)
self.proxy.crsr_executemany(self.id, operation, sq)
def nextset(self):
try:
del self.description
except AttributeError:
pass
try:
del self.columnNames
except AttributeError:
pass
if verbose > 2:
print(("%s nextset" % version))
return self.proxy.crsr_nextset(self.id)
def callproc(self, procname, parameters=None):
if self.connection is None:
self._raiseCursorError(
ProgrammingError, "Attempted operation on closed cursor"
)
self.command = procname
try:
del self.description
except AttributeError:
pass
try:
del self.columnNames
except AttributeError:
pass
fp = fixpickle(parameters)
if verbose > 2:
print(
(
'%s callproc "%s" with params=%s'
% (version, procname, repr(parameters))
)
)
return self.proxy.crsr_callproc(self.id, procname, fp)
def fetchone(self):
try:
f1 = self.proxy.crsr_fetchone(self.id)
except _BaseException as e:
self._raiseCursorError(api.DatabaseError, e)
else:
if f1 is None:
return None
self.rs = [f1]
return api.SQLrows(self.rs, 1, self)[
0
] # new object to hold the results of the fetch
def fetchmany(self, size=None):
try:
self.rs = self.proxy.crsr_fetchmany(self.id, size)
if not self.rs:
return []
r = api.SQLrows(self.rs, len(self.rs), self)
return r
except Exception as e:
self._raiseCursorError(api.DatabaseError, e)
def fetchall(self):
try:
self.rs = self.proxy.crsr_fetchall(self.id)
if not self.rs:
return []
return api.SQLrows(self.rs, len(self.rs), self)
except Exception as e:
self._raiseCursorError(api.DatabaseError, e)
def close(self):
if self.connection is None:
return
self.connection._i_am_closing(self) # take me off the connection's cursors list
try:
self.proxy.crsr_close(self.id)
except:
pass
try:
del self.description
except:
pass
try:
del self.rs # let go of the recordset
except:
pass
self.connection = (
None # this will make all future method calls on me throw an exception
)
self.proxy = None
if verbose:
print("adodbapi.remote Closed cursor at %X" % id(self))
def __del__(self):
try:
self.close()
except:
pass
def setinputsizes(self, sizes):
pass
def setoutputsize(self, size, column=None):
pass
lib/adodbapi/schema_table.py Normal file
@@ -0,0 +1,15 @@
"""call using an open ADO connection --> list of table names"""
from . import adodbapi
def names(connection_object):
ado = connection_object.adoConn
schema = ado.OpenSchema(20) # constant = adSchemaTables
tables = []
while not schema.EOF:
name = adodbapi.getIndexedValue(schema.Fields, "TABLE_NAME").Value
tables.append(name)
schema.MoveNext()
del schema
return tables
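# --- illustrative sketch (editor's note): typical use, given an open
# adodbapi connection "conn":
#     table_list = names(conn)  # e.g. ['customers', 'orders']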
lib/adodbapi/setup.py Normal file
@@ -0,0 +1,70 @@
"""adodbapi -- a pure Python PEP 249 DB-API package using Microsoft ADO
Adodbapi can be run on CPython 3.5 and later.
or IronPython version 2.6 and later (in theory, possibly no longer in practice!)
"""
CLASSIFIERS = """\
Development Status :: 5 - Production/Stable
Intended Audience :: Developers
License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)
Operating System :: Microsoft :: Windows
Operating System :: POSIX :: Linux
Programming Language :: Python
Programming Language :: Python :: 3
Programming Language :: SQL
Topic :: Software Development
Topic :: Software Development :: Libraries :: Python Modules
Topic :: Database
"""
NAME = "adodbapi"
MAINTAINER = "Vernon Cole"
MAINTAINER_EMAIL = "vernondcole@gmail.com"
DESCRIPTION = (
"""A pure Python package implementing PEP 249 DB-API using Microsoft ADO."""
)
URL = "http://sourceforge.net/projects/adodbapi"
LICENSE = "LGPL"
CLASSIFIERS = list(filter(None, CLASSIFIERS.split("\n")))
AUTHOR = "Henrik Ekelund, Vernon Cole, et.al."
AUTHOR_EMAIL = "vernondcole@gmail.com"
PLATFORMS = ["Windows", "Linux"]
VERSION = None # in case searching for version fails
a = open("adodbapi.py") # find the version string in the source code
for line in a:
if "__version__" in line:
VERSION = line.split("'")[1]
print('adodbapi version="%s"' % VERSION)
break
a.close()
def setup_package():
from distutils.command.build_py import build_py
from distutils.core import setup
setup(
cmdclass={"build_py": build_py},
name=NAME,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
url=URL,
keywords="database ado odbc dbapi db-api Microsoft SQL",
## download_url=DOWNLOAD_URL,
long_description=open("README.txt").read(),
license=LICENSE,
classifiers=CLASSIFIERS,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
platforms=PLATFORMS,
version=VERSION,
package_dir={"adodbapi": ""},
packages=["adodbapi"],
)
return
if __name__ == "__main__":
setup_package()
File diff suppressed because it is too large
@@ -0,0 +1,221 @@
# Configure this to _YOUR_ environment in order to run the testcases.
"testADOdbapiConfig.py v 2.6.2.B00"
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# #
# # TESTERS:
# #
# # You will need to make numerous modifications to this file
# # to adapt it to your own testing environment.
# #
# # Skip down to the next "# #" line --
# # -- the things you need to change are below it.
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
import platform
import random
import sys
import is64bit
import setuptestframework
import tryconnection
print("\nPython", sys.version)
node = platform.node()
try:
print(
"node=%s, is64bit.os()= %s, is64bit.Python()= %s"
% (node, is64bit.os(), is64bit.Python())
)
except:
pass
if "--help" in sys.argv:
print(
"""Valid command-line switches are:
--package - create a temporary test package, run 2to3 if needed.
--all - run all possible tests
--time - loop over time format tests (including mxdatetime if present)
--nojet - do not test against an ACCESS database file
--mssql - test against Microsoft SQL server
--pg - test against PostgreSQL
--mysql - test against MariaDB
--remote= - test using remote server at= (experimental)
"""
)
exit()
try:
onWindows = bool(sys.getwindowsversion()) # seems to work on all versions of Python
except:
onWindows = False
# create a random name for temporary table names
_alphabet = (
"PYFGCRLAOEUIDHTNSQJKXBMWVZ" # why, yes, I do happen to use a dvorak keyboard
)
tmp = "".join([random.choice(_alphabet) for x in range(9)])
mdb_name = "xx_" + tmp + ".mdb" # generate a non-colliding name for the temporary .mdb
testfolder = setuptestframework.maketemp()
if "--package" in sys.argv:
# create a new adodbapi module -- running 2to3 if needed.
pth = setuptestframework.makeadopackage(testfolder)
else:
# use the adodbapi module in which this file appears
pth = setuptestframework.find_ado_path()
if pth not in sys.path:
# look here _first_ to find modules
sys.path.insert(1, pth)
proxy_host = None
for arg in sys.argv:
if arg.startswith("--remote="):
proxy_host = arg.split("=")[1]
import adodbapi.remote as remote
break
# function to clean up the temporary folder -- calling program must run this function before exit.
cleanup = setuptestframework.getcleanupfunction()
try:
import adodbapi # will (hopefully) be imported using the "pth" discovered above
except SyntaxError:
print(
'\n* * * Are you trying to run Python2 code using Python3? Re-run this test using the "--package" switch.'
)
sys.exit(11)
try:
print(adodbapi.version) # show version
except:
print('"adodbapi.version" not present or not working.')
print(__doc__)
verbose = False
for a in sys.argv:
if a.startswith("--verbose"):
arg = True
try:
arg = int(a.split("=")[1])
except IndexError:
pass
adodbapi.adodbapi.verbose = arg
verbose = arg
doAllTests = "--all" in sys.argv
doAccessTest = not ("--nojet" in sys.argv)
doSqlServerTest = "--mssql" in sys.argv or doAllTests
doMySqlTest = "--mysql" in sys.argv or doAllTests
doPostgresTest = "--pg" in sys.argv or doAllTests
iterateOverTimeTests = ("--time" in sys.argv or doAllTests) and onWindows
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # start your environment setup here v v v
SQL_HOST_NODE = "testsql.2txt.us,1430"
try: # If mx extensions are installed, use mxDateTime
import mx.DateTime
doMxDateTimeTest = True
except:
doMxDateTimeTest = False # Requires eGenixMXExtensions
doTimeTest = True # obsolete python time format
if doAccessTest:
if proxy_host: # determine the (probably remote) database file folder
c = {"macro_find_temp_test_path": ["mdb", mdb_name], "proxy_host": proxy_host}
else:
c = {"mdb": setuptestframework.makemdb(testfolder, mdb_name)}
# macro definition for keyword "provider" using macro "is64bit" -- see documentation
# is64bit will return true for 64 bit versions of Python, so the macro will select the ACE provider
# (If running a remote ADO service, this will test the 64-bitedness of the ADO server.)
c["macro_is64bit"] = [
"provider",
"Microsoft.ACE.OLEDB.12.0", # 64 bit provider
"Microsoft.Jet.OLEDB.4.0",
] # 32 bit provider
connStrAccess = "Provider=%(provider)s;Data Source=%(mdb)s" # ;Mode=ReadWrite;Persist Security Info=False;Jet OLEDB:Bypass UserInfo Validation=True"
print(
" ...Testing ACCESS connection to {} file...".format(
c.get("mdb", "remote .mdb")
)
)
doAccessTest, connStrAccess, dbAccessconnect = tryconnection.try_connection(
verbose, connStrAccess, 10, **c
)
if doSqlServerTest:
c = {
"host": SQL_HOST_NODE, # name of computer with SQL Server
"database": "adotest",
"user": "adotestuser", # None implies Windows security
"password": "Sq1234567",
# macro definition for keyword "security" using macro "auto_security"
"macro_auto_security": "security",
"provider": "MSOLEDBSQL; MARS Connection=True",
}
if proxy_host:
c["proxy_host"] = proxy_host
connStr = "Provider=%(provider)s; Initial Catalog=%(database)s; Data Source=%(host)s; %(security)s;"
print(" ...Testing MS-SQL login to {}...".format(c["host"]))
(
doSqlServerTest,
connStrSQLServer,
dbSqlServerconnect,
) = tryconnection.try_connection(verbose, connStr, 30, **c)
if doMySqlTest:
c = {
"host": "testmysql.2txt.us",
"database": "adodbapitest",
"user": "adotest",
"password": "12345678",
"port": "3330", # note the nonstandard port for obfuscation
"driver": "MySQL ODBC 5.1 Driver",
    }  # or _driver="MySQL ODBC 3.51 Driver"
if proxy_host:
c["proxy_host"] = proxy_host
c["macro_is64bit"] = [
"provider",
"Provider=MSDASQL;",
] # turn on the 64 bit ODBC adapter only if needed
cs = (
"%(provider)sDriver={%(driver)s};Server=%(host)s;Port=3330;"
+ "Database=%(database)s;user=%(user)s;password=%(password)s;Option=3;"
)
print(" ...Testing MySql login to {}...".format(c["host"]))
doMySqlTest, connStrMySql, dbMySqlconnect = tryconnection.try_connection(
verbose, cs, 5, **c
)
if doPostgresTest:
_computername = "testpg.2txt.us"
_databasename = "adotest"
_username = "adotestuser"
_password = "12345678"
kws = {"timeout": 4}
kws["macro_is64bit"] = [
"prov_drv",
"Provider=MSDASQL;Driver={PostgreSQL Unicode(x64)}",
"Driver=PostgreSQL Unicode",
]
# get driver from http://www.postgresql.org/ftp/odbc/versions/
# test using positional and keyword arguments (bad example for real code)
if proxy_host:
kws["proxy_host"] = proxy_host
print(" ...Testing PostgreSQL login to {}...".format(_computername))
doPostgresTest, connStrPostgres, dbPostgresConnect = tryconnection.try_connection(
verbose,
"%(prov_drv)s;Server=%(host)s;Database=%(database)s;uid=%(user)s;pwd=%(password)s;port=5430;", # note nonstandard port
_username,
_password,
_computername,
_databasename,
**kws
)
assert (
doAccessTest or doSqlServerTest or doMySqlTest or doPostgresTest
), "No database engine found for testing"
lib/adodbapi/test/dbapi20.py Normal file
@@ -0,0 +1,939 @@
#!/usr/bin/env python
""" Python DB API 2.0 driver compliance unit test suite.
This software is Public Domain and may be used without restrictions.
"Now we have booze and barflies entering the discussion, plus rumours of
DBAs on drugs... and I won't tell you what flashes through my mind each
time I read the subject line with 'Anal Compliance' in it. All around
this is turning out to be a thoroughly unwholesome unit test."
-- Ian Bicking
"""
__version__ = "$Revision: 1.15.0 $"[11:-2]
__author__ = "Stuart Bishop <stuart@stuartbishop.net>"
import sys
import time
import unittest
if sys.version[0] >= "3": # python 3.x
_BaseException = Exception
def _failUnless(self, expr, msg=None):
self.assertTrue(expr, msg)
else: # python 2.x
from exceptions import Exception as _BaseException
def _failUnless(self, expr, msg=None):
self.failUnless(expr, msg) ## deprecated since Python 2.6
# set this to "True" to follow API 2.0 to the letter
TEST_FOR_NON_IDEMPOTENT_CLOSE = False
# Revision 1.15 2019/11/22 00:50:00 kf7xm
# Make Turn off IDEMPOTENT_CLOSE a proper skipTest
# Revision 1.14 2013/05/20 11:02:05 kf7xm
# Add a literal string to the format insertion test to catch trivial re-format algorithms
# Revision 1.13 2013/05/08 14:31:50 kf7xm
# Quick switch to Turn off IDEMPOTENT_CLOSE test. Also: Silence teardown failure
# Revision 1.12 2009/02/06 03:35:11 kf7xm
# Tested okay with Python 3.0, includes last minute patches from Mark H.
#
# Revision 1.1.1.1.2.1 2008/09/20 19:54:59 rupole
# Include latest changes from main branch
# Updates for py3k
#
# Revision 1.11 2005/01/02 02:41:01 zenzen
# Update author email address
#
# Revision 1.10 2003/10/09 03:14:14 zenzen
# Add test for DB API 2.0 optional extension, where database exceptions
# are exposed as attributes on the Connection object.
#
# Revision 1.9 2003/08/13 01:16:36 zenzen
# Minor tweak from Stefan Fleiter
#
# Revision 1.8 2003/04/10 00:13:25 zenzen
# Changes, as per suggestions by M.-A. Lemburg
# - Add a table prefix, to ensure namespace collisions can always be avoided
#
# Revision 1.7 2003/02/26 23:33:37 zenzen
# Break out DDL into helper functions, as per request by David Rushby
#
# Revision 1.6 2003/02/21 03:04:33 zenzen
# Stuff from Henrik Ekelund:
# added test_None
# added test_nextset & hooks
#
# Revision 1.5 2003/02/17 22:08:43 zenzen
# Implement suggestions and code from Henrik Eklund - test that cursor.arraysize
# defaults to 1 & generic cursor.callproc test added
#
# Revision 1.4 2003/02/15 00:16:33 zenzen
# Changes, as per suggestions and bug reports by M.-A. Lemburg,
# Matthew T. Kromer, Federico Di Gregorio and Daniel Dittmar
# - Class renamed
# - Now a subclass of TestCase, to avoid requiring the driver stub
# to use multiple inheritance
# - Reversed the polarity of buggy test in test_description
# - Test exception heirarchy correctly
# - self.populate is now self._populate(), so if a driver stub
# overrides self.ddl1 this change propogates
# - VARCHAR columns now have a width, which will hopefully make the
# DDL even more portible (this will be reversed if it causes more problems)
# - cursor.rowcount being checked after various execute and fetchXXX methods
# - Check for fetchall and fetchmany returning empty lists after results
# are exhausted (already checking for empty lists if select retrieved
# nothing
# - Fix bugs in test_setoutputsize_basic and test_setinputsizes
#
def str2bytes(sval):
if sys.version_info < (3, 0) and isinstance(sval, str):
sval = sval.decode("latin1")
return sval.encode("latin1") # python 3 make unicode into bytes
class DatabaseAPI20Test(unittest.TestCase):
"""Test a database self.driver for DB API 2.0 compatibility.
This implementation tests Gadfly, but the TestCase
is structured so that other self.drivers can subclass this
    test case to ensure compliance with the DB-API. It is
expected that this TestCase may be expanded in the future
if ambiguities or edge conditions are discovered.
The 'Optional Extensions' are not yet being tested.
self.drivers should subclass this test, overriding setUp, tearDown,
self.driver, connect_args and connect_kw_args. Class specification
should be as follows:
import dbapi20
class mytest(dbapi20.DatabaseAPI20Test):
[...]
Don't 'import DatabaseAPI20Test from dbapi20', or you will
confuse the unit tester - just 'import dbapi20'.
"""
# The self.driver module. This should be the module where the 'connect'
# method is to be found
driver = None
connect_args = () # List of arguments to pass to connect
connect_kw_args = {} # Keyword arguments for connect
table_prefix = "dbapi20test_" # If you need to specify a prefix for tables
ddl1 = "create table %sbooze (name varchar(20))" % table_prefix
ddl2 = "create table %sbarflys (name varchar(20), drink varchar(30))" % table_prefix
xddl1 = "drop table %sbooze" % table_prefix
xddl2 = "drop table %sbarflys" % table_prefix
lowerfunc = "lower" # Name of stored procedure to convert string->lowercase
# Some drivers may need to override these helpers, for example adding
# a 'commit' after the execute.
def executeDDL1(self, cursor):
cursor.execute(self.ddl1)
def executeDDL2(self, cursor):
cursor.execute(self.ddl2)
def setUp(self):
"""self.drivers should override this method to perform required setup
if any is necessary, such as creating the database.
"""
pass
def tearDown(self):
"""self.drivers should override this method to perform required cleanup
if any is necessary, such as deleting the test database.
The default drops the tables that may be created.
"""
try:
con = self._connect()
try:
cur = con.cursor()
for ddl in (self.xddl1, self.xddl2):
try:
cur.execute(ddl)
con.commit()
except self.driver.Error:
# Assume table didn't exist. Other tests will check if
# execute is busted.
pass
finally:
con.close()
except _BaseException:
pass
def _connect(self):
try:
r = self.driver.connect(*self.connect_args, **self.connect_kw_args)
except AttributeError:
self.fail("No connect method found in self.driver module")
return r
def test_connect(self):
con = self._connect()
con.close()
def test_apilevel(self):
try:
# Must exist
apilevel = self.driver.apilevel
# Must equal 2.0
self.assertEqual(apilevel, "2.0")
except AttributeError:
self.fail("Driver doesn't define apilevel")
def test_threadsafety(self):
try:
# Must exist
threadsafety = self.driver.threadsafety
# Must be a valid value
_failUnless(self, threadsafety in (0, 1, 2, 3))
except AttributeError:
self.fail("Driver doesn't define threadsafety")
def test_paramstyle(self):
try:
# Must exist
paramstyle = self.driver.paramstyle
# Must be a valid value
_failUnless(
self, paramstyle in ("qmark", "numeric", "named", "format", "pyformat")
)
except AttributeError:
self.fail("Driver doesn't define paramstyle")
def test_Exceptions(self):
# Make sure required exceptions exist, and are in the
        # defined hierarchy.
        if sys.version[0] == "3":  # under Python 3 StandardError no longer exists
self.assertTrue(issubclass(self.driver.Warning, Exception))
self.assertTrue(issubclass(self.driver.Error, Exception))
else:
self.failUnless(issubclass(self.driver.Warning, Exception))
self.failUnless(issubclass(self.driver.Error, Exception))
_failUnless(self, issubclass(self.driver.InterfaceError, self.driver.Error))
_failUnless(self, issubclass(self.driver.DatabaseError, self.driver.Error))
_failUnless(self, issubclass(self.driver.OperationalError, self.driver.Error))
_failUnless(self, issubclass(self.driver.IntegrityError, self.driver.Error))
_failUnless(self, issubclass(self.driver.InternalError, self.driver.Error))
_failUnless(self, issubclass(self.driver.ProgrammingError, self.driver.Error))
_failUnless(self, issubclass(self.driver.NotSupportedError, self.driver.Error))
def test_ExceptionsAsConnectionAttributes(self):
# OPTIONAL EXTENSION
# Test for the optional DB API 2.0 extension, where the exceptions
# are exposed as attributes on the Connection object
# I figure this optional extension will be implemented by any
# driver author who is using this test suite, so it is enabled
# by default.
con = self._connect()
drv = self.driver
_failUnless(self, con.Warning is drv.Warning)
_failUnless(self, con.Error is drv.Error)
_failUnless(self, con.InterfaceError is drv.InterfaceError)
_failUnless(self, con.DatabaseError is drv.DatabaseError)
_failUnless(self, con.OperationalError is drv.OperationalError)
_failUnless(self, con.IntegrityError is drv.IntegrityError)
_failUnless(self, con.InternalError is drv.InternalError)
_failUnless(self, con.ProgrammingError is drv.ProgrammingError)
_failUnless(self, con.NotSupportedError is drv.NotSupportedError)
def test_commit(self):
con = self._connect()
try:
# Commit must work, even if it doesn't do anything
con.commit()
finally:
con.close()
def test_rollback(self):
con = self._connect()
# If rollback is defined, it should either work or throw
# the documented exception
if hasattr(con, "rollback"):
try:
con.rollback()
except self.driver.NotSupportedError:
pass
def test_cursor(self):
con = self._connect()
try:
cur = con.cursor()
finally:
con.close()
def test_cursor_isolation(self):
con = self._connect()
try:
# Make sure cursors created from the same connection have
# the documented transaction isolation level
cur1 = con.cursor()
cur2 = con.cursor()
self.executeDDL1(cur1)
cur1.execute(
"insert into %sbooze values ('Victoria Bitter')" % (self.table_prefix)
)
cur2.execute("select name from %sbooze" % self.table_prefix)
booze = cur2.fetchall()
self.assertEqual(len(booze), 1)
self.assertEqual(len(booze[0]), 1)
self.assertEqual(booze[0][0], "Victoria Bitter")
finally:
con.close()
def test_description(self):
con = self._connect()
try:
cur = con.cursor()
self.executeDDL1(cur)
self.assertEqual(
cur.description,
None,
"cursor.description should be none after executing a "
"statement that can return no rows (such as DDL)",
)
cur.execute("select name from %sbooze" % self.table_prefix)
self.assertEqual(
len(cur.description), 1, "cursor.description describes too many columns"
)
self.assertEqual(
len(cur.description[0]),
7,
"cursor.description[x] tuples must have 7 elements",
)
self.assertEqual(
cur.description[0][0].lower(),
"name",
"cursor.description[x][0] must return column name",
)
self.assertEqual(
cur.description[0][1],
self.driver.STRING,
"cursor.description[x][1] must return column type. Got %r"
% cur.description[0][1],
)
# Make sure self.description gets reset
self.executeDDL2(cur)
self.assertEqual(
cur.description,
None,
"cursor.description not being set to None when executing "
"no-result statements (eg. DDL)",
)
finally:
con.close()
def test_rowcount(self):
con = self._connect()
try:
cur = con.cursor()
self.executeDDL1(cur)
_failUnless(
self,
cur.rowcount in (-1, 0), # Bug #543885
"cursor.rowcount should be -1 or 0 after executing no-result "
"statements",
)
cur.execute(
"insert into %sbooze values ('Victoria Bitter')" % (self.table_prefix)
)
_failUnless(
self,
cur.rowcount in (-1, 1),
"cursor.rowcount should == number or rows inserted, or "
"set to -1 after executing an insert statement",
)
cur.execute("select name from %sbooze" % self.table_prefix)
_failUnless(
self,
cur.rowcount in (-1, 1),
"cursor.rowcount should == number of rows returned, or "
"set to -1 after executing a select statement",
)
self.executeDDL2(cur)
self.assertEqual(
cur.rowcount,
-1,
"cursor.rowcount not being reset to -1 after executing "
"no-result statements",
)
finally:
con.close()
lower_func = "lower"
def test_callproc(self):
con = self._connect()
try:
cur = con.cursor()
if self.lower_func and hasattr(cur, "callproc"):
r = cur.callproc(self.lower_func, ("FOO",))
self.assertEqual(len(r), 1)
self.assertEqual(r[0], "FOO")
r = cur.fetchall()
self.assertEqual(len(r), 1, "callproc produced no result set")
self.assertEqual(len(r[0]), 1, "callproc produced invalid result set")
self.assertEqual(r[0][0], "foo", "callproc produced invalid results")
finally:
con.close()
def test_close(self):
con = self._connect()
try:
cur = con.cursor()
finally:
con.close()
# cursor.execute should raise an Error if called after connection
# closed
self.assertRaises(self.driver.Error, self.executeDDL1, cur)
# connection.commit should raise an Error if called after connection
# is closed.
self.assertRaises(self.driver.Error, con.commit)
# connection.close should raise an Error if called more than once
#!!! reasonable persons differ about the usefulness of this test and this feature !!!
if TEST_FOR_NON_IDEMPOTENT_CLOSE:
self.assertRaises(self.driver.Error, con.close)
else:
self.skipTest(
"Non-idempotent close is considered a bad thing by some people."
)
def test_execute(self):
con = self._connect()
try:
cur = con.cursor()
self._paraminsert(cur)
finally:
con.close()
def _paraminsert(self, cur):
self.executeDDL2(cur)
cur.execute(
"insert into %sbarflys values ('Victoria Bitter', 'thi%%s :may ca%%(u)se? troub:1e')"
% (self.table_prefix)
)
_failUnless(self, cur.rowcount in (-1, 1))
if self.driver.paramstyle == "qmark":
cur.execute(
"insert into %sbarflys values (?, 'thi%%s :may ca%%(u)se? troub:1e')"
% self.table_prefix,
("Cooper's",),
)
elif self.driver.paramstyle == "numeric":
cur.execute(
"insert into %sbarflys values (:1, 'thi%%s :may ca%%(u)se? troub:1e')"
% self.table_prefix,
("Cooper's",),
)
elif self.driver.paramstyle == "named":
cur.execute(
"insert into %sbarflys values (:beer, 'thi%%s :may ca%%(u)se? troub:1e')"
% self.table_prefix,
{"beer": "Cooper's"},
)
elif self.driver.paramstyle == "format":
cur.execute(
"insert into %sbarflys values (%%s, 'thi%%s :may ca%%(u)se? troub:1e')"
% self.table_prefix,
("Cooper's",),
)
elif self.driver.paramstyle == "pyformat":
cur.execute(
"insert into %sbarflys values (%%(beer)s, 'thi%%s :may ca%%(u)se? troub:1e')"
% self.table_prefix,
{"beer": "Cooper's"},
)
else:
self.fail("Invalid paramstyle")
_failUnless(self, cur.rowcount in (-1, 1))
cur.execute("select name, drink from %sbarflys" % self.table_prefix)
res = cur.fetchall()
self.assertEqual(len(res), 2, "cursor.fetchall returned too few rows")
beers = [res[0][0], res[1][0]]
beers.sort()
self.assertEqual(
beers[0],
"Cooper's",
"cursor.fetchall retrieved incorrect data, or data inserted " "incorrectly",
)
self.assertEqual(
beers[1],
"Victoria Bitter",
"cursor.fetchall retrieved incorrect data, or data inserted " "incorrectly",
)
trouble = "thi%s :may ca%(u)se? troub:1e"
self.assertEqual(
res[0][1],
trouble,
"cursor.fetchall retrieved incorrect data, or data inserted "
"incorrectly. Got=%s, Expected=%s" % (repr(res[0][1]), repr(trouble)),
)
self.assertEqual(
res[1][1],
trouble,
"cursor.fetchall retrieved incorrect data, or data inserted "
"incorrectly. Got=%s, Expected=%s" % (repr(res[1][1]), repr(trouble)),
)
def test_executemany(self):
con = self._connect()
try:
cur = con.cursor()
self.executeDDL1(cur)
largs = [("Cooper's",), ("Boag's",)]
margs = [{"beer": "Cooper's"}, {"beer": "Boag's"}]
if self.driver.paramstyle == "qmark":
cur.executemany(
"insert into %sbooze values (?)" % self.table_prefix, largs
)
elif self.driver.paramstyle == "numeric":
cur.executemany(
"insert into %sbooze values (:1)" % self.table_prefix, largs
)
elif self.driver.paramstyle == "named":
cur.executemany(
"insert into %sbooze values (:beer)" % self.table_prefix, margs
)
elif self.driver.paramstyle == "format":
cur.executemany(
"insert into %sbooze values (%%s)" % self.table_prefix, largs
)
elif self.driver.paramstyle == "pyformat":
cur.executemany(
"insert into %sbooze values (%%(beer)s)" % (self.table_prefix),
margs,
)
else:
self.fail("Unknown paramstyle")
_failUnless(
self,
cur.rowcount in (-1, 2),
"insert using cursor.executemany set cursor.rowcount to "
"incorrect value %r" % cur.rowcount,
)
cur.execute("select name from %sbooze" % self.table_prefix)
res = cur.fetchall()
self.assertEqual(
len(res), 2, "cursor.fetchall retrieved incorrect number of rows"
)
beers = [res[0][0], res[1][0]]
beers.sort()
self.assertEqual(
beers[0], "Boag's", 'incorrect data "%s" retrieved' % beers[0]
)
self.assertEqual(beers[1], "Cooper's", "incorrect data retrieved")
finally:
con.close()
def test_fetchone(self):
con = self._connect()
try:
cur = con.cursor()
# cursor.fetchone should raise an Error if called before
# executing a select-type query
self.assertRaises(self.driver.Error, cur.fetchone)
# cursor.fetchone should raise an Error if called after
# executing a query that cannot return rows
self.executeDDL1(cur)
self.assertRaises(self.driver.Error, cur.fetchone)
cur.execute("select name from %sbooze" % self.table_prefix)
self.assertEqual(
cur.fetchone(),
None,
"cursor.fetchone should return None if a query retrieves " "no rows",
)
_failUnless(self, cur.rowcount in (-1, 0))
# cursor.fetchone should raise an Error if called after
# executing a query that cannot return rows
cur.execute(
"insert into %sbooze values ('Victoria Bitter')" % (self.table_prefix)
)
self.assertRaises(self.driver.Error, cur.fetchone)
cur.execute("select name from %sbooze" % self.table_prefix)
r = cur.fetchone()
self.assertEqual(
len(r), 1, "cursor.fetchone should have retrieved a single row"
)
self.assertEqual(
r[0], "Victoria Bitter", "cursor.fetchone retrieved incorrect data"
)
self.assertEqual(
cur.fetchone(),
None,
"cursor.fetchone should return None if no more rows available",
)
_failUnless(self, cur.rowcount in (-1, 1))
finally:
con.close()
samples = [
"Carlton Cold",
"Carlton Draft",
"Mountain Goat",
"Redback",
"Victoria Bitter",
"XXXX",
]
def _populate(self):
"""Return a list of sql commands to setup the DB for the fetch
tests.
"""
populate = [
"insert into %sbooze values ('%s')" % (self.table_prefix, s)
for s in self.samples
]
return populate
def test_fetchmany(self):
con = self._connect()
try:
cur = con.cursor()
# cursor.fetchmany should raise an Error if called without
# issuing a query
self.assertRaises(self.driver.Error, cur.fetchmany, 4)
self.executeDDL1(cur)
for sql in self._populate():
cur.execute(sql)
cur.execute("select name from %sbooze" % self.table_prefix)
r = cur.fetchmany()
self.assertEqual(
len(r),
1,
"cursor.fetchmany retrieved incorrect number of rows, "
"default of arraysize is one.",
)
cur.arraysize = 10
r = cur.fetchmany(3) # Should get 3 rows
self.assertEqual(
len(r), 3, "cursor.fetchmany retrieved incorrect number of rows"
)
r = cur.fetchmany(4) # Should get 2 more
self.assertEqual(
len(r), 2, "cursor.fetchmany retrieved incorrect number of rows"
)
r = cur.fetchmany(4) # Should be an empty sequence
self.assertEqual(
len(r),
0,
"cursor.fetchmany should return an empty sequence after "
"results are exhausted",
)
_failUnless(self, cur.rowcount in (-1, 6))
# Same as above, using cursor.arraysize
cur.arraysize = 4
cur.execute("select name from %sbooze" % self.table_prefix)
r = cur.fetchmany() # Should get 4 rows
self.assertEqual(
len(r), 4, "cursor.arraysize not being honoured by fetchmany"
)
r = cur.fetchmany() # Should get 2 more
self.assertEqual(len(r), 2)
r = cur.fetchmany() # Should be an empty sequence
self.assertEqual(len(r), 0)
_failUnless(self, cur.rowcount in (-1, 6))
cur.arraysize = 6
cur.execute("select name from %sbooze" % self.table_prefix)
rows = cur.fetchmany() # Should get all rows
_failUnless(self, cur.rowcount in (-1, 6))
self.assertEqual(len(rows), 6)
rows = [r[0] for r in rows]
rows.sort()
# Make sure we get the right data back out
for i in range(0, 6):
self.assertEqual(
rows[i],
self.samples[i],
"incorrect data retrieved by cursor.fetchmany",
)
rows = cur.fetchmany() # Should return an empty list
self.assertEqual(
len(rows),
0,
"cursor.fetchmany should return an empty sequence if "
"called after the whole result set has been fetched",
)
_failUnless(self, cur.rowcount in (-1, 6))
self.executeDDL2(cur)
cur.execute("select name from %sbarflys" % self.table_prefix)
r = cur.fetchmany() # Should get empty sequence
self.assertEqual(
len(r),
0,
"cursor.fetchmany should return an empty sequence if "
"query retrieved no rows",
)
_failUnless(self, cur.rowcount in (-1, 0))
finally:
con.close()
def test_fetchall(self):
con = self._connect()
try:
cur = con.cursor()
# cursor.fetchall should raise an Error if called
# without executing a query that may return rows (such
# as a select)
self.assertRaises(self.driver.Error, cur.fetchall)
self.executeDDL1(cur)
for sql in self._populate():
cur.execute(sql)
# cursor.fetchall should raise an Error if called
# after executing a statement that cannot return rows
self.assertRaises(self.driver.Error, cur.fetchall)
cur.execute("select name from %sbooze" % self.table_prefix)
rows = cur.fetchall()
_failUnless(self, cur.rowcount in (-1, len(self.samples)))
self.assertEqual(
len(rows),
len(self.samples),
"cursor.fetchall did not retrieve all rows",
)
rows = [r[0] for r in rows]
rows.sort()
for i in range(0, len(self.samples)):
self.assertEqual(
rows[i], self.samples[i], "cursor.fetchall retrieved incorrect rows"
)
rows = cur.fetchall()
self.assertEqual(
len(rows),
0,
"cursor.fetchall should return an empty list if called "
"after the whole result set has been fetched",
)
_failUnless(self, cur.rowcount in (-1, len(self.samples)))
self.executeDDL2(cur)
cur.execute("select name from %sbarflys" % self.table_prefix)
rows = cur.fetchall()
_failUnless(self, cur.rowcount in (-1, 0))
self.assertEqual(
len(rows),
0,
"cursor.fetchall should return an empty list if "
"a select query returns no rows",
)
finally:
con.close()
def test_mixedfetch(self):
con = self._connect()
try:
cur = con.cursor()
self.executeDDL1(cur)
for sql in self._populate():
cur.execute(sql)
cur.execute("select name from %sbooze" % self.table_prefix)
rows1 = cur.fetchone()
rows23 = cur.fetchmany(2)
rows4 = cur.fetchone()
rows56 = cur.fetchall()
_failUnless(self, cur.rowcount in (-1, 6))
self.assertEqual(
len(rows23), 2, "fetchmany returned incorrect number of rows"
)
self.assertEqual(
len(rows56), 2, "fetchall returned incorrect number of rows"
)
rows = [rows1[0]]
rows.extend([rows23[0][0], rows23[1][0]])
rows.append(rows4[0])
rows.extend([rows56[0][0], rows56[1][0]])
rows.sort()
for i in range(0, len(self.samples)):
self.assertEqual(
rows[i], self.samples[i], "incorrect data retrieved or inserted"
)
finally:
con.close()
def help_nextset_setUp(self, cur):
"""Should create a procedure called deleteme
that returns two result sets, first the
number of rows in booze then "name from booze"
"""
raise NotImplementedError("Helper not implemented")
# sql="""
# create procedure deleteme as
# begin
# select count(*) from booze
# select name from booze
# end
# """
# cur.execute(sql)
def help_nextset_tearDown(self, cur):
"If cleaning up is needed after nextSetTest"
raise NotImplementedError("Helper not implemented")
# cur.execute("drop procedure deleteme")
def test_nextset(self):
con = self._connect()
try:
cur = con.cursor()
if not hasattr(cur, "nextset"):
return
try:
self.executeDDL1(cur)
for sql in self._populate():
cur.execute(sql)
self.help_nextset_setUp(cur)
cur.callproc("deleteme")
numberofrows = cur.fetchone()
assert numberofrows[0] == len(self.samples)
assert cur.nextset()
names = cur.fetchall()
assert len(names) == len(self.samples)
s = cur.nextset()
assert s is None, "No more return sets, should return None"
finally:
self.help_nextset_tearDown(cur)
finally:
con.close()
def test_nextset(self):
raise NotImplementedError("Drivers need to override this test")
def test_arraysize(self):
# Not much here - rest of the tests for this are in test_fetchmany
con = self._connect()
try:
cur = con.cursor()
_failUnless(
self, hasattr(cur, "arraysize"), "cursor.arraysize must be defined"
)
finally:
con.close()
def test_setinputsizes(self):
con = self._connect()
try:
cur = con.cursor()
cur.setinputsizes((25,))
self._paraminsert(cur) # Make sure cursor still works
finally:
con.close()
def test_setoutputsize_basic(self):
# Basic test is to make sure setoutputsize doesn't blow up
con = self._connect()
try:
cur = con.cursor()
cur.setoutputsize(1000)
cur.setoutputsize(2000, 0)
self._paraminsert(cur) # Make sure the cursor still works
finally:
con.close()
def test_setoutputsize(self):
# Real test for setoutputsize is driver dependent
raise NotImplementedError("Drivers need to override this test")
def test_None(self):
con = self._connect()
try:
cur = con.cursor()
self.executeDDL1(cur)
cur.execute("insert into %sbooze values (NULL)" % self.table_prefix)
cur.execute("select name from %sbooze" % self.table_prefix)
r = cur.fetchall()
self.assertEqual(len(r), 1)
self.assertEqual(len(r[0]), 1)
self.assertEqual(r[0][0], None, "NULL value not returned as None")
finally:
con.close()
def test_Date(self):
d1 = self.driver.Date(2002, 12, 25)
d2 = self.driver.DateFromTicks(time.mktime((2002, 12, 25, 0, 0, 0, 0, 0, 0)))
# Can we assume this? API doesn't specify, but it seems implied
# self.assertEqual(str(d1),str(d2))
def test_Time(self):
t1 = self.driver.Time(13, 45, 30)
t2 = self.driver.TimeFromTicks(time.mktime((2001, 1, 1, 13, 45, 30, 0, 0, 0)))
# Can we assume this? API doesn't specify, but it seems implied
# self.assertEqual(str(t1),str(t2))
def test_Timestamp(self):
t1 = self.driver.Timestamp(2002, 12, 25, 13, 45, 30)
t2 = self.driver.TimestampFromTicks(
time.mktime((2002, 12, 25, 13, 45, 30, 0, 0, 0))
)
# Can we assume this? API doesn't specify, but it seems implied
# self.assertEqual(str(t1),str(t2))
def test_Binary(self):
b = self.driver.Binary(str2bytes("Something"))
b = self.driver.Binary(str2bytes(""))
def test_STRING(self):
_failUnless(
self, hasattr(self.driver, "STRING"), "module.STRING must be defined"
)
def test_BINARY(self):
_failUnless(
self, hasattr(self.driver, "BINARY"), "module.BINARY must be defined."
)
def test_NUMBER(self):
_failUnless(
self, hasattr(self.driver, "NUMBER"), "module.NUMBER must be defined."
)
def test_DATETIME(self):
_failUnless(
self, hasattr(self.driver, "DATETIME"), "module.DATETIME must be defined."
)
def test_ROWID(self):
_failUnless(
self, hasattr(self.driver, "ROWID"), "module.ROWID must be defined."
)
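# Hedged illustration (not part of the vendored suite): _paraminsert and
# test_executemany above dispatch on driver.paramstyle, while test_description
# and test_fetchmany pin down cursor.description and arraysize. The sketch
# below replays those contracts against sqlite3, chosen only because it ships
# with CPython (paramstyle "qmark"); any PEP 249 driver would do.
def _demo_dbapi_contracts():
    import sqlite3

    placeholders = {
        "qmark": "?",
        "numeric": ":1",
        "named": ":beer",
        "format": "%s",
        "pyformat": "%(beer)s",
    }
    marker = placeholders[sqlite3.paramstyle]  # "?" for sqlite3
    params = (
        {"beer": "Cooper's"}
        if sqlite3.paramstyle in ("named", "pyformat")
        else ("Cooper's",)
    )
    con = sqlite3.connect(":memory:")
    cur = con.cursor()
    cur.execute("create table booze (name varchar(20))")
    cur.execute("insert into booze values (%s)" % marker, params)
    cur.execute("select name from booze")
    print(cur.description)  # 7-element tuples, as test_description expects
    cur.arraysize = 2
    print(cur.fetchmany())  # at most arraysize rows, as test_fetchmany expects
    con.close()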

View file

@ -0,0 +1,41 @@
"""is64bit.Python() --> boolean value of detected Python word size. is64bit.os() --> os build version"""
import sys
def Python():
if sys.platform == "cli": # IronPython
import System
return System.IntPtr.Size == 8
else:
try:
return sys.maxsize > 2147483647
except AttributeError:
return sys.maxint > 2147483647
def os():
import platform
pm = platform.machine()
if pm != ".." and pm.endswith("64"): # recent Python (not Iron)
return True
else:
import os
if "PROCESSOR_ARCHITEW6432" in os.environ:
return True # 32 bit program running on 64 bit Windows
try:
return os.environ["PROCESSOR_ARCHITECTURE"].endswith(
"64"
) # 64 bit Windows 64 bit program
except KeyError:  # os.environ lookup raises KeyError, not IndexError
pass # not Windows
try:
return "64" in platform.architecture()[0] # this often works in Linux
except:
return False # is an older version of Python, assume also an older os (best we can guess)
if __name__ == "__main__":
print("is64bit.Python() =", Python(), "is64bit.os() =", os())

View file

@ -0,0 +1,134 @@
#!/usr/bin/python2
# Configure this in order to run the testcases.
"setuptestframework.py v 2.6.0.8"
import os
import shutil
import sys
import tempfile
try:
OSErrors = (WindowsError, OSError)
except NameError: # not running on Windows
OSErrors = OSError
def maketemp():
temphome = tempfile.gettempdir()
tempdir = os.path.join(temphome, "adodbapi_test")
try:
os.mkdir(tempdir)
except:
pass
return tempdir
def _cleanup_function(testfolder, mdb_name):
try:
os.unlink(os.path.join(testfolder, mdb_name))
except:
pass # mdb database not present
try:
shutil.rmtree(testfolder)
print(" cleaned up folder", testfolder)
except:
pass # test package not present
def getcleanupfunction():
return _cleanup_function
def find_ado_path():
adoName = os.path.normpath(os.getcwd() + "/../../adodbapi.py")
adoPackage = os.path.dirname(adoName)
return adoPackage
# make a new package directory for the test copy of ado
def makeadopackage(testfolder):
adoName = os.path.normpath(os.getcwd() + "/../adodbapi.py")
adoPath = os.path.dirname(adoName)
if os.path.exists(adoName):
newpackage = os.path.join(testfolder, "adodbapi")
try:
os.mkdir(newpackage)
except OSErrors:
print(
"*Note: temporary adodbapi package already exists: may be two versions running?"
)
for f in os.listdir(adoPath):
if f.endswith(".py"):
shutil.copy(os.path.join(adoPath, f), newpackage)
if sys.version_info >= (3, 0): # only when running Py3.n
save = sys.stdout
sys.stdout = None
from lib2to3.main import main # use 2to3 to make test package
main("lib2to3.fixes", args=["-n", "-w", newpackage])
sys.stdout = save
return testfolder
else:
raise EnvironmentError("Connot find source of adodbapi to test.")
def makemdb(testfolder, mdb_name):
# following setup code borrowed from pywin32 odbc test suite
# kindly contributed by Frank Millman.
import os
_accessdatasource = os.path.join(testfolder, mdb_name)
if os.path.isfile(_accessdatasource):
print("using JET database=", _accessdatasource)
else:
try:
from win32com.client import constants
from win32com.client.gencache import EnsureDispatch
win32 = True
except ImportError: # perhaps we are running IronPython
win32 = False # iron Python
try:
from System import Activator, Type
except:
pass
# Create a brand-new database - what is the story with these?
dbe = None
for suffix in (".36", ".35", ".30"):
try:
if win32:
dbe = EnsureDispatch("DAO.DBEngine" + suffix)
else:
type = Type.GetTypeFromProgID("DAO.DBEngine" + suffix)
dbe = Activator.CreateInstance(type)
break
except:
pass
if dbe:
print(" ...Creating ACCESS db at " + _accessdatasource)
if win32:
workspace = dbe.Workspaces(0)
newdb = workspace.CreateDatabase(
_accessdatasource, constants.dbLangGeneral, constants.dbVersion40
)
else:
newdb = dbe.CreateDatabase(
_accessdatasource, ";LANGID=0x0409;CP=1252;COUNTRY=0"
)
newdb.Close()
else:
print(" ...copying test ACCESS db to " + _accessdatasource)
mdbName = os.path.abspath(
os.path.join(os.path.dirname(__file__), "..", "examples", "test.mdb")
)
import shutil
shutil.copy(mdbName, _accessdatasource)
return _accessdatasource
if __name__ == "__main__":
print("Setting up a Jet database for server to use for remote testing...")
temp = maketemp()
makemdb(temp, "server_test.mdb")

View file

@ -0,0 +1,200 @@
print("This module depends on the dbapi20 compliance tests created by Stuart Bishop")
print("(see db-sig mailing list history for info)")
import platform
import sys
import unittest
import dbapi20
import setuptestframework
testfolder = setuptestframework.maketemp()
if "--package" in sys.argv:
pth = setuptestframework.makeadopackage(testfolder)
sys.argv.remove("--package")
else:
pth = setuptestframework.find_ado_path()
if pth not in sys.path:
sys.path.insert(1, pth)
# function to clean up the temporary folder -- calling program must run this function before exit.
cleanup = setuptestframework.getcleanupfunction()
import adodbapi
import adodbapi.is64bit as is64bit
db = adodbapi
if "--verbose" in sys.argv:
db.adodbapi.verbose = 3
print(adodbapi.version)
print("Tested with dbapi20 %s" % dbapi20.__version__)
try:
onWindows = bool(sys.getwindowsversion()) # seems to work on all versions of Python
except:
onWindows = False
node = platform.node()
conn_kws = {}
host = "testsql.2txt.us,1430" # if None, will use macro to fill in node name
instance = r"%s\SQLEXPRESS"
conn_kws["name"] = "adotest"
conn_kws["user"] = "adotestuser" # None implies Windows security
conn_kws["password"] = "Sq1234567"
# macro definition for keyword "security" using macro "auto_security"
conn_kws["macro_auto_security"] = "security"
if host is None:
conn_kws["macro_getnode"] = ["host", instance]
else:
conn_kws["host"] = host
conn_kws[
"provider"
] = "Provider=MSOLEDBSQL;DataTypeCompatibility=80;MARS Connection=True;"
connStr = "%(provider)s; %(security)s; Initial Catalog=%(name)s;Data Source=%(host)s"
if onWindows and node != "z-PC":
pass # default should make a local SQL Server connection
elif node == "xxx": # try Postgres database
_computername = "25.223.161.222"
_databasename = "adotest"
_username = "adotestuser"
_password = "12345678"
_driver = "PostgreSQL Unicode"
_provider = ""
connStr = "%sDriver={%s};Server=%s;Database=%s;uid=%s;pwd=%s;" % (
_provider,
_driver,
_computername,
_databasename,
_username,
_password,
)
elif node == "yyy": # ACCESS data base is known to fail some tests.
if is64bit.Python():
driver = "Microsoft.ACE.OLEDB.12.0"
else:
driver = "Microsoft.Jet.OLEDB.4.0"
testmdb = setuptestframework.makemdb(testfolder, "test.mdb")  # second arg assumed; makemdb requires an mdb file name
connStr = r"Provider=%s;Data Source=%s" % (driver, testmdb)
else: # try a remote connection to an SQL server
conn_kws["proxy_host"] = "25.44.77.176"
import adodbapi.remote
db = adodbapi.remote
print("Using Connection String like=%s" % connStr)
print("Keywords=%s" % repr(conn_kws))
class test_adodbapi(dbapi20.DatabaseAPI20Test):
driver = db
connect_args = (connStr,)
connect_kw_args = conn_kws
def __init__(self, arg):
dbapi20.DatabaseAPI20Test.__init__(self, arg)
def getTestMethodName(self):
return self.id().split(".")[-1]
def setUp(self):
# Call superclass setUp In case this does something in the
# future
dbapi20.DatabaseAPI20Test.setUp(self)
if self.getTestMethodName() == "test_callproc":
con = self._connect()
engine = con.dbms_name
## print('Using database Engine=%s' % engine) ##
if engine != "MS Jet":
sql = """
create procedure templower
@theData varchar(50)
as
select lower(@theData)
"""
else: # Jet
sql = """
create procedure templower
(theData varchar(50))
as
select lower(theData);
"""
cur = con.cursor()
try:
cur.execute(sql)
con.commit()
except:
pass
cur.close()
con.close()
self.lower_func = "templower"
def tearDown(self):
if self.getTestMethodName() == "test_callproc":
con = self._connect()
cur = con.cursor()
try:
cur.execute("drop procedure templower")
except:
pass
con.commit()
dbapi20.DatabaseAPI20Test.tearDown(self)
def help_nextset_setUp(self, cur):
"Should create a procedure called deleteme"
'that returns two result sets, first the number of rows in booze then "name from booze"'
sql = """
create procedure deleteme as
begin
select count(*) from %sbooze
select name from %sbooze
end
""" % (
self.table_prefix,
self.table_prefix,
)
cur.execute(sql)
def help_nextset_tearDown(self, cur):
"If cleaning up is needed after nextSetTest"
try:
cur.execute("drop procedure deleteme")
except:
pass
def test_nextset(self):
con = self._connect()
try:
cur = con.cursor()
stmts = [self.ddl1] + self._populate()
for sql in stmts:
cur.execute(sql)
self.help_nextset_setUp(cur)
cur.callproc("deleteme")
numberofrows = cur.fetchone()
assert numberofrows[0] == 6
assert cur.nextset()
names = cur.fetchall()
assert len(names) == len(self.samples)
s = cur.nextset()
assert s is None, "No more return sets, should return None"
finally:
try:
self.help_nextset_tearDown(cur)
finally:
con.close()
def test_setoutputsize(self):
pass
if __name__ == "__main__":
unittest.main()
cleanup(testfolder, None)

View file

@ -0,0 +1,33 @@
remote = False # automatic testing of remote access has been removed here
def try_connection(verbose, *args, **kwargs):
import adodbapi
dbconnect = adodbapi.connect
try:
s = dbconnect(*args, **kwargs) # connect to server
if verbose:
print("Connected to:", s.connection_string)
print("which has tables:", s.get_table_names())
s.close() # thanks, it worked, goodbye
except adodbapi.DatabaseError as inst:
print(inst.args[0]) # should be the error message
print("***Failed getting connection using=", repr(args), repr(kwargs))
return False, (args, kwargs), None
print(" (successful)")
return True, (args, kwargs, remote), dbconnect
def try_operation_with_expected_exception(
expected_exception_list, some_function, *args, **kwargs
):
try:
some_function(*args, **kwargs)
except expected_exception_list as e:
return True, e
except:
raise # an exception other than the expected occurred
return False, "The expected exception did not occur"

View file

@ -0,0 +1,396 @@
import math
import sys
from dataclasses import dataclass
from datetime import timezone
from typing import TYPE_CHECKING, Any, Callable, Iterator, Optional, SupportsFloat, SupportsIndex, TypeVar, Union
if sys.version_info < (3, 8):
from typing_extensions import Protocol, runtime_checkable
else:
from typing import Protocol, runtime_checkable
if sys.version_info < (3, 9):
from typing_extensions import Annotated, Literal
else:
from typing import Annotated, Literal
if sys.version_info < (3, 10):
EllipsisType = type(Ellipsis)
KW_ONLY = {}
SLOTS = {}
else:
from types import EllipsisType
KW_ONLY = {"kw_only": True}
SLOTS = {"slots": True}
__all__ = (
'BaseMetadata',
'GroupedMetadata',
'Gt',
'Ge',
'Lt',
'Le',
'Interval',
'MultipleOf',
'MinLen',
'MaxLen',
'Len',
'Timezone',
'Predicate',
'LowerCase',
'UpperCase',
'IsDigits',
'IsFinite',
'IsNotFinite',
'IsNan',
'IsNotNan',
'IsInfinite',
'IsNotInfinite',
'doc',
'DocInfo',
'__version__',
)
__version__ = '0.6.0'
T = TypeVar('T')
# arguments that start with __ are considered
# positional only
# see https://peps.python.org/pep-0484/#positional-only-arguments
class SupportsGt(Protocol):
def __gt__(self: T, __other: T) -> bool:
...
class SupportsGe(Protocol):
def __ge__(self: T, __other: T) -> bool:
...
class SupportsLt(Protocol):
def __lt__(self: T, __other: T) -> bool:
...
class SupportsLe(Protocol):
def __le__(self: T, __other: T) -> bool:
...
class SupportsMod(Protocol):
def __mod__(self: T, __other: T) -> T:
...
class SupportsDiv(Protocol):
def __div__(self: T, __other: T) -> T:
...
class BaseMetadata:
"""Base class for all metadata.
This exists mainly so that implementers
can do `isinstance(..., BaseMetadata)` while traversing field annotations.
"""
__slots__ = ()
@dataclass(frozen=True, **SLOTS)
class Gt(BaseMetadata):
"""Gt(gt=x) implies that the value must be greater than x.
It can be used with any type that supports the ``>`` operator,
including numbers, dates and times, strings, sets, and so on.
"""
gt: SupportsGt
@dataclass(frozen=True, **SLOTS)
class Ge(BaseMetadata):
"""Ge(ge=x) implies that the value must be greater than or equal to x.
It can be used with any type that supports the ``>=`` operator,
including numbers, dates and times, strings, sets, and so on.
"""
ge: SupportsGe
@dataclass(frozen=True, **SLOTS)
class Lt(BaseMetadata):
"""Lt(lt=x) implies that the value must be less than x.
It can be used with any type that supports the ``<`` operator,
including numbers, dates and times, strings, sets, and so on.
"""
lt: SupportsLt
@dataclass(frozen=True, **SLOTS)
class Le(BaseMetadata):
"""Le(le=x) implies that the value must be less than or equal to x.
It can be used with any type that supports the ``<=`` operator,
including numbers, dates and times, strings, sets, and so on.
"""
le: SupportsLe
@runtime_checkable
class GroupedMetadata(Protocol):
"""A grouping of multiple BaseMetadata objects.
`GroupedMetadata` on its own is not metadata and has no meaning.
All of the constraints and metadata should be fully expressible
in terms of the `BaseMetadata`'s returned by `GroupedMetadata.__iter__()`.
Concrete implementations should override `GroupedMetadata.__iter__()`
to add their own metadata.
For example:
>>> @dataclass
>>> class Field(GroupedMetadata):
>>> gt: float | None = None
>>> description: str | None = None
...
>>> def __iter__(self) -> Iterable[BaseMetadata]:
>>> if self.gt is not None:
>>> yield Gt(self.gt)
>>> if self.description is not None:
>>> yield Description(self.gt)
Also see the implementation of `Interval` below for an example.
Parsers should recognize this and unpack it so that it can be used
both with and without unpacking:
- `Annotated[int, Field(...)]` (parser must unpack Field)
- `Annotated[int, *Field(...)]` (PEP-646)
""" # noqa: trailing-whitespace
@property
def __is_annotated_types_grouped_metadata__(self) -> Literal[True]:
return True
def __iter__(self) -> Iterator[BaseMetadata]:
...
if not TYPE_CHECKING:
__slots__ = () # allow subclasses to use slots
def __init_subclass__(cls, *args: Any, **kwargs: Any) -> None:
# Basic ABC like functionality without the complexity of an ABC
super().__init_subclass__(*args, **kwargs)
if cls.__iter__ is GroupedMetadata.__iter__:
raise TypeError("Can't subclass GroupedMetadata without implementing __iter__")
def __iter__(self) -> Iterator[BaseMetadata]: # noqa: F811
raise NotImplementedError # more helpful than "None has no attribute..." type errors
@dataclass(frozen=True, **KW_ONLY, **SLOTS)
class Interval(GroupedMetadata):
"""Interval can express inclusive or exclusive bounds with a single object.
It accepts keyword arguments ``gt``, ``ge``, ``lt``, and/or ``le``, which
are interpreted the same way as the single-bound constraints.
"""
gt: Union[SupportsGt, None] = None
ge: Union[SupportsGe, None] = None
lt: Union[SupportsLt, None] = None
le: Union[SupportsLe, None] = None
def __iter__(self) -> Iterator[BaseMetadata]:
"""Unpack an Interval into zero or more single-bounds."""
if self.gt is not None:
yield Gt(self.gt)
if self.ge is not None:
yield Ge(self.ge)
if self.lt is not None:
yield Lt(self.lt)
if self.le is not None:
yield Le(self.le)
@dataclass(frozen=True, **SLOTS)
class MultipleOf(BaseMetadata):
"""MultipleOf(multiple_of=x) might be interpreted in two ways:
1. Python semantics, implying ``value % multiple_of == 0``, or
2. JSONschema semantics, where ``int(value / multiple_of) == value / multiple_of``
We encourage users to be aware of these two common interpretations,
and libraries to carefully document which they implement.
"""
multiple_of: Union[SupportsDiv, SupportsMod]
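# Hedged worked example (_multiple_of_demo is an illustrative name, not
# public API): the two readings above agree for exact types such as int but
# can diverge for floats, where binary rounding makes ``value % multiple_of``
# nonzero.
def _multiple_of_demo() -> None:
    assert 9 % 3 == 0  # Python reading accepts 9 as a multiple of 3
    assert int(9 / 3) == 9 / 3  # JSON Schema reading agrees for ints
    assert 0.3 % 0.1 != 0  # Python reading rejects 0.3 as a multiple of 0.1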
@dataclass(frozen=True, **SLOTS)
class MinLen(BaseMetadata):
"""
MinLen() implies minimum inclusive length,
e.g. ``len(value) >= min_length``.
"""
min_length: Annotated[int, Ge(0)]
@dataclass(frozen=True, **SLOTS)
class MaxLen(BaseMetadata):
"""
MaxLen() implies maximum inclusive length,
e.g. ``len(value) <= max_length``.
"""
max_length: Annotated[int, Ge(0)]
@dataclass(frozen=True, **SLOTS)
class Len(GroupedMetadata):
"""
Len() implies that ``min_length <= len(value) <= max_length``.
Upper bound may be omitted or ``None`` to indicate no upper length bound.
"""
min_length: Annotated[int, Ge(0)] = 0
max_length: Optional[Annotated[int, Ge(0)]] = None
def __iter__(self) -> Iterator[BaseMetadata]:
"""Unpack a Len into zone or more single-bounds."""
if self.min_length > 0:
yield MinLen(self.min_length)
if self.max_length is not None:
yield MaxLen(self.max_length)
@dataclass(frozen=True, **SLOTS)
class Timezone(BaseMetadata):
"""Timezone(tz=...) requires a datetime to be aware (or ``tz=None``, naive).
``Annotated[datetime, Timezone(None)]`` must be a naive datetime.
``Timezone[...]`` (the ellipsis literal) expresses that the datetime must be
tz-aware but any timezone is allowed.
You may also pass a specific timezone string or timezone object such as
``Timezone(timezone.utc)`` or ``Timezone("Africa/Abidjan")`` to express that
you only allow a specific timezone, though we note that this is often
a symptom of poor design.
"""
tz: Union[str, timezone, EllipsisType, None]
@dataclass(frozen=True, **SLOTS)
class Predicate(BaseMetadata):
"""``Predicate(func: Callable)`` implies `func(value)` is truthy for valid values.
Users should prefer statically inspectable metadata, but if you need the full
power and flexibility of arbitrary runtime predicates... here it is.
We provide a few predefined predicates for common string constraints:
``IsLower = Predicate(str.islower)``, ``IsUpper = Predicate(str.isupper)``, and
``IsDigit = Predicate(str.isdigit)``. Users are encouraged to use methods which
can be given special handling, and avoid indirection like ``lambda s: s.lower()``.
Some libraries might have special logic to handle certain predicates, e.g. by
checking for `str.isdigit` and using its presence to both call custom logic to
enforce digit-only strings, and customise some generated external schema.
We do not specify what behaviour should be expected for predicates that raise
an exception. For example `Annotated[int, Predicate(str.isdigit)]` might silently
skip invalid constraints, or statically raise an error; or it might try calling it
and then propagate or discard the resulting exception.
"""
func: Callable[[Any], bool]
@dataclass
class Not:
func: Callable[[Any], bool]
def __call__(self, __v: Any) -> bool:
return not self.func(__v)
_StrType = TypeVar("_StrType", bound=str)
LowerCase = Annotated[_StrType, Predicate(str.islower)]
"""
Return True if the string is a lowercase string, False otherwise.
A string is lowercase if all cased characters in the string are lowercase and there is at least one cased character in the string.
""" # noqa: E501
UpperCase = Annotated[_StrType, Predicate(str.isupper)]
"""
Return True if the string is an uppercase string, False otherwise.
A string is uppercase if all cased characters in the string are uppercase and there is at least one cased character in the string.
""" # noqa: E501
IsDigits = Annotated[_StrType, Predicate(str.isdigit)]
"""
Return True if the string is a digit string, False otherwise.
A string is a digit string if all characters in the string are digits and there is at least one character in the string.
""" # noqa: E501
IsAscii = Annotated[_StrType, Predicate(str.isascii)]
"""
Return True if all characters in the string are ASCII, False otherwise.
ASCII characters have code points in the range U+0000-U+007F. Empty string is ASCII too.
"""
_NumericType = TypeVar('_NumericType', bound=Union[SupportsFloat, SupportsIndex])
IsFinite = Annotated[_NumericType, Predicate(math.isfinite)]
"""Return True if x is neither an infinity nor a NaN, and False otherwise."""
IsNotFinite = Annotated[_NumericType, Predicate(Not(math.isfinite))]
"""Return True if x is one of infinity or NaN, and False otherwise"""
IsNan = Annotated[_NumericType, Predicate(math.isnan)]
"""Return True if x is a NaN (not a number), and False otherwise."""
IsNotNan = Annotated[_NumericType, Predicate(Not(math.isnan))]
"""Return True if x is anything but NaN (not a number), and False otherwise."""
IsInfinite = Annotated[_NumericType, Predicate(math.isinf)]
"""Return True if x is a positive or negative infinity, and False otherwise."""
IsNotInfinite = Annotated[_NumericType, Predicate(Not(math.isinf))]
"""Return True if x is neither a positive or negative infinity, and False otherwise."""
try:
from typing_extensions import DocInfo, doc # type: ignore [attr-defined]
except ImportError:
@dataclass(frozen=True, **SLOTS)
class DocInfo: # type: ignore [no-redef]
""" "
The return value of doc(), mainly to be used by tools that want to extract the
Annotated documentation at runtime.
"""
documentation: str
"""The documentation string passed to doc()."""
def doc(
documentation: str,
) -> DocInfo:
"""
Add documentation to a type annotation inside of Annotated.
For example:
>>> def hi(name: Annotated[int, doc("The name of the user")]) -> None: ...
"""
return DocInfo(documentation)
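# Hedged consumer sketch (_check and _flatten are assumed names, not library
# API): the GroupedMetadata docstring above says parsers should unpack grouped
# constraints. A minimal bound-checker honouring Gt/Ge/Lt/Le, including those
# reached by unpacking an Interval, could look like this; metadata it does not
# know about (MultipleOf, Predicate, ...) is silently ignored.
def _flatten(metadata):
    for m in metadata:
        if isinstance(m, GroupedMetadata):
            yield from _flatten(m)  # unpack Interval, Len, custom groups
        else:
            yield m

def _check(value, *metadata) -> bool:
    ops = {
        Gt: lambda v, m: v > m.gt,
        Ge: lambda v, m: v >= m.ge,
        Lt: lambda v, m: v < m.lt,
        Le: lambda v, m: v <= m.le,
    }
    return all(
        op(value, m)
        for m in _flatten(metadata)
        for cls, op in ops.items()
        if isinstance(m, cls)
    )

# e.g. _check(5, Interval(gt=4, le=10)) -> True; _check(4, Gt(4)) -> False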

View file

@ -0,0 +1,147 @@
import math
import sys
from datetime import date, datetime, timedelta, timezone
from decimal import Decimal
from typing import Any, Dict, Iterable, Iterator, List, NamedTuple, Set, Tuple
if sys.version_info < (3, 9):
from typing_extensions import Annotated
else:
from typing import Annotated
import annotated_types as at
class Case(NamedTuple):
"""
A test case for `annotated_types`.
"""
annotation: Any
valid_cases: Iterable[Any]
invalid_cases: Iterable[Any]
def cases() -> Iterable[Case]:
# Gt, Ge, Lt, Le
yield Case(Annotated[int, at.Gt(4)], (5, 6, 1000), (4, 0, -1))
yield Case(Annotated[float, at.Gt(0.5)], (0.6, 0.7, 0.8, 0.9), (0.5, 0.0, -0.1))
yield Case(
Annotated[datetime, at.Gt(datetime(2000, 1, 1))],
[datetime(2000, 1, 2), datetime(2000, 1, 3)],
[datetime(2000, 1, 1), datetime(1999, 12, 31)],
)
yield Case(
Annotated[datetime, at.Gt(date(2000, 1, 1))],
[date(2000, 1, 2), date(2000, 1, 3)],
[date(2000, 1, 1), date(1999, 12, 31)],
)
yield Case(
Annotated[datetime, at.Gt(Decimal('1.123'))],
[Decimal('1.1231'), Decimal('123')],
[Decimal('1.123'), Decimal('0')],
)
yield Case(Annotated[int, at.Ge(4)], (4, 5, 6, 1000, 4), (0, -1))
yield Case(Annotated[float, at.Ge(0.5)], (0.5, 0.6, 0.7, 0.8, 0.9), (0.4, 0.0, -0.1))
yield Case(
Annotated[datetime, at.Ge(datetime(2000, 1, 1))],
[datetime(2000, 1, 2), datetime(2000, 1, 3)],
[datetime(1998, 1, 1), datetime(1999, 12, 31)],
)
yield Case(Annotated[int, at.Lt(4)], (0, -1), (4, 5, 6, 1000, 4))
yield Case(Annotated[float, at.Lt(0.5)], (0.4, 0.0, -0.1), (0.5, 0.6, 0.7, 0.8, 0.9))
yield Case(
Annotated[datetime, at.Lt(datetime(2000, 1, 1))],
[datetime(1999, 12, 31), datetime(1999, 12, 31)],
[datetime(2000, 1, 2), datetime(2000, 1, 3)],
)
yield Case(Annotated[int, at.Le(4)], (4, 0, -1), (5, 6, 1000))
yield Case(Annotated[float, at.Le(0.5)], (0.5, 0.0, -0.1), (0.6, 0.7, 0.8, 0.9))
yield Case(
Annotated[datetime, at.Le(datetime(2000, 1, 1))],
[datetime(2000, 1, 1), datetime(1999, 12, 31)],
[datetime(2000, 1, 2), datetime(2000, 1, 3)],
)
# Interval
yield Case(Annotated[int, at.Interval(gt=4)], (5, 6, 1000), (4, 0, -1))
yield Case(Annotated[int, at.Interval(gt=4, lt=10)], (5, 6), (4, 10, 1000, 0, -1))
yield Case(Annotated[float, at.Interval(ge=0.5, le=1)], (0.5, 0.9, 1), (0.49, 1.1))
yield Case(
Annotated[datetime, at.Interval(gt=datetime(2000, 1, 1), le=datetime(2000, 1, 3))],
[datetime(2000, 1, 2), datetime(2000, 1, 3)],
[datetime(2000, 1, 1), datetime(2000, 1, 4)],
)
yield Case(Annotated[int, at.MultipleOf(multiple_of=3)], (0, 3, 9), (1, 2, 4))
yield Case(Annotated[float, at.MultipleOf(multiple_of=0.5)], (0, 0.5, 1, 1.5), (0.4, 1.1))
# lengths
yield Case(Annotated[str, at.MinLen(3)], ('123', '1234', 'x' * 10), ('', '1', '12'))
yield Case(Annotated[str, at.Len(3)], ('123', '1234', 'x' * 10), ('', '1', '12'))
yield Case(Annotated[List[int], at.MinLen(3)], ([1, 2, 3], [1, 2, 3, 4], [1] * 10), ([], [1], [1, 2]))
yield Case(Annotated[List[int], at.Len(3)], ([1, 2, 3], [1, 2, 3, 4], [1] * 10), ([], [1], [1, 2]))
yield Case(Annotated[str, at.MaxLen(4)], ('', '1234'), ('12345', 'x' * 10))
yield Case(Annotated[str, at.Len(0, 4)], ('', '1234'), ('12345', 'x' * 10))
yield Case(Annotated[List[str], at.MaxLen(4)], ([], ['a', 'bcdef'], ['a', 'b', 'c']), (['a'] * 5, ['b'] * 10))
yield Case(Annotated[List[str], at.Len(0, 4)], ([], ['a', 'bcdef'], ['a', 'b', 'c']), (['a'] * 5, ['b'] * 10))
yield Case(Annotated[str, at.Len(3, 5)], ('123', '12345'), ('', '1', '12', '123456', 'x' * 10))
yield Case(Annotated[str, at.Len(3, 3)], ('123',), ('12', '1234'))
yield Case(Annotated[Dict[int, int], at.Len(2, 3)], [{1: 1, 2: 2}], [{}, {1: 1}, {1: 1, 2: 2, 3: 3, 4: 4}])
yield Case(Annotated[Set[int], at.Len(2, 3)], ({1, 2}, {1, 2, 3}), (set(), {1}, {1, 2, 3, 4}))
yield Case(Annotated[Tuple[int, ...], at.Len(2, 3)], ((1, 2), (1, 2, 3)), ((), (1,), (1, 2, 3, 4)))
# Timezone
yield Case(
Annotated[datetime, at.Timezone(None)], [datetime(2000, 1, 1)], [datetime(2000, 1, 1, tzinfo=timezone.utc)]
)
yield Case(
Annotated[datetime, at.Timezone(...)], [datetime(2000, 1, 1, tzinfo=timezone.utc)], [datetime(2000, 1, 1)]
)
yield Case(
Annotated[datetime, at.Timezone(timezone.utc)],
[datetime(2000, 1, 1, tzinfo=timezone.utc)],
[datetime(2000, 1, 1), datetime(2000, 1, 1, tzinfo=timezone(timedelta(hours=6)))],
)
yield Case(
Annotated[datetime, at.Timezone('Europe/London')],
[datetime(2000, 1, 1, tzinfo=timezone(timedelta(0), name='Europe/London'))],
[datetime(2000, 1, 1), datetime(2000, 1, 1, tzinfo=timezone(timedelta(hours=6)))],
)
# predicate types
yield Case(at.LowerCase[str], ['abc', 'foobar'], ['', 'A', 'Boom'])
yield Case(at.UpperCase[str], ['ABC', 'DEFO'], ['', 'a', 'abc', 'AbC'])
yield Case(at.IsDigits[str], ['123'], ['', 'ab', 'a1b2'])
yield Case(at.IsAscii[str], ['123', 'foo bar'], ['£100', '😊', 'whatever 👀'])
yield Case(Annotated[int, at.Predicate(lambda x: x % 2 == 0)], [0, 2, 4], [1, 3, 5])
yield Case(at.IsFinite[float], [1.23], [math.nan, math.inf, -math.inf])
yield Case(at.IsNotFinite[float], [math.nan, math.inf], [1.23])
yield Case(at.IsNan[float], [math.nan], [1.23, math.inf])
yield Case(at.IsNotNan[float], [1.23, math.inf], [math.nan])
yield Case(at.IsInfinite[float], [math.inf], [math.nan, 1.23])
yield Case(at.IsNotInfinite[float], [math.nan, 1.23], [math.inf])
# check stacked predicates
yield Case(at.IsInfinite[Annotated[float, at.Predicate(lambda x: x > 0)]], [math.inf], [-math.inf, 1.23, math.nan])
# doc
yield Case(Annotated[int, at.doc("A number")], [1, 2], [])
# custom GroupedMetadata
class MyCustomGroupedMetadata(at.GroupedMetadata):
def __iter__(self) -> Iterator[at.Predicate]:
yield at.Predicate(lambda x: float(x).is_integer())
yield Case(Annotated[float, MyCustomGroupedMetadata()], [0, 2.0], [0.01, 1.5])
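# Hedged harness sketch: cases() is meant to be consumed by a project-specific
# validator; absent one, a smoke test can still confirm every Case carries at
# least one valid example (the at.doc case legitimately has no invalid ones).
if __name__ == "__main__":
    for case in cases():
        valid, invalid = list(case.valid_cases), list(case.invalid_cases)
        assert valid, f"no valid examples for {case.annotation}"
        print(case.annotation, len(valid), len(invalid))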

View file

@ -1 +1 @@
__version__ = "1.2.3" __version__ = "1.3.0"

View file

@ -168,9 +168,9 @@ class Arrow:
isinstance(tzinfo, dt_tzinfo) isinstance(tzinfo, dt_tzinfo)
and hasattr(tzinfo, "localize") and hasattr(tzinfo, "localize")
and hasattr(tzinfo, "zone") and hasattr(tzinfo, "zone")
and tzinfo.zone # type: ignore[attr-defined] and tzinfo.zone
): ):
tzinfo = parser.TzinfoParser.parse(tzinfo.zone) # type: ignore[attr-defined] tzinfo = parser.TzinfoParser.parse(tzinfo.zone)
elif isinstance(tzinfo, str): elif isinstance(tzinfo, str):
tzinfo = parser.TzinfoParser.parse(tzinfo) tzinfo = parser.TzinfoParser.parse(tzinfo)
@ -495,7 +495,7 @@ class Arrow:
yield current yield current
values = [getattr(current, f) for f in cls._ATTRS] values = [getattr(current, f) for f in cls._ATTRS]
current = cls(*values, tzinfo=tzinfo).shift( # type: ignore current = cls(*values, tzinfo=tzinfo).shift( # type: ignore[misc]
**{frame_relative: relative_steps} **{frame_relative: relative_steps}
) )
@ -578,7 +578,7 @@ class Arrow:
for _ in range(3 - len(values)): for _ in range(3 - len(values)):
values.append(1) values.append(1)
floor = self.__class__(*values, tzinfo=self.tzinfo) # type: ignore floor = self.__class__(*values, tzinfo=self.tzinfo) # type: ignore[misc]
if frame_absolute == "week": if frame_absolute == "week":
# if week_start is greater than self.isoweekday() go back one week by setting delta = 7 # if week_start is greater than self.isoweekday() go back one week by setting delta = 7
@ -792,7 +792,6 @@ class Arrow:
return self._datetime.isoformat() return self._datetime.isoformat()
def __format__(self, formatstr: str) -> str: def __format__(self, formatstr: str) -> str:
if len(formatstr) > 0: if len(formatstr) > 0:
return self.format(formatstr) return self.format(formatstr)
@ -804,7 +803,6 @@ class Arrow:
# attributes and properties # attributes and properties
def __getattr__(self, name: str) -> int: def __getattr__(self, name: str) -> int:
if name == "week": if name == "week":
return self.isocalendar()[1] return self.isocalendar()[1]
@ -965,7 +963,6 @@ class Arrow:
absolute_kwargs = {} absolute_kwargs = {}
for key, value in kwargs.items(): for key, value in kwargs.items():
if key in self._ATTRS: if key in self._ATTRS:
absolute_kwargs[key] = value absolute_kwargs[key] = value
elif key in ["week", "quarter"]: elif key in ["week", "quarter"]:
@ -1022,7 +1019,6 @@ class Arrow:
additional_attrs = ["weeks", "quarters", "weekday"] additional_attrs = ["weeks", "quarters", "weekday"]
for key, value in kwargs.items(): for key, value in kwargs.items():
if key in self._ATTRS_PLURAL or key in additional_attrs: if key in self._ATTRS_PLURAL or key in additional_attrs:
relative_kwargs[key] = value relative_kwargs[key] = value
else: else:
@ -1259,11 +1255,10 @@ class Arrow:
) )
if trunc(abs(delta)) != 1: if trunc(abs(delta)) != 1:
granularity += "s" # type: ignore granularity += "s" # type: ignore[assignment]
return locale.describe(granularity, delta, only_distance=only_distance) return locale.describe(granularity, delta, only_distance=only_distance)
else: else:
if not granularity: if not granularity:
raise ValueError( raise ValueError(
"Empty granularity list provided. " "Empty granularity list provided. "
@ -1314,7 +1309,7 @@ class Arrow:
def dehumanize(self, input_string: str, locale: str = "en_us") -> "Arrow": def dehumanize(self, input_string: str, locale: str = "en_us") -> "Arrow":
"""Returns a new :class:`Arrow <arrow.arrow.Arrow>` object, that represents """Returns a new :class:`Arrow <arrow.arrow.Arrow>` object, that represents
the time difference relative to the attrbiutes of the the time difference relative to the attributes of the
:class:`Arrow <arrow.arrow.Arrow>` object. :class:`Arrow <arrow.arrow.Arrow>` object.
:param timestring: a ``str`` representing a humanized relative time. :param timestring: a ``str`` representing a humanized relative time.
@ -1367,7 +1362,6 @@ class Arrow:
# Search input string for each time unit within locale # Search input string for each time unit within locale
for unit, unit_object in locale_obj.timeframes.items(): for unit, unit_object in locale_obj.timeframes.items():
# Need to check the type of unit_object to create the correct dictionary # Need to check the type of unit_object to create the correct dictionary
if isinstance(unit_object, Mapping): if isinstance(unit_object, Mapping):
strings_to_search = unit_object strings_to_search = unit_object
@ -1378,7 +1372,6 @@ class Arrow:
# Needs to cycle all through strings as some locales have strings that # Needs to cycle all through strings as some locales have strings that
# could overlap in a regex match, since input validation isn't being performed. # could overlap in a regex match, since input validation isn't being performed.
for time_delta, time_string in strings_to_search.items(): for time_delta, time_string in strings_to_search.items():
# Replace {0} with regex \d representing digits # Replace {0} with regex \d representing digits
search_string = str(time_string) search_string = str(time_string)
search_string = search_string.format(r"\d+") search_string = search_string.format(r"\d+")
@ -1419,7 +1412,7 @@ class Arrow:
# Assert error if string does not modify any units # Assert error if string does not modify any units
if not any([True for k, v in unit_visited.items() if v]): if not any([True for k, v in unit_visited.items() if v]):
raise ValueError( raise ValueError(
"Input string not valid. Note: Some locales do not support the week granulairty in Arrow. " "Input string not valid. Note: Some locales do not support the week granularity in Arrow. "
"If you are attempting to use the week granularity on an unsupported locale, this could be the cause of this error." "If you are attempting to use the week granularity on an unsupported locale, this could be the cause of this error."
) )
@ -1718,7 +1711,6 @@ class Arrow:
# math # math
def __add__(self, other: Any) -> "Arrow": def __add__(self, other: Any) -> "Arrow":
if isinstance(other, (timedelta, relativedelta)): if isinstance(other, (timedelta, relativedelta)):
return self.fromdatetime(self._datetime + other, self._datetime.tzinfo) return self.fromdatetime(self._datetime + other, self._datetime.tzinfo)
@ -1736,7 +1728,6 @@ class Arrow:
pass # pragma: no cover pass # pragma: no cover
def __sub__(self, other: Any) -> Union[timedelta, "Arrow"]: def __sub__(self, other: Any) -> Union[timedelta, "Arrow"]:
if isinstance(other, (timedelta, relativedelta)): if isinstance(other, (timedelta, relativedelta)):
return self.fromdatetime(self._datetime - other, self._datetime.tzinfo) return self.fromdatetime(self._datetime - other, self._datetime.tzinfo)
@ -1749,7 +1740,6 @@ class Arrow:
return NotImplemented return NotImplemented
def __rsub__(self, other: Any) -> timedelta: def __rsub__(self, other: Any) -> timedelta:
if isinstance(other, dt_datetime): if isinstance(other, dt_datetime):
return other - self._datetime return other - self._datetime
@ -1758,42 +1748,36 @@ class Arrow:
# comparisons # comparisons
def __eq__(self, other: Any) -> bool: def __eq__(self, other: Any) -> bool:
if not isinstance(other, (Arrow, dt_datetime)): if not isinstance(other, (Arrow, dt_datetime)):
return False return False
return self._datetime == self._get_datetime(other) return self._datetime == self._get_datetime(other)
def __ne__(self, other: Any) -> bool: def __ne__(self, other: Any) -> bool:
if not isinstance(other, (Arrow, dt_datetime)): if not isinstance(other, (Arrow, dt_datetime)):
return True return True
return not self.__eq__(other) return not self.__eq__(other)
def __gt__(self, other: Any) -> bool: def __gt__(self, other: Any) -> bool:
if not isinstance(other, (Arrow, dt_datetime)): if not isinstance(other, (Arrow, dt_datetime)):
return NotImplemented return NotImplemented
return self._datetime > self._get_datetime(other) return self._datetime > self._get_datetime(other)
def __ge__(self, other: Any) -> bool: def __ge__(self, other: Any) -> bool:
if not isinstance(other, (Arrow, dt_datetime)): if not isinstance(other, (Arrow, dt_datetime)):
return NotImplemented return NotImplemented
return self._datetime >= self._get_datetime(other) return self._datetime >= self._get_datetime(other)
def __lt__(self, other: Any) -> bool: def __lt__(self, other: Any) -> bool:
if not isinstance(other, (Arrow, dt_datetime)): if not isinstance(other, (Arrow, dt_datetime)):
return NotImplemented return NotImplemented
return self._datetime < self._get_datetime(other) return self._datetime < self._get_datetime(other)
def __le__(self, other: Any) -> bool: def __le__(self, other: Any) -> bool:
if not isinstance(other, (Arrow, dt_datetime)): if not isinstance(other, (Arrow, dt_datetime)):
return NotImplemented return NotImplemented
@ -1865,7 +1849,6 @@ class Arrow:
def _get_iteration_params(cls, end: Any, limit: Optional[int]) -> Tuple[Any, int]: def _get_iteration_params(cls, end: Any, limit: Optional[int]) -> Tuple[Any, int]:
"""Sets default end and limit values for range method.""" """Sets default end and limit values for range method."""
if end is None: if end is None:
if limit is None: if limit is None:
raise ValueError("One of 'end' or 'limit' is required.") raise ValueError("One of 'end' or 'limit' is required.")

View file

@ -267,11 +267,9 @@ class ArrowFactory:
raise TypeError(f"Cannot parse single argument of type {type(arg)!r}.") raise TypeError(f"Cannot parse single argument of type {type(arg)!r}.")
elif arg_count == 2: elif arg_count == 2:
arg_1, arg_2 = args[0], args[1] arg_1, arg_2 = args[0], args[1]
if isinstance(arg_1, datetime): if isinstance(arg_1, datetime):
# (datetime, tzinfo/str) -> fromdatetime @ tzinfo # (datetime, tzinfo/str) -> fromdatetime @ tzinfo
if isinstance(arg_2, (dt_tzinfo, str)): if isinstance(arg_2, (dt_tzinfo, str)):
return self.type.fromdatetime(arg_1, tzinfo=arg_2) return self.type.fromdatetime(arg_1, tzinfo=arg_2)
@ -281,7 +279,6 @@ class ArrowFactory:
) )
elif isinstance(arg_1, date): elif isinstance(arg_1, date):
# (date, tzinfo/str) -> fromdate @ tzinfo # (date, tzinfo/str) -> fromdate @ tzinfo
if isinstance(arg_2, (dt_tzinfo, str)): if isinstance(arg_2, (dt_tzinfo, str)):
return self.type.fromdate(arg_1, tzinfo=arg_2) return self.type.fromdate(arg_1, tzinfo=arg_2)

View file

@ -29,7 +29,6 @@ FORMAT_W3C: Final[str] = "YYYY-MM-DD HH:mm:ssZZ"
class DateTimeFormatter: class DateTimeFormatter:
# This pattern matches characters enclosed in square brackets are matched as # This pattern matches characters enclosed in square brackets are matched as
# an atomic group. For more info on atomic groups and how to they are # an atomic group. For more info on atomic groups and how to they are
# emulated in Python's re library, see https://stackoverflow.com/a/13577411/2701578 # emulated in Python's re library, see https://stackoverflow.com/a/13577411/2701578
@ -41,18 +40,15 @@ class DateTimeFormatter:
locale: locales.Locale locale: locales.Locale
def __init__(self, locale: str = DEFAULT_LOCALE) -> None: def __init__(self, locale: str = DEFAULT_LOCALE) -> None:
self.locale = locales.get_locale(locale) self.locale = locales.get_locale(locale)
def format(cls, dt: datetime, fmt: str) -> str: def format(cls, dt: datetime, fmt: str) -> str:
# FIXME: _format_token() is nullable # FIXME: _format_token() is nullable
return cls._FORMAT_RE.sub( return cls._FORMAT_RE.sub(
lambda m: cast(str, cls._format_token(dt, m.group(0))), fmt lambda m: cast(str, cls._format_token(dt, m.group(0))), fmt
) )
def _format_token(self, dt: datetime, token: Optional[str]) -> Optional[str]: def _format_token(self, dt: datetime, token: Optional[str]) -> Optional[str]:
if token and token.startswith("[") and token.endswith("]"): if token and token.startswith("[") and token.endswith("]"):
return token[1:-1] return token[1:-1]

View file

@@ -129,7 +129,6 @@ class Locale:
         _locale_map[locale_name.lower().replace("_", "-")] = cls

     def __init__(self) -> None:
         self._month_name_to_ordinal = None

     def describe(
@@ -174,7 +173,7 @@ class Locale:
         # Needed to determine the correct relative string to use
         timeframe_value = 0

-        for _unit_name, unit_value in timeframes:
+        for _, unit_value in timeframes:
             if trunc(unit_value) != 0:
                 timeframe_value = trunc(unit_value)
                 break
@@ -285,7 +284,6 @@ class Locale:
         timeframe: TimeFrameLiteral,
         delta: Union[float, int],
     ) -> str:
         if timeframe == "now":
             return humanized
@@ -425,7 +423,7 @@ class ItalianLocale(Locale):
         "hours": "{0} ore",
         "day": "un giorno",
         "days": "{0} giorni",
-        "week": "una settimana,",
+        "week": "una settimana",
         "weeks": "{0} settimane",
         "month": "un mese",
         "months": "{0} mesi",
@@ -867,14 +865,16 @@ class FinnishLocale(Locale):
     timeframes: ClassVar[Mapping[TimeFrameLiteral, Union[str, Mapping[str, str]]]] = {
         "now": "juuri nyt",
-        "second": "sekunti",
-        "seconds": {"past": "{0} muutama sekunti", "future": "{0} muutaman sekunnin"},
+        "second": {"past": "sekunti", "future": "sekunnin"},
+        "seconds": {"past": "{0} sekuntia", "future": "{0} sekunnin"},
         "minute": {"past": "minuutti", "future": "minuutin"},
         "minutes": {"past": "{0} minuuttia", "future": "{0} minuutin"},
         "hour": {"past": "tunti", "future": "tunnin"},
         "hours": {"past": "{0} tuntia", "future": "{0} tunnin"},
-        "day": "päivä",
+        "day": {"past": "päivä", "future": "päivän"},
         "days": {"past": "{0} päivää", "future": "{0} päivän"},
+        "week": {"past": "viikko", "future": "viikon"},
+        "weeks": {"past": "{0} viikkoa", "future": "{0} viikon"},
         "month": {"past": "kuukausi", "future": "kuukauden"},
         "months": {"past": "{0} kuukautta", "future": "{0} kuukauden"},
         "year": {"past": "vuosi", "future": "vuoden"},
@@ -1887,7 +1887,7 @@ class GermanBaseLocale(Locale):
     future = "in {0}"
     and_word = "und"

-    timeframes = {
+    timeframes: ClassVar[Dict[TimeFrameLiteral, str]] = {
         "now": "gerade eben",
         "second": "einer Sekunde",
         "seconds": "{0} Sekunden",
@@ -1982,7 +1982,9 @@ class GermanBaseLocale(Locale):
             return super().describe(timeframe, delta, only_distance)

         # German uses a different case without 'in' or 'ago'
-        humanized = self.timeframes_only_distance[timeframe].format(trunc(abs(delta)))
+        humanized: str = self.timeframes_only_distance[timeframe].format(
+            trunc(abs(delta))
+        )

         return humanized
@@ -2547,6 +2549,8 @@ class ArabicLocale(Locale):
         "hours": {"2": "ساعتين", "ten": "{0} ساعات", "higher": "{0} ساعة"},
         "day": "يوم",
         "days": {"2": "يومين", "ten": "{0} أيام", "higher": "{0} يوم"},
+        "week": "اسبوع",
+        "weeks": {"2": "اسبوعين", "ten": "{0} أسابيع", "higher": "{0} اسبوع"},
         "month": "شهر",
         "months": {"2": "شهرين", "ten": "{0} أشهر", "higher": "{0} شهر"},
         "year": "سنة",
@@ -3709,6 +3713,8 @@ class HungarianLocale(Locale):
         "hours": {"past": "{0} órával", "future": "{0} óra"},
         "day": {"past": "egy nappal", "future": "egy nap"},
         "days": {"past": "{0} nappal", "future": "{0} nap"},
+        "week": {"past": "egy héttel", "future": "egy hét"},
+        "weeks": {"past": "{0} héttel", "future": "{0} hét"},
         "month": {"past": "egy hónappal", "future": "egy hónap"},
         "months": {"past": "{0} hónappal", "future": "{0} hónap"},
         "year": {"past": "egy évvel", "future": "egy év"},
@@ -3934,7 +3940,6 @@ class ThaiLocale(Locale):
 class LaotianLocale(Locale):

     names = ["lo", "lo-la"]

     past = "{0} ກ່ອນຫນ້ານີ້"
@@ -4119,6 +4124,7 @@ class BengaliLocale(Locale):
             return f"{n}র্থ"
         if n == 6:
             return f"{n}ষ্ঠ"
+        return ""


 class RomanshLocale(Locale):
@@ -4137,6 +4143,8 @@ class RomanshLocale(Locale):
         "hours": "{0} ura",
         "day": "in di",
         "days": "{0} dis",
+        "week": "in'emna",
+        "weeks": "{0} emnas",
         "month": "in mais",
         "months": "{0} mais",
         "year": "in onn",
@@ -5399,7 +5407,7 @@ class LuxembourgishLocale(Locale):
     future = "an {0}"
     and_word = "an"

-    timeframes = {
+    timeframes: ClassVar[Dict[TimeFrameLiteral, str]] = {
         "now": "just elo",
         "second": "enger Sekonn",
         "seconds": "{0} Sekonnen",
@@ -5487,7 +5495,9 @@ class LuxembourgishLocale(Locale):
             return super().describe(timeframe, delta, only_distance)

         # Luxembourgish uses a different case without 'in' or 'ago'
-        humanized = self.timeframes_only_distance[timeframe].format(trunc(abs(delta)))
+        humanized: str = self.timeframes_only_distance[timeframe].format(
+            trunc(abs(delta))
+        )

         return humanized
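The hunks above give several locales first-class "week"/"weeks" timeframes and split the Finnish singular forms into separate past/future variants. A short sketch of how this surfaces through arrow's public humanize API (illustrative only; the exact output strings come from the locale tables above):

import arrow

# Finnish picks the "past" forms, e.g. "2 viikkoa sitten"
print(arrow.utcnow().shift(weeks=-2).humanize(locale="fi", granularity="week"))
# ...and the "future" forms, e.g. "2 viikon kuluttua"
print(arrow.utcnow().shift(weeks=+2).humanize(locale="fi", granularity="week"))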


@@ -159,7 +159,6 @@ class DateTimeParser:
     _input_re_map: Dict[_FORMAT_TYPE, Pattern[str]]

     def __init__(self, locale: str = DEFAULT_LOCALE, cache_size: int = 0) -> None:
         self.locale = locales.get_locale(locale)
         self._input_re_map = self._BASE_INPUT_RE_MAP.copy()
         self._input_re_map.update(
@@ -196,7 +195,6 @@ class DateTimeParser:
     def parse_iso(
         self, datetime_string: str, normalize_whitespace: bool = False
     ) -> datetime:
         if normalize_whitespace:
             datetime_string = re.sub(r"\s+", " ", datetime_string.strip())
@@ -236,13 +234,14 @@ class DateTimeParser:
         ]

         if has_time:
             if has_space_divider:
                 date_string, time_string = datetime_string.split(" ", 1)
             else:
                 date_string, time_string = datetime_string.split("T", 1)

-            time_parts = re.split(r"[\+\-Z]", time_string, 1, re.IGNORECASE)
+            time_parts = re.split(
+                r"[\+\-Z]", time_string, maxsplit=1, flags=re.IGNORECASE
+            )

             time_components: Optional[Match[str]] = self._TIME_RE.match(time_parts[0])
@@ -303,7 +302,6 @@ class DateTimeParser:
         fmt: Union[List[str], str],
         normalize_whitespace: bool = False,
     ) -> datetime:
         if normalize_whitespace:
             datetime_string = re.sub(r"\s+", " ", datetime_string)
@@ -341,12 +339,11 @@ class DateTimeParser:
                     f"Unable to find a match group for the specified token {token!r}."
                 )

-            self._parse_token(token, value, parts)  # type: ignore
+            self._parse_token(token, value, parts)  # type: ignore[arg-type]

         return self._build_datetime(parts)

     def _generate_pattern_re(self, fmt: str) -> Tuple[List[_FORMAT_TYPE], Pattern[str]]:
         # fmt is a string of tokens like 'YYYY-MM-DD'
         # we construct a new string by replacing each
         # token by its pattern:
@@ -498,7 +495,6 @@ class DateTimeParser:
         value: Any,
         parts: _Parts,
     ) -> None:
         if token == "YYYY":
             parts["year"] = int(value)
@@ -508,7 +504,7 @@ class DateTimeParser:
         elif token in ["MMMM", "MMM"]:
             # FIXME: month_number() is nullable
-            parts["month"] = self.locale.month_number(value.lower())  # type: ignore
+            parts["month"] = self.locale.month_number(value.lower())  # type: ignore[typeddict-item]

         elif token in ["MM", "M"]:
             parts["month"] = int(value)
@@ -588,7 +584,6 @@ class DateTimeParser:
         weekdate = parts.get("weekdate")

         if weekdate is not None:
             year, week = int(weekdate[0]), int(weekdate[1])

             if weekdate[2] is not None:
@@ -712,7 +707,6 @@ class DateTimeParser:
         )

     def _parse_multiformat(self, string: str, formats: Iterable[str]) -> datetime:
         _datetime: Optional[datetime] = None

         for fmt in formats:
@@ -740,12 +734,11 @@ class DateTimeParser:
 class TzinfoParser:
     _TZINFO_RE: ClassVar[Pattern[str]] = re.compile(
-        r"^([\+\-])?(\d{2})(?:\:?(\d{2}))?$"
+        r"^(?:\(UTC)*([\+\-])?(\d{2})(?:\:?(\d{2}))?"
     )

     @classmethod
     def parse(cls, tzinfo_string: str) -> dt_tzinfo:
         tzinfo: Optional[dt_tzinfo] = None

         if tzinfo_string == "local":
@@ -755,7 +748,6 @@ class TzinfoParser:
             tzinfo = tz.tzutc()
         else:
             iso_match = cls._TZINFO_RE.match(tzinfo_string)

             if iso_match:
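Two independent fixes sit in this file: re.split now passes maxsplit and flags as keywords (passing them positionally is deprecated in newer Python releases), and the TzinfoParser pattern is relaxed to tolerate a "(UTC..." prefix instead of anchoring at end-of-string. A quick check of what the new pattern accepts (illustrative, copied from the regex above):

import re

_TZINFO_RE = re.compile(r"^(?:\(UTC)*([\+\-])?(\d{2})(?:\:?(\d{2}))?")

for tz_string in ("+0900", "(UTC+09:00) Osaka, Sapporo, Tokyo", "-05:00"):
    print(tz_string, "->", _TZINFO_RE.match(tz_string).groups())
# all three yield sign/hour/minute groups, e.g. ('+', '09', '00')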


@@ -20,7 +20,7 @@ from functools import wraps
 from inspect import signature


-def _launch_forever_coro(coro, args, kwargs, loop):
+async def _run_forever_coro(coro, args, kwargs, loop):
     '''
     This helper function launches an async main function that was tagged with
     forever=True. There are two possibilities:
@@ -48,7 +48,7 @@ def _launch_forever_coro(coro, args, kwargs, loop):
     # forever=True feature from autoasync at some point in the future.
     thing = coro(*args, **kwargs)
     if iscoroutine(thing):
-        loop.create_task(thing)
+        await thing


 def autoasync(coro=None, *, loop=None, forever=False, pass_loop=False):
@@ -127,7 +127,9 @@ def autoasync(coro=None, *, loop=None, forever=False, pass_loop=False):
             args, kwargs = bound_args.args, bound_args.kwargs

             if forever:
-                _launch_forever_coro(coro, args, kwargs, local_loop)
+                local_loop.create_task(_run_forever_coro(
+                    coro, args, kwargs, local_loop
+                ))
                 local_loop.run_forever()
             else:
                 return local_loop.run_until_complete(coro(*args, **kwargs))
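The old helper called loop.create_task() from synchronous code before the loop was running; the rewrite makes the helper itself a coroutine and schedules it as a task, so the user's coroutine only starts executing once run_forever() actually spins the loop. A minimal sketch of the same pattern (illustrative names, not autocommand's API):

import asyncio

async def main():
    for _ in range(3):
        await asyncio.sleep(0.1)
        print("tick")

async def runner():
    # Nothing here executes until the loop below actually runs.
    await main()

loop = asyncio.new_event_loop()
loop.create_task(runner())
loop.call_later(0.5, loop.stop)  # stand-in for run_forever()'s usual exit path
loop.run_forever()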


@@ -26,6 +26,12 @@ def update_wrapper(
 class _HashedSeq(list):
+    """This class guarantees that hash() will be called no more than once
+    per element.  This is important because the lru_cache() will hash
+    the key multiple times on a cache miss.
+    """
+
     __slots__ = 'hashvalue'

     def __init__(self, tup, hash=hash):
@@ -41,45 +47,57 @@ def _make_key(
     kwds,
     typed,
     kwd_mark=(object(),),
-    fasttypes=set([int, str, frozenset, type(None)]),
-    sorted=sorted,
+    fasttypes={int, str},
     tuple=tuple,
     type=type,
     len=len,
 ):
-    'Make a cache key from optionally typed positional and keyword arguments'
+    """Make a cache key from optionally typed positional and keyword arguments
+
+    The key is constructed in a way that is flat as possible rather than
+    as a nested structure that would take more memory.
+
+    If there is only a single argument and its data type is known to cache
+    its hash value, then that argument is returned without a wrapper.  This
+    saves space and improves lookup speed.
+    """
+    # All of code below relies on kwds preserving the order input by the user.
+    # Formerly, we sorted() the kwds before looping.  The new way is *much*
+    # faster; however, it means that f(x=1, y=2) will now be treated as a
+    # distinct call from f(y=2, x=1) which will be cached separately.
     key = args
     if kwds:
-        sorted_items = sorted(kwds.items())
         key += kwd_mark
-        for item in sorted_items:
+        for item in kwds.items():
             key += item
     if typed:
         key += tuple(type(v) for v in args)
         if kwds:
-            key += tuple(type(v) for k, v in sorted_items)
+            key += tuple(type(v) for v in kwds.values())
     elif len(key) == 1 and type(key[0]) in fasttypes:
         return key[0]
     return _HashedSeq(key)


-def lru_cache(maxsize=100, typed=False):  # noqa: C901
+def lru_cache(maxsize=128, typed=False):
     """Least-recently-used cache decorator.

     If *maxsize* is set to None, the LRU features are disabled and the cache
     can grow without bound.

     If *typed* is True, arguments of different types will be cached separately.
-    For example, f(3.0) and f(3) will be treated as distinct calls with
-    distinct results.
+    For example, f(decimal.Decimal("3.0")) and f(3.0) will be treated as
+    distinct calls with distinct results. Some types such as str and int may
+    be cached separately even when typed is false.

     Arguments to the cached function must be hashable.

-    View the cache statistics named tuple (hits, misses, maxsize, currsize) with
-    f.cache_info().  Clear the cache and statistics with f.cache_clear().
+    View the cache statistics named tuple (hits, misses, maxsize, currsize)
+    with f.cache_info().  Clear the cache and statistics with f.cache_clear().
     Access the underlying function with f.__wrapped__.

-    See:  http://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used
+    See:  https://en.wikipedia.org/wiki/Cache_replacement_policies#Least_recently_used_(LRU)

     """
@@ -88,108 +106,138 @@ def lru_cache(maxsize=100, typed=False):  # noqa: C901
     # The internals of the lru_cache are encapsulated for thread safety and
     # to allow the implementation to change (including a possible C version).

+    if isinstance(maxsize, int):
+        # Negative maxsize is treated as 0
+        if maxsize < 0:
+            maxsize = 0
+    elif callable(maxsize) and isinstance(typed, bool):
+        # The user_function was passed in directly via the maxsize argument
+        user_function, maxsize = maxsize, 128
+        wrapper = _lru_cache_wrapper(user_function, maxsize, typed, _CacheInfo)
+        wrapper.cache_parameters = lambda: {'maxsize': maxsize, 'typed': typed}
+        return update_wrapper(wrapper, user_function)
+    elif maxsize is not None:
+        raise TypeError('Expected first argument to be an integer, a callable, or None')
+
     def decorating_function(user_function):
-        cache = dict()
-        stats = [0, 0]                  # make statistics updateable non-locally
-        HITS, MISSES = 0, 1             # names for the stats fields
-        make_key = _make_key
-        cache_get = cache.get           # bound method to lookup key or return None
-        _len = len                      # localize the global len() function
+        wrapper = _lru_cache_wrapper(user_function, maxsize, typed, _CacheInfo)
+        wrapper.cache_parameters = lambda: {'maxsize': maxsize, 'typed': typed}
+        return update_wrapper(wrapper, user_function)
+
+    return decorating_function
+
+
+def _lru_cache_wrapper(user_function, maxsize, typed, _CacheInfo):
+    # Constants shared by all lru cache instances:
+    sentinel = object()                    # unique object used to signal cache misses
+    make_key = _make_key                   # build a key from the function arguments
+    PREV, NEXT, KEY, RESULT = 0, 1, 2, 3   # names for the link fields
+
+    cache = {}
+    hits = misses = 0
+    full = False
+    cache_get = cache.get      # bound method to lookup a key or return None
+    cache_len = cache.__len__  # get cache size without calling len()
     lock = RLock()             # because linkedlist updates aren't threadsafe
     root = []                  # root of the circular doubly linked list
     root[:] = [root, root, None, None]     # initialize by pointing to self
-        nonlocal_root = [root]          # make updateable non-locally
-        PREV, NEXT, KEY, RESULT = 0, 1, 2, 3    # names for the link fields

     if maxsize == 0:

         def wrapper(*args, **kwds):
-            # no caching, just do a statistics update after a successful call
+            # No caching -- just a statistics update
+            nonlocal misses
+            misses += 1
             result = user_function(*args, **kwds)
-            stats[MISSES] += 1
             return result

     elif maxsize is None:

         def wrapper(*args, **kwds):
-            # simple caching without ordering or size limit
+            # Simple caching without ordering or size limit
+            nonlocal hits, misses
             key = make_key(args, kwds, typed)
-            result = cache_get(
-                key, root
-            )  # root used here as a unique not-found sentinel
-            if result is not root:
-                stats[HITS] += 1
+            result = cache_get(key, sentinel)
+            if result is not sentinel:
+                hits += 1
                 return result
+            misses += 1
             result = user_function(*args, **kwds)
             cache[key] = result
-            stats[MISSES] += 1
             return result

     else:

         def wrapper(*args, **kwds):
-            # size limited caching that tracks accesses by recency
-            key = make_key(args, kwds, typed) if kwds or typed else args
+            # Size limited caching that tracks accesses by recency
+            nonlocal root, hits, misses, full
+            key = make_key(args, kwds, typed)
             with lock:
                 link = cache_get(key)
                 if link is not None:
-                    # record recent use of the key by moving it
-                    # to the front of the list
-                    (root,) = nonlocal_root
-                    link_prev, link_next, key, result = link
+                    # Move the link to the front of the circular queue
+                    link_prev, link_next, _key, result = link
                     link_prev[NEXT] = link_next
                     link_next[PREV] = link_prev
                     last = root[PREV]
                     last[NEXT] = root[PREV] = link
                     link[PREV] = last
                     link[NEXT] = root
-                    stats[HITS] += 1
+                    hits += 1
                     return result
+                misses += 1
             result = user_function(*args, **kwds)
             with lock:
-                (root,) = nonlocal_root
                 if key in cache:
-                    # getting here means that this same key was added to the
-                    # cache while the lock was released.  since the link
+                    # Getting here means that this same key was added to the
+                    # cache while the lock was released.  Since the link
                     # update is already done, we need only return the
                     # computed result and update the count of misses.
                     pass
-                elif _len(cache) >= maxsize:
-                    # use the old root to store the new key and result
+                elif full:
+                    # Use the old root to store the new key and result.
                     oldroot = root
                     oldroot[KEY] = key
                     oldroot[RESULT] = result
-                    # empty the oldest link and make it the new root
-                    root = nonlocal_root[0] = oldroot[NEXT]
+                    # Empty the oldest link and make it the new root.
+                    # Keep a reference to the old key and old result to
+                    # prevent their ref counts from going to zero during the
+                    # update. That will prevent potentially arbitrary object
+                    # clean-up code (i.e. __del__) from running while we're
+                    # still adjusting the links.
+                    root = oldroot[NEXT]
                    oldkey = root[KEY]
                     root[KEY] = root[RESULT] = None
-                    # now update the cache dictionary for the new links
+                    # Now update the cache dictionary.
                     del cache[oldkey]
+                    # Save the potentially reentrant cache[key] assignment
+                    # for last, after the root and links have been put in
+                    # a consistent state.
                     cache[key] = oldroot
                 else:
-                    # put result in a new link at the front of the list
+                    # Put result in a new link at the front of the queue.
                     last = root[PREV]
                     link = [last, root, key, result]
                     last[NEXT] = root[PREV] = cache[key] = link
-                stats[MISSES] += 1
+                    # Use the cache_len bound method instead of the len() function
+                    # which could potentially be wrapped in an lru_cache itself.
+                    full = cache_len() >= maxsize
             return result

     def cache_info():
         """Report cache statistics"""
         with lock:
-            return _CacheInfo(stats[HITS], stats[MISSES], maxsize, len(cache))
+            return _CacheInfo(hits, misses, maxsize, cache_len())

     def cache_clear():
         """Clear the cache and cache statistics"""
+        nonlocal hits, misses, full
         with lock:
             cache.clear()
-            root = nonlocal_root[0]
             root[:] = [root, root, None, None]
-            stats[:] = [0, 0]
+            hits = misses = 0
+            full = False

-    wrapper.__wrapped__ = user_function
     wrapper.cache_info = cache_info
     wrapper.cache_clear = cache_clear
-    return update_wrapper(wrapper, user_function)
-
-    return decorating_function
+    return wrapper
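Taken together, the rewrite swaps the sorted-kwargs, stats-list backport for the modern CPython implementation: nonlocal counters, a sentinel miss marker, a `full` flag, and a separate `_lru_cache_wrapper` factory. A small usage sketch of the resulting semantics (hypothetical functions; the decorator API itself is the one shown above):

@lru_cache(maxsize=2)
def square(x):
    # an expensive computation stands in here
    return x * x

square(2); square(3); square(2)
print(square.cache_info())  # CacheInfo(hits=1, misses=2, maxsize=2, currsize=2)

# One visible behavior change from the new _make_key: keyword order now
# produces distinct cache entries instead of being sorted away.
@lru_cache(maxsize=None)
def point(*, x=0, y=0):
    return (x, y)

point(x=1, y=2)
point(y=2, x=1)                   # a second miss, cached separately
print(point.cache_info().misses)  # 2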


@@ -11,9 +11,9 @@ from bleach.sanitizer import (
 # yyyymmdd
-__releasedate__ = "20230123"
+__releasedate__ = "20231006"
 # x.y.z or x.y.z.dev0 -- semver
-__version__ = "6.0.0"
+__version__ = "6.1.0"

 __all__ = ["clean", "linkify"]


@@ -395,10 +395,17 @@ class BleachHTMLTokenizer(HTMLTokenizer):
                 # followed by a series of characters. It's treated as a tag
                 # name that abruptly ends, but we should treat that like
                 # character data
-                yield {
-                    "type": TAG_TOKEN_TYPE_CHARACTERS,
-                    "data": "<" + self.currentToken["name"],
-                }
+                yield {"type": TAG_TOKEN_TYPE_CHARACTERS, "data": self.stream.get_tag()}
+            elif last_error_token["data"] in (
+                "eof-in-attribute-name",
+                "eof-in-attribute-value-no-quotes",
+            ):
+                # Handle the case where the text being parsed ends with <
+                # followed by a series of characters and then space and then
+                # more characters. It's treated as a tag name followed by an
+                # attribute that abruptly ends, but we should treat that like
+                # character data.
+                yield {"type": TAG_TOKEN_TYPE_CHARACTERS, "data": self.stream.get_tag()}
             else:
                 yield last_error_token


@@ -45,8 +45,8 @@ def build_url_re(tlds=TLDS, protocols=html5lib_shim.allowed_protocols):
         r"""\(*  # Match any opening parentheses.
         \b(?<![@.])(?:(?:{0}):/{{0,3}}(?:(?:\w+:)?\w+@)?)?  # http://
         ([\w-]+\.)+(?:{1})(?:\:[0-9]+)?(?!\.\w)\b   # xx.yy.tld(:##)?
-        (?:[/?][^\s\{{\}}\|\\\^\[\]`<>"]*)?
-            # /path/zz (excluding "unsafe" chars from RFC 1738,
+        (?:[/?][^\s\{{\}}\|\\\^`<>"]*)?
+            # /path/zz (excluding "unsafe" chars from RFC 3986,
             # except for # and ~, which happen in practice)
         """.format(
             "|".join(sorted(protocols)), "|".join(sorted(tlds))
@@ -591,7 +591,7 @@ class LinkifyFilter(html5lib_shim.Filter):
                     in_a = False
                     token_buffer = []
                 else:
-                    token_buffer.append(token)
+                    token_buffer.extend(list(self.extract_entities(token)))
                 continue

             if token["type"] in ["StartTag", "EmptyTag"]:
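Two separate fixes here: the URL pattern stops cutting a link short at "[" and "]" (they occur in real query strings), and tokens buffered inside an existing <a> tag now have their character entities extracted rather than being appended raw. A hedged sketch against bleach's public linkifier API (output shape is illustrative):

from bleach.linkifier import Linker, build_url_re

linker = Linker(url_re=build_url_re())
# With the old character class the link would have stopped at "[";
# now the bracketed query text stays inside the generated <a> tag.
print(linker.linkify("docs at http://example.com/search?q=[term]"))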


@@ -15,8 +15,8 @@ documentation: http://www.crummy.com/software/BeautifulSoup/bs4/doc/
 """

 __author__ = "Leonard Richardson (leonardr@segfault.org)"
-__version__ = "4.12.2"
-__copyright__ = "Copyright (c) 2004-2023 Leonard Richardson"
+__version__ = "4.12.3"
+__copyright__ = "Copyright (c) 2004-2024 Leonard Richardson"
 # Use of this source code is governed by the MIT license.
 __license__ = "MIT"


@@ -514,15 +514,19 @@ class DetectsXMLParsedAsHTML(object):
     XML_PREFIX_B = b'<?xml'

     @classmethod
-    def warn_if_markup_looks_like_xml(cls, markup):
+    def warn_if_markup_looks_like_xml(cls, markup, stacklevel=3):
         """Perform a check on some markup to see if it looks like XML
         that's not XHTML. If so, issue a warning.

         This is much less reliable than doing the check while parsing,
         but some of the tree builders can't do that.

+        :param stacklevel: The stacklevel of the code calling this
+            function.
+
         :return: True if the markup looks like non-XHTML XML, False
             otherwise.
         """
         if isinstance(markup, bytes):
             prefix = cls.XML_PREFIX_B
@@ -535,15 +539,16 @@ class DetectsXMLParsedAsHTML(object):
             and markup.startswith(prefix)
             and not looks_like_html.search(markup[:500])
         ):
-            cls._warn()
+            cls._warn(stacklevel=stacklevel+2)
             return True
         return False

     @classmethod
-    def _warn(cls):
+    def _warn(cls, stacklevel=5):
         """Issue a warning about XML being parsed as HTML."""
         warnings.warn(
-            XMLParsedAsHTMLWarning.MESSAGE, XMLParsedAsHTMLWarning
+            XMLParsedAsHTMLWarning.MESSAGE, XMLParsedAsHTMLWarning,
+            stacklevel=stacklevel
         )

     def _initialize_xml_detector(self):
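The plumbing above exists so that XMLParsedAsHTMLWarning is attributed to the user's call site rather than to bs4 internals. The mechanism is the stdlib stacklevel knob; a generic illustration (not bs4 code):

import warnings

def _warn(stacklevel=2):
    warnings.warn("XML parsed as HTML", UserWarning, stacklevel=stacklevel)

def public_api():
    # 1 = _warn itself, 2 = public_api, 3 = whoever called public_api
    _warn(stacklevel=3)

public_api()  # the warning points at this line, not at _warn()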


@@ -77,7 +77,9 @@ class HTML5TreeBuilder(HTMLTreeBuilder):
         # html5lib only parses HTML, so if it's given XML that's worth
         # noting.
-        DetectsXMLParsedAsHTML.warn_if_markup_looks_like_xml(markup)
+        DetectsXMLParsedAsHTML.warn_if_markup_looks_like_xml(
+            markup, stacklevel=3
+        )

         yield (markup, None, None, False)


@@ -378,10 +378,10 @@ class HTMLParserTreeBuilder(HTMLTreeBuilder):
         parser.soup = self.soup
         try:
             parser.feed(markup)
+            parser.close()
         except AssertionError as e:
             # html.parser raises AssertionError in rare cases to
             # indicate a fatal problem with the markup, especially
             # when there's an error in the doctype declaration.
             raise ParserRejectedMarkup(e)
-        parser.close()
         parser.already_closed_empty_element = []


@@ -179,7 +179,9 @@ class LXMLTreeBuilderForXML(TreeBuilder):
             self.processing_instruction_class = ProcessingInstruction
             # We're in HTML mode, so if we're given XML, that's worth
             # noting.
-            DetectsXMLParsedAsHTML.warn_if_markup_looks_like_xml(markup)
+            DetectsXMLParsedAsHTML.warn_if_markup_looks_like_xml(
+                markup, stacklevel=3
+            )
         else:
             self.processing_instruction_class = XMLProcessingInstruction


@@ -1356,7 +1356,7 @@ class Tag(PageElement):
         This is the first step in the deepcopy process.
         """
         clone = type(self)(
-            None, self.builder, self.name, self.namespace,
+            None, None, self.name, self.namespace,
             self.prefix, self.attrs, is_xml=self._is_xml,
             sourceline=self.sourceline, sourcepos=self.sourcepos,
             can_be_empty_element=self.can_be_empty_element,
@@ -1845,6 +1845,11 @@ class Tag(PageElement):
         return space_before + s + space_after

     def _format_tag(self, eventual_encoding, formatter, opening):
+        if self.hidden:
+            # A hidden tag is invisible, although its contents
+            # are visible.
+            return ''
+
         # A tag starts with the < character (see below).
         # Then the / character, if this is a closing tag.


@@ -51,7 +51,7 @@ class Formatter(EntitySubstitution):
             void_element_close_prefix='/', cdata_containing_tags=None,
             empty_attributes_are_booleans=False, indent=1,
     ):
-        """Constructor.
+        r"""Constructor.

         :param language: This should be Formatter.XML if you are formatting
            XML markup and Formatter.HTML if you are formatting HTML markup.
@@ -76,7 +76,7 @@ class Formatter(EntitySubstitution):
            negative, or "" will only insert newlines. Using a
            positive integer indent indents that many spaces per
            level. If indent is a string (such as "\t"), that string
-           is used to indent each level. The default behavior to
+           is used to indent each level. The default behavior is to
            indent one space per level.
         """
         self.language = language


@@ -1105,7 +1105,7 @@ class XMLTreeBuilderSmokeTest(TreeBuilderSmokeTest):
         doc = """<?xml version="1.0" encoding="utf-8"?>
 <Document xmlns="http://example.com/ns0"
     xmlns:ns1="http://example.com/ns1"
-    xmlns:ns2="http://example.com/ns2"
+    xmlns:ns2="http://example.com/ns2">
 <ns1:tag>foo</ns1:tag>
 <ns1:tag>bar</ns1:tag>
 <ns2:tag key="value">baz</ns2:tag>


@@ -0,0 +1 @@
˙ ><applet></applet><applet></applet><apple|><applet><applet><appl„><applet><applet></applet></applet></applet></applet><applet></applet><apple>t<applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet>et><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><azplet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><plet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><
applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet></applet></applet></applet></applet></appt></applet></applet></applet></applet></applet></applet></applet></applet></applet></applet></applet></applet></applet></applet></applet></applet></applet></applet><<meta charset=utf-8>


@@ -0,0 +1 @@
- ˙˙ <math><select><mi><select><select>t


@@ -14,19 +14,61 @@ from bs4 import (
     BeautifulSoup,
     ParserRejectedMarkup,
 )

+try:
+    from soupsieve.util import SelectorSyntaxError
+    import lxml
+    import html5lib
+    fully_fuzzable = True
+except ImportError:
+    fully_fuzzable = False
+
+
+@pytest.mark.skipif(not fully_fuzzable, reason="Prerequisites for fuzz tests are not installed.")
 class TestFuzz(object):

     # Test case markup files from fuzzers are given this extension so
     # they can be included in builds.
     TESTCASE_SUFFIX = ".testcase"

+    # Copied 20230512 from
+    # https://github.com/google/oss-fuzz/blob/4ac6a645a197a695fe76532251feb5067076b3f3/projects/bs4/bs4_fuzzer.py
+    #
+    # Copying the code lets us precisely duplicate the behavior of
+    # oss-fuzz. The downside is that this code changes over time, so
+    # multiple copies of the code must be kept around to run against
+    # older tests. I'm not sure what to do about this, but I may
+    # retire old tests after a time.
+    def fuzz_test_with_css(self, filename):
+        data = self.__markup(filename)
+        parsers = ['lxml-xml', 'html5lib', 'html.parser', 'lxml']
+        try:
+            idx = int(data[0]) % len(parsers)
+        except ValueError:
+            return
+        css_selector, data = data[1:10], data[10:]
+
+        try:
+            soup = BeautifulSoup(data[1:], features=parsers[idx])
+        except ParserRejectedMarkup:
+            return
+        except ValueError:
+            return
+        list(soup.find_all(True))
+        try:
+            soup.css.select(css_selector.decode('utf-8', 'replace'))
+        except SelectorSyntaxError:
+            return
+        soup.prettify()
+
     # This class of error has been fixed by catching a less helpful
     # exception from html.parser and raising ParserRejectedMarkup
     # instead.
     @pytest.mark.parametrize(
         "filename", [
             "clusterfuzz-testcase-minimized-bs4_fuzzer-5703933063462912",
+            "crash-ffbdfa8a2b26f13537b68d3794b0478a4090ee4a",
         ]
     )
     def test_rejected_markup(self, filename):
@@ -38,6 +80,9 @@ class TestFuzz(object):
     # which overflow the Python call stack when the tree is converted
     # to a string. This is an issue with Beautiful Soup which was fixed
     # as part of [bug=1471755].
+    #
+    # These test cases are in the older format that doesn't specify
+    # which parser to use or give a CSS selector.
     @pytest.mark.parametrize(
         "filename", [
             "clusterfuzz-testcase-minimized-bs4_fuzzer-5984173902397440",
@@ -46,18 +91,44 @@ class TestFuzz(object):
             "clusterfuzz-testcase-minimized-bs4_fuzzer-6450958476902400",
         ]
     )
-    def test_deeply_nested_document(self, filename):
+    def test_deeply_nested_document_without_css(self, filename):
         # Parsing the document and encoding it back to a string is
         # sufficient to demonstrate that the overflow problem has
         # been fixed.
         markup = self.__markup(filename)
         BeautifulSoup(markup, 'html.parser').encode()

+    # This class of error has to do with very deeply nested documents
+    # which overflow the Python call stack when the tree is converted
+    # to a string. This is an issue with Beautiful Soup which was fixed
+    # as part of [bug=1471755].
+    @pytest.mark.parametrize(
+        "filename", [
+            "clusterfuzz-testcase-minimized-bs4_fuzzer-5000587759190016",
+            "clusterfuzz-testcase-minimized-bs4_fuzzer-5375146639360000",
+            "clusterfuzz-testcase-minimized-bs4_fuzzer-5492400320282624",
+        ]
+    )
+    def test_deeply_nested_document(self, filename):
+        self.fuzz_test_with_css(filename)
+
+    @pytest.mark.parametrize(
+        "filename", [
+            "clusterfuzz-testcase-minimized-bs4_fuzzer-4670634698080256",
+            "clusterfuzz-testcase-minimized-bs4_fuzzer-5270998950477824",
+        ]
+    )
+    def test_soupsieve_errors(self, filename):
+        self.fuzz_test_with_css(filename)
+
     # This class of error represents problems with html5lib's parser,
     # not Beautiful Soup. I use
     # https://github.com/html5lib/html5lib-python/issues/568 to notify
     # the html5lib developers of these issues.
-    @pytest.mark.skip("html5lib problems")
+    #
+    # These test cases are in the older format that doesn't specify
+    # which parser to use or give a CSS selector.
+    @pytest.mark.skip(reason="html5lib-specific problems")
     @pytest.mark.parametrize(
         "filename", [
             # b"""ÿ<!DOCTyPEV PUBLIC'''Ð'"""
@@ -79,10 +150,24 @@ class TestFuzz(object):
             "crash-0d306a50c8ed8bcd0785b67000fcd5dea1d33f08"
         ]
     )
-    def test_html5lib_parse_errors(self, filename):
+    def test_html5lib_parse_errors_without_css(self, filename):
         markup = self.__markup(filename)
         print(BeautifulSoup(markup, 'html5lib').encode())

+    # This class of error represents problems with html5lib's parser,
+    # not Beautiful Soup. I use
+    # https://github.com/html5lib/html5lib-python/issues/568 to notify
+    # the html5lib developers of these issues.
+    @pytest.mark.skip(reason="html5lib-specific problems")
+    @pytest.mark.parametrize(
+        "filename", [
+            # b'- \xff\xff <math>\x10<select><mi><select><select>t'
+            "clusterfuzz-testcase-minimized-bs4_fuzzer-6306874195312640",
+        ]
+    )
+    def test_html5lib_parse_errors(self, filename):
+        self.fuzz_test_with_css(filename)
+
     def __markup(self, filename):
         if not filename.endswith(self.TESTCASE_SUFFIX):
             filename += self.TESTCASE_SUFFIX


@@ -219,3 +219,16 @@ class TestMultiValuedAttributes(SoupTest):
         )
         assert soup.a['class'] == 'foo'
         assert soup.a['id'] == ['bar']
+
+    def test_hidden_tag_is_invisible(self):
+        # Setting .hidden on a tag makes it invisible in output, but
+        # leaves its contents visible.
+        #
+        # This is not a documented or supported feature of Beautiful
+        # Soup (e.g. NavigableString doesn't support .hidden even
+        # though it could), but some people use it and it's not
+        # hurting anything to verify that it keeps working.
+        #
+        soup = self.soup('<div id="1"><span id="2">a string</span></div>')
+        soup.span.hidden = True
+        assert '<div id="1">a string</div>' == str(soup.div)


@@ -452,6 +452,6 @@ class WSGIErrorHandler(logging.Handler):
 class LazyRfc3339UtcTime(object):
     def __str__(self):
-        """Return now() in RFC3339 UTC Format."""
-        now = datetime.datetime.now()
-        return now.isoformat('T') + 'Z'
+        """Return utcnow() in RFC3339 UTC Format."""
+        iso_formatted_now = datetime.datetime.utcnow().isoformat('T')
+        return f'{iso_formatted_now!s}Z'
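The old code stamped naive local time with a "Z" suffix, which misstates the instant on any host whose clock is not UTC; the rewrite formats utcnow() instead. A quick illustration of the difference (assumes a non-UTC local timezone):

import datetime

wrong = datetime.datetime.now().isoformat('T') + 'Z'       # local wall time, claims UTC
right = f"{datetime.datetime.utcnow().isoformat('T')!s}Z"  # what the fix emits
print(wrong)
print(right)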


@@ -622,13 +622,15 @@ def autovary(ignore=None, debug=False):
 def convert_params(exception=ValueError, error=400):
-    """Convert request params based on function annotations, with error handling.
+    """Convert request params based on function annotations.

-    exception
-        Exception class to catch.
+    This function also processes errors that are subclasses of ``exception``.

-    status
-        The HTTP error code to return to the client on failure.
+    :param BaseException exception: Exception class to catch.
+    :type exception: BaseException
+
+    :param error: The HTTP status code to return to the client on failure.
+    :type error: int
     """
     request = cherrypy.serving.request
     types = request.handler.callable.__annotations__


@@ -47,7 +47,9 @@ try:
     import pstats

     def new_func_strip_path(func_name):
-        """Make profiler output more readable by adding `__init__` modules' parents
+        """Add ``__init__`` modules' parents.
+
+        This makes the profiler output more readable.
         """
         filename, line, name = func_name
         if filename.endswith('__init__.py'):


@@ -188,7 +188,7 @@ class Parser(configparser.ConfigParser):
     def dict_from_file(self, file):
         if hasattr(file, 'read'):
-            self.readfp(file)
+            self.read_file(file)
         else:
             self.read(file)
         return self.as_dict()
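readfp() has been deprecated since Python 3.2 and was removed in 3.12, so this one-line swap to read_file() is what keeps the config parser working on current interpreters. A stdlib illustration of the replacement call:

import configparser
import io

parser = configparser.ConfigParser()
parser.read_file(io.StringIO("[global]\nserver.socket_port = 8080\n"))
print(parser.get("global", "server.socket_port"))  # '8080'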


@@ -1,19 +1,18 @@
 """Module with helpers for serving static files."""

+import mimetypes
 import os
 import platform
 import re
 import stat
-import mimetypes
-import urllib.parse
 import unicodedata
+import urllib.parse

 from email.generator import _make_boundary as make_boundary
 from io import UnsupportedOperation

 import cherrypy
 from cherrypy._cpcompat import ntob
-from cherrypy.lib import cptools, httputil, file_generator_limited
+from cherrypy.lib import cptools, file_generator_limited, httputil


 def _setup_mimetypes():
@@ -185,7 +184,10 @@ def serve_fileobj(fileobj, content_type=None, disposition=None, name=None,

 def _serve_fileobj(fileobj, content_type, content_length, debug=False):
-    """Internal. Set response.body to the given file object, perhaps ranged."""
+    """Set ``response.body`` to the given file object, perhaps ranged.
+
+    Internal helper.
+    """
     response = cherrypy.serving.response

     # HTTP/1.0 didn't have Range/Accept-Ranges headers, or the 206 code


@@ -494,7 +494,7 @@ class Bus(object):
                     "Cannot reconstruct command from '-c'. "
                     'Ref: https://github.com/cherrypy/cherrypy/issues/1545')
         except AttributeError:
-            """It looks Py_GetArgcArgv is completely absent in some environments
+            """It looks Py_GetArgcArgv's completely absent in some environments

             It is known, that there's no Py_GetArgcArgv in MS Windows and
             ``ctypes`` module is completely absent in Google AppEngine


@@ -136,6 +136,9 @@ class HTTPTests(helper.CPWebCase):
         self.assertStatus(200)
         self.assertBody(b'Hello world!')

+        response.close()
+        c.close()
+
         # Now send a message that has no Content-Length, but does send a body.
         # Verify that CP times out the socket and responds
         # with 411 Length Required.
@@ -159,6 +162,9 @@ class HTTPTests(helper.CPWebCase):
         self.status = str(response.status)
         self.assertStatus(411)

+        response.close()
+        c.close()
+
     def test_post_multipart(self):
         alphabet = 'abcdefghijklmnopqrstuvwxyz'
         # generate file contents for a large post
@@ -184,6 +190,9 @@ class HTTPTests(helper.CPWebCase):
         parts = ['%s * 65536' % ch for ch in alphabet]
         self.assertBody(', '.join(parts))

+        response.close()
+        c.close()
+
     def test_post_filename_with_special_characters(self):
         """Testing that we can handle filenames with special characters.
@@ -217,6 +226,9 @@ class HTTPTests(helper.CPWebCase):
         self.assertStatus(200)
         self.assertBody(fname)

+        response.close()
+        c.close()
+
     def test_malformed_request_line(self):
         if getattr(cherrypy.server, 'using_apache', False):
             return self.skip('skipped due to known Apache differences...')
@@ -264,6 +276,9 @@ class HTTPTests(helper.CPWebCase):
         self.body = response.fp.read(20)
         self.assertBody('Illegal header line.')

+        response.close()
+        c.close()
+
     def test_http_over_https(self):
         if self.scheme != 'https':
             return self.skip('skipped (not running HTTPS)... ')
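Each added pair of close() calls tears down the raw http.client connection the test opened by hand; without them, interpreter shutdown produces unclosed-socket ResourceWarnings. The pattern in isolation (a sketch, assuming a server listening on localhost:8080):

import http.client

c = http.client.HTTPConnection("localhost", 8080)
try:
    c.request("GET", "/")
    response = c.getresponse()
    response.read()
    response.close()
finally:
    c.close()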


@@ -150,6 +150,8 @@ class IteratorTest(helper.CPWebCase):
         self.assertStatus(200)
         self.assertBody('0')

+        itr_conn.close()
+
         # Now we do the same check with streaming - some classes will
         # be automatically closed, while others cannot.
         stream_counts = {}


@@ -1,5 +1,6 @@
 """Basic tests for the CherryPy core: request handling."""

+import datetime
 import logging

 from cheroot.test import webtest
@@ -197,6 +198,33 @@ def test_custom_log_format(log_tracker, monkeypatch, server):
     )


+def test_utc_in_timez(monkeypatch):
+    """Test that ``LazyRfc3339UtcTime`` is rendered as ``str`` using UTC timestamp."""
+    utcoffset8_local_time_in_naive_utc = (
+        datetime.datetime(
+            year=2020,
+            month=1,
+            day=1,
+            hour=1,
+            minute=23,
+            second=45,
+            tzinfo=datetime.timezone(datetime.timedelta(hours=8)),
+        )
+        .astimezone(datetime.timezone.utc)
+        .replace(tzinfo=None)
+    )
+
+    class mock_datetime:
+        @classmethod
+        def utcnow(cls):
+            return utcoffset8_local_time_in_naive_utc
+
+    monkeypatch.setattr('datetime.datetime', mock_datetime)
+    rfc3339_utc_time = str(cherrypy._cplogging.LazyRfc3339UtcTime())
+    expected_time = '2019-12-31T17:23:45Z'
+    assert rfc3339_utc_time == expected_time
+
+
 def test_timez_log_format(log_tracker, monkeypatch, server):
     """Test a customized access_log_format string, which is a
     feature of _cplogging.LogManager.access()."""


@@ -0,0 +1,6 @@
version = "2.9.*"
upstream_repository = "https://github.com/dateutil/dateutil"
partial_stub = true

[tool.stubtest]
ignore_missing_stub = true


@@ -0,0 +1,9 @@
from typing_extensions import Self
class weekday:
def __init__(self, weekday: int, n: int | None = None) -> None: ...
def __call__(self, n: int) -> Self: ...
def __eq__(self, other: object) -> bool: ...
def __hash__(self) -> int: ...
weekday: int
n: int


@@ -0,0 +1,8 @@
from datetime import date
from typing import Literal
EASTER_JULIAN: Literal[1]
EASTER_ORTHODOX: Literal[2]
EASTER_WESTERN: Literal[3]
def easter(year: int, method: Literal[1, 2, 3] = 3) -> date: ...


@@ -0,0 +1,67 @@
from collections.abc import Callable, Mapping
from datetime import datetime, tzinfo
from typing import IO, Any
from typing_extensions import TypeAlias
from .isoparser import isoparse as isoparse, isoparser as isoparser
_FileOrStr: TypeAlias = bytes | str | IO[str] | IO[Any]
_TzData: TypeAlias = tzinfo | int | str | None
_TzInfo: TypeAlias = Mapping[str, _TzData] | Callable[[str, int], _TzData]
class parserinfo:
JUMP: list[str]
WEEKDAYS: list[tuple[str, ...]]
MONTHS: list[tuple[str, ...]]
HMS: list[tuple[str, str, str]]
AMPM: list[tuple[str, str]]
UTCZONE: list[str]
PERTAIN: list[str]
TZOFFSET: dict[str, int]
def __init__(self, dayfirst: bool = False, yearfirst: bool = False) -> None: ...
def jump(self, name: str) -> bool: ...
def weekday(self, name: str) -> int | None: ...
def month(self, name: str) -> int | None: ...
def hms(self, name: str) -> int | None: ...
def ampm(self, name: str) -> int | None: ...
def pertain(self, name: str) -> bool: ...
def utczone(self, name: str) -> bool: ...
def tzoffset(self, name: str) -> int | None: ...
def convertyear(self, year: int) -> int: ...
def validate(self, res: datetime) -> bool: ...
class parser:
def __init__(self, info: parserinfo | None = None) -> None: ...
def parse(
self,
timestr: _FileOrStr,
default: datetime | None = None,
ignoretz: bool = False,
tzinfos: _TzInfo | None = None,
*,
dayfirst: bool | None = ...,
yearfirst: bool | None = ...,
fuzzy: bool = ...,
fuzzy_with_tokens: bool = ...,
) -> datetime: ...
DEFAULTPARSER: parser
def parse(
timestr: _FileOrStr,
parserinfo: parserinfo | None = None,
*,
dayfirst: bool | None = ...,
yearfirst: bool | None = ...,
ignoretz: bool = ...,
fuzzy: bool = ...,
fuzzy_with_tokens: bool = ...,
default: datetime | None = ...,
tzinfos: _TzInfo | None = ...,
) -> datetime: ...
class _tzparser: ...
DEFAULTTZPARSER: _tzparser
class ParserError(ValueError): ...


@@ -0,0 +1,15 @@
from _typeshed import SupportsRead
from datetime import date, datetime, time, tzinfo
from typing_extensions import TypeAlias
_Readable: TypeAlias = SupportsRead[str | bytes]
_TakesAscii: TypeAlias = str | bytes | _Readable
class isoparser:
def __init__(self, sep: str | bytes | None = None): ...
def isoparse(self, dt_str: _TakesAscii) -> datetime: ...
def parse_isodate(self, datestr: _TakesAscii) -> date: ...
def parse_isotime(self, timestr: _TakesAscii) -> time: ...
def parse_tzstr(self, tzstr: _TakesAscii, zero_as_utc: bool = True) -> tzinfo: ...
def isoparse(dt_str: _TakesAscii) -> datetime: ...


@@ -0,0 +1 @@
partial


@@ -0,0 +1,97 @@
from datetime import date, timedelta
from typing import SupportsFloat, TypeVar, overload
from typing_extensions import Self, TypeAlias
# See #9817 for why we reexport this here
from ._common import weekday as weekday
_DateT = TypeVar("_DateT", bound=date)
# Work around attribute and type having the same name.
_Weekday: TypeAlias = weekday
MO: weekday
TU: weekday
WE: weekday
TH: weekday
FR: weekday
SA: weekday
SU: weekday
class relativedelta:
years: int
months: int
days: int
leapdays: int
hours: int
minutes: int
seconds: int
microseconds: int
year: int | None
month: int | None
weekday: _Weekday | None
day: int | None
hour: int | None
minute: int | None
second: int | None
microsecond: int | None
def __init__(
self,
dt1: date | None = None,
dt2: date | None = None,
years: int | None = 0,
months: int | None = 0,
days: int | None = 0,
leapdays: int | None = 0,
weeks: int | None = 0,
hours: int | None = 0,
minutes: int | None = 0,
seconds: int | None = 0,
microseconds: int | None = 0,
year: int | None = None,
month: int | None = None,
day: int | None = None,
weekday: int | _Weekday | None = None,
yearday: int | None = None,
nlyearday: int | None = None,
hour: int | None = None,
minute: int | None = None,
second: int | None = None,
microsecond: int | None = None,
) -> None: ...
@property
def weeks(self) -> int: ...
@weeks.setter
def weeks(self, value: int) -> None: ...
def normalized(self) -> Self: ...
# TODO: use Union when mypy will handle it properly in overloaded operator
# methods (#2129, #1442, #1264 in mypy)
@overload
def __add__(self, other: relativedelta) -> Self: ...
@overload
def __add__(self, other: timedelta) -> Self: ...
@overload
def __add__(self, other: _DateT) -> _DateT: ...
@overload
def __radd__(self, other: relativedelta) -> Self: ...
@overload
def __radd__(self, other: timedelta) -> Self: ...
@overload
def __radd__(self, other: _DateT) -> _DateT: ...
@overload
def __rsub__(self, other: relativedelta) -> Self: ...
@overload
def __rsub__(self, other: timedelta) -> Self: ...
@overload
def __rsub__(self, other: _DateT) -> _DateT: ...
def __sub__(self, other: relativedelta) -> Self: ...
def __neg__(self) -> Self: ...
def __bool__(self) -> bool: ...
def __nonzero__(self) -> bool: ...
def __mul__(self, other: SupportsFloat) -> Self: ...
def __rmul__(self, other: SupportsFloat) -> Self: ...
def __eq__(self, other: object) -> bool: ...
def __ne__(self, other: object) -> bool: ...
def __div__(self, other: SupportsFloat) -> Self: ...
def __truediv__(self, other: SupportsFloat) -> Self: ...
def __abs__(self) -> Self: ...
def __hash__(self) -> int: ...
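Since the stub mirrors dateutil's runtime API, the calendar-aware arithmetic it types looks like this in practice (assumes python-dateutil is installed):

from datetime import date
from dateutil.relativedelta import relativedelta, FR

print(date(2024, 1, 31) + relativedelta(months=1))        # 2024-02-29 (clamped; leap year)
print(date(2024, 3, 24) + relativedelta(weekday=FR(-1)))  # 2024-03-22, the previous Friday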


@@ -0,0 +1,111 @@
import datetime
from _typeshed import Incomplete
from collections.abc import Iterable, Iterator, Sequence
from typing_extensions import TypeAlias
from ._common import weekday as weekdaybase
YEARLY: int
MONTHLY: int
WEEKLY: int
DAILY: int
HOURLY: int
MINUTELY: int
SECONDLY: int
class weekday(weekdaybase): ...
weekdays: tuple[weekday, weekday, weekday, weekday, weekday, weekday, weekday]
MO: weekday
TU: weekday
WE: weekday
TH: weekday
FR: weekday
SA: weekday
SU: weekday
class rrulebase:
def __init__(self, cache: bool = False) -> None: ...
def __iter__(self) -> Iterator[datetime.datetime]: ...
def __getitem__(self, item): ...
def __contains__(self, item): ...
def count(self): ...
def before(self, dt, inc: bool = False): ...
def after(self, dt, inc: bool = False): ...
def xafter(self, dt, count: Incomplete | None = None, inc: bool = False): ...
def between(self, after, before, inc: bool = False, count: int = 1): ...
class rrule(rrulebase):
def __init__(
self,
freq,
dtstart: datetime.date | None = None,
interval: int = 1,
wkst: weekday | int | None = None,
count: int | None = None,
until: datetime.date | int | None = None,
bysetpos: int | Iterable[int] | None = None,
bymonth: int | Iterable[int] | None = None,
bymonthday: int | Iterable[int] | None = None,
byyearday: int | Iterable[int] | None = None,
byeaster: int | Iterable[int] | None = None,
byweekno: int | Iterable[int] | None = None,
byweekday: int | weekday | Iterable[int] | Iterable[weekday] | None = None,
byhour: int | Iterable[int] | None = None,
byminute: int | Iterable[int] | None = None,
bysecond: int | Iterable[int] | None = None,
cache: bool = False,
) -> None: ...
def replace(self, **kwargs): ...
_RRule: TypeAlias = rrule
class _iterinfo:
rrule: _RRule
def __init__(self, rrule: _RRule) -> None: ...
yearlen: int | None
nextyearlen: int | None
yearordinal: int | None
yearweekday: int | None
mmask: Sequence[int] | None
mdaymask: Sequence[int] | None
nmdaymask: Sequence[int] | None
wdaymask: Sequence[int] | None
mrange: Sequence[int] | None
wnomask: Sequence[int] | None
nwdaymask: Sequence[int] | None
eastermask: Sequence[int] | None
lastyear: int | None
lastmonth: int | None
def rebuild(self, year, month): ...
def ydayset(self, year, month, day): ...
def mdayset(self, year, month, day): ...
def wdayset(self, year, month, day): ...
def ddayset(self, year, month, day): ...
def htimeset(self, hour, minute, second): ...
def mtimeset(self, hour, minute, second): ...
def stimeset(self, hour, minute, second): ...
class rruleset(rrulebase):
class _genitem:
dt: Incomplete
genlist: list[Incomplete]
gen: Incomplete
def __init__(self, genlist, gen) -> None: ...
def __next__(self) -> None: ...
next = __next__
def __lt__(self, other) -> bool: ...
def __gt__(self, other) -> bool: ...
def __eq__(self, other) -> bool: ...
def __ne__(self, other) -> bool: ...
def __init__(self, cache: bool = False) -> None: ...
def rrule(self, rrule: _RRule): ...
def rdate(self, rdate): ...
def exrule(self, exrule): ...
def exdate(self, exdate): ...
class _rrulestr:
def __call__(self, s, **kwargs) -> rrule | rruleset: ...
rrulestr: _rrulestr
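A quick sketch of the rrule/rruleset API these stubs describe (illustrative only; the dates are arbitrary):

    from datetime import datetime
    from dateutil.rrule import WEEKLY, rrule, rruleset

    # Three weekly occurrences starting Monday 2024-01-01
    rule = rrule(WEEKLY, dtstart=datetime(2024, 1, 1), count=3)
    print(list(rule))           # 2024-01-01, 2024-01-08, 2024-01-15

    # rruleset combines rules with explicit exclusions
    rs = rruleset()
    rs.rrule(rule)
    rs.exdate(datetime(2024, 1, 8))
    print(list(rs))             # the middle occurrence is dropped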

View file

@ -0,0 +1,15 @@
from .tz import (
datetime_ambiguous as datetime_ambiguous,
datetime_exists as datetime_exists,
gettz as gettz,
resolve_imaginary as resolve_imaginary,
tzfile as tzfile,
tzical as tzical,
tzlocal as tzlocal,
tzoffset as tzoffset,
tzrange as tzrange,
tzstr as tzstr,
tzutc as tzutc,
)
UTC: tzutc

View file

@ -0,0 +1,28 @@
import abc
from datetime import datetime, timedelta, tzinfo
from typing import ClassVar
def tzname_in_python2(namefunc): ...
def enfold(dt: datetime, fold: int = 1): ...
class _DatetimeWithFold(datetime):
@property
def fold(self): ...
# Doesn't actually have ABCMeta as the metaclass at runtime,
# but mypy complains if we don't have it in the stub.
# See discussion in #8908
class _tzinfo(tzinfo, metaclass=abc.ABCMeta):
def is_ambiguous(self, dt: datetime) -> bool: ...
def fromutc(self, dt: datetime) -> datetime: ...
class tzrangebase(_tzinfo):
def __init__(self) -> None: ...
def utcoffset(self, dt: datetime | None) -> timedelta | None: ...
def dst(self, dt: datetime | None) -> timedelta | None: ...
def tzname(self, dt: datetime | None) -> str: ...
def fromutc(self, dt: datetime) -> datetime: ...
def is_ambiguous(self, dt: datetime) -> bool: ...
__hash__: ClassVar[None] # type: ignore[assignment]
def __ne__(self, other): ...
__reduce__ = object.__reduce__
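enfold() is the least familiar name in this stub: it marks which side of an ambiguous (repeated) wall time a datetime refers to, per PEP 495. A sketch, assuming the America/New_York zone data is available:

    from datetime import datetime
    from dateutil import tz

    eastern = tz.gettz("America/New_York")
    dt = datetime(2023, 11, 5, 1, 30, tzinfo=eastern)  # occurs twice at DST end
    later = tz.enfold(dt, fold=1)                      # select the second pass
    print(dt.utcoffset(), later.utcoffset())           # -4:00 (EDT) vs -5:00 (EST)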

View file

@ -0,0 +1,115 @@
import datetime
from _typeshed import Incomplete
from typing import ClassVar, Literal, Protocol, TypeVar
from ..relativedelta import relativedelta
from ._common import _tzinfo as _tzinfo, enfold as enfold, tzname_in_python2 as tzname_in_python2, tzrangebase as tzrangebase
_DT = TypeVar("_DT", bound=datetime.datetime)
ZERO: datetime.timedelta
EPOCH: datetime.datetime
EPOCHORDINAL: int
class tzutc(datetime.tzinfo):
def utcoffset(self, dt: datetime.datetime | None) -> datetime.timedelta | None: ...
def dst(self, dt: datetime.datetime | None) -> datetime.timedelta | None: ...
def tzname(self, dt: datetime.datetime | None) -> str: ...
def is_ambiguous(self, dt: datetime.datetime | None) -> bool: ...
def fromutc(self, dt: _DT) -> _DT: ...
def __eq__(self, other): ...
__hash__: ClassVar[None] # type: ignore[assignment]
def __ne__(self, other): ...
__reduce__ = object.__reduce__
class tzoffset(datetime.tzinfo):
def __init__(self, name, offset) -> None: ...
def utcoffset(self, dt: datetime.datetime | None) -> datetime.timedelta | None: ...
def dst(self, dt: datetime.datetime | None) -> datetime.timedelta | None: ...
def is_ambiguous(self, dt: datetime.datetime | None) -> bool: ...
def tzname(self, dt: datetime.datetime | None) -> str: ...
def fromutc(self, dt: _DT) -> _DT: ...
def __eq__(self, other): ...
__hash__: ClassVar[None] # type: ignore[assignment]
def __ne__(self, other): ...
__reduce__ = object.__reduce__
@classmethod
def instance(cls, name, offset) -> tzoffset: ...
class tzlocal(_tzinfo):
def __init__(self) -> None: ...
def utcoffset(self, dt: datetime.datetime | None) -> datetime.timedelta | None: ...
def dst(self, dt: datetime.datetime | None) -> datetime.timedelta | None: ...
def tzname(self, dt: datetime.datetime | None) -> str: ...
def is_ambiguous(self, dt: datetime.datetime | None) -> bool: ...
def __eq__(self, other): ...
__hash__: ClassVar[None] # type: ignore[assignment]
def __ne__(self, other): ...
__reduce__ = object.__reduce__
class _ttinfo:
def __init__(self) -> None: ...
def __eq__(self, other): ...
__hash__: ClassVar[None] # type: ignore[assignment]
def __ne__(self, other): ...
class _TZFileReader(Protocol):
# optional attribute:
# name: str
def read(self, size: int, /) -> bytes: ...
def seek(self, target: int, whence: Literal[1], /) -> object: ...
class tzfile(_tzinfo):
def __init__(self, fileobj: str | _TZFileReader, filename: str | None = None) -> None: ...
def is_ambiguous(self, dt: datetime.datetime | None, idx: int | None = None) -> bool: ...
def utcoffset(self, dt: datetime.datetime | None) -> datetime.timedelta | None: ...
def dst(self, dt: datetime.datetime | None) -> datetime.timedelta | None: ...
def tzname(self, dt: datetime.datetime | None) -> str: ...
def __eq__(self, other): ...
__hash__: ClassVar[None] # type: ignore[assignment]
def __ne__(self, other): ...
def __reduce__(self): ...
def __reduce_ex__(self, protocol): ...
class tzrange(tzrangebase):
hasdst: bool
def __init__(
self,
stdabbr: str,
stdoffset: int | datetime.timedelta | None = None,
dstabbr: str | None = None,
dstoffset: int | datetime.timedelta | None = None,
start: relativedelta | None = None,
end: relativedelta | None = None,
) -> None: ...
def transitions(self, year: int) -> tuple[datetime.datetime, datetime.datetime]: ...
def __eq__(self, other): ...
class tzstr(tzrange):
hasdst: bool
def __init__(self, s: str, posix_offset: bool = False) -> None: ...
@classmethod
def instance(cls, name, offset) -> tzoffset: ...
class _ICalReader(Protocol):
# optional attribute:
# name: str
def read(self) -> str: ...
class tzical:
def __init__(self, fileobj: str | _ICalReader) -> None: ...
def keys(self): ...
def get(self, tzid: Incomplete | None = None): ...
TZFILES: list[str]
TZPATHS: list[str]
def datetime_exists(dt: datetime.datetime, tz: datetime.tzinfo | None = None) -> bool: ...
def datetime_ambiguous(dt: datetime.datetime, tz: datetime.tzinfo | None = None) -> bool: ...
def resolve_imaginary(dt: datetime.datetime) -> datetime.datetime: ...
class _GetTZ:
def __call__(self, name: str | None = ...) -> datetime.tzinfo | None: ...
def nocache(self, name: str | None) -> datetime.tzinfo | None: ...
gettz: _GetTZ
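datetime_exists() and resolve_imaginary() handle the opposite case: wall times skipped by a DST jump. A sketch under the same zone-data assumption as above:

    from datetime import datetime
    from dateutil import tz

    nyc = tz.gettz("America/New_York")
    spring = datetime(2023, 3, 12, 2, 30, tzinfo=nyc)  # skipped at DST start
    print(tz.datetime_exists(spring))                  # False
    print(tz.resolve_imaginary(spring))                # 2023-03-12 03:30:00-04:00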

View file

@ -0,0 +1,5 @@
from datetime import datetime, timedelta, tzinfo
def default_tzinfo(dt: datetime, tzinfo: tzinfo) -> datetime: ...
def today(tzinfo: tzinfo | None = None) -> datetime: ...
def within_delta(dt1: datetime, dt2: datetime, delta: timedelta) -> bool: ...

View file

@ -0,0 +1,17 @@
from _typeshed import Incomplete
from typing import IO
from typing_extensions import TypeAlias
__all__ = ["get_zonefile_instance", "gettz", "gettz_db_metadata"]
_MetadataType: TypeAlias = dict[str, Incomplete]
class ZoneInfoFile:
zones: dict[Incomplete, Incomplete]
metadata: _MetadataType | None
def __init__(self, zonefile_stream: IO[bytes] | None = None) -> None: ...
def get(self, name, default: Incomplete | None = None): ...
def get_zonefile_instance(new_instance: bool = False) -> ZoneInfoFile: ...
def gettz(name): ...
def gettz_db_metadata() -> _MetadataType: ...
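These functions read dateutil's bundled zone tarball; the gettz() here is separate from dateutil.tz.gettz(). A sketch (the bundled data may be absent in some installs, in which case get() returns None):

    from dateutil.zoneinfo import get_zonefile_instance

    zif = get_zonefile_instance()      # cached ZoneInfoFile unless new_instance=True
    print(sorted(zif.zones)[:3])       # zone names shipped with dateutil
    paris = zif.get("Europe/Paris")    # a tzfile, or None if not bundled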

View file

@ -0,0 +1,11 @@
from _typeshed import Incomplete, StrOrBytesPath
from collections.abc import Sequence
from tarfile import TarInfo
def rebuild(
filename: StrOrBytesPath,
tag: Incomplete | None = None,
format: str = "gz",
zonegroups: Sequence[str | TarInfo] = [],
metadata: Incomplete | None = None,
) -> None: ...

View file

@ -1,4 +1,6 @@
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
import sys
try: try:
from ._version import version as __version__ from ._version import version as __version__
except ImportError: except ImportError:
@ -6,3 +8,17 @@ except ImportError:
__all__ = ['easter', 'parser', 'relativedelta', 'rrule', 'tz', __all__ = ['easter', 'parser', 'relativedelta', 'rrule', 'tz',
'utils', 'zoneinfo'] 'utils', 'zoneinfo']
def __getattr__(name):
import importlib
if name in __all__:
return importlib.import_module("." + name, __name__)
raise AttributeError(
"module {!r} has not attribute {!r}".format(__name__, name)
)
def __dir__():
# __dir__ should include all the lazy-importable modules as well.
return [x for x in globals() if x not in sys.modules] + __all__
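The __getattr__/__dir__ pair implements PEP 562 lazy submodule loading; roughly, in a fresh interpreter (illustrative, not part of the diff):

    import sys
    import dateutil

    print("dateutil.tz" in sys.modules)  # False: nothing imported yet
    dateutil.tz                          # __getattr__ imports the submodule
    print("dateutil.tz" in sys.modules)  # True afterwards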

View file

@ -1,5 +1,4 @@
# coding: utf-8
# file generated by setuptools_scm # file generated by setuptools_scm
# don't change, don't track in version control # don't change, don't track in version control
version = '2.8.2' __version__ = version = '2.9.0.post0'
version_tuple = (2, 8, 2) __version_tuple__ = version_tuple = (2, 9, 0)

View file

@ -72,7 +72,7 @@ class isoparser(object):
Common: Common:
- ``YYYY`` - ``YYYY``
- ``YYYY-MM`` or ``YYYYMM`` - ``YYYY-MM``
- ``YYYY-MM-DD`` or ``YYYYMMDD`` - ``YYYY-MM-DD`` or ``YYYYMMDD``
Uncommon: Uncommon:

View file

@ -182,7 +182,7 @@ class rrulebase(object):
# __len__() introduces a large performance penalty. # __len__() introduces a large performance penalty.
def count(self): def count(self):
""" Returns the number of recurrences in this set. It will have go """ Returns the number of recurrences in this set. It will have go
trough the whole recurrence, if this hasn't been done before. """ through the whole recurrence, if this hasn't been done before. """
if self._len is None: if self._len is None:
for x in self: for x in self:
pass pass

View file

@ -34,7 +34,7 @@ except ImportError:
from warnings import warn from warnings import warn
ZERO = datetime.timedelta(0) ZERO = datetime.timedelta(0)
EPOCH = datetime.datetime.utcfromtimestamp(0) EPOCH = datetime.datetime(1970, 1, 1, 0, 0)
EPOCHORDINAL = EPOCH.toordinal() EPOCHORDINAL = EPOCH.toordinal()

View file

@ -1,5 +1,5 @@
#!/usr/bin/env python #!/usr/bin/env python
# Copyright 2015,2016,2017 Nir Cohen # Copyright 2015-2021 Nir Cohen
# #
# Licensed under the Apache License, Version 2.0 (the "License"); # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. # you may not use this file except in compliance with the License.
@ -55,7 +55,7 @@ except ImportError:
# Python 3.7 # Python 3.7
TypedDict = dict TypedDict = dict
__version__ = "1.8.0" __version__ = "1.9.0"
class VersionDict(TypedDict): class VersionDict(TypedDict):
@ -125,6 +125,7 @@ _DISTRO_RELEASE_BASENAME_PATTERN = re.compile(r"(\w+)[-_](release|version)$")
# Base file names to be looked up for if _UNIXCONFDIR is not readable. # Base file names to be looked up for if _UNIXCONFDIR is not readable.
_DISTRO_RELEASE_BASENAMES = [ _DISTRO_RELEASE_BASENAMES = [
"SuSE-release", "SuSE-release",
"altlinux-release",
"arch-release", "arch-release",
"base-release", "base-release",
"centos-release", "centos-release",
@ -151,6 +152,8 @@ _DISTRO_RELEASE_IGNORE_BASENAMES = (
"system-release", "system-release",
"plesk-release", "plesk-release",
"iredmail-release", "iredmail-release",
"board-release",
"ec2_version",
) )
@ -243,6 +246,7 @@ def id() -> str:
"rocky" Rocky Linux "rocky" Rocky Linux
"aix" AIX "aix" AIX
"guix" Guix System "guix" Guix System
"altlinux" ALT Linux
============== ========================================= ============== =========================================
If you have a need to get distros for reliable IDs added into this set, If you have a need to get distros for reliable IDs added into this set,
@ -991,10 +995,10 @@ class LinuxDistribution:
For details, see :func:`distro.info`. For details, see :func:`distro.info`.
""" """
return dict( return InfoDict(
id=self.id(), id=self.id(),
version=self.version(pretty, best), version=self.version(pretty, best),
version_parts=dict( version_parts=VersionDict(
major=self.major_version(best), major=self.major_version(best),
minor=self.minor_version(best), minor=self.minor_version(best),
build_number=self.build_number(best), build_number=self.build_number(best),
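The hunk above only tightens typing (plain dicts become the InfoDict/VersionDict TypedDicts defined earlier in the file); call sites are unchanged. A sketch:

    import distro

    info = distro.info(pretty=False, best=False)  # an InfoDict after this change
    print(info["id"], info["version_parts"]["major"])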

View file

@ -7,7 +7,9 @@ import socket
import sys import sys
import dns._asyncbackend import dns._asyncbackend
import dns._features
import dns.exception import dns.exception
import dns.inet
_is_win32 = sys.platform == "win32" _is_win32 = sys.platform == "win32"
@ -121,7 +123,7 @@ class StreamSocket(dns._asyncbackend.StreamSocket):
return self.writer.get_extra_info("peercert") return self.writer.get_extra_info("peercert")
try: if dns._features.have("doh"):
import anyio import anyio
import httpcore import httpcore
import httpcore._backends.anyio import httpcore._backends.anyio
@ -205,7 +207,7 @@ try:
resolver, local_port, bootstrap_address, family resolver, local_port, bootstrap_address, family
) )
except ImportError: else:
_HTTPTransport = dns._asyncbackend.NullTransport # type: ignore _HTTPTransport = dns._asyncbackend.NullTransport # type: ignore
@ -224,14 +226,12 @@ class Backend(dns._asyncbackend.Backend):
ssl_context=None, ssl_context=None,
server_hostname=None, server_hostname=None,
): ):
if destination is None and socktype == socket.SOCK_DGRAM and _is_win32:
raise NotImplementedError(
"destinationless datagram sockets "
"are not supported by asyncio "
"on Windows"
)
loop = _get_running_loop() loop = _get_running_loop()
if socktype == socket.SOCK_DGRAM: if socktype == socket.SOCK_DGRAM:
if _is_win32 and source is None:
# Win32 wants explicit binding before recvfrom(). This is the
# proper fix for [#637].
source = (dns.inet.any_for_af(af), 0)
transport, protocol = await loop.create_datagram_endpoint( transport, protocol = await loop.create_datagram_endpoint(
_DatagramProtocol, _DatagramProtocol,
source, source,
@ -266,7 +266,7 @@ class Backend(dns._asyncbackend.Backend):
await asyncio.sleep(interval) await asyncio.sleep(interval)
def datagram_connection_required(self): def datagram_connection_required(self):
return _is_win32 return False
def get_transport_class(self): def get_transport_class(self):
return _HTTPTransport return _HTTPTransport

92
lib/dns/_features.py Normal file
View file

@ -0,0 +1,92 @@
# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
import importlib.metadata
import itertools
import string
from typing import Dict, List, Tuple
def _tuple_from_text(version: str) -> Tuple:
text_parts = version.split(".")
int_parts = []
for text_part in text_parts:
digit_prefix = "".join(
itertools.takewhile(lambda x: x in string.digits, text_part)
)
try:
int_parts.append(int(digit_prefix))
except Exception:
break
return tuple(int_parts)
def _version_check(
requirement: str,
) -> bool:
"""Is the requirement fulfilled?
The requirement must be of the form
package>=version
"""
package, minimum = requirement.split(">=")
try:
version = importlib.metadata.version(package)
except Exception:
return False
t_version = _tuple_from_text(version)
t_minimum = _tuple_from_text(minimum)
if t_version < t_minimum:
return False
return True
_cache: Dict[str, bool] = {}
def have(feature: str) -> bool:
"""Is *feature* available?
This tests if all optional packages needed for the
feature are available and recent enough.
Returns ``True`` if the feature is available,
and ``False`` if it is not or if metadata is
missing.
"""
value = _cache.get(feature)
if value is not None:
return value
requirements = _requirements.get(feature)
if requirements is None:
# we make a cache entry here for consistency not performance
_cache[feature] = False
return False
ok = True
for requirement in requirements:
if not _version_check(requirement):
ok = False
break
_cache[feature] = ok
return ok
def force(feature: str, enabled: bool) -> None:
"""Force the status of *feature* to be *enabled*.
This method is provided as a workaround for any cases
where importlib.metadata is ineffective, or for testing.
"""
_cache[feature] = enabled
_requirements: Dict[str, List[str]] = {
### BEGIN generated requirements
"dnssec": ["cryptography>=41"],
"doh": ["httpcore>=1.0.0", "httpx>=0.26.0", "h2>=4.1.0"],
"doq": ["aioquic>=0.9.25"],
"idna": ["idna>=3.6"],
"trio": ["trio>=0.23"],
"wmi": ["wmi>=1.5.1"],
### END generated requirements
}
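Usage of the new feature gate, as the other hunks in this diff apply it (have() caches its answer; force() overrides the cache, e.g. for tests):

    import dns._features

    if dns._features.have("doh"):
        import httpx  # httpx/httpcore/h2 are present and new enough

    dns._features.force("doh", False)    # override, e.g. in a test
    assert not dns._features.have("doh")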

View file

@ -8,9 +8,13 @@ import trio
import trio.socket # type: ignore import trio.socket # type: ignore
import dns._asyncbackend import dns._asyncbackend
import dns._features
import dns.exception import dns.exception
import dns.inet import dns.inet
if not dns._features.have("trio"):
raise ImportError("trio not found or too old")
def _maybe_timeout(timeout): def _maybe_timeout(timeout):
if timeout is not None: if timeout is not None:
@ -95,7 +99,7 @@ class StreamSocket(dns._asyncbackend.StreamSocket):
raise NotImplementedError raise NotImplementedError
try: if dns._features.have("doh"):
import httpcore import httpcore
import httpcore._backends.trio import httpcore._backends.trio
import httpx import httpx
@ -177,7 +181,7 @@ try:
resolver, local_port, bootstrap_address, family resolver, local_port, bootstrap_address, family
) )
except ImportError: else:
_HTTPTransport = dns._asyncbackend.NullTransport # type: ignore _HTTPTransport = dns._asyncbackend.NullTransport # type: ignore

View file

@ -32,7 +32,7 @@ def get_backend(name: str) -> Backend:
*name*, a ``str``, the name of the backend. Currently the "trio" *name*, a ``str``, the name of the backend. Currently the "trio"
and "asyncio" backends are available. and "asyncio" backends are available.
Raises NotImplementError if an unknown backend name is specified. Raises NotImplementedError if an unknown backend name is specified.
""" """
# pylint: disable=import-outside-toplevel,redefined-outer-name # pylint: disable=import-outside-toplevel,redefined-outer-name
backend = _backends.get(name) backend = _backends.get(name)

View file

@ -41,7 +41,7 @@ from dns.query import (
NoDOQ, NoDOQ,
UDPMode, UDPMode,
_compute_times, _compute_times,
_have_http2, _make_dot_ssl_context,
_matches_destination, _matches_destination,
_remaining, _remaining,
have_doh, have_doh,
@ -120,6 +120,8 @@ async def receive_udp(
request_mac: Optional[bytes] = b"", request_mac: Optional[bytes] = b"",
ignore_trailing: bool = False, ignore_trailing: bool = False,
raise_on_truncation: bool = False, raise_on_truncation: bool = False,
ignore_errors: bool = False,
query: Optional[dns.message.Message] = None,
) -> Any: ) -> Any:
"""Read a DNS message from a UDP socket. """Read a DNS message from a UDP socket.
@ -133,13 +135,14 @@ async def receive_udp(
""" """
wire = b"" wire = b""
while 1: while True:
(wire, from_address) = await sock.recvfrom(65535, _timeout(expiration)) (wire, from_address) = await sock.recvfrom(65535, _timeout(expiration))
if _matches_destination( if not _matches_destination(
sock.family, from_address, destination, ignore_unexpected sock.family, from_address, destination, ignore_unexpected
): ):
break continue
received_time = time.time() received_time = time.time()
try:
r = dns.message.from_wire( r = dns.message.from_wire(
wire, wire,
keyring=keyring, keyring=keyring,
@ -148,6 +151,23 @@ async def receive_udp(
ignore_trailing=ignore_trailing, ignore_trailing=ignore_trailing,
raise_on_truncation=raise_on_truncation, raise_on_truncation=raise_on_truncation,
) )
except dns.message.Truncated as e:
# See the comment in query.py for details.
if (
ignore_errors
and query is not None
and not query.is_response(e.message())
):
continue
else:
raise
except Exception:
if ignore_errors:
continue
else:
raise
if ignore_errors and query is not None and not query.is_response(r):
continue
return (r, received_time, from_address) return (r, received_time, from_address)
@ -164,6 +184,7 @@ async def udp(
raise_on_truncation: bool = False, raise_on_truncation: bool = False,
sock: Optional[dns.asyncbackend.DatagramSocket] = None, sock: Optional[dns.asyncbackend.DatagramSocket] = None,
backend: Optional[dns.asyncbackend.Backend] = None, backend: Optional[dns.asyncbackend.Backend] = None,
ignore_errors: bool = False,
) -> dns.message.Message: ) -> dns.message.Message:
"""Return the response obtained after sending a query via UDP. """Return the response obtained after sending a query via UDP.
@ -205,9 +226,13 @@ async def udp(
q.mac, q.mac,
ignore_trailing, ignore_trailing,
raise_on_truncation, raise_on_truncation,
ignore_errors,
q,
) )
r.time = received_time - begin_time r.time = received_time - begin_time
if not q.is_response(r): # We don't need to check q.is_response() if we are in ignore_errors mode
# as receive_udp() will have checked it.
if not (ignore_errors or q.is_response(r)):
raise BadResponse raise BadResponse
return r return r
@ -225,6 +250,7 @@ async def udp_with_fallback(
udp_sock: Optional[dns.asyncbackend.DatagramSocket] = None, udp_sock: Optional[dns.asyncbackend.DatagramSocket] = None,
tcp_sock: Optional[dns.asyncbackend.StreamSocket] = None, tcp_sock: Optional[dns.asyncbackend.StreamSocket] = None,
backend: Optional[dns.asyncbackend.Backend] = None, backend: Optional[dns.asyncbackend.Backend] = None,
ignore_errors: bool = False,
) -> Tuple[dns.message.Message, bool]: ) -> Tuple[dns.message.Message, bool]:
"""Return the response to the query, trying UDP first and falling back """Return the response to the query, trying UDP first and falling back
to TCP if UDP results in a truncated response. to TCP if UDP results in a truncated response.
@ -260,6 +286,7 @@ async def udp_with_fallback(
True, True,
udp_sock, udp_sock,
backend, backend,
ignore_errors,
) )
return (response, False) return (response, False)
except dns.message.Truncated: except dns.message.Truncated:
@ -292,14 +319,12 @@ async def send_tcp(
""" """
if isinstance(what, dns.message.Message): if isinstance(what, dns.message.Message):
wire = what.to_wire() tcpmsg = what.to_wire(prepend_length=True)
else: else:
wire = what
l = len(wire)
# copying the wire into tcpmsg is inefficient, but lets us # copying the wire into tcpmsg is inefficient, but lets us
# avoid writev() or doing a short write that would get pushed # avoid writev() or doing a short write that would get pushed
# onto the net # onto the net
tcpmsg = struct.pack("!H", l) + wire tcpmsg = len(what).to_bytes(2, "big") + what
sent_time = time.time() sent_time = time.time()
await sock.sendall(tcpmsg, _timeout(expiration, sent_time)) await sock.sendall(tcpmsg, _timeout(expiration, sent_time))
return (len(tcpmsg), sent_time) return (len(tcpmsg), sent_time)
@ -418,6 +443,7 @@ async def tls(
backend: Optional[dns.asyncbackend.Backend] = None, backend: Optional[dns.asyncbackend.Backend] = None,
ssl_context: Optional[ssl.SSLContext] = None, ssl_context: Optional[ssl.SSLContext] = None,
server_hostname: Optional[str] = None, server_hostname: Optional[str] = None,
verify: Union[bool, str] = True,
) -> dns.message.Message: ) -> dns.message.Message:
"""Return the response obtained after sending a query via TLS. """Return the response obtained after sending a query via TLS.
@ -439,11 +465,7 @@ async def tls(
cm: contextlib.AbstractAsyncContextManager = NullContext(sock) cm: contextlib.AbstractAsyncContextManager = NullContext(sock)
else: else:
if ssl_context is None: if ssl_context is None:
# See the comment about ssl.create_default_context() in query.py ssl_context = _make_dot_ssl_context(server_hostname, verify)
ssl_context = ssl.create_default_context() # lgtm[py/insecure-protocol]
ssl_context.minimum_version = ssl.TLSVersion.TLSv1_2
if server_hostname is None:
ssl_context.check_hostname = False
af = dns.inet.af_for_address(where) af = dns.inet.af_for_address(where)
stuple = _source_tuple(af, source, source_port) stuple = _source_tuple(af, source, source_port)
dtuple = (where, port) dtuple = (where, port)
@ -538,7 +560,7 @@ async def https(
transport = backend.get_transport_class()( transport = backend.get_transport_class()(
local_address=local_address, local_address=local_address,
http1=True, http1=True,
http2=_have_http2, http2=True,
verify=verify, verify=verify,
local_port=local_port, local_port=local_port,
bootstrap_address=bootstrap_address, bootstrap_address=bootstrap_address,
@ -550,7 +572,7 @@ async def https(
cm: contextlib.AbstractAsyncContextManager = NullContext(client) cm: contextlib.AbstractAsyncContextManager = NullContext(client)
else: else:
cm = httpx.AsyncClient( cm = httpx.AsyncClient(
http1=True, http2=_have_http2, verify=verify, transport=transport http1=True, http2=True, verify=verify, transport=transport
) )
async with cm as the_client: async with cm as the_client:
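A sketch of the new ignore_errors behavior from the caller's side (the server address and query name are placeholders):

    import asyncio

    import dns.asyncquery
    import dns.message

    async def main():
        q = dns.message.make_query("example.com.", "A")
        # With ignore_errors=True, malformed or unrelated datagrams are
        # skipped instead of raising; only a real response to q is returned.
        r = await dns.asyncquery.udp(q, "8.8.8.8", timeout=2.0, ignore_errors=True)
        print(r.answer)

    asyncio.run(main())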

View file

@ -27,6 +27,7 @@ import time
from datetime import datetime from datetime import datetime
from typing import Callable, Dict, List, Optional, Set, Tuple, Union, cast from typing import Callable, Dict, List, Optional, Set, Tuple, Union, cast
import dns._features
import dns.exception import dns.exception
import dns.name import dns.name
import dns.node import dns.node
@ -1169,7 +1170,7 @@ def _need_pyca(*args, **kwargs):
) # pragma: no cover ) # pragma: no cover
try: if dns._features.have("dnssec"):
from cryptography.exceptions import InvalidSignature from cryptography.exceptions import InvalidSignature
from cryptography.hazmat.primitives.asymmetric import dsa # pylint: disable=W0611 from cryptography.hazmat.primitives.asymmetric import dsa # pylint: disable=W0611
from cryptography.hazmat.primitives.asymmetric import ec # pylint: disable=W0611 from cryptography.hazmat.primitives.asymmetric import ec # pylint: disable=W0611
@ -1184,20 +1185,20 @@ try:
get_algorithm_cls_from_dnskey, get_algorithm_cls_from_dnskey,
) )
from dns.dnssecalgs.base import GenericPrivateKey, GenericPublicKey from dns.dnssecalgs.base import GenericPrivateKey, GenericPublicKey
except ImportError: # pragma: no cover
validate = _need_pyca
validate_rrsig = _need_pyca
sign = _need_pyca
make_dnskey = _need_pyca
make_cdnskey = _need_pyca
_have_pyca = False
else:
validate = _validate # type: ignore validate = _validate # type: ignore
validate_rrsig = _validate_rrsig # type: ignore validate_rrsig = _validate_rrsig # type: ignore
sign = _sign sign = _sign
make_dnskey = _make_dnskey make_dnskey = _make_dnskey
make_cdnskey = _make_cdnskey make_cdnskey = _make_cdnskey
_have_pyca = True _have_pyca = True
else: # pragma: no cover
validate = _need_pyca
validate_rrsig = _need_pyca
sign = _need_pyca
make_dnskey = _need_pyca
make_cdnskey = _need_pyca
_have_pyca = False
### BEGIN generated Algorithm constants ### BEGIN generated Algorithm constants

View file

@ -1,9 +1,12 @@
from typing import Dict, Optional, Tuple, Type, Union from typing import Dict, Optional, Tuple, Type, Union
import dns.name import dns.name
try:
from dns.dnssecalgs.base import GenericPrivateKey from dns.dnssecalgs.base import GenericPrivateKey
from dns.dnssectypes import Algorithm
from dns.exception import UnsupportedAlgorithm
from dns.rdtypes.ANY.DNSKEY import DNSKEY
if dns._features.have("dnssec"):
from dns.dnssecalgs.dsa import PrivateDSA, PrivateDSANSEC3SHA1 from dns.dnssecalgs.dsa import PrivateDSA, PrivateDSANSEC3SHA1
from dns.dnssecalgs.ecdsa import PrivateECDSAP256SHA256, PrivateECDSAP384SHA384 from dns.dnssecalgs.ecdsa import PrivateECDSAP256SHA256, PrivateECDSAP384SHA384
from dns.dnssecalgs.eddsa import PrivateED448, PrivateED25519 from dns.dnssecalgs.eddsa import PrivateED448, PrivateED25519
@ -16,13 +19,9 @@ try:
) )
_have_cryptography = True _have_cryptography = True
except ImportError: else:
_have_cryptography = False _have_cryptography = False
from dns.dnssectypes import Algorithm
from dns.exception import UnsupportedAlgorithm
from dns.rdtypes.ANY.DNSKEY import DNSKEY
AlgorithmPrefix = Optional[Union[bytes, dns.name.Name]] AlgorithmPrefix = Optional[Union[bytes, dns.name.Name]]
algorithms: Dict[Tuple[Algorithm, AlgorithmPrefix], Type[GenericPrivateKey]] = {} algorithms: Dict[Tuple[Algorithm, AlgorithmPrefix], Type[GenericPrivateKey]] = {}

View file

@ -17,6 +17,7 @@
"""EDNS Options""" """EDNS Options"""
import binascii
import math import math
import socket import socket
import struct import struct
@ -58,7 +59,6 @@ class OptionType(dns.enum.IntEnum):
class Option: class Option:
"""Base class for all EDNS option types.""" """Base class for all EDNS option types."""
def __init__(self, otype: Union[OptionType, str]): def __init__(self, otype: Union[OptionType, str]):
@ -76,6 +76,9 @@ class Option:
""" """
raise NotImplementedError # pragma: no cover raise NotImplementedError # pragma: no cover
def to_text(self) -> str:
raise NotImplementedError # pragma: no cover
@classmethod @classmethod
def from_wire_parser(cls, otype: OptionType, parser: "dns.wire.Parser") -> "Option": def from_wire_parser(cls, otype: OptionType, parser: "dns.wire.Parser") -> "Option":
"""Build an EDNS option object from wire format. """Build an EDNS option object from wire format.
@ -141,7 +144,6 @@ class Option:
class GenericOption(Option): # lgtm[py/missing-equals] class GenericOption(Option): # lgtm[py/missing-equals]
"""Generic Option Class """Generic Option Class
This class is used for EDNS option types for which we have no better This class is used for EDNS option types for which we have no better
@ -343,6 +345,8 @@ class EDECode(dns.enum.IntEnum):
class EDEOption(Option): # lgtm[py/missing-equals] class EDEOption(Option): # lgtm[py/missing-equals]
"""Extended DNS Error (EDE, RFC8914)""" """Extended DNS Error (EDE, RFC8914)"""
_preserve_case = {"DNSKEY", "DS", "DNSSEC", "RRSIGs", "NSEC", "NXDOMAIN"}
def __init__(self, code: Union[EDECode, str], text: Optional[str] = None): def __init__(self, code: Union[EDECode, str], text: Optional[str] = None):
"""*code*, a ``dns.edns.EDECode`` or ``str``, the info code of the """*code*, a ``dns.edns.EDECode`` or ``str``, the info code of the
extended error. extended error.
@ -360,6 +364,13 @@ class EDEOption(Option): # lgtm[py/missing-equals]
def to_text(self) -> str: def to_text(self) -> str:
output = f"EDE {self.code}" output = f"EDE {self.code}"
if self.code in EDECode:
desc = EDECode.to_text(self.code)
desc = " ".join(
word if word in self._preserve_case else word.title()
for word in desc.split("_")
)
output += f" ({desc})"
if self.text is not None: if self.text is not None:
output += f": {self.text}" output += f": {self.text}"
return output return output
@ -392,9 +403,37 @@ class EDEOption(Option): # lgtm[py/missing-equals]
return cls(code, btext) return cls(code, btext)
class NSIDOption(Option):
def __init__(self, nsid: bytes):
super().__init__(OptionType.NSID)
self.nsid = nsid
def to_wire(self, file: Any = None) -> Optional[bytes]:
if file:
file.write(self.nsid)
return None
else:
return self.nsid
def to_text(self) -> str:
if all(c >= 0x20 and c <= 0x7E for c in self.nsid):
# All ASCII printable, so it's probably a string.
value = self.nsid.decode()
else:
value = binascii.hexlify(self.nsid).decode()
return f"NSID {value}"
@classmethod
def from_wire_parser(
cls, otype: Union[OptionType, str], parser: dns.wire.Parser
) -> Option:
return cls(parser.get_remaining())
_type_to_class: Dict[OptionType, Any] = { _type_to_class: Dict[OptionType, Any] = {
OptionType.ECS: ECSOption, OptionType.ECS: ECSOption,
OptionType.EDE: EDEOption, OptionType.EDE: EDEOption,
OptionType.NSID: NSIDOption,
} }
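With NSIDOption registered in _type_to_class, NSID options render readably instead of as generic option data. A small sketch of to_text() on both branches:

    import dns.edns

    print(dns.edns.NSIDOption(b"ns1.example").to_text())  # NSID ns1.example
    print(dns.edns.NSIDOption(b"\x01\x02").to_text())     # NSID 0102 (hex fallback)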

View file

@ -1,24 +1,30 @@
# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license # Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
import collections.abc import collections.abc
from typing import Any from typing import Any, Callable
from dns._immutable_ctx import immutable from dns._immutable_ctx import immutable
@immutable @immutable
class Dict(collections.abc.Mapping): # lgtm[py/missing-equals] class Dict(collections.abc.Mapping): # lgtm[py/missing-equals]
def __init__(self, dictionary: Any, no_copy: bool = False): def __init__(
self,
dictionary: Any,
no_copy: bool = False,
map_factory: Callable[[], collections.abc.MutableMapping] = dict,
):
"""Make an immutable dictionary from the specified dictionary. """Make an immutable dictionary from the specified dictionary.
If *no_copy* is `True`, then *dictionary* will be wrapped instead If *no_copy* is `True`, then *dictionary* will be wrapped instead
of copied. Only set this if you are sure there will be no external of copied. Only set this if you are sure there will be no external
references to the dictionary. references to the dictionary.
""" """
if no_copy and isinstance(dictionary, dict): if no_copy and isinstance(dictionary, collections.abc.MutableMapping):
self._odict = dictionary self._odict = dictionary
else: else:
self._odict = dict(dictionary) self._odict = map_factory()
self._odict.update(dictionary)
self._hash = None self._hash = None
def __getitem__(self, key): def __getitem__(self, key):
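The new map_factory hook lets callers pick the underlying mutable mapping before it is frozen. A sketch:

    import collections

    import dns.immutable

    d = dns.immutable.Dict({"b": 2, "a": 1}, map_factory=collections.OrderedDict)
    print(list(d))   # keys in insertion order, backed by the OrderedDict
    print(d["a"])    # normal Mapping access; mutation is blocked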

View file

@ -178,3 +178,20 @@ def any_for_af(af):
elif af == socket.AF_INET6: elif af == socket.AF_INET6:
return "::" return "::"
raise NotImplementedError(f"unknown address family {af}") raise NotImplementedError(f"unknown address family {af}")
def canonicalize(text: str) -> str:
"""Verify that *address* is a valid text form IPv4 or IPv6 address and return its
canonical text form. IPv6 addresses with scopes are rejected.
*text*, a ``str``, the address in textual form.
Raises ``ValueError`` if the text is not valid.
"""
try:
return dns.ipv6.canonicalize(text)
except Exception:
try:
return dns.ipv4.canonicalize(text)
except Exception:
raise ValueError
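A sketch of the new canonicalize() helper (invalid input raises ValueError):

    import dns.inet

    print(dns.inet.canonicalize("2001:DB8:0:0:0:0:0:1"))  # 2001:db8::1
    print(dns.inet.canonicalize("192.0.2.1"))             # 192.0.2.1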

View file

@ -62,3 +62,16 @@ def inet_aton(text: Union[str, bytes]) -> bytes:
return struct.pack("BBBB", *b) return struct.pack("BBBB", *b)
except Exception: except Exception:
raise dns.exception.SyntaxError raise dns.exception.SyntaxError
def canonicalize(text: Union[str, bytes]) -> str:
"""Verify that *address* is a valid text form IPv4 address and return its
canonical text form.
*text*, a ``str`` or ``bytes``, the IPv4 address in textual form.
Raises ``dns.exception.SyntaxError`` if the text is not valid.
"""
# Note that inet_aton() only accepts canonical form, but we still run it
# through inet_ntoa() to ensure the output is a str.
return dns.ipv4.inet_ntoa(dns.ipv4.inet_aton(text))

View file

@ -104,7 +104,7 @@ _colon_colon_end = re.compile(rb".*::$")
def inet_aton(text: Union[str, bytes], ignore_scope: bool = False) -> bytes: def inet_aton(text: Union[str, bytes], ignore_scope: bool = False) -> bytes:
"""Convert an IPv6 address in text form to binary form. """Convert an IPv6 address in text form to binary form.
*text*, a ``str``, the IPv6 address in textual form. *text*, a ``str`` or ``bytes``, the IPv6 address in textual form.
*ignore_scope*, a ``bool``. If ``True``, a scope will be ignored. *ignore_scope*, a ``bool``. If ``True``, a scope will be ignored.
If ``False``, the default, it is an error for a scope to be present. If ``False``, the default, it is an error for a scope to be present.
@ -206,3 +206,14 @@ def is_mapped(address: bytes) -> bool:
""" """
return address.startswith(_mapped_prefix) return address.startswith(_mapped_prefix)
def canonicalize(text: Union[str, bytes]) -> str:
"""Verify that *address* is a valid text form IPv6 address and return its
canonical text form. Addresses with scopes are rejected.
*text*, a ``str`` or ``bytes``, the IPv6 address in textual form.
Raises ``dns.exception.SyntaxError`` if the text is not valid.
"""
return dns.ipv6.inet_ntoa(dns.ipv6.inet_aton(text))

View file

@ -393,7 +393,7 @@ class Message:
section_number = section section_number = section
section = self.section_from_number(section_number) section = self.section_from_number(section_number)
elif isinstance(section, str): elif isinstance(section, str):
section_number = MessageSection.from_text(section) section_number = self._section_enum.from_text(section)
section = self.section_from_number(section_number) section = self.section_from_number(section_number)
else: else:
section_number = self.section_number(section) section_number = self.section_number(section)
@ -489,6 +489,34 @@ class Message:
rrset = None rrset = None
return rrset return rrset
def section_count(self, section: SectionType) -> int:
"""Returns the number of records in the specified section.
*section*, an ``int`` section number, a ``str`` section name, or one of
the section attributes of this message. This specifies the
section of the message to count. For example::
my_message.section_count(my_message.answer)
my_message.section_count(dns.message.ANSWER)
my_message.section_count("ANSWER")
"""
if isinstance(section, int):
section_number = section
section = self.section_from_number(section_number)
elif isinstance(section, str):
section_number = self._section_enum.from_text(section)
section = self.section_from_number(section_number)
else:
section_number = self.section_number(section)
count = sum(max(1, len(rrs)) for rrs in section)
if section_number == MessageSection.ADDITIONAL:
if self.opt is not None:
count += 1
if self.tsig is not None:
count += 1
return count
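A sketch of the new section_count() in all three addressing forms the docstring lists:

    import dns.message

    q = dns.message.make_query("example.com.", "A")
    print(q.section_count("QUESTION"))          # 1, by section name
    print(q.section_count(dns.message.ANSWER))  # 0, by section number
    print(q.section_count(q.question))          # 1, by section attribute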
def _compute_opt_reserve(self) -> int: def _compute_opt_reserve(self) -> int:
"""Compute the size required for the OPT RR, padding excluded""" """Compute the size required for the OPT RR, padding excluded"""
if not self.opt: if not self.opt:
@ -527,6 +555,8 @@ class Message:
max_size: int = 0, max_size: int = 0,
multi: bool = False, multi: bool = False,
tsig_ctx: Optional[Any] = None, tsig_ctx: Optional[Any] = None,
prepend_length: bool = False,
prefer_truncation: bool = False,
**kw: Dict[str, Any], **kw: Dict[str, Any],
) -> bytes: ) -> bytes:
"""Return a string containing the message in DNS compressed wire """Return a string containing the message in DNS compressed wire
@ -549,6 +579,15 @@ class Message:
*tsig_ctx*, a ``dns.tsig.HMACTSig`` or ``dns.tsig.GSSTSig`` object, the *tsig_ctx*, a ``dns.tsig.HMACTSig`` or ``dns.tsig.GSSTSig`` object, the
ongoing TSIG context, used when signing zone transfers. ongoing TSIG context, used when signing zone transfers.
*prepend_length*, a ``bool``, should be set to ``True`` if the caller
wants the message length prepended to the message itself. This is
useful for messages sent over TCP, TLS (DoT), or QUIC (DoQ).
*prefer_truncation*, a ``bool``, should be set to ``True`` if the caller
wants the message to be truncated if it would otherwise exceed the
maximum length. If the truncation occurs before the additional section,
the TC bit will be set.
Raises ``dns.exception.TooBig`` if *max_size* was exceeded. Raises ``dns.exception.TooBig`` if *max_size* was exceeded.
Returns a ``bytes``. Returns a ``bytes``.
@ -570,6 +609,7 @@ class Message:
r.reserve(opt_reserve) r.reserve(opt_reserve)
tsig_reserve = self._compute_tsig_reserve() tsig_reserve = self._compute_tsig_reserve()
r.reserve(tsig_reserve) r.reserve(tsig_reserve)
try:
for rrset in self.question: for rrset in self.question:
r.add_question(rrset.name, rrset.rdtype, rrset.rdclass) r.add_question(rrset.name, rrset.rdtype, rrset.rdclass)
for rrset in self.answer: for rrset in self.answer:
@ -578,6 +618,12 @@ class Message:
r.add_rrset(dns.renderer.AUTHORITY, rrset, **kw) r.add_rrset(dns.renderer.AUTHORITY, rrset, **kw)
for rrset in self.additional: for rrset in self.additional:
r.add_rrset(dns.renderer.ADDITIONAL, rrset, **kw) r.add_rrset(dns.renderer.ADDITIONAL, rrset, **kw)
except dns.exception.TooBig:
if prefer_truncation:
if r.section < dns.renderer.ADDITIONAL:
r.flags |= dns.flags.TC
else:
raise
r.release_reserved() r.release_reserved()
if self.opt is not None: if self.opt is not None:
r.add_opt(self.opt, self.pad, opt_reserve, tsig_reserve) r.add_opt(self.opt, self.pad, opt_reserve, tsig_reserve)
@ -598,7 +644,10 @@ class Message:
r.write_header() r.write_header()
if multi: if multi:
self.tsig_ctx = ctx self.tsig_ctx = ctx
return r.get_wire() wire = r.get_wire()
if prepend_length:
wire = len(wire).to_bytes(2, "big") + wire
return wire
@staticmethod @staticmethod
def _make_tsig( def _make_tsig(
@ -777,6 +826,8 @@ class Message:
if request_payload is None: if request_payload is None:
request_payload = payload request_payload = payload
self.request_payload = request_payload self.request_payload = request_payload
if pad < 0:
raise ValueError("pad must be non-negative")
self.pad = pad self.pad = pad
@property @property
@ -826,7 +877,7 @@ class Message:
if wanted: if wanted:
self.ednsflags |= dns.flags.DO self.ednsflags |= dns.flags.DO
elif self.opt: elif self.opt:
self.ednsflags &= ~dns.flags.DO self.ednsflags &= ~int(dns.flags.DO)
def rcode(self) -> dns.rcode.Rcode: def rcode(self) -> dns.rcode.Rcode:
"""Return the rcode. """Return the rcode.
@ -1035,7 +1086,6 @@ def _message_factory_from_opcode(opcode):
class _WireReader: class _WireReader:
"""Wire format reader. """Wire format reader.
parser: the binary parser parser: the binary parser
@ -1335,7 +1385,6 @@ def from_wire(
class _TextReader: class _TextReader:
"""Text format reader. """Text format reader.
tok: the tokenizer. tok: the tokenizer.
@ -1768,30 +1817,34 @@ def make_response(
our_payload: int = 8192, our_payload: int = 8192,
fudge: int = 300, fudge: int = 300,
tsig_error: int = 0, tsig_error: int = 0,
pad: Optional[int] = None,
) -> Message: ) -> Message:
"""Make a message which is a response for the specified query. """Make a message which is a response for the specified query.
The message returned is really a response skeleton; it has all The message returned is really a response skeleton; it has all of the infrastructure
of the infrastructure required of a response, but none of the required of a response, but none of the content.
content.
The response's question section is a shallow copy of the query's The response's question section is a shallow copy of the query's question section,
question section, so the query's question RRsets should not be so the query's question RRsets should not be changed.
changed.
*query*, a ``dns.message.Message``, the query to respond to. *query*, a ``dns.message.Message``, the query to respond to.
*recursion_available*, a ``bool``, should RA be set in the response? *recursion_available*, a ``bool``, should RA be set in the response?
*our_payload*, an ``int``, the payload size to advertise in EDNS *our_payload*, an ``int``, the payload size to advertise in EDNS responses.
responses.
*fudge*, an ``int``, the TSIG time fudge. *fudge*, an ``int``, the TSIG time fudge.
*tsig_error*, an ``int``, the TSIG error. *tsig_error*, an ``int``, the TSIG error.
Returns a ``dns.message.Message`` object whose specific class is *pad*, a non-negative ``int`` or ``None``. If 0, the default, do not pad; otherwise
appropriate for the query. For example, if query is a if not ``None`` add padding bytes to make the message size a multiple of *pad*.
``dns.update.UpdateMessage``, response will be too. Note that if padding is non-zero, an EDNS PADDING option will always be added to the
message. If ``None``, add padding following RFC 8467, namely if the request is
padded, pad the response to 468 otherwise do not pad.
Returns a ``dns.message.Message`` object whose specific class is appropriate for the
query. For example, if query is a ``dns.update.UpdateMessage``, response will be
too.
""" """
if query.flags & dns.flags.QR: if query.flags & dns.flags.QR:
@ -1804,7 +1857,13 @@ def make_response(
response.set_opcode(query.opcode()) response.set_opcode(query.opcode())
response.question = list(query.question) response.question = list(query.question)
if query.edns >= 0: if query.edns >= 0:
response.use_edns(0, 0, our_payload, query.payload) if pad is None:
# Set response padding per RFC 8467
pad = 0
for option in query.options:
if option.otype == dns.edns.OptionType.PADDING:
pad = 468
response.use_edns(0, 0, our_payload, query.payload, pad=pad)
if query.had_tsig: if query.had_tsig:
response.use_tsig( response.use_tsig(
query.keyring, query.keyring,
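A sketch of the new RFC 8467 padding default: a padded query yields a response padded to a multiple of 468 (the query name is a placeholder):

    import dns.message

    q = dns.message.make_query("example.com.", "A", use_edns=0, pad=128)
    # Server side: parse the padded query off the wire, then respond.
    parsed = dns.message.from_wire(q.to_wire())
    r = dns.message.make_response(parsed)     # pad=None -> RFC 8467 policy
    print(len(r.to_wire()) % 468 == 0)        # True: padded to a multiple of 468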

Some files were not shown because too many files have changed in this diff