diff --git a/.github/workflows/publish-docker.yml b/.github/workflows/publish-docker.yml index f0947104..1185e5eb 100644 --- a/.github/workflows/publish-docker.yml +++ b/.github/workflows/publish-docker.yml @@ -47,7 +47,7 @@ jobs: version: latest - name: Cache Docker Layers - uses: actions/cache@v3 + uses: actions/cache@v4 with: path: /tmp/.buildx-cache key: ${{ runner.os }}-buildx-${{ github.sha }} diff --git a/.github/workflows/publish-installers.yml b/.github/workflows/publish-installers.yml index 45610edf..7b79eea6 100644 --- a/.github/workflows/publish-installers.yml +++ b/.github/workflows/publish-installers.yml @@ -129,7 +129,7 @@ jobs: echo "$EOF" >> $GITHUB_OUTPUT - name: Create Release - uses: softprops/action-gh-release@v1 + uses: softprops/action-gh-release@v2 id: create_release env: GITHUB_TOKEN: ${{ secrets.GHACTIONS_TOKEN }} diff --git a/lib/PyWin32.chm b/lib/PyWin32.chm new file mode 100644 index 00000000..7606f82b Binary files /dev/null and b/lib/PyWin32.chm differ diff --git a/lib/adodbapi/__init__.py b/lib/adodbapi/__init__.py new file mode 100644 index 00000000..0d769e05 --- /dev/null +++ b/lib/adodbapi/__init__.py @@ -0,0 +1,74 @@ +"""adodbapi - A python DB API 2.0 (PEP 249) interface to Microsoft ADO + +Copyright (C) 2002 Henrik Ekelund, version 2.1 by Vernon Cole +* http://sourceforge.net/projects/adodbapi +""" +import sys +import time + +from .adodbapi import Connection, Cursor, __version__, connect, dateconverter +from .apibase import ( + BINARY, + DATETIME, + NUMBER, + ROWID, + STRING, + DatabaseError, + DataError, + Error, + FetchFailedError, + IntegrityError, + InterfaceError, + InternalError, + NotSupportedError, + OperationalError, + ProgrammingError, + Warning, + apilevel, + paramstyle, + threadsafety, +) + + +def Binary(aString): + """This function constructs an object capable of holding a binary (long) string value.""" + return bytes(aString) + + +def Date(year, month, day): + "This function constructs an object holding a date 
value." + return dateconverter.Date(year, month, day) + + +def Time(hour, minute, second): + "This function constructs an object holding a time value." + return dateconverter.Time(hour, minute, second) + + +def Timestamp(year, month, day, hour, minute, second): + "This function constructs an object holding a time stamp value." + return dateconverter.Timestamp(year, month, day, hour, minute, second) + + +def DateFromTicks(ticks): + """This function constructs an object holding a date value from the given ticks value + (number of seconds since the epoch; see the documentation of the standard Python time module for details). + """ + return Date(*time.gmtime(ticks)[:3]) + + +def TimeFromTicks(ticks): + """This function constructs an object holding a time value from the given ticks value + (number of seconds since the epoch; see the documentation of the standard Python time module for details). + """ + return Time(*time.gmtime(ticks)[3:6]) + + +def TimestampFromTicks(ticks): + """This function constructs an object holding a time stamp value from the given + ticks value (number of seconds since the epoch; + see the documentation of the standard Python time module for details).""" + return Timestamp(*time.gmtime(ticks)[:6]) + + +version = "adodbapi v" + __version__ diff --git a/lib/adodbapi/ado_consts.py b/lib/adodbapi/ado_consts.py new file mode 100644 index 00000000..ecb2147d --- /dev/null +++ b/lib/adodbapi/ado_consts.py @@ -0,0 +1,281 @@ +# ADO enumerated constants documented on MSDN: +# http://msdn.microsoft.com/en-us/library/ms678353(VS.85).aspx + +# IsolationLevelEnum +adXactUnspecified = -1 +adXactBrowse = 0x100 +adXactChaos = 0x10 +adXactCursorStability = 0x1000 +adXactIsolated = 0x100000 +adXactReadCommitted = 0x1000 +adXactReadUncommitted = 0x100 +adXactRepeatableRead = 0x10000 +adXactSerializable = 0x100000 + +# CursorLocationEnum +adUseClient = 3 +adUseServer = 2 + +# CursorTypeEnum +adOpenDynamic = 2 +adOpenForwardOnly = 0 +adOpenKeyset = 1 +adOpenStatic = 3 
+adOpenUnspecified = -1 + +# CommandTypeEnum +adCmdText = 1 +adCmdStoredProc = 4 +adSchemaTables = 20 + +# ParameterDirectionEnum +adParamInput = 1 +adParamInputOutput = 3 +adParamOutput = 2 +adParamReturnValue = 4 +adParamUnknown = 0 +directions = { + 0: "Unknown", + 1: "Input", + 2: "Output", + 3: "InputOutput", + 4: "Return", +} + + +def ado_direction_name(ado_dir): + try: + return "adParam" + directions[ado_dir] + except: + return "unknown direction (" + str(ado_dir) + ")" + + +# ObjectStateEnum +adStateClosed = 0 +adStateOpen = 1 +adStateConnecting = 2 +adStateExecuting = 4 +adStateFetching = 8 + +# FieldAttributeEnum +adFldMayBeNull = 0x40 + +# ConnectModeEnum +adModeUnknown = 0 +adModeRead = 1 +adModeWrite = 2 +adModeReadWrite = 3 +adModeShareDenyRead = 4 +adModeShareDenyWrite = 8 +adModeShareExclusive = 12 +adModeShareDenyNone = 16 +adModeRecursive = 0x400000 + +# XactAttributeEnum +adXactCommitRetaining = 131072 +adXactAbortRetaining = 262144 + +ado_error_TIMEOUT = -2147217871 + +# DataTypeEnum - ADO Data types documented at: +# http://msdn2.microsoft.com/en-us/library/ms675318.aspx +adArray = 0x2000 +adEmpty = 0x0 +adBSTR = 0x8 +adBigInt = 0x14 +adBinary = 0x80 +adBoolean = 0xB +adChapter = 0x88 +adChar = 0x81 +adCurrency = 0x6 +adDBDate = 0x85 +adDBTime = 0x86 +adDBTimeStamp = 0x87 +adDate = 0x7 +adDecimal = 0xE +adDouble = 0x5 +adError = 0xA +adFileTime = 0x40 +adGUID = 0x48 +adIDispatch = 0x9 +adIUnknown = 0xD +adInteger = 0x3 +adLongVarBinary = 0xCD +adLongVarChar = 0xC9 +adLongVarWChar = 0xCB +adNumeric = 0x83 +adPropVariant = 0x8A +adSingle = 0x4 +adSmallInt = 0x2 +adTinyInt = 0x10 +adUnsignedBigInt = 0x15 +adUnsignedInt = 0x13 +adUnsignedSmallInt = 0x12 +adUnsignedTinyInt = 0x11 +adUserDefined = 0x84 +adVarBinary = 0xCC +adVarChar = 0xC8 +adVarNumeric = 0x8B +adVarWChar = 0xCA +adVariant = 0xC +adWChar = 0x82 +# Additional constants used by introspection but not ADO itself +AUTO_FIELD_MARKER = -1000 + +adTypeNames = { + adBSTR: "adBSTR", + 
adBigInt: "adBigInt", + adBinary: "adBinary", + adBoolean: "adBoolean", + adChapter: "adChapter", + adChar: "adChar", + adCurrency: "adCurrency", + adDBDate: "adDBDate", + adDBTime: "adDBTime", + adDBTimeStamp: "adDBTimeStamp", + adDate: "adDate", + adDecimal: "adDecimal", + adDouble: "adDouble", + adEmpty: "adEmpty", + adError: "adError", + adFileTime: "adFileTime", + adGUID: "adGUID", + adIDispatch: "adIDispatch", + adIUnknown: "adIUnknown", + adInteger: "adInteger", + adLongVarBinary: "adLongVarBinary", + adLongVarChar: "adLongVarChar", + adLongVarWChar: "adLongVarWChar", + adNumeric: "adNumeric", + adPropVariant: "adPropVariant", + adSingle: "adSingle", + adSmallInt: "adSmallInt", + adTinyInt: "adTinyInt", + adUnsignedBigInt: "adUnsignedBigInt", + adUnsignedInt: "adUnsignedInt", + adUnsignedSmallInt: "adUnsignedSmallInt", + adUnsignedTinyInt: "adUnsignedTinyInt", + adUserDefined: "adUserDefined", + adVarBinary: "adVarBinary", + adVarChar: "adVarChar", + adVarNumeric: "adVarNumeric", + adVarWChar: "adVarWChar", + adVariant: "adVariant", + adWChar: "adWChar", +} + + +def ado_type_name(ado_type): + return adTypeNames.get(ado_type, "unknown type (" + str(ado_type) + ")") + + +# here in decimal, sorted by value +# adEmpty 0 Specifies no value (DBTYPE_EMPTY). +# adSmallInt 2 Indicates a two-byte signed integer (DBTYPE_I2). +# adInteger 3 Indicates a four-byte signed integer (DBTYPE_I4). +# adSingle 4 Indicates a single-precision floating-point value (DBTYPE_R4). +# adDouble 5 Indicates a double-precision floating-point value (DBTYPE_R8). +# adCurrency 6 Indicates a currency value (DBTYPE_CY). Currency is a fixed-point number +# with four digits to the right of the decimal point. It is stored in an eight-byte signed integer scaled by 10,000. +# adDate 7 Indicates a date value (DBTYPE_DATE). A date is stored as a double, the whole part of which is +# the number of days since December 30, 1899, and the fractional part of which is the fraction of a day. 
+# adBSTR 8 Indicates a null-terminated character string (Unicode) (DBTYPE_BSTR). +# adIDispatch 9 Indicates a pointer to an IDispatch interface on a COM object (DBTYPE_IDISPATCH). +# adError 10 Indicates a 32-bit error code (DBTYPE_ERROR). +# adBoolean 11 Indicates a boolean value (DBTYPE_BOOL). +# adVariant 12 Indicates an Automation Variant (DBTYPE_VARIANT). +# adIUnknown 13 Indicates a pointer to an IUnknown interface on a COM object (DBTYPE_IUNKNOWN). +# adDecimal 14 Indicates an exact numeric value with a fixed precision and scale (DBTYPE_DECIMAL). +# adTinyInt 16 Indicates a one-byte signed integer (DBTYPE_I1). +# adUnsignedTinyInt 17 Indicates a one-byte unsigned integer (DBTYPE_UI1). +# adUnsignedSmallInt 18 Indicates a two-byte unsigned integer (DBTYPE_UI2). +# adUnsignedInt 19 Indicates a four-byte unsigned integer (DBTYPE_UI4). +# adBigInt 20 Indicates an eight-byte signed integer (DBTYPE_I8). +# adUnsignedBigInt 21 Indicates an eight-byte unsigned integer (DBTYPE_UI8). +# adFileTime 64 Indicates a 64-bit value representing the number of 100-nanosecond intervals since +# January 1, 1601 (DBTYPE_FILETIME). +# adGUID 72 Indicates a globally unique identifier (GUID) (DBTYPE_GUID). +# adBinary 128 Indicates a binary value (DBTYPE_BYTES). +# adChar 129 Indicates a string value (DBTYPE_STR). +# adWChar 130 Indicates a null-terminated Unicode character string (DBTYPE_WSTR). +# adNumeric 131 Indicates an exact numeric value with a fixed precision and scale (DBTYPE_NUMERIC). +# adUserDefined 132 Indicates a user-defined variable (DBTYPE_UDT). +# adUserDefined 132 Indicates a user-defined variable (DBTYPE_UDT). +# adDBDate 133 Indicates a date value (yyyymmdd) (DBTYPE_DBDATE). +# adDBTime 134 Indicates a time value (hhmmss) (DBTYPE_DBTIME). +# adDBTimeStamp 135 Indicates a date/time stamp (yyyymmddhhmmss plus a fraction in billionths) (DBTYPE_DBTIMESTAMP). 
+# adChapter 136 Indicates a four-byte chapter value that identifies rows in a child rowset (DBTYPE_HCHAPTER). +# adPropVariant 138 Indicates an Automation PROPVARIANT (DBTYPE_PROP_VARIANT). +# adVarNumeric 139 Indicates a numeric value (Parameter object only). +# adVarChar 200 Indicates a string value (Parameter object only). +# adLongVarChar 201 Indicates a long string value (Parameter object only). +# adVarWChar 202 Indicates a null-terminated Unicode character string (Parameter object only). +# adLongVarWChar 203 Indicates a long null-terminated Unicode string value (Parameter object only). +# adVarBinary 204 Indicates a binary value (Parameter object only). +# adLongVarBinary 205 Indicates a long binary value (Parameter object only). +# adArray (Does not apply to ADOX.) 0x2000 A flag value, always combined with another data type constant, +# that indicates an array of that other data type. + +# Error codes to names +adoErrors = { + 0xE7B: "adErrBoundToCommand", + 0xE94: "adErrCannotComplete", + 0xEA4: "adErrCantChangeConnection", + 0xC94: "adErrCantChangeProvider", + 0xE8C: "adErrCantConvertvalue", + 0xE8D: "adErrCantCreate", + 0xEA3: "adErrCatalogNotSet", + 0xE8E: "adErrColumnNotOnThisRow", + 0xD5D: "adErrDataConversion", + 0xE89: "adErrDataOverflow", + 0xE9A: "adErrDelResOutOfScope", + 0xEA6: "adErrDenyNotSupported", + 0xEA7: "adErrDenyTypeNotSupported", + 0xCB3: "adErrFeatureNotAvailable", + 0xEA5: "adErrFieldsUpdateFailed", + 0xC93: "adErrIllegalOperation", + 0xCAE: "adErrInTransaction", + 0xE87: "adErrIntegrityViolation", + 0xBB9: "adErrInvalidArgument", + 0xE7D: "adErrInvalidConnection", + 0xE7C: "adErrInvalidParamInfo", + 0xE82: "adErrInvalidTransaction", + 0xE91: "adErrInvalidURL", + 0xCC1: "adErrItemNotFound", + 0xBCD: "adErrNoCurrentRecord", + 0xE83: "adErrNotExecuting", + 0xE7E: "adErrNotReentrant", + 0xE78: "adErrObjectClosed", + 0xD27: "adErrObjectInCollection", + 0xD5C: "adErrObjectNotSet", + 0xE79: "adErrObjectOpen", + 0xBBA: "adErrOpeningFile", 
+ 0xE80: "adErrOperationCancelled", + 0xE96: "adErrOutOfSpace", + 0xE88: "adErrPermissionDenied", + 0xE9E: "adErrPropConflicting", + 0xE9B: "adErrPropInvalidColumn", + 0xE9C: "adErrPropInvalidOption", + 0xE9D: "adErrPropInvalidValue", + 0xE9F: "adErrPropNotAllSettable", + 0xEA0: "adErrPropNotSet", + 0xEA1: "adErrPropNotSettable", + 0xEA2: "adErrPropNotSupported", + 0xBB8: "adErrProviderFailed", + 0xE7A: "adErrProviderNotFound", + 0xBBB: "adErrReadFile", + 0xE93: "adErrResourceExists", + 0xE92: "adErrResourceLocked", + 0xE97: "adErrResourceOutOfScope", + 0xE8A: "adErrSchemaViolation", + 0xE8B: "adErrSignMismatch", + 0xE81: "adErrStillConnecting", + 0xE7F: "adErrStillExecuting", + 0xE90: "adErrTreePermissionDenied", + 0xE8F: "adErrURLDoesNotExist", + 0xE99: "adErrURLNamedRowDoesNotExist", + 0xE98: "adErrUnavailable", + 0xE84: "adErrUnsafeOperation", + 0xE95: "adErrVolumeNotFound", + 0xBBC: "adErrWriteFile", +} diff --git a/lib/adodbapi/adodbapi.py b/lib/adodbapi/adodbapi.py new file mode 100644 index 00000000..8f7c045e --- /dev/null +++ b/lib/adodbapi/adodbapi.py @@ -0,0 +1,1223 @@ +"""adodbapi - A python DB API 2.0 (PEP 249) interface to Microsoft ADO + +Copyright (C) 2002 Henrik Ekelund, versions 2.1 and later by Vernon Cole +* http://sourceforge.net/projects/pywin32 +* https://github.com/mhammond/pywin32 +* http://sourceforge.net/projects/adodbapi + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. 
+ + You should have received a copy of the GNU Lesser General Public + License along with this library; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + + django adaptations and refactoring by Adam Vandenberg + +DB-API 2.0 specification: http://www.python.org/dev/peps/pep-0249/ + +This module source should run correctly in CPython versions 2.7 and later, +or IronPython version 2.7 and later, +or, after running through 2to3.py, CPython 3.4 or later. +""" + +__version__ = "2.6.2.0" +version = "adodbapi v" + __version__ + +import copy +import decimal +import os +import sys +import weakref + +from . import ado_consts as adc, apibase as api, process_connect_string + +try: + verbose = int(os.environ["ADODBAPI_VERBOSE"]) +except: + verbose = False +if verbose: + print(version) + +# --- define objects to smooth out IronPython <-> CPython differences +onWin32 = False # assume the worst +if api.onIronPython: + from clr import Reference + from System import ( + Activator, + Array, + Byte, + DateTime, + DBNull, + Decimal as SystemDecimal, + Type, + ) + + def Dispatch(dispatch): + type = Type.GetTypeFromProgID(dispatch) + return Activator.CreateInstance(type) + + def getIndexedValue(obj, index): + return obj.Item[index] + +else: # try pywin32 + try: + import pythoncom + import pywintypes + import win32com.client + + onWin32 = True + + def Dispatch(dispatch): + return win32com.client.Dispatch(dispatch) + + except ImportError: + import warnings + + warnings.warn( + "pywin32 package (or IronPython) required for adodbapi.", ImportWarning + ) + + def getIndexedValue(obj, index): + return obj(index) + + +from collections.abc import Mapping + +# --- define objects to smooth out Python3000 <-> Python 2.x differences +unicodeType = str +longType = int +StringTypes = str +maxint = sys.maxsize + + +# ----------------- The .connect method ----------------- +def make_COM_connecter(): + try: + if onWin32: + 
pythoncom.CoInitialize() # v2.1 Paj + c = Dispatch("ADODB.Connection") # connect _after_ CoIninialize v2.1.1 adamvan + except: + raise api.InterfaceError( + "Windows COM Error: Dispatch('ADODB.Connection') failed." + ) + return c + + +def connect(*args, **kwargs): # --> a db-api connection object + """Connect to a database. + + call using: + :connection_string -- An ADODB formatted connection string, see: + * http://www.connectionstrings.com + * http://www.asp101.com/articles/john/connstring/default.asp + :timeout -- A command timeout value, in seconds (default 30 seconds) + """ + co = Connection() # make an empty connection object + + kwargs = process_connect_string.process(args, kwargs, True) + + try: # connect to the database, using the connection information in kwargs + co.connect(kwargs) + return co + except Exception as e: + message = 'Error opening connection to "%s"' % co.connection_string + raise api.OperationalError(e, message) + + +# so you could use something like: +# myConnection.paramstyle = 'named' +# The programmer may also change the default. +# For example, if I were using django, I would say: +# import adodbapi as Database +# Database.adodbapi.paramstyle = 'format' + +# ------- other module level defaults -------- +defaultIsolationLevel = adc.adXactReadCommitted +# Set defaultIsolationLevel on module level before creating the connection. +# For example: +# import adodbapi, ado_consts +# adodbapi.adodbapi.defaultIsolationLevel=ado_consts.adXactBrowse" +# +# Set defaultCursorLocation on module level before creating the connection. +# It may be one of the "adUse..." consts. +defaultCursorLocation = adc.adUseClient # changed from adUseServer as of v 2.3.0 + +dateconverter = api.pythonDateTimeConverter() # default + + +def format_parameters(ADOparameters, show_value=False): + """Format a collection of ADO Command Parameters. + + Used by error reporting in _execute_command. 
+ """ + try: + if show_value: + desc = [ + 'Name: %s, Dir.: %s, Type: %s, Size: %s, Value: "%s", Precision: %s, NumericScale: %s' + % ( + p.Name, + adc.directions[p.Direction], + adc.adTypeNames.get(p.Type, str(p.Type) + " (unknown type)"), + p.Size, + p.Value, + p.Precision, + p.NumericScale, + ) + for p in ADOparameters + ] + else: + desc = [ + "Name: %s, Dir.: %s, Type: %s, Size: %s, Precision: %s, NumericScale: %s" + % ( + p.Name, + adc.directions[p.Direction], + adc.adTypeNames.get(p.Type, str(p.Type) + " (unknown type)"), + p.Size, + p.Precision, + p.NumericScale, + ) + for p in ADOparameters + ] + return "[" + "\n".join(desc) + "]" + except: + return "[]" + + +def _configure_parameter(p, value, adotype, settings_known): + """Configure the given ADO Parameter 'p' with the Python 'value'.""" + + if adotype in api.adoBinaryTypes: + p.Size = len(value) + p.AppendChunk(value) + + elif isinstance(value, StringTypes): # v2.1 Jevon + L = len(value) + if adotype in api.adoStringTypes: # v2.2.1 Cole + if settings_known: + L = min(L, p.Size) # v2.1 Cole limit data to defined size + p.Value = value[:L] # v2.1 Jevon & v2.1 Cole + else: + p.Value = value # dont limit if db column is numeric + if L > 0: # v2.1 Cole something does not like p.Size as Zero + p.Size = L # v2.1 Jevon + + elif isinstance(value, decimal.Decimal): + if api.onIronPython: + s = str(value) + p.Value = s + p.Size = len(s) + else: + p.Value = value + exponent = value.as_tuple()[2] + digit_count = len(value.as_tuple()[1]) + p.Precision = digit_count + if exponent == 0: + p.NumericScale = 0 + elif exponent < 0: + p.NumericScale = -exponent + if p.Precision < p.NumericScale: + p.Precision = p.NumericScale + else: # exponent > 0: + p.NumericScale = 0 + p.Precision = digit_count + exponent + + elif type(value) in dateconverter.types: + if settings_known and adotype in api.adoDateTimeTypes: + p.Value = dateconverter.COMDate(value) + else: # probably a string + # provide the date as a string in the format 
'YYYY-MM-dd' + s = dateconverter.DateObjectToIsoFormatString(value) + p.Value = s + p.Size = len(s) + + elif api.onIronPython and isinstance(value, longType): # Iron Python Long + s = str(value) # feature workaround for IPy 2.0 + p.Value = s + + elif adotype == adc.adEmpty: # ADO will not let you specify a null column + p.Type = ( + adc.adInteger + ) # so we will fake it to be an integer (just to have something) + p.Value = None # and pass in a Null *value* + + # For any other type, set the value and let pythoncom do the right thing. + else: + p.Value = value + + +# # # # # ----- the Class that defines a connection ----- # # # # # +class Connection(object): + # include connection attributes as class attributes required by api definition. + Warning = api.Warning + Error = api.Error + InterfaceError = api.InterfaceError + DataError = api.DataError + DatabaseError = api.DatabaseError + OperationalError = api.OperationalError + IntegrityError = api.IntegrityError + InternalError = api.InternalError + NotSupportedError = api.NotSupportedError + ProgrammingError = api.ProgrammingError + FetchFailedError = api.FetchFailedError # (special for django) + # ...class attributes... (can be overridden by instance attributes) + verbose = api.verbose + + @property + def dbapi(self): # a proposed db-api version 3 extension. + "Return a reference to the DBAPI module for this Connection." 
+ return api + + def __init__(self): # now define the instance attributes + self.connector = None + self.paramstyle = api.paramstyle + self.supportsTransactions = False + self.connection_string = "" + self.cursors = weakref.WeakValueDictionary() + self.dbms_name = "" + self.dbms_version = "" + self.errorhandler = None # use the standard error handler for this instance + self.transaction_level = 0 # 0 == Not in a transaction, at the top level + self._autocommit = False + + def connect(self, kwargs, connection_maker=make_COM_connecter): + if verbose > 9: + print("kwargs=", repr(kwargs)) + try: + self.connection_string = ( + kwargs["connection_string"] % kwargs + ) # insert keyword arguments + except Exception as e: + self._raiseConnectionError( + KeyError, "Python string format error in connection string->" + ) + self.timeout = kwargs.get("timeout", 30) + self.mode = kwargs.get("mode", adc.adModeUnknown) + self.kwargs = kwargs + if verbose: + print('%s attempting: "%s"' % (version, self.connection_string)) + self.connector = connection_maker() + self.connector.ConnectionTimeout = self.timeout + self.connector.ConnectionString = self.connection_string + self.connector.Mode = self.mode + + try: + self.connector.Open() # Open the ADO connection + except api.Error: + self._raiseConnectionError( + api.DatabaseError, + "ADO error trying to Open=%s" % self.connection_string, + ) + + try: # Stefan Fuchs; support WINCCOLEDBProvider + if getIndexedValue(self.connector.Properties, "Transaction DDL").Value != 0: + self.supportsTransactions = True + except pywintypes.com_error: + pass # Stefan Fuchs + self.dbms_name = getIndexedValue(self.connector.Properties, "DBMS Name").Value + try: # Stefan Fuchs + self.dbms_version = getIndexedValue( + self.connector.Properties, "DBMS Version" + ).Value + except pywintypes.com_error: + pass # Stefan Fuchs + self.connector.CursorLocation = defaultCursorLocation # v2.1 Rose + if self.supportsTransactions: + self.connector.IsolationLevel = 
defaultIsolationLevel + self._autocommit = bool(kwargs.get("autocommit", False)) + if not self._autocommit: + self.transaction_level = ( + self.connector.BeginTrans() + ) # Disables autocommit & inits transaction_level + else: + self._autocommit = True + if "paramstyle" in kwargs: + self.paramstyle = kwargs["paramstyle"] # let setattr do the error checking + self.messages = [] + if verbose: + print("adodbapi New connection at %X" % id(self)) + + def _raiseConnectionError(self, errorclass, errorvalue): + eh = self.errorhandler + if eh is None: + eh = api.standardErrorHandler + eh(self, None, errorclass, errorvalue) + + def _closeAdoConnection(self): # all v2.1 Rose + """close the underlying ADO Connection object, + rolling it back first if it supports transactions.""" + if self.connector is None: + return + if not self._autocommit: + if self.transaction_level: + try: + self.connector.RollbackTrans() + except: + pass + self.connector.Close() + if verbose: + print("adodbapi Closed connection at %X" % id(self)) + + def close(self): + """Close the connection now (rather than whenever __del__ is called). + + The connection will be unusable from this point forward; + an Error (or subclass) exception will be raised if any operation is attempted with the connection. + The same applies to all cursor objects trying to use the connection. + """ + for crsr in list(self.cursors.values())[ + : + ]: # copy the list, then close each one + crsr.close(dont_tell_me=True) # close without back-link clearing + self.messages = [] + try: + self._closeAdoConnection() # v2.1 Rose + except Exception as e: + self._raiseConnectionError(sys.exc_info()[0], sys.exc_info()[1]) + + self.connector = None # v2.4.2.2 fix subtle timeout bug + # per M.Hammond: "I expect the benefits of uninitializing are probably fairly small, + # so never uninitializing will probably not cause any problems." + + def commit(self): + """Commit any pending transaction to the database. 
+ + Note that if the database supports an auto-commit feature, + this must be initially off. An interface method may be provided to turn it back on. + Database modules that do not support transactions should implement this method with void functionality. + """ + self.messages = [] + if not self.supportsTransactions: + return + + try: + self.transaction_level = self.connector.CommitTrans() + if verbose > 1: + print("commit done on connection at %X" % id(self)) + if not ( + self._autocommit + or (self.connector.Attributes & adc.adXactAbortRetaining) + ): + # If attributes has adXactCommitRetaining it performs retaining commits that is, + # calling CommitTrans automatically starts a new transaction. Not all providers support this. + # If not, we will have to start a new transaction by this command: + self.transaction_level = self.connector.BeginTrans() + except Exception as e: + self._raiseConnectionError(api.ProgrammingError, e) + + def _rollback(self): + """In case a database does provide transactions this method causes the the database to roll back to + the start of any pending transaction. Closing a connection without committing the changes first will + cause an implicit rollback to be performed. + + If the database does not support the functionality required by the method, the interface should + throw an exception in case the method is used. + The preferred approach is to not implement the method and thus have Python generate + an AttributeError in case the method is requested. This allows the programmer to check for database + capabilities using the standard hasattr() function. + + For some dynamically configured interfaces it may not be appropriate to require dynamically making + the method available. These interfaces should then raise a NotSupportedError to indicate the + non-ability to perform the roll back when the method is invoked. 
+ """ + self.messages = [] + if ( + self.transaction_level + ): # trying to roll back with no open transaction causes an error + try: + self.transaction_level = self.connector.RollbackTrans() + if verbose > 1: + print("rollback done on connection at %X" % id(self)) + if not self._autocommit and not ( + self.connector.Attributes & adc.adXactAbortRetaining + ): + # If attributes has adXactAbortRetaining it performs retaining aborts that is, + # calling RollbackTrans automatically starts a new transaction. Not all providers support this. + # If not, we will have to start a new transaction by this command: + if ( + not self.transaction_level + ): # if self.transaction_level == 0 or self.transaction_level is None: + self.transaction_level = self.connector.BeginTrans() + except Exception as e: + self._raiseConnectionError(api.ProgrammingError, e) + + def __setattr__(self, name, value): + if name == "autocommit": # extension: allow user to turn autocommit on or off + if self.supportsTransactions: + object.__setattr__(self, "_autocommit", bool(value)) + try: + self._rollback() # must clear any outstanding transactions + except: + pass + return + elif name == "paramstyle": + if value not in api.accepted_paramstyles: + self._raiseConnectionError( + api.NotSupportedError, + 'paramstyle="%s" not in:%s' + % (value, repr(api.accepted_paramstyles)), + ) + elif name == "variantConversions": + value = copy.copy( + value + ) # make a new copy -- no changes in the default, please + object.__setattr__(self, name, value) + + def __getattr__(self, item): + if ( + item == "rollback" + ): # the rollback method only appears if the database supports transactions + if self.supportsTransactions: + return ( + self._rollback + ) # return the rollback method so the caller can execute it. 
+ else: + raise AttributeError("this data provider does not support Rollback") + elif item == "autocommit": + return self._autocommit + else: + raise AttributeError( + 'no such attribute in ADO connection object as="%s"' % item + ) + + def cursor(self): + "Return a new Cursor Object using the connection." + self.messages = [] + c = Cursor(self) + return c + + def _i_am_here(self, crsr): + "message from a new cursor proclaiming its existence" + oid = id(crsr) + self.cursors[oid] = crsr + + def _i_am_closing(self, crsr): + "message from a cursor giving connection a chance to clean up" + try: + del self.cursors[id(crsr)] + except: + pass + + def printADOerrors(self): + j = self.connector.Errors.Count + if j: + print("ADO Errors:(%i)" % j) + for e in self.connector.Errors: + print("Description: %s" % e.Description) + print("Error: %s %s " % (e.Number, adc.adoErrors.get(e.Number, "unknown"))) + if e.Number == adc.ado_error_TIMEOUT: + print( + "Timeout Error: Try using adodbpi.connect(constr,timeout=Nseconds)" + ) + print("Source: %s" % e.Source) + print("NativeError: %s" % e.NativeError) + print("SQL State: %s" % e.SQLState) + + def _suggest_error_class(self): + """Introspect the current ADO Errors and determine an appropriate error class. + + Error.SQLState is a SQL-defined error condition, per the SQL specification: + http://www.contrib.andrew.cmu.edu/~shadow/sql/sql1992.txt + + The 23000 class of errors are integrity errors. + Error 40002 is a transactional integrity error. 
+ """ + if self.connector is not None: + for e in self.connector.Errors: + state = str(e.SQLState) + if state.startswith("23") or state == "40002": + return api.IntegrityError + return api.DatabaseError + + def __del__(self): + try: + self._closeAdoConnection() # v2.1 Rose + except: + pass + self.connector = None + + def __enter__(self): # Connections are context managers + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + if exc_type: + self._rollback() # automatic rollback on errors + else: + self.commit() + + def get_table_names(self): + schema = self.connector.OpenSchema(20) # constant = adSchemaTables + + tables = [] + while not schema.EOF: + name = getIndexedValue(schema.Fields, "TABLE_NAME").Value + tables.append(name) + schema.MoveNext() + del schema + return tables + + +# # # # # ----- the Class that defines a cursor ----- # # # # # +class Cursor(object): + ## ** api required attributes: + ## description... + ## This read-only attribute is a sequence of 7-item sequences. + ## Each of these sequences contains information describing one result column: + ## (name, type_code, display_size, internal_size, precision, scale, null_ok). + ## This attribute will be None for operations that do not return rows or if the + ## cursor has not had an operation invoked via the executeXXX() method yet. + ## The type_code can be interpreted by comparing it to the Type Objects specified in the section below. + ## rowcount... + ## This read-only attribute specifies the number of rows that the last executeXXX() produced + ## (for DQL statements like select) or affected (for DML statements like update or insert). + ## The attribute is -1 in case no executeXXX() has been performed on the cursor or + ## the rowcount of the last operation is not determinable by the interface.[7] + ## arraysize... + ## This read/write attribute specifies the number of rows to fetch at a time with fetchmany(). + ## It defaults to 1 meaning to fetch a single row at a time. 
+ ## Implementations must observe this value with respect to the fetchmany() method, + ## but are free to interact with the database a single row at a time. + ## It may also be used in the implementation of executemany(). + ## ** extension attributes: + ## paramstyle... + ## allows the programmer to override the connection's default paramstyle + ## errorhandler... + ## allows the programmer to override the connection's default error handler + + def __init__(self, connection): + self.command = None + self._ado_prepared = False + self.messages = [] + self.connection = connection + self.paramstyle = connection.paramstyle # used for overriding the paramstyle + self._parameter_names = [] + self.recordset_is_remote = False + self.rs = None # the ADO recordset for this cursor + self.converters = [] # conversion function for each column + self.columnNames = {} # names of columns {lowercase name : number,...} + self.numberOfColumns = 0 + self._description = None + self.rowcount = -1 + self.errorhandler = connection.errorhandler + self.arraysize = 1 + connection._i_am_here(self) + if verbose: + print( + "%s New cursor at %X on conn %X" + % (version, id(self), id(self.connection)) + ) + + def __iter__(self): # [2.1 Zamarev] + return iter(self.fetchone, None) # [2.1 Zamarev] + + def prepare(self, operation): + self.command = operation + self._description = None + self._ado_prepared = "setup" + + def __next__(self): + r = self.fetchone() + if r: + return r + raise StopIteration + + def __enter__(self): + "Allow database cursors to be used with context managers." + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + "Allow database cursors to be used with context managers." 
+ self.close() + + def _raiseCursorError(self, errorclass, errorvalue): + eh = self.errorhandler + if eh is None: + eh = api.standardErrorHandler + eh(self.connection, self, errorclass, errorvalue) + + def build_column_info(self, recordset): + self.converters = [] # convertion function for each column + self.columnNames = {} # names of columns {lowercase name : number,...} + self._description = None + + # if EOF and BOF are true at the same time, there are no records in the recordset + if (recordset is None) or (recordset.State == adc.adStateClosed): + self.rs = None + self.numberOfColumns = 0 + return + self.rs = recordset # v2.1.1 bkline + self.recordset_format = api.RS_ARRAY if api.onIronPython else api.RS_WIN_32 + self.numberOfColumns = recordset.Fields.Count + try: + varCon = self.connection.variantConversions + except AttributeError: + varCon = api.variantConversions + for i in range(self.numberOfColumns): + f = getIndexedValue(self.rs.Fields, i) + try: + self.converters.append( + varCon[f.Type] + ) # conversion function for this column + except KeyError: + self._raiseCursorError( + api.InternalError, "Data column of Unknown ADO type=%s" % f.Type + ) + self.columnNames[f.Name.lower()] = i # columnNames lookup + + def _makeDescriptionFromRS(self): + # Abort if closed or no recordset. + if self.rs is None: + self._description = None + return + desc = [] + for i in range(self.numberOfColumns): + f = getIndexedValue(self.rs.Fields, i) + if self.rs.EOF or self.rs.BOF: + display_size = None + else: + display_size = ( + f.ActualSize + ) # TODO: Is this the correct defintion according to the DB API 2 Spec ? 
+ null_ok = bool(f.Attributes & adc.adFldMayBeNull) # v2.1 Cole + desc.append( + ( + f.Name, + f.Type, + display_size, + f.DefinedSize, + f.Precision, + f.NumericScale, + null_ok, + ) + ) + self._description = desc + + def get_description(self): + if not self._description: + self._makeDescriptionFromRS() + return self._description + + def __getattr__(self, item): + if item == "description": + return self.get_description() + object.__getattribute__( + self, item + ) # may get here on Remote attribute calls for existing attributes + + def format_description(self, d): + """Format db_api description tuple for printing.""" + if self.description is None: + self._makeDescriptionFromRS() + if isinstance(d, int): + d = self.description[d] + desc = ( + "Name= %s, Type= %s, DispSize= %s, IntSize= %s, Precision= %s, Scale= %s NullOK=%s" + % ( + d[0], + adc.adTypeNames.get(d[1], str(d[1]) + " (unknown type)"), + d[2], + d[3], + d[4], + d[5], + d[6], + ) + ) + return desc + + def close(self, dont_tell_me=False): + """Close the cursor now (rather than whenever __del__ is called). + The cursor will be unusable from this point forward; an Error (or subclass) + exception will be raised if any operation is attempted with the cursor. 
+ """ + if self.connection is None: + return + self.messages = [] + if ( + self.rs and self.rs.State != adc.adStateClosed + ): # rs exists and is open #v2.1 Rose + self.rs.Close() # v2.1 Rose + self.rs = None # let go of the recordset so ADO will let it be disposed #v2.1 Rose + if not dont_tell_me: + self.connection._i_am_closing( + self + ) # take me off the connection's cursors list + self.connection = ( + None # this will make all future method calls on me throw an exception + ) + if verbose: + print("adodbapi Closed cursor at %X" % id(self)) + + def __del__(self): + try: + self.close() + except: + pass + + def _new_command(self, command_type=adc.adCmdText): + self.cmd = None + self.messages = [] + + if self.connection is None: + self._raiseCursorError(api.InterfaceError, None) + return + try: + self.cmd = Dispatch("ADODB.Command") + self.cmd.ActiveConnection = self.connection.connector + self.cmd.CommandTimeout = self.connection.timeout + self.cmd.CommandType = command_type + self.cmd.CommandText = self.commandText + self.cmd.Prepared = bool(self._ado_prepared) + except: + self._raiseCursorError( + api.DatabaseError, + 'Error creating new ADODB.Command object for "%s"' + % repr(self.commandText), + ) + + def _execute_command(self): + # Stored procedures may have an integer return value + self.return_value = None + recordset = None + count = -1 # default value + if verbose: + print('Executing command="%s"' % self.commandText) + try: + # ----- the actual SQL is executed here --- + if api.onIronPython: + ra = Reference[int]() + recordset = self.cmd.Execute(ra) + count = ra.Value + else: # pywin32 + recordset, count = self.cmd.Execute() + # ----- ------------------------------- --- + except Exception as e: + _message = "" + if hasattr(e, "args"): + _message += str(e.args) + "\n" + _message += "Command:\n%s\nParameters:\n%s" % ( + self.commandText, + format_parameters(self.cmd.Parameters, True), + ) + klass = self.connection._suggest_error_class() + 
self._raiseCursorError(klass, _message) + try: + self.rowcount = recordset.RecordCount + except: + self.rowcount = count + self.build_column_info(recordset) + + # The ADO documentation hints that obtaining the recordcount may be timeconsuming + # "If the Recordset object does not support approximate positioning, this property + # may be a significant drain on resources # [ekelund] + # Therefore, COM will not return rowcount for server-side cursors. [Cole] + # Client-side cursors (the default since v2.8) will force a static + # cursor, and rowcount will then be set accurately [Cole] + + def get_rowcount(self): + return self.rowcount + + def get_returned_parameters(self): + """with some providers, returned parameters and the .return_value are not available until + after the last recordset has been read. In that case, you must coll nextset() until it + returns None, then call this method to get your returned information.""" + + retLst = ( + [] + ) # store procedures may return altered parameters, including an added "return value" item + for p in tuple(self.cmd.Parameters): + if verbose > 2: + print( + 'Returned=Name: %s, Dir.: %s, Type: %s, Size: %s, Value: "%s",' + " Precision: %s, NumericScale: %s" + % ( + p.Name, + adc.directions[p.Direction], + adc.adTypeNames.get(p.Type, str(p.Type) + " (unknown type)"), + p.Size, + p.Value, + p.Precision, + p.NumericScale, + ) + ) + pyObject = api.convert_to_python(p.Value, api.variantConversions[p.Type]) + if p.Direction == adc.adParamReturnValue: + self.returnValue = ( + pyObject # also load the undocumented attribute (Vernon's Error!) + ) + self.return_value = pyObject + else: + retLst.append(pyObject) + return retLst # return the parameter list to the caller + + def callproc(self, procname, parameters=None): + """Call a stored database procedure with the given name. + The sequence of parameters must contain one entry for each + argument that the sproc expects. 
The result of the + call is returned as modified copy of the input + sequence. Input parameters are left untouched, output and + input/output parameters replaced with possibly new values. + + The sproc may also provide a result set as output, + which is available through the standard .fetch*() methods. + Extension: A "return_value" property may be set on the + cursor if the sproc defines an integer return value. + """ + self._parameter_names = [] + self.commandText = procname + self._new_command(command_type=adc.adCmdStoredProc) + self._buildADOparameterList(parameters, sproc=True) + if verbose > 2: + print( + "Calling Stored Proc with Params=", + format_parameters(self.cmd.Parameters, True), + ) + self._execute_command() + return self.get_returned_parameters() + + def _reformat_operation(self, operation, parameters): + if self.paramstyle in ("format", "pyformat"): # convert %s to ? + operation, self._parameter_names = api.changeFormatToQmark(operation) + elif self.paramstyle == "named" or ( + self.paramstyle == "dynamic" and isinstance(parameters, Mapping) + ): + operation, self._parameter_names = api.changeNamedToQmark( + operation + ) # convert :name to ? + return operation + + def _buildADOparameterList(self, parameters, sproc=False): + self.parameters = parameters + if parameters is None: + parameters = [] + + # Note: ADO does not preserve the parameter list, even if "Prepared" is True, so we must build every time. 
+ parameters_known = False + if sproc: # needed only if we are calling a stored procedure + try: # attempt to use ADO's parameter list + self.cmd.Parameters.Refresh() + if verbose > 2: + print( + "ADO detected Params=", + format_parameters(self.cmd.Parameters, True), + ) + print("Program Parameters=", repr(parameters)) + parameters_known = True + except api.Error: + if verbose: + print("ADO Parameter Refresh failed") + pass + else: + if len(parameters) != self.cmd.Parameters.Count - 1: + raise api.ProgrammingError( + "You must supply %d parameters for this stored procedure" + % (self.cmd.Parameters.Count - 1) + ) + if sproc or parameters != []: + i = 0 + if parameters_known: # use ado parameter list + if self._parameter_names: # named parameters + for i, pm_name in enumerate(self._parameter_names): + p = getIndexedValue(self.cmd.Parameters, i) + try: + _configure_parameter( + p, parameters[pm_name], p.Type, parameters_known + ) + except Exception as e: + _message = ( + "Error Converting Parameter %s: %s, %s <- %s\n" + % ( + p.Name, + adc.ado_type_name(p.Type), + p.Value, + repr(parameters[pm_name]), + ) + ) + self._raiseCursorError( + api.DataError, _message + "->" + repr(e.args) + ) + else: # regular sequence of parameters + for value in parameters: + p = getIndexedValue(self.cmd.Parameters, i) + if ( + p.Direction == adc.adParamReturnValue + ): # this is an extra parameter added by ADO + i += 1 # skip the extra + p = getIndexedValue(self.cmd.Parameters, i) + try: + _configure_parameter(p, value, p.Type, parameters_known) + except Exception as e: + _message = ( + "Error Converting Parameter %s: %s, %s <- %s\n" + % ( + p.Name, + adc.ado_type_name(p.Type), + p.Value, + repr(value), + ) + ) + self._raiseCursorError( + api.DataError, _message + "->" + repr(e.args) + ) + i += 1 + else: # -- build own parameter list + if ( + self._parameter_names + ): # we expect a dictionary of parameters, this is the list of expected names + for parm_name in self._parameter_names: + 
elem = parameters[parm_name] + adotype = api.pyTypeToADOType(elem) + p = self.cmd.CreateParameter( + parm_name, adotype, adc.adParamInput + ) + _configure_parameter(p, elem, adotype, parameters_known) + try: + self.cmd.Parameters.Append(p) + except Exception as e: + _message = "Error Building Parameter %s: %s, %s <- %s\n" % ( + p.Name, + adc.ado_type_name(p.Type), + p.Value, + repr(elem), + ) + self._raiseCursorError( + api.DataError, _message + "->" + repr(e.args) + ) + else: # expecting the usual sequence of parameters + if sproc: + p = self.cmd.CreateParameter( + "@RETURN_VALUE", adc.adInteger, adc.adParamReturnValue + ) + self.cmd.Parameters.Append(p) + + for elem in parameters: + name = "p%i" % i + adotype = api.pyTypeToADOType(elem) + p = self.cmd.CreateParameter( + name, adotype, adc.adParamInput + ) # Name, Type, Direction, Size, Value + _configure_parameter(p, elem, adotype, parameters_known) + try: + self.cmd.Parameters.Append(p) + except Exception as e: + _message = "Error Building Parameter %s: %s, %s <- %s\n" % ( + p.Name, + adc.ado_type_name(p.Type), + p.Value, + repr(elem), + ) + self._raiseCursorError( + api.DataError, _message + "->" + repr(e.args) + ) + i += 1 + if self._ado_prepared == "setup": + self._ado_prepared = ( + True # parameters will be "known" by ADO next loop + ) + + def execute(self, operation, parameters=None): + """Prepare and execute a database operation (query or command). + + Parameters may be provided as sequence or mapping and will be bound to variables in the operation. + Variables are specified in a database-specific notation + (see the module's paramstyle attribute for details). [5] + A reference to the operation will be retained by the cursor. + If the same operation object is passed in again, then the cursor + can optimize its behavior. This is most effective for algorithms + where the same operation is used, but different parameters are bound to it (many times). 
+ + For maximum efficiency when reusing an operation, it is best to use + the setinputsizes() method to specify the parameter types and sizes ahead of time. + It is legal for a parameter to not match the predefined information; + the implementation should compensate, possibly with a loss of efficiency. + + The parameters may also be specified as list of tuples to e.g. insert multiple rows in + a single operation, but this kind of usage is depreciated: executemany() should be used instead. + + Return value is not defined. + + [5] The module will use the __getitem__ method of the parameters object to map either positions + (integers) or names (strings) to parameter values. This allows for both sequences and mappings + to be used as input. + The term "bound" refers to the process of binding an input value to a database execution buffer. + In practical terms, this means that the input value is directly used as a value in the operation. + The client should not be required to "escape" the value so that it can be used -- the value + should be equal to the actual database value.""" + if ( + self.command is not operation + or self._ado_prepared == "setup" + or not hasattr(self, "commandText") + ): + if self.command is not operation: + self._ado_prepared = False + self.command = operation + self._parameter_names = [] + self.commandText = ( + operation + if (self.paramstyle == "qmark" or not parameters) + else self._reformat_operation(operation, parameters) + ) + self._new_command() + self._buildADOparameterList(parameters) + if verbose > 3: + print("Params=", format_parameters(self.cmd.Parameters, True)) + self._execute_command() + + def executemany(self, operation, seq_of_parameters): + """Prepare a database operation (query or command) + and then execute it against all parameter sequences or mappings found in the sequence seq_of_parameters. + + Return values are not defined. 
+ """ + self.messages = list() + total_recordcount = 0 + + self.prepare(operation) + for params in seq_of_parameters: + self.execute(self.command, params) + if self.rowcount == -1: + total_recordcount = -1 + if total_recordcount != -1: + total_recordcount += self.rowcount + self.rowcount = total_recordcount + + def _fetch(self, limit=None): + """Fetch rows from the current recordset. + + limit -- Number of rows to fetch, or None (default) to fetch all rows. + """ + if self.connection is None or self.rs is None: + self._raiseCursorError( + api.FetchFailedError, "fetch() on closed connection or empty query set" + ) + return + + if self.rs.State == adc.adStateClosed or self.rs.BOF or self.rs.EOF: + return list() + if limit: # limit number of rows retrieved + ado_results = self.rs.GetRows(limit) + else: # get all rows + ado_results = self.rs.GetRows() + if ( + self.recordset_format == api.RS_ARRAY + ): # result of GetRows is a two-dimension array + length = ( + len(ado_results) // self.numberOfColumns + ) # length of first dimension + else: # pywin32 + length = len(ado_results[0]) # result of GetRows is tuples in a tuple + fetchObject = api.SQLrows( + ado_results, length, self + ) # new object to hold the results of the fetch + return fetchObject + + def fetchone(self): + """Fetch the next row of a query result set, returning a single sequence, + or None when no more data is available. + + An Error (or subclass) exception is raised if the previous call to executeXXX() + did not produce any result set or no call was issued yet. + """ + self.messages = [] + result = self._fetch(1) + if result: # return record (not list of records) + return result[0] + return None + + def fetchmany(self, size=None): + """Fetch the next set of rows of a query result, returning a list of tuples. An empty sequence is returned when no more rows are available. + + The number of rows to fetch per call is specified by the parameter. 
+ If it is not given, the cursor's arraysize determines the number of rows to be fetched. + The method should try to fetch as many rows as indicated by the size parameter. + If this is not possible due to the specified number of rows not being available, + fewer rows may be returned. + + An Error (or subclass) exception is raised if the previous call to executeXXX() + did not produce any result set or no call was issued yet. + + Note there are performance considerations involved with the size parameter. + For optimal performance, it is usually best to use the arraysize attribute. + If the size parameter is used, then it is best for it to retain the same value from + one fetchmany() call to the next. + """ + self.messages = [] + if size is None: + size = self.arraysize + return self._fetch(size) + + def fetchall(self): + """Fetch all (remaining) rows of a query result, returning them as a sequence of sequences (e.g. a list of tuples). + + Note that the cursor's arraysize attribute + can affect the performance of this operation. + An Error (or subclass) exception is raised if the previous call to executeXXX() + did not produce any result set or no call was issued yet. + """ + self.messages = [] + return self._fetch() + + def nextset(self): + """Skip to the next available recordset, discarding any remaining rows from the current recordset. + + If there are no more sets, the method returns None. Otherwise, it returns a true + value and subsequent calls to the fetch methods will return rows from the next result set. + + An Error (or subclass) exception is raised if the previous call to executeXXX() + did not produce any result set or no call was issued yet. 
+ """ + self.messages = [] + if self.connection is None or self.rs is None: + self._raiseCursorError( + api.OperationalError, + ("nextset() on closed connection or empty query set"), + ) + return None + + if api.onIronPython: + try: + recordset = self.rs.NextRecordset() + except TypeError: + recordset = None + except api.Error as exc: + self._raiseCursorError(api.NotSupportedError, exc.args) + else: # pywin32 + try: # [begin 2.1 ekelund] + rsTuple = self.rs.NextRecordset() # + except pywintypes.com_error as exc: # return appropriate error + self._raiseCursorError( + api.NotSupportedError, exc.args + ) # [end 2.1 ekelund] + recordset = rsTuple[0] + if recordset is None: + return None + self.build_column_info(recordset) + return True + + def setinputsizes(self, sizes): + pass + + def setoutputsize(self, size, column=None): + pass + + def _last_query(self): # let the programmer see what query we actually used + try: + if self.parameters == None: + ret = self.commandText + else: + ret = "%s,parameters=%s" % (self.commandText, repr(self.parameters)) + except: + ret = None + return ret + + query = property(_last_query, None, None, "returns the last query executed") + + +if __name__ == "__main__": + raise api.ProgrammingError(version + " cannot be run as a main program.") diff --git a/lib/adodbapi/apibase.py b/lib/adodbapi/apibase.py new file mode 100644 index 00000000..a56cd4b6 --- /dev/null +++ b/lib/adodbapi/apibase.py @@ -0,0 +1,794 @@ +"""adodbapi.apibase - A python DB API 2.0 (PEP 249) interface to Microsoft ADO + +Copyright (C) 2002 Henrik Ekelund, version 2.1 by Vernon Cole +* http://sourceforge.net/projects/pywin32 +* http://sourceforge.net/projects/adodbapi +""" + +import datetime +import decimal +import numbers +import sys +import time + +# noinspection PyUnresolvedReferences +from . 
import ado_consts as adc + +verbose = False # debugging flag + +onIronPython = sys.platform == "cli" +if onIronPython: # we need type definitions for odd data we may need to convert + # noinspection PyUnresolvedReferences + from System import DateTime, DBNull + + NullTypes = (type(None), DBNull) +else: + DateTime = type(NotImplemented) # should never be seen on win32 + NullTypes = type(None) + +# --- define objects to smooth out Python3 <-> Python 2.x differences +unicodeType = str +longType = int +StringTypes = str +makeByteBuffer = bytes +memoryViewType = memoryview +_BaseException = Exception + +try: # jdhardy -- handle bytes under IronPython & Py3 + bytes +except NameError: + bytes = str # define it for old Pythons + + +# ------- Error handlers ------ +def standardErrorHandler(connection, cursor, errorclass, errorvalue): + err = (errorclass, errorvalue) + try: + connection.messages.append(err) + except: + pass + if cursor is not None: + try: + cursor.messages.append(err) + except: + pass + raise errorclass(errorvalue) + + +# Note: _BaseException is defined differently between Python 2.x and 3.x +class Error(_BaseException): + pass # Exception that is the base class of all other error + # exceptions. You can use this to catch all errors with one + # single 'except' statement. Warnings are not considered + # errors and thus should not use this class as base. It must + # be a subclass of the Python StandardError (defined in the + # module exceptions). 


class Warning(_BaseException):
    """Exception raised for important warnings, such as data truncation
    while inserting (PEP 249).

    NOTE: per PEP 249 this is deliberately NOT a subclass of Error."""

    pass


class InterfaceError(Error):
    """Exception raised for errors that are related to the database
    interface rather than the database itself (PEP 249)."""

    pass


class DatabaseError(Error):
    """Exception raised for errors that are related to the database
    (PEP 249). Base class of the more specific database errors below."""

    pass


class InternalError(DatabaseError):
    """Exception raised when the database encounters an internal error,
    e.g. the cursor is not valid anymore (PEP 249)."""

    pass


class OperationalError(DatabaseError):
    """Exception raised for errors related to the database's operation and
    not necessarily under the control of the programmer, e.g. an unexpected
    disconnect or a failed connection attempt (PEP 249)."""

    pass


class ProgrammingError(DatabaseError):
    """Exception raised for programming errors, e.g. table not found,
    syntax error in the SQL statement, wrong number of parameters
    specified (PEP 249)."""

    pass


class IntegrityError(DatabaseError):
    """Exception raised when the relational integrity of the database is
    affected, e.g. a foreign key check fails (PEP 249)."""

    pass


class DataError(DatabaseError):
    """Exception raised for errors that are due to problems with the
    processed data, e.g. numeric value out of range (PEP 249)."""

    pass


class NotSupportedError(DatabaseError):
    """Exception raised when a method or database API is used which is not
    supported by the database (PEP 249)."""

    pass


class FetchFailedError(OperationalError):
    """
    Error is used by RawStoredProcedureQuerySet to determine when a fetch
    failed due to a connection being closed or there is no record set
    returned. (Non-standard, added especially for django)
    """

    pass


# # # # # ----- Type Objects and Constructors ----- # # # # #
# Many databases need to have the input in a particular format for binding to an operation's input parameters.
# For example, if an input is destined for a DATE column, then it must be bound to the database in a particular
# string format. Similar problems exist for "Row ID" columns or large binary items (e.g. blobs or RAW columns).
# This presents problems for Python since the parameters to the executeXXX() method are untyped.
# When the database module sees a Python string object, it doesn't know if it should be bound as a simple CHAR
# column, as a raw BINARY item, or as a DATE.
#
# To overcome this problem, a module must provide the constructors defined below to create objects that can
# hold special values. When passed to the cursor methods, the module can then detect the proper type of
# the input parameter and bind it accordingly.

# A Cursor Object's description attribute returns information about each of the result columns of a query.
# The type_code must compare equal to one of Type Objects defined below. Type Objects may be equal to more than
# one type code (e.g. DATETIME could be equal to the type codes for date, time and timestamp columns;
# see the Implementation Hints below for details).
+ +# SQL NULL values are represented by the Python None singleton on input and output. + +# Note: Usage of Unix ticks for database interfacing can cause troubles because of the limited date range they cover. + + +# def Date(year,month,day): +# "This function constructs an object holding a date value. " +# return dateconverter.date(year,month,day) #dateconverter.Date(year,month,day) +# +# def Time(hour,minute,second): +# "This function constructs an object holding a time value. " +# return dateconverter.time(hour, minute, second) # dateconverter.Time(hour,minute,second) +# +# def Timestamp(year,month,day,hour,minute,second): +# "This function constructs an object holding a time stamp value. " +# return dateconverter.datetime(year,month,day,hour,minute,second) +# +# def DateFromTicks(ticks): +# """This function constructs an object holding a date value from the given ticks value +# (number of seconds since the epoch; see the documentation of the standard Python time module for details). """ +# return Date(*time.gmtime(ticks)[:3]) +# +# def TimeFromTicks(ticks): +# """This function constructs an object holding a time value from the given ticks value +# (number of seconds since the epoch; see the documentation of the standard Python time module for details). """ +# return Time(*time.gmtime(ticks)[3:6]) +# +# def TimestampFromTicks(ticks): +# """This function constructs an object holding a time stamp value from the given +# ticks value (number of seconds since the epoch; +# see the documentation of the standard Python time module for details). """ +# return Timestamp(*time.gmtime(ticks)[:6]) +# +# def Binary(aString): +# """This function constructs an object capable of holding a binary (long) string value. 
""" +# b = makeByteBuffer(aString) +# return b +# ----- Time converters ---------------------------------------------- +class TimeConverter(object): # this is a generic time converter skeleton + def __init__(self): # the details will be filled in by instances + self._ordinal_1899_12_31 = datetime.date(1899, 12, 31).toordinal() - 1 + # Use cls.types to compare if an input parameter is a datetime + self.types = { + type(self.Date(2000, 1, 1)), + type(self.Time(12, 1, 1)), + type(self.Timestamp(2000, 1, 1, 12, 1, 1)), + datetime.datetime, + datetime.time, + datetime.date, + } + + def COMDate(self, obj): + """Returns a ComDate from a date-time""" + try: # most likely a datetime + tt = obj.timetuple() + + try: + ms = obj.microsecond + except: + ms = 0 + return self.ComDateFromTuple(tt, ms) + except: # might be a tuple + try: + return self.ComDateFromTuple(obj) + except: # try an mxdate + try: + return obj.COMDate() + except: + raise ValueError('Cannot convert "%s" to COMdate.' % repr(obj)) + + def ComDateFromTuple(self, t, microseconds=0): + d = datetime.date(t[0], t[1], t[2]) + integerPart = d.toordinal() - self._ordinal_1899_12_31 + ms = (t[3] * 3600 + t[4] * 60 + t[5]) * 1000000 + microseconds + fractPart = float(ms) / 86400000000.0 + return integerPart + fractPart + + def DateObjectFromCOMDate(self, comDate): + "Returns an object of the wanted type from a ComDate" + raise NotImplementedError # "Abstract class" + + def Date(self, year, month, day): + "This function constructs an object holding a date value." + raise NotImplementedError # "Abstract class" + + def Time(self, hour, minute, second): + "This function constructs an object holding a time value." + raise NotImplementedError # "Abstract class" + + def Timestamp(self, year, month, day, hour, minute, second): + "This function constructs an object holding a time stamp value." 
+ raise NotImplementedError # "Abstract class" + # all purpose date to ISO format converter + + def DateObjectToIsoFormatString(self, obj): + "This function should return a string in the format 'YYYY-MM-dd HH:MM:SS:ms' (ms optional)" + try: # most likely, a datetime.datetime + s = obj.isoformat(" ") + except (TypeError, AttributeError): + if isinstance(obj, datetime.date): + s = obj.isoformat() + " 00:00:00" # return exact midnight + else: + try: # maybe it has a strftime method, like mx + s = obj.strftime("%Y-%m-%d %H:%M:%S") + except AttributeError: + try: # but may be time.struct_time + s = time.strftime("%Y-%m-%d %H:%M:%S", obj) + except: + raise ValueError('Cannot convert "%s" to isoformat' % repr(obj)) + return s + + +# -- Optional: if mx extensions are installed you may use mxDateTime ---- +try: + import mx.DateTime + + mxDateTime = True +except: + mxDateTime = False +if mxDateTime: + + class mxDateTimeConverter(TimeConverter): # used optionally if installed + def __init__(self): + TimeConverter.__init__(self) + self.types.add(type(mx.DateTime)) + + def DateObjectFromCOMDate(self, comDate): + return mx.DateTime.DateTimeFromCOMDate(comDate) + + def Date(self, year, month, day): + return mx.DateTime.Date(year, month, day) + + def Time(self, hour, minute, second): + return mx.DateTime.Time(hour, minute, second) + + def Timestamp(self, year, month, day, hour, minute, second): + return mx.DateTime.Timestamp(year, month, day, hour, minute, second) + +else: + + class mxDateTimeConverter(TimeConverter): + pass # if no mx is installed + + +class pythonDateTimeConverter(TimeConverter): # standard since Python 2.3 + def __init__(self): + TimeConverter.__init__(self) + + def DateObjectFromCOMDate(self, comDate): + if isinstance(comDate, datetime.datetime): + odn = comDate.toordinal() + tim = comDate.time() + new = datetime.datetime.combine(datetime.datetime.fromordinal(odn), tim) + return new + # return comDate.replace(tzinfo=None) # make non aware + elif 
isinstance(comDate, DateTime): + fComDate = comDate.ToOADate() # ironPython clr Date/Time + else: + fComDate = float(comDate) # ComDate is number of days since 1899-12-31 + integerPart = int(fComDate) + floatpart = fComDate - integerPart + ##if floatpart == 0.0: + ## return datetime.date.fromordinal(integerPart + self._ordinal_1899_12_31) + dte = datetime.datetime.fromordinal( + integerPart + self._ordinal_1899_12_31 + ) + datetime.timedelta(milliseconds=floatpart * 86400000) + # millisecondsperday=86400000 # 24*60*60*1000 + return dte + + def Date(self, year, month, day): + return datetime.date(year, month, day) + + def Time(self, hour, minute, second): + return datetime.time(hour, minute, second) + + def Timestamp(self, year, month, day, hour, minute, second): + return datetime.datetime(year, month, day, hour, minute, second) + + +class pythonTimeConverter(TimeConverter): # the old, ?nix type date and time + def __init__(self): # caution: this Class gets confised by timezones and DST + TimeConverter.__init__(self) + self.types.add(time.struct_time) + + def DateObjectFromCOMDate(self, comDate): + "Returns ticks since 1970" + if isinstance(comDate, datetime.datetime): + return comDate.timetuple() + elif isinstance(comDate, DateTime): # ironPython clr date/time + fcomDate = comDate.ToOADate() + else: + fcomDate = float(comDate) + secondsperday = 86400 # 24*60*60 + # ComDate is number of days since 1899-12-31, gmtime epoch is 1970-1-1 = 25569 days + t = time.gmtime(secondsperday * (fcomDate - 25569.0)) + return t # year,month,day,hour,minute,second,weekday,julianday,daylightsaving=t + + def Date(self, year, month, day): + return self.Timestamp(year, month, day, 0, 0, 0) + + def Time(self, hour, minute, second): + return time.gmtime((hour * 60 + minute) * 60 + second) + + def Timestamp(self, year, month, day, hour, minute, second): + return time.localtime( + time.mktime((year, month, day, hour, minute, second, 0, 0, -1)) + ) + + +base_dateconverter = 
pythonDateTimeConverter() + +# ------ DB API required module attributes --------------------- +threadsafety = 1 # TODO -- find out whether this module is actually BETTER than 1. + +apilevel = "2.0" # String constant stating the supported DB API level. + +paramstyle = "qmark" # the default parameter style + +# ------ control for an extension which may become part of DB API 3.0 --- +accepted_paramstyles = ("qmark", "named", "format", "pyformat", "dynamic") + +# ------------------------------------------------------------------------------------------ +# define similar types for generic conversion routines +adoIntegerTypes = ( + adc.adInteger, + adc.adSmallInt, + adc.adTinyInt, + adc.adUnsignedInt, + adc.adUnsignedSmallInt, + adc.adUnsignedTinyInt, + adc.adBoolean, + adc.adError, +) # max 32 bits +adoRowIdTypes = (adc.adChapter,) # v2.1 Rose +adoLongTypes = (adc.adBigInt, adc.adFileTime, adc.adUnsignedBigInt) +adoExactNumericTypes = ( + adc.adDecimal, + adc.adNumeric, + adc.adVarNumeric, + adc.adCurrency, +) # v2.3 Cole +adoApproximateNumericTypes = (adc.adDouble, adc.adSingle) # v2.1 Cole +adoStringTypes = ( + adc.adBSTR, + adc.adChar, + adc.adLongVarChar, + adc.adLongVarWChar, + adc.adVarChar, + adc.adVarWChar, + adc.adWChar, +) +adoBinaryTypes = (adc.adBinary, adc.adLongVarBinary, adc.adVarBinary) +adoDateTimeTypes = (adc.adDBTime, adc.adDBTimeStamp, adc.adDate, adc.adDBDate) +adoRemainingTypes = ( + adc.adEmpty, + adc.adIDispatch, + adc.adIUnknown, + adc.adPropVariant, + adc.adArray, + adc.adUserDefined, + adc.adVariant, + adc.adGUID, +) + + +# this class is a trick to determine whether a type is a member of a related group of types. see PEP notes +class DBAPITypeObject(object): + def __init__(self, valuesTuple): + self.values = frozenset(valuesTuple) + + def __eq__(self, other): + return other in self.values + + def __ne__(self, other): + return other not in self.values + + +"""This type object is used to describe columns in a database that are string-based (e.g. 
CHAR). """ +STRING = DBAPITypeObject(adoStringTypes) + +"""This type object is used to describe (long) binary columns in a database (e.g. LONG, RAW, BLOBs). """ +BINARY = DBAPITypeObject(adoBinaryTypes) + +"""This type object is used to describe numeric columns in a database. """ +NUMBER = DBAPITypeObject( + adoIntegerTypes + adoLongTypes + adoExactNumericTypes + adoApproximateNumericTypes +) + +"""This type object is used to describe date/time columns in a database. """ + +DATETIME = DBAPITypeObject(adoDateTimeTypes) +"""This type object is used to describe the "Row ID" column in a database. """ +ROWID = DBAPITypeObject(adoRowIdTypes) + +OTHER = DBAPITypeObject(adoRemainingTypes) + +# ------- utilities for translating python data types to ADO data types --------------------------------- +typeMap = { + memoryViewType: adc.adVarBinary, + float: adc.adDouble, + type(None): adc.adEmpty, + str: adc.adBSTR, + bool: adc.adBoolean, # v2.1 Cole + decimal.Decimal: adc.adDecimal, + int: adc.adBigInt, + bytes: adc.adVarBinary, +} + + +def pyTypeToADOType(d): + tp = type(d) + try: + return typeMap[tp] + except KeyError: # The type was not defined in the pre-computed Type table + from . import dateconverter + + if ( + tp in dateconverter.types + ): # maybe it is one of our supported Date/Time types + return adc.adDate + # otherwise, attempt to discern the type by probing the data object itself -- to handle duck typing + if isinstance(d, StringTypes): + return adc.adBSTR + if isinstance(d, numbers.Integral): + return adc.adBigInt + if isinstance(d, numbers.Real): + return adc.adDouble + raise DataError('cannot convert "%s" (type=%s) to ADO' % (repr(d), tp)) + + +# # # # # # # # # # # # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +# functions to convert database values to Python objects +# ------------------------------------------------------------------------ +# variant type : function converting variant to Python value +def variantConvertDate(v): + from . 
import dateconverter # this function only called when adodbapi is running + + return dateconverter.DateObjectFromCOMDate(v) + + +def cvtString(variant): # use to get old action of adodbapi v1 if desired + if onIronPython: + try: + return variant.ToString() + except: + pass + return str(variant) + + +def cvtDecimal(variant): # better name + return _convertNumberWithCulture(variant, decimal.Decimal) + + +def cvtNumeric(variant): # older name - don't break old code + return cvtDecimal(variant) + + +def cvtFloat(variant): + return _convertNumberWithCulture(variant, float) + + +def _convertNumberWithCulture(variant, f): + try: + return f(variant) + except (ValueError, TypeError, decimal.InvalidOperation): + try: + europeVsUS = str(variant).replace(",", ".") + return f(europeVsUS) + except (ValueError, TypeError, decimal.InvalidOperation): + pass + + +def cvtInt(variant): + return int(variant) + + +def cvtLong(variant): # only important in old versions where long and int differ + return int(variant) + + +def cvtBuffer(variant): + return bytes(variant) + + +def cvtUnicode(variant): + return str(variant) + + +def identity(x): + return x + + +def cvtUnusual(variant): + if verbose > 1: + sys.stderr.write("Conversion called for Unusual data=%s\n" % repr(variant)) + if isinstance(variant, DateTime): # COMdate or System.Date + from .adodbapi import ( # this will only be called when adodbapi is in use, and very rarely + dateconverter, + ) + + return dateconverter.DateObjectFromCOMDate(variant) + return variant # cannot find conversion function -- just give the data to the user + + +def convert_to_python(variant, func): # convert DB value into Python value + if isinstance(variant, NullTypes): # IronPython Null or None + return None + return func(variant) # call the appropriate conversion function + + +class MultiMap(dict): # builds a dictionary from {(sequence,of,keys) : function} + """A dictionary of ado.type : function -- but you can set multiple items by passing a sequence of 
keys""" + + # useful for defining conversion functions for groups of similar data types. + def __init__(self, aDict): + for k, v in list(aDict.items()): + self[k] = v # we must call __setitem__ + + def __setitem__(self, adoType, cvtFn): + "set a single item, or a whole sequence of items" + try: # user passed us a sequence, set them individually + for type in adoType: + dict.__setitem__(self, type, cvtFn) + except TypeError: # a single value fails attempt to iterate + dict.__setitem__(self, adoType, cvtFn) + + +# initialize variantConversions dictionary used to convert SQL to Python +# this is the dictionary of default conversion functions, built by the class above. +# this becomes a class attribute for the Connection, and that attribute is used +# to build the list of column conversion functions for the Cursor +variantConversions = MultiMap( + { + adoDateTimeTypes: variantConvertDate, + adoApproximateNumericTypes: cvtFloat, + adoExactNumericTypes: cvtDecimal, # use to force decimal rather than unicode + adoLongTypes: cvtLong, + adoIntegerTypes: cvtInt, + adoRowIdTypes: cvtInt, + adoStringTypes: identity, + adoBinaryTypes: cvtBuffer, + adoRemainingTypes: cvtUnusual, + } +) + +# # # # # classes to emulate the result of cursor.fetchxxx() as a sequence of sequences # # # # # +# "an ENUM of how my low level records are laid out" +RS_WIN_32, RS_ARRAY, RS_REMOTE = list(range(1, 4)) + + +class SQLrow(object): # a single database row + # class to emulate a sequence, so that a column may be retrieved by either number or name + def __init__(self, rows, index): # "rows" is an _SQLrows object, index is which row + self.rows = rows # parent 'fetch' container object + self.index = index # my row number within parent + + def __getattr__(self, name): # used for row.columnName type of value access + try: + return self._getValue(self.rows.columnNames[name.lower()]) + except KeyError: + raise AttributeError('Unknown column name "{}"'.format(name)) + + def _getValue(self, key): # key 
must be an integer + if ( + self.rows.recordset_format == RS_ARRAY + ): # retrieve from two-dimensional array + v = self.rows.ado_results[key, self.index] + elif self.rows.recordset_format == RS_REMOTE: + v = self.rows.ado_results[self.index][key] + else: # pywin32 - retrieve from tuple of tuples + v = self.rows.ado_results[key][self.index] + if self.rows.converters is NotImplemented: + return v + return convert_to_python(v, self.rows.converters[key]) + + def __len__(self): + return self.rows.numberOfColumns + + def __getitem__(self, key): # used for row[key] type of value access + if isinstance(key, int): # normal row[1] designation + try: + return self._getValue(key) + except IndexError: + raise + if isinstance(key, slice): + indices = key.indices(self.rows.numberOfColumns) + vl = [self._getValue(i) for i in range(*indices)] + return tuple(vl) + try: + return self._getValue( + self.rows.columnNames[key.lower()] + ) # extension row[columnName] designation + except (KeyError, TypeError): + er, st, tr = sys.exc_info() + raise er( + 'No such key as "%s" in %s' % (repr(key), self.__repr__()) + ).with_traceback(tr) + + def __iter__(self): + return iter(self.__next__()) + + def __next__(self): + for n in range(self.rows.numberOfColumns): + yield self._getValue(n) + + def __repr__(self): # create a human readable representation + taglist = sorted(list(self.rows.columnNames.items()), key=lambda x: x[1]) + s = "" + + def __str__(self): # create a pretty human readable representation + return str( + tuple(str(self._getValue(i)) for i in range(self.rows.numberOfColumns)) + ) + + # TO-DO implement pickling an SQLrow directly + # def __getstate__(self): return self.__dict__ + # def __setstate__(self, d): self.__dict__.update(d) + # which basically tell pickle to treat your class just like a normal one, + # taking self.__dict__ as representing the whole of the instance state, + # despite the existence of the __getattr__. 
+ # # # # + + +class SQLrows(object): + # class to emulate a sequence for multiple rows using a container object + def __init__(self, ado_results, numberOfRows, cursor): + self.ado_results = ado_results # raw result of SQL get + try: + self.recordset_format = cursor.recordset_format + self.numberOfColumns = cursor.numberOfColumns + self.converters = cursor.converters + self.columnNames = cursor.columnNames + except AttributeError: + self.recordset_format = RS_ARRAY + self.numberOfColumns = 0 + self.converters = [] + self.columnNames = {} + self.numberOfRows = numberOfRows + + def __len__(self): + return self.numberOfRows + + def __getitem__(self, item): # used for row or row,column access + if not self.ado_results: + return [] + if isinstance(item, slice): # will return a list of row objects + indices = item.indices(self.numberOfRows) + return [SQLrow(self, k) for k in range(*indices)] + elif isinstance(item, tuple) and len(item) == 2: + # d = some_rowsObject[i,j] will return a datum from a two-dimension address + i, j = item + if not isinstance(j, int): + try: + j = self.columnNames[j.lower()] # convert named column to numeric + except KeyError: + raise KeyError('adodbapi: no such column name as "%s"' % repr(j)) + if self.recordset_format == RS_ARRAY: # retrieve from two-dimensional array + v = self.ado_results[j, i] + elif self.recordset_format == RS_REMOTE: + v = self.ado_results[i][j] + else: # pywin32 - retrieve from tuple of tuples + v = self.ado_results[j][i] + if self.converters is NotImplemented: + return v + return convert_to_python(v, self.converters[j]) + else: + row = SQLrow(self, item) # new row descriptor + return row + + def __iter__(self): + return iter(self.__next__()) + + def __next__(self): + for n in range(self.numberOfRows): + row = SQLrow(self, n) + yield row + # # # # # + + # # # # # functions to re-format SQL requests to other paramstyle requirements # # # # # # # # # # + + +def changeNamedToQmark( + op, +): # convert from 'named' 
paramstyle to ADO required '?'mark parameters + outOp = "" + outparms = [] + chunks = op.split( + "'" + ) # quote all literals -- odd numbered list results are literals. + inQuotes = False + for chunk in chunks: + if inQuotes: # this is inside a quote + if chunk == "": # double apostrophe to quote one apostrophe + outOp = outOp[:-1] # so take one away + else: + outOp += "'" + chunk + "'" # else pass the quoted string as is. + else: # is SQL code -- look for a :namedParameter + while chunk: # some SQL string remains + sp = chunk.split(":", 1) + outOp += sp[0] # concat the part up to the : + s = "" + try: + chunk = sp[1] + except IndexError: + chunk = None + if chunk: # there was a parameter - parse it out + i = 0 + c = chunk[0] + while c.isalnum() or c == "_": + i += 1 + try: + c = chunk[i] + except IndexError: + break + s = chunk[:i] + chunk = chunk[i:] + if s: + outparms.append(s) # list the parameters in order + outOp += "?" # put in the Qmark + inQuotes = not inQuotes + return outOp, outparms + + +def changeFormatToQmark( + op, +): # convert from 'format' paramstyle to ADO required '?'mark parameters + outOp = "" + outparams = [] + chunks = op.split( + "'" + ) # quote all literals -- odd numbered list results are literals. + inQuotes = False + for chunk in chunks: + if inQuotes: + if ( + outOp != "" and chunk == "" + ): # he used a double apostrophe to quote one apostrophe + outOp = outOp[:-1] # so take one away + else: + outOp += "'" + chunk + "'" # else pass the quoted string as is. + else: # is SQL code -- look for a %s parameter + if "%(" in chunk: # ugh! pyformat! + while chunk: # some SQL string remains + sp = chunk.split("%(", 1) + outOp += sp[0] # concat the part up to the % + if len(sp) > 1: + try: + s, chunk = sp[1].split(")s", 1) # find the ')s' + except ValueError: + raise ProgrammingError( + 'Pyformat SQL has incorrect format near "%s"' % chunk + ) + outparams.append(s) + outOp += "?" 
# put in the Qmark + else: + chunk = None + else: # proper '%s' format + sp = chunk.split("%s") # make each %s + outOp += "?".join(sp) # into ? + inQuotes = not inQuotes # every other chunk is a quoted string + return outOp, outparams diff --git a/lib/adodbapi/examples/db_print.py b/lib/adodbapi/examples/db_print.py new file mode 100644 index 00000000..3f5f9d5b --- /dev/null +++ b/lib/adodbapi/examples/db_print.py @@ -0,0 +1,72 @@ +""" db_print.py -- a simple demo for ADO database reads.""" + +import sys + +import adodbapi.ado_consts as adc + +cmd_args = ("filename", "table_name") +if "help" in sys.argv: + print("possible settings keywords are:", cmd_args) + sys.exit() + +kw_args = {} # pick up filename and proxy address from command line (optionally) +for arg in sys.argv: + s = arg.split("=") + if len(s) > 1: + if s[0] in cmd_args: + kw_args[s[0]] = s[1] + +kw_args.setdefault( + "filename", "test.mdb" +) # assumes server is running from examples folder +kw_args.setdefault("table_name", "Products") # the name of the demo table + +# the server needs to select the provider based on his Python installation +provider_switch = ["provider", "Microsoft.ACE.OLEDB.12.0", "Microsoft.Jet.OLEDB.4.0"] + +# ------------------------ START HERE ------------------------------------- +# create the connection +constr = "Provider=%(provider)s;Data Source=%(filename)s" +import adodbapi as db + +con = db.connect(constr, kw_args, macro_is64bit=provider_switch) + +if kw_args["table_name"] == "?": + print("The tables in your database are:") + for name in con.get_table_names(): + print(name) +else: + # make a cursor on the connection + with con.cursor() as c: + # run an SQL statement on the cursor + sql = "select * from %s" % kw_args["table_name"] + print('performing query="%s"' % sql) + c.execute(sql) + + # check the results + print( + 'result rowcount shows as= %d. 
(Note: -1 means "not known")' % (c.rowcount,) + ) + print("") + print("result data description is:") + print(" NAME Type DispSize IntrnlSz Prec Scale Null?") + for d in c.description: + print( + ("%16s %-12s %8s %8d %4d %5d %s") + % (d[0], adc.adTypeNames[d[1]], d[2], d[3], d[4], d[5], bool(d[6])) + ) + print("") + print("str() of first five records are...") + + # get the results + db = c.fetchmany(5) + + # print them + for rec in db: + print(rec) + + print("") + print("repr() of next row is...") + print(repr(c.fetchone())) + print("") +con.close() diff --git a/lib/adodbapi/examples/db_table_names.py b/lib/adodbapi/examples/db_table_names.py new file mode 100644 index 00000000..eb512a33 --- /dev/null +++ b/lib/adodbapi/examples/db_table_names.py @@ -0,0 +1,20 @@ +""" db_table_names.py -- a simple demo for ADO database table listing.""" +import sys + +import adodbapi + +try: + databasename = sys.argv[1] +except IndexError: + databasename = "test.mdb" + +provider = ["prv", "Microsoft.ACE.OLEDB.12.0", "Microsoft.Jet.OLEDB.4.0"] +constr = "Provider=%(prv)s;Data Source=%(db)s" + +# create the connection +con = adodbapi.connect(constr, db=databasename, macro_is64bit=provider) + +print("Table names in= %s" % databasename) + +for table in con.get_table_names(): + print(table) diff --git a/lib/adodbapi/examples/xls_read.py b/lib/adodbapi/examples/xls_read.py new file mode 100644 index 00000000..45e0d277 --- /dev/null +++ b/lib/adodbapi/examples/xls_read.py @@ -0,0 +1,41 @@ +import sys + +import adodbapi + +try: + import adodbapi.is64bit as is64bit + + is64 = is64bit.Python() +except ImportError: + is64 = False + +if is64: + driver = "Microsoft.ACE.OLEDB.12.0" +else: + driver = "Microsoft.Jet.OLEDB.4.0" +extended = 'Extended Properties="Excel 8.0;HDR=Yes;IMEX=1;"' + +try: # first command line argument will be xls file name -- default to the one written by xls_write.py + filename = sys.argv[1] +except IndexError: + filename = "xx.xls" + +constr = "Provider=%s;Data 
Source=%s;%s" % (driver, filename, extended)
+
+conn = adodbapi.connect(constr)
+
+try:  # second command line argument will be worksheet name -- default to first worksheet
+    sheet = sys.argv[2]
+except IndexError:
+    # use ADO feature to get the name of the first worksheet
+    sheet = conn.get_table_names()[0]
+
+print("Spreadsheet=%s  Worksheet=%s" % (filename, sheet))
+print("------------------------------------------------------------")
+crsr = conn.cursor()
+sql = "SELECT * from [%s]" % sheet
+crsr.execute(sql)
+for row in crsr.fetchmany(10):
+    print(repr(row))
+crsr.close()
+conn.close()
diff --git a/lib/adodbapi/examples/xls_write.py b/lib/adodbapi/examples/xls_write.py
new file mode 100644
index 00000000..9d1d3114
--- /dev/null
+++ b/lib/adodbapi/examples/xls_write.py
@@ -0,0 +1,41 @@
+import datetime
+
+import adodbapi
+
+try:
+    import adodbapi.is64bit as is64bit
+
+    is64 = is64bit.Python()
+except ImportError:
+    is64 = False  # in case the user has an old version of adodbapi
+if is64:
+    driver = "Microsoft.ACE.OLEDB.12.0"
+else:
+    driver = "Microsoft.Jet.OLEDB.4.0"
+filename = "xx.xls"  # file will be created if it does not exist
+extended = 'Extended Properties="Excel 8.0;Readonly=False;"'
+
+constr = "Provider=%s;Data Source=%s;%s" % (driver, filename, extended)
+
+conn = adodbapi.connect(constr)
+with conn:  # will auto commit if no errors
+    with conn.cursor() as crsr:
+        try:
+            crsr.execute("drop table SheetOne")
+        except:
+            pass  # just in case there is one already there
+
+        # create the sheet and the header row and set the types for the columns
+        crsr.execute(
+            "create table SheetOne (Name varchar, Rank varchar, SrvcNum integer, Weight float, Birth date)"
+        )
+
+        sql = "INSERT INTO SheetOne (name, rank , srvcnum, weight, birth) values (?,?,?,?,?)"
+
+        data = ("Mike Murphy", "SSG", 123456789, 167.8, datetime.date(1922, 12, 27))
+        crsr.execute(sql, data)  # write the first row of data
+        crsr.execute(
+            sql, ["John Jones", "Pvt", 987654321, 140.0, 
datetime.date(1921, 7, 4)] + ) # another row of data +conn.close() +print("Created spreadsheet=%s worksheet=%s" % (filename, "SheetOne")) diff --git a/lib/adodbapi/is64bit.py b/lib/adodbapi/is64bit.py new file mode 100644 index 00000000..bba12b43 --- /dev/null +++ b/lib/adodbapi/is64bit.py @@ -0,0 +1,41 @@ +"""is64bit.Python() --> boolean value of detected Python word size. is64bit.os() --> os build version""" +import sys + + +def Python(): + if sys.platform == "cli": # IronPython + import System + + return System.IntPtr.Size == 8 + else: + try: + return sys.maxsize > 2147483647 + except AttributeError: + return sys.maxint > 2147483647 + + +def os(): + import platform + + pm = platform.machine() + if pm != ".." and pm.endswith("64"): # recent Python (not Iron) + return True + else: + import os + + if "PROCESSOR_ARCHITEW6432" in os.environ: + return True # 32 bit program running on 64 bit Windows + try: + return os.environ["PROCESSOR_ARCHITECTURE"].endswith( + "64" + ) # 64 bit Windows 64 bit program + except (IndexError, KeyError): + pass # not Windows + try: + return "64" in platform.architecture()[0] # this often works in Linux + except: + return False # is an older version of Python, assume also an older os (best we can guess) + + +if __name__ == "__main__": + print("is64bit.Python() =", Python(), "is64bit.os() =", os()) diff --git a/lib/adodbapi/license.txt b/lib/adodbapi/license.txt new file mode 100644 index 00000000..c255f4aa --- /dev/null +++ b/lib/adodbapi/license.txt @@ -0,0 +1,506 @@ + GNU LESSER GENERAL PUBLIC LICENSE + Version 2.1, February 1999 + + Copyright (C) 1991, 1999 Free Software Foundation, Inc. + 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + +[This is the first released version of the Lesser GPL. It also counts + as the successor of the GNU Library Public License, version 2, hence + the version number 2.1.] 
+ + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +Licenses are intended to guarantee your freedom to share and change +free software--to make sure the software is free for all its users. + + This license, the Lesser General Public License, applies to some +specially designated software packages--typically libraries--of the +Free Software Foundation and other authors who decide to use it. You +can use it too, but we suggest you first think carefully about whether +this license or the ordinary General Public License is the better +strategy to use in any particular case, based on the explanations below. + + When we speak of free software, we are referring to freedom of use, +not price. Our General Public Licenses are designed to make sure that +you have the freedom to distribute copies of free software (and charge +for this service if you wish); that you receive source code or can get +it if you want it; that you can change the software and use pieces of +it in new free programs; and that you are informed that you can do +these things. + + To protect your rights, we need to make restrictions that forbid +distributors to deny you these rights or to ask you to surrender these +rights. These restrictions translate to certain responsibilities for +you if you distribute copies of the library or if you modify it. + + For example, if you distribute copies of the library, whether gratis +or for a fee, you must give the recipients all the rights that we gave +you. You must make sure that they, too, receive or can get the source +code. If you link other code with the library, you must provide +complete object files to the recipients, so that they can relink them +with the library after making changes to the library and recompiling +it. And you must show them these terms so they know their rights. 
+ + We protect your rights with a two-step method: (1) we copyright the +library, and (2) we offer you this license, which gives you legal +permission to copy, distribute and/or modify the library. + + To protect each distributor, we want to make it very clear that +there is no warranty for the free library. Also, if the library is +modified by someone else and passed on, the recipients should know +that what they have is not the original version, so that the original +author's reputation will not be affected by problems that might be +introduced by others. + + + + Finally, software patents pose a constant threat to the existence of +any free program. We wish to make sure that a company cannot +effectively restrict the users of a free program by obtaining a +restrictive license from a patent holder. Therefore, we insist that +any patent license obtained for a version of the library must be +consistent with the full freedom of use specified in this license. + + Most GNU software, including some libraries, is covered by the +ordinary GNU General Public License. This license, the GNU Lesser +General Public License, applies to certain designated libraries, and +is quite different from the ordinary General Public License. We use +this license for certain libraries in order to permit linking those +libraries into non-free programs. + + When a program is linked with a library, whether statically or using +a shared library, the combination of the two is legally speaking a +combined work, a derivative of the original library. The ordinary +General Public License therefore permits such linking only if the +entire combination fits its criteria of freedom. The Lesser General +Public License permits more lax criteria for linking other code with +the library. + + We call this license the "Lesser" General Public License because it +does Less to protect the user's freedom than the ordinary General +Public License. 
It also provides other free software developers Less +of an advantage over competing non-free programs. These disadvantages +are the reason we use the ordinary General Public License for many +libraries. However, the Lesser license provides advantages in certain +special circumstances. + + For example, on rare occasions, there may be a special need to +encourage the widest possible use of a certain library, so that it becomes +a de-facto standard. To achieve this, non-free programs must be +allowed to use the library. A more frequent case is that a free +library does the same job as widely used non-free libraries. In this +case, there is little to gain by limiting the free library to free +software only, so we use the Lesser General Public License. + + In other cases, permission to use a particular library in non-free +programs enables a greater number of people to use a large body of +free software. For example, permission to use the GNU C Library in +non-free programs enables many more people to use the whole GNU +operating system, as well as its variant, the GNU/Linux operating +system. + + Although the Lesser General Public License is Less protective of the +users' freedom, it does ensure that the user of a program that is +linked with the Library has the freedom and the wherewithal to run +that program using a modified version of the Library. + + The precise terms and conditions for copying, distribution and +modification follow. Pay close attention to the difference between a +"work based on the library" and a "work that uses the library". The +former contains code derived from the library, whereas the latter must +be combined with the library in order to run. + + + + GNU LESSER GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. 
This License Agreement applies to any software library or other +program which contains a notice placed by the copyright holder or +other authorized party saying it may be distributed under the terms of +this Lesser General Public License (also called "this License"). +Each licensee is addressed as "you". + + A "library" means a collection of software functions and/or data +prepared so as to be conveniently linked with application programs +(which use some of those functions and data) to form executables. + + The "Library", below, refers to any such software library or work +which has been distributed under these terms. A "work based on the +Library" means either the Library or any derivative work under +copyright law: that is to say, a work containing the Library or a +portion of it, either verbatim or with modifications and/or translated +straightforwardly into another language. (Hereinafter, translation is +included without limitation in the term "modification".) + + "Source code" for a work means the preferred form of the work for +making modifications to it. For a library, complete source code means +all the source code for all modules it contains, plus any associated +interface definition files, plus the scripts used to control compilation +and installation of the library. + + Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running a program using the Library is not restricted, and output from +such a program is covered only if its contents constitute a work based +on the Library (independent of the use of the Library in a tool for +writing it). Whether that is true depends on what the Library does +and what the program that uses the Library does. + + 1. 
You may copy and distribute verbatim copies of the Library's +complete source code as you receive it, in any medium, provided that +you conspicuously and appropriately publish on each copy an +appropriate copyright notice and disclaimer of warranty; keep intact +all the notices that refer to this License and to the absence of any +warranty; and distribute a copy of this License along with the +Library. + You may charge a fee for the physical act of transferring a copy, +and you may at your option offer warranty protection in exchange for a +fee. + + 2. You may modify your copy or copies of the Library or any portion +of it, thus forming a work based on the Library, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) The modified work must itself be a software library. + + b) You must cause the files modified to carry prominent notices + stating that you changed the files and the date of any change. + + c) You must cause the whole of the work to be licensed at no + charge to all third parties under the terms of this License. + + d) If a facility in the modified Library refers to a function or a + table of data to be supplied by an application program that uses + the facility, other than as an argument passed when the facility + is invoked, then you must make a good faith effort to ensure that, + in the event an application does not supply such function or + table, the facility still operates, and performs whatever part of + its purpose remains meaningful. + + (For example, a function in a library to compute square roots has + a purpose that is entirely well-defined independent of the + application. Therefore, Subsection 2d requires that any + application-supplied function or table used by this function must + be optional: if the application does not supply it, the square + root function must still compute square roots.) 
+ +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Library, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Library, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote +it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Library. + +In addition, mere aggregation of another work not based on the Library +with the Library (or with a work based on the Library) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may opt to apply the terms of the ordinary GNU General Public +License instead of this License to a given copy of the Library. To do +this, you must alter all the notices that refer to this License, so +that they refer to the ordinary GNU General Public License, version 2, +instead of to this License. (If a newer version than version 2 of the +ordinary GNU General Public License has appeared, then you can specify +that version instead if you wish.) Do not make any other change in +these notices. + + Once this change is made in a given copy, it is irreversible for +that copy, so the ordinary GNU General Public License applies to all +subsequent copies and derivative works made from that copy. + + This option is useful when you wish to copy part of the code of +the Library into a program that is not a library. + + 4. 
You may copy and distribute the Library (or a portion or +derivative of it, under Section 2) in object code or executable form +under the terms of Sections 1 and 2 above provided that you accompany +it with the complete corresponding machine-readable source code, which +must be distributed under the terms of Sections 1 and 2 above on a +medium customarily used for software interchange. + + If distribution of object code is made by offering access to copy +from a designated place, then offering equivalent access to copy the +source code from the same place satisfies the requirement to +distribute the source code, even though third parties are not +compelled to copy the source along with the object code. + + 5. A program that contains no derivative of any portion of the +Library, but is designed to work with the Library by being compiled or +linked with it, is called a "work that uses the Library". Such a +work, in isolation, is not a derivative work of the Library, and +therefore falls outside the scope of this License. + + However, linking a "work that uses the Library" with the Library +creates an executable that is a derivative of the Library (because it +contains portions of the Library), rather than a "work that uses the +library". The executable is therefore covered by this License. +Section 6 states terms for distribution of such executables. + + When a "work that uses the Library" uses material from a header file +that is part of the Library, the object code for the work may be a +derivative work of the Library even though the source code is not. +Whether this is true is especially significant if the work can be +linked without the Library, or if the work is itself a library. The +threshold for this to be true is not precisely defined by law. 
+ + If such an object file uses only numerical parameters, data +structure layouts and accessors, and small macros and small inline +functions (ten lines or less in length), then the use of the object +file is unrestricted, regardless of whether it is legally a derivative +work. (Executables containing this object code plus portions of the +Library will still fall under Section 6.) + + Otherwise, if the work is a derivative of the Library, you may +distribute the object code for the work under the terms of Section 6. +Any executables containing that work also fall under Section 6, +whether or not they are linked directly with the Library itself. + + 6. As an exception to the Sections above, you may also combine or +link a "work that uses the Library" with the Library to produce a +work containing portions of the Library, and distribute that work +under terms of your choice, provided that the terms permit +modification of the work for the customer's own use and reverse +engineering for debugging such modifications. + + You must give prominent notice with each copy of the work that the +Library is used in it and that the Library and its use are covered by +this License. You must supply a copy of this License. If the work +during execution displays copyright notices, you must include the +copyright notice for the Library among them, as well as a reference +directing the user to the copy of this License. Also, you must do one +of these things: + + a) Accompany the work with the complete corresponding + machine-readable source code for the Library including whatever + changes were used in the work (which must be distributed under + Sections 1 and 2 above); and, if the work is an executable linked + with the Library, with the complete machine-readable "work that + uses the Library", as object code and/or source code, so that the + user can modify the Library and then relink to produce a modified + executable containing the modified Library. 
(It is understood + that the user who changes the contents of definitions files in the + Library will not necessarily be able to recompile the application + to use the modified definitions.) + + b) Use a suitable shared library mechanism for linking with the + Library. A suitable mechanism is one that (1) uses at run time a + copy of the library already present on the user's computer system, + rather than copying library functions into the executable, and (2) + will operate properly with a modified version of the library, if + the user installs one, as long as the modified version is + interface-compatible with the version that the work was made with. + + c) Accompany the work with a written offer, valid for at + least three years, to give the same user the materials + specified in Subsection 6a, above, for a charge no more + than the cost of performing this distribution. + + d) If distribution of the work is made by offering access to copy + from a designated place, offer equivalent access to copy the above + specified materials from the same place. + + e) Verify that the user has already received a copy of these + materials or that you have already sent this user a copy. + + For an executable, the required form of the "work that uses the +Library" must include any data and utility programs needed for +reproducing the executable from it. However, as a special exception, +the materials to be distributed need not include anything that is +normally distributed (in either source or binary form) with the major +components (compiler, kernel, and so on) of the operating system on +which the executable runs, unless that component itself accompanies +the executable. + + It may happen that this requirement contradicts the license +restrictions of other proprietary libraries that do not normally +accompany the operating system. Such a contradiction means you cannot +use both them and the Library together in an executable that you +distribute. + + 7. 
You may place library facilities that are a work based on the +Library side-by-side in a single library together with other library +facilities not covered by this License, and distribute such a combined +library, provided that the separate distribution of the work based on +the Library and of the other library facilities is otherwise +permitted, and provided that you do these two things: + + a) Accompany the combined library with a copy of the same work + based on the Library, uncombined with any other library + facilities. This must be distributed under the terms of the + Sections above. + + b) Give prominent notice with the combined library of the fact + that part of it is a work based on the Library, and explaining + where to find the accompanying uncombined form of the same work. + + 8. You may not copy, modify, sublicense, link with, or distribute +the Library except as expressly provided under this License. Any +attempt otherwise to copy, modify, sublicense, link with, or +distribute the Library is void, and will automatically terminate your +rights under this License. However, parties who have received copies, +or rights, from you under this License will not have their licenses +terminated so long as such parties remain in full compliance. + + 9. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Library or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Library (or any work based on the +Library), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Library or works based on it. + + 10. 
Each time you redistribute the Library (or any work based on the +Library), the recipient automatically receives a license from the +original licensor to copy, distribute, link with or modify the Library +subject to these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties with +this License. + + 11. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Library at all. For example, if a patent +license would not permit royalty-free redistribution of the Library by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Library. + +If any portion of this section is held invalid or unenforceable under any +particular circumstance, the balance of the section is intended to apply, +and the section as a whole is intended to apply in other circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system which is +implemented by public license practices. 
Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 12. If the distribution and/or use of the Library is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Library under this License may add +an explicit geographical distribution limitation excluding those countries, +so that distribution is permitted only in or among countries not thus +excluded. In such case, this License incorporates the limitation as if +written in the body of this License. + + 13. The Free Software Foundation may publish revised and/or new +versions of the Lesser General Public License from time to time. +Such new versions will be similar in spirit to the present version, +but may differ in detail to address new problems or concerns. + +Each version is given a distinguishing version number. If the Library +specifies a version number of this License which applies to it and +"any later version", you have the option of following the terms and +conditions either of that version or of any later version published by +the Free Software Foundation. If the Library does not specify a +license version number, you may choose any version ever published by +the Free Software Foundation. + + 14. If you wish to incorporate parts of the Library into other free +programs whose distribution conditions are incompatible with these, +write to the author to ask for permission. For software which is +copyrighted by the Free Software Foundation, write to the Free +Software Foundation; we sometimes make exceptions for this. 
Our +decision will be guided by the two goals of preserving the free status +of all derivatives of our free software and of promoting the sharing +and reuse of software generally. + + NO WARRANTY + + 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO +WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. +EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR +OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY +KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE +LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME +THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN +WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY +AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU +FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR +CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE +LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING +RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A +FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF +SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH +DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Libraries + + If you develop a new library, and you want it to be of the greatest +possible use to the public, we recommend making it free software that +everyone can redistribute and change. You can do so by permitting +redistribution under these terms (or, alternatively, under the terms of the +ordinary General Public License). + + To apply these terms, attach the following notices to the library. 
It is +safest to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least the +"copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with this library; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + +Also add information on how to contact you by electronic and paper mail. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the library, if +necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the + library `Frob' (a library for tweaking knobs) written by James Random Hacker. + + , 1 April 1990 + Ty Coon, President of Vice + +That's all there is to it! + diff --git a/lib/adodbapi/process_connect_string.py b/lib/adodbapi/process_connect_string.py new file mode 100644 index 00000000..fa34d23a --- /dev/null +++ b/lib/adodbapi/process_connect_string.py @@ -0,0 +1,144 @@ +""" a clumsy attempt at a macro language to let the programmer execute code on the server (ex: determine 64bit)""" +from . 
import is64bit as is64bit + + +def macro_call(macro_name, args, kwargs): + """allow the programmer to perform limited processing on the server by passing macro names and args + + :new_key - the key name the macro will create + :args[0] - macro name + :args[1:] - any arguments + :code - the value of the keyword item + :kwargs - the connection keyword dictionary. ??key has been removed + --> the value to put in for kwargs['name'] = value + """ + if isinstance(args, (str, str)): + args = [ + args + ] # the user forgot to pass a sequence, so make a string into args[0] + new_key = args[0] + try: + if macro_name == "is64bit": + if is64bit.Python(): # if on 64 bit Python + return new_key, args[1] # return first argument + else: + try: + return new_key, args[2] # else return second argument (if defined) + except IndexError: + return new_key, "" # else return blank + + elif ( + macro_name == "getuser" + ): # get the name of the user the server is logged in under + if not new_key in kwargs: + import getpass + + return new_key, getpass.getuser() + + elif macro_name == "getnode": # get the name of the computer running the server + import platform + + try: + return new_key, args[1] % platform.node() + except IndexError: + return new_key, platform.node() + + elif macro_name == "getenv": # expand the server's environment variable args[1] + try: + dflt = args[2] # if not found, default from args[2] + except IndexError: # or blank + dflt = "" + return new_key, os.environ.get(args[1], dflt) + + elif macro_name == "auto_security": + if ( + not "user" in kwargs or not kwargs["user"] + ): # missing, blank, or Null username + return new_key, "Integrated Security=SSPI" + return new_key, "User ID=%(user)s; Password=%(password)s" % kwargs + + elif ( + macro_name == "find_temp_test_path" + ): # helper function for testing ado operation -- undocumented + import os + import tempfile + + return new_key, os.path.join( + tempfile.gettempdir(), "adodbapi_test", args[1] + ) + + raise 
ValueError("Unknown connect string macro=%s" % macro_name) + except: + raise ValueError("Error in macro processing %s %s" % (macro_name, repr(args))) + + +def process( + args, kwargs, expand_macros=False +): # --> connection string with keyword arguments processed. + """attempts to inject arguments into a connection string using Python "%" operator for strings + + co: adodbapi connection object + args: positional parameters from the .connect() call + kvargs: keyword arguments from the .connect() call + """ + try: + dsn = args[0] + except IndexError: + dsn = None + if isinstance( + dsn, dict + ): # as a convenience the first argument may be django settings + kwargs.update(dsn) + elif ( + dsn + ): # the connection string is passed to the connection as part of the keyword dictionary + kwargs["connection_string"] = dsn + try: + a1 = args[1] + except IndexError: + a1 = None + # historically, the second positional argument might be a timeout value + if isinstance(a1, int): + kwargs["timeout"] = a1 + # if the second positional argument is a string, then it is user + elif isinstance(a1, str): + kwargs["user"] = a1 + # if the second positional argument is a dictionary, use it as keyword arguments, too + elif isinstance(a1, dict): + kwargs.update(a1) + try: + kwargs["password"] = args[2] # the third positional argument is password + kwargs["host"] = args[3] # the fourth positional argument is host name + kwargs["database"] = args[4] # the fifth positional argument is database name + except IndexError: + pass + + # make sure connection string is defined somehow + if not "connection_string" in kwargs: + try: # perhaps 'dsn' was defined + kwargs["connection_string"] = kwargs["dsn"] + except KeyError: + try: # as a last effort, use the "host" keyword + kwargs["connection_string"] = kwargs["host"] + except KeyError: + raise TypeError("Must define 'connection_string' for ado connections") + if expand_macros: + for kwarg in list(kwargs.keys()): + if kwarg.startswith("macro_"): # If 
a key defines a macro + macro_name = kwarg[6:] # name without the "macro_" + macro_code = kwargs.pop( + kwarg + ) # we remove the macro_key and get the code to execute + new_key, rslt = macro_call( + macro_name, macro_code, kwargs + ) # run the code in the local context + kwargs[new_key] = rslt # put the result back in the keywords dict + # special processing for PyRO IPv6 host address + try: + s = kwargs["proxy_host"] + if ":" in s: # it is an IPv6 address + if s[0] != "[": # is not surrounded by brackets + kwargs["proxy_host"] = s.join(("[", "]")) # put it in brackets + except KeyError: + pass + return kwargs diff --git a/lib/adodbapi/readme.txt b/lib/adodbapi/readme.txt new file mode 100644 index 00000000..cf591905 --- /dev/null +++ b/lib/adodbapi/readme.txt @@ -0,0 +1,92 @@ +Project +------- +adodbapi + +A Python DB-API 2.0 (PEP-249) module that makes it easy to use Microsoft ADO +for connecting with databases and other data sources +using either CPython or IronPython. + +Home page: + +Features: +* 100% DB-API 2.0 (PEP-249) compliant (including most extensions and recommendations). +* Includes pyunit testcases that describe how to use the module. +* Fully implemented in Python. -- runs in Python 2.5+ Python 3.0+ and IronPython 2.6+ +* Licensed under the LGPL license, which means that it can be used freely even in commercial programs subject to certain restrictions. +* The user can choose between paramstyles: 'qmark' 'named' 'format' 'pyformat' 'dynamic' +* Supports data retrieval by column name e.g.: + for row in myCurser.execute("select name,age from students"): + print("Student", row.name, "is", row.age, "years old.") +* Supports user-definable system-to-Python data conversion functions (selected by ADO data type, or by column) + +Prerequisites: +* C Python 2.7 or 3.5 or higher + and pywin32 (Mark Hammond's python for windows extensions.) +or + Iron Python 2.7 or higher. 
(works in IPy2.0 for all data types except BUFFER) + +Installation: +* (C-Python on Windows): Install pywin32 ("pip install pywin32") which includes adodbapi. +* (IronPython on Windows): Download adodbapi from http://sf.net/projects/adodbapi. Unpack the zip. + Open a command window as an administrator. CD to the folder containing the unzipped files. + Run "setup.py install" using the IronPython of your choice. + +NOTE: ........... +If you do not like the new default operation of returning Numeric columns as decimal.Decimal, +you can select other options by the user defined conversion feature. +Try: + adodbapi.apibase.variantConversions[adodbapi.ado_consts.adNumeric] = adodbapi.apibase.cvtString +or: + adodbapi.apibase.variantConversions[adodbapi.ado_consts.adNumeric] = adodbapi.apibase.cvtFloat +or: + adodbapi.apibase.variantConversions[adodbapi.ado_consts.adNumeric] = write_your_own_conversion_function + ............ +notes for 2.6.2: + The definitive source has been moved to https://github.com/mhammond/pywin32/tree/master/adodbapi. + Remote has proven too hard to configure and test with Pyro4. I am moving it to unsupported status + until I can change to a different connection method. +What's new in version 2.6 + A cursor.prepare() method and support for prepared SQL statements. + Lots of refactoring, especially of the Remote and Server modules (still to be treated as Beta code). + The quick start document 'quick_reference.odt' will export as a nice-looking pdf. + Added paramstyles 'pyformat' and 'dynamic'. If your 'paramstyle' is 'named' you _must_ pass a dictionary of + parameters to your .execute() method. If your 'paramstyle' is 'format' 'pyformat' or 'dynamic', you _may_ + pass a dictionary of parameters -- provided your SQL operation string is formatted correctly. + +What's new in version 2.5 + Remote module: (works on Linux!) allows a Windows computer to serve ADO databases via PyRO + Server module: PyRO server for ADO.
Run using a command like= C:>python -m adodbapi.server + (server has simple connection string macros: is64bit, getuser, sql_provider, auto_security) + Brief documentation included. See adodbapi/examples folder adodbapi.rtf + New connection method conn.get_table_names() --> list of names of tables in database + + Vastly refactored. Data conversion things have been moved to the new adodbapi.apibase module. + Many former module-level attributes are now class attributes. (Should be more thread-safe) + Connection objects are now context managers for transactions and will commit or rollback. + Cursor objects are context managers and will automatically close themselves. + Autocommit can be switched on and off. + Keyword and positional arguments on the connect() method work as documented in PEP 249. + Keyword arguments from the connect call can be formatted into the connection string. + New keyword arguments defined, such as: autocommit, paramstyle, remote_proxy, remote_port. + *** Breaking change: variantConversion lookups are simplified: the following will raise KeyError: + oldconverter=adodbapi.variantConversions[adodbapi.adoStringTypes] + Refactor as: oldconverter=adodbapi.variantConversions[adodbapi.adoStringTypes[0]] + +License +------- +LGPL, see http://www.opensource.org/licenses/lgpl-license.php + +Documentation +------------- + +Look at adodbapi/quick_reference.md +http://www.python.org/topics/database/DatabaseAPI-2.0.html +read the examples in adodbapi/examples +and look at the test cases in adodbapi/test directory. + +Mailing lists +------------- +The adodbapi mailing lists have been deactivated. Submit comments to the +pywin32 or IronPython mailing lists. + -- the bug tracker on sourceforge.net/projects/adodbapi may be checked, (infrequently). 
+ -- please use: https://github.com/mhammond/pywin32/issues diff --git a/lib/adodbapi/remote.py b/lib/adodbapi/remote.py new file mode 100644 index 00000000..ae22b5a7 --- /dev/null +++ b/lib/adodbapi/remote.py @@ -0,0 +1,634 @@ +"""adodbapi.remote - A python DB API 2.0 (PEP 249) interface to Microsoft ADO + +Copyright (C) 2002 Henrik Ekelund, version 2.1 by Vernon Cole +* http://sourceforge.net/projects/pywin32 +* http://sourceforge.net/projects/adodbapi + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with this library; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + + django adaptations and refactoring thanks to Adam Vandenberg + +DB-API 2.0 specification: http://www.python.org/dev/peps/pep-0249/ + +This module source should run correctly in CPython versions 2.5 and later, +or IronPython version 2.7 and later, +or, after running through 2to3.py, CPython 3.0 or later. +""" + +__version__ = "2.6.0.4" +version = "adodbapi.remote v" + __version__ + +import array +import datetime +import os +import sys +import time + +# Pyro4 is required for server and remote operation --> https://pypi.python.org/pypi/Pyro4/ +try: + import Pyro4 +except ImportError: + print('* * * Sorry, server operation requires Pyro4. 
Please "pip import" it.') + exit(11) + +import adodbapi +import adodbapi.apibase as api +import adodbapi.process_connect_string +from adodbapi.apibase import ProgrammingError + +_BaseException = api._BaseException + +sys.excepthook = Pyro4.util.excepthook +Pyro4.config.PREFER_IP_VERSION = 0 # allow system to prefer IPv6 +Pyro4.config.COMMTIMEOUT = 40.0 # a bit longer than the default SQL server Gtimeout +Pyro4.config.SERIALIZER = "pickle" + +try: + verbose = int(os.environ["ADODBAPI_VERBOSE"]) +except: + verbose = False +if verbose: + print(version) + +# --- define objects to smooth out Python3 <-> Python 2.x differences +unicodeType = str # this line will be altered by 2to3.py to '= str' +longType = int # this line will be altered by 2to3.py to '= int' +StringTypes = str +makeByteBuffer = bytes +memoryViewType = memoryview + +# ----------------------------------------------------------- +# conversion functions mandated by PEP 249 +Binary = makeByteBuffer # override the function from apibase.py + + +def Date(year, month, day): + return datetime.date(year, month, day) # dateconverter.Date(year,month,day) + + +def Time(hour, minute, second): + return datetime.time(hour, minute, second) # dateconverter.Time(hour,minute,second) + + +def Timestamp(year, month, day, hour, minute, second): + return datetime.datetime(year, month, day, hour, minute, second) + + +def DateFromTicks(ticks): + return Date(*time.gmtime(ticks)[:3]) + + +def TimeFromTicks(ticks): + return Time(*time.gmtime(ticks)[3:6]) + + +def TimestampFromTicks(ticks): + return Timestamp(*time.gmtime(ticks)[:6]) + + +def connect(*args, **kwargs): # --> a remote db-api connection object + """Create and open a remote db-api database connection object""" + # process the argument list the programmer gave us + kwargs = adodbapi.process_connect_string.process(args, kwargs) + # the "proxy_xxx" keys tell us where to find the PyRO proxy server + kwargs.setdefault( + "pyro_connection", 
"PYRO:ado.connection@%(proxy_host)s:%(proxy_port)s" + ) + if not "proxy_port" in kwargs: + try: + pport = os.environ["PROXY_PORT"] + except KeyError: + pport = 9099 + kwargs["proxy_port"] = pport + if not "proxy_host" in kwargs or not kwargs["proxy_host"]: + try: + phost = os.environ["PROXY_HOST"] + except KeyError: + phost = "[::1]" # '127.0.0.1' + kwargs["proxy_host"] = phost + ado_uri = kwargs["pyro_connection"] % kwargs + # ask PyRO make us a remote connection object + auto_retry = 3 + while auto_retry: + try: + dispatcher = Pyro4.Proxy(ado_uri) + if "comm_timeout" in kwargs: + dispatcher._pyroTimeout = float(kwargs["comm_timeout"]) + uri = dispatcher.make_connection() + break + except Pyro4.core.errors.PyroError: + auto_retry -= 1 + if auto_retry: + time.sleep(1) + else: + raise api.DatabaseError("Cannot create connection to=%s" % ado_uri) + + conn_uri = fix_uri(uri, kwargs) # get a host connection from the proxy server + while auto_retry: + try: + host_conn = Pyro4.Proxy( + conn_uri + ) # bring up an exclusive Pyro connection for my ADO connection + break + except Pyro4.core.errors.PyroError: + auto_retry -= 1 + if auto_retry: + time.sleep(1) + else: + raise api.DatabaseError( + "Cannot create ADO connection object using=%s" % conn_uri + ) + if "comm_timeout" in kwargs: + host_conn._pyroTimeout = float(kwargs["comm_timeout"]) + # make a local clone + myConn = Connection() + while auto_retry: + try: + myConn.connect( + kwargs, host_conn + ) # call my connect method -- hand him the host connection + break + except Pyro4.core.errors.PyroError: + auto_retry -= 1 + if auto_retry: + time.sleep(1) + else: + raise api.DatabaseError( + "Pyro error creating connection to/thru=%s" % repr(kwargs) + ) + except _BaseException as e: + raise api.DatabaseError( + "Error creating remote connection to=%s, e=%s, %s" + % (repr(kwargs), repr(e), sys.exc_info()[2]) + ) + return myConn + + +def fix_uri(uri, kwargs): + """convert a generic pyro uri with '0.0.0.0' into the address we 
actually called""" + u = uri.asString() + s = u.split("[::0]") # IPv6 generic address + if len(s) == 1: # did not find one + s = u.split("0.0.0.0") # IPv4 generic address + if len(s) > 1: # found a generic + return kwargs["proxy_host"].join(s) # fill in our address for the host + return uri + + +# # # # # ----- the Class that defines a connection ----- # # # # # +class Connection(object): + # include connection attributes required by api definition. + Warning = api.Warning + Error = api.Error + InterfaceError = api.InterfaceError + DataError = api.DataError + DatabaseError = api.DatabaseError + OperationalError = api.OperationalError + IntegrityError = api.IntegrityError + InternalError = api.InternalError + NotSupportedError = api.NotSupportedError + ProgrammingError = api.ProgrammingError + # set up some class attributes + paramstyle = api.paramstyle + + @property + def dbapi(self): # a proposed db-api version 3 extension. + "Return a reference to the DBAPI module for this Connection." 
+ return api + + def __init__(self): + self.proxy = None + self.kwargs = {} + self.errorhandler = None + self.supportsTransactions = False + self.paramstyle = api.paramstyle + self.timeout = 30 + self.cursors = {} + + def connect(self, kwargs, connection_maker): + self.kwargs = kwargs + if verbose: + print('%s attempting: "%s"' % (version, repr(kwargs))) + self.proxy = connection_maker + ##try: + ret = self.proxy.connect(kwargs) # ask the server to hook us up + ##except ImportError, e: # Pyro is trying to import pywinTypes.comerrer + ## self._raiseConnectionError(api.DatabaseError, 'Proxy cannot connect using=%s' % repr(kwargs)) + if ret is not True: + self._raiseConnectionError( + api.OperationalError, "Proxy returns error message=%s" % repr(ret) + ) + + self.supportsTransactions = self.getIndexedValue("supportsTransactions") + self.paramstyle = self.getIndexedValue("paramstyle") + self.timeout = self.getIndexedValue("timeout") + if verbose: + print("adodbapi.remote New connection at %X" % id(self)) + + def _raiseConnectionError(self, errorclass, errorvalue): + eh = self.errorhandler + if eh is None: + eh = api.standardErrorHandler + eh(self, None, errorclass, errorvalue) + + def close(self): + """Close the connection now (rather than whenever __del__ is called). + + The connection will be unusable from this point forward; + an Error (or subclass) exception will be raised if any operation is attempted with the connection. + The same applies to all cursor objects trying to use the connection. 
+ """ + for crsr in list(self.cursors.values())[ + : + ]: # copy the list, then close each one + crsr.close() + try: + """close the underlying remote Connection object""" + self.proxy.close() + if verbose: + print("adodbapi.remote Closed connection at %X" % id(self)) + object.__delattr__( + self, "proxy" + ) # future attempts to use closed cursor will be caught by __getattr__ + except Exception: + pass + + def __del__(self): + try: + self.proxy.close() + except: + pass + + def commit(self): + """Commit any pending transaction to the database. + + Note that if the database supports an auto-commit feature, + this must be initially off. An interface method may be provided to turn it back on. + Database modules that do not support transactions should implement this method with void functionality. + """ + if not self.supportsTransactions: + return + result = self.proxy.commit() + if result: + self._raiseConnectionError( + api.OperationalError, "Error during commit: %s" % result + ) + + def _rollback(self): + """In case a database does provide transactions this method causes the the database to roll back to + the start of any pending transaction. Closing a connection without committing the changes first will + cause an implicit rollback to be performed. + """ + result = self.proxy.rollback() + if result: + self._raiseConnectionError( + api.OperationalError, "Error during rollback: %s" % result + ) + + def __setattr__(self, name, value): + if name in ("paramstyle", "timeout", "autocommit"): + if self.proxy: + self.proxy.send_attribute_to_host(name, value) + object.__setattr__(self, name, value) # store attribute locally (too) + + def __getattr__(self, item): + if ( + item == "rollback" + ): # the rollback method only appears if the database supports transactions + if self.supportsTransactions: + return ( + self._rollback + ) # return the rollback method so the caller can execute it. 
+ else: + raise self.ProgrammingError( + "this data provider does not support Rollback" + ) + elif item in ( + "dbms_name", + "dbms_version", + "connection_string", + "autocommit", + ): # 'messages' ): + return self.getIndexedValue(item) + elif item == "proxy": + raise self.ProgrammingError("Attempting to use closed connection") + else: + raise self.ProgrammingError('No remote access for attribute="%s"' % item) + + def getIndexedValue(self, index): + r = self.proxy.get_attribute_for_remote(index) + return r + + def cursor(self): + "Return a new Cursor Object using the connection." + myCursor = Cursor(self) + return myCursor + + def _i_am_here(self, crsr): + "message from a new cursor proclaiming its existence" + self.cursors[crsr.id] = crsr + + def _i_am_closing(self, crsr): + "message from a cursor giving connection a chance to clean up" + try: + del self.cursors[crsr.id] + except: + pass + + def __enter__(self): # Connections are context managers + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + if exc_type: + self._rollback() # automatic rollback on errors + else: + self.commit() + + def get_table_names(self): + return self.proxy.get_table_names() + + +def fixpickle(x): + """pickle barfs on buffer(x) so we pass as array.array(x) then restore to original form for .execute()""" + if x is None: + return None + if isinstance(x, dict): + # for 'named' paramstyle user will pass a mapping + newargs = {} + for arg, val in list(x.items()): + if isinstance(val, memoryViewType): + newval = array.array("B") + newval.fromstring(val) + newargs[arg] = newval + else: + newargs[arg] = val + return newargs + # if not a mapping, then a sequence + newargs = [] + for arg in x: + if isinstance(arg, memoryViewType): + newarg = array.array("B") + newarg.fromstring(arg) + newargs.append(newarg) + else: + newargs.append(arg) + return newargs + + +class Cursor(object): + def __init__(self, connection): + self.command = None + self.errorhandler = None ## was: 
connection.errorhandler + self.connection = connection + self.proxy = self.connection.proxy + self.rs = None # the fetchable data for this cursor + self.converters = NotImplemented + self.id = connection.proxy.build_cursor() + connection._i_am_here(self) + self.recordset_format = api.RS_REMOTE + if verbose: + print( + "%s New cursor at %X on conn %X" + % (version, id(self), id(self.connection)) + ) + + def prepare(self, operation): + self.command = operation + try: + del self.description + except AttributeError: + pass + self.proxy.crsr_prepare(self.id, operation) + + def __iter__(self): # [2.1 Zamarev] + return iter(self.fetchone, None) # [2.1 Zamarev] + + def __next__(self): + r = self.fetchone() + if r: + return r + raise StopIteration + + def __enter__(self): + "Allow database cursors to be used with context managers." + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + "Allow database cursors to be used with context managers." + self.close() + + def __getattr__(self, key): + if key == "numberOfColumns": + try: + return len(self.rs[0]) + except: + return 0 + if key == "description": + try: + self.description = self.proxy.crsr_get_description(self.id)[:] + return self.description + except TypeError: + return None + if key == "columnNames": + try: + r = dict( + self.proxy.crsr_get_columnNames(self.id) + ) # copy the remote columns + + except TypeError: + r = {} + self.columnNames = r + return r + + if key == "remote_cursor": + raise api.OperationalError + try: + return self.proxy.crsr_get_attribute_for_remote(self.id, key) + except AttributeError: + raise api.InternalError( + 'Failure getting attribute "%s" from proxy cursor.' 
% key + ) + + def __setattr__(self, key, value): + if key == "arraysize": + self.proxy.crsr_set_arraysize(self.id, value) + if key == "paramstyle": + if value in api.accepted_paramstyles: + self.proxy.crsr_set_paramstyle(self.id, value) + else: + self._raiseCursorError( + api.ProgrammingError, 'invalid paramstyle ="%s"' % value + ) + object.__setattr__(self, key, value) + + def _raiseCursorError(self, errorclass, errorvalue): + eh = self.errorhandler + if eh is None: + eh = api.standardErrorHandler + eh(self.connection, self, errorclass, errorvalue) + + def execute(self, operation, parameters=None): + if self.connection is None: + self._raiseCursorError( + ProgrammingError, "Attempted operation on closed cursor" + ) + self.command = operation + try: + del self.description + except AttributeError: + pass + try: + del self.columnNames + except AttributeError: + pass + fp = fixpickle(parameters) + if verbose > 2: + print( + ( + '%s executing "%s" with params=%s' + % (version, operation, repr(parameters)) + ) + ) + result = self.proxy.crsr_execute(self.id, operation, fp) + if result: # an exception was triggered + self._raiseCursorError(result[0], result[1]) + + def executemany(self, operation, seq_of_parameters): + if self.connection is None: + self._raiseCursorError( + ProgrammingError, "Attempted operation on closed cursor" + ) + self.command = operation + try: + del self.description + except AttributeError: + pass + try: + del self.columnNames + except AttributeError: + pass + sq = [fixpickle(x) for x in seq_of_parameters] + if verbose > 2: + print( + ( + '%s executemany "%s" with params=%s' + % (version, operation, repr(seq_of_parameters)) + ) + ) + self.proxy.crsr_executemany(self.id, operation, sq) + + def nextset(self): + try: + del self.description + except AttributeError: + pass + try: + del self.columnNames + except AttributeError: + pass + if verbose > 2: + print(("%s nextset" % version)) + return self.proxy.crsr_nextset(self.id) + + def callproc(self, 
procname, parameters=None): + if self.connection is None: + self._raiseCursorError( + ProgrammingError, "Attempted operation on closed cursor" + ) + self.command = procname + try: + del self.description + except AttributeError: + pass + try: + del self.columnNames + except AttributeError: + pass + fp = fixpickle(parameters) + if verbose > 2: + print( + ( + '%s callproc "%s" with params=%s' + % (version, procname, repr(parameters)) + ) + ) + return self.proxy.crsr_callproc(self.id, procname, fp) + + def fetchone(self): + try: + f1 = self.proxy.crsr_fetchone(self.id) + except _BaseException as e: + self._raiseCursorError(api.DatabaseError, e) + else: + if f1 is None: + return None + self.rs = [f1] + return api.SQLrows(self.rs, 1, self)[ + 0 + ] # new object to hold the results of the fetch + + def fetchmany(self, size=None): + try: + self.rs = self.proxy.crsr_fetchmany(self.id, size) + if not self.rs: + return [] + r = api.SQLrows(self.rs, len(self.rs), self) + return r + except Exception as e: + self._raiseCursorError(api.DatabaseError, e) + + def fetchall(self): + try: + self.rs = self.proxy.crsr_fetchall(self.id) + if not self.rs: + return [] + return api.SQLrows(self.rs, len(self.rs), self) + except Exception as e: + self._raiseCursorError(api.DatabaseError, e) + + def close(self): + if self.connection is None: + return + self.connection._i_am_closing(self) # take me off the connection's cursors list + try: + self.proxy.crsr_close(self.id) + except: + pass + try: + del self.description + except: + pass + try: + del self.rs # let go of the recordset + except: + pass + self.connection = ( + None # this will make all future method calls on me throw an exception + ) + self.proxy = None + if verbose: + print("adodbapi.remote Closed cursor at %X" % id(self)) + + def __del__(self): + try: + self.close() + except: + pass + + def setinputsizes(self, sizes): + pass + + def setoutputsize(self, size, column=None): + pass diff --git a/lib/adodbapi/schema_table.py 
b/lib/adodbapi/schema_table.py new file mode 100644 index 00000000..8621830e --- /dev/null +++ b/lib/adodbapi/schema_table.py @@ -0,0 +1,15 @@ +"""call using an open ADO connection --> list of table names""" +from . import adodbapi + + +def names(connection_object): + ado = connection_object.adoConn + schema = ado.OpenSchema(20) # constant = adSchemaTables + + tables = [] + while not schema.EOF: + name = adodbapi.getIndexedValue(schema.Fields, "TABLE_NAME").Value + tables.append(name) + schema.MoveNext() + del schema + return tables diff --git a/lib/adodbapi/setup.py b/lib/adodbapi/setup.py new file mode 100644 index 00000000..d25869ad --- /dev/null +++ b/lib/adodbapi/setup.py @@ -0,0 +1,70 @@ +"""adodbapi -- a pure Python PEP 249 DB-API package using Microsoft ADO + +Adodbapi can be run on CPython 3.5 and later. +or IronPython version 2.6 and later (in theory, possibly no longer in practice!) +""" +CLASSIFIERS = """\ +Development Status :: 5 - Production/Stable +Intended Audience :: Developers +License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL) +Operating System :: Microsoft :: Windows +Operating System :: POSIX :: Linux +Programming Language :: Python +Programming Language :: Python :: 3 +Programming Language :: SQL +Topic :: Software Development +Topic :: Software Development :: Libraries :: Python Modules +Topic :: Database +""" + +NAME = "adodbapi" +MAINTAINER = "Vernon Cole" +MAINTAINER_EMAIL = "vernondcole@gmail.com" +DESCRIPTION = ( + """A pure Python package implementing PEP 249 DB-API using Microsoft ADO.""" +) +URL = "http://sourceforge.net/projects/adodbapi" +LICENSE = "LGPL" +CLASSIFIERS = filter(None, CLASSIFIERS.split("\n")) +AUTHOR = "Henrik Ekelund, Vernon Cole, et.al." 
+AUTHOR_EMAIL = "vernondcole@gmail.com" +PLATFORMS = ["Windows", "Linux"] + +VERSION = None # in case searching for version fails +a = open("adodbapi.py") # find the version string in the source code +for line in a: + if "__version__" in line: + VERSION = line.split("'")[1] + print('adodbapi version="%s"' % VERSION) + break +a.close() + + +def setup_package(): + from distutils.command.build_py import build_py + from distutils.core import setup + + setup( + cmdclass={"build_py": build_py}, + name=NAME, + maintainer=MAINTAINER, + maintainer_email=MAINTAINER_EMAIL, + description=DESCRIPTION, + url=URL, + keywords="database ado odbc dbapi db-api Microsoft SQL", + ## download_url=DOWNLOAD_URL, + long_description=open("README.txt").read(), + license=LICENSE, + classifiers=CLASSIFIERS, + author=AUTHOR, + author_email=AUTHOR_EMAIL, + platforms=PLATFORMS, + version=VERSION, + package_dir={"adodbapi": ""}, + packages=["adodbapi"], + ) + return + + +if __name__ == "__main__": + setup_package() diff --git a/lib/adodbapi/test/adodbapitest.py b/lib/adodbapi/test/adodbapitest.py new file mode 100644 index 00000000..e5b3dc19 --- /dev/null +++ b/lib/adodbapi/test/adodbapitest.py @@ -0,0 +1,1692 @@ +""" Unit tests version 2.6.1.0 for adodbapi""" +""" + adodbapi - A python DB API 2.0 interface to Microsoft ADO + + Copyright (C) 2002 Henrik Ekelund + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. 
+ + You should have received a copy of the GNU Lesser General Public + License along with this library; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + + Updates by Vernon Cole +""" + +import copy +import datetime +import decimal +import random +import string +import sys +import unittest + +try: + import win32com.client + + win32 = True +except ImportError: + win32 = False + +# run the configuration module. +import adodbapitestconfig as config # will set sys.path to find correct version of adodbapi + +# in our code below, all our switches are from config.whatever +import tryconnection + +import adodbapi +import adodbapi.apibase as api + +try: + import adodbapi.ado_consts as ado_consts +except ImportError: # we are doing a shortcut import as a module -- so + try: + import ado_consts + except ImportError: + from adodbapi import ado_consts + + +def str2bytes(sval): + return sval.encode("latin1") + + +long = int + + +def randomstring(length): + return "".join([random.choice(string.ascii_letters) for n in range(32)]) + + +class CommonDBTests(unittest.TestCase): + "Self contained super-simple tests in easy syntax, should work on everything between mySQL and Oracle" + + def setUp(self): + self.engine = "unknown" + + def getEngine(self): + return self.engine + + def getConnection(self): + raise NotImplementedError # "This method must be overriden by a subclass" + + def getCursor(self): + return self.getConnection().cursor() + + def testConnection(self): + crsr = self.getCursor() + assert crsr.__class__.__name__ == "Cursor" + + def testErrorHandlerInherits(self): + if not self.remote: + conn = self.getConnection() + mycallable = lambda connection, cursor, errorclass, errorvalue: 1 + conn.errorhandler = mycallable + crsr = conn.cursor() + assert ( + crsr.errorhandler == mycallable + ), "Error handler on crsr should be same as on connection" + + def testDefaultErrorHandlerConnection(self): + if not self.remote: + 
conn = self.getConnection() + del conn.messages[:] + try: + conn.close() + conn.commit() # Should not be able to use connection after it is closed + except: + assert len(conn.messages) == 1 + assert len(conn.messages[0]) == 2 + assert conn.messages[0][0] == api.ProgrammingError + + def testOwnErrorHandlerConnection(self): + if self.remote: # ToDo: use "skip" + return + mycallable = ( + lambda connection, cursor, errorclass, errorvalue: 1 + ) # does not raise anything + conn = self.getConnection() + conn.errorhandler = mycallable + conn.close() + conn.commit() # Should not be able to use connection after it is closed + assert len(conn.messages) == 0 + + conn.errorhandler = None # This should bring back the standard error handler + try: + conn.close() + conn.commit() # Should not be able to use connection after it is closed + except: + pass + # The Standard errorhandler appends error to messages attribute + assert ( + len(conn.messages) > 0 + ), "Setting errorhandler to none should bring back the standard error handler" + + def testDefaultErrorHandlerCursor(self): + crsr = self.getConnection().cursor() + if not self.remote: + del crsr.messages[:] + try: + crsr.execute("SELECT abbtytddrf FROM dasdasd") + except: + assert len(crsr.messages) == 1 + assert len(crsr.messages[0]) == 2 + assert crsr.messages[0][0] == api.DatabaseError + + def testOwnErrorHandlerCursor(self): + if self.remote: # ToDo: should be a "skip" + return + mycallable = ( + lambda connection, cursor, errorclass, errorvalue: 1 + ) # does not raise anything + crsr = self.getConnection().cursor() + crsr.errorhandler = mycallable + crsr.execute("SELECT abbtytddrf FROM dasdasd") + assert len(crsr.messages) == 0 + + crsr.errorhandler = None # This should bring back the standard error handler + try: + crsr.execute("SELECT abbtytddrf FROM dasdasd") + except: + pass + # The Standard errorhandler appends error to messages attribute + assert ( + len(crsr.messages) > 0 + ), "Setting errorhandler to none should 
bring back the standard error handler" + + def testUserDefinedConversions(self): + if self.remote: ## Todo: should be a "skip" + return + try: + duplicatingConverter = lambda aStringField: aStringField * 2 + assert duplicatingConverter("gabba") == "gabbagabba" + + self.helpForceDropOnTblTemp() + conn = self.getConnection() + # the variantConversions attribute should not exist on a normal connection object + self.assertRaises(AttributeError, lambda x: conn.variantConversions[x], [2]) + if not self.remote: + # create a variantConversions attribute on the connection + conn.variantConversions = copy.copy(api.variantConversions) + crsr = conn.cursor() + tabdef = ( + "CREATE TABLE xx_%s (fldData VARCHAR(100) NOT NULL, fld2 VARCHAR(20))" + % config.tmp + ) + crsr.execute(tabdef) + crsr.execute( + "INSERT INTO xx_%s(fldData,fld2) VALUES('gabba','booga')" + % config.tmp + ) + crsr.execute( + "INSERT INTO xx_%s(fldData,fld2) VALUES('hey','yo')" % config.tmp + ) + # change converter for ALL adoStringTypes columns + conn.variantConversions[api.adoStringTypes] = duplicatingConverter + crsr.execute( + "SELECT fldData,fld2 FROM xx_%s ORDER BY fldData" % config.tmp + ) + + rows = crsr.fetchall() + row = rows[0] + self.assertEqual(row[0], "gabbagabba") + row = rows[1] + self.assertEqual(row[0], "heyhey") + self.assertEqual(row[1], "yoyo") + + upcaseConverter = lambda aStringField: aStringField.upper() + assert upcaseConverter("upThis") == "UPTHIS" + + # now use a single column converter + rows.converters[1] = upcaseConverter # convert second column + self.assertEqual(row[0], "heyhey") # first will be unchanged + self.assertEqual(row[1], "YO") # second will convert to upper case + + finally: + try: + del conn.variantConversions # Restore the default + except: + pass + self.helpRollbackTblTemp() + + def testUserDefinedConversionForExactNumericTypes(self): + # variantConversions is a dictionary of conversion functions + # held internally in adodbapi.apibase + # + # !!! 
this test intentionally alters the value of what should be constant in the module + # !!! no new code should use this example, to is only a test to see that the + # !!! deprecated way of doing this still works. (use connection.variantConversions) + # + if not self.remote and sys.version_info < (3, 0): ### Py3 need different test + oldconverter = adodbapi.variantConversions[ + ado_consts.adNumeric + ] # keep old function to restore later + # By default decimal and "numbers" are returned as decimals. + # Instead, make numbers return as floats + try: + adodbapi.variantConversions[ado_consts.adNumeric] = adodbapi.cvtFloat + self.helpTestDataType( + "decimal(18,2)", "NUMBER", 3.45, compareAlmostEqual=1 + ) + self.helpTestDataType( + "numeric(18,2)", "NUMBER", 3.45, compareAlmostEqual=1 + ) + # now return strings + adodbapi.variantConversions[ado_consts.adNumeric] = adodbapi.cvtString + self.helpTestDataType("numeric(18,2)", "NUMBER", "3.45") + # now a completly weird user defined convertion + adodbapi.variantConversions[ado_consts.adNumeric] = ( + lambda x: "!!This function returns a funny unicode string %s!!" % x + ) + self.helpTestDataType( + "numeric(18,2)", + "NUMBER", + "3.45", + allowedReturnValues=[ + "!!This function returns a funny unicode string 3.45!!" 
+ ], + ) + finally: + # now reset the converter to its original function + adodbapi.variantConversions[ + ado_consts.adNumeric + ] = oldconverter # Restore the original convertion function + + def helpTestDataType( + self, + sqlDataTypeString, + DBAPIDataTypeString, + pyData, + pyDataInputAlternatives=None, + compareAlmostEqual=None, + allowedReturnValues=None, + ): + self.helpForceDropOnTblTemp() + conn = self.getConnection() + crsr = conn.cursor() + tabdef = ( + """ + CREATE TABLE xx_%s ( + fldId integer NOT NULL, + fldData """ + % config.tmp + + sqlDataTypeString + + ")\n" + ) + + crsr.execute(tabdef) + + # Test Null values mapped to None + crsr.execute("INSERT INTO xx_%s (fldId) VALUES (1)" % config.tmp) + + crsr.execute("SELECT fldId,fldData FROM xx_%s" % config.tmp) + rs = crsr.fetchone() + self.assertEqual(rs[1], None) # Null should be mapped to None + assert rs[0] == 1 + + # Test description related + descTuple = crsr.description[1] + assert descTuple[0] in ["fldData", "flddata"], 'was "%s" expected "%s"' % ( + descTuple[0], + "fldData", + ) + + if DBAPIDataTypeString == "STRING": + assert descTuple[1] == api.STRING, 'was "%s" expected "%s"' % ( + descTuple[1], + api.STRING.values, + ) + elif DBAPIDataTypeString == "NUMBER": + assert descTuple[1] == api.NUMBER, 'was "%s" expected "%s"' % ( + descTuple[1], + api.NUMBER.values, + ) + elif DBAPIDataTypeString == "BINARY": + assert descTuple[1] == api.BINARY, 'was "%s" expected "%s"' % ( + descTuple[1], + api.BINARY.values, + ) + elif DBAPIDataTypeString == "DATETIME": + assert descTuple[1] == api.DATETIME, 'was "%s" expected "%s"' % ( + descTuple[1], + api.DATETIME.values, + ) + elif DBAPIDataTypeString == "ROWID": + assert descTuple[1] == api.ROWID, 'was "%s" expected "%s"' % ( + descTuple[1], + api.ROWID.values, + ) + elif DBAPIDataTypeString == "UUID": + assert descTuple[1] == api.OTHER, 'was "%s" expected "%s"' % ( + descTuple[1], + api.OTHER.values, + ) + else: + raise NotImplementedError # 
"DBAPIDataTypeString not provided" + + # Test data binding + inputs = [pyData] + if pyDataInputAlternatives: + inputs.extend(pyDataInputAlternatives) + inputs = set(inputs) # removes redundant string==unicode tests + fldId = 1 + for inParam in inputs: + fldId += 1 + try: + crsr.execute( + "INSERT INTO xx_%s (fldId,fldData) VALUES (?,?)" % config.tmp, + (fldId, inParam), + ) + except: + if self.remote: + for message in crsr.messages: + print(message) + else: + conn.printADOerrors() + raise + crsr.execute( + "SELECT fldData FROM xx_%s WHERE ?=fldID" % config.tmp, [fldId] + ) + rs = crsr.fetchone() + if allowedReturnValues: + allowedTypes = tuple([type(aRV) for aRV in allowedReturnValues]) + assert isinstance( + rs[0], allowedTypes + ), 'result type "%s" must be one of %s' % (type(rs[0]), allowedTypes) + else: + assert isinstance( + rs[0], type(pyData) + ), 'result type "%s" must be instance of %s' % ( + type(rs[0]), + type(pyData), + ) + + if compareAlmostEqual and DBAPIDataTypeString == "DATETIME": + iso1 = adodbapi.dateconverter.DateObjectToIsoFormatString(rs[0]) + iso2 = adodbapi.dateconverter.DateObjectToIsoFormatString(pyData) + self.assertEqual(iso1, iso2) + elif compareAlmostEqual: + s = float(pyData) + v = float(rs[0]) + assert ( + abs(v - s) / s < 0.00001 + ), "Values not almost equal recvd=%s, expected=%f" % (rs[0], s) + else: + if allowedReturnValues: + ok = False + self.assertTrue( + rs[0] in allowedReturnValues, + 'Value "%s" not in %s' % (repr(rs[0]), allowedReturnValues), + ) + else: + self.assertEqual( + rs[0], + pyData, + 'Values are not equal recvd="%s", expected="%s"' + % (rs[0], pyData), + ) + + def testDataTypeFloat(self): + self.helpTestDataType("real", "NUMBER", 3.45, compareAlmostEqual=True) + self.helpTestDataType("float", "NUMBER", 1.79e37, compareAlmostEqual=True) + + def testDataTypeDecmal(self): + self.helpTestDataType( + "decimal(18,2)", + "NUMBER", + 3.45, + allowedReturnValues=["3.45", "3,45", decimal.Decimal("3.45")], + ) + 
self.helpTestDataType( + "numeric(18,2)", + "NUMBER", + 3.45, + allowedReturnValues=["3.45", "3,45", decimal.Decimal("3.45")], + ) + self.helpTestDataType( + "decimal(20,2)", + "NUMBER", + 444444444444444444, + allowedReturnValues=[ + "444444444444444444.00", + "444444444444444444,00", + decimal.Decimal("444444444444444444"), + ], + ) + if self.getEngine() == "MSSQL": + self.helpTestDataType( + "uniqueidentifier", + "UUID", + "{71A4F49E-39F3-42B1-A41E-48FF154996E6}", + allowedReturnValues=["{71A4F49E-39F3-42B1-A41E-48FF154996E6}"], + ) + + def testDataTypeMoney(self): # v2.1 Cole -- use decimal for money + if self.getEngine() == "MySQL": + self.helpTestDataType( + "DECIMAL(20,4)", "NUMBER", decimal.Decimal("-922337203685477.5808") + ) + elif self.getEngine() == "PostgreSQL": + self.helpTestDataType( + "money", + "NUMBER", + decimal.Decimal("-922337203685477.5808"), + compareAlmostEqual=True, + allowedReturnValues=[ + -922337203685477.5808, + decimal.Decimal("-922337203685477.5808"), + ], + ) + else: + self.helpTestDataType("smallmoney", "NUMBER", decimal.Decimal("214748.02")) + self.helpTestDataType( + "money", "NUMBER", decimal.Decimal("-922337203685477.5808") + ) + + def testDataTypeInt(self): + if self.getEngine() != "PostgreSQL": + self.helpTestDataType("tinyint", "NUMBER", 115) + self.helpTestDataType("smallint", "NUMBER", -32768) + if self.getEngine() not in ["ACCESS", "PostgreSQL"]: + self.helpTestDataType( + "bit", "NUMBER", 1 + ) # Does not work correctly with access + if self.getEngine() in ["MSSQL", "PostgreSQL"]: + self.helpTestDataType( + "bigint", + "NUMBER", + 3000000000, + allowedReturnValues=[3000000000, int(3000000000)], + ) + self.helpTestDataType("int", "NUMBER", 2147483647) + + def testDataTypeChar(self): + for sqlDataType in ("char(6)", "nchar(6)"): + self.helpTestDataType( + sqlDataType, + "STRING", + "spam ", + allowedReturnValues=["spam", "spam", "spam ", "spam "], + ) + + def testDataTypeVarChar(self): + if self.getEngine() == "MySQL": + 
stringKinds = ["varchar(10)", "text"] + elif self.getEngine() == "PostgreSQL": + stringKinds = ["varchar(10)", "text", "character varying"] + else: + stringKinds = [ + "varchar(10)", + "nvarchar(10)", + "text", + "ntext", + ] # ,"varchar(max)"] + + for sqlDataType in stringKinds: + self.helpTestDataType(sqlDataType, "STRING", "spam", ["spam"]) + + def testDataTypeDate(self): + if self.getEngine() == "PostgreSQL": + dt = "timestamp" + else: + dt = "datetime" + self.helpTestDataType( + dt, "DATETIME", adodbapi.Date(2002, 10, 28), compareAlmostEqual=True + ) + if self.getEngine() not in ["MySQL", "PostgreSQL"]: + self.helpTestDataType( + "smalldatetime", + "DATETIME", + adodbapi.Date(2002, 10, 28), + compareAlmostEqual=True, + ) + if tag != "pythontime" and self.getEngine() not in [ + "MySQL", + "PostgreSQL", + ]: # fails when using pythonTime + self.helpTestDataType( + dt, + "DATETIME", + adodbapi.Timestamp(2002, 10, 28, 12, 15, 1), + compareAlmostEqual=True, + ) + + def testDataTypeBinary(self): + binfld = str2bytes("\x07\x00\xE2\x40*") + arv = [binfld, adodbapi.Binary(binfld), bytes(binfld)] + if self.getEngine() == "PostgreSQL": + self.helpTestDataType( + "bytea", "BINARY", adodbapi.Binary(binfld), allowedReturnValues=arv + ) + else: + self.helpTestDataType( + "binary(5)", "BINARY", adodbapi.Binary(binfld), allowedReturnValues=arv + ) + self.helpTestDataType( + "varbinary(100)", + "BINARY", + adodbapi.Binary(binfld), + allowedReturnValues=arv, + ) + if self.getEngine() != "MySQL": + self.helpTestDataType( + "image", "BINARY", adodbapi.Binary(binfld), allowedReturnValues=arv + ) + + def helpRollbackTblTemp(self): + self.helpForceDropOnTblTemp() + + def helpForceDropOnTblTemp(self): + conn = self.getConnection() + with conn.cursor() as crsr: + try: + crsr.execute("DROP TABLE xx_%s" % config.tmp) + if not conn.autocommit: + conn.commit() + except: + pass + + def helpCreateAndPopulateTableTemp(self, crsr): + tabdef = ( + """ + CREATE TABLE xx_%s ( + fldData INTEGER + 
) + """ + % config.tmp + ) + try: # EAFP + crsr.execute(tabdef) + except api.DatabaseError: # was not dropped before + self.helpForceDropOnTblTemp() # so drop it now + crsr.execute(tabdef) + for i in range(9): # note: this poor SQL code, but a valid test + crsr.execute("INSERT INTO xx_%s (fldData) VALUES (%i)" % (config.tmp, i)) + # NOTE: building the test table without using parameter substitution + + def testFetchAll(self): + crsr = self.getCursor() + self.helpCreateAndPopulateTableTemp(crsr) + crsr.execute("SELECT fldData FROM xx_%s" % config.tmp) + rs = crsr.fetchall() + assert len(rs) == 9 + # test slice of rows + i = 3 + for row in rs[3:-2]: # should have rowid 3..6 + assert row[0] == i + i += 1 + self.helpRollbackTblTemp() + + def testPreparedStatement(self): + crsr = self.getCursor() + self.helpCreateAndPopulateTableTemp(crsr) + crsr.prepare("SELECT fldData FROM xx_%s" % config.tmp) + crsr.execute(crsr.command) # remember the one that was prepared + rs = crsr.fetchall() + assert len(rs) == 9 + assert rs[2][0] == 2 + self.helpRollbackTblTemp() + + def testWrongPreparedStatement(self): + crsr = self.getCursor() + self.helpCreateAndPopulateTableTemp(crsr) + crsr.prepare("SELECT * FROM nowhere") + crsr.execute( + "SELECT fldData FROM xx_%s" % config.tmp + ) # should execute this one, not the prepared one + rs = crsr.fetchall() + assert len(rs) == 9 + assert rs[2][0] == 2 + self.helpRollbackTblTemp() + + def testIterator(self): + crsr = self.getCursor() + self.helpCreateAndPopulateTableTemp(crsr) + crsr.execute("SELECT fldData FROM xx_%s" % config.tmp) + for i, row in enumerate( + crsr + ): # using cursor as an iterator, rather than fetchxxx + assert row[0] == i + self.helpRollbackTblTemp() + + def testExecuteMany(self): + crsr = self.getCursor() + self.helpCreateAndPopulateTableTemp(crsr) + seq_of_values = [(111,), (222,)] + crsr.executemany( + "INSERT INTO xx_%s (fldData) VALUES (?)" % config.tmp, seq_of_values + ) + if crsr.rowcount == -1: + print( + 
self.getEngine() + + " Provider does not support rowcount (on .executemany())" + ) + else: + self.assertEqual(crsr.rowcount, 2) + crsr.execute("SELECT fldData FROM xx_%s" % config.tmp) + rs = crsr.fetchall() + assert len(rs) == 11 + self.helpRollbackTblTemp() + + def testRowCount(self): + crsr = self.getCursor() + self.helpCreateAndPopulateTableTemp(crsr) + crsr.execute("SELECT fldData FROM xx_%s" % config.tmp) + if crsr.rowcount == -1: + # print("provider does not support rowcount on select") + pass + else: + self.assertEqual(crsr.rowcount, 9) + self.helpRollbackTblTemp() + + def testRowCountNoRecordset(self): + crsr = self.getCursor() + self.helpCreateAndPopulateTableTemp(crsr) + crsr.execute("DELETE FROM xx_%s WHERE fldData >= 5" % config.tmp) + if crsr.rowcount == -1: + print(self.getEngine() + " Provider does not support rowcount (on DELETE)") + else: + self.assertEqual(crsr.rowcount, 4) + self.helpRollbackTblTemp() + + def testFetchMany(self): + crsr = self.getCursor() + self.helpCreateAndPopulateTableTemp(crsr) + crsr.execute("SELECT fldData FROM xx_%s" % config.tmp) + rs = crsr.fetchmany(3) + assert len(rs) == 3 + rs = crsr.fetchmany(5) + assert len(rs) == 5 + rs = crsr.fetchmany(5) + assert len(rs) == 1 # Asked for five, but there is only one left + self.helpRollbackTblTemp() + + def testFetchManyWithArraySize(self): + crsr = self.getCursor() + self.helpCreateAndPopulateTableTemp(crsr) + crsr.execute("SELECT fldData FROM xx_%s" % config.tmp) + rs = crsr.fetchmany() + assert len(rs) == 1 # arraysize Defaults to one + crsr.arraysize = 4 + rs = crsr.fetchmany() + assert len(rs) == 4 + rs = crsr.fetchmany() + assert len(rs) == 4 + rs = crsr.fetchmany() + assert len(rs) == 0 + self.helpRollbackTblTemp() + + def testErrorConnect(self): + conn = self.getConnection() + kw = {} + if "proxy_host" in conn.kwargs: + kw["proxy_host"] = conn.kwargs["proxy_host"] + conn.close() + self.assertRaises(api.DatabaseError, self.db, "not a valid connect string", kw) + + def 
testRowIterator(self): + self.helpForceDropOnTblTemp() + conn = self.getConnection() + crsr = conn.cursor() + tabdef = ( + """ + CREATE TABLE xx_%s ( + fldId integer NOT NULL, + fldTwo integer, + fldThree integer, + fldFour integer) + """ + % config.tmp + ) + crsr.execute(tabdef) + + inputs = [(2, 3, 4), (102, 103, 104)] + fldId = 1 + for inParam in inputs: + fldId += 1 + try: + crsr.execute( + "INSERT INTO xx_%s (fldId,fldTwo,fldThree,fldFour) VALUES (?,?,?,?)" + % config.tmp, + (fldId, inParam[0], inParam[1], inParam[2]), + ) + except: + if self.remote: + for message in crsr.messages: + print(message) + else: + conn.printADOerrors() + raise + crsr.execute( + "SELECT fldTwo,fldThree,fldFour FROM xx_%s WHERE ?=fldID" % config.tmp, + [fldId], + ) + rec = crsr.fetchone() + # check that stepping through an emulated row works + for j in range(len(inParam)): + assert ( + rec[j] == inParam[j] + ), 'returned value:"%s" != test value:"%s"' % (rec[j], inParam[j]) + # check that we can get a complete tuple from a row + assert tuple(rec) == inParam, 'returned value:"%s" != test value:"%s"' % ( + repr(rec), + repr(inParam), + ) + # test that slices of rows work + slice1 = tuple(rec[:-1]) + slice2 = tuple(inParam[0:2]) + assert slice1 == slice2, 'returned value:"%s" != test value:"%s"' % ( + repr(slice1), + repr(slice2), + ) + # now test named column retrieval + assert rec["fldTwo"] == inParam[0] + assert rec.fldThree == inParam[1] + assert rec.fldFour == inParam[2] + # test array operation + # note that the fields vv vv vv are out of order + crsr.execute("select fldThree,fldFour,fldTwo from xx_%s" % config.tmp) + recs = crsr.fetchall() + assert recs[1][0] == 103 + assert recs[0][1] == 4 + assert recs[1]["fldFour"] == 104 + assert recs[0, 0] == 3 + assert recs[0, "fldTwo"] == 2 + assert recs[1, 2] == 102 + for i in range(1): + for j in range(2): + assert recs[i][j] == recs[i, j] + + def testFormatParamstyle(self): + self.helpForceDropOnTblTemp() + conn = self.getConnection() + 
conn.paramstyle = "format" # test nonstandard use of paramstyle + crsr = conn.cursor() + tabdef = ( + """ + CREATE TABLE xx_%s ( + fldId integer NOT NULL, + fldData varchar(10), + fldConst varchar(30)) + """ + % config.tmp + ) + crsr.execute(tabdef) + + inputs = ["one", "two", "three"] + fldId = 2 + for inParam in inputs: + fldId += 1 + sql = ( + "INSERT INTO xx_" + + config.tmp + + " (fldId,fldConst,fldData) VALUES (%s,'thi%s :may cause? trouble', %s)" + ) + try: + crsr.execute(sql, (fldId, inParam)) + except: + if self.remote: + for message in crsr.messages: + print(message) + else: + conn.printADOerrors() + raise + crsr.execute( + "SELECT fldData, fldConst FROM xx_" + config.tmp + " WHERE %s=fldID", + [fldId], + ) + rec = crsr.fetchone() + self.assertEqual( + rec[0], + inParam, + 'returned value:"%s" != test value:"%s"' % (rec[0], inParam), + ) + self.assertEqual(rec[1], "thi%s :may cause? trouble") + + # now try an operation with a "%s" as part of a literal + sel = ( + "insert into xx_" + config.tmp + " (fldId,fldData) VALUES (%s,'four%sfive')" + ) + params = (20,) + crsr.execute(sel, params) + + # test the .query implementation + assert "(?," in crsr.query, 'expected:"%s" in "%s"' % ("(?,", crsr.query) + # test the .command attribute + assert crsr.command == sel, 'expected:"%s" but found "%s"' % (sel, crsr.command) + + # test the .parameters attribute + if not self.remote: # parameter list will be altered in transit + self.assertEqual(crsr.parameters, params) + # now make sure the data made it + crsr.execute("SELECT fldData FROM xx_%s WHERE fldID=20" % config.tmp) + rec = crsr.fetchone() + self.assertEqual(rec[0], "four%sfive") + + def testNamedParamstyle(self): + self.helpForceDropOnTblTemp() + conn = self.getConnection() + crsr = conn.cursor() + crsr.paramstyle = "named" # test nonstandard use of paramstyle + tabdef = ( + """ + CREATE TABLE xx_%s ( + fldId integer NOT NULL, + fldData varchar(10)) + """ + % config.tmp + ) + crsr.execute(tabdef) + + inputs = 
["four", "five", "six"] + fldId = 10 + for inParam in inputs: + fldId += 1 + try: + crsr.execute( + "INSERT INTO xx_%s (fldId,fldData) VALUES (:Id,:f_Val)" + % config.tmp, + {"f_Val": inParam, "Id": fldId}, + ) + except: + if self.remote: + for message in crsr.messages: + print(message) + else: + conn.printADOerrors() + raise + crsr.execute( + "SELECT fldData FROM xx_%s WHERE fldID=:Id" % config.tmp, {"Id": fldId} + ) + rec = crsr.fetchone() + self.assertEqual( + rec[0], + inParam, + 'returned value:"%s" != test value:"%s"' % (rec[0], inParam), + ) + # now a test with a ":" as part of a literal + crsr.execute( + "insert into xx_%s (fldId,fldData) VALUES (:xyz,'six:five')" % config.tmp, + {"xyz": 30}, + ) + crsr.execute("SELECT fldData FROM xx_%s WHERE fldID=30" % config.tmp) + rec = crsr.fetchone() + self.assertEqual(rec[0], "six:five") + + def testPyformatParamstyle(self): + self.helpForceDropOnTblTemp() + conn = self.getConnection() + crsr = conn.cursor() + crsr.paramstyle = "pyformat" # test nonstandard use of paramstyle + tabdef = ( + """ + CREATE TABLE xx_%s ( + fldId integer NOT NULL, + fldData varchar(10)) + """ + % config.tmp + ) + crsr.execute(tabdef) + + inputs = ["four", "five", "six"] + fldId = 10 + for inParam in inputs: + fldId += 1 + try: + crsr.execute( + "INSERT INTO xx_%s (fldId,fldData) VALUES (%%(Id)s,%%(f_Val)s)" + % config.tmp, + {"f_Val": inParam, "Id": fldId}, + ) + except: + if self.remote: + for message in crsr.messages: + print(message) + else: + conn.printADOerrors() + raise + crsr.execute( + "SELECT fldData FROM xx_%s WHERE fldID=%%(Id)s" % config.tmp, + {"Id": fldId}, + ) + rec = crsr.fetchone() + self.assertEqual( + rec[0], + inParam, + 'returned value:"%s" != test value:"%s"' % (rec[0], inParam), + ) + # now a test with a "%" as part of a literal + crsr.execute( + "insert into xx_%s (fldId,fldData) VALUES (%%(xyz)s,'six%%five')" + % config.tmp, + {"xyz": 30}, + ) + crsr.execute("SELECT fldData FROM xx_%s WHERE fldID=30" % config.tmp) 
+ rec = crsr.fetchone() + self.assertEqual(rec[0], "six%five") + + def testAutomaticParamstyle(self): + self.helpForceDropOnTblTemp() + conn = self.getConnection() + conn.paramstyle = "dynamic" # test nonstandard use of paramstyle + crsr = conn.cursor() + tabdef = ( + """ + CREATE TABLE xx_%s ( + fldId integer NOT NULL, + fldData varchar(10), + fldConst varchar(30)) + """ + % config.tmp + ) + crsr.execute(tabdef) + inputs = ["one", "two", "three"] + fldId = 2 + for inParam in inputs: + fldId += 1 + try: + crsr.execute( + "INSERT INTO xx_" + + config.tmp + + " (fldId,fldConst,fldData) VALUES (?,'thi%s :may cause? troub:1e', ?)", + (fldId, inParam), + ) + except: + if self.remote: + for message in crsr.messages: + print(message) + else: + conn.printADOerrors() + raise + trouble = "thi%s :may cause? troub:1e" + crsr.execute( + "SELECT fldData, fldConst FROM xx_" + config.tmp + " WHERE ?=fldID", + [fldId], + ) + rec = crsr.fetchone() + self.assertEqual( + rec[0], + inParam, + 'returned value:"%s" != test value:"%s"' % (rec[0], inParam), + ) + self.assertEqual(rec[1], trouble) + # inputs = [u'four',u'five',u'six'] + fldId = 10 + for inParam in inputs: + fldId += 1 + try: + crsr.execute( + "INSERT INTO xx_%s (fldId,fldData) VALUES (:Id,:f_Val)" + % config.tmp, + {"f_Val": inParam, "Id": fldId}, + ) + except: + if self.remote: + for message in crsr.messages: + print(message) + else: + conn.printADOerrors() + raise + crsr.execute( + "SELECT fldData FROM xx_%s WHERE :Id=fldID" % config.tmp, {"Id": fldId} + ) + rec = crsr.fetchone() + self.assertEqual( + rec[0], + inParam, + 'returned value:"%s" != test value:"%s"' % (rec[0], inParam), + ) + # now a test with a ":" as part of a literal -- and use a prepared query + ppdcmd = ( + "insert into xx_%s (fldId,fldData) VALUES (:xyz,'six:five')" % config.tmp + ) + crsr.prepare(ppdcmd) + crsr.execute(ppdcmd, {"xyz": 30}) + crsr.execute("SELECT fldData FROM xx_%s WHERE fldID=30" % config.tmp) + rec = crsr.fetchone() + 
self.assertEqual(rec[0], "six:five") + + def testRollBack(self): + conn = self.getConnection() + crsr = conn.cursor() + assert not crsr.connection.autocommit, "Unexpected beginning condition" + self.helpCreateAndPopulateTableTemp(crsr) + crsr.connection.commit() # commit the first bunch + + crsr.execute("INSERT INTO xx_%s (fldData) VALUES(100)" % config.tmp) + + selectSql = "SELECT fldData FROM xx_%s WHERE fldData=100" % config.tmp + crsr.execute(selectSql) + rs = crsr.fetchall() + assert len(rs) == 1 + self.conn.rollback() + crsr.execute(selectSql) + assert ( + crsr.fetchone() == None + ), "cursor.fetchone should return None if a query retrieves no rows" + crsr.execute("SELECT fldData from xx_%s" % config.tmp) + rs = crsr.fetchall() + assert len(rs) == 9, "the original records should still be present" + self.helpRollbackTblTemp() + + def testCommit(self): + try: + con2 = self.getAnotherConnection() + except NotImplementedError: + return # should be "SKIP" for ACCESS + assert not con2.autocommit, "default should be manual commit" + crsr = con2.cursor() + self.helpCreateAndPopulateTableTemp(crsr) + + crsr.execute("INSERT INTO xx_%s (fldData) VALUES(100)" % config.tmp) + con2.commit() + + selectSql = "SELECT fldData FROM xx_%s WHERE fldData=100" % config.tmp + crsr.execute(selectSql) + rs = crsr.fetchall() + assert len(rs) == 1 + crsr.close() + con2.close() + conn = self.getConnection() + crsr = self.getCursor() + with conn.cursor() as crsr: + crsr.execute(selectSql) + rs = crsr.fetchall() + assert len(rs) == 1 + assert rs[0][0] == 100 + self.helpRollbackTblTemp() + + def testAutoRollback(self): + try: + con2 = self.getAnotherConnection() + except NotImplementedError: + return # should be "SKIP" for ACCESS + assert not con2.autocommit, "unexpected beginning condition" + crsr = con2.cursor() + self.helpCreateAndPopulateTableTemp(crsr) + crsr.execute("INSERT INTO xx_%s (fldData) VALUES(100)" % config.tmp) + selectSql = "SELECT fldData FROM xx_%s WHERE fldData=100" % 
config.tmp + crsr.execute(selectSql) + rs = crsr.fetchall() + assert len(rs) == 1 + crsr.close() + con2.close() + crsr = self.getCursor() + try: + crsr.execute( + selectSql + ) # closing the connection should have forced rollback + row = crsr.fetchone() + except api.DatabaseError: + row = None # if the entire table disappeared the rollback was perfect and the test passed + assert row == None, ( + "cursor.fetchone should return None if a query retrieves no rows. Got %s" + % repr(row) + ) + self.helpRollbackTblTemp() + + def testAutoCommit(self): + try: + ac_conn = self.getAnotherConnection({"autocommit": True}) + except NotImplementedError: + return # should be "SKIP" for ACCESS + crsr = ac_conn.cursor() + self.helpCreateAndPopulateTableTemp(crsr) + crsr.execute("INSERT INTO xx_%s (fldData) VALUES(100)" % config.tmp) + crsr.close() + with self.getCursor() as crsr: + selectSql = "SELECT fldData from xx_%s" % config.tmp + crsr.execute( + selectSql + ) # closing the connection should _not_ have forced rollback + rs = crsr.fetchall() + assert len(rs) == 10, "all records should still be present" + ac_conn.close() + self.helpRollbackTblTemp() + + def testSwitchedAutoCommit(self): + try: + ac_conn = self.getAnotherConnection() + except NotImplementedError: + return # should be "SKIP" for ACCESS + ac_conn.autocommit = True + crsr = ac_conn.cursor() + self.helpCreateAndPopulateTableTemp(crsr) + crsr.execute("INSERT INTO xx_%s (fldData) VALUES(100)" % config.tmp) + crsr.close() + conn = self.getConnection() + ac_conn.close() + with self.getCursor() as crsr: + selectSql = "SELECT fldData from xx_%s" % config.tmp + crsr.execute( + selectSql + ) # closing the connection should _not_ have forced rollback + rs = crsr.fetchall() + assert len(rs) == 10, "all records should still be present" + self.helpRollbackTblTemp() + + def testExtendedTypeHandling(self): + class XtendString(str): + pass + + class XtendInt(int): + pass + + class XtendFloat(float): + pass + + xs = 
XtendString(randomstring(30)) + xi = XtendInt(random.randint(-100, 500)) + xf = XtendFloat(random.random()) + self.helpForceDropOnTblTemp() + conn = self.getConnection() + crsr = conn.cursor() + tabdef = ( + """ + CREATE TABLE xx_%s ( + s VARCHAR(40) NOT NULL, + i INTEGER NOT NULL, + f REAL NOT NULL)""" + % config.tmp + ) + crsr.execute(tabdef) + crsr.execute( + "INSERT INTO xx_%s (s, i, f) VALUES (?, ?, ?)" % config.tmp, (xs, xi, xf) + ) + crsr.close() + conn = self.getConnection() + with self.getCursor() as crsr: + selectSql = "SELECT s, i, f from xx_%s" % config.tmp + crsr.execute( + selectSql + ) # closing the connection should _not_ have forced rollback + row = crsr.fetchone() + self.assertEqual(row.s, xs) + self.assertEqual(row.i, xi) + self.assertAlmostEqual(row.f, xf) + self.helpRollbackTblTemp() + + +class TestADOwithSQLServer(CommonDBTests): + def setUp(self): + self.conn = config.dbSqlServerconnect( + *config.connStrSQLServer[0], **config.connStrSQLServer[1] + ) + self.conn.timeout = 30 # turn timeout back up + self.engine = "MSSQL" + self.db = config.dbSqlServerconnect + self.remote = config.connStrSQLServer[2] + + def tearDown(self): + try: + self.conn.rollback() + except: + pass + try: + self.conn.close() + except: + pass + self.conn = None + + def getConnection(self): + return self.conn + + def getAnotherConnection(self, addkeys=None): + keys = dict(config.connStrSQLServer[1]) + if addkeys: + keys.update(addkeys) + return config.dbSqlServerconnect(*config.connStrSQLServer[0], **keys) + + def testVariableReturningStoredProcedure(self): + crsr = self.conn.cursor() + spdef = """ + CREATE PROCEDURE sp_DeleteMeOnlyForTesting + @theInput varchar(50), + @theOtherInput varchar(50), + @theOutput varchar(100) OUTPUT + AS + SET @theOutput=@theInput+@theOtherInput + """ + try: + crsr.execute("DROP PROCEDURE sp_DeleteMeOnlyForTesting") + self.conn.commit() + except: # Make sure it is empty + pass + crsr.execute(spdef) + + retvalues = crsr.callproc( + 
"sp_DeleteMeOnlyForTesting", ("Dodsworth", "Anne", " ") + ) + assert retvalues[0] == "Dodsworth", '%s is not "Dodsworth"' % repr(retvalues[0]) + assert retvalues[1] == "Anne", '%s is not "Anne"' % repr(retvalues[1]) + assert retvalues[2] == "DodsworthAnne", '%s is not "DodsworthAnne"' % repr( + retvalues[2] + ) + self.conn.rollback() + + def testMultipleSetReturn(self): + crsr = self.getCursor() + self.helpCreateAndPopulateTableTemp(crsr) + + spdef = """ + CREATE PROCEDURE sp_DeleteMe_OnlyForTesting + AS + SELECT fldData FROM xx_%s ORDER BY fldData ASC + SELECT fldData From xx_%s where fldData = -9999 + SELECT fldData FROM xx_%s ORDER BY fldData DESC + """ % ( + config.tmp, + config.tmp, + config.tmp, + ) + try: + crsr.execute("DROP PROCEDURE sp_DeleteMe_OnlyForTesting") + self.conn.commit() + except: # Make sure it is empty + pass + crsr.execute(spdef) + + retvalues = crsr.callproc("sp_DeleteMe_OnlyForTesting") + row = crsr.fetchone() + self.assertEqual(row[0], 0) + assert crsr.nextset() == True, "Operation should succeed" + assert not crsr.fetchall(), "Should be an empty second set" + assert crsr.nextset() == True, "third set should be present" + rowdesc = crsr.fetchall() + self.assertEqual(rowdesc[0][0], 8) + assert crsr.nextset() == None, "No more return sets, should return None" + + self.helpRollbackTblTemp() + + def testDatetimeProcedureParameter(self): + crsr = self.conn.cursor() + spdef = """ + CREATE PROCEDURE sp_DeleteMeOnlyForTesting + @theInput DATETIME, + @theOtherInput varchar(50), + @theOutput varchar(100) OUTPUT + AS + SET @theOutput = CONVERT(CHARACTER(20), @theInput, 0) + @theOtherInput + """ + try: + crsr.execute("DROP PROCEDURE sp_DeleteMeOnlyForTesting") + self.conn.commit() + except: # Make sure it is empty + pass + crsr.execute(spdef) + + result = crsr.callproc( + "sp_DeleteMeOnlyForTesting", + [adodbapi.Timestamp(2014, 12, 25, 0, 1, 0), "Beep", " " * 30], + ) + + assert result[2] == "Dec 25 2014 12:01AM Beep", 'value was="%s"' % result[2] + 
self.conn.rollback() + + def testIncorrectStoredProcedureParameter(self): + crsr = self.conn.cursor() + spdef = """ + CREATE PROCEDURE sp_DeleteMeOnlyForTesting + @theInput DATETIME, + @theOtherInput varchar(50), + @theOutput varchar(100) OUTPUT + AS + SET @theOutput = CONVERT(CHARACTER(20), @theInput) + @theOtherInput + """ + try: + crsr.execute("DROP PROCEDURE sp_DeleteMeOnlyForTesting") + self.conn.commit() + except: # Make sure it is empty + pass + crsr.execute(spdef) + + # calling the sproc with a string for the first parameter where a DateTime is expected + result = tryconnection.try_operation_with_expected_exception( + (api.DataError, api.DatabaseError), + crsr.callproc, + ["sp_DeleteMeOnlyForTesting"], + {"parameters": ["this is wrong", "Anne", "not Alice"]}, + ) + if result[0]: # the expected exception was raised + assert "@theInput" in str(result[1]) or "DatabaseError" in str( + result + ), "Identifies the wrong erroneous parameter" + else: + assert result[0], result[1] # incorrect or no exception + self.conn.rollback() + + +class TestADOwithAccessDB(CommonDBTests): + def setUp(self): + self.conn = config.dbAccessconnect( + *config.connStrAccess[0], **config.connStrAccess[1] + ) + self.conn.timeout = 30 # turn timeout back up + self.engine = "ACCESS" + self.db = config.dbAccessconnect + self.remote = config.connStrAccess[2] + + def tearDown(self): + try: + self.conn.rollback() + except: + pass + try: + self.conn.close() + except: + pass + self.conn = None + + def getConnection(self): + return self.conn + + def getAnotherConnection(self, addkeys=None): + raise NotImplementedError("Jet cannot use a second connection to the database") + + def testOkConnect(self): + c = self.db(*config.connStrAccess[0], **config.connStrAccess[1]) + assert c != None + c.close() + + +class TestADOwithMySql(CommonDBTests): + def setUp(self): + self.conn = config.dbMySqlconnect( + *config.connStrMySql[0], **config.connStrMySql[1] + ) + self.conn.timeout = 30 # turn timeout back 
up + self.engine = "MySQL" + self.db = config.dbMySqlconnect + self.remote = config.connStrMySql[2] + + def tearDown(self): + try: + self.conn.rollback() + except: + pass + try: + self.conn.close() + except: + pass + self.conn = None + + def getConnection(self): + return self.conn + + def getAnotherConnection(self, addkeys=None): + keys = dict(config.connStrMySql[1]) + if addkeys: + keys.update(addkeys) + return config.dbMySqlconnect(*config.connStrMySql[0], **keys) + + def testOkConnect(self): + c = self.db(*config.connStrMySql[0], **config.connStrMySql[1]) + assert c != None + + # def testStoredProcedure(self): + # crsr=self.conn.cursor() + # try: + # crsr.execute("DROP PROCEDURE DeleteMeOnlyForTesting") + # self.conn.commit() + # except: #Make sure it is empty + # pass + # spdef= """ + # DELIMITER $$ + # CREATE PROCEDURE DeleteMeOnlyForTesting (onein CHAR(10), twoin CHAR(10), OUT theout CHAR(20)) + # DETERMINISTIC + # BEGIN + # SET theout = onein //|| twoin; + # /* (SELECT 'a small string' as result; */ + # END $$ + # """ + # + # crsr.execute(spdef) + # + # retvalues=crsr.callproc('DeleteMeOnlyForTesting',('Dodsworth','Anne',' ')) + # print 'return value (mysql)=',repr(crsr.returnValue) ### + # assert retvalues[0]=='Dodsworth', '%s is not "Dodsworth"'%repr(retvalues[0]) + # assert retvalues[1]=='Anne','%s is not "Anne"'%repr(retvalues[1]) + # assert retvalues[2]=='DodsworthAnne','%s is not "DodsworthAnne"'%repr(retvalues[2]) + # + # try: + # crsr.execute("DROP PROCEDURE, DeleteMeOnlyForTesting") + # self.conn.commit() + # except: #Make sure it is empty + # pass + + +class TestADOwithPostgres(CommonDBTests): + def setUp(self): + self.conn = config.dbPostgresConnect( + *config.connStrPostgres[0], **config.connStrPostgres[1] + ) + self.conn.timeout = 30 # turn timeout back up + self.engine = "PostgreSQL" + self.db = config.dbPostgresConnect + self.remote = config.connStrPostgres[2] + + def tearDown(self): + try: + self.conn.rollback() + except: + pass + try: + 
self.conn.close() + except: + pass + self.conn = None + + def getConnection(self): + return self.conn + + def getAnotherConnection(self, addkeys=None): + keys = dict(config.connStrPostgres[1]) + if addkeys: + keys.update(addkeys) + return config.dbPostgresConnect(*config.connStrPostgres[0], **keys) + + def testOkConnect(self): + c = self.db(*config.connStrPostgres[0], **config.connStrPostgres[1]) + assert c != None + + # def testStoredProcedure(self): + # crsr=self.conn.cursor() + # spdef= """ + # CREATE OR REPLACE FUNCTION DeleteMeOnlyForTesting (text, text) + # RETURNS text AS $funk$ + # BEGIN + # RETURN $1 || $2; + # END; + # $funk$ + # LANGUAGE SQL; + # """ + # + # crsr.execute(spdef) + # retvalues = crsr.callproc('DeleteMeOnlyForTesting',('Dodsworth','Anne',' ')) + # ### print 'return value (pg)=',repr(crsr.returnValue) ### + # assert retvalues[0]=='Dodsworth', '%s is not "Dodsworth"'%repr(retvalues[0]) + # assert retvalues[1]=='Anne','%s is not "Anne"'%repr(retvalues[1]) + # assert retvalues[2]=='Dodsworth Anne','%s is not "Dodsworth Anne"'%repr(retvalues[2]) + # self.conn.rollback() + # try: + # crsr.execute("DROP PROCEDURE, DeleteMeOnlyForTesting") + # self.conn.commit() + # except: #Make sure it is empty + # pass + + +class TimeConverterInterfaceTest(unittest.TestCase): + def testIDate(self): + assert self.tc.Date(1990, 2, 2) + + def testITime(self): + assert self.tc.Time(13, 2, 2) + + def testITimestamp(self): + assert self.tc.Timestamp(1990, 2, 2, 13, 2, 1) + + def testIDateObjectFromCOMDate(self): + assert self.tc.DateObjectFromCOMDate(37435.7604282) + + def testICOMDate(self): + assert hasattr(self.tc, "COMDate") + + def testExactDate(self): + d = self.tc.Date(1994, 11, 15) + comDate = self.tc.COMDate(d) + correct = 34653.0 + assert comDate == correct, comDate + + def testExactTimestamp(self): + d = self.tc.Timestamp(1994, 11, 15, 12, 0, 0) + comDate = self.tc.COMDate(d) + correct = 34653.5 + self.assertEqual(comDate, correct) + + d = 
self.tc.Timestamp(2003, 5, 6, 14, 15, 17) + comDate = self.tc.COMDate(d) + correct = 37747.593946759262 + self.assertEqual(comDate, correct) + + def testIsoFormat(self): + d = self.tc.Timestamp(1994, 11, 15, 12, 3, 10) + iso = self.tc.DateObjectToIsoFormatString(d) + self.assertEqual(str(iso[:19]), "1994-11-15 12:03:10") + + dt = self.tc.Date(2003, 5, 2) + iso = self.tc.DateObjectToIsoFormatString(dt) + self.assertEqual(str(iso[:10]), "2003-05-02") + + +if config.doMxDateTimeTest: + import mx.DateTime + + +class TestMXDateTimeConverter(TimeConverterInterfaceTest): + def setUp(self): + self.tc = api.mxDateTimeConverter() + + def testCOMDate(self): + t = mx.DateTime.DateTime(2002, 6, 28, 18, 15, 2) + cmd = self.tc.COMDate(t) + assert cmd == t.COMDate() + + def testDateObjectFromCOMDate(self): + cmd = self.tc.DateObjectFromCOMDate(37435.7604282) + t = mx.DateTime.DateTime(2002, 6, 28, 18, 15, 0) + t2 = mx.DateTime.DateTime(2002, 6, 28, 18, 15, 2) + assert t2 > cmd > t + + def testDate(self): + assert mx.DateTime.Date(1980, 11, 4) == self.tc.Date(1980, 11, 4) + + def testTime(self): + assert mx.DateTime.Time(13, 11, 4) == self.tc.Time(13, 11, 4) + + def testTimestamp(self): + t = mx.DateTime.DateTime(2002, 6, 28, 18, 15, 1) + obj = self.tc.Timestamp(2002, 6, 28, 18, 15, 1) + assert t == obj + + +import time + + +class TestPythonTimeConverter(TimeConverterInterfaceTest): + def setUp(self): + self.tc = api.pythonTimeConverter() + + def testCOMDate(self): + mk = time.mktime((2002, 6, 28, 18, 15, 1, 4, 31 + 28 + 31 + 30 + 31 + 28, -1)) + t = time.localtime(mk) + # Fri, 28 Jun 2002 18:15:01 +0000 + cmd = self.tc.COMDate(t) + assert abs(cmd - 37435.7604282) < 1.0 / 24, "%f more than an hour wrong" % cmd + + def testDateObjectFromCOMDate(self): + cmd = self.tc.DateObjectFromCOMDate(37435.7604282) + t1 = time.gmtime( + time.mktime((2002, 6, 28, 0, 14, 1, 4, 31 + 28 + 31 + 30 + 31 + 28, -1)) + ) + # there are errors in the implementation of gmtime which we ignore + t2 = 
time.gmtime( + time.mktime((2002, 6, 29, 12, 14, 2, 4, 31 + 28 + 31 + 30 + 31 + 28, -1)) + ) + assert t1 < cmd < t2, '"%s" should be about 2002-6-28 12:15:01' % repr(cmd) + + def testDate(self): + t1 = time.mktime((2002, 6, 28, 18, 15, 1, 4, 31 + 28 + 31 + 30 + 31 + 30, 0)) + t2 = time.mktime((2002, 6, 30, 18, 15, 1, 4, 31 + 28 + 31 + 30 + 31 + 28, 0)) + obj = self.tc.Date(2002, 6, 29) + assert t1 < time.mktime(obj) < t2, obj + + def testTime(self): + self.assertEqual( + self.tc.Time(18, 15, 2), time.gmtime(18 * 60 * 60 + 15 * 60 + 2) + ) + + def testTimestamp(self): + t1 = time.localtime( + time.mktime((2002, 6, 28, 18, 14, 1, 4, 31 + 28 + 31 + 30 + 31 + 28, -1)) + ) + t2 = time.localtime( + time.mktime((2002, 6, 28, 18, 16, 1, 4, 31 + 28 + 31 + 30 + 31 + 28, -1)) + ) + obj = self.tc.Timestamp(2002, 6, 28, 18, 15, 2) + assert t1 < obj < t2, obj + + +class TestPythonDateTimeConverter(TimeConverterInterfaceTest): + def setUp(self): + self.tc = api.pythonDateTimeConverter() + + def testCOMDate(self): + t = datetime.datetime(2002, 6, 28, 18, 15, 1) + # Fri, 28 Jun 2002 18:15:01 +0000 + cmd = self.tc.COMDate(t) + assert abs(cmd - 37435.7604282) < 1.0 / 24, "more than an hour wrong" + + def testDateObjectFromCOMDate(self): + cmd = self.tc.DateObjectFromCOMDate(37435.7604282) + t1 = datetime.datetime(2002, 6, 28, 18, 14, 1) + t2 = datetime.datetime(2002, 6, 28, 18, 16, 1) + assert t1 < cmd < t2, cmd + + tx = datetime.datetime( + 2002, 6, 28, 18, 14, 1, 900000 + ) # testing that microseconds don't become milliseconds + c1 = self.tc.DateObjectFromCOMDate(self.tc.COMDate(tx)) + assert t1 < c1 < t2, c1 + + def testDate(self): + t1 = datetime.date(2002, 6, 28) + t2 = datetime.date(2002, 6, 30) + obj = self.tc.Date(2002, 6, 29) + assert t1 < obj < t2, obj + + def testTime(self): + self.assertEqual(self.tc.Time(18, 15, 2).isoformat()[:8], "18:15:02") + + def testTimestamp(self): + t1 = datetime.datetime(2002, 6, 28, 18, 14, 1) + t2 = datetime.datetime(2002, 6, 28, 18, 16, 1) + 
obj = self.tc.Timestamp(2002, 6, 28, 18, 15, 2) + assert t1 < obj < t2, obj + + +suites = [] +suites.append(unittest.makeSuite(TestPythonDateTimeConverter, "test")) +if config.doMxDateTimeTest: + suites.append(unittest.makeSuite(TestMXDateTimeConverter, "test")) +if config.doTimeTest: + suites.append(unittest.makeSuite(TestPythonTimeConverter, "test")) + +if config.doAccessTest: + suites.append(unittest.makeSuite(TestADOwithAccessDB, "test")) +if config.doSqlServerTest: + suites.append(unittest.makeSuite(TestADOwithSQLServer, "test")) +if config.doMySqlTest: + suites.append(unittest.makeSuite(TestADOwithMySql, "test")) +if config.doPostgresTest: + suites.append(unittest.makeSuite(TestADOwithPostgres, "test")) + + +class cleanup_manager(object): + def __enter__(self): + pass + + def __exit__(self, exc_type, exc_val, exc_tb): + config.cleanup(config.testfolder, config.mdb_name) + + +suite = unittest.TestSuite(suites) +if __name__ == "__main__": + mysuite = copy.deepcopy(suite) + with cleanup_manager(): + defaultDateConverter = adodbapi.dateconverter + print(__doc__) + print("Default Date Converter is %s" % (defaultDateConverter,)) + dateconverter = defaultDateConverter + tag = "datetime" + unittest.TextTestRunner().run(mysuite) + + if config.iterateOverTimeTests: + for test, dateconverter, tag in ( + (config.doTimeTest, api.pythonTimeConverter, "pythontime"), + (config.doMxDateTimeTest, api.mxDateTimeConverter, "mx"), + ): + if test: + mysuite = copy.deepcopy( + suite + ) # work around a side effect of unittest.TextTestRunner + adodbapi.adodbapi.dateconverter = dateconverter() + print("Changed dateconverter to ") + print(adodbapi.adodbapi.dateconverter) + unittest.TextTestRunner().run(mysuite) diff --git a/lib/adodbapi/test/adodbapitestconfig.py b/lib/adodbapi/test/adodbapitestconfig.py new file mode 100644 index 00000000..98f25444 --- /dev/null +++ b/lib/adodbapi/test/adodbapitestconfig.py @@ -0,0 +1,221 @@ +# Configure this to _YOUR_ environment in order to run the 
testcases. +"testADOdbapiConfig.py v 2.6.2.B00" + +# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # +# # +# # TESTERS: +# # +# # You will need to make numerous modifications to this file +# # to adapt it to your own testing environment. +# # +# # Skip down to the next "# #" line -- +# # -- the things you need to change are below it. +# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # +import platform +import random +import sys + +import is64bit +import setuptestframework +import tryconnection + +print("\nPython", sys.version) +node = platform.node() +try: + print( + "node=%s, is64bit.os()= %s, is64bit.Python()= %s" + % (node, is64bit.os(), is64bit.Python()) + ) +except: + pass + +if "--help" in sys.argv: + print( + """Valid command-line switches are: + --package - create a temporary test package, run 2to3 if needed. + --all - run all possible tests + --time - loop over time format tests (including mxdatetime if present) + --nojet - do not test against an ACCESS database file + --mssql - test against Microsoft SQL server + --pg - test against PostgreSQL + --mysql - test against MariaDB + --remote= - test unsing remote server at= (experimental) + """ + ) + exit() +try: + onWindows = bool(sys.getwindowsversion()) # seems to work on all versions of Python +except: + onWindows = False + +# create a random name for temporary table names +_alphabet = ( + "PYFGCRLAOEUIDHTNSQJKXBMWVZ" # why, yes, I do happen to use a dvorak keyboard +) +tmp = "".join([random.choice(_alphabet) for x in range(9)]) +mdb_name = "xx_" + tmp + ".mdb" # generate a non-colliding name for the temporary .mdb +testfolder = setuptestframework.maketemp() + +if "--package" in sys.argv: + # create a new adodbapi module -- running 2to3 if needed. 
+ pth = setuptestframework.makeadopackage(testfolder) +else: + # use the adodbapi module in which this file appears + pth = setuptestframework.find_ado_path() +if pth not in sys.path: + # look here _first_ to find modules + sys.path.insert(1, pth) + +proxy_host = None +for arg in sys.argv: + if arg.startswith("--remote="): + proxy_host = arg.split("=")[1] + import adodbapi.remote as remote + + break + + +# function to clean up the temporary folder -- calling program must run this function before exit. +cleanup = setuptestframework.getcleanupfunction() +try: + import adodbapi # will (hopefully) be imported using the "pth" discovered above +except SyntaxError: + print( + '\n* * * Are you trying to run Python2 code using Python3? Re-run this test using the "--package" switch.' + ) + sys.exit(11) +try: + print(adodbapi.version) # show version +except: + print('"adodbapi.version" not present or not working.') +print(__doc__) + +verbose = False +for a in sys.argv: + if a.startswith("--verbose"): + arg = True + try: + arg = int(a.split("=")[1]) + except IndexError: + pass + adodbapi.adodbapi.verbose = arg + verbose = arg + +doAllTests = "--all" in sys.argv +doAccessTest = not ("--nojet" in sys.argv) +doSqlServerTest = "--mssql" in sys.argv or doAllTests +doMySqlTest = "--mysql" in sys.argv or doAllTests +doPostgresTest = "--pg" in sys.argv or doAllTests +iterateOverTimeTests = ("--time" in sys.argv or doAllTests) and onWindows + +# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # +# # start your environment setup here v v v +SQL_HOST_NODE = "testsql.2txt.us,1430" + +try: # If mx extensions are installed, use mxDateTime + import mx.DateTime + + doMxDateTimeTest = True +except: + doMxDateTimeTest = False # Requires eGenixMXExtensions + +doTimeTest = True # obsolete python time format + +if doAccessTest: + if proxy_host: # determine the (probably remote) database file folder + c = {"macro_find_temp_test_path": ["mdb", mdb_name], 
"proxy_host": proxy_host} + else: + c = {"mdb": setuptestframework.makemdb(testfolder, mdb_name)} + + # macro definition for keyword "provider" using macro "is64bit" -- see documentation + # is64bit will return true for 64 bit versions of Python, so the macro will select the ACE provider + # (If running a remote ADO service, this will test the 64-bitedness of the ADO server.) + c["macro_is64bit"] = [ + "provider", + "Microsoft.ACE.OLEDB.12.0", # 64 bit provider + "Microsoft.Jet.OLEDB.4.0", + ] # 32 bit provider + connStrAccess = "Provider=%(provider)s;Data Source=%(mdb)s" # ;Mode=ReadWrite;Persist Security Info=False;Jet OLEDB:Bypass UserInfo Validation=True" + print( + " ...Testing ACCESS connection to {} file...".format( + c.get("mdb", "remote .mdb") + ) + ) + doAccessTest, connStrAccess, dbAccessconnect = tryconnection.try_connection( + verbose, connStrAccess, 10, **c + ) + +if doSqlServerTest: + c = { + "host": SQL_HOST_NODE, # name of computer with SQL Server + "database": "adotest", + "user": "adotestuser", # None implies Windows security + "password": "Sq1234567", + # macro definition for keyword "security" using macro "auto_security" + "macro_auto_security": "security", + "provider": "MSOLEDBSQL; MARS Connection=True", + } + if proxy_host: + c["proxy_host"] = proxy_host + connStr = "Provider=%(provider)s; Initial Catalog=%(database)s; Data Source=%(host)s; %(security)s;" + print(" ...Testing MS-SQL login to {}...".format(c["host"])) + ( + doSqlServerTest, + connStrSQLServer, + dbSqlServerconnect, + ) = tryconnection.try_connection(verbose, connStr, 30, **c) + +if doMySqlTest: + c = { + "host": "testmysql.2txt.us", + "database": "adodbapitest", + "user": "adotest", + "password": "12345678", + "port": "3330", # note the nonstandard port for obfuscation + "driver": "MySQL ODBC 5.1 Driver", + } # or _driver="MySQL ODBC 3.51 Driver + if proxy_host: + c["proxy_host"] = proxy_host + c["macro_is64bit"] = [ + "provider", + "Provider=MSDASQL;", + ] # turn on the 64 
bit ODBC adapter only if needed + cs = ( + "%(provider)sDriver={%(driver)s};Server=%(host)s;Port=3330;" + + "Database=%(database)s;user=%(user)s;password=%(password)s;Option=3;" + ) + print(" ...Testing MySql login to {}...".format(c["host"])) + doMySqlTest, connStrMySql, dbMySqlconnect = tryconnection.try_connection( + verbose, cs, 5, **c + ) + + +if doPostgresTest: + _computername = "testpg.2txt.us" + _databasename = "adotest" + _username = "adotestuser" + _password = "12345678" + kws = {"timeout": 4} + kws["macro_is64bit"] = [ + "prov_drv", + "Provider=MSDASQL;Driver={PostgreSQL Unicode(x64)}", + "Driver=PostgreSQL Unicode", + ] + # get driver from http://www.postgresql.org/ftp/odbc/versions/ + # test using positional and keyword arguments (bad example for real code) + if proxy_host: + kws["proxy_host"] = proxy_host + print(" ...Testing PostgreSQL login to {}...".format(_computername)) + doPostgresTest, connStrPostgres, dbPostgresConnect = tryconnection.try_connection( + verbose, + "%(prov_drv)s;Server=%(host)s;Database=%(database)s;uid=%(user)s;pwd=%(password)s;port=5430;", # note nonstandard port + _username, + _password, + _computername, + _databasename, + **kws + ) + +assert ( + doAccessTest or doSqlServerTest or doMySqlTest or doPostgresTest +), "No database engine found for testing" diff --git a/lib/adodbapi/test/dbapi20.py b/lib/adodbapi/test/dbapi20.py new file mode 100644 index 00000000..e378b194 --- /dev/null +++ b/lib/adodbapi/test/dbapi20.py @@ -0,0 +1,939 @@ +#!/usr/bin/env python +""" Python DB API 2.0 driver compliance unit test suite. + + This software is Public Domain and may be used without restrictions. + + "Now we have booze and barflies entering the discussion, plus rumours of + DBAs on drugs... and I won't tell you what flashes through my mind each + time I read the subject line with 'Anal Compliance' in it. All around + this is turning out to be a thoroughly unwholesome unit test." 
+ + -- Ian Bicking +""" + +__version__ = "$Revision: 1.15.0 $"[11:-2] +__author__ = "Stuart Bishop " + +import sys +import time +import unittest + +if sys.version[0] >= "3": # python 3.x + _BaseException = Exception + + def _failUnless(self, expr, msg=None): + self.assertTrue(expr, msg) + +else: # python 2.x + from exceptions import Exception as _BaseException + + def _failUnless(self, expr, msg=None): + self.failUnless(expr, msg) ## deprecated since Python 2.6 + + +# set this to "True" to follow API 2.0 to the letter +TEST_FOR_NON_IDEMPOTENT_CLOSE = False + +# Revision 1.15 2019/11/22 00:50:00 kf7xm +# Make Turn off IDEMPOTENT_CLOSE a proper skipTest + +# Revision 1.14 2013/05/20 11:02:05 kf7xm +# Add a literal string to the format insertion test to catch trivial re-format algorithms + +# Revision 1.13 2013/05/08 14:31:50 kf7xm +# Quick switch to Turn off IDEMPOTENT_CLOSE test. Also: Silence teardown failure + + +# Revision 1.12 2009/02/06 03:35:11 kf7xm +# Tested okay with Python 3.0, includes last minute patches from Mark H. +# +# Revision 1.1.1.1.2.1 2008/09/20 19:54:59 rupole +# Include latest changes from main branch +# Updates for py3k +# +# Revision 1.11 2005/01/02 02:41:01 zenzen +# Update author email address +# +# Revision 1.10 2003/10/09 03:14:14 zenzen +# Add test for DB API 2.0 optional extension, where database exceptions +# are exposed as attributes on the Connection object. +# +# Revision 1.9 2003/08/13 01:16:36 zenzen +# Minor tweak from Stefan Fleiter +# +# Revision 1.8 2003/04/10 00:13:25 zenzen +# Changes, as per suggestions by M.-A. 
Lemburg +# - Add a table prefix, to ensure namespace collisions can always be avoided +# +# Revision 1.7 2003/02/26 23:33:37 zenzen +# Break out DDL into helper functions, as per request by David Rushby +# +# Revision 1.6 2003/02/21 03:04:33 zenzen +# Stuff from Henrik Ekelund: +# added test_None +# added test_nextset & hooks +# +# Revision 1.5 2003/02/17 22:08:43 zenzen +# Implement suggestions and code from Henrik Eklund - test that cursor.arraysize +# defaults to 1 & generic cursor.callproc test added +# +# Revision 1.4 2003/02/15 00:16:33 zenzen +# Changes, as per suggestions and bug reports by M.-A. Lemburg, +# Matthew T. Kromer, Federico Di Gregorio and Daniel Dittmar +# - Class renamed +# - Now a subclass of TestCase, to avoid requiring the driver stub +# to use multiple inheritance +# - Reversed the polarity of buggy test in test_description +# - Test exception heirarchy correctly +# - self.populate is now self._populate(), so if a driver stub +# overrides self.ddl1 this change propogates +# - VARCHAR columns now have a width, which will hopefully make the +# DDL even more portible (this will be reversed if it causes more problems) +# - cursor.rowcount being checked after various execute and fetchXXX methods +# - Check for fetchall and fetchmany returning empty lists after results +# are exhausted (already checking for empty lists if select retrieved +# nothing +# - Fix bugs in test_setoutputsize_basic and test_setinputsizes +# +def str2bytes(sval): + if sys.version_info < (3, 0) and isinstance(sval, str): + sval = sval.decode("latin1") + return sval.encode("latin1") # python 3 make unicode into bytes + + +class DatabaseAPI20Test(unittest.TestCase): + """Test a database self.driver for DB API 2.0 compatibility. + This implementation tests Gadfly, but the TestCase + is structured so that other self.drivers can subclass this + test case to ensure compiliance with the DB-API. 
It is + expected that this TestCase may be expanded in the future + if ambiguities or edge conditions are discovered. + + The 'Optional Extensions' are not yet being tested. + + self.drivers should subclass this test, overriding setUp, tearDown, + self.driver, connect_args and connect_kw_args. Class specification + should be as follows: + + import dbapi20 + class mytest(dbapi20.DatabaseAPI20Test): + [...] + + Don't 'import DatabaseAPI20Test from dbapi20', or you will + confuse the unit tester - just 'import dbapi20'. + """ + + # The self.driver module. This should be the module where the 'connect' + # method is to be found + driver = None + connect_args = () # List of arguments to pass to connect + connect_kw_args = {} # Keyword arguments for connect + table_prefix = "dbapi20test_" # If you need to specify a prefix for tables + + ddl1 = "create table %sbooze (name varchar(20))" % table_prefix + ddl2 = "create table %sbarflys (name varchar(20), drink varchar(30))" % table_prefix + xddl1 = "drop table %sbooze" % table_prefix + xddl2 = "drop table %sbarflys" % table_prefix + + lowerfunc = "lower" # Name of stored procedure to convert string->lowercase + + # Some drivers may need to override these helpers, for example adding + # a 'commit' after the execute. + def executeDDL1(self, cursor): + cursor.execute(self.ddl1) + + def executeDDL2(self, cursor): + cursor.execute(self.ddl2) + + def setUp(self): + """self.drivers should override this method to perform required setup + if any is necessary, such as creating the database. + """ + pass + + def tearDown(self): + """self.drivers should override this method to perform required cleanup + if any is necessary, such as deleting the test database. + The default drops the tables that may be created. + """ + try: + con = self._connect() + try: + cur = con.cursor() + for ddl in (self.xddl1, self.xddl2): + try: + cur.execute(ddl) + con.commit() + except self.driver.Error: + # Assume table didn't exist. 
Other tests will check if + # execute is busted. + pass + finally: + con.close() + except _BaseException: + pass + + def _connect(self): + try: + r = self.driver.connect(*self.connect_args, **self.connect_kw_args) + except AttributeError: + self.fail("No connect method found in self.driver module") + return r + + def test_connect(self): + con = self._connect() + con.close() + + def test_apilevel(self): + try: + # Must exist + apilevel = self.driver.apilevel + # Must equal 2.0 + self.assertEqual(apilevel, "2.0") + except AttributeError: + self.fail("Driver doesn't define apilevel") + + def test_threadsafety(self): + try: + # Must exist + threadsafety = self.driver.threadsafety + # Must be a valid value + _failUnless(self, threadsafety in (0, 1, 2, 3)) + except AttributeError: + self.fail("Driver doesn't define threadsafety") + + def test_paramstyle(self): + try: + # Must exist + paramstyle = self.driver.paramstyle + # Must be a valid value + _failUnless( + self, paramstyle in ("qmark", "numeric", "named", "format", "pyformat") + ) + except AttributeError: + self.fail("Driver doesn't define paramstyle") + + def test_Exceptions(self): + # Make sure required exceptions exist, and are in the + # defined heirarchy. 
+        if sys.version[0] == "3":  # under Python 3 StandardError no longer exists
+            self.assertTrue(issubclass(self.driver.Warning, Exception))
+            self.assertTrue(issubclass(self.driver.Error, Exception))
+        else:
+            self.failUnless(issubclass(self.driver.Warning, Exception))
+            self.failUnless(issubclass(self.driver.Error, Exception))
+
+        _failUnless(self, issubclass(self.driver.InterfaceError, self.driver.Error))
+        _failUnless(self, issubclass(self.driver.DatabaseError, self.driver.Error))
+        _failUnless(self, issubclass(self.driver.OperationalError, self.driver.Error))
+        _failUnless(self, issubclass(self.driver.IntegrityError, self.driver.Error))
+        _failUnless(self, issubclass(self.driver.InternalError, self.driver.Error))
+        _failUnless(self, issubclass(self.driver.ProgrammingError, self.driver.Error))
+        _failUnless(self, issubclass(self.driver.NotSupportedError, self.driver.Error))
+
+    def test_ExceptionsAsConnectionAttributes(self):
+        # OPTIONAL EXTENSION
+        # Test for the optional DB API 2.0 extension, where the exceptions
+        # are exposed as attributes on the Connection object
+        # I figure this optional extension will be implemented by any
+        # driver author who is using this test suite, so it is enabled
+        # by default.
+ con = self._connect() + drv = self.driver + _failUnless(self, con.Warning is drv.Warning) + _failUnless(self, con.Error is drv.Error) + _failUnless(self, con.InterfaceError is drv.InterfaceError) + _failUnless(self, con.DatabaseError is drv.DatabaseError) + _failUnless(self, con.OperationalError is drv.OperationalError) + _failUnless(self, con.IntegrityError is drv.IntegrityError) + _failUnless(self, con.InternalError is drv.InternalError) + _failUnless(self, con.ProgrammingError is drv.ProgrammingError) + _failUnless(self, con.NotSupportedError is drv.NotSupportedError) + + def test_commit(self): + con = self._connect() + try: + # Commit must work, even if it doesn't do anything + con.commit() + finally: + con.close() + + def test_rollback(self): + con = self._connect() + # If rollback is defined, it should either work or throw + # the documented exception + if hasattr(con, "rollback"): + try: + con.rollback() + except self.driver.NotSupportedError: + pass + + def test_cursor(self): + con = self._connect() + try: + cur = con.cursor() + finally: + con.close() + + def test_cursor_isolation(self): + con = self._connect() + try: + # Make sure cursors created from the same connection have + # the documented transaction isolation level + cur1 = con.cursor() + cur2 = con.cursor() + self.executeDDL1(cur1) + cur1.execute( + "insert into %sbooze values ('Victoria Bitter')" % (self.table_prefix) + ) + cur2.execute("select name from %sbooze" % self.table_prefix) + booze = cur2.fetchall() + self.assertEqual(len(booze), 1) + self.assertEqual(len(booze[0]), 1) + self.assertEqual(booze[0][0], "Victoria Bitter") + finally: + con.close() + + def test_description(self): + con = self._connect() + try: + cur = con.cursor() + self.executeDDL1(cur) + self.assertEqual( + cur.description, + None, + "cursor.description should be none after executing a " + "statement that can return no rows (such as DDL)", + ) + cur.execute("select name from %sbooze" % self.table_prefix) + 
self.assertEqual( + len(cur.description), 1, "cursor.description describes too many columns" + ) + self.assertEqual( + len(cur.description[0]), + 7, + "cursor.description[x] tuples must have 7 elements", + ) + self.assertEqual( + cur.description[0][0].lower(), + "name", + "cursor.description[x][0] must return column name", + ) + self.assertEqual( + cur.description[0][1], + self.driver.STRING, + "cursor.description[x][1] must return column type. Got %r" + % cur.description[0][1], + ) + + # Make sure self.description gets reset + self.executeDDL2(cur) + self.assertEqual( + cur.description, + None, + "cursor.description not being set to None when executing " + "no-result statements (eg. DDL)", + ) + finally: + con.close() + + def test_rowcount(self): + con = self._connect() + try: + cur = con.cursor() + self.executeDDL1(cur) + _failUnless( + self, + cur.rowcount in (-1, 0), # Bug #543885 + "cursor.rowcount should be -1 or 0 after executing no-result " + "statements", + ) + cur.execute( + "insert into %sbooze values ('Victoria Bitter')" % (self.table_prefix) + ) + _failUnless( + self, + cur.rowcount in (-1, 1), + "cursor.rowcount should == number or rows inserted, or " + "set to -1 after executing an insert statement", + ) + cur.execute("select name from %sbooze" % self.table_prefix) + _failUnless( + self, + cur.rowcount in (-1, 1), + "cursor.rowcount should == number of rows returned, or " + "set to -1 after executing a select statement", + ) + self.executeDDL2(cur) + self.assertEqual( + cur.rowcount, + -1, + "cursor.rowcount not being reset to -1 after executing " + "no-result statements", + ) + finally: + con.close() + + lower_func = "lower" + + def test_callproc(self): + con = self._connect() + try: + cur = con.cursor() + if self.lower_func and hasattr(cur, "callproc"): + r = cur.callproc(self.lower_func, ("FOO",)) + self.assertEqual(len(r), 1) + self.assertEqual(r[0], "FOO") + r = cur.fetchall() + self.assertEqual(len(r), 1, "callproc produced no result set") + 
self.assertEqual(len(r[0]), 1, "callproc produced invalid result set") + self.assertEqual(r[0][0], "foo", "callproc produced invalid results") + finally: + con.close() + + def test_close(self): + con = self._connect() + try: + cur = con.cursor() + finally: + con.close() + + # cursor.execute should raise an Error if called after connection + # closed + self.assertRaises(self.driver.Error, self.executeDDL1, cur) + + # connection.commit should raise an Error if called after connection' + # closed.' + self.assertRaises(self.driver.Error, con.commit) + + # connection.close should raise an Error if called more than once + #!!! reasonable persons differ about the usefulness of this test and this feature !!! + if TEST_FOR_NON_IDEMPOTENT_CLOSE: + self.assertRaises(self.driver.Error, con.close) + else: + self.skipTest( + "Non-idempotent close is considered a bad thing by some people." + ) + + def test_execute(self): + con = self._connect() + try: + cur = con.cursor() + self._paraminsert(cur) + finally: + con.close() + + def _paraminsert(self, cur): + self.executeDDL2(cur) + cur.execute( + "insert into %sbarflys values ('Victoria Bitter', 'thi%%s :may ca%%(u)se? troub:1e')" + % (self.table_prefix) + ) + _failUnless(self, cur.rowcount in (-1, 1)) + + if self.driver.paramstyle == "qmark": + cur.execute( + "insert into %sbarflys values (?, 'thi%%s :may ca%%(u)se? troub:1e')" + % self.table_prefix, + ("Cooper's",), + ) + elif self.driver.paramstyle == "numeric": + cur.execute( + "insert into %sbarflys values (:1, 'thi%%s :may ca%%(u)se? troub:1e')" + % self.table_prefix, + ("Cooper's",), + ) + elif self.driver.paramstyle == "named": + cur.execute( + "insert into %sbarflys values (:beer, 'thi%%s :may ca%%(u)se? troub:1e')" + % self.table_prefix, + {"beer": "Cooper's"}, + ) + elif self.driver.paramstyle == "format": + cur.execute( + "insert into %sbarflys values (%%s, 'thi%%s :may ca%%(u)se? 
troub:1e')" + % self.table_prefix, + ("Cooper's",), + ) + elif self.driver.paramstyle == "pyformat": + cur.execute( + "insert into %sbarflys values (%%(beer)s, 'thi%%s :may ca%%(u)se? troub:1e')" + % self.table_prefix, + {"beer": "Cooper's"}, + ) + else: + self.fail("Invalid paramstyle") + _failUnless(self, cur.rowcount in (-1, 1)) + + cur.execute("select name, drink from %sbarflys" % self.table_prefix) + res = cur.fetchall() + self.assertEqual(len(res), 2, "cursor.fetchall returned too few rows") + beers = [res[0][0], res[1][0]] + beers.sort() + self.assertEqual( + beers[0], + "Cooper's", + "cursor.fetchall retrieved incorrect data, or data inserted " "incorrectly", + ) + self.assertEqual( + beers[1], + "Victoria Bitter", + "cursor.fetchall retrieved incorrect data, or data inserted " "incorrectly", + ) + trouble = "thi%s :may ca%(u)se? troub:1e" + self.assertEqual( + res[0][1], + trouble, + "cursor.fetchall retrieved incorrect data, or data inserted " + "incorrectly. Got=%s, Expected=%s" % (repr(res[0][1]), repr(trouble)), + ) + self.assertEqual( + res[1][1], + trouble, + "cursor.fetchall retrieved incorrect data, or data inserted " + "incorrectly. 
Got=%s, Expected=%s" % (repr(res[1][1]), repr(trouble)), + ) + + def test_executemany(self): + con = self._connect() + try: + cur = con.cursor() + self.executeDDL1(cur) + largs = [("Cooper's",), ("Boag's",)] + margs = [{"beer": "Cooper's"}, {"beer": "Boag's"}] + if self.driver.paramstyle == "qmark": + cur.executemany( + "insert into %sbooze values (?)" % self.table_prefix, largs + ) + elif self.driver.paramstyle == "numeric": + cur.executemany( + "insert into %sbooze values (:1)" % self.table_prefix, largs + ) + elif self.driver.paramstyle == "named": + cur.executemany( + "insert into %sbooze values (:beer)" % self.table_prefix, margs + ) + elif self.driver.paramstyle == "format": + cur.executemany( + "insert into %sbooze values (%%s)" % self.table_prefix, largs + ) + elif self.driver.paramstyle == "pyformat": + cur.executemany( + "insert into %sbooze values (%%(beer)s)" % (self.table_prefix), + margs, + ) + else: + self.fail("Unknown paramstyle") + _failUnless( + self, + cur.rowcount in (-1, 2), + "insert using cursor.executemany set cursor.rowcount to " + "incorrect value %r" % cur.rowcount, + ) + cur.execute("select name from %sbooze" % self.table_prefix) + res = cur.fetchall() + self.assertEqual( + len(res), 2, "cursor.fetchall retrieved incorrect number of rows" + ) + beers = [res[0][0], res[1][0]] + beers.sort() + self.assertEqual( + beers[0], "Boag's", 'incorrect data "%s" retrieved' % beers[0] + ) + self.assertEqual(beers[1], "Cooper's", "incorrect data retrieved") + finally: + con.close() + + def test_fetchone(self): + con = self._connect() + try: + cur = con.cursor() + + # cursor.fetchone should raise an Error if called before + # executing a select-type query + self.assertRaises(self.driver.Error, cur.fetchone) + + # cursor.fetchone should raise an Error if called after + # executing a query that cannnot return rows + self.executeDDL1(cur) + self.assertRaises(self.driver.Error, cur.fetchone) + + cur.execute("select name from %sbooze" % self.table_prefix) 
+ self.assertEqual( + cur.fetchone(), + None, + "cursor.fetchone should return None if a query retrieves " "no rows", + ) + _failUnless(self, cur.rowcount in (-1, 0)) + + # cursor.fetchone should raise an Error if called after + # executing a query that cannnot return rows + cur.execute( + "insert into %sbooze values ('Victoria Bitter')" % (self.table_prefix) + ) + self.assertRaises(self.driver.Error, cur.fetchone) + + cur.execute("select name from %sbooze" % self.table_prefix) + r = cur.fetchone() + self.assertEqual( + len(r), 1, "cursor.fetchone should have retrieved a single row" + ) + self.assertEqual( + r[0], "Victoria Bitter", "cursor.fetchone retrieved incorrect data" + ) + self.assertEqual( + cur.fetchone(), + None, + "cursor.fetchone should return None if no more rows available", + ) + _failUnless(self, cur.rowcount in (-1, 1)) + finally: + con.close() + + samples = [ + "Carlton Cold", + "Carlton Draft", + "Mountain Goat", + "Redback", + "Victoria Bitter", + "XXXX", + ] + + def _populate(self): + """Return a list of sql commands to setup the DB for the fetch + tests. 
+ """ + populate = [ + "insert into %sbooze values ('%s')" % (self.table_prefix, s) + for s in self.samples + ] + return populate + + def test_fetchmany(self): + con = self._connect() + try: + cur = con.cursor() + + # cursor.fetchmany should raise an Error if called without + # issuing a query + self.assertRaises(self.driver.Error, cur.fetchmany, 4) + + self.executeDDL1(cur) + for sql in self._populate(): + cur.execute(sql) + + cur.execute("select name from %sbooze" % self.table_prefix) + r = cur.fetchmany() + self.assertEqual( + len(r), + 1, + "cursor.fetchmany retrieved incorrect number of rows, " + "default of arraysize is one.", + ) + cur.arraysize = 10 + r = cur.fetchmany(3) # Should get 3 rows + self.assertEqual( + len(r), 3, "cursor.fetchmany retrieved incorrect number of rows" + ) + r = cur.fetchmany(4) # Should get 2 more + self.assertEqual( + len(r), 2, "cursor.fetchmany retrieved incorrect number of rows" + ) + r = cur.fetchmany(4) # Should be an empty sequence + self.assertEqual( + len(r), + 0, + "cursor.fetchmany should return an empty sequence after " + "results are exhausted", + ) + _failUnless(self, cur.rowcount in (-1, 6)) + + # Same as above, using cursor.arraysize + cur.arraysize = 4 + cur.execute("select name from %sbooze" % self.table_prefix) + r = cur.fetchmany() # Should get 4 rows + self.assertEqual( + len(r), 4, "cursor.arraysize not being honoured by fetchmany" + ) + r = cur.fetchmany() # Should get 2 more + self.assertEqual(len(r), 2) + r = cur.fetchmany() # Should be an empty sequence + self.assertEqual(len(r), 0) + _failUnless(self, cur.rowcount in (-1, 6)) + + cur.arraysize = 6 + cur.execute("select name from %sbooze" % self.table_prefix) + rows = cur.fetchmany() # Should get all rows + _failUnless(self, cur.rowcount in (-1, 6)) + self.assertEqual(len(rows), 6) + self.assertEqual(len(rows), 6) + rows = [r[0] for r in rows] + rows.sort() + + # Make sure we get the right data back out + for i in range(0, 6): + self.assertEqual( + 
rows[i], + self.samples[i], + "incorrect data retrieved by cursor.fetchmany", + ) + + rows = cur.fetchmany() # Should return an empty list + self.assertEqual( + len(rows), + 0, + "cursor.fetchmany should return an empty sequence if " + "called after the whole result set has been fetched", + ) + _failUnless(self, cur.rowcount in (-1, 6)) + + self.executeDDL2(cur) + cur.execute("select name from %sbarflys" % self.table_prefix) + r = cur.fetchmany() # Should get empty sequence + self.assertEqual( + len(r), + 0, + "cursor.fetchmany should return an empty sequence if " + "query retrieved no rows", + ) + _failUnless(self, cur.rowcount in (-1, 0)) + + finally: + con.close() + + def test_fetchall(self): + con = self._connect() + try: + cur = con.cursor() + # cursor.fetchall should raise an Error if called + # without executing a query that may return rows (such + # as a select) + self.assertRaises(self.driver.Error, cur.fetchall) + + self.executeDDL1(cur) + for sql in self._populate(): + cur.execute(sql) + + # cursor.fetchall should raise an Error if called + # after executing a a statement that cannot return rows + self.assertRaises(self.driver.Error, cur.fetchall) + + cur.execute("select name from %sbooze" % self.table_prefix) + rows = cur.fetchall() + _failUnless(self, cur.rowcount in (-1, len(self.samples))) + self.assertEqual( + len(rows), + len(self.samples), + "cursor.fetchall did not retrieve all rows", + ) + rows = [r[0] for r in rows] + rows.sort() + for i in range(0, len(self.samples)): + self.assertEqual( + rows[i], self.samples[i], "cursor.fetchall retrieved incorrect rows" + ) + rows = cur.fetchall() + self.assertEqual( + len(rows), + 0, + "cursor.fetchall should return an empty list if called " + "after the whole result set has been fetched", + ) + _failUnless(self, cur.rowcount in (-1, len(self.samples))) + + self.executeDDL2(cur) + cur.execute("select name from %sbarflys" % self.table_prefix) + rows = cur.fetchall() + _failUnless(self, cur.rowcount in (-1, 
0)) + self.assertEqual( + len(rows), + 0, + "cursor.fetchall should return an empty list if " + "a select query returns no rows", + ) + + finally: + con.close() + + def test_mixedfetch(self): + con = self._connect() + try: + cur = con.cursor() + self.executeDDL1(cur) + for sql in self._populate(): + cur.execute(sql) + + cur.execute("select name from %sbooze" % self.table_prefix) + rows1 = cur.fetchone() + rows23 = cur.fetchmany(2) + rows4 = cur.fetchone() + rows56 = cur.fetchall() + _failUnless(self, cur.rowcount in (-1, 6)) + self.assertEqual( + len(rows23), 2, "fetchmany returned incorrect number of rows" + ) + self.assertEqual( + len(rows56), 2, "fetchall returned incorrect number of rows" + ) + + rows = [rows1[0]] + rows.extend([rows23[0][0], rows23[1][0]]) + rows.append(rows4[0]) + rows.extend([rows56[0][0], rows56[1][0]]) + rows.sort() + for i in range(0, len(self.samples)): + self.assertEqual( + rows[i], self.samples[i], "incorrect data retrieved or inserted" + ) + finally: + con.close() + + def help_nextset_setUp(self, cur): + """Should create a procedure called deleteme + that returns two result sets, first the + number of rows in booze then "name from booze" + """ + raise NotImplementedError("Helper not implemented") + # sql=""" + # create procedure deleteme as + # begin + # select count(*) from booze + # select name from booze + # end + # """ + # cur.execute(sql) + + def help_nextset_tearDown(self, cur): + "If cleaning up is needed after nextSetTest" + raise NotImplementedError("Helper not implemented") + # cur.execute("drop procedure deleteme") + + def test_nextset(self): + con = self._connect() + try: + cur = con.cursor() + if not hasattr(cur, "nextset"): + return + + try: + self.executeDDL1(cur) + sql = self._populate() + for sql in self._populate(): + cur.execute(sql) + + self.help_nextset_setUp(cur) + + cur.callproc("deleteme") + numberofrows = cur.fetchone() + assert numberofrows[0] == len(self.samples) + assert cur.nextset() + names = 
cur.fetchall() + assert len(names) == len(self.samples) + s = cur.nextset() + assert s == None, "No more return sets, should return None" + finally: + self.help_nextset_tearDown(cur) + + finally: + con.close() + + def test_nextset(self): + raise NotImplementedError("Drivers need to override this test") + + def test_arraysize(self): + # Not much here - rest of the tests for this are in test_fetchmany + con = self._connect() + try: + cur = con.cursor() + _failUnless( + self, hasattr(cur, "arraysize"), "cursor.arraysize must be defined" + ) + finally: + con.close() + + def test_setinputsizes(self): + con = self._connect() + try: + cur = con.cursor() + cur.setinputsizes((25,)) + self._paraminsert(cur) # Make sure cursor still works + finally: + con.close() + + def test_setoutputsize_basic(self): + # Basic test is to make sure setoutputsize doesn't blow up + con = self._connect() + try: + cur = con.cursor() + cur.setoutputsize(1000) + cur.setoutputsize(2000, 0) + self._paraminsert(cur) # Make sure the cursor still works + finally: + con.close() + + def test_setoutputsize(self): + # Real test for setoutputsize is driver dependant + raise NotImplementedError("Driver needed to override this test") + + def test_None(self): + con = self._connect() + try: + cur = con.cursor() + self.executeDDL1(cur) + cur.execute("insert into %sbooze values (NULL)" % self.table_prefix) + cur.execute("select name from %sbooze" % self.table_prefix) + r = cur.fetchall() + self.assertEqual(len(r), 1) + self.assertEqual(len(r[0]), 1) + self.assertEqual(r[0][0], None, "NULL value not returned as None") + finally: + con.close() + + def test_Date(self): + d1 = self.driver.Date(2002, 12, 25) + d2 = self.driver.DateFromTicks(time.mktime((2002, 12, 25, 0, 0, 0, 0, 0, 0))) + # Can we assume this? 
API doesn't specify, but it seems implied + # self.assertEqual(str(d1),str(d2)) + + def test_Time(self): + t1 = self.driver.Time(13, 45, 30) + t2 = self.driver.TimeFromTicks(time.mktime((2001, 1, 1, 13, 45, 30, 0, 0, 0))) + # Can we assume this? API doesn't specify, but it seems implied + # self.assertEqual(str(t1),str(t2)) + + def test_Timestamp(self): + t1 = self.driver.Timestamp(2002, 12, 25, 13, 45, 30) + t2 = self.driver.TimestampFromTicks( + time.mktime((2002, 12, 25, 13, 45, 30, 0, 0, 0)) + ) + # Can we assume this? API doesn't specify, but it seems implied + # self.assertEqual(str(t1),str(t2)) + + def test_Binary(self): + b = self.driver.Binary(str2bytes("Something")) + b = self.driver.Binary(str2bytes("")) + + def test_STRING(self): + _failUnless( + self, hasattr(self.driver, "STRING"), "module.STRING must be defined" + ) + + def test_BINARY(self): + _failUnless( + self, hasattr(self.driver, "BINARY"), "module.BINARY must be defined." + ) + + def test_NUMBER(self): + _failUnless( + self, hasattr(self.driver, "NUMBER"), "module.NUMBER must be defined." + ) + + def test_DATETIME(self): + _failUnless( + self, hasattr(self.driver, "DATETIME"), "module.DATETIME must be defined." + ) + + def test_ROWID(self): + _failUnless( + self, hasattr(self.driver, "ROWID"), "module.ROWID must be defined." + ) diff --git a/lib/adodbapi/test/is64bit.py b/lib/adodbapi/test/is64bit.py new file mode 100644 index 00000000..39834540 --- /dev/null +++ b/lib/adodbapi/test/is64bit.py @@ -0,0 +1,41 @@ +"""is64bit.Python() --> boolean value of detected Python word size. is64bit.os() --> os build version""" +import sys + + +def Python(): + if sys.platform == "cli": # IronPython + import System + + return System.IntPtr.Size == 8 + else: + try: + return sys.maxsize > 2147483647 + except AttributeError: + return sys.maxint > 2147483647 + + +def os(): + import platform + + pm = platform.machine() + if pm != ".." 
and pm.endswith("64"): # recent Python (not Iron) + return True + else: + import os + + if "PROCESSOR_ARCHITEW6432" in os.environ: + return True # 32 bit program running on 64 bit Windows + try: + return os.environ["PROCESSOR_ARCHITECTURE"].endswith( + "64" + ) # 64 bit Windows 64 bit program + except IndexError: + pass # not Windows + try: + return "64" in platform.architecture()[0] # this often works in Linux + except: + return False # is an older version of Python, assume also an older os (best we can guess) + + +if __name__ == "__main__": + print("is64bit.Python() =", Python(), "is64bit.os() =", os()) diff --git a/lib/adodbapi/test/setuptestframework.py b/lib/adodbapi/test/setuptestframework.py new file mode 100644 index 00000000..fcaaf1ae --- /dev/null +++ b/lib/adodbapi/test/setuptestframework.py @@ -0,0 +1,134 @@ +#!/usr/bin/python2 +# Configure this in order to run the testcases. +"setuptestframework.py v 2.6.0.8" +import os +import shutil +import sys +import tempfile + +try: + OSErrors = (WindowsError, OSError) +except NameError: # not running on Windows + OSErrors = OSError + + +def maketemp(): + temphome = tempfile.gettempdir() + tempdir = os.path.join(temphome, "adodbapi_test") + try: + os.mkdir(tempdir) + except: + pass + return tempdir + + +def _cleanup_function(testfolder, mdb_name): + try: + os.unlink(os.path.join(testfolder, mdb_name)) + except: + pass # mdb database not present + try: + shutil.rmtree(testfolder) + print(" cleaned up folder", testfolder) + except: + pass # test package not present + + +def getcleanupfunction(): + return _cleanup_function + + +def find_ado_path(): + adoName = os.path.normpath(os.getcwd() + "/../../adodbapi.py") + adoPackage = os.path.dirname(adoName) + return adoPackage + + +# make a new package directory for the test copy of ado +def makeadopackage(testfolder): + adoName = os.path.normpath(os.getcwd() + "/../adodbapi.py") + adoPath = os.path.dirname(adoName) + if os.path.exists(adoName): + newpackage = 
os.path.join(testfolder, "adodbapi") + try: + os.mkdir(newpackage) + except OSErrors: + print( + "*Note: temporary adodbapi package already exists: may be two versions running?" + ) + for f in os.listdir(adoPath): + if f.endswith(".py"): + shutil.copy(os.path.join(adoPath, f), newpackage) + if sys.version_info >= (3, 0): # only when running Py3.n + save = sys.stdout + sys.stdout = None + from lib2to3.main import main # use 2to3 to make test package + + main("lib2to3.fixes", args=["-n", "-w", newpackage]) + sys.stdout = save + return testfolder + else: + raise EnvironmentError("Connot find source of adodbapi to test.") + + +def makemdb(testfolder, mdb_name): + # following setup code borrowed from pywin32 odbc test suite + # kindly contributed by Frank Millman. + import os + + _accessdatasource = os.path.join(testfolder, mdb_name) + if os.path.isfile(_accessdatasource): + print("using JET database=", _accessdatasource) + else: + try: + from win32com.client import constants + from win32com.client.gencache import EnsureDispatch + + win32 = True + except ImportError: # perhaps we are running IronPython + win32 = False # iron Python + try: + from System import Activator, Type + except: + pass + + # Create a brand-new database - what is the story with these? 
+ dbe = None + for suffix in (".36", ".35", ".30"): + try: + if win32: + dbe = EnsureDispatch("DAO.DBEngine" + suffix) + else: + type = Type.GetTypeFromProgID("DAO.DBEngine" + suffix) + dbe = Activator.CreateInstance(type) + break + except: + pass + if dbe: + print(" ...Creating ACCESS db at " + _accessdatasource) + if win32: + workspace = dbe.Workspaces(0) + newdb = workspace.CreateDatabase( + _accessdatasource, constants.dbLangGeneral, constants.dbVersion40 + ) + else: + newdb = dbe.CreateDatabase( + _accessdatasource, ";LANGID=0x0409;CP=1252;COUNTRY=0" + ) + newdb.Close() + else: + print(" ...copying test ACCESS db to " + _accessdatasource) + mdbName = os.path.abspath( + os.path.join(os.path.dirname(__file__), "..", "examples", "test.mdb") + ) + import shutil + + shutil.copy(mdbName, _accessdatasource) + + return _accessdatasource + + +if __name__ == "__main__": + print("Setting up a Jet database for server to use for remote testing...") + temp = maketemp() + makemdb(temp, "server_test.mdb") diff --git a/lib/adodbapi/test/test_adodbapi_dbapi20.py b/lib/adodbapi/test/test_adodbapi_dbapi20.py new file mode 100644 index 00000000..f8986484 --- /dev/null +++ b/lib/adodbapi/test/test_adodbapi_dbapi20.py @@ -0,0 +1,200 @@ +print("This module depends on the dbapi20 compliance tests created by Stuart Bishop") +print("(see db-sig mailing list history for info)") +import platform +import sys +import unittest + +import dbapi20 +import setuptestframework + +testfolder = setuptestframework.maketemp() +if "--package" in sys.argv: + pth = setuptestframework.makeadopackage(testfolder) + sys.argv.remove("--package") +else: + pth = setuptestframework.find_ado_path() +if pth not in sys.path: + sys.path.insert(1, pth) +# function to clean up the temporary folder -- calling program must run this function before exit. 
+cleanup = setuptestframework.getcleanupfunction() + +import adodbapi +import adodbapi.is64bit as is64bit + +db = adodbapi + +if "--verbose" in sys.argv: + db.adodbapi.verbose = 3 + +print(adodbapi.version) +print("Tested with dbapi20 %s" % dbapi20.__version__) + +try: + onWindows = bool(sys.getwindowsversion()) # seems to work on all versions of Python +except: + onWindows = False + +node = platform.node() + +conn_kws = {} +host = "testsql.2txt.us,1430" # if None, will use macro to fill in node name +instance = r"%s\SQLEXPRESS" +conn_kws["name"] = "adotest" + +conn_kws["user"] = "adotestuser" # None implies Windows security +conn_kws["password"] = "Sq1234567" +# macro definition for keyword "security" using macro "auto_security" +conn_kws["macro_auto_security"] = "security" + +if host is None: + conn_kws["macro_getnode"] = ["host", instance] +else: + conn_kws["host"] = host + +conn_kws[ + "provider" +] = "Provider=MSOLEDBSQL;DataTypeCompatibility=80;MARS Connection=True;" +connStr = "%(provider)s; %(security)s; Initial Catalog=%(name)s;Data Source=%(host)s" + +if onWindows and node != "z-PC": + pass # default should make a local SQL Server connection +elif node == "xxx": # try Postgres database + _computername = "25.223.161.222" + _databasename = "adotest" + _username = "adotestuser" + _password = "12345678" + _driver = "PostgreSQL Unicode" + _provider = "" + connStr = "%sDriver={%s};Server=%s;Database=%s;uid=%s;pwd=%s;" % ( + _provider, + _driver, + _computername, + _databasename, + _username, + _password, + ) +elif node == "yyy": # ACCESS data base is known to fail some tests. 
+ if is64bit.Python(): + driver = "Microsoft.ACE.OLEDB.12.0" + else: + driver = "Microsoft.Jet.OLEDB.4.0" + testmdb = setuptestframework.makemdb(testfolder) + connStr = r"Provider=%s;Data Source=%s" % (driver, testmdb) +else: # try a remote connection to an SQL server + conn_kws["proxy_host"] = "25.44.77.176" + import adodbapi.remote + + db = adodbapi.remote + +print("Using Connection String like=%s" % connStr) +print("Keywords=%s" % repr(conn_kws)) + + +class test_adodbapi(dbapi20.DatabaseAPI20Test): + driver = db + connect_args = (connStr,) + connect_kw_args = conn_kws + + def __init__(self, arg): + dbapi20.DatabaseAPI20Test.__init__(self, arg) + + def getTestMethodName(self): + return self.id().split(".")[-1] + + def setUp(self): + # Call superclass setUp In case this does something in the + # future + dbapi20.DatabaseAPI20Test.setUp(self) + if self.getTestMethodName() == "test_callproc": + con = self._connect() + engine = con.dbms_name + ## print('Using database Engine=%s' % engine) ## + if engine != "MS Jet": + sql = """ + create procedure templower + @theData varchar(50) + as + select lower(@theData) + """ + else: # Jet + sql = """ + create procedure templower + (theData varchar(50)) + as + select lower(theData); + """ + cur = con.cursor() + try: + cur.execute(sql) + con.commit() + except: + pass + cur.close() + con.close() + self.lower_func = "templower" + + def tearDown(self): + if self.getTestMethodName() == "test_callproc": + con = self._connect() + cur = con.cursor() + try: + cur.execute("drop procedure templower") + except: + pass + con.commit() + dbapi20.DatabaseAPI20Test.tearDown(self) + + def help_nextset_setUp(self, cur): + "Should create a procedure called deleteme" + 'that returns two result sets, first the number of rows in booze then "name from booze"' + sql = """ + create procedure deleteme as + begin + select count(*) from %sbooze + select name from %sbooze + end + """ % ( + self.table_prefix, + self.table_prefix, + ) + cur.execute(sql) + + 
def help_nextset_tearDown(self, cur): + "If cleaning up is needed after nextSetTest" + try: + cur.execute("drop procedure deleteme") + except: + pass + + def test_nextset(self): + con = self._connect() + try: + cur = con.cursor() + + stmts = [self.ddl1] + self._populate() + for sql in stmts: + cur.execute(sql) + + self.help_nextset_setUp(cur) + + cur.callproc("deleteme") + numberofrows = cur.fetchone() + assert numberofrows[0] == 6 + assert cur.nextset() + names = cur.fetchall() + assert len(names) == len(self.samples) + s = cur.nextset() + assert s == None, "No more return sets, should return None" + finally: + try: + self.help_nextset_tearDown(cur) + finally: + con.close() + + def test_setoutputsize(self): + pass + + +if __name__ == "__main__": + unittest.main() + cleanup(testfolder, None) diff --git a/lib/adodbapi/test/tryconnection.py b/lib/adodbapi/test/tryconnection.py new file mode 100644 index 00000000..9d3901a8 --- /dev/null +++ b/lib/adodbapi/test/tryconnection.py @@ -0,0 +1,33 @@ +remote = False # automatic testing of remote access has been removed here + + +def try_connection(verbose, *args, **kwargs): + import adodbapi + + dbconnect = adodbapi.connect + try: + s = dbconnect(*args, **kwargs) # connect to server + if verbose: + print("Connected to:", s.connection_string) + print("which has tables:", s.get_table_names()) + s.close() # thanks, it worked, goodbye + except adodbapi.DatabaseError as inst: + print(inst.args[0]) # should be the error message + print("***Failed getting connection using=", repr(args), repr(kwargs)) + return False, (args, kwargs), None + + print(" (successful)") + + return True, (args, kwargs, remote), dbconnect + + +def try_operation_with_expected_exception( + expected_exception_list, some_function, *args, **kwargs +): + try: + some_function(*args, **kwargs) + except expected_exception_list as e: + return True, e + except: + raise # an exception other than the expected occurred + return False, "The expected exception did not 
occur" diff --git a/lib/annotated_types/__init__.py b/lib/annotated_types/__init__.py new file mode 100644 index 00000000..2f989504 --- /dev/null +++ b/lib/annotated_types/__init__.py @@ -0,0 +1,396 @@ +import math +import sys +from dataclasses import dataclass +from datetime import timezone +from typing import TYPE_CHECKING, Any, Callable, Iterator, Optional, SupportsFloat, SupportsIndex, TypeVar, Union + +if sys.version_info < (3, 8): + from typing_extensions import Protocol, runtime_checkable +else: + from typing import Protocol, runtime_checkable + +if sys.version_info < (3, 9): + from typing_extensions import Annotated, Literal +else: + from typing import Annotated, Literal + +if sys.version_info < (3, 10): + EllipsisType = type(Ellipsis) + KW_ONLY = {} + SLOTS = {} +else: + from types import EllipsisType + + KW_ONLY = {"kw_only": True} + SLOTS = {"slots": True} + + +__all__ = ( + 'BaseMetadata', + 'GroupedMetadata', + 'Gt', + 'Ge', + 'Lt', + 'Le', + 'Interval', + 'MultipleOf', + 'MinLen', + 'MaxLen', + 'Len', + 'Timezone', + 'Predicate', + 'LowerCase', + 'UpperCase', + 'IsDigits', + 'IsFinite', + 'IsNotFinite', + 'IsNan', + 'IsNotNan', + 'IsInfinite', + 'IsNotInfinite', + 'doc', + 'DocInfo', + '__version__', +) + +__version__ = '0.6.0' + + +T = TypeVar('T') + + +# arguments that start with __ are considered +# positional only +# see https://peps.python.org/pep-0484/#positional-only-arguments + + +class SupportsGt(Protocol): + def __gt__(self: T, __other: T) -> bool: + ... + + +class SupportsGe(Protocol): + def __ge__(self: T, __other: T) -> bool: + ... + + +class SupportsLt(Protocol): + def __lt__(self: T, __other: T) -> bool: + ... + + +class SupportsLe(Protocol): + def __le__(self: T, __other: T) -> bool: + ... + + +class SupportsMod(Protocol): + def __mod__(self: T, __other: T) -> T: + ... + + +class SupportsDiv(Protocol): + def __div__(self: T, __other: T) -> T: + ... + + +class BaseMetadata: + """Base class for all metadata. 
+
+    This exists mainly so that implementers
+    can do `isinstance(..., BaseMetadata)` while traversing field annotations.
+    """
+
+    __slots__ = ()
+
+
+@dataclass(frozen=True, **SLOTS)
+class Gt(BaseMetadata):
+    """Gt(gt=x) implies that the value must be greater than x.
+
+    It can be used with any type that supports the ``>`` operator,
+    including numbers, dates and times, strings, sets, and so on.
+    """
+
+    gt: SupportsGt
+
+
+@dataclass(frozen=True, **SLOTS)
+class Ge(BaseMetadata):
+    """Ge(ge=x) implies that the value must be greater than or equal to x.
+
+    It can be used with any type that supports the ``>=`` operator,
+    including numbers, dates and times, strings, sets, and so on.
+    """
+
+    ge: SupportsGe
+
+
+@dataclass(frozen=True, **SLOTS)
+class Lt(BaseMetadata):
+    """Lt(lt=x) implies that the value must be less than x.
+
+    It can be used with any type that supports the ``<`` operator,
+    including numbers, dates and times, strings, sets, and so on.
+    """
+
+    lt: SupportsLt
+
+
+@dataclass(frozen=True, **SLOTS)
+class Le(BaseMetadata):
+    """Le(le=x) implies that the value must be less than or equal to x.
+
+    It can be used with any type that supports the ``<=`` operator,
+    including numbers, dates and times, strings, sets, and so on.
+    """
+
+    le: SupportsLe
+
+
+@runtime_checkable
+class GroupedMetadata(Protocol):
+    """A grouping of multiple BaseMetadata objects.
+
+    `GroupedMetadata` on its own is not metadata and has no meaning.
+    All of the constraints and metadata should be fully expressible
+    in terms of the `BaseMetadata`'s returned by `GroupedMetadata.__iter__()`.
+
+    Concrete implementations should override `GroupedMetadata.__iter__()`
+    to add their own metadata.
+    For example:
+
+    >>> @dataclass
+    >>> class Field(GroupedMetadata):
+    >>>     gt: float | None = None
+    >>>     description: str | None = None
+    ...
+ >>> def __iter__(self) -> Iterable[BaseMetadata]: + >>> if self.gt is not None: + >>> yield Gt(self.gt) + >>> if self.description is not None: + >>> yield Description(self.gt) + + Also see the implementation of `Interval` below for an example. + + Parsers should recognize this and unpack it so that it can be used + both with and without unpacking: + + - `Annotated[int, Field(...)]` (parser must unpack Field) + - `Annotated[int, *Field(...)]` (PEP-646) + """ # noqa: trailing-whitespace + + @property + def __is_annotated_types_grouped_metadata__(self) -> Literal[True]: + return True + + def __iter__(self) -> Iterator[BaseMetadata]: + ... + + if not TYPE_CHECKING: + __slots__ = () # allow subclasses to use slots + + def __init_subclass__(cls, *args: Any, **kwargs: Any) -> None: + # Basic ABC like functionality without the complexity of an ABC + super().__init_subclass__(*args, **kwargs) + if cls.__iter__ is GroupedMetadata.__iter__: + raise TypeError("Can't subclass GroupedMetadata without implementing __iter__") + + def __iter__(self) -> Iterator[BaseMetadata]: # noqa: F811 + raise NotImplementedError # more helpful than "None has no attribute..." type errors + + +@dataclass(frozen=True, **KW_ONLY, **SLOTS) +class Interval(GroupedMetadata): + """Interval can express inclusive or exclusive bounds with a single object. + + It accepts keyword arguments ``gt``, ``ge``, ``lt``, and/or ``le``, which + are interpreted the same way as the single-bound constraints. 
+    """
+
+    gt: Union[SupportsGt, None] = None
+    ge: Union[SupportsGe, None] = None
+    lt: Union[SupportsLt, None] = None
+    le: Union[SupportsLe, None] = None
+
+    def __iter__(self) -> Iterator[BaseMetadata]:
+        """Unpack an Interval into zero or more single-bounds."""
+        if self.gt is not None:
+            yield Gt(self.gt)
+        if self.ge is not None:
+            yield Ge(self.ge)
+        if self.lt is not None:
+            yield Lt(self.lt)
+        if self.le is not None:
+            yield Le(self.le)
+
+
+@dataclass(frozen=True, **SLOTS)
+class MultipleOf(BaseMetadata):
+    """MultipleOf(multiple_of=x) might be interpreted in two ways:
+
+    1. Python semantics, implying ``value % multiple_of == 0``, or
+    2. JSONschema semantics, where ``int(value / multiple_of) == value / multiple_of``
+
+    We encourage users to be aware of these two common interpretations,
+    and libraries to carefully document which they implement.
+    """
+
+    multiple_of: Union[SupportsDiv, SupportsMod]
+
+
+@dataclass(frozen=True, **SLOTS)
+class MinLen(BaseMetadata):
+    """
+    MinLen() implies minimum inclusive length,
+    e.g. ``len(value) >= min_length``.
+    """
+
+    min_length: Annotated[int, Ge(0)]
+
+
+@dataclass(frozen=True, **SLOTS)
+class MaxLen(BaseMetadata):
+    """
+    MaxLen() implies maximum inclusive length,
+    e.g. ``len(value) <= max_length``.
+    """
+
+    max_length: Annotated[int, Ge(0)]
+
+
+@dataclass(frozen=True, **SLOTS)
+class Len(GroupedMetadata):
+    """
+    Len() implies that ``min_length <= len(value) <= max_length``.
+
+    Upper bound may be omitted or ``None`` to indicate no upper length bound.
+    """
+
+    min_length: Annotated[int, Ge(0)] = 0
+    max_length: Optional[Annotated[int, Ge(0)]] = None
+
+    def __iter__(self) -> Iterator[BaseMetadata]:
+        """Unpack a Len into zero or more single-bounds."""
+        if self.min_length > 0:
+            yield MinLen(self.min_length)
+        if self.max_length is not None:
+            yield MaxLen(self.max_length)
+
+
+@dataclass(frozen=True, **SLOTS)
+class Timezone(BaseMetadata):
+    """Timezone(tz=...)
requires a datetime to be aware (or ``tz=None``, naive).
+
+    ``Annotated[datetime, Timezone(None)]`` must be a naive datetime.
+    ``Timezone[...]`` (the ellipsis literal) expresses that the datetime must be
+    tz-aware but any timezone is allowed.
+
+    You may also pass a specific timezone string or timezone object such as
+    ``Timezone(timezone.utc)`` or ``Timezone("Africa/Abidjan")`` to express that
+    you only allow a specific timezone, though we note that this is often
+    a symptom of poor design.
+    """
+
+    tz: Union[str, timezone, EllipsisType, None]
+
+
+@dataclass(frozen=True, **SLOTS)
+class Predicate(BaseMetadata):
+    """``Predicate(func: Callable)`` implies `func(value)` is truthy for valid values.
+
+    Users should prefer statically inspectable metadata, but if you need the full
+    power and flexibility of arbitrary runtime predicates... here it is.
+
+    We provide a few predefined predicates for common string constraints:
+    ``IsLower = Predicate(str.islower)``, ``IsUpper = Predicate(str.isupper)``, and
+    ``IsDigit = Predicate(str.isdigit)``. Users are encouraged to use methods which
+    can be given special handling, and avoid indirection like ``lambda s: s.lower()``.
+
+    Some libraries might have special logic to handle certain predicates, e.g. by
+    checking for `str.isdigit` and using its presence to both call custom logic to
+    enforce digit-only strings, and customise some generated external schema.
+
+    We do not specify what behaviour should be expected for predicates that raise
+    an exception. For example `Annotated[int, Predicate(str.isdigit)]` might silently
+    skip invalid constraints, or statically raise an error; or it might try calling it
+    and then propagate or discard the resulting exception.
+ """ + + func: Callable[[Any], bool] + + +@dataclass +class Not: + func: Callable[[Any], bool] + + def __call__(self, __v: Any) -> bool: + return not self.func(__v) + + +_StrType = TypeVar("_StrType", bound=str) + +LowerCase = Annotated[_StrType, Predicate(str.islower)] +""" +Return True if the string is a lowercase string, False otherwise. + +A string is lowercase if all cased characters in the string are lowercase and there is at least one cased character in the string. +""" # noqa: E501 +UpperCase = Annotated[_StrType, Predicate(str.isupper)] +""" +Return True if the string is an uppercase string, False otherwise. + +A string is uppercase if all cased characters in the string are uppercase and there is at least one cased character in the string. +""" # noqa: E501 +IsDigits = Annotated[_StrType, Predicate(str.isdigit)] +""" +Return True if the string is a digit string, False otherwise. + +A string is a digit string if all characters in the string are digits and there is at least one character in the string. +""" # noqa: E501 +IsAscii = Annotated[_StrType, Predicate(str.isascii)] +""" +Return True if all characters in the string are ASCII, False otherwise. + +ASCII characters have code points in the range U+0000-U+007F. Empty string is ASCII too. 
+"""
+
+_NumericType = TypeVar('_NumericType', bound=Union[SupportsFloat, SupportsIndex])
+IsFinite = Annotated[_NumericType, Predicate(math.isfinite)]
+"""Return True if x is neither an infinity nor a NaN, and False otherwise."""
+IsNotFinite = Annotated[_NumericType, Predicate(Not(math.isfinite))]
+"""Return True if x is one of infinity or NaN, and False otherwise"""
+IsNan = Annotated[_NumericType, Predicate(math.isnan)]
+"""Return True if x is a NaN (not a number), and False otherwise."""
+IsNotNan = Annotated[_NumericType, Predicate(Not(math.isnan))]
+"""Return True if x is anything but NaN (not a number), and False otherwise."""
+IsInfinite = Annotated[_NumericType, Predicate(math.isinf)]
+"""Return True if x is a positive or negative infinity, and False otherwise."""
+IsNotInfinite = Annotated[_NumericType, Predicate(Not(math.isinf))]
+"""Return True if x is neither a positive or negative infinity, and False otherwise."""
+
+try:
+    from typing_extensions import DocInfo, doc  # type: ignore [attr-defined]
+except ImportError:
+
+    @dataclass(frozen=True, **SLOTS)
+    class DocInfo:  # type: ignore [no-redef]
+        """
+        The return value of doc(), mainly to be used by tools that want to extract the
+        Annotated documentation at runtime.
+        """
+
+        documentation: str
+        """The documentation string passed to doc()."""
+
+    def doc(
+        documentation: str,
+    ) -> DocInfo:
+        """
+        Add documentation to a type annotation inside of Annotated.
+
+        For example:
+
+        >>> def hi(name: Annotated[int, doc("The name of the user")]) -> None: ...
+ """ + return DocInfo(documentation) diff --git a/lib/jaraco/classes/__init__.py b/lib/annotated_types/py.typed similarity index 100% rename from lib/jaraco/classes/__init__.py rename to lib/annotated_types/py.typed diff --git a/lib/annotated_types/test_cases.py b/lib/annotated_types/test_cases.py new file mode 100644 index 00000000..f54df700 --- /dev/null +++ b/lib/annotated_types/test_cases.py @@ -0,0 +1,147 @@ +import math +import sys +from datetime import date, datetime, timedelta, timezone +from decimal import Decimal +from typing import Any, Dict, Iterable, Iterator, List, NamedTuple, Set, Tuple + +if sys.version_info < (3, 9): + from typing_extensions import Annotated +else: + from typing import Annotated + +import annotated_types as at + + +class Case(NamedTuple): + """ + A test case for `annotated_types`. + """ + + annotation: Any + valid_cases: Iterable[Any] + invalid_cases: Iterable[Any] + + +def cases() -> Iterable[Case]: + # Gt, Ge, Lt, Le + yield Case(Annotated[int, at.Gt(4)], (5, 6, 1000), (4, 0, -1)) + yield Case(Annotated[float, at.Gt(0.5)], (0.6, 0.7, 0.8, 0.9), (0.5, 0.0, -0.1)) + yield Case( + Annotated[datetime, at.Gt(datetime(2000, 1, 1))], + [datetime(2000, 1, 2), datetime(2000, 1, 3)], + [datetime(2000, 1, 1), datetime(1999, 12, 31)], + ) + yield Case( + Annotated[datetime, at.Gt(date(2000, 1, 1))], + [date(2000, 1, 2), date(2000, 1, 3)], + [date(2000, 1, 1), date(1999, 12, 31)], + ) + yield Case( + Annotated[datetime, at.Gt(Decimal('1.123'))], + [Decimal('1.1231'), Decimal('123')], + [Decimal('1.123'), Decimal('0')], + ) + + yield Case(Annotated[int, at.Ge(4)], (4, 5, 6, 1000, 4), (0, -1)) + yield Case(Annotated[float, at.Ge(0.5)], (0.5, 0.6, 0.7, 0.8, 0.9), (0.4, 0.0, -0.1)) + yield Case( + Annotated[datetime, at.Ge(datetime(2000, 1, 1))], + [datetime(2000, 1, 2), datetime(2000, 1, 3)], + [datetime(1998, 1, 1), datetime(1999, 12, 31)], + ) + + yield Case(Annotated[int, at.Lt(4)], (0, -1), (4, 5, 6, 1000, 4)) + yield Case(Annotated[float, 
at.Lt(0.5)], (0.4, 0.0, -0.1), (0.5, 0.6, 0.7, 0.8, 0.9)) + yield Case( + Annotated[datetime, at.Lt(datetime(2000, 1, 1))], + [datetime(1999, 12, 31), datetime(1999, 12, 31)], + [datetime(2000, 1, 2), datetime(2000, 1, 3)], + ) + + yield Case(Annotated[int, at.Le(4)], (4, 0, -1), (5, 6, 1000)) + yield Case(Annotated[float, at.Le(0.5)], (0.5, 0.0, -0.1), (0.6, 0.7, 0.8, 0.9)) + yield Case( + Annotated[datetime, at.Le(datetime(2000, 1, 1))], + [datetime(2000, 1, 1), datetime(1999, 12, 31)], + [datetime(2000, 1, 2), datetime(2000, 1, 3)], + ) + + # Interval + yield Case(Annotated[int, at.Interval(gt=4)], (5, 6, 1000), (4, 0, -1)) + yield Case(Annotated[int, at.Interval(gt=4, lt=10)], (5, 6), (4, 10, 1000, 0, -1)) + yield Case(Annotated[float, at.Interval(ge=0.5, le=1)], (0.5, 0.9, 1), (0.49, 1.1)) + yield Case( + Annotated[datetime, at.Interval(gt=datetime(2000, 1, 1), le=datetime(2000, 1, 3))], + [datetime(2000, 1, 2), datetime(2000, 1, 3)], + [datetime(2000, 1, 1), datetime(2000, 1, 4)], + ) + + yield Case(Annotated[int, at.MultipleOf(multiple_of=3)], (0, 3, 9), (1, 2, 4)) + yield Case(Annotated[float, at.MultipleOf(multiple_of=0.5)], (0, 0.5, 1, 1.5), (0.4, 1.1)) + + # lengths + + yield Case(Annotated[str, at.MinLen(3)], ('123', '1234', 'x' * 10), ('', '1', '12')) + yield Case(Annotated[str, at.Len(3)], ('123', '1234', 'x' * 10), ('', '1', '12')) + yield Case(Annotated[List[int], at.MinLen(3)], ([1, 2, 3], [1, 2, 3, 4], [1] * 10), ([], [1], [1, 2])) + yield Case(Annotated[List[int], at.Len(3)], ([1, 2, 3], [1, 2, 3, 4], [1] * 10), ([], [1], [1, 2])) + + yield Case(Annotated[str, at.MaxLen(4)], ('', '1234'), ('12345', 'x' * 10)) + yield Case(Annotated[str, at.Len(0, 4)], ('', '1234'), ('12345', 'x' * 10)) + yield Case(Annotated[List[str], at.MaxLen(4)], ([], ['a', 'bcdef'], ['a', 'b', 'c']), (['a'] * 5, ['b'] * 10)) + yield Case(Annotated[List[str], at.Len(0, 4)], ([], ['a', 'bcdef'], ['a', 'b', 'c']), (['a'] * 5, ['b'] * 10)) + + yield Case(Annotated[str, at.Len(3, 
5)], ('123', '12345'), ('', '1', '12', '123456', 'x' * 10)) + yield Case(Annotated[str, at.Len(3, 3)], ('123',), ('12', '1234')) + + yield Case(Annotated[Dict[int, int], at.Len(2, 3)], [{1: 1, 2: 2}], [{}, {1: 1}, {1: 1, 2: 2, 3: 3, 4: 4}]) + yield Case(Annotated[Set[int], at.Len(2, 3)], ({1, 2}, {1, 2, 3}), (set(), {1}, {1, 2, 3, 4})) + yield Case(Annotated[Tuple[int, ...], at.Len(2, 3)], ((1, 2), (1, 2, 3)), ((), (1,), (1, 2, 3, 4))) + + # Timezone + + yield Case( + Annotated[datetime, at.Timezone(None)], [datetime(2000, 1, 1)], [datetime(2000, 1, 1, tzinfo=timezone.utc)] + ) + yield Case( + Annotated[datetime, at.Timezone(...)], [datetime(2000, 1, 1, tzinfo=timezone.utc)], [datetime(2000, 1, 1)] + ) + yield Case( + Annotated[datetime, at.Timezone(timezone.utc)], + [datetime(2000, 1, 1, tzinfo=timezone.utc)], + [datetime(2000, 1, 1), datetime(2000, 1, 1, tzinfo=timezone(timedelta(hours=6)))], + ) + yield Case( + Annotated[datetime, at.Timezone('Europe/London')], + [datetime(2000, 1, 1, tzinfo=timezone(timedelta(0), name='Europe/London'))], + [datetime(2000, 1, 1), datetime(2000, 1, 1, tzinfo=timezone(timedelta(hours=6)))], + ) + + # predicate types + + yield Case(at.LowerCase[str], ['abc', 'foobar'], ['', 'A', 'Boom']) + yield Case(at.UpperCase[str], ['ABC', 'DEFO'], ['', 'a', 'abc', 'AbC']) + yield Case(at.IsDigits[str], ['123'], ['', 'ab', 'a1b2']) + yield Case(at.IsAscii[str], ['123', 'foo bar'], ['£100', '😊', 'whatever 👀']) + + yield Case(Annotated[int, at.Predicate(lambda x: x % 2 == 0)], [0, 2, 4], [1, 3, 5]) + + yield Case(at.IsFinite[float], [1.23], [math.nan, math.inf, -math.inf]) + yield Case(at.IsNotFinite[float], [math.nan, math.inf], [1.23]) + yield Case(at.IsNan[float], [math.nan], [1.23, math.inf]) + yield Case(at.IsNotNan[float], [1.23, math.inf], [math.nan]) + yield Case(at.IsInfinite[float], [math.inf], [math.nan, 1.23]) + yield Case(at.IsNotInfinite[float], [math.nan, 1.23], [math.inf]) + + # check stacked predicates + yield 
Case(at.IsInfinite[Annotated[float, at.Predicate(lambda x: x > 0)]], [math.inf], [-math.inf, 1.23, math.nan]) + + # doc + yield Case(Annotated[int, at.doc("A number")], [1, 2], []) + + # custom GroupedMetadata + class MyCustomGroupedMetadata(at.GroupedMetadata): + def __iter__(self) -> Iterator[at.Predicate]: + yield at.Predicate(lambda x: float(x).is_integer()) + + yield Case(Annotated[float, MyCustomGroupedMetadata()], [0, 2.0], [0.01, 1.5]) diff --git a/lib/arrow/_version.py b/lib/arrow/_version.py index 10aa336c..67bc602a 100644 --- a/lib/arrow/_version.py +++ b/lib/arrow/_version.py @@ -1 +1 @@ -__version__ = "1.2.3" +__version__ = "1.3.0" diff --git a/lib/arrow/arrow.py b/lib/arrow/arrow.py index 1ede107f..8d329efd 100644 --- a/lib/arrow/arrow.py +++ b/lib/arrow/arrow.py @@ -168,9 +168,9 @@ class Arrow: isinstance(tzinfo, dt_tzinfo) and hasattr(tzinfo, "localize") and hasattr(tzinfo, "zone") - and tzinfo.zone # type: ignore[attr-defined] + and tzinfo.zone ): - tzinfo = parser.TzinfoParser.parse(tzinfo.zone) # type: ignore[attr-defined] + tzinfo = parser.TzinfoParser.parse(tzinfo.zone) elif isinstance(tzinfo, str): tzinfo = parser.TzinfoParser.parse(tzinfo) @@ -495,7 +495,7 @@ class Arrow: yield current values = [getattr(current, f) for f in cls._ATTRS] - current = cls(*values, tzinfo=tzinfo).shift( # type: ignore + current = cls(*values, tzinfo=tzinfo).shift( # type: ignore[misc] **{frame_relative: relative_steps} ) @@ -578,7 +578,7 @@ class Arrow: for _ in range(3 - len(values)): values.append(1) - floor = self.__class__(*values, tzinfo=self.tzinfo) # type: ignore + floor = self.__class__(*values, tzinfo=self.tzinfo) # type: ignore[misc] if frame_absolute == "week": # if week_start is greater than self.isoweekday() go back one week by setting delta = 7 @@ -792,7 +792,6 @@ class Arrow: return self._datetime.isoformat() def __format__(self, formatstr: str) -> str: - if len(formatstr) > 0: return self.format(formatstr) @@ -804,7 +803,6 @@ class Arrow: # 
attributes and properties def __getattr__(self, name: str) -> int: - if name == "week": return self.isocalendar()[1] @@ -965,7 +963,6 @@ class Arrow: absolute_kwargs = {} for key, value in kwargs.items(): - if key in self._ATTRS: absolute_kwargs[key] = value elif key in ["week", "quarter"]: @@ -1022,7 +1019,6 @@ class Arrow: additional_attrs = ["weeks", "quarters", "weekday"] for key, value in kwargs.items(): - if key in self._ATTRS_PLURAL or key in additional_attrs: relative_kwargs[key] = value else: @@ -1259,11 +1255,10 @@ class Arrow: ) if trunc(abs(delta)) != 1: - granularity += "s" # type: ignore + granularity += "s" # type: ignore[assignment] return locale.describe(granularity, delta, only_distance=only_distance) else: - if not granularity: raise ValueError( "Empty granularity list provided. " @@ -1314,7 +1309,7 @@ class Arrow: def dehumanize(self, input_string: str, locale: str = "en_us") -> "Arrow": """Returns a new :class:`Arrow ` object, that represents - the time difference relative to the attrbiutes of the + the time difference relative to the attributes of the :class:`Arrow ` object. :param timestring: a ``str`` representing a humanized relative time. @@ -1367,7 +1362,6 @@ class Arrow: # Search input string for each time unit within locale for unit, unit_object in locale_obj.timeframes.items(): - # Need to check the type of unit_object to create the correct dictionary if isinstance(unit_object, Mapping): strings_to_search = unit_object @@ -1378,7 +1372,6 @@ class Arrow: # Needs to cycle all through strings as some locales have strings that # could overlap in a regex match, since input validation isn't being performed. 
for time_delta, time_string in strings_to_search.items(): - # Replace {0} with regex \d representing digits search_string = str(time_string) search_string = search_string.format(r"\d+") @@ -1419,7 +1412,7 @@ class Arrow: # Assert error if string does not modify any units if not any([True for k, v in unit_visited.items() if v]): raise ValueError( - "Input string not valid. Note: Some locales do not support the week granulairty in Arrow. " + "Input string not valid. Note: Some locales do not support the week granularity in Arrow. " "If you are attempting to use the week granularity on an unsupported locale, this could be the cause of this error." ) @@ -1718,7 +1711,6 @@ class Arrow: # math def __add__(self, other: Any) -> "Arrow": - if isinstance(other, (timedelta, relativedelta)): return self.fromdatetime(self._datetime + other, self._datetime.tzinfo) @@ -1736,7 +1728,6 @@ class Arrow: pass # pragma: no cover def __sub__(self, other: Any) -> Union[timedelta, "Arrow"]: - if isinstance(other, (timedelta, relativedelta)): return self.fromdatetime(self._datetime - other, self._datetime.tzinfo) @@ -1749,7 +1740,6 @@ class Arrow: return NotImplemented def __rsub__(self, other: Any) -> timedelta: - if isinstance(other, dt_datetime): return other - self._datetime @@ -1758,42 +1748,36 @@ class Arrow: # comparisons def __eq__(self, other: Any) -> bool: - if not isinstance(other, (Arrow, dt_datetime)): return False return self._datetime == self._get_datetime(other) def __ne__(self, other: Any) -> bool: - if not isinstance(other, (Arrow, dt_datetime)): return True return not self.__eq__(other) def __gt__(self, other: Any) -> bool: - if not isinstance(other, (Arrow, dt_datetime)): return NotImplemented return self._datetime > self._get_datetime(other) def __ge__(self, other: Any) -> bool: - if not isinstance(other, (Arrow, dt_datetime)): return NotImplemented return self._datetime >= self._get_datetime(other) def __lt__(self, other: Any) -> bool: - if not isinstance(other, 
(Arrow, dt_datetime)): return NotImplemented return self._datetime < self._get_datetime(other) def __le__(self, other: Any) -> bool: - if not isinstance(other, (Arrow, dt_datetime)): return NotImplemented @@ -1865,7 +1849,6 @@ class Arrow: def _get_iteration_params(cls, end: Any, limit: Optional[int]) -> Tuple[Any, int]: """Sets default end and limit values for range method.""" if end is None: - if limit is None: raise ValueError("One of 'end' or 'limit' is required.") diff --git a/lib/arrow/factory.py b/lib/arrow/factory.py index aad4af8b..f35085f1 100644 --- a/lib/arrow/factory.py +++ b/lib/arrow/factory.py @@ -267,11 +267,9 @@ class ArrowFactory: raise TypeError(f"Cannot parse single argument of type {type(arg)!r}.") elif arg_count == 2: - arg_1, arg_2 = args[0], args[1] if isinstance(arg_1, datetime): - # (datetime, tzinfo/str) -> fromdatetime @ tzinfo if isinstance(arg_2, (dt_tzinfo, str)): return self.type.fromdatetime(arg_1, tzinfo=arg_2) @@ -281,7 +279,6 @@ class ArrowFactory: ) elif isinstance(arg_1, date): - # (date, tzinfo/str) -> fromdate @ tzinfo if isinstance(arg_2, (dt_tzinfo, str)): return self.type.fromdate(arg_1, tzinfo=arg_2) diff --git a/lib/arrow/formatter.py b/lib/arrow/formatter.py index 728bea1a..d45f7153 100644 --- a/lib/arrow/formatter.py +++ b/lib/arrow/formatter.py @@ -29,7 +29,6 @@ FORMAT_W3C: Final[str] = "YYYY-MM-DD HH:mm:ssZZ" class DateTimeFormatter: - # This pattern matches characters enclosed in square brackets are matched as # an atomic group. 
For more info on atomic groups and how to they are # emulated in Python's re library, see https://stackoverflow.com/a/13577411/2701578 @@ -41,18 +40,15 @@ class DateTimeFormatter: locale: locales.Locale def __init__(self, locale: str = DEFAULT_LOCALE) -> None: - self.locale = locales.get_locale(locale) def format(cls, dt: datetime, fmt: str) -> str: - # FIXME: _format_token() is nullable return cls._FORMAT_RE.sub( lambda m: cast(str, cls._format_token(dt, m.group(0))), fmt ) def _format_token(self, dt: datetime, token: Optional[str]) -> Optional[str]: - if token and token.startswith("[") and token.endswith("]"): return token[1:-1] diff --git a/lib/arrow/locales.py b/lib/arrow/locales.py index 3627497f..34b2a098 100644 --- a/lib/arrow/locales.py +++ b/lib/arrow/locales.py @@ -129,7 +129,6 @@ class Locale: _locale_map[locale_name.lower().replace("_", "-")] = cls def __init__(self) -> None: - self._month_name_to_ordinal = None def describe( @@ -174,7 +173,7 @@ class Locale: # Needed to determine the correct relative string to use timeframe_value = 0 - for _unit_name, unit_value in timeframes: + for _, unit_value in timeframes: if trunc(unit_value) != 0: timeframe_value = trunc(unit_value) break @@ -285,7 +284,6 @@ class Locale: timeframe: TimeFrameLiteral, delta: Union[float, int], ) -> str: - if timeframe == "now": return humanized @@ -425,7 +423,7 @@ class ItalianLocale(Locale): "hours": "{0} ore", "day": "un giorno", "days": "{0} giorni", - "week": "una settimana,", + "week": "una settimana", "weeks": "{0} settimane", "month": "un mese", "months": "{0} mesi", @@ -867,14 +865,16 @@ class FinnishLocale(Locale): timeframes: ClassVar[Mapping[TimeFrameLiteral, Union[str, Mapping[str, str]]]] = { "now": "juuri nyt", - "second": "sekunti", - "seconds": {"past": "{0} muutama sekunti", "future": "{0} muutaman sekunnin"}, + "second": {"past": "sekunti", "future": "sekunnin"}, + "seconds": {"past": "{0} sekuntia", "future": "{0} sekunnin"}, "minute": {"past": "minuutti", 
"future": "minuutin"}, "minutes": {"past": "{0} minuuttia", "future": "{0} minuutin"}, "hour": {"past": "tunti", "future": "tunnin"}, "hours": {"past": "{0} tuntia", "future": "{0} tunnin"}, - "day": "päivä", + "day": {"past": "päivä", "future": "päivän"}, "days": {"past": "{0} päivää", "future": "{0} päivän"}, + "week": {"past": "viikko", "future": "viikon"}, + "weeks": {"past": "{0} viikkoa", "future": "{0} viikon"}, "month": {"past": "kuukausi", "future": "kuukauden"}, "months": {"past": "{0} kuukautta", "future": "{0} kuukauden"}, "year": {"past": "vuosi", "future": "vuoden"}, @@ -1887,7 +1887,7 @@ class GermanBaseLocale(Locale): future = "in {0}" and_word = "und" - timeframes = { + timeframes: ClassVar[Dict[TimeFrameLiteral, str]] = { "now": "gerade eben", "second": "einer Sekunde", "seconds": "{0} Sekunden", @@ -1982,7 +1982,9 @@ class GermanBaseLocale(Locale): return super().describe(timeframe, delta, only_distance) # German uses a different case without 'in' or 'ago' - humanized = self.timeframes_only_distance[timeframe].format(trunc(abs(delta))) + humanized: str = self.timeframes_only_distance[timeframe].format( + trunc(abs(delta)) + ) return humanized @@ -2547,6 +2549,8 @@ class ArabicLocale(Locale): "hours": {"2": "ساعتين", "ten": "{0} ساعات", "higher": "{0} ساعة"}, "day": "يوم", "days": {"2": "يومين", "ten": "{0} أيام", "higher": "{0} يوم"}, + "week": "اسبوع", + "weeks": {"2": "اسبوعين", "ten": "{0} أسابيع", "higher": "{0} اسبوع"}, "month": "شهر", "months": {"2": "شهرين", "ten": "{0} أشهر", "higher": "{0} شهر"}, "year": "سنة", @@ -3709,6 +3713,8 @@ class HungarianLocale(Locale): "hours": {"past": "{0} órával", "future": "{0} óra"}, "day": {"past": "egy nappal", "future": "egy nap"}, "days": {"past": "{0} nappal", "future": "{0} nap"}, + "week": {"past": "egy héttel", "future": "egy hét"}, + "weeks": {"past": "{0} héttel", "future": "{0} hét"}, "month": {"past": "egy hónappal", "future": "egy hónap"}, "months": {"past": "{0} hónappal", "future": "{0} 
hónap"}, "year": {"past": "egy évvel", "future": "egy év"}, @@ -3934,7 +3940,6 @@ class ThaiLocale(Locale): class LaotianLocale(Locale): - names = ["lo", "lo-la"] past = "{0} àºà»ˆàº­àº™àº«àº™à»‰àº²àº™àºµà»‰" @@ -4119,6 +4124,7 @@ class BengaliLocale(Locale): return f"{n}রà§à¦¥" if n == 6: return f"{n}ষà§à¦ " + return "" class RomanshLocale(Locale): @@ -4137,6 +4143,8 @@ class RomanshLocale(Locale): "hours": "{0} ura", "day": "in di", "days": "{0} dis", + "week": "in'emna", + "weeks": "{0} emnas", "month": "in mais", "months": "{0} mais", "year": "in onn", @@ -5399,7 +5407,7 @@ class LuxembourgishLocale(Locale): future = "an {0}" and_word = "an" - timeframes = { + timeframes: ClassVar[Dict[TimeFrameLiteral, str]] = { "now": "just elo", "second": "enger Sekonn", "seconds": "{0} Sekonnen", @@ -5487,7 +5495,9 @@ class LuxembourgishLocale(Locale): return super().describe(timeframe, delta, only_distance) # Luxembourgish uses a different case without 'in' or 'ago' - humanized = self.timeframes_only_distance[timeframe].format(trunc(abs(delta))) + humanized: str = self.timeframes_only_distance[timeframe].format( + trunc(abs(delta)) + ) return humanized diff --git a/lib/arrow/parser.py b/lib/arrow/parser.py index e95d78b0..645e3da7 100644 --- a/lib/arrow/parser.py +++ b/lib/arrow/parser.py @@ -159,7 +159,6 @@ class DateTimeParser: _input_re_map: Dict[_FORMAT_TYPE, Pattern[str]] def __init__(self, locale: str = DEFAULT_LOCALE, cache_size: int = 0) -> None: - self.locale = locales.get_locale(locale) self._input_re_map = self._BASE_INPUT_RE_MAP.copy() self._input_re_map.update( @@ -196,7 +195,6 @@ class DateTimeParser: def parse_iso( self, datetime_string: str, normalize_whitespace: bool = False ) -> datetime: - if normalize_whitespace: datetime_string = re.sub(r"\s+", " ", datetime_string.strip()) @@ -236,13 +234,14 @@ class DateTimeParser: ] if has_time: - if has_space_divider: date_string, time_string = datetime_string.split(" ", 1) else: date_string, time_string = 
datetime_string.split("T", 1) - time_parts = re.split(r"[\+\-Z]", time_string, 1, re.IGNORECASE) + time_parts = re.split( + r"[\+\-Z]", time_string, maxsplit=1, flags=re.IGNORECASE + ) time_components: Optional[Match[str]] = self._TIME_RE.match(time_parts[0]) @@ -303,7 +302,6 @@ class DateTimeParser: fmt: Union[List[str], str], normalize_whitespace: bool = False, ) -> datetime: - if normalize_whitespace: datetime_string = re.sub(r"\s+", " ", datetime_string) @@ -341,12 +339,11 @@ class DateTimeParser: f"Unable to find a match group for the specified token {token!r}." ) - self._parse_token(token, value, parts) # type: ignore + self._parse_token(token, value, parts) # type: ignore[arg-type] return self._build_datetime(parts) def _generate_pattern_re(self, fmt: str) -> Tuple[List[_FORMAT_TYPE], Pattern[str]]: - # fmt is a string of tokens like 'YYYY-MM-DD' # we construct a new string by replacing each # token by its pattern: @@ -498,7 +495,6 @@ class DateTimeParser: value: Any, parts: _Parts, ) -> None: - if token == "YYYY": parts["year"] = int(value) @@ -508,7 +504,7 @@ class DateTimeParser: elif token in ["MMMM", "MMM"]: # FIXME: month_number() is nullable - parts["month"] = self.locale.month_number(value.lower()) # type: ignore + parts["month"] = self.locale.month_number(value.lower()) # type: ignore[typeddict-item] elif token in ["MM", "M"]: parts["month"] = int(value) @@ -588,7 +584,6 @@ class DateTimeParser: weekdate = parts.get("weekdate") if weekdate is not None: - year, week = int(weekdate[0]), int(weekdate[1]) if weekdate[2] is not None: @@ -712,7 +707,6 @@ class DateTimeParser: ) def _parse_multiformat(self, string: str, formats: Iterable[str]) -> datetime: - _datetime: Optional[datetime] = None for fmt in formats: @@ -740,12 +734,11 @@ class DateTimeParser: class TzinfoParser: _TZINFO_RE: ClassVar[Pattern[str]] = re.compile( - r"^([\+\-])?(\d{2})(?:\:?(\d{2}))?$" + r"^(?:\(UTC)*([\+\-])?(\d{2})(?:\:?(\d{2}))?" 
) @classmethod def parse(cls, tzinfo_string: str) -> dt_tzinfo: - tzinfo: Optional[dt_tzinfo] = None if tzinfo_string == "local": @@ -755,7 +748,6 @@ class TzinfoParser: tzinfo = tz.tzutc() else: - iso_match = cls._TZINFO_RE.match(tzinfo_string) if iso_match: diff --git a/lib/autocommand/autoasync.py b/lib/autocommand/autoasync.py index 3c8ebdcf..688f7e05 100644 --- a/lib/autocommand/autoasync.py +++ b/lib/autocommand/autoasync.py @@ -20,7 +20,7 @@ from functools import wraps from inspect import signature -def _launch_forever_coro(coro, args, kwargs, loop): +async def _run_forever_coro(coro, args, kwargs, loop): ''' This helper function launches an async main function that was tagged with forever=True. There are two possibilities: @@ -48,7 +48,7 @@ def _launch_forever_coro(coro, args, kwargs, loop): # forever=True feature from autoasync at some point in the future. thing = coro(*args, **kwargs) if iscoroutine(thing): - loop.create_task(thing) + await thing def autoasync(coro=None, *, loop=None, forever=False, pass_loop=False): @@ -127,7 +127,9 @@ def autoasync(coro=None, *, loop=None, forever=False, pass_loop=False): args, kwargs = bound_args.args, bound_args.kwargs if forever: - _launch_forever_coro(coro, args, kwargs, local_loop) + local_loop.create_task(_run_forever_coro( + coro, args, kwargs, local_loop + )) local_loop.run_forever() else: return local_loop.run_until_complete(coro(*args, **kwargs)) diff --git a/lib/backports/functools_lru_cache.py b/lib/backports/functools_lru_cache.py index 1b83fe99..e372cff3 100644 --- a/lib/backports/functools_lru_cache.py +++ b/lib/backports/functools_lru_cache.py @@ -26,6 +26,12 @@ def update_wrapper( class _HashedSeq(list): + """This class guarantees that hash() will be called no more than once + per element. This is important because the lru_cache() will hash + the key multiple times on a cache miss. 
+ + """ + __slots__ = 'hashvalue' def __init__(self, tup, hash=hash): @@ -41,45 +47,57 @@ def _make_key( kwds, typed, kwd_mark=(object(),), - fasttypes=set([int, str, frozenset, type(None)]), - sorted=sorted, + fasttypes={int, str}, tuple=tuple, type=type, len=len, ): - 'Make a cache key from optionally typed positional and keyword arguments' + """Make a cache key from optionally typed positional and keyword arguments + + The key is constructed in a way that is flat as possible rather than + as a nested structure that would take more memory. + + If there is only a single argument and its data type is known to cache + its hash value, then that argument is returned without a wrapper. This + saves space and improves lookup speed. + + """ + # All of code below relies on kwds preserving the order input by the user. + # Formerly, we sorted() the kwds before looping. The new way is *much* + # faster; however, it means that f(x=1, y=2) will now be treated as a + # distinct call from f(y=2, x=1) which will be cached separately. key = args if kwds: - sorted_items = sorted(kwds.items()) key += kwd_mark - for item in sorted_items: + for item in kwds.items(): key += item if typed: key += tuple(type(v) for v in args) if kwds: - key += tuple(type(v) for k, v in sorted_items) + key += tuple(type(v) for v in kwds.values()) elif len(key) == 1 and type(key[0]) in fasttypes: return key[0] return _HashedSeq(key) -def lru_cache(maxsize=100, typed=False): # noqa: C901 +def lru_cache(maxsize=128, typed=False): """Least-recently-used cache decorator. If *maxsize* is set to None, the LRU features are disabled and the cache can grow without bound. If *typed* is True, arguments of different types will be cached separately. - For example, f(3.0) and f(3) will be treated as distinct calls with - distinct results. + For example, f(decimal.Decimal("3.0")) and f(3.0) will be treated as + distinct calls with distinct results. 
Some types such as str and int may + be cached separately even when typed is false. Arguments to the cached function must be hashable. - View the cache statistics named tuple (hits, misses, maxsize, currsize) with - f.cache_info(). Clear the cache and statistics with f.cache_clear(). + View the cache statistics named tuple (hits, misses, maxsize, currsize) + with f.cache_info(). Clear the cache and statistics with f.cache_clear(). Access the underlying function with f.__wrapped__. - See: http://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used + See: https://en.wikipedia.org/wiki/Cache_replacement_policies#Least_recently_used_(LRU) """ @@ -88,108 +106,138 @@ def lru_cache(maxsize=100, typed=False): # noqa: C901 # The internals of the lru_cache are encapsulated for thread safety and # to allow the implementation to change (including a possible C version). + if isinstance(maxsize, int): + # Negative maxsize is treated as 0 + if maxsize < 0: + maxsize = 0 + elif callable(maxsize) and isinstance(typed, bool): + # The user_function was passed in directly via the maxsize argument + user_function, maxsize = maxsize, 128 + wrapper = _lru_cache_wrapper(user_function, maxsize, typed, _CacheInfo) + wrapper.cache_parameters = lambda: {'maxsize': maxsize, 'typed': typed} + return update_wrapper(wrapper, user_function) + elif maxsize is not None: + raise TypeError('Expected first argument to be an integer, a callable, or None') + def decorating_function(user_function): - cache = dict() - stats = [0, 0] # make statistics updateable non-locally - HITS, MISSES = 0, 1 # names for the stats fields - make_key = _make_key - cache_get = cache.get # bound method to lookup key or return None - _len = len # localize the global len() function - lock = RLock() # because linkedlist updates aren't threadsafe - root = [] # root of the circular doubly linked list - root[:] = [root, root, None, None] # initialize by pointing to self - nonlocal_root = [root] # make updateable non-locally 
- PREV, NEXT, KEY, RESULT = 0, 1, 2, 3 # names for the link fields - - if maxsize == 0: - - def wrapper(*args, **kwds): - # no caching, just do a statistics update after a successful call - result = user_function(*args, **kwds) - stats[MISSES] += 1 - return result - - elif maxsize is None: - - def wrapper(*args, **kwds): - # simple caching without ordering or size limit - key = make_key(args, kwds, typed) - result = cache_get( - key, root - ) # root used here as a unique not-found sentinel - if result is not root: - stats[HITS] += 1 - return result - result = user_function(*args, **kwds) - cache[key] = result - stats[MISSES] += 1 - return result - - else: - - def wrapper(*args, **kwds): - # size limited caching that tracks accesses by recency - key = make_key(args, kwds, typed) if kwds or typed else args - with lock: - link = cache_get(key) - if link is not None: - # record recent use of the key by moving it - # to the front of the list - (root,) = nonlocal_root - link_prev, link_next, key, result = link - link_prev[NEXT] = link_next - link_next[PREV] = link_prev - last = root[PREV] - last[NEXT] = root[PREV] = link - link[PREV] = last - link[NEXT] = root - stats[HITS] += 1 - return result - result = user_function(*args, **kwds) - with lock: - (root,) = nonlocal_root - if key in cache: - # getting here means that this same key was added to the - # cache while the lock was released. since the link - # update is already done, we need only return the - # computed result and update the count of misses. 
- pass - elif _len(cache) >= maxsize: - # use the old root to store the new key and result - oldroot = root - oldroot[KEY] = key - oldroot[RESULT] = result - # empty the oldest link and make it the new root - root = nonlocal_root[0] = oldroot[NEXT] - oldkey = root[KEY] - root[KEY] = root[RESULT] = None - # now update the cache dictionary for the new links - del cache[oldkey] - cache[key] = oldroot - else: - # put result in a new link at the front of the list - last = root[PREV] - link = [last, root, key, result] - last[NEXT] = root[PREV] = cache[key] = link - stats[MISSES] += 1 - return result - - def cache_info(): - """Report cache statistics""" - with lock: - return _CacheInfo(stats[HITS], stats[MISSES], maxsize, len(cache)) - - def cache_clear(): - """Clear the cache and cache statistics""" - with lock: - cache.clear() - root = nonlocal_root[0] - root[:] = [root, root, None, None] - stats[:] = [0, 0] - - wrapper.__wrapped__ = user_function - wrapper.cache_info = cache_info - wrapper.cache_clear = cache_clear + wrapper = _lru_cache_wrapper(user_function, maxsize, typed, _CacheInfo) + wrapper.cache_parameters = lambda: {'maxsize': maxsize, 'typed': typed} return update_wrapper(wrapper, user_function) return decorating_function + + +def _lru_cache_wrapper(user_function, maxsize, typed, _CacheInfo): + # Constants shared by all lru cache instances: + sentinel = object() # unique object used to signal cache misses + make_key = _make_key # build a key from the function arguments + PREV, NEXT, KEY, RESULT = 0, 1, 2, 3 # names for the link fields + + cache = {} + hits = misses = 0 + full = False + cache_get = cache.get # bound method to lookup a key or return None + cache_len = cache.__len__ # get cache size without calling len() + lock = RLock() # because linkedlist updates aren't threadsafe + root = [] # root of the circular doubly linked list + root[:] = [root, root, None, None] # initialize by pointing to self + + if maxsize == 0: + + def wrapper(*args, **kwds): + # 
No caching -- just a statistics update + nonlocal misses + misses += 1 + result = user_function(*args, **kwds) + return result + + elif maxsize is None: + + def wrapper(*args, **kwds): + # Simple caching without ordering or size limit + nonlocal hits, misses + key = make_key(args, kwds, typed) + result = cache_get(key, sentinel) + if result is not sentinel: + hits += 1 + return result + misses += 1 + result = user_function(*args, **kwds) + cache[key] = result + return result + + else: + + def wrapper(*args, **kwds): + # Size limited caching that tracks accesses by recency + nonlocal root, hits, misses, full + key = make_key(args, kwds, typed) + with lock: + link = cache_get(key) + if link is not None: + # Move the link to the front of the circular queue + link_prev, link_next, _key, result = link + link_prev[NEXT] = link_next + link_next[PREV] = link_prev + last = root[PREV] + last[NEXT] = root[PREV] = link + link[PREV] = last + link[NEXT] = root + hits += 1 + return result + misses += 1 + result = user_function(*args, **kwds) + with lock: + if key in cache: + # Getting here means that this same key was added to the + # cache while the lock was released. Since the link + # update is already done, we need only return the + # computed result and update the count of misses. + pass + elif full: + # Use the old root to store the new key and result. + oldroot = root + oldroot[KEY] = key + oldroot[RESULT] = result + # Empty the oldest link and make it the new root. + # Keep a reference to the old key and old result to + # prevent their ref counts from going to zero during the + # update. That will prevent potentially arbitrary object + # clean-up code (i.e. __del__) from running while we're + # still adjusting the links. + root = oldroot[NEXT] + oldkey = root[KEY] + root[KEY] = root[RESULT] = None + # Now update the cache dictionary. 
+ del cache[oldkey] + # Save the potentially reentrant cache[key] assignment + # for last, after the root and links have been put in + # a consistent state. + cache[key] = oldroot + else: + # Put result in a new link at the front of the queue. + last = root[PREV] + link = [last, root, key, result] + last[NEXT] = root[PREV] = cache[key] = link + # Use the cache_len bound method instead of the len() function + # which could potentially be wrapped in an lru_cache itself. + full = cache_len() >= maxsize + return result + + def cache_info(): + """Report cache statistics""" + with lock: + return _CacheInfo(hits, misses, maxsize, cache_len()) + + def cache_clear(): + """Clear the cache and cache statistics""" + nonlocal hits, misses, full + with lock: + cache.clear() + root[:] = [root, root, None, None] + hits = misses = 0 + full = False + + wrapper.cache_info = cache_info + wrapper.cache_clear = cache_clear + return wrapper diff --git a/lib/bleach/__init__.py b/lib/bleach/__init__.py index 4e87eb80..12e93b4d 100644 --- a/lib/bleach/__init__.py +++ b/lib/bleach/__init__.py @@ -11,9 +11,9 @@ from bleach.sanitizer import ( # yyyymmdd -__releasedate__ = "20230123" +__releasedate__ = "20231006" # x.y.z or x.y.z.dev0 -- semver -__version__ = "6.0.0" +__version__ = "6.1.0" __all__ = ["clean", "linkify"] diff --git a/lib/bleach/html5lib_shim.py b/lib/bleach/html5lib_shim.py index aa5189b1..ca1cc8c8 100644 --- a/lib/bleach/html5lib_shim.py +++ b/lib/bleach/html5lib_shim.py @@ -395,10 +395,17 @@ class BleachHTMLTokenizer(HTMLTokenizer): # followed by a series of characters. 
It's treated as a tag # name that abruptly ends, but we should treat that like # character data - yield { - "type": TAG_TOKEN_TYPE_CHARACTERS, - "data": "<" + self.currentToken["name"], - } + yield {"type": TAG_TOKEN_TYPE_CHARACTERS, "data": self.stream.get_tag()} + elif last_error_token["data"] in ( + "eof-in-attribute-name", + "eof-in-attribute-value-no-quotes", + ): + # Handle the case where the text being parsed ends with < + # followed by a series of characters and then space and then + # more characters. It's treated as a tag name followed by an + # attribute that abruptly ends, but we should treat that like + # character data. + yield {"type": TAG_TOKEN_TYPE_CHARACTERS, "data": self.stream.get_tag()} else: yield last_error_token diff --git a/lib/bleach/linkifier.py b/lib/bleach/linkifier.py index 679d7ead..8fcefb2c 100644 --- a/lib/bleach/linkifier.py +++ b/lib/bleach/linkifier.py @@ -45,8 +45,8 @@ def build_url_re(tlds=TLDS, protocols=html5lib_shim.allowed_protocols): r"""\(* # Match any opening parentheses. \b(?"]*)? - # /path/zz (excluding "unsafe" chars from RFC 1738, + (?:[/?][^\s\{{\}}\|\\\^`<>"]*)? 
+ # /path/zz (excluding "unsafe" chars from RFC 3986, # except for # and ~, which happen in practice) """.format( "|".join(sorted(protocols)), "|".join(sorted(tlds)) @@ -591,7 +591,7 @@ class LinkifyFilter(html5lib_shim.Filter): in_a = False token_buffer = [] else: - token_buffer.append(token) + token_buffer.extend(list(self.extract_entities(token))) continue if token["type"] in ["StartTag", "EmptyTag"]: diff --git a/lib/bs4/__init__.py b/lib/bs4/__init__.py index 3d2ab09a..d8ad5e1d 100644 --- a/lib/bs4/__init__.py +++ b/lib/bs4/__init__.py @@ -15,8 +15,8 @@ documentation: http://www.crummy.com/software/BeautifulSoup/bs4/doc/ """ __author__ = "Leonard Richardson (leonardr@segfault.org)" -__version__ = "4.12.2" -__copyright__ = "Copyright (c) 2004-2023 Leonard Richardson" +__version__ = "4.12.3" +__copyright__ = "Copyright (c) 2004-2024 Leonard Richardson" # Use of this source code is governed by the MIT license. __license__ = "MIT" diff --git a/lib/bs4/builder/__init__.py b/lib/bs4/builder/__init__.py index 2e397458..ffb31fc2 100644 --- a/lib/bs4/builder/__init__.py +++ b/lib/bs4/builder/__init__.py @@ -514,15 +514,19 @@ class DetectsXMLParsedAsHTML(object): XML_PREFIX_B = b' foo bar baz diff --git a/lib/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-4670634698080256.testcase b/lib/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-4670634698080256.testcase new file mode 100644 index 00000000..4828f8a4 --- /dev/null +++ b/lib/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-4670634698080256.testcase @@ -0,0 +1 @@ + ÿÿ ÿ tet>< \ No newline at end of file diff --git a/lib/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5492400320282624.testcase b/lib/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5492400320282624.testcase new file mode 100644 index 00000000..107da539 Binary files /dev/null and b/lib/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5492400320282624.testcase differ diff --git 
a/lib/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-6306874195312640.testcase b/lib/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-6306874195312640.testcase new file mode 100644 index 00000000..b60a250c --- /dev/null +++ b/lib/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-6306874195312640.testcase @@ -0,0 +1 @@ +- ÿÿ