[CalendarServer-changes] [12088] twext/trunk/twext/enterprise

source_changes at macosforge.org
Wed Mar 12 11:24:48 PDT 2014


Revision: 12088
          http://trac.calendarserver.org//changeset/12088
Author:   wsanchez at apple.com
Date:     2013-12-13 15:59:41 -0800 (Fri, 13 Dec 2013)
Log Message:
-----------
lint, style

Modified Paths:
--------------
    twext/trunk/twext/enterprise/adbapi2.py
    twext/trunk/twext/enterprise/dal/model.py
    twext/trunk/twext/enterprise/dal/parseschema.py
    twext/trunk/twext/enterprise/dal/record.py
    twext/trunk/twext/enterprise/dal/syntax.py
    twext/trunk/twext/enterprise/dal/test/test_parseschema.py
    twext/trunk/twext/enterprise/dal/test/test_record.py
    twext/trunk/twext/enterprise/dal/test/test_sqlsyntax.py
    twext/trunk/twext/enterprise/fixtures.py
    twext/trunk/twext/enterprise/ienterprise.py
    twext/trunk/twext/enterprise/locking.py
    twext/trunk/twext/enterprise/queue.py
    twext/trunk/twext/enterprise/test/test_adbapi2.py
    twext/trunk/twext/enterprise/test/test_fixtures.py
    twext/trunk/twext/enterprise/test/test_locking.py
    twext/trunk/twext/enterprise/test/test_queue.py
    twext/trunk/twext/enterprise/test/test_util.py
    twext/trunk/twext/enterprise/util.py

Modified: twext/trunk/twext/enterprise/adbapi2.py
===================================================================
--- twext/trunk/twext/enterprise/adbapi2.py	2013-12-13 21:51:30 UTC (rev 12087)
+++ twext/trunk/twext/enterprise/adbapi2.py	2013-12-13 23:59:41 UTC (rev 12088)
@@ -69,7 +69,7 @@
 # argument to the ConnectionPool but it should probably be determined
 # automatically from the database binding.
 
-DEFAULT_PARAM_STYLE = 'pyformat'
+DEFAULT_PARAM_STYLE = "pyformat"
 DEFAULT_DIALECT = POSTGRES_DIALECT
 
 
@@ -185,7 +185,7 @@
         dedicated to and associated with the current cursor.  It executes the
         given SQL, re-connecting first if necessary, re-cycling the old
         connection if necessary, and then, if there are results from the
-        statement (as determined by the DB-API 2.0 'description' attribute) it
+        statement (as determined by the DB-API 2.0 "description" attribute) it
         will fetch all the rows and return them, leaving them to be relayed to
         L{_ConnectedTxn.execSQL} via the L{ThreadHolder}.
 
@@ -224,18 +224,21 @@
             were returned by the executed statement.
         """
         wasFirst = self._first
+
         # If this is the first time this cursor has been used in this
         # transaction, remember that, but mark it as now used.
         self._first = False
         if args is None:
             args = []
+
         # Note: as of this writing, derived parameters are only used to support
         # cx_Oracle's "host variable" feature (i.e. cursor.var()), and creating
         # a host variable will never be a connection-oriented error (a
         # disconnected cursor can happily create variables of all types).
-        # However, this may need to move into the 'try' below if other database
+        # However, this may need to move into the "try" below if other database
         # features need to compute database arguments based on runtime state.
         derived = _deriveParameters(self._cursor, args)
+
         try:
             self._cursor.execute(sql, args)
         except:
@@ -263,7 +266,7 @@
                     # predictable across different databases, or even different
                     # bindings to the same database, so we have to do a
                     # catch-all here.  While I can't imagine another type of
-                    # error at the moment, bare 'except:'s are notorious for
+                    # error at the moment, bare C{except:}s are notorious for
                     # making debugging surprising error conditions very
                     # difficult, so let's make sure that the error is logged
                     # just in case.
@@ -280,14 +283,16 @@
                 self._cursor     = self._connection.cursor()
 
                 # Note that although this method is being invoked recursively,
-                # the '_first' flag is re-set at the very top, so we will _not_
+                # the "_first" flag is re-set at the very top, so we will _not_
                 # be re-entering it more than once.
                 result = self._reallyExecSQL(sql, args, raiseOnZeroRowCount)
                 return result
             else:
                 raise
+
         if derived is not None:
             _deriveQueryEnded(self._cursor, derived)
+
         if self._cursor.description:
             # see test_raiseOnZeroRowCountWithUnreliableRowCount
             rows = self._cursor.fetchall()
@@ -333,6 +338,7 @@
         """
         if not self._completed:
             self._completed = "ended"
+
             def reallySomething():
                 """
                 Do the database work and set appropriate flags.  Executed in
@@ -342,6 +348,7 @@
                     return
                 really()
                 self._first = True
+
             result = self._holder.submit(reallySomething)
             self._pool._repoolAfter(self, result)
             return result
@@ -383,6 +390,7 @@
             if self._cursor is None:
                 return
             self._connection.close()
+
         holder.submit(_reallyClose)
         return holder.stop()
 
@@ -470,11 +478,11 @@
 
 
     def execSQL(self, *a, **kw):
-        return self._enspool('execSQL', a, kw)
+        return self._enspool("execSQL", a, kw)
 
 
     def commit(self):
-        return self._enspool('commit')
+        return self._enspool("commit")
 
 
     def abort(self):
@@ -538,11 +546,14 @@
         Run pre-hooks, commit, the real DB commit, and then post-hooks.
         """
         pre = self._preCommit.runHooks()
+
         def ok(ignored):
             self._abort.clear()
             return doCommit().addCallback(self._commit.runHooks)
+
         def failed(why):
             return self.abort().addCallback(lambda ignored: why)
+
         return pre.addCallbacks(ok, failed)
 
 
@@ -561,7 +572,7 @@
 
 class _SingleTxn(_CommitAndAbortHooks,
                  proxyForInterface(iface=IAsyncTransaction,
-                                   originalAttribute='_baseTxn')):
+                                   originalAttribute="_baseTxn")):
     """
     A L{_SingleTxn} is a single-use wrapper for the longer-lived
     L{_ConnectedTxn}, so that if a badly-behaved API client accidentally hangs
@@ -595,7 +606,7 @@
         """
         Reveal the backend in the string representation.
         """
-        return '_SingleTxn(%r)' % (self._baseTxn,)
+        return "_SingleTxn(%r)" % (self._baseTxn,)
 
 
     def _unspoolOnto(self, baseTxn):
@@ -621,13 +632,15 @@
         self._checkComplete()
         if block is None and self._blockedQueue is not None:
             return self._blockedQueue.execSQL(sql, args, raiseOnZeroRowCount)
-        # 'block' should always be _currentBlock at this point.
+        # "block" should always be _currentBlock at this point.
         d = super(_SingleTxn, self).execSQL(sql, args, raiseOnZeroRowCount)
         self._stillExecuting.append(d)
+
         def itsDone(result):
             self._stillExecuting.remove(d)
             self._checkNextBlock()
             return result
+
         d.addBoth(itsDone)
         return d
 
@@ -640,12 +653,12 @@
         """
         if self._stillExecuting:
             # If we're still executing statements, nevermind.  We'll get called
-            # again by the 'itsDone' callback above.
+            # again by the C{itsDone} callback above.
             return
 
         if self._currentBlock is not None:
             # If there's still a current block, then keep it going.  We'll be
-            # called by the '_finishExecuting' callback below.
+            # called by the C{_finishExecuting} callback below.
             return
 
         # There's no block executing now.  What to do?
@@ -679,9 +692,11 @@
             # until they're done.  (Commit will be repeated in
             # _checkNextBlock.)
             return self._blockedQueue.commit()
+
         def reallyCommit():
             self._markComplete()
             return super(_SingleTxn, self).commit()
+
         return self._commitWithHooks(reallyCommit)
 
 
@@ -702,9 +717,11 @@
         """
         self._pool._waiting.remove(self)
         self._completed = True
-        self._unspoolOnto(_NoTxn(self._pool,
-                                 "connection pool shut down while txn "
-                                 "waiting for database connection."))
+        self._unspoolOnto(_NoTxn(
+            self._pool,
+            "connection pool shut down while txn "
+            "waiting for database connection."
+        ))
 
 
     def _checkComplete(self):
@@ -772,7 +789,7 @@
     Does not implement commit() or abort(), because this will simply group
     commands.  In order to implement sub-transactions or checkpoints, some
     understanding of the SQL dialect in use by the underlying connection is
-    required.  Instead, it provides 'end'.
+    required.  Instead, it provides C{end}.
     """
     implements(ICommandBlock)
 
@@ -810,14 +827,18 @@
         """
         if track and self._ended:
             raise AlreadyFinishedError()
+
         self._singleTxn._checkComplete()
+
         if self._singleTxn._currentBlock is self and self._started:
             d = self._singleTxn._execSQLForBlock(
                 sql, args, raiseOnZeroRowCount, self)
         else:
             d = self._spool.execSQL(sql, args, raiseOnZeroRowCount)
+
         if track:
             self._trackForEnd(d)
+
         return d
 
 
@@ -840,6 +861,7 @@
         if self._ended:
             raise AlreadyFinishedError()
         self._ended = True
+
         # TODO: maybe this should return a Deferred that's a clone of
         # _endDeferred, so that callers can determine when the block is really
         # complete?  Struggling for an actual use-case on that one.
@@ -885,9 +907,11 @@
         if self._retry is not None:
             self._retry.cancel()
         d = self._holder.stop()
+
         def removeme(ignored):
             if self in self._pool._busy:
                 self._pool._busy.remove(self)
+
         d.addCallback(removeme)
         return d
 
@@ -899,6 +923,7 @@
     disturbing its results.
     """
     d = Deferred()
+
     def fired(result):
         d.callback(result)
         return result
@@ -952,8 +977,11 @@
     RETRY_TIMEOUT = 10.0
 
 
-    def __init__(self, connectionFactory, maxConnections=10,
-                 paramstyle=DEFAULT_PARAM_STYLE, dialect=DEFAULT_DIALECT):
+    def __init__(
+        self,
+        connectionFactory, maxConnections=10,
+        paramstyle=DEFAULT_PARAM_STYLE, dialect=DEFAULT_DIALECT
+    ):
 
         super(ConnectionPool, self).__init__()
         self.connectionFactory = connectionFactory
@@ -1006,7 +1034,7 @@
             yield self._busy[0].abort()
 
         # Phase 4: All transactions should now be in the free list, since
-        # 'abort()' will have put them there.  Shut down all the associated
+        # "abort()" will have put them there.  Shut down all the associated
         # ThreadHolders.
         while self._free:
             # Releasing a L{_ConnectedTxn} doesn't automatically recycle it /
@@ -1039,6 +1067,7 @@
             # FIXME: should be wrapping a _SingleTxn around this to get
             # .commandBlock()
             return _NoTxn(self, "txn created while DB pool shutting down")
+
         if self._free:
             basetxn = self._free.pop(0)
             self._busy.append(basetxn)
@@ -1050,6 +1079,7 @@
             # (free doesn't need to be considered, as it's tested above)
             if self._activeConnectionCount() < self.maxConnections:
                 self._startOneMore()
+
         return txn
 
 
@@ -1067,14 +1097,16 @@
         holder = self._createHolder()
         holder.start()
         txn = _ConnectingPseudoTxn(self, holder)
-        # take up a slot in the 'busy' list, sit there so we can be aborted.
+        # take up a slot in the "busy" list, sit there so we can be aborted.
         self._busy.append(txn)
+
         def initCursor():
             # support threadlevel=1; we can't necessarily cursor() in a
             # different thread than we do transactions in.
             connection = self.connectionFactory()
             cursor     = connection.cursor()
             return (connection, cursor)
+
         def finishInit((connection, cursor)):
             if txn._aborted:
                 return
@@ -1086,12 +1118,15 @@
             )
             self._busy.remove(txn)
             self._repoolNow(baseTxn)
+
         def maybeTryAgain(f):
             log.err(f, "Re-trying connection due to connection failure")
             txn._retry = self.reactor.callLater(self.RETRY_TIMEOUT, resubmit)
+
         def resubmit():
             d = holder.submit(initCursor)
             d.addCallbacks(finishInit, maybeTryAgain)
+
         resubmit()
 
 
@@ -1103,15 +1138,18 @@
         self._busy.remove(txn)
         finishRecord = (txn, d)
         self._finishing.append(finishRecord)
+
         def repool(result):
             self._finishing.remove(finishRecord)
             self._repoolNow(txn)
             return result
+
         def discard(result):
             self._finishing.remove(finishRecord)
             txn._releaseConnection()
             self._startOneMore()
             return result
+
         return d.addCallbacks(repool, discard)
 
 
@@ -1130,11 +1168,13 @@
 
 
 def txnarg():
-    return [('transactionID', Integer())]
+    return [("transactionID", Integer())]
 
 
 CHUNK_MAX = 0xffff
 
+
+
 class BigArgument(Argument):
     """
     An argument whose payload can be larger than L{CHUNK_MAX}, by splitting
@@ -1162,9 +1202,9 @@
 
 class Pickle(BigArgument):
     """
-    A pickle sent over AMP.  This is to serialize the 'args' argument to
-    C{execSQL}, which is the dynamically-typed 'args' list argument to a DB-API
-    C{execute} function, as well as its dynamically-typed result ('rows').
+    A pickle sent over AMP.  This is to serialize the "args" argument to
+    C{execSQL}, which is the dynamically-typed "args" list argument to a DB-API
+    C{execute} function, as well as its dynamically-typed result ("rows").
 
     This should be cleaned up into a nicer structure, but this is not a network
     protocol, so we can be a little relaxed about security.
@@ -1235,11 +1275,13 @@
     """
     Execute an SQL statement.
     """
-    arguments = [('sql', String()),
-                 ('queryID', String()),
-                 ('args', Pickle()),
-                 ('blockID', String()),
-                 ('reportZeroRowCount', Boolean())] + txnarg()
+    arguments = [
+        ("sql", String()),
+        ("queryID", String()),
+        ("args", Pickle()),
+        ("blockID", String()),
+        ("reportZeroRowCount", Boolean())
+    ] + txnarg()
     errors = _quashErrors
 
 
@@ -1268,8 +1310,7 @@
     L{ExecSQL}.
     """
 
-    arguments = [('queryID', String()),
-                 ('row', Pickle())]
+    arguments = [("queryID", String()), ("row", Pickle())]
     errors = _quashErrors
 
 
@@ -1279,10 +1320,12 @@
     A query issued with L{ExecSQL} is complete.
     """
 
-    arguments = [('queryID', String()),
-                 ('norows', Boolean()),
-                 ('derived', Pickle()),
-                 ('noneResult', Boolean())]
+    arguments = [
+        ("queryID", String()),
+        ("norows", Boolean()),
+        ("derived", Pickle()),
+        ("noneResult", Boolean())
+    ]
     errors = _quashErrors
 
 
@@ -1359,19 +1402,23 @@
                     reportZeroRowCount):
         derived = None
         noneResult = False
+
         for param in args:
             if IDerivedParameter.providedBy(param):
                 if derived is None:
                     derived = []
                 derived.append(param)
+
         if blockID:
             txn = self._blocks[blockID]
         else:
             txn = self._txns[transactionID]
+
         if reportZeroRowCount:
             rozrc = _NoRows
         else:
             rozrc = None
+
         try:
             rows = yield txn.execSQL(sql, args, rozrc)
         except _NoRows:
@@ -1386,8 +1433,10 @@
             else:
                 noneResult = True
 
-        self.callRemote(QueryComplete, queryID=queryID, norows=norows,
-                        derived=derived, noneResult=noneResult)
+        self.callRemote(
+            QueryComplete, queryID=queryID, norows=norows,
+            derived=derived, noneResult=noneResult
+        )
         returnValue({})
 
 
@@ -1422,8 +1471,9 @@
     A client which can execute SQL.
     """
 
-    def __init__(self, dialect=POSTGRES_DIALECT,
-                 paramstyle=DEFAULT_PARAM_STYLE):
+    def __init__(
+        self, dialect=POSTGRES_DIALECT, paramstyle=DEFAULT_PARAM_STYLE
+    ):
         # See DEFAULT_PARAM_STYLE FIXME above.
         super(ConnectionPoolClient, self).__init__()
         self._nextID    = count().next
@@ -1511,6 +1561,7 @@
             results = None
         else:
             results = self.results
+
         if derived is not None:
             # 1) Bleecchh.
             # 2) FIXME: add some direct tests in test_adbapi2, the unit test
@@ -1562,7 +1613,7 @@
     @property
     def paramstyle(self):
         """
-        Forward 'paramstyle' attribute to the client.
+        Forward C{paramstyle} attribute to the client.
         """
         return self._client.paramstyle
 
@@ -1570,7 +1621,7 @@
     @property
     def dialect(self):
         """
-        Forward 'dialect' attribute to the client.
+        Forward C{dialect} attribute to the client.
         """
         return self._client.dialect
 
@@ -1608,10 +1659,13 @@
     def commit(self):
         def reallyCommit():
             self._committing = True
+
             def done(whatever):
                 self._committed = True
                 return whatever
+
             return self._complete(Commit).addBoth(done)
+
         return self._commitWithHooks(reallyCommit)
 
 
@@ -1658,7 +1712,7 @@
     @property
     def paramstyle(self):
         """
-        Forward 'paramstyle' attribute to the transaction.
+        Forward C{paramstyle} attribute to the transaction.
         """
         return self._transaction.paramstyle
 
@@ -1666,7 +1720,7 @@
     @property
     def dialect(self):
         """
-        Forward 'dialect' attribute to the transaction.
+        Forward C{dialect} attribute to the transaction.
         """
         return self._transaction.dialect
 

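For orientation, the ConnectionPool / IAsyncTransaction API whose quoting and wrapping are adjusted above is normally driven along these lines. This is a minimal sketch only: the sqlite3 connection factory, the "qmark" paramstyle, and the startService() / connection() / stopService() calls are illustrative assumptions, not part of this change.

    import sqlite3
    from twext.enterprise.adbapi2 import ConnectionPool

    # connectionFactory is any zero-argument callable returning a DB-API 2.0
    # connection; the pool creates cursors and worker threads itself.
    pool = ConnectionPool(
        lambda: sqlite3.connect("example.sqlite"),
        maxConnections=4, paramstyle="qmark"
    )
    pool.startService()

    txn = pool.connection()

    def report(rows):
        # rows is the cursor's fetchall() result, or None when the executed
        # statement produced no "description" (e.g. CREATE or INSERT).
        print(rows)
        return txn.commit()

    txn.execSQL("select * from EXAMPLE_TABLE", []).addCallback(report)

    # At shutdown, pool.stopService() aborts outstanding transactions and
    # stops the per-connection ThreadHolders.
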
Modified: twext/trunk/twext/enterprise/dal/model.py
===================================================================
--- twext/trunk/twext/enterprise/dal/model.py	2013-12-13 21:51:30 UTC (rev 12087)
+++ twext/trunk/twext/enterprise/dal/model.py	2013-12-13 23:59:41 UTC (rev 12088)
@@ -18,17 +18,35 @@
 """
 Model classes for SQL.
 """
+
+__all__ = [
+    "SQLType",
+    "Constraint",
+    "Check",
+    "ProcedureCall",
+    "NO_DEFAULT",
+    "Column",
+    "Table",
+    "Index",
+    "PseudoIndex",
+    "Sequence",
+    "Schema",
+]
+
 from twisted.python.util import FancyEqMixin
 
+
+
 class SQLType(object):
     """
-    A data-type as defined in SQL; like "integer" or "real" or "varchar(255)".
+    A data-type as defined in SQL; like C{integer} or C{real} or
+    C{varchar(255)}.
 
     @ivar name: the name of this type.
     @type name: C{str}
 
-    @ivar length: the length of this type, if it is a type like 'varchar' or
-        'character' that comes with a parenthetical length.
+    @ivar length: the length of this type, if it is a type like C{varchar} or
+        C{character} that comes with a parenthetical length.
     @type length: C{int} or C{NoneType}
     """
 
@@ -62,10 +80,10 @@
         present.
         """
         if self.length:
-            lendesc = '(%s)' % (self.length)
+            lendesc = "(%s)" % (self.length)
         else:
-            lendesc = ''
-        return '<SQL Type: %r%s>' % (self.name, lendesc)
+            lendesc = ""
+        return "<SQL Type: %r%s>" % (self.name, lendesc)
 
 
 
@@ -73,8 +91,8 @@
     """
     A constraint on a set of columns.
 
-    @ivar type: the type of constraint.  Currently, only C{'UNIQUE'} and C{'NOT
-        NULL'} are supported.
+    @ivar type: the type of constraint.  Currently, only C{"UNIQUE"} and C{"NOT
+        NULL"} are supported.
     @type type: C{str}
 
     @ivar affectsColumns: Columns affected by this constraint.
@@ -82,9 +100,9 @@
     @type affectsColumns: C{list} of L{Column}
     """
 
-    # Values for 'type' attribute:
-    NOT_NULL = 'NOT NULL'
-    UNIQUE = 'UNIQUE'
+    # Values for "type" attribute:
+    NOT_NULL = "NOT NULL"
+    UNIQUE = "UNIQUE"
 
     def __init__(self, type, affectsColumns, name=None):
         self.affectsColumns = affectsColumns
@@ -97,7 +115,7 @@
 
 class Check(Constraint):
     """
-    A 'check' constraint, which evaluates an SQL expression.
+    A C{check} constraint, which evaluates an SQL expression.
 
     @ivar expression: the expression that should evaluate to True.
     @type expression: L{twext.enterprise.dal.syntax.ExpressionSyntax}
@@ -107,7 +125,7 @@
     def __init__(self, syntaxExpression, name=None):
         self.expression = syntaxExpression
         super(Check, self).__init__(
-            'CHECK', [c.model for c in self.expression.allColumns()], name
+            "CHECK", [c.model for c in self.expression.allColumns()], name
         )
 
 
@@ -126,8 +144,8 @@
 
 class NO_DEFAULT(object):
     """
-    Placeholder value for not having a default.  (C{None} would not be suitable,
-    as that would imply a default of C{NULL}).
+    Placeholder value for not having a default.  (C{None} would not be
+    suitable, as that would imply a default of C{NULL}).
     """
 
 
@@ -160,13 +178,14 @@
         this will be a reference to that table; otherwise (normally) C{None}.
     @type references: L{Table} or C{NoneType}
 
-    @ivar deleteAction: If this column references another table, home will this column's
-        row be altered when the matching row in that other table is deleted? Possible values are
-        None - for 'on delete no action'
-        'cascade' - for 'on delete cascade'
-        'set null' - for 'on delete set null'
-        'set default' - for 'on delete set default'
-    @type deleteAction: C{bool}
+    @ivar deleteAction: If this column references another table, home will this
+        column's row be altered when the matching row in that other table is
+        deleted? Possible values are:
+        C{None} - for "on delete no action";
+        C{"cascade"} - for "on delete cascade";
+        C{"set null"} - for "on delete set null";
+        C{"set default"} - for "on delete set default".
+    @type deleteAction: C{str}
     """
 
     compareAttributes = 'table name'.split()
@@ -182,7 +201,7 @@
 
 
     def __repr__(self):
-        return '<Column (%s %r)>' % (self.name, self.type)
+        return "<Column (%s %r)>" % (self.name, self.type)
 
 
     def compare(self, other):
@@ -196,8 +215,11 @@
         results = []
 
         # TODO: sql_dump does not do types write now - so ignore this
-#        if self.type != other.type:
-#            results.append("Table: %s, mismatched column type: %s" % (self.table.name, self.name))
+        # if self.type != other.type:
+        #     results.append(
+        #         "Table: %s, mismatched column type: %s"
+        #         % (self.table.name, self.name)
+        #     )
 
         # TODO: figure out how to compare default, references and deleteAction
         return results
@@ -255,7 +277,7 @@
     """
     A set of columns.
 
-    @ivar descriptiveComment: A docstring for the table.  Parsed from a '--'
+    @ivar descriptiveComment: A docstring for the table.  Parsed from a C{--}
         comment preceding this table in the SQL schema file that was parsed, if
         any.
     @type descriptiveComment: C{str}
@@ -266,11 +288,11 @@
         key of this table, or C{None} if no primary key has been specified.
     """
 
-    compareAttributes = 'schema name'.split()
+    compareAttributes = "schema name".split()
 
     def __init__(self, schema, name):
         _checkstr(name)
-        self.descriptiveComment = ''
+        self.descriptiveComment = ""
         self.schema = schema
         self.name = name
         self.columns = []
@@ -281,7 +303,7 @@
 
 
     def __repr__(self):
-        return '<Table %r:%r>' % (self.name, self.columns)
+        return "<Table %r:%r>" % (self.name, self.columns)
 
 
     def compare(self, other):
@@ -295,9 +317,13 @@
         results = []
 
         myColumns = dict([(item.name.lower(), item) for item in self.columns])
-        otherColumns = dict([(item.name.lower(), item) for item in other.columns])
+        otherColumns = dict([
+            (item.name.lower(), item) for item in other.columns
+        ])
         for item in set(myColumns.keys()) ^ set(otherColumns.keys()):
-            results.append("Table: %s, missing column: %s" % (self.name, item,))
+            results.append(
+                "Table: %s, missing column: %s" % (self.name, item,)
+            )
 
         for name in set(myColumns.keys()) & set(otherColumns.keys()):
             results.extend(myColumns[name].compare(otherColumns[name]))
@@ -353,8 +379,8 @@
 
     def checkConstraint(self, protoExpression, name=None):
         """
-        This table is affected by a 'check' constraint.  (Should only be called
-        during schema parsing.)
+        This table is affected by a C{check} constraint.  (Should only be
+        called during schema parsing.)
 
         @param protoExpression: proto expression.
         """
@@ -365,8 +391,8 @@
         """
         A statically-defined row was inserted as part of the schema itself.
         This is used for tables that want to track static enumerations, for
-        example, but want to be referred to by a foreign key in other tables for
-        proper referential integrity.
+        example, but want to be referred to by a foreign key in other tables
+        for proper referential integrity.
 
         Append this data to this L{Table}'s L{Table.schemaRows}.
 
@@ -424,15 +450,23 @@
 
 class PseudoIndex(object):
     """
-    A class used to represent explicit and implicit indexes. An implicit index is one the
-    DB creates for primary key and unique columns in a table. An explicit index is one
-    created by a CREATE [UNIQUE] INDEX statement. Because the name of an implicit index
-    is implementation defined, instead we create a name based on the table name, uniqueness
-    and column names.
+    A class used to represent explicit and implicit indexes. An implicit index
+    is one the DB creates for primary key and unique columns in a table. An
+    explicit index is one created by a CREATE [UNIQUE] INDEX statement. Because
+    the name of an implicit index is implementation-defined, instead we create
+    a name based on the table name, uniqueness and column names.
     """
 
     def __init__(self, table, columns, unique=False):
-        self.name = "%s%s:(%s)" % (table.name, "-unique" if unique else "", ",".join([col.name for col in columns]))
+        if unique:
+            suffix = "-unique"
+        else:
+            suffix = ""
+
+        self.name = (
+            "%s%s:(%s)"
+            % (table.name, suffix, ",".join([col.name for col in columns]))
+        )
         self.table = table
         self.unique = unique
         self.columns = columns
@@ -456,7 +490,7 @@
     A sequence object.
     """
 
-    compareAttributes = 'name'.split()
+    compareAttributes = "name".split()
 
     def __init__(self, schema, name):
         _checkstr(name)
@@ -466,7 +500,7 @@
 
 
     def __repr__(self):
-        return '<Sequence %r>' % (self.name,)
+        return "<Sequence %r>" % (self.name,)
 
 
     def compare(self, other):
@@ -484,8 +518,8 @@
 
 def _namedFrom(name, sequence):
     """
-    Retrieve an item with a given name attribute from a given sequence, or raise
-    a L{KeyError}.
+    Retrieve an item with a given name attribute from a given sequence, or
+    raise a L{KeyError}.
     """
     for item in sequence:
         if item.name == name:
@@ -499,7 +533,7 @@
     A schema containing tables, indexes, and sequences.
     """
 
-    def __init__(self, filename='<string>'):
+    def __init__(self, filename="<string>"):
         self.filename = filename
         self.tables = []
         self.indexes = []
@@ -507,7 +541,7 @@
 
 
     def __repr__(self):
-        return '<Schema %r>' % (self.filename,)
+        return "<Schema %r>" % (self.filename,)
 
 
     def compare(self, other):
@@ -522,11 +556,19 @@
 
         def _compareLists(list1, list2, descriptor):
             myItems = dict([(item.name.lower()[:63], item) for item in list1])
-            otherItems = dict([(item.name.lower()[:63], item) for item in list2])
+            otherItems = dict([
+                (item.name.lower()[:63], item) for item in list2
+            ])
             for item in set(myItems.keys()) - set(otherItems.keys()):
-                results.append("Schema: %s, missing %s: %s" % (other.filename, descriptor, item,))
+                results.append(
+                    "Schema: %s, missing %s: %s"
+                    % (other.filename, descriptor, item)
+                )
             for item in set(otherItems.keys()) - set(myItems.keys()):
-                results.append("Schema: %s, missing %s: %s" % (self.filename, descriptor, item,))
+                results.append(
+                    "Schema: %s, missing %s: %s"
+                    % (self.filename, descriptor, item)
+                )
 
             for name in set(myItems.keys()) & set(otherItems.keys()):
                 results.extend(myItems[name].compare(otherItems[name]))
@@ -540,14 +582,17 @@
 
     def pseudoIndexes(self):
         """
-        Return a set of indexes that include "implicit" indexes from table/column constraints. The name of the
-        index is formed from the table name and then list of columns.
+        Return a set of indexes that include "implicit" indexes from
+        table/column constraints. The name of the index is formed from the
+        table name and then list of columns.
         """
         results = []
 
         # First add the list of explicit indexes we have
         for index in self.indexes:
-            results.append(PseudoIndex(index.table, index.columns, index.unique))
+            results.append(
+                PseudoIndex(index.table, index.columns, index.unique)
+            )
 
         # Now do implicit index for each table
         for table in self.tables:
@@ -555,7 +600,9 @@
                 results.append(PseudoIndex(table, table.primaryKey, True))
             for constraint in table.constraints:
                 if constraint.type == Constraint.UNIQUE:
-                    results.append(PseudoIndex(table, constraint.affectsColumns, True))
+                    results.append(
+                        PseudoIndex(table, constraint.affectsColumns, True)
+                    )
 
         return results
 

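The model classes exported in the new __all__ above are plain in-memory value objects, so a table can be described and diffed without touching a database. A small sketch (the example table and columns are invented for illustration):

    from twext.enterprise.dal.model import Schema, Table, SQLType

    job = Table(Schema("example.sql"), "JOB")
    job.addColumn(name="JOB_ID", type=SQLType("integer", None))
    job.addColumn(name="NOTE", type=SQLType("varchar", 255))
    job.primaryKey = [job.columnNamed("JOB_ID")]

    other = Table(Schema("other.sql"), "JOB")
    other.addColumn(name="JOB_ID", type=SQLType("integer", None))

    # compare() returns human-readable mismatch descriptions, e.g.
    # "Table: JOB, missing column: note".
    print(job.compare(other))
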
Modified: twext/trunk/twext/enterprise/dal/parseschema.py
===================================================================
--- twext/trunk/twext/enterprise/dal/parseschema.py	2013-12-13 21:51:30 UTC (rev 12087)
+++ twext/trunk/twext/enterprise/dal/parseschema.py	2013-12-13 23:59:41 UTC (rev 12088)
@@ -20,6 +20,19 @@
 Parser for SQL schema.
 """
 
+__all__ = [
+    "tableFromCreateStatement",
+    "schemaFromPath",
+    "schemaFromString",
+    "addSQLToSchema",
+    "ViolatedExpectation",
+    "nameOrIdentifier",
+    "expectSingle",
+    "expect",
+    "significant",
+    "iterSignificant",
+]
+
 from itertools import chain
 
 from sqlparse import parse, keywords
@@ -41,12 +54,12 @@
 def _fixKeywords():
     """
     Work around bugs in SQLParse, adding SEQUENCE as a keyword (since it is
-    treated as one in postgres) and removing ACCESS and SIZE (since we use those
-    as column names).  Technically those are keywords in SQL, but they aren't
-    treated as such by postgres's parser.
+    treated as one in postgres) and removing ACCESS and SIZE (since we use
+    those as column names).  Technically those are keywords in SQL, but they
+    aren't treated as such by postgres's parser.
     """
-    keywords.KEYWORDS['SEQUENCE'] = Keyword
-    for columnNameKeyword in ['ACCESS', 'SIZE']:
+    keywords.KEYWORDS["SEQUENCE"] = Keyword
+    for columnNameKeyword in ["ACCESS", "SIZE"]:
         del keywords.KEYWORDS[columnNameKeyword]
 
 _fixKeywords()
@@ -58,19 +71,17 @@
     Add a table from a CREATE TABLE sqlparse statement object.
 
     @param schema: The schema to add the table statement to.
-
     @type schema: L{Schema}
 
     @param stmt: The C{CREATE TABLE} statement object.
-
     @type stmt: L{Statement}
     """
     i = iterSignificant(stmt)
-    expect(i, ttype=Keyword.DDL, value='CREATE')
-    expect(i, ttype=Keyword, value='TABLE')
+    expect(i, ttype=Keyword.DDL, value="CREATE")
+    expect(i, ttype=Keyword, value="TABLE")
     function = expect(i, cls=Function)
     i = iterSignificant(function)
-    name = expect(i, cls=Identifier).get_name().encode('utf-8')
+    name = expect(i, cls=Identifier).get_name().encode("utf-8")
     self = Table(schema, name)
     parens = expect(i, cls=Parenthesis)
     cp = _ColumnParser(self, iterSignificant(parens), parens)
@@ -115,79 +126,105 @@
     Add new SQL to an existing schema.
 
     @param schema: The schema to add the new SQL to.
-
     @type schema: L{Schema}
 
     @param schemaData: A string containing some SQL statements.
-
     @type schemaData: C{str}
 
     @return: the C{schema} argument
     """
     parsed = parse(schemaData)
+
     for stmt in parsed:
-        preface = ''
+        preface = ""
+
         while stmt.tokens and not significant(stmt.tokens[0]):
             preface += str(stmt.tokens.pop(0))
+
         if not stmt.tokens:
             continue
-        if stmt.get_type() == 'CREATE':
+
+        if stmt.get_type() == "CREATE":
             createType = stmt.token_next(1, True).value.upper()
-            if createType == u'TABLE':
+
+            if createType == u"TABLE":
                 t = tableFromCreateStatement(schema, stmt)
                 t.addComment(preface)
-            elif createType == u'SEQUENCE':
-                Sequence(schema,
-                         stmt.token_next(2, True).get_name().encode('utf-8'))
-            elif createType in (u'INDEX', u'UNIQUE'):
+
+            elif createType == u"SEQUENCE":
+                Sequence(
+                    schema,
+                    stmt.token_next(2, True).get_name().encode("utf-8")
+                )
+
+            elif createType in (u"INDEX", u"UNIQUE"):
                 signifindex = iterSignificant(stmt)
-                expect(signifindex, ttype=Keyword.DDL, value='CREATE')
+                expect(signifindex, ttype=Keyword.DDL, value="CREATE")
                 token = signifindex.next()
                 unique = False
+
                 if token.match(Keyword, "UNIQUE"):
                     unique = True
                     token = signifindex.next()
+
                 if not token.match(Keyword, "INDEX"):
                     raise ViolatedExpectation("INDEX or UNQIUE", token.value)
+
                 indexName = nameOrIdentifier(signifindex.next())
-                expect(signifindex, ttype=Keyword, value='ON')
+                expect(signifindex, ttype=Keyword, value="ON")
                 token = signifindex.next()
+
                 if isinstance(token, Function):
                     [tableName, columnArgs] = iterSignificant(token)
                 else:
                     tableName = token
                     token = signifindex.next()
+
                     if token.match(Keyword, "USING"):
-                        [_ignore, columnArgs] = iterSignificant(expect(signifindex, cls=Function))
+                        [_ignore, columnArgs] = iterSignificant(
+                            expect(signifindex, cls=Function)
+                        )
                     else:
-                        raise ViolatedExpectation('USING', token)
+                        raise ViolatedExpectation("USING", token)
+
                 tableName = nameOrIdentifier(tableName)
                 arggetter = iterSignificant(columnArgs)
 
-                expect(arggetter, ttype=Punctuation, value=u'(')
+                expect(arggetter, ttype=Punctuation, value=u"(")
                 valueOrValues = arggetter.next()
+
                 if isinstance(valueOrValues, IdentifierList):
                     valuelist = valueOrValues.get_identifiers()
                 else:
                     valuelist = [valueOrValues]
-                expect(arggetter, ttype=Punctuation, value=u')')
 
-                idx = Index(schema, indexName, schema.tableNamed(tableName), unique)
+                expect(arggetter, ttype=Punctuation, value=u")")
+
+                idx = Index(
+                    schema, indexName, schema.tableNamed(tableName), unique
+                )
+
                 for token in valuelist:
                     columnName = nameOrIdentifier(token)
                     idx.addColumn(idx.table.columnNamed(columnName))
-        elif stmt.get_type() == 'INSERT':
+
+        elif stmt.get_type() == "INSERT":
             insertTokens = iterSignificant(stmt)
-            expect(insertTokens, ttype=Keyword.DML, value='INSERT')
-            expect(insertTokens, ttype=Keyword, value='INTO')
+            expect(insertTokens, ttype=Keyword.DML, value="INSERT")
+            expect(insertTokens, ttype=Keyword, value="INTO")
+
             tableName = expect(insertTokens, cls=Identifier).get_name()
-            expect(insertTokens, ttype=Keyword, value='VALUES')
+            expect(insertTokens, ttype=Keyword, value="VALUES")
+
             values = expect(insertTokens, cls=Parenthesis)
             vals = iterSignificant(values)
-            expect(vals, ttype=Punctuation, value='(')
+            expect(vals, ttype=Punctuation, value="(")
+
             valuelist = expect(vals, cls=IdentifierList)
-            expect(vals, ttype=Punctuation, value=')')
+            expect(vals, ttype=Punctuation, value=")")
+
             rowData = []
+
             for ident in valuelist.get_identifiers():
                 rowData.append(
                     {Number.Integer: int,
@@ -196,8 +233,10 @@
                 )
 
             schema.tableNamed(tableName).insertSchemaRow(rowData)
+
         else:
-            print('unknown type:', stmt.get_type())
+            print("unknown type:", stmt.get_type())
+
     return schema
 
 
@@ -276,12 +315,14 @@
         parens = iterSignificant(parens)
         expect(parens, ttype=Punctuation, value="(")
         idorids = parens.next()
+
         if isinstance(idorids, Identifier):
             idnames = [idorids.get_name()]
         elif isinstance(idorids, IdentifierList):
             idnames = [x.get_name() for x in idorids.get_identifiers()]
         else:
             raise ViolatedExpectation("identifier or list", repr(idorids))
+
         expect(parens, ttype=Punctuation, value=")")
         return idnames
 
@@ -295,11 +336,15 @@
         parens = iterSignificant(parens)
         expect(parens, ttype=Punctuation, value="(")
         nexttok = parens.next()
+
         if isinstance(nexttok, Comparison):
             lhs, op, rhs = list(iterSignificant(nexttok))
-            result = CompoundComparison(self.nameOrValue(lhs),
-                                        op.value.encode("ascii"),
-                                        self.nameOrValue(rhs))
+            result = CompoundComparison(
+                self.nameOrValue(lhs),
+                op.value.encode("ascii"),
+                self.nameOrValue(rhs)
+            )
+
         elif isinstance(nexttok, Identifier):
             # our version of SQLParse seems to break down and not create a nice
             # "Comparison" object when a keyword is present.  This is just a
@@ -331,26 +376,29 @@
 
     def parseConstraint(self, constraintType):
         """
-        Parse a 'free' constraint, described explicitly in the table as opposed
-        to being implicitly associated with a column by being placed after it.
+        Parse a C{free} constraint, described explicitly in the table as
+        opposed to being implicitly associated with a column by being placed
+        after it.
         """
         ident = None
         # TODO: make use of identifier in tableConstraint, currently only used
         # for checkConstraint.
-        if constraintType.match(Keyword, 'CONSTRAINT'):
+        if constraintType.match(Keyword, "CONSTRAINT"):
             ident = expect(self, cls=Identifier).get_name()
             constraintType = expect(self, ttype=Keyword)
-        if constraintType.match(Keyword, 'PRIMARY'):
-            expect(self, ttype=Keyword, value='KEY')
+
+        if constraintType.match(Keyword, "PRIMARY"):
+            expect(self, ttype=Keyword, value="KEY")
             names = self.namesInParens(expect(self, cls=Parenthesis))
             self.table.primaryKey = [self.table.columnNamed(n) for n in names]
-        elif constraintType.match(Keyword, 'UNIQUE'):
+        elif constraintType.match(Keyword, "UNIQUE"):
             names = self.namesInParens(expect(self, cls=Parenthesis))
             self.table.tableConstraint(Constraint.UNIQUE, names)
-        elif constraintType.match(Keyword, 'CHECK'):
+        elif constraintType.match(Keyword, "CHECK"):
             self.table.checkConstraint(self.readExpression(self.next()), ident)
         else:
-            raise ViolatedExpectation('PRIMARY or UNIQUE', constraintType)
+            raise ViolatedExpectation("PRIMARY or UNIQUE", constraintType)
+
         return self.checkEnd(self.next())
 
 
@@ -375,9 +423,13 @@
             [funcIdent, args] = iterSignificant(typeName)
             typeName = funcIdent
             arggetter = iterSignificant(args)
-            expect(arggetter, value=u'(')
-            typeLength = int(expect(arggetter,
-                                    ttype=Number.Integer).value.encode('utf-8'))
+            expect(arggetter, value=u"(")
+            typeLength = int(
+                expect(
+                    arggetter,
+                    ttype=Number.Integer
+                ).value.encode("utf-8")
+            )
         else:
             maybeTypeArgs = self.next()
             if isinstance(maybeTypeArgs, Parenthesis):
@@ -389,98 +441,153 @@
                 # something else
                 typeLength = None
                 self.pushback(maybeTypeArgs)
+
         theType = SQLType(typeName.value.encode("utf-8"), typeLength)
         theColumn = self.table.addColumn(
             name=name.encode("utf-8"), type=theType
         )
+
         for val in self:
             if val.ttype == Punctuation:
                 return self.checkEnd(val)
             else:
                 expected = True
+
                 def oneConstraint(t):
                     self.table.tableConstraint(t, [theColumn.name])
 
-                if val.match(Keyword, 'PRIMARY'):
-                    expect(self, ttype=Keyword, value='KEY')
+                if val.match(Keyword, "PRIMARY"):
+                    expect(self, ttype=Keyword, value="KEY")
                     # XXX check to make sure there's no other primary key yet
                     self.table.primaryKey = [theColumn]
-                elif val.match(Keyword, 'UNIQUE'):
+
+                elif val.match(Keyword, "UNIQUE"):
                     # XXX add UNIQUE constraint
                     oneConstraint(Constraint.UNIQUE)
-                elif val.match(Keyword, 'NOT'):
-                    # possibly not necessary, as 'NOT NULL' is a single keyword
+
+                elif val.match(Keyword, "NOT"):
+                    # possibly not necessary, as "NOT NULL" is a single keyword
                     # in sqlparse as of 0.1.2
-                    expect(self, ttype=Keyword, value='NULL')
+                    expect(self, ttype=Keyword, value="NULL")
                     oneConstraint(Constraint.NOT_NULL)
-                elif val.match(Keyword, 'NOT NULL'):
+
+                elif val.match(Keyword, "NOT NULL"):
                     oneConstraint(Constraint.NOT_NULL)
-                elif val.match(Keyword, 'CHECK'):
-                    self.table.checkConstraint(self.readExpression(self.next()))
-                elif val.match(Keyword, 'DEFAULT'):
+
+                elif val.match(Keyword, "CHECK"):
+                    self.table.checkConstraint(
+                        self.readExpression(self.next())
+                    )
+
+                elif val.match(Keyword, "DEFAULT"):
                     theDefault = self.next()
+
                     if isinstance(theDefault, Parenthesis):
                         iDefault = iterSignificant(theDefault)
                         expect(iDefault, ttype=Punctuation, value="(")
                         theDefault = iDefault.next()
+
                     if isinstance(theDefault, Function):
                         thingo = theDefault.tokens[0].get_name()
                         parens = expectSingle(
                             theDefault.tokens[-1], cls=Parenthesis
                         )
                         pareniter = iterSignificant(parens)
-                        if thingo.upper() == 'NEXTVAL':
+                        if thingo.upper() == "NEXTVAL":
                             expect(pareniter, ttype=Punctuation, value="(")
                             seqname = _destringify(
-                                expect(pareniter, ttype=String.Single).value)
+                                expect(pareniter, ttype=String.Single).value
+                            )
                             defaultValue = self.table.schema.sequenceNamed(
                                 seqname
                             )
                             defaultValue.referringColumns.append(theColumn)
                         else:
-                            defaultValue = ProcedureCall(thingo.encode('utf-8'),
-                                                         parens)
+                            defaultValue = ProcedureCall(
+                                thingo.encode("utf-8"), parens
+                            )
+
                     elif theDefault.ttype == Number.Integer:
                         defaultValue = int(theDefault.value)
-                    elif (theDefault.ttype == Keyword and
-                          theDefault.value.lower() == 'false'):
+
+                    elif (
+                        theDefault.ttype == Keyword and
+                        theDefault.value.lower() == "false"
+                    ):
                         defaultValue = False
-                    elif (theDefault.ttype == Keyword and
-                          theDefault.value.lower() == 'true'):
+
+                    elif (
+                        theDefault.ttype == Keyword and
+                        theDefault.value.lower() == "true"
+                    ):
                         defaultValue = True
-                    elif (theDefault.ttype == Keyword and
-                          theDefault.value.lower() == 'null'):
+
+                    elif (
+                        theDefault.ttype == Keyword and
+                        theDefault.value.lower() == "null"
+                    ):
                         defaultValue = None
+
                     elif theDefault.ttype == String.Single:
                         defaultValue = _destringify(theDefault.value)
+
                     else:
                         raise RuntimeError(
-                            "not sure what to do: default %r" % (
-                            theDefault))
+                            "not sure what to do: default %r"
+                            % (theDefault,)
+                        )
+
                     theColumn.setDefaultValue(defaultValue)
-                elif val.match(Keyword, 'REFERENCES'):
+
+                elif val.match(Keyword, "REFERENCES"):
                     target = nameOrIdentifier(self.next())
                     theColumn.doesReferenceName(target)
-                elif val.match(Keyword, 'ON'):
-                    expect(self, ttype=Keyword.DML, value='DELETE')
+
+                elif val.match(Keyword, "ON"):
+                    expect(self, ttype=Keyword.DML, value="DELETE")
                     refAction = self.next()
-                    if refAction.ttype == Keyword and refAction.value.upper() == 'CASCADE':
-                        theColumn.deleteAction = 'cascade'
-                    elif refAction.ttype == Keyword and refAction.value.upper() == 'SET':
+
+                    if (
+                        refAction.ttype == Keyword and
+                        refAction.value.upper() == "CASCADE"
+                    ):
+                        theColumn.deleteAction = "cascade"
+
+                    elif (
+                        refAction.ttype == Keyword and
+                        refAction.value.upper() == "SET"
+                    ):
                         setAction = self.next()
-                        if setAction.ttype == Keyword and setAction.value.upper() == 'NULL':
-                            theColumn.deleteAction = 'set null'
-                        elif setAction.ttype == Keyword and setAction.value.upper() == 'DEFAULT':
-                            theColumn.deleteAction = 'set default'
+
+                        if (
+                            setAction.ttype == Keyword and
+                            setAction.value.upper() == "NULL"
+                        ):
+                            theColumn.deleteAction = "set null"
+
+                        elif (
+                            setAction.ttype == Keyword and
+                            setAction.value.upper() == "DEFAULT"
+                        ):
+                            theColumn.deleteAction = "set default"
+
                         else:
-                            raise RuntimeError("Invalid on delete set %r" % (setAction.value,))
+                            raise RuntimeError(
+                                "Invalid on delete set %r"
+                                % (setAction.value,)
+                            )
+
                     else:
-                        raise RuntimeError("Invalid on delete %r" % (refAction.value,))
+                        raise RuntimeError(
+                            "Invalid on delete %r"
+                            % (refAction.value,)
+                        )
 
                 else:
                     expected = False
+
                 if not expected:
-                    print('UNEXPECTED TOKEN:', repr(val), theColumn)
+                    print("UNEXPECTED TOKEN:", repr(val), theColumn)
                     print(self.parens)
                     import pprint
                     pprint.pprint(self.parens.tokens)
@@ -534,14 +641,17 @@
     """
     if ttype is not None:
         if nextval.ttype != ttype:
-            raise ViolatedExpectation(ttype, '%s:%r' % (nextval.ttype, nextval))
+            raise ViolatedExpectation(
+                ttype, "%s:%r" % (nextval.ttype, nextval)
+            )
     if value is not None:
         if nextval.value.upper() != value.upper():
             raise ViolatedExpectation(value, nextval.value)
     if cls is not None:
         if nextval.__class__ != cls:
-            raise ViolatedExpectation(cls, '%s:%r' %
-                                      (nextval.__class__.__name__, nextval))
+            raise ViolatedExpectation(
+                cls, "%s:%r" % (nextval.__class__.__name__, nextval)
+            )
     return nextval
 
 
@@ -560,10 +670,10 @@
 
 def significant(token):
     """
-    Determine if the token is 'significant', i.e. that it is not a comment and
+    Determine if the token is "significant", i.e. that it is not a comment and
     not whitespace.
     """
-    # comment has 'None' is_whitespace() result.  intentional?
+    # comment has None is_whitespace() result.  intentional?
     return (not isinstance(token, Comment) and not token.is_whitespace())
 
 

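Most callers reach this parser through schemaFromString or schemaFromPath rather than walking the sqlparse tokens themselves. A rough sketch, assuming schemaFromString simply applies addSQLToSchema to a fresh Schema; the SQL below is invented for illustration, not taken from the CalendarServer schema:

    from twext.enterprise.dal.parseschema import schemaFromString

    schema = schemaFromString(
        """
        create sequence JOB_SEQ;
        create table JOB (
            JOB_ID     integer primary key default nextval('JOB_SEQ'),
            NOT_BEFORE timestamp default null
        );
        create index JOB_NOT_BEFORE on JOB(NOT_BEFORE);
        """
    )

    job = schema.tableNamed("JOB")
    print(job.columnNamed("JOB_ID").type.name)              # "integer"
    print([index.name for index in schema.pseudoIndexes()])
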
Modified: twext/trunk/twext/enterprise/dal/record.py
===================================================================
--- twext/trunk/twext/enterprise/dal/record.py	2013-12-13 21:51:30 UTC (rev 12087)
+++ twext/trunk/twext/enterprise/dal/record.py	2013-12-13 23:59:41 UTC (rev 12088)
@@ -22,6 +22,13 @@
 L{twext.enterprise.dal.syntax}.
 """
 
+__all__ = [
+    "ReadOnly",
+    "NoSuchRecord",
+    "fromTable",
+    "Record",
+]
+
 from twisted.internet.defer import inlineCallbacks, returnValue
 from twext.enterprise.dal.syntax import (
     Select, Tuple, Constant, ColumnSyntax, Insert, Update, Delete
@@ -29,6 +36,8 @@
 from twext.enterprise.util import parseSQLTimestamp
 # from twext.enterprise.dal.syntax import ExpressionSyntax
 
+
+
 class ReadOnly(AttributeError):
     """
     A caller attempted to set an attribute on a database-backed record, rather
@@ -38,10 +47,11 @@
     def __init__(self, className, attributeName):
         self.className = className
         self.attributeName = attributeName
-        super(ReadOnly, self).__init__("SQL-backed attribute '{0}.{1}' is "
-                                       "read-only. Use '.update(...)' to "
-                                       "modify attributes."
-                                       .format(className, attributeName))
+        super(ReadOnly, self).__init__(
+            "SQL-backed attribute '{0}.{1}' is read-only. "
+            "Use '.update(...)' to modify attributes."
+            .format(className, attributeName)
+        )
 
 
 
@@ -65,6 +75,7 @@
         newbases = []
         table = None
         namer = None
+
         for base in bases:
             if isinstance(base, fromTable):
                 if table is not None:
@@ -83,6 +94,7 @@
                     if isinstance(base, _RecordMeta):
                         namer = base
                 newbases.append(base)
+
         if table is not None:
             attrmap = {}
             colmap = {}
@@ -93,6 +105,7 @@
                 colmap[column] = attrname
             ns.update(table=table, __attrmap__=attrmap, __colmap__=colmap)
             ns.update(attrmap)
+
         return super(_RecordMeta, cls).__new__(cls, name, tuple(newbases), ns)
 
 
@@ -135,6 +148,7 @@
     __metaclass__ = _RecordMeta
 
     transaction = None
+
     def __setattr__(self, name, value):
         """
         Once the transaction is initialized, this object is immutable.  If you
@@ -142,12 +156,15 @@
         """
         if self.transaction is not None:
             raise ReadOnly(self.__class__.__name__, name)
+
         return super(Record, self).__setattr__(name, value)
 
 
     def __repr__(self):
-        r = "<{0} record from table {1}".format(self.__class__.__name__,
-                                                self.table.model.name)
+        r = (
+            "<{0} record from table {1}"
+            .format(self.__class__.__name__, self.table.model.name)
+        )
         for k in sorted(self.__attrmap__.keys()):
             r += " {0}={1}".format(k, repr(getattr(self, k)))
         r += ">"
@@ -162,11 +179,13 @@
         names).
         """
         words = columnName.lower().split("_")
+
         def cap(word):
-            if word.lower() == 'id':
+            if word.lower() == "id":
                 return word.upper()
             else:
                 return word.capitalize()
+
         return words[0] + "".join(map(cap, words[1:]))
 
 
@@ -184,15 +203,16 @@
 
     @classmethod
     def _primaryKeyComparison(cls, primaryKey):
-        return (cls._primaryKeyExpression() ==
-                Tuple(map(Constant, primaryKey)))
+        return cls._primaryKeyExpression() == Tuple(map(Constant, primaryKey))
 
 
     @classmethod
     @inlineCallbacks
     def load(cls, transaction, *primaryKey):
-        results = (yield cls.query(transaction,
-                                cls._primaryKeyComparison(primaryKey)))
+        results = yield cls.query(
+            transaction,
+            cls._primaryKeyComparison(primaryKey)
+        )
         if len(results) != 1:
             raise NoSuchRecord()
         else:
@@ -207,7 +227,7 @@
 
         Used like this::
 
-            MyRecord.create(transaction, column1=1, column2=u'two')
+            MyRecord.create(transaction, column1=1, column2=u"two")
         """
         self = cls()
         colmap = {}
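
For context, the C{create} call whose docstring is reflowed above is the entry point for the record API; a rough sketch of typical use, modeled on the ALPHA table from test_record.py (the C{schema} syntax object and C{txn} transaction here are assumptions, not part of this changeset):

    from twisted.internet.defer import inlineCallbacks
    from twext.enterprise.dal.record import Record, fromTable

    # `schema` is assumed to be a SchemaSyntax built from the test DDL.
    class TestRecord(Record, fromTable(schema.ALPHA)):
        """
        Record type bound to ALPHA (BETA integer primary key, GAMMA text);
        column names become lower-camel-case attributes (beta, gamma).
        """

    @inlineCallbacks
    def demo(txn):
        # Insert a row and get back an immutable record object.
        rec = yield TestRecord.create(txn, beta=3, gamma=u"three")

        # Look one up again by primary key, or query by an expression.
        same = yield TestRecord.load(txn, 3)
        threes = yield TestRecord.query(txn, TestRecord.gamma == u"three")

        # Attributes are read-only; changes go through update()/delete().
        yield same.update(gamma=u"changed")
        yield same.delete()
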
@@ -222,11 +242,14 @@
                 colmap[col] = k.pop(attr)
             else:
                 if col.model.needsValue():
-                    raise TypeError("required attribute " + repr(attr) +
-                                    " not passed")
+                    raise TypeError(
+                        "required attribute {0!r} not passed"
+                        .format(attr)
+                    )
                 else:
                     needsCols.append(col)
                     needsAttrs.append(attr)
+
         if k:
             raise TypeError("received unknown attribute{0}: {1}".format(
                 "s" if len(k) > 1 else "", ", ".join(sorted(k))
@@ -235,7 +258,9 @@
                         .on(transaction))
         if needsCols:
             self._attributesFromRow(zip(needsAttrs, result[0]))
+
         self.transaction = transaction
+
         returnValue(self)
 
 
@@ -262,9 +287,10 @@
             has been deleted, or fails with L{NoSuchRecord} if the underlying
             row was already deleted.
         """
-        return Delete(From=self.table,
-                      Where=self._primaryKeyComparison(self._primaryKeyValue())
-                      ).on(self.transaction, raiseOnZeroRowCount=NoSuchRecord)
+        return Delete(
+            From=self.table,
+            Where=self._primaryKeyComparison(self._primaryKeyValue())
+        ).on(self.transaction, raiseOnZeroRowCount=NoSuchRecord)
 
 
     @inlineCallbacks
@@ -278,9 +304,12 @@
         colmap = {}
         for k, v in kw.iteritems():
             colmap[self.__attrmap__[k]] = v
-        yield (Update(colmap,
-                      Where=self._primaryKeyComparison(self._primaryKeyValue()))
-                .on(self.transaction))
+
+        yield Update(
+            colmap,
+            Where=self._primaryKeyComparison(self._primaryKeyValue())
+        ).on(self.transaction)
+
         self.__dict__.update(kw)
 
 
@@ -295,9 +324,13 @@
         @rtype: L{Deferred}
         """
         return cls._rowsFromQuery(
-            transaction, Delete(Where=cls._primaryKeyComparison(primaryKey),
-                        From=cls.table, Return=list(cls.table)),
-            lambda : NoSuchRecord()
+            transaction,
+            Delete(
+                Where=cls._primaryKeyComparison(primaryKey),
+                From=cls.table,
+                Return=list(cls.table)
+            ),
+            lambda: NoSuchRecord()
         ).addCallback(lambda x: x[0])
 
 
@@ -326,9 +359,11 @@
             kw.update(OrderBy=order, Ascending=ascending)
         if group is not None:
             kw.update(GroupBy=group)
-        return cls._rowsFromQuery(transaction, Select(list(cls.table),
-                                                      From=cls.table,
-                                                      Where=expr, **kw), None)
+        return cls._rowsFromQuery(
+            transaction,
+            Select(list(cls.table), From=cls.table, Where=expr, **kw),
+            None
+        )
 
 
     @classmethod
@@ -337,11 +372,15 @@
         Load all rows from the table that corresponds to C{cls} and return
         instances of C{cls} corresponding to all.
         """
-        return cls._rowsFromQuery(transaction,
-                                  Select(list(cls.table),
-                                         From=cls.table,
-                                         OrderBy=cls._primaryKeyExpression()),
-                                  None)
+        return cls._rowsFromQuery(
+            transaction,
+            Select(
+                list(cls.table),
+                From=cls.table,
+                OrderBy=cls._primaryKeyExpression()
+            ),
+            None
+        )
 
 
     @classmethod
@@ -354,7 +393,7 @@
         @param transaction: an L{IAsyncTransaction} to execute the query on.
 
         @param qry: a L{_DMLStatement} (XXX: maybe _DMLStatement or some
-            interface that defines 'on' should be public?) whose results are
+            interface that defines "on" should be public?) whose results are
             the list of columns in C{self.table}.
 
         @param rozrc: The C{raiseOnZeroRowCount} argument.
@@ -371,11 +410,3 @@
             self.transaction = transaction
             selves.append(self)
         returnValue(selves)
-
-
-
-__all__ = [
-    "ReadOnly",
-    "fromTable",
-    "NoSuchRecord",
-]

Modified: twext/trunk/twext/enterprise/dal/syntax.py
===================================================================
--- twext/trunk/twext/enterprise/dal/syntax.py	2013-12-13 21:51:30 UTC (rev 12087)
+++ twext/trunk/twext/enterprise/dal/syntax.py	2013-12-13 23:59:41 UTC (rev 12088)
@@ -19,6 +19,56 @@
 Syntax wrappers and generators for SQL.
 """
 
+__all__ = [
+    "DALError",
+    "QueryPlaceholder",
+    "FixedPlaceholder",
+    "NumericPlaceholder",
+    "defaultPlaceholder",
+    "QueryGenerator",
+    "TableMismatch",
+    "NotEnoughValues",
+    "Syntax",
+    "comparison",
+    "ExpressionSyntax",
+    "FunctionInvocation",
+    "Constant",
+    "NamedValue",
+    "Function",
+    "SchemaSyntax",
+    "SequenceSyntax",
+    "TableSyntax",
+    "TableAlias",
+    "Join",
+    "ColumnSyntax",
+    "ResultAliasSyntax",
+    "AliasReferenceSyntax",
+    "AliasedColumnSyntax",
+    "Comparison",
+    "NullComparison",
+    "CompoundComparison",
+    "ColumnComparison",
+    "Column",
+    "Tuple",
+    "SetExpression",
+    "Union",
+    "Intersect",
+    "Except",
+    "Select",
+    "Insert",
+    "Update",
+    "Delete",
+    "Lock",
+    "DatabaseLock",
+    "DatabaseUnlock",
+    "RollbackToSavepoint",
+    "ReleaseSavepoint",
+    "SavepointAction",
+    "NoOp",
+    "SQLFragment",
+    "Parameter",
+]
+
 from itertools import count, repeat
 from functools import partial
 from operator import eq, ne
@@ -88,7 +138,7 @@
 
 
     def placeholder(self):
-        return ':' + str(self._next())
+        return ":" + str(self._next())
 
 
 
@@ -96,7 +146,7 @@
     """
     Generate a default L{QueryPlaceholder}
     """
-    return FixedPlaceholder('?')
+    return FixedPlaceholder("?")
 
 
 
@@ -146,9 +196,9 @@
     """
 
     _paramstyles = {
-        'pyformat': partial(FixedPlaceholder, "%s"),
-        'numeric': NumericPlaceholder,
-        'qmark': defaultPlaceholder,
+        "pyformat": partial(FixedPlaceholder, "%s"),
+        "numeric": NumericPlaceholder,
+        "qmark": defaultPlaceholder,
     }
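
These paramstyle entries just choose how placeholders get rendered; roughly speaking (a sketch, assuming each placeholder class exposes the C{placeholder()} method shown for L{NumericPlaceholder} above):

    from twext.enterprise.dal.syntax import FixedPlaceholder, NumericPlaceholder

    FixedPlaceholder("%s").placeholder()  # "%s"  -- pyformat (e.g. psycopg2)
    FixedPlaceholder("?").placeholder()   # "?"   -- qmark (e.g. sqlite3)

    numeric = NumericPlaceholder()        # no-argument constructor assumed
    numeric.placeholder()                 # ":1"  -- numeric (e.g. cx_Oracle)
    numeric.placeholder()                 # ":2"
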
 
 
@@ -162,7 +212,7 @@
         """
         A hook for subclasses to provide additional keyword arguments to the
         C{bind} call when L{_Statement.on} is executed.  Currently this is used
-        only for 'out' parameters to capture results when executing statements
+        only for "out" parameters to capture results when executing statements
         that do not normally have a result (L{Insert}, L{Delete}, L{Update}).
         """
         return {}
@@ -170,24 +220,23 @@
 
     def _extraResult(self, result, outvars, queryGenerator):
         """
-        A hook for subclasses to manipulate the results of 'on', after they've
+        A hook for subclasses to manipulate the results of "on", after they've
         been retrieved by the database but before they've been given to
         application code.
 
-        @param result: a L{Deferred} that will fire with the rows as returned by
-            the database.
+        @param result: a L{Deferred} that will fire with the rows as returned
+            by the database.
         @type result: C{list} of rows, which are C{list}s or C{tuple}s.
 
         @param outvars: a dictionary of extra variables returned by
             C{self._extraVars}.
 
-        @param queryGenerator: information about the connection where the statement
-            was executed.
+        @param queryGenerator: information about the connection where the
+            statement was executed.
 
         @type queryGenerator: L{QueryGenerator} (a subclass thereof)
 
         @return: the result to be returned from L{_Statement.on}.
-
         @rtype: L{Deferred} firing result rows
         """
         return result
@@ -210,12 +259,15 @@
         @rtype: a L{Deferred} firing a C{list} of records (C{tuple}s or
             C{list}s)
         """
-        queryGenerator = QueryGenerator(txn.dialect, self._paramstyles[txn.paramstyle]())
+        queryGenerator = QueryGenerator(
+            txn.dialect, self._paramstyles[txn.paramstyle]()
+        )
         outvars = self._extraVars(txn, queryGenerator)
         kw.update(outvars)
         fragment = self.toSQL(queryGenerator).bind(**kw)
-        result = txn.execSQL(fragment.text, fragment.parameters,
-                             raiseOnZeroRowCount)
+        result = txn.execSQL(
+            fragment.text, fragment.parameters, raiseOnZeroRowCount
+        )
         result = self._extraResult(result, outvars, queryGenerator)
         if queryGenerator.dialect == ORACLE_DIALECT and result:
             result.addCallback(self._fixOracleNulls)
@@ -225,8 +277,9 @@
     def _resultColumns(self):
         """
         Subclasses must implement this to return a description of the columns
-        expected to be returned.  This is a list of L{ColumnSyntax} objects, and
-        possibly other expression syntaxes which will be converted to C{None}.
+        expected to be returned.  This is a list of L{ColumnSyntax} objects,
+        and possibly other expression syntaxes which will be converted to
+        C{None}.
         """
         raise NotImplementedError(
             "Each statement subclass must describe its result"
@@ -253,19 +306,25 @@
         """
         if rows is None:
             return None
+
         newRows = []
+
         for row in rows:
             newRow = []
+
             for column, description in zip(row, self._resultShape()):
-                if ((description is not None and
-                     # FIXME: "is the python type str" is what I mean; this list
-                     # should be more centrally maintained
-                     description.type.name in ('varchar', 'text', 'char') and
-                     column is None
-                    )):
-                    column = ''
+                if (
+                    description is not None and
+                    # FIXME: "is the python type str" is what I mean; this list
+                    # should be more centrally maintained
+                    description.type.name in ("varchar", "text", "char") and
+                    column is None
+                ):
+                    column = ""
                 newRow.append(column)
+
             newRows.append(newRow)
+
         return newRows
 
 
@@ -277,7 +336,7 @@
     This class will define dynamic attribute access to represent its underlying
     model as a Python namespace.
 
-    You can access the underlying model as '.model'.
+    You can access the underlying model as ".model".
     """
 
     modelType = None
@@ -292,7 +351,7 @@
 
     def __repr__(self):
         if self.model is not None:
-            return '<Syntax for: %r>' % (self.model,)
+            return "<Syntax for: %r>" % (self.model,)
         return super(Syntax, self).__repr__()
 
 
@@ -314,20 +373,20 @@
 
 
 class ExpressionSyntax(Syntax):
-    __eq__ = comparison('=')
-    __ne__ = comparison('!=')
+    __eq__ = comparison("=")
+    __ne__ = comparison("!=")
 
     # NB: these operators "cannot be used with lists" (see ORA-01796)
-    __gt__ = comparison('>')
-    __ge__ = comparison('>=')
-    __lt__ = comparison('<')
-    __le__ = comparison('<=')
+    __gt__ = comparison(">")
+    __ge__ = comparison(">=")
+    __lt__ = comparison("<")
+    __le__ = comparison("<=")
 
     # TODO: operators aren't really comparisons; these should behave slightly
-    # differently.  (For example; in Oracle, 'select 3 = 4 from dual' doesn't
-    # work, but 'select 3 + 4 from dual' does; similarly, you can't do 'select *
-    # from foo where 3 + 4', but you can do 'select * from foo where 3 + 4 >
-    # 0'.)
+    # differently.  (For example, in Oracle, C{select 3 = 4 from dual} doesn't
+    # work, but C{select 3 + 4 from dual} does; similarly, you can't do
+    # C{select * from foo where 3 + 4}, but you can do C{select * from foo
+    # where 3 + 4 > 0}.)
     __add__ = comparison("+")
     __sub__ = comparison("-")
     __div__ = comparison("/")
@@ -341,32 +400,47 @@
 
     def In(self, other):
         """
-        We support two forms of the SQL "IN" syntax: one where a list of values is supplied, the other where
-        a sub-select is used to provide a set of values.
+        We support two forms of the SQL "IN" syntax: one where a list of values
+        is supplied, the other where a sub-select is used to provide a set of
+        values.
 
         @param other: a constant parameter or sub-select
         @type other: L{Parameter} or L{Select}
         """
         if isinstance(other, Parameter):
             if other.count is None:
-                raise DALError("IN expression needs an explicit count of parameters")
-            return CompoundComparison(self, 'in', Constant(other))
+                raise DALError(
+                    "IN expression needs an explicit count of parameters"
+                )
+            return CompoundComparison(self, "in", Constant(other))
         else:
-            # Can't be Select.__contains__ because __contains__ gets __nonzero__
-            # called on its result by the 'in' syntax.
-            return CompoundComparison(self, 'in', other)
+            # Can't be Select.__contains__ because __contains__ gets
+            # __nonzero__ called on its result by the "in" syntax.
+            return CompoundComparison(self, "in", other)
 
 
     def StartsWith(self, other):
-        return CompoundComparison(self, "like", CompoundComparison(Constant(other), '||', Constant('%')))
+        return CompoundComparison(
+            self, "like",
+            CompoundComparison(Constant(other), "||", Constant("%"))
+        )
 
 
     def EndsWith(self, other):
-        return CompoundComparison(self, "like", CompoundComparison(Constant('%'), '||', Constant(other)))
+        return CompoundComparison(
+            self, "like",
+            CompoundComparison(Constant("%"), "||", Constant(other))
+        )
 
 
     def Contains(self, other):
-        return CompoundComparison(self, "like", CompoundComparison(Constant('%'), '||', CompoundComparison(Constant(other), '||', Constant('%'))))
+        return CompoundComparison(
+            self, "like",
+            CompoundComparison(
+                Constant("%"), "||",
+                CompoundComparison(Constant(other), "||", Constant("%"))
+            )
+        )
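
These helpers all build L{CompoundComparison} trees, so they compose with the C{And}/C{Or} methods defined further down; a brief sketch (the C{schema.ALPHA} syntax object and C{txn} are assumptions):

    # "gamma like 'foo%' or gamma like '%bar%'" combined with "beta > 0".
    expr = (
        schema.ALPHA.GAMMA.StartsWith(u"foo")
        .Or(schema.ALPHA.GAMMA.Contains(u"bar"))
        .And(schema.ALPHA.BETA > 0)
    )

    # (inside an inlineCallbacks-decorated function)
    rows = yield Select(
        [schema.ALPHA.BETA, schema.ALPHA.GAMMA],
        From=schema.ALPHA,
        Where=expr,
    ).on(txn)
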
 
 
 
@@ -390,16 +464,20 @@
     def subSQL(self, queryGenerator, allTables):
         result = SQLFragment(self.function.nameFor(queryGenerator))
         result.append(_inParens(
-            _commaJoined(_convert(arg).subSQL(queryGenerator, allTables)
-                         for arg in self.args)))
+            _commaJoined(
+                _convert(arg).subSQL(queryGenerator, allTables)
+                for arg in self.args
+            )
+        ))
         return result
 
 
 
 class Constant(ExpressionSyntax):
     """
-    Generates an expression for a place holder where a value will be bound to the query. If the constant is a Parameter
-    with count > 1 then a parenthesized, comma-separated list of place holders will be generated.
+    Generates an expression for a place holder where a value will be bound to
+    the query. If the constant is a Parameter with count > 1 then a
+    parenthesized, comma-separated list of place holders will be generated.
     """
     def __init__(self, value):
         self.value = value
@@ -411,11 +489,19 @@
 
     def subSQL(self, queryGenerator, allTables):
         if isinstance(self.value, Parameter) and self.value.count is not None:
-            return _inParens(_CommaList(
-                [SQLFragment(queryGenerator.placeholder.placeholder(), [self.value] if ctr == 0 else []) for ctr in range(self.value.count)]
-            ).subSQL(queryGenerator, allTables))
+            return _inParens(
+                _CommaList([
+                    SQLFragment(
+                        queryGenerator.placeholder.placeholder(),
+                        [self.value] if counter == 0 else []
+                    )
+                    for counter in range(self.value.count)
+                ]).subSQL(queryGenerator, allTables)
+            )
         else:
-            return SQLFragment(queryGenerator.placeholder.placeholder(), [self.value])
+            return SQLFragment(
+                queryGenerator.placeholder.placeholder(), [self.value]
+            )
 
 
 
@@ -444,8 +530,12 @@
 
 
     def nameFor(self, queryGenerator):
-        if queryGenerator.dialect == ORACLE_DIALECT and self.oracleName is not None:
+        if (
+            queryGenerator.dialect == ORACLE_DIALECT and
+            self.oracleName is not None
+        ):
             return self.oracleName
+
         return self.name
 
 
@@ -486,7 +576,9 @@
             try:
                 seqModel = self.model.sequenceNamed(attr)
             except KeyError:
-                raise AttributeError("schema has no table or sequence %r" % (attr,))
+                raise AttributeError(
+                    "schema has no table or sequence %r" % (attr,)
+                )
             else:
                 return SequenceSyntax(seqModel)
         else:
@@ -523,8 +615,8 @@
 
 def _nameForDialect(name, dialect):
     """
-    If the given name is being computed in the oracle dialect, truncate it to 30
-    characters.
+    If the given name is being computed in the oracle dialect, truncate it to
+    30 characters.
     """
     if dialect == ORACLE_DIALECT:
         name = name[:30]
@@ -546,44 +638,46 @@
 
         As in SQL, C{someTable.join(someTable)} is an error; you can't join a
         table against itself.  However, C{t = someTable.alias();
-        someTable.join(t)} is usable as a 'from' clause.
+        someTable.join(t)} is usable as a C{from} clause.
         """
         return TableAlias(self.model)
 
 
-    def join(self, otherTableSyntax, on=None, type=''):
+    def join(self, otherTableSyntax, on=None, type=""):
         """
         Create a L{Join}, representing a join between two tables.
         """
         if on is None:
-            type = 'cross'
+            type = "cross"
         return Join(self, type, otherTableSyntax, on)
 
 
     def subSQL(self, queryGenerator, allTables):
         """
         Generate the L{SQLFragment} for this table's identification; this is
-        for use in a 'from' clause.
+        for use in a C{from} clause.
         """
         # XXX maybe there should be a specific method which is only invoked
         # from the FROM clause, that only tables and joins would implement?
-        return SQLFragment(_nameForDialect(self.model.name, queryGenerator.dialect))
+        return SQLFragment(
+            _nameForDialect(self.model.name, queryGenerator.dialect)
+        )
 
 
     def __getattr__(self, attr):
         """
         Attributes named after columns on a L{TableSyntax} are returned by
         accessing their names as attributes.  For example, if there is a schema
-        syntax object created from SQL equivalent to 'create table foo (bar
-        integer, baz integer)', 'schemaSyntax.foo.bar' and
-        'schemaSyntax.foo.baz'
+        syntax object created from SQL equivalent to C{create table foo (bar
+        integer, baz integer)}, C{schemaSyntax.foo.bar} and
+        C{schemaSyntax.foo.baz} are the corresponding L{ColumnSyntax} objects.
         """
         try:
             column = self.model.columnNamed(attr)
         except KeyError:
-            raise AttributeError("table {0} has no column {1}".format(
-                self.model.name, attr
-            ))
+            raise AttributeError(
+                "table {0} has no column {1}".format(self.model.name, attr)
+            )
         else:
             return ColumnSyntax(column)
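
Putting C{alias()} and C{join()} together looks roughly like this (a sketch; FOO, BAR, and their columns are made-up names, and attribute access on the alias is assumed to yield aliased column syntax as described below):

    # Self-join via an alias, then a left outer join against a second table.
    parent = schema.FOO.alias()
    joined = schema.FOO.join(parent, on=schema.FOO.PARENT_ID == parent.ID)
    joined = joined.join(
        schema.BAR,
        on=schema.FOO.ID == schema.BAR.FOO_ID,
        type="left outer",
    )
    # "joined" can now serve as the From= argument of a Select.
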
 
@@ -640,7 +734,7 @@
 
     def subSQL(self, queryGenerator, allTables):
         """
-        Return an L{SQLFragment} with a string of the form C{'mytable myalias'}
+        Return an L{SQLFragment} with a string of the form C{"mytable myalias"}
         suitable for use in a FROM clause.
         """
         result = super(TableAlias, self).subSQL(queryGenerator, allTables)
@@ -659,9 +753,11 @@
             which is only stable within the query which populated C{allTables}.
         @rtype: C{str}
         """
-        anum = [t for t in allTables
-                if isinstance(t, TableAlias)].index(self) + 1
-        return 'alias%d' % (anum,)
+        anum = [
+            t for t in allTables
+            if isinstance(t, TableAlias)
+        ].index(self) + 1
+        return "alias%d" % (anum,)
 
 
     def __getattr__(self, attr):
@@ -671,7 +767,7 @@
 
 class Join(object):
     """
-    A DAL object representing an SQL 'join' statement.
+    A DAL object representing an SQL C{join} statement.
 
     @ivar leftSide: a L{Join} or L{TableSyntax} representing the left side of
         this join.
@@ -679,11 +775,10 @@
     @ivar rightSide: a L{TableSyntax} representing the right side of this join.
 
     @ivar type: the type of join this is.  For example, for a left outer join,
-        this would be C{'left outer'}.
+        this would be C{"left outer"}.
     @type type: C{str}
 
-    @ivar on: the 'on' clause of this table.
-
+    @ivar on: the "on" clause of this table.
     @type on: L{ExpressionSyntax}
     """
 
@@ -697,14 +792,14 @@
     def subSQL(self, queryGenerator, allTables):
         stmt = SQLFragment()
         stmt.append(self.leftSide.subSQL(queryGenerator, allTables))
-        stmt.text += ' '
+        stmt.text += " "
         if self.type:
             stmt.text += self.type
-            stmt.text += ' '
-        stmt.text += 'join '
+            stmt.text += " "
+        stmt.text += "join "
         stmt.append(self.rightSide.subSQL(queryGenerator, allTables))
-        if self.type != 'cross':
-            stmt.text += ' on '
+        if self.type != "cross":
+            stmt.text += " on "
             stmt.append(self.on.subSQL(queryGenerator, allTables))
         return stmt
 
@@ -720,22 +815,25 @@
 
     def join(self, otherTable, on=None, type=None):
         if on is None:
-            type = 'cross'
+            type = "cross"
         return Join(self, type, otherTable, on)
 
 
-_KEYWORDS = ["access",
-             # SQL keyword, but we have a column with this name
-             "path",
-             # Not actually a standard keyword, but a function in oracle, and we
-             # have a column with this name.
-             "size",
-             # not actually sure what this is; only experimentally determined
-             # that not quoting it causes an issue.
-            ]
+_KEYWORDS = [
+    # SQL keyword, but we have a column with this name
+    "access",
 
+    # Not actually a standard keyword, but a function in oracle, and we have a
+    # column with this name.
+    "path",
 
+    # not actually sure what this is; only experimentally determined that not
+    # quoting it causes an issue.
+    "size",
+]
 
+
+
 class ColumnSyntax(ExpressionSyntax):
     """
     Syntactic convenience for L{Column}.
@@ -756,7 +854,7 @@
 
 
     def subSQL(self, queryGenerator, allTables):
-        # XXX This, and 'model', could in principle conflict with column names.
+        # XXX This, and "model", could in principle conflict with column names.
         # Maybe do something about that.
         name = self.model.name
         if queryGenerator.shouldQuote(name):
@@ -768,8 +866,12 @@
             qualified = False
             for tableSyntax in allTables:
                 if self.model.table is not tableSyntax.model:
-                    if self.model.name in (c.name for c in
-                                           tableSyntax.model.columns):
+                    if (
+                        self.model.name in (
+                            c.name for c in
+                            tableSyntax.model.columns
+                        )
+                    ):
                         qualified = True
                         break
         if qualified:
@@ -783,7 +885,7 @@
 
 
     def _qualify(self, name, allTables):
-        return self.model.table.name + '.' + name
+        return self.model.table.name + "." + name
 
 
 
@@ -835,8 +937,8 @@
     """
     An L{AliasedColumnSyntax} is like a L{ColumnSyntax}, but it generates SQL
     for a column of a table under an alias, rather than directly.  i.e. this is
-    used for C{'something.col'} in C{'select something.col from tablename
-    something'} rather than the 'col' in C{'select col from tablename'}.
+    used for C{"something.col"} in C{"select something.col from tablename
+    something"} rather than the "col" in C{"select col from tablename"}.
 
     @see: L{TableSyntax.alias}
     """
@@ -850,7 +952,7 @@
 
 
     def _qualify(self, name, allTables):
-        return self._tableAlias._aliasName(allTables) + '.' + name
+        return self._tableAlias._aliasName(allTables) + "." + name
 
 
 
@@ -864,7 +966,7 @@
 
     def _subexpression(self, expr, queryGenerator, allTables):
         result = expr.subSQL(queryGenerator, allTables)
-        if self.op not in ('and', 'or') and isinstance(expr, Comparison):
+        if self.op not in ("and", "or") and isinstance(expr, Comparison):
             result = _inParens(result)
         return result
 
@@ -874,11 +976,11 @@
 
 
     def And(self, other):
-        return self.booleanOp('and', other)
+        return self.booleanOp("and", other)
 
 
     def Or(self, other):
-        return self.booleanOp('or', other)
+        return self.booleanOp("or", other)
 
 
 
@@ -887,7 +989,7 @@
     A L{NullComparison} is a comparison of a column or expression with None.
     """
     def __init__(self, a, op):
-        # 'b' is always None for this comparison type
+        # "b" is always None for this comparison type
         super(NullComparison, self).__init__(a, op, None)
 
 
@@ -913,36 +1015,50 @@
 
 
     def subSQL(self, queryGenerator, allTables):
-        if (queryGenerator.dialect == ORACLE_DIALECT
-             and isinstance(self.b, Constant) and self.b.value == ''
-             and self.op in ('=', '!=')):
-            return NullComparison(self.a, self.op).subSQL(queryGenerator, allTables)
+        if (
+            queryGenerator.dialect == ORACLE_DIALECT and
+            isinstance(self.b, Constant) and
+            self.b.value == "" and self.op in ("=", "!=")
+        ):
+            return NullComparison(self.a, self.op).subSQL(
+                queryGenerator, allTables
+            )
+
         stmt = SQLFragment()
         result = self._subexpression(self.a, queryGenerator, allTables)
-        if (isinstance(self.a, CompoundComparison)
-            and self.a.op == 'or' and self.op == 'and'):
+        if (
+            isinstance(self.a, CompoundComparison) and
+            self.a.op == "or" and self.op == "and"
+        ):
             result = _inParens(result)
         stmt.append(result)
 
-        stmt.text += ' %s ' % (self.op,)
+        stmt.text += " %s " % (self.op,)
 
         result = self._subexpression(self.b, queryGenerator, allTables)
-        if (isinstance(self.b, CompoundComparison)
-            and self.b.op == 'or' and self.op == 'and'):
+        if (
+            isinstance(self.b, CompoundComparison) and
+            self.b.op == "or" and self.op == "and"
+        ):
             result = _inParens(result)
+
         if isinstance(self.b, Tuple):
             # If the right-hand side of the comparison is a Tuple, it needs to
             # be double-parenthesized in Oracle, as per
             # http://docs.oracle.com/cd/B28359_01/server.111/b28286/expressions015.htm#i1033664
             # because it is an expression list.
             result = _inParens(result)
+
         stmt.append(result)
+
         return stmt
 
 
 
 _operators = {"=": eq, "!=": ne}
 
+
+
 class ColumnComparison(CompoundComparison):
     """
     Comparing two columns is the same as comparing any other two expressions,
@@ -967,6 +1083,7 @@
     def allColumns(self):
         return []
 
+
 ALL_COLUMNS = _AllColumns()
 
 
@@ -1014,9 +1131,9 @@
                 if column in table:
                     break
             else:
-                raise TableMismatch("{} not found in {}".format(
-                    column, tables
-                ))
+                raise TableMismatch(
+                    "{} not found in {}".format(column, tables)
+                )
     return None
 
 
@@ -1032,8 +1149,10 @@
 
 
     def subSQL(self, queryGenerator, allTables):
-        return _inParens(_commaJoined(c.subSQL(queryGenerator, allTables)
-                                      for c in self.columns))
+        return _inParens(_commaJoined(
+            c.subSQL(queryGenerator, allTables)
+            for c in self.columns
+        ))
 
 
     def allColumns(self):
@@ -1051,10 +1170,11 @@
 
     def __init__(self, selects, optype=None):
         """
-
         @param selects: a single Select or a list of Selects
         @type selects: C{list} or L{Select}
-        @param optype: whether to use the ALL, DISTINCT constructs: C{None} use neither, OPTYPE_ALL, or OPTYPE_DISTINCT
+
+        @param optype: whether to use the ALL or DISTINCT constructs: C{None}
+            to use neither, OPTYPE_ALL, or OPTYPE_DISTINCT
         @type optype: C{str}
         """
 
@@ -1065,9 +1185,15 @@
 
         for select in self.selects:
             if not isinstance(select, Select):
-                raise DALError("Must have SELECT statements in a set expression")
-        if self.optype not in (None, SetExpression.OPTYPE_ALL, SetExpression.OPTYPE_DISTINCT,):
-            raise DALError("Must have either 'all' or 'distinct' in a set expression")
+                raise DALError(
+                    "Must have SELECT statements in a set expression"
+                )
+        if self.optype not in (
+            None, SetExpression.OPTYPE_ALL, SetExpression.OPTYPE_DISTINCT,
+        ):
+            raise DALError(
+                "Must have either 'all' or 'distinct' in a set expression"
+            )
 
 
     def subSQL(self, queryGenerator, allTables):
@@ -1121,13 +1247,16 @@
 
 class Select(_Statement):
     """
-    'select' statement.
+    C{select} statement.
     """
 
-    def __init__(self, columns=None, Where=None, From=None, OrderBy=None,
-                 GroupBy=None, Limit=None, ForUpdate=False, NoWait=False, Ascending=None,
-                 Having=None, Distinct=False, As=None,
-                 SetExpression=None):
+    def __init__(
+        self,
+        columns=None, Where=None, From=None,
+        OrderBy=None, GroupBy=None,
+        Limit=None, ForUpdate=False, NoWait=False, Ascending=None,
+        Having=None, Distinct=False, As=None, SetExpression=None
+    ):
         self.From = From
         self.Where = Where
         self.Distinct = Distinct
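
All of the clauses go in as keyword arguments mirroring the SQL they produce; a minimal sketch against the hypothetical ALPHA table used elsewhere in these tests:

    query = Select(
        [schema.ALPHA.BETA, schema.ALPHA.GAMMA],
        From=schema.ALPHA,
        Where=schema.ALPHA.GAMMA == u"three",
        OrderBy=schema.ALPHA.BETA,
        Ascending=True,
        Limit=10,
    )
    rows = yield query.on(txn)  # inside an inlineCallbacks function
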
@@ -1165,30 +1294,33 @@
         """
         if isinstance(other, (list, tuple)):
             other = Tuple(other)
-        return CompoundComparison(other, '=', self)
+        return CompoundComparison(other, "=", self)
 
 
     def _toSQL(self, queryGenerator):
         """
-        @return: a 'select' statement with placeholders and arguments
-
+        @return: a C{select} statement with placeholders and arguments
         @rtype: L{SQLFragment}
         """
         if self.SetExpression is not None:
             stmt = SQLFragment("(")
         else:
             stmt = SQLFragment()
+
         stmt.append(SQLFragment("select "))
         if self.Distinct:
             stmt.text += "distinct "
+
         allTables = self.From.tables()
         stmt.append(self.columns.subSQL(queryGenerator, allTables))
         stmt.text += " from "
         stmt.append(self.From.subSQL(queryGenerator, allTables))
+
         if self.Where is not None:
             wherestmt = self.Where.subSQL(queryGenerator, allTables)
             stmt.text += " where "
             stmt.append(wherestmt)
+
         if self.GroupBy is not None:
             stmt.text += " group by "
             fst = True
@@ -1196,15 +1328,18 @@
                 if fst:
                     fst = False
                 else:
-                    stmt.text += ', '
+                    stmt.text += ", "
                 stmt.append(subthing.subSQL(queryGenerator, allTables))
+
         if self.Having is not None:
             havingstmt = self.Having.subSQL(queryGenerator, allTables)
             stmt.text += " having "
             stmt.append(havingstmt)
+
         if self.SetExpression is not None:
             stmt.append(SQLFragment(")"))
             stmt.append(self.SetExpression.subSQL(queryGenerator, allTables))
+
         if self.OrderBy is not None:
             stmt.text += " order by "
             fst = True
@@ -1212,7 +1347,7 @@
                 if fst:
                     fst = False
                 else:
-                    stmt.text += ', '
+                    stmt.text += ", "
                 stmt.append(subthing.subSQL(queryGenerator, allTables))
             if self.Ascending is not None:
                 if self.Ascending:
@@ -1220,10 +1355,12 @@
                 else:
                     kw = " desc"
                 stmt.append(SQLFragment(kw))
+
         if self.ForUpdate:
             stmt.text += " for update"
             if self.NoWait:
                 stmt.text += " nowait"
+
         if self.Limit is not None:
             limitConst = Constant(self.Limit).subSQL(queryGenerator, allTables)
             if queryGenerator.dialect == ORACLE_DIALECT:
@@ -1234,6 +1371,7 @@
             else:
                 stmt.text += " limit "
             stmt.append(limitConst)
+
         return stmt
 
 
@@ -1241,10 +1379,12 @@
         result = SQLFragment("(")
         result.append(self.toSQL(queryGenerator))
         result.append(SQLFragment(")"))
+
         if self.As is not None:
             if self.As == "":
                 self.As = queryGenerator.nextGeneratedID()
             result.append(SQLFragment(" %s" % (self.As,)))
+
         return result
 
 
@@ -1278,7 +1418,10 @@
             # the right rows.
             return self.From.tables()
         else:
-            tables = set([column.model.table for column in self.columns.columns if isinstance(column, ColumnSyntax)])
+            tables = set([
+                column.model.table for column in self.columns.columns
+                if isinstance(column, ColumnSyntax)
+            ])
             for table in self.From.tables():
                 tables.add(table.model)
             return [TableSyntax(table) for table in tables]
@@ -1333,8 +1476,10 @@
 
 
     def subSQL(self, queryGenerator, allTables):
-        return _commaJoined(f.subSQL(queryGenerator, allTables)
-                            for f in self.subfragments)
+        return _commaJoined(
+            f.subSQL(queryGenerator, allTables)
+            for f in self.subfragments
+        )
 
 
 
@@ -1345,14 +1490,14 @@
 
     def _returningClause(self, queryGenerator, stmt, allTables):
         """
-        Add a dialect-appropriate 'returning' clause to the end of the given
+        Add a dialect-appropriate C{returning} clause to the end of the given
         SQL statement.
 
         @param queryGenerator: describes the database we are generating the
             statement for.
         @type queryGenerator: L{QueryGenerator}
 
-        @param stmt: the SQL fragment generated without the 'returning' clause
+        @param stmt: the SQL fragment generated without the C{returning} clause
         @type stmt: L{SQLFragment}
 
         @param allTables: all tables involved in the query; see any C{subSQL}
@@ -1361,18 +1506,22 @@
         @return: the C{stmt} parameter.
         """
         retclause = self.Return
+
         if retclause is None:
             return stmt
+
         if isinstance(retclause, (tuple, list)):
             retclause = _CommaList(retclause)
+
         if queryGenerator.dialect == SQLITE_DIALECT:
             # sqlite does this another way.
             return stmt
-        elif retclause is not None:
-            stmt.text += ' returning '
+
+        if retclause is not None:
+            stmt.text += " returning "
             stmt.append(retclause.subSQL(queryGenerator, allTables))
             if queryGenerator.dialect == ORACLE_DIALECT:
-                stmt.text += ' into '
+                stmt.text += " into "
                 params = []
                 retvals = self._returnAsList()
                 for n, _ignore_v in enumerate(retvals):
@@ -1381,6 +1530,7 @@
                         .subSQL(queryGenerator, allTables)
                     )
                 stmt.append(_commaJoined(params))
+
         return stmt
 
 
@@ -1403,7 +1553,10 @@
 
 
     def _extraResult(self, result, outvars, queryGenerator):
-        if queryGenerator.dialect == ORACLE_DIALECT and self.Return is not None:
+        if (
+            queryGenerator.dialect == ORACLE_DIALECT and
+            self.Return is not None
+        ):
             def processIt(shouldBeNone):
                 result = [[v.value for _ignore_k, v in outvars]]
                 return result
@@ -1429,10 +1582,10 @@
 
 
     def preQuery(self, cursor):
-        typeMap = {'integer': cx_Oracle.NUMBER,
-                   'text': cx_Oracle.NCLOB,
-                   'varchar': cx_Oracle.STRING,
-                   'timestamp': cx_Oracle.TIMESTAMP}
+        typeMap = {"integer": cx_Oracle.NUMBER,
+                   "text": cx_Oracle.NCLOB,
+                   "varchar": cx_Oracle.STRING,
+                   "timestamp": cx_Oracle.TIMESTAMP}
         self.var = cursor.var(typeMap[self.typeID])
         return self.var
 
@@ -1445,7 +1598,7 @@
 
 class Insert(_DMLStatement):
     """
-    'insert' statement.
+    C{insert} statement.
     """
 
     def __init__(self, columnMap, Return=None):
@@ -1458,19 +1611,21 @@
                        if column not in columns]
         if unspecified:
             raise NotEnoughValues(
-                'Columns [%s] required.' %
-                    (', '.join([c.name for c in unspecified])))
+                "Columns [%s] required."
+                % (", ".join([c.name for c in unspecified]))
+            )
 
 
     def _toSQL(self, queryGenerator):
         """
-        @return: a 'insert' statement with placeholders and arguments
+        @return: an C{insert} statement with placeholders and arguments
 
         @rtype: L{SQLFragment}
         """
         columnsAndValues = self.columnMap.items()
         tableModel = columnsAndValues[0][0].model.table
         specifiedColumnModels = [x.model for x in self.columnMap.keys()]
+
         if queryGenerator.dialect == ORACLE_DIALECT:
             # See test_nextSequenceDefaultImplicitExplicitOracle.
             for column in tableModel.columns:
@@ -1480,19 +1635,26 @@
                         columnsAndValues.append(
                             (columnSyntax, SequenceSyntax(column.default))
                         )
-        sortedColumns = sorted(columnsAndValues,
-                               key=lambda (c, v): c.model.name)
+
+        sortedColumns = sorted(
+            columnsAndValues,
+            key=lambda (c, v): c.model.name
+        )
         allTables = []
-        stmt = SQLFragment('insert into ')
+
+        stmt = SQLFragment("insert into ")
         stmt.append(TableSyntax(tableModel).subSQL(queryGenerator, allTables))
         stmt.append(SQLFragment(" "))
-        stmt.append(_inParens(_commaJoined(
-            [c.subSQL(queryGenerator, allTables) for (c, _ignore_v) in
-             sortedColumns])))
+        stmt.append(_inParens(_commaJoined([
+            c.subSQL(queryGenerator, allTables)
+            for (c, _ignore_v) in sortedColumns
+        ])))
         stmt.append(SQLFragment(" values "))
-        stmt.append(_inParens(_commaJoined(
-            [_convert(v).subSQL(queryGenerator, allTables)
-             for (c, v) in sortedColumns])))
+        stmt.append(_inParens(_commaJoined([
+            _convert(v).subSQL(queryGenerator, allTables)
+            for (c, v) in sortedColumns
+        ])))
+
         return self._returningClause(queryGenerator, stmt, allTables)
 
 
@@ -1505,14 +1667,17 @@
         result = super(_DMLStatement, self).on(txn, *a, **kw)
         if self.Return is not None and txn.dialect == SQLITE_DIALECT:
             table = self._returnAsList()[0].model.table
-            return Select(self._returnAsList(),
-                   # TODO: error reporting when 'return' includes columns
-                   # foreign to the primary table.
-                   From=TableSyntax(table),
-                   Where=ColumnSyntax(Column(table, "rowid",
-                                             SQLType("integer", None))) ==
-                         _sqliteLastInsertRowID()
-                   ).on(txn, *a, **kw)
+            return Select(
+                self._returnAsList(),
+                # TODO: error reporting when "return" includes columns
+                # foreign to the primary table.
+                From=TableSyntax(table),
+                Where=(
+                    ColumnSyntax(
+                        Column(table, "rowid", SQLType("integer", None))
+                    ) == _sqliteLastInsertRowID()
+                )
+            ).on(txn, *a, **kw)
         return result
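
The sqlite branch above exists because C{Return=} is otherwise expressed with a native C{returning} clause; usage is along these lines (a sketch based on the DELTA table from test_record.py, whose PHI column has a sequence default):

    # (inside an inlineCallbacks-decorated function)
    newPHI = yield Insert(
        {schema.DELTA.EPSILON: 7},
        Return=schema.DELTA.PHI,  # fetch the defaulted primary key back
    ).on(txn)
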
 
 
@@ -1531,7 +1696,7 @@
 
 class Update(_DMLStatement):
     """
-    'update' statement
+    C{update} statement
 
     @ivar columnMap: A L{dict} mapping L{ColumnSyntax} objects to values to
         change; values may be simple database values (such as L{str},
@@ -1567,13 +1732,16 @@
             preresult = prequery.on(txn, *a, **kw)
             before = yield preresult
             yield upcall()
-            result = (yield Select(self._returnAsList(),
-                            # TODO: error reporting when 'return' includes
-                            # columns foreign to the primary table.
-                            From=TableSyntax(table),
-                            Where=reduce(lambda left, right: left.Or(right),
-                                         ((rowidcol == x) for [x] in before))
-                            ).on(txn, *a, **kw))
+            result = yield Select(
+                self._returnAsList(),
+                # TODO: error reporting when "return" includes
+                # columns foreign to the primary table.
+                From=TableSyntax(table),
+                Where=reduce(
+                    lambda left, right: left.Or(right),
+                    ((rowidcol == x) for [x] in before)
+                )
+            ).on(txn, *a, **kw)
             returnValue(result)
         else:
             returnValue((yield upcall()))
@@ -1581,37 +1749,40 @@
 
     def _toSQL(self, queryGenerator):
         """
-        @return: a 'insert' statement with placeholders and arguments
-
+        @return: an C{update} statement with placeholders and arguments
         @rtype: L{SQLFragment}
         """
-        sortedColumns = sorted(self.columnMap.items(),
-                               key=lambda (c, v): c.model.name)
+        sortedColumns = sorted(
+            self.columnMap.items(), key=lambda (c, v): c.model.name
+        )
         allTables = []
-        result = SQLFragment('update ')
+        result = SQLFragment("update ")
         result.append(
             TableSyntax(sortedColumns[0][0].model.table).subSQL(
-                queryGenerator, allTables)
+                queryGenerator, allTables
+            )
         )
-        result.text += ' set '
-        result.append(
-            _commaJoined(
-                [c.subSQL(queryGenerator, allTables).append(
-                    SQLFragment(" = ").subSQL(queryGenerator, allTables)
-                ).append(_convert(v).subSQL(queryGenerator, allTables))
-                    for (c, v) in sortedColumns]
+        result.text += " set "
+        result.append(_commaJoined([
+            c.subSQL(queryGenerator, allTables).append(
+                SQLFragment(" = ").subSQL(queryGenerator, allTables)
+            ).append(
+                _convert(v).subSQL(queryGenerator, allTables)
             )
-        )
+            for (c, v) in sortedColumns
+        ]))
+
         if self.Where is not None:
-            result.append(SQLFragment(' where '))
+            result.append(SQLFragment(" where "))
             result.append(self.Where.subSQL(queryGenerator, allTables))
+
         return self._returningClause(queryGenerator, result, allTables)
 
 
 
 class Delete(_DMLStatement):
     """
-    'delete' statement.
+    C{delete} statement.
     """
 
     def __init__(self, From, Where, Return=None):
@@ -1626,10 +1797,10 @@
     def _toSQL(self, queryGenerator):
         result = SQLFragment()
         allTables = self.From.tables()
-        result.text += 'delete from '
+        result.text += "delete from "
         result.append(self.From.subSQL(queryGenerator, allTables))
         if self.Where is not None:
-            result.text += ' where '
+            result.text += " where "
             result.append(self.Where.subSQL(queryGenerator, allTables))
         return self._returningClause(queryGenerator, result, allTables)
 
@@ -1638,8 +1809,10 @@
     def on(self, txn, *a, **kw):
         upcall = lambda: super(Delete, self).on(txn, *a, **kw)
         if txn.dialect == SQLITE_DIALECT and self.Return is not None:
-            result = yield Select(self._returnAsList(), From=self.From,
-                                  Where=self.Where).on(txn, *a, **kw)
+            result = yield Select(
+                self._returnAsList(),
+                From=self.From, Where=self.Where
+            ).on(txn, *a, **kw)
             yield upcall()
         else:
             result = yield upcall()
@@ -1661,7 +1834,7 @@
 
 class Lock(_LockingStatement):
     """
-    An SQL 'lock' statement.
+    An SQL "lock" statement.
     """
 
     def __init__(self, table, mode):
@@ -1671,7 +1844,7 @@
 
     @classmethod
     def exclusive(cls, table):
-        return cls(table, 'exclusive')
+        return cls(table, "exclusive")
 
 
     def _toSQL(self, queryGenerator):
@@ -1680,13 +1853,16 @@
             # concurrency would require some kind of locking statement here.
             # BEGIN IMMEDIATE maybe, if that's okay in the middle of a
             # transaction or repeatedly?
-            return SQLFragment('select null')
-        return SQLFragment('lock table ').append(
-            self.table.subSQL(queryGenerator, [self.table])).append(
-            SQLFragment(' in %s mode' % (self.mode,)))
+            return SQLFragment("select null")
 
+        return SQLFragment("lock table ").append(
+            self.table.subSQL(queryGenerator, [self.table])
+        ).append(
+            SQLFragment(" in %s mode" % (self.mode,))
+        )
 
 
+
 class DatabaseLock(_LockingStatement):
     """
     An SQL exclusive session level advisory lock
@@ -1694,7 +1870,7 @@
 
     def _toSQL(self, queryGenerator):
         assert(queryGenerator.dialect == POSTGRES_DIALECT)
-        return SQLFragment('select pg_advisory_lock(1)')
+        return SQLFragment("select pg_advisory_lock(1)")
 
 
     def on(self, txn, *a, **kw):
@@ -1715,7 +1891,7 @@
 
     def _toSQL(self, queryGenerator):
         assert(queryGenerator.dialect == POSTGRES_DIALECT)
-        return SQLFragment('select pg_advisory_unlock(1)')
+        return SQLFragment("select pg_advisory_unlock(1)")
 
 
     def on(self, txn, *a, **kw):
@@ -1731,7 +1907,7 @@
 
 class Savepoint(_LockingStatement):
     """
-    An SQL 'savepoint' statement.
+    An SQL C{savepoint} statement.
     """
 
     def __init__(self, name):
@@ -1739,13 +1915,13 @@
 
 
     def _toSQL(self, queryGenerator):
-        return SQLFragment('savepoint %s' % (self.name,))
+        return SQLFragment("savepoint %s" % (self.name,))
 
 
 
 class RollbackToSavepoint(_LockingStatement):
     """
-    An SQL 'rollback to savepoint' statement.
+    An SQL C{rollback to savepoint} statement.
     """
 
     def __init__(self, name):
@@ -1753,13 +1929,13 @@
 
 
     def _toSQL(self, queryGenerator):
-        return SQLFragment('rollback to savepoint %s' % (self.name,))
+        return SQLFragment("rollback to savepoint %s" % (self.name,))
 
 
 
 class ReleaseSavepoint(_LockingStatement):
     """
-    An SQL 'release savepoint' statement.
+    An SQL C{release savepoint} statement.
     """
 
     def __init__(self, name):
@@ -1767,7 +1943,7 @@
 
 
     def _toSQL(self, queryGenerator):
-        return SQLFragment('release savepoint %s' % (self.name,))
+        return SQLFragment("release savepoint %s" % (self.name,))
 
 
 
@@ -1787,7 +1963,7 @@
 
     def release(self, txn):
         if txn.dialect == ORACLE_DIALECT:
-            # There is no 'release savepoint' statement in oracle, but then, we
+            # There is no "release savepoint" statement in oracle, but then, we
             # don't need it because there's no resource to manage.  Just don't
             # do anything.
             return NoOp()
@@ -1821,13 +1997,17 @@
             if isinstance(parameter, Parameter):
                 if parameter.count is not None:
                     if parameter.count != len(kw[parameter.name]):
-                        raise DALError("Number of place holders does not match number of items to bind")
+                        raise DALError(
+                            "Number of place holders does not match "
+                            "number of items to bind"
+                        )
                     for item in kw[parameter.name]:
                         params.append(item)
                 else:
                     params.append(kw[parameter.name])
             else:
                 params.append(parameter)
+
         return SQLFragment(self.text, params)
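
That count check is what backs the C{In()} form that takes a L{Parameter} with an explicit count: the placeholder list is sized at SQL-generation time and the values are supplied when the statement is bound. A sketch (schema names assumed, and the Parameter(name, count) signature is inferred from its use here):

    ids = [234, 345, 456]
    # (inside an inlineCallbacks-decorated function)
    rows = yield Select(
        [schema.ALPHA.GAMMA],
        From=schema.ALPHA,
        Where=schema.ALPHA.BETA.In(Parameter("ids", len(ids))),
    ).on(txn, ids=ids)  # len(ids) must equal the count declared above
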
 
 
@@ -1885,7 +2065,7 @@
 
 
     def __repr__(self):
-        return 'Parameter(%r)' % (self.name,)
+        return "Parameter(%r)" % (self.name,)
 
 
 
@@ -1897,8 +2077,9 @@
 
 # You can't insert a column with no rows.  In SQL that just isn't valid syntax,
 # and in this DAL you need at least one key or we can't tell what table you're
-# talking about.  Luckily there's the 'default' keyword to the rescue, which, in
-# the context of an INSERT statement means 'use the default value explicitly'.
+# talking about.  Luckily there's the C{default} keyword to the rescue, which,
+# in the context of an INSERT statement means "use the default value
+# explicitly".
 # (Although this is a special keyword in a CREATE statement, in an INSERT it
 # behaves like an expression to the best of my knowledge.)
-default = NamedValue('default')
+default = NamedValue("default")
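
Concretely, an insert of nothing but defaults still names at least one column so the target table is known, with C{default} standing in for the value (FOO here is a made-up table whose columns all carry database defaults):

    from twext.enterprise.dal.syntax import Insert, default

    # (inside an inlineCallbacks-decorated function)
    yield Insert({schema.FOO.ID: default}).on(txn)
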

Modified: twext/trunk/twext/enterprise/dal/test/test_parseschema.py
===================================================================
--- twext/trunk/twext/enterprise/dal/test/test_parseschema.py	2013-12-13 21:51:30 UTC (rev 12087)
+++ twext/trunk/twext/enterprise/dal/test/test_parseschema.py	2013-12-13 23:59:41 UTC (rev 12088)
@@ -27,6 +27,7 @@
 from twisted.trial.unittest import TestCase
 
 
+
 class SchemaTestHelper(object):
     """
     Mix-in that can parse a schema from a string.
@@ -218,7 +219,8 @@
                 "create table sample (example integer unique);",
                 "create table sample (example integer, unique (example));",
                 "create table sample "
-                "(example integer, constraint unique_example unique (example))"]:
+                "(example integer, constraint unique_example unique (example))"
+        ]:
             s = self.schemaFromString(identicalSchema)
             table = s.tableNamed('sample')
             column = table.columnNamed('example')
@@ -320,8 +322,8 @@
 
     def test_deleteAction(self):
         """
-        A column with an 'on delete cascade' constraint will have its C{cascade}
-        attribute set to True.
+        A column with an 'on delete cascade' constraint will have its
+        C{cascade} attribute set to True.
         """
         s = self.schemaFromString(
             """
@@ -329,11 +331,24 @@
             create table c2 (d2 integer references a1 on delete cascade);
             create table e3 (f3 integer references a1 on delete set null);
             create table g4 (h4 integer references a1 on delete set default);
-            """)
-        self.assertEquals(s.tableNamed("a1").columnNamed("b1").deleteAction, None)
-        self.assertEquals(s.tableNamed("c2").columnNamed("d2").deleteAction, "cascade")
-        self.assertEquals(s.tableNamed("e3").columnNamed("f3").deleteAction, "set null")
-        self.assertEquals(s.tableNamed("g4").columnNamed("h4").deleteAction, "set default")
+            """
+        )
+        self.assertEquals(
+            s.tableNamed("a1").columnNamed("b1").deleteAction,
+            None
+        )
+        self.assertEquals(
+            s.tableNamed("c2").columnNamed("d2").deleteAction,
+            "cascade"
+        )
+        self.assertEquals(
+            s.tableNamed("e3").columnNamed("f3").deleteAction,
+            "set null"
+        )
+        self.assertEquals(
+            s.tableNamed("g4").columnNamed("h4").deleteAction,
+            "set default"
+        )
 
 
     def test_indexes(self):
@@ -350,8 +365,8 @@
             create index idx_a_b on a(b);
             create index idx_a_b_c on a (c, b);
             create index idx_c on z using btree (c);
-            """)
-
+            """
+        )
         a = s.tableNamed("a")
         b = s.indexNamed("idx_a_b")
         bc = s.indexNamed('idx_a_b_c')
@@ -373,11 +388,14 @@
 
             create unique index idx_a_c on a(c);
             create index idx_a_b_c on a (c, b);
-            """)
-
-        self.assertEqual(set([pseudo.name for pseudo in s.pseudoIndexes()]), set((
-            "a-unique:(c)",
-            "a:(c,b)",
-            "a-unique:(b)",
-            "z-unique:(c)",
-        )))
+            """
+        )
+        self.assertEqual(
+            set([pseudo.name for pseudo in s.pseudoIndexes()]),
+            set((
+                "a-unique:(c)",
+                "a:(c,b)",
+                "a-unique:(b)",
+                "z-unique:(c)",
+            ))
+        )

Modified: twext/trunk/twext/enterprise/dal/test/test_record.py
===================================================================
--- twext/trunk/twext/enterprise/dal/test/test_record.py	2013-12-13 21:51:30 UTC (rev 12087)
+++ twext/trunk/twext/enterprise/dal/test/test_record.py	2013-12-13 23:59:41 UTC (rev 12088)
@@ -37,7 +37,7 @@
 
 
 sth = SchemaTestHelper()
-sth.id = lambda : __name__
+sth.id = lambda: __name__
 schemaString = """
 create table ALPHA (BETA integer primary key, GAMMA text);
 create table DELTA (PHI integer primary key default (nextval('myseq')),
@@ -88,10 +88,12 @@
         txn = self.pool.connection()
         yield txn.execSQL("insert into ALPHA values (:1, :2)", [234, "one"])
         yield txn.execSQL("insert into ALPHA values (:1, :2)", [456, "two"])
+
         rec = yield TestRecord.load(txn, 456)
         self.assertIsInstance(rec, TestRecord)
         self.assertEquals(rec.beta, 456)
         self.assertEquals(rec.gamma, "two")
+
         rec2 = yield TestRecord.load(txn, 234)
         self.assertIsInstance(rec2, TestRecord)
         self.assertEqual(rec2.beta, 234)
@@ -115,9 +117,11 @@
         be created in the database.
         """
         txn = self.pool.connection()
+
         rec = yield TestRecord.create(txn, beta=3, gamma=u'epsilon')
         self.assertEquals(rec.beta, 3)
         self.assertEqual(rec.gamma, u'epsilon')
+
         rows = yield txn.execSQL("select BETA, GAMMA from ALPHA")
         self.assertEqual(rows, [tuple([3, u'epsilon'])])
 
@@ -129,11 +133,14 @@
         be deleted in the database.
         """
         txn = self.pool.connection()
+
         def mkrow(beta, gamma):
             return txn.execSQL("insert into ALPHA values (:1, :2)",
                                [beta, gamma])
-        yield gatherResults([mkrow(123, u"one"), mkrow(234, u"two"),
-                             mkrow(345, u"three")])
+
+        yield gatherResults(
+            [mkrow(123, u"one"), mkrow(234, u"two"), mkrow(345, u"three")]
+        )
         tr = yield TestRecord.load(txn, 234)
         yield tr.delete()
         rows = yield txn.execSQL("select BETA, GAMMA from ALPHA order by BETA")
@@ -185,12 +192,19 @@
         txn = self.pool.connection()
         # Create ...
         rec = yield TestAutoRecord.create(txn, epsilon=1)
-        self.assertEquals(rec.zeta, datetime.datetime(2012, 12, 12, 12, 12, 12))
+        self.assertEquals(
+            rec.zeta,
+            datetime.datetime(2012, 12, 12, 12, 12, 12)
+        )
         yield txn.commit()
         # ... should have the same effect as loading.
+
         txn = self.pool.connection()
         rec = (yield TestAutoRecord.all(txn))[0]
-        self.assertEquals(rec.zeta, datetime.datetime(2012, 12, 12, 12, 12, 12))
+        self.assertEquals(
+            rec.zeta,
+            datetime.datetime(2012, 12, 12, 12, 12, 12)
+        )
 
 
     @inlineCallbacks
@@ -200,11 +214,14 @@
         don't map to any column), it raises a L{TypeError}.
         """
         txn = self.pool.connection()
-        te = yield self.failUnlessFailure(TestRecord.create(
-                                        txn, beta=3, gamma=u'three',
-                                        extraBonusAttribute=u'nope',
-                                        otherBonusAttribute=4321,
-                                    ), TypeError)
+        te = yield self.failUnlessFailure(
+            TestRecord.create(
+                txn, beta=3, gamma=u'three',
+                extraBonusAttribute=u'nope',
+                otherBonusAttribute=4321,
+            ),
+            TypeError
+        )
         self.assertIn("extraBonusAttribute, otherBonusAttribute", str(te))
 
 
@@ -233,8 +250,10 @@
         """
         txn = self.pool.connection()
         rec = yield TestRecord.create(txn, beta=7, gamma=u'what')
+
         def setit():
             rec.beta = 12
+
         ro = self.assertRaises(ReadOnly, setit)
         self.assertEqual(rec.beta, 7)
         self.assertIn("SQL-backed attribute 'TestRecord.beta' is read-only. "
@@ -318,11 +337,14 @@
             yield txn.execSQL("insert into ALPHA values (:1, :2)",
                               [beta, gamma])
 
-        records = yield TestRecord.query(txn, TestRecord.gamma == u"three",
-                                         TestRecord.beta)
+        records = yield TestRecord.query(
+            txn, TestRecord.gamma == u"three", TestRecord.beta
+        )
         self.assertEqual([record.beta for record in records], [345, 356])
-        records = yield TestRecord.query(txn, TestRecord.gamma == u"three",
-                                         TestRecord.beta, ascending=False)
+
+        records = yield TestRecord.query(
+            txn, TestRecord.gamma == u"three", TestRecord.beta, ascending=False
+        )
         self.assertEqual([record.beta for record in records], [356, 345])
 
 
@@ -332,15 +354,25 @@
         A L{Record} may be loaded and deleted atomically, with L{Record.pop}.
         """
         txn = self.pool.connection()
-        for beta, gamma in [(123, u"one"), (234, u"two"), (345, u"three"),
-                            (356, u"three"), (456, u"four")]:
-            yield txn.execSQL("insert into ALPHA values (:1, :2)",
-                              [beta, gamma])
+        for beta, gamma in [
+            (123, u"one"),
+            (234, u"two"),
+            (345, u"three"),
+            (356, u"three"),
+            (456, u"four"),
+        ]:
+            yield txn.execSQL(
+                "insert into ALPHA values (:1, :2)", [beta, gamma]
+            )
+
         rec = yield TestRecord.pop(txn, 234)
         self.assertEqual(rec.gamma, u'two')
-        self.assertEqual((yield txn.execSQL("select count(*) from ALPHA "
-                                            "where BETA = :1", [234])),
-                         [tuple([0])])
+        self.assertEqual(
+            (yield txn.execSQL(
+                "select count(*) from ALPHA where BETA = :1", [234]
+            )),
+            [tuple([0])]
+        )
         yield self.failUnlessFailure(TestRecord.pop(txn, 234), NoSuchRecord)
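
A minimal usage sketch of the atomic load-and-delete pattern exercised above.
It assumes the same TestRecord/ALPHA fixture and connection pool as the
surrounding tests, and that NoSuchRecord is importable from
twext.enterprise.dal.record.

    from twisted.internet.defer import inlineCallbacks
    from twext.enterprise.dal.record import NoSuchRecord

    @inlineCallbacks
    def popOnce(pool):
        # pop() loads the row and deletes it in a single step ...
        txn = pool.connection()
        rec = yield TestRecord.pop(txn, 234)
        assert rec.gamma == u"two"
        # ... so a second pop() of the same key fails with NoSuchRecord.
        try:
            yield TestRecord.pop(txn, 234)
        except NoSuchRecord:
            pass
        yield txn.commit()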
 
 
@@ -349,6 +381,15 @@
         The naming convention maps columns C{LIKE_THIS} to be attributes
         C{likeThis}.
         """
-        self.assertEqual(Record.namingConvention(u"like_this"), "likeThis")
-        self.assertEqual(Record.namingConvention(u"LIKE_THIS"), "likeThis")
-        self.assertEqual(Record.namingConvention(u"LIKE_THIS_ID"), "likeThisID")
+        self.assertEqual(
+            Record.namingConvention(u"like_this"),
+            "likeThis"
+        )
+        self.assertEqual(
+            Record.namingConvention(u"LIKE_THIS"),
+            "likeThis"
+        )
+        self.assertEqual(
+            Record.namingConvention(u"LIKE_THIS_ID"),
+            "likeThisID"
+        )
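
A pure-Python restatement of the mapping rule asserted above.  This is not the
library's implementation, only a sketch of the rule: split on underscores,
lower-case the first word, capitalize the rest, and keep a trailing "ID" fully
upper-case.

    def namingConventionSketch(columnName):
        # Sketch only; Record.namingConvention is the real implementation.
        words = columnName.lower().split("_")
        attribute = [words[0]] + [word.capitalize() for word in words[1:]]
        if attribute[-1] == "Id":
            attribute[-1] = "ID"
        return "".join(attribute)

    assert namingConventionSketch(u"like_this") == "likeThis"
    assert namingConventionSketch(u"LIKE_THIS") == "likeThis"
    assert namingConventionSketch(u"LIKE_THIS_ID") == "likeThisID"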

Modified: twext/trunk/twext/enterprise/dal/test/test_sqlsyntax.py
===================================================================
--- twext/trunk/twext/enterprise/dal/test/test_sqlsyntax.py	2013-12-13 21:51:30 UTC (rev 12087)
+++ twext/trunk/twext/enterprise/dal/test/test_sqlsyntax.py	2013-12-13 23:59:41 UTC (rev 12088)
@@ -26,13 +26,15 @@
     Savepoint, RollbackToSavepoint, ReleaseSavepoint, SavepointAction,
     Union, Intersect, Except, SetExpression, DALError,
     ResultAliasSyntax, Count, QueryGenerator, ALL_COLUMNS,
-    DatabaseLock, DatabaseUnlock)
+    DatabaseLock, DatabaseUnlock
+)
 from twext.enterprise.dal.syntax import FixedPlaceholder, NumericPlaceholder
 from twext.enterprise.dal.syntax import Function
 from twext.enterprise.dal.syntax import SchemaSyntax
 from twext.enterprise.dal.test.test_parseschema import SchemaTestHelper
-from twext.enterprise.ienterprise import (POSTGRES_DIALECT, ORACLE_DIALECT,
-                                          SQLITE_DIALECT)
+from twext.enterprise.ienterprise import (
+    POSTGRES_DIALECT, ORACLE_DIALECT, SQLITE_DIALECT
+)
 from twext.enterprise.test.test_adbapi2 import ConnectionPoolHelper
 from twext.enterprise.test.test_adbapi2 import NetworkedPoolHelper
 from twext.enterprise.test.test_adbapi2 import resultOf, AssertResultHelper
@@ -50,15 +52,15 @@
     """
 
     def __init__(self, paramstyle):
-        self.paramstyle = 'qmark'
+        self.paramstyle = "qmark"
 
 
 
 class FakeCXOracleModule(object):
-    NUMBER = 'the NUMBER type'
-    STRING = 'a string type (for varchars)'
-    NCLOB = 'the NCLOB type. (for text)'
-    TIMESTAMP = 'for timestamps!'
+    NUMBER = "the NUMBER type"
+    STRING = "a string type (for varchars)"
+    NCLOB = "the NCLOB type. (for text)"
+    TIMESTAMP = "for timestamps!"
 
 
 
@@ -68,11 +70,11 @@
     """
     counter = 0
 
-    def __init__(self, dialect=SQLITE_DIALECT, paramstyle='numeric'):
+    def __init__(self, dialect=SQLITE_DIALECT, paramstyle="numeric"):
         self.execed = []
         self.pendingResults = []
         self.dialect = SQLITE_DIALECT
-        self.paramstyle = 'numeric'
+        self.paramstyle = "numeric"
 
 
     def nextResult(self, result):
@@ -104,12 +106,13 @@
     """
 
     dialect = ORACLE_DIALECT
-    paramstyle = 'numeric'
+    paramstyle = "numeric"
 
     def execSQL(self, text, params, exc):
         return succeed([[None, None]])
 
 
+
 EXAMPLE_SCHEMA = """
 create sequence A_SEQ;
 create table FOO (BAR integer, BAZ varchar(255));
@@ -123,6 +126,8 @@
                         ANUMBER integer);
 """
 
+
+
 class ExampleSchemaHelper(SchemaTestHelper):
     """
     setUp implementor.
@@ -140,11 +145,13 @@
 
     def test_simplestSelect(self):
         """
-        L{Select} generates a 'select' statement, by default, asking for all
+        L{Select} generates a C{select} statement, by default, asking for all
         rows in a table.
         """
-        self.assertEquals(Select(From=self.schema.FOO).toSQL(),
-                          SQLFragment("select * from FOO", []))
+        self.assertEquals(
+            Select(From=self.schema.FOO).toSQL(),
+            SQLFragment("select * from FOO", [])
+        )
 
 
     def test_tableSyntaxFromSchemaSyntaxCompare(self):
@@ -158,34 +165,44 @@
 
     def test_simpleWhereClause(self):
         """
-        L{Select} generates a 'select' statement with a 'where' clause
+        L{Select} generates a C{select} statement with a C{where} clause
         containing an expression.
         """
-        self.assertEquals(Select(From=self.schema.FOO,
-                                 Where=self.schema.FOO.BAR == 1).toSQL(),
-                          SQLFragment("select * from FOO where BAR = ?", [1]))
+        self.assertEquals(
+            Select(
+                From=self.schema.FOO,
+                Where=self.schema.FOO.BAR == 1
+            ).toSQL(),
+            SQLFragment("select * from FOO where BAR = ?", [1])
+        )
 
 
     def test_alternateMetadata(self):
         """
-        L{Select} generates a 'select' statement with the specified placeholder
-        syntax when explicitly given L{ConnectionMetadata} which specifies a
-        placeholder.
+        L{Select} generates a C{select} statement with the specified
+        placeholder syntax when explicitly given L{ConnectionMetadata} which
+        specifies a placeholder.
         """
-        self.assertEquals(Select(From=self.schema.FOO,
-                                 Where=self.schema.FOO.BAR == 1).toSQL(
-                                 QueryGenerator(POSTGRES_DIALECT, FixedPlaceholder("$$"))),
-                          SQLFragment("select * from FOO where BAR = $$", [1]))
+        self.assertEquals(
+            Select(
+                From=self.schema.FOO,
+                Where=self.schema.FOO.BAR == 1
+            ).toSQL(QueryGenerator(POSTGRES_DIALECT, FixedPlaceholder("$$"))),
+            SQLFragment("select * from FOO where BAR = $$", [1])
+        )
 
 
     def test_columnComparison(self):
         """
-        L{Select} generates a 'select' statement which compares columns.
+        L{Select} generates a C{select} statement which compares columns.
         """
-        self.assertEquals(Select(From=self.schema.FOO,
-                                 Where=self.schema.FOO.BAR ==
-                                 self.schema.FOO.BAZ).toSQL(),
-                          SQLFragment("select * from FOO where BAR = BAZ", []))
+        self.assertEquals(
+            Select(
+                From=self.schema.FOO,
+                Where=self.schema.FOO.BAR == self.schema.FOO.BAZ
+            ).toSQL(),
+            SQLFragment("select * from FOO where BAR = BAZ", [])
+        )
 
 
     def test_comparisonTestErrorPrevention(self):
@@ -199,55 +216,65 @@
         """
         def sampleComparison():
             if self.schema.FOO.BAR > self.schema.FOO.BAZ:
-                return 'comparison should not succeed'
+                return "comparison should not succeed"
         self.assertRaises(DALError, sampleComparison)
 
 
     def test_compareWithNULL(self):
         """
-        Comparing a column with None results in the generation of an 'is null'
-        or 'is not null' SQL statement.
+        Comparing a column with None results in the generation of an C{is null}
+        or C{is not null} SQL statement.
         """
-        self.assertEquals(Select(From=self.schema.FOO,
-                                 Where=self.schema.FOO.BAR ==
-                                 None).toSQL(),
-                          SQLFragment(
-                              "select * from FOO where BAR is null", []))
-        self.assertEquals(Select(From=self.schema.FOO,
-                                 Where=self.schema.FOO.BAR !=
-                                 None).toSQL(),
-                          SQLFragment(
-                              "select * from FOO where BAR is not null", []))
+        self.assertEquals(
+            Select(
+                From=self.schema.FOO,
+                Where=self.schema.FOO.BAR == None
+            ).toSQL(),
+            SQLFragment("select * from FOO where BAR is null", [])
+        )
+        self.assertEquals(
+            Select(
+                From=self.schema.FOO,
+                Where=self.schema.FOO.BAR != None
+            ).toSQL(),
+            SQLFragment("select * from FOO where BAR is not null", [])
+        )
 
 
     def test_compareWithEmptyStringOracleSpecialCase(self):
         """
-        Oracle considers the empty string to be a NULL value, so comparisons
-        with the empty string should be 'is NULL' comparisons.
+        Oracle considers the empty string to be a C{NULL} value, so comparisons
+        with the empty string should be C{is NULL} comparisons.
         """
         # Sanity check: let's make sure that the non-oracle case looks normal.
-        self.assertEquals(Select(
-            From=self.schema.FOO,
-            Where=self.schema.FOO.BAR == '').toSQL(),
-            SQLFragment(
-                "select * from FOO where BAR = ?", [""]))
-        self.assertEquals(Select(
-            From=self.schema.FOO,
-            Where=self.schema.FOO.BAR != '').toSQL(),
-            SQLFragment(
-                "select * from FOO where BAR != ?", [""]))
-        self.assertEquals(Select(
-            From=self.schema.FOO,
-            Where=self.schema.FOO.BAR == ''
-        ).toSQL(QueryGenerator(ORACLE_DIALECT, NumericPlaceholder())),
-            SQLFragment(
-                "select * from FOO where BAR is null", []))
-        self.assertEquals(Select(
-            From=self.schema.FOO,
-            Where=self.schema.FOO.BAR != ''
-        ).toSQL(QueryGenerator(ORACLE_DIALECT, NumericPlaceholder())),
-            SQLFragment(
-                "select * from FOO where BAR is not null", []))
+        self.assertEquals(
+            Select(
+                From=self.schema.FOO,
+                Where=self.schema.FOO.BAR == ""
+            ).toSQL(),
+            SQLFragment("select * from FOO where BAR = ?", [""])
+        )
+        self.assertEquals(
+            Select(
+                From=self.schema.FOO,
+                Where=self.schema.FOO.BAR != ""
+            ).toSQL(),
+            SQLFragment("select * from FOO where BAR != ?", [""])
+        )
+        self.assertEquals(
+            Select(
+                From=self.schema.FOO,
+                Where=self.schema.FOO.BAR == ""
+            ).toSQL(QueryGenerator(ORACLE_DIALECT, NumericPlaceholder())),
+            SQLFragment("select * from FOO where BAR is null", [])
+        )
+        self.assertEquals(
+            Select(
+                From=self.schema.FOO,
+                Where=self.schema.FOO.BAR != ""
+            ).toSQL(QueryGenerator(ORACLE_DIALECT, NumericPlaceholder())),
+            SQLFragment("select * from FOO where BAR is not null", [])
+        )
 
 
     def test_compoundWhere(self):
@@ -255,20 +282,24 @@
         L{Select.And} and L{Select.Or} will return compound columns.
         """
         self.assertEquals(
-            Select(From=self.schema.FOO,
-                   Where=(self.schema.FOO.BAR < 2).Or(
-                          self.schema.FOO.BAR > 5)).toSQL(),
-            SQLFragment("select * from FOO where BAR < ? or BAR > ?", [2, 5]))
+            Select(
+                From=self.schema.FOO,
+                Where=(self.schema.FOO.BAR < 2).Or(self.schema.FOO.BAR > 5)
+            ).toSQL(),
+            SQLFragment("select * from FOO where BAR < ? or BAR > ?", [2, 5])
+        )
 
 
     def test_orderBy(self):
         """
-        L{Select}'s L{OrderBy} parameter generates an 'order by' clause for a
-        'select' statement.
+        L{Select}'s L{OrderBy} parameter generates an C{order by} clause for a
+        C{select} statement.
         """
         self.assertEquals(
-            Select(From=self.schema.FOO,
-                   OrderBy=self.schema.FOO.BAR).toSQL(),
+            Select(
+                From=self.schema.FOO,
+                OrderBy=self.schema.FOO.BAR
+            ).toSQL(),
             SQLFragment("select * from FOO order by BAR")
         )
 
@@ -279,23 +310,29 @@
         order for query results with an OrderBy clause.
         """
         self.assertEquals(
-            Select(From=self.schema.FOO,
-                   OrderBy=self.schema.FOO.BAR,
-                   Ascending=False).toSQL(),
+            Select(
+                From=self.schema.FOO,
+                OrderBy=self.schema.FOO.BAR,
+                Ascending=False
+            ).toSQL(),
             SQLFragment("select * from FOO order by BAR desc")
         )
 
         self.assertEquals(
-            Select(From=self.schema.FOO,
-                   OrderBy=self.schema.FOO.BAR,
-                   Ascending=True).toSQL(),
+            Select(
+                From=self.schema.FOO,
+                OrderBy=self.schema.FOO.BAR,
+                Ascending=True
+            ).toSQL(),
             SQLFragment("select * from FOO order by BAR asc")
         )
 
         self.assertEquals(
-            Select(From=self.schema.FOO,
-                   OrderBy=[self.schema.FOO.BAR, self.schema.FOO.BAZ],
-                   Ascending=True).toSQL(),
+            Select(
+                From=self.schema.FOO,
+                OrderBy=[self.schema.FOO.BAR, self.schema.FOO.BAZ],
+                Ascending=True
+            ).toSQL(),
             SQLFragment("select * from FOO order by BAR, BAZ asc")
         )
 
@@ -308,16 +345,17 @@
         columns.
         """
         self.assertEquals(
-            Select(From=self.schema.FOO,
-                   OrderBy=Tuple([self.schema.FOO.BAR,
-                                  self.schema.FOO.BAZ])).toSQL(),
+            Select(
+                From=self.schema.FOO,
+                OrderBy=Tuple([self.schema.FOO.BAR, self.schema.FOO.BAZ])
+            ).toSQL(),
             SQLFragment("select * from FOO order by BAR, BAZ")
         )
 
 
     def test_forUpdate(self):
         """
-        L{Select}'s L{ForUpdate} parameter generates a 'for update' clause at
+        L{Select}'s L{ForUpdate} parameter generates a C{for update} clause at
         the end of the query.
         """
         self.assertEquals(
@@ -328,12 +366,14 @@
 
     def test_groupBy(self):
         """
-        L{Select}'s L{GroupBy} parameter generates a 'group by' clause for a
-        'select' statement.
+        L{Select}'s L{GroupBy} parameter generates a C{group by} clause for a
+        C{select} statement.
         """
         self.assertEquals(
-            Select(From=self.schema.FOO,
-                   GroupBy=self.schema.FOO.BAR).toSQL(),
+            Select(
+                From=self.schema.FOO,
+                GroupBy=self.schema.FOO.BAR
+            ).toSQL(),
             SQLFragment("select * from FOO group by BAR")
         )
 
@@ -343,29 +383,34 @@
         L{Select}'s L{GroupBy} parameter can accept multiple columns in a list.
         """
         self.assertEquals(
-            Select(From=self.schema.FOO,
-                   GroupBy=[self.schema.FOO.BAR,
-                            self.schema.FOO.BAZ]).toSQL(),
+            Select(
+                From=self.schema.FOO,
+                GroupBy=[self.schema.FOO.BAR, self.schema.FOO.BAZ]
+            ).toSQL(),
             SQLFragment("select * from FOO group by BAR, BAZ")
         )
 
 
     def test_joinClause(self):
         """
-        A table's .join() method returns a join statement in a SELECT.
+        A table's C{.join()} method returns a join statement in a C{SELECT}.
         """
         self.assertEquals(
-            Select(From=self.schema.FOO.join(
-                self.schema.BOZ, self.schema.FOO.BAR ==
-                self.schema.BOZ.QUX)).toSQL(),
+            Select(
+                From=self.schema.FOO.join(
+                    self.schema.BOZ,
+                    self.schema.FOO.BAR == self.schema.BOZ.QUX
+                )
+            ).toSQL(),
             SQLFragment("select * from FOO join BOZ on BAR = QUX", [])
         )
 
 
     def test_crossJoin(self):
         """
-        A join with no clause specified will generate a cross join.  (This is an
-        explicit synonym for an implicit join: i.e. 'select * from FOO, BAR'.)
+        A join with no clause specified will generate a cross join.  (This is
+        an explicit synonym for an implicit join: i.e. C{select * from FOO,
+        BAR}.)
         """
         self.assertEquals(
             Select(From=self.schema.FOO.join(self.schema.BOZ)).toSQL(),
@@ -378,32 +423,39 @@
         L{Join.join} will result in a multi-table join.
         """
         self.assertEquals(
-            Select([self.schema.FOO.BAR,
-                    self.schema.BOZ.QUX],
-                   From=self.schema.FOO
-                   .join(self.schema.BOZ).join(self.schema.OTHER)).toSQL(),
+            Select(
+                [self.schema.FOO.BAR, self.schema.BOZ.QUX],
+                From=self.schema.FOO
+                .join(self.schema.BOZ)
+                .join(self.schema.OTHER)
+            ).toSQL(),
             SQLFragment(
-                "select FOO.BAR, QUX from FOO "
-                "cross join BOZ cross join OTHER")
+                "select FOO.BAR, QUX from FOO cross join BOZ cross join OTHER"
+            )
         )
 
 
     def test_multiJoin(self):
         """
         L{Join.join} has the same signature as L{TableSyntax.join} and supports
-        the same 'on' and 'type' arguments.
+        the same C{on} and C{type} arguments.
         """
 
         self.assertEquals(
-            Select([self.schema.FOO.BAR],
-                   From=self.schema.FOO.join(
-                       self.schema.BOZ).join(
-                           self.schema.OTHER,
-                           self.schema.OTHER.BAR == self.schema.FOO.BAR,
-                           'left outer')).toSQL(),
+            Select(
+                [self.schema.FOO.BAR],
+                From=self.schema.FOO.join(
+                    self.schema.BOZ
+                ).join(
+                    self.schema.OTHER,
+                    self.schema.OTHER.BAR == self.schema.FOO.BAR,
+                    "left outer"
+                )
+            ).toSQL(),
             SQLFragment(
                 "select FOO.BAR from FOO cross join BOZ left outer join OTHER "
-                "on OTHER.BAR = FOO.BAR")
+                "on OTHER.BAR = FOO.BAR"
+            )
         )
 
 
@@ -441,10 +493,14 @@
         fooPrime = foo.alias()
         fooPrimePrime = foo.alias()
         self.assertEquals(
-            Select([fooPrime.BAR, fooPrimePrime.BAR],
-                   From=fooPrime.join(fooPrimePrime)).toSQL(),
-            SQLFragment("select alias1.BAR, alias2.BAR "
-                        "from FOO alias1 cross join FOO alias2")
+            Select(
+                [fooPrime.BAR, fooPrimePrime.BAR],
+                From=fooPrime.join(fooPrimePrime)
+            ).toSQL(),
+            SQLFragment(
+                "select alias1.BAR, alias2.BAR "
+                "from FOO alias1 cross join FOO alias2"
+            )
         )
 
 
@@ -454,8 +510,7 @@
         output by the SQL statement rather than the all-columns wildcard.
         """
         self.assertEquals(
-            Select([self.schema.FOO.BAR],
-                   From=self.schema.FOO).toSQL(),
+            Select([self.schema.FOO.BAR], From=self.schema.FOO).toSQL(),
             SQLFragment("select BAR from FOO")
         )
 
@@ -465,8 +520,10 @@
         Iterating a L{TableSyntax} iterates its columns, in the order that they
         are defined.
         """
-        self.assertEquals(list(self.schema.FOO),
-                          [self.schema.FOO.BAR, self.schema.FOO.BAZ])
+        self.assertEquals(
+            list(self.schema.FOO),
+            [self.schema.FOO.BAR, self.schema.FOO.BAZ]
+        )
 
 
     def test_noColumn(self):
@@ -474,8 +531,7 @@
         Accessing an attribute that is not a defined column on a L{TableSyntax}
         raises an L{AttributeError}.
         """
-        self.assertRaises(AttributeError,
-                          lambda : self.schema.FOO.NOT_A_COLUMN)
+        self.assertRaises(AttributeError, lambda: self.schema.FOO.NOT_A_COLUMN)
 
 
     def test_columnAliases(self):
@@ -486,16 +542,23 @@
         """
         self.assertEquals(self.schema.FOO.columnAliases(), {})
         self.schema.FOO.ALIAS = self.schema.FOO.BAR
-        # you comparing ColumnSyntax object results in a ColumnComparison, which
-        # you can't test for truth.
-        fixedForEquality = dict([(k, v.model) for k, v in
-                                 self.schema.FOO.columnAliases().items()])
-        self.assertEquals(fixedForEquality,
-                          {'ALIAS': self.schema.FOO.BAR.model})
-        self.assertIdentical(self.schema.FOO.ALIAS.model,
-                             self.schema.FOO.BAR.model)
 
+        # Comparing ColumnSyntax objects results in a ColumnComparison,
+        # which you can't test for truth.
+        fixedForEquality = dict([
+            (k, v.model) for k, v in self.schema.FOO.columnAliases().items()
+        ])
 
+        self.assertEquals(
+            fixedForEquality,
+            {"ALIAS": self.schema.FOO.BAR.model}
+        )
+        self.assertIdentical(
+            self.schema.FOO.ALIAS.model,
+            self.schema.FOO.BAR.model
+        )
+
+
     def test_multiColumnSelection(self):
         """
         If multiple columns are specified by the argument to L{Select}, those
@@ -503,51 +566,61 @@
         wildcard.
         """
         self.assertEquals(
-            Select([self.schema.FOO.BAZ,
-                    self.schema.FOO.BAR],
-                   From=self.schema.FOO).toSQL(),
+            Select(
+                [self.schema.FOO.BAZ, self.schema.FOO.BAR],
+                From=self.schema.FOO
+            ).toSQL(),
             SQLFragment("select BAZ, BAR from FOO")
         )
 
 
     def test_joinColumnSelection(self):
         """
-        If multiple columns are specified by the argument to L{Select} that uses
-        a L{TableSyntax.join}, those will be output by the SQL statement.
+        If multiple columns are specified by the argument to L{Select} that
+        uses a L{TableSyntax.join}, those will be output by the SQL statement.
         """
         self.assertEquals(
-            Select([self.schema.FOO.BAZ,
-                    self.schema.BOZ.QUX],
-                   From=self.schema.FOO.join(self.schema.BOZ,
-                                             self.schema.FOO.BAR ==
-                                             self.schema.BOZ.QUX)).toSQL(),
+            Select(
+                [self.schema.FOO.BAZ, self.schema.BOZ.QUX],
+                From=self.schema.FOO.join(
+                    self.schema.BOZ,
+                    self.schema.FOO.BAR == self.schema.BOZ.QUX
+                )
+            ).toSQL(),
             SQLFragment("select BAZ, QUX from FOO join BOZ on BAR = QUX")
         )
 
 
     def test_tableMismatch(self):
         """
-        When a column in the 'columns' argument does not match the table from
-        the 'From' argument, L{Select} raises a L{TableMismatch}.
+        When a column in the C{columns} argument does not match the table from
+        the C{From} argument, L{Select} raises a L{TableMismatch}.
         """
-        self.assertRaises(TableMismatch, Select, [self.schema.BOZ.QUX],
-                          From=self.schema.FOO)
+        self.assertRaises(
+            TableMismatch,
+            Select, [self.schema.BOZ.QUX], From=self.schema.FOO
+        )
 
 
     def test_qualifyNames(self):
         """
-        When two columns in the FROM clause requested from different tables have
-        the same name, the emitted SQL should explicitly disambiguate them.
+        When two columns requested from different tables in the C{from}
+        clause have the same name, the emitted SQL should explicitly
+        disambiguate them.
         """
         self.assertEquals(
-            Select([self.schema.FOO.BAR,
-                    self.schema.OTHER.BAR],
-                   From=self.schema.FOO.join(self.schema.OTHER,
-                                             self.schema.OTHER.FOO_BAR ==
-                                             self.schema.FOO.BAR)).toSQL(),
+            Select(
+                [self.schema.FOO.BAR, self.schema.OTHER.BAR],
+                From=self.schema.FOO.join(
+                    self.schema.OTHER,
+                    self.schema.OTHER.FOO_BAR == self.schema.FOO.BAR
+                )
+            ).toSQL(),
             SQLFragment(
                 "select FOO.BAR, OTHER.BAR from FOO "
-                "join OTHER on FOO_BAR = FOO.BAR"))
+                "join OTHER on FOO_BAR = FOO.BAR"
+            )
+        )
 
 
     def test_bindParameters(self):
@@ -556,13 +629,16 @@
         L{Parameter} objects in its parameter list replaced with the keyword
         arguments to C{bind}.
         """
-
         self.assertEquals(
-            Select(From=self.schema.FOO,
-                   Where=(self.schema.FOO.BAR > Parameter("testing")).And(
-                   self.schema.FOO.BAZ < 7)).toSQL().bind(testing=173),
-            SQLFragment("select * from FOO where BAR > ? and BAZ < ?",
-                         [173, 7]))
+            Select(
+                From=self.schema.FOO,
+                Where=(self.schema.FOO.BAR > Parameter("testing"))
+                .And(self.schema.FOO.BAZ < 7)
+            ).toSQL().bind(testing=173),
+            SQLFragment(
+                "select * from FOO where BAR > ? and BAZ < ?", [173, 7]
+            )
+        )
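
A condensed sketch of the two-step pattern exercised above, written as a
function over the same assumed schema fixture: the statement is rendered once
with a named Parameter, and bind() fills in the value afterwards without
regenerating the SQL.

    from twext.enterprise.dal.syntax import Parameter, Select, SQLFragment

    def bindSketch(schema):
        fragment = Select(
            From=schema.FOO,
            Where=(schema.FOO.BAR > Parameter("testing"))
            .And(schema.FOO.BAZ < 7)
        ).toSQL()
        # Positional values stay in place; only the named placeholder is
        # filled in by the keyword argument.
        assert fragment.bind(testing=173) == SQLFragment(
            "select * from FOO where BAR > ? and BAZ < ?", [173, 7])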
 
 
     def test_rightHandSideExpression(self):
@@ -571,9 +647,10 @@
         comparison operation.
         """
         self.assertEquals(
-            Select(From=self.schema.FOO,
-                   Where=self.schema.FOO.BAR >
-                   (self.schema.FOO.BAZ + 3)).toSQL(),
+            Select(
+                From=self.schema.FOO,
+                Where=self.schema.FOO.BAR > (self.schema.FOO.BAZ + 3)
+            ).toSQL(),
             SQLFragment("select * from FOO where BAR > (BAZ + ?)", [3])
         )
 
@@ -595,7 +672,10 @@
                 ),
             ).toSQL(QueryGenerator(POSTGRES_DIALECT, FixedPlaceholder("?"))),
             SQLFragment(
-                "(select * from FOO where BAR = ?) UNION (select * from FOO where BAR = ?)", [1, 2]))
+                "(select * from FOO where BAR = ?) "
+                "UNION (select * from FOO where BAR = ?)", [1, 2]
+            )
+        )
 
         # Simple INTERSECT ALL
         self.assertEquals(
@@ -611,7 +691,10 @@
                 ),
             ).toSQL(QueryGenerator(POSTGRES_DIALECT, FixedPlaceholder("?"))),
             SQLFragment(
-                "(select * from FOO where BAR = ?) INTERSECT ALL (select * from FOO where BAR = ?)", [1, 2]))
+                "(select * from FOO where BAR = ?) "
+                "INTERSECT ALL (select * from FOO where BAR = ?)", [1, 2]
+            )
+        )
 
         # Multiple EXCEPTs, not nested, Postgres dialect
         self.assertEquals(
@@ -632,7 +715,11 @@
                 ),
             ).toSQL(QueryGenerator(POSTGRES_DIALECT, FixedPlaceholder("?"))),
             SQLFragment(
-                "(select * from FOO) EXCEPT DISTINCT (select * from FOO where BAR = ?) EXCEPT DISTINCT (select * from FOO where BAR = ?)", [2, 3]))
+                "(select * from FOO) "
+                "EXCEPT DISTINCT (select * from FOO where BAR = ?) "
+                "EXCEPT DISTINCT (select * from FOO where BAR = ?)", [2, 3]
+            )
+        )
 
         # Nested EXCEPTs, Oracle dialect
         self.assertEquals(
@@ -652,7 +739,10 @@
                 ),
             ).toSQL(QueryGenerator(ORACLE_DIALECT, FixedPlaceholder("?"))),
             SQLFragment(
-                "(select * from FOO) MINUS ((select * from FOO where BAR = ?) MINUS (select * from FOO where BAR = ?))", [2, 3]))
+                "(select * from FOO) MINUS ((select * from FOO where BAR = ?) "
+                "MINUS (select * from FOO where BAR = ?))", [2, 3]
+            )
+        )
 
         # UNION with order by
         self.assertEquals(
@@ -668,42 +758,49 @@
                 OrderBy=self.schema.FOO.BAR,
             ).toSQL(QueryGenerator(POSTGRES_DIALECT, FixedPlaceholder("?"))),
             SQLFragment(
-                "(select * from FOO where BAR = ?) UNION (select * from FOO where BAR = ?) order by BAR", [1, 2]))
+                "(select * from FOO where BAR = ?) "
+                "UNION (select * from FOO where BAR = ?) order by BAR", [1, 2]
+            )
+        )
 
 
     def test_simpleSubSelects(self):
         """
-        L{Max}C{(column)} produces an object in the 'columns' clause that
-        renders the 'max' aggregate in SQL.
+        Aggregate functions such as L{Max} and L{Count} may be applied to a
+        sub-select used as the C{From} clause; the sub-select is rendered
+        with a generated alias.
         """
         self.assertEquals(
             Select(
                 [Max(self.schema.BOZ.QUX)],
                 From=(Select([self.schema.BOZ.QUX], From=self.schema.BOZ))
             ).toSQL(),
-            SQLFragment(
-                "select max(QUX) from (select QUX from BOZ) genid_1"))
+            SQLFragment("select max(QUX) from (select QUX from BOZ) genid_1")
+        )
 
         self.assertEquals(
             Select(
                 [Count(self.schema.BOZ.QUX)],
                 From=(Select([self.schema.BOZ.QUX], From=self.schema.BOZ))
             ).toSQL(),
-            SQLFragment(
-                "select count(QUX) from (select QUX from BOZ) genid_1"))
+            SQLFragment("select count(QUX) from (select QUX from BOZ) genid_1")
+        )
 
         self.assertEquals(
             Select(
                 [Max(self.schema.BOZ.QUX)],
-                From=(Select([self.schema.BOZ.QUX], From=self.schema.BOZ, As="alias_BAR")),
+                From=(Select(
+                    [self.schema.BOZ.QUX],
+                    From=self.schema.BOZ,
+                    As="alias_BAR"
+                )),
             ).toSQL(),
-            SQLFragment(
-                "select max(QUX) from (select QUX from BOZ) alias_BAR"))
+            SQLFragment("select max(QUX) from (select QUX from BOZ) alias_BAR")
+        )
 
 
     def test_setSubSelects(self):
         """
-        L{SetExpression} in a From sub-select.
+        L{SetExpression} in a C{From} sub-select.
         """
         # Simple UNION
         self.assertEquals(
@@ -723,7 +820,10 @@
                 )
             ).toSQL(),
             SQLFragment(
-                "select max(BAR) from ((select BAR from FOO where BAR = ?) UNION (select BAR from FOO where BAR = ?)) genid_1", [1, 2]))
+                "select max(BAR) from ((select BAR from FOO where BAR = ?) "
+                "UNION (select BAR from FOO where BAR = ?)) genid_1", [1, 2]
+            )
+        )
 
 
     def test_selectColumnAliases(self):
@@ -735,101 +835,143 @@
                 [ResultAliasSyntax(self.schema.BOZ.QUX, "BOZ_QUX")],
                 From=self.schema.BOZ
             ).toSQL(),
-            SQLFragment("select QUX BOZ_QUX from BOZ"))
+            SQLFragment("select QUX BOZ_QUX from BOZ")
+        )
 
         self.assertEquals(
             Select(
                 [ResultAliasSyntax(Max(self.schema.BOZ.QUX))],
                 From=self.schema.BOZ
             ).toSQL(),
-            SQLFragment("select max(QUX) genid_1 from BOZ"))
+            SQLFragment("select max(QUX) genid_1 from BOZ")
+        )
 
         alias = ResultAliasSyntax(Max(self.schema.BOZ.QUX))
         self.assertEquals(
-            Select([alias.columnReference()],
-                From=Select(
-                    [alias],
-                    From=self.schema.BOZ)
+            Select(
+                [alias.columnReference()],
+                From=Select([alias], From=self.schema.BOZ)
             ).toSQL(),
-            SQLFragment("select genid_1 from (select max(QUX) genid_1 from BOZ) genid_2"))
+            SQLFragment(
+                "select genid_1 from "
+                "(select max(QUX) genid_1 from BOZ) genid_2"
+            )
+        )
 
         alias = ResultAliasSyntax(Len(self.schema.BOZ.QUX))
         self.assertEquals(
-            Select([alias.columnReference()],
-                From=Select(
-                    [alias],
-                    From=self.schema.BOZ)
+            Select(
+                [alias.columnReference()],
+                From=Select([alias], From=self.schema.BOZ)
             ).toSQL(),
-            SQLFragment("select genid_1 from (select character_length(QUX) genid_1 from BOZ) genid_2"))
+            SQLFragment(
+                "select genid_1 from "
+                "(select character_length(QUX) genid_1 from BOZ) genid_2"
+            )
+        )
 
 
     def test_inSubSelect(self):
         """
-        L{ColumnSyntax.In} returns a sub-expression using the SQL 'in' syntax
+        L{ColumnSyntax.In} returns a sub-expression using the SQL C{in} syntax
         with a sub-select.
         """
-        wherein = (self.schema.FOO.BAR.In(
-                    Select([self.schema.BOZ.QUX], From=self.schema.BOZ)))
+        wherein = self.schema.FOO.BAR.In(
+            Select([self.schema.BOZ.QUX], From=self.schema.BOZ)
+        )
         self.assertEquals(
             Select(From=self.schema.FOO, Where=wherein).toSQL(),
             SQLFragment(
-                "select * from FOO where BAR in (select QUX from BOZ)"))
+                "select * from FOO where BAR in (select QUX from BOZ)"
+            )
+        )
 
 
     def test_inParameter(self):
         """
-        L{ColumnSyntax.In} returns a sub-expression using the SQL 'in' syntax
+        L{ColumnSyntax.In} returns a sub-expression using the SQL C{in} syntax
         with parameter list.
         """
         # One item with IN only
-        items = set(('A',))
+        items = set(("A",))
         self.assertEquals(
-            Select(From=self.schema.FOO, Where=self.schema.FOO.BAR.In(Parameter("names", len(items)))).toSQL().bind(names=items),
-            SQLFragment(
-                "select * from FOO where BAR in (?)", ['A']))
+            Select(
+                From=self.schema.FOO,
+                Where=self.schema.FOO.BAR.In(
+                    Parameter("names", len(items))
+                )
+            ).toSQL().bind(names=items),
+            SQLFragment("select * from FOO where BAR in (?)", ["A"])
+        )
 
         # Two items with IN only
-        items = set(('A', 'B'))
+        items = set(("A", "B"))
         self.assertEquals(
-            Select(From=self.schema.FOO, Where=self.schema.FOO.BAR.In(Parameter("names", len(items)))).toSQL().bind(names=items),
+            Select(
+                From=self.schema.FOO,
+                Where=self.schema.FOO.BAR.In(
+                    Parameter("names", len(items))
+                )
+            ).toSQL().bind(names=items),
             SQLFragment(
-                "select * from FOO where BAR in (?, ?)", ['A', 'B']))
+                "select * from FOO where BAR in (?, ?)", ["A", "B"]
+            )
+        )
 
         # Two items with preceding AND
         self.assertEquals(
             Select(
                 From=self.schema.FOO,
-                Where=(self.schema.FOO.BAZ == Parameter('P1')).And(
-                    self.schema.FOO.BAR.In(Parameter("names", len(items))
-                ))
+                Where=(
+                    (
+                        self.schema.FOO.BAZ == Parameter("P1")
+                    ).And(
+                        self.schema.FOO.BAR.In(Parameter("names", len(items)))
+                    )
+                )
             ).toSQL().bind(P1="P1", names=items),
             SQLFragment(
-                "select * from FOO where BAZ = ? and BAR in (?, ?)", ['P1', 'A', 'B']),
+                "select * from FOO where BAZ = ? and BAR in (?, ?)",
+                ["P1", "A", "B"]
+            ),
         )
 
         # Two items with following AND
         self.assertEquals(
             Select(
                 From=self.schema.FOO,
-                Where=(self.schema.FOO.BAR.In(Parameter("names", len(items))).And(
-                    self.schema.FOO.BAZ == Parameter('P2')
-                ))
+                Where=(
+                    (
+                        self.schema.FOO.BAR.In(Parameter("names", len(items)))
+                    ).And(
+                        self.schema.FOO.BAZ == Parameter("P2")
+                    )
+                )
             ).toSQL().bind(P2="P2", names=items),
             SQLFragment(
-                "select * from FOO where BAR in (?, ?) and BAZ = ?", ['A', 'B', 'P2']),
+                "select * from FOO where BAR in (?, ?) and BAZ = ?",
+                ["A", "B", "P2"]
+            ),
         )
 
         # Two items with preceding OR and following AND
         self.assertEquals(
             Select(
                 From=self.schema.FOO,
-                Where=(self.schema.FOO.BAZ == Parameter('P1')).Or(
-                    self.schema.FOO.BAR.In(Parameter("names", len(items))).And(
-                        self.schema.FOO.BAZ == Parameter('P2')
-                ))
+                Where=(
+                    (
+                        self.schema.FOO.BAZ == Parameter("P1")
+                    ).Or(
+                        self.schema.FOO.BAR.In(Parameter("names", len(items)))
+                    ).And(
+                        self.schema.FOO.BAZ == Parameter("P2")
+                    )
+                )
             ).toSQL().bind(P1="P1", P2="P2", names=items),
             SQLFragment(
-                "select * from FOO where BAZ = ? or BAR in (?, ?) and BAZ = ?", ['P1', 'A', 'B', 'P2']),
+                "select * from FOO where BAZ = ? or BAR in (?, ?) and BAZ = ?",
+                ["P1", "A", "B", "P2"]
+            ),
         )
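
A condensed sketch of the In() expansion used above, again as a function over
the assumed schema fixture: a Parameter constructed with an explicit count
expands to that many placeholders, and bind() expects a matching number of
values.

    from twext.enterprise.dal.syntax import Parameter, Select, SQLFragment

    def inParameterSketch(schema):
        names = ["A", "B"]
        fragment = Select(
            From=schema.FOO,
            Where=schema.FOO.BAR.In(Parameter("names", len(names)))
        ).toSQL()
        # Two placeholders were generated because the Parameter count was 2;
        # binding a sequence of any other length raises DALError.
        assert fragment.bind(names=names) == SQLFragment(
            "select * from FOO where BAR in (?, ?)", ["A", "B"])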
 
         # Check various error situations
@@ -843,67 +985,78 @@
         # Mismatched count and len(items)
         self.assertRaises(
             DALError,
-            Select(From=self.schema.FOO, Where=self.schema.FOO.BAR.In(Parameter("names", len(items)))).toSQL().bind,
-            names=["a", "b", "c", ]
+            Select(
+                From=self.schema.FOO,
+                Where=self.schema.FOO.BAR.In(Parameter("names", len(items)))
+            ).toSQL().bind,
+            names=["a", "b", "c"]
         )
 
 
     def test_max(self):
         """
-        L{Max}C{(column)} produces an object in the 'columns' clause that
-        renders the 'max' aggregate in SQL.
+        L{Max}C{(column)} produces an object in the C{columns} clause that
+        renders the C{max} aggregate in SQL.
         """
         self.assertEquals(
             Select([Max(self.schema.BOZ.QUX)], From=self.schema.BOZ).toSQL(),
-            SQLFragment(
-                "select max(QUX) from BOZ"))
+            SQLFragment("select max(QUX) from BOZ")
+        )
 
 
     def test_countAllCoumns(self):
         """
-        L{Count}C{(ALL_COLUMNS)} produces an object in the 'columns' clause that
-        renders the 'count' in SQL.
+        L{Count}C{(ALL_COLUMNS)} produces an object in the C{columns} clause
+        that renders the C{count} in SQL.
         """
         self.assertEquals(
             Select([Count(ALL_COLUMNS)], From=self.schema.BOZ).toSQL(),
-            SQLFragment(
-                "select count(*) from BOZ"))
+            SQLFragment("select count(*) from BOZ")
+        )
 
 
     def test_aggregateComparison(self):
         """
-        L{Max}C{(column) > constant} produces an object in the 'columns' clause
-        that renders a comparison to the 'max' aggregate in SQL.
+        L{Max}C{(column) > constant} produces an object in the C{columns}
+        clause that renders a comparison to the C{max} aggregate in SQL.
         """
-        self.assertEquals(Select([Max(self.schema.BOZ.QUX) + 12],
-                                From=self.schema.BOZ).toSQL(),
-                          SQLFragment("select max(QUX) + ? from BOZ", [12]))
+        self.assertEquals(
+            Select(
+                [Max(self.schema.BOZ.QUX) + 12],
+                From=self.schema.BOZ
+            ).toSQL(),
+            SQLFragment("select max(QUX) + ? from BOZ", [12])
+        )
 
 
     def test_multiColumnExpression(self):
         """
-        Multiple columns may be provided in an expression in the 'columns'
-        portion of a Select() statement.  All arithmetic operators are
+        Multiple columns may be provided in an expression in the C{columns}
+        portion of a C{Select()} statement.  All arithmetic operators are
         supported.
         """
         self.assertEquals(
-            Select([((self.schema.FOO.BAR + self.schema.FOO.BAZ) / 3) * 7],
-                   From=self.schema.FOO).toSQL(),
+            Select(
+                [((self.schema.FOO.BAR + self.schema.FOO.BAZ) / 3) * 7],
+                From=self.schema.FOO
+            ).toSQL(),
             SQLFragment("select ((BAR + BAZ) / ?) * ? from FOO", [3, 7])
         )
 
 
     def test_len(self):
         """
-        Test for the 'Len' function for determining character length of a
+        Test for the L{Len} function for determining character length of a
         column.  (Note that this should be updated to use different techniques
         as necessary in different databases.)
         """
         self.assertEquals(
-            Select([Len(self.schema.TEXTUAL.MYTEXT)],
-                    From=self.schema.TEXTUAL).toSQL(),
-            SQLFragment(
-                "select character_length(MYTEXT) from TEXTUAL"))
+            Select(
+                [Len(self.schema.TEXTUAL.MYTEXT)],
+                From=self.schema.TEXTUAL
+            ).toSQL(),
+            SQLFragment("select character_length(MYTEXT) from TEXTUAL")
+        )
 
 
     def test_startswith(self):
@@ -913,8 +1066,8 @@
         as necessary in different databases.)
         """
         self.assertEquals(
-            Select([
-                self.schema.TEXTUAL.MYTEXT],
+            Select(
+                [self.schema.TEXTUAL.MYTEXT],
                 From=self.schema.TEXTUAL,
                 Where=self.schema.TEXTUAL.MYTEXT.StartsWith("test"),
             ).toSQL(),
@@ -932,8 +1085,8 @@
         as necessary in different databases.)
         """
         self.assertEquals(
-            Select([
-                self.schema.TEXTUAL.MYTEXT],
+            Select(
+                [self.schema.TEXTUAL.MYTEXT],
                 From=self.schema.TEXTUAL,
                 Where=self.schema.TEXTUAL.MYTEXT.EndsWith("test"),
             ).toSQL(),
@@ -951,8 +1104,8 @@
         as necessary in different databases.)
         """
         self.assertEquals(
-            Select([
-                self.schema.TEXTUAL.MYTEXT],
+            Select(
+                [self.schema.TEXTUAL.MYTEXT],
                 From=self.schema.TEXTUAL,
                 Where=self.schema.TEXTUAL.MYTEXT.Contains("test"),
             ).toSQL(),
@@ -965,13 +1118,15 @@
 
     def test_insert(self):
         """
-        L{Insert.toSQL} generates an 'insert' statement with all the relevant
+        L{Insert.toSQL} generates an C{insert} statement with all the relevant
         columns.
         """
         self.assertEquals(
-            Insert({self.schema.FOO.BAR: 23,
-                    self.schema.FOO.BAZ: 9}).toSQL(),
-            SQLFragment("insert into FOO (BAR, BAZ) values (?, ?)", [23, 9]))
+            Insert(
+                {self.schema.FOO.BAR: 23, self.schema.FOO.BAZ: 9}
+            ).toSQL(),
+            SQLFragment("insert into FOO (BAR, BAZ) values (?, ?)", [23, 9])
+        )
 
 
     def test_insertNotEnough(self):
@@ -987,49 +1142,52 @@
 
     def test_insertReturning(self):
         """
-        L{Insert}'s C{Return} argument will insert an SQL 'returning' clause.
+        L{Insert}'s C{Return} argument will insert an SQL C{returning} clause.
         """
         self.assertEquals(
-            Insert({self.schema.FOO.BAR: 23,
-                    self.schema.FOO.BAZ: 9},
-                   Return=self.schema.FOO.BAR).toSQL(),
+            Insert(
+                {self.schema.FOO.BAR: 23, self.schema.FOO.BAZ: 9},
+                Return=self.schema.FOO.BAR
+            ).toSQL(),
             SQLFragment(
                 "insert into FOO (BAR, BAZ) values (?, ?) returning BAR",
-                [23, 9])
+                [23, 9]
+            )
         )
 
 
     def test_insertMultiReturn(self):
         """
-        L{Insert}'s C{Return} argument can also be a C{tuple}, which will insert
-        an SQL 'returning' clause with multiple columns.
+        L{Insert}'s C{Return} argument can also be a C{tuple}, which will
+        insert an SQL C{returning} clause with multiple columns.
         """
         self.assertEquals(
-            Insert({self.schema.FOO.BAR: 23,
-                    self.schema.FOO.BAZ: 9},
-                   Return=(self.schema.FOO.BAR, self.schema.FOO.BAZ)).toSQL(),
+            Insert(
+                {self.schema.FOO.BAR: 23, self.schema.FOO.BAZ: 9},
+                Return=(self.schema.FOO.BAR, self.schema.FOO.BAZ)
+            ).toSQL(),
             SQLFragment(
                 "insert into FOO (BAR, BAZ) values (?, ?) returning BAR, BAZ",
-                [23, 9])
+                [23, 9]
+            )
         )
 
 
     def test_insertMultiReturnOracle(self):
         """
-        In Oracle's SQL dialect, the 'returning' clause requires an 'into'
+        In Oracle's SQL dialect, the C{returning} clause requires an C{into}
         clause indicating where to put the results, as they can't be simply
-        relayed to the cursor.  Further, additional bound variables are required
-        to capture the output parameters.
+        relayed to the cursor.  Further, additional bound variables are
+        required to capture the output parameters.
         """
         self.assertEquals(
-            Insert({self.schema.FOO.BAR: 40,
-                    self.schema.FOO.BAZ: 50},
-                   Return=(self.schema.FOO.BAR, self.schema.FOO.BAZ)).toSQL(
-                       QueryGenerator(ORACLE_DIALECT, NumericPlaceholder())
-                   ),
+            Insert(
+                {self.schema.FOO.BAR: 40, self.schema.FOO.BAZ: 50},
+                Return=(self.schema.FOO.BAR, self.schema.FOO.BAZ)
+            ).toSQL(QueryGenerator(ORACLE_DIALECT, NumericPlaceholder())),
             SQLFragment(
-                "insert into FOO (BAR, BAZ) values (:1, :2) returning BAR, BAZ"
-                " into :3, :4",
+                "insert into FOO (BAR, BAZ) values (:1, :2) "
+                "returning BAR, BAZ into :3, :4",
                 [40, 50, Parameter("oracle_out_0"), Parameter("oracle_out_1")]
             )
         )
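
A hedged sketch of what the trailing Parameter("oracle_out_N") markers above
imply at execution time when the cx_Oracle binding is in use.  This is
illustration only, not the pool's actual code, and it assumes the returned
columns are numeric.

    import cx_Oracle  # assumed available
    from twext.enterprise.dal.syntax import Parameter

    def executeReturningInto(cursor, sql, args):
        # Replace each out-marker with a freshly created bind variable ...
        outVariables = []
        bound = []
        for arg in args:
            if isinstance(arg, Parameter):
                variable = cursor.var(cx_Oracle.NUMBER)
                outVariables.append(variable)
                bound.append(variable)
            else:
                bound.append(arg)
        cursor.execute(sql, bound)
        # ... then read the "returning ... into" values back out of them.
        return [variable.getvalue() for variable in outVariables]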
@@ -1037,21 +1195,21 @@
 
     def test_insertMultiReturnSQLite(self):
         """
-        In SQLite's SQL dialect, there is no 'returning' clause, but given that
-        SQLite serializes all SQL transactions, you can rely upon 'select'
-        after a write operation to reliably give you exactly what was just
-        modified.  Therefore, although 'toSQL' won't include any indication of
-        the return value, the 'on' method will execute a 'select' statement
-        following the insert to retrieve the value.
+        In SQLite's SQL dialect, there is no C{returning} clause, but given
+        that SQLite serializes all SQL transactions, you can rely upon
+        C{select} after a write operation to reliably give you exactly what was
+        just modified.  Therefore, although C{toSQL} won't include any
+        indication of the return value, the C{on} method will execute a
+        C{select} statement following the insert to retrieve the value.
         """
-        insertStatement = Insert({self.schema.FOO.BAR: 39,
-                    self.schema.FOO.BAZ: 82},
-                   Return=(self.schema.FOO.BAR, self.schema.FOO.BAZ)
+        insertStatement = Insert(
+            {self.schema.FOO.BAR: 39, self.schema.FOO.BAZ: 82},
+            Return=(self.schema.FOO.BAR, self.schema.FOO.BAZ)
         )
-        qg = lambda : QueryGenerator(SQLITE_DIALECT, NumericPlaceholder())
-        self.assertEquals(insertStatement.toSQL(qg()),
-            SQLFragment("insert into FOO (BAR, BAZ) values (:1, :2)",
-                        [39, 82])
+        qg = lambda: QueryGenerator(SQLITE_DIALECT, NumericPlaceholder())
+        self.assertEquals(
+            insertStatement.toSQL(qg()),
+            SQLFragment("insert into FOO (BAR, BAZ) values (:1, :2)", [39, 82])
         )
         result = []
         csql = CatchSQL()
@@ -1059,8 +1217,17 @@
         self.assertEqual(result, [2])
         self.assertEqual(
             csql.execed,
-            [["insert into FOO (BAR, BAZ) values (:1, :2)", [39, 82]],
-             ["select BAR, BAZ from FOO where rowid = last_insert_rowid()", []]]
+            [
+                [
+                    "insert into FOO (BAR, BAZ) values (:1, :2)",
+                    [39, 82]
+                ],
+                [
+                    "select BAR, BAZ from FOO "
+                    "where rowid = last_insert_rowid()",
+                    []
+                ],
+            ]
         )
 
 
@@ -1069,8 +1236,9 @@
         Inserting a row I{without} a C{Return=} parameter should also work as
         normal in sqlite.
         """
-        statement = Insert({self.schema.FOO.BAR: 12,
-                            self.schema.FOO.BAZ: 48})
+        statement = Insert(
+            {self.schema.FOO.BAR: 12, self.schema.FOO.BAZ: 48}
+        )
         csql = CatchSQL()
         statement.on(csql)
         self.assertEqual(
@@ -1081,23 +1249,27 @@
 
     def test_updateReturningSQLite(self):
         """
-        Since SQLite does not support the SQL 'returning' syntax extension, in
+        Since SQLite does not support the SQL C{returning} syntax extension, in
         order to preserve the rows that will be modified during an UPDATE
         statement, we must first find the rows that will be affected, then
         update them, then return the rows that were affected.  Since we might
-        be changing even part of the primary key, we use the internal 'rowid'
+        be changing even part of the primary key, we use the internal C{rowid}
         column to uniquely and reliably identify rows in the sqlite database
         that have been modified.
         """
         csql = CatchSQL()
-        stmt = Update({self.schema.FOO.BAR: 4321},
-                      Where=self.schema.FOO.BAZ == 1234,
-                      Return=self.schema.FOO.BAR)
+        stmt = Update(
+            {self.schema.FOO.BAR: 4321},
+            Where=self.schema.FOO.BAZ == 1234,
+            Return=self.schema.FOO.BAR
+        )
         csql.nextResult([["sample row id"]])
         result = resultOf(stmt.on(csql))
-        # Three statements were executed; make sure that the result returned was
-        # the result of executing the 3rd (and final) one.
+
+        # Three statements were executed; make sure that the result returned
+        # was the result of executing the 3rd (and final) one.
         self.assertResultList(result, 3)
+
         # Check that they were the right statements.
         self.assertEqual(len(csql.execed), 3)
         self.assertEqual(
@@ -1117,18 +1289,22 @@
     def test_updateReturningMultipleValuesSQLite(self):
         """
         When SQLite updates multiple values, it must embed the row ID of each
-        subsequent value into its second 'where' clause, as there is no way to
+        subsequent value into its second C{where} clause, as there is no way to
         pass a list of values to a single statement.
         """
         csql = CatchSQL()
-        stmt = Update({self.schema.FOO.BAR: 4321},
-                      Where=self.schema.FOO.BAZ == 1234,
-                      Return=self.schema.FOO.BAR)
+        stmt = Update(
+            {self.schema.FOO.BAR: 4321},
+            Where=self.schema.FOO.BAZ == 1234,
+            Return=self.schema.FOO.BAR
+        )
         csql.nextResult([["one row id"], ["and another"], ["and one more"]])
         result = resultOf(stmt.on(csql))
-        # Three statements were executed; make sure that the result returned was
-        # the result of executing the 3rd (and final) one.
+
+        # Three statements were executed; make sure that the result returned
+        # was the result of executing the 3rd (and final) one.
         self.assertResultList(result, 3)
+
         # Check that they were the right statements.
         self.assertEqual(len(csql.execed), 3)
         self.assertEqual(
@@ -1141,8 +1317,11 @@
         )
         self.assertEqual(
             csql.execed[2],
-            ["select BAR from FOO where rowid = :1 or rowid = :2 or rowid = :3",
-             ["one row id", "and another", "and one more"]]
+            [
+                "select BAR from FOO "
+                "where rowid = :1 or rowid = :2 or rowid = :3",
+                ["one row id", "and another", "and one more"]
+            ]
         )
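The rowid dance exercised by the two tests above can be pictured outside the DAL with the stdlib sqlite3 module.  A minimal sketch, assuming a made-up FOO(BAR, BAZ) table that mirrors the test schema and using qmark placeholders rather than the numeric style the DAL emits:

    import sqlite3

    conn = sqlite3.connect(":memory:")
    conn.execute("create table FOO (BAR integer, BAZ integer)")
    conn.executemany(
        "insert into FOO (BAR, BAZ) values (?, ?)",
        [(1, 1234), (2, 1234), (3, 5678)],
    )

    # 1. Remember which rows are about to change, keyed by sqlite's rowid.
    rowids = [r[0] for r in
              conn.execute("select rowid from FOO where BAZ = ?", (1234,))]

    # 2. Perform the update itself.
    conn.execute("update FOO set BAR = ? where BAZ = ?", (4321, 1234))

    # 3. Re-select the changed rows by rowid to emulate "returning BAR",
    #    embedding one "rowid = ?" term per affected row.
    criteria = " or ".join("rowid = ?" for _ in rowids)
    returned = conn.execute(
        "select BAR from FOO where " + criteria, rowids
    ).fetchall()
    # returned == [(4321,), (4321,)]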
 
 
@@ -1151,8 +1330,11 @@
         When SQLite deletes a value, ...
         """
         csql = CatchSQL()
-        stmt = Delete(From=self.schema.FOO, Where=self.schema.FOO.BAZ == 1234,
-                      Return=self.schema.FOO.BAR)
+        stmt = Delete(
+            From=self.schema.FOO,
+            Where=self.schema.FOO.BAZ == 1234,
+            Return=self.schema.FOO.BAR
+        )
         result = resultOf(stmt.on(csql))
         self.assertResultList(result, 1)
         self.assertEqual(len(csql.execed), 2)
@@ -1173,48 +1355,61 @@
         """
         self.assertRaises(
             TableMismatch,
-            Insert, {self.schema.FOO.BAR: 23,
-                     self.schema.FOO.BAZ: 9,
-                     self.schema.TEXTUAL.MYTEXT: 'hello'}
+            Insert, {
+                self.schema.FOO.BAR: 23,
+                self.schema.FOO.BAZ: 9,
+                self.schema.TEXTUAL.MYTEXT: "hello"
+            }
         )
 
 
     def test_quotingOnKeywordConflict(self):
         """
-        'access' is a keyword, so although our schema parser will leniently
-        accept it, it must be quoted in any outgoing SQL.  (This is only done in
-        the Oracle dialect, because it isn't necessary in postgres, and
+        "access" is a keyword, so although our schema parser will leniently
+        accept it, it must be quoted in any outgoing SQL.  (This is only done
+        in the Oracle dialect, because it isn't necessary in postgres, and
         idiosyncratic case-folding rules make it challenging to do it in both.)
         """
         self.assertEquals(
-            Insert({self.schema.LEVELS.ACCESS: 1,
-                    self.schema.LEVELS.USERNAME:
-                    "hi"}).toSQL(QueryGenerator(ORACLE_DIALECT, FixedPlaceholder("?"))),
+            Insert(
+                {
+                    self.schema.LEVELS.ACCESS: 1,
+                    self.schema.LEVELS.USERNAME: "hi"
+                }
+            ).toSQL(QueryGenerator(ORACLE_DIALECT, FixedPlaceholder("?"))),
             SQLFragment(
-                'insert into LEVELS ("ACCESS", USERNAME) values (?, ?)',
-                [1, "hi"])
+                """insert into LEVELS ("ACCESS", USERNAME) values (?, ?)""",
+                [1, "hi"]
+            )
         )
         self.assertEquals(
-            Insert({self.schema.LEVELS.ACCESS: 1,
-                    self.schema.LEVELS.USERNAME:
-                    "hi"}).toSQL(QueryGenerator(POSTGRES_DIALECT, FixedPlaceholder("?"))),
+            Insert(
+                {
+                    self.schema.LEVELS.ACCESS: 1,
+                    self.schema.LEVELS.USERNAME: "hi"
+                }
+            ).toSQL(QueryGenerator(POSTGRES_DIALECT, FixedPlaceholder("?"))),
             SQLFragment(
-                'insert into LEVELS (ACCESS, USERNAME) values (?, ?)',
-                [1, "hi"])
+                "insert into LEVELS (ACCESS, USERNAME) values (?, ?)",
+                [1, "hi"]
+            )
         )
 
 
     def test_updateReturning(self):
         """
-        L{update}'s C{Return} argument will update an SQL 'returning' clause.
+        L{update}'s C{Return} argument will add an SQL C{returning} clause.
         """
         self.assertEquals(
-            Update({self.schema.FOO.BAR: 23},
-                   self.schema.FOO.BAZ == 43,
-                   Return=self.schema.FOO.BAR).toSQL(),
+            Update(
+                {self.schema.FOO.BAR: 23},
+                self.schema.FOO.BAZ == 43,
+                Return=self.schema.FOO.BAR
+            ).toSQL(),
             SQLFragment(
                 "update FOO set BAR = ? where BAZ = ? returning BAR",
-                [23, 43])
+                [23, 43]
+            )
         )
 
 
@@ -1225,9 +1420,11 @@
         """
         self.assertRaises(
             TableMismatch,
-            Update, {self.schema.FOO.BAR: 23,
-                     self.schema.FOO.BAZ: 9,
-                     self.schema.TEXTUAL.MYTEXT: 'hello'},
+            Update, {
+                self.schema.FOO.BAR: 23,
+                self.schema.FOO.BAZ: 9,
+                self.schema.TEXTUAL.MYTEXT: "hello"
+            },
             Where=self.schema.FOO.BAZ == 9
         )
 
@@ -1240,12 +1437,12 @@
         sqlfunc = Function("hello")
         self.assertEquals(
             Update(
-                {self.schema.FOO.BAR: 23,
-                 self.schema.FOO.BAZ: sqlfunc()},
+                {self.schema.FOO.BAR: 23, self.schema.FOO.BAZ: sqlfunc()},
                 Where=self.schema.FOO.BAZ == 9
             ).toSQL(),
-            SQLFragment("update FOO set BAR = ?, BAZ = hello() "
-                        "where BAZ = ?", [23, 9])
+            SQLFragment(
+                "update FOO set BAR = ?, BAZ = hello() where BAZ = ?", [23, 9]
+            )
         )
 
 
@@ -1257,165 +1454,186 @@
         sqlfunc = Function("hello")
         self.assertEquals(
             Insert(
-                {self.schema.FOO.BAR: 23,
-                 self.schema.FOO.BAZ: sqlfunc()},
+                {self.schema.FOO.BAR: 23, self.schema.FOO.BAZ: sqlfunc()},
             ).toSQL(),
-            SQLFragment("insert into FOO (BAR, BAZ) "
-                        "values (?, hello())", [23])
+            SQLFragment("insert into FOO (BAR, BAZ) values (?, hello())", [23])
         )
 
 
     def test_deleteReturning(self):
         """
-        L{Delete}'s C{Return} argument will delete an SQL 'returning' clause.
+        L{Delete}'s C{Return} argument will add an SQL C{returning} clause.
         """
         self.assertEquals(
-            Delete(self.schema.FOO,
-                   Where=self.schema.FOO.BAR == 7,
-                   Return=self.schema.FOO.BAZ).toSQL(),
-            SQLFragment(
-                "delete from FOO where BAR = ? returning BAZ", [7])
+            Delete(
+                self.schema.FOO,
+                Where=self.schema.FOO.BAR == 7,
+                Return=self.schema.FOO.BAZ
+            ).toSQL(),
+            SQLFragment("delete from FOO where BAR = ? returning BAZ", [7])
         )
 
 
     def test_update(self):
         """
-        L{Update.toSQL} generates an 'update' statement.
+        L{Update.toSQL} generates an C{update} statement.
         """
         self.assertEquals(
-            Update({self.schema.FOO.BAR: 4321},
-                    self.schema.FOO.BAZ == 1234).toSQL(),
-            SQLFragment("update FOO set BAR = ? where BAZ = ?", [4321, 1234]))
+            Update(
+                {self.schema.FOO.BAR: 4321},
+                self.schema.FOO.BAZ == 1234
+            ).toSQL(),
+            SQLFragment("update FOO set BAR = ? where BAZ = ?", [4321, 1234])
+        )
 
 
     def test_delete(self):
         """
-        L{Delete} generates an SQL 'delete' statement.
+        L{Delete} generates an SQL C{delete} statement.
         """
         self.assertEquals(
-            Delete(self.schema.FOO,
-                   Where=self.schema.FOO.BAR == 12).toSQL(),
-            SQLFragment(
-                "delete from FOO where BAR = ?", [12])
+            Delete(self.schema.FOO, Where=self.schema.FOO.BAR == 12).toSQL(),
+            SQLFragment("delete from FOO where BAR = ?", [12])
         )
 
         self.assertEquals(
-            Delete(self.schema.FOO,
-                   Where=None).toSQL(),
+            Delete(self.schema.FOO, Where=None).toSQL(),
             SQLFragment("delete from FOO")
         )
 
 
     def test_lock(self):
         """
-        L{Lock.exclusive} generates a ('lock table') statement, locking the
+        L{Lock.exclusive} generates a C{lock table} statement, locking the
         table in the specified mode.
         """
-        self.assertEquals(Lock.exclusive(self.schema.FOO).toSQL(),
-                          SQLFragment("lock table FOO in exclusive mode"))
+        self.assertEquals(
+            Lock.exclusive(self.schema.FOO).toSQL(),
+            SQLFragment("lock table FOO in exclusive mode")
+        )
 
 
     def test_databaseLock(self):
         """
-        L{DatabaseLock} generates a ('pg_advisory_lock') statement
+        L{DatabaseLock} generates a C{pg_advisory_lock} statement
         """
-        self.assertEquals(DatabaseLock().toSQL(),
-                          SQLFragment("select pg_advisory_lock(1)"))
+        self.assertEquals(
+            DatabaseLock().toSQL(),
+            SQLFragment("select pg_advisory_lock(1)")
+        )
 
 
     def test_databaseUnlock(self):
         """
-        L{DatabaseUnlock} generates a ('pg_advisory_unlock') statement
+        L{DatabaseUnlock} generates a C{pg_advisory_unlock} statement
         """
-        self.assertEquals(DatabaseUnlock().toSQL(),
-                          SQLFragment("select pg_advisory_unlock(1)"))
+        self.assertEquals(
+            DatabaseUnlock().toSQL(),
+            SQLFragment("select pg_advisory_unlock(1)")
+        )
 
 
     def test_savepoint(self):
         """
-        L{Savepoint} generates a ('savepoint') statement.
+        L{Savepoint} generates a C{savepoint} statement.
         """
-        self.assertEquals(Savepoint("test").toSQL(),
-                          SQLFragment("savepoint test"))
+        self.assertEquals(
+            Savepoint("test").toSQL(),
+            SQLFragment("savepoint test")
+        )
 
 
     def test_rollbacktosavepoint(self):
         """
-        L{RollbackToSavepoint} generates a ('rollback to savepoint') statement.
+        L{RollbackToSavepoint} generates a C{rollback to savepoint} statement.
         """
-        self.assertEquals(RollbackToSavepoint("test").toSQL(),
-                          SQLFragment("rollback to savepoint test"))
+        self.assertEquals(
+            RollbackToSavepoint("test").toSQL(),
+            SQLFragment("rollback to savepoint test")
+        )
 
 
     def test_releasesavepoint(self):
         """
-        L{ReleaseSavepoint} generates a ('release savepoint') statement.
+        L{ReleaseSavepoint} generates a C{release savepoint} statement.
         """
-        self.assertEquals(ReleaseSavepoint("test").toSQL(),
-                          SQLFragment("release savepoint test"))
+        self.assertEquals(
+            ReleaseSavepoint("test").toSQL(),
+            SQLFragment("release savepoint test")
+        )
 
 
     def test_savepointaction(self):
         """
-        L{SavepointAction} generates a ('savepoint') statement.
+        L{SavepointAction} generates a C{savepoint} statement.
         """
         self.assertEquals(SavepointAction("test")._name, "test")
 
 
     def test_limit(self):
         """
-        A L{Select} object with a 'Limit' keyword parameter will generate
-        a SQL statement with a 'limit' clause.
+        A L{Select} object with a C{Limit} keyword parameter will generate
+        a SQL statement with a C{limit} clause.
         """
         self.assertEquals(
-            Select([self.schema.FOO.BAR],
-                   From=self.schema.FOO,
-                   Limit=123).toSQL(),
-            SQLFragment(
-                "select BAR from FOO limit ?", [123]))
+            Select(
+                [self.schema.FOO.BAR],
+                From=self.schema.FOO,
+                Limit=123
+            ).toSQL(),
+            SQLFragment("select BAR from FOO limit ?", [123])
+        )
 
 
     def test_limitOracle(self):
         """
-        A L{Select} object with a 'Limit' keyword parameter will generate a SQL
-        statement using a ROWNUM subquery for Oracle.
+        A L{Select} object with a C{Limit} keyword parameter will generate a
+        SQL statement using a ROWNUM subquery for Oracle.
 
         See U{this "ask tom" article from 2006 for more
         information
         <http://www.oracle.com/technetwork/issue-archive/2006/06-sep/o56asktom-086197.html>}.
         """
         self.assertEquals(
-            Select([self.schema.FOO.BAR],
-                   From=self.schema.FOO,
-                   Limit=123).toSQL(QueryGenerator(ORACLE_DIALECT, FixedPlaceholder("?"))),
+            Select(
+                [self.schema.FOO.BAR],
+                From=self.schema.FOO,
+                Limit=123
+            ).toSQL(QueryGenerator(ORACLE_DIALECT, FixedPlaceholder("?"))),
             SQLFragment(
                 "select * from (select BAR from FOO) "
-                "where ROWNUM <= ?", [123])
+                "where ROWNUM <= ?", [123]
+            )
         )
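The ROWNUM rewrite that test_limitOracle's docstring describes can be summarized in a couple of lines.  This is only an illustration of the shape of the generated SQL, not the DAL's actual generator:

    def limitForOracle(innerSelect, limit):
        # Oracle has no "limit" clause, so wrap the query in a subselect
        # and constrain ROWNUM; the limit travels as a bind parameter.
        sql = "select * from ({0}) where ROWNUM <= ?".format(innerSelect)
        return sql, [limit]

    # limitForOracle("select BAR from FOO", 123)
    # -> ("select * from (select BAR from FOO) where ROWNUM <= ?", [123])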
 
 
     def test_having(self):
         """
-        A L{Select} object with a 'Having' keyword parameter will generate
-        a SQL statement with a 'having' expression.
+        A L{Select} object with a C{Having} keyword parameter will generate
+        a SQL statement with a C{having} expression.
         """
         self.assertEquals(
-            Select([self.schema.FOO.BAR],
-                   From=self.schema.FOO,
-                   Having=Max(self.schema.FOO.BAZ) < 7).toSQL(),
+            Select(
+                [self.schema.FOO.BAR],
+                From=self.schema.FOO,
+                Having=Max(self.schema.FOO.BAZ) < 7
+            ).toSQL(),
             SQLFragment("select BAR from FOO having max(BAZ) < ?", [7])
         )
 
 
     def test_distinct(self):
         """
-        A L{Select} object with a 'Disinct' keyword parameter with a value of
-        C{True} will generate a SQL statement with a 'distinct' keyword
+        A L{Select} object with a C{Distinct} keyword parameter with a value of
+        C{True} will generate a SQL statement with a C{distinct} keyword
         preceding its list of columns.
         """
         self.assertEquals(
-            Select([self.schema.FOO.BAR], From=self.schema.FOO,
-                   Distinct=True).toSQL(),
+            Select(
+                [self.schema.FOO.BAR],
+                From=self.schema.FOO,
+                Distinct=True
+            ).toSQL(),
             SQLFragment("select distinct BAR from FOO")
         )
 
@@ -1423,25 +1641,26 @@
     def test_nextSequenceValue(self):
         """
         When a sequence is used as a value in an expression, it renders as the
-        call to 'nextval' that will produce its next value.
+        call to C{nextval} that will produce its next value.
         """
         self.assertEquals(
-            Insert({self.schema.BOZ.QUX:
-                    self.schema.A_SEQ}).toSQL(),
-            SQLFragment("insert into BOZ (QUX) values (nextval('A_SEQ'))", []))
+            Insert({self.schema.BOZ.QUX: self.schema.A_SEQ}).toSQL(),
+            SQLFragment("insert into BOZ (QUX) values (nextval('A_SEQ'))", [])
+        )
 
 
     def test_nextSequenceValueOracle(self):
         """
         When a sequence is used as a value in an expression in the Oracle
-        dialect, it renders as the 'nextval' attribute of the appropriate
+        dialect, it renders as the C{nextval} attribute of the appropriate
         sequence.
         """
         self.assertEquals(
-            Insert({self.schema.BOZ.QUX:
-                    self.schema.A_SEQ}).toSQL(
-                        QueryGenerator(ORACLE_DIALECT, FixedPlaceholder("?"))),
-            SQLFragment("insert into BOZ (QUX) values (A_SEQ.nextval)", []))
+            Insert(
+                {self.schema.BOZ.QUX: self.schema.A_SEQ}
+            ).toSQL(QueryGenerator(ORACLE_DIALECT, FixedPlaceholder("?"))),
+            SQLFragment("insert into BOZ (QUX) values (A_SEQ.nextval)", [])
+        )
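The two sequence tests above reduce to a single dialect-dependent rendering rule.  A sketch of that rule, using the dialect constant strings from ienterprise.py; the helper itself is made up:

    POSTGRES_DIALECT = "postgres-dialect"
    ORACLE_DIALECT = "oracle-dialect"

    def renderNextValue(sequenceName, dialect):
        # Postgres calls the nextval() function; Oracle exposes nextval
        # as an attribute of the sequence object itself.
        if dialect == ORACLE_DIALECT:
            return "{0}.nextval".format(sequenceName)
        return "nextval('{0}')".format(sequenceName)

    # renderNextValue("A_SEQ", POSTGRES_DIALECT) -> "nextval('A_SEQ')"
    # renderNextValue("A_SEQ", ORACLE_DIALECT)   -> "A_SEQ.nextval"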
 
 
     def test_nextSequenceDefaultImplicitExplicitOracle(self):
@@ -1452,68 +1671,80 @@
         """
         addSQLToSchema(
             schema=self.schema.model,
-            schemaData="create table DFLTR (a varchar(255), "
-            "b integer default nextval('A_SEQ'));"
+            schemaData=(
+                "create table DFLTR (a varchar(255), "
+                "b integer default nextval('A_SEQ'));"
+            )
         )
         self.assertEquals(
-            Insert({self.schema.DFLTR.a: 'hello'}).toSQL(
+            Insert({self.schema.DFLTR.a: "hello"}).toSQL(
                 QueryGenerator(ORACLE_DIALECT, FixedPlaceholder("?"))
             ),
             SQLFragment("insert into DFLTR (a, b) values "
-                        "(?, A_SEQ.nextval)", ['hello']),
+                        "(?, A_SEQ.nextval)", ["hello"]),
         )
         # Should be the same if it's explicitly specified.
         self.assertEquals(
-            Insert({self.schema.DFLTR.a: 'hello',
-                    self.schema.DFLTR.b: self.schema.A_SEQ}).toSQL(
+            Insert(
+                {
+                    self.schema.DFLTR.a: "hello",
+                    self.schema.DFLTR.b: self.schema.A_SEQ
+                }
+            ).toSQL(
                 QueryGenerator(ORACLE_DIALECT, FixedPlaceholder("?"))
             ),
-            SQLFragment("insert into DFLTR (a, b) values "
-                        "(?, A_SEQ.nextval)", ['hello']),
+            SQLFragment(
+                "insert into DFLTR (a, b) values (?, A_SEQ.nextval)", ["hello"]
+            ),
         )
 
 
     def test_numericParams(self):
         """
-        An L{IAsyncTransaction} with the 'numeric' paramstyle attribute will
-        cause statements to be generated with parameters in the style of :1 :2
-        :3, as per the DB-API.
+        An L{IAsyncTransaction} with the C{numeric} paramstyle attribute will
+        cause statements to be generated with parameters in the style of
+        C{:1 :2 :3}, as per the DB-API.
         """
         stmts = []
+
         class FakeOracleTxn(object):
             def execSQL(self, text, params, exc):
                 stmts.append((text, params))
             dialect = ORACLE_DIALECT
-            paramstyle = 'numeric'
-        Select([self.schema.FOO.BAR],
-               From=self.schema.FOO,
-               Where=(self.schema.FOO.BAR == 7).And(
-                   self.schema.FOO.BAZ == 9)
-              ).on(FakeOracleTxn())
+            paramstyle = "numeric"
+
+        Select(
+            [self.schema.FOO.BAR],
+            From=self.schema.FOO,
+            Where=(self.schema.FOO.BAR == 7).And(self.schema.FOO.BAZ == 9)
+        ).on(FakeOracleTxn())
+
         self.assertEquals(
-            stmts, [("select BAR from FOO where BAR = :1 and BAZ = :2",
-                     [7, 9])]
+            stmts,
+            [("select BAR from FOO where BAR = :1 and BAZ = :2", [7, 9])]
         )
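A naive way to picture the numeric paramstyle that test_numericParams relies on is a placeholder renumbering pass; this toy version ignores quoted literals, which a real generator must not:

    def toNumericPlaceholders(sql):
        # Rewrite "?" placeholders as :1, :2, :3 ... per the DB-API
        # "numeric" paramstyle.
        out = []
        count = 0
        for character in sql:
            if character == "?":
                count += 1
                out.append(":{0}".format(count))
            else:
                out.append(character)
        return "".join(out)

    # toNumericPlaceholders("select BAR from FOO where BAR = ? and BAZ = ?")
    # -> "select BAR from FOO where BAR = :1 and BAZ = :2"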
 
 
     def test_rewriteOracleNULLs_Select(self):
         """
         Oracle databases cannot distinguish between the empty string and
-        C{NULL}.  When you insert an empty string, C{cx_Oracle} therefore treats
-        it as a C{None} and will return that when you select it back again.  We
-        address this in the schema by dropping 'not null' constraints.
+        C{NULL}.  When you insert an empty string, C{cx_Oracle} therefore
+        treats it as a C{None} and will return that when you select it back
+        again.  We address this in the schema by dropping C{not null}
+        constraints.
 
         Therefore, when executing a statement which includes a string column,
-        'on' should rewrite None return values from C{cx_Oracle} to be empty
+        C{on} should rewrite None return values from C{cx_Oracle} to be empty
         bytestrings, but only for string columns.
         """
-
         rows = resultOf(
-            Select([self.schema.NULLCHECK.ASTRING,
-                    self.schema.NULLCHECK.ANUMBER],
-                   From=self.schema.NULLCHECK).on(NullTestingOracleTxn()))[0]
+            Select(
+                [self.schema.NULLCHECK.ASTRING, self.schema.NULLCHECK.ANUMBER],
+                From=self.schema.NULLCHECK
+            ).on(NullTestingOracleTxn())
+        )[0]
 
-        self.assertEquals(rows, [['', None]])
+        self.assertEquals(rows, [["", None]])
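The rewriting described above can be pictured as a small post-processing pass over fetched rows.  A sketch, assuming the caller already knows which result columns are string-typed (the column introspection the DAL performs is elided here):

    def rewriteOracleNulls(rows, isStringColumn):
        # Replace None with "" in string columns only, so values stored as
        # empty strings round-trip even though Oracle conflates them with
        # NULL; non-string columns keep their genuine None/NULL values.
        rewritten = []
        for row in rows:
            rewritten.append([
                "" if (value is None and isStringColumn[i]) else value
                for i, value in enumerate(row)
            ])
        return rewritten

    # rewriteOracleNulls([[None, None]], [True, False]) -> [['', None]]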
 
 
     def test_rewriteOracleNULLs_SelectAllColumns(self):
@@ -1524,56 +1755,92 @@
         rows = resultOf(
             Select(From=self.schema.NULLCHECK).on(NullTestingOracleTxn())
         )[0]
-        self.assertEquals(rows, [['', None]])
+        self.assertEquals(rows, [["", None]])
 
 
     def test_nestedLogicalExpressions(self):
         """
         Make sure that logical operator precedence inserts proper parenthesis
-        when needed.  e.g. 'a.And(b.Or(c))' needs to be 'a and (b or c)' not 'a
-        and b or c'.
+        when needed.  e.g. C{a.And(b.Or(c))} needs to be C{a and (b or c)} not
+        C{a and b or c}.
         """
         self.assertEquals(
             Select(
                 From=self.schema.FOO,
-                Where=(self.schema.FOO.BAR != 7).
-                    And(self.schema.FOO.BAZ != 8).
-                    And((self.schema.FOO.BAR == 8).Or(self.schema.FOO.BAZ == 0))
+                Where=(
+                    (
+                        self.schema.FOO.BAR != 7
+                    ).And(
+                        self.schema.FOO.BAZ != 8
+                    ).And(
+                        (self.schema.FOO.BAR == 8).Or(self.schema.FOO.BAZ == 0)
+                    )
+                )
             ).toSQL(),
-            SQLFragment("select * from FOO where BAR != ? and BAZ != ? and "
-                        "(BAR = ? or BAZ = ?)", [7, 8, 8, 0]))
+            SQLFragment(
+                "select * from FOO where BAR != ? and BAZ != ? and "
+                "(BAR = ? or BAZ = ?)",
+                [7, 8, 8, 0]
+            )
+        )
 
         self.assertEquals(
             Select(
                 From=self.schema.FOO,
-                Where=(self.schema.FOO.BAR != 7).
-                    Or(self.schema.FOO.BAZ != 8).
-                    Or((self.schema.FOO.BAR == 8).And(self.schema.FOO.BAZ == 0))
+                Where=(
+                    (
+                        self.schema.FOO.BAR != 7
+                    ).Or(
+                        self.schema.FOO.BAZ != 8
+                    ).Or(
+                        (
+                            self.schema.FOO.BAR == 8
+                        ).And(
+                            self.schema.FOO.BAZ == 0
+                        )
+                    )
+                )
             ).toSQL(),
-            SQLFragment("select * from FOO where BAR != ? or BAZ != ? or "
-                        "BAR = ? and BAZ = ?", [7, 8, 8, 0]))
+            SQLFragment(
+                "select * from FOO where BAR != ? or BAZ != ? or "
+                "BAR = ? and BAZ = ?",
+                [7, 8, 8, 0]
+            )
+        )
 
         self.assertEquals(
             Select(
                 From=self.schema.FOO,
-                Where=(self.schema.FOO.BAR != 7).
-                    Or(self.schema.FOO.BAZ != 8).
-                    And((self.schema.FOO.BAR == 8).Or(self.schema.FOO.BAZ == 0))
+                Where=(
+                    (
+                        self.schema.FOO.BAR != 7
+                    ).Or(
+                        self.schema.FOO.BAZ != 8
+                    ).And(
+                        (self.schema.FOO.BAR == 8).Or(self.schema.FOO.BAZ == 0)
+                    )
+                )
             ).toSQL(),
-            SQLFragment("select * from FOO where (BAR != ? or BAZ != ?) and "
-                        "(BAR = ? or BAZ = ?)", [7, 8, 8, 0]))
+            SQLFragment(
+                "select * from FOO where (BAR != ? or BAZ != ?) and "
+                "(BAR = ? or BAZ = ?)",
+                [7, 8, 8, 0]
+            )
+        )
 
 
     def test_updateWithNULL(self):
         """
-        As per the DB-API specification, "SQL NULL values are represented by the
-        Python None singleton on input and output."  When a C{None} is provided
-        as a value to an L{Update}, it will be relayed to the database as a
-        parameter.
+        As per the DB-API specification, "SQL NULL values are represented by
+        the Python None singleton on input and output."  When a C{None} is
+        provided as a value to an L{Update}, it will be relayed to the database
+        as a parameter.
         """
         self.assertEquals(
-            Update({self.schema.BOZ.QUX: None},
-                   Where=self.schema.BOZ.QUX == 7).toSQL(),
+            Update(
+                {self.schema.BOZ.QUX: None},
+                Where=self.schema.BOZ.QUX == 7
+            ).toSQL(),
             SQLFragment("update BOZ set QUX = ? where QUX = ?", [None, 7])
         )
 
@@ -1581,20 +1848,27 @@
     def test_subSelectComparison(self):
         """
         A comparison of a column to a sub-select in a where clause will result
-        in a parenthetical 'Where' clause.
+        in a parenthetical C{where} clause.
         """
         self.assertEquals(
             Update(
                 {self.schema.BOZ.QUX: 9},
-                Where=self.schema.BOZ.QUX ==
-                Select([self.schema.FOO.BAR], From=self.schema.FOO,
-                       Where=self.schema.FOO.BAZ == 12)).toSQL(),
+                Where=(
+                    self.schema.BOZ.QUX ==
+                    Select(
+                        [self.schema.FOO.BAR],
+                        From=self.schema.FOO,
+                        Where=self.schema.FOO.BAZ == 12
+                    )
+                )
+            ).toSQL(),
             SQLFragment(
                 # NOTE: it's very important that the comparison _always_ go in
-                # this order (column from the UPDATE first, inner SELECT second)
-                # as the other order will be considered a syntax error.
-                "update BOZ set QUX = ? where QUX = ("
-                "select BAR from FOO where BAZ = ?)", [9, 12]
+                # this order (column from the UPDATE first, inner SELECT
+                # second) as the other order will be considered a syntax error.
+                "update BOZ set QUX = ? "
+                "where QUX = (select BAR from FOO where BAZ = ?)",
+                [9, 12]
             )
         )
 
@@ -1610,10 +1884,15 @@
         self.assertEquals(
             Update(
                 {self.schema.BOZ.QUX: 1},
-                Where=(self.schema.BOZ.QUX, self.schema.BOZ.QUUX) ==
-                Select([self.schema.FOO.BAR, self.schema.FOO.BAZ],
-                       From=self.schema.FOO,
-                       Where=self.schema.FOO.BAZ == 2)).toSQL(),
+                Where=(
+                    (self.schema.BOZ.QUX, self.schema.BOZ.QUUX) ==
+                    Select(
+                        [self.schema.FOO.BAR, self.schema.FOO.BAZ],
+                        From=self.schema.FOO,
+                        Where=self.schema.FOO.BAZ == 2
+                    )
+                )
+            ).toSQL(),
             SQLFragment(
                 # NOTE: it's very important that the comparison _always_ go in
                 # this order (tuple of columns from the UPDATE first, inner
@@ -1633,8 +1912,10 @@
             Select(
                 [self.schema.FOO.BAR],
                 From=self.schema.FOO,
-                Where=(Tuple([self.schema.FOO.BAR, self.schema.FOO.BAZ]) ==
-                       Tuple([Constant(7), Constant(9)]))
+                Where=(
+                    Tuple([self.schema.FOO.BAR, self.schema.FOO.BAZ]) ==
+                    Tuple([Constant(7), Constant(9)])
+                )
             ).toSQL(),
             SQLFragment(
                 "select BAR from FOO where (BAR, BAZ) = ((?, ?))", [7, 9]
@@ -1644,12 +1925,11 @@
 
     def test_oracleTableTruncation(self):
         """
-        L{Table}'s SQL generation logic will truncate table names if the dialect
-        (i.e. Oracle) demands it.  (See txdav.common.datastore.sql_tables for
-        the schema translator and enforcement of name uniqueness in the derived
-        schema.)
+        L{Table}'s SQL generation logic will truncate table names if the
+        dialect (i.e. Oracle) demands it.
+        (See txdav.common.datastore.sql_tables for the schema translator and
+        enforcement of name uniqueness in the derived schema.)
         """
-
         addSQLToSchema(
             self.schema.model,
             "create table veryveryveryveryveryveryveryverylong "
@@ -1657,18 +1937,20 @@
         )
         vvl = self.schema.veryveryveryveryveryveryveryverylong
         self.assertEquals(
-            Insert({vvl.foo: 1}).toSQL(QueryGenerator(ORACLE_DIALECT, FixedPlaceholder("?"))),
+            Insert({vvl.foo: 1}).toSQL(
+                QueryGenerator(ORACLE_DIALECT, FixedPlaceholder("?"))
+            ),
             SQLFragment(
-                "insert into veryveryveryveryveryveryveryve (foo) values "
-                "(?)", [1]
+                "insert into veryveryveryveryveryveryveryve (foo) values (?)",
+                [1]
             )
         )
 
 
     def test_columnEqualityTruth(self):
         """
-        Mostly in support of test_columnsAsDictKeys, the 'same' column should
-        compare True to itself and False to other values.
+        Mostly in support of L{test_columnsAsDictKeys}, the "same" column
+        should compare C{True} to itself and C{False} to other values.
         """
         s = self.schema
         self.assertEquals(bool(s.FOO.BAR == s.FOO.BAR), True)
@@ -1679,10 +1961,10 @@
     def test_columnsAsDictKeys(self):
         """
         An odd corner of the syntactic sugar provided by the DAL is that the
-        column objects have to participate both in augmented equality comparison
-        ("==" returns an expression object) as well as dictionary keys (for
-        Insert and Update statement objects).  Therefore it should be possible
-        to I{manipulate} dictionaries of keys as well.
+        column objects have to participate both in augmented equality
+        comparison (C{==} returns an expression object) as well as dictionary
+        keys (for Insert and Update statement objects).  Therefore it should be
+        possible to I{manipulate} dictionaries of keys as well.
         """
         values = {self.schema.FOO.BAR: 1}
         self.assertEquals(values, {self.schema.FOO.BAR: 1})
@@ -1695,31 +1977,38 @@
     def test_rewriteOracleNULLs_Insert(self):
         """
         The behavior described in L{test_rewriteOracleNULLs_Select} applies to
-        other statement types as well, specifically those with 'returning'
+        other statement types as well, specifically those with C{returning}
         clauses.
         """
         # Add 2 cursor variable values so that these will be used by
         # FakeVariable.getvalue.
         self.factory.varvals.extend([None, None])
         rows = self.resultOf(
-            Insert({self.schema.NULLCHECK.ASTRING: '',
-                    self.schema.NULLCHECK.ANUMBER: None},
-                   Return=[self.schema.NULLCHECK.ASTRING,
-                           self.schema.NULLCHECK.ANUMBER]
-                  ).on(self.createTransaction()))[0]
-        self.assertEquals(rows, [['', None]])
+            Insert(
+                {
+                    self.schema.NULLCHECK.ASTRING: "",
+                    self.schema.NULLCHECK.ANUMBER: None,
+                },
+                Return=[
+                    self.schema.NULLCHECK.ASTRING,
+                    self.schema.NULLCHECK.ANUMBER,
+                ]
+            ).on(self.createTransaction())
+        )[0]
+        self.assertEquals(rows, [["", None]])
 
 
     def test_insertMultiReturnOnOracleTxn(self):
         """
         As described in L{test_insertMultiReturnOracle}, Oracle deals with
-        'returning' clauses by using out parameters.  However, this is not quite
-        enough, as the code needs to actually retrieve the values from the out
-        parameters.
+        C{returning} clauses by using out parameters.  However, this is not
+        quite enough, as the code needs to actually retrieve the values from
+        the out parameters.
         """
-        i = Insert({self.schema.FOO.BAR: 40,
-                    self.schema.FOO.BAZ: 50},
-                   Return=(self.schema.FOO.BAR, self.schema.FOO.BAZ))
+        i = Insert(
+            {self.schema.FOO.BAR: 40, self.schema.FOO.BAZ: 50},
+            Return=(self.schema.FOO.BAR, self.schema.FOO.BAZ)
+        )
         self.factory.varvals.extend(["first val!", "second val!"])
         result = self.resultOf(i.on(self.createTransaction()))
         self.assertEquals(result, [[["first val!", "second val!"]]])
@@ -1737,15 +2026,18 @@
         """
         # This statement should return nothing from .fetchall(), so...
         self.factory.hasResults = False
-        i = Insert({self.schema.FOO.BAR: 40,
-                    self.schema.FOO.BAZ: 50})
+        i = Insert(
+            {self.schema.FOO.BAR: 40, self.schema.FOO.BAZ: 50}
+        )
         result = self.resultOf(i.on(self.createTransaction()))
         self.assertEquals(result, [None])
 
 
 
-class OracleConnectionTests(ConnectionPoolHelper, ExampleSchemaHelper,
-                            OracleConnectionMethods, TestCase):
+class OracleConnectionTests(
+    ConnectionPoolHelper, ExampleSchemaHelper, OracleConnectionMethods,
+    TestCase
+):
     """
     Tests which use an oracle connection.
     """
@@ -1754,22 +2046,24 @@
 
     def setUp(self):
         """
-        Create a fake oracle-ish connection pool without using real threads or a
-        real database.
+        Create a fake oracle-ish connection pool without using real threads or
+        a real database.
         """
-        self.patch(syntax, 'cx_Oracle', FakeCXOracleModule)
+        self.patch(syntax, "cx_Oracle", FakeCXOracleModule)
         super(OracleConnectionTests, self).setUp()
         ExampleSchemaHelper.setUp(self)
 
 
 
-class OracleNetConnectionTests(NetworkedPoolHelper, ExampleSchemaHelper,
-                               OracleConnectionMethods, TestCase):
+class OracleNetConnectionTests(
+    NetworkedPoolHelper, ExampleSchemaHelper, OracleConnectionMethods,
+    TestCase
+):
 
     dialect = ORACLE_DIALECT
 
     def setUp(self):
-        self.patch(syntax, 'cx_Oracle', FakeCXOracleModule)
+        self.patch(syntax, "cx_Oracle", FakeCXOracleModule)
         super(OracleNetConnectionTests, self).setUp()
         ExampleSchemaHelper.setUp(self)
         self.pump.client.dialect = ORACLE_DIALECT

Modified: twext/trunk/twext/enterprise/fixtures.py
===================================================================
--- twext/trunk/twext/enterprise/fixtures.py	2013-12-13 21:51:30 UTC (rev 12087)
+++ twext/trunk/twext/enterprise/fixtures.py	2013-12-13 23:59:41 UTC (rev 12088)
@@ -38,6 +38,7 @@
 from twext.internet.threadutils import ThreadHolder
 
 
+
 def buildConnectionPool(testCase, schemaText="", dialect=SQLITE_DIALECT):
     """
     Build a L{ConnectionPool} for testing purposes, with the given C{testCase}.
@@ -56,17 +57,21 @@
     """
     sqlitename = testCase.mktemp()
     seqs = {}
+
     def connectionFactory(label=testCase.id()):
         conn = sqlite3.connect(sqlitename)
+
         def nextval(seq):
             result = seqs[seq] = seqs.get(seq, 0) + 1
             return result
+
         conn.create_function("nextval", 1, nextval)
         return conn
+
     con = connectionFactory()
     con.executescript(schemaText)
     con.commit()
-    pool = ConnectionPool(connectionFactory, paramstyle='numeric',
+    pool = ConnectionPool(connectionFactory, paramstyle="numeric",
                           dialect=SQLITE_DIALECT)
     pool.startService()
     testCase.addCleanup(pool.stopService)
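For context, a typical use of this fixture from a trial test might look like the sketch below.  The THING table and the test itself are invented; the snippet assumes the pool's connection(), execSQL() and commit() methods behave as the tests elsewhere in this changeset use them:

    from twisted.trial.unittest import TestCase
    from twext.enterprise.fixtures import buildConnectionPool

    class SimplePoolTests(TestCase):
        def setUp(self):
            # buildConnectionPool starts the pool and registers stopService
            # as a cleanup, so no tearDown is needed here.
            self.pool = buildConnectionPool(
                self, "create table THING (ID integer, NAME varchar(255));"
            )

        def test_insert(self):
            txn = self.pool.connection()
            d = txn.execSQL(
                "insert into THING (ID, NAME) values (:1, :2)", [1, "hi"]
            )
            d.addCallback(lambda ignored: txn.commit())
            return d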
@@ -77,16 +82,18 @@
 def resultOf(deferred, propagate=False):
     """
     Add a callback and errback which will capture the result of a L{Deferred}
-    in a list, and return that list.  If 'propagate' is True, pass through the
+    in a list, and return that list.  If C{propagate} is True, pass through the
     results.
     """
     results = []
+
     if propagate:
         def cb(r):
             results.append(r)
             return r
     else:
         cb = results.append
+
     deferred.addBoth(cb)
     return results
 
@@ -128,12 +135,15 @@
             oget = newq.get
             newq.get = lambda: oget(timeout=0)
             oput = newq.put
+
             def putit(x):
                 p = oput(x)
                 if not self.test.paused:
                     self.flush()
                 return p
+
             newq.put = putit
+
         self._q_ = newq
 
 
@@ -184,7 +194,7 @@
 
     def suggestThreadPoolSize(self, size):
         """
-        Approximate the behavior of a 'real' reactor.
+        Approximate the behavior of a "real" reactor.
         """
         self._pool.adjustPoolsize(maxthreads=size)
 
@@ -314,8 +324,10 @@
 
 def synchronousConnectionFactory(test):
     tmpdb = test.mktemp()
+
     def connect():
         return sqlite3.connect(tmpdb)
+
     return connect
 
 
@@ -561,8 +573,10 @@
         @rtype: L{FakeConnection}
         """
         aConnection = FakeConnection(self)
+
         def thunk():
             return aConnection
+
         self._connectResultQueue.append(thunk)
         return aConnection
 

Modified: twext/trunk/twext/enterprise/ienterprise.py
===================================================================
--- twext/trunk/twext/enterprise/ienterprise.py	2013-12-13 21:51:30 UTC (rev 12087)
+++ twext/trunk/twext/enterprise/ienterprise.py	2013-12-13 23:59:41 UTC (rev 12088)
@@ -50,9 +50,9 @@
 
 
 
-POSTGRES_DIALECT = 'postgres-dialect'
-ORACLE_DIALECT = 'oracle-dialect'
-SQLITE_DIALECT = 'sqlite-dialect'
+POSTGRES_DIALECT = "postgres-dialect"
+ORACLE_DIALECT = "oracle-dialect"
+SQLITE_DIALECT = "sqlite-dialect"
 ORACLE_TABLE_NAME_MAX = 30
 
 
@@ -64,14 +64,16 @@
 
     paramstyle = Attribute(
         """
-        A copy of the 'paramstyle' attribute from a DB-API 2.0 module.
-        """)
+        A copy of the C{paramstyle} attribute from a DB-API 2.0 module.
+        """
+    )
 
     dialect = Attribute(
         """
-        A copy of the 'dialect' attribute from the connection pool.  One of the
-        C{*_DIALECT} constants in this module, such as C{POSTGRES_DIALECT}.
-        """)
+        A copy of the C{dialect} attribute from the connection pool.  One of
+        the C{*_DIALECT} constants in this module, such as L{POSTGRES_DIALECT}.
+        """
+    )
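Concretely, paramstyle mirrors the module-level constant that every DB-API 2.0 binding exports; for example, the stdlib sqlite3 binding advertises qmark (psycopg2, if installed, advertises pyformat):

    import sqlite3

    assert sqlite3.paramstyle == "qmark"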
 
 
     def execSQL(sql, args=(), raiseOnZeroRowCount=None):

Modified: twext/trunk/twext/enterprise/locking.py
===================================================================
--- twext/trunk/twext/enterprise/locking.py	2013-12-13 21:51:30 UTC (rev 12087)
+++ twext/trunk/twext/enterprise/locking.py	2013-12-13 23:59:41 UTC (rev 12088)
@@ -51,7 +51,7 @@
 
     @return: inSchema
     """
-    LockTable = Table(inSchema, 'NAMED_LOCK')
+    LockTable = Table(inSchema, "NAMED_LOCK")
 
     LockTable.addColumn("LOCK_NAME", SQLType("varchar", 255))
     LockTable.tableConstraint(Constraint.NOT_NULL, ["LOCK_NAME"])
@@ -85,11 +85,16 @@
         def autoRelease(self):
             txn.preCommit(lambda: self.release(True))
             return self
+
         def lockFailed(f):
             raise LockTimeout(name)
-        return cls.create(txn, lockName=name).addCallback(autoRelease).addErrback(lockFailed)
 
+        d = cls.create(txn, lockName=name)
+        d.addCallback(autoRelease)
+        d.addErrback(lockFailed)
+        return d
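The pattern above, splitting a chained Deferred pipeline into separate addCallback/addErrback statements, recurs throughout this changeset; both spellings are equivalent, as this small, self-contained illustration with made-up callbacks shows:

    from twisted.internet.defer import succeed

    def doubled(result):
        return result * 2

    def reportFailure(failure):
        return failure

    # Chained form: addCallback/addErrback return the Deferred itself.
    d1 = succeed(21).addCallback(doubled).addErrback(reportFailure)

    # Split form, as preferred by this style pass.
    d2 = succeed(21)
    d2.addCallback(doubled)
    d2.addErrback(reportFailure)

    # Both d1 and d2 end up with the result 42.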
 
+
     def release(self, ignoreAlreadyUnlocked=False):
         """
         Release this lock.

Modified: twext/trunk/twext/enterprise/queue.py
===================================================================
--- twext/trunk/twext/enterprise/queue.py	2013-12-13 21:51:30 UTC (rev 12087)
+++ twext/trunk/twext/enterprise/queue.py	2013-12-13 23:59:41 UTC (rev 12088)
@@ -115,7 +115,7 @@
     (in the worst case) pass from worker->controller->controller->worker.
     """
 
-    def performWork(table, workID): #@NoSelf
+    def performWork(table, workID):
         """
         @param table: The table where work is waiting.
         @type table: L{TableSyntax}
@@ -142,7 +142,7 @@
     # Initializing this duplicate schema avoids a circular dependency, but this
     # should really be accomplished with independent schema objects that the
     # transaction is made aware of somehow.
-    NodeTable = Table(inSchema, 'NODE_INFO')
+    NodeTable = Table(inSchema, "NODE_INFO")
 
     NodeTable.addColumn("HOSTNAME", SQLType("varchar", 255))
     NodeTable.addColumn("PID", SQLType("integer", None))
@@ -151,12 +151,14 @@
         # Note: in the real data structure, this is actually a not-cleaned-up
         # sqlparse internal data structure, but it *should* look closer to
         # this.
-        ProcedureCall("timezone", ["UTC", NamedValue('CURRENT_TIMESTAMP')])
+        ProcedureCall("timezone", ["UTC", NamedValue("CURRENT_TIMESTAMP")])
     )
     for column in NodeTable.columns:
         NodeTable.tableConstraint(Constraint.NOT_NULL, [column.name])
-    NodeTable.primaryKey = [NodeTable.columnNamed("HOSTNAME"),
-                            NodeTable.columnNamed("PORT")]
+    NodeTable.primaryKey = [
+        NodeTable.columnNamed("HOSTNAME"),
+        NodeTable.columnNamed("PORT"),
+    ]
 
     return inSchema
 
@@ -260,8 +262,9 @@
     @classmethod
     @wraps(thunk)
     def inner(cls, *a, **k):
-        raise NotImplementedError(qual(cls) + " does not implement " +
-                                  thunk.func_name)
+        raise NotImplementedError(
+            qual(cls) + " does not implement " + thunk.func_name
+        )
     return inner
 
 
@@ -360,6 +363,7 @@
 
     group = None
 
+
     @abstract
     def doWork(self):
         """
@@ -371,6 +375,7 @@
         will be taken care of by the job queueing machinery.
         """
 
+
     @classmethod
     def forTable(cls, table):
         """
@@ -457,8 +462,8 @@
 
 class ConnectionFromPeerNode(SchemaAMP):
     """
-    A connection to a peer node.  Symmetric; since the 'client' and the
-    'server' both serve the same role, the logic is the same in every node.
+    A connection to a peer node.  Symmetric; since the "client" and the
+    "server" both serve the same role, the logic is the same in every node.
 
     @ivar localWorkerPool: the pool of local worker processes that can process
         queue work.
@@ -492,8 +497,9 @@
         self.peerPool = peerPool
         self._bonusLoad = 0
         self._reportedLoad = 0
-        super(ConnectionFromPeerNode, self).__init__(peerPool.schema,
-                                                     boxReceiver, locator)
+        super(ConnectionFromPeerNode, self).__init__(
+            peerPool.schema, boxReceiver, locator
+        )
 
 
     def reportCurrentLoad(self):
@@ -553,13 +559,16 @@
         """
         d = self.callRemote(PerformWork, table=table, workID=workID)
         self._bonusLoad += 1
+
         @d.addBoth
         def performed(result):
             self._bonusLoad -= 1
             return result
+
         @d.addCallback
         def success(result):
             return None
+
         return d
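performWork above uses the Deferred's own addBoth method as a decorator to balance the load counter whatever the outcome; the same idiom in isolation, with invented names:

    from twisted.internet.defer import Deferred

    load = {"current": 0}

    def trackLoad(d):
        load["current"] += 1

        # Decorating with the bound addBoth method registers the function
        # as both callback and errback; returning the result leaves the
        # Deferred's value untouched for later callbacks.
        @d.addBoth
        def _done(result):
            load["current"] -= 1
            return result

        return d

    d = Deferred()
    trackLoad(d)       # load["current"] == 1
    d.callback(None)   # load["current"] drops back to 0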
 
 
@@ -577,9 +586,9 @@
 
         @return: a L{Deferred} that fires when the work has been completed.
         """
-        return self.peerPool.performWorkForPeer(table, workID).addCallback(
-            lambda ignored: {}
-        )
+        d = self.peerPool.performWorkForPeer(table, workID)
+        d.addCallback(lambda ignored: {})
+        return d
 
 
     @IdentifyNode.responder
@@ -720,10 +729,12 @@
         """
         d = self.callRemote(PerformWork, table=table, workID=workID)
         self._load += 1
+
         @d.addBoth
         def f(result):
             self._load -= 1
             return result
+
         return d
 
 
@@ -800,8 +811,9 @@
         process has instructed this worker to do it; so, look up the data in
         the row, and do it.
         """
-        return (ultimatelyPerform(self.transactionFactory, table, workID)
-                .addCallback(lambda ignored: {}))
+        d = ultimatelyPerform(self.transactionFactory, table, workID)
+        d.addCallback(lambda ignored: {})
+        return d
 
 
 
@@ -839,6 +851,7 @@
         except NoSuchRecord:
             # The record has already been removed
             pass
+
     return inTransaction(txnFactory, work)
 
 
@@ -948,30 +961,41 @@
         commit, and asking the local node controller process to do the work.
         """
         created = self.workItemType.create(self.txn, **self.kw)
+
         def whenCreated(item):
             self._whenProposed.callback(self)
+
             @self.txn.postCommit
             def whenDone():
                 self._whenCommitted.callback(self)
+
                 def maybeLater():
                     performer = self._chooser.choosePerformer()
-                    @passthru(performer.performWork(item.table, item.workID)
-                              .addCallback)
+
+                    @passthru(
+                        performer.performWork(item.table, item.workID)
+                        .addCallback
+                    )
                     def performed(result):
                         self._whenExecuted.callback(self)
+
                     @performed.addErrback
                     def notPerformed(why):
                         self._whenExecuted.errback(why)
+
                 reactor = self._chooser.reactor
                 when = max(0, astimestamp(item.notBefore) - reactor.seconds())
                 # TODO: Track the returned DelayedCall so it can be stopped
                 # when the service stops.
                 self._chooser.reactor.callLater(when, maybeLater)
+
             @self.txn.postAbort
             def whenFailed():
                 self._whenCommitted.errback(TransactionFailed)
+
         def whenNotCreated(failure):
             self._whenProposed.errback(failure)
+
         created.addCallbacks(whenCreated, whenNotCreated)
 
 
@@ -1192,6 +1216,7 @@
         """
         if self.workerPool.hasAvailableCapacity():
             return self.workerPool
+
         if self.peers and not onlyLocally:
             return sorted(self.peers, lambda p: p.currentLoadEstimate())[0]
         else:
@@ -1271,6 +1296,7 @@
                 self._lastSeenNodeIndex = nodes.index(
                     (self.thisProcess.hostname, self.thisProcess.port)
                 )
+
             for itemType in self.allWorkItemTypes():
                 tooLate = datetime.utcfromtimestamp(
                     self.reactor.seconds() - self.queueProcessTimeout
@@ -1282,6 +1308,7 @@
                     peer = self.choosePerformer()
                     yield peer.performWork(overdueItem.table,
                                            overdueItem.workID)
+
         return inTransaction(self.transactionFactory, workCheck)
 
     _currentWorkDeferred = None
@@ -1294,8 +1321,10 @@
         those checks in time based on the size of the cluster.
         """
         self._lostWorkCheckCall = None
-        @passthru(self._periodicLostWorkCheck().addErrback(log.err)
-                  .addCallback)
+
+        @passthru(
+            self._periodicLostWorkCheck().addErrback(log.err).addCallback
+        )
         def scheduleNext(result):
             self._currentWorkDeferred = None
             if not self.running:
@@ -1310,6 +1339,7 @@
             self._lostWorkCheckCall = self.reactor.callLater(
                 delay, self._lostWorkCheckLoop
             )
+
         self._currentWorkDeferred = scheduleNext
 
 
@@ -1340,10 +1370,12 @@
                     txn, hostname=self.hostname, port=self.ampPort,
                     pid=self.pid, time=datetime.now()
                 )
+
             for node in nodes:
                 self._startConnectingTo(node)
 
         self._startingUp = inTransaction(self.transactionFactory, startup)
+
         @self._startingUp.addBoth
         def done(result):
             self._startingUp = None
@@ -1358,14 +1390,19 @@
         Stop this service, terminating any incoming or outgoing connections.
         """
         yield super(PeerConnectionPool, self).stopService()
+
         if self._startingUp is not None:
             yield self._startingUp
+
         if self._listeningPort is not None:
             yield self._listeningPort.stopListening()
+
         if self._lostWorkCheckCall is not None:
             self._lostWorkCheckCall.cancel()
+
         if self._currentWorkDeferred is not None:
             yield self._currentWorkDeferred
+
         for peer in self.peers:
             peer.transport.abortConnection()
 
@@ -1397,16 +1434,21 @@
         @type node: L{NodeInfo}
         """
         connected = node.endpoint(self.reactor).connect(self.peerFactory())
+
         def whenConnected(proto):
             self.mapPeer(node.hostname, node.port, proto)
-            proto.callRemote(IdentifyNode,
-                             host=self.thisProcess.hostname,
-                             port=self.thisProcess.port).addErrback(
-                                 noted, "identify"
-                             )
+            proto.callRemote(
+                IdentifyNode,
+                host=self.thisProcess.hostname,
+                port=self.thisProcess.port
+            ).addErrback(noted, "identify")
+
         def noted(err, x="connect"):
-            log.msg("Could not {0} to cluster peer {1} because {2}"
-                    .format(x, node, str(err.value)))
+            log.msg(
+                "Could not {0} to cluster peer {1} because {2}"
+                .format(x, node, str(err.value))
+            )
+
         connected.addCallbacks(whenConnected, noted)
 
 

Modified: twext/trunk/twext/enterprise/test/test_adbapi2.py
===================================================================
--- twext/trunk/twext/enterprise/test/test_adbapi2.py	2013-12-13 21:51:30 UTC (rev 12087)
+++ twext/trunk/twext/enterprise/test/test_adbapi2.py	2013-12-13 23:59:41 UTC (rev 12088)
@@ -48,6 +48,7 @@
 from twext.enterprise.adbapi2 import _HookableOperation
 
 
+
 class TrashCollector(object):
     """
     Test helper for monitoring gc.garbage.
@@ -170,29 +171,29 @@
         [[counter, echo]] = alphaResult[0]
 
         b = self.createTransaction()
-        # 'b' should have opened a connection.
+        # "b" should have opened a connection.
         self.assertEquals(len(self.factory.connections), 2)
         betaResult = self.resultOf(b.execSQL("beta"))
         [[bcounter, becho]] = betaResult[0]
 
-        # both 'a' and 'b' are holding open a connection now; let's try to open
+        # both "a" and "b" are holding open a connection now; let's try to open
         # a third one.  (The ordering will be deterministic even if this fails,
         # because those threads are already busy.)
         c = self.createTransaction()
         gammaResult = self.resultOf(c.execSQL("gamma"))
 
-        # Did 'c' open a connection?  Let's hope not...
+        # Did "c" open a connection?  Let's hope not...
         self.assertEquals(len(self.factory.connections), 2)
         # SQL shouldn't be executed too soon...
         self.assertEquals(gammaResult, [])
 
         commitResult = self.resultOf(b.commit())
 
-        # Now that 'b' has committed, 'c' should be able to complete.
+        # Now that "b" has committed, "c" should be able to complete.
         [[ccounter, cecho]] = gammaResult[0]
 
-        # The connection for 'a' ought to still be busy, so let's make sure
-        # we're using the one for 'c'.
+        # The connection for "a" ought to still be busy, so let's make sure
+        # we're using the one for "c".
         self.assertEquals(ccounter, bcounter)
 
         # Sanity check: the commit should have succeeded!
@@ -232,9 +233,11 @@
         self.factory.willFail()
         self.factory.willConnect()
         c = self.createTransaction()
+
         def checkOneFailure():
             errors = self.flushLoggedErrors(FakeConnectionError)
             self.assertEquals(len(errors), 1)
+
         checkOneFailure()
         d = c.execSQL("alpha")
         happened = []
@@ -396,17 +399,21 @@
         """
         self.pauseHolders()
         preClose = self.createTransaction()
-        preCloseResult = self.resultOf(preClose.execSQL('statement'))
+        preCloseResult = self.resultOf(preClose.execSQL("statement"))
         stopResult = self.resultOf(self.pool.stopService())
         postClose = self.createTransaction()
         queryResult = self.resultOf(postClose.execSQL("hello"))
         self.assertEquals(stopResult, [])
         self.assertEquals(len(queryResult), 1)
-        self.assertEquals(queryResult[0].type,
-                          self.translateError(ConnectionError))
+        self.assertEquals(
+            queryResult[0].type,
+            self.translateError(ConnectionError)
+        )
         self.assertEquals(len(preCloseResult), 1)
-        self.assertEquals(preCloseResult[0].type,
-                          self.translateError(ConnectionError))
+        self.assertEquals(
+            preCloseResult[0].type,
+            self.translateError(ConnectionError)
+        )
 
 
     def test_abortFailsDuringStopService(self):
@@ -454,9 +461,11 @@
         # Use up the available connections ...
         for i in xrange(self.pool.maxConnections):
             active.append(self.createTransaction())
+
         # ... so that this one has to be spooled.
         spooled = self.createTransaction()
         result = self.resultOf(spooled.execSQL("alpha"))
+
         # sanity check, it would be bad if this actually executed.
         self.assertEqual(result, [])
         self.resultOf(spooled.abort())
@@ -474,17 +483,21 @@
 
         # steal it from the queue so we can do it out of order
         d, work = self.holders[0]._q.get()
+
         # that should be the only work unit so don't continue if something else
         # got in there
         self.assertEquals(list(self.holders[0]._q.queue), [])
         self.assertEquals(len(self.holders), 1)
         self.flushHolders()
         stopResult = self.resultOf(self.pool.stopService())
+
         # Sanity check that we haven't actually stopped it yet
         self.assertEquals(abortResult, [])
+
         # We haven't fired it yet, so the service had better not have
         # stopped...
         self.assertEquals(stopResult, [])
+
         d.callback(None)
         self.flushHolders()
         self.assertEquals(abortResult, [None])
@@ -516,16 +529,20 @@
         """
         tc = TrashCollector(self)
         commitExecuted = []
+
         def carefullyManagedScope():
             t = self.createTransaction()
+
             def holdAReference():
                 """
-                This is a hook that holds a reference to 't'.
+                This is a hook that holds a reference to "t".
                 """
                 commitExecuted.append(True)
                 return t.execSQL("teardown", [])
+
             hook(t, holdAReference)
             finish(t)
+
         self.failIf(commitExecuted, "Commit hook executed.")
         carefullyManagedScope()
         tc.checkTrash()
@@ -535,32 +552,40 @@
         """
         Committing a transaction does not cause gc garbage.
         """
-        self.circularReferenceTest(lambda txn: txn.commit(),
-                                   lambda txn, hook: txn.preCommit(hook))
+        self.circularReferenceTest(
+            lambda txn: txn.commit(),
+            lambda txn, hook: txn.preCommit(hook)
+        )
 
 
     def test_noGarbageOnCommitWithAbortHook(self):
         """
         Committing a transaction does not cause gc garbage.
         """
-        self.circularReferenceTest(lambda txn: txn.commit(),
-                                   lambda txn, hook: txn.postAbort(hook))
+        self.circularReferenceTest(
+            lambda txn: txn.commit(),
+            lambda txn, hook: txn.postAbort(hook)
+        )
 
 
     def test_noGarbageOnAbort(self):
         """
         Aborting a transaction does not cause gc garbage.
         """
-        self.circularReferenceTest(lambda txn: txn.abort(),
-                                   lambda txn, hook: txn.preCommit(hook))
+        self.circularReferenceTest(
+            lambda txn: txn.abort(),
+            lambda txn, hook: txn.preCommit(hook)
+        )
 
 
     def test_noGarbageOnAbortWithPostCommitHook(self):
         """
         Aborting a transaction does not cause gc garbage.
         """
-        self.circularReferenceTest(lambda txn: txn.abort(),
-                                   lambda txn, hook: txn.postCommit(hook))
+        self.circularReferenceTest(
+            lambda txn: txn.abort(),
+            lambda txn, hook: txn.postCommit(hook)
+        )
 
 
     def test_tooManyConnectionsWhileOthersFinish(self):
@@ -573,7 +598,8 @@
         self.pauseHolders()
         a.abort()
         b.abort()
-        # Remove the holders for the existing connections, so that the 'extra'
+
+        # Remove the holders for the existing connections, so that the "extra"
         # connection() call wins the race and gets executed first.
         self.holders[:] = []
         self.createTransaction()
@@ -640,7 +666,7 @@
     def test_reConnectWhenFirstExecFails(self):
         """
         Generally speaking, DB-API 2.0 adapters do not provide information
-        about the cause of a failed 'execute' method; they definitely don't
+        about the cause of a failed C{execute} method; they definitely don't
         provide it in a way which can be identified as related to the syntax of
         the query, the state of the database itself, the state of the
         connection, etc.
@@ -650,7 +676,7 @@
         exceptions which are raised by the I{first} statement executed in a
         transaction.
         """
-        # Allow 'connect' to succeed.  This should behave basically the same
+        # Allow C{connect} to succeed.  This should behave basically the same
         # whether connect() happened to succeed in some previous transaction
         # and it's recycling the underlying transaction, or connect() just
         # succeeded.  Either way you just have a _SingleTxn wrapping a
@@ -658,31 +684,37 @@
         txn = self.createTransaction()
         self.assertEquals(len(self.factory.connections), 1,
                           "Sanity check failed.")
+
         class CustomExecuteFailed(Exception):
             """
-            Custom 'execute-failed' exception.
+            Custom "execute-failed" exception.
             """
+
         self.factory.connections[0].executeWillFail(CustomExecuteFailed)
         results = self.resultOf(txn.execSQL("hello, world!"))
         [[[counter, echo]]] = results
         self.assertEquals("hello, world!", echo)
+
         # Two execution attempts should have been made, one on each connection.
         # The first failed with a RuntimeError, but that is deliberately
         # obscured, because then we tried again and it succeeded.
-        self.assertEquals(len(self.factory.connections), 2,
-                          "No new connection opened.")
+        self.assertEquals(
+            len(self.factory.connections), 2,
+            "No new connection opened."
+        )
         self.assertEquals(self.factory.connections[0].executions, 1)
         self.assertEquals(self.factory.connections[1].executions, 1)
         self.assertEquals(self.factory.connections[0].closed, True)
         self.assertEquals(self.factory.connections[1].closed, False)
 
-        # Nevertheless, since there is currently no classification of 'safe'
+        # Nevertheless, since there is currently no classification of "safe"
         # errors, we should probably log these messages when they occur.
         self.assertEquals(len(self.flushLoggedErrors(CustomExecuteFailed)), 1)
 
 
     def test_reConnectWhenFirstExecOnExistingConnectionFails(
-            self, moreFailureSetup=lambda factory: None):
+        self, moreFailureSetup=lambda factory: None
+    ):
         """
         Another situation that might arise is that a connection will be
         successfully connected, executed and recycled into the connection pool;
@@ -691,19 +723,23 @@
         """
         txn = self.createTransaction()
         moreFailureSetup(self.factory)
-        self.assertEquals(len(self.factory.connections), 1,
-                          "Sanity check failed.")
+        self.assertEquals(
+            len(self.factory.connections), 1, "Sanity check failed."
+        )
         results = self.resultOf(txn.execSQL("hello, world!"))
         txn.commit()
         [[[counter, echo]]] = results
         self.assertEquals("hello, world!", echo)
         txn2 = self.createTransaction()
-        self.assertEquals(len(self.factory.connections), 1,
-                          "Sanity check failed.")
+        self.assertEquals(
+            len(self.factory.connections), 1, "Sanity check failed."
+        )
+
         class CustomExecFail(Exception):
             """
-            Custom 'execute()' failure.
+            Custom C{execute()} failure.
             """
+
         self.factory.connections[0].executeWillFail(CustomExecFail)
         results = self.resultOf(txn2.execSQL("second try!"))
         txn2.commit()
@@ -721,13 +757,16 @@
         L{test_reConnectWhenFirstExecOnExistingConnectionFails}, the
         failure should be logged, but transparent to application code.
         """
+
         class BindingSpecificException(Exception):
             """
             Exception that's a placeholder for something that a database
             binding might raise.
             """
+
         def alsoFailClose(factory):
             factory.childCloseWillFail(BindingSpecificException())
+
         t = self.test_reConnectWhenFirstExecOnExistingConnectionFails(
             alsoFailClose
         )
@@ -742,8 +781,10 @@
         commit.
         """
         txn = self.createTransaction()
+
         def simple():
             simple.done = True
+
         simple.done = False
         txn.preCommit(simple)
         self.assertEquals(simple.done, False)
@@ -760,14 +801,19 @@
         """
         txn = self.createTransaction()
         d = Deferred()
+
         def wait():
             wait.started = True
+
             def executed(it):
                 wait.sqlResult = it
+
             # To make sure the _underlying_ commit operation was Deferred, we
             # have to execute some SQL to make sure it happens.
-            return (d.addCallback(lambda ignored: txn.execSQL("some test sql"))
-                     .addCallback(executed))
+            d.addCallback(lambda ignored: txn.execSQL("some test sql"))
+            d.addCallback(executed)
+            return d
+
         wait.started = False
         wait.sqlResult = None
         txn.preCommit(wait)
@@ -796,10 +842,13 @@
             test.aborted = False
             # Create transaction and add monitoring hooks.
             txn = self.createTransaction()
+
             def didCommit():
                 test.committed = True
+
             def didAbort():
                 test.aborted = True
+
             txn.postCommit(didCommit)
             txn.postAbort(didAbort)
             txn.preCommit(flawedCallable)
@@ -811,8 +860,10 @@
 
         def failer():
             return fail(ZeroDivisionError())
+
         def raiser():
             raise EOFError()
+
         test(failer, ZeroDivisionError)
         test(raiser, EOFError)
 
@@ -834,7 +885,8 @@
         self.factory.rollbackFail = True
         [x] = self.resultOf(txn.commit())
 
-        # No statements have been executed, so 'commit' will *not* be executed.
+        # No statements have been executed, so C{commit} will *not* be
+        # executed.
         self.assertEquals(self.factory.commitFail, True)
         self.assertIdentical(x, None)
         self.assertEquals(len(self.pool._free), 1)
@@ -865,13 +917,15 @@
         [f] = self.resultOf(txn.execSQL("divide by zero", []))
         f.trap(self.translateError(ZeroDivisionError))
         self.assertEquals(self.factory.connections[0].executions, 2)
+
         # Reconnection should work exactly as before.
         self.assertEquals(self.factory.connections[0].closed, False)
+
         # Application code has to roll back its transaction at this point,
         # since it failed (and we don't necessarily know why it failed: not
         # enough information).
         self.resultOf(txn.abort())
-        self.factory.connections[0].executions = 0 # re-set for next test
+        self.factory.connections[0].executions = 0  # re-set for next test
         self.assertEquals(len(self.factory.connections), 1)
         self.test_reConnectWhenFirstExecFails()
 
@@ -892,6 +946,7 @@
         self.assertEquals("maybe change something!", echo)
         self.factory.rollbackFail = True
         [x] = self.resultOf(txn.abort())
+
         # Abort does not propagate the error on, the transaction merely gets
         # disposed of.
         self.assertIdentical(x, None)
@@ -917,6 +972,7 @@
         self.assertEquals("maybe change something!", echo)
         [x] = self.resultOf(txn.commit())
         x.trap(self.translateError(CommitFail))
+
         self.assertEquals(len(self.pool._free), 1)
         self.assertEquals(self.pool._finishing, [])
         self.assertEquals(len(self.factory.connections), 2)
@@ -938,9 +994,11 @@
         c = self.resultOf(cb.execSQL("c"))
         cb.end()
         e = self.resultOf(txn.execSQL("e"))
-        self.assertEquals(self.factory.connections[0].cursors[0].allExecutions,
-                          [("a", []), ("b", []), ("c", []), ("d", []),
-                           ("e", [])])
+
+        self.assertEquals(
+            self.factory.connections[0].cursors[0].allExecutions,
+            [("a", []), ("b", []), ("c", []), ("d", []), ("e", [])]
+        )
         self.assertEquals(len(a), 1)
         self.assertEquals(len(b), 1)
         self.assertEquals(len(c), 1)
@@ -964,9 +1022,10 @@
         cb.end()
         self.flushHolders()
 
-        self.assertEquals(self.factory.connections[0].cursors[0].allExecutions,
-                          [("a", []), ("b", []), ("c", []), ("d", []),
-                           ("e", [])])
+        self.assertEquals(
+            self.factory.connections[0].cursors[0].allExecutions,
+            [("a", []), ("b", []), ("c", []), ("d", []), ("e", [])]
+        )
 
         self.assertEquals(len(a), 1)
         self.assertEquals(len(b), 1)
@@ -992,9 +1051,10 @@
         cb1.end()
         flush()
         self.flushHolders()
-        self.assertEquals(self.factory.connections[0].cursors[0].allExecutions,
-                          [("a", []), ("b", []), ("c", []), ("d", []),
-                           ("e", [])])
+        self.assertEquals(
+            self.factory.connections[0].cursors[0].allExecutions,
+            [("a", []), ("b", []), ("c", []), ("d", []), ("e", [])]
+        )
 
 
     def test_twoCommandBlocksLatently(self):
@@ -1028,8 +1088,10 @@
         commitResult = self.resultOf(txn.commit())
         self.resultOf(block.execSQL("in block"))
         self.assertEquals(commitResult, [])
-        self.assertEquals(self.factory.connections[0].cursors[0].allExecutions,
-                          [("in block", [])])
+        self.assertEquals(
+            self.factory.connections[0].cursors[0].allExecutions,
+            [("in block", [])]
+        )
         block.end()
         self.flushHolders()
         self.assertEquals(commitResult, [None])
@@ -1050,8 +1112,9 @@
         self.assertRaises(AlreadyFinishedError, block2.execSQL, "bar")
         self.assertRaises(AlreadyFinishedError, block.execSQL, "foo")
         self.assertRaises(AlreadyFinishedError, txn.execSQL, "baz")
-        self.assertEquals(self.factory.connections[0].cursors[0].allExecutions,
-                          [])
+        self.assertEquals(
+            self.factory.connections[0].cursors[0].allExecutions, []
+        )
         # end() should _not_ raise an exception, because this is the sort of
         # thing that might be around a try/finally or try/except; it's just
         # putting the commandBlock itself into a state consistent with the
@@ -1069,8 +1132,9 @@
         block = txn.commandBlock()
         block.end()
         self.assertRaises(AlreadyFinishedError, block.execSQL, "hello")
-        self.assertEquals(self.factory.connections[0].cursors[0].allExecutions,
-                          [])
+        self.assertEquals(
+            self.factory.connections[0].cursors[0].allExecutions, []
+        )
 
 
     def test_commandBlockAfterCommitRaises(self):
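
The CommandBlock tests above pin down two properties: SQL sent through a block runs contiguously, ahead of statements issued later on the enclosing transaction, and the transaction's commit does not proceed until the block is end()ed. A minimal usage sketch against that API follows; runContiguously is an illustrative name, and txn stands for any transaction from the pool.

    def runContiguously(txn, statements):
        """
        Minimal sketch of CommandBlock usage: statements sent through the
        block execute as one contiguous group, and the enclosing
        transaction's commit waits until the block has been ended.
        """
        block = txn.commandBlock()
        try:
            # Each execSQL returns a Deferred firing with that statement's
            # rows, in the order the statements were issued.
            results = [block.execSQL(sql) for sql in statements]
        finally:
            # Per the tests above, end() does not raise even when a prior
            # statement failed, so it is safe inside a finally clause.
            block.end()
        return results
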
@@ -1199,9 +1263,13 @@
         L{ConnectionPoolClient}.
         """
         super(NetworkedPoolHelper, self).setUp()
-        self.pump = IOPump(ConnectionPoolClient(dialect=self.dialect,
-                                                paramstyle=self.paramstyle),
-                           ConnectionPoolConnection(self.pool))
+        self.pump = IOPump(
+            ConnectionPoolClient(
+                dialect=self.dialect,
+                paramstyle=self.paramstyle
+            ),
+            ConnectionPoolConnection(self.pool)
+        )
 
 
     def flushHolders(self):
@@ -1300,5 +1368,3 @@
         self.assertEquals(hookOp._hooks, None)
         hookOp.addHook(hook)
         self.assertEquals(hookOp._hooks, None)
-
-
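
The hook behavior exercised throughout this file (preCommit hooks that may return Deferreds and hold back the commit, postCommit hooks that fire once the commit lands, postAbort hooks that fire when the transaction is discarded, and a failing preCommit hook turning the commit into an abort) is easiest to see from the calling side. Here is a minimal sketch against that hook API; the name addAuditHooks and the log argument are illustrative only.

    from twisted.internet.defer import Deferred

    def addAuditHooks(txn, log):
        """
        Attach lifecycle hooks to a pooled transaction (sketch only).
        """
        def beforeCommit():
            # A preCommit hook may return a Deferred; the commit is not
            # issued to the database until it fires.  If the hook fails,
            # the tests above show the transaction is aborted instead.
            d = Deferred()
            log.append("about to commit")
            d.callback(None)
            return d

        def afterCommit():
            log.append("committed")

        def afterAbort():
            log.append("aborted")

        txn.preCommit(beforeCommit)
        txn.postCommit(afterCommit)
        txn.postAbort(afterAbort)
        return txn
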

Modified: twext/trunk/twext/enterprise/test/test_fixtures.py
===================================================================
--- twext/trunk/twext/enterprise/test/test_fixtures.py	2013-12-13 21:51:30 UTC (rev 12087)
+++ twext/trunk/twext/enterprise/test/test_fixtures.py	2013-12-13 23:59:41 UTC (rev 12088)
@@ -26,6 +26,8 @@
 from twisted.trial.reporter import TestResult
 from twext.enterprise.adbapi2 import ConnectionPool
 
+
+
 class PoolTests(TestCase):
     """
     Tests for fixtures that create a connection pool.
@@ -37,13 +39,17 @@
         running only for the duration of the test.
         """
         collect = []
+
         class SampleTest(TestCase):
             def setUp(self):
                 self.pool = buildConnectionPool(self)
+
             def test_sample(self):
                 collect.append(self.pool.running)
+
             def tearDown(self):
                 collect.append(self.pool.running)
+
         r = TestResult()
         t = SampleTest("test_sample")
         t.run(r)
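
SampleTest above shows the whole lifecycle: the pool is running inside the test method and is torn down with the test. A trial test that wants the same behavior only needs the setUp call, roughly as sketched here; the import path twext.enterprise.fixtures for buildConnectionPool is assumed from the module this file tests.

    from twisted.trial.unittest import TestCase
    from twext.enterprise.fixtures import buildConnectionPool  # assumed path

    class ExampleWithPool(TestCase):
        """
        Minimal sketch: borrow a started connection pool for one test.
        """
        def setUp(self):
            # buildConnectionPool registers its own cleanups on the test
            # case, so the pool is stopped automatically afterwards.
            self.pool = buildConnectionPool(self)

        def test_poolIsRunning(self):
            # While the test body runs, the pool's service is started.
            self.assertTrue(self.pool.running)
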

Modified: twext/trunk/twext/enterprise/test/test_locking.py
===================================================================
--- twext/trunk/twext/enterprise/test/test_locking.py	2013-12-13 21:51:30 UTC (rev 12087)
+++ twext/trunk/twext/enterprise/test/test_locking.py	2013-12-13 23:59:41 UTC (rev 12088)
@@ -30,6 +30,8 @@
 create table NAMED_LOCK (LOCK_NAME varchar(255) unique primary key);
 """
 
+
+
 class TestLocking(TestCase):
     """
     Test locking and unlocking a database row.
@@ -87,6 +89,8 @@
         yield NamedLock.acquire(txn1, u"a test lock")
 
         txn2 = self.pool.connection()
-        yield self.assertFailure(NamedLock.acquire(txn2, u"a test lock"), LockTimeout)
+        yield self.assertFailure(
+            NamedLock.acquire(txn2, u"a test lock"), LockTimeout
+        )
         yield txn2.abort()
         self.flushLoggedErrors()
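
The test above fixes the contract: asking for an already-held lock name from a second transaction fails with LockTimeout. A caller can treat that failure as "busy, try again later", roughly as sketched below; the import path twext.enterprise.locking for NamedLock and LockTimeout is assumed from the module under test, and withNamedLock is an illustrative name.

    from twisted.internet.defer import inlineCallbacks, returnValue
    from twext.enterprise.locking import NamedLock, LockTimeout  # assumed path

    @inlineCallbacks
    def withNamedLock(txn, name, work):
        """
        Minimal sketch: run work(txn) only if the named lock can be taken
        on this transaction; the lock rides on the transaction and is
        expected to go away when the transaction finishes.
        """
        try:
            yield NamedLock.acquire(txn, name)
        except LockTimeout:
            # Another transaction holds the lock; give up rather than block.
            returnValue(None)
        result = yield work(txn)
        returnValue(result)
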

Modified: twext/trunk/twext/enterprise/test/test_queue.py
===================================================================
--- twext/trunk/twext/enterprise/test/test_queue.py	2013-12-13 21:51:30 UTC (rev 12087)
+++ twext/trunk/twext/enterprise/test/test_queue.py	2013-12-13 23:59:41 UTC (rev 12088)
@@ -38,7 +38,7 @@
 from twisted.trial.unittest import TestCase
 from twisted.python.failure import Failure
 from twisted.internet.defer import (
-    Deferred, inlineCallbacks, gatherResults, passthru#, returnValue
+    Deferred, inlineCallbacks, gatherResults, passthru
 )
 
 from twisted.application.service import Service, MultiService
@@ -62,6 +62,8 @@
 from twext.enterprise.queue import _BaseQueuer, NonPerformingQueuer
 import twext.enterprise.queue
 
+
+
 class Clock(_Clock):
     """
     More careful L{IReactorTime} fake which mimics the exception behavior of
@@ -122,31 +124,39 @@
             def __init__(self):
                 self.commits = []
                 self.aborts = []
+
             def commit(self):
                 self.commits.append(Deferred())
                 return self.commits[-1]
+
             def abort(self):
                 self.aborts.append(Deferred())
                 return self.aborts[-1]
 
         createdTxns = []
+
         def createTxn():
             createdTxns.append(faketxn())
             return createdTxns[-1]
+
         dfrs = []
+
         def operation(t):
             self.assertIdentical(t, createdTxns[-1])
             dfrs.append(Deferred())
             return dfrs[-1]
+
         d = inTransaction(createTxn, operation)
         x = []
         d.addCallback(x.append)
         self.assertEquals(x, [])
         self.assertEquals(len(dfrs), 1)
         dfrs[0].callback(35)
+
         # Commit in progress, so still no result...
         self.assertEquals(x, [])
         createdTxns[0].commits[0].callback(42)
+
         # Committed, everything's done.
         self.assertEquals(x, [35])
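
The ordering asserted here is the point of inTransaction: the caller's Deferred is withheld until the commit triggered by the operation has completed, and the value delivered is the operation's result (35), not the commit's (42). Used directly, the pattern looks roughly like this sketch; the import from twext.enterprise.queue is assumed, and countRows with its SQL is purely illustrative.

    from twisted.internet.defer import inlineCallbacks, returnValue
    from twext.enterprise.queue import inTransaction  # assumed path

    @inlineCallbacks
    def countRows(transactionCreator, table):
        """
        Minimal sketch: run one query in its own transaction and return a
        scalar only after the commit has gone through.
        """
        def operation(txn):
            # Hypothetical SQL; any statement returning rows would do.
            return txn.execSQL("select count(*) from " + table, [])

        rows = yield inTransaction(transactionCreator, operation)
        returnValue(rows[0][0])
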
 
@@ -154,31 +164,39 @@
 
 class SimpleSchemaHelper(SchemaTestHelper):
     def id(self):
-        return 'worker'
+        return "worker"
 
+
+
 SQL = passthru
 
-schemaText = SQL("""
+schemaText = SQL(
+    """
     create table DUMMY_WORK_ITEM (WORK_ID integer primary key,
                                   NOT_BEFORE timestamp,
                                   A integer, B integer,
                                   DELETE_ON_LOAD integer default 0);
     create table DUMMY_WORK_DONE (WORK_ID integer primary key,
                                   A_PLUS_B integer);
-""")
+    """
+)
 
-nodeSchema = SQL("""
+nodeSchema = SQL(
+    """
     create table NODE_INFO (HOSTNAME varchar(255) not null,
                             PID integer not null,
                             PORT integer not null,
                             TIME timestamp default current_timestamp not null,
                             primary key (HOSTNAME, PORT));
-""")
+    """
+)
 
 schema = SchemaSyntax(SimpleSchemaHelper().schemaFromString(schemaText))
 
-dropSQL = ["drop table {name}".format(name=table.model.name)
-           for table in schema]
+dropSQL = [
+    "drop table {name}".format(name=table.model.name)
+    for table in schema
+]
 
 
 class DummyWorkDone(Record, fromTable(schema.DUMMY_WORK_DONE)):
@@ -228,13 +246,16 @@
         L{TableSyntaxByName}.
         """
         client = SchemaAMP(schema)
+
         class SampleCommand(Command):
-            arguments = [('table', TableSyntaxByName())]
+            arguments = [("table", TableSyntaxByName())]
+
         class Receiver(SchemaAMP):
             @SampleCommand.responder
             def gotIt(self, table):
                 self.it = table
                 return {}
+
         server = Receiver(schema)
         clientT = StringTransport()
         serverT = StringTransport()
@@ -393,20 +414,25 @@
         connection = Connection(local, remote)
         connection.start()
         d = Deferred()
+
         class DummyPerformer(object):
             def performWork(self, table, workID):
                 self.table = table
                 self.workID = workID
                 return d
+
         # Doing real database I/O in this test would be tedious so fake the
         # first method in the call stack which actually talks to the DB.
         dummy = DummyPerformer()
+
         def chooseDummy(onlyLocally=False):
             return dummy
+
         peer.choosePerformer = chooseDummy
         performed = local.performWork(schema.DUMMY_WORK_ITEM, 7384)
         performResult = []
         performed.addCallback(performResult.append)
+
         # Sanity check.
         self.assertEquals(performResult, [])
         connection.flush()
@@ -460,9 +486,11 @@
             )
         yield setup
         yield qpool._periodicLostWorkCheck()
+
         @transactionally(dbpool.connection)
         def check(txn):
             return DummyWorkDone.all(txn)
+
         every = yield check
         self.assertEquals([x.aPlusB for x in every], [7])
 
@@ -482,11 +510,14 @@
         qpool = PeerConnectionPool(clock, dbpool.connection, 0, schema)
         realChoosePerformer = qpool.choosePerformer
         performerChosen = []
+
         def catchPerformerChoice():
             result = realChoosePerformer()
             performerChosen.append(True)
             return result
+
         qpool.choosePerformer = catchPerformerChoice
+
         @transactionally(dbpool.connection)
         def check(txn):
             return qpool.enqueueWork(
@@ -529,17 +560,21 @@
         qpool = PeerConnectionPool(clock, dbpool.connection, 0, schema)
         realChoosePerformer = qpool.choosePerformer
         performerChosen = []
+
         def catchPerformerChoice():
             result = realChoosePerformer()
             performerChosen.append(True)
             return result
+
         qpool.choosePerformer = catchPerformerChoice
+
         @transactionally(dbpool.connection)
         def check(txn):
             return qpool.enqueueWork(
                 txn, DummyWorkItem, a=3, b=9,
                 notBefore=datetime.datetime(2012, 12, 12, 12, 12, 0)
             ).whenProposed()
+
         proposal = yield check
 
         clock.advance(1000)
@@ -558,13 +593,16 @@
         clock = Clock()
         peerPool = PeerConnectionPool(clock, None, 4322, schema)
         factory = peerPool.workerListenerFactory()
+
         def peer():
             p = factory.buildProtocol(None)
             t = StringTransport()
             p.makeConnection(t)
             return p, t
+
         worker1, _ignore_trans1 = peer()
         worker2, _ignore_trans2 = peer()
+
         # Ask the worker to do something.
         worker1.performWork(schema.DUMMY_WORK_ITEM, 1)
         self.assertEquals(worker1.currentLoad, 1)
@@ -587,11 +625,13 @@
         cph.setUp(self)
         pcp = PeerConnectionPool(reactor, cph.pool.connection, 4321, schema)
         now = then + datetime.timedelta(seconds=pcp.queueProcessTimeout * 2)
+
         @transactionally(cph.pool.connection)
         def createOldWork(txn):
             one = DummyWorkItem.create(txn, workID=1, a=3, b=4, notBefore=then)
             two = DummyWorkItem.create(txn, workID=2, a=7, b=9, notBefore=now)
             return gatherResults([one, two])
+
         pcp.startService()
         cph.flushHolders()
         reactor.advance(pcp.queueProcessTimeout * 2)
@@ -696,23 +736,29 @@
         L{PeerConnectionPool} requires access to a database and the reactor.
         """
         self.store = yield buildStore(self, None)
+
         def doit(txn):
             return txn.execSQL(schemaText)
-        yield inTransaction(lambda: self.store.newTransaction("bonus schema"),
-                            doit)
+
+        yield inTransaction(
+            lambda: self.store.newTransaction("bonus schema"), doit
+        )
+
         def indirectedTransactionFactory(*a):
             """
-            Allow tests to replace 'self.store.newTransaction' to provide
+            Allow tests to replace "self.store.newTransaction" to provide
             fixtures with extra methods on a test-by-test basis.
             """
             return self.store.newTransaction(*a)
+
         def deschema():
             @inlineCallbacks
             def deletestuff(txn):
                 for stmt in dropSQL:
                     yield txn.execSQL(stmt)
-            return inTransaction(lambda *a: self.store.newTransaction(*a),
-                                 deletestuff)
+            return inTransaction(
+                lambda *a: self.store.newTransaction(*a), deletestuff
+            )
         self.addCleanup(deschema)
 
         from twisted.internet import reactor
@@ -725,8 +771,10 @@
             def __init__(self, d):
                 super(FireMeService, self).__init__()
                 self.d = d
+
             def startService(self):
                 self.d.callback(None)
+
         d1 = Deferred()
         d2 = Deferred()
         FireMeService(d1).setServiceParent(self.node1)
@@ -762,7 +810,7 @@
         """
         # TODO: this exact test should run against LocalQueuer as well.
         def operation(txn):
-            # TODO: how does 'enqueue' get associated with the transaction?
+            # TODO: how does "enqueue" get associated with the transaction?
             # This is not the fact with a raw t.w.enterprise transaction.
             # Should probably do something with components.
             return txn.enqueue(DummyWorkItem, a=3, b=4, workID=4321,
@@ -770,10 +818,16 @@
         result = yield inTransaction(self.store.newTransaction, operation)
         # Wait for it to be executed.  Hopefully this does not time out :-\.
         yield result.whenExecuted()
+
         def op2(txn):
-            return Select([schema.DUMMY_WORK_DONE.WORK_ID,
-                           schema.DUMMY_WORK_DONE.A_PLUS_B],
-                          From=schema.DUMMY_WORK_DONE).on(txn)
+            return Select(
+                [
+                    schema.DUMMY_WORK_DONE.WORK_ID,
+                    schema.DUMMY_WORK_DONE.A_PLUS_B,
+                ],
+                From=schema.DUMMY_WORK_DONE
+            ).on(txn)
+
         rows = yield inTransaction(self.store.newTransaction, op2)
         self.assertEquals(rows, [[4321, 7]])
 
@@ -784,30 +838,45 @@
         When a L{WorkItem} is concurrently deleted by another transaction, it
         should I{not} perform its work.
         """
-        # Provide access to a method called 'concurrently' everything using
+        # Give every transaction created by the store a "concurrently" method.
         original = self.store.newTransaction
+
         def decorate(*a, **k):
             result = original(*a, **k)
             result.concurrently = self.store.newTransaction
             return result
+
         self.store.newTransaction = decorate
 
         def operation(txn):
-            return txn.enqueue(DummyWorkItem, a=30, b=40, workID=5678,
-                               deleteOnLoad=1,
-                               notBefore=datetime.datetime.utcnow())
+            return txn.enqueue(
+                DummyWorkItem, a=30, b=40, workID=5678,
+                deleteOnLoad=1,
+                notBefore=datetime.datetime.utcnow()
+            )
+
         proposal = yield inTransaction(self.store.newTransaction, operation)
         yield proposal.whenExecuted()
+
         # Sanity check on the concurrent deletion.
         def op2(txn):
-            return Select([schema.DUMMY_WORK_ITEM.WORK_ID],
-                          From=schema.DUMMY_WORK_ITEM).on(txn)
+            return Select(
+                [schema.DUMMY_WORK_ITEM.WORK_ID],
+                From=schema.DUMMY_WORK_ITEM
+            ).on(txn)
+
         rows = yield inTransaction(self.store.newTransaction, op2)
         self.assertEquals(rows, [])
+
         def op3(txn):
-            return Select([schema.DUMMY_WORK_DONE.WORK_ID,
-                           schema.DUMMY_WORK_DONE.A_PLUS_B],
-                          From=schema.DUMMY_WORK_DONE).on(txn)
+            return Select(
+                [
+                    schema.DUMMY_WORK_DONE.WORK_ID,
+                    schema.DUMMY_WORK_DONE.A_PLUS_B,
+                ],
+                From=schema.DUMMY_WORK_DONE
+            ).on(txn)
+
         rows = yield inTransaction(self.store.newTransaction, op3)
         self.assertEquals(rows, [])
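
Both tests above follow the same enqueue-and-wait shape: propose the work item inside one transaction, wait for whenExecuted(), then check the outcome in a fresh transaction. Factored out, that shape looks roughly like the sketch below; runWorkAndWait is an illustrative name and the inTransaction import path is assumed, as before.

    import datetime

    from twisted.internet.defer import inlineCallbacks, returnValue
    from twext.enterprise.queue import inTransaction  # assumed path

    @inlineCallbacks
    def runWorkAndWait(transactionCreator, workItemType, **columns):
        """
        Minimal sketch: enqueue a work item and wait until a worker has
        actually performed it, so follow-up queries can see its output.
        """
        def operation(txn):
            # The proposal comes back via the transaction's enqueue().
            return txn.enqueue(
                workItemType,
                notBefore=datetime.datetime.utcnow(),
                **columns
            )

        proposal = yield inTransaction(transactionCreator, operation)
        # Fires once the queue has executed (and deleted) the work item.
        yield proposal.whenExecuted()
        returnValue(proposal)
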
 

Modified: twext/trunk/twext/enterprise/test/test_util.py
===================================================================
--- twext/trunk/twext/enterprise/test/test_util.py	2013-12-13 21:51:30 UTC (rev 12087)
+++ twext/trunk/twext/enterprise/test/test_util.py	2013-12-13 23:59:41 UTC (rev 12088)
@@ -20,6 +20,8 @@
 
 from twext.enterprise.util import parseSQLTimestamp
 
+
+
 class TimestampTests(TestCase):
     """
     Tests for date-related functions.

Modified: twext/trunk/twext/enterprise/util.py
===================================================================
--- twext/trunk/twext/enterprise/util.py	2013-12-13 21:51:30 UTC (rev 12087)
+++ twext/trunk/twext/enterprise/util.py	2013-12-13 23:59:41 UTC (rev 12088)
@@ -44,12 +44,12 @@
     @param column: a single value from a column.
 
     @return: a converted value based on the type of the input; oracle CLOBs and
-        datetime timestamps will be converted to strings, unicode values will be
-        converted to UTF-8 encoded byte sequences (C{str}s), and floating point
-        numbers will be converted to integer types if they are integers.  Any
-        other types will be left alone.
+        datetime timestamps will be converted to strings, unicode values will
+        be converted to UTF-8 encoded byte sequences (C{str}s), and floating
+        point numbers will be converted to integer types if they are integers.
+        Any other types will be left alone.
     """
-    if hasattr(column, 'read'):
+    if hasattr(column, "read"):
         # Try to detect large objects and format convert them to
         # strings on the fly.  We need to do this as we read each
         # row, due to the issue described here -
@@ -57,6 +57,7 @@
         # particular, the part where it says "In particular, do not
         # use the fetchall() method".
         column = column.read()
+
     elif isinstance(column, datetime):
         # cx_Oracle properly maps the type of timestamps to datetime
         # objects.  However, our code is mostly written against
@@ -65,21 +66,22 @@
         # just detect the datetimes and stringify them, for now
         # we'll do that.
         return column.strftime(SQL_TIMESTAMP_FORMAT)
+
     elif isinstance(column, float):
-        # cx_Oracle maps _all_ nubmers to float types, which is more consistent,
-        # but we expect the database to be able to store integers as integers
-        # (in fact almost all the values in our schema are integers), so we map
-        # those values which exactly match back into integers.
+        # cx_Oracle maps _all_ numbers to float types, which is more
+        # consistent, but we expect the database to be able to store integers
+        # as integers (in fact almost all the values in our schema are
+        # integers), so we map those values which exactly match back into
+        # integers.
         if int(column) == column:
             return int(column)
         else:
             return column
+
     if isinstance(column, unicode):
         # Finally, we process all data as UTF-8 bytestrings in order to reduce
         # memory consumption.  Pass any unicode string values back to the
         # application as unicode.
-        column = column.encode('utf-8')
+        column = column.encode("utf-8")
+
     return column
-
-
-
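
Condensed into one place, the per-column conversions documented above amount to the following. This is a standalone restatement rather than the module's actual helper: normalizeColumn is an illustrative name, and the format string is only assumed to match SQL_TIMESTAMP_FORMAT in util.py.

    from datetime import datetime

    # Assumed to stand in for util.SQL_TIMESTAMP_FORMAT; illustrative only.
    SQL_TIMESTAMP_FORMAT = "%Y-%m-%d %H:%M:%S.%f"

    def normalizeColumn(column):
        """
        Sketch of the rules above: read() LOB-like values row by row,
        stringify datetimes, collapse whole-number floats to ints, and
        UTF-8 encode unicode text; anything else passes through.
        """
        if hasattr(column, "read"):
            return column.read()
        if isinstance(column, datetime):
            return column.strftime(SQL_TIMESTAMP_FORMAT)
        if isinstance(column, float):
            return int(column) if int(column) == column else column
        if isinstance(column, unicode):
            return column.encode("utf-8")
        return column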