[CalendarServer-changes] [8138] CalendarServer/branches/users/cdaboo/implicituidrace
source_changes at macosforge.org
source_changes at macosforge.org
Mon Oct 3 08:27:24 PDT 2011
Revision: 8138
http://trac.macosforge.org/projects/calendarserver/changeset/8138
Author: cdaboo at apple.com
Date: 2011-10-03 08:27:22 -0700 (Mon, 03 Oct 2011)
Log Message:
-----------
Implicit UID race fixes.
Modified Paths:
--------------
CalendarServer/branches/users/cdaboo/implicituidrace/calendarserver/tap/caldav.py
CalendarServer/branches/users/cdaboo/implicituidrace/calendarserver/tap/util.py
CalendarServer/branches/users/cdaboo/implicituidrace/twext/enterprise/adbapi2.py
CalendarServer/branches/users/cdaboo/implicituidrace/twext/enterprise/dal/syntax.py
CalendarServer/branches/users/cdaboo/implicituidrace/twext/enterprise/dal/test/test_sqlsyntax.py
CalendarServer/branches/users/cdaboo/implicituidrace/twext/enterprise/test/test_adbapi2.py
CalendarServer/branches/users/cdaboo/implicituidrace/twistedcaldav/mail.py
CalendarServer/branches/users/cdaboo/implicituidrace/twistedcaldav/method/put_common.py
CalendarServer/branches/users/cdaboo/implicituidrace/twistedcaldav/resource.py
CalendarServer/branches/users/cdaboo/implicituidrace/twistedcaldav/schedule.py
CalendarServer/branches/users/cdaboo/implicituidrace/twistedcaldav/scheduling/processing.py
CalendarServer/branches/users/cdaboo/implicituidrace/twistedcaldav/scheduling/scheduler.py
CalendarServer/branches/users/cdaboo/implicituidrace/twistedcaldav/storebridge.py
CalendarServer/branches/users/cdaboo/implicituidrace/twistedcaldav/test/test_wrapping.py
CalendarServer/branches/users/cdaboo/implicituidrace/twistedcaldav/upgrade.py
CalendarServer/branches/users/cdaboo/implicituidrace/txdav/base/datastore/file.py
CalendarServer/branches/users/cdaboo/implicituidrace/txdav/base/propertystore/sql.py
CalendarServer/branches/users/cdaboo/implicituidrace/txdav/base/propertystore/test/test_sql.py
CalendarServer/branches/users/cdaboo/implicituidrace/txdav/caldav/datastore/sql.py
CalendarServer/branches/users/cdaboo/implicituidrace/txdav/caldav/datastore/test/common.py
CalendarServer/branches/users/cdaboo/implicituidrace/txdav/caldav/datastore/test/test_file.py
CalendarServer/branches/users/cdaboo/implicituidrace/txdav/caldav/datastore/test/test_sql.py
CalendarServer/branches/users/cdaboo/implicituidrace/txdav/carddav/datastore/sql.py
CalendarServer/branches/users/cdaboo/implicituidrace/txdav/carddav/datastore/test/common.py
CalendarServer/branches/users/cdaboo/implicituidrace/txdav/carddav/datastore/test/test_file.py
CalendarServer/branches/users/cdaboo/implicituidrace/txdav/carddav/datastore/test/test_sql.py
CalendarServer/branches/users/cdaboo/implicituidrace/txdav/common/datastore/file.py
CalendarServer/branches/users/cdaboo/implicituidrace/txdav/common/datastore/sql.py
CalendarServer/branches/users/cdaboo/implicituidrace/txdav/common/datastore/sql_legacy.py
CalendarServer/branches/users/cdaboo/implicituidrace/txdav/common/datastore/test/test_util.py
CalendarServer/branches/users/cdaboo/implicituidrace/txdav/common/datastore/test/util.py
CalendarServer/branches/users/cdaboo/implicituidrace/txdav/common/datastore/util.py
CalendarServer/branches/users/cdaboo/implicituidrace/txdav/idav.py
Modified: CalendarServer/branches/users/cdaboo/implicituidrace/calendarserver/tap/caldav.py
===================================================================
--- CalendarServer/branches/users/cdaboo/implicituidrace/calendarserver/tap/caldav.py 2011-10-03 14:58:12 UTC (rev 8137)
+++ CalendarServer/branches/users/cdaboo/implicituidrace/calendarserver/tap/caldav.py 2011-10-03 15:27:22 UTC (rev 8138)
@@ -640,9 +640,9 @@
"""
pool = None
if config.DBAMPFD:
- txnFactory = transactionFactoryFromFD(int(config.DBAMPFD))
+ transactionFactory = transactionFactoryFromFD(int(config.DBAMPFD))
elif not config.UseDatabase:
- txnFactory = None
+ transactionFactory = None
elif not config.SharedConnectionPool:
dialect = POSTGRES_DIALECT
paramstyle = 'pyformat'
@@ -663,12 +663,12 @@
pool = ConnectionPool(connectionFactory, dialect=dialect,
paramstyle=paramstyle,
maxConnections=config.MaxDBConnectionsPerPool)
- txnFactory = pool.connection
+ transactionFactory = pool.connection
else:
raise UsageError(
"trying to use DB in slave, but no connection info from parent"
)
- store = storeFromConfig(config, txnFactory)
+ store = storeFromConfig(config, transactionFactory)
result = self.requestProcessingService(options, store)
if pool is not None:
pool.setServiceParent(result)
Modified: CalendarServer/branches/users/cdaboo/implicituidrace/calendarserver/tap/util.py
===================================================================
--- CalendarServer/branches/users/cdaboo/implicituidrace/calendarserver/tap/util.py 2011-10-03 14:58:12 UTC (rev 8137)
+++ CalendarServer/branches/users/cdaboo/implicituidrace/calendarserver/tap/util.py 2011-10-03 15:27:22 UTC (rev 8138)
@@ -170,7 +170,7 @@
-def storeFromConfig(config, txnFactory):
+def storeFromConfig(config, transactionFactory):
"""
Produce an L{IDataStore} from the given configuration, transaction factory,
and notifier factory.
@@ -192,9 +192,9 @@
quota = config.UserQuota
if quota == 0:
quota = None
- if txnFactory is not None:
+ if transactionFactory is not None:
return CommonSQLDataStore(
- txnFactory, notifierFactory, FilePath(config.AttachmentsRoot),
+ transactionFactory, notifierFactory, FilePath(config.AttachmentsRoot),
config.EnableCalDAV, config.EnableCardDAV,
quota=quota
)
Modified: CalendarServer/branches/users/cdaboo/implicituidrace/twext/enterprise/adbapi2.py
===================================================================
--- CalendarServer/branches/users/cdaboo/implicituidrace/twext/enterprise/adbapi2.py 2011-10-03 14:58:12 UTC (rev 8137)
+++ CalendarServer/branches/users/cdaboo/implicituidrace/twext/enterprise/adbapi2.py 2011-10-03 15:27:22 UTC (rev 8138)
@@ -133,7 +133,7 @@
-class _ConnectedTxn(object):
+class _ConnectedTransaction(object):
"""
L{IAsyncTransaction} implementation based on a L{ThreadHolder} in the
current process.
@@ -173,7 +173,7 @@
necessary, and then, if there are results from the statement (as
determined by the DB-API 2.0 'description' attribute) it will fetch all
the rows and return them, leaving them to be relayed to
- L{_ConnectedTxn.execSQL} via the L{ThreadHolder}.
+ L{_ConnectedTransaction.execSQL} via the L{ThreadHolder}.
The rules for possibly reconnecting automatically are: if this is the
very first statement being executed in this transaction, and an error
@@ -379,7 +379,7 @@
-class _NoTxn(object):
+class _NoTransaction(object):
"""
An L{IAsyncTransaction} that indicates a local failure before we could even
communicate any statements (or possibly even any connection attempts) to the
@@ -405,11 +405,11 @@
-class _WaitingTxn(object):
+class _WaitingTransaction(object):
"""
- A L{_WaitingTxn} is an implementation of L{IAsyncTransaction} which cannot
+ A L{_WaitingTransaction} is an implementation of L{IAsyncTransaction} which cannot
yet actually execute anything, so it waits and spools SQL requests for later
- execution. When a L{_ConnectedTxn} becomes available later, it can be
+ execution. When a L{_ConnectedTransaction} becomes available later, it can be
unspooled onto that.
"""
@@ -417,7 +417,7 @@
def __init__(self, pool):
"""
- Initialize a L{_WaitingTxn} based on a L{ConnectionPool}. (The C{pool}
+ Initialize a L{_WaitingTransaction} based on a L{ConnectionPool}. (The C{pool}
is used only to reflect C{dialect} and C{paramstyle} attributes; not
remembered or modified in any way.)
"""
@@ -472,23 +472,23 @@
def abort(self):
"""
Succeed and do nothing. The actual logic for this method is mostly
- implemented by L{_SingleTxn._stopWaiting}.
+ implemented by L{_SingleTransaction._stopWaiting}.
"""
return succeed(None)
-class _SingleTxn(proxyForInterface(iface=IAsyncTransaction,
- originalAttribute='_baseTxn')):
+class _SingleTransaction(proxyForInterface(iface=IAsyncTransaction,
+ originalAttribute='_baseTransaction')):
"""
- A L{_SingleTxn} is a single-use wrapper for the longer-lived
- L{_ConnectedTxn}, so that if a badly-behaved API client accidentally hangs
+ A L{_SingleTransaction} is a single-use wrapper for the longer-lived
+ L{_ConnectedTransaction}, so that if a badly-behaved API client accidentally hangs
on to one of these and, for example C{.abort()}s it multiple times once
another client is using that connection, it will get some harmless
tracebacks.
- It's a wrapper around a "real" implementation; either a L{_ConnectedTxn},
- L{_NoTxn}, or L{_WaitingTxn} depending on the availability of real
+ It's a wrapper around a "real" implementation; either a L{_ConnectedTransaction},
+ L{_NoTransaction}, or L{_WaitingTransaction} depending on the availability of real
underlying database connections.
This is the only L{IAsyncTransaction} implementation exposed to application
@@ -498,9 +498,9 @@
commands together.
"""
- def __init__(self, pool, baseTxn):
+ def __init__(self, pool, baseTransaction):
self._pool = pool
- self._baseTxn = baseTxn
+ self._baseTransaction = baseTransaction
self._complete = False
self._currentBlock = None
self._blockedQueue = None
@@ -512,18 +512,18 @@
"""
Reveal the backend in the string representation.
"""
- return '_SingleTxn(%r)' % (self._baseTxn,)
+ return '_SingleTransaction(%r)' % (self._baseTransaction,)
- def _unspoolOnto(self, baseTxn):
+ def _unspoolOnto(self, baseTransaction):
"""
- Replace my C{_baseTxn}, currently a L{_WaitingTxn}, with a new
+ Replace my C{_baseTransaction}, currently a L{_WaitingTransaction}, with a new
implementation of L{IAsyncTransaction} that will actually do the work;
- either a L{_ConnectedTxn} or a L{_NoTxn}.
+ either a L{_ConnectedTransaction} or a L{_NoTransaction}.
"""
- spooledBase = self._baseTxn
- self._baseTxn = baseTxn
- spooledBase._unspool(baseTxn)
+ spooledBase = self._baseTransaction
+ self._baseTransaction = baseTransaction
+ spooledBase._unspool(baseTransaction)
def execSQL(self, sql, args=None, raiseOnZeroRowCount=None):
@@ -539,7 +539,7 @@
if block is None and self._blockedQueue is not None:
return self._blockedQueue.execSQL(sql, args, raiseOnZeroRowCount)
# 'block' should always be _currentBlock at this point.
- d = super(_SingleTxn, self).execSQL(sql, args, raiseOnZeroRowCount)
+ d = super(_SingleTransaction, self).execSQL(sql, args, raiseOnZeroRowCount)
self._stillExecuting.append(d)
def itsDone(result):
self._stillExecuting.remove(d)
@@ -597,12 +597,12 @@
return self._blockedQueue.commit()
self._markComplete()
- return super(_SingleTxn, self).commit()
+ return super(_SingleTransaction, self).commit()
def abort(self):
self._markComplete()
- result = super(_SingleTxn, self).abort()
+ result = super(_SingleTransaction, self).abort()
if self in self._pool._waiting:
self._stopWaiting()
return result
@@ -613,7 +613,7 @@
Stop waiting for a free transaction and fail.
"""
self._pool._waiting.remove(self)
- self._unspoolOnto(_NoTxn(self._pool))
+ self._unspoolOnto(_NoTransaction(self._pool))
def _checkComplete(self):
@@ -640,7 +640,7 @@
self._checkComplete()
block = CommandBlock(self)
if self._currentBlock is None:
- self._blockedQueue = _WaitingTxn(self._pool)
+ self._blockedQueue = _WaitingTransaction(self._pool)
# FIXME: test the case where it's ready immediately.
self._checkNextBlock()
return block
@@ -671,16 +671,16 @@
required. Instead, it provides 'end'.
"""
- def __init__(self, singleTxn):
- self._singleTxn = singleTxn
- self.paramstyle = singleTxn.paramstyle
- self.dialect = singleTxn.dialect
- self._spool = _WaitingTxn(singleTxn._pool)
+ def __init__(self, singleTransaction):
+ self._singleTransaction = singleTransaction
+ self.paramstyle = singleTransaction.paramstyle
+ self.dialect = singleTransaction.dialect
+ self._spool = _WaitingTransaction(singleTransaction._pool)
self._started = False
self._ended = False
self._waitingForEnd = []
self._endDeferred = Deferred()
- singleTxn._pendingBlocks.append(self)
+ singleTransaction._pendingBlocks.append(self)
def _startExecuting(self):
@@ -705,9 +705,9 @@
"""
if track and self._ended:
raise AlreadyFinishedError()
- self._singleTxn._checkComplete()
- if self._singleTxn._currentBlock is self and self._started:
- d = self._singleTxn._execSQLForBlock(
+ self._singleTransaction._checkComplete()
+ if self._singleTransaction._currentBlock is self and self._started:
+ d = self._singleTransaction._execSQLForBlock(
sql, args, raiseOnZeroRowCount, self)
else:
d = self._spool.execSQL(sql, args, raiseOnZeroRowCount)
@@ -742,7 +742,7 @@
-class _ConnectingPseudoTxn(object):
+class _ConnectingPseudoTransaction(object):
_retry = None
@@ -797,20 +797,20 @@
@type reactor: L{IReactorTime} and L{IReactorThreads} provider.
- @ivar _free: The list of free L{_ConnectedTxn} objects which are not
- currently attached to a L{_SingleTxn} object, and have active
+ @ivar _free: The list of free L{_ConnectedTransaction} objects which are not
+ currently attached to a L{_SingleTransaction} object, and have active
connections ready for processing a new transaction.
- @ivar _busy: The list of busy L{_ConnectedTxn} objects; those currently
- servicing an unfinished L{_SingleTxn} object.
+ @ivar _busy: The list of busy L{_ConnectedTransaction} objects; those currently
+ servicing an unfinished L{_SingleTransaction} object.
- @ivar _finishing: The list of 2-tuples of L{_ConnectedTxn} objects which
+ @ivar _finishing: The list of 2-tuples of L{_ConnectedTransaction} objects which
have had C{abort} or C{commit} called on them, but are not done
executing that method, and the L{Deferred} returned from that method
that will be fired when its execution has completed.
- @ivar _waiting: The list of L{_SingleTxn} objects attached to a
- L{_WaitingTxn}; i.e. those which are awaiting a connection to become
+ @ivar _waiting: The list of L{_SingleTransaction} objects attached to a
+ L{_WaitingTransaction}; i.e. those which are awaiting a connection to become
free so that they can be executed.
@ivar _stopping: Is this L{ConnectionPool} in the process of shutting down?
@@ -863,7 +863,7 @@
waiting = self._waiting[0]
waiting._stopWaiting()
- # Phase 2: Wait for all the Deferreds from the L{_ConnectedTxn}s that
+ # Phase 2: Wait for all the Deferreds from the L{_ConnectedTransaction}s that
# have *already* been stopped.
while self._finishing:
yield _fork(self._finishing[0][1])
@@ -877,9 +877,9 @@
# 'abort()' will have put them there. Shut down all the associated
# ThreadHolders.
while self._free:
- # Releasing a L{_ConnectedTxn} doesn't automatically recycle it /
- # remove it the way aborting a _SingleTxn does, so we need to .pop()
- # here. L{_ConnectedTxn.stop} really shouldn't be able to fail, as
+ # Releasing a L{_ConnectedTransaction} doesn't automatically recycle it /
+ # remove it the way aborting a _SingleTransaction does, so we need to .pop()
+ # here. L{_ConnectedTransaction.stop} really shouldn't be able to fail, as
# it's just stopping the thread, and the holder's stop() is
# independently submitted from .abort() / .close().
yield self._free.pop()._releaseConnection()
@@ -904,21 +904,21 @@
@return: an L{IAsyncTransaction}
"""
if self._stopping:
- # FIXME: should be wrapping a _SingleTxn around this to get
+ # FIXME: should be wrapping a _SingleTransaction around this to get
# .commandBlock()
- return _NoTxn(self)
+ return _NoTransaction(self)
if self._free:
- basetxn = self._free.pop(0)
- self._busy.append(basetxn)
- txn = _SingleTxn(self, basetxn)
+ basetransaction = self._free.pop(0)
+ self._busy.append(basetransaction)
+ transaction = _SingleTransaction(self, basetransaction)
else:
- txn = _SingleTxn(self, _WaitingTxn(self))
- self._waiting.append(txn)
+ transaction = _SingleTransaction(self, _WaitingTransaction(self))
+ self._waiting.append(transaction)
# FIXME/TESTME: should be len(self._busy) + len(self._finishing)
# (free doesn't need to be considered, as it's tested above)
if self._activeConnectionCount() < self.maxConnections:
self._startOneMore()
- return txn
+ return transaction
def _activeConnectionCount(self):
@@ -930,13 +930,13 @@
def _startOneMore(self):
"""
- Start one more _ConnectedTxn.
+ Start one more _ConnectedTransaction.
"""
holder = self._createHolder()
holder.start()
- txn = _ConnectingPseudoTxn(self, holder)
+ transaction = _ConnectingPseudoTransaction(self, holder)
# take up a slot in the 'busy' list, sit there so we can be aborted.
- self._busy.append(txn)
+ self._busy.append(transaction)
def initCursor():
# support threadlevel=1; we can't necessarily cursor() in a
# different thread than we do transactions in.
@@ -944,58 +944,58 @@
cursor = connection.cursor()
return (connection, cursor)
def finishInit((connection, cursor)):
- baseTxn = _ConnectedTxn(
+ baseTransaction = _ConnectedTransaction(
pool=self,
threadHolder=holder,
connection=connection,
cursor=cursor
)
- self._busy.remove(txn)
- self._repoolNow(baseTxn)
+ self._busy.remove(transaction)
+ self._repoolNow(baseTransaction)
def maybeTryAgain(f):
log.err(f, "Re-trying connection due to connection failure")
- txn._retry = self.reactor.callLater(self.RETRY_TIMEOUT, resubmit)
+ transaction._retry = self.reactor.callLater(self.RETRY_TIMEOUT, resubmit)
def resubmit():
d = holder.submit(initCursor)
d.addCallbacks(finishInit, maybeTryAgain)
resubmit()
- def _repoolAfter(self, txn, d):
+ def _repoolAfter(self, transaction, d):
"""
- Re-pool the given L{_ConnectedTxn} after the given L{Deferred} has
+ Re-pool the given L{_ConnectedTransaction} after the given L{Deferred} has
fired.
"""
- self._busy.remove(txn)
- finishRecord = (txn, d)
+ self._busy.remove(transaction)
+ finishRecord = (transaction, d)
self._finishing.append(finishRecord)
def repool(result):
self._finishing.remove(finishRecord)
- self._repoolNow(txn)
+ self._repoolNow(transaction)
return result
def discard(result):
self._finishing.remove(finishRecord)
- txn._releaseConnection()
+ transaction._releaseConnection()
self._startOneMore()
return result
return d.addCallbacks(repool, discard)
- def _repoolNow(self, txn):
+ def _repoolNow(self, transaction):
"""
- Recycle a L{_ConnectedTxn} into the free list.
+ Recycle a L{_ConnectedTransaction} into the free list.
"""
- txn.reset()
+ transaction.reset()
if self._waiting:
waiting = self._waiting.pop(0)
- self._busy.append(txn)
- waiting._unspoolOnto(txn)
+ self._busy.append(transaction)
+ waiting._unspoolOnto(transaction)
else:
- self._free.append(txn)
+ self._free.append(transaction)
-def txnarg():
+def transactionarg():
return [('transactionID', Integer())]
@@ -1049,11 +1049,11 @@
-class StartTxn(Command):
+class StartTransaction(Command):
"""
Start a transaction, identified with an ID generated by the client.
"""
- arguments = txnarg()
+ arguments = transactionarg()
@@ -1063,7 +1063,7 @@
"""
arguments = [('sql', String()),
('queryID', String()),
- ('args', Pickle())] + txnarg()
+ ('args', Pickle())] + transactionarg()
@@ -1089,12 +1089,12 @@
class Commit(Command):
- arguments = txnarg()
+ arguments = transactionarg()
class Abort(Command):
- arguments = txnarg()
+ arguments = transactionarg()
@@ -1116,12 +1116,12 @@
"""
super(ConnectionPoolConnection, self).__init__()
self.pool = pool
- self._txns = {}
+ self._transactions = {}
- @StartTxn.responder
+ @StartTransaction.responder
def start(self, transactionID):
- self._txns[transactionID] = self.pool.connection()
+ self._transactions[transactionID] = self.pool.connection()
return {}
@@ -1129,7 +1129,7 @@
@inlineCallbacks
def receivedSQL(self, transactionID, queryID, sql, args):
try:
- rows = yield self._txns[transactionID].execSQL(sql, args, _NoRows)
+ rows = yield self._transactions[transactionID].execSQL(sql, args, _NoRows)
except _NoRows:
norows = True
else:
@@ -1144,8 +1144,8 @@
def _complete(self, transactionID, thunk):
- txn = self._txns.pop(transactionID)
- return thunk(txn).addCallback(lambda ignored: {})
+ transaction = self._transactions.pop(transactionID)
+ return thunk(transaction).addCallback(lambda ignored: {})
@Commit.responder
@@ -1172,7 +1172,7 @@
def __init__(self):
super(ConnectionPoolClient, self).__init__()
self._nextID = count().next
- self._txns = {}
+ self._transactions = {}
self._queries = {}
@@ -1185,11 +1185,11 @@
@rtype: L{IAsyncTransaction}
"""
- txnid = str(self._nextID())
- txn = _NetTransaction(client=self, transactionID=txnid)
- self._txns[txnid] = txn
- self.callRemote(StartTxn, transactionID=txnid)
- return txn
+ transactionid = str(self._nextID())
+ transaction = _NetTransaction(client=self, transactionID=transactionid)
+ self._transactions[transactionid] = transaction
+ self.callRemote(StartTransaction, transactionID=transactionid)
+ return transaction
@Row.responder
Modified: CalendarServer/branches/users/cdaboo/implicituidrace/twext/enterprise/dal/syntax.py
===================================================================
--- CalendarServer/branches/users/cdaboo/implicituidrace/twext/enterprise/dal/syntax.py 2011-10-03 14:58:12 UTC (rev 8137)
+++ CalendarServer/branches/users/cdaboo/implicituidrace/twext/enterprise/dal/syntax.py 2011-10-03 15:27:22 UTC (rev 8138)
@@ -121,7 +121,7 @@
return self._toSQL(metadata)
- def _extraVars(self, txn, metadata):
+ def _extraVars(self, transaction, metadata):
"""
A hook for subclasses to provide additional keyword arguments to the
C{bind} call when L{_Statement.on} is executed. Currently this is used
@@ -156,12 +156,12 @@
return result
- def on(self, txn, raiseOnZeroRowCount=None, **kw):
+ def on(self, transaction, raiseOnZeroRowCount=None, **kw):
"""
Execute this statement on a given L{IAsyncTransaction} and return the
resulting L{Deferred}.
- @param txn: the L{IAsyncTransaction} to execute this on.
+ @param transaction: the L{IAsyncTransaction} to execute this on.
@param raiseOnZeroRowCount: the exception to raise if no data was
affected or returned by this query.
@@ -174,11 +174,11 @@
@rtype: a L{Deferred} firing a C{list} of records (C{tuple}s or
C{list}s)
"""
- metadata = self._paramstyles[txn.paramstyle](txn.dialect)
- outvars = self._extraVars(txn, metadata)
+ metadata = self._paramstyles[transaction.paramstyle](transaction.dialect)
+ outvars = self._extraVars(transaction, metadata)
kw.update(outvars)
fragment = self.toSQL(metadata).bind(**kw)
- result = txn.execSQL(fragment.text, fragment.parameters,
+ result = transaction.execSQL(fragment.text, fragment.parameters,
raiseOnZeroRowCount)
result = self._extraResult(result, outvars, metadata)
if metadata.dialect == ORACLE_DIALECT and result:
@@ -973,7 +973,7 @@
return self.Return
- def _extraVars(self, txn, metadata):
+ def _extraVars(self, transaction, metadata):
result = []
rvars = self._returnAsList()
if metadata.dialect == ORACLE_DIALECT:
@@ -1237,22 +1237,22 @@
self._name = name
- def acquire(self, txn):
- return Savepoint(self._name).on(txn)
+ def acquire(self, transaction):
+ return Savepoint(self._name).on(transaction)
- def rollback(self, txn):
- return RollbackToSavepoint(self._name).on(txn)
+ def rollback(self, transaction):
+ return RollbackToSavepoint(self._name).on(transaction)
- def release(self, txn):
- if txn.dialect == ORACLE_DIALECT:
+ def release(self, transaction):
+ if transaction.dialect == ORACLE_DIALECT:
# There is no 'release savepoint' statement in oracle, but then, we
# don't need it because there's no resource to manage. Just don't
# do anything.
return NoOp()
else:
- return ReleaseSavepoint(self._name).on(txn)
+ return ReleaseSavepoint(self._name).on(transaction)
Modified: CalendarServer/branches/users/cdaboo/implicituidrace/twext/enterprise/dal/test/test_sqlsyntax.py
===================================================================
--- CalendarServer/branches/users/cdaboo/implicituidrace/twext/enterprise/dal/test/test_sqlsyntax.py 2011-10-03 14:58:12 UTC (rev 8137)
+++ CalendarServer/branches/users/cdaboo/implicituidrace/twext/enterprise/dal/test/test_sqlsyntax.py 2011-10-03 15:27:22 UTC (rev 8138)
@@ -58,7 +58,7 @@
-class NullTestingOracleTxn(object):
+class NullTestingOracleTransaction(object):
"""
Fake transaction for testing oracle NULL behavior.
"""
@@ -643,7 +643,7 @@
return conn, pool, factory
- def test_insertMultiReturnOnOracleTxn(self):
+ def test_insertMultiReturnOnOracleTransaction(self):
"""
As described in L{test_insertMultiReturnOracle}, Oracle deals with
'returning' clauses by using out parameters. However, this is not quite
@@ -938,7 +938,7 @@
:3, as per the DB-API.
"""
stmts = []
- class FakeOracleTxn(object):
+ class FakeOracleTransaction(object):
def execSQL(self, text, params, exc):
stmts.append((text, params))
dialect = ORACLE_DIALECT
@@ -947,7 +947,7 @@
From=self.schema.FOO,
Where=(self.schema.FOO.BAR == 7).And(
self.schema.FOO.BAZ == 9)
- ).on(FakeOracleTxn())
+ ).on(FakeOracleTransaction())
self.assertEquals(
stmts, [("select BAR from FOO where BAR = :1 and BAZ = :2",
[7, 9])]
@@ -969,7 +969,7 @@
rows = resultOf(
Select([self.schema.NULLCHECK.ASTRING,
self.schema.NULLCHECK.ANUMBER],
- From=self.schema.NULLCHECK).on(NullTestingOracleTxn()))[0]
+ From=self.schema.NULLCHECK).on(NullTestingOracleTransaction()))[0]
self.assertEquals(rows, [['', None]])
@@ -980,7 +980,7 @@
shortcut.
"""
rows = resultOf(
- Select(From=self.schema.NULLCHECK).on(NullTestingOracleTxn())
+ Select(From=self.schema.NULLCHECK).on(NullTestingOracleTransaction())
)[0]
self.assertEquals(rows, [['', None]])
Modified: CalendarServer/branches/users/cdaboo/implicituidrace/twext/enterprise/test/test_adbapi2.py
===================================================================
--- CalendarServer/branches/users/cdaboo/implicituidrace/twext/enterprise/test/test_adbapi2.py 2011-10-03 14:58:12 UTC (rev 8137)
+++ CalendarServer/branches/users/cdaboo/implicituidrace/twext/enterprise/test/test_adbapi2.py 2011-10-03 15:27:22 UTC (rev 8138)
@@ -729,12 +729,12 @@
underlying database connection has already been disconnected. If this
happens, shutdown should continue.
"""
- txns = []
- txns.append(self.pool.connection())
- txns.append(self.pool.connection())
- for txn in txns:
+ transactions = []
+ transactions.append(self.pool.connection())
+ transactions.append(self.pool.connection())
+ for transaction in transactions:
# Make sure rollback will actually be executed.
- results = resultOf(txn.execSQL("maybe change something!"))
+ results = resultOf(transaction.execSQL("maybe change something!"))
[[[counter, echo]]] = results
self.assertEquals("maybe change something!", echo)
# Fail one (and only one) call to rollback().
@@ -827,17 +827,17 @@
"""
TEST_PARAMSTYLE = "justtesting"
self.pool.paramstyle = TEST_PARAMSTYLE
- normaltxn = self.pool.connection()
- self.assertEquals(normaltxn.paramstyle, TEST_PARAMSTYLE)
+ normaltransaction = self.pool.connection()
+ self.assertEquals(normaltransaction.paramstyle, TEST_PARAMSTYLE)
self.pauseHolders()
extra = []
extra.append(self.pool.connection())
- waitingtxn = self.pool.connection()
- self.assertEquals(waitingtxn.paramstyle, TEST_PARAMSTYLE)
+ waitingtransaction = self.pool.connection()
+ self.assertEquals(waitingtransaction.paramstyle, TEST_PARAMSTYLE)
self.flushHolders()
self.pool.stopService()
- notxn = self.pool.connection()
- self.assertEquals(notxn.paramstyle, TEST_PARAMSTYLE)
+ notransaction = self.pool.connection()
+ self.assertEquals(notransaction.paramstyle, TEST_PARAMSTYLE)
def test_propagateDialect(self):
@@ -847,17 +847,17 @@
"""
TEST_DIALECT = "otherdialect"
self.pool.dialect = TEST_DIALECT
- normaltxn = self.pool.connection()
- self.assertEquals(normaltxn.dialect, TEST_DIALECT)
+ normaltransaction = self.pool.connection()
+ self.assertEquals(normaltransaction.dialect, TEST_DIALECT)
self.pauseHolders()
extra = []
extra.append(self.pool.connection())
- waitingtxn = self.pool.connection()
- self.assertEquals(waitingtxn.dialect, TEST_DIALECT)
+ waitingtransaction = self.pool.connection()
+ self.assertEquals(waitingtransaction.dialect, TEST_DIALECT)
self.flushHolders()
self.pool.stopService()
- notxn = self.pool.connection()
- self.assertEquals(notxn.dialect, TEST_DIALECT)
+ notransaction = self.pool.connection()
+ self.assertEquals(notransaction.dialect, TEST_DIALECT)
def test_reConnectWhenFirstExecFails(self):
@@ -875,9 +875,9 @@
# Allow 'connect' to succeed. This should behave basically the same
# whether connect() happened to succeed in some previous transaction and
# it's recycling the underlying transaction, or connect() just
- # succeeded. Either way you just have a _SingleTxn wrapping a
- # _ConnectedTxn.
- txn = self.pool.connection()
+ # succeeded. Either way you just have a _SingleTransaction wrapping a
+ # _ConnectedTransaction.
+ transaction = self.pool.connection()
self.assertEquals(len(self.factory.connections), 1,
"Sanity check failed.")
class CustomExecuteFailed(Exception):
@@ -885,7 +885,7 @@
Custom 'execute-failed' exception.
"""
self.factory.connections[0].executeWillFail(CustomExecuteFailed)
- results = resultOf(txn.execSQL("hello, world!"))
+ results = resultOf(transaction.execSQL("hello, world!"))
[[[counter, echo]]] = results
self.assertEquals("hello, world!", echo)
# Two execution attempts should have been made, one on each connection.
@@ -911,15 +911,15 @@
then, the database server will shut down and the connections will die,
but we will be none the wiser until we try to use them.
"""
- txn = self.pool.connection()
+ transaction = self.pool.connection()
moreFailureSetup(self.factory)
self.assertEquals(len(self.factory.connections), 1,
"Sanity check failed.")
- results = resultOf(txn.execSQL("hello, world!"))
- txn.commit()
+ results = resultOf(transaction.execSQL("hello, world!"))
+ transaction.commit()
[[[counter, echo]]] = results
self.assertEquals("hello, world!", echo)
- txn2 = self.pool.connection()
+ transaction2 = self.pool.connection()
self.assertEquals(len(self.factory.connections), 1,
"Sanity check failed.")
class CustomExecFail(Exception):
@@ -927,8 +927,8 @@
Custom 'execute()' failure.
"""
self.factory.connections[0].executeWillFail(CustomExecFail)
- results = resultOf(txn2.execSQL("second try!"))
- txn2.commit()
+ results = resultOf(transaction2.execSQL("second try!"))
+ transaction2.commit()
[[[counter, echo]]] = results
self.assertEquals("second try!", echo)
self.assertEquals(len(self.flushLoggedErrors(CustomExecFail)), 1)
@@ -970,16 +970,16 @@
statement transparently re-executed by the logic tested by
L{test_reConnectWhenFirstExecFails}.
"""
- txn = self.pool.connection()
+ transaction = self.pool.connection()
self.factory.commitFail = True
self.factory.rollbackFail = True
- [x] = resultOf(txn.commit())
+ [x] = resultOf(transaction.commit())
# No statements have been executed, so 'commit' will *not* be executed.
self.assertEquals(self.factory.commitFail, True)
self.assertIdentical(x, None)
self.assertEquals(len(self.pool._free), 1)
- self.assertIn(txn._baseTxn, self.pool._free)
+ self.assertIn(transaction._baseTransaction, self.pool._free)
self.assertEquals(self.pool._finishing, [])
self.assertEquals(len(self.factory.connections), 1)
self.assertEquals(self.factory.connections[0].closed, False)
@@ -1001,10 +1001,10 @@
relaying the exception back to application code but attempting a
re-connection on the next try.
"""
- txn = self.pool.connection()
- [[[counter, echo]]] = resultOf(txn.execSQL("hello, world!", []))
+ transaction = self.pool.connection()
+ [[[counter, echo]]] = resultOf(transaction.execSQL("hello, world!", []))
self.factory.connections[0].executeWillFail(ZeroDivisionError)
- [f] = resultOf(txn.execSQL("divide by zero", []))
+ [f] = resultOf(transaction.execSQL("divide by zero", []))
f.trap(ZeroDivisionError)
self.assertEquals(self.factory.connections[0].executions, 2)
# Reconnection should work exactly as before.
@@ -1012,7 +1012,7 @@
# Application code has to roll back its transaction at this point, since
# it failed (and we don't necessarily know why it failed: not enough
# information).
- txn.abort()
+ transaction.abort()
self.factory.connections[0].executions = 0 # re-set for next test
self.assertEquals(len(self.factory.connections), 1)
self.test_reConnectWhenFirstExecFails()
@@ -1028,17 +1028,17 @@
Also, a new connection will immediately be established to keep the pool
size the same.
"""
- txn = self.pool.connection()
- results = resultOf(txn.execSQL("maybe change something!"))
+ transaction = self.pool.connection()
+ results = resultOf(transaction.execSQL("maybe change something!"))
[[[counter, echo]]] = results
self.assertEquals("maybe change something!", echo)
self.factory.rollbackFail = True
- [x] = resultOf(txn.abort())
+ [x] = resultOf(transaction.abort())
# Abort does not propagate the error on, the transaction merely gets
# disposed of.
self.assertIdentical(x, None)
self.assertEquals(len(self.pool._free), 1)
- self.assertNotIn(txn._baseTxn, self.pool._free)
+ self.assertNotIn(transaction._baseTransaction, self.pool._free)
self.assertEquals(self.pool._finishing, [])
self.assertEquals(len(self.factory.connections), 2)
self.assertEquals(self.factory.connections[0].closed, True)
@@ -1053,15 +1053,15 @@
C{commit} has to be relayed to client code, since that actually means
some changes didn't hit the database.
"""
- txn = self.pool.connection()
+ transaction = self.pool.connection()
self.factory.commitFail = True
- results = resultOf(txn.execSQL("maybe change something!"))
+ results = resultOf(transaction.execSQL("maybe change something!"))
[[[counter, echo]]] = results
self.assertEquals("maybe change something!", echo)
- [x] = resultOf(txn.commit())
+ [x] = resultOf(transaction.commit())
x.trap(CommitFail)
self.assertEquals(len(self.pool._free), 1)
- self.assertNotIn(txn._baseTxn, self.pool._free)
+ self.assertNotIn(transaction._baseTransaction, self.pool._free)
self.assertEquals(self.pool._finishing, [])
self.assertEquals(len(self.factory.connections), 2)
self.assertEquals(self.factory.connections[0].closed, True)
@@ -1073,14 +1073,14 @@
L{IAsyncTransaction.commandBlock} returns an L{IAsyncTransaction}
provider which ensures that a block of commands are executed together.
"""
- txn = self.pool.connection()
- a = resultOf(txn.execSQL("a"))
- cb = txn.commandBlock()
+ transaction = self.pool.connection()
+ a = resultOf(transaction.execSQL("a"))
+ cb = transaction.commandBlock()
b = resultOf(cb.execSQL("b"))
- d = resultOf(txn.execSQL("d"))
+ d = resultOf(transaction.execSQL("d"))
c = resultOf(cb.execSQL("c"))
cb.end()
- e = resultOf(txn.execSQL("e"))
+ e = resultOf(transaction.execSQL("e"))
self.assertEquals(self.factory.connections[0].cursors[0].allExecutions,
[("a", []), ("b", []), ("c", []), ("d", []),
("e", [])])
@@ -1097,13 +1097,13 @@
executing until all SQL statements scheduled before it have completed.
"""
self.pauseHolders()
- txn = self.pool.connection()
- a = resultOf(txn.execSQL("a"))
- b = resultOf(txn.execSQL("b"))
- cb = txn.commandBlock()
+ transaction = self.pool.connection()
+ a = resultOf(transaction.execSQL("a"))
+ b = resultOf(transaction.execSQL("b"))
+ cb = transaction.commandBlock()
c = resultOf(cb.execSQL("c"))
d = resultOf(cb.execSQL("d"))
- e = resultOf(txn.execSQL("e"))
+ e = resultOf(transaction.execSQL("e"))
cb.end()
self.flushHolders()
@@ -1123,10 +1123,10 @@
When execution of one command block is complete, it will proceed to the
next queued block, then to regular SQL executed on the transaction.
"""
- txn = self.pool.connection()
- cb1 = txn.commandBlock()
- cb2 = txn.commandBlock()
- txn.execSQL("e")
+ transaction = self.pool.connection()
+ cb1 = transaction.commandBlock()
+ cb2 = transaction.commandBlock()
+ transaction.execSQL("e")
cb1.execSQL("a")
cb2.execSQL("c")
cb1.execSQL("b")
@@ -1152,8 +1152,8 @@
L{CommandBlock.end} will raise L{AlreadyFinishedError} when called more
than once.
"""
- txn = self.pool.connection()
- block = txn.commandBlock()
+ transaction = self.pool.connection()
+ block = transaction.commandBlock()
block.end()
self.assertRaises(AlreadyFinishedError, block.end)
@@ -1165,9 +1165,9 @@
when you call {IAsyncTransaction.commit}(), it should not actually take
effect if there are any pending command blocks.
"""
- txn = self.pool.connection()
- block = txn.commandBlock()
- commitResult = resultOf(txn.commit())
+ transaction = self.pool.connection()
+ block = transaction.commandBlock()
+ commitResult = resultOf(transaction.commit())
block.execSQL("in block")
self.assertEquals(commitResult, [])
self.assertEquals(self.factory.connections[0].cursors[0].allExecutions,
@@ -1183,14 +1183,14 @@
all outstanding C{execSQL}s will fail immediately, on both command
blocks and on the transaction itself.
"""
- txn = self.pool.connection()
- block = txn.commandBlock()
- block2 = txn.commandBlock()
- abortResult = resultOf(txn.abort())
+ transaction = self.pool.connection()
+ block = transaction.commandBlock()
+ block2 = transaction.commandBlock()
+ abortResult = resultOf(transaction.abort())
self.assertEquals(abortResult, [None])
self.assertRaises(AlreadyFinishedError, block2.execSQL, "bar")
self.assertRaises(AlreadyFinishedError, block.execSQL, "foo")
- self.assertRaises(AlreadyFinishedError, txn.execSQL, "baz")
+ self.assertRaises(AlreadyFinishedError, transaction.execSQL, "baz")
self.assertEquals(self.factory.connections[0].cursors[0].allExecutions,
[])
# end() should _not_ raise an exception, because this is the sort of
@@ -1206,8 +1206,8 @@
Attempting to execute SQL on a L{CommandBlock} which has had C{end}
called on it will result in an L{AlreadyFinishedError}.
"""
- txn = self.pool.connection()
- block = txn.commandBlock()
+ transaction = self.pool.connection()
+ block = transaction.commandBlock()
block.end()
self.assertRaises(AlreadyFinishedError, block.execSQL, "hello")
self.assertEquals(self.factory.connections[0].cursors[0].allExecutions,
@@ -1219,9 +1219,9 @@
Once an L{IAsyncTransaction} has been committed, L{commandBlock} raises
an exception.
"""
- txn = self.pool.connection()
- txn.commit()
- self.assertRaises(AlreadyFinishedError, txn.commandBlock)
+ transaction = self.pool.connection()
+ transaction.commit()
+ self.assertRaises(AlreadyFinishedError, transaction.commandBlock)
def test_commandBlockAfterAbortRaises(self):
@@ -1229,9 +1229,9 @@
Once an L{IAsyncTransaction} has been committed, L{commandBlock} raises
an exception.
"""
- txn = self.pool.connection()
- txn.abort()
- self.assertRaises(AlreadyFinishedError, txn.commandBlock)
+ transaction = self.pool.connection()
+ transaction.abort()
+ self.assertRaises(AlreadyFinishedError, transaction.commandBlock)
Modified: CalendarServer/branches/users/cdaboo/implicituidrace/twistedcaldav/mail.py
===================================================================
--- CalendarServer/branches/users/cdaboo/implicituidrace/twistedcaldav/mail.py 2011-10-03 14:58:12 UTC (rev 8137)
+++ CalendarServer/branches/users/cdaboo/implicituidrace/twistedcaldav/mail.py 2011-10-03 15:27:22 UTC (rev 8138)
@@ -497,8 +497,8 @@
Set up a transaction which will be used and committed by implicit
scheduling.
"""
- txn = transactionFromRequest(request, self._newStore)
- return super(IMIPReplyInboxResource, self).renderHTTP(request, txn)
+ self.transaction = transactionFromRequest(request, self._newStore)
+ return super(IMIPReplyInboxResource, self).renderHTTP(request, self.transaction)
@inlineCallbacks
def http_POST(self, request):
@@ -513,7 +513,7 @@
scheduler = IMIPScheduler(request, self)
# Do the POST processing treating this as a non-local schedule
- result = (yield scheduler.doSchedulingViaPOST(use_request_headers=True))
+ result = (yield scheduler.doSchedulingViaPOST(self.transaction, use_request_headers=True))
returnValue(result.response())
Modified: CalendarServer/branches/users/cdaboo/implicituidrace/twistedcaldav/method/put_common.py
===================================================================
--- CalendarServer/branches/users/cdaboo/implicituidrace/twistedcaldav/method/put_common.py 2011-10-03 14:58:12 UTC (rev 8137)
+++ CalendarServer/branches/users/cdaboo/implicituidrace/twistedcaldav/method/put_common.py 2011-10-03 15:27:22 UTC (rev 8138)
@@ -62,15 +62,16 @@
class UIDReservation(object):
- def __init__(self, index, uid, uri, internal_request):
+ def __init__(self, index, uid, uri, internal_request, transaction):
if internal_request:
self.lock = None
else:
- self.lock = MemcacheLock("ImplicitUIDLock", uid, timeout=60.0)
+ self.lock = MemcacheLock("ImplicitUIDLock", uid, timeout=60.0, expire_time=5*60)
self.reserved = False
self.index = index
self.uid = uid
self.uri = uri
+ self.transaction = transaction
@inlineCallbacks
def reserve(self):
@@ -102,6 +103,7 @@
if self.uri and not self.reserved:
if self.lock:
+ # Can release immediately as nothing happened
yield self.lock.release()
raise HTTPError(StatusResponse(responsecode.CONFLICT, "Resource: %s currently in use in calendar." % (self.uri,)))
@@ -111,7 +113,9 @@
yield self.index.unreserveUID(self.uid)
self.reserved = False
if self.lock:
- yield self.lock.clean()
+ # Release lock after commit or abort
+ self.transaction.postCommit(self.lock.clean)
+ self.transaction.postAbort(self.lock.clean)
def __init__(
self,
@@ -1000,7 +1004,8 @@
self.destination_index = self.destinationparent.index()
reservation = StoreCalendarObjectResource.UIDReservation(
self.destination_index, self.uid, self.destination_uri,
- self.internal_request or self.isiTIP
+ self.internal_request or self.isiTIP,
+ self.destination._associatedTransaction,
)
yield reservation.reserve()
# UID conflict check - note we do this after reserving the UID to avoid a race condition where two requests
Modified: CalendarServer/branches/users/cdaboo/implicituidrace/twistedcaldav/resource.py
===================================================================
--- CalendarServer/branches/users/cdaboo/implicituidrace/twistedcaldav/resource.py 2011-10-03 14:58:12 UTC (rev 8137)
+++ CalendarServer/branches/users/cdaboo/implicituidrace/twistedcaldav/resource.py 2011-10-03 15:27:22 UTC (rev 8138)
@@ -2175,8 +2175,8 @@
@inlineCallbacks
def createNotificationsCollection(self):
- txn = self._associatedTransaction
- notifications = yield txn.notificationsWithUID(self._newStoreHome.uid())
+ transaction = self._associatedTransaction
+ notifications = yield transaction.notificationsWithUID(self._newStoreHome.uid())
from twistedcaldav.storebridge import StoreNotificationCollectionResource
similar = StoreNotificationCollectionResource(
Modified: CalendarServer/branches/users/cdaboo/implicituidrace/twistedcaldav/schedule.py
===================================================================
--- CalendarServer/branches/users/cdaboo/implicituidrace/twistedcaldav/schedule.py 2011-10-03 14:58:12 UTC (rev 8137)
+++ CalendarServer/branches/users/cdaboo/implicituidrace/twistedcaldav/schedule.py 2011-10-03 15:27:22 UTC (rev 8138)
@@ -334,7 +334,7 @@
scheduler = CalDAVScheduler(request, self)
# Do the POST processing treating
- result = (yield scheduler.doSchedulingViaPOST())
+ result = (yield scheduler.doSchedulingViaPOST(self._associatedTransaction))
returnValue(result.response())
@@ -451,17 +451,17 @@
scheduler = IScheduleScheduler(request, self)
# Need a transaction to work with
- txn = self._newStore.newTransaction("new transaction for Server To Server Inbox Resource")
- request._newStoreTransaction = txn
+ transaction = self._newStore.newTransaction("new transaction for Server To Server Inbox Resource")
+ request._newStoreTransaction = transaction
# Do the POST processing treating this as a non-local schedule
try:
- result = (yield scheduler.doSchedulingViaPOST(use_request_headers=True))
+ result = (yield scheduler.doSchedulingViaPOST(transaction, use_request_headers=True))
except Exception, e:
- yield txn.abort()
+ yield transaction.abort()
raise e
else:
- yield txn.commit()
+ yield transaction.commit()
returnValue(result.response())
##
Modified: CalendarServer/branches/users/cdaboo/implicituidrace/twistedcaldav/scheduling/processing.py
===================================================================
--- CalendarServer/branches/users/cdaboo/implicituidrace/twistedcaldav/scheduling/processing.py 2011-10-03 14:58:12 UTC (rev 8137)
+++ CalendarServer/branches/users/cdaboo/implicituidrace/twistedcaldav/scheduling/processing.py 2011-10-03 15:27:22 UTC (rev 8138)
@@ -256,7 +256,7 @@
# We need to get the UID lock for implicit processing whilst we send the auto-reply
# as the Organizer processing will attempt to write out data to other attendees to
# refresh them. To prevent a race we need a lock.
- uidlock = MemcacheLock("ImplicitUIDLock", self.uid, timeout=60.0)
+ uidlock = MemcacheLock("ImplicitUIDLock", self.uid, timeout=60.0, expire_time=5*60)
try:
yield uidlock.acquire()
@@ -271,7 +271,7 @@
# inNewTransaction wipes out the remembered resource<-> URL mappings in the
# request object but we need to be able to map the actual reply resource to its
# URL when doing auto-processing, so we have to sneak that mapping back in here.
- txn = yield resource.inNewTransaction(self.request)
+ transaction = yield resource.inNewTransaction(self.request)
organizer_resource = (yield self.request.locateResource(resource._url))
try:
@@ -281,10 +281,11 @@
log.debug("ImplicitProcessing - skipping refresh of missing UID: '%s'" % (self.uid,))
except Exception, e:
log.debug("ImplicitProcessing - refresh exception UID: '%s', %s" % (self.uid, str(e)))
- yield txn.abort()
+ yield transaction.abort()
else:
- yield txn.commit()
+ yield transaction.commit()
finally:
+ # This correctly gets called only after commit or abort is done
yield uidlock.clean()
if lock:
@@ -513,7 +514,7 @@
# We need to get the UID lock for implicit processing whilst we send the auto-reply
# as the Organizer processing will attempt to write out data to other attendees to
# refresh them. To prevent a race we need a lock.
- lock = MemcacheLock("ImplicitUIDLock", calendar.resourceUID(), timeout=60.0)
+ lock = MemcacheLock("ImplicitUIDLock", calendar.resourceUID(), timeout=60.0, expire_time=5*60)
# Note that this lock also protects the request, as this request is
# being re-used by potentially multiple transactions and should not be
@@ -528,7 +529,7 @@
# inNewTransaction wipes out the remembered resource<-> URL mappings in the
# request object but we need to be able to map the actual reply resource to its
# URL when doing auto-processing, so we have to sneak that mapping back in here.
- txn = yield resource.inNewTransaction(self.request)
+ transaction = yield resource.inNewTransaction(self.request)
self.request._rememberResource(resource, resource._url)
try:
@@ -538,10 +539,11 @@
scheduler = ImplicitScheduler()
yield scheduler.sendAttendeeReply(self.request, resource, calendar, self.recipient)
except:
- yield txn.abort()
+ yield transaction.abort()
else:
- yield txn.commit()
+ yield transaction.commit()
finally:
+ # This correctly gets called only after commit or abort is done
yield lock.clean()
# Track outstanding auto-reply processing
Modified: CalendarServer/branches/users/cdaboo/implicituidrace/twistedcaldav/scheduling/scheduler.py
===================================================================
--- CalendarServer/branches/users/cdaboo/implicituidrace/twistedcaldav/scheduling/scheduler.py 2011-10-03 14:58:12 UTC (rev 8137)
+++ CalendarServer/branches/users/cdaboo/implicituidrace/twistedcaldav/scheduling/scheduler.py 2011-10-03 15:27:22 UTC (rev 8138)
@@ -37,6 +37,7 @@
from twistedcaldav.accounting import accountingEnabled, emitAccounting
from twistedcaldav.config import config
from twistedcaldav.ical import Component
+from twistedcaldav.memcachelock import MemcacheLock, MemcacheLockTimeoutError
from twistedcaldav.scheduling import addressmapping
from twistedcaldav.scheduling.caldav import ScheduleViaCalDAV
from twistedcaldav.scheduling.cuaddress import InvalidCalendarUser,\
@@ -84,7 +85,7 @@
self.internal_request = False
@inlineCallbacks
- def doSchedulingViaPOST(self, use_request_headers=False):
+ def doSchedulingViaPOST(self, transaction, use_request_headers=False):
"""
The Scheduling POST operation on an Outbox.
"""
@@ -107,6 +108,24 @@
# Do some extra authorization checks
self.checkAuthorization()
+ # We might trigger an implicit scheduling operation here that will require consistency
+ # of data for all events with the same UID. So detect this and use a lock
+ lock = None
+ if self.calendar.resourceType() != "VFREEBUSY":
+ uid = self.calendar.resourceUID()
+ lock = MemcacheLock("ImplicitUIDLock", uid, timeout=60.0, expire_time=5*60)
+
+ # Implicit lock
+ if lock:
+ try:
+ yield lock.acquire()
+ except MemcacheLockTimeoutError:
+ raise HTTPError(StatusResponse(responsecode.CONFLICT, "Resource: %s currently in use on the server." % (self.uri,)))
+ else:
+ # Release lock after commit or abort
+ transaction.postCommit(lock.clean)
+ transaction.postAbort(lock.clean)
+
result = (yield self.doScheduling())
returnValue(result)
@@ -558,12 +577,12 @@
super(CalDAVScheduler, self).__init__(request, resource)
self.doingPOST = False
- def doSchedulingViaPOST(self):
+ def doSchedulingViaPOST(self, transaction):
"""
The Scheduling POST operation on an Outbox.
"""
self.doingPOST = True
- return super(CalDAVScheduler, self).doSchedulingViaPOST()
+ return super(CalDAVScheduler, self).doSchedulingViaPOST(transaction)
def checkAuthorization(self):
# Must have an authenticated user
Modified: CalendarServer/branches/users/cdaboo/implicituidrace/twistedcaldav/storebridge.py
===================================================================
--- CalendarServer/branches/users/cdaboo/implicituidrace/twistedcaldav/storebridge.py 2011-10-03 14:58:12 UTC (rev 8137)
+++ CalendarServer/branches/users/cdaboo/implicituidrace/twistedcaldav/storebridge.py 2011-10-03 15:27:22 UTC (rev 8138)
@@ -1784,16 +1784,16 @@
calendarName = calendar.name()
ownerHome = calendar.ownerCalendarHome()
homeUID = ownerHome.uid()
- txn = ownerHome.transaction().store().newTransaction(
+ transaction = ownerHome.transaction().store().newTransaction(
"new transaction for " + self._newStoreObject.name())
- newParent = (yield (yield txn.calendarHomeWithUID(homeUID))
+ newParent = (yield (yield transaction.calendarHomeWithUID(homeUID))
.calendarWithName(calendarName))
newObject = (yield newParent.calendarObjectWithName(objectName))
- request._newStoreTransaction = txn
+ request._newStoreTransaction = transaction
request._resourcesByURL.clear()
request._urlsByResource.clear()
self._initializeWithObject(newObject, newParent)
- returnValue(txn)
+ returnValue(transaction)
@inlineCallbacks
@@ -1854,6 +1854,7 @@
# delete from happening.
isinbox = self._newStoreObject._calendar.name() == "inbox"
+ transaction = self._newStoreObject._transaction
# Do If-Schedule-Tag-Match behavior first
# Important: this should only ever be done when storeRemove is called
@@ -1875,7 +1876,7 @@
)
if do_implicit_action:
lock = MemcacheLock(
- "ImplicitUIDLock", calendar.resourceUID(), timeout=60.0
+ "ImplicitUIDLock", calendar.resourceUID(), timeout=60.0, expire_time=5*60
)
try:
@@ -1896,7 +1897,9 @@
finally:
if lock:
- yield lock.clean()
+ # Release lock after commit or abort
+ transaction.postCommit(lock.clean)
+ transaction.postAbort(lock.clean)
returnValue(NO_CONTENT)
Modified: CalendarServer/branches/users/cdaboo/implicituidrace/twistedcaldav/test/test_wrapping.py
===================================================================
--- CalendarServer/branches/users/cdaboo/implicituidrace/twistedcaldav/test/test_wrapping.py 2011-10-03 14:58:12 UTC (rev 8137)
+++ CalendarServer/branches/users/cdaboo/implicituidrace/twistedcaldav/test/test_wrapping.py 2011-10-03 15:27:22 UTC (rev 8138)
@@ -119,13 +119,13 @@
self.calendarCollection._newStore._path.createDirectory()
except:
pass
- txn = self.calendarCollection._newStore.newTransaction()
- home = yield txn.calendarHomeWithUID(uid, True)
+ transaction = self.calendarCollection._newStore.newTransaction()
+ home = yield transaction.calendarHomeWithUID(uid, True)
cal = yield home.calendarWithName("calendar")
yield cal.createCalendarObjectWithName(
objectName, VComponent.fromString(objectText)
)
- yield txn.commit()
+ yield transaction.commit()
@inlineCallbacks
@@ -146,8 +146,8 @@
self.addressbookCollection._newStore._path.createDirectory()
except:
pass
- txn = self.addressbookCollection._newStore.newTransaction()
- home = yield txn.addressbookHomeWithUID(uid, True)
+ transaction = self.addressbookCollection._newStore.newTransaction()
+ home = yield transaction.addressbookHomeWithUID(uid, True)
adbk = yield home.addressbookWithName("addressbook")
if adbk is None:
yield home.createAddressBookWithName("addressbook")
@@ -155,7 +155,7 @@
yield adbk.createAddressBookObjectWithName(
objectName, VCComponent.fromString(objectText)
)
- yield txn.commit()
+ yield transaction.commit()
requestUnderTest = None
Modified: CalendarServer/branches/users/cdaboo/implicituidrace/twistedcaldav/upgrade.py
===================================================================
--- CalendarServer/branches/users/cdaboo/implicituidrace/twistedcaldav/upgrade.py 2011-10-03 14:58:12 UTC (rev 8137)
+++ CalendarServer/branches/users/cdaboo/implicituidrace/twistedcaldav/upgrade.py 2011-10-03 15:27:22 UTC (rev 8138)
@@ -1061,8 +1061,8 @@
#
# Remove item
#
- txn = request._newStoreTransaction
- txn._notifierFactory = None # Do not send push notifications
+ transaction = request._newStoreTransaction
+ transaction._notifierFactory = None # Do not send push notifications
yield inboxItem.storeRemove(request, True, uri)
- yield txn.commit()
+ yield transaction.commit()
Modified: CalendarServer/branches/users/cdaboo/implicituidrace/txdav/base/datastore/file.py
===================================================================
--- CalendarServer/branches/users/cdaboo/implicituidrace/txdav/base/datastore/file.py 2011-10-03 14:58:12 UTC (rev 8137)
+++ CalendarServer/branches/users/cdaboo/implicituidrace/txdav/base/datastore/file.py 2011-10-03 15:27:22 UTC (rev 8138)
@@ -129,6 +129,7 @@
self._termination = None
self._operations = []
self._postCommitOperations = []
+ self._postAbortOperations = []
self._tracker = _CommitTracker(name)
@@ -162,7 +163,10 @@
def abort(self):
self._terminate("aborted")
+ for operation in self._postAbortOperations:
+ operation()
+
def commit(self):
self._terminate("committed")
@@ -189,6 +193,9 @@
def postCommit(self, operation):
self._postCommitOperations.append(operation)
+ def postAbort(self, operation):
+ self._postAbortOperations.append(operation)
+
class FileMetaDataMixin(object):
implements(IDataStoreObject)
Modified: CalendarServer/branches/users/cdaboo/implicituidrace/txdav/base/propertystore/sql.py
===================================================================
--- CalendarServer/branches/users/cdaboo/implicituidrace/txdav/base/propertystore/sql.py 2011-10-03 14:58:12 UTC (rev 8137)
+++ CalendarServer/branches/users/cdaboo/implicituidrace/txdav/base/propertystore/sql.py 2011-10-03 15:27:22 UTC (rev 8138)
@@ -56,7 +56,7 @@
@inlineCallbacks
- def _refresh(self, txn):
+ def _refresh(self, transaction):
"""
Load, or re-load, this object with the given transaction; first from
memcache, then pulling from the database again.
@@ -65,7 +65,7 @@
# Look for memcache entry first
rows = yield self._cacher.get(str(self._resourceID))
if rows is None:
- rows = yield self._allWithID.on(txn, resourceID=self._resourceID)
+ rows = yield self._allWithID.on(transaction, resourceID=self._resourceID)
yield self._cacher.set(str(self._resourceID),
rows if rows is not None else ())
for name, uid, value in rows:
@@ -74,20 +74,20 @@
@classmethod
@inlineCallbacks
- def load(cls, defaultuser, txn, resourceID, created=False):
+ def load(cls, defaultuser, transaction, resourceID, created=False):
self = cls.__new__(cls)
super(PropertyStore, self).__init__(defaultuser)
- self._txn = txn
+ self._transaction = transaction
self._resourceID = resourceID
self._cached = {}
if not created:
- yield self._refresh(txn)
+ yield self._refresh(transaction)
returnValue(self)
@classmethod
@inlineCallbacks
- def forMultipleResources(cls, defaultUser, txn,
+ def forMultipleResources(cls, defaultUser, transaction,
childColumn, parentColumn, parentID):
"""
Load all property stores for all objects in a collection. This is used
@@ -99,9 +99,9 @@
@type defaultUser: C{str}
- @param txn: the transaction within which to fetch the rows.
+ @param transaction: the transaction within which to fetch the rows.
- @type txn: L{IAsyncTransaction}
+ @type transaction: L{IAsyncTransaction}
@param childColumn: The resource ID column for the child resources, i.e.
the resources of the type for which this method will loading the
@@ -124,7 +124,7 @@
'right'),
Where=parentColumn == parentID
)
- rows = yield query.on(txn)
+ rows = yield query.on(transaction)
createdStores = {}
for object_resource_id, resource_id, name, view_uid, value in rows:
@@ -132,7 +132,7 @@
if resource_id not in createdStores:
store = cls.__new__(cls)
super(PropertyStore, store).__init__(defaultUser)
- store._txn = txn
+ store._transaction = transaction
store._resourceID = resource_id
store._cached = {}
createdStores[resource_id] = store
@@ -140,7 +140,7 @@
else:
store = cls.__new__(cls)
super(PropertyStore, store).__init__(defaultUser)
- store._txn = txn
+ store._transaction = transaction
store._resourceID = object_resource_id
store._cached = {}
createdStores[object_resource_id] = store
@@ -183,21 +183,21 @@
wasCached = [(key_str, uid) in self._cached]
self._cached[(key_str, uid)] = value_str
@inlineCallbacks
- def trySetItem(txn):
+ def trySetItem(transaction):
if tried:
- yield self._refresh(txn)
+ yield self._refresh(transaction)
wasCached[:] = [(key_str, uid) in self._cached]
tried.append(True)
if wasCached[0]:
yield self._updateQuery.on(
- txn, resourceID=self._resourceID, value=value_str,
+ transaction, resourceID=self._resourceID, value=value_str,
name=key_str, uid=uid)
else:
yield self._insertQuery.on(
- txn, resourceID=self._resourceID, value=value_str,
+ transaction, resourceID=self._resourceID, value=value_str,
name=key_str, uid=uid)
self._cacher.delete(str(self._resourceID))
- self._txn.subtransaction(trySetItem)
+ self._transaction.subtransaction(trySetItem)
@@ -213,7 +213,7 @@
key_str = key.toString()
del self._cached[(key_str, uid)]
- self._deleteQuery.on(self._txn, lambda:KeyError(key),
+ self._deleteQuery.on(self._transaction, lambda:KeyError(key),
resourceID=self._resourceID,
name=key_str, uid=uid
)
@@ -232,6 +232,6 @@
def _removeResource(self):
self._cached = {}
- self._deleteResourceQuery.on(self._txn, resourceID=self._resourceID)
+ self._deleteResourceQuery.on(self._transaction, resourceID=self._resourceID)
self._cacher.delete(str(self._resourceID))
Modified: CalendarServer/branches/users/cdaboo/implicituidrace/txdav/base/propertystore/test/test_sql.py
===================================================================
--- CalendarServer/branches/users/cdaboo/implicituidrace/txdav/base/propertystore/test/test_sql.py 2011-10-03 14:58:12 UTC (rev 8137)
+++ CalendarServer/branches/users/cdaboo/implicituidrace/txdav/base/propertystore/test/test_sql.py 2011-10-03 15:27:22 UTC (rev 8138)
@@ -46,19 +46,19 @@
self.notifierFactory = StubNotifierFactory()
self.store = yield buildStore(self, self.notifierFactory)
self.addCleanup(self.maybeCommitLast)
- self._txn = self.store.newTransaction()
+ self._transaction = self.store.newTransaction()
self.propertyStore = self.propertyStore1 = yield PropertyStore.load(
- "user01", self._txn, 1
+ "user01", self._transaction, 1
)
- self.propertyStore2 = yield PropertyStore.load("user01", self._txn, 1)
+ self.propertyStore2 = yield PropertyStore.load("user01", self._transaction, 1)
self.propertyStore2._setPerUserUID("user02")
@inlineCallbacks
def maybeCommitLast(self):
- if hasattr(self, "_txn"):
- result = yield self._txn.commit()
- delattr(self, "_txn")
+ if hasattr(self, "_transaction"):
+ result = yield self._transaction.commit()
+ delattr(self, "_transaction")
else:
result = None
self.propertyStore = self.propertyStore1 = self.propertyStore2 = None
@@ -67,20 +67,20 @@
@inlineCallbacks
def _changed(self, store):
- if hasattr(self, "_txn"):
- yield self._txn.commit()
- delattr(self, "_txn")
- self._txn = self.store.newTransaction()
+ if hasattr(self, "_transaction"):
+ yield self._transaction.commit()
+ delattr(self, "_transaction")
+ self._transaction = self.store.newTransaction()
store = self.propertyStore1
self.propertyStore = self.propertyStore1 = yield PropertyStore.load(
- "user01", self._txn, 1
+ "user01", self._transaction, 1
)
self.propertyStore1._shadowableKeys = store._shadowableKeys
self.propertyStore1._globalKeys = store._globalKeys
store = self.propertyStore2
- self.propertyStore2 = yield PropertyStore.load("user01", self._txn, 1)
+ self.propertyStore2 = yield PropertyStore.load("user01", self._transaction, 1)
self.propertyStore2._setPerUserUID("user02")
self.propertyStore2._shadowableKeys = store._shadowableKeys
self.propertyStore2._globalKeys = store._globalKeys
@@ -88,21 +88,21 @@
@inlineCallbacks
def _abort(self, store):
- if hasattr(self, "_txn"):
- yield self._txn.abort()
- delattr(self, "_txn")
+ if hasattr(self, "_transaction"):
+ yield self._transaction.abort()
+ delattr(self, "_transaction")
- self._txn = self.store.newTransaction()
+ self._transaction = self.store.newTransaction()
store = self.propertyStore1
self.propertyStore = self.propertyStore1 = yield PropertyStore.load(
- "user01", self._txn, 1
+ "user01", self._transaction, 1
)
self.propertyStore1._shadowableKeys = store._shadowableKeys
self.propertyStore1._globalKeys = store._globalKeys
store = self.propertyStore2
- self.propertyStore2 = yield PropertyStore.load("user01", self._txn, 1)
+ self.propertyStore2 = yield PropertyStore.load("user01", self._transaction, 1)
self.propertyStore2._setPerUserUID("user02")
self.propertyStore2._shadowableKeys = store._shadowableKeys
self.propertyStore2._globalKeys = store._globalKeys
@@ -118,16 +118,16 @@
pname = propertyName("concurrent")
pval1 = propertyValue("alpha")
pval2 = propertyValue("beta")
- concurrentTxn = self.store.newTransaction()
+ concurrentTransaction = self.store.newTransaction()
@inlineCallbacks
def maybeAbortIt():
try:
- yield concurrentTxn.abort()
+ yield concurrentTransaction.abort()
except AlreadyFinishedError:
pass
self.addCleanup(maybeAbortIt)
concurrentPropertyStore = yield PropertyStore.load(
- "user01", concurrentTxn, 1
+ "user01", concurrentTransaction, 1
)
concurrentPropertyStore[pname] = pval1
race = []
@@ -141,10 +141,10 @@
race.append(label)
return result
return breaktie
- a = concurrentTxn.commit().addCallback(tiebreaker('a'))
+ a = concurrentTransaction.commit().addCallback(tiebreaker('a'))
self.propertyStore[pname] = pval2
- b = self._txn.commit().addCallback(tiebreaker('b'))
- del self._txn
+ b = self._transaction.commit().addCallback(tiebreaker('b'))
+ del self._transaction
self.assertEquals((yield gatherResults([a, b])), [None, None])
yield self._abort(self.propertyStore)
winner = {'a': pval1,
Modified: CalendarServer/branches/users/cdaboo/implicituidrace/txdav/caldav/datastore/sql.py
===================================================================
--- CalendarServer/branches/users/cdaboo/implicituidrace/txdav/caldav/datastore/sql.py 2011-10-03 14:58:12 UTC (rev 8137)
+++ CalendarServer/branches/users/cdaboo/implicituidrace/txdav/caldav/datastore/sql.py 2011-10-03 15:27:22 UTC (rev 8138)
@@ -126,22 +126,22 @@
yield Delete(
From=chm,
Where=chm.RESOURCE_ID == self._resourceID
- ).on(self._txn)
+ ).on(self._transaction)
yield Delete(
From=cb,
Where=cb.CALENDAR_HOME_RESOURCE_ID == self._resourceID
- ).on(self._txn)
+ ).on(self._transaction)
yield Delete(
From=cor,
Where=cor.CALENDAR_HOME_RESOURCE_ID == self._resourceID
- ).on(self._txn)
+ ).on(self._transaction)
yield Delete(
From=ch,
Where=ch.RESOURCE_ID == self._resourceID
- ).on(self._txn)
+ ).on(self._transaction)
yield self._cacher.delete(str(self._ownerUID))
@@ -184,7 +184,7 @@
'left outer'),
Where=(co.DROPBOX_ID == dropboxID).And(
cb.HOME_RESOURCE_ID == self._resourceID)
- ).on(self._txn))
+ ).on(self._transaction))
if rows:
calendarID, objectID = rows[0]
@@ -205,7 +205,7 @@
Where=(co.DROPBOX_ID != None).And(
cb.HOME_RESOURCE_ID == self._resourceID),
OrderBy=co.DROPBOX_ID
- ).on(self._txn))
+ ).on(self._transaction))
returnValue([row[0] for row in rows])
@@ -442,7 +442,7 @@
else:
# If migrating or re-creating or config option for delayed indexing is off, always index
- if reCreate or self._txn._migrating or not config.FreeBusyIndexDelayedExpand:
+ if reCreate or self._transaction._migrating or not config.FreeBusyIndexDelayedExpand:
doInstanceIndexing = True
# Duration into the future through which recurrences are expanded in the index
@@ -480,7 +480,7 @@
self.log_error("Invalid instance %s when indexing %s in %s" %
(e.rid, self._name, self._calendar,))
- if self._txn._migrating:
+ if self._transaction._migrating:
# TODO: fix the data here by re-writing component then re-index
instances = component.expandTimeRanges(expand, ignoreInvalidInstances=True)
recurrenceLimit = instances.limit
@@ -511,7 +511,7 @@
self._size = len(componentText)
# Special - if migrating we need to preserve the original md5
- if self._txn._migrating and hasattr(component, "md5"):
+ if self._transaction._migrating and hasattr(component, "md5"):
self._md5 = component.md5
# Determine attachment mode (ignore inbox's) - NB we have to do this
@@ -553,7 +553,7 @@
yield Insert(
values,
Return=(co.RESOURCE_ID, co.CREATED, co.MODIFIED)
- ).on(self._txn)
+ ).on(self._transaction)
)[0]
else:
values[co.MODIFIED] = utcNowSQL
@@ -561,13 +561,13 @@
yield Update(
values, Return=co.MODIFIED,
Where=co.RESOURCE_ID == self._resourceID
- ).on(self._txn)
+ ).on(self._transaction)
)[0][0]
# Need to wipe the existing time-range for this and rebuild
yield Delete(
From=tr,
Where=tr.CALENDAR_OBJECT_RESOURCE_ID == self._resourceID
- ).on(self._txn)
+ ).on(self._transaction)
else:
values = {
co.RECURRANCE_MAX :
@@ -577,13 +577,13 @@
yield Update(
values,
Where=co.RESOURCE_ID == self._resourceID
- ).on(self._txn)
+ ).on(self._transaction)
# Need to wipe the existing time-range for this and rebuild
yield Delete(
From=tr,
Where=tr.CALENDAR_OBJECT_RESOURCE_ID == self._resourceID
- ).on(self._txn)
+ ).on(self._transaction)
if doInstanceIndexing:
# TIME_RANGE table update
@@ -606,14 +606,14 @@
instance.component.getFBType(),
icalfbtype_to_indexfbtype["FREE"]),
tr.TRANSPARENT : transp,
- }, Return=tr.INSTANCE_ID).on(self._txn))[0][0]
+ }, Return=tr.INSTANCE_ID).on(self._transaction))[0][0]
peruserdata = component.perUserTransparency(instance.rid)
for useruid, transp in peruserdata:
(yield Insert({
tpy.TIME_RANGE_INSTANCE_ID : instanceid,
tpy.USER_ID : useruid,
tpy.TRANSPARENT : transp,
- }).on(self._txn))
+ }).on(self._transaction))
# Special - for unbounded recurrence we insert a value for "infinity"
# that will allow an open-ended time-range to always match it.
@@ -631,14 +631,14 @@
tr.FBTYPE :
icalfbtype_to_indexfbtype["UNKNOWN"],
tr.TRANSPARENT : transp,
- }, Return=tr.INSTANCE_ID).on(self._txn))[0][0]
+ }, Return=tr.INSTANCE_ID).on(self._transaction))[0][0]
peruserdata = component.perUserTransparency(None)
for useruid, transp in peruserdata:
(yield Insert({
tpy.TIME_RANGE_INSTANCE_ID : instanceid,
tpy.USER_ID : useruid,
tpy.TRANSPARENT : transp,
- }).on(self._txn))
+ }).on(self._transaction))
@inlineCallbacks
@@ -734,7 +734,7 @@
sharerHomeID = (yield self._parentCollection.sharerHomeID())
returnValue((
yield Attachment.create(
- self._txn, (yield self.dropboxID()), name, sharerHomeID
+ self._transaction, (yield self.dropboxID()), name, sharerHomeID
)
))
@@ -744,7 +744,7 @@
yield attachment.remove()
def attachmentWithName(self, name):
- return Attachment.loadWithName(self._txn, self._dropboxID, name)
+ return Attachment.loadWithName(self._transaction, self._dropboxID, name)
def attendeesCanManageAttachments(self):
return self._attachment == _ATTACHMENTS_MODE_WRITE
@@ -763,7 +763,7 @@
@inlineCallbacks
def attachments(self):
if self._dropboxID:
- rows = yield self._attachmentsQuery.on(self._txn,
+ rows = yield self._attachmentsQuery.on(self._transaction,
dropboxID=self._dropboxID)
result = []
for row in rows:
@@ -805,8 +805,8 @@
@property
- def _txn(self):
- return self._attachment._txn
+ def _transaction(self):
+ return self._attachment._transaction
def write(self, data):
@@ -827,7 +827,7 @@
# prevented from committing successfully. It's not valid to have an
# attachment that doesn't point to a real file.
- home = (yield self._txn.calendarHomeWithResourceID(
+ home = (yield self._transaction.calendarHomeWithResourceID(
self._attachment._ownerHomeID))
oldSize = self._attachment.size()
@@ -856,7 +856,7 @@
Where=(att.PATH == self._attachment.name()).And(
att.DROPBOX_ID == self._attachment._dropboxID
),
- Return=(att.CREATED, att.MODIFIED)).on(self._txn))[0]
+ Return=(att.CREATED, att.MODIFIED)).on(self._transaction))[0]
)
if home:
@@ -875,8 +875,8 @@
implements(IAttachment)
- def __init__(self, txn, dropboxID, name, ownerHomeID=None, justCreated=False):
- self._txn = txn
+ def __init__(self, transaction, dropboxID, name, ownerHomeID=None, justCreated=False):
+ self._transaction = transaction
self._dropboxID = dropboxID
self._name = name
self._ownerHomeID = ownerHomeID
@@ -885,8 +885,8 @@
@classmethod
- def _attachmentPathRoot(cls, txn, dropboxID):
- attachmentRoot = txn._store.attachmentsPath
+ def _attachmentPathRoot(cls, transaction, dropboxID):
+ attachmentRoot = transaction._store.attachmentsPath
# Use directory hashing scheme based on MD5 of dropboxID
hasheduid = hashlib.md5(dropboxID).hexdigest()
@@ -896,16 +896,16 @@
@classmethod
@inlineCallbacks
- def create(cls, txn, dropboxID, name, ownerHomeID):
+ def create(cls, transaction, dropboxID, name, ownerHomeID):
# File system paths need to exist
try:
- cls._attachmentPathRoot(txn, dropboxID).makedirs()
+ cls._attachmentPathRoot(transaction, dropboxID).makedirs()
except:
pass
# Now create the DB entry
- attachment = cls(txn, dropboxID, name, ownerHomeID, True)
+ attachment = cls(transaction, dropboxID, name, ownerHomeID, True)
att = schema.ATTACHMENT
yield Insert({
att.CALENDAR_HOME_RESOURCE_ID : ownerHomeID,
@@ -914,14 +914,14 @@
att.SIZE : 0,
att.MD5 : "",
att.PATH : name
- }).on(txn)
+ }).on(transaction)
returnValue(attachment)
@classmethod
@inlineCallbacks
- def loadWithName(cls, txn, dropboxID, name):
- attachment = cls(txn, dropboxID, name)
+ def loadWithName(cls, transaction, dropboxID, name):
+ attachment = cls(transaction, dropboxID, name)
attachment = (yield attachment.initFromStore())
returnValue(attachment)
@@ -938,7 +938,7 @@
att.SIZE, att.MD5, att.CREATED, att.MODIFIED],
From=att,
Where=(att.DROPBOX_ID == self._dropboxID).And(
- att.PATH == self._name)).on(self._txn))
+ att.PATH == self._name)).on(self._transaction))
if not rows:
returnValue(None)
self._ownerHomeID = rows[0][0]
@@ -956,7 +956,7 @@
@property
def _path(self):
- attachmentRoot = self._txn._store.attachmentsPath
+ attachmentRoot = self._transaction._store.attachmentsPath
# Use directory hashing scheme based on MD5 of dropboxID
hasheduid = hashlib.md5(self._dropboxID).hexdigest()
return attachmentRoot.child(hasheduid[0:2]).child(
@@ -986,10 +986,10 @@
@inlineCallbacks
def remove(self):
oldSize = self._size
- self._txn.postCommit(self._path.remove)
+ self._transaction.postCommit(self._path.remove)
yield self._internalRemove()
# Adjust quota
- home = (yield self._txn.calendarHomeWithResourceID(self._ownerHomeID))
+ home = (yield self._transaction.calendarHomeWithResourceID(self._ownerHomeID))
if home:
yield home.adjustQuotaUsedBytes(-oldSize)
@@ -1003,7 +1003,7 @@
for attachments that have failed to be created due to errors during
storage.)
"""
- return self._removeStatement.on(self._txn, dropboxID=self._dropboxID,
+ return self._removeStatement.on(self._transaction, dropboxID=self._dropboxID,
path=self._name)
Modified: CalendarServer/branches/users/cdaboo/implicituidrace/txdav/caldav/datastore/test/common.py
===================================================================
--- CalendarServer/branches/users/cdaboo/implicituidrace/txdav/caldav/datastore/test/common.py 2011-10-03 14:58:12 UTC (rev 8137)
+++ CalendarServer/branches/users/cdaboo/implicituidrace/txdav/caldav/datastore/test/common.py 2011-10-03 15:27:22 UTC (rev 8138)
@@ -301,9 +301,9 @@
L{ICommonStoreTransaction}, L{ICalendarTransaction}, and their
respectively required attributes.
"""
- txn = self.transactionUnderTest()
- self.assertProvides(ICommonTransaction, txn)
- self.assertProvides(ICalendarTransaction, txn)
+ transaction = self.transactionUnderTest()
+ self.assertProvides(ICommonTransaction, transaction)
+ self.assertProvides(ICalendarTransaction, transaction)
@inlineCallbacks
@@ -337,8 +337,8 @@
@inlineCallbacks
def notificationUnderTest(self):
- txn = self.transactionUnderTest()
- notifications = yield txn.notificationsWithUID("home1")
+ transaction = self.transactionUnderTest()
+ notifications = yield transaction.notificationsWithUID("home1")
inviteNotification = InviteNotification()
yield notifications.writeNotificationObject("abc", inviteNotification,
inviteNotification.toxml())
@@ -362,8 +362,8 @@
L{ICalendar.resourceNamesSinceToken} will return the names of calendar
objects changed or deleted since
"""
- txn = self.transactionUnderTest()
- coll = yield txn.notificationsWithUID("home1")
+ transaction = self.transactionUnderTest()
+ coll = yield transaction.notificationsWithUID("home1")
invite1 = InviteNotification()
yield coll.writeNotificationObject("1", invite1, invite1.toxml())
st = yield coll.syncToken()
@@ -493,8 +493,8 @@
L{INotificationObject.notificationCollection} returns the
L{INotificationCollection} that the object was retrieved from.
"""
- txn = self.transactionUnderTest()
- collection = yield txn.notificationsWithUID("home1")
+ transaction = self.transactionUnderTest()
+ collection = yield transaction.notificationsWithUID("home1")
notification = yield self.notificationUnderTest()
self.assertIdentical(collection, notification.notificationCollection())
@@ -541,8 +541,8 @@
L{ICommonStoreTransaction.calendarHomeWithUID} should return C{None}
when asked for a non-existent calendar home.
"""
- txn = self.transactionUnderTest()
- self.assertEquals((yield txn.calendarHomeWithUID("xyzzy")), None)
+ transaction = self.transactionUnderTest()
+ self.assertEquals((yield transaction.calendarHomeWithUID("xyzzy")), None)
@inlineCallbacks
@@ -582,7 +582,7 @@
yield positiveAssertions()
# FIXME: revert
# FIXME: test for multiple renames
- # FIXME: test for conflicting renames (a->b, c->a in the same txn)
+ # FIXME: test for conflicting renames (a->b, c->a in the same transaction)
@inlineCallbacks
@@ -1267,26 +1267,26 @@
L{ICommonStoreTransaction.calendarHomeWithUID} with C{create=True}
will create a calendar home that doesn't exist yet.
"""
- txn = self.transactionUnderTest()
+ transaction = self.transactionUnderTest()
noHomeUID = "xyzzy"
- calendarHome = yield txn.calendarHomeWithUID(
+ calendarHome = yield transaction.calendarHomeWithUID(
noHomeUID,
create=True
)
@inlineCallbacks
- def readOtherTxn():
- otherTxn = self.savedStore.newTransaction(self.id() + "other txn")
- self.addCleanup(otherTxn.commit)
- returnValue((yield otherTxn.calendarHomeWithUID(noHomeUID)))
+ def readOtherTransaction():
+ otherTransaction = self.savedStore.newTransaction(self.id() + "other transaction")
+ self.addCleanup(otherTransaction.commit)
+ returnValue((yield otherTransaction.calendarHomeWithUID(noHomeUID)))
self.assertProvides(ICalendarHome, calendarHome)
# Default calendar should be automatically created.
self.assertProvides(ICalendar,
(yield calendarHome.calendarWithName("calendar")))
# A concurrent transaction shouldn't be able to read it yet:
- self.assertIdentical((yield readOtherTxn()), None)
+ self.assertIdentical((yield readOtherTransaction()), None)
yield self.commit()
# But once it's committed, other transactions should see it.
- self.assertProvides(ICalendarHome, (yield readOtherTxn()))
+ self.assertProvides(ICalendarHome, (yield readOtherTransaction()))
@inlineCallbacks
@@ -1867,15 +1867,15 @@
L{AlreadyFinishedError}.
"""
yield self.calendarObjectUnderTest()
- txn = self.lastTransaction
+ transaction = self.lastTransaction
yield self.commit()
yield self.failUnlessFailure(
- maybeDeferred(txn.commit),
+ maybeDeferred(transaction.commit),
AlreadyFinishedError
)
yield self.failUnlessFailure(
- maybeDeferred(txn.abort),
+ maybeDeferred(transaction.abort),
AlreadyFinishedError
)
@@ -1922,19 +1922,19 @@
"""
# create some additional calendar homes
additionalUIDs = set('alpha-uid home2 home3 beta-uid'.split())
- txn = self.transactionUnderTest()
+ transaction = self.transactionUnderTest()
for name in additionalUIDs:
# maybe it's not actually necessary to yield (i.e. wait) for each
# one? commit() should wait for all of them.
- yield txn.calendarHomeWithUID(name, create=True)
+ yield transaction.calendarHomeWithUID(name, create=True)
yield self.commit()
foundUIDs = set([])
- lastTxn = None
- for txn, home in (yield self.storeUnderTest().eachCalendarHome()):
- self.addCleanup(txn.commit)
+ lastTransaction = None
+ for transaction, home in (yield self.storeUnderTest().eachCalendarHome()):
+ self.addCleanup(transaction.commit)
foundUIDs.add(home.uid())
- self.assertNotIdentical(lastTxn, txn)
- lastTxn = txn
+ self.assertNotIdentical(lastTransaction, transaction)
+ lastTransaction = transaction
requiredUIDs = set([
uid for uid in self.requirements
if self.requirements[uid] is not None
Modified: CalendarServer/branches/users/cdaboo/implicituidrace/txdav/caldav/datastore/test/test_file.py
===================================================================
--- CalendarServer/branches/users/cdaboo/implicituidrace/txdav/caldav/datastore/test/test_file.py 2011-10-03 14:58:12 UTC (rev 8137)
+++ CalendarServer/branches/users/cdaboo/implicituidrace/txdav/caldav/datastore/test/test_file.py 2011-10-03 15:27:22 UTC (rev 8138)
@@ -68,7 +68,7 @@
testID = test.id()
test.calendarStore = CalendarStore(storeRootPath, test.notifierFactory,
quota=deriveQuota(test))
- test.txn = test.calendarStore.newTransaction(testID + "(old)")
+ test.transaction = test.calendarStore.newTransaction(testID + "(old)")
assert test.calendarStore is not None, "No calendar store?"
@@ -76,7 +76,7 @@
@inlineCallbacks
def setUpHome1(test):
setUpCalendarStore(test)
- test.home1 = yield test.txn.calendarHomeWithUID("home1")
+ test.home1 = yield test.transaction.calendarHomeWithUID("home1")
assert test.home1 is not None, "No calendar home?"
@@ -203,9 +203,9 @@
index = calendar._index
yield self.assertEquals(set((yield index.calendarObjects())),
set((yield calendar.calendarObjects())))
- yield self.txn.commit()
- self.txn = self.calendarStore.newTransaction()
- self.home1 = yield self.txn.calendarHomeWithUID("home1")
+ yield self.transaction.commit()
+ self.transaction = self.calendarStore.newTransaction()
+ self.home1 = yield self.transaction.calendarHomeWithUID("home1")
calendar = yield self.home1.calendarWithName("calendar2")
# FIXME: we should be curating our own index here, but in order to fix
# that the code in the old implicit scheduler needs to change. This
@@ -285,7 +285,7 @@
"""
self.calendar1.removeCalendarObjectWithName("2.ics")
self.failUnless(self.calendar1._path.child("2.ics").exists())
- yield self.txn.commit()
+ yield self.transaction.commit()
self.failIf(self.calendar1._path.child("2.ics").exists())
@@ -311,10 +311,10 @@
transaction.
"""
self.counter += 1
- self.txn = self.calendarStore.newTransaction(
+ self.transaction = self.calendarStore.newTransaction(
self.id() + " (old #" + str(self.counter) + ")"
)
- self.home1 = yield self.txn.calendarHomeWithUID("home1")
+ self.home1 = yield self.transaction.calendarHomeWithUID("home1")
self.calendar1 = yield self.home1.calendarWithName("calendar_1")
@@ -326,19 +326,19 @@
"""
# Make sure that the calendar home is actually committed; rolling back
# calendar home creation will remove the whole directory.
- yield self.txn.commit()
+ yield self.transaction.commit()
yield self._refresh()
self.calendar1.createCalendarObjectWithName(
"sample.ics",
VComponent.fromString(test_event_text)
)
- yield self.txn.abort()
+ yield self.transaction.abort()
yield self._refresh()
self.assertIdentical(
(yield self.calendar1.calendarObjectWithName("sample.ics")),
None
)
- yield self.txn.commit()
+ yield self.transaction.commit()
@inlineCallbacks
@@ -351,8 +351,8 @@
"""
def fail():
raise RuntimeError("oops")
- self.txn.addOperation(fail, "dummy failing operation")
- self.assertRaises(RuntimeError, self.txn.commit)
+ self.transaction.addOperation(fail, "dummy failing operation")
+ self.assertRaises(RuntimeError, self.transaction.commit)
yield self._refresh()
Modified: CalendarServer/branches/users/cdaboo/implicituidrace/txdav/caldav/datastore/test/test_sql.py
===================================================================
--- CalendarServer/branches/users/cdaboo/implicituidrace/txdav/caldav/datastore/test/test_sql.py 2011-10-03 14:58:12 UTC (rev 8137)
+++ CalendarServer/branches/users/cdaboo/implicituidrace/txdav/caldav/datastore/test/test_sql.py 2011-10-03 15:27:22 UTC (rev 8138)
@@ -113,9 +113,9 @@
"""
setUpCalendarStore(self)
fileStore = self.calendarStore
- txn = fileStore.newTransaction()
- self.addCleanup(txn.commit)
- return txn
+ transaction = fileStore.newTransaction()
+ self.addCleanup(transaction.commit)
+ return transaction
@inlineCallbacks
@@ -128,7 +128,7 @@
yield self.createAttachmentTest(lambda x: x)
attachmentRoot = (
yield self.calendarObjectUnderTest()
- )._txn._store.attachmentsPath
+ )._transaction._store.attachmentsPath
obj = yield self.calendarObjectUnderTest()
hasheduid = hashlib.md5(obj._dropboxID).hexdigest()
attachmentPath = attachmentRoot.child(
@@ -279,41 +279,41 @@
calendarStore = self._sqlCalendarStore
- txn1 = calendarStore.newTransaction()
- txn2 = calendarStore.newTransaction()
- txn3 = calendarStore.newTransaction()
+ transaction1 = calendarStore.newTransaction()
+ transaction2 = calendarStore.newTransaction()
+ transaction3 = calendarStore.newTransaction()
# Provision one home now - we will use this to later verify we can do
# reads of existing data in the table
- home_uid2 = yield txn3.homeWithUID(ECALENDARTYPE, "uid2", create=True)
+ home_uid2 = yield transaction3.homeWithUID(ECALENDARTYPE, "uid2", create=True)
self.assertNotEqual(home_uid2, None)
- yield txn3.commit()
+ yield transaction3.commit()
- home_uid1_1 = yield txn1.homeWithUID(
+ home_uid1_1 = yield transaction1.homeWithUID(
ECALENDARTYPE, "uid1", create=True
)
@inlineCallbacks
def _defer_home_uid1_2():
- home_uid1_2 = yield txn2.homeWithUID(
+ home_uid1_2 = yield transaction2.homeWithUID(
ECALENDARTYPE, "uid1", create=True
)
- yield txn2.commit()
+ yield transaction2.commit()
returnValue(home_uid1_2)
d1 = _defer_home_uid1_2()
@inlineCallbacks
def _pause_home_uid1_1():
yield deferLater(reactor, 1.0, lambda : None)
- yield txn1.commit()
+ yield transaction1.commit()
d2 = _pause_home_uid1_1()
# Verify that we can still get to the existing home - i.e. the lock
# on the table allows concurrent reads
- txn4 = calendarStore.newTransaction()
- home_uid2 = yield txn4.homeWithUID(ECALENDARTYPE, "uid2", create=True)
+ transaction4 = calendarStore.newTransaction()
+ home_uid2 = yield transaction4.homeWithUID(ECALENDARTYPE, "uid2", create=True)
self.assertNotEqual(home_uid2, None)
- yield txn4.commit()
+ yield transaction4.commit()
# Now do the concurrent provision attempt
yield d2
@@ -333,18 +333,18 @@
calendarStore = self._sqlCalendarStore
# Provision the home and calendar now
- txn = calendarStore.newTransaction()
- home = yield txn.homeWithUID(ECALENDARTYPE, "uid1", create=True)
+ transaction = calendarStore.newTransaction()
+ home = yield transaction.homeWithUID(ECALENDARTYPE, "uid1", create=True)
self.assertNotEqual(home, None)
cal = yield home.calendarWithName("calendar")
self.assertNotEqual(cal, None)
- yield txn.commit()
+ yield transaction.commit()
- txn1 = calendarStore.newTransaction()
- txn2 = calendarStore.newTransaction()
+ transaction1 = calendarStore.newTransaction()
+ transaction2 = calendarStore.newTransaction()
- home1 = yield txn1.homeWithUID(ECALENDARTYPE, "uid1", create=True)
- home2 = yield txn2.homeWithUID(ECALENDARTYPE, "uid1", create=True)
+ home1 = yield transaction1.homeWithUID(ECALENDARTYPE, "uid1", create=True)
+ home2 = yield transaction2.homeWithUID(ECALENDARTYPE, "uid1", create=True)
cal1 = yield home1.calendarWithName("calendar")
cal2 = yield home2.calendarWithName("calendar")
@@ -391,7 +391,7 @@
"END:VEVENT\r\n"
"END:VCALENDAR\r\n"
))
- yield txn1.commit()
+ yield transaction1.commit()
d1 = _defer1()
@inlineCallbacks
@@ -436,7 +436,7 @@
"END:VEVENT\r\n"
"END:VCALENDAR\r\n"
))
- yield txn2.commit()
+ yield transaction2.commit()
d2 = _defer2()
yield d1
@@ -447,8 +447,8 @@
calendarStore = self._sqlCalendarStore
# Provision the home and calendar now
- txn = calendarStore.newTransaction()
- home = yield txn.homeWithUID(ECALENDARTYPE, "uid1", create=True)
+ transaction = calendarStore.newTransaction()
+ home = yield transaction.homeWithUID(ECALENDARTYPE, "uid1", create=True)
cal = yield home.calendarWithName("calendar")
cal._created = "2011-02-05 11:22:47"
cal._modified = "2011-02-06 11:22:47"
@@ -471,26 +471,26 @@
calendarStore = self._sqlCalendarStore
- txn1 = calendarStore.newTransaction()
- txn2 = calendarStore.newTransaction()
+ transaction1 = calendarStore.newTransaction()
+ transaction2 = calendarStore.newTransaction()
- notification_uid1_1 = yield txn1.notificationsWithUID(
+ notification_uid1_1 = yield transaction1.notificationsWithUID(
"uid1",
)
@inlineCallbacks
def _defer_notification_uid1_2():
- notification_uid1_2 = yield txn2.notificationsWithUID(
+ notification_uid1_2 = yield transaction2.notificationsWithUID(
"uid1",
)
- yield txn2.commit()
+ yield transaction2.commit()
returnValue(notification_uid1_2)
d1 = _defer_notification_uid1_2()
@inlineCallbacks
def _pause_notification_uid1_1():
yield deferLater(reactor, 1.0, lambda : None)
- yield txn1.commit()
+ yield transaction1.commit()
d2 = _pause_notification_uid1_1()
# Now do the concurrent provision attempt
@@ -648,18 +648,18 @@
calendarStore = self._sqlCalendarStore
# Provision the home and calendar now
- txn = calendarStore.newTransaction()
- home = yield txn.homeWithUID(ECALENDARTYPE, "uid1", create=True)
+ transaction = calendarStore.newTransaction()
+ home = yield transaction.homeWithUID(ECALENDARTYPE, "uid1", create=True)
self.assertNotEqual(home, None)
cal = yield home.calendarWithName("calendar")
self.assertNotEqual(cal, None)
- yield txn.commit()
+ yield transaction.commit()
- txn1 = calendarStore.newTransaction()
- txn2 = calendarStore.newTransaction()
+ transaction1 = calendarStore.newTransaction()
+ transaction2 = calendarStore.newTransaction()
- home1 = yield txn1.homeWithUID(ECALENDARTYPE, "uid1", create=True)
- home2 = yield txn2.homeWithUID(ECALENDARTYPE, "uid1", create=True)
+ home1 = yield transaction1.homeWithUID(ECALENDARTYPE, "uid1", create=True)
+ home2 = yield transaction2.homeWithUID(ECALENDARTYPE, "uid1", create=True)
shares1 = yield home1.retrieveOldShares()
shares2 = yield home2.retrieveOldShares()
@@ -675,13 +675,13 @@
@inlineCallbacks
def _defer1():
yield shares1.addOrUpdateRecord(record)
- yield txn1.commit()
+ yield transaction1.commit()
d1 = _defer1()
@inlineCallbacks
def _defer2():
yield shares2.addOrUpdateRecord(record)
- yield txn2.commit()
+ yield transaction2.commit()
d2 = _defer2()
yield d1
Modified: CalendarServer/branches/users/cdaboo/implicituidrace/txdav/carddav/datastore/sql.py
===================================================================
--- CalendarServer/branches/users/cdaboo/implicituidrace/txdav/carddav/datastore/sql.py 2011-10-03 14:58:12 UTC (rev 8137)
+++ CalendarServer/branches/users/cdaboo/implicituidrace/txdav/carddav/datastore/sql.py 2011-10-03 15:27:22 UTC (rev 8138)
@@ -109,22 +109,22 @@
yield Delete(
From=ahm,
Where=ahm.RESOURCE_ID == self._resourceID
- ).on(self._txn)
+ ).on(self._transaction)
yield Delete(
From=ab,
Where=ab.ADDRESSBOOK_HOME_RESOURCE_ID == self._resourceID
- ).on(self._txn)
+ ).on(self._transaction)
yield Delete(
From=aor,
Where=aor.ADDRESSBOOK_HOME_RESOURCE_ID == self._resourceID
- ).on(self._txn)
+ ).on(self._transaction)
yield Delete(
From=ah,
Where=ah.RESOURCE_ID == self._resourceID
- ).on(self._txn)
+ ).on(self._transaction)
yield self._cacher.delete(str(self._ownerUID))
@@ -272,7 +272,7 @@
self._size = len(componentText)
# Special - if migrating we need to preserve the original md5
- if self._txn._migrating and hasattr(component, "md5"):
+ if self._transaction._migrating and hasattr(component, "md5"):
self._md5 = component.md5
if inserting:
@@ -286,7 +286,7 @@
Return=(ao.RESOURCE_ID,
ao.CREATED,
ao.MODIFIED)
- ).on(self._txn))[0]
+ ).on(self._transaction))[0]
else:
self._modified = (yield Update(
{ao.VCARD_TEXT: componentText,
@@ -294,7 +294,7 @@
ao.MD5: self._md5,
ao.MODIFIED: utcNowSQL},
Where=ao.RESOURCE_ID == self._resourceID,
- Return=ao.MODIFIED).on(self._txn))[0][0]
+ Return=ao.MODIFIED).on(self._transaction))[0][0]
@inlineCallbacks
Modified: CalendarServer/branches/users/cdaboo/implicituidrace/txdav/carddav/datastore/test/common.py
===================================================================
--- CalendarServer/branches/users/cdaboo/implicituidrace/txdav/carddav/datastore/test/common.py 2011-10-03 14:58:12 UTC (rev 8137)
+++ CalendarServer/branches/users/cdaboo/implicituidrace/txdav/carddav/datastore/test/common.py 2011-10-03 15:27:22 UTC (rev 8138)
@@ -192,9 +192,9 @@
The transactions generated by the addressbook store provide
L{IAddressBookStoreTransaction} and its required attributes.
"""
- txn = self.transactionUnderTest()
- self.assertProvides(ICommonTransaction, txn)
- self.assertProvides(IAddressBookTransaction, txn)
+ transaction = self.transactionUnderTest()
+ self.assertProvides(ICommonTransaction, transaction)
+ self.assertProvides(IAddressBookTransaction, transaction)
@inlineCallbacks
@@ -253,8 +253,8 @@
L{IAddressBookStoreTransaction.addressbookHomeWithUID} should return C{None}
when asked for a non-existent addressbook home.
"""
- txn = self.transactionUnderTest()
- self.assertEquals((yield txn.addressbookHomeWithUID("xyzzy")), None)
+ transaction = self.transactionUnderTest()
+ self.assertEquals((yield transaction.addressbookHomeWithUID("xyzzy")), None)
@inlineCallbacks
@@ -292,7 +292,7 @@
positiveAssertions()
# FIXME: revert
# FIXME: test for multiple renames
- # FIXME: test for conflicting renames (a->b, c->a in the same txn)
+ # FIXME: test for conflicting renames (a->b, c->a in the same transaction)
@inlineCallbacks
@@ -782,23 +782,23 @@
L{IAddressBookStoreTransaction.addressbookHomeWithUID} with
C{create=True} will create an addressbook home that doesn't exist yet.
"""
- txn = self.transactionUnderTest()
+ transaction = self.transactionUnderTest()
noHomeUID = "xyzzy"
- addressbookHome = yield txn.addressbookHomeWithUID(
+ addressbookHome = yield transaction.addressbookHomeWithUID(
noHomeUID,
create=True
)
@inlineCallbacks
- def readOtherTxn():
- otherTxn = self.savedStore.newTransaction()
- self.addCleanup(otherTxn.commit)
- returnValue((yield otherTxn.addressbookHomeWithUID(noHomeUID)))
+ def readOtherTransaction():
+ otherTransaction = self.savedStore.newTransaction()
+ self.addCleanup(otherTransaction.commit)
+ returnValue((yield otherTransaction.addressbookHomeWithUID(noHomeUID)))
self.assertProvides(IAddressBookHome, addressbookHome)
# A concurrent transaction shouldn't be able to read it yet:
- self.assertIdentical((yield readOtherTxn()), None)
+ self.assertIdentical((yield readOtherTransaction()), None)
yield self.commit()
# But once it's committed, other transactions should see it.
- self.assertProvides(IAddressBookHome, (yield readOtherTxn()))
+ self.assertProvides(IAddressBookHome, (yield readOtherTransaction()))
@inlineCallbacks
@@ -966,17 +966,17 @@
"""
# create some additional addressbook homes
additionalUIDs = set('alpha-uid home2 home3 beta-uid'.split())
- txn = self.transactionUnderTest()
+ transaction = self.transactionUnderTest()
for name in additionalUIDs:
- yield txn.addressbookHomeWithUID(name, create=True)
+ yield transaction.addressbookHomeWithUID(name, create=True)
yield self.commit()
foundUIDs = set([])
- lastTxn = None
- for txn, home in (yield self.storeUnderTest().eachAddressbookHome()):
- self.addCleanup(txn.commit)
+ lastTransaction = None
+ for transaction, home in (yield self.storeUnderTest().eachAddressbookHome()):
+ self.addCleanup(transaction.commit)
foundUIDs.add(home.uid())
- self.assertNotIdentical(lastTxn, txn)
- lastTxn = txn
+ self.assertNotIdentical(lastTransaction, transaction)
+ lastTransaction = transaction
requiredUIDs = set([
uid for uid in self.requirements
if self.requirements[uid] is not None
Modified: CalendarServer/branches/users/cdaboo/implicituidrace/txdav/carddav/datastore/test/test_file.py
===================================================================
--- CalendarServer/branches/users/cdaboo/implicituidrace/txdav/carddav/datastore/test/test_file.py 2011-10-03 14:58:12 UTC (rev 8137)
+++ CalendarServer/branches/users/cdaboo/implicituidrace/txdav/carddav/datastore/test/test_file.py 2011-10-03 15:27:22 UTC (rev 8138)
@@ -63,7 +63,7 @@
test.notifierFactory = StubNotifierFactory()
test.addressbookStore = AddressBookStore(storeRootPath, test.notifierFactory)
- test.txn = test.addressbookStore.newTransaction(test.id() + " (old)")
+ test.transaction = test.addressbookStore.newTransaction(test.id() + " (old)")
assert test.addressbookStore is not None, "No addressbook store?"
@@ -71,7 +71,7 @@
@inlineCallbacks
def setUpHome1(test):
setUpAddressBookStore(test)
- test.home1 = yield test.txn.addressbookHomeWithUID("home1")
+ test.home1 = yield test.transaction.addressbookHomeWithUID("home1")
assert test.home1 is not None, "No addressbook home?"
@@ -195,9 +195,9 @@
index = addressbook._index
self.assertEquals(set((yield index.addressbookObjects())),
set((yield addressbook.addressbookObjects())))
- yield self.txn.commit()
- self.txn = self.addressbookStore.newTransaction(self.id())
- self.home1 = yield self.txn.addressbookHomeWithUID("home1")
+ yield self.transaction.commit()
+ self.transaction = self.addressbookStore.newTransaction(self.id())
+ self.home1 = yield self.transaction.addressbookHomeWithUID("home1")
addressbook = yield self.home1.addressbookWithName("addressbook2")
# FIXME: we should be curating our own index here, but in order to fix
# that the code in the old implicit scheduler needs to change. This
@@ -279,7 +279,7 @@
"""
self.addressbook1.removeAddressBookObjectWithName("2.vcf")
self.failUnless(self.addressbook1._path.child("2.vcf").exists())
- self.txn.commit()
+ self.transaction.commit()
self.failIf(self.addressbook1._path.child("2.vcf").exists())
@@ -303,8 +303,8 @@
Re-read the (committed) home1 and addressbook1 objects in a new
transaction.
"""
- self.txn = self.addressbookStore.newTransaction(self.id())
- self.home1 = yield self.txn.addressbookHomeWithUID("home1")
+ self.transaction = self.addressbookStore.newTransaction(self.id())
+ self.home1 = yield self.transaction.addressbookHomeWithUID("home1")
self.addressbook1 = yield self.home1.addressbookWithName("addressbook_1")
@@ -316,13 +316,13 @@
"""
# Make sure that the addressbook home is actually committed; rolling back
# addressbook home creation will remove the whole directory.
- yield self.txn.commit()
+ yield self.transaction.commit()
yield self._refresh()
self.addressbook1.createAddressBookObjectWithName(
"sample.vcf",
VComponent.fromString(vcard4_text)
)
- yield self.txn.abort()
+ yield self.transaction.abort()
yield self._refresh()
self.assertIdentical(
(yield self.addressbook1.addressbookObjectWithName("sample.vcf")),
@@ -340,8 +340,8 @@
"""
def fail():
raise RuntimeError("oops")
- self.txn.addOperation(fail, "dummy failing operation")
- self.assertRaises(RuntimeError, self.txn.commit)
+ self.transaction.addOperation(fail, "dummy failing operation")
+ self.assertRaises(RuntimeError, self.transaction.commit)
yield self._refresh()
@@ -381,7 +381,7 @@
modifiedComponent,
(yield self.addressbook1.addressbookObjectWithName("1.vcf")).component()
)
- self.txn.commit()
+ self.transaction.commit()
@featureUnimplemented
Modified: CalendarServer/branches/users/cdaboo/implicituidrace/txdav/carddav/datastore/test/test_sql.py
===================================================================
--- CalendarServer/branches/users/cdaboo/implicituidrace/txdav/carddav/datastore/test/test_sql.py 2011-10-03 14:58:12 UTC (rev 8137)
+++ CalendarServer/branches/users/cdaboo/implicituidrace/txdav/carddav/datastore/test/test_sql.py 2011-10-03 15:27:22 UTC (rev 8138)
@@ -53,11 +53,11 @@
@inlineCallbacks
def populate(self):
- populateTxn = self.storeUnderTest().newTransaction()
+ populateTransaction = self.storeUnderTest().newTransaction()
for homeUID in self.requirements:
addressbooks = self.requirements[homeUID]
if addressbooks is not None:
- home = yield populateTxn.addressbookHomeWithUID(homeUID, True)
+ home = yield populateTransaction.addressbookHomeWithUID(homeUID, True)
# We don't want the default addressbook to appear unless it's
# explicitly listed.
yield home.removeAddressBookWithName("addressbook")
@@ -72,7 +72,7 @@
objectName, VCard.fromString(objData)
)
- yield populateTxn.commit()
+ yield populateTransaction.commit()
self.notifierFactory.reset()
@@ -125,9 +125,9 @@
"""
setUpAddressBookStore(self)
fileStore = self.addressbookStore
- txn = fileStore.newTransaction()
- self.addCleanup(txn.commit)
- return txn
+ transaction = fileStore.newTransaction()
+ self.addCleanup(transaction.commit)
+ return transaction
@inlineCallbacks
@@ -222,18 +222,18 @@
addressbookStore = yield buildStore(self, self.notifierFactory)
# Provision the home and addressbook now
- txn = addressbookStore.newTransaction()
- home = yield txn.homeWithUID(EADDRESSBOOKTYPE, "uid1", create=True)
+ transaction = addressbookStore.newTransaction()
+ home = yield transaction.homeWithUID(EADDRESSBOOKTYPE, "uid1", create=True)
self.assertNotEqual(home, None)
adbk = yield home.addressbookWithName("addressbook")
self.assertNotEqual(adbk, None)
- yield txn.commit()
+ yield transaction.commit()
- txn1 = addressbookStore.newTransaction()
- txn2 = addressbookStore.newTransaction()
+ transaction1 = addressbookStore.newTransaction()
+ transaction2 = addressbookStore.newTransaction()
- home1 = yield txn1.homeWithUID(EADDRESSBOOKTYPE, "uid1", create=True)
- home2 = yield txn2.homeWithUID(EADDRESSBOOKTYPE, "uid1", create=True)
+ home1 = yield transaction1.homeWithUID(EADDRESSBOOKTYPE, "uid1", create=True)
+ home2 = yield transaction2.homeWithUID(EADDRESSBOOKTYPE, "uid1", create=True)
adbk1 = yield home1.addressbookWithName("addressbook")
adbk2 = yield home2.addressbookWithName("addressbook")
@@ -254,7 +254,7 @@
END:VCARD
""".replace("\n", "\r\n")
))
- yield txn1.commit() # FIXME: CONCURRENT
+ yield transaction1.commit() # FIXME: CONCURRENT
d1 = _defer1()
@inlineCallbacks
@@ -273,7 +273,7 @@
END:VCARD
""".replace("\n", "\r\n")
))
- yield txn2.commit() # FIXME: CONCURRENT
+ yield transaction2.commit() # FIXME: CONCURRENT
d2 = _defer2()
yield d1
Modified: CalendarServer/branches/users/cdaboo/implicituidrace/txdav/common/datastore/file.py
===================================================================
--- CalendarServer/branches/users/cdaboo/implicituidrace/txdav/common/datastore/file.py 2011-10-03 14:58:12 UTC (rev 8137)
+++ CalendarServer/branches/users/cdaboo/implicituidrace/txdav/common/datastore/file.py 2011-10-03 15:27:22 UTC (rev 8138)
@@ -149,9 +149,9 @@
uid = actualHome.basename()
if not isValidName(uid):
continue
- txn = self.newTransaction("enumerate home %r" % (uid,))
- home = txn.homeWithUID(storeType, uid, False)
- yield (txn, home)
+ transaction = self.newTransaction("enumerate home %r" % (uid,))
+ home = transaction.homeWithUID(storeType, uid, False)
+ yield (transaction, home)
def eachCalendarHome(self):
@@ -270,12 +270,12 @@
@classmethod
- def homeWithUID(cls, txn, uid, create=False, withNotifications=False):
+ def homeWithUID(cls, transaction, uid, create=False, withNotifications=False):
assert len(uid) >= 4
childPathSegments = []
- childPathSegments.append(txn._dataStore._path.child(cls._topPath))
+ childPathSegments.append(transaction._dataStore._path.child(cls._topPath))
childPathSegments.append(childPathSegments[-1].child(UIDPATH))
childPathSegments.append(childPathSegments[-1].child(uid[0:2]))
childPathSegments.append(childPathSegments[-1].child(uid[2:4]))
@@ -308,24 +308,24 @@
# do this _after_ all other file operations
home._path = childPath
return lambda : None
- txn.addOperation(do, "create home UID %r" % (uid,))
+ transaction.addOperation(do, "create home UID %r" % (uid,))
elif not childPath.isdir():
return None
else:
homePath = childPath
- if txn._notifierFactory:
- notifiers = (txn._notifierFactory.newNotifier(id=uid,
+ if transaction._notifierFactory:
+ notifiers = (transaction._notifierFactory.newNotifier(id=uid,
prefix=cls._notifierPrefix),)
else:
notifiers = None
- home = cls(uid, homePath, txn._dataStore, txn, notifiers)
+ home = cls(uid, homePath, transaction._dataStore, transaction, notifiers)
if creating:
home.createdHome()
if withNotifications:
- txn.notificationsWithUID(uid, home)
+ transaction.notificationsWithUID(uid, home)
return home
@@ -1007,18 +1007,18 @@
self._objectResourceClass = NotificationObject
@classmethod
- def notificationsFromHome(cls, txn, home):
+ def notificationsFromHome(cls, transaction, home):
notificationCollectionName = "notification"
if not home._path.child(notificationCollectionName).isdir():
- notifications = cls._create(txn, home, notificationCollectionName)
+ notifications = cls._create(transaction, home, notificationCollectionName)
else:
notifications = cls(notificationCollectionName, home)
return notifications
@classmethod
- def _create(cls, txn, home, collectionName):
+ def _create(cls, transaction, home, collectionName):
# FIXME: this is a near-copy of CommonHome.createChildWithName.
temporary = hidden(home._path.child(collectionName).temporarySibling())
temporary.createDirectory()
@@ -1043,7 +1043,7 @@
# Return undo
return lambda: home._path.child(collectionName).remove()
- txn.addOperation(do, "create notification child %r" %
+ transaction.addOperation(do, "create notification child %r" %
(collectionName,))
props = c.properties()
props[PropertyName(*ResourceType.qname())] = c.resourceType()
Modified: CalendarServer/branches/users/cdaboo/implicituidrace/txdav/common/datastore/sql.py
===================================================================
--- CalendarServer/branches/users/cdaboo/implicituidrace/txdav/common/datastore/sql.py 2011-10-03 14:58:12 UTC (rev 8137)
+++ CalendarServer/branches/users/cdaboo/implicituidrace/txdav/common/datastore/sql.py 2011-10-03 15:27:22 UTC (rev 8138)
@@ -103,7 +103,7 @@
Shared logic for SQL-based data stores, between calendar and addressbook
storage.
- @ivar sqlTxnFactory: A 0-arg factory callable that produces an
+ @ivar sqlTransactionFactory: A 0-arg factory callable that produces an
L{IAsyncTransaction}.
@ivar notifierFactory: a L{twistedcaldav.notify.NotifierFactory} (or
@@ -129,12 +129,12 @@
implements(ICalendarStore)
- def __init__(self, sqlTxnFactory, notifierFactory, attachmentsPath,
+ def __init__(self, sqlTransactionFactory, notifierFactory, attachmentsPath,
enableCalendars=True, enableAddressBooks=True,
label="unlabeled", quota=(2 ** 20)):
assert enableCalendars or enableAddressBooks
- self.sqlTxnFactory = sqlTxnFactory
+ self.sqlTransactionFactory = sqlTransactionFactory
self.notifierFactory = notifierFactory
self.attachmentsPath = attachmentsPath
self.enableCalendars = enableCalendars
@@ -164,7 +164,7 @@
"""
return CommonStoreTransaction(
self,
- self.sqlTxnFactory(),
+ self.sqlTransactionFactory(),
self.enableCalendars,
self.enableAddressBooks,
None if migrating else self.notifierFactory,
@@ -215,7 +215,7 @@
id = 0
- def __init__(self, store, sqlTxn,
+ def __init__(self, store, sqlTransaction,
enableCalendars, enableAddressBooks,
notifierFactory, label, migrating=False):
self._store = store
@@ -223,6 +223,7 @@
self._addressbookHomes = {}
self._notificationHomes = {}
self._postCommitOperations = []
+ self._postAbortOperations = []
self._notifierFactory = notifierFactory
self._label = label
self._migrating = migrating
@@ -245,9 +246,9 @@
from txdav.carddav.datastore.sql import AddressBookHome
CommonStoreTransaction._homeClass[ECALENDARTYPE] = CalendarHome
CommonStoreTransaction._homeClass[EADDRESSBOOKTYPE] = AddressBookHome
- self._sqlTxn = sqlTxn
- self.paramstyle = sqlTxn.paramstyle
- self.dialect = sqlTxn.dialect
+ self._sqlTransaction = sqlTransaction
+ self.paramstyle = sqlTransaction.paramstyle
+ self.dialect = sqlTransaction.dialect
# FIXME: want to pass a "debug" option in to enable this via config - off for now
self._stats = None #TransactionStatsCollector()
@@ -330,6 +331,13 @@
self._postCommitOperations.append(operation)
+ def postAbort(self, operation):
+ """
+ Run things after C{abort}.
+ """
+ self._postAbortOperations.append(operation)
+
+
_savepointCounter = 0
def _savepoint(self):
@@ -363,7 +371,7 @@
# TODO: we should really have a list of acceptable exceptions for
# failure and not blanket catch, but that involves more knowledge of the
# database driver in use than we currently possess at this layer.
- block = self._sqlTxn.commandBlock()
+ block = self._sqlTransaction.commandBlock()
sp = self._savepoint()
failuresToMaybeLog = []
triesLeft = retries + 1
@@ -383,7 +391,7 @@
# they actually get done, even if they didn't actually
# block or yield to wait for them! (c.f. property
# store writes.)
- newBlock = self._sqlTxn.commandBlock()
+ newBlock = self._sqlTransaction.commandBlock()
block.end()
block = newBlock
sp = self._savepoint()
@@ -415,7 +423,7 @@
"""
if self._stats:
self._stats.startStatement(a[0])
- results = (yield self._sqlTxn.execSQL(*a, **kw))
+ results = (yield self._sqlTransaction.execSQL(*a, **kw))
if self._stats:
self._stats.endStatement()
returnValue(results)
@@ -450,14 +458,18 @@
if self._stats:
self._stats.printReport()
- return self._sqlTxn.commit().addCallback(postCommit)
+ return self._sqlTransaction.commit().addCallback(postCommit)
def abort(self):
"""
Abort the transaction.
"""
- return self._sqlTxn.abort()
+ def postAbort(ignored):
+ for operation in self._postAbortOperations:
+ operation()
+ return ignored
+ return self._sqlTransaction.abort().addCallback(postAbort)
def _oldEventsBase(limited): #@NoSelf
@@ -598,7 +610,7 @@
_cacher = None # Initialize in derived classes
def __init__(self, transaction, ownerUID, notifiers):
- self._txn = transaction
+ self._transaction = transaction
self._ownerUID = ownerUID
self._resourceID = None
self._shares = None
@@ -617,7 +629,7 @@
def quotaAllowedBytes(self):
- return self._txn.store().quota
+ return self._transaction.store().quota
@classproperty
@@ -643,7 +655,7 @@
result = yield self._cacher.get(self._ownerUID)
if result is None:
result = yield self._resourceIDFromOwnerQuery.on(
- self._txn, ownerUID=self._ownerUID)
+ self._transaction, ownerUID=self._ownerUID)
if result and not no_cache:
yield self._cacher.set(self._ownerUID, result)
@@ -657,14 +669,14 @@
@classmethod
@inlineCallbacks
- def homeWithUID(cls, txn, uid, create=False):
- if txn._notifierFactory:
- notifiers = (txn._notifierFactory.newNotifier(
+ def homeWithUID(cls, transaction, uid, create=False):
+ if transaction._notifierFactory:
+ notifiers = (transaction._notifierFactory.newNotifier(
id=uid, prefix=cls._notifierPrefix
),)
else:
notifiers = None
- homeObject = cls(txn, uid, notifiers)
+ homeObject = cls(transaction, uid, notifiers)
homeObject = (yield homeObject.initFromStore())
if homeObject is not None:
returnValue(homeObject)
@@ -675,31 +687,31 @@
# Use savepoint so we can do a partial rollback if there is a race condition
# where this row has already been inserted
savepoint = SavepointAction("homeWithUID")
- yield savepoint.acquire(txn)
+ yield savepoint.acquire(transaction)
try:
resourceid = (yield Insert(
{cls._homeSchema.OWNER_UID: uid},
- Return=cls._homeSchema.RESOURCE_ID).on(txn))[0][0]
+ Return=cls._homeSchema.RESOURCE_ID).on(transaction))[0][0]
yield Insert(
- {cls._homeMetaDataSchema.RESOURCE_ID: resourceid}).on(txn)
+ {cls._homeMetaDataSchema.RESOURCE_ID: resourceid}).on(transaction)
except Exception: # FIXME: Really want to trap the pg.DatabaseError but in a non-DB specific manner
- yield savepoint.rollback(txn)
+ yield savepoint.rollback(transaction)
# Retry the query - row may exist now, if not re-raise
- homeObject = cls(txn, uid, notifiers)
+ homeObject = cls(transaction, uid, notifiers)
homeObject = (yield homeObject.initFromStore())
if homeObject:
returnValue(homeObject)
else:
raise
else:
- yield savepoint.release(txn)
+ yield savepoint.release(transaction)
# Note that we must not cache the owner_uid->resource_id
# mapping in _cacher when creating as we don't want that to appear
# until AFTER the commit
- home = cls(txn, uid, notifiers)
+ home = cls(transaction, uid, notifiers)
home = (yield home.initFromStore(no_cache=True))
yield home.createdHome()
returnValue(home)
@@ -707,8 +719,8 @@
@classmethod
@inlineCallbacks
- def homeUIDWithResourceID(cls, txn, rid):
- rows = (yield cls._ownerFromFromResourceID.on(txn, resourceID=rid))
+ def homeUIDWithResourceID(cls, transaction, rid):
+ rows = (yield cls._ownerFromFromResourceID.on(transaction, resourceID=rid))
if rows:
returnValue(rows[0][0])
else:
@@ -729,7 +741,7 @@
def transaction(self):
- return self._txn
+ return self._transaction
def retrieveOldShares(self):
@@ -882,7 +894,7 @@
@inlineCallbacks
def syncToken(self):
revision = (yield self._syncTokenQuery.on(
- self._txn, resourceID=self._resourceID))[0][0]
+ self._transaction, resourceID=self._resourceID))[0][0]
returnValue("%s_%s" % (self._resourceID, revision))
@@ -914,7 +926,7 @@
wasdeleted
)
for path, collection, name, wasdeleted in
- (yield self._changesQuery.on(self._txn,
+ (yield self._changesQuery.on(self._transaction,
resourceID=self._resourceID,
token=token))
]
@@ -947,7 +959,7 @@
Where=(bind.RESOURCE_NAME == sharename).And(
bind.HOME_RESOURCE_ID == self._resourceID).And(
bind.BIND_MODE != _BIND_MODE_OWN)
- ).on(self._txn))[0][0]
+ ).on(self._transaction))[0][0]
results = [
(
sharename,
@@ -958,7 +970,7 @@
(yield Select([rev.RESOURCE_NAME, rev.DELETED],
From=rev,
Where=(rev.REVISION > sharetoken).And(
- rev.RESOURCE_ID == shareID)).on(self._txn))
+ rev.RESOURCE_ID == shareID)).on(self._transaction))
if name
]
@@ -979,7 +991,7 @@
def _loadPropertyStore(self):
props = yield PropertyStore.load(
self.uid(),
- self._txn,
+ self._transaction,
self._resourceID
)
self._propertyStore = props
@@ -1031,7 +1043,7 @@
in the named child collections.
"""
results = []
- rows = (yield self._resourceByUIDQuery.on(self._txn, uid=uid,
+ rows = (yield self._resourceByUIDQuery.on(self._transaction, uid=uid,
resourceID=self._resourceID))
if rows:
for childID, objectID in rows:
@@ -1056,7 +1068,7 @@
def quotaUsedBytes(self):
if self._quotaUsedBytes is None:
self._quotaUsedBytes = (yield self._quotaQuery.on(
- self._txn, resourceID=self._resourceID))[0][0]
+ self._transaction, resourceID=self._resourceID))[0][0]
returnValue(self._quotaUsedBytes)
@@ -1092,18 +1104,18 @@
update' because a race also exists in the 'update ... x = x + 1' case as
seen via unit tests.
"""
- yield self._preLockResourceIDQuery.on(self._txn,
+ yield self._preLockResourceIDQuery.on(self._transaction,
resourceID=self._resourceID)
self._quotaUsedBytes = (yield self._increaseQuotaQuery.on(
- self._txn, delta=delta, resourceID=self._resourceID))[0][0]
+ self._transaction, delta=delta, resourceID=self._resourceID))[0][0]
# Double check integrity
if self._quotaUsedBytes < 0:
log.error(
"Fixing quota adjusted below zero to %s by change amount %s" %
(self._quotaUsedBytes, delta,))
- yield self._resetQuotaQuery.on(self._txn,
+ yield self._resetQuotaQuery.on(self._transaction,
resourceID=self._resourceID)
self._quotaUsedBytes = 0
@@ -1137,7 +1149,7 @@
"""
if self._notifiers:
for notifier in self._notifiers:
- self._txn.postCommit(notifier.notify)
+ self._transaction.postCommit(notifier.notify)
@@ -1162,7 +1174,7 @@
def syncToken(self):
if self._syncTokenRevision is None:
self._syncTokenRevision = (yield self._childSyncTokenQuery.on(
- self._txn, resourceID=self._resourceID))[0][0]
+ self._transaction, resourceID=self._resourceID))[0][0]
returnValue(("%s_%s" % (self._resourceID, self._syncTokenRevision,)))
@@ -1188,7 +1200,7 @@
(name if name else "", deleted)
for name, deleted in
(yield self._objectNamesSinceRevisionQuery.on(
- self._txn, revision=token, resourceID=self._resourceID))
+ self._transaction, revision=token, resourceID=self._resourceID))
]
results.sort(key=lambda x:x[1])
@@ -1229,10 +1241,10 @@
@inlineCallbacks
def _initSyncToken(self):
yield self._removeDeletedRevision.on(
- self._txn, homeID=self._home._resourceID, collectionName=self._name
+ self._transaction, homeID=self._home._resourceID, collectionName=self._name
)
self._syncTokenRevision = (yield (
- self._addNewRevision.on(self._txn, homeID=self._home._resourceID,
+ self._addNewRevision.on(self._transaction, homeID=self._home._resourceID,
resourceID=self._resourceID,
collectionName=self._name)))[0][0]
@@ -1256,7 +1268,7 @@
@inlineCallbacks
def _renameSyncToken(self):
self._syncTokenRevision = (yield self._renameSyncTokenQuery.on(
- self._txn, name=self._name, resourceID=self._resourceID))[0][0]
+ self._transaction, name=self._name, resourceID=self._resourceID))[0][0]
@classproperty
@@ -1303,7 +1315,7 @@
@inlineCallbacks
def _deletedSyncToken(self, sharedRemoval=False):
# Remove all child entries
- yield self._deleteSyncTokenQuery.on(self._txn,
+ yield self._deleteSyncTokenQuery.on(self._transaction,
homeID=self._home._resourceID,
resourceID=self._resourceID)
@@ -1312,11 +1324,11 @@
# non-shared collection, then we need to mark all collections
# with the resource-id as being deleted to account for direct shares.
if sharedRemoval:
- yield self._sharedRemovalQuery.on(self._txn,
+ yield self._sharedRemovalQuery.on(self._transaction,
homeID=self._home._resourceID,
resourceID=self._resourceID)
else:
- yield self._unsharedRemovalQuery.on(self._txn,
+ yield self._unsharedRemovalQuery.on(self._transaction,
resourceID=self._resourceID)
self._syncTokenRevision = None
@@ -1386,11 +1398,11 @@
if action == "delete":
self._syncTokenRevision = (
yield self._deleteBumpTokenQuery.on(
- self._txn, resourceID=self._resourceID, name=name))[0][0]
+ self._transaction, resourceID=self._resourceID, name=name))[0][0]
elif action == "update":
self._syncTokenRevision = (
yield self._updateBumpTokenQuery.on(
- self._txn, resourceID=self._resourceID, name=name))[0][0]
+ self._transaction, resourceID=self._resourceID, name=name))[0][0]
elif action == "insert":
# Note that an "insert" may happen for a resource that previously
# existed and then was deleted. In that case an entry in the
@@ -1399,16 +1411,16 @@
found = bool( (
yield self._insertFindPreviouslyNamedQuery.on(
- self._txn, resourceID=self._resourceID, name=name)) )
+ self._transaction, resourceID=self._resourceID, name=name)) )
if found:
self._syncTokenRevision = (
yield self._updatePreviouslyNamedQuery.on(
- self._txn, resourceID=self._resourceID, name=name)
+ self._transaction, resourceID=self._resourceID, name=name)
)[0][0]
else:
self._syncTokenRevision = (
yield self._completelyNewRevisionQuery.on(
- self._txn, homeID=self._home._resourceID,
+ self._transaction, homeID=self._home._resourceID,
resourceID=self._resourceID, name=name)
)[0][0]
self._maybeNotify()
@@ -1504,10 +1516,10 @@
# FIXME: tests don't cover this as directly as they should.
if owned:
rows = yield cls._ownedChildListQuery.on(
- home._txn, resourceID=home._resourceID)
+ home._transaction, resourceID=home._resourceID)
else:
rows = yield cls._sharedChildListQuery.on(
- home._txn, resourceID=home._resourceID)
+ home._transaction, resourceID=home._resourceID)
names = [row[0] for row in rows]
returnValue(names)
@@ -1558,12 +1570,12 @@
query = cls._ownedHomeChildrenQuery
else:
query = cls._sharedHomeChildrenQuery
- dataRows = (yield query.on(home._txn, resourceID=home._resourceID))
+ dataRows = (yield query.on(home._transaction, resourceID=home._resourceID))
if dataRows:
# Get property stores for all these child resources (if any found)
propertyStores = (yield PropertyStore.forMultipleResources(
- home.uid(), home._txn,
+ home.uid(), home._transaction,
cls._bindSchema.RESOURCE_ID, cls._bindSchema.HOME_RESOURCE_ID,
home._resourceID
))
@@ -1581,7 +1593,7 @@
And(ownedCond).
And((rev.RESOURCE_NAME != None).Or(rev.DELETED == False)),
GroupBy=rev.RESOURCE_ID
- ).on(home._txn))
+ ).on(home._transaction))
revisions = dict(revisions)
# Create the actual objects merging in properties
@@ -1653,7 +1665,7 @@
query = cls._resourceIDOwnedByHomeByName
else:
query = cls._resourceIDSharedToHomeByName
- data = yield query.on(home._txn,
+ data = yield query.on(home._transaction,
objectName=name, homeID=home._resourceID)
if not data:
returnValue(None)
@@ -1689,7 +1701,7 @@
exists.
"""
data = yield cls._homeChildByIDQuery.on(
- home._txn, resourceID=resourceID, homeID=home._resourceID)
+ home._transaction, resourceID=resourceID, homeID=home._resourceID)
if not data:
returnValue(None)
name, mode = data[0]
@@ -1735,10 +1747,10 @@
# Create and initialize this object, similar to initFromStore
resourceID, _created, _modified = (
- yield cls._insertDefaultHomeChild.on(home._txn))[0]
+ yield cls._insertDefaultHomeChild.on(home._transaction))[0]
# Bind table needs entry
- yield cls._initialOwnerBind.on(home._txn, homeID=home._resourceID,
+ yield cls._initialOwnerBind.on(home._transaction, homeID=home._resourceID,
resourceID=resourceID, name=name)
# Initialize other state
@@ -1777,14 +1789,14 @@
avoid having to do DB queries for those individually later.
"""
self._created, self._modified = (
- yield self._datesByIDQuery.on(self._txn,
+ yield self._datesByIDQuery.on(self._transaction,
resourceID=self._resourceID))[0]
yield self._loadPropertyStore()
@property
- def _txn(self):
- return self._home._txn
+ def _transaction(self):
+ return self._home._transaction
def resourceType(self):
@@ -1834,7 +1846,7 @@
@return: a L{Deferred} which fires when the modification is complete.
"""
oldName = self._name
- yield self._renameQuery.on(self._txn, name=name,
+ yield self._renameQuery.on(self._transaction, name=name,
resourceID=self._resourceID,
homeID=self._home._resourceID)
self._name = name
@@ -1859,7 +1871,7 @@
@inlineCallbacks
def remove(self):
yield self._deletedSyncToken()
- yield self._deleteQuery.on(self._txn, NoSuchHomeChildError,
+ yield self._deleteQuery.on(self._transaction, NoSuchHomeChildError,
resourceID=self._resourceID)
self.properties()._removeResource()
@@ -1898,7 +1910,7 @@
returnValue(self._home._resourceID)
else:
rid = (yield self._ownerHomeFromResourceQuery.on(
- self._txn, resourceID=self._resourceID))[0][0]
+ self._transaction, resourceID=self._resourceID))[0][0]
returnValue(rid)
@@ -1933,7 +1945,7 @@
def listObjectResources(self):
if self._objectNames is None:
rows = yield self._objectResourceNamesQuery.on(
- self._txn, resourceID=self._resourceID)
+ self._transaction, resourceID=self._resourceID)
self._objectNames = sorted([row[0] for row in rows])
returnValue(self._objectNames)
@@ -2006,7 +2018,7 @@
except KeyError:
pass
rows = yield self._resourceNameForUIDQuery.on(
- self._txn, uid=uid, resourceID=self._resourceID)
+ self._transaction, uid=uid, resourceID=self._resourceID)
if rows:
returnValue(rows[0][0])
else:
@@ -2035,7 +2047,7 @@
except KeyError:
pass
rows = yield self._resourceUIDForNameQuery.on(
- self._txn, name=name, resourceID=self._resourceID)
+ self._transaction, name=name, resourceID=self._resourceID)
if rows:
returnValue(rows[0][0])
else:
@@ -2105,7 +2117,7 @@
if props is None:
props = yield PropertyStore.load(
self.ownerHome().uid(),
- self._txn,
+ self._transaction,
self._resourceID
)
self.initPropertyStore(props)
@@ -2173,7 +2185,7 @@
"""
if self._notifiers:
for notifier in self._notifiers:
- self._txn.postCommit(notifier.notify)
+ self._transaction.postCommit(notifier.notify)
@@ -2225,14 +2237,14 @@
# Load from the main table first
dataRows = yield cls._allColumnsWithParent.on(
- parent._txn, parentID=parent._resourceID)
+ parent._transaction, parentID=parent._resourceID)
if dataRows:
# Get property stores for all these child resources (if any found)
if parent.objectResourcesHaveProperties():
propertyStores =(yield PropertyStore.forMultipleResources(
parent._home.uid(),
- parent._txn,
+ parent._transaction,
cls._objectSchema.RESOURCE_ID,
cls._objectSchema.PARENT_RESOURCE_ID,
parent._resourceID
@@ -2325,15 +2337,15 @@
if self._name:
rows = yield self._allWithParentAndName.on(
- self._txn, name=self._name,
+ self._transaction, name=self._name,
parentID=self._parentCollection._resourceID)
elif self._uid:
rows = yield self._allWithParentAndUID.on(
- self._txn, uid=self._uid,
+ self._transaction, uid=self._uid,
parentID=self._parentCollection._resourceID)
elif self._resourceID:
rows = yield self._allWithParentAndID.on(
- self._txn, resourceID=self._resourceID,
+ self._transaction, resourceID=self._resourceID,
parentID=self._parentCollection._resourceID)
if rows:
self._initFromRow(tuple(rows[0]))
@@ -2381,7 +2393,7 @@
if self._parentCollection.objectResourcesHaveProperties():
props = yield PropertyStore.load(
self._parentCollection.ownerHome().uid(),
- self._txn,
+ self._transaction,
self._resourceID,
created=created
)
@@ -2408,8 +2420,8 @@
@property
- def _txn(self):
- return self._parentCollection._txn
+ def _transaction(self):
+ return self._parentCollection._transaction
def setComponent(self, component, inserting=False):
@@ -2434,7 +2446,7 @@
@inlineCallbacks
def remove(self):
- yield self._deleteQuery.on(self._txn, NoSuchObjectResourceError,
+ yield self._deleteQuery.on(self._transaction, NoSuchObjectResourceError,
resourceID=self._resourceID)
self.properties()._removeResource()
@@ -2492,7 +2504,7 @@
def _text(self):
if self._objectText is None:
text = (
- yield self._textByIDQuery.on(self._txn,
+ yield self._textByIDQuery.on(self._transaction,
resourceID=self._resourceID)
)[0][0]
self._objectText = text
@@ -2516,9 +2528,9 @@
_homeSchema = schema.NOTIFICATION_HOME
- def __init__(self, txn, uid, resourceID):
+ def __init__(self, transaction, uid, resourceID):
- self._txn = txn
+ self._transaction = transaction
self._uid = uid
self._resourceID = resourceID
self._notifications = {}
@@ -2527,12 +2539,12 @@
# Make sure we have push notifications setup to push on this collection
# as well as the home it is in
- if txn._notifierFactory:
+ if transaction._notifierFactory:
childID = "%s/%s" % (uid, "notification")
- notifier = txn._notifierFactory.newNotifier(
+ notifier = transaction._notifierFactory.newNotifier(
label="collection",
id=childID,
- prefix=txn._homeClass[txn._primaryHomeType]._notifierPrefix
+ prefix=transaction._homeClass[transaction._primaryHomeType]._notifierPrefix
)
notifier.addID(id=uid)
notifiers = (notifier,)
@@ -2562,8 +2574,8 @@
@classmethod
@inlineCallbacks
- def notificationsWithUID(cls, txn, uid):
- rows = yield cls._resourceIDFromUIDQuery.on(txn, uid=uid)
+ def notificationsWithUID(cls, transaction, uid):
+ rows = yield cls._resourceIDFromUIDQuery.on(transaction, uid=uid)
if rows:
resourceID = rows[0][0]
@@ -2572,17 +2584,17 @@
# Use savepoint so we can do a partial rollback if there is a race condition
# where this row has already been inserted
savepoint = SavepointAction("notificationsWithUID")
- yield savepoint.acquire(txn)
+ yield savepoint.acquire(transaction)
try:
resourceID = str((
- yield cls._provisionNewNotificationsQuery.on(txn, uid=uid)
+ yield cls._provisionNewNotificationsQuery.on(transaction, uid=uid)
)[0][0])
except Exception: # FIXME: Really want to trap the pg.DatabaseError but in a non-DB specific manner
- yield savepoint.rollback(txn)
+ yield savepoint.rollback(transaction)
# Retry the query - row may exist now, if not re-raise
- rows = yield cls._resourceIDFromUIDQuery.on(txn, uid=uid)
+ rows = yield cls._resourceIDFromUIDQuery.on(transaction, uid=uid)
if rows:
resourceID = rows[0][0]
created = False
@@ -2590,9 +2602,9 @@
raise
else:
created = True
- yield savepoint.release(txn)
+ yield savepoint.release(transaction)
- collection = cls(txn, uid, resourceID)
+ collection = cls(transaction, uid, resourceID)
yield collection._loadPropertyStore()
if created:
yield collection._initSyncToken()
@@ -2603,7 +2615,7 @@
def _loadPropertyStore(self):
self._propertyStore = yield PropertyStore.load(
self._uid,
- self._txn,
+ self._transaction,
self._resourceID
)
@@ -2647,7 +2659,7 @@
def listNotificationObjects(self):
if self._notificationNames is None:
rows = yield self._notificationUIDsForHomeQuery.on(
- self._txn, resourceID=self._resourceID)
+ self._transaction, resourceID=self._resourceID)
self._notificationNames = sorted([row[0] for row in rows])
returnValue(self._notificationNames)
@@ -2705,7 +2717,7 @@
@inlineCallbacks
def removeNotificationObjectWithUID(self, uid):
yield self._removeByUIDQuery.on(
- self._txn, uid=uid, resourceID=self._resourceID)
+ self._transaction, uid=uid, resourceID=self._resourceID)
self._notifications.pop(uid, None)
yield self._deleteRevision("%s.xml" % (uid,))
@@ -2723,7 +2735,7 @@
@inlineCallbacks
def _initSyncToken(self):
self._syncTokenRevision = (yield self._initSyncTokenQuery.on(
- self._txn, resourceID=self._resourceID))[0][0]
+ self._transaction, resourceID=self._resourceID))[0][0]
_syncTokenQuery = Select(
@@ -2737,7 +2749,7 @@
if self._syncTokenRevision is None:
self._syncTokenRevision = (
yield self._syncTokenQuery.on(
- self._txn, resourceID=self._resourceID)
+ self._transaction, resourceID=self._resourceID)
)[0][0]
returnValue("%s_%s" % (self._resourceID, self._syncTokenRevision))
@@ -2775,7 +2787,7 @@
"""
if self._notifiers:
for notifier in self._notifiers:
- self._txn.postCommit(notifier.notify)
+ self._transaction.postCommit(notifier.notify)
@classproperty
@@ -2852,14 +2864,14 @@
# Load from the main table first
dataRows = (
- yield cls._allColumnsByHomeIDQuery.on(parent._txn,
+ yield cls._allColumnsByHomeIDQuery.on(parent._transaction,
homeID=parent._resourceID))
if dataRows:
# Get property stores for all these child resources (if any found)
propertyStores =(yield PropertyStore.forMultipleResources(
parent.uid(),
- parent._txn,
+ parent._transaction,
schema.NOTIFICATION.RESOURCE_ID,
schema.NOTIFICATION.NOTIFICATION_HOME_RESOURCE_ID,
parent._resourceID,
@@ -2911,7 +2923,7 @@
@return: L{self} if object exists in the DB, else C{None}
"""
rows = (yield self._oneNotificationQuery.on(
- self._txn, uid=self._uid, homeID=self._home._resourceID))
+ self._transaction, uid=self._uid, homeID=self._home._resourceID))
if rows:
(self._resourceID,
self._md5,
@@ -2936,8 +2948,8 @@
@property
- def _txn(self):
- return self._home._txn
+ def _transaction(self):
+ return self._home._transaction
def notificationCollection(self):
@@ -2993,14 +3005,14 @@
self._size = len(xmldata)
if inserting:
rows = yield self._newNotificationQuery.on(
- self._txn, homeID=self._home._resourceID, uid=uid,
+ self._transaction, homeID=self._home._resourceID, uid=uid,
xmlType=self._xmlType.toxml(), xmlData=xmldata, md5=self._md5
)
self._resourceID, self._created, self._modified = rows[0]
self._loadPropertyStore()
else:
rows = yield self._updateNotificationQuery.on(
- self._txn, homeID=self._home._resourceID, uid=uid,
+ self._transaction, homeID=self._home._resourceID, uid=uid,
xmlType=self._xmlType.toxml(), xmlData=xmldata, md5=self._md5
)
self._modified = rows[0][0]
@@ -3017,7 +3029,7 @@
if self._objectText is None:
self._objectText = (
yield self._xmlDataFromID.on(
- self._txn, resourceID=self._resourceID))[0][0]
+ self._transaction, resourceID=self._resourceID))[0][0]
returnValue(self._objectText)
Modified: CalendarServer/branches/users/cdaboo/implicituidrace/txdav/common/datastore/sql_legacy.py
===================================================================
--- CalendarServer/branches/users/cdaboo/implicituidrace/txdav/common/datastore/sql_legacy.py 2011-10-03 14:58:12 UTC (rev 8137)
+++ CalendarServer/branches/users/cdaboo/implicituidrace/txdav/common/datastore/sql_legacy.py 2011-10-03 15:27:22 UTC (rev 8138)
@@ -132,8 +132,8 @@
@property
- def _txn(self):
- return self._collection._txn
+ def _transaction(self):
+ return self._collection._transaction
def _getHomeWithUID(self, uid):
@@ -184,7 +184,7 @@
def allRecords(self):
values = []
rows = yield self._allRecordsQuery.on(
- self._txn, resourceID=self._collection._resourceID
+ self._transaction, resourceID=self._collection._resourceID
)
for row in rows:
values.append(self._makeInvite(row))
@@ -205,7 +205,7 @@
@inlineCallbacks
def recordForUserID(self, userid):
rows = yield self._inviteForRecipientQuery.on(
- self._txn,
+ self._transaction,
resourceID=self._collection._resourceID,
recipient=userid
)
@@ -227,7 +227,7 @@
@inlineCallbacks
def recordForPrincipalUID(self, principalUID):
rows = yield self._inviteForPrincipalUIDQuery.on(
- self._txn,
+ self._transaction,
resourceID=self._collection._resourceID,
principalUID=principalUID
)
@@ -245,7 +245,7 @@
@inlineCallbacks
def recordForInviteUID(self, inviteUID):
- rows = yield self._inviteForUIDQuery.on(self._txn, uid=inviteUID)
+ rows = yield self._inviteForUIDQuery.on(self._transaction, uid=inviteUID)
returnValue(self._makeInvite(rows[0]) if rows else None)
@@ -345,27 +345,27 @@
"INVALID": _BIND_STATUS_INVALID,
}[record.state]
shareeHome = yield self._getHomeWithUID(record.principalUID)
- rows = yield self._idsForInviteUID.on(self._txn,
+ rows = yield self._idsForInviteUID.on(self._transaction,
inviteuid=record.inviteuid)
if rows:
[[resourceID, homeResourceID]] = rows
yield self._updateBindQuery.on(
- self._txn,
+ self._transaction,
mode=bindMode, status=bindStatus, message=record.summary,
resourceID=resourceID, homeID=homeResourceID
)
yield self._updateInviteQuery.on(
- self._txn, name=record.name, uid=record.inviteuid
+ self._transaction, name=record.name, uid=record.inviteuid
)
else:
yield self._insertInviteQuery.on(
- self._txn, uid=record.inviteuid, name=record.name,
+ self._transaction, uid=record.inviteuid, name=record.name,
homeID=shareeHome._resourceID,
resourceID=self._collection._resourceID,
recipient=record.userid
)
yield self._insertBindQuery.on(
- self._txn,
+ self._transaction,
homeID=shareeHome._resourceID,
resourceID=self._collection._resourceID,
mode=bindMode,
@@ -404,8 +404,8 @@
@inlineCallbacks
def removeRecordForInviteUID(self, inviteUID):
- yield self._deleteBindByUID.on(self._txn, uid=inviteUID)
- yield self._deleteInviteByUID.on(self._txn, uid=inviteUID)
+ yield self._deleteBindByUID.on(self._transaction, uid=inviteUID)
+ yield self._deleteInviteByUID.on(self._transaction, uid=inviteUID)
@@ -422,7 +422,7 @@
_bindSchema = schema.CALENDAR_BIND
def _getHomeWithUID(self, uid):
- return self._txn.calendarHomeWithUID(uid, create=True)
+ return self._transaction.calendarHomeWithUID(uid, create=True)
@@ -439,7 +439,7 @@
_bindSchema = schema.ADDRESSBOOK_BIND
def _getHomeWithUID(self, uid):
- return self._txn.addressbookHomeWithUID(uid, create=True)
+ return self._transaction.addressbookHomeWithUID(uid, create=True)
@@ -457,8 +457,8 @@
@property
- def _txn(self):
- return self._home._txn
+ def _transaction(self):
+ return self._home._transaction
def _getHomeWithUID(self, uid):
@@ -525,13 +525,13 @@
# much simpler anyway; we should just do that.
all = []
shareRows = yield self._allSharedToQuery.on(
- self._txn, homeID=self._home._resourceID)
+ self._transaction, homeID=self._home._resourceID)
for resourceID, resourceName, bindMode, summary in shareRows:
[[ownerHomeID, ownerResourceName]] = yield (
- self._ownerHomeIDAndName.on(self._txn,
+ self._ownerHomeIDAndName.on(self._transaction,
resourceID=resourceID))
[[ownerUID]] = yield self._ownerUIDFromHomeID.on(
- self._txn, homeID=ownerHomeID)
+ self._transaction, homeID=ownerHomeID)
hosturl = '/%s/__uids__/%s/%s' % (
self._urlTopSegment, ownerUID, ownerResourceName
)
@@ -539,7 +539,7 @@
if bindMode != _BIND_MODE_DIRECT:
sharetype = 'I'
[[shareuid]] = yield self._inviteUIDByResourceIDsQuery.on(
- self._txn, resourceID=resourceID,
+ self._transaction, resourceID=resourceID,
homeID=self._home._resourceID
)
else:
@@ -612,7 +612,7 @@
# XXX_BIND.XXX_RESOURCE_NAME.
yield self._updateBindName.on(
- self._txn, localname=record.localname,
+ self._transaction, localname=record.localname,
homeID=self._home._resourceID, resourceID=collectionResourceID
)
elif record.sharetype == 'D':
@@ -621,20 +621,20 @@
# Use savepoint so we can do a partial rollback if there is a race condition
# where this row has already been inserted
savepoint = SavepointAction("addOrUpdateRecord")
- yield savepoint.acquire(self._txn)
+ yield savepoint.acquire(self._transaction)
try:
yield self._acceptDirectShareQuery.on(
- self._txn, homeID=self._home._resourceID,
+ self._transaction, homeID=self._home._resourceID,
resourceID=collectionResourceID, name=record.localname,
message=record.summary
)
except Exception: # FIXME: Really want to trap the pg.DatabaseError but in a non-DB specific manner
- yield savepoint.rollback(self._txn)
+ yield savepoint.rollback(self._transaction)
# For now we will assume that the insert already done is the winner - so nothing more to do here
else:
- yield savepoint.release(self._txn)
+ yield savepoint.release(self._transaction)
shareeCollection = yield self._home.sharedChildWithName(record.localname)
yield shareeCollection._initSyncToken()
@@ -655,7 +655,7 @@
shareeCollection = yield self._home.sharedChildWithName(record.localname)
yield shareeCollection._deletedSyncToken(sharedRemoval=True)
- result = yield self._unbindShareQuery.on(self._txn, name=localname,
+ result = yield self._unbindShareQuery.on(self._transaction, name=localname,
homeID=self._home._resourceID)
returnValue(result)
@@ -693,13 +693,13 @@
yield shareeCollection._deletedSyncToken(sharedRemoval=True)
if not shareUID.startswith("Direct"):
- yield self._removeInviteShareQuery.on(self._txn, uid=shareUID)
+ yield self._removeInviteShareQuery.on(self._transaction, uid=shareUID)
else:
# Extract pieces from synthesised UID
homeID, resourceID = shareUID[len("Direct-"):].split("-")
# Now remove the binding for the direct share
yield self._removeDirectShareQuery.on(
- self._txn, homeID=homeID, resourceID=resourceID)
+ self._transaction, homeID=homeID, resourceID=resourceID)
class SQLLegacyCalendarShares(SQLLegacyShares):
@@ -716,7 +716,7 @@
def _getHomeWithUID(self, uid):
- return self._txn.calendarHomeWithUID(uid, create=True)
+ return self._transaction.calendarHomeWithUID(uid, create=True)
@@ -734,7 +734,7 @@
def _getHomeWithUID(self, uid):
- return self._txn.addressbookHomeWithUID(uid, create=True)
+ return self._transaction.addressbookHomeWithUID(uid, create=True)
@@ -1083,8 +1083,8 @@
_objectSchema = schema.CALENDAR_OBJECT
@property
- def _txn(self):
- return self.calendar._txn
+ def _transaction(self):
+ return self.calendar._transaction
@inlineCallbacks
@@ -1134,7 +1134,7 @@
"""
returnValue([row[0] for row in (
yield self._notExpandedBeyondQuery.on(
- self._txn, minDate=pyCalendarTodatetime(normalizeForIndex(minDate)),
+ self._transaction, minDate=pyCalendarTodatetime(normalizeForIndex(minDate)),
resourceID=self.calendar._resourceID))]
)
@@ -1180,7 +1180,7 @@
# that it happens to be used by the oracle binding that we're using,
# whereas the postgres binding happens to use the 'pyformat' (e.g. %s)
# parameter style.
- if self.calendar._txn.paramstyle == 'numeric':
+ if self.calendar._transaction.paramstyle == 'numeric':
generator = oraclesqlgenerator
else:
generator = postgresqlgenerator
@@ -1216,7 +1216,7 @@
else:
if fbtype:
# For a free-busy time-range query we return all instances
- rowiter = yield self._txn.execSQL(
+ rowiter = yield self._transaction.execSQL(
"""
select DISTINCT
CALENDAR_OBJECT.RESOURCE_NAME,
@@ -1231,7 +1231,7 @@
qualifiers[1]
)
else:
- rowiter = yield self._txn.execSQL(
+ rowiter = yield self._transaction.execSQL(
"""
select
DISTINCT CALENDAR_OBJECT.RESOURCE_NAME,
@@ -1273,7 +1273,7 @@
def bruteForceSearch(self):
return self._bruteForceQuery.on(
- self._txn, resourceID=self.resource._resourceID)
+ self._transaction, resourceID=self.resource._resourceID)
@inlineCallbacks
@@ -1300,7 +1300,7 @@
def resourceExists(self, name):
returnValue((bool(
(yield self._resourceExistsQuery.on(
- self._txn, name=name, resourceID=self.resource._resourceID))
+ self._transaction, name=name, resourceID=self.resource._resourceID))
)))
@@ -1406,8 +1406,8 @@
@property
- def _txn(self):
- return self.addressbook._txn
+ def _transaction(self):
+ return self.addressbook._transaction
@inlineCallbacks
@@ -1445,7 +1445,7 @@
C{name} is the resource name, C{uid} is the resource UID, and
C{type} is the resource iCalendar component type.
"""
- if self.addressbook._txn.paramstyle == 'numeric':
+ if self.addressbook._transaction.paramstyle == 'numeric':
generator = oraclesqladbkgenerator
else:
generator = postgresqladbkgenerator
@@ -1456,7 +1456,7 @@
else:
qualifiers = None
if qualifiers is not None:
- rowiter = yield self._txn.execSQL(
+ rowiter = yield self._transaction.execSQL(
"select DISTINCT ADDRESSBOOK_OBJECT.RESOURCE_NAME, ADDRESSBOOK_OBJECT.VCARD_UID" +
qualifiers[0],
qualifiers[1]
@@ -1468,7 +1468,7 @@
From=self._objectSchema,
Where=self._objectSchema.ADDRESSBOOK_RESOURCE_ID ==
self.addressbook._resourceID
- ).on(self.addressbook._txn)
+ ).on(self.addressbook._transaction)
returnValue(list(rowiter))
Modified: CalendarServer/branches/users/cdaboo/implicituidrace/txdav/common/datastore/test/test_util.py
===================================================================
--- CalendarServer/branches/users/cdaboo/implicituidrace/txdav/common/datastore/test/test_util.py 2011-10-03 14:58:12 UTC (rev 8137)
+++ CalendarServer/branches/users/cdaboo/implicituidrace/txdav/common/datastore/test/test_util.py 2011-10-03 15:27:22 UTC (rev 8138)
@@ -94,19 +94,19 @@
self.topService.startService()
yield self.subStarted
self.assertEquals(self.stubService.running, True)
- txn = self.sqlStore.newTransaction()
- self.addCleanup(txn.commit)
+ transaction = self.sqlStore.newTransaction()
+ self.addCleanup(transaction.commit)
for uid in CommonTests.requirements:
if CommonTests.requirements[uid] is not None:
self.assertNotIdentical(
- None, (yield txn.calendarHomeWithUID(uid))
+ None, (yield transaction.calendarHomeWithUID(uid))
)
# Successfully migrated calendar homes are deleted
self.assertFalse(self.filesPath.child("calendars").child(
"__uids__").child("ho").child("me").child("home1").exists())
# Want metadata preserved
- home = (yield txn.calendarHomeWithUID("home1"))
+ home = (yield transaction.calendarHomeWithUID("home1"))
calendar = (yield home.calendarWithName("calendar_1"))
for name, metadata, md5 in (
("1.ics", CommonTests.metadata1, CommonTests.md5Values[0]),
@@ -124,14 +124,14 @@
L{UpgradeToDatabaseService.startService} will skip migrating existing
homes.
"""
- startTxn = self.sqlStore.newTransaction("populate empty sample")
- yield startTxn.calendarHomeWithUID("home1", create=True)
- yield startTxn.commit()
+ startTransaction = self.sqlStore.newTransaction("populate empty sample")
+ yield startTransaction.calendarHomeWithUID("home1", create=True)
+ yield startTransaction.commit()
self.topService.startService()
yield self.subStarted
- vrfyTxn = self.sqlStore.newTransaction("verify sample still empty")
- self.addCleanup(vrfyTxn.commit)
- home = yield vrfyTxn.calendarHomeWithUID("home1")
+ vrfyTransaction = self.sqlStore.newTransaction("verify sample still empty")
+ self.addCleanup(vrfyTransaction.commit)
+ home = yield vrfyTransaction.calendarHomeWithUID("home1")
# The default calendar is still there.
self.assertNotIdentical(None, (yield home.calendarWithName("calendar")))
# The migrated calendar isn't.
@@ -145,17 +145,17 @@
as well.
"""
- txn = self.fileStore.newTransaction()
+ transaction = self.fileStore.newTransaction()
committed = []
def maybeCommit():
if not committed:
committed.append(True)
- return txn.commit()
+ return transaction.commit()
self.addCleanup(maybeCommit)
@inlineCallbacks
def getSampleObj():
- home = (yield txn.calendarHomeWithUID("home1"))
+ home = (yield transaction.calendarHomeWithUID("home1"))
calendar = (yield home.calendarWithName("calendar_1"))
object = (yield calendar.calendarObjectWithName("1.ics"))
returnValue(object)
@@ -174,7 +174,7 @@
self.topService.startService()
yield self.subStarted
committed = []
- txn = self.sqlStore.newTransaction()
+ transaction = self.sqlStore.newTransaction()
outObject = yield getSampleObj()
outAttachment = yield outObject.attachmentWithName(someAttachmentName)
allDone = Deferred()
@@ -199,19 +199,19 @@
self.topService.startService()
yield self.subStarted
self.assertEquals(self.stubService.running, True)
- txn = self.sqlStore.newTransaction()
- self.addCleanup(txn.commit)
+ transaction = self.sqlStore.newTransaction()
+ self.addCleanup(transaction.commit)
for uid in ABCommonTests.requirements:
if ABCommonTests.requirements[uid] is not None:
self.assertNotIdentical(
- None, (yield txn.addressbookHomeWithUID(uid))
+ None, (yield transaction.addressbookHomeWithUID(uid))
)
# Successfully migrated addressbook homes are deleted
self.assertFalse(self.filesPath.child("addressbooks").child(
"__uids__").child("ho").child("me").child("home1").exists())
# Want metadata preserved
- home = (yield txn.addressbookHomeWithUID("home1"))
+ home = (yield transaction.addressbookHomeWithUID("home1"))
adbk = (yield home.addressbookWithName("addressbook_1"))
for name, md5 in (
("1.vcf", ABCommonTests.md5Values[0]),
@@ -320,32 +320,32 @@
Use the postgres schema mechanism to do tests under a separate "namespace"
in postgres that we can quickly wipe clean afterwards.
"""
- startTxn = store.newTransaction("test_dbUpgrades")
- yield startTxn.execSQL("create schema test_dbUpgrades;")
- yield startTxn.execSQL("set search_path to test_dbUpgrades;")
- yield startTxn.execSQL(path.getContent())
- yield startTxn.commit()
+ startTransaction = store.newTransaction("test_dbUpgrades")
+ yield startTransaction.execSQL("create schema test_dbUpgrades;")
+ yield startTransaction.execSQL("set search_path to test_dbUpgrades;")
+ yield startTransaction.execSQL(path.getContent())
+ yield startTransaction.commit()
@inlineCallbacks
def _loadVersion():
- startTxn = store.newTransaction("test_dbUpgrades")
- new_version = yield startTxn.execSQL("select value from calendarserver where name = 'VERSION';")
- yield startTxn.commit()
+ startTransaction = store.newTransaction("test_dbUpgrades")
+ new_version = yield startTransaction.execSQL("select value from calendarserver where name = 'VERSION';")
+ yield startTransaction.commit()
returnValue(int(new_version[0][0]))
@inlineCallbacks
def _unloadOldSchema():
- startTxn = store.newTransaction("test_dbUpgrades")
- yield startTxn.execSQL("set search_path to public;")
- yield startTxn.execSQL("drop schema test_dbUpgrades cascade;")
- yield startTxn.commit()
+ startTransaction = store.newTransaction("test_dbUpgrades")
+ yield startTransaction.execSQL("set search_path to public;")
+ yield startTransaction.execSQL("drop schema test_dbUpgrades cascade;")
+ yield startTransaction.commit()
@inlineCallbacks
def _cleanupOldSchema():
- startTxn = store.newTransaction("test_dbUpgrades")
- yield startTxn.execSQL("set search_path to public;")
- yield startTxn.execSQL("drop schema if exists test_dbUpgrades cascade;")
- yield startTxn.commit()
+ startTransaction = store.newTransaction("test_dbUpgrades")
+ yield startTransaction.execSQL("set search_path to public;")
+ yield startTransaction.execSQL("drop schema if exists test_dbUpgrades cascade;")
+ yield startTransaction.commit()
self.addCleanup(_cleanupOldSchema)
Modified: CalendarServer/branches/users/cdaboo/implicituidrace/txdav/common/datastore/test/util.py
===================================================================
--- CalendarServer/branches/users/cdaboo/implicituidrace/txdav/common/datastore/test/util.py 2011-10-03 14:58:12 UTC (rev 8137)
+++ CalendarServer/branches/users/cdaboo/implicituidrace/txdav/common/datastore/test/util.py 2011-10-03 15:27:22 UTC (rev 8138)
@@ -154,7 +154,7 @@
@inlineCallbacks
def cleanStore(self, testCase, storeToClean):
- cleanupTxn = storeToClean.sqlTxnFactory(
+ cleanupTransaction = storeToClean.sqlTransactionFactory(
"%s schema-cleanup" % (testCase.id(),)
)
# TODO: should be getting these tables from a declaration of the schema
@@ -177,10 +177,10 @@
'NOTIFICATION_HOME']
for table in tables:
try:
- yield cleanupTxn.execSQL("delete from "+table, [])
+ yield cleanupTransaction.execSQL("delete from "+table, [])
except:
log.err()
- yield cleanupTxn.commit()
+ yield cleanupTransaction.commit()
# Deal with memcached items that must be cleared
from txdav.caldav.datastore.sql import CalendarHome
@@ -252,11 +252,11 @@
@param store: the L{IDataStore} to populate with calendar data.
"""
- populateTxn = store.newTransaction()
+ populateTransaction = store.newTransaction()
for homeUID in requirements:
calendars = requirements[homeUID]
if calendars is not None:
- home = yield populateTxn.calendarHomeWithUID(homeUID, True)
+ home = yield populateTransaction.calendarHomeWithUID(homeUID, True)
# We don't want the default calendar or inbox to appear unless it's
# explicitly listed.
try:
@@ -278,7 +278,7 @@
VComponent.fromString(objData),
metadata = metadata,
)
- yield populateTxn.commit()
+ yield populateTransaction.commit()
@inlineCallbacks
def resetCalendarMD5s(md5s, store):
@@ -290,11 +290,11 @@
@param store: the L{IDataStore} to populate with calendar data.
"""
- populateTxn = store.newTransaction()
+ populateTransaction = store.newTransaction()
for homeUID in md5s:
calendars = md5s[homeUID]
if calendars is not None:
- home = yield populateTxn.calendarHomeWithUID(homeUID, True)
+ home = yield populateTransaction.calendarHomeWithUID(homeUID, True)
for calendarName in calendars:
calendarObjNames = calendars[calendarName]
if calendarObjNames is not None:
@@ -307,7 +307,7 @@
objectName,
)
obj.properties()[md5key] = TwistedGETContentMD5.fromString(md5)
- yield populateTxn.commit()
+ yield populateTransaction.commit()
@inlineCallbacks
@@ -320,11 +320,11 @@
@param store: the L{IDataStore} to populate with addressbook data.
"""
- populateTxn = store.newTransaction()
+ populateTransaction = store.newTransaction()
for homeUID in requirements:
addressbooks = requirements[homeUID]
if addressbooks is not None:
- home = yield populateTxn.addressbookHomeWithUID(homeUID, True)
+ home = yield populateTransaction.addressbookHomeWithUID(homeUID, True)
# We don't want the default addressbook
try:
yield home.removeAddressBookWithName("addressbook")
@@ -343,7 +343,7 @@
objectName,
ABComponent.fromString(objData),
)
- yield populateTxn.commit()
+ yield populateTransaction.commit()
@inlineCallbacks
def resetAddressBookMD5s(md5s, store):
@@ -355,11 +355,11 @@
@param store: the L{IDataStore} to populate with addressbook data.
"""
- populateTxn = store.newTransaction()
+ populateTransaction = store.newTransaction()
for homeUID in md5s:
addressbooks = md5s[homeUID]
if addressbooks is not None:
- home = yield populateTxn.addressbookHomeWithUID(homeUID, True)
+ home = yield populateTransaction.addressbookHomeWithUID(homeUID, True)
for addressbookName in addressbooks:
addressbookObjNames = addressbooks[addressbookName]
if addressbookObjNames is not None:
@@ -372,7 +372,7 @@
objectName,
)
obj.properties()[md5key] = TwistedGETContentMD5.fromString(md5)
- yield populateTxn.commit()
+ yield populateTransaction.commit()
def assertProvides(testCase, interface, provider):
@@ -416,10 +416,10 @@
if self.savedStore is None:
self.savedStore = self.storeUnderTest()
self.counter += 1
- txn = self.lastTransaction = self.savedStore.newTransaction(
+ transaction = self.lastTransaction = self.savedStore.newTransaction(
self.id() + " #" + str(self.counter)
)
- return txn
+ return transaction
def commit(self):
Modified: CalendarServer/branches/users/cdaboo/implicituidrace/txdav/common/datastore/util.py
===================================================================
--- CalendarServer/branches/users/cdaboo/implicituidrace/txdav/common/datastore/util.py 2011-10-03 14:58:12 UTC (rev 8137)
+++ CalendarServer/branches/users/cdaboo/implicituidrace/txdav/common/datastore/util.py 2011-10-03 15:27:22 UTC (rev 8138)
@@ -135,31 +135,31 @@
for homeType, migrateFunc, eachFunc, destFunc, topPathName in [
("calendar", migrateCalendarHome,
self.fileStore.eachCalendarHome,
- lambda txn: txn.calendarHomeWithUID,
+ lambda transaction: transaction.calendarHomeWithUID,
"calendars"),
("addressbook", migrateAddressbookHome,
self.fileStore.eachAddressbookHome,
- lambda txn: txn.addressbookHomeWithUID,
+ lambda transaction: transaction.addressbookHomeWithUID,
"addressbooks")
]:
- for fileTxn, fileHome in eachFunc():
+ for fileTransaction, fileHome in eachFunc():
uid = fileHome.uid()
self.log_warn("Migrating %s UID %r" % (homeType, uid))
- sqlTxn = self.sqlStore.newTransaction(migrating=True)
- homeGetter = destFunc(sqlTxn)
+ sqlTransaction = self.sqlStore.newTransaction(migrating=True)
+ homeGetter = destFunc(sqlTransaction)
if (yield homeGetter(uid, create=False)) is not None:
self.log_warn(
"%s home %r already existed not migrating" % (
homeType, uid))
- yield sqlTxn.abort()
- yield fileTxn.commit()
+ yield sqlTransaction.abort()
+ yield fileTransaction.commit()
continue
sqlHome = yield homeGetter(uid, create=True)
if sqlHome is None:
raise RuntimeError("THIS SHOULD NOT BE POSSIBLE.")
yield migrateFunc(fileHome, sqlHome)
- yield fileTxn.commit()
- yield sqlTxn.commit()
+ yield fileTransaction.commit()
+ yield sqlTransaction.commit()
# FIXME: need a public remove...HomeWithUID() for de-
# provisioning
@@ -260,14 +260,14 @@
self.log_warn("Required schema version: %s." % (required_version,))
# Get the schema version in the current database
- sqlTxn = self.sqlStore.newTransaction()
- dialect = sqlTxn.dialect
+ sqlTransaction = self.sqlStore.newTransaction()
+ dialect = sqlTransaction.dialect
try:
- actual_version = yield sqlTxn.schemaVersion()
- yield sqlTxn.commit()
+ actual_version = yield sqlTransaction.schemaVersion()
+ yield sqlTransaction.commit()
except RuntimeError:
self.log_error("Database schema version cannot be determined.")
- yield sqlTxn.abort()
+ yield sqlTransaction.abort()
raise
self.log_warn("Actual schema version: %s." % (actual_version,))
@@ -305,14 +305,14 @@
upgrades = self.determineUpgradeSequence(fromVersion, toVersion, files, dialect)
# Use one transaction for the entire set of upgrades
- sqlTxn = self.sqlStore.newTransaction()
+ sqlTransaction = self.sqlStore.newTransaction()
try:
for fp in upgrades:
- yield self.applyUpgrade(sqlTxn, fp)
- yield sqlTxn.commit()
+ yield self.applyUpgrade(sqlTransaction, fp)
+ yield sqlTransaction.commit()
except RuntimeError:
self.log_error("Database upgrade failed:" % (fp.basename(),))
- yield sqlTxn.abort()
+ yield sqlTransaction.abort()
raise
self.log_warn("Schema upgraded from version %d to %d." % (fromVersion, toVersion,))
@@ -364,13 +364,13 @@
return upgrades
@inlineCallbacks
- def applyUpgrade(self, sqlTxn, fp):
+ def applyUpgrade(self, sqlTransaction, fp):
"""
Apply the schema upgrade .sql file to the database.
"""
self.log_warn("Applying schema upgrade: %s" % (fp.basename(),))
sql = fp.getContent()
- yield sqlTxn.execSQLBlock(sql)
+ yield sqlTransaction.execSQLBlock(sql)
def startService(self):
"""
Modified: CalendarServer/branches/users/cdaboo/implicituidrace/txdav/idav.py
===================================================================
--- CalendarServer/branches/users/cdaboo/implicituidrace/txdav/idav.py 2011-10-03 14:58:12 UTC (rev 8137)
+++ CalendarServer/branches/users/cdaboo/implicituidrace/txdav/idav.py 2011-10-03 15:27:22 UTC (rev 8138)
@@ -210,6 +210,18 @@
"""
+ def postAbort(operation):
+ """
+ Registers an operation to be executed after the transaction is
+ aborted.
+
+ postAbort can be called multiple times, and operations are executed
+ in the order which they were registered.
+
+ @param operation: a callable.
+ """
+
+
def store():
"""
The store that this transaction was initiated from.
-------------- next part --------------
An HTML attachment was scrubbed...
URL: <http://lists.macosforge.org/pipermail/calendarserver-changes/attachments/20111003/cc85cfc0/attachment-0001.html>
More information about the calendarserver-changes
mailing list