[CalendarServer-changes] [15102] CalendarServer/branches/release/CalendarServer-5.4-dev
source_changes at macosforge.org
source_changes at macosforge.org
Thu Sep 3 14:05:31 PDT 2015
Revision: 15102
http://trac.calendarserver.org/changeset/15102
Author: cdaboo at apple.com
Date: 2015-09-03 14:05:31 -0700 (Thu, 03 Sep 2015)
Log Message:
-----------
Add warning log messages when a txn stalls due to DB pool being fully utilized. Add warning for lost work check.
Modified Paths:
--------------
CalendarServer/branches/release/CalendarServer-5.4-dev/calendarserver/push/applepush.py
CalendarServer/branches/release/CalendarServer-5.4-dev/twext/enterprise/adbapi2.py
CalendarServer/branches/release/CalendarServer-5.4-dev/twext/enterprise/queue.py
CalendarServer/branches/release/CalendarServer-5.4-dev/twext/enterprise/test/test_queue.py
CalendarServer/branches/release/CalendarServer-5.4-dev/twistedcaldav/directory/directory.py
CalendarServer/branches/release/CalendarServer-5.4-dev/txdav/caldav/datastore/scheduling/imip/inbound.py
CalendarServer/branches/release/CalendarServer-5.4-dev/txdav/caldav/datastore/scheduling/imip/mailgateway.py
CalendarServer/branches/release/CalendarServer-5.4-dev/txdav/common/datastore/sql.py
CalendarServer/branches/release/CalendarServer-5.4-dev/txdav/common/datastore/sql_legacy.py
CalendarServer/branches/release/CalendarServer-5.4-dev/txdav/common/datastore/upgrade/migrate.py
CalendarServer/branches/release/CalendarServer-5.4-dev/txdav/common/datastore/upgrade/sql/upgrade.py
Modified: CalendarServer/branches/release/CalendarServer-5.4-dev/calendarserver/push/applepush.py
===================================================================
--- CalendarServer/branches/release/CalendarServer-5.4-dev/calendarserver/push/applepush.py 2015-09-03 21:00:43 UTC (rev 15101)
+++ CalendarServer/branches/release/CalendarServer-5.4-dev/calendarserver/push/applepush.py 2015-09-03 21:05:31 UTC (rev 15102)
@@ -182,7 +182,7 @@
@type purgeSeconds: C{int}
"""
self.log.debug("ApplePushNotifierService purgeOldSubscriptions")
- txn = self.store.newTransaction()
+ txn = self.store.newTransaction(label="ApplePushNotifierService.purgeOldSubscriptions")
yield txn.purgeOldAPNSubscriptions(int(time.time()) - purgeSeconds)
yield txn.commit()
@@ -720,7 +720,7 @@
self.log.debug("FeedbackProtocol processFeedback time=%d token=%s" %
(timestamp, token))
- txn = self.factory.store.newTransaction()
+ txn = self.factory.store.newTransaction(label="APNFeedbackProtocol.processFeedback")
subscriptions = (yield txn.apnSubscriptionsByToken(token))
for key, modified, _ignore_uid in subscriptions:
@@ -943,7 +943,7 @@
@type key: C{str}
"""
now = int(time.time()) # epoch seconds
- txn = self.store.newTransaction()
+ txn = self.store.newTransaction(label="APNSubscriptionResource.addSubscription")
yield txn.addAPNSubscription(token, key, now, uid, userAgent, host)
yield txn.commit()
Modified: CalendarServer/branches/release/CalendarServer-5.4-dev/twext/enterprise/adbapi2.py
===================================================================
--- CalendarServer/branches/release/CalendarServer-5.4-dev/twext/enterprise/adbapi2.py 2015-09-03 21:00:43 UTC (rev 15101)
+++ CalendarServer/branches/release/CalendarServer-5.4-dev/twext/enterprise/adbapi2.py 2015-09-03 21:05:31 UTC (rev 15102)
@@ -31,6 +31,7 @@
import sys
import weakref
+import time
from cStringIO import StringIO
from cPickle import dumps, loads
@@ -47,7 +48,6 @@
from twisted.protocols.amp import Argument, String, Command, AMP, Integer
from twisted.internet import reactor as _reactor
from twisted.application.service import Service
-from twisted.python import log
from twisted.internet.defer import maybeDeferred
from twisted.python.components import proxyForInterface
@@ -63,6 +63,8 @@
AlreadyFinishedError, IAsyncTransaction, POSTGRES_DIALECT, ICommandBlock
)
+from twext.python.log import Logger
+log = Logger()
# FIXME: there should be no defaults for connection metadata, it should be
# discovered dynamically everywhere. Right now it's specified as an explicit
@@ -246,11 +248,11 @@
# Report the error before doing anything else, since doing
# other things may cause the traceback stack to be eliminated
# if they raise exceptions (even internally).
- log.err(
- Failure(),
+ log.failure(
"Exception from execute() on first statement in "
"transaction. Possibly caused by a database server "
- "restart. Automatically reconnecting now."
+ "restart. Automatically reconnecting now.",
+ failure=Failure(),
)
try:
self._connection.close()
@@ -267,10 +269,10 @@
# making debugging surprising error conditions very
# difficult, so let's make sure that the error is logged
# just in case.
- log.err(
- Failure(),
+ log.failure(
"Exception from close() while automatically "
- "reconnecting. (Probably not serious.)"
+ "reconnecting. (Probably not serious.)",
+ failure=Failure(),
)
# Now, if either of *these* things fail, there's an error here
@@ -354,7 +356,9 @@
def abort(self):
- return self._end(self._connection.rollback).addErrback(log.err)
+ def _report(f):
+ log.failure("txn abort", failure=f)
+ return self._end(self._connection.rollback).addErrback(_report)
def reset(self):
@@ -1043,12 +1047,32 @@
basetxn = self._free.pop(0)
self._busy.append(basetxn)
txn = _SingleTxn(self, basetxn)
+ log.debug(
+ "ConnectionPool: txn busy '{label}': free={free}, busy={busy}, waiting={waiting}",
+ label=label,
+ free=len(self._free),
+ busy=len(self._busy) + len(self._finishing),
+ waiting=len(self._waiting),
+ )
else:
txn = _SingleTxn(self, _WaitingTxn(self))
self._waiting.append(txn)
+ blocked = self._activeConnectionCount() >= self.maxConnections
+ if blocked:
+ txn._blocked_waiting_time = time.time()
+ txn._blocked_label = label
+ log.warn("ConnectionPool: txn blocked '{label}'", label=label)
+ log.debug(
+ "ConnectionPool: txn waiting add '{label}': free={free}, busy={busy}, waiting={waiting} {blocked}",
+ label=label,
+ free=len(self._free),
+ busy=len(self._busy) + len(self._finishing),
+ waiting=len(self._waiting),
+ blocked="blocked" if blocked else "",
+ )
# FIXME/TESTME: should be len(self._busy) + len(self._finishing)
# (free doesn't need to be considered, as it's tested above)
- if self._activeConnectionCount() < self.maxConnections:
+ if not blocked:
self._startOneMore()
return txn
@@ -1087,7 +1111,7 @@
self._busy.remove(txn)
self._repoolNow(baseTxn)
def maybeTryAgain(f):
- log.err(f, "Re-trying connection due to connection failure")
+ log.failure("Re-trying connection due to connection failure", failure=f)
txn._retry = self.reactor.callLater(self.RETRY_TIMEOUT, resubmit)
def resubmit():
d = holder.submit(initCursor)
@@ -1124,8 +1148,26 @@
waiting = self._waiting.pop(0)
self._busy.append(txn)
waiting._unspoolOnto(txn)
+ if hasattr(waiting, "_blocked_waiting_time"):
+ log.warn(
+ "ConnectionPool: txn unblocked '{label}': delay {delay:.1f}ms",
+ label=waiting._blocked_label,
+ delay=1000 * (time.time() - waiting._blocked_waiting_time),
+ )
+ log.debug(
+ "ConnectionPool: txn waiting remove: free={free}, busy={busy}, waiting={waiting}",
+ free=len(self._free),
+ busy=len(self._busy) + len(self._finishing),
+ waiting=len(self._waiting),
+ )
else:
self._free.append(txn)
+ log.debug(
+ "ConnectionPool: txn free: free={free}, busy={busy}, waiting={waiting}",
+ free=len(self._free),
+ busy=len(self._busy) + len(self._finishing),
+ waiting=len(self._waiting),
+ )
@@ -1213,7 +1255,7 @@
if f.type in command.errors:
returnValue(f)
else:
- log.err(Failure(), "shared database connection pool error")
+ log.failure("shared database connection pool error", failure=f)
raise FailsafeException()
else:
returnValue(val)
@@ -1324,7 +1366,7 @@
def stopReceivingBoxes(self, why):
- log.msg("(S) Stopped receiving boxes: " + why.getTraceback())
+ log.info("(S) Stopped receiving boxes: {tb}", tb=why.getTraceback())
def unhandledError(self, failure):
@@ -1332,7 +1374,7 @@
An unhandled error has occurred. Since we can't really classify errors
well on this protocol, log it and forget it.
"""
- log.err(failure, "Shared connection pool server encountered an error.")
+ log.failure("Shared connection pool server encountered an error.", failure=failure)
@failsafeResponder(StartTxn)
@@ -1438,11 +1480,11 @@
An unhandled error has occurred. Since we can't really classify errors
well on this protocol, log it and forget it.
"""
- log.err(failure, "Shared connection pool client encountered an error.")
+ log.failure("Shared connection pool client encountered an error.", failure=failure)
def stopReceivingBoxes(self, why):
- log.msg("(C) Stopped receiving boxes: " + why.getTraceback())
+ log.info("(C) Stopped receiving boxes: {tb}", tb=why.getTraceback())
def newTransaction(self):
Modified: CalendarServer/branches/release/CalendarServer-5.4-dev/twext/enterprise/queue.py
===================================================================
--- CalendarServer/branches/release/CalendarServer-5.4-dev/twext/enterprise/queue.py 2015-09-03 21:00:43 UTC (rev 15101)
+++ CalendarServer/branches/release/CalendarServer-5.4-dev/twext/enterprise/queue.py 2015-09-03 21:05:31 UTC (rev 15102)
@@ -92,7 +92,6 @@
from twisted.internet.endpoints import TCP4ClientEndpoint
from twisted.protocols.amp import AMP, Command, Integer, Argument, String
from twisted.python.reflect import qual
-from twisted.python import log
from twext.enterprise.dal.syntax import SchemaSyntax, Lock, NamedValue
@@ -106,7 +105,10 @@
from zope.interface.interface import Interface
from twext.enterprise.locking import NamedLock
+from twext.python.log import Logger
+log = Logger()
+
class _IWorkPerformer(Interface):
"""
An object that can perform work.
@@ -165,7 +167,7 @@
@inlineCallbacks
-def inTransaction(transactionCreator, operation):
+def inTransaction(transactionCreator, operation, label="<unlabeled>"):
"""
Perform the given operation in a transaction, committing or aborting as
required.
@@ -180,7 +182,7 @@
its error, unless there is an error creating, aborting or committing
the transaction.
"""
- txn = transactionCreator()
+ txn = transactionCreator(label=label)
try:
result = yield operation(txn)
except:
@@ -839,7 +841,7 @@
except NoSuchRecord:
# The record has already been removed
pass
- return inTransaction(txnFactory, work)
+ return inTransaction(txnFactory, work, label="ultimatelyPerform: {} {}".format(table.model.name, workID))
@@ -1278,11 +1280,17 @@
overdueItems = (yield itemType.query(
txn, (itemType.notBefore < tooLate))
)
+ if overdueItems:
+ log.warn(
+ "periodicLostWorkCheck: executing {count} items of {workType}",
+ count=len(overdueItems),
+ workType=itemType.table.model.name,
+ )
for overdueItem in overdueItems:
peer = self.choosePerformer()
yield peer.performWork(overdueItem.table,
overdueItem.workID)
- return inTransaction(self.transactionFactory, workCheck)
+ return inTransaction(self.transactionFactory, workCheck, label="_periodicLostWorkCheck")
_currentWorkDeferred = None
_lostWorkCheckCall = None
@@ -1294,7 +1302,9 @@
those checks in time based on the size of the cluster.
"""
self._lostWorkCheckCall = None
- @passthru(self._periodicLostWorkCheck().addErrback(log.err)
+ def _result(f):
+ log.failure("periodicLostWorkCheck failed", failure=f)
+ @passthru(self._periodicLostWorkCheck().addErrback(_result)
.addCallback)
def scheduleNext(result):
self._currentWorkDeferred = None
@@ -1343,7 +1353,7 @@
for node in nodes:
self._startConnectingTo(node)
- self._startingUp = inTransaction(self.transactionFactory, startup)
+ self._startingUp = inTransaction(self.transactionFactory, startup, label="PeerConnectionPool.startService")
@self._startingUp.addBoth
def done(result):
self._startingUp = None
@@ -1405,8 +1415,10 @@
noted, "identify"
)
def noted(err, x="connect"):
- log.msg("Could not {0} to cluster peer {1} because {2}"
- .format(x, node, str(err.value)))
+ log.info(
+ "Could not {action} to cluster peer {node} because {err}",
+ action=x, node=node, err=str(err.value),
+ )
connected.addCallbacks(whenConnected, noted)
Modified: CalendarServer/branches/release/CalendarServer-5.4-dev/twext/enterprise/test/test_queue.py
===================================================================
--- CalendarServer/branches/release/CalendarServer-5.4-dev/twext/enterprise/test/test_queue.py 2015-09-03 21:00:43 UTC (rev 15101)
+++ CalendarServer/branches/release/CalendarServer-5.4-dev/twext/enterprise/test/test_queue.py 2015-09-03 21:05:31 UTC (rev 15102)
@@ -130,7 +130,7 @@
return self.aborts[-1]
createdTxns = []
- def createTxn():
+ def createTxn(**kwargs):
createdTxns.append(faketxn())
return createdTxns[-1]
dfrs = []
@@ -728,20 +728,21 @@
self.store = yield buildStore(self, None)
def doit(txn):
return txn.execSQL(schemaText)
- yield inTransaction(lambda: self.store.newTransaction("bonus schema"),
- doit)
- def indirectedTransactionFactory(*a):
+ yield inTransaction(self.store.newTransaction,
+ doit,
+ label="bonus schema")
+ def indirectedTransactionFactory(*a, **k):
"""
Allow tests to replace 'self.store.newTransaction' to provide
fixtures with extra methods on a test-by-test basis.
"""
- return self.store.newTransaction(*a)
+ return self.store.newTransaction(*a, **k)
def deschema():
@inlineCallbacks
def deletestuff(txn):
for stmt in dropSQL:
yield txn.execSQL(stmt)
- return inTransaction(lambda *a: self.store.newTransaction(*a),
+ return inTransaction(self.store.newTransaction,
deletestuff)
self.addCleanup(deschema)
Modified: CalendarServer/branches/release/CalendarServer-5.4-dev/twistedcaldav/directory/directory.py
===================================================================
--- CalendarServer/branches/release/CalendarServer-5.4-dev/twistedcaldav/directory/directory.py 2015-09-03 21:00:43 UTC (rev 15101)
+++ CalendarServer/branches/release/CalendarServer-5.4-dev/twistedcaldav/directory/directory.py 2015-09-03 21:05:31 UTC (rev 15102)
@@ -1082,7 +1082,7 @@
@inlineCallbacks
def scheduleNextGroupCachingUpdate(store, seconds):
- txn = store.newTransaction()
+ txn = store.newTransaction(label="scheduleNextGroupCachingUpdate")
notBefore = datetime.datetime.utcnow() + datetime.timedelta(seconds=seconds)
log.debug("Scheduling next group cacher update: %s" % (notBefore,))
wp = (yield txn.enqueue(GroupCacherPollingWork, notBefore=notBefore))
Modified: CalendarServer/branches/release/CalendarServer-5.4-dev/txdav/caldav/datastore/scheduling/imip/inbound.py
===================================================================
--- CalendarServer/branches/release/CalendarServer-5.4-dev/txdav/caldav/datastore/scheduling/imip/inbound.py 2015-09-03 21:00:43 UTC (rev 15101)
+++ CalendarServer/branches/release/CalendarServer-5.4-dev/txdav/caldav/datastore/scheduling/imip/inbound.py 2015-09-03 21:05:31 UTC (rev 15102)
@@ -168,7 +168,7 @@
@inlineCallbacks
def scheduleNextMailPoll(store, seconds):
- txn = store.newTransaction()
+ txn = store.newTransaction(label="iMIP:scheduleNextMailPoll")
notBefore = datetime.datetime.utcnow() + datetime.timedelta(seconds=seconds)
log.debug("Scheduling next mail poll: %s" % (notBefore,))
yield txn.enqueue(IMIPPollingWork, notBefore=notBefore)
@@ -251,7 +251,7 @@
log.error("Mail gateway can't find token in DSN %s" % (msgId,))
return
- txn = self.store.newTransaction()
+ txn = self.store.newTransaction(label="iMIP:processDSN")
result = (yield txn.imipLookupByToken(token))
yield txn.commit()
try:
@@ -277,7 +277,7 @@
pass
log.warn("Mail gateway processing DSN %s" % (msgId,))
- txn = self.store.newTransaction()
+ txn = self.store.newTransaction(label="iMIP:enqueue IMIPReplyWork")
yield txn.enqueue(IMIPReplyWork, organizer=organizer, attendee=attendee,
icalendarText=str(calendar))
yield txn.commit()
@@ -300,7 +300,7 @@
"message %s" % (msg['To'], msg['Message-ID']))
returnValue(self.MALFORMED_TO_ADDRESS)
- txn = self.store.newTransaction()
+ txn = self.store.newTransaction(label="iMIP:processReply")
result = (yield txn.imipLookupByToken(token))
yield txn.commit()
try:
@@ -383,7 +383,7 @@
# the appropriate ATTENDEE. This will require a new localizable
# email template for the message.
- txn = self.store.newTransaction()
+ txn = self.store.newTransaction(label="iMIP:enqueue IMIPReplyWork")
yield txn.enqueue(IMIPReplyWork, organizer=organizer, attendee=attendee,
icalendarText=str(calendar))
yield txn.commit()
Modified: CalendarServer/branches/release/CalendarServer-5.4-dev/txdav/caldav/datastore/scheduling/imip/mailgateway.py
===================================================================
--- CalendarServer/branches/release/CalendarServer-5.4-dev/txdav/caldav/datastore/scheduling/imip/mailgateway.py 2015-09-03 21:00:43 UTC (rev 15101)
+++ CalendarServer/branches/release/CalendarServer-5.4-dev/txdav/caldav/datastore/scheduling/imip/mailgateway.py 2015-09-03 21:05:31 UTC (rev 15102)
@@ -229,7 +229,7 @@
@type store: L{CommonDataStore}
"""
oldDB = MailGatewayTokensDatabase(path)
- txn = store.newTransaction()
+ txn = store.newTransaction(label="iMIP:migrateTokensToStore")
for token, organizer, attendee, icaluid in oldDB.getAllTokens():
yield txn.imipCreateToken(organizer, attendee, icaluid, token=token)
yield txn.commit()
Modified: CalendarServer/branches/release/CalendarServer-5.4-dev/txdav/common/datastore/sql.py
===================================================================
--- CalendarServer/branches/release/CalendarServer-5.4-dev/txdav/common/datastore/sql.py 2015-09-03 21:00:43 UTC (rev 15101)
+++ CalendarServer/branches/release/CalendarServer-5.4-dev/txdav/common/datastore/sql.py 2015-09-03 21:05:31 UTC (rev 15102)
@@ -210,7 +210,7 @@
Implementation of L{ICalendarStore.withEachCalendarHomeDo} and
L{IAddressbookStore.withEachAddressbookHomeDo}.
"""
- txn = yield self.newTransaction()
+ txn = yield self.newTransaction(label="_withEachHomeDo")
try:
allUIDs = yield (Select([homeTable.OWNER_UID], From=homeTable)
.on(txn))
@@ -252,7 +252,7 @@
"""
txn = CommonStoreTransaction(
self,
- self.sqlTxnFactory(),
+ self.sqlTxnFactory(label=label),
self.enableCalendars,
self.enableAddressBooks,
self._notifierFactories if self._enableNotifications else {},
@@ -5855,7 +5855,7 @@
Fix all UUIDs in the given SQL store to be in a canonical form;
00000000-0000-0000-0000-000000000000 format and upper-case.
"""
- t = store.newTransaction(disableCache=True)
+ t = store.newTransaction(label="fixUUIDNormalization", disableCache=True)
# First, let's see if there are any calendar, addressbook, or notification
# homes that have a de-normalized OWNER_UID. If there are none, then we can
Modified: CalendarServer/branches/release/CalendarServer-5.4-dev/txdav/common/datastore/sql_legacy.py
===================================================================
--- CalendarServer/branches/release/CalendarServer-5.4-dev/txdav/common/datastore/sql_legacy.py 2015-09-03 21:00:43 UTC (rev 15101)
+++ CalendarServer/branches/release/CalendarServer-5.4-dev/txdav/common/datastore/sql_legacy.py 2015-09-03 21:05:31 UTC (rev 15102)
@@ -485,7 +485,7 @@
# Use a new transaction to do this update quickly without locking the row for too long. However, the original
# transaction may have the row locked, so use wait=False and if that fails, fall back to using the original txn.
- newTxn = obj.transaction().store().newTransaction()
+ newTxn = obj.transaction().store().newTransaction(label="reExpandResource")
try:
yield obj.lock(wait=False, txn=newTxn)
except NoSuchObjectResourceError:
Modified: CalendarServer/branches/release/CalendarServer-5.4-dev/txdav/common/datastore/upgrade/migrate.py
===================================================================
--- CalendarServer/branches/release/CalendarServer-5.4-dev/txdav/common/datastore/upgrade/migrate.py 2015-09-03 21:00:43 UTC (rev 15101)
+++ CalendarServer/branches/release/CalendarServer-5.4-dev/txdav/common/datastore/upgrade/migrate.py 2015-09-03 21:05:31 UTC (rev 15102)
@@ -217,7 +217,7 @@
Upgrade one calendar home.
"""
_ignore_migrateFunc, destFunc = homeTypeLookup[homeType]
- fileTxn = self.upgrader.fileStore.newTransaction()
+ fileTxn = self.upgrader.fileStore.newTransaction(label="UpgradeHelperProcess:oneUpgrade")
return (
maybeDeferred(destFunc(fileTxn), uid)
.addCallback(
@@ -315,7 +315,7 @@
uid = normalizeUUIDOrNot(fileHome.uid())
self.log.warn("Starting migration transaction %s UID %r" %
(homeType, uid))
- sqlTxn = self.sqlStore.newTransaction()
+ sqlTxn = self.sqlStore.newTransaction(label="UpgradeToDatabaseStep.migrateOneHome")
homeGetter = destFunc(sqlTxn)
sqlHome = yield homeGetter(uid, create=False)
if sqlHome is not None and not self.merge:
@@ -409,7 +409,7 @@
# First force each home to v1 data format so the upgrades will be triggered
self.log.warn("Migration extra steps.")
- txn = self.sqlStore.newTransaction()
+ txn = self.sqlStore.newTransaction(label="UpgradeToDatabaseStep.doDataUpgradeSteps")
for storeType in (ECALENDARTYPE, EADDRESSBOOKTYPE):
schema = txn._homeClass[storeType]._homeSchema
yield Update(
Modified: CalendarServer/branches/release/CalendarServer-5.4-dev/txdav/common/datastore/upgrade/sql/upgrade.py
===================================================================
--- CalendarServer/branches/release/CalendarServer-5.4-dev/txdav/common/datastore/upgrade/sql/upgrade.py 2015-09-03 21:00:43 UTC (rev 15101)
+++ CalendarServer/branches/release/CalendarServer-5.4-dev/txdav/common/datastore/upgrade/sql/upgrade.py 2015-09-03 21:05:31 UTC (rev 15102)
@@ -48,7 +48,7 @@
@inlineCallbacks
def stepWithResult(self, result):
- sqlTxn = self.sqlStore.newTransaction()
+ sqlTxn = self.sqlStore.newTransaction(label="UpgradeAcquireLockStep")
yield sqlTxn.acquireUpgradeLock()
yield sqlTxn.commit()
@@ -69,7 +69,7 @@
@inlineCallbacks
def stepWithResult(self, result):
- sqlTxn = self.sqlStore.newTransaction()
+ sqlTxn = self.sqlStore.newTransaction(label="UpgradeReleaseLockStep")
yield sqlTxn.releaseUpgradeLock()
yield sqlTxn.commit()
@@ -170,7 +170,7 @@
self.log.warn("Required database key %s: %s." % (self.versionKey, required_version,))
# Get the schema version in the current database
- sqlTxn = self.sqlStore.newTransaction()
+ sqlTxn = self.sqlStore.newTransaction(label="UpgradeDatabaseCoreStep.getVersions")
dialect = sqlTxn.dialect
try:
actual_version = yield sqlTxn.calendarserverValue(self.versionKey)
@@ -313,7 +313,7 @@
Apply the schema upgrade .sql file to the database.
"""
self.log.warn("Applying schema upgrade: %s" % (fp.basename(),))
- sqlTxn = self.sqlStore.newTransaction()
+ sqlTxn = self.sqlStore.newTransaction(label="UpgradeDatabaseSchemaStep.applyUpgrade")
try:
sql = fp.getContent()
yield sqlTxn.execSQLBlock(sql)
-------------- next part --------------
An HTML attachment was scrubbed...
URL: <https://lists.macosforge.org/pipermail/calendarserver-changes/attachments/20150903/108138e3/attachment-0001.html>
More information about the calendarserver-changes
mailing list