[CalendarServer-changes] [11260] CalendarServer/trunk

source_changes at macosforge.org
Wed May 29 18:18:40 PDT 2013


Revision: 11260
          http://trac.calendarserver.org//changeset/11260
Author:   wsanchez at apple.com
Date:     2013-05-29 18:18:39 -0700 (Wed, 29 May 2013)
Log Message:
-----------
Get rid of LoggingMixIn.
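
The pattern applied throughout this changeset is mechanical: drop the
mix-in base class, add a Logger instance as a class attribute, and
rename the log_* methods to calls on that attribute. Roughly (Foo
mirrors the example in the twext.python.log docstring below):

    # Before:
    from twext.python.log import LoggingMixIn

    class Foo(LoggingMixIn):
        def oops(self):
            self.log_error("Oops!")

    # After:
    from twext.python.log import Logger

    class Foo(object):
        log = Logger()

        def oops(self):
            self.log.error("Oops!")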

Modified Paths:
--------------
    CalendarServer/trunk/calendarserver/push/amppush.py
    CalendarServer/trunk/calendarserver/push/applepush.py
    CalendarServer/trunk/calendarserver/push/notifier.py
    CalendarServer/trunk/calendarserver/push/util.py
    CalendarServer/trunk/calendarserver/tap/caldav.py
    CalendarServer/trunk/calendarserver/webcal/resource.py
    CalendarServer/trunk/twext/python/log.py
    CalendarServer/trunk/twext/python/test/test_log.py
    CalendarServer/trunk/twistedcaldav/authkerb.py
    CalendarServer/trunk/twistedcaldav/cache.py
    CalendarServer/trunk/twistedcaldav/client/pool.py
    CalendarServer/trunk/twistedcaldav/client/reverseproxy.py
    CalendarServer/trunk/twistedcaldav/directory/appleopendirectory.py
    CalendarServer/trunk/twistedcaldav/directory/cachingdirectory.py
    CalendarServer/trunk/twistedcaldav/directory/calendaruserproxy.py
    CalendarServer/trunk/twistedcaldav/directory/directory.py
    CalendarServer/trunk/twistedcaldav/directory/ldapdirectory.py
    CalendarServer/trunk/twistedcaldav/directory/opendirectorybacker.py
    CalendarServer/trunk/twistedcaldav/directory/resourceinfo.py
    CalendarServer/trunk/twistedcaldav/directory/xmlfile.py
    CalendarServer/trunk/twistedcaldav/extensions.py
    CalendarServer/trunk/twistedcaldav/linkresource.py
    CalendarServer/trunk/twistedcaldav/memcachelock.py
    CalendarServer/trunk/twistedcaldav/memcachepool.py
    CalendarServer/trunk/twistedcaldav/memcacheprops.py
    CalendarServer/trunk/twistedcaldav/memcacher.py
    CalendarServer/trunk/twistedcaldav/method/report_addressbook_query.py
    CalendarServer/trunk/twistedcaldav/notifications.py
    CalendarServer/trunk/twistedcaldav/resource.py
    CalendarServer/trunk/twistedcaldav/scheduling_store/caldav/resource.py
    CalendarServer/trunk/twistedcaldav/sharing.py
    CalendarServer/trunk/twistedcaldav/test/test_cache.py
    CalendarServer/trunk/twistedcaldav/util.py
    CalendarServer/trunk/txdav/base/datastore/file.py
    CalendarServer/trunk/txdav/base/propertystore/base.py
    CalendarServer/trunk/txdav/base/propertystore/sql.py
    CalendarServer/trunk/txdav/base/propertystore/xattr.py
    CalendarServer/trunk/txdav/caldav/datastore/file.py
    CalendarServer/trunk/txdav/caldav/datastore/index_file.py
    CalendarServer/trunk/txdav/caldav/datastore/scheduling/imip/inbound.py
    CalendarServer/trunk/txdav/caldav/datastore/scheduling/imip/mailgateway.py
    CalendarServer/trunk/txdav/caldav/datastore/scheduling/imip/outbound.py
    CalendarServer/trunk/txdav/caldav/datastore/scheduling/ischedule/resource.py
    CalendarServer/trunk/txdav/caldav/datastore/scheduling/scheduler.py
    CalendarServer/trunk/txdav/caldav/datastore/sql.py
    CalendarServer/trunk/txdav/caldav/resource.py
    CalendarServer/trunk/txdav/carddav/datastore/file.py
    CalendarServer/trunk/txdav/carddav/datastore/index_file.py
    CalendarServer/trunk/txdav/carddav/datastore/sql.py
    CalendarServer/trunk/txdav/carddav/resource.py
    CalendarServer/trunk/txdav/common/datastore/file.py
    CalendarServer/trunk/txdav/common/datastore/sql.py
    CalendarServer/trunk/txdav/common/datastore/sql_legacy.py
    CalendarServer/trunk/txdav/common/datastore/upgrade/migrate.py
    CalendarServer/trunk/txdav/common/datastore/upgrade/sql/others/attachment_migration.py
    CalendarServer/trunk/txdav/common/datastore/upgrade/sql/upgrade.py

Modified: CalendarServer/trunk/calendarserver/push/amppush.py
===================================================================
--- CalendarServer/trunk/calendarserver/push/amppush.py	2013-05-29 23:02:53 UTC (rev 11259)
+++ CalendarServer/trunk/calendarserver/push/amppush.py	2013-05-30 01:18:39 UTC (rev 11260)
@@ -15,7 +15,7 @@
 ##
 
 from calendarserver.push.util import PushScheduler
-from twext.python.log import Logger, LoggingMixIn
+from twext.python.log import Logger
 from twisted.internet.defer import inlineCallbacks, returnValue
 from twisted.internet.endpoints import TCP4ClientEndpoint
 from twisted.internet.protocol import Factory, ServerFactory
@@ -55,7 +55,8 @@
 
 # Server classes
 
-class AMPPushForwardingFactory(Factory, LoggingMixIn):
+class AMPPushForwardingFactory(Factory):
+    log = Logger()
 
     def __init__(self, forwarder):
         self.forwarder = forwarder
@@ -68,10 +69,12 @@
 
 
 
-class AMPPushForwarder(LoggingMixIn):
+class AMPPushForwarder(object):
     """
     Runs in the slaves, forwards notifications to the master via AMP
     """
+    log = Logger()
+
     def __init__(self, controlSocket):
         self.protocols = []
         controlSocket.addFactory(PUSH_ROUTE, AMPPushForwardingFactory(self))
@@ -87,10 +90,12 @@
 
 
 
-class AMPPushMasterListeningProtocol(amp.AMP, LoggingMixIn):
+class AMPPushMasterListeningProtocol(amp.AMP):
     """
     Listens for notifications coming in over AMP from the slaves
     """
+    log = Logger()
+
     def __init__(self, master):
         super(AMPPushMasterListeningProtocol, self).__init__()
         self.master = master
@@ -105,7 +110,8 @@
 
 
 
-class AMPPushMasterListenerFactory(Factory, LoggingMixIn):
+class AMPPushMasterListenerFactory(Factory):
+    log = Logger()
 
     def __init__(self, master):
         self.master = master
@@ -117,11 +123,12 @@
 
 
 
-class AMPPushMaster(LoggingMixIn):
+class AMPPushMaster(object):
     """
     AMPPushNotifierService allows clients to use AMP to subscribe to,
     and receive, change notifications.
     """
+    log = Logger()
 
     def __init__(self, controlSocket, parentService, port, enableStaggering,
         staggerSeconds, reactor=None):
@@ -150,12 +157,12 @@
 
 
     def addSubscriber(self, p):
-        self.log_debug("Added subscriber")
+        self.log.debug("Added subscriber")
         self.subscribers.append(p)
 
 
     def removeSubscriber(self, p):
-        self.log_debug("Removed subscriber")
+        self.log.debug("Removed subscriber")
         self.subscribers.remove(p)
 
 
@@ -204,7 +211,8 @@
 
 
 
-class AMPPushNotifierProtocol(amp.AMP, LoggingMixIn):
+class AMPPushNotifierProtocol(amp.AMP):
+    log = Logger()
 
     def __init__(self, service):
         super(AMPPushNotifierProtocol, self).__init__()
@@ -231,7 +239,7 @@
 
     def notify(self, token, id, dataChangedTimestamp):
         if self.subscribedToID(id) == token:
-            self.log_debug("Sending notification for %s to %s" % (id, token))
+            self.log.debug("Sending notification for %s to %s" % (id, token))
             return self.callRemote(NotificationForID, id=id,
                 dataChangedTimestamp=dataChangedTimestamp)
 
@@ -247,7 +255,8 @@
 
 
 
-class AMPPushNotifierFactory(ServerFactory, LoggingMixIn):
+class AMPPushNotifierFactory(ServerFactory):
+    log = Logger()
 
     protocol = AMPPushNotifierProtocol
 
@@ -286,7 +295,8 @@
 
 
 
-class AMPPushClientFactory(Factory, LoggingMixIn):
+class AMPPushClientFactory(Factory):
+    log = Logger()
 
     protocol = AMPPushClientProtocol
 

Modified: CalendarServer/trunk/calendarserver/push/applepush.py
===================================================================
--- CalendarServer/trunk/calendarserver/push/applepush.py	2013-05-29 23:02:53 UTC (rev 11259)
+++ CalendarServer/trunk/calendarserver/push/applepush.py	2013-05-30 01:18:39 UTC (rev 11260)
@@ -16,7 +16,7 @@
 ##
 
 from twext.internet.ssl import ChainingOpenSSLContextFactory
-from twext.python.log import Logger, LoggingMixIn
+from twext.python.log import Logger
 
 from twext.web2 import responsecode
 from txdav.xml import element as davxml
@@ -46,7 +46,7 @@
 
 
 
-class ApplePushNotifierService(service.MultiService, LoggingMixIn):
+class ApplePushNotifierService(service.MultiService):
     """
     ApplePushNotifierService is a MultiService responsible for
     setting up the APN provider and feedback connections.  Once
@@ -57,6 +57,7 @@
 
     http://developer.apple.com/library/ios/#documentation/NetworkingInternet/Conceptual/RemoteNotificationsPG/CommunicatingWIthAPS/CommunicatingWIthAPS.html
     """
+    log = Logger()
 
     @classmethod
     def makeService(cls, settings, store, testConnectorClass=None,
@@ -117,7 +118,7 @@
                 )
                 provider.setServiceParent(service)
                 service.providers[protocol] = provider
-                service.log_info("APNS %s topic: %s" %
+                service.log.info("APNS %s topic: %s" %
                     (protocol, settings[protocol]["Topic"]))
 
                 feedback = APNFeedbackService(
@@ -144,7 +145,7 @@
         LoopingCall whose job it is to purge old subscriptions
         """
         service.MultiService.startService(self)
-        self.log_debug("ApplePushNotifierService startService")
+        self.log.debug("ApplePushNotifierService startService")
         self.purgeCall = LoopingCall(self.purgeOldSubscriptions, self.purgeSeconds)
         self.purgeCall.start(self.purgeIntervalSeconds, now=False)
 
@@ -155,7 +156,7 @@
         LoopingCall
         """
         service.MultiService.stopService(self)
-        self.log_debug("ApplePushNotifierService stopService")
+        self.log.debug("ApplePushNotifierService stopService")
         if self.purgeCall is not None:
             self.purgeCall.stop()
             self.purgeCall = None
@@ -169,7 +170,7 @@
         @param purgeSeconds: The cutoff given in seconds
         @type purgeSeconds: C{int}
         """
-        self.log_debug("ApplePushNotifierService purgeOldSubscriptions")
+        self.log.debug("ApplePushNotifierService purgeOldSubscriptions")
         txn = self.store.newTransaction()
         yield txn.purgeOldAPNSubscriptions(int(time.time()) - purgeSeconds)
         yield txn.commit()
@@ -196,7 +197,7 @@
             protocol = pushKey.split("/")[1]
         except ValueError:
             # pushKey has no protocol, so we can't do anything with it
-            self.log_error("Push key '%s' is missing protocol" % (pushKey,))
+            self.log.error("Push key '%s' is missing protocol" % (pushKey,))
             return
 
         # Unit tests can pass this value in; otherwise it defaults to now
@@ -211,7 +212,7 @@
 
             numSubscriptions = len(subscriptions)
             if numSubscriptions > 0:
-                self.log_debug("Sending %d APNS notifications for %s" %
+                self.log.debug("Sending %d APNS notifications for %s" %
                     (numSubscriptions, pushKey))
                 tokens = []
                 for token, uid in subscriptions:
@@ -222,10 +223,11 @@
 
 
 
-class APNProviderProtocol(protocol.Protocol, LoggingMixIn):
+class APNProviderProtocol(protocol.Protocol):
     """
     Implements the Provider portion of APNS
     """
+    log = Logger()
 
     # Sent by provider
     COMMAND_SIMPLE = 0
@@ -256,12 +258,12 @@
 
     def makeConnection(self, transport):
         self.history = TokenHistory()
-        self.log_debug("ProviderProtocol makeConnection")
+        self.log.debug("ProviderProtocol makeConnection")
         protocol.Protocol.makeConnection(self, transport)
 
 
     def connectionMade(self):
-        self.log_debug("ProviderProtocol connectionMade")
+        self.log.debug("ProviderProtocol connectionMade")
         self.buffer = ""
         # Store a reference to ourself on the factory so the service can
         # later call us
@@ -270,7 +272,7 @@
 
 
     def connectionLost(self, reason=None):
-        # self.log_debug("ProviderProtocol connectionLost: %s" % (reason,))
+        # self.log.debug("ProviderProtocol connectionLost: %s" % (reason,))
         # Clear the reference to us from the factory
         self.factory.connection = None
 
@@ -285,7 +287,7 @@
         if fn is None:
             fn = self.processError
 
-        self.log_debug("ProviderProtocol dataReceived %d bytes" % (len(data),))
+        self.log.debug("ProviderProtocol dataReceived %d bytes" % (len(data),))
         self.buffer += data
 
         while len(self.buffer) >= self.MESSAGE_LENGTH:
@@ -297,7 +299,7 @@
                 if command == self.COMMAND_ERROR:
                     yield fn(status, identifier)
             except Exception, e:
-                self.log_warn("ProviderProtocol could not process error: %s (%s)" %
+                self.log.warn("ProviderProtocol could not process error: %s (%s)" %
                     (message.encode("hex"), e))
 
 
@@ -316,16 +318,16 @@
         @type status: C{int}
         """
         msg = self.STATUS_CODES.get(status, "Unknown status code")
-        self.log_info("Received APN error %d on identifier %d: %s" % (status, identifier, msg))
+        self.log.info("Received APN error %d on identifier %d: %s" % (status, identifier, msg))
         if status in self.TOKEN_REMOVAL_CODES:
             token = self.history.extractIdentifier(identifier)
             if token is not None:
-                self.log_debug("Removing subscriptions for bad token: %s" %
+                self.log.debug("Removing subscriptions for bad token: %s" %
                     (token,))
                 txn = self.factory.store.newTransaction()
                 subscriptions = (yield txn.apnSubscriptionsByToken(token))
                 for key, _ignore_modified, _ignore_uid in subscriptions:
-                    self.log_debug("Removing subscription: %s %s" %
+                    self.log.debug("Removing subscription: %s %s" %
                         (token, key))
                     yield txn.removeAPNSubscription(token, key)
                 yield txn.commit()
@@ -351,7 +353,7 @@
         try:
             binaryToken = token.replace(" ", "").decode("hex")
         except:
-            self.log_error("Invalid APN token in database: %s" % (token,))
+            self.log.error("Invalid APN token in database: %s" % (token,))
             return
 
         identifier = self.history.add(token)
@@ -363,7 +365,7 @@
             }
         )
         payloadLength = len(payload)
-        self.log_debug("Sending APNS notification to %s: id=%d payload=%s" %
+        self.log.debug("Sending APNS notification to %s: id=%d payload=%s" %
             (token, identifier, payload))
 
         self.transport.write(
@@ -380,7 +382,8 @@
 
 
 
-class APNProviderFactory(ReconnectingClientFactory, LoggingMixIn):
+class APNProviderFactory(ReconnectingClientFactory):
+    log = Logger()
 
     protocol = APNProviderProtocol
 
@@ -393,26 +396,26 @@
 
 
     def clientConnectionMade(self):
-        self.log_info("Connection to APN server made")
+        self.log.info("Connection to APN server made")
         self.service.clientConnectionMade()
         self.delay = 1.0
 
 
     def clientConnectionLost(self, connector, reason):
         if not self.shuttingDown:
-            self.log_info("Connection to APN server lost: %s" % (reason,))
+            self.log.info("Connection to APN server lost: %s" % (reason,))
         ReconnectingClientFactory.clientConnectionLost(self, connector, reason)
 
 
     def clientConnectionFailed(self, connector, reason):
-        self.log_error("Unable to connect to APN server: %s" % (reason,))
+        self.log.error("Unable to connect to APN server: %s" % (reason,))
         self.connected = False
         ReconnectingClientFactory.clientConnectionFailed(self, connector,
             reason)
 
 
     def retry(self, connector=None):
-        self.log_info("Reconnecting to APN server")
+        self.log.info("Reconnecting to APN server")
         ReconnectingClientFactory.retry(self, connector)
 
 
@@ -422,7 +425,8 @@
 
 
 
-class APNConnectionService(service.Service, LoggingMixIn):
+class APNConnectionService(service.Service):
+    log = Logger()
 
     def __init__(self, host, port, certPath, keyPath, chainPath="",
         passphrase="", sslMethod="TLSv1_METHOD", testConnector=None,
@@ -485,13 +489,13 @@
 
 
     def startService(self):
-        self.log_debug("APNProviderService startService")
+        self.log.debug("APNProviderService startService")
         self.factory = APNProviderFactory(self, self.store)
         self.connect(self.factory)
 
 
     def stopService(self):
-        self.log_debug("APNProviderService stopService")
+        self.log.debug("APNProviderService stopService")
         if self.factory is not None:
             self.factory.stopTrying()
         if self.scheduler is not None:
@@ -555,10 +559,10 @@
             tokenKeyPair = (token, key)
             for existingPair, ignored in self.queue:
                 if tokenKeyPair == existingPair:
-                    self.log_debug("APNProviderService has no connection; skipping duplicate: %s %s" % (token, key))
+                    self.log.debug("APNProviderService has no connection; skipping duplicate: %s %s" % (token, key))
                     break # Already scheduled
             else:
-                self.log_debug("APNProviderService has no connection; queuing: %s %s" % (token, key))
+                self.log.debug("APNProviderService has no connection; queuing: %s %s" % (token, key))
                 self.queue.append(((token, key), dataChangedTimestamp))
 
 
@@ -587,15 +591,16 @@
 
 
 
-class APNFeedbackProtocol(protocol.Protocol, LoggingMixIn):
+class APNFeedbackProtocol(protocol.Protocol):
     """
     Implements the Feedback portion of APNS
     """
+    log = Logger()
 
     MESSAGE_LENGTH = 38
 
     def connectionMade(self):
-        self.log_debug("FeedbackProtocol connectionMade")
+        self.log.debug("FeedbackProtocol connectionMade")
         self.buffer = ""
 
 
@@ -609,7 +614,7 @@
         if fn is None:
             fn = self.processFeedback
 
-        self.log_debug("FeedbackProtocol dataReceived %d bytes" % (len(data),))
+        self.log.debug("FeedbackProtocol dataReceived %d bytes" % (len(data),))
         self.buffer += data
 
         while len(self.buffer) >= self.MESSAGE_LENGTH:
@@ -622,7 +627,7 @@
                 token = binaryToken.encode("hex").lower()
                 yield fn(timestamp, token)
             except Exception, e:
-                self.log_warn("FeedbackProtocol could not process message: %s (%s)" %
+                self.log.warn("FeedbackProtocol could not process message: %s (%s)" %
                     (message.encode("hex"), e))
 
 
@@ -641,21 +646,22 @@
         @type token: C{str}
         """
 
-        self.log_debug("FeedbackProtocol processFeedback time=%d token=%s" %
+        self.log.debug("FeedbackProtocol processFeedback time=%d token=%s" %
             (timestamp, token))
         txn = self.factory.store.newTransaction()
         subscriptions = (yield txn.apnSubscriptionsByToken(token))
 
         for key, modified, _ignore_uid in subscriptions:
             if timestamp > modified:
-                self.log_debug("FeedbackProtocol removing subscription: %s %s" %
+                self.log.debug("FeedbackProtocol removing subscription: %s %s" %
                     (token, key))
                 yield txn.removeAPNSubscription(token, key)
         yield txn.commit()
 
 
 
-class APNFeedbackFactory(ClientFactory, LoggingMixIn):
+class APNFeedbackFactory(ClientFactory):
+    log = Logger()
 
     protocol = APNFeedbackProtocol
 
@@ -664,7 +670,7 @@
 
 
     def clientConnectionFailed(self, connector, reason):
-        self.log_error("Unable to connect to APN feedback server: %s" %
+        self.log.error("Unable to connect to APN feedback server: %s" %
             (reason,))
         self.connected = False
         ClientFactory.clientConnectionFailed(self, connector, reason)
@@ -686,20 +692,20 @@
 
 
     def startService(self):
-        self.log_debug("APNFeedbackService startService")
+        self.log.debug("APNFeedbackService startService")
         self.factory = APNFeedbackFactory(self.store)
         self.checkForFeedback()
 
 
     def stopService(self):
-        self.log_debug("APNFeedbackService stopService")
+        self.log.debug("APNFeedbackService stopService")
         if self.nextCheck is not None:
             self.nextCheck.cancel()
 
 
     def checkForFeedback(self):
         self.nextCheck = None
-        self.log_debug("APNFeedbackService checkForFeedback")
+        self.log.debug("APNFeedbackService checkForFeedback")
         self.connect(self.factory)
         self.nextCheck = self.reactor.callLater(self.updateSeconds,
             self.checkForFeedback)
@@ -707,7 +713,7 @@
 
 
 class APNSubscriptionResource(ReadOnlyNoCopyResourceMixIn,
-    DAVResourceWithoutChildrenMixin, DAVResource, LoggingMixIn):
+    DAVResourceWithoutChildrenMixin, DAVResource):
     """
     The DAV resource allowing clients to subscribe to Apple push notifications.
     To subscribe, a client should first determine the key they are interested
@@ -716,6 +722,7 @@
     request to this resource, passing their device token and the key in either
     the URL params or in the POST body.
     """
+    log = Logger()
 
     def __init__(self, parent, store):
         DAVResource.__init__(

Modified: CalendarServer/trunk/calendarserver/push/notifier.py
===================================================================
--- CalendarServer/trunk/calendarserver/push/notifier.py	2013-05-29 23:02:53 UTC (rev 11259)
+++ CalendarServer/trunk/calendarserver/push/notifier.py	2013-05-30 01:18:39 UTC (rev 11260)
@@ -21,7 +21,7 @@
 from twext.enterprise.dal.record import fromTable
 from twext.enterprise.dal.syntax import Delete
 from twext.enterprise.queue import WorkItem
-from twext.python.log import LoggingMixIn, Logger
+from twext.python.log import Logger
 
 from twisted.internet.defer import inlineCallbacks
 
@@ -58,11 +58,12 @@
 # Classes used within calendarserver itself
 #
 
-class Notifier(LoggingMixIn):
+class Notifier(object):
     """
     Provides a hook for sending change notifications to the
     L{NotifierFactory}.
     """
+    log = Logger()
 
     implements(IStoreNotifier)
 
@@ -73,12 +74,12 @@
 
 
     def enableNotify(self, arg):
-        self.log_debug("enableNotify: %s" % (self._ids['default'][1],))
+        self.log.debug("enableNotify: %s" % (self._ids['default'][1],))
         self._notify = True
 
 
     def disableNotify(self):
-        self.log_debug("disableNotify: %s" % (self._ids['default'][1],))
+        self.log.debug("disableNotify: %s" % (self._ids['default'][1],))
         self._notify = False
 
 
@@ -98,10 +99,10 @@
 
         for prefix, id in ids:
             if self._notify:
-                self.log_debug("Notifications are enabled: %s %s/%s" % (self._storeObject, prefix, id,))
+                self.log.debug("Notifications are enabled: %s %s/%s" % (self._storeObject, prefix, id,))
                 yield self._notifierFactory.send(prefix, id)
             else:
-                self.log_debug("Skipping notification for: %s %s/%s" % (self._storeObject, prefix, id,))
+                self.log.debug("Skipping notification for: %s %s/%s" % (self._storeObject, prefix, id,))
 
 
     def clone(self, storeObject):
@@ -124,13 +125,14 @@
 
 
 
-class NotifierFactory(LoggingMixIn):
+class NotifierFactory(object):
     """
     Notifier Factory
 
     Creates Notifier instances and forwards notifications from them to the
     work queue.
     """
+    log = Logger()
 
     implements(IStoreNotifierFactory)
 

Modified: CalendarServer/trunk/calendarserver/push/util.py
===================================================================
--- CalendarServer/trunk/calendarserver/push/util.py	2013-05-29 23:02:53 UTC (rev 11259)
+++ CalendarServer/trunk/calendarserver/push/util.py	2013-05-30 01:18:39 UTC (rev 11260)
@@ -15,7 +15,7 @@
 ##
 
 from OpenSSL import crypto
-from twext.python.log import LoggingMixIn
+from twext.python.log import Logger
 
 def getAPNTopicFromCertificate(certPath):
     """
@@ -108,10 +108,11 @@
 
 
 
-class PushScheduler(LoggingMixIn):
+class PushScheduler(object):
     """
     Allows staggered scheduling of push notifications
     """
+    log = Logger()
 
     def __init__(self, reactor, callback, staggerSeconds=1):
         """
@@ -146,12 +147,12 @@
         for token in tokens:
             internalKey = (token, key)
             if internalKey in self.outstanding:
-                self.log_debug("PushScheduler already has this scheduled: %s" %
+                self.log.debug("PushScheduler already has this scheduled: %s" %
                     (internalKey,))
             else:
                 self.outstanding[internalKey] = self.reactor.callLater(
                     scheduleTime, self.send, token, key, dataChangedTimestamp)
-                self.log_debug("PushScheduler scheduled: %s in %.0f sec" %
+                self.log.debug("PushScheduler scheduled: %s in %.0f sec" %
                     (internalKey, scheduleTime))
                 scheduleTime += self.staggerSeconds
 
@@ -170,7 +171,7 @@
             which triggered this notification
         @type key: C{int}
         """
-        self.log_debug("PushScheduler fired for %s %s %d" % (token, key, dataChangedTimestamp))
+        self.log.debug("PushScheduler fired for %s %s %d" % (token, key, dataChangedTimestamp))
         del self.outstanding[(token, key)]
         return self.callback(token, key, dataChangedTimestamp)
 
@@ -180,5 +181,5 @@
         Cancel all outstanding delayed calls
         """
         for (token, key), delayed in self.outstanding.iteritems():
-            self.log_debug("PushScheduler cancelling %s %s" % (token, key))
+            self.log.debug("PushScheduler cancelling %s %s" % (token, key))
             delayed.cancel()
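
For reference, a minimal usage sketch of PushScheduler as modified above.
The callback signature follows send(), the schedule() arguments are
inferred from the hunk above, and all concrete values are illustrative:

    from twisted.internet import reactor

    def sendNotification(token, key, dataChangedTimestamp):
        # Deliver one push notification; invoked once per scheduled token.
        pass

    scheduler = PushScheduler(reactor, sendNotification, staggerSeconds=1)
    # Delayed calls fire staggerSeconds apart, one per token; a
    # (token, key) pair already outstanding is skipped as a duplicate.
    scheduler.schedule(["token-a", "token-b"], "some-push-key", 1369872000)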

Modified: CalendarServer/trunk/calendarserver/tap/caldav.py
===================================================================
--- CalendarServer/trunk/calendarserver/tap/caldav.py	2013-05-29 23:02:53 UTC (rev 11259)
+++ CalendarServer/trunk/calendarserver/tap/caldav.py	2013-05-30 01:18:39 UTC (rev 11260)
@@ -56,7 +56,7 @@
 from twistedcaldav.config import config, ConfigurationError
 from twistedcaldav.stdconfig import DEFAULT_CONFIG, DEFAULT_CONFIG_FILE
 from twext.web2.server import Site
-from twext.python.log import Logger, LoggingMixIn
+from twext.python.log import Logger
 from twext.python.log import LogLevel, logLevelForNamespace, setLogLevelForNamespace
 from twext.python.filepath import CachingFilePath
 from twext.internet.ssl import ChainingOpenSSLContextFactory
@@ -281,7 +281,9 @@
 
 
 
-class CalDAVOptions (Options, LoggingMixIn):
+class CalDAVOptions (Options):
+    log = Logger()
+
     optParameters = [[
         "config", "f", DEFAULT_CONFIG_FILE, "Path to configuration file."
     ]]
@@ -430,7 +432,7 @@
         # Check current umask and warn if changed
         oldmask = os.umask(config.umask)
         if oldmask != config.umask:
-            self.log_info("WARNING: changing umask from: 0%03o to 0%03o"
+            self.log.info("WARNING: changing umask from: 0%03o to 0%03o"
                           % (oldmask, config.umask))
         self.parent['umask'] = config.umask
 
@@ -496,10 +498,11 @@
 
 
 
-class WorkSchedulingService(Service, LoggingMixIn):
+class WorkSchedulingService(Service):
     """
     A Service to kick off the initial scheduling of periodic work items.
     """
+    log = Logger()
 
     def __init__(self, store, doImip, doGroupCaching):
         """
@@ -527,10 +530,11 @@
 
 
 
-class ReExecService(MultiService, LoggingMixIn):
+class ReExecService(MultiService):
     """
     A MultiService which catches SIGHUP and re-exec's the process.
     """
+    log = Logger()
 
     def __init__(self, pidfilePath, reactor=None):
         """
@@ -550,9 +554,9 @@
         Removes pidfile, registers an exec to happen after shutdown, then
         stops the reactor.
         """
-        self.log_info("SIGHUP received - restarting")
+        self.log.info("SIGHUP received - restarting")
         try:
-            self.log_info("Removing pidfile: %s" % (self.pidfilePath,))
+            self.log.info("Removing pidfile: %s" % (self.pidfilePath,))
             os.remove(self.pidfilePath)
         except OSError:
             pass
@@ -706,7 +710,9 @@
 
 
 
-class CalDAVServiceMaker (LoggingMixIn):
+class CalDAVServiceMaker (object):
+    log = Logger()
+
     implements(IPlugin, IServiceMaker)
 
     tapname = "caldav"
@@ -718,7 +724,7 @@
         """
         Create the top-level service.
         """
-        self.log_info("%s %s starting %s process..." % (self.description, version, config.ProcessType))
+        self.log.info("%s %s starting %s process..." % (self.description, version, config.ProcessType))
 
         try:
             from setproctitle import setproctitle
@@ -810,12 +816,12 @@
 
         if config.ControlSocket:
             id = config.ControlSocket
-            self.log_info("Control via AF_UNIX: %s" % (id,))
+            self.log.info("Control via AF_UNIX: %s" % (id,))
             endpointFactory = lambda reactor: UNIXClientEndpoint(
                 reactor, id)
         else:
             id = int(config.ControlPort)
-            self.log_info("Control via AF_INET: %d" % (id,))
+            self.log.info("Control via AF_INET: %d" % (id,))
             endpointFactory = lambda reactor: TCP4ClientEndpoint(
                 reactor, "127.0.0.1", id)
         controlSocketClient = ControlSocket()
@@ -929,9 +935,9 @@
         #
         # Configure the service
         #
-        self.log_info("Setting up service")
+        self.log.info("Setting up service")
 
-        self.log_info("Configuring access log observer: %s" % (logObserver,))
+        self.log.info("Configuring access log observer: %s" % (logObserver,))
         service = CalDAVService(logObserver)
 
         rootResource = getRootResource(config, store, additional)
@@ -948,7 +954,7 @@
         requestFactory = underlyingSite
 
         if config.RedirectHTTPToHTTPS:
-            self.log_info("Redirecting to HTTPS port %s" % (config.SSLPort,))
+            self.log.info("Redirecting to HTTPS port %s" % (config.SSLPort,))
             def requestFactory(*args, **kw):
                 return SSLRedirectRequest(site=underlyingSite, *args, **kw)
 
@@ -1028,7 +1034,7 @@
             try:
                 contextFactory = self.createContextFactory()
             except SSLError, e:
-                self.log_error("Unable to set up SSL context factory: %s" % (e,))
+                self.log.error("Unable to set up SSL context factory: %s" % (e,))
                 # None is okay as a context factory for ReportingHTTPService as
                 # long as we will never receive a file descriptor with the
                 # 'SSL' tag on it, since that's the only time it's used.
@@ -1043,15 +1049,15 @@
                 self._validatePortConfig()
                 if config.EnableSSL:
                     for port in config.BindSSLPorts:
-                        self.log_info("Adding SSL server at %s:%s"
+                        self.log.info("Adding SSL server at %s:%s"
                                       % (bindAddress, port))
 
                         try:
                             contextFactory = self.createContextFactory()
                         except SSLError, e:
-                            self.log_error("Unable to set up SSL context factory: %s"
+                            self.log.error("Unable to set up SSL context factory: %s"
                                            % (e,))
-                            self.log_error("Disabling SSL port: %s" % (port,))
+                            self.log.error("Disabling SSL port: %s" % (port,))
                         else:
                             httpsService = MaxAcceptSSLServer(
                                 int(port), httpFactory,
@@ -1199,7 +1205,7 @@
         # directly using Popen to spawn memcached.
         for name, pool in config.Memcached.Pools.items():
             if pool.ServerEnabled:
-                self.log_info(
+                self.log.info(
                     "Adding memcached service for pool: %s" % (name,)
                 )
                 memcachedArgv = [
@@ -1449,7 +1455,7 @@
 
         for name, pool in config.Memcached.Pools.items():
             if pool.ServerEnabled:
-                self.log_info(
+                self.log.info(
                     "Adding memcached service for pool: %s" % (name,)
                 )
                 memcachedArgv = [
@@ -1571,7 +1577,7 @@
             # connection pool implementation that can dispense transactions
             # synchronously as the interface requires.
             if pool is not None and config.SharedConnectionPool:
-                self.log_warn("Using Shared Connection Pool")
+                self.log.warn("Using Shared Connection Pool")
                 dispenser = ConnectionDispenser(pool)
             else:
                 dispenser = None
@@ -1633,7 +1639,7 @@
             if (os.path.exists(checkSocket)):
                 # See if the file represents a socket.  If not, delete it.
                 if (not stat.S_ISSOCK(os.stat(checkSocket).st_mode)):
-                    self.log_warn("Deleting stale socket file (not a socket): %s" % checkSocket)
+                    self.log.warn("Deleting stale socket file (not a socket): %s" % checkSocket)
                     os.remove(checkSocket)
                 else:
                     # It looks like a socket.  See if it's accepting connections.
@@ -1649,7 +1655,7 @@
                     # If the file didn't connect on any expected ports,
                     # consider it stale and remove it.
                     if numConnectFailures == len(testPorts):
-                        self.log_warn("Deleting stale socket file (not accepting connections): %s" % checkSocket)
+                        self.log.warn("Deleting stale socket file (not accepting connections): %s" % checkSocket)
                         os.remove(checkSocket)
 
 

Modified: CalendarServer/trunk/calendarserver/webcal/resource.py
===================================================================
--- CalendarServer/trunk/calendarserver/webcal/resource.py	2013-05-29 23:02:53 UTC (rev 11259)
+++ CalendarServer/trunk/calendarserver/webcal/resource.py	2013-05-30 01:18:39 UTC (rev 11260)
@@ -155,7 +155,7 @@
         tzid = queryValue("tzid")
         if not tzid:
             tzid = getLocalTimezone()
-            self.log_debug("Determined timezone to be %s" % (tzid,))
+            self.log.debug("Determined timezone to be %s" % (tzid,))
 
         #
         # Make some HTML
@@ -166,7 +166,7 @@
                 "principalURL": authenticatedPrincipalURL,
             }
         except IOError, e:
-            self.log_error("Unable to obtain WebCalendar template: %s" % (e,))
+            self.log.error("Unable to obtain WebCalendar template: %s" % (e,))
             return responsecode.NOT_FOUND
 
         response = Response()

Modified: CalendarServer/trunk/twext/python/log.py
===================================================================
--- CalendarServer/trunk/twext/python/log.py	2013-05-29 23:02:53 UTC (rev 11259)
+++ CalendarServer/trunk/twext/python/log.py	2013-05-30 01:18:39 UTC (rev 11260)
@@ -28,18 +28,19 @@
 
 Or in a class:
 
-    from twext.python.log import LoggingMixIn
+    from twext.python.log import Logger
 
-    class Foo(LoggingMixIn):
+    class Foo(object):
+        log = Logger()
+
         def oops(self):
-            self.log_error("Oops!")
+            self.log.error("Oops!")
 
 C{Logger}s have namespaces, for which logging can be configured independently.
 Namespaces may be specified by passing in a C{namespace} argument to L{Logger}
 when instantiating it, but if none is given, the logger will derive its own
 namespace by using the module name of the callable that instantiated it, or, in
-the case of a class using L{LoggingMixIn}, by using the fully qualified name of
-the class.
+the case of a class, by using the fully qualified name of the class.
 
 In the first example above, the namespace would be C{some.module}, and in the
 second example, it would be C{some.module.Foo}.
@@ -50,10 +51,8 @@
 #
 # * TwistedCompatibleLogger.err is setting isError=0 until we fix our callers
 #
-# * Get rid of LoggingMixIn
+# * Replace message argument with format argument
 #
-# * Replace method argument with format argument
-#
 
 __all__ = [
     "LogLevel",
@@ -61,7 +60,6 @@
     "setLogLevelForNamespace",
     "clearLogLevels",
     "Logger",
-    "LoggingMixIn",
     "InvalidLogLevelError",
     "StandardIOObserver",
 ]
@@ -80,6 +78,9 @@
 
 
 class LogLevel(Names):
+    """
+    Constants denoting log levels.
+    """
     debug = NamedConstant()
     info  = NamedConstant()
     warn  = NamedConstant()
@@ -93,7 +94,6 @@
             raise InvalidLogLevelError(name)
 
 
-
 #
 # Mappings to Python's logging module
 #
@@ -320,31 +320,6 @@
 
 
 
-class LoggingMixIn(object):
-    """
-    Mix-in class for logging methods.
-    """
-    def _getLogger(self):
-        try:
-            return self._logger
-        except AttributeError:
-            self._logger = Logger(
-                "%s.%s" % (
-                    self.__class__.__module__,
-                    self.__class__.__name__,
-                )
-            )
-
-        return self._logger
-
-
-    def _setLogger(self, value):
-        self._logger = value
-
-    logger = property(_getLogger, _setLogger)
-
-
-
 def bindEmit(level):
     doc = """
     Emit a log message at log level C{%s}.
@@ -367,24 +342,6 @@
     setattr(Logger, level.name, log_emit)
     setattr(Logger, level.name + "_enabled", property(will_emit))
 
-    #
-    # Attach methods to LoggingMixIn
-    #
-    def log_emit(self, message=None, raiseException=None, **kwargs):
-        self.logger.emit(level, message, **kwargs)
-        if raiseException:
-            raise raiseException(message)
-
-    def will_emit(self=log_emit):
-        return self.logger.willLogAtLevel(level)
-
-    log_emit.__doc__ = doc
-    log_emit.enabled = will_emit
-
-    setattr(LoggingMixIn, "log_" + level.name, log_emit)
-    setattr(LoggingMixIn, "log_" + level.name + "_enabled", property(will_emit))
-
-
 for level in LogLevel.iterconstants(): 
     bindEmit(level)
 del level
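
With the mix-in gone, namespaces come from L{Logger} alone: a bare
Logger() instantiated at module scope derives its namespace from the
module, while one assigned as a class attribute derives it from the
fully qualified class name (per the updated docstring and the
test_namespace_attribute test below). A minimal sketch, assuming a
module named some.module:

    from twext.python.log import Logger

    class Foo(object):
        log = Logger()

    # The namespace is "some.module.Foo" when accessed via the class,
    # so log levels can still be tuned per-class, e.g.:
    #     setLogLevelForNamespace("some.module.Foo", LogLevel.debug)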

Modified: CalendarServer/trunk/twext/python/test/test_log.py
===================================================================
--- CalendarServer/trunk/twext/python/test/test_log.py	2013-05-29 23:02:53 UTC (rev 11259)
+++ CalendarServer/trunk/twext/python/test/test_log.py	2013-05-30 01:18:39 UTC (rev 11260)
@@ -21,7 +21,7 @@
 from twext.python.log import logLevelsByNamespace, logLevelForNamespace
 from twext.python.log import LogLevel, setLogLevelForNamespace, clearLogLevels
 from twext.python.log import pythonLogLevelMapping
-from twext.python.log import Logger, LoggingMixIn
+from twext.python.log import Logger
 
 from twistedcaldav.test.util import TestCase
 
@@ -65,11 +65,6 @@
 
 
 
-class LoggingEnabledObject(LoggingMixIn):
-    pass
-
-
-
 class Logging(TestCase):
     def setUp(self):
         super(Logging, self).setUp()
@@ -89,14 +84,6 @@
         self.assertEquals(log.namespace, __name__)
 
 
-    def test_namespace_mixin(self):
-        """
-        Default namespace for classes using L{LoggingMixIn} is the class name.
-        """
-        object = LoggingEnabledObject()
-        self.assertEquals(object.logger.namespace, "twext.python.test.test_log.LoggingEnabledObject")
-
-
     def test_namespace_attribute(self):
         """
         Default namespace for classes using L{Logger} as a descriptor is the
@@ -155,39 +142,6 @@
                 self.assertFalse(hasattr(log, "eventDict"))
 
 
-    def test_basic_LoggingMixIn(self):
-        """
-        Test that log levels and messages are emitted correctly for
-        LoggingMixIn.
-        """
-        for level in LogLevel.iterconstants():
-            message = "This is a %s message" % (level.name,)
-
-            object = LoggingEnabledObject()
-            object.logger = TestLogger()
-
-            method = getattr(object, "log_" + level.name)
-            method(message, junk=message)
-
-            # Ensure that test_emit got called with expected arguments
-            self.assertEquals(object.logger.emitted["level"], level)
-            self.assertEquals(object.logger.emitted["message"], message)
-            self.assertEquals(object.logger.emitted["kwargs"]["junk"], message)
-
-            if object.logger.willLogAtLevel(level):
-                self.assertEquals(object.logger.eventDict["level"], level)
-                self.assertEquals(object.logger.eventDict["logLevel"], pythonLogLevelMapping[level])
-                self.assertEquals(object.logger.eventDict["junk"], message)
-
-                # FIXME: this checks the end of message because we do formatting in emit()
-                self.assertEquals(
-                    twistedLogging.textFromEventDict(object.logger.eventDict)[-len(message):],
-                    message
-                )
-            else:
-                self.assertFalse(hasattr(object.logger, "eventDict"))
-
-
     def test_conflicting_kwargs(self):
         """
         Make sure that conflicting kwargs don't pass through.
@@ -262,18 +216,3 @@
                 self.assertTrue(log.willLogAtLevel(level))
             else:
                 self.assertFalse(log.willLogAtLevel(level))
-
-
-    def test_logMethodTruthiness_LoggingMixIn(self):
-        """
-        LoggingMixIn's log level functions/methods have true/false
-        value based on whether they will log.
-        """
-        object = LoggingEnabledObject()
-
-        for level in LogLevel.iterconstants():
-            enabled = getattr(object, "log_" + level.name + "_enabled")
-            if enabled:
-                self.assertTrue(object.logger.willLogAtLevel(level))
-            else:
-                self.assertFalse(object.logger.willLogAtLevel(level))

Modified: CalendarServer/trunk/twistedcaldav/authkerb.py
===================================================================
--- CalendarServer/trunk/twistedcaldav/authkerb.py	2013-05-29 23:02:53 UTC (rev 11259)
+++ CalendarServer/trunk/twistedcaldav/authkerb.py	2013-05-30 01:18:39 UTC (rev 11260)
@@ -46,12 +46,13 @@
 from twext.web2.auth.interfaces import ICredentialFactory
 from twext.web2.dav.auth import IPrincipalCredentials
 
-from twext.python.log import LoggingMixIn
+from twext.python.log import Logger
 
-class KerberosCredentialFactoryBase(LoggingMixIn):
+class KerberosCredentialFactoryBase(object):
     """
     Code common to Kerberos-based credential factories.
     """
+    log = Logger()
 
     implements(ICredentialFactory)
 
@@ -76,7 +77,7 @@
             try:
                 principal = kerberos.getServerPrincipalDetails(type, hostname)
             except kerberos.KrbError, ex:
-                self.log_error("getServerPrincipalDetails: %s" % (ex[0],))
+                self.log.error("getServerPrincipalDetails: %s" % (ex[0],))
                 raise ValueError('Authentication System Failure: %s' % (ex[0],))
 
         self.service, self.realm = self._splitPrincipal(principal)
@@ -90,7 +91,7 @@
             service = splits[0].upper()
             realm = splits[1]
         except IndexError:
-            self.log_error("Invalid Kerberos principal: %s" % (principal,))
+            self.log.error("Invalid Kerberos principal: %s" % (principal,))
             raise ValueError('Authentication System Failure: Invalid Kerberos principal: %s' % (principal,))
                 
         service = "%s@%s" % (servicetype, service,)
@@ -160,7 +161,8 @@
             return succeed(c)
         raise error.LoginFailed('Invalid credentials')
 
-class BasicKerberosCredentialsChecker(LoggingMixIn):
+class BasicKerberosCredentialsChecker(object):
+    log = Logger()
 
     implements(checkers.ICredentialsChecker)
 
@@ -176,7 +178,7 @@
             try:
                 kerberos.checkPassword(creds.username, creds.password, creds.service, creds.default_realm)
             except kerberos.BasicAuthError, ex:
-                self.log_error("%s" % (ex[0],))
+                self.log.error("%s" % (ex[0],))
                 raise error.UnauthorizedLogin("Bad credentials for: %s (%s: %s)" % (pcreds.authnURI, ex[0], ex[1],))
             else:
                 return succeed((
@@ -231,18 +233,18 @@
         try:
             _ignore_result, context = kerberos.authGSSServerInit("");
         except kerberos.GSSError, ex:
-            self.log_error("authGSSServerInit: %s(%s)" % (ex[0][0], ex[1][0],))
+            self.log.error("authGSSServerInit: %s(%s)" % (ex[0][0], ex[1][0],))
             raise error.LoginFailed('Authentication System Failure: %s(%s)' % (ex[0][0], ex[1][0],))
 
         # Do the GSSAPI step and get response and username
         try:
             kerberos.authGSSServerStep(context, base64data);
         except kerberos.GSSError, ex:
-            self.log_error("authGSSServerStep: %s(%s)" % (ex[0][0], ex[1][0],))
+            self.log.error("authGSSServerStep: %s(%s)" % (ex[0][0], ex[1][0],))
             kerberos.authGSSServerClean(context)
             raise error.UnauthorizedLogin('Bad credentials: %s(%s)' % (ex[0][0], ex[1][0],))
         except kerberos.KrbError, ex:
-            self.log_error("authGSSServerStep: %s" % (ex[0],))
+            self.log.error("authGSSServerStep: %s" % (ex[0],))
             kerberos.authGSSServerClean(context)
             raise error.UnauthorizedLogin('Bad credentials: %s' % (ex[0],))
 
@@ -250,11 +252,11 @@
         try:
             service, _ignore_realm = self._splitPrincipal(targetname)
         except ValueError:
-            self.log_error("authGSSServerTargetName invalid target name: '%s'" % (targetname,))
+            self.log.error("authGSSServerTargetName invalid target name: '%s'" % (targetname,))
             kerberos.authGSSServerClean(context)
             raise error.UnauthorizedLogin('Bad credentials: bad target name %s' % (targetname,))
         if service.lower() != self.service.lower():
-            self.log_error("authGSSServerTargetName mismatch got: '%s' wanted: '%s'" % (service, self.service))
+            self.log.error("authGSSServerTargetName mismatch got: '%s' wanted: '%s'" % (service, self.service))
             kerberos.authGSSServerClean(context)
             raise error.UnauthorizedLogin('Bad credentials: wrong target name %s' % (targetname,))
 
@@ -278,7 +280,7 @@
         try:
             kerberos.authGSSServerClean(context);
         except kerberos.GSSError, ex:
-            self.log_error("authGSSServerClean: %s" % (ex[0][0], ex[1][0],))
+            self.log.error("authGSSServerClean: %s" % (ex[0][0], ex[1][0],))
             raise error.LoginFailed('Authentication System Failure %s(%s)' % (ex[0][0], ex[1][0],))
         
         # If we successfully decoded and verified the Kerberos credentials we need to add the Kerberos

Modified: CalendarServer/trunk/twistedcaldav/cache.py
===================================================================
--- CalendarServer/trunk/twistedcaldav/cache.py	2013-05-29 23:02:53 UTC (rev 11259)
+++ CalendarServer/trunk/twistedcaldav/cache.py	2013-05-30 01:18:39 UTC (rev 11260)
@@ -14,7 +14,7 @@
 # limitations under the License.
 ##
 
-from twext.python.log import LoggingMixIn
+from twext.python.log import Logger
 from twext.web2.dav.util import allDataFromStream
 from twext.web2.http import Response
 from twext.web2.iweb import IResource
@@ -101,10 +101,11 @@
 
 
 
-class MemcacheChangeNotifier(LoggingMixIn, CachePoolUserMixIn):
+class MemcacheChangeNotifier(CachePoolUserMixIn):
     """
     A change notifier used by resources (not store objects).
     """
+    log = Logger()
 
     def __init__(self, resource, cachePool=None, cacheHandle="Default"):
         self._resource = resource
@@ -126,17 +127,19 @@
         # For shared resources we use the owner URL as the cache key
         url = self._resource.url()
 
-        self.log_debug("Changing Cache Token for %r" % (url,))
+        self.log.debug("Changing Cache Token for %r" % (url,))
         return self.getCachePool().set(
             'cacheToken:%s' % (url,),
             self._newCacheToken(), expireTime=config.ResponseCacheTimeout * 60)
 
 
 
-class BaseResponseCache(LoggingMixIn):
+class BaseResponseCache(object):
     """
     A base class which provides some common operations
     """
+    log = Logger()
+
     def _principalURI(self, principal):
         return str(principal.children[0])
 
@@ -306,7 +309,7 @@
         oldkey = (yield self._requestKey(request))
         request.cacheKey = key = hashlib.md5(
             ':'.join([str(t) for t in oldkey])).hexdigest()
-        self.log_debug("hashing key for get: %r to %r" % (oldkey, key))
+        self.log.debug("hashing key for get: %r to %r" % (oldkey, key))
         returnValue(request.cacheKey)
 
 
@@ -320,15 +323,15 @@
         try:
             key = (yield self._hashedRequestKey(request))
 
-            self.log_debug("Checking cache for: %r" % (key,))
+            self.log.debug("Checking cache for: %r" % (key,))
             _ignore_flags, value = (yield self.getCachePool().get(key))
 
             if value is None:
-                self.log_debug("Not in cache: %r" % (key,))
+                self.log.debug("Not in cache: %r" % (key,))
                 returnValue(None)
 
             (principalToken, directoryToken, uriToken, childTokens, (code, headers, body)) = cPickle.loads(value)
-            self.log_debug("Found in cache: %r = %r" % (key, (
+            self.log.debug("Found in cache: %r = %r" % (key, (
                 principalToken,
                 directoryToken,
                 uriToken,
@@ -338,7 +341,7 @@
             currentTokens = (yield self._getTokens(request))
 
             if currentTokens[0] != principalToken:
-                self.log_debug(
+                self.log.debug(
                     "Principal token doesn't match for %r: %r != %r" % (
                         request.cacheKey,
                         currentTokens[0],
@@ -346,7 +349,7 @@
                 returnValue(None)
 
             if currentTokens[1] != directoryToken:
-                self.log_debug(
+                self.log.debug(
                     "Directory Record Token doesn't match for %r: %r != %r" % (
                         request.cacheKey,
                         currentTokens[1],
@@ -354,7 +357,7 @@
                 returnValue(None)
 
             if currentTokens[2] != uriToken:
-                self.log_debug(
+                self.log.debug(
                     "URI token doesn't match for %r: %r != %r" % (
                         request.cacheKey,
                         currentTokens[2],
@@ -364,7 +367,7 @@
             for childuri, token in childTokens.items():
                 currentToken = (yield self._tokenForURI(childuri))
                 if currentToken != token:
-                    self.log_debug(
+                    self.log.debug(
                         "Child %s token doesn't match for %r: %r != %r" % (
                             childuri,
                             request.cacheKey,
@@ -372,7 +375,7 @@
                             token))
                     returnValue(None)
 
-            self.log_debug("Response cache matched")
+            self.log.debug("Response cache matched")
             r = Response(code, stream=MemoryStream(body))
 
             for key, value in headers.iteritems():
@@ -381,7 +384,7 @@
             returnValue(r)
 
         except URINotFoundException, e:
-            self.log_debug("Could not locate URI: %r" % (e,))
+            self.log.debug("Could not locate URI: %r" % (e,))
             returnValue(None)
 
 
@@ -415,7 +418,7 @@
                     responseBody
                 )
             ))
-            self.log_debug("Adding to cache: %r = tokens - %r" % (key, (
+            self.log.debug("Adding to cache: %r = tokens - %r" % (key, (
                 pToken,
                 dToken,
                 uToken,
@@ -425,7 +428,7 @@
                 expireTime=config.ResponseCacheTimeout * 60)
 
         except URINotFoundException, e:
-            self.log_debug("Could not locate URI: %r" % (e,))
+            self.log.debug("Could not locate URI: %r" % (e,))
 
         returnValue(response)
 
@@ -471,7 +474,7 @@
 
 
 
-class CacheStoreNotifierFactory(LoggingMixIn, CachePoolUserMixIn):
+class CacheStoreNotifierFactory(CachePoolUserMixIn):
     """
     A notifier factory specifically for store object notifications. This is handed of to
     the data store object, which calls .newNotifier() each time a home object is created
@@ -480,6 +483,7 @@
 
     This object uses a memcachepool for setting new cache tokens.
     """
+    log = Logger()
 
     implements(IStoreNotifierFactory)
 
@@ -498,7 +502,7 @@
         return: A L{Deferred} that fires when the token has been changed.
         """
 
-        self.log_debug("Changing Cache Token for %r" % (cache_id,))
+        self.log.debug("Changing Cache Token for %r" % (cache_id,))
         return self.getCachePool().set(
             'cacheToken:%s' % (cache_id,),
             self._newCacheToken(), expireTime=config.ResponseCacheTimeout * 60)

Modified: CalendarServer/trunk/twistedcaldav/client/pool.py
===================================================================
--- CalendarServer/trunk/twistedcaldav/client/pool.py	2013-05-29 23:02:53 UTC (rev 11259)
+++ CalendarServer/trunk/twistedcaldav/client/pool.py	2013-05-30 01:18:39 UTC (rev 11260)
@@ -23,7 +23,7 @@
 import OpenSSL
 import urlparse
 
-from twext.python.log import LoggingMixIn
+from twext.python.log import Logger
 from twext.internet.gaiendpoint import GAIEndpoint
 from twext.internet.adaptendpoint import connect
 
@@ -39,7 +39,7 @@
 from twext.web2.dav.util import allDataFromStream
 from twext.web2.stream import MemoryStream
 
-class PooledHTTPClientFactory(ClientFactory, LoggingMixIn):
+class PooledHTTPClientFactory(ClientFactory):
     """
     A client factory for HTTPClient that notifies a pool of it's state. It the connection
     fails in the middle of a request it will retry the request.
@@ -48,6 +48,8 @@
         to our connectionPool.
     @ivar connectionPool: A managing connection pool that we notify of events.
     """
+    log = Logger()
+
     protocol = HTTPClientProtocol
     connectionPool = None
 
@@ -92,7 +94,7 @@
 
 
 
-class HTTPClientPool(LoggingMixIn):
+class HTTPClientPool(object):
     """
     A connection pool for HTTPClientProtocol instances.
 
@@ -114,6 +116,8 @@
     @ivar _pendingConnects: A C{int} indicating how many connections are in
         progress.
     """
+    log = Logger()
+
     clientFactory = PooledHTTPClientFactory
     maxRetries = 2
 
@@ -173,7 +177,7 @@
         """
         self._pendingConnects += 1
 
-        self.log_debug("Initating new client connection to: %r" % (
+        self.log.debug("Initating new client connection to: %r" % (
                 self._endpoint,))
         self._logClientStats()
 
@@ -268,18 +272,18 @@
                 response = (yield self._submitRequest(request, args, kwargs))
 
             except (ConnectionLost, ConnectionDone, ConnectError), e:
-                self.log_error("HTTP pooled client connection error (attempt: %d) - retrying: %s" % (ctr + 1, e,))
+                self.log.error("HTTP pooled client connection error (attempt: %d) - retrying: %s" % (ctr + 1, e,))
                 continue
 
             # TODO: find the proper cause of these assertions and fix
             except (AssertionError,), e:
-                self.log_error("HTTP pooled client connection assertion error (attempt: %d) - retrying: %s" % (ctr + 1, e,))
+                self.log.error("HTTP pooled client connection assertion error (attempt: %d) - retrying: %s" % (ctr + 1, e,))
                 continue
 
             else:
                 returnValue(response)
         else:
-            self.log_error("HTTP pooled client connection error - exhausted retry attempts.")
+            self.log.error("HTTP pooled client connection error - exhausted retry attempts.")
             raise HTTPError(StatusResponse(responsecode.BAD_GATEWAY, "Could not connect to HTTP pooled client host."))
 
 
@@ -303,7 +307,7 @@
         elif len(self._busyClients) + self._pendingConnects >= self._maxClients:
             d = Deferred()
             self._pendingRequests.append((d, request, args, kwargs))
-            self.log_debug("Request queued: %s, %r, %r" % (request, args, kwargs))
+            self.log.debug("Request queued: %s, %r, %r" % (request, args, kwargs))
             self._logClientStats()
 
         else:
@@ -314,7 +318,7 @@
 
 
     def _logClientStats(self):
-        self.log_debug("Clients #free: %d, #busy: %d, "
+        self.log.debug("Clients #free: %d, #busy: %d, "
                        "#pending: %d, #queued: %d" % (
                 len(self._freeClients),
                 len(self._busyClients),
@@ -334,7 +338,7 @@
         elif client in self._freeClients:
             self._freeClients.remove(client)
 
-        self.log_debug("Removed client: %r" % (client,))
+        self.log.debug("Removed client: %r" % (client,))
         self._logClientStats()
 
         self._processPending()
@@ -352,7 +356,7 @@
 
         self._busyClients.add(client)
 
-        self.log_debug("Busied client: %r" % (client,))
+        self.log.debug("Busied client: %r" % (client,))
         self._logClientStats()
 
 
@@ -370,7 +374,7 @@
         if self.shutdown_deferred and self._isIdle():
             self.shutdown_deferred.callback(None)
 
-        self.log_debug("Freed client: %r" % (client,))
+        self.log.debug("Freed client: %r" % (client,))
         self._logClientStats()
 
         self._processPending()
@@ -380,7 +384,7 @@
         if len(self._pendingRequests) > 0:
             d, request, args, kwargs = self._pendingRequests.pop(0)
 
-            self.log_debug("Performing Queued Request: %s, %r, %r" % (
+            self.log.debug("Performing Queued Request: %s, %r, %r" % (
                     request, args, kwargs))
             self._logClientStats()
 

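The change in this file is the one applied throughout the changeset: drop the
LoggingMixIn base class, give the class its own class-level Logger, and rename
the log_*() calls to methods on that logger. A minimal before/after sketch
(class and message names are illustrative, not from this changeset):

    from twext.python.log import Logger

    class ExampleClient(object):
        # Before: class ExampleClient(SomeBase, LoggingMixIn)
        log = Logger()  # class attribute replaces the mix-in

        def connect(self):
            # Before: self.log_debug("connecting")
            self.log.debug("connecting")
            # Likewise log_error/log_warn/log_info become
            # self.log.error/.warn/.info

Since log is a class attribute, all instances share one Logger; self.log and
ExampleClient.log refer to the same object.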
Modified: CalendarServer/trunk/twistedcaldav/client/reverseproxy.py
===================================================================
--- CalendarServer/trunk/twistedcaldav/client/reverseproxy.py	2013-05-29 23:02:53 UTC (rev 11259)
+++ CalendarServer/trunk/twistedcaldav/client/reverseproxy.py	2013-05-30 01:18:39 UTC (rev 11260)
@@ -25,15 +25,17 @@
 from twext.web2.http import StatusResponse, HTTPError
 from twext.web2.resource import LeafResource
 
-from twext.python.log import LoggingMixIn
+from twext.python.log import Logger
 
 from twistedcaldav.client.pool import getHTTPClientPool
 from twistedcaldav.config import config
 
-class ReverseProxyResource(LeafResource, LoggingMixIn):
+class ReverseProxyResource(LeafResource):
     """
     A L{LeafResource} which always performs a reverse proxy operation.
     """
+    log = Logger()
+
     implements(iweb.IResource)
 
     def __init__(self, poolID, *args, **kwargs):
@@ -67,7 +69,7 @@
         @return: Deferred L{Response}
         """
 
-        self.logger.info("%s %s %s" % (request.method, request.uri, "HTTP/%s.%s" % request.clientproto))
+        self.log.info("%s %s %s" % (request.method, request.uri, "HTTP/%s.%s" % request.clientproto))
 
         # Check for multi-hop
         if not self.allowMultiHop:

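One naming wrinkle appears here: LoggingMixIn exposed its logger as
self.logger, while the new convention is a class attribute named log, so the
access-log call becomes self.log.info(...). A sketch of that request-line
logging (class and request object are illustrative; clientproto is a
(major, minor) tuple, as the "%s.%s" formatting implies):

    from twext.python.log import Logger

    class AccessLogExample(object):
        log = Logger()

        def logRequestLine(self, request):
            # Produces e.g. "GET /principals/ HTTP/1.1"
            self.log.info("%s %s %s" % (
                request.method,
                request.uri,
                "HTTP/%s.%s" % request.clientproto,
            ))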
Modified: CalendarServer/trunk/twistedcaldav/directory/appleopendirectory.py
===================================================================
--- CalendarServer/trunk/twistedcaldav/directory/appleopendirectory.py	2013-05-29 23:02:53 UTC (rev 11259)
+++ CalendarServer/trunk/twistedcaldav/directory/appleopendirectory.py	2013-05-30 01:18:39 UTC (rev 11260)
@@ -31,6 +31,7 @@
 from twisted.internet.defer import succeed, inlineCallbacks, returnValue
 from twisted.cred.credentials import UsernamePassword
 from twext.web2.auth.digest import DigestedCredentials
+from twext.python.log import Logger
 
 from twistedcaldav.config import config
 from twistedcaldav.directory.cachingdirectory import CachingDirectoryService, \
@@ -48,6 +49,8 @@
     """
     OpenDirectory implementation of L{IDirectoryService}.
     """
+    log = Logger()
+
     baseGUID = "891F8321-ED02-424C-BA72-89C32F215C1E"
 
     def __repr__(self):
@@ -100,7 +103,7 @@
         try:
             directory = self.odModule.odInit(params['node'])
         except self.odModule.ODError, e:
-            self.log_error("OpenDirectory (node=%s) Initialization error: %s" % (params['node'], e))
+            self.log.error("OpenDirectory (node=%s) Initialization error: %s" % (params['node'], e))
             raise
 
         self.augmentService = params['augmentService']
@@ -131,8 +134,8 @@
             if time.time() - self.restrictedTimestamp > self.cacheTimeout:
                 attributeToMatch = dsattributes.kDS1AttrGeneratedUID if self.restrictToGUID else dsattributes.kDSNAttrRecordName
                 valueToMatch = self.restrictToGroup
-                self.log_debug("Doing restricted group membership check")
-                self.log_debug("opendirectory.queryRecordsWithAttribute_list(%r,%r,%r,%r,%r,%r,%r)" % (
+                self.log.debug("Doing restricted group membership check")
+                self.log.debug("opendirectory.queryRecordsWithAttribute_list(%r,%r,%r,%r,%r,%r,%r)" % (
                     self.directory,
                     attributeToMatch,
                     valueToMatch,
@@ -158,7 +161,7 @@
                     members = []
                     nestedGroups = []
                 self._cachedRestrictedGUIDs = set(self._expandGroupMembership(members, nestedGroups, returnGroups=True))
-                self.log_debug("Got %d restricted group members" % (len(self._cachedRestrictedGUIDs),))
+                self.log.debug("Got %d restricted group members" % (len(self._cachedRestrictedGUIDs),))
                 self.restrictedTimestamp = time.time()
             return self._cachedRestrictedGUIDs
         else:
@@ -204,7 +207,7 @@
             if groupGUID in processedGUIDs:
                 continue
 
-            self.log_debug("opendirectory.queryRecordsWithAttribute_list(%r,%r,%r,%r,%r,%r,%r)" % (
+            self.log.debug("opendirectory.queryRecordsWithAttribute_list(%r,%r,%r,%r,%r,%r,%r)" % (
                 self.directory,
                 dsattributes.kDS1AttrGeneratedUID,
                 groupGUID,
@@ -224,7 +227,7 @@
             )
 
             if not result:
-                self.log_error("Couldn't find group %s when trying to expand nested groups."
+                self.log.error("Couldn't find group %s when trying to expand nested groups."
                              % (groupGUID,))
                 continue
 
@@ -276,15 +279,15 @@
             attrs.append(dsattributes.kDSNAttrNestedGroups)
             ODRecordType = dsattributes.kDSStdRecordTypeGroups
 
-        self.log_debug("Querying OD for all %s records" % (recordType,))
+        self.log.debug("Querying OD for all %s records" % (recordType,))
         results = self.odModule.listAllRecordsWithAttributes_list(
             self.directory, ODRecordType, attrs)
-        self.log_debug("Retrieved %d %s records" % (len(results), recordType,))
+        self.log.debug("Retrieved %d %s records" % (len(results), recordType,))
 
         for key, value in results:
             recordGUID = value.get(dsattributes.kDS1AttrGeneratedUID)
             if not recordGUID:
-                self.log_warn("Ignoring record missing GUID: %s %s" %
+                self.log.warn("Ignoring record missing GUID: %s %s" %
                     (key, value,))
                 continue
 
@@ -375,7 +378,7 @@
                 d.addCallback(lambda x: record.addAugmentInformation(x))
             records.append(record)
 
-        self.log_debug("ListRecords returning %d %s records" % (len(records),
+        self.log.debug("ListRecords returning %d %s records" % (len(records),
             recordType))
 
         return records
@@ -391,9 +394,9 @@
 
         guids = set()
 
-        self.log_debug("Looking up which groups %s is a member of" % (guid,))
+        self.log.debug("Looking up which groups %s is a member of" % (guid,))
         try:
-            self.log_debug("opendirectory.queryRecordsWithAttribute_list(%r,%r,%r,%r,%r,%r,%r)" % (
+            self.log.debug("opendirectory.queryRecordsWithAttribute_list(%r,%r,%r,%r,%r,%r,%r)" % (
                 self.directory,
                 dsattributes.kDSNAttrGroupMembers,
                 guid,
@@ -412,7 +415,7 @@
                 attrs,
             )
         except self.odModule.ODError, ex:
-            self.log_error("OpenDirectory (node=%s) error: %s" % (self.realmName, str(ex)))
+            self.log.error("OpenDirectory (node=%s) error: %s" % (self.realmName, str(ex)))
             raise
 
         for (_ignore_recordShortName, value) in results:
@@ -423,7 +426,7 @@
                 guids.add(recordGUID)
 
         try:
-            self.log_debug("opendirectory.queryRecordsWithAttribute_list(%r,%r,%r,%r,%r,%r,%r)" % (
+            self.log.debug("opendirectory.queryRecordsWithAttribute_list(%r,%r,%r,%r,%r,%r,%r)" % (
                 self.directory,
                 dsattributes.kDSNAttrNestedGroups,
                 guid,
@@ -442,7 +445,7 @@
                 attrs,
             )
         except self.odModule.ODError, ex:
-            self.log_error("OpenDirectory (node=%s) error: %s" % (self.realmName, str(ex)))
+            self.log.error("OpenDirectory (node=%s) error: %s" % (self.realmName, str(ex)))
             raise
 
         for (_ignore_recordShortName, value) in results:
@@ -452,7 +455,7 @@
             if recordGUID:
                 guids.add(recordGUID)
 
-        self.log_debug("%s is a member of %d groups" % (guid, len(guids)))
+        self.log.debug("%s is a member of %d groups" % (guid, len(guids)))
 
         return guids
 
@@ -563,9 +566,9 @@
             lookupMethod = self.odModule.queryRecordsWithAttributes_list
 
         def collectResults(results):
-            self.log_debug("Got back %d records from OD" % (len(results),))
+            self.log.debug("Got back %d records from OD" % (len(results),))
             for key, value in results:
-                self.log_debug("OD result: %s %s" % (key, value))
+                self.log.debug("OD result: %s %s" % (key, value))
                 try:
                     recordNodeName = value.get(
                         dsattributes.kDSNAttrMetaNodeLocation)
@@ -662,7 +665,7 @@
             for compound in queries:
                 compound = compound.generate()
 
-                self.log_debug("Calling OD: Types %s, Query %s" %
+                self.log.debug("Calling OD: Types %s, Query %s" %
                     (recordTypes, compound))
 
                 try:
@@ -684,7 +687,7 @@
                     sets.append(newSet)
 
                 except self.odModule.ODError, e:
-                    self.log_error("Ignoring OD Error: %d %s" %
+                    self.log.error("Ignoring OD Error: %d %s" %
                         (e.message[1], e.message[0]))
                     continue
 
@@ -739,9 +742,9 @@
         # specified in the last field in the fields list
 
         def collectResults(results):
-            self.log_debug("Got back %d records from OD" % (len(results),))
+            self.log.debug("Got back %d records from OD" % (len(results),))
             for key, value in results:
-                self.log_debug("OD result: %s %s" % (key, value))
+                self.log.debug("OD result: %s %s" % (key, value))
                 try:
                     recordNodeName = value.get(
                         dsattributes.kDSNAttrMetaNodeLocation)
@@ -843,7 +846,7 @@
                 else:
                     comparison = dsattributes.eDSExact
 
-                self.log_debug("Calling OD: Types %s, Field %s, Value %s, Match %s, Caseless %s" %
+                self.log.debug("Calling OD: Types %s, Field %s, Value %s, Match %s, Caseless %s" %
                     (recordTypes, ODField, value, matchType, caseless))
 
                 try:
@@ -873,7 +876,7 @@
                         sets.append(newSet)
 
                 except self.odModule.ODError, e:
-                    self.log_error("Ignoring OD Error: %d %s" %
+                    self.log.error("Ignoring OD Error: %d %s" %
                         (e.message[1], e.message[0]))
                     continue
 
@@ -989,7 +992,7 @@
             # Because we're getting transient OD error -14987, try 3 times:
             for _ignore in xrange(3):
                 try:
-                    self.log_debug("opendirectory.queryRecordsWithAttribute_list(%r,%r,%r,%r,%r,%r,%r)" % (
+                    self.log.debug("opendirectory.queryRecordsWithAttribute_list(%r,%r,%r,%r,%r,%r,%r)" % (
                         self.directory,
                         queryattr,
                         indexKey,
@@ -1012,18 +1015,18 @@
                 except self.odModule.ODError, ex:
                     if ex.message[1] == -14987:
                         # Fall through and retry
-                        self.log_error("OpenDirectory (node=%s) error: %s" % (self.realmName, str(ex)))
+                        self.log.error("OpenDirectory (node=%s) error: %s" % (self.realmName, str(ex)))
                     elif ex.message[1] == -14140 or ex.message[1] == -14200:
                         # Unsupported attribute on record - don't fail
                         return
                     else:
-                        self.log_error("OpenDirectory (node=%s) error: %s" % (self.realmName, str(ex)))
+                        self.log.error("OpenDirectory (node=%s) error: %s" % (self.realmName, str(ex)))
                         raise
                 else:
                     # Success, so break the retry loop
                     break
 
-        self.log_debug("opendirectory.queryRecordsWithAttribute_list matched records: %s" % (len(results),))
+        self.log.debug("opendirectory.queryRecordsWithAttribute_list matched records: %s" % (len(results),))
 
         enabledRecords = []
         disabledRecords = []
@@ -1044,19 +1047,19 @@
             recordNodeName = value.get(dsattributes.kDSNAttrMetaNodeLocation)
 
             if not recordType:
-                self.log_debug("Record (unknown)%s in node %s has no recordType; ignoring."
+                self.log.debug("Record (unknown)%s in node %s has no recordType; ignoring."
                                % (recordShortName, recordNodeName))
                 continue
 
             recordType = self._fromODRecordTypes[recordType]
 
             if not recordGUID:
-                self.log_debug("Record (%s)%s in node %s has no GUID; ignoring."
+                self.log.debug("Record (%s)%s in node %s has no GUID; ignoring."
                                % (recordType, recordShortName, recordNodeName))
                 continue
 
             if recordGUID.lower().startswith("ffffeeee-dddd-cccc-bbbb-aaaa"):
-                self.log_debug("Ignoring system record (%s)%s in node %s."
+                self.log.debug("Ignoring system record (%s)%s in node %s."
                                % (recordType, recordShortName, recordNodeName))
                 continue
 
@@ -1136,7 +1139,7 @@
                 record.autoSchedule = True
 
             if not unrestricted:
-                self.log_debug("%s is not enabled because it's not a member of group: %s" % (recordGUID, self.restrictToGroup))
+                self.log.debug("%s is not enabled because it's not a member of group: %s" % (recordGUID, self.restrictToGroup))
                 record.enabledForCalendaring = False
                 record.enabledForAddressBooks = False
 
@@ -1153,14 +1156,14 @@
         elif len(enabledRecords) == 0 and len(disabledRecords) == 1:
             record = disabledRecords[0]
         elif indexType == self.INDEX_TYPE_GUID and len(enabledRecords) > 1:
-            self.log_error("Duplicate records found for GUID %s:" % (indexKey,))
+            self.log.error("Duplicate records found for GUID %s:" % (indexKey,))
             for duplicateRecord in enabledRecords:
-                self.log_error("Duplicate: %s" % (", ".join(duplicateRecord.shortNames)))
+                self.log.error("Duplicate: %s" % (", ".join(duplicateRecord.shortNames)))
 
         if record:
             if isinstance(origIndexKey, unicode):
                 origIndexKey = origIndexKey.encode("utf-8")
-            self.log_debug("Storing (%s %s) %s in internal cache" % (indexType, origIndexKey, record))
+            self.log.debug("Storing (%s %s) %s in internal cache" % (indexType, origIndexKey, record))
 
             self.recordCacheForType(recordType).addRecord(record, indexType, origIndexKey)
 
@@ -1179,7 +1182,7 @@
 
         for recordType in (dsattributes.kDSStdRecordTypePlaces, dsattributes.kDSStdRecordTypeResources):
             try:
-                self.log_debug("opendirectory.listAllRecordsWithAttributes_list(%r,%r,%r)" % (
+                self.log.debug("opendirectory.listAllRecordsWithAttributes_list(%r,%r,%r)" % (
                     self.directory,
                     recordType,
                     attrs,
@@ -1190,7 +1193,7 @@
                     attrs,
                 )
             except self.odModule.ODError, ex:
-                self.log_error("OpenDirectory (node=%s) error: %s" % (self.realmName, str(ex)))
+                self.log.error("OpenDirectory (node=%s) error: %s" % (self.realmName, str(ex)))
                 raise
 
             for (recordShortName, value) in results:
@@ -1223,7 +1226,7 @@
             for node in nodes:
                 self.odModule.getNodeAttributes(self.directory, node, [dsattributes.kDSNAttrNodePath])
         except self.odModule.ODError:
-            self.log_warn("OpenDirectory Node %s not available" % (node,))
+            self.log.warn("OpenDirectory Node %s not available" % (node,))
             return False
 
         return True
@@ -1242,7 +1245,7 @@
 
         loop = 1
         while valuesToFetch:
-            self.log_debug("getGroups loop %d" % (loop,))
+            self.log.debug("getGroups loop %d" % (loop,))
 
             results = []
 
@@ -1250,12 +1253,12 @@
                 fields = []
                 for value in batch:
                     fields.append(["guid", value, False, "equals"])
-                self.log_debug("getGroups fetching batch of %d" %
+                self.log.debug("getGroups fetching batch of %d" %
                     (len(fields),))
                 result = list((yield self.recordsMatchingFields(fields,
                     recordType=self.recordType_groups)))
                 results.extend(result)
-                self.log_debug("getGroups got back batch of %d for subtotal of %d" %
+                self.log.debug("getGroups got back batch of %d for subtotal of %d" %
                     (len(result), len(results)))
 
             # Reset values for next iteration
@@ -1269,7 +1272,7 @@
                 # record.nestedGUIDs() contains the sub groups of this group
                 for memberGUID in record.nestedGUIDs():
                     if memberGUID not in recordsByGUID:
-                        self.log_debug("getGroups group %s contains group %s" %
+                        self.log.debug("getGroups group %s contains group %s" %
                             (record.guid, memberGUID))
                         valuesToFetch.add(memberGUID)
 
@@ -1419,7 +1422,7 @@
                     self.password = credentials.password
                     return True
             except self.service.odModule.ODError, e:
-                self.log_error("OpenDirectory (node=%s) error while performing basic authentication for user %s: %s"
+                self.log.error("OpenDirectory (node=%s) error while performing basic authentication for user %s: %s"
                             % (self.service.realmName, self.shortNames[0], e))
 
             return False
@@ -1442,7 +1445,7 @@
                     'algorithm=%(algorithm)s'
                 ) % credentials.fields
             except KeyError, e:
-                self.log_error(
+                self.log.error(
                     "OpenDirectory (node=%s) error while performing digest authentication for user %s: "
                     "missing digest response field: %s in: %s"
                     % (self.service.realmName, self.shortNames[0], e, credentials.fields)
@@ -1473,7 +1476,7 @@
 
                     return True
                 else:
-                    self.log_debug(
+                    self.log.debug(
 """OpenDirectory digest authentication failed with:
     Nodename:  %s
     Username:  %s
@@ -1484,7 +1487,7 @@
        credentials.method))
 
             except self.service.odModule.ODError, e:
-                self.log_error(
+                self.log.error(
                     "OpenDirectory (node=%s) error while performing digest authentication for user %s: %s"
                     % (self.service.realmName, self.shortNames[0], e)
                 )

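Note that message arguments are untouched throughout: strings are still
pre-formatted with % before being handed to the logger, so only the method
lookup changes. The query code above also uses a bounded-retry idiom for the
transient OD error -14987; a condensed sketch (doQuery is a hypothetical
zero-argument query callable standing in for the OD call above):

    def queryWithRetry(service, doQuery):
        results = ()
        for _ignore in xrange(3):
            try:
                results = doQuery()
            except service.odModule.ODError, ex:
                if ex.message[1] == -14987:
                    # Transient error: log and retry
                    service.log.error("Transient OD error, retrying: %s" % (ex,))
                    continue
                raise  # anything else is fatal
            else:
                break  # success, stop retrying
        return results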
Modified: CalendarServer/trunk/twistedcaldav/directory/cachingdirectory.py
===================================================================
--- CalendarServer/trunk/twistedcaldav/directory/cachingdirectory.py	2013-05-29 23:02:53 UTC (rev 11259)
+++ CalendarServer/trunk/twistedcaldav/directory/cachingdirectory.py	2013-05-30 01:18:39 UTC (rev 11260)
@@ -30,7 +30,7 @@
 
 import base64
 
-from twext.python.log import LoggingMixIn
+from twext.python.log import Logger
 from twext.python.memcacheclient import ClientFactory, MemcacheError
 
 from twistedcaldav.config import config
@@ -64,11 +64,12 @@
 
 
 
-class DictRecordTypeCache(RecordTypeCache, LoggingMixIn):
+class DictRecordTypeCache(RecordTypeCache):
     """
     Cache implementation using a dict, with memcached used to share records
     with other instances.
     """
+    log = Logger()
 
     def __init__(self, directoryService, recordType):
 
@@ -104,18 +105,18 @@
             if useMemcache:
                 key = self.directoryService.generateMemcacheKey(indexType, indexKey,
                     record.recordType)
-                self.log_debug("Memcache: storing %s" % (key,))
+                self.log.debug("Memcache: storing %s" % (key,))
                 try:
                     self.directoryService.memcacheSet(key, record)
                 except DirectoryMemcacheError:
-                    self.log_error("Memcache: failed to store %s" % (key,))
+                    self.log.error("Memcache: failed to store %s" % (key,))
                     pass
 
 
     def removeRecord(self, record):
         if record in self.records:
             self.records.remove(record)
-            self.log_debug("Removed record %s" % (record.guid,))
+            self.log.debug("Removed record %s" % (record.guid,))
             for indexType in self.directoryService.indexTypes():
                 try:
                     indexData = getattr(record, CachingDirectoryService.indexTypeToRecordAttribute[indexType])
@@ -154,6 +155,7 @@
 
     This class must be overridden to provide a concrete implementation.
     """
+    log = Logger()
 
     INDEX_TYPE_GUID = "guid"
     INDEX_TYPE_SHORTNAME = "shortname"
@@ -204,12 +206,12 @@
 
             key = base64.b64encode(key)
             if not self._getMemcacheClient().set(key, record, time=self.cacheTimeout):
-                self.log_error("Could not write to memcache, retrying")
+                self.log.error("Could not write to memcache, retrying")
                 if not self._getMemcacheClient(refresh=True).set(
                     key, record,
                     time=self.cacheTimeout
                 ):
-                    self.log_error("Could not write to memcache again, giving up")
+                    self.log.error("Could not write to memcache again, giving up")
                     del self.memcacheClient
                     raise DirectoryMemcacheError("Failed to write to memcache")
         finally:
@@ -225,13 +227,13 @@
             if record is not None and isinstance(record, DirectoryRecord):
                 record.service = self
         except MemcacheError:
-            self.log_error("Could not read from memcache, retrying")
+            self.log.error("Could not read from memcache, retrying")
             try:
                 record = self._getMemcacheClient(refresh=True).get(key)
                 if record is not None and isinstance(record, DirectoryRecord):
                     record.service = self
             except MemcacheError:
-                self.log_error("Could not read from memcache again, giving up")
+                self.log.error("Could not read from memcache again, giving up")
                 del self.memcacheClient
                 raise DirectoryMemcacheError("Failed to read from memcache")
         return record
@@ -363,18 +365,18 @@
             # one recordType, so using recordTypes[0] here is always safe:
             key = self.generateMemcacheKey(indexType, indexKey, recordTypes[0])
 
-            self.log_debug("Memcache: checking %s" % (key,))
+            self.log.debug("Memcache: checking %s" % (key,))
 
             try:
                 record = self.memcacheGet(key)
             except DirectoryMemcacheError:
-                self.log_error("Memcache: failed to get %s" % (key,))
+                self.log.error("Memcache: failed to get %s" % (key,))
                 record = None
 
             if record is None:
-                self.log_debug("Memcache: miss %s" % (key,))
+                self.log.debug("Memcache: miss %s" % (key,))
             else:
-                self.log_debug("Memcache: hit %s" % (key,))
+                self.log.debug("Memcache: hit %s" % (key,))
                 self.recordCacheForType(record.recordType).addRecord(record, indexType, indexKey, useMemcache=False)
                 return record
 
@@ -384,35 +386,35 @@
                 try:
                     val = self.memcacheGet("-%s" % (key,))
                 except DirectoryMemcacheError:
-                    self.log_error("Memcache: failed to get -%s" % (key,))
+                    self.log.error("Memcache: failed to get -%s" % (key,))
                     val = None
                 if val == 1:
-                    self.log_debug("Memcache: negative %s" % (key,))
+                    self.log.debug("Memcache: negative %s" % (key,))
                     self._disabledKeys[indexType][indexKey] = time.time()
                     return None
 
         # Try query
-        self.log_debug("Faulting record for attribute '%s' with value '%s'" % (indexType, indexKey,))
+        self.log.debug("Faulting record for attribute '%s' with value '%s'" % (indexType, indexKey,))
         self.queryDirectory(recordTypes, indexType, indexKey)
 
         # Now try again from cache
         record = lookup()
         if record:
-            self.log_debug("Found record for attribute '%s' with value '%s'" % (indexType, indexKey,))
+            self.log.debug("Found record for attribute '%s' with value '%s'" % (indexType, indexKey,))
             return record
 
         if self.negativeCaching:
 
             # Add to negative cache with timestamp
-            self.log_debug("Failed to fault record for attribute '%s' with value '%s'" % (indexType, indexKey,))
+            self.log.debug("Failed to fault record for attribute '%s' with value '%s'" % (indexType, indexKey,))
             self._disabledKeys[indexType][indexKey] = time.time()
 
             if config.Memcached.Pools.Default.ClientEnabled:
-                self.log_debug("Memcache: storing (negative) %s" % (key,))
+                self.log.debug("Memcache: storing (negative) %s" % (key,))
                 try:
                     self.memcacheSet("-%s" % (key,), 1)
                 except DirectoryMemcacheError:
-                    self.log_error("Memcache: failed to set -%s" % (key,))
+                    self.log.error("Memcache: failed to set -%s" % (key,))
                     pass
 
         return None

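The hunks above also show the negative-caching convention used by this
directory: a key that was queried and found missing is remembered under a
"-"-prefixed memcache key, so repeated lookups for a nonexistent principal do
not fault into the directory every time. A minimal sketch of the idea
(function and parameter names are illustrative):

    def cachedLookup(cache, key, queryDirectory):
        record = cache.get(key)
        if record is not None:
            return record                  # positive cache hit
        if cache.get("-%s" % (key,)) == 1:
            return None                    # negative hit: known missing
        record = queryDirectory()
        if record is None:
            cache.set("-%s" % (key,), 1)   # remember the miss
        else:
            cache.set(key, record)
        return record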
Modified: CalendarServer/trunk/twistedcaldav/directory/calendaruserproxy.py
===================================================================
--- CalendarServer/trunk/twistedcaldav/directory/calendaruserproxy.py	2013-05-29 23:02:53 UTC (rev 11259)
+++ CalendarServer/trunk/twistedcaldav/directory/calendaruserproxy.py	2013-05-30 01:18:39 UTC (rev 11260)
@@ -38,7 +38,7 @@
 from twext.web2.dav.util import joinURL
 from twext.web2.dav.noneprops import NonePropertyStore
 
-from twext.python.log import Logger, LoggingMixIn
+from twext.python.log import Logger
 
 from twisted.web.template import XMLFile, Element, renderer
 from twisted.python.modules import getModule
@@ -156,10 +156,11 @@
 
 class CalendarUserProxyPrincipalResource (
         CalDAVComplianceMixIn, PermissionsMixIn, DAVResourceWithChildrenMixin,
-        DAVPrincipalResource, LoggingMixIn):
+        DAVPrincipalResource):
     """
     Calendar user proxy principal resource.
     """
+    log = Logger()
 
     def __init__(self, parent, proxyType):
         """
@@ -383,7 +384,7 @@
                 # existing principals are removed
                 yield self._index().refreshPrincipal(uid)
             else:
-                self.log_warn("Delegate is missing from directory: %s" % (uid,))
+                self.log.warn("Delegate is missing from directory: %s" % (uid,))
 
         returnValue(found)
 
@@ -426,7 +427,7 @@
             returnValue(True)
         returnValue(False)
 
-class ProxyDB(AbstractADBAPIDatabase, LoggingMixIn):
+class ProxyDB(AbstractADBAPIDatabase):
     """
     A database to maintain calendar user proxy group memberships.
 
@@ -435,8 +436,8 @@
     Group Database:
 
     ROW: GROUPNAME, MEMBER
-
     """
+    log = Logger()
 
     schema_version = "4"
     schema_type    = "CALENDARUSERPROXY"
@@ -612,12 +613,12 @@
 
             elif overdue is None:
                 # No timer was previously set
-                self.log_debug("Delaying removal of missing proxy principal '%s'"
+                self.log.debug("Delaying removal of missing proxy principal '%s'"
                                % (principalUID,))
                 yield self._memcacher.setDeletionTimer(principalUID, delay=delay)
                 returnValue(None)
 
-        self.log_warn("Removing missing proxy principal for '%s'"
+        self.log.warn("Removing missing proxy principal for '%s'"
                       % (principalUID,))
 
         for suffix in ("calendar-proxy-read", "calendar-proxy-write",):

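The last hunk shows the grace-period idiom around removing a proxy principal
that has gone missing from the directory: the first time it is seen missing, a
deletion timer is set and nothing is removed; removal only proceeds once that
timer is overdue. A condensed method sketch (setDeletionTimer appears in the
hunk above; checkDeletionTimer is a hypothetical accessor for the stored
timer):

    from twisted.internet.defer import inlineCallbacks, returnValue

    @inlineCallbacks
    def _removeWhenOverdue(self, principalUID, delay):
        overdue = yield self._memcacher.checkDeletionTimer(principalUID)  # hypothetical
        if overdue is None:
            # First sighting: start the clock and defer removal
            self.log.debug("Delaying removal of missing proxy principal '%s'"
                           % (principalUID,))
            yield self._memcacher.setDeletionTimer(principalUID, delay=delay)
            returnValue(None)
        self.log.warn("Removing missing proxy principal for '%s'"
                      % (principalUID,))
        # ... remove the read/write proxy group entries here ...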
Modified: CalendarServer/trunk/twistedcaldav/directory/directory.py
===================================================================
--- CalendarServer/trunk/twistedcaldav/directory/directory.py	2013-05-29 23:02:53 UTC (rev 11259)
+++ CalendarServer/trunk/twistedcaldav/directory/directory.py	2013-05-30 01:18:39 UTC (rev 11260)
@@ -34,7 +34,7 @@
 from twext.enterprise.dal.record import fromTable
 from twext.enterprise.dal.syntax import Delete
 from twext.enterprise.queue import WorkItem, PeerConnectionPool
-from twext.python.log import Logger, LoggingMixIn
+from twext.python.log import Logger
 from twext.web2.dav.auth import IPrincipalCredentials
 from twext.web2.dav.util import joinURL
 
@@ -70,7 +70,7 @@
 log = Logger()
 
 
-class DirectoryService(LoggingMixIn):
+class DirectoryService(object):
     implements(IDirectoryService, ICalendarStoreDirectoryService, ICredentialsChecker)
 
     ##
@@ -97,10 +97,10 @@
             assert self.baseGUID, "Class %s must provide a baseGUID attribute" % (self.__class__.__name__,)
 
             if realmName is None:
-                self.log_error("Directory service %s has no realm name or GUID; generated service GUID will not be unique." % (self,))
+                self.log.error("Directory service %s has no realm name or GUID; generated service GUID will not be unique." % (self,))
                 realmName = ""
             else:
-                self.log_info("Directory service %s has no GUID; generating service GUID from realm name." % (self,))
+                self.log.info("Directory service %s has no GUID; generating service GUID from realm name." % (self,))
 
             self._guid = uuidFromName(self.baseGUID, realmName)
 
@@ -467,7 +467,7 @@
         if ignore:
             for key in ignore:
                 if key in params:
-                    self.log_warn("Ignoring obsolete directory service parameter: %s" % (key,))
+                    self.log.warn("Ignoring obsolete directory service parameter: %s" % (key,))
                     keys.remove(key)
 
         if keys:
@@ -495,7 +495,7 @@
             read_only_proxy = wpframework.get("ReadOnlyCalendaringDelegate", None)
             autoAcceptGroup = wpframework.get("AutoAcceptGroup", "")
         except (ExpatError, AttributeError), e:
-            self.log_error(
+            self.log.error(
                 "Failed to parse ResourceInfo attribute of record (%s)%s (guid=%s): %s\n%s" %
                 (recordType, shortname, guid, e, plist,)
             )
@@ -588,7 +588,7 @@
 
 
 
-class GroupMembershipCache(Memcacher, LoggingMixIn):
+class GroupMembershipCache(Memcacher):
     """
     Caches group membership information
 
@@ -604,6 +604,7 @@
     "group-cacher-populated" : contains a datestamp indicating the most recent
     population.
     """
+    log = Logger()
 
     def __init__(self, namespace, pickle=True, no_invalidation=False,
         key_normalization=True, expireSeconds=0):
@@ -616,14 +617,14 @@
 
 
     def setGroupsFor(self, guid, memberships):
-        self.log_debug("set groups-for %s : %s" % (guid, memberships))
+        self.log.debug("set groups-for %s : %s" % (guid, memberships))
         return self.set("groups-for:%s" %
             (str(guid)), memberships,
             expireTime=self.expireSeconds)
 
 
     def getGroupsFor(self, guid):
-        self.log_debug("get groups-for %s" % (guid,))
+        self.log.debug("get groups-for %s" % (guid,))
         def _value(value):
             if value:
                 return value
@@ -635,29 +636,30 @@
 
 
     def deleteGroupsFor(self, guid):
-        self.log_debug("delete groups-for %s" % (guid,))
+        self.log.debug("delete groups-for %s" % (guid,))
         return self.delete("groups-for:%s" % (str(guid),))
 
 
     def setPopulatedMarker(self):
-        self.log_debug("set group-cacher-populated")
+        self.log.debug("set group-cacher-populated")
         return self.set("group-cacher-populated", str(datetime.datetime.now()))
 
 
     @inlineCallbacks
     def isPopulated(self):
-        self.log_debug("is group-cacher-populated")
+        self.log.debug("is group-cacher-populated")
         value = (yield self.get("group-cacher-populated"))
         returnValue(value is not None)
 
 
 
-class GroupMembershipCacheUpdater(LoggingMixIn):
+class GroupMembershipCacheUpdater(object):
     """
     Responsible for updating memcached with group memberships.  This will run
     in a sidecar.  There are two sources of proxy data to pull from: the local
     proxy database, and the location/resource info in the directory system.
     """
+    log = Logger()
 
     def __init__(self, proxyDB, directory, updateSeconds, expireSeconds,
         cache=None, namespace=None, useExternalProxies=False,
@@ -767,22 +769,22 @@
             # We're in quick-start mode.  Check first to see if someone has
             # populated the membership cache, and if so, return immediately
             if isPopulated:
-                self.log_info("Group membership cache is already populated")
+                self.log.info("Group membership cache is already populated")
                 returnValue((fast, 0))
 
-        self.log_info("Updating group membership cache")
+        self.log.info("Updating group membership cache")
 
         dataRoot = FilePath(config.DataRoot)
         membershipsCacheFile = dataRoot.child("memberships_cache")
         extProxyCacheFile = dataRoot.child("external_proxy_cache")
 
         if not membershipsCacheFile.exists():
-            self.log_info("Group membership snapshot file does not yet exist")
+            self.log.info("Group membership snapshot file does not yet exist")
             fast = False
             previousMembers = {}
             callGroupsChanged = False
         else:
-            self.log_info("Group membership snapshot file exists: %s" %
+            self.log.info("Group membership snapshot file exists: %s" %
                 (membershipsCacheFile.path,))
             previousMembers = pickle.loads(membershipsCacheFile.getContent())
             callGroupsChanged = True
@@ -792,13 +794,13 @@
             # Load in cached copy of external proxies so we can diff against them
             previousAssignments = []
             if extProxyCacheFile.exists():
-                self.log_info("External proxies snapshot file exists: %s" %
+                self.log.info("External proxies snapshot file exists: %s" %
                     (extProxyCacheFile.path,))
                 previousAssignments = pickle.loads(extProxyCacheFile.getContent())
 
-            self.log_info("Retrieving proxy assignments from directory")
+            self.log.info("Retrieving proxy assignments from directory")
             assignments = self.externalProxiesSource()
-            self.log_info("%d proxy assignments retrieved from directory" %
+            self.log.info("%d proxy assignments retrieved from directory" %
                 (len(assignments),))
 
             changed, removed = diffAssignments(previousAssignments, assignments)
@@ -808,14 +810,14 @@
 
             # populate proxy DB from external resource info
             if changed:
-                self.log_info("Updating proxy assignments")
+                self.log.info("Updating proxy assignments")
                 assignmentCount = 0
                 totalNumAssignments = len(changed)
                 currentAssignmentNum = 0
                 for principalUID, members in changed:
                     currentAssignmentNum += 1
                     if currentAssignmentNum % 1000 == 0:
-                        self.log_info("...proxy assignment %d of %d" % (currentAssignmentNum,
+                        self.log.info("...proxy assignment %d of %d" % (currentAssignmentNum,
                             totalNumAssignments))
                     try:
                         current = (yield self.proxyDB.getMembers(principalUID))
@@ -823,30 +825,30 @@
                             assignmentCount += 1
                             yield self.proxyDB.setGroupMembers(principalUID, members)
                     except Exception, e:
-                        self.log_error("Unable to update proxy assignment: principal=%s, members=%s, error=%s" % (principalUID, members, e))
-                self.log_info("Updated %d assignment%s in proxy database" %
+                        self.log.error("Unable to update proxy assignment: principal=%s, members=%s, error=%s" % (principalUID, members, e))
+                self.log.info("Updated %d assignment%s in proxy database" %
                     (assignmentCount, "" if assignmentCount == 1 else "s"))
 
             if removed:
-                self.log_info("Deleting proxy assignments")
+                self.log.info("Deleting proxy assignments")
                 assignmentCount = 0
                 totalNumAssignments = len(removed)
                 currentAssignmentNum = 0
                 for principalUID in removed:
                     currentAssignmentNum += 1
                     if currentAssignmentNum % 1000 == 0:
-                        self.log_info("...proxy assignment %d of %d" % (currentAssignmentNum,
+                        self.log.info("...proxy assignment %d of %d" % (currentAssignmentNum,
                             totalNumAssignments))
                     try:
                         assignmentCount += 1
                         yield self.proxyDB.setGroupMembers(principalUID, [])
                     except Exception, e:
-                        self.log_error("Unable to remove proxy assignment: principal=%s, members=%s, error=%s" % (principalUID, members, e))
-                self.log_info("Removed %d assignment%s from proxy database" %
+                        self.log.error("Unable to remove proxy assignment: principal=%s, members=%s, error=%s" % (principalUID, members, e))
+                self.log.info("Removed %d assignment%s from proxy database" %
                     (assignmentCount, "" if assignmentCount == 1 else "s"))
 
             # Store external proxy snapshot
-            self.log_info("Taking snapshot of external proxies to %s" %
+            self.log.info("Taking snapshot of external proxies to %s" %
                 (extProxyCacheFile.path,))
             extProxyCacheFile.setContent(pickle.dumps(assignments))
 
@@ -855,7 +857,7 @@
             # load that and put into memcached, bypassing the faulting in of
             # any records, so that the server can start up quickly.
 
-            self.log_info("Loading group memberships from snapshot")
+            self.log.info("Loading group memberships from snapshot")
             members = pickle.loads(membershipsCacheFile.getContent())
 
         else:
@@ -863,11 +865,11 @@
             # of delegated-to guids, intersect those and build a dictionary
             # containing which delegated-to groups a user is a member of
 
-            self.log_info("Retrieving list of all proxies")
+            self.log.info("Retrieving list of all proxies")
             # This is always a set of guids:
             delegatedGUIDs = set((yield self.proxyDB.getAllMembers()))
-            self.log_info("There are %d proxies" % (len(delegatedGUIDs),))
-            self.log_info("Retrieving group hierarchy from directory")
+            self.log.info("There are %d proxies" % (len(delegatedGUIDs),))
+            self.log.info("Retrieving group hierarchy from directory")
 
             # "groups" maps a group to its members; the keys and values consist
             # of whatever directory attribute is used to refer to members.  The
@@ -876,11 +878,11 @@
             # back to the group's guid.
             groups, aliases = (yield self.getGroups(guids=delegatedGUIDs))
             groupGUIDs = set(aliases.keys())
-            self.log_info("%d groups retrieved from the directory" %
+            self.log.info("%d groups retrieved from the directory" %
                 (len(groupGUIDs),))
 
             delegatedGUIDs = delegatedGUIDs.intersection(groupGUIDs)
-            self.log_info("%d groups are proxies" % (len(delegatedGUIDs),))
+            self.log.info("%d groups are proxies" % (len(delegatedGUIDs),))
 
             # Reverse index the group membership from cache
             members = {}
@@ -891,11 +893,11 @@
                     memberships = members.setdefault(member, set())
                     memberships.add(groupGUID)
 
-            self.log_info("There are %d users delegated-to via groups" %
+            self.log.info("There are %d users delegated-to via groups" %
                 (len(members),))
 
             # Store snapshot
-            self.log_info("Taking snapshot of group memberships to %s" %
+            self.log.info("Taking snapshot of group memberships to %s" %
                 (membershipsCacheFile.path,))
             membershipsCacheFile.setContent(pickle.dumps(members))
 
@@ -909,7 +911,7 @@
             if extProxyCacheFile.exists():
                 os.chown(extProxyCacheFile.path, uid, gid)
 
-        self.log_info("Storing %d group memberships in memcached" %
+        self.log.info("Storing %d group memberships in memcached" %
                        (len(members),))
         changedMembers = set()
         totalNumMembers = len(members)
@@ -917,9 +919,9 @@
         for member, groups in members.iteritems():
             currentMemberNum += 1
             if currentMemberNum % 1000 == 0:
-                self.log_info("...membership %d of %d" % (currentMemberNum,
+                self.log.info("...membership %d of %d" % (currentMemberNum,
                     totalNumMembers))
-            # self.log_debug("%s is in %s" % (member, groups))
+            # self.log.debug("%s is in %s" % (member, groups))
             yield self.cache.setGroupsFor(member, groups)
             if groups != previousMembers.get(member, None):
                 # This principal has had a change in group membership
@@ -949,14 +951,14 @@
                 if record is not None:
                     principal = self.directory.principalCollection.principalForRecord(record)
                     if principal is not None:
-                        self.log_debug("Group membership changed for %s (%s)" %
+                        self.log.debug("Group membership changed for %s (%s)" %
                             (record.shortNames[0], record.guid,))
                         if hasattr(principal, "groupsChanged"):
                             yield principal.groupsChanged()
 
         yield self.cache.setPopulatedMarker()
 
-        self.log_info("Group memberships cache updated")
+        self.log.info("Group memberships cache updated")
 
         returnValue((fast, len(members), len(changedMembers)))
 
@@ -1044,7 +1046,9 @@
 
 
 
-class DirectoryRecord(LoggingMixIn):
+class DirectoryRecord(object):
+    log = Logger()
+
     implements(IDirectoryRecord, ICalendarStoreDirectoryRecord)
 
     def __repr__(self):
@@ -1190,7 +1194,7 @@
                 # For augment records cloned from the Default augment record,
                 # don't emit this message:
                 if not augment.clonedFromDefault:
-                    self.log_error("Group '%s(%s)' cannot be enabled for calendaring or address books" % (self.guid, self.shortNames[0],))
+                    self.log.error("Group '%s(%s)' cannot be enabled for calendaring or address books" % (self.guid, self.shortNames[0],))
 
         else:
             # Groups are by default always enabled
@@ -1210,11 +1214,11 @@
         if config.EnableSACLs and self.CheckSACL:
             username = self.shortNames[0]
             if self.CheckSACL(username, "calendar") != 0:
-                self.log_debug("%s is not enabled for calendaring due to SACL"
+                self.log.debug("%s is not enabled for calendaring due to SACL"
                                % (username,))
                 self.enabledForCalendaring = False
             if self.CheckSACL(username, "addressbook") != 0:
-                self.log_debug("%s is not enabled for addressbooks due to SACL"
+                self.log.debug("%s is not enabled for addressbooks due to SACL"
                                % (username,))
                 self.enabledForAddressBooks = False
 

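directory.py illustrates that the two styles coexist: the module keeps its
existing module-level log = Logger() for free functions, while classes such as
DirectoryService and DirectoryRecord now carry class-level Logger attributes.
A sketch of the layout (names illustrative):

    from twext.python.log import Logger

    log = Logger()              # module level: used by module functions

    class SomeService(object):
        log = Logger()          # class level: used as self.log in methods

        def query(self):
            self.log.debug("resolved via the class attribute")

    def helper():
        log.info("resolved via the module global")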
Modified: CalendarServer/trunk/twistedcaldav/directory/ldapdirectory.py
===================================================================
--- CalendarServer/trunk/twistedcaldav/directory/ldapdirectory.py	2013-05-29 23:02:53 UTC (rev 11259)
+++ CalendarServer/trunk/twistedcaldav/directory/ldapdirectory.py	2013-05-30 01:18:39 UTC (rev 11260)
@@ -58,6 +58,7 @@
 from twistedcaldav.directory.augment import AugmentRecord
 from twistedcaldav.directory.util import splitIntoBatches
 from twisted.internet.defer import succeed, inlineCallbacks, returnValue
+from twext.python.log import Logger
 from twext.web2.http import HTTPError, StatusResponse
 from twext.web2 import responsecode
 
@@ -65,6 +66,8 @@
     """
     LDAP based implementation of L{IDirectoryService}.
     """
+    log = Logger()
+
     baseGUID = "5A871574-0C86-44EE-B11B-B9440C3DC4DD"
 
     def __repr__(self):
@@ -309,7 +312,7 @@
             filterstr = "(&%s%s)" % (filterstr, typeFilter)
 
         # Query the LDAP server
-        self.log_debug("Querying ldap for records matching base %s and filter %s for attributes %s." %
+        self.log.debug("Querying ldap for records matching base %s and filter %s for attributes %s." %
             (ldap.dn.dn2str(base), filterstr, self.attrlist))
 
         # This takes a while, so if you don't want to have a "long request"
@@ -329,20 +332,20 @@
 
             try:
                 record = self._ldapResultToRecord(dn, attrs, recordType)
-                # self.log_debug("Got LDAP record %s" % (record,))
+                # self.log.debug("Got LDAP record %s" % (record,))
             except MissingGuidException:
                 numMissingGuids += 1
                 continue
 
             if not unrestricted:
-                self.log_debug("%s is not enabled because it's not a member of group: %s" % (dn, self.restrictToGroup))
+                self.log.debug("%s is not enabled because it's not a member of group: %s" % (dn, self.restrictToGroup))
                 record.enabledForCalendaring = False
                 record.enabledForAddressBooks = False
 
             records.append(record)
 
         if numMissingGuids:
-            self.log_info("%d %s records are missing %s" %
+            self.log.info("%d %s records are missing %s" %
                 (numMissingGuids, recordType, guidAttr))
 
         return records
@@ -379,7 +382,7 @@
         readAttr = self.resourceSchema["readOnlyProxyAttr"]
         writeAttr = self.resourceSchema["proxyAttr"]
         if not (guidAttr and readAttr and writeAttr):
-            self.log_error("LDAP configuration requires guidAttr, proxyAttr, and readOnlyProxyAttr in order to use external proxy assignments efficiently; falling back to slower method")
+            self.log.error("LDAP configuration requires guidAttr, proxyAttr, and readOnlyProxyAttr in order to use external proxy assignments efficiently; falling back to slower method")
             # Fall back to the less-specialized version
             return super(LdapDirectoryService, self).getExternalProxyAssignments()
 
@@ -388,7 +391,7 @@
         attrlist = [guidAttr, readAttr, writeAttr]
 
         # Query the LDAP server
-        self.log_debug("Querying ldap for records matching base %s and filter %s for attributes %s." %
+        self.log.debug("Querying ldap for records matching base %s and filter %s for attributes %s." %
             (ldap.dn.dn2str(self.base), filterstr, attrlist))
 
         results = self.timedSearch(ldap.dn.dn2str(self.base),
@@ -411,20 +414,20 @@
 
     def getLDAPConnection(self):
         if self.ldap is None:
-            self.log_info("Connecting to LDAP %s" % (repr(self.uri),))
+            self.log.info("Connecting to LDAP %s" % (repr(self.uri),))
             self.ldap = self.createLDAPConnection()
-            self.log_info("Connection established to LDAP %s" % (repr(self.uri),))
+            self.log.info("Connection established to LDAP %s" % (repr(self.uri),))
             if self.credentials.get("dn", ""):
                 try:
-                    self.log_info("Binding to LDAP %s" %
+                    self.log.info("Binding to LDAP %s" %
                         (repr(self.credentials.get("dn")),))
                     self.ldap.simple_bind_s(self.credentials.get("dn"),
                         self.credentials.get("password"))
-                    self.log_info("Successfully authenticated with LDAP as %s" %
+                    self.log.info("Successfully authenticated with LDAP as %s" %
                         (repr(self.credentials.get("dn")),))
                 except ldap.INVALID_CREDENTIALS:
                     msg = "Can't bind to LDAP %s: check credentials" % (self.uri,)
-                    self.log_error(msg)
+                    self.log.error(msg)
                     raise DirectoryConfigurationError(msg)
         return self.ldap
 
@@ -464,10 +467,10 @@
         TRIES = 3
 
         for i in xrange(TRIES):
-            self.log_debug("Authenticating %s" % (dn,))
+            self.log.debug("Authenticating %s" % (dn,))
 
             if self.authLDAP is None:
-                self.log_debug("Creating authentication connection to LDAP")
+                self.log.debug("Creating authentication connection to LDAP")
                 self.authLDAP = self.createLDAPConnection()
 
             try:
@@ -481,7 +484,7 @@
                 raise ldap.INVALID_CREDENTIALS()
 
             except ldap.NO_SUCH_OBJECT:
-                self.log_error("LDAP Authentication error for %s: NO_SUCH_OBJECT"
+                self.log.error("LDAP Authentication error for %s: NO_SUCH_OBJECT"
                     % (dn,))
                 # fall through to try again; could be transient
 
@@ -489,24 +492,24 @@
                 raise
 
             except ldap.SERVER_DOWN:
-                self.log_error("Lost connection to LDAP server.")
+                self.log.error("Lost connection to LDAP server.")
                 self.authLDAP = None
                 # Fall through and retry if TRIES has been reached
 
             except Exception, e:
-                self.log_error("LDAP authentication failed with %s." % (e,))
+                self.log.error("LDAP authentication failed with %s." % (e,))
                 raise
 
             finally:
                 totalTime = time.time() - startTime
                 if totalTime > self.warningThresholdSeconds:
-                    self.log_error("LDAP auth exceeded threshold: %.2f seconds for %s" % (totalTime, dn))
+                    self.log.error("LDAP auth exceeded threshold: %.2f seconds for %s" % (totalTime, dn))
 
         else:
-            self.log_error("Giving up on LDAP authentication after %d tries.  Responding with 503." % (TRIES,))
+            self.log.error("Giving up on LDAP authentication after %d tries.  Responding with 503." % (TRIES,))
             raise HTTPError(StatusResponse(responsecode.SERVICE_UNAVAILABLE, "LDAP server unavailable"))
 
-        self.log_debug("Authentication succeeded for %s" % (dn,))
+        self.log.debug("Authentication succeeded for %s" % (dn,))
 
 
     def timedSearch(self, base, scope, filterstr="(objectClass=*)",
@@ -532,15 +535,15 @@
             except ldap.NO_SUCH_OBJECT:
                 return []
             except ldap.FILTER_ERROR, e:
-                self.log_error("LDAP filter error: %s %s" % (e, filterstr))
+                self.log.error("LDAP filter error: %s %s" % (e, filterstr))
                 return []
             except ldap.SIZELIMIT_EXCEEDED, e:
-                self.log_debug("LDAP result limit exceeded: %d" % (resultLimit,))
+                self.log.debug("LDAP result limit exceeded: %d" % (resultLimit,))
             except ldap.TIMELIMIT_EXCEEDED, e:
-                self.log_warn("LDAP timeout exceeded: %d seconds" % (timeoutSeconds,))
+                self.log.warn("LDAP timeout exceeded: %d seconds" % (timeoutSeconds,))
             except ldap.SERVER_DOWN:
                 self.ldap = None
-                self.log_error("LDAP server unavailable (tried %d times)" % (i+1,))
+                self.log.error("LDAP server unavailable (tried %d times)" % (i+1,))
                 continue
 
             # change format, ignoring resultsType
@@ -550,7 +553,7 @@
             if totalTime > self.warningThresholdSeconds:
                 if filterstr and len(filterstr) > 100:
                     filterstr = "%s..." % (filterstr[:100],)
-                self.log_error("LDAP query exceeded threshold: %.2f seconds for %s %s %s (#results=%d)" %
+                self.log.error("LDAP query exceeded threshold: %.2f seconds for %s %s %s (#results=%d)" %
                     (totalTime, base, filterstr, attrlist, len(result)))
             return result
 
@@ -593,7 +596,7 @@
                 base = self.typeDNs[recordType]
                 # TODO: This shouldn't be hardcoded to cn
                 filterstr = "(cn=%s)" % (self.restrictToGroup,)
-                self.log_debug("Retrieving ldap record with base %s and filter %s." %
+                self.log.debug("Retrieving ldap record with base %s and filter %s." %
                     (ldap.dn.dn2str(base), filterstr))
                 result = self.timedSearch(ldap.dn.dn2str(base),
                     ldap.SCOPE_SUBTREE, filterstr=filterstr, attrlist=self.attrlist)
@@ -625,7 +628,7 @@
 
                 self._cachedRestrictedPrincipals = set(self._expandGroupMembership(members,
                     nestedGroups))
-                self.log_info("Got %d restricted group members" % (
+                self.log.info("Got %d restricted group members" % (
                     len(self._cachedRestrictedPrincipals),))
                 self.restrictedTimestamp = time.time()
             return self._cachedRestrictedPrincipals
@@ -684,7 +687,7 @@
                 base = ldap.dn.str2dn(group)
                 filterstr = "(objectClass=*)"
 
-            self.log_debug("Retrieving ldap record with base %s and filter %s." %
+            self.log.debug("Retrieving ldap record with base %s and filter %s." %
                 (ldap.dn.dn2str(base), filterstr))
             result = self.timedSearch(ldap.dn.dn2str(base),
                 scope, filterstr=filterstr, attrlist=self.attrlist)
@@ -774,7 +777,7 @@
         if guidAttr:
             guid = self._getUniqueLdapAttribute(attrs, guidAttr)
             if not guid:
-                self.log_debug("LDAP data for %s is missing guid attribute %s" % (shortNames, guidAttr))
+                self.log.debug("LDAP data for %s is missing guid attribute %s" % (shortNames, guidAttr))
                 raise MissingGuidException()
 
         # Find or build email
@@ -829,7 +832,7 @@
                         memberGUIDs.append(dnStr)
                     except Exception, e:
                         # LDAP returned an illegal DN value, log and ignore it
-                        self.log_warn("Bad LDAP DN: %s" % (dnStr,))
+                        self.log.warn("Bad LDAP DN: %s" % (dnStr,))
 
         elif recordType in (self.recordType_resources,
             self.recordType_locations):
@@ -858,7 +861,7 @@
                         if readOnlyProxy:
                             readOnlyProxyGUIDs = (readOnlyProxy,)
                     except ValueError, e:
-                        self.log_error("Unable to parse resource info (%s)" % (e,))
+                        self.log.error("Unable to parse resource info (%s)" % (e,))
             else: # the individual resource attributes might be specified
                 if self.resourceSchema["autoScheduleAttr"]:
                     autoScheduleValue = self._getUniqueLdapAttribute(attrs,
@@ -960,7 +963,7 @@
         if queryMethod is None:
             queryMethod = self.timedSearch
 
-        self.log_debug("LDAP query for types %s, indexType %s and indexKey %s"
+        self.log.debug("LDAP query for types %s, indexType %s and indexKey %s"
             % (recordTypes, indexType, indexKey))
 
         guidAttr = self.rdnSchema["guidAttr"]
@@ -1023,7 +1026,7 @@
                 return
 
             # Query the LDAP server
-            self.log_debug("Retrieving ldap record with base %s and filter %s." %
+            self.log.debug("Retrieving ldap record with base %s and filter %s." %
                 (ldap.dn.dn2str(base), filterstr))
             result = queryMethod(ldap.dn.dn2str(base),
                 ldap.SCOPE_SUBTREE, filterstr=filterstr, attrlist=self.attrlist)
@@ -1036,10 +1039,10 @@
 
                 try:
                     record = self._ldapResultToRecord(dn, attrs, recordType)
-                    self.log_debug("Got LDAP record %s" % (record,))
+                    self.log.debug("Got LDAP record %s" % (record,))
 
                     if not unrestricted:
-                        self.log_debug("%s is not enabled because it's not a member of group: %s" % (dn, self.restrictToGroup))
+                        self.log.debug("%s is not enabled because it's not a member of group: %s" % (dn, self.restrictToGroup))
                         record.enabledForCalendaring = False
                         record.enabledForAddressBooks = False
 
@@ -1053,11 +1056,11 @@
                     break
 
                 except MissingRecordNameException:
-                    self.log_warn("Ignoring record missing record name attribute: recordType %s, indexType %s and indexKey %s"
+                    self.log.warn("Ignoring record missing record name attribute: recordType %s, indexType %s and indexKey %s"
                         % (recordTypes, indexType, indexKey))
 
                 except MissingGuidException:
-                    self.log_warn("Ignoring record missing guid attribute: recordType %s, indexType %s and indexKey %s"
+                    self.log.warn("Ignoring record missing guid attribute: recordType %s, indexType %s and indexKey %s"
                         % (recordTypes, indexType, indexKey))
 
 
@@ -1080,7 +1083,7 @@
         context is "attendee", only users, groups, and resources
         are considered.
         """
-        self.log_debug("Peforming calendar user search for %s (%s)" % (tokens, context))
+        self.log.debug("Peforming calendar user search for %s (%s)" % (tokens, context))
 
         records = []
         recordTypes = self.recordTypesForSearchContext(context)
@@ -1095,13 +1098,13 @@
 
             if filterstr is not None:
                 # Query the LDAP server
-                self.log_debug("LDAP search %s %s %s" %
+                self.log.debug("LDAP search %s %s %s" %
                     (ldap.dn.dn2str(base), scope, filterstr))
                 results = self.timedSearch(ldap.dn.dn2str(base), scope,
                     filterstr=filterstr, attrlist=self.attrlist,
                     timeoutSeconds=self.requestTimeoutSeconds,
                     resultLimit=self.requestResultsLimit)
-                self.log_debug("LDAP search returned %d results" % (len(results),))
+                self.log.debug("LDAP search returned %d results" % (len(results),))
                 numMissingGuids = 0
                 numMissingRecordNames = 0
                 for dn, attrs in results:
@@ -1130,14 +1133,14 @@
                         numMissingRecordNames += 1
 
                 if numMissingGuids:
-                    self.log_warn("%d %s records are missing %s" %
+                    self.log.warn("%d %s records are missing %s" %
                         (numMissingGuids, recordType, guidAttr))
 
                 if numMissingRecordNames:
-                    self.log_warn("%d %s records are missing record name" %
+                    self.log.warn("%d %s records are missing record name" %
                         (numMissingRecordNames, recordType))
 
-        self.log_debug("Calendar user search matched %d records" % (len(records),))
+        self.log.debug("Calendar user search matched %d records" % (len(records),))
         return succeed(records)
 
 
@@ -1148,7 +1151,7 @@
         """
         records = []
 
-        self.log_debug("Peforming principal property search for %s" % (fields,))
+        self.log.debug("Peforming principal property search for %s" % (fields,))
 
         if recordType is None:
             # Make a copy since we're modifying it
@@ -1185,13 +1188,13 @@
 
             if filterstr is not None:
                 # Query the LDAP server
-                self.log_debug("LDAP search %s %s %s" %
+                self.log.debug("LDAP search %s %s %s" %
                     (ldap.dn.dn2str(base), scope, filterstr))
                 results = self.timedSearch(ldap.dn.dn2str(base), scope,
                     filterstr=filterstr, attrlist=self.attrlist,
                     timeoutSeconds=self.requestTimeoutSeconds,
                     resultLimit=self.requestResultsLimit)
-                self.log_debug("LDAP search returned %d results" % (len(results),))
+                self.log.debug("LDAP search returned %d results" % (len(results),))
                 numMissingGuids = 0
                 numMissingRecordNames = 0
                 for dn, attrs in results:
@@ -1220,14 +1223,14 @@
                         numMissingRecordNames += 1
 
                 if numMissingGuids:
-                    self.log_warn("%d %s records are missing %s" %
+                    self.log.warn("%d %s records are missing %s" %
                         (numMissingGuids, recordType, guidAttr))
 
                 if numMissingRecordNames:
-                    self.log_warn("%d %s records are missing record name" %
+                    self.log.warn("%d %s records are missing record name" %
                         (numMissingRecordNames, recordType))
 
-        self.log_debug("Principal property search matched %d records" % (len(records),))
+        self.log.debug("Principal property search matched %d records" % (len(records),))
         return succeed(records)
 
 
@@ -1526,7 +1529,7 @@
 
                 base = self.service.base
                 filterstr = "(%s=%s)" % (memberIdAttr, ldapEsc(memberId))
-                self.log_debug("Retrieving subtree of %s with filter %s" %
+                self.log.debug("Retrieving subtree of %s with filter %s" %
                     (ldap.dn.dn2str(base), filterstr),
                     system="LdapDirectoryService")
                 result = self.service.timedSearch(ldap.dn.dn2str(base),
@@ -1535,7 +1538,7 @@
 
             else: # using DN
 
-                self.log_debug("Retrieving %s." % memberId,
+                self.log.debug("Retrieving %s." % memberId,
                     system="LdapDirectoryService")
                 result = self.service.timedSearch(memberId,
                     ldap.SCOPE_BASE, attrlist=self.service.attrlist)
@@ -1544,10 +1547,10 @@
 
                 dn, attrs = result.pop()
                 dn = normalizeDNstr(dn)
-                self.log_debug("Retrieved: %s %s" % (dn,attrs))
+                self.log.debug("Retrieved: %s %s" % (dn,attrs))
                 recordType = self.service.recordTypeForDN(dn)
                 if recordType is None:
-                    self.log_error("Unable to map %s to a record type" % (dn,))
+                    self.log.error("Unable to map %s to a record type" % (dn,))
                     continue
 
                 shortName = self.service._getUniqueLdapAttribute(attrs,
@@ -1588,7 +1591,7 @@
                     ["(%s=%s)" % (a, self._memberId) for a in membersAttrs]
                 ),
             )
-        self.log_debug("Finding groups containing %s" % (self._memberId,))
+        self.log.debug("Finding groups containing %s" % (self._memberId,))
         groups = []
 
         try:
@@ -1598,12 +1601,12 @@
             for dn, attrs in results:
                 dn = normalizeDNstr(dn)
                 shortName = self.service._getUniqueLdapAttribute(attrs, "cn")
-                self.log_debug("%s is a member of %s" % (self._memberId, shortName))
+                self.log.debug("%s is a member of %s" % (self._memberId, shortName))
                 record = self.service.recordWithShortName(recordType, shortName)
                 if record is not None:
                     groups.append(record)
         except ldap.PROTOCOL_ERROR, e:
-            self.log_warn(str(e))
+            self.log.warn(str(e))
 
         return groups
 
@@ -1643,7 +1646,7 @@
 
                 if not pamAvailable:
                     msg = "PAM module is not installed"
-                    self.log_error(msg)
+                    self.log.error(msg)
                     raise DirectoryConfigurationError(msg)
 
                 def pam_conv(auth, query_list, userData):
@@ -1672,14 +1675,14 @@
                     return True
 
                 except ldap.INVALID_CREDENTIALS:
-                    self.log_info("Invalid credentials for %s" %
+                    self.log.info("Invalid credentials for %s" %
                         (repr(self.dn),), system="LdapDirectoryService")
                     return False
 
             else:
                 msg = "Unknown Authentication Method '%s'" % (
                     self.service.authMethod.upper(),)
-                self.log_error(msg)
+                self.log.error(msg)
                 raise DirectoryConfigurationError(msg)
 
         return super(LdapDirectoryRecord, self).verifyCredentials(credentials)
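
The pattern applied throughout this changeset replaces the LoggingMixIn base
class, which supplied per-instance self.log_debug()/self.log_error() methods,
with a Logger attached as a class attribute. A minimal sketch of the new
convention, using only the Logger methods the hunks above already call (the
class and method names below are hypothetical):

    from twext.python.log import Logger

    class DirectoryLookupExample(object):
        # One Logger per class, shared by all instances; replaces
        # inheriting the old LoggingMixIn methods.
        log = Logger()

        def lookup(self, dn):
            # Old style: self.log_debug("Looking up %s" % (dn,))
            self.log.debug("Looking up %s" % (dn,))

Attaching the logger at class scope keeps a single shared instance per class
without adding another base class to the method resolution order.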

Modified: CalendarServer/trunk/twistedcaldav/directory/opendirectorybacker.py
===================================================================
--- CalendarServer/trunk/twistedcaldav/directory/opendirectorybacker.py	2013-05-29 23:02:53 UTC (rev 11259)
+++ CalendarServer/trunk/twistedcaldav/directory/opendirectorybacker.py	2013-05-30 01:18:39 UTC (rev 11260)
@@ -133,7 +133,7 @@
             try:
                 self.peopleDirectory = opendirectory.odInit(peopleNode)
             except opendirectory.ODError, e:
-                self.log_error("Open Directory (node=%s) Initialization error: %s" % (peopleNode, e))
+                self.log.error("Open Directory (node=%s) Initialization error: %s" % (peopleNode, e))
                 raise
             self.realmName = peopleNode
 
@@ -146,7 +146,7 @@
                 try:
                     self.userDirectory = opendirectory.odInit(userNode)
                 except opendirectory.ODError, e:
-                    self.log_error("Open Directory (node=%s) Initialization error: %s" % (userNode, e))
+                    self.log.error("Open Directory (node=%s) Initialization error: %s" % (userNode, e))
                     raise
                 if self.realmName:
                     self.realmName += "+" + userNode
@@ -180,7 +180,7 @@
                                                 VCardRecord.dsqueryAttributesForProperty.get("X-INTERNAL-REQUIRED")
                                                 )))
             if (self.allowedDSQueryAttributes != VCardRecord.allDSQueryAttributes):
-                self.log_info("Allowed DS query attributes = %r" % (self.allowedDSQueryAttributes,))
+                self.log.info("Allowed DS query attributes = %r" % (self.allowedDSQueryAttributes,))
         else:
             self.allowedDSQueryAttributes = VCardRecord.allDSQueryAttributes
 
@@ -238,7 +238,7 @@
             returnValue(False)
         elif not self._created:
             createdLock = MemcacheLock("OpenDirectoryBacker", self._createdLockPath)
-            self.log_debug("blocking on lock of: \"%s\")" % self._createdLockPath)
+            self.log.debug("blocking on lock of: \"%s\")" % self._createdLockPath)
             self._created = (yield createdLock.locked())
 
         returnValue(self._created)
@@ -255,11 +255,11 @@
         """
 
         if not self.liveQuery:
-            self.log_info("loading directory address book")
+            self.log.info("loading directory address book")
 
             # get init lock
             initLock = MemcacheLock("OpenDirectoryBacker", self._initLockPath, timeout=0)
-            self.log_debug("Attempt lock of: \"%s\")" % self._initLockPath)
+            self.log.debug("Attempt lock of: \"%s\")" % self._initLockPath)
             gotCreateLock = False
             try:
                 yield initLock.acquire()
@@ -270,10 +270,10 @@
             self._triedCreateLock = True
 
             if gotCreateLock:
-                self.log_debug("Got lock!")
+                self.log.debug("Got lock!")
                 yield self._refreshCache(flushCache=False, creating=True)
             else:
-                self.log_debug("Could not get lock - directory address book will be filled by peer")
+                self.log.debug("Could not get lock - directory address book will be filled by peer")
 
 
     @inlineCallbacks
@@ -292,7 +292,7 @@
             cacheTimeout = (cacheTimeout * random()) - (cacheTimeout / 2)
             cacheTimeout += self.cacheTimeout * 60
             reactor.callLater(cacheTimeout, self._refreshCache) #@UndefinedVariable
-            self.log_info("Refresh directory address book in %d minutes %d seconds" % divmod(cacheTimeout, 60))
+            self.log.info("Refresh directory address book in %d minutes %d seconds" % divmod(cacheTimeout, 60))
 
         def cleanupLater():
 
@@ -309,7 +309,7 @@
             nom = 120
             later = nom * (random() + .5)
             self._lastCleanupCall = reactor.callLater(later, removeTmpAddressBooks) #@UndefinedVariable
-            self.log_info("Remove temporary directory address books in %d minutes %d seconds" % divmod(later, 60))
+            self.log.info("Remove temporary directory address books in %d minutes %d seconds" % divmod(later, 60))
 
 
         def getTmpDirAndTmpFilePrefixSuffix():
@@ -333,11 +333,11 @@
 
         @inlineCallbacks
         def removeTmpAddressBooks():
-            self.log_info("Checking for temporary directory address books")
+            self.log.info("Checking for temporary directory address books")
             tmpDir, prefix, suffix = getTmpDirAndTmpFilePrefixSuffix()
 
             tmpDirLock = self._tmpDirAddressBookLock
-            self.log_debug("blocking on lock of: \"%s\")" % self._tmpDirAddressBookLockPath)
+            self.log.debug("blocking on lock of: \"%s\")" % self._tmpDirAddressBookLockPath)
             yield tmpDirLock.acquire()
 
             try:
@@ -345,13 +345,13 @@
                     if name.startswith(prefix) and name.endswith(suffix):
                         try:
                             path = join(tmpDir, name)
-                            self.log_info("Deleting temporary directory address book at: %s" % path)
+                            self.log.info("Deleting temporary directory address book at: %s" % path)
                             FilePath(path).remove()
-                            self.log_debug("Done deleting")
+                            self.log.debug("Done deleting")
                         except:
-                            self.log_info("Deletion failed")
+                            self.log.info("Deletion failed")
             finally:
-                self.log_debug("unlocking: \"%s\")" % self._tmpDirAddressBookLockPath)
+                self.log.debug("unlocking: \"%s\")" % self._tmpDirAddressBookLockPath)
                 yield tmpDirLock.release()
 
             self._cleanupTime = time.time()
@@ -374,7 +374,7 @@
                 # get the old hash
                 oldAddressBookCTag = ""
                 updateLock = self.updateLock()
-                self.log_debug("blocking on lock of: \"%s\")" % self._updateLockPath)
+                self.log.debug("blocking on lock of: \"%s\")" % self._updateLockPath)
                 yield updateLock.acquire()
 
                 if not flushCache:
@@ -384,15 +384,15 @@
                     except:
                         oldAddressBookCTag = ""
 
-                self.log_debug("Comparing {http://calendarserver.org/ns/}getctag: new = %s, old = %s" % (newAddressBookCTag, oldAddressBookCTag))
+                self.log.debug("Comparing {http://calendarserver.org/ns/}getctag: new = %s, old = %s" % (newAddressBookCTag, oldAddressBookCTag))
                 if str(newAddressBookCTag) != str(oldAddressBookCTag):
 
-                    self.log_debug("unlocking: \"%s\")" % self._updateLockPath)
+                    self.log.debug("unlocking: \"%s\")" % self._updateLockPath)
                     yield updateLock.release()
                     updateLock = None
 
                 if not keepLock:
-                    self.log_debug("unlocking: \"%s\")" % self._updateLockPath)
+                    self.log.debug("unlocking: \"%s\")" % self._updateLockPath)
                     yield updateLock.release()
                     updateLock = None
 
@@ -404,7 +404,7 @@
 
             if creating:
                 createdLock = MemcacheLock("OpenDirectoryBacker", self._createdLockPath)
-                self.log_debug("blocking on lock of: \"%s\")" % self._createdLockPath)
+                self.log.debug("blocking on lock of: \"%s\")" % self._createdLockPath)
                 yield createdLock.acquire()
 
             cleanupLater()
@@ -428,7 +428,7 @@
             recordTypes = [dsattributes.kDSStdRecordTypePeople, dsattributes.kDSStdRecordTypeUsers, ]
             try:
                 localNodeDirectory = opendirectory.odInit("/Local/Default")
-                self.log_debug("opendirectory.listAllRecordsWithAttributes_list(%r,%r,%r)" % (
+                self.log.debug("opendirectory.listAllRecordsWithAttributes_list(%r,%r,%r)" % (
                         "/DSLocal",
                         recordTypes,
                         self.returnedAttributes,
@@ -439,7 +439,7 @@
                         self.returnedAttributes,
                     ))
             except opendirectory.ODError, ex:
-                self.log_error("Open Directory (node=%s) error: %s" % ("/Local/Default", str(ex)))
+                self.log.error("Open Directory (node=%s) error: %s" % ("/Local/Default", str(ex)))
                 raise
 
             self._dsLocalRecords = []
@@ -450,19 +450,19 @@
                 if self.ignoreSystemRecords:
                     # remove system users and people
                     if record.guid.startswith("FFFFEEEE-DDDD-CCCC-BBBB-AAAA"):
-                        self.log_info("Ignoring vcard for system record %s" % (record,))
+                        self.log.info("Ignoring vcard for system record %s" % (record,))
                         continue
 
                 if record.guid in records:
-                    self.log_info("Record skipped due to conflict (duplicate uuid): %s" % (record,))
+                    self.log.info("Record skipped due to conflict (duplicate uuid): %s" % (record,))
                 else:
                     try:
                         vCardText = record.vCardText()
                     except:
                         traceback.print_exc()
-                        self.log_info("Could not get vcard for record %s" % (record,))
+                        self.log.info("Could not get vcard for record %s" % (record,))
                     else:
-                        self.log_debug("VCard text =\n%s" % (vCardText,))
+                        self.log.debug("VCard text =\n%s" % (vCardText,))
                         records[record.guid] = record
 
             return records
@@ -488,12 +488,12 @@
         queryResults = (yield self._queryDirectory(query, attributes, maxRecords))
         if maxRecords and len(queryResults) >= maxRecords:
             limited = True
-            self.log_debug("Directory address book record limit (= %d) reached." % (maxRecords,))
+            self.log.debug("Directory address book record limit (= %d) reached." % (maxRecords,))
 
-        self.log_debug("Query done. Inspecting %s results" % len(queryResults))
+        self.log.debug("Query done. Inspecting %s results" % len(queryResults))
 
         records = self._getDSLocalRecords().copy()
-        self.log_debug("Adding %s DSLocal results" % len(records.keys()))
+        self.log.debug("Adding %s DSLocal results" % len(records.keys()))
 
         for (recordShortName, value) in queryResults: #@UnusedVariable
 
@@ -502,15 +502,15 @@
             if self.ignoreSystemRecords:
                 # remove system users and people
                 if record.guid.startswith("FFFFEEEE-DDDD-CCCC-BBBB-AAAA"):
-                    self.log_info("Ignoring vcard for system record %s" % (record,))
+                    self.log.info("Ignoring vcard for system record %s" % (record,))
                     continue
 
             if record.guid in records:
-                self.log_info("Ignoring vcard for record due to conflict (duplicate uuid): %s" % (record,))
+                self.log.info("Ignoring vcard for record due to conflict (duplicate uuid): %s" % (record,))
             else:
                 records[record.guid] = record
 
-        self.log_debug("After filtering, %s records (limited=%s)." % (len(records), limited))
+        self.log.debug("After filtering, %s records (limited=%s)." % (len(records), limited))
         returnValue((records, limited,))
 
 
@@ -538,7 +538,7 @@
             try:
                 if query:
                     if isinstance(query, dsquery.match) and query.value is not "":
-                        self.log_debug("opendirectory.queryRecordsWithAttribute_list(%r,%r,%r,%r,%r,%r,%r,%r)" % (
+                        self.log.debug("opendirectory.queryRecordsWithAttribute_list(%r,%r,%r,%r,%r,%r,%r,%r)" % (
                             node,
                             query.attribute,
                             query.value,
@@ -560,7 +560,7 @@
                                 maxRecords,
                             ))
                     else:
-                        self.log_debug("opendirectory.queryRecordsWithAttribute_list(%r,%r,%r,%r,%r,%r)" % (
+                        self.log.debug("opendirectory.queryRecordsWithAttribute_list(%r,%r,%r,%r,%r,%r)" % (
                             node,
                             query.generate(),
                             False,
@@ -578,7 +578,7 @@
                                 maxRecords,
                             ))
                 else:
-                    self.log_debug("opendirectory.listAllRecordsWithAttributes_list(%r,%r,%r,%r)" % (
+                    self.log.debug("opendirectory.listAllRecordsWithAttributes_list(%r,%r,%r,%r)" % (
                         node,
                         recordType,
                         attributes,
@@ -592,7 +592,7 @@
                             maxRecords,
                         ))
             except opendirectory.ODError, ex:
-                self.log_error("Open Directory (node=%s) error: %s" % (self.realmName, str(ex)))
+                self.log.error("Open Directory (node=%s) error: %s" % (self.realmName, str(ex)))
                 raise
 
             allResults.extend(results)
@@ -603,7 +603,7 @@
                     break
 
         elaspedTime = time.time() - startTime
-        self.log_info("Timing: Directory query: %.1f ms (%d records, %.2f records/sec)" % (elaspedTime * 1000, len(allResults), len(allResults) / elaspedTime))
+        self.log.info("Timing: Directory query: %.1f ms (%d records, %.2f records/sec)" % (elaspedTime * 1000, len(allResults), len(allResults) / elaspedTime))
         return succeed(allResults)
 
 
@@ -772,7 +772,7 @@
                                     try:
                                         recordNameQualifier = matchString[recordNameStart:].decode("base64").decode("utf8")
                                     except Exception, e:
-                                        self.log_debug("Could not decode UID string %r in %r: %r" % (matchString[recordNameStart:], matchString, e,))
+                                        self.log.debug("Could not decode UID string %r in %r: %r" % (matchString[recordNameStart:], matchString, e,))
                                     else:
                                         if textMatchElement.negate:
                                             return (False, queryAttributes,
@@ -977,7 +977,7 @@
         updateLock, limited = (yield self._refreshCache(reschedule=False, query=dsFilter, attributes=attributes, keepLock=True, clear=clear, maxRecords=maxRecords))
 
         elaspedTime = time.time() - startTime
-        self.log_info("Timing: Cache fill: %.1f ms" % (elaspedTime * 1000,))
+        self.log.info("Timing: Cache fill: %.1f ms" % (elaspedTime * 1000,))
 
         returnValue((updateLock, limited))
 
@@ -1018,10 +1018,10 @@
                     vCardText = record.vCardText()
                 except:
                     traceback.print_exc()
-                    self.log_info("Could not get vcard for record %s" % (record,))
+                    self.log.info("Could not get vcard for record %s" % (record,))
                 else:
                     if not record.firstValueForAttribute(dsattributes.kDSNAttrMetaNodeLocation).startswith("/Local"):
-                        self.log_debug("VCard text =\n%s" % (vCardText,))
+                        self.log.debug("VCard text =\n%s" % (vCardText,))
                     queryRecords.append(record)
 
         returnValue((queryRecords, limited,))
@@ -1200,7 +1200,7 @@
 
     def __init__(self, service, recordAttributes, defaultNodeName=None):
 
-        self.log_debug("service=%s, attributes=%s" % (service, recordAttributes))
+        self.log.debug("service=%s, attributes=%s" % (service, recordAttributes))
 
         #save off for debugging
         if service.addDSAttrXProperties:
@@ -1370,7 +1370,7 @@
                     vcard.addProperty(newProperty)
                 else:
                     if attrType and attrValue:
-                        self.log_info("Ignoring attribute %r with value %r in creating property %r. A duplicate property already exists." % (attrType, attrValue, newProperty,))
+                        self.log.info("Ignoring attribute %r with value %r in creating property %r. A duplicate property already exists." % (attrType, attrValue, newProperty,))
 
             def addPropertyAndLabel(groupCount, label, propertyName, propertyValue, parameters=None):
                 groupCount[0] += 1
@@ -1418,8 +1418,8 @@
 
                     except Exception, e:
                         traceback.print_exc()
-                        self.log_debug("addPropertiesAndLabelsForPrefixedAttribute(): groupCount=%r, propertyPrefix=%r, propertyName=%r, nolabelParamTypes=%r, labelMap=%r, attrType=%r" % (groupCount[0], propertyPrefix, propertyName, nolabelParamTypes, labelMap, attrType,))
-                        self.log_error("addPropertiesAndLabelsForPrefixedAttribute(): Trouble parsing attribute %s, with value \"%s\".  Error = %s" % (attrType, attrValue, e,))
+                        self.log.debug("addPropertiesAndLabelsForPrefixedAttribute(): groupCount=%r, propertyPrefix=%r, propertyName=%r, nolabelParamTypes=%r, labelMap=%r, attrType=%r" % (groupCount[0], propertyPrefix, propertyName, nolabelParamTypes, labelMap, attrType,))
+                        self.log.error("addPropertiesAndLabelsForPrefixedAttribute(): Trouble parsing attribute %s, with value \"%s\".  Error = %s" % (attrType, attrValue, e,))
 
             #print("VCardRecord.vCard")
             # create vCard
@@ -1640,7 +1640,7 @@
                 if (len(parts) == 2):
                     vcard.addProperty(Property("GEO", parts))
                 else:
-                    self.log_info("Ignoring malformed attribute %r with value %r. Well-formed example: 7.7,10.6." % (dsattributes.kDSNAttrMapCoordinates, coordinate))
+                    self.log.info("Ignoring malformed attribute %r with value %r. Well-formed example: 7.7,10.6." % (dsattributes.kDSNAttrMapCoordinates, coordinate))
             #
             # 3.5 ORGANIZATIONAL TYPES http://tools.ietf.org/html/rfc2426#section-3.5
             #
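
One detail worth restating from the _refreshCache() hunks above: the next
refresh is scheduled with a randomized offset so that peer processes do not
all hit the directory at once. A standalone sketch of that arithmetic (the
hunk does not show the initial value of cacheTimeout, so this assumes it
starts out as the nominal interval in seconds):

    from random import random

    def jitteredRefreshDelay(nominalSeconds):
        # Uniform jitter in [-nominal/2, +nominal/2) added to the
        # nominal interval, as in _refreshCache() above.
        jitter = (nominalSeconds * random()) - (nominalSeconds / 2.0)
        return nominalSeconds + jitter

    delay = jitteredRefreshDelay(30 * 60)
    print "Refresh directory address book in %d minutes %d seconds" % divmod(delay, 60)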

Modified: CalendarServer/trunk/twistedcaldav/directory/resourceinfo.py
===================================================================
--- CalendarServer/trunk/twistedcaldav/directory/resourceinfo.py	2013-05-29 23:02:53 UTC (rev 11259)
+++ CalendarServer/trunk/twistedcaldav/directory/resourceinfo.py	2013-05-30 01:18:39 UTC (rev 11260)
@@ -26,12 +26,12 @@
 
 from twisted.internet.defer import inlineCallbacks, returnValue
 
-from twext.python.log import LoggingMixIn
+from twext.python.log import Logger
 
 from twistedcaldav.memcacher import Memcacher
 from twistedcaldav.sql import AbstractSQLDatabase, db_prefix
 
-class ResourceInfoDatabase(AbstractSQLDatabase, LoggingMixIn):
+class ResourceInfoDatabase(AbstractSQLDatabase):
     """
     A database to maintain resource (and location) information
 
@@ -40,8 +40,8 @@
     Group Database:
 
     ROW: GUID, AUTOSCHEDULE
-
     """
+    log = Logger()
 
     dbType = "RESOURCEINFO"
     dbFilename = "resourceinfo.sqlite"

Modified: CalendarServer/trunk/twistedcaldav/directory/xmlfile.py
===================================================================
--- CalendarServer/trunk/twistedcaldav/directory/xmlfile.py	2013-05-29 23:02:53 UTC (rev 11259)
+++ CalendarServer/trunk/twistedcaldav/directory/xmlfile.py	2013-05-30 01:18:39 UTC (rev 11260)
@@ -104,14 +104,14 @@
             try:
                 uid = pwd.getpwnam(config.UserName).pw_uid
             except KeyError:
-                self.log_error("User not found: %s" % (config.UserName,))
+                self.log.error("User not found: %s" % (config.UserName,))
 
         gid = -1
         if config.GroupName:
             try:
                 gid = grp.getgrnam(config.GroupName).gr_gid
             except KeyError:
-                self.log_error("Group not found: %s" % (config.GroupName,))
+                self.log.error("Group not found: %s" % (config.GroupName,))
 
         if uid != -1 and gid != -1:
             os.chown(xmlFile.path, uid, gid)
@@ -423,14 +423,14 @@
             try:
                 uid = pwd.getpwnam(config.UserName).pw_uid
             except KeyError:
-                self.log_error("User not found: %s" % (config.UserName,))
+                self.log.error("User not found: %s" % (config.UserName,))
 
         gid = -1
         if config.GroupName:
             try:
                 gid = grp.getgrnam(config.GroupName).gr_gid
             except KeyError:
-                self.log_error("Group not found: %s" % (config.GroupName,))
+                self.log.error("Group not found: %s" % (config.GroupName,))
 
         if uid != -1 and gid != -1:
             os.chown(self.xmlFile.path, uid, gid)
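
Both xmlfile.py hunks sit inside the same idiom: resolve the configured user
and group names to numeric ids, treat -1 as the "not resolved" sentinel, and
chown only when both lookups succeed. A self-contained sketch (the path and
names are placeholders; the real code logs the KeyError cases as shown above):

    import os
    import pwd
    import grp

    def chownToConfiguredOwner(path, userName, groupName):
        uid = -1
        if userName:
            try:
                uid = pwd.getpwnam(userName).pw_uid
            except KeyError:
                pass  # the real code logs "User not found"
        gid = -1
        if groupName:
            try:
                gid = grp.getgrnam(groupName).gr_gid
            except KeyError:
                pass  # the real code logs "Group not found"
        if uid != -1 and gid != -1:
            os.chown(path, uid, gid)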

Modified: CalendarServer/trunk/twistedcaldav/extensions.py
===================================================================
--- CalendarServer/trunk/twistedcaldav/extensions.py	2013-05-29 23:02:53 UTC (rev 11259)
+++ CalendarServer/trunk/twistedcaldav/extensions.py	2013-05-30 01:18:39 UTC (rev 11260)
@@ -58,7 +58,7 @@
 from twisted.internet.defer import gatherResults
 from twext.web2.dav.method import prop_common
 
-from twext.python.log import Logger, LoggingMixIn
+from twext.python.log import Logger
 
 from twistedcaldav import customxml
 from twistedcaldav.customxml import calendarserver_namespace
@@ -534,7 +534,7 @@
 
 
 class DAVResource (DirectoryPrincipalPropertySearchMixIn,
-                   SuperDAVResource, LoggingMixIn,
+                   SuperDAVResource,
                    DirectoryRenderingMixIn, StaticRenderMixin):
     """
     Extended L{twext.web2.dav.resource.DAVResource} implementation.
@@ -542,6 +542,7 @@
     Note we add StaticRenderMixin as a base class because we need all the etag etc behavior
     that is currently in static.py but is actually applicable to any type of resource.
     """
+    log = Logger()
 
     http_REPORT = http_REPORT
 
@@ -662,11 +663,12 @@
 
 
 class DAVPrincipalResource (DirectoryPrincipalPropertySearchMixIn,
-                            SuperDAVPrincipalResource, LoggingMixIn,
+                            SuperDAVPrincipalResource,
                             DirectoryRenderingMixIn):
     """
     Extended L{twext.web2.dav.static.DAVFile} implementation.
     """
+    log = Logger()
 
     def liveProperties(self):
         return super(DAVPrincipalResource, self).liveProperties() + (
@@ -753,10 +755,11 @@
 
 
 
-class DAVFile (SuperDAVFile, LoggingMixIn, DirectoryRenderingMixIn):
+class DAVFile (SuperDAVFile, DirectoryRenderingMixIn):
     """
     Extended L{twext.web2.dav.static.DAVFile} implementation.
     """
+    log = Logger()
 
     def resourceType(self):
         # Allow live property to be overridden by dead property
@@ -856,18 +859,20 @@
 
 
 
-class CachingPropertyStore (LoggingMixIn):
+class CachingPropertyStore (object):
     """
     DAV property store using a dict in memory on top of another
     property store implementation.
     """
+    log = Logger()
+
     def __init__(self, propertyStore):
         self.propertyStore = propertyStore
         self.resource = propertyStore.resource
 
 
     def get(self, qname, uid=None):
-        #self.log_debug("Get: %r, %r" % (self.resource.fp.path, qname))
+        #self.log.debug("Get: %r, %r" % (self.resource.fp.path, qname))
 
         cache = self._cache()
 
@@ -876,7 +881,7 @@
         if cachedQname in cache:
             property = cache.get(cachedQname, None)
             if property is None:
-                self.log_debug("Cache miss: %r, %r, %r" % (self, self.resource.fp.path, qname))
+                self.log.debug("Cache miss: %r, %r, %r" % (self, self.resource.fp.path, qname))
                 try:
                     property = self.propertyStore.get(qname, uid)
                 except HTTPError:
@@ -890,7 +895,7 @@
 
 
     def set(self, property, uid=None):
-        #self.log_debug("Set: %r, %r" % (self.resource.fp.path, property))
+        #self.log.debug("Set: %r, %r" % (self.resource.fp.path, property))
 
         cache = self._cache()
 
@@ -902,7 +907,7 @@
 
 
     def contains(self, qname, uid=None):
-        #self.log_debug("Contains: %r, %r" % (self.resource.fp.path, qname))
+        #self.log.debug("Contains: %r, %r" % (self.resource.fp.path, qname))
 
         cachedQname = qname + (uid,)
 
@@ -915,14 +920,14 @@
                 raise
 
         if cachedQname in cache:
-            #self.log_debug("Contains cache hit: %r, %r, %r" % (self, self.resource.fp.path, qname))
+            #self.log.debug("Contains cache hit: %r, %r, %r" % (self, self.resource.fp.path, qname))
             return True
         else:
             return False
 
 
     def delete(self, qname, uid=None):
-        #self.log_debug("Delete: %r, %r" % (self.resource.fp.path, qname))
+        #self.log.debug("Delete: %r, %r" % (self.resource.fp.path, qname))
 
         cachedQname = qname + (uid,)
 
@@ -933,7 +938,7 @@
 
 
     def list(self, uid=None, filterByUID=True):
-        #self.log_debug("List: %r" % (self.resource.fp.path,))
+        #self.log.debug("List: %r" % (self.resource.fp.path,))
         keys = self._cache().iterkeys()
         if filterByUID:
             return [
@@ -947,7 +952,7 @@
 
     def _cache(self):
         if not hasattr(self, "_data"):
-            #self.log_debug("Cache init: %r" % (self.resource.fp.path,))
+            #self.log.debug("Cache init: %r" % (self.resource.fp.path,))
             self._data = dict(
                 (name, None)
                 for name in self.propertyStore.list(filterByUID=False)
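
The recurring cachedQname = qname + (uid,) expression above is the whole
keying scheme of CachingPropertyStore: the property's qualified-name tuple is
extended with the per-user uid, with None selecting the shared value. A toy
illustration (the names and values are hypothetical):

    # qname is a (namespace, name) tuple; appending the uid makes the
    # cache key distinct per user.
    qname = ("DAV:", "displayname")
    sharedKey = qname + (None,)        # ("DAV:", "displayname", None)
    perUserKey = qname + ("user01",)   # ("DAV:", "displayname", "user01")

    cache = {sharedKey: None}          # None marks "exists, not yet fetched"
    print sharedKey in cache           # True
    print perUserKey in cache          # False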

Modified: CalendarServer/trunk/twistedcaldav/linkresource.py
===================================================================
--- CalendarServer/trunk/twistedcaldav/linkresource.py	2013-05-29 23:02:53 UTC (rev 11259)
+++ CalendarServer/trunk/twistedcaldav/linkresource.py	2013-05-30 01:18:39 UTC (rev 11260)
@@ -15,7 +15,7 @@
 ##
 
 
-from twext.python.log import LoggingMixIn
+from twext.python.log import Logger
 from twext.web2 import responsecode, server, http
 from txdav.xml import element as davxml
 from twext.web2.http import HTTPError, StatusResponse
@@ -41,11 +41,12 @@
 A resource that is a soft-link to another.
 """
 
-class LinkResource(CalDAVComplianceMixIn, WrapperResource, LoggingMixIn):
+class LinkResource(CalDAVComplianceMixIn, WrapperResource):
     """
     This is similar to a WrapperResource except that we locate our resource dynamically. We need to deal with the
     case of a missing underlying resource (broken link) as indicated by self._linkedResource being None.
     """
+    log = Logger()
     
     def __init__(self, parent, link_url):
         self.parent = parent

Modified: CalendarServer/trunk/twistedcaldav/memcachelock.py
===================================================================
--- CalendarServer/trunk/twistedcaldav/memcachelock.py	2013-05-29 23:02:53 UTC (rev 11259)
+++ CalendarServer/trunk/twistedcaldav/memcachelock.py	2013-05-30 01:18:39 UTC (rev 11260)
@@ -66,19 +66,19 @@
             if result:
                 self._hasLock = True
                 if waiting:
-                    self.log_debug("Got lock after waiting on %s" % (self._locktoken,))
+                    self.log.debug("Got lock after waiting on %s" % (self._locktoken,))
                 break
             
             if self._timeout and time.time() < timeout_at:
                 waiting = True
-                self.log_debug("Waiting for lock on %s" % (self._locktoken,))
+                self.log.debug("Waiting for lock on %s" % (self._locktoken,))
                 pause = Deferred()
                 def _timedDeferred():
                     pause.callback(True)
                 reactor.callLater(self._retry_interval, _timedDeferred)
                 yield pause
             else:
-                self.log_debug("Timed out lock after waiting on %s" % (self._locktoken,))
+                self.log.debug("Timed out lock after waiting on %s" % (self._locktoken,))
                 raise MemcacheLockTimeoutError()
         
         returnValue(True)
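
From a caller's point of view, the loop above means acquire() fires once the
lock is won and raises MemcacheLockTimeoutError if the timeout elapses first;
release() should only follow a successful acquire. A usage sketch under
inlineCallbacks, assuming MemcacheLock and MemcacheLockTimeoutError are
importable from the module patched above (the namespace and lock token are
placeholders):

    from twisted.internet.defer import inlineCallbacks
    from twistedcaldav.memcachelock import MemcacheLock, MemcacheLockTimeoutError

    @inlineCallbacks
    def doSerializedWork():
        lock = MemcacheLock("ExampleNamespace", "example-lock-token")
        try:
            yield lock.acquire()
        except MemcacheLockTimeoutError:
            # Another process held the lock for the whole timeout window.
            return
        try:
            pass  # ... work that must not run concurrently ...
        finally:
            yield lock.release()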

Modified: CalendarServer/trunk/twistedcaldav/memcachepool.py
===================================================================
--- CalendarServer/trunk/twistedcaldav/memcachepool.py	2013-05-29 23:02:53 UTC (rev 11259)
+++ CalendarServer/trunk/twistedcaldav/memcachepool.py	2013-05-30 01:18:39 UTC (rev 11260)
@@ -22,7 +22,7 @@
 from twext.internet.gaiendpoint import GAIEndpoint
 from twext.internet.adaptendpoint import connect
 
-from twext.python.log import LoggingMixIn
+from twext.python.log import Logger
 from twext.protocols.memcache import MemCacheProtocol, NoSuchCommand
 
 class PooledMemCacheProtocol(MemCacheProtocol):
@@ -46,7 +46,7 @@
 
 
 
-class MemCacheClientFactory(ReconnectingClientFactory, LoggingMixIn):
+class MemCacheClientFactory(ReconnectingClientFactory):
     """
     A client factory for MemCache that reconnects and notifies a pool of it's
     state.
@@ -56,6 +56,8 @@
     @ivar _protocolInstance: The current instance of our protocol that we pass
         to our connectionPool.
     """
+    log = Logger()
+
     protocol = PooledMemCacheProtocol
     connectionPool = None
     _protocolInstance = None
@@ -74,7 +76,7 @@
             # The reactor is stopping; don't reconnect
             return
 
-        self.log_error("MemCache connection lost: %s" % (reason,))
+        self.log.error("MemCache connection lost: %s" % (reason,))
         if self._protocolInstance is not None:
             self.connectionPool.clientBusy(self._protocolInstance)
 
@@ -88,7 +90,7 @@
         """
         Notify the connectionPool that we're unable to connect
         """
-        self.log_error("MemCache connection failed: %s" % (reason,))
+        self.log.error("MemCache connection failed: %s" % (reason,))
         if self._protocolInstance is not None:
             self.connectionPool.clientBusy(self._protocolInstance)
 
@@ -111,7 +113,7 @@
 
 
 
-class MemCachePool(LoggingMixIn):
+class MemCachePool(object):
     """
     A connection pool for MemCacheProtocol instances.
 
@@ -133,6 +135,8 @@
     @ivar _pendingConnects: A C{int} indicating how many connections are in
         progress.
     """
+    log = Logger()
+
     clientFactory = MemCacheClientFactory
 
     REQUEST_LOGGING_SIZE = 1024
@@ -183,7 +187,7 @@
 
         @return: A L{Deferred} that fires with the L{IProtocol} instance.
         """
-        self.log_debug("Initating new client connection to: %r" % (
+        self.log.debug("Initating new client connection to: %r" % (
                 self._endpoint,))
         self._logClientStats()
 
@@ -231,7 +235,7 @@
             Upon memcache error, log the failed request along with the error
             message and free the client.
             """
-            self.log_error("Memcache error: %s; request: %s %s" %
+            self.log.error("Memcache error: %s; request: %s %s" %
                 (failure.value, command,
                 " ".join(args)[:self.REQUEST_LOGGING_SIZE],))
             self.clientFree(client)
@@ -271,7 +275,7 @@
         elif len(self._busyClients) + self._pendingConnects >= self._maxClients:
             d = Deferred()
             self._commands.append((d, command, args, kwargs))
-            self.log_debug("Command queued: %s, %r, %r" % (
+            self.log.debug("Command queued: %s, %r, %r" % (
                     command, args, kwargs))
             self._logClientStats()
 
@@ -284,7 +288,7 @@
 
 
     def _logClientStats(self):
-        self.log_debug("Clients #free: %d, #busy: %d, "
+        self.log.debug("Clients #free: %d, #busy: %d, "
                        "#pending: %d, #queued: %d" % (
                 len(self._freeClients),
                 len(self._busyClients),
@@ -304,7 +308,7 @@
         elif client in self._freeClients:
             self._freeClients.remove(client)
 
-        self.log_debug("Removed client: %r" % (client,))
+        self.log.debug("Removed client: %r" % (client,))
         self._logClientStats()
 
 
@@ -320,7 +324,7 @@
 
         self._busyClients.add(client)
 
-        self.log_debug("Busied client: %r" % (client,))
+        self.log.debug("Busied client: %r" % (client,))
         self._logClientStats()
 
 
@@ -341,7 +345,7 @@
         if len(self._commands) > 0:
             d, command, args, kwargs = self._commands.pop(0)
 
-            self.log_debug("Performing Queued Command: %s, %r, %r" % (
+            self.log.debug("Performing Queued Command: %s, %r, %r" % (
                     command, args, kwargs))
             self._logClientStats()
 
@@ -350,7 +354,7 @@
 
             _ign_d.addCallback(d.callback)
 
-        self.log_debug("Freed client: %r" % (client,))
+        self.log.debug("Freed client: %r" % (client,))
         self._logClientStats()
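
The bookkeeping above follows a familiar pool shape: a set of free clients, a
set of busy clients, and a FIFO of commands queued while everything (including
pending connects) is in use, drained as clients are freed. A synchronous toy
of that shape, purely illustrative and not the MemCachePool API:

    class ToyPool(object):
        def __init__(self, clients):
            self._freeClients = set(clients)
            self._busyClients = set()
            self._commands = []  # FIFO of queued work

        def submit(self, work):
            if self._freeClients:
                client = self._freeClients.pop()
                self._busyClients.add(client)
                work(client)
                self.clientFree(client)
            else:
                self._commands.append(work)

        def clientFree(self, client):
            self._busyClients.discard(client)
            self._freeClients.add(client)
            if self._commands:
                # Drain the queue as capacity comes back.
                self.submit(self._commands.pop(0))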
 
 

Modified: CalendarServer/trunk/twistedcaldav/memcacheprops.py
===================================================================
--- CalendarServer/trunk/twistedcaldav/memcacheprops.py	2013-05-29 23:02:53 UTC (rev 11259)
+++ CalendarServer/trunk/twistedcaldav/memcacheprops.py	2013-05-30 01:18:39 UTC (rev 11260)
@@ -32,7 +32,7 @@
 except ImportError:
     from md5 import new as md5
 
-from twext.python.log import LoggingMixIn, Logger
+from twext.python.log import Logger
 from twext.python.memcacheclient import ClientFactory
 from twext.python.memcacheclient import MemcacheError, TokenMismatchError
 from twext.python.filepath import CachingFilePath as FilePath
@@ -42,15 +42,18 @@
 
 from twistedcaldav.config import config
 
-log = Logger()
 
+
 NoValue = ""
 
 
-class MemcachePropertyCollection (LoggingMixIn):
+
+class MemcachePropertyCollection (object):
     """
     Manages a single property store for all resources in a collection.
     """
+    log = Logger()
+
     def __init__(self, collection, cacheTimeout=0):
         self.collection = collection
         self.cacheTimeout = cacheTimeout
@@ -60,7 +63,7 @@
     def memcacheClient(cls, refresh=False):
         if not hasattr(MemcachePropertyCollection, "_memcacheClient"):
 
-            log.info("Instantiating memcache connection for MemcachePropertyCollection")
+            cls.log.info("Instantiating memcache connection for MemcachePropertyCollection")
 
             MemcachePropertyCollection._memcacheClient = ClientFactory.getClient([
                     "%s:%s" % (config.Memcached.Pools.Default.BindAddress, config.Memcached.Pools.Default.Port)
@@ -99,11 +102,11 @@
         try:
             childCache, token = propertyCache[key]
         except KeyError:
-            self.log_debug("No child property cache for %s" % (child,))
+            self.log.debug("No child property cache for %s" % (child,))
             childCache, token = ({}, None)
 
             #message = "No child property cache for %s" % (child,)
-            #log.error(message)
+            #self.log.error(message)
             #raise AssertionError(message)
 
         return propertyCache, key, childCache, token
@@ -127,7 +130,7 @@
             else:
                 return {}
 
-        self.log_debug("Loading cache for %s" % (self.collection,))
+        self.log.debug("Loading cache for %s" % (self.collection,))
 
         client = self.memcacheClient()
         assert client is not None, "OMG no cache!"
@@ -142,12 +145,12 @@
         result = self._split_gets_multi((key for key, _ignore_name in keys),
             client.gets_multi)
 
-        if self.logger.willLogAtLevel("debug"):
+        if self.log.willLogAtLevel("debug"):
             if abortIfMissing:
                 missing = "missing "
             else:
                 missing = ""
-            self.log_debug("Loaded keys for %schildren of %s: %s" % (
+            self.log.debug("Loaded keys for %schildren of %s: %s" % (
                 missing,
                 self.collection,
                 [name for _ignore_key, name in keys],
@@ -214,7 +217,7 @@
 
 
     def _storeCache(self, cache):
-        self.log_debug("Storing cache for %s" % (self.collection,))
+        self.log.debug("Storing cache for %s" % (self.collection,))
 
         values = dict((
             (self._keyForPath(path), props)
@@ -234,7 +237,7 @@
         elif not childNames:
             return {}
 
-        self.log_debug("Building cache for %s" % (self.collection,))
+        self.log.debug("Building cache for %s" % (self.collection,))
 
         cache = {}
 
@@ -282,7 +285,7 @@
                 except TokenMismatchError:
                     # The value in memcache has changed since we last
                     # fetched it
-                    log.debug("memcacheprops setProperty TokenMismatchError; retrying...")
+                    self.log.debug("memcacheprops setProperty TokenMismatchError; retrying...")
 
                 finally:
                     # Re-fetch the properties for this child
@@ -300,7 +303,7 @@
                     childCache[qnameuid] = property
 
             else:
-                log.error("memcacheprops setProperty had too many failures")
+                self.log.error("memcacheprops setProperty had too many failures")
                 delattr(self, "_propertyCache")
                 raise MemcacheError("Unable to %s property %s%s on %s" % (
                     "delete" if delete else "set",
@@ -333,7 +336,9 @@
         return self.ChildPropertyStore(self, child, childPropertyStore)
 
 
-    class ChildPropertyStore (LoggingMixIn):
+    class ChildPropertyStore (object):
+        log = Logger()
+
         def __init__(self, parentPropertyCollection, child, childPropertyStore):
             self.parentPropertyCollection = parentPropertyCollection
             self.child = child
@@ -360,7 +365,7 @@
                         "No such property: %s%s" % (uid if uid else "", encodeXMLName(*qname))
                     ))
 
-            self.log_debug("Read for %s%s on %s" % (
+            self.log.debug("Read for %s%s on %s" % (
                 ("{%s}:" % (uid,)) if uid else "",
                 qname,
                 self.childPropertyStore.resource.fp.path
@@ -368,7 +373,7 @@
             return self.childPropertyStore.get(qname, uid=uid)
 
         def set(self, property, uid=None):
-            self.log_debug("Write for %s%s on %s" % (
+            self.log.debug("Write for %s%s on %s" % (
                 ("{%s}:" % (uid,)) if uid else "",
                 property.qname(),
                 self.childPropertyStore.resource.fp.path
@@ -378,7 +383,7 @@
             self.childPropertyStore.set(property, uid=uid)
 
         def delete(self, qname, uid=None):
-            self.log_debug("Delete for %s%s on %s" % (
+            self.log.debug("Delete for %s%s on %s" % (
                 ("{%s}:" % (uid,)) if uid else "",
                 qname,
                 self.childPropertyStore.resource.fp.path
@@ -393,7 +398,7 @@
                 qnameuid = qname + (uid,)
                 return qnameuid in propertyCache
 
-            self.log_debug("Contains for %s%s on %s" % (
+            self.log.debug("Contains for %s%s on %s" % (
                 ("{%s}:" % (uid,)) if uid else "",
                 qname,
                 self.childPropertyStore.resource.fp.path,
@@ -413,6 +418,6 @@
                 else:
                     return results
 
-            self.log_debug("List for %s"
+            self.log.debug("List for %s"
                            % (self.childPropertyStore.resource.fp.path,))
             return self.childPropertyStore.list(uid=uid, filterByUID=filterByUID)
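
The setProperty path above is an optimistic-concurrency loop: read the child's
cached properties together with their memcache token, apply the change, write
it back with a token check, and retry on TokenMismatchError because a peer
wrote first; after a bounded number of failures it gives up with
MemcacheError. A schematic of that loop (fetch, store, and applyChange are
hypothetical stand-ins, not the memcacheclient API):

    from twext.python.memcacheclient import MemcacheError, TokenMismatchError

    def setWithRetry(fetch, applyChange, store, retries=10):
        # fetch() -> (value, token); store(value, token) raises
        # TokenMismatchError when another writer got in first.
        for _attempt in xrange(retries):
            value, token = fetch()
            applyChange(value)
            try:
                store(value, token)
                return
            except TokenMismatchError:
                continue  # re-fetch and retry, as setProperty does
        raise MemcacheError("too many failures")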

Modified: CalendarServer/trunk/twistedcaldav/memcacher.py
===================================================================
--- CalendarServer/trunk/twistedcaldav/memcacher.py	2013-05-29 23:02:53 UTC (rev 11259)
+++ CalendarServer/trunk/twistedcaldav/memcacher.py	2013-05-30 01:18:39 UTC (rev 11260)
@@ -20,12 +20,13 @@
 
 from twisted.internet.defer import succeed
 
-from twext.python.log import LoggingMixIn
+from twext.python.log import Logger
 
 from twistedcaldav.memcachepool import CachePoolUserMixIn
 from twistedcaldav.config import config
 
-class Memcacher(LoggingMixIn, CachePoolUserMixIn):
+class Memcacher(CachePoolUserMixIn):
+    log = Logger()
 
     MEMCACHE_KEY_LIMIT   = 250      # the memcached key length limit
     NAMESPACE_MAX_LENGTH = 32       # max size of namespace we will allow
@@ -238,7 +239,7 @@
         my_value = value
         if self._pickle:
             my_value = cPickle.dumps(value)
-        self.log_debug("Adding Cache Token for %r" % (key,))
+        self.log.debug("Adding Cache Token for %r" % (key,))
         return proto.add('%s:%s' % (self._namespace, self._normalizeKey(key)), my_value, expireTime=expireTime)
 
     def set(self, key, value, expireTime=0):
@@ -248,7 +249,7 @@
         my_value = value
         if self._pickle:
             my_value = cPickle.dumps(value)
-        self.log_debug("Setting Cache Token for %r" % (key,))
+        self.log.debug("Setting Cache Token for %r" % (key,))
         return proto.set('%s:%s' % (self._namespace, self._normalizeKey(key)), my_value, expireTime=expireTime)
 
     def checkAndSet(self, key, value, cas, flags=0, expireTime=0):
@@ -258,7 +259,7 @@
         my_value = value
         if self._pickle:
             my_value = cPickle.dumps(value)
-        self.log_debug("Setting Cache Token for %r" % (key,))
+        self.log.debug("Setting Cache Token for %r" % (key,))
         return proto.checkAndSet('%s:%s' % (self._namespace, self._normalizeKey(key)), my_value, cas, expireTime=expireTime)
 
     def get(self, key, withIdentifier=False):
@@ -273,23 +274,23 @@
                 value = (identifier, value)
             return value
 
-        self.log_debug("Getting Cache Token for %r" % (key,))
+        self.log.debug("Getting Cache Token for %r" % (key,))
         d = self._getMemcacheProtocol().get('%s:%s' % (self._namespace, self._normalizeKey(key)), withIdentifier=withIdentifier)
         d.addCallback(_gotit, withIdentifier)
         return d
 
     def delete(self, key):
-        self.log_debug("Deleting Cache Token for %r" % (key,))
+        self.log.debug("Deleting Cache Token for %r" % (key,))
         return self._getMemcacheProtocol().delete('%s:%s' % (self._namespace, self._normalizeKey(key)))
 
     def incr(self, key, delta=1):
-        self.log_debug("Incrementing Cache Token for %r" % (key,))
+        self.log.debug("Incrementing Cache Token for %r" % (key,))
         return self._getMemcacheProtocol().incr('%s:%s' % (self._namespace, self._normalizeKey(key)), delta)
 
     def decr(self, key, delta=1):
-        self.log_debug("Decrementing Cache Token for %r" % (key,))
+        self.log.debug("Decrementing Cache Token for %r" % (key,))
         return self._getMemcacheProtocol().incr('%s:%s' % (self._namespace, self._normalizeKey(key)), delta)
 
     def flushAll(self):
-        self.log_debug("Flushing All Cache Tokens")
+        self.log.debug("Flushing All Cache Tokens")
         return self._getMemcacheProtocol().flushAll()
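
Every Memcacher operation above derives the real memcache key as
namespace:normalizedKey and, when pickling is enabled, round-trips the value
through cPickle. The transform in isolation (key normalization is not shown in
this hunk, so the key passes through unchanged here; the sample values are
hypothetical):

    import cPickle

    def encodeEntry(namespace, key, value, pickled=True):
        # Mirrors the '%s:%s' key scheme and the optional cPickle.dumps()
        # used by Memcacher.add/set/checkAndSet above.
        cacheKey = '%s:%s' % (namespace, key)
        cacheValue = cPickle.dumps(value) if pickled else value
        return cacheKey, cacheValue

    key, blob = encodeEntry("mapping", "user01", {"guid": "ABC"})
    assert cPickle.loads(blob) == {"guid": "ABC"}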

Modified: CalendarServer/trunk/twistedcaldav/method/report_addressbook_query.py
===================================================================
--- CalendarServer/trunk/twistedcaldav/method/report_addressbook_query.py	2013-05-29 23:02:53 UTC (rev 11259)
+++ CalendarServer/trunk/twistedcaldav/method/report_addressbook_query.py	2013-05-30 01:18:39 UTC (rev 11260)
@@ -297,7 +297,7 @@
         depth = request.headers.getHeader("depth", "0")
         yield report_common.applyToAddressBookCollections(self, request, request.uri, depth, doQuery, (davxml.Read(),))
     except NumberOfMatchesWithinLimits, e:
-        self.log_info("Too many matching components in addressbook-query report. Limited to %d items" % e.maxLimit())
+        self.log.info("Too many matching components in addressbook-query report. Limited to %d items" % e.maxLimit())
         responses.append(davxml.StatusResponse(
                         davxml.HRef.fromString(request.uri),
                         davxml.Status.fromResponseCode(responsecode.INSUFFICIENT_STORAGE_SPACE),

Modified: CalendarServer/trunk/twistedcaldav/notifications.py
===================================================================
--- CalendarServer/trunk/twistedcaldav/notifications.py	2013-05-29 23:02:53 UTC (rev 11259)
+++ CalendarServer/trunk/twistedcaldav/notifications.py	2013-05-30 01:18:39 UTC (rev 11260)
@@ -23,7 +23,7 @@
     "NotificationCollectionResource",
 ]
 
-from twext.python.log import Logger, LoggingMixIn
+from twext.python.log import Logger
 from twext.web2 import responsecode
 from txdav.xml import element as davxml
 
@@ -138,8 +138,9 @@
         self.name = name
         self.xmltype = xmltype
 
-class NotificationsDatabase(AbstractSQLDatabase, LoggingMixIn):
-    
+class NotificationsDatabase(AbstractSQLDatabase):
+    log = Logger()
+
     db_basename = db_prefix + "notifications"
     schema_version = "1"
     db_type = "notifications"

Modified: CalendarServer/trunk/twistedcaldav/resource.py
===================================================================
--- CalendarServer/trunk/twistedcaldav/resource.py	2013-05-29 23:02:53 UTC (rev 11259)
+++ CalendarServer/trunk/twistedcaldav/resource.py	2013-05-30 01:18:39 UTC (rev 11260)
@@ -40,7 +40,7 @@
 from twisted.internet.defer import succeed, maybeDeferred, fail
 from twisted.internet.defer import inlineCallbacks, returnValue
 
-from twext.python.log import LoggingMixIn
+from twext.python.log import Logger
 
 from txdav.xml import element
 from txdav.xml.element import dav_namespace
@@ -205,13 +205,15 @@
 
 class CalDAVResource (
         CalDAVComplianceMixIn, SharedResourceMixin,
-        DAVResourceWithChildrenMixin, DAVResource, LoggingMixIn
-    ):
+        DAVResourceWithChildrenMixin, DAVResource
+):
     """
     CalDAV resource.
 
     Extends L{DAVResource} to provide CalDAV functionality.
     """
+    log = Logger()
+
     implements(ICalDAVResource)
 
     ##
@@ -1369,7 +1371,7 @@
             created in this resource.
         """
         if self.exists():
-            self.log_error("Attempt to create collection where file exists: %s" % (self,))
+            self.log.error("Attempt to create collection where file exists: %s" % (self,))
             raise HTTPError(StatusResponse(responsecode.NOT_ALLOWED, "File exists"))
 
         # newStore guarantees that we always have a parent calendar home
@@ -1384,7 +1386,7 @@
         parent = (yield self._checkParents(request, isPseudoCalendarCollectionResource))
 
         if parent is not None:
-            self.log_error("Cannot create a calendar collection within a calendar collection %s" % (parent,))
+            self.log.error("Cannot create a calendar collection within a calendar collection %s" % (parent,))
             raise HTTPError(ErrorResponse(
                 responsecode.FORBIDDEN,
                 (caldavxml.caldav_namespace, "calendar-collection-location-ok"),
@@ -1395,7 +1397,7 @@
         if config.MaxCollectionsPerHome:
             parent = (yield self.locateParent(request, request.urlForResource(self)))
             if (yield parent.countOwnedChildren()) >= config.MaxCollectionsPerHome: # NB this ignores shares
-                self.log_error("Cannot create a calendar collection because there are too many already present in %s" % (parent,))
+                self.log.error("Cannot create a calendar collection because there are too many already present in %s" % (parent,))
                 raise HTTPError(ErrorResponse(
                     responsecode.FORBIDDEN,
                     customxml.MaxCollections(),
@@ -1465,7 +1467,7 @@
         #
 
         if self.exists():
-            self.log_error("Attempt to create collection where file exists: %s" % (self,))
+            self.log.error("Attempt to create collection where file exists: %s" % (self,))
             raise HTTPError(StatusResponse(responsecode.NOT_ALLOWED, "File exists"))
 
         # newStore guarantees that we always have a parent calendar home
@@ -1479,7 +1481,7 @@
 
         parent = (yield self._checkParents(request, isAddressBookCollectionResource))
         if parent is not None:
-            self.log_error("Cannot create an address book collection within an address book collection %s" % (parent,))
+            self.log.error("Cannot create an address book collection within an address book collection %s" % (parent,))
             raise HTTPError(ErrorResponse(
                 responsecode.FORBIDDEN,
                 (carddavxml.carddav_namespace, "addressbook-collection-location-ok"),
@@ -1490,7 +1492,7 @@
         if config.MaxCollectionsPerHome:
             parent = (yield self.locateParent(request, request.urlForResource(self)))
             if (yield parent.countOwnedChildren()) >= config.MaxCollectionsPerHome: # NB this ignores shares
-                self.log_error("Cannot create a calendar collection because there are too many already present in %s" % (parent,))
+                self.log.error("Cannot create a calendar collection because there are too many already present in %s" % (parent,))
                 raise HTTPError(ErrorResponse(
                     responsecode.FORBIDDEN,
                     customxml.MaxCollections(),

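[Annotation] One benefit visible in the CalDAVResource hunk above: the logger no longer has to ride along in an already crowded base-class list, yet it is still available to subclasses because it is an ordinary class attribute. A small illustration with hypothetical classes (not CalendarServer code):

    from twext.python.log import Logger

    class BaseResource(object):
        log = Logger()

    class CalendarCollection(BaseResource):
        def create(self):
            # "log" is inherited from BaseResource; no mix-in is needed
            # anywhere in the class hierarchy.
            self.log.error("Attempt to create collection where file exists: %s"
                           % (self,))
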
Modified: CalendarServer/trunk/twistedcaldav/scheduling_store/caldav/resource.py
===================================================================
--- CalendarServer/trunk/twistedcaldav/scheduling_store/caldav/resource.py	2013-05-29 23:02:53 UTC (rev 11259)
+++ CalendarServer/trunk/twistedcaldav/scheduling_store/caldav/resource.py	2013-05-30 01:18:39 UTC (rev 11260)
@@ -400,7 +400,7 @@
         # Must be content-type text/calendar
         contentType = request.headers.getHeader("content-type")
         if contentType is not None and (contentType.mediaType, contentType.mediaSubtype) != ("text", "calendar"):
-            self.log_error("MIME type %s not allowed in calendar collection" % (contentType,))
+            self.log.error("MIME type %s not allowed in calendar collection" % (contentType,))
             raise HTTPError(ErrorResponse(
                 responsecode.FORBIDDEN,
                 (caldav_namespace, "supported-calendar-data"),
@@ -412,7 +412,7 @@
             calendar = (yield Component.fromIStream(request.stream))
         except:
             # FIXME: Bare except
-            self.log_error("Error while handling POST: %s" % (Failure(),))
+            self.log.error("Error while handling POST: %s" % (Failure(),))
             raise HTTPError(ErrorResponse(
                 responsecode.FORBIDDEN,
                 (caldav_namespace, "valid-calendar-data"),
@@ -437,7 +437,7 @@
                     originator = originatorPrincipal.canonicalCalendarUserAddress()
 
         if not originator:
-            self.log_error("%s request must have Originator" % (self.method,))
+            self.log.error("%s request must have Originator" % (self.method,))
             raise HTTPError(ErrorResponse(
                 responsecode.FORBIDDEN,
                 (caldav_namespace, "originator-specified"),
@@ -458,7 +458,7 @@
                 unique_set.add(attendee)
 
         if not attendees:
-            self.log_error("POST request must have at least one ATTENDEE")
+            self.log.error("POST request must have at least one ATTENDEE")
             raise HTTPError(ErrorResponse(
                 responsecode.FORBIDDEN,
                 (caldav_namespace, "recipient-specified"),

Modified: CalendarServer/trunk/twistedcaldav/sharing.py
===================================================================
--- CalendarServer/trunk/twistedcaldav/sharing.py	2013-05-29 23:02:53 UTC (rev 11259)
+++ CalendarServer/trunk/twistedcaldav/sharing.py	2013-05-30 01:18:39 UTC (rev 11260)
@@ -925,7 +925,7 @@
         try:
             doc = element.WebDAVDocument.fromString(xmldata)
         except ValueError, e:
-            self.log_error("Error parsing doc (%s) Doc:\n %s" % (str(e), xmldata,))
+            self.log.error("Error parsing doc (%s) Doc:\n %s" % (str(e), xmldata,))
             raise HTTPError(ErrorResponse(
                 responsecode.FORBIDDEN,
                 (customxml.calendarserver_namespace, "valid-request"),
@@ -937,7 +937,7 @@
             result = (yield self.xmlDocHandlers[type(root)](self, request, root))
             returnValue(result)
         else:
-            self.log_error("Unsupported XML (%s)" % (root,))
+            self.log.error("Unsupported XML (%s)" % (root,))
             raise HTTPError(ErrorResponse(
                 responsecode.FORBIDDEN,
                 (customxml.calendarserver_namespace, "valid-request"),
@@ -963,9 +963,9 @@
                 if contentType in self._postHandlers:
                     return self._postHandlers[contentType](self, request)
                 else:
-                    self.log_info("Got a POST on collection or group with an unsupported content type: %s" % (contentType,))
+                    self.log.info("Got a POST on collection or group with an unsupported content type: %s" % (contentType,))
             else:
-                self.log_info("Got a POST on collection or group with no content type")
+                self.log.info("Got a POST on collection or group with no content type")
         return succeed(responsecode.FORBIDDEN)
 
     _postHandlers = {

Modified: CalendarServer/trunk/twistedcaldav/test/test_cache.py
===================================================================
--- CalendarServer/trunk/twistedcaldav/test/test_cache.py	2013-05-29 23:02:53 UTC (rev 11259)
+++ CalendarServer/trunk/twistedcaldav/test/test_cache.py	2013-05-30 01:18:39 UTC (rev 11260)
@@ -388,7 +388,7 @@
 
         memcacheStub = InMemoryMemcacheProtocol()
         self.rc = MemcacheResponseCache(None, cachePool=memcacheStub)
-        self.rc.logger.setLevel(LogLevel.debug)
+        self.rc.log.setLevel(LogLevel.debug)
         self.tokens = {}
 
         self.tokens['/calendars/__uids__/cdaboo/'] = 'uriToken0'

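[Annotation] The test now reaches the logger through the same class attribute, which also gives tests a handle for tuning verbosity per logger. A sketch, assuming LogLevel is importable from twext.python.log and that Logger exposes the setLevel() call the test above exercises:

    from twext.python.log import Logger, LogLevel

    class MemcacheResponseCacheStub(object):
        """Hypothetical stand-in for MemcacheResponseCache."""
        log = Logger()

    rc = MemcacheResponseCacheStub()
    rc.log.setLevel(LogLevel.debug)   # per-logger level, as in the test above
    rc.log.debug("now visible at debug level")
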
Modified: CalendarServer/trunk/twistedcaldav/util.py
===================================================================
--- CalendarServer/trunk/twistedcaldav/util.py	2013-05-29 23:02:53 UTC (rev 11259)
+++ CalendarServer/trunk/twistedcaldav/util.py	2013-05-30 01:18:39 UTC (rev 11260)
@@ -25,7 +25,7 @@
 from twisted.internet import ssl, reactor
 from twisted.web import client
 from twisted.python import failure
-from twext.python.log import LoggingMixIn, Logger
+from twext.python.log import Logger
 
 log = Logger()
 from twext.internet.gaiendpoint import GAIEndpoint
@@ -359,7 +359,8 @@
 
 
 
-class AuthorizedHTTPGetter(client.HTTPPageGetter, LoggingMixIn):
+class AuthorizedHTTPGetter(client.HTTPPageGetter):
+    log = Logger()
 
     def handleStatus_401(self):
 
@@ -376,7 +377,7 @@
 
         self.factory.retried = True
 
-        # self.log_debug("Got a 401 trying to inject [%s]" % (self.headers,))
+        # self.log.debug("Got a 401 trying to inject [%s]" % (self.headers,))
         details = {}
         basicAvailable = digestAvailable = False
         wwwauth = self.headers.get("www-authenticate")
@@ -458,7 +459,7 @@
                 connect(
                     GAIEndpoint(reactor, self.factory.host, self.factory.port),
                     self.factory)
-            # self.log_debug("Retrying with digest after 401")
+            # self.log.debug("Retrying with digest after 401")
 
             return self.factory.deferred
 
@@ -478,7 +479,7 @@
                 connect(
                     GAIEndpoint(reactor, self.factory.host, self.factory.port),
                     self.factory)
-            # self.log_debug("Retrying with basic after 401")
+            # self.log.debug("Retrying with basic after 401")
 
             return self.factory.deferred
 

Modified: CalendarServer/trunk/txdav/base/datastore/file.py
===================================================================
--- CalendarServer/trunk/txdav/base/datastore/file.py	2013-05-29 23:02:53 UTC (rev 11259)
+++ CalendarServer/trunk/txdav/base/datastore/file.py	2013-05-30 01:18:39 UTC (rev 11260)
@@ -24,7 +24,7 @@
 
 from twisted.python import hashlib
 
-from twext.python.log import LoggingMixIn
+from twext.python.log import Logger
 from twext.enterprise.ienterprise import AlreadyFinishedError
 from twext.web2.dav.resource import TwistedGETContentMD5
 from txdav.idav import IDataStoreObject
@@ -61,10 +61,11 @@
 
 
 
-class DataStore(LoggingMixIn):
+class DataStore(object):
     """
     Generic data store.
     """
+    log = Logger()
 
     _transactionClass = None    # Derived class must set this
 
@@ -117,10 +118,11 @@
 
 
 
-class DataStoreTransaction(LoggingMixIn):
+class DataStoreTransaction(object):
     """
     In-memory implementation of a data store transaction.
     """
+    log = Logger()
 
     def __init__(self, dataStore, name):
         """
@@ -186,12 +188,12 @@
                 if undo is not None:
                     undos.append(undo)
             except:
-                self.log_debug("Undoing DataStoreTransaction")
+                self.log.debug("Undoing DataStoreTransaction")
                 for undo in undos:
                     try:
                         undo()
                     except:
-                        self.log_error("Cannot undo DataStoreTransaction")
+                        self.log.error("Cannot undo DataStoreTransaction")
                 raise
 
         for operation in self._postCommitOperations:

Modified: CalendarServer/trunk/txdav/base/propertystore/base.py
===================================================================
--- CalendarServer/trunk/txdav/base/propertystore/base.py	2013-05-29 23:02:53 UTC (rev 11259)
+++ CalendarServer/trunk/txdav/base/propertystore/base.py	2013-05-30 01:18:39 UTC (rev 11260)
@@ -23,7 +23,7 @@
     "PropertyName",
 ]
 
-from twext.python.log import LoggingMixIn
+from twext.python.log import Logger
 from txdav.xml import element as davxml
 from txdav.xml.base import encodeXMLName
 from twext.web2.dav.resource import TwistedGETContentMD5, \
@@ -35,10 +35,12 @@
 
 from zope.interface import implements
 
-class PropertyName(LoggingMixIn):
+class PropertyName(object):
     """
     Property name.
     """
+    log = Logger()
+
     implements(IPropertyName)
 
     @staticmethod
@@ -97,10 +99,12 @@
 
 
 
-class AbstractPropertyStore(LoggingMixIn, DictMixin):
+class AbstractPropertyStore(DictMixin, object):
     """
     Base property store.
     """
+    log = Logger()
+
     implements(IPropertyStore)
 
     _defaultShadowableKeys = frozenset()

Modified: CalendarServer/trunk/txdav/base/propertystore/sql.py
===================================================================
--- CalendarServer/trunk/txdav/base/propertystore/sql.py	2013-05-29 23:02:53 UTC (rev 11259)
+++ CalendarServer/trunk/txdav/base/propertystore/sql.py	2013-05-30 01:18:39 UTC (rev 11260)
@@ -31,7 +31,7 @@
 
 from txdav.xml.parser import WebDAVDocument
 from txdav.common.icommondatastore import AllRetriesFailed
-from twext.python.log import LoggingMixIn
+from twext.python.log import Logger
 from txdav.common.datastore.sql_tables import schema
 from txdav.base.propertystore.base import (AbstractPropertyStore,
                                            PropertyName, validKey)
@@ -41,7 +41,8 @@
 
 prop = schema.RESOURCE_PROPERTY
 
-class PropertyStore(AbstractPropertyStore, LoggingMixIn):
+class PropertyStore(AbstractPropertyStore):
+    log = Logger()
 
     _cacher = Memcacher("SQL.props", pickle=True, key_normalization=False)
 
@@ -270,7 +271,7 @@
 
         def justLogIt(f):
             f.trap(AllRetriesFailed)
-            self.log_error("setting a property failed; probably nothing.")
+            self.log.error("setting a property failed; probably nothing.")
         self._txn.subtransaction(trySetItem).addErrback(justLogIt)
 
     _deleteQuery = Delete(
@@ -302,7 +303,7 @@
 
         def justLogIt(f):
             f.trap(AllRetriesFailed)
-            self.log_error("setting a property failed; probably nothing.")
+            self.log.error("setting a property failed; probably nothing.")
         self._txn.subtransaction(doIt).addErrback(justLogIt)
 
 

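[Annotation] The justLogIt errback above is the standard Twisted pattern for downgrading one expected failure to a log entry while letting anything else propagate. A self-contained sketch; the exception class here is a hypothetical stand-in for AllRetriesFailed:

    from twext.python.log import Logger
    from twisted.internet.defer import fail

    log = Logger()

    class RetriesExhausted(Exception):
        """Hypothetical stand-in for AllRetriesFailed."""

    def justLogIt(f):
        # trap() re-raises any failure that is not RetriesExhausted, so
        # only the expected error is swallowed and logged.
        f.trap(RetriesExhausted)
        log.error("operation failed; logged and ignored.")

    fail(RetriesExhausted()).addErrback(justLogIt)
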
Modified: CalendarServer/trunk/txdav/base/propertystore/xattr.py
===================================================================
--- CalendarServer/trunk/txdav/base/propertystore/xattr.py	2013-05-29 23:02:53 UTC (rev 11259)
+++ CalendarServer/trunk/txdav/base/propertystore/xattr.py	2013-05-30 01:18:39 UTC (rev 11260)
@@ -188,7 +188,7 @@
                         "Unable to upgrade property "
                         "to compressed namespace: %s" % (key.toString())
                     )
-                    self.log_error(msg)
+                    self.log.error(msg)
                     raise PropertyStoreError(msg)
             else:
                 raise
@@ -221,7 +221,7 @@
                 msg = "Invalid property value stored on server: %s %s" % (
                     key.toString(), data
                 )
-                self.log_error(msg)
+                self.log.error(msg)
                 raise PropertyStoreError(msg)
             else:
                 legacy = True

Modified: CalendarServer/trunk/txdav/caldav/datastore/file.py
===================================================================
--- CalendarServer/trunk/txdav/caldav/datastore/file.py	2013-05-29 23:02:53 UTC (rev 11259)
+++ CalendarServer/trunk/txdav/caldav/datastore/file.py	2013-05-30 01:18:39 UTC (rev 11260)
@@ -487,10 +487,10 @@
         fixed, unfixed = component.validCalendarData(doFix=True, doRaise=False)
 
         if unfixed:
-            self.log_error("Calendar data at %s had unfixable problems:\n  %s" % (self._path.path, "\n  ".join(unfixed),))
+            self.log.error("Calendar data at %s had unfixable problems:\n  %s" % (self._path.path, "\n  ".join(unfixed),))
 
         if fixed:
-            self.log_error("Calendar data at %s had fixable problems:\n  %s" % (self._path.path, "\n  ".join(fixed),))
+            self.log.error("Calendar data at %s had fixable problems:\n  %s" % (self._path.path, "\n  ".join(fixed),))
 
         return component
 

Modified: CalendarServer/trunk/txdav/caldav/datastore/index_file.py
===================================================================
--- CalendarServer/trunk/txdav/caldav/datastore/index_file.py	2013-05-29 23:02:53 UTC (rev 11259)
+++ CalendarServer/trunk/txdav/caldav/datastore/index_file.py	2013-05-30 01:18:39 UTC (rev 11260)
@@ -41,7 +41,7 @@
 
 from twisted.internet.defer import maybeDeferred, succeed
 
-from twext.python.log import Logger, LoggingMixIn
+from twext.python.log import Logger
 
 from txdav.common.icommondatastore import SyncTokenValidException, \
     ReservationError, IndexedSearchException
@@ -74,12 +74,13 @@
 indexfbtype_to_icalfbtype = dict([(v, k) for k, v in icalfbtype_to_indexfbtype.iteritems()])
 
 
-class AbstractCalendarIndex(AbstractSQLDatabase, LoggingMixIn):
+class AbstractCalendarIndex(AbstractSQLDatabase):
     """
     Calendar collection index abstract base class that defines the apis for the index.
     This will be subclassed for the two types of index behaviour we need: one for
     regular calendar collections, one for schedule calendar collections.
     """
+    log = Logger()
 
     def __init__(self, resource):
         """
@@ -263,7 +264,7 @@
         names = self.notExpandedBeyond(minDate)
         # Actually expand recurrence max
         for name in names:
-            self.log_info("Search falls outside range of index for %s %s" % (name, minDate))
+            self.log.info("Search falls outside range of index for %s %s" % (name, minDate))
             self.reExpandResource(name, minDate)
 
 
@@ -834,7 +835,9 @@
 
 
 
-class MemcachedUIDReserver(CachePoolUserMixIn, LoggingMixIn):
+class MemcachedUIDReserver(CachePoolUserMixIn):
+    log = Logger()
+
     def __init__(self, index, cachePool=None):
         self.index = index
         self._cachePool = cachePool
@@ -848,7 +851,7 @@
 
     def reserveUID(self, uid):
         uid = uid.encode('utf-8')
-        self.log_debug("Reserving UID %r @ %r" % (
+        self.log.debug("Reserving UID %r @ %r" % (
                 uid,
                 self.index.resource.fp.path))
 
@@ -868,7 +871,7 @@
 
     def unreserveUID(self, uid):
         uid = uid.encode('utf-8')
-        self.log_debug("Unreserving UID %r @ %r" % (
+        self.log.debug("Unreserving UID %r @ %r" % (
                 uid,
                 self.index.resource.fp.path))
 
@@ -886,7 +889,7 @@
 
     def isReservedUID(self, uid):
         uid = uid.encode('utf-8')
-        self.log_debug("Is reserved UID %r @ %r" % (
+        self.log.debug("Is reserved UID %r @ %r" % (
                 uid,
                 self.index.resource.fp.path))
 

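[Annotation] MemcachedUIDReserver's reserve/unreserve/isReserved trio maps naturally onto memcache primitives. The hunk above only shows the logging; the operation underneath is plausibly an atomic add, which fails if the key already exists. A hedged sketch, with names and the timeout invented for illustration, assuming the cache pool exposes Twisted's MemCacheProtocol add()/delete():

    from twisted.internet.defer import inlineCallbacks

    class UIDReserverSketch(object):
        """Hypothetical reserver; not the CalendarServer implementation."""

        def __init__(self, cachePool, timeout=1800):
            self._pool = cachePool      # e.g. a twisted MemCacheProtocol
            self._timeout = timeout

        def _key(self, uid):
            return "reserve:%s" % (uid,)

        @inlineCallbacks
        def reserveUID(self, uid):
            # add() only succeeds if the key is absent, so two concurrent
            # reservations of the same UID cannot both win.
            ok = yield self._pool.add(
                self._key(uid), "reserved", expireTime=self._timeout)
            if not ok:
                raise ValueError("UID %r already reserved" % (uid,))

        def unreserveUID(self, uid):
            return self._pool.delete(self._key(uid))
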
Modified: CalendarServer/trunk/txdav/caldav/datastore/scheduling/imip/inbound.py
===================================================================
--- CalendarServer/trunk/txdav/caldav/datastore/scheduling/imip/inbound.py	2013-05-29 23:02:53 UTC (rev 11259)
+++ CalendarServer/trunk/txdav/caldav/datastore/scheduling/imip/inbound.py	2013-05-30 01:18:39 UTC (rev 11260)
@@ -22,7 +22,7 @@
 from twext.enterprise.dal.syntax import Delete
 from twext.enterprise.queue import WorkItem
 from twext.internet.gaiendpoint import GAIEndpoint
-from twext.python.log import Logger, LoggingMixIn
+from twext.python.log import Logger
 
 from twisted.application import service
 from twisted.internet import protocol, defer, ssl
@@ -409,11 +409,13 @@
 # POP3
 #
 
-class POP3DownloadProtocol(pop3client.POP3Client, LoggingMixIn):
+class POP3DownloadProtocol(pop3client.POP3Client):
+    log = Logger()
+
     allowInsecureLogin = False
 
     def serverGreeting(self, greeting):
-        self.log_debug("POP servergreeting")
+        self.log.debug("POP servergreeting")
         pop3client.POP3Client.serverGreeting(self, greeting)
         login = self.login(self.factory.settings["Username"],
             self.factory.settings["Password"])
@@ -422,18 +424,18 @@
 
 
     def cbLoginFailed(self, reason):
-        self.log_error("POP3 login failed for %s" %
+        self.log.error("POP3 login failed for %s" %
             (self.factory.settings["Username"],))
         return self.quit()
 
 
     def cbLoggedIn(self, result):
-        self.log_debug("POP loggedin")
+        self.log.debug("POP loggedin")
         return self.listSize().addCallback(self.cbGotMessageSizes)
 
 
     def cbGotMessageSizes(self, sizes):
-        self.log_debug("POP gotmessagesizes")
+        self.log.debug("POP gotmessagesizes")
         downloads = []
         for i in range(len(sizes)):
             downloads.append(self.retrieve(i).addCallback(self.cbDownloaded, i))
@@ -441,19 +443,21 @@
 
 
     def cbDownloaded(self, lines, id):
-        self.log_debug("POP downloaded message %d" % (id,))
+        self.log.debug("POP downloaded message %d" % (id,))
         self.factory.handleMessage("\r\n".join(lines))
-        self.log_debug("POP deleting message %d" % (id,))
+        self.log.debug("POP deleting message %d" % (id,))
         self.delete(id)
 
 
     def cbFinished(self, results):
-        self.log_debug("POP finished")
+        self.log.debug("POP finished")
         return self.quit()
 
 
 
-class POP3DownloadFactory(protocol.ClientFactory, LoggingMixIn):
+class POP3DownloadFactory(protocol.ClientFactory):
+    log = Logger()
+
     protocol = POP3DownloadProtocol
 
     def __init__(self, settings, mailReceiver):
@@ -463,17 +467,17 @@
 
     def clientConnectionLost(self, connector, reason):
         self.connector = connector
-        self.log_debug("POP factory connection lost")
+        self.log.debug("POP factory connection lost")
 
 
     def clientConnectionFailed(self, connector, reason):
         self.connector = connector
-        self.log_info("POP factory connection failed")
+        self.log.info("POP factory connection failed")
 
 
     def handleMessage(self, message):
-        self.log_debug("POP factory handle message")
-        self.log_debug(message)
+        self.log.debug("POP factory handle message")
+        self.log.debug(message)
         return self.mailReceiver.inbound(message)
 
 
@@ -483,21 +487,22 @@
 #
 
 
-class IMAP4DownloadProtocol(imap4.IMAP4Client, LoggingMixIn):
+class IMAP4DownloadProtocol(imap4.IMAP4Client):
+    log = Logger()
 
     def serverGreeting(self, capabilities):
-        self.log_debug("IMAP servergreeting")
+        self.log.debug("IMAP servergreeting")
         return self.authenticate(self.factory.settings["Password"]
             ).addCallback(self.cbLoggedIn
             ).addErrback(self.ebAuthenticateFailed)
 
 
     def ebLogError(self, error):
-        self.log_error("IMAP Error: %s" % (error,))
+        self.log.error("IMAP Error: %s" % (error,))
 
 
     def ebAuthenticateFailed(self, reason):
-        self.log_debug("IMAP authenticate failed for %s, trying login" %
+        self.log.debug("IMAP authenticate failed for %s, trying login" %
             (self.factory.settings["Username"],))
         return self.login(self.factory.settings["Username"],
             self.factory.settings["Password"]
@@ -506,27 +511,27 @@
 
 
     def ebLoginFailed(self, reason):
-        self.log_error("IMAP login failed for %s" %
+        self.log.error("IMAP login failed for %s" %
             (self.factory.settings["Username"],))
         self.transport.loseConnection()
 
 
     def cbLoggedIn(self, result):
-        self.log_debug("IMAP logged in [%s]" % (self.state,))
+        self.log.debug("IMAP logged in [%s]" % (self.state,))
         self.select("Inbox").addCallback(self.cbInboxSelected)
 
 
     def cbInboxSelected(self, result):
-        self.log_debug("IMAP Inbox selected [%s]" % (self.state,))
+        self.log.debug("IMAP Inbox selected [%s]" % (self.state,))
         allMessages = imap4.MessageSet(1, None)
         self.fetchUID(allMessages, True).addCallback(self.cbGotUIDs)
 
 
     def cbGotUIDs(self, results):
-        self.log_debug("IMAP got uids [%s]" % (self.state,))
+        self.log.debug("IMAP got uids [%s]" % (self.state,))
         self.messageUIDs = [result['UID'] for result in results.values()]
         self.messageCount = len(self.messageUIDs)
-        self.log_debug("IMAP Inbox has %d messages" % (self.messageCount,))
+        self.log.debug("IMAP Inbox has %d messages" % (self.messageCount,))
         if self.messageCount:
             self.fetchNextMessage()
         else:
@@ -535,30 +540,30 @@
 
 
     def fetchNextMessage(self):
-        self.log_debug("IMAP in fetchnextmessage [%s]" % (self.state,))
+        self.log.debug("IMAP in fetchnextmessage [%s]" % (self.state,))
         if self.messageUIDs:
             nextUID = self.messageUIDs.pop(0)
             messageListToFetch = imap4.MessageSet(nextUID)
-            self.log_debug("Downloading message %d of %d (%s)" %
+            self.log.debug("Downloading message %d of %d (%s)" %
                 (self.messageCount - len(self.messageUIDs), self.messageCount,
                 nextUID))
             self.fetchMessage(messageListToFetch, True).addCallback(
                 self.cbGotMessage, messageListToFetch).addErrback(
                     self.ebLogError)
         else:
-            self.log_debug("Seeing if anything new has arrived")
+            self.log.debug("Seeing if anything new has arrived")
             # Go back and see if any more messages have come in
             self.expunge().addCallback(self.cbInboxSelected)
 
 
     def cbGotMessage(self, results, messageList):
-        self.log_debug("IMAP in cbGotMessage [%s]" % (self.state,))
+        self.log.debug("IMAP in cbGotMessage [%s]" % (self.state,))
         try:
             messageData = results.values()[0]['RFC822']
         except IndexError:
             # results will be empty unless the "twistedmail-imap-flags-anywhere"
             # patch from http://twistedmatrix.com/trac/ticket/1105 is applied
-            self.log_error("Skipping empty results -- apply twisted patch!")
+            self.log.error("Skipping empty results -- apply twisted patch!")
             self.fetchNextMessage()
             return
 
@@ -576,39 +581,41 @@
 
 
     def cbMessageDeleted(self, results, messageList):
-        self.log_debug("IMAP in cbMessageDeleted [%s]" % (self.state,))
-        self.log_debug("Deleted message")
+        self.log.debug("IMAP in cbMessageDeleted [%s]" % (self.state,))
+        self.log.debug("Deleted message")
         self.fetchNextMessage()
 
 
     def cbClosed(self, results):
-        self.log_debug("IMAP in cbClosed [%s]" % (self.state,))
-        self.log_debug("Mailbox closed")
+        self.log.debug("IMAP in cbClosed [%s]" % (self.state,))
+        self.log.debug("Mailbox closed")
         self.logout().addCallback(
             lambda _: self.transport.loseConnection())
 
 
     def rawDataReceived(self, data):
-        self.log_debug("RAW RECEIVED: %s" % (data,))
+        self.log.debug("RAW RECEIVED: %s" % (data,))
         imap4.IMAP4Client.rawDataReceived(self, data)
 
 
     def lineReceived(self, line):
-        self.log_debug("RECEIVED: %s" % (line,))
+        self.log.debug("RECEIVED: %s" % (line,))
         imap4.IMAP4Client.lineReceived(self, line)
 
 
     def sendLine(self, line):
-        self.log_debug("SENDING: %s" % (line,))
+        self.log.debug("SENDING: %s" % (line,))
         imap4.IMAP4Client.sendLine(self, line)
 
 
 
-class IMAP4DownloadFactory(protocol.ClientFactory, LoggingMixIn):
+class IMAP4DownloadFactory(protocol.ClientFactory):
+    log = Logger()
+
     protocol = IMAP4DownloadProtocol
 
     def __init__(self, settings, mailReceiver):
-        self.log_debug("Setting up IMAPFactory")
+        self.log.debug("Setting up IMAPFactory")
 
         self.settings = settings
         self.mailReceiver = mailReceiver
@@ -625,16 +632,16 @@
 
 
     def handleMessage(self, message):
-        self.log_debug("IMAP factory handle message")
-        self.log_debug(message)
+        self.log.debug("IMAP factory handle message")
+        self.log.debug(message)
         return self.mailReceiver.inbound(message)
 
 
     def clientConnectionLost(self, connector, reason):
         self.connector = connector
-        self.log_debug("IMAP factory connection lost")
+        self.log.debug("IMAP factory connection lost")
 
 
     def clientConnectionFailed(self, connector, reason):
         self.connector = connector
-        self.log_warn("IMAP factory connection failed")
+        self.log.warn("IMAP factory connection failed")

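[Annotation] cbGotMessageSizes above kicks off one retrieve() Deferred per message; a common way to fire cbFinished once all of them complete is a DeferredList. A generic sketch of that aggregation (an assumption here, since the hunk cuts off before the real gathering step):

    from twisted.internet import defer

    def downloadAll(client, sizes):
        # One Deferred per message, numbered 0..n-1 as in
        # cbGotMessageSizes above.
        downloads = [client.retrieve(i) for i in range(len(sizes))]
        return defer.DeferredList(downloads)
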
Modified: CalendarServer/trunk/txdav/caldav/datastore/scheduling/imip/mailgateway.py
===================================================================
--- CalendarServer/trunk/txdav/caldav/datastore/scheduling/imip/mailgateway.py	2013-05-29 23:02:53 UTC (rev 11259)
+++ CalendarServer/trunk/txdav/caldav/datastore/scheduling/imip/mailgateway.py	2013-05-30 01:18:39 UTC (rev 11260)
@@ -23,14 +23,14 @@
 import os
 import uuid
 
-from twext.python.log import Logger, LoggingMixIn
+from twext.python.log import Logger
 from twistedcaldav.sql import AbstractSQLDatabase
 from twisted.internet.defer import inlineCallbacks
 
 log = Logger()
 
 
-class MailGatewayTokensDatabase(AbstractSQLDatabase, LoggingMixIn):
+class MailGatewayTokensDatabase(AbstractSQLDatabase):
     """
     A database to maintain "plus-address" tokens for IMIP requests.
 
@@ -39,8 +39,8 @@
     Token Database:
 
     ROW: TOKEN, ORGANIZER, ATTENDEE, ICALUID, DATESTAMP
-
     """
+    log = Logger()
 
     dbType = "MAILGATEWAYTOKENS"
     dbFilename = "mailgatewaytokens.sqlite"

Modified: CalendarServer/trunk/txdav/caldav/datastore/scheduling/imip/outbound.py
===================================================================
--- CalendarServer/trunk/txdav/caldav/datastore/scheduling/imip/outbound.py	2013-05-29 23:02:53 UTC (rev 11259)
+++ CalendarServer/trunk/txdav/caldav/datastore/scheduling/imip/outbound.py	2013-05-30 01:18:39 UTC (rev 11260)
@@ -30,7 +30,7 @@
 from pycalendar.duration import PyCalendarDuration
 from twext.enterprise.dal.record import fromTable
 from twext.enterprise.queue import WorkItem
-from twext.python.log import Logger, LoggingMixIn
+from twext.python.log import Logger
 from twisted.internet.defer import inlineCallbacks, returnValue
 from twisted.mail.smtp import messageid, rfc822date
 from twisted.web.microdom import Text as DOMText, Element as DOMElement
@@ -297,10 +297,11 @@
 
 
 
-class MailSender(LoggingMixIn):
+class MailSender(object):
     """
     Generates outbound IMIP messages and sends them.
     """
+    log = Logger()
 
     def __init__(self, address, suppressionDays, smtpSender, language):
         self.address = address
@@ -375,13 +376,13 @@
 
             if token is None:
                 token = (yield txn.imipCreateToken(originator, toAddr.lower(), icaluid))
-                self.log_debug("Mail gateway created token %s for %s "
+                self.log.debug("Mail gateway created token %s for %s "
                                "(originator), %s (recipient) and %s (icaluid)"
                                % (token, originator, toAddr, icaluid))
                 inviteState = "new"
 
             else:
-                self.log_debug("Mail gateway reusing token %s for %s "
+                self.log.debug("Mail gateway reusing token %s for %s "
                                "(originator), %s (recipient) and %s (icaluid)"
                                % (token, originator, toAddr, icaluid))
                 inviteState = "update"
@@ -457,7 +458,7 @@
         # want to do, but if this message is for an event completely in
         # the past we don't want to actually send an email.
         if not calendar.hasInstancesAfter(onlyAfter):
-            self.log_debug("Skipping IMIP message for old event")
+            self.log.debug("Skipping IMIP message for old event")
             returnValue(True)
 
         # Now prevent any "internal" CUAs from being exposed by converting
@@ -480,7 +481,7 @@
                 msgId, message))
             returnValue(success)
         except Exception, e:
-            self.log_error("Failed to send IMIP message (%s)" % (str(e),))
+            self.log.error("Failed to send IMIP message (%s)" % (str(e),))
             returnValue(False)
 
 
@@ -573,7 +574,7 @@
 
         calendarText = str(calendar)
         # the icalendar attachment
-        self.log_debug("Mail gateway sending calendar body: %s"
+        self.log.debug("Mail gateway sending calendar body: %s"
                        % (calendarText,))
         msgIcal = MIMEText(calendarText, "calendar", "UTF-8")
         method = calendar.propertyValue("METHOD").lower()

Modified: CalendarServer/trunk/txdav/caldav/datastore/scheduling/ischedule/resource.py
===================================================================
--- CalendarServer/trunk/txdav/caldav/datastore/scheduling/ischedule/resource.py	2013-05-29 23:02:53 UTC (rev 11259)
+++ CalendarServer/trunk/txdav/caldav/datastore/scheduling/ischedule/resource.py	2013-05-30 01:18:39 UTC (rev 11260)
@@ -243,7 +243,7 @@
         # Must have Originator header
         originator = request.headers.getRawHeaders("originator")
         if originator is None or (len(originator) != 1):
-            self.log_error("iSchedule POST request must have Originator header")
+            self.log.error("iSchedule POST request must have Originator header")
             raise HTTPError(ErrorResponse(
                 responsecode.FORBIDDEN,
                 (ischedule_namespace, "originator-missing"),
@@ -258,7 +258,7 @@
         # Get list of Recipient headers
         rawRecipients = request.headers.getRawHeaders("recipient")
         if rawRecipients is None or (len(rawRecipients) == 0):
-            self.log_error("%s request must have at least one Recipient header" % (self.method,))
+            self.log.error("%s request must have at least one Recipient header" % (self.method,))
             raise HTTPError(ErrorResponse(
                 responsecode.FORBIDDEN,
                 (ischedule_namespace, "recipient-missing"),
@@ -281,7 +281,7 @@
         # Must be content-type text/calendar
         contentType = request.headers.getHeader("content-type")
         if contentType is not None and (contentType.mediaType, contentType.mediaSubtype) != ("text", "calendar"):
-            self.log_error("MIME type %s not allowed in iSchedule POST request" % (contentType,))
+            self.log.error("MIME type %s not allowed in iSchedule POST request" % (contentType,))
             raise HTTPError(ErrorResponse(
                 responsecode.FORBIDDEN,
                 (ischedule_namespace, "invalid-calendar-data-type"),
@@ -293,7 +293,7 @@
             calendar = (yield Component.fromIStream(request.stream))
         except:
             # FIXME: Bare except
-            self.log_error("Error while handling iSchedule POST: %s" % (Failure(),))
+            self.log.error("Error while handling iSchedule POST: %s" % (Failure(),))
             raise HTTPError(ErrorResponse(
                 responsecode.FORBIDDEN,
                 (ischedule_namespace, "invalid-calendar-data"),

Modified: CalendarServer/trunk/txdav/caldav/datastore/scheduling/scheduler.py
===================================================================
--- CalendarServer/trunk/txdav/caldav/datastore/scheduling/scheduler.py	2013-05-29 23:02:53 UTC (rev 11259)
+++ CalendarServer/trunk/txdav/caldav/datastore/scheduling/scheduler.py	2013-05-30 01:18:39 UTC (rev 11260)
@@ -18,7 +18,7 @@
 from twisted.python.failure import Failure
 
 from twext.enterprise.locking import NamedLock
-from twext.python.log import Logger, LoggingMixIn
+from twext.python.log import Logger
 from twext.web2 import responsecode
 from twext.web2.http import HTTPError, Response
 from twext.web2.http_headers import MimeType
@@ -687,11 +687,12 @@
 
 
 
-class ScheduleResponseQueue (LoggingMixIn):
+class ScheduleResponseQueue (object):
     """
     Stores a list of (typically error) responses for use in a
     L{ScheduleResponse}.
     """
+    log = Logger()
 
     schedule_response_element = caldavxml.ScheduleResponse
     response_element = caldavxml.Response
@@ -745,7 +746,7 @@
             raise AssertionError("Unknown data type: %r" % (what,))
 
         if not suppressErrorLog and code > 400: # Error codes only
-            self.log_error("Error during %s for %s: %s" % (self.method, recipient, message))
+            self.log.error("Error during %s for %s: %s" % (self.method, recipient, message))
 
         children = []
         children.append(self.recipient_element(davxml.HRef.fromString(recipient)) if self.recipient_uses_href else self.recipient_element.fromString(recipient))

Modified: CalendarServer/trunk/txdav/caldav/datastore/sql.py
===================================================================
--- CalendarServer/trunk/txdav/caldav/datastore/sql.py	2013-05-29 23:02:53 UTC (rev 11259)
+++ CalendarServer/trunk/txdav/caldav/datastore/sql.py	2013-05-30 01:18:39 UTC (rev 11260)
@@ -572,7 +572,7 @@
         """
 
         # Make sure the loop does not operate on any new calendars created during the loop
-        self.log_warn("Splitting calendars for user %s" % (self._ownerUID,))
+        self.log.warn("Splitting calendars for user %s" % (self._ownerUID,))
         calendars = yield self.calendars()
         for calendar in calendars:
 
@@ -580,7 +580,7 @@
             if calendar.isInbox():
                 continue
             split_count = yield calendar.splitCollectionByComponentTypes()
-            self.log_warn("  Calendar: '%s', split into %d" % (calendar.name(), split_count + 1,))
+            self.log.warn("  Calendar: '%s', split into %d" % (calendar.name(), split_count + 1,))
 
         yield self.ensureDefaultCalendarsExist()
 
@@ -2109,7 +2109,7 @@
                 recurrenceLimit = instances.limit
                 recurrenceLowerLimit = instances.lowerLimit
             except InvalidOverriddenInstanceError, e:
-                self.log_error("Invalid instance %s when indexing %s in %s" %
+                self.log.error("Invalid instance %s when indexing %s in %s" %
                                (e.rid, self._name, self._calendar,))
 
                 if txn._migrating:
@@ -2341,11 +2341,11 @@
             fixed, unfixed = component.validCalendarData(doFix=True, doRaise=False)
 
             if unfixed:
-                self.log_error("Calendar data id=%s had unfixable problems:\n  %s" %
+                self.log.error("Calendar data id=%s had unfixable problems:\n  %s" %
                                (self._resourceID, "\n  ".join(unfixed),))
 
             if fixed:
-                self.log_error("Calendar data id=%s had fixable problems:\n  %s" %
+                self.log.error("Calendar data id=%s had fixable problems:\n  %s" %
                                (self._resourceID, "\n  ".join(fixed),))
 
             self._cachedComponent = component
@@ -2825,7 +2825,7 @@
             t = attachment.store(content_type, filename)
             yield readStream(stream, t.write)
         except Exception, e:
-            self.log_error("Unable to store attachment: %s" % (e,))
+            self.log.error("Unable to store attachment: %s" % (e,))
             raise AttachmentStoreFailed
         yield t.loseConnection()
 
@@ -2883,7 +2883,7 @@
             # Check that this is a proper update
             oldattachment = (yield self.attachmentWithManagedID(managed_id))
             if oldattachment is None:
-                self.log_error("Missing managed attachment even though ATTACHMENT_CALENDAR_OBJECT indicates it is present: %s" % (managed_id,))
+                self.log.error("Missing managed attachment even though ATTACHMENT_CALENDAR_OBJECT indicates it is present: %s" % (managed_id,))
                 raise AttachmentStoreFailed
 
             # We actually create a brand new attachment object for the update, but with the same managed-id. That way, other resources
@@ -2892,7 +2892,7 @@
             t = attachment.store(content_type, filename)
             yield readStream(stream, t.write)
         except Exception, e:
-            self.log_error("Unable to store attachment: %s" % (e,))
+            self.log.error("Unable to store attachment: %s" % (e,))
             raise AttachmentStoreFailed
         yield t.loseConnection()
 

Modified: CalendarServer/trunk/txdav/caldav/resource.py
===================================================================
--- CalendarServer/trunk/txdav/caldav/resource.py	2013-05-29 23:02:53 UTC (rev 11259)
+++ CalendarServer/trunk/txdav/caldav/resource.py	2013-05-30 01:18:39 UTC (rev 11260)
@@ -30,7 +30,7 @@
 
 import urllib
 
-from twext.python.log import LoggingMixIn
+from twext.python.log import Logger
 from txdav.xml.base import dav_namespace
 from twext.web2.http_headers import MimeType
 from twext.web2.http import RedirectResponse, Response
@@ -43,10 +43,12 @@
 from twistedcaldav.ical import allowedComponents
 
 
-class CalDAVResource(DAVResource, LoggingMixIn):
+class CalDAVResource(DAVResource):
     """
     CalDAV resource.
     """
+    log = Logger()
+
     def davComplianceClasses(self):
         return (
             tuple(super(CalDAVResource, self).davComplianceClasses())

Modified: CalendarServer/trunk/txdav/carddav/datastore/file.py
===================================================================
--- CalendarServer/trunk/txdav/carddav/datastore/file.py	2013-05-29 23:02:53 UTC (rev 11259)
+++ CalendarServer/trunk/txdav/carddav/datastore/file.py	2013-05-30 01:18:39 UTC (rev 11260)
@@ -237,10 +237,10 @@
         fixed, unfixed = component.validVCardData(doFix=True, doRaise=False)
 
         if unfixed:
-            self.log_error("Address data at %s had unfixable problems:\n  %s" % (self._path.path, "\n  ".join(unfixed),))
+            self.log.error("Address data at %s had unfixable problems:\n  %s" % (self._path.path, "\n  ".join(unfixed),))
 
         if fixed:
-            self.log_error("Address data at %s had fixable problems:\n  %s" % (self._path.path, "\n  ".join(fixed),))
+            self.log.error("Address data at %s had fixable problems:\n  %s" % (self._path.path, "\n  ".join(fixed),))
 
         return component
 

Modified: CalendarServer/trunk/txdav/carddav/datastore/index_file.py
===================================================================
--- CalendarServer/trunk/txdav/carddav/datastore/index_file.py	2013-05-29 23:02:53 UTC (rev 11259)
+++ CalendarServer/trunk/txdav/carddav/datastore/index_file.py	2013-05-30 01:18:39 UTC (rev 11260)
@@ -45,7 +45,7 @@
 from twistedcaldav.sql import db_prefix
 from twistedcaldav.vcard import Component
 
-from twext.python.log import Logger, LoggingMixIn
+from twext.python.log import Logger
 from twistedcaldav.config import config
 from twistedcaldav.memcachepool import CachePoolUserMixIn
 
@@ -62,7 +62,9 @@
 
 
 
-class MemcachedUIDReserver(CachePoolUserMixIn, LoggingMixIn):
+class MemcachedUIDReserver(CachePoolUserMixIn):
+    log = Logger()
+
     def __init__(self, index, cachePool=None):
         self.index = index
         self._cachePool = cachePool
@@ -76,7 +78,7 @@
 
     def reserveUID(self, uid):
         uid = uid.encode('utf-8')
-        self.log_debug("Reserving UID %r @ %r" % (
+        self.log.debug("Reserving UID %r @ %r" % (
                 uid,
                 self.index.resource.fp.path))
 
@@ -96,7 +98,7 @@
 
     def unreserveUID(self, uid):
         uid = uid.encode('utf-8')
-        self.log_debug("Unreserving UID %r @ %r" % (
+        self.log.debug("Unreserving UID %r @ %r" % (
                 uid,
                 self.index.resource.fp.path))
 
@@ -114,7 +116,7 @@
 
     def isReservedUID(self, uid):
         uid = uid.encode('utf-8')
-        self.log_debug("Is reserved UID %r @ %r" % (
+        self.log.debug("Is reserved UID %r @ %r" % (
                 uid,
                 self.index.resource.fp.path))
 

Modified: CalendarServer/trunk/txdav/carddav/datastore/sql.py
===================================================================
--- CalendarServer/trunk/txdav/carddav/datastore/sql.py	2013-05-29 23:02:53 UTC (rev 11259)
+++ CalendarServer/trunk/txdav/carddav/datastore/sql.py	2013-05-30 01:18:39 UTC (rev 11260)
@@ -2011,10 +2011,10 @@
                 fixed, unfixed = component.validVCardData(doFix=True, doRaise=False)
 
                 if unfixed:
-                    self.log_error("Address data id=%s had unfixable problems:\n  %s" % (self._resourceID, "\n  ".join(unfixed),))
+                    self.log.error("Address data id=%s had unfixable problems:\n  %s" % (self._resourceID, "\n  ".join(unfixed),))
 
                 if fixed:
-                    self.log_error("Address data id=%s had fixable problems:\n  %s" % (self._resourceID, "\n  ".join(fixed),))
+                    self.log.error("Address data id=%s had fixable problems:\n  %s" % (self._resourceID, "\n  ".join(fixed),))
 
                 if self._kind == _ABO_KIND_GROUP:
                     assert not component.hasProperty("X-ADDRESSBOOKSERVER-MEMBER"), "database group vCard text contains members %s" % (component,)

Modified: CalendarServer/trunk/txdav/carddav/resource.py
===================================================================
--- CalendarServer/trunk/txdav/carddav/resource.py	2013-05-29 23:02:53 UTC (rev 11259)
+++ CalendarServer/trunk/txdav/carddav/resource.py	2013-05-30 01:18:39 UTC (rev 11260)
@@ -25,17 +25,19 @@
     "AddressBookObjectResource",
 ]
 
-from twext.python.log import LoggingMixIn
+from twext.python.log import Logger
 from txdav.xml.base import dav_namespace
 
 from twistedcaldav.carddavxml import carddav_namespace
 from twistedcaldav.config import config
 from twistedcaldav.extensions import DAVResource
 
-class CardDAVResource(DAVResource, LoggingMixIn):
+class CardDAVResource(DAVResource):
     """
     CardDAV resource.
     """
+    log = Logger()
+
     def davComplianceClasses(self):
         return (
             tuple(super(CardDAVResource, self).davComplianceClasses())

Modified: CalendarServer/trunk/txdav/common/datastore/file.py
===================================================================
--- CalendarServer/trunk/txdav/common/datastore/file.py	2013-05-29 23:02:53 UTC (rev 11259)
+++ CalendarServer/trunk/txdav/common/datastore/file.py	2013-05-30 01:18:39 UTC (rev 11260)
@@ -21,7 +21,7 @@
 
 import sys
 from twext.internet.decorate import memoizedKey
-from twext.python.log import LoggingMixIn
+from twext.python.log import Logger
 from txdav.xml.rfc2518 import GETContentType, HRef
 from txdav.xml.rfc5842 import ResourceID
 from twext.web2.http_headers import generateContentType, MimeType
@@ -431,7 +431,8 @@
 
 
 
-class SharedCollectionsDatabase(AbstractSQLDatabase, LoggingMixIn):
+class SharedCollectionsDatabase(AbstractSQLDatabase):
+    log = Logger()
 
     db_basename = db_prefix + "shares"
     schema_version = "1"
@@ -575,7 +576,8 @@
 
 
 
-class CommonHome(FileMetaDataMixin, LoggingMixIn):
+class CommonHome(FileMetaDataMixin):
+    log = Logger()
 
     # All these need to be initialized by derived classes for each store type
     _childClass = None
@@ -905,7 +907,7 @@
         old_used = self.quotaUsedBytes()
         new_used = old_used + delta
         if new_used < 0:
-            self.log_error("Fixing quota adjusted below zero to %s by change amount %s" % (new_used, delta,))
+            self.log.error("Fixing quota adjusted below zero to %s by change amount %s" % (new_used, delta,))
             new_used = 0
         self.properties()[PropertyName.fromElement(TwistedQuotaUsedProperty)] = TwistedQuotaUsedProperty(str(new_used))
 
@@ -937,10 +939,11 @@
 
 
 
-class CommonHomeChild(FileMetaDataMixin, LoggingMixIn, FancyEqMixin, HomeChildBase):
+class CommonHomeChild(FileMetaDataMixin, FancyEqMixin, HomeChildBase):
     """
     Common ancestor class of AddressBooks and Calendars.
     """
+    log = Logger()
 
     compareAttributes = (
         "_name",
@@ -1078,7 +1081,7 @@
                     trash.remove()
                     self.properties()._removeResource()
                 except Exception, e:
-                    self.log_error("Unable to delete trashed child at %s: %s" % (trash.fp, e))
+                    self.log.error("Unable to delete trashed child at %s: %s" % (trash.fp, e))
 
             self._transaction.addOperation(cleanup, "remove child backup %r" % (self._name,))
             def undo():
@@ -1300,12 +1303,13 @@
 
 
 
-class CommonObjectResource(FileMetaDataMixin, LoggingMixIn, FancyEqMixin):
+class CommonObjectResource(FileMetaDataMixin, FancyEqMixin):
     """
     @ivar _path: The path of the file on disk
 
     @type _path: L{FilePath}
     """
+    log = Logger()
 
     compareAttributes = (
         "_name",

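[Annotation] The quota hunk above is self-contained enough to restate as a tiny function; the one subtlety is that a negative result is treated as corruption to be logged and clamped, not raised. A sketch of that logic:

    def adjustedQuota(old_used, delta, log):
        # Mirrors the clamping in the adjustQuotaUsedBytes hunk above.
        new_used = old_used + delta
        if new_used < 0:
            log.error("Fixing quota adjusted below zero to %s by change amount %s"
                      % (new_used, delta,))
            new_used = 0
        return new_used

    # e.g. adjustedQuota(100, -250, log) logs the problem and returns 0
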
Modified: CalendarServer/trunk/txdav/common/datastore/sql.py
===================================================================
--- CalendarServer/trunk/txdav/common/datastore/sql.py	2013-05-29 23:02:53 UTC (rev 11259)
+++ CalendarServer/trunk/txdav/common/datastore/sql.py	2013-05-30 01:18:39 UTC (rev 11260)
@@ -31,7 +31,7 @@
 
 from zope.interface import implements, directlyProvides
 
-from twext.python.log import Logger, LoggingMixIn
+from twext.python.log import Logger
 
 from txdav.xml.parser import WebDAVDocument
 from twext.web2.http_headers import MimeType
@@ -1390,7 +1390,8 @@
 
 
 
-class CommonHome(LoggingMixIn):
+class CommonHome(object):
+    log = Logger()
 
     # All these need to be initialized by derived classes for each store type
     _homeTable = None
@@ -3185,10 +3186,11 @@
 
 
 
-class CommonHomeChild(LoggingMixIn, FancyEqMixin, Memoizable, _SharedSyncLogic, HomeChildBase, SharingMixIn):
+class CommonHomeChild(FancyEqMixin, Memoizable, _SharedSyncLogic, HomeChildBase, SharingMixIn):
     """
     Common ancestor class of AddressBooks and Calendars.
     """
+    log = Logger()
 
     compareAttributes = (
         "_name",
@@ -4125,10 +4127,11 @@
 
 
 
-class CommonObjectResource(LoggingMixIn, FancyEqMixin):
+class CommonObjectResource(FancyEqMixin, object):
     """
     Base class for object resources.
     """
+    log = Logger()
 
     compareAttributes = (
         "_name",
@@ -4619,7 +4622,8 @@
 
 
 
-class NotificationCollection(LoggingMixIn, FancyEqMixin, _SharedSyncLogic):
+class NotificationCollection(FancyEqMixin, _SharedSyncLogic):
+    log = Logger()
 
     implements(INotificationCollection)
 
@@ -4946,7 +4950,8 @@
 
 
 
-class NotificationObject(LoggingMixIn, FancyEqMixin):
+class NotificationObject(FancyEqMixin, object):
+    log = Logger()
 
     implements(INotificationObject)
 

Modified: CalendarServer/trunk/txdav/common/datastore/sql_legacy.py
===================================================================
--- CalendarServer/trunk/txdav/common/datastore/sql_legacy.py	2013-05-29 23:02:53 UTC (rev 11259)
+++ CalendarServer/trunk/txdav/common/datastore/sql_legacy.py	2013-05-30 01:18:39 UTC (rev 11260)
@@ -41,7 +41,7 @@
 from txdav.common.datastore.sql_tables import schema
 from twext.enterprise.dal.syntax import Parameter, Select
 from twext.python.clsprop import classproperty
-from twext.python.log import Logger, LoggingMixIn
+from twext.python.log import Logger
 
 from pycalendar.datetime import PyCalendarDateTime
 from pycalendar.duration import PyCalendarDuration
@@ -56,7 +56,9 @@
     4: 'T',
 }
 
-class MemcachedUIDReserver(CachePoolUserMixIn, LoggingMixIn):
+class MemcachedUIDReserver(CachePoolUserMixIn):
+    log = Logger()
+
     def __init__(self, index, cachePool=None):
         self.index = index
         self._cachePool = cachePool
@@ -69,7 +71,7 @@
 
 
     def reserveUID(self, uid):
-        self.log_debug("Reserving UID %r @ %r" % (
+        self.log.debug("Reserving UID %r @ %r" % (
                 uid,
                 self.index.resource))
 
@@ -88,7 +90,7 @@
 
 
     def unreserveUID(self, uid):
-        self.log_debug("Unreserving UID %r @ %r" % (
+        self.log.debug("Unreserving UID %r @ %r" % (
                 uid,
                 self.index.resource))
 
@@ -105,7 +107,7 @@
 
 
     def isReservedUID(self, uid):
-        self.log_debug("Is reserved UID %r @ %r" % (
+        self.log.debug("Is reserved UID %r @ %r" % (
                 uid,
                 self.index.resource))
 
@@ -121,7 +123,8 @@
 
 
 
-class DummyUIDReserver(LoggingMixIn):
+class DummyUIDReserver(object):
+    log = Logger()
 
     def __init__(self, index):
         self.index = index
@@ -135,7 +138,7 @@
 
 
     def reserveUID(self, uid):
-        self.log_debug("Reserving UID %r @ %r" % (
+        self.log.debug("Reserving UID %r @ %r" % (
                 uid,
                 self.index.resource))
 
@@ -150,7 +153,7 @@
 
 
     def unreserveUID(self, uid):
-        self.log_debug("Unreserving UID %r @ %r" % (
+        self.log.debug("Unreserving UID %r @ %r" % (
                 uid,
                 self.index.resource))
 
@@ -161,7 +164,7 @@
 
 
     def isReservedUID(self, uid):
-        self.log_debug("Is reserved UID %r @ %r" % (
+        self.log.debug("Is reserved UID %r @ %r" % (
                 uid,
                 self.index.resource))
         key = self._key(uid)
@@ -357,7 +360,8 @@
 
 
 
-class LegacyIndexHelper(LoggingMixIn, object):
+class LegacyIndexHelper(object):
+    log = Logger()
 
     @inlineCallbacks
     def isAllowedUID(self, uid, *names):
@@ -527,7 +531,7 @@
 
         # Actually expand recurrence max
         for name in names:
-            self.log_info("Search falls outside range of index for %s %s to %s" %
+            self.log.info("Search falls outside range of index for %s %s to %s" %
                           (name, minDate, maxDate))
             yield self.reExpandResource(name, minDate, maxDate)
 

Modified: CalendarServer/trunk/txdav/common/datastore/upgrade/migrate.py
===================================================================
--- CalendarServer/trunk/txdav/common/datastore/upgrade/migrate.py	2013-05-29 23:02:53 UTC (rev 11259)
+++ CalendarServer/trunk/txdav/common/datastore/upgrade/migrate.py	2013-05-30 01:18:39 UTC (rev 11260)
@@ -24,7 +24,7 @@
 import xattr
 
 from twisted.python.failure import Failure
-from twext.python.log import LoggingMixIn
+from twext.python.log import Logger
 
 from twisted.python.runtime import platform
 from twisted.python.reflect import namedAny, qual
@@ -228,10 +228,11 @@
 
 
 
-class UpgradeToDatabaseStep(LoggingMixIn, object):
+class UpgradeToDatabaseStep(object):
     """
     Upgrade resources from a filesystem store to a database store.
     """
+    log = Logger()
 
     def __init__(self, fileStore, sqlStore, uid=None, gid=None, merge=False):
         """
@@ -308,13 +309,13 @@
         """
         migrateFunc, destFunc = homeTypeLookup.get(homeType)
         uid = normalizeUUIDOrNot(fileHome.uid())
-        self.log_warn("Starting migration transaction %s UID %r" %
+        self.log.warn("Starting migration transaction %s UID %r" %
                       (homeType, uid))
         sqlTxn = self.sqlStore.newTransaction()
         homeGetter = destFunc(sqlTxn)
         sqlHome = yield homeGetter(uid, create=False)
         if sqlHome is not None and not self.merge:
-            self.log_warn(
+            self.log.warn(
                 "%s home %r already existed not migrating" % (
                     homeType, uid))
             yield sqlTxn.abort()
@@ -345,7 +346,7 @@
         @return: a Deferred which fires when the migration is complete.
         """
         self.sqlStore.setMigrating(True)
-        self.log_warn("Beginning filesystem -> database upgrade.")
+        self.log.warn("Beginning filesystem -> database upgrade.")
 
         for homeType, eachFunc in [
                 ("calendar", self.fileStore.withEachCalendarHomeDo),
@@ -375,7 +376,7 @@
 
         self.sqlStore.setMigrating(False)
 
-        self.log_warn(
+        self.log.warn(
             "Filesystem upgrade complete, launching database service."
         )
 
@@ -383,7 +384,7 @@
     @inlineCallbacks
     def _upgradeAction(self, fileTxn, fileHome, homeType):
         uid = fileHome.uid()
-        self.log_warn("Migrating %s UID %r" % (homeType, uid))
+        self.log.warn("Migrating %s UID %r" % (homeType, uid))
         yield self.migrateOneHome(fileTxn, homeType, fileHome)
 
 

Modified: CalendarServer/trunk/txdav/common/datastore/upgrade/sql/others/attachment_migration.py
===================================================================
--- CalendarServer/trunk/txdav/common/datastore/upgrade/sql/others/attachment_migration.py	2013-05-29 23:02:53 UTC (rev 11259)
+++ CalendarServer/trunk/txdav/common/datastore/upgrade/sql/others/attachment_migration.py	2013-05-30 01:18:39 UTC (rev 11260)
@@ -33,7 +33,7 @@
 
     # Ignore if the store is not enabled for managed attachments
     if not upgrader.sqlStore.enableManagedAttachments:
-        upgrader.log_warn("No dropbox migration - managed attachments not enabled")
+        upgrader.log.warn("No dropbox migration - managed attachments not enabled")
         returnValue(None)
 
     statusKey = "MANAGED-ATTACHMENTS"
@@ -42,16 +42,16 @@
     try:
         managed = (yield txn.calendarserverValue(statusKey, raiseIfMissing=False))
         if managed is None:
-            upgrader.log_warn("Checking for dropbox migration")
+            upgrader.log.warn("Checking for dropbox migration")
             needUpgrade = (yield storeWrapper.hasDropboxAttachments(txn))
         else:
             needUpgrade = False
         if needUpgrade:
-            upgrader.log_warn("Starting dropbox migration")
+            upgrader.log.warn("Starting dropbox migration")
             yield storeWrapper.upgradeToManagedAttachments(batchSize=10)
-            upgrader.log_warn("Finished dropbox migration")
+            upgrader.log.warn("Finished dropbox migration")
         else:
-            upgrader.log_warn("No dropbox migration needed")
+            upgrader.log.warn("No dropbox migration needed")
         if managed is None:
             yield txn.setCalendarserverValue(statusKey, "1")
     except RuntimeError:
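
The hunk above is the whole shape of this upgrader: the migration is guarded
by a status key in the CALENDARSERVER table so it runs at most once. A
schematic sketch of that guard under a hypothetical name, using only the
transaction and wrapper calls the diff itself shows (calendarserverValue,
setCalendarserverValue, hasDropboxAttachments, upgradeToManagedAttachments):

    from twisted.internet.defer import inlineCallbacks, returnValue

    @inlineCallbacks
    def guardedDropboxMigration(upgrader, storeWrapper, txn):
        statusKey = "MANAGED-ATTACHMENTS"
        managed = yield txn.calendarserverValue(statusKey, raiseIfMissing=False)
        if managed is None:
            # No status recorded yet: probe whether any dropbox
            # attachments actually need converting.
            upgrader.log.warn("Checking for dropbox migration")
            needUpgrade = yield storeWrapper.hasDropboxAttachments(txn)
        else:
            needUpgrade = False
        if needUpgrade:
            upgrader.log.warn("Starting dropbox migration")
            yield storeWrapper.upgradeToManagedAttachments(batchSize=10)
        if managed is None:
            # Record completion so the probe is skipped on the next run.
            yield txn.setCalendarserverValue(statusKey, "1")
        returnValue(None)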

Modified: CalendarServer/trunk/txdav/common/datastore/upgrade/sql/upgrade.py
===================================================================
--- CalendarServer/trunk/txdav/common/datastore/upgrade/sql/upgrade.py	2013-05-29 23:02:53 UTC (rev 11259)
+++ CalendarServer/trunk/txdav/common/datastore/upgrade/sql/upgrade.py	2013-05-30 01:18:39 UTC (rev 11260)
@@ -22,7 +22,7 @@
 
 import re
 
-from twext.python.log import LoggingMixIn
+from twext.python.log import Logger
 
 from twisted.internet.defer import inlineCallbacks, returnValue
 from twisted.python.failure import Failure
@@ -31,7 +31,7 @@
 
 from txdav.common.datastore.upgrade.sql.others import attachment_migration
 
-class UpgradeDatabaseCoreStep(LoggingMixIn, object):
+class UpgradeDatabaseCoreStep(object):
     """
     Base class for either schema or data upgrades on the database.
 
@@ -42,6 +42,7 @@
 
     @type sqlStore: L{txdav.idav.IDataStore}
     """
+    log = Logger()
 
     def __init__(self, sqlStore, uid=None, gid=None, failIfUpgradeNeeded=False):
         """
@@ -73,18 +74,18 @@
         """
         Do a database schema upgrade.
         """
-        self.log_warn("Beginning database %s check." % (self.versionDescriptor,))
+        self.log.warn("Beginning database %s check." % (self.versionDescriptor,))
 
         # Retrieve information from schema and database
         dialect, required_version, actual_version = yield self.getVersions()
 
         if required_version == actual_version:
-            self.log_warn("%s version check complete: no upgrade needed." % (self.versionDescriptor.capitalize(),))
+            self.log.warn("%s version check complete: no upgrade needed." % (self.versionDescriptor.capitalize(),))
         elif required_version < actual_version:
             msg = "Actual %s version %s is more recent than the expected version %s. The service cannot be started" % (
                 self.versionDescriptor, actual_version, required_version,
             )
-            self.log_error(msg)
+            self.log.error(msg)
             raise RuntimeError(msg)
         elif self.failIfUpgradeNeeded:
                 # TODO: change this exception to be upgrade-specific
@@ -94,7 +95,7 @@
             yield self.upgradeVersion(actual_version, required_version, dialect)
             self.sqlStore.setUpgrading(False)
 
-        self.log_warn("Database %s check complete." % (self.versionDescriptor,))
+        self.log.warn("Database %s check complete." % (self.versionDescriptor,))
 
         returnValue(None)
 
@@ -111,11 +112,11 @@
         found = re.search("insert into CALENDARSERVER values \('%s', '(\d+)'\);" % (self.versionKey,), current_schema)
         if found is None:
             msg = "Schema is missing required database key %s insert statement: %s" % (self.versionKey, current_schema,)
-            self.log_error(msg)
+            self.log.error(msg)
             raise RuntimeError(msg)
         else:
             required_version = int(found.group(1))
-            self.log_warn("Required database key %s: %s." % (self.versionKey, required_version,))
+            self.log.warn("Required database key %s: %s." % (self.versionKey, required_version,))
 
         # Get the schema version in the current database
         sqlTxn = self.sqlStore.newTransaction()
@@ -126,14 +127,14 @@
             yield sqlTxn.commit()
         except (RuntimeError, ValueError):
             f = Failure()
-            self.log_error("Database key %s cannot be determined." % (self.versionKey,))
+            self.log.error("Database key %s cannot be determined." % (self.versionKey,))
             yield sqlTxn.abort()
             if self.defaultKeyValue is None:
                 f.raiseException()
             else:
                 actual_version = self.defaultKeyValue
 
-        self.log_warn("Actual database key %s: %s." % (self.versionKey, actual_version,))
+        self.log.warn("Actual database key %s: %s." % (self.versionKey, actual_version,))
 
         returnValue((dialect, required_version, actual_version,))
 
@@ -145,7 +146,7 @@
         looking for upgrade_from_X_to_Y.sql files that cover the full range of upgrades.
         """
 
-        self.log_warn("Starting %s upgrade from version %d to %d." % (self.versionDescriptor, fromVersion, toVersion,))
+        self.log.warn("Starting %s upgrade from version %d to %d." % (self.versionDescriptor, fromVersion, toVersion,))
 
         # Scan for all possible upgrade files - returned sorted
         files = self.scanForUpgradeFiles(dialect)
@@ -158,10 +159,10 @@
             for fp in upgrades:
                 yield self.applyUpgrade(fp)
         except RuntimeError:
-            self.log_error("Database %s upgrade failed using: %s" % (self.versionDescriptor, fp.basename(),))
+            self.log.error("Database %s upgrade failed using: %s" % (self.versionDescriptor, fp.basename(),))
             raise
 
-        self.log_warn("%s upgraded from version %d to %d." % (self.versionDescriptor.capitalize(), fromVersion, toVersion,))
+        self.log.warn("%s upgraded from version %d to %d." % (self.versionDescriptor.capitalize(), fromVersion, toVersion,))
 
 
     def getPathToUpgrades(self, dialect):
@@ -210,7 +211,7 @@
         while nextVersion != toVersion:
             if nextVersion not in filesByFromVersion:
                 msg = "Missing upgrade file from version %d with dialect %s" % (nextVersion, dialect,)
-                self.log_error(msg)
+                self.log.error(msg)
                 raise RuntimeError(msg)
             else:
                 upgrades.append(filesByFromVersion[nextVersion][2])
@@ -260,7 +261,7 @@
         """
         Apply the schema upgrade .sql file to the database.
         """
-        self.log_warn("Applying schema upgrade: %s" % (fp.basename(),))
+        self.log.warn("Applying schema upgrade: %s" % (fp.basename(),))
         sqlTxn = self.sqlStore.newTransaction()
         try:
             sql = fp.getContent()
@@ -301,10 +302,10 @@
             doUpgrade = namedObject(module)
         except ImportError:
             msg = "Failed data upgrade: %s" % (fp.basename()[:-4],)
-            self.log_error(msg)
+            self.log.error(msg)
             raise RuntimeError(msg)
 
-        self.log_warn("Applying data upgrade: %s" % (module,))
+        self.log.warn("Applying data upgrade: %s" % (module,))
         yield doUpgrade(self.sqlStore)
 
 
@@ -387,7 +388,7 @@
         """
         Do upgrades.
         """
-        self.log_warn("Beginning database %s check." % (self.versionDescriptor,))
+        self.log.warn("Beginning database %s check." % (self.versionDescriptor,))
 
         # Do each upgrade in our own predefined order
         self.sqlStore.setUpgrading(True)
@@ -397,6 +398,6 @@
 
         self.sqlStore.setUpgrading(False)
 
-        self.log_warn("Database %s check complete." % (self.versionDescriptor,))
+        self.log.warn("Database %s check complete." % (self.versionDescriptor,))
 
         returnValue(None)
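
Taken together, the stepWithResult hunks above encode a three-way version
check: equal versions are a no-op, a database newer than the code is a hard
error, and an older database triggers the chained upgrade_from_X_to_Y.sql
files. A hypothetical distillation of that decision, with plain-function
names not present in the source:

    def versionAction(required_version, actual_version, failIfUpgradeNeeded=False):
        if required_version == actual_version:
            return "noop"      # versions match: nothing to do
        elif required_version < actual_version:
            # The database is ahead of the running code; refuse to start.
            raise RuntimeError(
                "Actual version %s is more recent than the expected version %s"
                % (actual_version, required_version))
        elif failIfUpgradeNeeded:
            raise RuntimeError("Upgrade needed but not permitted")
        else:
            return "upgrade"   # walk the upgrade_from_X_to_Y.sql chain

    versionAction(6, 6)        # -> "noop"
    versionAction(7, 6)        # -> "upgrade"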