[CalendarServer-changes] [6914] CalendarServer/trunk

source_changes at macosforge.org
Tue Feb 8 15:16:17 PST 2011


Revision: 6914
          http://trac.macosforge.org/projects/calendarserver/changeset/6914
Author:   cdaboo at apple.com
Date:     2011-02-08 15:16:17 -0800 (Tue, 08 Feb 2011)
Log Message:
-----------
Cache home owner_uid -> resource_id mapping. Fix up tests for new SQL caching behavior.
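
In outline, the change puts a cache-aside lookup in front of the home owner_uid -> resource_id SQL query. The sketch below is illustrative only (the cacher, txn, and table/column names are placeholders, not the actual CalendarServer objects), but it shows the shape of the new initFromStore() behavior:

    from twisted.internet.defer import inlineCallbacks, returnValue

    @inlineCallbacks
    def lookupHomeResourceID(cacher, txn, ownerUID, no_cache=False):
        """
        Cache-aside lookup of a home's resource id: try memcached first and
        fall back to SQL on a miss. All names here are illustrative.
        """
        result = yield cacher.get(ownerUID)
        if result is None:
            result = yield txn.execSQL(
                "select RESOURCE_ID from CALENDAR_HOME where OWNER_UID = %s",
                [ownerUID],
            )
            # Cache only an existing row, and never while a home is being
            # provisioned (no_cache=True), so the mapping does not become
            # visible before the creating transaction commits.
            if result and not no_cache:
                yield cacher.set(ownerUID, result)
        returnValue(result[0][0] if result else None)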

Modified Paths:
--------------
    CalendarServer/trunk/twistedcaldav/memcachepool.py
    CalendarServer/trunk/twistedcaldav/memcacher.py
    CalendarServer/trunk/txdav/base/propertystore/sql.py
    CalendarServer/trunk/txdav/base/propertystore/test/test_sql.py
    CalendarServer/trunk/txdav/caldav/datastore/sql.py
    CalendarServer/trunk/txdav/caldav/datastore/test/test_sql.py
    CalendarServer/trunk/txdav/carddav/datastore/sql.py
    CalendarServer/trunk/txdav/carddav/datastore/test/test_sql.py
    CalendarServer/trunk/txdav/common/datastore/sql.py
    CalendarServer/trunk/txdav/common/datastore/sql_schema_v1.sql
    CalendarServer/trunk/txdav/common/datastore/test/util.py

Modified: CalendarServer/trunk/twistedcaldav/memcachepool.py
===================================================================
--- CalendarServer/trunk/twistedcaldav/memcachepool.py	2011-02-08 22:45:57 UTC (rev 6913)
+++ CalendarServer/trunk/twistedcaldav/memcachepool.py	2011-02-08 23:16:17 UTC (rev 6914)
@@ -360,8 +360,11 @@
     def add(self, *args, **kwargs):
         return self.performRequest('add', *args, **kwargs)
 
+    def flush_all(self, *args, **kwargs):
+        return self.performRequest('flush_all', *args, **kwargs)
 
 
+
 class CachePoolUserMixIn(object):
     """
     A mixin that returns a saved cache pool or fetches the default cache pool.

Modified: CalendarServer/trunk/twistedcaldav/memcacher.py
===================================================================
--- CalendarServer/trunk/twistedcaldav/memcacher.py	2011-02-08 22:45:57 UTC (rev 6913)
+++ CalendarServer/trunk/twistedcaldav/memcacher.py	2011-02-08 23:16:17 UTC (rev 6914)
@@ -70,6 +70,10 @@
             except KeyError:
                 return succeed(False)
 
+        def flush_all(self):
+            self._cache = {}
+            return succeed(True)
+            
     #TODO: an sqlite based cacher that can be used for multiple instance servers
     # in the absence of memcached. This is not ideal and we may want to not implement
     # this, but it is being documented for completeness.
@@ -93,7 +97,10 @@
         def delete(self, key):
             return succeed(True)
 
-    def __init__(self, namespace, pickle=False, no_invalidation=False):
+        def flush_all(self):
+            return succeed(True)
+
+    def __init__(self, namespace, pickle=False, no_invalidation=False, key_normalization=True):
         """
         @param namespace: a unique namespace for this cache's keys
         @type namespace: C{str}
@@ -105,6 +112,9 @@
             is not present,as there is no issue with caches in each instance getting out of sync. If C{False} the
             nullCacher will be used for the multi-instance case when memcached is not configured.
         @type no_invalidation: C{bool}
+        @param key_normalization: if C{True} the key is assumed to possibly be longer than the Memcache key size and so additional
+            work is done to truncate and append a hash.
+        @type key_normalization: C{bool}
         """
         
         assert len(namespace) <= Memcacher.NAMESPACE_MAX_LENGTH, "Memcacher namespace must be less than or equal to %s characters long" % (Memcacher.NAMESPACE_MAX_LENGTH,)
@@ -113,6 +123,7 @@
         self._namespace = namespace
         self._pickle = pickle
         self._noInvalidation = no_invalidation
+        self._key_normalization = key_normalization
 
 
     def _getMemcacheProtocol(self):
@@ -141,9 +152,12 @@
             key = key.encode("utf-8")
         assert isinstance(key, str), "Key must be a str."
 
-        hash = hashlib.md5(key).hexdigest()
-        key = key[:Memcacher.TRUNCATED_KEY_LENGTH]
-        return "%s-%s" % (key.translate(Memcacher.keyNormalizeTranslateTable), hash,)
+        if self._key_normalization:
+            hash = hashlib.md5(key).hexdigest()
+            key = key[:Memcacher.TRUNCATED_KEY_LENGTH]
+            return "%s-%s" % (key.translate(Memcacher.keyNormalizeTranslateTable), hash,)
+        else:
+            return key
 
     def add(self, key, value, expire_time=0):
         
@@ -177,7 +191,10 @@
         d.addCallback(_gotit)
         return d
 
-
     def delete(self, key):
         self.log_debug("Deleting Cache Token for %r" % (key,))
         return self._getMemcacheProtocol().delete('%s:%s' % (self._namespace, self._normalizeKey(key)))
+
+    def flush_all(self):
+        self.log_debug("Flushing All Cache Tokens")
+        return self._getMemcacheProtocol().flush_all()

Modified: CalendarServer/trunk/txdav/base/propertystore/sql.py
===================================================================
--- CalendarServer/trunk/txdav/base/propertystore/sql.py	2011-02-08 22:45:57 UTC (rev 6913)
+++ CalendarServer/trunk/txdav/base/propertystore/sql.py	2011-02-08 23:16:17 UTC (rev 6914)
@@ -34,7 +34,7 @@
 
 class PropertyStore(AbstractPropertyStore):
 
-    cacher = Memcacher("propertystore.sql", pickle=True)
+    _cacher = Memcacher("propertystore.sql", pickle=True, key_normalization=False)
 
     def __init__(self, *a, **kw):
         raise NotImplementedError(
@@ -55,7 +55,7 @@
             # Cache existing properties in this object 
 
             # Look for memcache entry first
-            rows = yield self.cacher.get(str(self._resourceID))
+            rows = yield self._cacher.get(str(self._resourceID))
             
             if rows is None:
                 rows = yield self._txn.execSQL(
@@ -65,7 +65,7 @@
                     """,
                     [self._resourceID]
                 )
-                yield self.cacher.set(str(self._resourceID), rows if rows is not None else ())
+                yield self._cacher.set(str(self._resourceID), rows if rows is not None else ())
             for name, uid, value in rows:
                 self._cached[(name, uid)] = value
 
@@ -151,7 +151,7 @@
                 [self._resourceID, key_str, value_str, uid]
             )
         self._cached[(key_str, uid)] = value_str
-        self.cacher.delete(str(self._resourceID))
+        self._cacher.delete(str(self._resourceID))
 
     def _delitem_uid(self, key, uid):
         validKey(key)
@@ -166,7 +166,7 @@
             [self._resourceID, key_str, uid],
             raiseOnZeroRowCount=lambda:KeyError(key)
         )
-        self.cacher.delete(str(self._resourceID))
+        self._cacher.delete(str(self._resourceID))
             
 
     def _keys_uid(self, uid):

Modified: CalendarServer/trunk/txdav/base/propertystore/test/test_sql.py
===================================================================
--- CalendarServer/trunk/txdav/base/propertystore/test/test_sql.py	2011-02-08 22:45:57 UTC (rev 6913)
+++ CalendarServer/trunk/txdav/base/propertystore/test/test_sql.py	2011-02-08 23:16:17 UTC (rev 6914)
@@ -26,6 +26,9 @@
 from txdav.base.propertystore.base import PropertyName
 from txdav.base.propertystore.test import base
 
+from twistedcaldav import memcacher
+from twistedcaldav.config import config
+
 try:
     from txdav.base.propertystore.sql import PropertyStore
 except ImportError, e:
@@ -39,6 +42,10 @@
 
     @inlineCallbacks
     def setUp(self):
+        self.patch(config.Memcached.Pools.Default, "ClientEnabled", False)
+        self.patch(config.Memcached.Pools.Default, "ServerEnabled", False)
+        self.patch(memcacher.Memcacher, "allowTestCache", True)
+
         self.notifierFactory = StubNotifierFactory()
         self.store = yield buildStore(self, self.notifierFactory)
         self.addCleanup(self.maybeCommitLast)

Modified: CalendarServer/trunk/txdav/caldav/datastore/sql.py
===================================================================
--- CalendarServer/trunk/txdav/caldav/datastore/sql.py	2011-02-08 22:45:57 UTC (rev 6913)
+++ CalendarServer/trunk/txdav/caldav/datastore/sql.py	2011-02-08 23:16:17 UTC (rev 6914)
@@ -40,6 +40,7 @@
 from twistedcaldav.dateops import normalizeForIndex, datetimeMktime
 from twistedcaldav.ical import Component
 from twistedcaldav.instance import InvalidOverriddenInstanceError
+from twistedcaldav.memcacher import Memcacher
 
 from txdav.base.propertystore.base import PropertyName
 from txdav.caldav.datastore.util import validateCalendarComponent,\
@@ -77,6 +78,8 @@
     _notifierPrefix = "CalDAV"
     _revisionsTable = CALENDAR_OBJECT_REVISIONS_TABLE
 
+    _cacher = Memcacher("datastore.calhome", pickle=True, key_normalization=False)
+
     def __init__(self, transaction, ownerUID, notifiers):
 
         self._childClass = Calendar

Modified: CalendarServer/trunk/txdav/caldav/datastore/test/test_sql.py
===================================================================
--- CalendarServer/trunk/txdav/caldav/datastore/test/test_sql.py	2011-02-08 22:45:57 UTC (rev 6913)
+++ CalendarServer/trunk/txdav/caldav/datastore/test/test_sql.py	2011-02-08 23:16:17 UTC (rev 6914)
@@ -36,8 +36,9 @@
 from txdav.caldav.datastore.util import _migrateCalendar, migrateHome
 from txdav.base.propertystore.base import PropertyName
 
+from twistedcaldav import memcacher
+from twistedcaldav.config import config
 
-
 class CalendarSQLStorageTests(CalendarCommonTests, unittest.TestCase):
     """
     Calendar SQL storage tests.
@@ -45,6 +46,10 @@
 
     @inlineCallbacks
     def setUp(self):
+        self.patch(config.Memcached.Pools.Default, "ClientEnabled", False)
+        self.patch(config.Memcached.Pools.Default, "ServerEnabled", False)
+        self.patch(memcacher.Memcacher, "allowTestCache", True)
+
         yield super(CalendarSQLStorageTests, self).setUp()
         self._sqlCalendarStore = yield buildStore(self, self.notifierFactory)
         yield self.populate()

Modified: CalendarServer/trunk/txdav/carddav/datastore/sql.py
===================================================================
--- CalendarServer/trunk/txdav/carddav/datastore/sql.py	2011-02-08 22:45:57 UTC (rev 6913)
+++ CalendarServer/trunk/txdav/carddav/datastore/sql.py	2011-02-08 23:16:17 UTC (rev 6914)
@@ -34,6 +34,7 @@
 from twext.web2.http_headers import MimeType
 
 from twistedcaldav import carddavxml, customxml
+from twistedcaldav.memcacher import Memcacher
 from twistedcaldav.vcard import Component as VCard
 
 from txdav.common.datastore.sql_legacy import \
@@ -68,6 +69,8 @@
     _notifierPrefix = "CardDAV"
     _revisionsTable = ADDRESSBOOK_OBJECT_REVISIONS_TABLE
 
+    _cacher = Memcacher("datastore.adbkhome", pickle=True, key_normalization=False)
+
     def __init__(self, transaction, ownerUID, notifiers):
 
         self._childClass = AddressBook

Modified: CalendarServer/trunk/txdav/carddav/datastore/test/test_sql.py
===================================================================
--- CalendarServer/trunk/txdav/carddav/datastore/test/test_sql.py	2011-02-08 22:45:57 UTC (rev 6913)
+++ CalendarServer/trunk/txdav/carddav/datastore/test/test_sql.py	2011-02-08 23:16:17 UTC (rev 6914)
@@ -31,6 +31,8 @@
 from twisted.trial import unittest
 from twisted.internet.defer import inlineCallbacks, returnValue
 
+from twistedcaldav import memcacher
+from twistedcaldav.config import config
 from twistedcaldav.vcard import Component as VCard
 
 
@@ -41,6 +43,10 @@
 
     @inlineCallbacks
     def setUp(self):
+        self.patch(config.Memcached.Pools.Default, "ClientEnabled", False)
+        self.patch(config.Memcached.Pools.Default, "ServerEnabled", False)
+        self.patch(memcacher.Memcacher, "allowTestCache", True)
+
         yield super(AddressBookSQLStorageTests, self).setUp()
         self._sqlStore = yield buildStore(self, self.notifierFactory)
         yield self.populate()

Modified: CalendarServer/trunk/txdav/common/datastore/sql.py
===================================================================
--- CalendarServer/trunk/txdav/common/datastore/sql.py	2011-02-08 22:45:57 UTC (rev 6913)
+++ CalendarServer/trunk/txdav/common/datastore/sql.py	2011-02-08 23:16:17 UTC (rev 6914)
@@ -378,6 +378,8 @@
     _notifierPrefix = None
     _revisionsTable = None
     _notificationRevisionsTable = NOTIFICATION_OBJECT_REVISIONS_TABLE
+    
+    _cacher = None  # Initialize in derived classes
 
     def __init__(self, transaction, ownerUID, notifiers):
         self._txn = transaction
@@ -398,17 +400,22 @@
             self._revisionBindJoinTable["BIND:%s" % (key,)] = value
 
     @inlineCallbacks
-    def initFromStore(self):
+    def initFromStore(self, no_cache=False):
         """
         Initialize this object from the store. We read in and cache all the extra meta-data
         from the DB to avoid having to do DB queries for those individually later.
         """
 
-        result = yield self._txn.execSQL(
-            "select %(column_RESOURCE_ID)s from %(name)s"
-            " where %(column_OWNER_UID)s = %%s" % self._homeTable,
-            [self._ownerUID]
-        )
+        result = yield self._cacher.get(self._ownerUID)
+        if result is None:
+            result = yield self._txn.execSQL(
+                "select %(column_RESOURCE_ID)s from %(name)s"
+                " where %(column_OWNER_UID)s = %%s" % self._homeTable,
+                [self._ownerUID]
+            )
+            if result and not no_cache:
+                yield self._cacher.set(self._ownerUID, result)
+
         if result:
             self._resourceID = result[0][0]
             yield self._loadPropertyStore()
@@ -434,10 +441,15 @@
             if not create:
                 returnValue(None)
             # Need to lock to prevent race condition
+
             # FIXME: this is an entire table lock - ideally we want a row lock
             # but the row does not exist yet. However, the "exclusive" mode
             # does allow concurrent reads so the only thing we block is other
             # attempts to provision a home, which is not too bad
+            
+            # Also note that we must not cache the owner_uid->resource_id mapping in _cacher
+            # when creating as we don't want that to appear until AFTER the commit
+
             yield txn.execSQL(
                 "lock %(name)s in exclusive mode" % cls._homeTable,
             )
@@ -458,7 +470,8 @@
                     "insert into %(name)s (%(column_RESOURCE_ID)s) values (%%s)" % cls._homeMetaDataTable,
                     [resourceid]
                 )
-            home = yield cls.homeWithUID(txn, uid)
+            home = cls(txn, uid, notifiers)
+            home = (yield home.initFromStore(no_cache=not exists))
             if not exists:
                 yield home.createdHome()
             returnValue(home)
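
The comments above capture the two subtleties in home provisioning: the exclusive table lock is needed because the row being protected does not exist yet (so there is nothing to row-lock), and a newly created home is initialized with no_cache=True so the owner_uid -> resource_id mapping only becomes cacheable after the creating transaction commits. A hedged sketch of that flow (class and helper names are placeholders, and the real code also re-checks for a concurrently created row after taking the lock):

    from twisted.internet.defer import inlineCallbacks, returnValue

    @inlineCallbacks
    def homeWithUID(homeClass, txn, uid, create=False, notifiers=None):
        """
        Illustrative outline only; not the actual CommonHome implementation.
        """
        home = yield homeClass(txn, uid, notifiers).initFromStore()
        if home is not None:
            returnValue(home)       # found, possibly via the memcached entry
        if not create:
            returnValue(None)

        # The row does not exist yet, so an exclusive table lock is the only
        # way to serialize concurrent provisioning attempts.
        yield txn.execSQL("lock CALENDAR_HOME in exclusive mode")
        yield txn.execSQL(
            "insert into CALENDAR_HOME (OWNER_UID) values (%s)", [uid])

        # no_cache=True: the uid -> resource id mapping must not reach
        # memcached until after this transaction commits.
        home = yield homeClass(txn, uid, notifiers).initFromStore(no_cache=True)
        yield home.createdHome()
        returnValue(home)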

Modified: CalendarServer/trunk/txdav/common/datastore/sql_schema_v1.sql
===================================================================
--- CalendarServer/trunk/txdav/common/datastore/sql_schema_v1.sql	2011-02-08 22:45:57 UTC (rev 6913)
+++ CalendarServer/trunk/txdav/common/datastore/sql_schema_v1.sql	2011-02-08 23:16:17 UTC (rev 6914)
@@ -200,7 +200,7 @@
 create index CALENDAR_OBJECT_CALENDAR_RESOURCE_ID_RECURRANCE_MAX on
   CALENDAR_OBJECT(CALENDAR_RESOURCE_ID, RECURRANCE_MAX);
 
-  create index CALENDAR_OBJECT_ORGANIZER_OBJECT on
+create index CALENDAR_OBJECT_ORGANIZER_OBJECT on
   CALENDAR_OBJECT(ORGANIZER_OBJECT);
 
 create index CALENDAR_OBJECT_DROPBOX_ID on

Modified: CalendarServer/trunk/txdav/common/datastore/test/util.py
===================================================================
--- CalendarServer/trunk/txdav/common/datastore/test/util.py	2011-02-08 22:45:57 UTC (rev 6913)
+++ CalendarServer/trunk/txdav/common/datastore/test/util.py	2011-02-08 23:16:17 UTC (rev 6914)
@@ -164,6 +164,14 @@
             except:
                 log.err()
         yield cleanupTxn.commit()
+        
+        # Deal with memcached items that must be cleared
+        from txdav.caldav.datastore.sql import CalendarHome
+        CalendarHome._cacher.flush_all()
+        from txdav.carddav.datastore.sql import AddressBookHome
+        AddressBookHome._cacher.flush_all()
+        from txdav.base.propertystore.sql import PropertyStore
+        PropertyStore._cacher.flush_all()
 
 theStoreBuilder = SQLStoreBuilder()
 buildStore = theStoreBuilder.buildStore