[CalendarServer-changes] [4589] CalendarServer/branches/users/sagen/deployment-inherit-fds-4571/ twistedcaldav

source_changes at macosforge.org source_changes at macosforge.org
Fri Oct 16 14:21:32 PDT 2009


Revision: 4589
          http://trac.macosforge.org/projects/calendarserver/changeset/4589
Author:   sagen at apple.com
Date:     2009-10-16 14:21:31 -0700 (Fri, 16 Oct 2009)
Log Message:
-----------
New OD caching strategy: the master process performs a periodic directory query and stores the results to files in the DataRoot; the child processes stat/read these files instead of talking to OD.

Modified Paths:
--------------
    CalendarServer/branches/users/sagen/deployment-inherit-fds-4571/twistedcaldav/cluster.py
    CalendarServer/branches/users/sagen/deployment-inherit-fds-4571/twistedcaldav/directory/appleopendirectory.py
    CalendarServer/branches/users/sagen/deployment-inherit-fds-4571/twistedcaldav/directory/directory.py
    CalendarServer/branches/users/sagen/deployment-inherit-fds-4571/twistedcaldav/directory/test/test_opendirectoryrecords.py
    CalendarServer/branches/users/sagen/deployment-inherit-fds-4571/twistedcaldav/directory/xmlfile.py
    CalendarServer/branches/users/sagen/deployment-inherit-fds-4571/twistedcaldav/test/test_memcacher.py
    CalendarServer/branches/users/sagen/deployment-inherit-fds-4571/twistedcaldav/test/test_root.py
    CalendarServer/branches/users/sagen/deployment-inherit-fds-4571/twistedcaldav/test/test_tap.py

Modified: CalendarServer/branches/users/sagen/deployment-inherit-fds-4571/twistedcaldav/cluster.py
===================================================================
--- CalendarServer/branches/users/sagen/deployment-inherit-fds-4571/twistedcaldav/cluster.py	2009-10-16 21:17:33 UTC (rev 4588)
+++ CalendarServer/branches/users/sagen/deployment-inherit-fds-4571/twistedcaldav/cluster.py	2009-10-16 21:21:31 UTC (rev 4589)
@@ -23,11 +23,13 @@
 from twisted.runner import procmon
 from twisted.application import internet, service
 from twisted.internet import reactor, process
+from twisted.python.reflect import namedClass
 
 from twistedcaldav.accesslog import AMPLoggingFactory, RotatingFileAccessLoggingObserver
 from twistedcaldav.config import config, ConfigurationError
 from twistedcaldav.util import getNCPU
 from twistedcaldav.log import Logger
+from twistedcaldav.directory.appleopendirectory import OpenDirectoryService
 
 log = Logger()
 
@@ -231,7 +233,15 @@
             childFDs=childFDs)
 
 def makeService_Combined(self, options):
+
+
+    # Refresh directory information on behalf of the child processes
+    directoryClass = namedClass(config.DirectoryService["type"])
+    directory = directoryClass(dosetup=False, **config.DirectoryService["params"])
+    directory.refresh()
+
     s = service.MultiService()
+
     monitor = DelayedStartupProcessMonitor()
     monitor.setServiceParent(s)
     s.processMonitor = monitor

Modified: CalendarServer/branches/users/sagen/deployment-inherit-fds-4571/twistedcaldav/directory/appleopendirectory.py
===================================================================
--- CalendarServer/branches/users/sagen/deployment-inherit-fds-4571/twistedcaldav/directory/appleopendirectory.py	2009-10-16 21:17:33 UTC (rev 4588)
+++ CalendarServer/branches/users/sagen/deployment-inherit-fds-4571/twistedcaldav/directory/appleopendirectory.py	2009-10-16 21:21:31 UTC (rev 4589)
@@ -34,6 +34,7 @@
 import dsattributes
 import dsquery
 import memcacheclient
+import cPickle as pickle
 
 try:
     from hashlib import md5
@@ -43,8 +44,10 @@
 
 from twisted.internet.reactor import callLater
 from twisted.internet.threads import deferToThread
+from twisted.internet.task import LoopingCall
 from twisted.cred.credentials import UsernamePassword
 from twisted.web2.auth.digest import DigestedCredentials
+from twisted.python.filepath import FilePath
 
 from twistedcaldav.config import config
 from twistedcaldav.directory.directory import DirectoryService, DirectoryRecord
@@ -64,7 +67,7 @@
     def __repr__(self):
         return "<%s %r: %r>" % (self.__class__.__name__, self.realmName, self.node)
 
-    def __init__(self, node="/Search", requireComputerRecord=True, dosetup=True, cacheTimeout=30):
+    def __init__(self, node="/Search", requireComputerRecord=True, dosetup=True, cacheTimeout=30, **kwds):
         """
         @param node: an OpenDirectory node name to bind to.
         @param requireComputerRecord: C{True} if the directory schema is to be used to determine
@@ -110,6 +113,50 @@
             for recordType in self.recordTypes():
                 self.recordsForType(recordType)
 
+    def refresh(self, loop=True):
+        """
+        This service works by having the master process call this method
+        which queries OD for all records, storing the pickled results into
+        files that the child processes stat/read every minute.
+        The files are only written by this method if there are actually
+        changes in the results.
+        The reloadCache() method below used to talk to OD, but now it reads
+        these files.
+        """
+
+        def _refresh(self):
+            dataRoot = FilePath(config.DataRoot)
+            cacheDir = dataRoot.child("DirectoryCache")
+            if not cacheDir.exists():
+                cacheDir.createDirectory()
+
+            for recordType in self.recordTypes():
+                self.log_debug("Master fetching %s from directory" % (recordType,))
+                cacheFile = cacheDir.child(recordType)
+                results = self._queryDirectory(recordType)
+                results.sort()
+                pickled = pickle.dumps(results)
+                needsWrite = True
+                if cacheFile.exists():
+                    prevPickled = cacheFile.getContent()
+                    if prevPickled == pickled:
+                        needsWrite = False
+                if needsWrite:
+                    self.log_info("Saving cache file for %s" % (recordType,))
+                    cacheFile.setContent(pickled)
+                else:
+                    self.log_debug("%s info hasn't changed" % (recordType,))
+
+        def _refreshInThread(self):
+            return deferToThread(_refresh, self)
+
+        if loop:
+            LoopingCall(_refreshInThread, self).start(self.cacheTimeout * 60)
+        else:
+            _refresh(self)
+
+
+
     def _expandGroupMembership(self, members, nestedGroups, processedGUIDs=None):
 
         if processedGUIDs is None:
@@ -445,71 +492,28 @@
         try:
             return self.recordsForType(recordType)[shortName]
         except KeyError:
-            # Check negative cache
-            if shortName in self._storage(recordType)["disabled names"]:
-                return None
+            return None
 
-            # Cache miss; try looking the record up, in case it is new
-            # FIXME: This is a blocking call (hopefully it's a fast one)
-            self.reloadCache(recordType, shortName=shortName)
-            record = self.recordsForType(recordType).get(shortName, None)
-            if record is None:
-                # Add to negative cache
-                self._storage(recordType)["disabled names"].add(shortName)
-            return record
-
     def recordWithGUID(self, guid):
-        def lookup():
-            for recordType in self.recordTypes():
-                record = self._storage(recordType)["guids"].get(guid, None)
-                if record:
-                    return record
-            else:
-                return None
+        for recordType in self.recordTypes():
+            record = self._storage(recordType)["guids"].get(guid, None)
+            if record:
+                return record
+        else:
+            return None
 
-        record = lookup()
 
-        if record is None:
-            # Cache miss; try looking the record up, in case it is new
-            for recordType in self.recordTypes():
-                # Check negative cache
-                if guid in self._storage(recordType)["disabled guids"]:
-                    continue
-
-                self.reloadCache(recordType, guid=guid)
-                record = lookup()
-
-                if record is None:
-                    self._storage(recordType)["disabled guids"].add(guid)
-                else:
-                    self.log_info("Faulted record with GUID %s into %s record cache"
-                                  % (guid, recordType))
-                    break
-            else:
-                # Nothing found; add to negative cache
-                self.log_info("Unable to find any record with GUID %s" % (guid,))
-
-        return record
-
     def recordWithCalendarUserAddress(self, address):
         address = address.lower()
 
-        def lookup():
-            for recordType in self.recordTypes():
-                record = self._storage(recordType)["cuaddrs"].get(address, None)
-                if record:
-                    return record
-            else:
-                return None
+        for recordType in self.recordTypes():
+            record = self._storage(recordType)["cuaddrs"].get(address, None)
+            if record:
+                return record
+        else:
+            return None
 
-        record = lookup()
 
-        if record is None:
-            # Nothing found
-            self.log_info("Unable to find any record with calendar user address %s" % (address,))
-
-        return record
-
     def groupsForGUID(self, guid):
         
         # Lookup in index
@@ -609,47 +613,60 @@
                 self.log_error("OD search failed: %s" % (e,))
                 raise
 
-    def reloadCache(self, recordType, shortName=None, guid=None):
-        if shortName is not None:
-            self.log_info("Faulting record with shortName %s into %s record cache" % (shortName, recordType))
-        elif guid is not None:
-            self.log_info("Faulting record with guid %s into %s record cache" % (guid, recordType))
-        elif shortName is None and guid is None:
-            self.log_info("Reloading %s record cache" % (recordType,))
-        else:
-            raise AssertionError("%r.reloadCache(%s, %s, %s)" % (self, recordType, shortName, guid))
+    def reloadCache(self, recordType, forceUpdate=False):
 
-        results = self._queryDirectory(recordType, shortName=shortName, guid=guid)
-        
-        if shortName is None and guid is None:
-            records = {}
-            guids   = {}
-            cuaddrs = {}
+        def rot():
+            storage["status"] = "stale"
+            removals = set()
+            for call in self._delayedCalls:
+                if not call.active():
+                    removals.add(call)
+            for item in removals:
+                self._delayedCalls.remove(item)
 
-            disabledNames = set()
-            disabledGUIDs = set()
-            
-            if recordType == DirectoryService.recordType_groups:
-                groupsForGUID = {}
-            elif recordType in (DirectoryService.recordType_resources, DirectoryService.recordType_locations):
-                proxiesForGUID = {}
-                readOnlyProxiesForGUID = {}
-        else:
+        cacheTimeout = 60 # child processes always check once per minute
+
+        dataRoot = FilePath(config.DataRoot)
+        cacheDir = dataRoot.child("DirectoryCache")
+        if not cacheDir.exists():
+            self.log_error("Directory cache directory does not exist: %s" % (cacheDir.path,))
+            return
+
+        cacheFile = cacheDir.child(recordType)
+        if not cacheFile.exists():
+            self.log_error("Directory cache file for %s does not exist: %s" % (recordType, cacheFile.path))
+            return
+        lastModified = cacheFile.getModificationTime()
+        try:
             storage = self._records[recordType]
+            if not forceUpdate and (lastModified <= storage["last modified"]):
+                self.log_debug("Directory cache file for %s unchanged" % (recordType,))
+                storage["status"] = "new" # mark this as not stale
+                self._delayedCalls.add(callLater(cacheTimeout, rot))
+                return
+        except KeyError:
+            # Haven't read the file before
+            pass
 
-            records = storage["records"]
-            guids   = storage["guids"]
-            cuaddrs = storage["cuaddrs"]
+        self.log_info("Reloading %s record cache" % (recordType,))
 
-            disabledNames = storage["disabled names"]
-            disabledGUIDs = storage["disabled guids"]
-            
-            if recordType == DirectoryService.recordType_groups:
-                groupsForGUID = storage["groupsForGUID"]
-            elif recordType in (DirectoryService.recordType_resources, DirectoryService.recordType_locations):
-                proxiesForGUID = storage["proxiesForGUID"]
-                readOnlyProxiesForGUID = storage["readOnlyProxiesForGUID"]
+        pickled = cacheFile.getContent()
+        results = pickle.loads(pickled)
+        # results = self._queryDirectory(recordType)
 
+        records = {}
+        guids   = {}
+        cuaddrs = {}
+
+        disabledNames = set()
+        disabledGUIDs = set()
+        
+        if recordType == DirectoryService.recordType_groups:
+            groupsForGUID = {}
+        elif recordType in (DirectoryService.recordType_resources, DirectoryService.recordType_locations):
+            proxiesForGUID = {}
+            readOnlyProxiesForGUID = {}
+
         for (recordShortName, value) in results:
             enabledForCalendaring = True
 
@@ -809,54 +826,38 @@
                         self._indexGroup(record, record._proxyGUIDs, proxiesForGUID)
                         self._indexGroup(record, record._readOnlyProxyGUIDs, readOnlyProxiesForGUID)
 
-        if shortName is None and guid is None:
-            #
-            # Replace the entire cache
-            #
-            storage = {
-                "status"        : "new",
-                "records"       : records,
-                "guids"         : guids,
-                "cuaddrs"       : cuaddrs,
-                "disabled names": disabledNames,
-                "disabled guids": disabledGUIDs,
-            }
+        #
+        # Replace the entire cache
+        #
+        storage = {
+            "status"        : "new",
+            "records"       : records,
+            "guids"         : guids,
+            "cuaddrs"       : cuaddrs,
+            "disabled names": disabledNames,
+            "disabled guids": disabledGUIDs,
+            "last modified" : lastModified,
+        }
 
-            # Add group indexing if needed
-            if recordType == DirectoryService.recordType_groups:
-                storage["groupsForGUID"] = groupsForGUID
+        # Add group indexing if needed
+        if recordType == DirectoryService.recordType_groups:
+            storage["groupsForGUID"] = groupsForGUID
 
-            # Add proxy indexing if needed
-            elif recordType in (DirectoryService.recordType_resources, DirectoryService.recordType_locations):
-                storage["proxiesForGUID"] = proxiesForGUID
-                storage["readOnlyProxiesForGUID"] = readOnlyProxiesForGUID
+        # Add proxy indexing if needed
+        elif recordType in (DirectoryService.recordType_resources, DirectoryService.recordType_locations):
+            storage["proxiesForGUID"] = proxiesForGUID
+            storage["readOnlyProxiesForGUID"] = readOnlyProxiesForGUID
 
-            def rot():
-                storage["status"] = "stale"
-                removals = set()
-                for call in self._delayedCalls:
-                    if not call.active():
-                        removals.add(call)
-                for item in removals:
-                    self._delayedCalls.remove(item)
+        self._delayedCalls.add(callLater(cacheTimeout, rot))
 
-            #
-            # Add jitter/fuzz factor to avoid stampede for large OD query
-            # Max out the jitter at 60 minutes
-            #
-            cacheTimeout = min(self.cacheTimeout, 60) * 60
-            cacheTimeout = (cacheTimeout * random()) - (cacheTimeout / 2)
-            cacheTimeout += self.cacheTimeout * 60
-            self._delayedCalls.add(callLater(cacheTimeout, rot))
+        self._records[recordType] = storage
 
-            self._records[recordType] = storage
+        self.log_info(
+            "Added %d records to %s OD record cache; expires in %d seconds"
+            % (len(self._records[recordType]["guids"]), recordType, cacheTimeout)
+        )
 
-            self.log_info(
-                "Added %d records to %s OD record cache; expires in %d seconds"
-                % (len(self._records[recordType]["guids"]), recordType, cacheTimeout)
-            )
-
-    def _queryDirectory(self, recordType, shortName=None, guid=None):
+    def _queryDirectory(self, recordType):
         attrs = [
             dsattributes.kDS1AttrGeneratedUID,
             dsattributes.kDS1AttrDistinguishedName,
@@ -887,45 +888,44 @@
 
         if self.requireComputerRecord:
             if self.isWorkgroupServer and recordType == DirectoryService.recordType_users:
-                if shortName is None and guid is None:
-                    self.log_debug("opendirectory.queryRecordsWithAttribute_list(%r,%r,%r,%r,%r,%r,%r)" % (
-                        self.directory,
-                        dsattributes.kDSNAttrRecordName,
-                        saclGroup,
-                        dsattributes.eDSExact,
-                        False,
-                        dsattributes.kDSStdRecordTypeGroups,
-                        [dsattributes.kDSNAttrGroupMembers, dsattributes.kDSNAttrNestedGroups],
-                    ))
-                    results = opendirectory.queryRecordsWithAttribute_list(
-                        self.directory,
-                        dsattributes.kDSNAttrRecordName,
-                        saclGroup,
-                        dsattributes.eDSExact,
-                        False,
-                        dsattributes.kDSStdRecordTypeGroups,
-                        [dsattributes.kDSNAttrGroupMembers, dsattributes.kDSNAttrNestedGroups]
-                    )
+                self.log_debug("opendirectory.queryRecordsWithAttribute_list(%r,%r,%r,%r,%r,%r,%r)" % (
+                    self.directory,
+                    dsattributes.kDSNAttrRecordName,
+                    saclGroup,
+                    dsattributes.eDSExact,
+                    False,
+                    dsattributes.kDSStdRecordTypeGroups,
+                    [dsattributes.kDSNAttrGroupMembers, dsattributes.kDSNAttrNestedGroups],
+                ))
+                results = opendirectory.queryRecordsWithAttribute_list(
+                    self.directory,
+                    dsattributes.kDSNAttrRecordName,
+                    saclGroup,
+                    dsattributes.eDSExact,
+                    False,
+                    dsattributes.kDSStdRecordTypeGroups,
+                    [dsattributes.kDSNAttrGroupMembers, dsattributes.kDSNAttrNestedGroups]
+                )
 
-                    if len(results) == 1:
-                        members      = results[0][1].get(dsattributes.kDSNAttrGroupMembers, [])
-                        nestedGroups = results[0][1].get(dsattributes.kDSNAttrNestedGroups, [])
-                    else:
-                        members = []
-                        nestedGroups = []
+                if len(results) == 1:
+                    members      = results[0][1].get(dsattributes.kDSNAttrGroupMembers, [])
+                    nestedGroups = results[0][1].get(dsattributes.kDSNAttrNestedGroups, [])
+                else:
+                    members = []
+                    nestedGroups = []
 
-                    guidQueries = []
+                guidQueries = []
 
-                    for GUID in self._expandGroupMembership(members, nestedGroups):
-                        guidQueries.append(
-                            dsquery.match(dsattributes.kDS1AttrGeneratedUID, GUID, dsattributes.eDSExact)
-                        )
+                for GUID in self._expandGroupMembership(members, nestedGroups):
+                    guidQueries.append(
+                        dsquery.match(dsattributes.kDS1AttrGeneratedUID, GUID, dsattributes.eDSExact)
+                    )
 
-                    if not guidQueries:
-                        self.log_warn("No SACL enabled users found.")
-                        return ()
+                if not guidQueries:
+                    self.log_warn("No SACL enabled users found.")
+                    return ()
 
-                    query = dsquery.expression(dsquery.expression.OR, guidQueries)
+                query = dsquery.expression(dsquery.expression.OR, guidQueries)
 
             #
             # For users and groups, we'll load all entries, even if
@@ -950,19 +950,8 @@
                 else:
                     query = dsquery.expression(dsquery.expression.AND, (subquery, query))
 
-        if shortName is not None:
-            subquery = dsquery.match(dsattributes.kDSNAttrRecordName, shortName, dsattributes.eDSExact)
-        elif guid is not None:
-            subquery = dsquery.match(dsattributes.kDS1AttrGeneratedUID, guid, dsattributes.eDSExact)
-        else:
-            subquery = None
+        subquery = None
 
-        if subquery is not None:
-            if query is None:
-                query = subquery
-            else:
-                query = dsquery.expression(dsquery.expression.AND, (subquery, query))
-
         try:
             if query:
                 if isinstance(query, dsquery.match):

Modified: CalendarServer/branches/users/sagen/deployment-inherit-fds-4571/twistedcaldav/directory/directory.py
===================================================================
--- CalendarServer/branches/users/sagen/deployment-inherit-fds-4571/twistedcaldav/directory/directory.py	2009-10-16 21:17:33 UTC (rev 4588)
+++ CalendarServer/branches/users/sagen/deployment-inherit-fds-4571/twistedcaldav/directory/directory.py	2009-10-16 21:21:31 UTC (rev 4589)
@@ -139,6 +139,12 @@
             for record in self.listRecords(recordType):
                 yield record
 
+    def refresh(self):
+        """
+        This gets called in the master process to give the directory service
+        a chance to refresh a cache of directory information
+        """
+        pass
 
 class DirectoryRecord(LoggingMixIn):
     implements(IDirectoryRecord)

Modified: CalendarServer/branches/users/sagen/deployment-inherit-fds-4571/twistedcaldav/directory/test/test_opendirectoryrecords.py
===================================================================
--- CalendarServer/branches/users/sagen/deployment-inherit-fds-4571/twistedcaldav/directory/test/test_opendirectoryrecords.py	2009-10-16 21:17:33 UTC (rev 4588)
+++ CalendarServer/branches/users/sagen/deployment-inherit-fds-4571/twistedcaldav/directory/test/test_opendirectoryrecords.py	2009-10-16 21:21:31 UTC (rev 4589)
@@ -24,29 +24,21 @@
 else:
     from twistedcaldav.directory.directory import DirectoryService
     from twistedcaldav.directory.util import uuidFromName
+    from twistedcaldav.test.util import TestCase
 
     class OpenDirectoryService (RealOpenDirectoryService):
-        def _queryDirectory(self, recordType, shortName=None, guid=None):
-            if shortName is None and guid is None:
+        def _queryDirectory(self, recordType):
+            try:
                 return self.fakerecords[recordType]
-
-            assert shortName is None or guid is None
-            if guid is not None:
-                guid = guid.lower()
-
-            records = []
-
-            for name, record in self.fakerecords[recordType]:
-                if name == shortName or record[dsattributes.kDS1AttrGeneratedUID] == guid:
-                    records.append((name, record))
-
-            return tuple(records)
+            except KeyError:
+                return []
     
-    class ReloadCache(twisted.trial.unittest.TestCase):
+    class ReloadCache(TestCase):
         def setUp(self):
             super(ReloadCache, self).setUp()
             self._service = OpenDirectoryService(node="/Search", dosetup=False)
             self._service.servicetags.add("FE588D50-0514-4DF9-BCB5-8ECA5F3DA274:030572AE-ABEC-4E0F-83C9-FCA304769E5F:calendar")
+            self._service.fakerecords = { }
             
         def tearDown(self):
             for call in self._service._delayedCalls:
@@ -106,6 +98,7 @@
                     fakeODRecord("Location 02"),
                 ],
             }
+            self._service.refresh(loop=False)
 
             self._service.reloadCache(DirectoryService.recordType_users)
             self._service.reloadCache(DirectoryService.recordType_groups)
@@ -151,6 +144,7 @@
                     fakeODRecord("Location 04", addLocator=False),
                 ],
             }
+            self._service.refresh(loop=False)
 
             self._service.reloadCache(DirectoryService.recordType_users)
             self._service.reloadCache(DirectoryService.recordType_groups)
@@ -175,6 +169,7 @@
                     fakeODRecord("User 01"),
                 ],
             }
+            self._service.refresh(loop=False)
 
             self._service.reloadCache(DirectoryService.recordType_users)
 
@@ -188,9 +183,9 @@
                     fakeODRecord("User 03", guid="D10F3EE0-5014-41D3-8488-3819D3EF3B2A"),
                 ],
             }
+            self._service.refresh(loop=False)
 
-            self._service.reloadCache(DirectoryService.recordType_users, shortName="user02")
-            self._service.reloadCache(DirectoryService.recordType_users, guid="D10F3EE0-5014-41D3-8488-3819D3EF3B2A")
+            self._service.reloadCache(DirectoryService.recordType_users, forceUpdate=True)
 
             self._verifyRecords(DirectoryService.recordType_users, ("user01", "user02", "user03"))
             self._verifyDisabledRecords(DirectoryService.recordType_users, (), ())
@@ -203,6 +198,7 @@
                     fakeODRecord("User 02"),
                 ],
             }
+            self._service.refresh(loop=False)
 
             self._service.reloadCache(DirectoryService.recordType_users)
 
@@ -219,6 +215,7 @@
                     fakeODRecord("User 02", guid="30CA2BB9-C935-4A5D-80E2-79266BCB0255"),
                 ],
             }
+            self._service.refresh(loop=False)
 
             self._service.reloadCache(DirectoryService.recordType_users)
 
@@ -237,6 +234,7 @@
                     fakeODRecord("User 03", guid="113D7F74-F84A-4F17-8C96-CE8F10D68EF8"),
                 ],
             }
+            self._service.refresh(loop=False)
 
             self._service.reloadCache(DirectoryService.recordType_users)
 
@@ -256,6 +254,7 @@
                     fakeODRecord("User 02", guid="136E369F-DB40-4135-878D-B75D38242D39"),
                 ],
             }
+            self._service.refresh(loop=False)
 
             self._service.reloadCache(DirectoryService.recordType_users)
 
@@ -274,12 +273,12 @@
                     fakeODRecord("User 03", guid="D10F3EE0-5014-41D3-8488-3819D3EF3B2A"),
                 ],
             }
+            self._service.refresh(loop=False)
 
             self._service.reloadCache(DirectoryService.recordType_users)
 
             self._verifyRecords(DirectoryService.recordType_users, ("user01", "user02", "user03"))
             self._verifyDisabledRecords(DirectoryService.recordType_users, (), ())
-            
             self._service.fakerecords = {
                 DirectoryService.recordType_users: [
                     fakeODRecord("User 01"),
@@ -289,15 +288,15 @@
                     fakeODRecord("User 03", guid="62368DDF-0C62-4C97-9A58-DE9FD46131A0", shortName="user05"),
                 ],
             }
+            self._service.refresh(loop=False)
 
-            self._service.reloadCache(DirectoryService.recordType_users, shortName="user04")
-            self._service.reloadCache(DirectoryService.recordType_users, guid="62368DDF-0C62-4C97-9A58-DE9FD46131A0")
+            self._service.reloadCache(DirectoryService.recordType_users, forceUpdate=True)
 
             self._verifyRecords(DirectoryService.recordType_users, ("user01",))
             self._verifyDisabledRecords(
                 DirectoryService.recordType_users,
                 ("user02", "user03", "user04", "user05"),
-                ("EDB9EE55-31F2-4EA9-B5FB-D8AE2A8BA35E", "62368DDF-0C62-4C97-9A58-DE9FD46131A0", "D10F3EE0-5014-41D3-8488-3819D3EF3B2A"),
+                ("EDB9EE55-31F2-4EA9-B5FB-D8AE2A8BA35E", "62368DDF-0C62-4C97-9A58-DE9FD46131A0"),
             )
 
         def test_groupmembers(self):
@@ -325,6 +324,7 @@
                     fakeODRecord("Location 02"),
                 ],
             }
+            self._service.refresh(loop=False)
 
             self._service.reloadCache(DirectoryService.recordType_users)
             self._service.reloadCache(DirectoryService.recordType_groups)
@@ -354,7 +354,8 @@
                     guidForShortName("user02"),
                 ]),
             ]
-            self._service.reloadCache(DirectoryService.recordType_groups)
+            self._service.refresh(loop=False)
+            self._service.reloadCache(DirectoryService.recordType_groups, forceUpdate=True)
 
             group1 = self._service.recordWithShortName(DirectoryService.recordType_groups, "group01")
             self.assertTrue(group1 is not None)
@@ -371,12 +372,20 @@
             self.assertEqual(set((group2,)), user2.groups()) 
             
             self._service.fakerecords[DirectoryService.recordType_groups] = [
+                fakeODRecord("Group 01", members=[
+                    guidForShortName("user01"),
+                ]),
+                fakeODRecord("Group 02", members=[
+                    guidForShortName("resource01"),
+                    guidForShortName("user02"),
+                ]),
                 fakeODRecord("Group 03", members=[
                     guidForShortName("user01"),
                     guidForShortName("user02"),
                 ]),
             ]
-            self._service.reloadCache(DirectoryService.recordType_groups, guid=guidForShortName("group03"))
+            self._service.refresh(loop=False)
+            self._service.reloadCache(DirectoryService.recordType_groups, forceUpdate=True)
 
             group1 = self._service.recordWithShortName(DirectoryService.recordType_groups, "group01")
             self.assertTrue(group1 is not None)
@@ -405,6 +414,7 @@
                 DirectoryService.recordType_resources: [],
                 DirectoryService.recordType_locations: [],
             }
+            self._service.refresh(loop=False)
 
             self._service.reloadCache(DirectoryService.recordType_users)
 
@@ -424,7 +434,8 @@
                 DirectoryService.recordType_resources: [],
                 DirectoryService.recordType_locations: [],
             }
-            self._service.reloadCache(DirectoryService.recordType_users)
+            self._service.refresh(loop=False)
+            self._service.reloadCache(DirectoryService.recordType_users, forceUpdate=True)
 
             user1 = self._service.recordWithCalendarUserAddress("mailto:user01 at example.com")
             self.assertTrue(user1 is not None)
@@ -441,7 +452,8 @@
                 DirectoryService.recordType_resources: [],
                 DirectoryService.recordType_locations: [],
             }
-            self._service.reloadCache(DirectoryService.recordType_users)
+            self._service.refresh(loop=False)
+            self._service.reloadCache(DirectoryService.recordType_users, forceUpdate=True)
 
             user1 = self._service.recordWithCalendarUserAddress("mailto:user01 at example.com")
             self.assertTrue(user1 is None)

Modified: CalendarServer/branches/users/sagen/deployment-inherit-fds-4571/twistedcaldav/directory/xmlfile.py
===================================================================
--- CalendarServer/branches/users/sagen/deployment-inherit-fds-4571/twistedcaldav/directory/xmlfile.py	2009-10-16 21:17:33 UTC (rev 4588)
+++ CalendarServer/branches/users/sagen/deployment-inherit-fds-4571/twistedcaldav/directory/xmlfile.py	2009-10-16 21:21:31 UTC (rev 4589)
@@ -40,7 +40,7 @@
     def __repr__(self):
         return "<%s %r: %r>" % (self.__class__.__name__, self.realmName, self.xmlFile)
 
-    def __init__(self, xmlFile):
+    def __init__(self, xmlFile, **kwds):
         super(XMLDirectoryService, self).__init__()
 
         if type(xmlFile) is str:

Modified: CalendarServer/branches/users/sagen/deployment-inherit-fds-4571/twistedcaldav/test/test_memcacher.py
===================================================================
--- CalendarServer/branches/users/sagen/deployment-inherit-fds-4571/twistedcaldav/test/test_memcacher.py	2009-10-16 21:17:33 UTC (rev 4588)
+++ CalendarServer/branches/users/sagen/deployment-inherit-fds-4571/twistedcaldav/test/test_memcacher.py	2009-10-16 21:21:31 UTC (rev 4589)
@@ -6,7 +6,7 @@
 """
 
 from twisted.internet.defer import inlineCallbacks
-from twisted.trial.unittest import TestCase
+from twistedcaldav.test.util import TestCase
 
 from twistedcaldav.config import config
 from twistedcaldav.memcacher import Memcacher

Modified: CalendarServer/branches/users/sagen/deployment-inherit-fds-4571/twistedcaldav/test/test_root.py
===================================================================
--- CalendarServer/branches/users/sagen/deployment-inherit-fds-4571/twistedcaldav/test/test_root.py	2009-10-16 21:17:33 UTC (rev 4588)
+++ CalendarServer/branches/users/sagen/deployment-inherit-fds-4571/twistedcaldav/test/test_root.py	2009-10-16 21:21:31 UTC (rev 4589)
@@ -52,6 +52,7 @@
 
 class RootTests(TestCase):
     def setUp(self):
+        super(RootTests, self).setUp()
         self.docroot = self.mktemp()
         os.mkdir(self.docroot)
 

Modified: CalendarServer/branches/users/sagen/deployment-inherit-fds-4571/twistedcaldav/test/test_tap.py
===================================================================
--- CalendarServer/branches/users/sagen/deployment-inherit-fds-4571/twistedcaldav/test/test_tap.py	2009-10-16 21:17:33 UTC (rev 4588)
+++ CalendarServer/branches/users/sagen/deployment-inherit-fds-4571/twistedcaldav/test/test_tap.py	2009-10-16 21:21:31 UTC (rev 4589)
@@ -18,6 +18,7 @@
 from copy import deepcopy
 
 from twisted.trial import unittest
+from twistedcaldav.test.util import TestCase
 
 from twisted.python.usage import Options, UsageError
 from twisted.python.util import sibpath
@@ -53,7 +54,7 @@
         pass
 
 
-class CalDAVOptionsTest(unittest.TestCase):
+class CalDAVOptionsTest(TestCase):
     """
     Test various parameters of our usage.Options subclass
     """
@@ -63,6 +64,7 @@
         Set up our options object, giving it a parent, and forcing the
         global config to be loaded from defaults.
         """
+        super(CalDAVOptionsTest, self).setUp()
         self.config = TestCalDAVOptions()
         self.config.parent = Options()
         self.config.parent['uid'] = 0
@@ -163,7 +165,7 @@
 
         self.assertEquals(config.MultiProcess['ProcessCount'], 102)
 
-class BaseServiceMakerTests(unittest.TestCase):
+class BaseServiceMakerTests(TestCase):
     """
     Utility class for ServiceMaker tests.
     """
@@ -171,6 +173,7 @@
     configOptions = None
 
     def setUp(self):
+        super(BaseServiceMakerTests, self).setUp()
         self.options = TestCalDAVOptions()
         self.options.parent = Options()
         self.options.parent['gid'] = None
@@ -224,6 +227,9 @@
         """
 
         self.options.parseOptions(['-f', self.configFile])
+        config.Memcached.ClientEnabled = False
+        config.Memcached.ServerEnabled = False
+        # memcacheclient.ClientFactory.allowTestCache = True
 
         return CalDAVServiceMaker().makeService(self.options)
 
-------------- next part --------------
An HTML attachment was scrubbed...
URL: <http://lists.macosforge.org/pipermail/calendarserver-changes/attachments/20091016/76837150/attachment-0001.html>


More information about the calendarserver-changes mailing list