[CalendarServer-changes] [7805] CalendarServer/trunk

source_changes at macosforge.org source_changes at macosforge.org
Tue Jul 19 14:38:23 PDT 2011


Revision: 7805
          http://trac.macosforge.org/projects/calendarserver/changeset/7805
Author:   sagen at apple.com
Date:     2011-07-19 14:38:22 -0700 (Tue, 19 Jul 2011)
Log Message:
-----------
Adds external proxy assignments, resource-info, and podding info from directory, plus faster group-membership indexing

Modified Paths:
--------------
    CalendarServer/trunk/conf/caldavd-test.plist
    CalendarServer/trunk/twistedcaldav/directory/appleopendirectory.py
    CalendarServer/trunk/twistedcaldav/directory/directory.py
    CalendarServer/trunk/twistedcaldav/directory/ldapdirectory.py
    CalendarServer/trunk/twistedcaldav/directory/test/test_directory.py
    CalendarServer/trunk/twistedcaldav/directory/test/test_ldapdirectory.py
    CalendarServer/trunk/twistedcaldav/directory/test/test_opendirectory.py
    CalendarServer/trunk/twistedcaldav/directory/xmlfile.py
    CalendarServer/trunk/twistedcaldav/stdconfig.py
    CalendarServer/trunk/twistedcaldav/upgrade.py

Modified: CalendarServer/trunk/conf/caldavd-test.plist
===================================================================
--- CalendarServer/trunk/conf/caldavd-test.plist	2011-07-19 02:26:05 UTC (rev 7804)
+++ CalendarServer/trunk/conf/caldavd-test.plist	2011-07-19 21:38:22 UTC (rev 7805)
@@ -298,6 +298,19 @@
           <key>memberIdAttr</key>
           <string>apple-generateduid</string>
         </dict>
+        <key>resourceSchema</key>
+        <dict>
+          <key>resourceInfoAttr</key>
+          <string>apple-resource-info</string>
+          <key>autoScheduleAttr</key>
+          <string></string>
+          <key>autoScheduleEnabledValue</key>
+          <string>yes</string>
+          <key>proxyAttr</key>
+          <string></string>
+          <key>readOnlyProxyAttr</key>
+          <string></string>
+        </dict>
       </dict>
     </dict>
     -->
@@ -911,6 +924,8 @@
       <integer>300</integer>
       <key>ExpireSeconds</key>
       <integer>3600</integer>
+      <key>UseExternalProxies</key>
+      <false/>
     </dict>
 
     <!--

Modified: CalendarServer/trunk/twistedcaldav/directory/appleopendirectory.py
===================================================================
--- CalendarServer/trunk/twistedcaldav/directory/appleopendirectory.py	2011-07-19 02:26:05 UTC (rev 7804)
+++ CalendarServer/trunk/twistedcaldav/directory/appleopendirectory.py	2011-07-19 21:38:22 UTC (rev 7805)
@@ -44,6 +44,7 @@
 
 
 
+
 class OpenDirectoryService(CachingDirectoryService):
     """
     OpenDirectory implementation of L{IDirectoryService}.
@@ -232,8 +233,134 @@
     def recordTypes(self):
         return self._recordTypes
 
+    def listRecords(self, recordType):
+        """
+        Retrieve all the records of recordType from the directory, but for
+        expediency don't index them or cache them locally, nor in memcached.
+        """
+
+        records = []
+
+        attrs = [
+            dsattributes.kDS1AttrGeneratedUID,
+            dsattributes.kDSNAttrRecordName,
+            dsattributes.kDS1AttrDistinguishedName,
+        ]
+
+        if recordType == DirectoryService.recordType_users:
+            ODRecordType = self._toODRecordTypes[recordType]
+
+        elif recordType in (
+            DirectoryService.recordType_resources,
+            DirectoryService.recordType_locations,
+        ):
+            attrs.append(dsattributes.kDSNAttrResourceInfo)
+            ODRecordType = self._toODRecordTypes[recordType]
+
+        elif recordType == DirectoryService.recordType_groups:
+            attrs.append(dsattributes.kDSNAttrGroupMembers)
+            attrs.append(dsattributes.kDSNAttrNestedGroups)
+            ODRecordType = dsattributes.kDSStdRecordTypeGroups
+
+        self.log_debug("Querying OD for all %s records" % (recordType,))
+        results = self.odModule.listAllRecordsWithAttributes_list(
+            self.directory, ODRecordType, attrs)
+        self.log_debug("Retrieved %d %s records" % (len(results), recordType,))
+
+        for key, value in results:
+            recordGUID = value.get(dsattributes.kDS1AttrGeneratedUID)
+
+            # Skip if group restriction is in place and guid is not
+            # a member (but don't skip any groups)
+            if (recordType != self.recordType_groups and
+                self.restrictedGUIDs is not None):
+                if str(recordGUID) not in self.restrictedGUIDs:
+                    continue
+
+            recordShortNames = self._uniqueTupleFromAttribute(
+                value.get(dsattributes.kDSNAttrRecordName))
+            recordFullName = value.get(
+                dsattributes.kDS1AttrDistinguishedName)
+
+            proxyGUIDs = ()
+            readOnlyProxyGUIDs = ()
+            autoSchedule = False
+
+            if recordType in (
+                DirectoryService.recordType_resources,
+                DirectoryService.recordType_locations,
+            ):
+                resourceInfo = value.get(dsattributes.kDSNAttrResourceInfo)
+                if resourceInfo is not None:
+                    if type(resourceInfo) is not str:
+                        resourceInfo = resourceInfo[0]
+                    try:
+                        (
+                            autoSchedule,
+                            proxy,
+                            readOnlyProxy
+                        ) = self.parseResourceInfo(
+                            resourceInfo,
+                            recordGUID,
+                            recordType,
+                            recordShortNames[0]
+                        )
+                    except ValueError:
+                        continue
+                    if proxy:
+                        proxyGUIDs = (proxy,)
+                    if readOnlyProxy:
+                        readOnlyProxyGUIDs = (readOnlyProxy,)
+
+            # Special case for groups, which have members.
+            if recordType == self.recordType_groups:
+                memberGUIDs = value.get(dsattributes.kDSNAttrGroupMembers)
+                if memberGUIDs is None:
+                    memberGUIDs = ()
+                elif type(memberGUIDs) is str:
+                    memberGUIDs = (memberGUIDs,)
+                nestedGUIDs = value.get(dsattributes.kDSNAttrNestedGroups)
+                if nestedGUIDs:
+                    if type(nestedGUIDs) is str:
+                        nestedGUIDs = (nestedGUIDs,)
+                    memberGUIDs += tuple(nestedGUIDs)
+            else:
+                memberGUIDs = ()
+
+            record = OpenDirectoryRecord(
+                service               = self,
+                recordType            = recordType,
+                guid                  = recordGUID,
+                nodeName              = "",
+                shortNames            = recordShortNames,
+                authIDs               = (),
+                fullName              = recordFullName,
+                firstName             = "",
+                lastName              = "",
+                emailAddresses        = "",
+                memberGUIDs           = memberGUIDs,
+                extProxies            = proxyGUIDs,
+                extReadOnlyProxies    = readOnlyProxyGUIDs,
+            )
+
+            # (Copied from below)
+            # Look up augment information
+            # TODO: this needs to be deferred but for now we hard code
+            # the deferred result because we know it is completing
+            # immediately.
+            d = self.augmentService.getAugmentRecord(record.guid,
+                recordType)
+            d.addCallback(lambda x:record.addAugmentInformation(x))
+            records.append(record)
+
+        self.log_debug("ListRecords returning %d %s records" % (len(records),
+            recordType))
+
+        return records
+
+
     def groupsForGUID(self, guid):
-        
+
         attrs = [
             dsattributes.kDS1AttrGeneratedUID,
         ]
@@ -454,6 +581,8 @@
                         lastName              = recordLastName,
                         emailAddresses        = recordEmailAddresses,
                         memberGUIDs           = (),
+                        extProxies            = (),
+                        extReadOnlyProxies    = (),
                     )
 
                     # (Copied from below)
@@ -731,6 +860,24 @@
             else:
                 memberGUIDs = ()
 
+            # Special case for resources and locations
+            autoSchedule = False
+            proxyGUIDs = ()
+            readOnlyProxyGUIDs = ()
+            if recordType in (DirectoryService.recordType_resources, DirectoryService.recordType_locations):
+                resourceInfo = value.get(dsattributes.kDSNAttrResourceInfo)
+                if resourceInfo is not None:
+                    if type(resourceInfo) is not str:
+                        resourceInfo = resourceInfo[0]
+                    try:
+                        autoSchedule, proxy, read_only_proxy = self.parseResourceInfo(resourceInfo, recordGUID, recordType, recordShortName)
+                    except ValueError:
+                        continue
+                    if proxy:
+                        proxyGUIDs = (proxy,)
+                    if read_only_proxy:
+                        readOnlyProxyGUIDs = (read_only_proxy,)
+
             record = OpenDirectoryRecord(
                 service               = self,
                 recordType            = recordType,
@@ -743,6 +890,8 @@
                 lastName              = recordLastName,
                 emailAddresses        = recordEmailAddresses,
                 memberGUIDs           = memberGUIDs,
+                extProxies            = proxyGUIDs,
+                extReadOnlyProxies    = readOnlyProxyGUIDs,
             )
 
             # Look up augment information
@@ -752,6 +901,10 @@
                 recordType)
             d.addCallback(lambda x:record.addAugmentInformation(x))
 
+            # Override based on ResourceInfo
+            if autoSchedule:
+                record.autoSchedule = True
+
             if not unrestricted:
                 self.log_debug("%s is not enabled because it's not a member of group: %s" % (recordGUID, self.restrictToGroup))
                 record.enabledForCalendaring = False
@@ -831,6 +984,7 @@
     def __init__(
         self, service, recordType, guid, nodeName, shortNames, authIDs,
         fullName, firstName, lastName, emailAddresses, memberGUIDs,
+        extProxies, extReadOnlyProxies,
     ):
         super(OpenDirectoryRecord, self).__init__(
             service               = service,
@@ -842,12 +996,15 @@
             firstName             = firstName,
             lastName              = lastName,
             emailAddresses        = emailAddresses,
+            extProxies            = extProxies,
+            extReadOnlyProxies    = extReadOnlyProxies,
         )
         self.nodeName = nodeName
+
         self._memberGUIDs = tuple(memberGUIDs)
-        
         self._groupMembershipGUIDs = None
 
+
     def __repr__(self):
         if self.service.realmName == self.nodeName:
             location = self.nodeName
@@ -882,6 +1039,9 @@
             if record:
                 yield record
 
+    def memberGUIDs(self):
+        return set(self._memberGUIDs)
+
     def verifyCredentials(self, credentials):
         if isinstance(credentials, UsernamePassword):
             # Check cached password

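The kDSNAttrResourceInfo value handled above is a plist string; parseResourceInfo() (added to directory.py below) extracts the auto-schedule flag and the two delegate GUIDs from it. A minimal standalone sketch of that parsing, assuming a resource-info plist of the same shape as the samples in test_ldapdirectory.py further down (plistlib is the module the server code itself uses):

    from plistlib import readPlistFromString

    resourceInfo = """<?xml version="1.0" encoding="UTF-8"?>
    <!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
    <plist version="1.0">
    <dict>
    <key>com.apple.WhitePagesFramework</key>
    <dict>
    <key>AutoAcceptsInvitation</key>
    <true/>
    <key>CalendaringDelegate</key>
    <string>6C6CD280-E6E3-11DF-9492-0800200C9A66</string>
    <key>ReadOnlyCalendaringDelegate</key>
    <string>6AA1AE12-592F-4190-A069-547CD83C47C0</string>
    </dict>
    </dict>
    </plist>"""

    # Same extraction steps as parseResourceInfo()
    plist = readPlistFromString(resourceInfo)
    wpframework = plist.get("com.apple.WhitePagesFramework", {})
    autoSchedule = wpframework.get("AutoAcceptsInvitation", False)        # True
    proxy = wpframework.get("CalendaringDelegate", None)                  # read-write delegate GUID
    readOnlyProxy = wpframework.get("ReadOnlyCalendaringDelegate", None)  # read-only delegate GUID
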
Modified: CalendarServer/trunk/twistedcaldav/directory/directory.py
===================================================================
--- CalendarServer/trunk/twistedcaldav/directory/directory.py	2011-07-19 02:26:05 UTC (rev 7804)
+++ CalendarServer/trunk/twistedcaldav/directory/directory.py	2011-07-19 21:38:22 UTC (rev 7805)
@@ -33,6 +33,7 @@
 import types
 import pwd, grp
 import cPickle as pickle
+import itertools
 
 
 from zope.interface import implements
@@ -58,6 +59,8 @@
 from twisted.application import service
 from twisted.plugin import IPlugin
 from zope.interface import implements
+from xml.parsers.expat import ExpatError
+from plistlib import readPlistFromString
 
 class DirectoryService(LoggingMixIn):
     implements(IDirectoryService, ICredentialsChecker)
@@ -324,7 +327,34 @@
             raise DirectoryConfigurationError("Invalid directory service parameter(s): %s" % (", ".join(list(keys)),))
         return result
 
+    def parseResourceInfo(self, plist, guid, recordType, shortname):
+        """
+        Parse ResourceInfo plist and extract information that the server needs.
 
+        @param plist: the plist that is the attribute value.
+        @type plist: str
+        @param guid: the directory GUID of the record being parsed.
+        @type guid: str
+        @param shortname: the record shortname of the record being parsed.
+        @type shortname: str
+        @return: a C{tuple} of C{bool} for auto-accept, C{str} for proxy GUID, C{str} for read-only proxy GUID.
+        """
+        try:
+            plist = readPlistFromString(plist)
+            wpframework = plist.get("com.apple.WhitePagesFramework", {})
+            autoaccept = wpframework.get("AutoAcceptsInvitation", False)
+            proxy = wpframework.get("CalendaringDelegate", None)
+            read_only_proxy = wpframework.get("ReadOnlyCalendaringDelegate", None)
+        except (ExpatError, AttributeError), e:
+            self.log_error(
+                "Failed to parse ResourceInfo attribute of record (%s)%s (guid=%s): %s\n%s" %
+                (recordType, shortname, guid, e, plist,)
+            )
+            raise ValueError("Invalid ResourceInfo")
+
+        return (autoaccept, proxy, read_only_proxy,)
+
+
     def createRecord(self, recordType, guid=None, shortNames=(), authIDs=set(),
         fullName=None, firstName=None, lastName=None, emailAddresses=set(),
         uid=None, password=None, **kwargs):
@@ -353,72 +383,8 @@
         """
         raise NotImplementedError("Subclass must implement createRecords")
 
-    @inlineCallbacks
-    def cacheGroupMembership(self, guids, fast=False):
-        """
-        Update the "which groups is each principal in" cache.  The only groups
-        that the server needs to worry about are the ones which have been
-        delegated to.  So instead of faulting in all groups a principal is in,
-        we pre-fault in all the delgated-to groups and build an index to
-        quickly look up principal membership.
 
-        guids is the set of every guid that's been directly delegated to, and
-        can be a mixture of users and groups.
-        """
 
-        dataRoot = FilePath(config.DataRoot)
-        snapshotFile = dataRoot.child("memberships_cache")
-
-        if not snapshotFile.exists():
-            fast = False
-
-        if fast:
-            # If there is an on-disk snapshot of the membership information,
-            # load that and put into memcached, bypassing the faulting in of
-            # any records, so that the server can start up quickly.
-
-            self.log_debug("Loading group memberships from snapshot")
-            members = pickle.loads(snapshotFile.getContent())
-
-        else:
-            self.log_debug("Loading group memberships from directory")
-            groups = set()
-            for guid in guids:
-                record = self.recordWithGUID(guid)
-                if record is not None and record.recordType == self.recordType_groups:
-                    groups.add(record)
-
-            members = { }
-            for group in groups:
-                groupMembers = group.expandedMembers()
-                for member in groupMembers:
-                    if member.recordType == self.recordType_users:
-                        memberships = members.setdefault(member.guid, set())
-                        memberships.add(group.guid)
-
-            # Store snapshot
-            self.log_debug("Taking snapshot of group memberships to %s" %
-                (snapshotFile.path,))
-            snapshotFile.setContent(pickle.dumps(members))
-
-            # Update ownership
-            uid = gid = -1
-            if config.UserName:
-                uid = pwd.getpwnam(config.UserName).pw_uid
-            if config.GroupName:
-                gid = grp.getgrnam(config.GroupName).gr_gid
-            os.chown(snapshotFile.path, uid, gid)
-
-        self.log_debug("Storing group memberships in memcached")
-        for member, groups in members.iteritems():
-            # self.log_debug("%s is in %s" % (member, groups))
-            yield self.groupMembershipCache.setGroupsFor(member, groups)
-
-        self.log_debug("Group memberships cache updated")
-
-        returnValue((fast, len(members)))
-
-
 class GroupMembershipCache(Memcacher, LoggingMixIn):
     """
     Caches group membership information
@@ -471,19 +437,56 @@
     Responsible for updating memcached with group memberships.  This will run
     in a sidecar.  There are two sources of proxy data to pull from: the local
     proxy database, and the location/resource info in the directory system.
-
-    TODO: Implement location/resource
     """
 
     def __init__(self, proxyDB, directory, expireSeconds, cache=None,
-        namespace=None):
+        namespace=None, useExternalProxies=False, externalProxiesSource=None):
         self.proxyDB = proxyDB
         self.directory = directory
+        self.useExternalProxies = useExternalProxies
+        if externalProxiesSource is None:
+            externalProxiesSource = self.getExternalProxyAssignments
+        self.externalProxiesSource = externalProxiesSource
+
         if cache is None:
             assert namespace is not None, "namespace must be specified if GroupMembershipCache is not provided"
             cache = GroupMembershipCache(namespace, expireSeconds=expireSeconds)
         self.cache = cache
 
+
+    def getGroups(self):
+        """
+        Retrieve all groups and their member info (but don't actually fault in
+        the records of the members), and return a dictionary of group-guid to
+        member-guids.  Ultimately this dictionary will be used to reverse-index
+        the groups that users are in by expandedMembers().
+        """
+        groups = {}
+        for record in self.directory.listRecords(self.directory.recordType_groups):
+            groups[record.guid] = record.memberGUIDs()
+        return groups
+
+    def expandedMembers(self, groups, guid, members=None, seen=None):
+        """
+        Return the complete, flattened set of members of a group, including
+        all sub-groups, based on the group hierarchy described in the
+        groups dictionary.
+        """
+        if members is None:
+            members = set()
+        if seen is None:
+            seen = set()
+
+        if guid not in seen:
+            seen.add(guid)
+            for member in groups[guid]:
+                members.add(member)
+                if groups.has_key(member): # the member is itself a group
+                    self.expandedMembers(groups, member, members=members,
+                                         seen=seen)
+
+        return members
+
     @inlineCallbacks
     def updateCache(self, fast=False):
         """
@@ -496,11 +499,119 @@
 
         self.log_debug("Updating group membership cache")
 
-        guids = set((yield self.proxyDB.getAllMembers()))
-        returnValue((yield self.directory.cacheGroupMembership(guids, fast=fast)))
+        dataRoot = FilePath(config.DataRoot)
+        snapshotFile = dataRoot.child("memberships_cache")
 
+        if not snapshotFile.exists():
+            self.log_debug("Group membership snapshot file does not yet exist")
+            fast = False
+        else:
+            self.log_debug("Group membership snapshot file exists: %s" %
+                           (snapshotFile.path,))
 
+        if not fast and self.useExternalProxies:
+            self.log_debug("Retrieving proxy assignments from directory")
+            assignments = self.externalProxiesSource()
+            self.log_debug("%d proxy assignments retrieved from directory" %
+                (len(assignments),))
+            # populate proxy DB from external resource info
+            self.log_debug("Applying proxy assignment changes")
+            assignmentCount = 0
+            for principalUID, members in assignments:
+                current = (yield self.proxyDB.getMembers(principalUID))
+                if members != current:
+                    assignmentCount += 1
+                    yield self.proxyDB.setGroupMembers(principalUID, members)
+            self.log_debug("Applied %d assignment%s to proxy database" %
+                (assignmentCount, "" if assignmentCount == 1 else "s"))
 
+        if fast:
+            # If there is an on-disk snapshot of the membership information,
+            # load that and put into memcached, bypassing the faulting in of
+            # any records, so that the server can start up quickly.
+
+            self.log_debug("Loading group memberships from snapshot")
+            members = pickle.loads(snapshotFile.getContent())
+
+        else:
+            # Fetch the group hierarchy from the directory, fetch the list
+            # of delegated-to guids, intersect those and build a dictionary
+            # containing which delegated-to groups a user is a member of
+
+            self.log_debug("Retrieving list of all proxies")
+            delegatedGUIDs = set((yield self.proxyDB.getAllMembers()))
+            self.log_debug("There are %d proxies" % (len(delegatedGUIDs),))
+
+            self.log_debug("Retrieving group hierarchy from directory")
+            groups = self.getGroups()
+            groupGUIDs = set(groups.keys())
+            self.log_debug("There are %d groups in the directory" %
+                           (len(groupGUIDs),))
+
+            delegatedGUIDs = delegatedGUIDs.intersection(groupGUIDs)
+            self.log_debug("%d groups are proxies" % (len(delegatedGUIDs),))
+
+            # Reverse index the group membership from cache
+            members = {}
+            for groupGUID in delegatedGUIDs:
+                groupMembers = self.expandedMembers(groups, groupGUID)
+                for member in groupMembers:
+                    memberships = members.setdefault(member, set())
+                    memberships.add(groupGUID)
+
+            self.log_debug("There are %d users delegated-to via groups" %
+                (len(members),))
+
+            # Store snapshot
+            self.log_debug("Taking snapshot of group memberships to %s" %
+                (snapshotFile.path,))
+            snapshotFile.setContent(pickle.dumps(members))
+
+            # Update ownership
+            uid = gid = -1
+            if config.UserName:
+                uid = pwd.getpwnam(config.UserName).pw_uid
+            if config.GroupName:
+                gid = grp.getgrnam(config.GroupName).gr_gid
+            os.chown(snapshotFile.path, uid, gid)
+
+        self.log_debug("Storing %d group memberships in memcached" %
+                       (len(members),))
+        for member, groups in members.iteritems():
+            # self.log_debug("%s is in %s" % (member, groups))
+            yield self.cache.setGroupsFor(member, groups)
+
+        self.log_debug("Group memberships cache updated")
+
+        returnValue((fast, len(members)))
+
+
+
+
+    def getExternalProxyAssignments(self):
+        """
+        Retrieve proxy assignments for locations and resources from the
+        directory and return a list of (principalUID, memberUIDs) tuples,
+        suitable for passing to proxyDB.setGroupMembers( )
+        """
+        assignments = []
+
+        resources = itertools.chain(
+            self.directory.listRecords(self.directory.recordType_locations),
+            self.directory.listRecords(self.directory.recordType_resources)
+        )
+        for record in resources:
+            guid = record.guid
+            assignments.append(("%s#calendar-proxy-write" % (guid,),
+                               record.externalProxies()))
+            assignments.append(("%s#calendar-proxy-read" % (guid,),
+                               record.externalReadOnlyProxies()))
+
+        return assignments
+
+
+
+
 class GroupMembershipCacherOptions(Options):
     optParameters = [[
         "config", "f", DEFAULT_CONFIG_FILE, "Path to configuration file."
@@ -594,10 +705,12 @@
     """
 
     def __init__(self, proxyDB, directory, namespace, updateSeconds,
-        expireSeconds, reactor=None, updateMethod=None):
+        expireSeconds, reactor=None, updateMethod=None,
+        useExternalProxies=False):
 
         self.updater = GroupMembershipCacheUpdater(proxyDB, directory,
-            expireSeconds, namespace=namespace)
+            expireSeconds, namespace=namespace,
+            useExternalProxies=useExternalProxies)
 
         if reactor is None:
             from twisted.internet import reactor
@@ -668,7 +781,8 @@
         cacherService = GroupMembershipCacherService(proxyDB, directory,
             config.GroupCaching.MemcachedPool,
             config.GroupCaching.UpdateSeconds,
-            config.GroupCaching.ExpireSeconds
+            config.GroupCaching.ExpireSeconds,
+            useExternalProxies=config.GroupCaching.UseExternalProxies
             )
 
         return cacherService
@@ -698,6 +812,7 @@
         enabledForAddressBooks=None,
         uid=None,
         enabledForLogin=True,
+        extProxies=(), extReadOnlyProxies=(),
         **kwargs
     ):
         assert service.realmName is not None
@@ -730,6 +845,8 @@
         self.autoSchedule           = autoSchedule
         self.enabledForAddressBooks = enabledForAddressBooks
         self.enabledForLogin        = enabledForLogin
+        self.extProxies             = extProxies
+        self.extReadOnlyProxies     = extReadOnlyProxies
         self.extras                 = kwargs
 
 
@@ -826,6 +943,7 @@
     def members(self):
         return ()
 
+
     def expandedMembers(self, members=None, seen=None):
         """
         Return the complete, flattened set of members of a group, including
@@ -857,7 +975,26 @@
         """
         return self.service.groupMembershipCache.getGroupsFor(self.guid)
 
+    def externalProxies(self):
+        """
+        Return the set of proxies defined in the directory service, as opposed
+        to assignments in the proxy DB itself.
+        """
+        return set(self.extProxies)
 
+    def externalReadOnlyProxies(self):
+        """
+        Return the set of read-only proxies defined in the directory service,
+        as opposed to assignments in the proxy DB itself.
+        """
+        return set(self.extReadOnlyProxies)
+
+    def memberGUIDs(self):
+        """
+        Return the set of GUIDs that are members of this group
+        """
+        return set()
+
     def verifyCredentials(self, credentials):
         return False
 

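The faster group-membership indexing above works on plain dictionaries: getGroups() maps each group GUID to its member GUIDs, expandedMembers() flattens nested groups, and updateCache() reverse-indexes only the delegated-to groups before pushing the result to memcached. A minimal standalone sketch of that indexing step, with made-up GUID strings standing in for directory records:

    def expandedMembers(groups, guid, members=None, seen=None):
        # Flatten a group's membership, following nested groups.
        if members is None:
            members = set()
        if seen is None:
            seen = set()
        if guid not in seen:
            seen.add(guid)
            for member in groups[guid]:
                members.add(member)
                if member in groups:  # the member is itself a group
                    expandedMembers(groups, member, members, seen)
        return members

    # group GUID -> set of member GUIDs, as returned by getGroups()
    groups = {
        "both_coasts": set(["left_coast", "right_coast"]),
        "left_coast": set(["user-guid-1", "user-guid-2"]),
        "right_coast": set(["user-guid-3"]),
    }

    # Only groups that have actually been delegated to are expanded
    delegatedGUIDs = set(["both_coasts"])

    members = {}
    for groupGUID in delegatedGUIDs:
        for member in expandedMembers(groups, groupGUID):
            members.setdefault(member, set()).add(groupGUID)

    # members now maps each member GUID (users and nested groups alike) to the
    # delegated-to groups it falls under, e.g. {"user-guid-1": set(["both_coasts"])};
    # updateCache() pickles this dictionary and stores it per-member in memcached.
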
Modified: CalendarServer/trunk/twistedcaldav/directory/ldapdirectory.py
===================================================================
--- CalendarServer/trunk/twistedcaldav/directory/ldapdirectory.py	2011-07-19 02:26:05 UTC (rev 7804)
+++ CalendarServer/trunk/twistedcaldav/directory/ldapdirectory.py	2011-07-19 21:38:22 UTC (rev 7805)
@@ -70,7 +70,8 @@
         """
         @param params: a dictionary containing the following keys:
             cacheTimeout, realmName, uri, tls, tlsCACertFile, tlsCACertDir,
-            tlsRequireCert, crendentials, rdnSchema, groupSchema
+            tlsRequireCert, credentials, rdnSchema, groupSchema, resourceSchema,
+            partitionSchema
         """
 
         defaults = {
@@ -111,7 +112,7 @@
                     "recordName": "cn", # uniquely identifies group records
                 },
                 "locations": {
-                    "rdn": "ou=Locations",
+                    "rdn": "ou=Places",
                     "attr": "cn", # used only to synthesize email address
                     "emailSuffix": None, # used only to synthesize email address
                     "filter": None, # additional filter for this type
@@ -130,6 +131,21 @@
                 "nestedGroupsAttr": None, # how nested groups are specified
                 "memberIdAttr": None, # which attribute the above refer to
             },
+            "resourceSchema": {
+                # Either set this attribute to retrieve the plist version
+                # of resource-info, as in a Leopard OD server, or...
+                "resourceInfoAttr": None,
+                # ...set the above to None and instead specify these
+                # individually:
+                "autoScheduleAttr": None,
+                "autoScheduleEnabledValue": "yes",
+                "proxyAttr": None, # list of GUIDs
+                "readOnlyProxyAttr": None, # list of GUIDs
+            },
+            "partitionSchema": {
+                "serverIdAttr": None, # maps to augments server-id
+                "partitionIdAttr": None, # maps to augments partition-id
+            },
             "attributeMapping": { # maps internal record names to LDAP
                 "fullName" : "cn",
                 "emailAddresses" : "mail",
@@ -157,6 +173,8 @@
         self.authMethod = params["authMethod"]
         self.rdnSchema = params["rdnSchema"]
         self.groupSchema = params["groupSchema"]
+        self.resourceSchema = params["resourceSchema"]
+        self.partitionSchema = params["partitionSchema"]
         self.attributeMapping = params["attributeMapping"]
 
         self.base = ldap.dn.str2dn(self.rdnSchema["base"])
@@ -180,6 +198,18 @@
             attrSet.add(self.groupSchema["memberIdAttr"])
         if self.rdnSchema["users"]["loginEnabledAttr"]:
             attrSet.add(self.rdnSchema["users"]["loginEnabledAttr"])
+        if self.resourceSchema["resourceInfoAttr"]:
+            attrSet.add(self.resourceSchema["resourceInfoAttr"])
+        if self.resourceSchema["autoScheduleAttr"]:
+            attrSet.add(self.resourceSchema["autoScheduleAttr"])
+        if self.resourceSchema["proxyAttr"]:
+            attrSet.add(self.resourceSchema["proxyAttr"])
+        if self.resourceSchema["readOnlyProxyAttr"]:
+            attrSet.add(self.resourceSchema["readOnlyProxyAttr"])
+        if self.partitionSchema["serverIdAttr"]:
+            attrSet.add(self.partitionSchema["serverIdAttr"])
+        if self.partitionSchema["partitionIdAttr"]:
+            attrSet.add(self.partitionSchema["partitionIdAttr"])
         self.attrList = list(attrSet)
 
         self.typeRDNs = {}
@@ -216,6 +246,49 @@
         return self._recordTypes
 
 
+    def listRecords(self, recordType):
+
+        # Build base for this record type
+        base = self.typeRDNs[recordType] + self.base
+
+        # Build filter
+        filter = "(!(objectClass=organizationalUnit))"
+        typeFilter = self.rdnSchema[recordType]["filter"]
+        if typeFilter:
+            filter = "(&%s%s)" % (filter, typeFilter)
+
+        # Query the LDAP server
+        self.log_debug("Querying ldap for records matching base %s and filter %s for attributes %s." %
+            (ldap.dn.dn2str(base), filter, self.attrList))
+
+        results = self.ldap.search_s(ldap.dn.dn2str(base),
+            ldap.SCOPE_SUBTREE, filter, self.attrList)
+
+        records = []
+
+        for dn, attrs in results:
+
+            unrestricted = True
+            if self.restrictedGUIDs is not None:
+                guidAttr = self.rdnSchema["guidAttr"]
+                if guidAttr:
+                    guid = self._getUniqueLdapAttribute(attrs, guidAttr)
+                    if guid not in self.restrictedGUIDs:
+                        unrestricted = False
+
+            record = self._ldapResultToRecord(dn, attrs, recordType)
+            self.log_debug("Got LDAP record %s" % (record,))
+
+            if not unrestricted:
+                self.log_debug("%s is not enabled because it's not a member of group: %s" % (guid, self.restrictToGroup))
+                record.enabledForCalendaring = False
+                record.enabledForAddressBooks = False
+
+            records.append(record)
+
+        return records
+
+
     def createLDAPConnection(self):
         """
         Create and configure LDAP connection
@@ -274,7 +347,7 @@
                 recordType = self.recordType_groups
                 base = self.typeRDNs[recordType] + self.base
                 filter = "(cn=%s)" % (self.restrictToGroup,)
-                self.log_info("Retrieving ldap record with base %s and filter %s." %
+                self.log_debug("Retrieving ldap record with base %s and filter %s." %
                     (ldap.dn.dn2str(base), filter))
                 result = self.ldap.search_s(ldap.dn.dn2str(base),
                     ldap.SCOPE_SUBTREE, filter, self.attrList)
@@ -326,7 +399,7 @@
             base = self.typeRDNs[recordType] + self.base
             filter = "(%s=%s)" % (self.rdnSchema["guidAttr"], groupGUID)
 
-            self.log_info("Retrieving ldap record with base %s and filter %s." %
+            self.log_debug("Retrieving ldap record with base %s and filter %s." %
                 (ldap.dn.dn2str(base), filter))
             result = self.ldap.search_s(ldap.dn.dn2str(base),
                 ldap.SCOPE_SUBTREE, filter, self.attrList)
@@ -417,6 +490,11 @@
                 self.rdnSchema[recordType]["attr"])
             emailAddresses.add(emailPrefix + emailSuffix)
 
+        proxyGUIDs = ()
+        readOnlyProxyGUIDs = ()
+        autoSchedule = False
+        memberGUIDs = set()
+
         # LDAP attribute -> principal matchings
         shortNames = (self._getUniqueLdapAttribute(attrs, self.rdnSchema[recordType]["recordName"]),)
         if recordType == self.recordType_users:
@@ -433,13 +511,68 @@
             enabledForAddressBooks = False
             enabledForLogin = False
 
+            if self.groupSchema["membersAttr"]:
+                members = self._getMultipleLdapAttributes(attrs, self.groupSchema["membersAttr"])
+                if members:
+                    if type(members) is str:
+                        members = set([members])
+                    memberGUIDs.update(members)
+            if self.groupSchema["nestedGroupsAttr"]:
+                members = self._getMultipleLdapAttributes(attrs, self.groupSchema["nestedGroupsAttr"])
+                if members:
+                    if type(members) is str:
+                        members = set([members])
+                    memberGUIDs.update(members)
+
+
         elif recordType in (self.recordType_resources,
             self.recordType_locations):
             fullName = self._getUniqueLdapAttribute(attrs, "cn")
             enabledForCalendaring = True
             enabledForAddressBooks = False
             enabledForLogin = False
+            if self.resourceSchema["resourceInfoAttr"]:
+                resourceInfo = self._getUniqueLdapAttribute(attrs,
+                    self.resourceSchema["resourceInfoAttr"])
+                if resourceInfo:
+                    try:
+                        (
+                            autoSchedule,
+                            proxy,
+                            readOnlyProxy
+                        ) = self.parseResourceInfo(
+                            resourceInfo,
+                            guid,
+                            recordType,
+                            shortNames[0]
+                        )
+                        if proxy:
+                            proxyGUIDs = (proxy,)
+                        if readOnlyProxy:
+                            readOnlyProxyGUIDs = (readOnlyProxy,)
+                    except ValueError, e:
+                        self.log_error("Unable to parse resource info (%s)" % (e,))
+            else: # the individual resource attributes might be specified
+                if self.resourceSchema["autoScheduleAttr"]:
+                    autoScheduleValue = self._getUniqueLdapAttribute(attrs,
+                        self.resourceSchema["autoScheduleAttr"])
+                    autoSchedule = (autoScheduleValue ==
+                        self.resourceSchema["autoScheduleEnabledValue"])
+                if self.resourceSchema["proxyAttr"]:
+                    proxyGUIDs = self._getMultipleLdapAttributes(attrs,
+                        self.resourceSchema["proxyAttr"])
+                if self.resourceSchema["readOnlyProxyAttr"]:
+                    readOnlyProxyGUIDs = self._getMultipleLdapAttributes(attrs,
+                        self.resourceSchema["readOnlyProxyAttr"])
 
+        serverID = partitionID = None
+        if self.partitionSchema["serverIdAttr"]:
+            serverID = self._getUniqueLdapAttribute(attrs,
+                self.partitionSchema["serverIdAttr"])
+        if self.partitionSchema["partitionIdAttr"]:
+            partitionID = self._getUniqueLdapAttribute(attrs,
+                self.partitionSchema["partitionIdAttr"])
+
         record = LdapDirectoryRecord(
             service                 = self,
             recordType              = recordType,
@@ -452,6 +585,9 @@
             emailAddresses          = emailAddresses,
             uid                     = uid,
             dn                      = dn,
+            memberGUIDs             = memberGUIDs,
+            extProxies              = proxyGUIDs,
+            extReadOnlyProxies      = readOnlyProxyGUIDs,
             attrs                   = attrs,
         )
 
@@ -469,10 +605,10 @@
             augmentRecord = AugmentRecord(
                 guid,
                 enabled=True,
-                serverID="", # TODO: add to LDAP?
-                partitionID="", # TODO: add to LDAP?
+                serverID=serverID,
+                partitionID=partitionID,
                 enabledForCalendaring=enabledForCalendaring,
-                autoSchedule=False, # TODO: add to LDAP?
+                autoSchedule=autoSchedule,
                 enabledForAddressBooks=enabledForAddressBooks, # TODO: add to LDAP?
                 enabledForLogin=enabledForLogin,
             )
@@ -548,7 +684,7 @@
                 return
 
             # Query the LDAP server
-            self.log_info("Retrieving ldap record with base %s and filter %s." %
+            self.log_debug("Retrieving ldap record with base %s and filter %s." %
                 (ldap.dn.dn2str(base), filter))
             result = self.ldap.search_s(ldap.dn.dn2str(base),
                 ldap.SCOPE_SUBTREE, filter, self.attrList)
@@ -659,7 +795,8 @@
         self, service, recordType,
         guid, shortNames, authIDs, fullName,
         firstName, lastName, emailAddresses,
-        uid, dn, attrs
+        uid, dn, memberGUIDs, extProxies, extReadOnlyProxies,
+        attrs
     ):
         super(LdapDirectoryRecord, self).__init__(
             service               = service,
@@ -671,6 +808,8 @@
             firstName             = firstName,
             lastName              = lastName,
             emailAddresses        = emailAddresses,
+            extProxies            = extProxies,
+            extReadOnlyProxies    = extReadOnlyProxies,
             uid                   = uid,
         )
 
@@ -678,6 +817,9 @@
         self.dn = dn
         self.attrs = attrs
 
+        # Store copy of member guids
+        self._memberGUIDs = memberGUIDs
+
         # Identifiers of the members of this record if it is a group
         membersAttrs = []
         if self.service.groupSchema["membersAttr"]:
@@ -799,6 +941,10 @@
         return groups
 
 
+    def memberGUIDs(self):
+        return set(self._memberGUIDs)
+
+
     def verifyCredentials(self, credentials):
         """ Supports PAM or simple LDAP bind for username+password """
 

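Besides the plist-based resourceInfoAttr path exercised in the tests below, the new resourceSchema also supports naming individual attributes (autoScheduleAttr, autoScheduleEnabledValue, proxyAttr, readOnlyProxyAttr). A minimal sketch of that second mode, with hypothetical attribute names and values, following the comparison made in _ldapResultToRecord() above:

    # Hypothetical schema: the attribute names below are illustrative only.
    resourceSchema = {
        "resourceInfoAttr": None,
        "autoScheduleAttr": "auto-schedule",
        "autoScheduleEnabledValue": "yes",
        "proxyAttr": "write-proxy-guid",
        "readOnlyProxyAttr": "read-proxy-guid",
    }

    # LDAP attributes returned for a location or resource entry (illustrative).
    attrs = {
        "auto-schedule": ["yes"],
        "write-proxy-guid": ["6C6CD280-E6E3-11DF-9492-0800200C9A66"],
        "read-proxy-guid": ["6AA1AE12-592F-4190-A069-547CD83C47C0"],
    }

    # autoSchedule is true only when the attribute equals autoScheduleEnabledValue;
    # the proxy attributes supply the record's external proxy GUIDs.
    autoSchedule = (attrs["auto-schedule"][0] ==
                    resourceSchema["autoScheduleEnabledValue"])    # True
    proxyGUIDs = tuple(attrs["write-proxy-guid"])        # -> record.externalProxies()
    readOnlyProxyGUIDs = tuple(attrs["read-proxy-guid"]) # -> record.externalReadOnlyProxies()
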
Modified: CalendarServer/trunk/twistedcaldav/directory/test/test_directory.py
===================================================================
--- CalendarServer/trunk/twistedcaldav/directory/test/test_directory.py	2011-07-19 02:26:05 UTC (rev 7804)
+++ CalendarServer/trunk/twistedcaldav/directory/test/test_directory.py	2011-07-19 21:38:22 UTC (rev 7805)
@@ -191,7 +191,53 @@
 
         updater = GroupMembershipCacheUpdater(
             calendaruserproxy.ProxyDBService, self.directoryService, 30,
-            cache=cache)
+            cache=cache, useExternalProxies=False)
+
+        # Exercise getGroups()
+        groups = updater.getGroups()
+        self.assertEquals(
+            groups,
+            {
+                '9FF60DAD-0BDE-4508-8C77-15F0CA5C8DD1':
+                    set(['8B4288F6-CC82-491D-8EF9-642EF4F3E7D0']),
+                'admin':
+                    set(['9FF60DAD-0BDE-4508-8C77-15F0CA5C8DD1']),
+                'both_coasts':
+                    set(['left_coast', 'right_coast']),
+                'grunts':
+                    set(['5A985493-EE2C-4665-94CF-4DFEA3A89500',
+                         '5FF60DAD-0BDE-4508-8C77-15F0CA5C8DD1',
+                         '6423F94A-6B76-4A3A-815B-D52CFD77935D']),
+                'left_coast':
+                    set(['5FF60DAD-0BDE-4508-8C77-15F0CA5C8DD1',
+                         '6423F94A-6B76-4A3A-815B-D52CFD77935D',
+                         '8B4288F6-CC82-491D-8EF9-642EF4F3E7D0']),
+                'non_calendar_group':
+                    set(['5A985493-EE2C-4665-94CF-4DFEA3A89500',
+                         '8B4288F6-CC82-491D-8EF9-642EF4F3E7D0']),
+                'recursive1_coasts':
+                    set(['6423F94A-6B76-4A3A-815B-D52CFD77935D',
+                         'recursive2_coasts']),
+                'recursive2_coasts':
+                    set(['5A985493-EE2C-4665-94CF-4DFEA3A89500',
+                         'recursive1_coasts']),
+                'right_coast':
+                    set(['5A985493-EE2C-4665-94CF-4DFEA3A89500'])
+            }
+        )
+
+        # Exercise expandedMembers()
+        self.assertEquals(
+            updater.expandedMembers(groups, "both_coasts"),
+            set(['5A985493-EE2C-4665-94CF-4DFEA3A89500',
+                 '5FF60DAD-0BDE-4508-8C77-15F0CA5C8DD1',
+                 '6423F94A-6B76-4A3A-815B-D52CFD77935D',
+                 '8B4288F6-CC82-491D-8EF9-642EF4F3E7D0',
+                 'left_coast',
+                 'right_coast']
+            )
+        )
+
         yield updater.updateCache()
 
         delegates = (
@@ -274,6 +320,106 @@
 
 
     @inlineCallbacks
+    def test_groupMembershipCacheUpdaterExternalProxies(self):
+        """
+        Exercise external proxy assignment support (assignments come from the
+        directory service itself)
+        """
+        cache = GroupMembershipCache("ProxyDB", 60)
+        # Having a groupMembershipCache assigned to the directory service is the
+        # trigger to use such a cache:
+        self.directoryService.groupMembershipCache = cache
+
+        # This time, we're setting some external proxy assignments for the
+        # "transporter" resource...
+        def fakeExternalProxies():
+            return [
+                (
+                    "transporter#calendar-proxy-write",
+                    set(["6423F94A-6B76-4A3A-815B-D52CFD77935D",
+                         "8B4288F6-CC82-491D-8EF9-642EF4F3E7D0"])
+                ),
+                (
+                    "transporter#calendar-proxy-read",
+                    set(["5A985493-EE2C-4665-94CF-4DFEA3A89500"])
+                ),
+            ]
+
+        updater = GroupMembershipCacheUpdater(
+            calendaruserproxy.ProxyDBService, self.directoryService, 30,
+            cache=cache, useExternalProxies=True,
+            externalProxiesSource=fakeExternalProxies)
+
+        yield updater.updateCache()
+
+        delegates = (
+
+            # record name
+            # read-write delegators
+            # read-only delegators
+            # groups delegate is in (restricted to only those groups
+            #   participating in delegation)
+
+            ("wsanchez",
+             set(["mercury", "apollo", "orion", "gemini", "transporter"]),
+             set(["non_calendar_proxy"]),
+             set(['left_coast',
+                  'both_coasts',
+                  'recursive1_coasts',
+                  'recursive2_coasts',
+                  'gemini#calendar-proxy-write',
+                  'transporter#calendar-proxy-write',
+                ]),
+            ),
+            ("cdaboo",
+             set(["apollo", "orion", "non_calendar_proxy"]),
+             set(["non_calendar_proxy", "transporter"]),
+             set(['both_coasts',
+                  'non_calendar_group',
+                  'recursive1_coasts',
+                  'recursive2_coasts',
+                  'transporter#calendar-proxy-read',
+                ]),
+            ),
+            ("lecroy",
+             set(["apollo", "mercury", "non_calendar_proxy", "transporter"]),
+             set(),
+             set(['both_coasts',
+                  'left_coast',
+                  'non_calendar_group',
+                  'transporter#calendar-proxy-write',
+                ]),
+            ),
+        )
+
+        for name, write, read, groups in delegates:
+            delegate = self._getPrincipalByShortName(DirectoryService.recordType_users, name)
+
+            proxyFor = (yield delegate.proxyFor(True))
+            self.assertEquals(
+                set([p.record.guid for p in proxyFor]),
+                write,
+            )
+            proxyFor = (yield delegate.proxyFor(False))
+            self.assertEquals(
+                set([p.record.guid for p in proxyFor]),
+                read,
+            )
+            groupsIn = (yield delegate.groupMemberships())
+            uids = set()
+            for group in groupsIn:
+                try:
+                    uid = group.uid # a sub-principal
+                except AttributeError:
+                    uid = group.record.guid # a regular group
+                uids.add(uid)
+            self.assertEquals(
+                set(uids),
+                groups,
+            )
+
+
+    @inlineCallbacks
     def test_groupMembershipCacheSnapshot(self):
         """
         The group membership cache creates a snapshot (a pickle file) of
@@ -300,7 +446,7 @@
         # directory (fast now is False), and snapshot will get created
         fast, numMembers = (yield updater.updateCache(fast=True))
         self.assertEquals(fast, False)
-        self.assertEquals(numMembers, 4)
+        self.assertEquals(numMembers, 8)
         self.assertTrue(snapshotFile.exists())
 
         # Try another fast update where the snapshot already exists (as in a
@@ -308,12 +454,12 @@
         # as indicated by the return value for "fast"
         fast, numMembers = (yield updater.updateCache(fast=True))
         self.assertEquals(fast, True)
-        self.assertEquals(numMembers, 4)
+        self.assertEquals(numMembers, 8)
 
         # Try an update which faults in from the directory (fast=False)
         fast, numMembers = (yield updater.updateCache(fast=False))
         self.assertEquals(fast, False)
-        self.assertEquals(numMembers, 4)
+        self.assertEquals(numMembers, 8)
 
         # Verify the snapshot contains the pickled dictionary we expect
         members = pickle.loads(snapshotFile.getContent())
@@ -322,28 +468,46 @@
             {
                 "5A985493-EE2C-4665-94CF-4DFEA3A89500":
                     set([
-                        "non_calendar_group",
-                        "recursive1_coasts",
-                        "recursive2_coasts",
-                        "both_coasts"
+                        u"non_calendar_group",
+                        u"recursive1_coasts",
+                        u"recursive2_coasts",
+                        u"both_coasts"
                     ]),
                 "6423F94A-6B76-4A3A-815B-D52CFD77935D":
                     set([
-                        "left_coast",
-                        "recursive1_coasts",
-                        "recursive2_coasts",
-                        "both_coasts"
+                        u"left_coast",
+                        u"recursive1_coasts",
+                        u"recursive2_coasts",
+                        u"both_coasts"
                     ]),
                 "5FF60DAD-0BDE-4508-8C77-15F0CA5C8DD1":
                     set([
-                        "left_coast",
-                        "both_coasts"
+                        u"left_coast",
+                        u"both_coasts"
                     ]),
                 "8B4288F6-CC82-491D-8EF9-642EF4F3E7D0":
                     set([
-                        "non_calendar_group",
-                        "left_coast",
-                        "both_coasts"
+                        u"non_calendar_group",
+                        u"left_coast",
+                        u"both_coasts"
+                    ]),
+                "left_coast":
+                     set([
+                         u"both_coasts"
+                     ]),
+                "recursive1_coasts":
+                     set([
+                         u"recursive1_coasts",
+                         u"recursive2_coasts"
+                     ]),
+                "recursive2_coasts":
+                    set([
+                        u"recursive1_coasts",
+                        u"recursive2_coasts"
+                    ]),
+                "right_coast":
+                    set([
+                        u"both_coasts"
                     ])
             }
         )

Modified: CalendarServer/trunk/twistedcaldav/directory/test/test_ldapdirectory.py
===================================================================
--- CalendarServer/trunk/twistedcaldav/directory/test/test_ldapdirectory.py	2011-07-19 02:26:05 UTC (rev 7804)
+++ CalendarServer/trunk/twistedcaldav/directory/test/test_ldapdirectory.py	2011-07-19 21:38:22 UTC (rev 7805)
@@ -15,7 +15,7 @@
 ##
 
 try:
-    from twistedcaldav.directory.ldapdirectory import buildFilter
+    from twistedcaldav.directory.ldapdirectory import buildFilter, LdapDirectoryService
 except ImportError:
     print "Skipping because ldap module not installed"
 else:
@@ -83,3 +83,305 @@
                         operand=entry["operand"]),
                     entry["expected"]
                 )
+
+
+    class LdapDirectoryTestWrapper(object):
+        """
+        A test stub which replaces search_s( ) with a version that will return
+        whatever you have previously called setTestResults( ) with.
+        """
+
+        def __init__(self, actual):
+            self.actual = actual
+            self.testResults = None
+
+        def setTestResults(self, results):
+            self.testResults = results
+
+        def search_s(self, base, scope, filter="(objectClass=*)",
+            attrList=None):
+            return self.testResults
+
+
+    class LdapDirectoryServiceTestCase(TestCase):
+
+        def setUp(self):
+            params = {
+                "augmentService" : None,
+                "groupMembershipCache" : None,
+                "cacheTimeout": 1, # Minutes
+                "negativeCaching": False,
+                "restrictEnabledRecords": False,
+                "restrictToGroup": "",
+                "recordTypes": ("users", "groups", "locations", "resources"),
+                "uri": "ldap://localhost/",
+                "tls": False,
+                "tlsCACertFile": None,
+                "tlsCACertDir": None,
+                "tlsRequireCert": None, # never, allow, try, demand, hard
+                "credentials": {
+                    "dn": None,
+                    "password": None,
+                },
+                "authMethod": "LDAP",
+                "rdnSchema": {
+                    "base": "dc=example,dc=com",
+                    "guidAttr": "apple-generateduid",
+                    "users": {
+                        "rdn": "cn=users",
+                        "attr": "uid", # used only to synthesize email address
+                        "emailSuffix": None, # used only to synthesize email address
+                        "filter": "(objectClass=apple-user)", # additional filter for this type
+                        "recordName": "uid", # uniquely identifies user records
+                        "loginEnabledAttr" : "", # attribute controlling login
+                        "loginEnabledValue" : "yes", # value of above attribute
+                    },
+                    "groups": {
+                        "rdn": "cn=groups",
+                        "attr": "cn", # used only to synthesize email address
+                        "emailSuffix": None, # used only to synthesize email address
+                        "filter": "(objectClass=apple-group)", # additional filter for this type
+                        "recordName": "cn", # uniquely identifies group records
+                    },
+                    "locations": {
+                        "rdn": "cn=places",
+                        "attr": "cn", # used only to synthesize email address
+                        "emailSuffix": None, # used only to synthesize email address
+                        "filter": "(objectClass=apple-resource)", # additional filter for this type
+                        "recordName": "cn", # uniquely identifies location records
+                    },
+                    "resources": {
+                        "rdn": "cn=resources",
+                        "attr": "cn", # used only to synthesize email address
+                        "emailSuffix": None, # used only to synthesize email address
+                        "filter": "(objectClass=apple-resource)", # additional filter for this type
+                        "recordName": "cn", # uniquely identifies resource records
+                    },
+                },
+                "groupSchema": {
+                    "membersAttr": "apple-group-memberguid", # how members are specified
+                    "nestedGroupsAttr": "apple-group-nestedgroup", # how nested groups are specified
+                    "memberIdAttr": "apple-generateduid", # which attribute the above refer to
+                },
+                "resourceSchema": {
+                    "resourceInfoAttr": "apple-resource-info", # contains location/resource info
+                    "autoScheduleAttr": None,
+                    "proxyAttr": None,
+                    "readOnlyProxyAttr": None,
+                },
+                "partitionSchema": {
+                    "serverIdAttr": "server-id", # maps to augments server-id
+                    "partitionIdAttr": "partition-id", # maps to augments partition-id
+                },
+                "attributeMapping": { # maps internal record names to LDAP
+                    "fullName" : "cn",
+                    "emailAddresses" : "mail",
+                    "firstName" : "givenName",
+                    "lastName" : "sn",
+                },
+            }
+
+            self.service = LdapDirectoryService(params)
+            self.service.ldap = LdapDirectoryTestWrapper(self.service.ldap)
+
+
+        def test_ldapRecordCreation(self):
+            """
+            Exercise _ldapResultToRecord(), which converts a dictionary
+            of LDAP attributes into an LdapDirectoryRecord
+            """
+
+            # User
+
+            dn = "uid=odtestamanda,cn=users,dc=example,dc=com"
+            guid = '9DC04A70-E6DD-11DF-9492-0800200C9A66'
+            attrs = {
+                'uid': ['odtestamanda'],
+                'apple-generateduid': [guid],
+                'sn': ['Test'],
+                'mail': ['odtestamanda at example.com', 'alternate at example.com'],
+                'givenName': ['Amanda'],
+                'cn': ['Amanda Test']
+            }
+
+            record = self.service._ldapResultToRecord(dn, attrs,
+                self.service.recordType_users)
+            self.assertEquals(record.guid, guid)
+            self.assertEquals(record.emailAddresses,
+                set(['alternate at example.com', 'odtestamanda at example.com']))
+            self.assertEquals(record.shortNames, ('odtestamanda',))
+            self.assertEquals(record.firstName, 'Amanda')
+            self.assertEquals(record.lastName, 'Test')
+            self.assertEquals(record.serverID, None)
+            self.assertEquals(record.partitionID, None)
+
+            # User with "podding" info
+
+            dn = "uid=odtestamanda,cn=users,dc=example,dc=com"
+            guid = '9DC04A70-E6DD-11DF-9492-0800200C9A66'
+            attrs = {
+                'uid': ['odtestamanda'],
+                'apple-generateduid': [guid],
+                'cn': ['Amanda Test'],
+                'server-id' : ["test-server-id"],
+                'partition-id' : ["test-partition-id"],
+            }
+
+            record = self.service._ldapResultToRecord(dn, attrs,
+                self.service.recordType_users)
+            self.assertEquals(record.serverID, "test-server-id")
+            self.assertEquals(record.partitionID, "test-partition-id")
+
+            # Group with direct user members and nested group
+
+            dn = "cn=odtestgrouptop,cn=groups,dc=example,dc=com"
+            guid = '6C6CD280-E6E3-11DF-9492-0800200C9A66'
+            attrs = {
+                'apple-generateduid': [guid],
+                'apple-group-memberguid':
+                    [
+                        '9DC04A70-E6DD-11DF-9492-0800200C9A66',
+                        '9DC04A71-E6DD-11DF-9492-0800200C9A66'
+                    ],
+                'apple-group-nestedgroup':
+                    [
+                        '6C6CD282-E6E3-11DF-9492-0800200C9A66'
+                    ],
+                'cn': ['odtestgrouptop']
+            }
+            record = self.service._ldapResultToRecord(dn, attrs,
+                self.service.recordType_groups)
+            self.assertEquals(record.guid, guid)
+            self.assertEquals(record.memberGUIDs(),
+                set(['6C6CD282-E6E3-11DF-9492-0800200C9A66',
+                     '9DC04A70-E6DD-11DF-9492-0800200C9A66',
+                     '9DC04A71-E6DD-11DF-9492-0800200C9A66'])
+            )
+
+            # Resource with delegates and autoSchedule = True
+
+            dn = "cn=odtestresource,cn=resources,dc=example,dc=com"
+            guid = 'D3094652-344B-4633-8DB8-09639FA00FB6'
+            attrs = {
+                'apple-generateduid': [guid],
+                'cn': ['odtestresource'],
+                'apple-resource-info': ["""<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+<key>com.apple.WhitePagesFramework</key>
+<dict>
+ <key>AutoAcceptsInvitation</key>
+<true/>
+<key>CalendaringDelegate</key>
+<string>6C6CD280-E6E3-11DF-9492-0800200C9A66</string>
+<key>ReadOnlyCalendaringDelegate</key>
+<string>6AA1AE12-592F-4190-A069-547CD83C47C0</string>
+</dict>
+</dict>
+</plist>"""]
+            }
+            record = self.service._ldapResultToRecord(dn, attrs,
+                self.service.recordType_resources)
+            self.assertEquals(record.guid, guid)
+            self.assertEquals(record.externalProxies(),
+                set(['6C6CD280-E6E3-11DF-9492-0800200C9A66']))
+            self.assertEquals(record.externalReadOnlyProxies(),
+                set(['6AA1AE12-592F-4190-A069-547CD83C47C0']))
+            self.assertTrue(record.autoSchedule)
+
+            # Resource with no delegates and autoSchedule = False
+
+            dn = "cn=odtestresource,cn=resources,dc=example,dc=com"
+            guid = 'D3094652-344B-4633-8DB8-09639FA00FB6'
+            attrs = {
+                'apple-generateduid': [guid],
+                'cn': ['odtestresource'],
+                'apple-resource-info': ["""<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+<key>com.apple.WhitePagesFramework</key>
+<dict>
+ <key>AutoAcceptsInvitation</key>
+<false/>
+</dict>
+</dict>
+</plist>"""]
+            }
+            record = self.service._ldapResultToRecord(dn, attrs,
+                self.service.recordType_resources)
+            self.assertEquals(record.guid, guid)
+            self.assertEquals(record.externalProxies(),
+                set())
+            self.assertEquals(record.externalReadOnlyProxies(),
+                set())
+            self.assertFalse(record.autoSchedule)
+
+
+            # Now switch off the resourceInfoAttr and switch to individual
+            # attributes...
+            self.service.resourceSchema = {
+                "resourceInfoAttr" : "",
+                "autoScheduleAttr" : "auto-schedule",
+                "autoScheduleEnabledValue" : "yes",
+                "proxyAttr" : "proxy",
+                "readOnlyProxyAttr" : "read-only-proxy",
+            }
+
+            # Resource with delegates and autoSchedule = True
+
+            dn = "cn=odtestresource,cn=resources,dc=example,dc=com"
+            guid = 'D3094652-344B-4633-8DB8-09639FA00FB6'
+            attrs = {
+                'apple-generateduid': [guid],
+                'cn': ['odtestresource'],
+                'auto-schedule' : ['yes'],
+                'proxy' : ['6C6CD280-E6E3-11DF-9492-0800200C9A66'],
+                'read-only-proxy' : ['6AA1AE12-592F-4190-A069-547CD83C47C0'],
+            }
+            record = self.service._ldapResultToRecord(dn, attrs,
+                self.service.recordType_resources)
+            self.assertEquals(record.guid, guid)
+            self.assertEquals(record.externalProxies(),
+                set(['6C6CD280-E6E3-11DF-9492-0800200C9A66']))
+            self.assertEquals(record.externalReadOnlyProxies(),
+                set(['6AA1AE12-592F-4190-A069-547CD83C47C0']))
+            self.assertTrue(record.autoSchedule)
+
+        def test_listRecords(self):
+            """
+            listRecords makes an LDAP query (with fake results in this test)
+            and turns the results into records
+            """
+
+            self.service.ldap.setTestResults([
+                (
+                    "uid=odtestamanda,cn=users,dc=example,dc=com",
+                    {
+                        'uid': ['odtestamanda'],
+                        'apple-generateduid': ['9DC04A70-E6DD-11DF-9492-0800200C9A66'],
+                        'sn': ['Test'],
+                        'mail': ['odtestamanda@example.com', 'alternate@example.com'],
+                        'givenName': ['Amanda'],
+                        'cn': ['Amanda Test']
+                    }
+                ),
+                (
+                    "uid=odtestbetty,cn=users,dc=example,dc=com",
+                    {
+                        'uid': ['odtestbetty'],
+                        'apple-generateduid': ['93A8F5C5-49D8-4641-840F-CD1903B0394C'],
+                        'sn': ['Test'],
+                        'mail': ['odtestbetty@example.com'],
+                        'givenName': ['Betty'],
+                        'cn': ['Betty Test']
+                    }
+                ),
+            ])
+            records = self.service.listRecords(self.service.recordType_users)
+            self.assertEquals(len(records), 2)
+            self.assertEquals(
+                set([r.firstName for r in records]),
+                set(["Amanda", "Betty"])
+            )
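The apple-resource-info values exercised above are ordinary Apple property lists, so the delegate GUIDs and the auto-schedule flag can be pulled out with plistlib. The helper below is only an illustrative sketch (written against Python 3's plistlib.loads; the parseResourceInfo name and return shape are hypothetical and are not the server's actual parsing code):

    import plistlib

    def parseResourceInfo(resourceInfoXML):
        # Illustrative only: extract the com.apple.WhitePagesFramework
        # settings that the tests above assert on.  Not the server's
        # implementation.
        plist = plistlib.loads(resourceInfoXML.encode("utf-8"))
        wpf = plist.get("com.apple.WhitePagesFramework", {})
        autoSchedule = bool(wpf.get("AutoAcceptsInvitation", False))
        proxy = wpf.get("CalendaringDelegate", None)
        readOnlyProxy = wpf.get("ReadOnlyCalendaringDelegate", None)
        return autoSchedule, proxy, readOnlyProxy

For the first resource plist above this would yield True plus the CalendaringDelegate and ReadOnlyCalendaringDelegate GUIDs, matching the assertions on autoSchedule, externalProxies(), and externalReadOnlyProxies().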

Modified: CalendarServer/trunk/twistedcaldav/directory/test/test_opendirectory.py
===================================================================
--- CalendarServer/trunk/twistedcaldav/directory/test/test_opendirectory.py	2011-07-19 02:26:05 UTC (rev 7804)
+++ CalendarServer/trunk/twistedcaldav/directory/test/test_opendirectory.py	2011-07-19 21:38:22 UTC (rev 7805)
@@ -81,6 +81,8 @@
                 lastName              = "User",
                 emailAddresses        = set(("someuser@example.com",)),
                 memberGUIDs           = [],
+                extProxies            = [],
+                extReadOnlyProxies    = [],
             )
             self.assertEquals(record.fullName, "")
 
@@ -97,6 +99,8 @@
                 lastName              = "User",
                 emailAddresses        = set(("someuser@example.com",)),
                 memberGUIDs           = [],
+                extProxies            = [],
+                extReadOnlyProxies    = [],
             )
 
             digestFields = {}
@@ -117,6 +121,8 @@
                 lastName              = "User",
                 emailAddresses        = set(("someuser@example.com",)),
                 memberGUIDs           = [],
+                extProxies            = [],
+                extReadOnlyProxies    = [],
             )
 
             digestFields = {

Modified: CalendarServer/trunk/twistedcaldav/directory/xmlfile.py
===================================================================
--- CalendarServer/trunk/twistedcaldav/directory/xmlfile.py	2011-07-19 02:26:05 UTC (rev 7804)
+++ CalendarServer/trunk/twistedcaldav/directory/xmlfile.py	2011-07-19 21:38:22 UTC (rev 7805)
@@ -596,6 +596,13 @@
         for shortName in self._groups:
             yield self.service.recordWithShortName(DirectoryService.recordType_groups, shortName)
 
+    def memberGUIDs(self):
+        results = set()
+        for recordType, shortName in self._members:
+            record = self.service.recordWithShortName(recordType, shortName)
+            results.add(record.guid)
+        return results
+
     def verifyCredentials(self, credentials):
         if self.enabled:
             if isinstance(credentials, UsernamePassword):
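The new memberGUIDs() accessor on XML-file group records returns the GUIDs of the group's direct members, which is the shape the faster group-membership indexing wants. A rough usage sketch follows (the function name is hypothetical, not the updater's actual code in directory.py):

    # Illustrative only: invert direct group membership into a map of
    # member GUID -> set of group GUIDs.  The real indexing lives in the
    # GroupMembershipCacheUpdater, which this excerpt does not show.
    def invertGroupMembership(groupRecords):
        memberships = {}
        for group in groupRecords:
            for memberGUID in group.memberGUIDs():
                memberships.setdefault(memberGUID, set()).add(group.guid)
        return memberships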

Modified: CalendarServer/trunk/twistedcaldav/stdconfig.py
===================================================================
--- CalendarServer/trunk/twistedcaldav/stdconfig.py	2011-07-19 02:26:05 UTC (rev 7804)
+++ CalendarServer/trunk/twistedcaldav/stdconfig.py	2011-07-19 21:38:22 UTC (rev 7805)
@@ -109,6 +109,13 @@
             "nestedGroupsAttr": None, # how nested groups are specified
             "memberIdAttr": None, # which attribute the above refer to
         },
+        "resourceSchema": {
+            "resourceInfoAttr": None, # contains location/resource info
+        },
+        "partitionSchema": {
+            "serverIdAttr": None, # maps to augments server-id
+            "partitionIdAttr": None, # maps to augments partition-id
+        },
         "attributeMapping": { # maps internal record names to LDAP
             "fullName" : "cn",
             "emailAddresses" : "mail",
@@ -737,11 +744,12 @@
     },
 
     "GroupCaching" : {
-        "Enabled": False,
+        "Enabled": True,
         "MemcachedPool" : "ProxyDB",
         "UpdateSeconds" : 300,
         "ExpireSeconds" : 3600,
         "EnableUpdater" : True,
+        "UseExternalProxies" : False,
     },
 
     "EnableKeepAlive": True,

Modified: CalendarServer/trunk/twistedcaldav/upgrade.py
===================================================================
--- CalendarServer/trunk/twistedcaldav/upgrade.py	2011-07-19 02:26:05 UTC (rev 7804)
+++ CalendarServer/trunk/twistedcaldav/upgrade.py	2011-07-19 21:38:22 UTC (rev 7805)
@@ -846,7 +846,8 @@
 
             updater = GroupMembershipCacheUpdater(proxydb,
                 directory, self.config.GroupCaching.ExpireSeconds,
-                namespace=self.config.GroupCaching.MemcachedPool)
+                namespace=self.config.GroupCaching.MemcachedPool,
+                useExternalProxies=self.config.GroupCaching.UseExternalProxies)
             yield updater.updateCache(fast=True)
 
         # Process old inbox items
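Passing useExternalProxies through to GroupMembershipCacheUpdater lets the upgrade-time cache rebuild pick up the proxy assignments the directory itself advertises for locations and resources (the externalProxies() / externalReadOnlyProxies() values exercised in the LDAP tests above). A rough sketch of the kind of pass this enables, with the understanding that gatherExternalAssignments and its arguments are hypothetical names rather than the updater's real API:

    # Illustrative only: collect directory-advertised delegate GUIDs for
    # each location/resource record so they can later be applied as proxy
    # assignments.  recordTypes would be the service's
    # recordType_locations / recordType_resources constants.
    def gatherExternalAssignments(directory, recordTypes):
        assignments = []
        for recordType in recordTypes:
            for record in directory.listRecords(recordType):
                assignments.append((
                    record.guid,
                    record.externalProxies(),
                    record.externalReadOnlyProxies(),
                ))
        return assignments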