[CalendarServer-changes] [14103] CalendarServer/trunk
source_changes at macosforge.org
source_changes at macosforge.org
Fri Oct 24 14:12:40 PDT 2014
Revision: 14103
http://trac.calendarserver.org//changeset/14103
Author: cdaboo at apple.com
Date: 2014-10-24 14:12:40 -0700 (Fri, 24 Oct 2014)
Log Message:
-----------
Directory proxy/delegate memcache based caching.
Modified Paths:
--------------
CalendarServer/trunk/calendarserver/tools/gateway.py
CalendarServer/trunk/calendarserver/tools/principals.py
CalendarServer/trunk/conf/caldavd-test-podA.plist
CalendarServer/trunk/conf/caldavd-test-podB.plist
CalendarServer/trunk/twistedcaldav/directory/calendaruserproxy.py
CalendarServer/trunk/twistedcaldav/directory/calendaruserproxyloader.py
CalendarServer/trunk/twistedcaldav/directory/principal.py
CalendarServer/trunk/twistedcaldav/directory/test/test_principal.py
CalendarServer/trunk/twistedcaldav/stdconfig.py
CalendarServer/trunk/twistedcaldav/test/test_upgrade.py
CalendarServer/trunk/twistedcaldav/upgrade.py
CalendarServer/trunk/txdav/common/datastore/podding/directory.py
CalendarServer/trunk/txdav/common/datastore/sql.py
CalendarServer/trunk/txdav/dps/server.py
CalendarServer/trunk/txdav/who/delegates.py
CalendarServer/trunk/txdav/who/test/test_delegates.py
CalendarServer/trunk/txdav/who/test/test_groups.py
Modified: CalendarServer/trunk/calendarserver/tools/gateway.py
===================================================================
--- CalendarServer/trunk/calendarserver/tools/gateway.py 2014-10-24 17:34:33 UTC (rev 14102)
+++ CalendarServer/trunk/calendarserver/tools/gateway.py 2014-10-24 21:12:40 UTC (rev 14103)
@@ -48,9 +48,7 @@
from txdav.who.idirectory import RecordType as CalRecordType
from twext.who.idirectory import FieldName
from twisted.python.constants import Names, NamedConstant
-from txdav.who.delegates import (
- addDelegate, removeDelegate, RecordType as DelegateRecordType
-)
+from txdav.who.delegates import Delegates, RecordType as DelegateRecordType
attrMap = {
@@ -529,7 +527,7 @@
returnValue(None)
txn = self.store.newTransaction()
- yield addDelegate(txn, record, proxyRecord, (proxyType == "write"))
+ yield Delegates.addDelegate(txn, record, proxyRecord, (proxyType == "write"))
yield txn.commit()
yield self.respondWithProxies(command, record, proxyType)
@@ -555,7 +553,7 @@
returnValue(None)
txn = self.store.newTransaction()
- yield removeDelegate(txn, record, proxyRecord, (proxyType == "write"))
+ yield Delegates.removeDelegate(txn, record, proxyRecord, (proxyType == "write"))
yield txn.commit()
yield self.respondWithProxies(command, record, proxyType)
Modified: CalendarServer/trunk/calendarserver/tools/principals.py
===================================================================
--- CalendarServer/trunk/calendarserver/tools/principals.py 2014-10-24 17:34:33 UTC (rev 14102)
+++ CalendarServer/trunk/calendarserver/tools/principals.py 2014-10-24 21:12:40 UTC (rev 14103)
@@ -32,9 +32,7 @@
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks, returnValue, succeed
from twistedcaldav.config import config
-from txdav.who.delegates import (
- addDelegate, removeDelegate, RecordType as DelegateRecordType
-)
+from txdav.who.delegates import Delegates, RecordType as DelegateRecordType
from txdav.who.idirectory import AutoScheduleMode
from txdav.who.groups import GroupCacherPollingWork
@@ -608,7 +606,7 @@
print("You are not allowed to add proxies for locations or resources via command line when their proxy assignments come from the directory service.")
returnValue(None)
- yield _addRemoveProxy("Added", addDelegate, store, record, proxyType, *proxyIDs)
+ yield _addRemoveProxy("Added", Delegates.addDelegate, store, record, proxyType, *proxyIDs)
@@ -623,9 +621,9 @@
returnValue(None)
# Write
- yield _addRemoveProxy("Removed", removeDelegate, store, record, "write", *proxyIDs)
+ yield _addRemoveProxy("Removed", Delegates.removeDelegate, store, record, "write", *proxyIDs)
# Read
- yield _addRemoveProxy("Removed", removeDelegate, store, record, "read", *proxyIDs)
+ yield _addRemoveProxy("Removed", Delegates.removeDelegate, store, record, "read", *proxyIDs)
Modified: CalendarServer/trunk/conf/caldavd-test-podA.plist
===================================================================
--- CalendarServer/trunk/conf/caldavd-test-podA.plist 2014-10-24 17:34:33 UTC (rev 14102)
+++ CalendarServer/trunk/conf/caldavd-test-podA.plist 2014-10-24 21:12:40 UTC (rev 14103)
@@ -140,7 +140,7 @@
<key>Port</key>
<integer>11211</integer>
</dict>
- <key>ProxyDB</key>
+ <key>AllPods</key>
<dict>
<key>ClientEnabled</key>
<true/>
@@ -153,6 +153,7 @@
<key>HandleCacheTypes</key>
<array>
<string>ProxyDB</string>
+ <string>DelegatesDB</string>
<string>PrincipalToken</string>
<string>DIGESTCREDENTIALS</string>
</array>
Modified: CalendarServer/trunk/conf/caldavd-test-podB.plist
===================================================================
--- CalendarServer/trunk/conf/caldavd-test-podB.plist 2014-10-24 17:34:33 UTC (rev 14102)
+++ CalendarServer/trunk/conf/caldavd-test-podB.plist 2014-10-24 21:12:40 UTC (rev 14103)
@@ -147,7 +147,7 @@
<key>Port</key>
<integer>11411</integer>
</dict>
- <key>ProxyDB</key>
+ <key>AllPods</key>
<dict>
<key>ClientEnabled</key>
<true/>
@@ -160,6 +160,7 @@
<key>HandleCacheTypes</key>
<array>
<string>ProxyDB</string>
+ <string>DelegatesDB</string>
<string>PrincipalToken</string>
<string>DIGESTCREDENTIALS</string>
</array>
Modified: CalendarServer/trunk/twistedcaldav/directory/calendaruserproxy.py
===================================================================
--- CalendarServer/trunk/twistedcaldav/directory/calendaruserproxy.py 2014-10-24 17:34:33 UTC (rev 14102)
+++ CalendarServer/trunk/twistedcaldav/directory/calendaruserproxy.py 2014-10-24 21:12:40 UTC (rev 14103)
@@ -208,8 +208,12 @@
)
+ def __repr__(self):
+ return "<{}: {}>".format(self.__class__.__name__, str(self))
+
+
def __str__(self):
- return "%s [%s]" % (self.parent, self.proxyType)
+ return "{} [{}]".format(self.parent, self.proxyType)
def _index(self):
Modified: CalendarServer/trunk/twistedcaldav/directory/calendaruserproxyloader.py
===================================================================
--- CalendarServer/trunk/twistedcaldav/directory/calendaruserproxyloader.py 2014-10-24 17:34:33 UTC (rev 14102)
+++ CalendarServer/trunk/twistedcaldav/directory/calendaruserproxyloader.py 2014-10-24 21:12:40 UTC (rev 14103)
@@ -31,7 +31,7 @@
from twistedcaldav.config import config, fullServerPath
from twistedcaldav.xmlutil import readXML
-from txdav.who.delegates import addDelegate
+from txdav.who.delegates import Delegates
log = Logger()
@@ -160,13 +160,13 @@
if delegateRecord is None:
continue
- yield addDelegate(txn, delegatorRecord, delegateRecord, True)
+ yield Delegates.addDelegate(txn, delegatorRecord, delegateRecord, True)
for proxy in read_proxies:
delegateRecord = yield directory.recordWithUID(proxy)
if delegateRecord is None:
continue
- yield addDelegate(txn, delegatorRecord, delegateRecord, False)
+ yield Delegates.addDelegate(txn, delegatorRecord, delegateRecord, False)
yield txn.commit()
Modified: CalendarServer/trunk/twistedcaldav/directory/principal.py
===================================================================
--- CalendarServer/trunk/twistedcaldav/directory/principal.py 2014-10-24 17:34:33 UTC (rev 14102)
+++ CalendarServer/trunk/twistedcaldav/directory/principal.py 2014-10-24 21:12:40 UTC (rev 14103)
@@ -821,8 +821,12 @@
])
+ def __repr__(self):
+ return "<{}: {}>".format(self.__class__.__name__, str(self))
+
+
def __str__(self):
- return "(%s)%s" % (self.record.recordType, self.record.uid)
+ return "({}){}".format(self.record.recordType, self.record.uid)
def __eq__(self, other):
Modified: CalendarServer/trunk/twistedcaldav/directory/test/test_principal.py
===================================================================
--- CalendarServer/trunk/twistedcaldav/directory/test/test_principal.py 2014-10-24 17:34:33 UTC (rev 14102)
+++ CalendarServer/trunk/twistedcaldav/directory/test/test_principal.py 2014-10-24 21:12:40 UTC (rev 14103)
@@ -32,7 +32,7 @@
)
from twistedcaldav.test.util import StoreTestCase
-from txdav.who.delegates import addDelegate
+from txdav.who.delegates import Delegates
from txdav.who.idirectory import AutoScheduleMode, RecordType as CalRecordType
from txdav.xml import element as davxml
@@ -1072,8 +1072,8 @@
self.assertTrue(len((yield principal03.proxyFor(True))) == 0)
# Make user01 a read-only proxy for user02 and user03
- yield addDelegate(self.transactionUnderTest(), principal02.record, principal01.record, False)
- yield addDelegate(self.transactionUnderTest(), principal03.record, principal01.record, False)
+ yield Delegates.addDelegate(self.transactionUnderTest(), principal02.record, principal01.record, False)
+ yield Delegates.addDelegate(self.transactionUnderTest(), principal03.record, principal01.record, False)
yield self.commit()
self.assertTrue(len((yield principal01.proxyFor(False))) == 2)
@@ -1097,7 +1097,15 @@
self.assertTrue(len((yield principal01.proxyFor(False))) == 1)
self.assertTrue(len((yield principal01.proxyFor(True))) == 0)
+ # Remove user01 as read-only proxy for user02 and user03
+ yield Delegates.removeDelegate(self.transactionUnderTest(), principal02.record, principal01.record, False)
+ yield Delegates.removeDelegate(self.transactionUnderTest(), principal03.record, principal01.record, False)
+ yield self.commit()
+ self.assertTrue(len((yield principal01.proxyFor(False))) == 0)
+ self.assertTrue(len((yield principal01.proxyFor(True))) == 0)
+
+
@inlineCallbacks
def test_isProxyFor(self):
"""
@@ -1119,8 +1127,8 @@
self.assertFalse((yield principal03.isProxyFor(principal03)))
# Make user02 a read-only proxy for user01, and user03 a read-write proxy for user01
- yield addDelegate(self.transactionUnderTest(), principal01.record, principal02.record, False)
- yield addDelegate(self.transactionUnderTest(), principal01.record, principal03.record, True)
+ yield Delegates.addDelegate(self.transactionUnderTest(), principal01.record, principal02.record, False)
+ yield Delegates.addDelegate(self.transactionUnderTest(), principal01.record, principal03.record, True)
yield self.commit()
# Check proxy for
@@ -1134,7 +1142,26 @@
self.assertFalse((yield principal03.isProxyFor(principal02)))
self.assertFalse((yield principal03.isProxyFor(principal03)))
+ # Remove user02 as read-only proxy for user01, and user03 as read-write proxy for user01
+ yield Delegates.removeDelegate(self.transactionUnderTest(), principal01.record, principal02.record, False)
+ yield Delegates.removeDelegate(self.transactionUnderTest(), principal01.record, principal03.record, True)
+ yield self.commit()
+ # Check proxy for
+ proxies = yield principal01.proxyFor(False)
+ self.assertEqual(proxies, set())
+ proxies = yield principal01.proxyFor(True)
+ self.assertEqual(proxies, set())
+ proxies = yield principal02.proxyFor(False)
+ self.assertEqual(proxies, set())
+ proxies = yield principal02.proxyFor(True)
+ self.assertEqual(proxies, set())
+ proxies = yield principal03.proxyFor(False)
+ self.assertEqual(proxies, set())
+ proxies = yield principal03.proxyFor(True)
+ self.assertEqual(proxies, set())
+
+
@inlineCallbacks
def test_proxyMode(self):
"""
@@ -1155,8 +1182,8 @@
self.assertEqual(mode, "none")
# Make user02 a read-only proxy for user01, and user03 a read-write proxy for user01
- yield addDelegate(self.transactionUnderTest(), principal01.record, principal02.record, False)
- yield addDelegate(self.transactionUnderTest(), principal01.record, principal03.record, True)
+ yield Delegates.addDelegate(self.transactionUnderTest(), principal01.record, principal02.record, False)
+ yield Delegates.addDelegate(self.transactionUnderTest(), principal01.record, principal03.record, True)
yield self.commit()
# Check proxy mode
@@ -1169,7 +1196,26 @@
mode = yield principal01.proxyMode(principal03)
self.assertEqual(mode, "none")
+ # Remove user02 as read-only proxy for user01, and user03 as read-write proxy for user01
+ yield Delegates.removeDelegate(self.transactionUnderTest(), principal01.record, principal02.record, False)
+ yield Delegates.removeDelegate(self.transactionUnderTest(), principal01.record, principal03.record, True)
+ yield self.commit()
+ # Check proxy for
+ proxies = yield principal01.proxyFor(False)
+ self.assertEqual(proxies, set())
+ proxies = yield principal01.proxyFor(True)
+ self.assertEqual(proxies, set())
+ proxies = yield principal02.proxyFor(False)
+ self.assertEqual(proxies, set())
+ proxies = yield principal02.proxyFor(True)
+ self.assertEqual(proxies, set())
+ proxies = yield principal03.proxyFor(False)
+ self.assertEqual(proxies, set())
+ proxies = yield principal03.proxyFor(True)
+ self.assertEqual(proxies, set())
+
+
@inlineCallbacks
def test_proxyFor(self):
"""
@@ -1194,8 +1240,8 @@
self.assertEqual(proxies, set())
# Make user02 a read-only proxy for user01, and user03 a read-write proxy for user01
- yield addDelegate(self.transactionUnderTest(), principal01.record, principal02.record, False)
- yield addDelegate(self.transactionUnderTest(), principal01.record, principal03.record, True)
+ yield Delegates.addDelegate(self.transactionUnderTest(), principal01.record, principal02.record, False)
+ yield Delegates.addDelegate(self.transactionUnderTest(), principal01.record, principal03.record, True)
yield self.commit()
# Check proxy for
@@ -1211,3 +1257,22 @@
self.assertEqual(proxies, set())
proxies = yield principal03.proxyFor(True)
self.assertEqual(proxies, set((principal01,)))
+
+ # Remove user02 as read-only proxy for user01, and user03 as read-write proxy for user01
+ yield Delegates.removeDelegate(self.transactionUnderTest(), principal01.record, principal02.record, False)
+ yield Delegates.removeDelegate(self.transactionUnderTest(), principal01.record, principal03.record, True)
+ yield self.commit()
+
+ # Check proxy for
+ proxies = yield principal01.proxyFor(False)
+ self.assertEqual(proxies, set())
+ proxies = yield principal01.proxyFor(True)
+ self.assertEqual(proxies, set())
+ proxies = yield principal02.proxyFor(False)
+ self.assertEqual(proxies, set())
+ proxies = yield principal02.proxyFor(True)
+ self.assertEqual(proxies, set())
+ proxies = yield principal03.proxyFor(False)
+ self.assertEqual(proxies, set())
+ proxies = yield principal03.proxyFor(True)
+ self.assertEqual(proxies, set())
Modified: CalendarServer/trunk/twistedcaldav/stdconfig.py
===================================================================
--- CalendarServer/trunk/twistedcaldav/stdconfig.py 2014-10-24 17:34:33 UTC (rev 14102)
+++ CalendarServer/trunk/twistedcaldav/stdconfig.py 2014-10-24 21:12:40 UTC (rev 14103)
@@ -930,6 +930,7 @@
# "Port": 11211,
# "HandleCacheTypes": [
# "ProxyDB",
+ # "DelegatesDB",
# "PrincipalToken",
# ]
# },
Modified: CalendarServer/trunk/twistedcaldav/test/test_upgrade.py
===================================================================
--- CalendarServer/trunk/twistedcaldav/test/test_upgrade.py 2014-10-24 17:34:33 UTC (rev 14102)
+++ CalendarServer/trunk/twistedcaldav/test/test_upgrade.py 2014-10-24 21:12:40 UTC (rev 14103)
@@ -32,7 +32,7 @@
)
from txdav.caldav.datastore.index_file import db_basename
from txdav.caldav.datastore.scheduling.imip.mailgateway import MailGatewayTokensDatabase
-from txdav.who.delegates import delegatesOf
+from txdav.who.delegates import Delegates
from txdav.xml.parser import WebDAVDocument
@@ -1470,7 +1470,7 @@
store = self.storeUnderTest()
record = yield self.directory.recordWithUID(u"mercury")
txn = store.newTransaction()
- writeDelegates = yield delegatesOf(txn, record, True)
+ writeDelegates = yield Delegates.delegatesOf(txn, record, True)
self.assertEquals(len(writeDelegates), 0)
yield txn.commit()
@@ -1484,7 +1484,7 @@
# Check delegates in store
txn = store.newTransaction()
- writeDelegates = yield delegatesOf(txn, record, True)
+ writeDelegates = yield Delegates.delegatesOf(txn, record, True)
self.assertEquals(len(writeDelegates), 1)
self.assertEquals(
set([d.uid for d in writeDelegates]),
@@ -1493,7 +1493,7 @@
record = yield self.directory.recordWithUID(u"non_calendar_proxy")
- readDelegates = yield delegatesOf(txn, record, False)
+ readDelegates = yield Delegates.delegatesOf(txn, record, False)
self.assertEquals(len(readDelegates), 1)
self.assertEquals(
set([d.uid for d in readDelegates]),
Modified: CalendarServer/trunk/twistedcaldav/upgrade.py
===================================================================
--- CalendarServer/trunk/twistedcaldav/upgrade.py 2014-10-24 17:34:33 UTC (rev 14102)
+++ CalendarServer/trunk/twistedcaldav/upgrade.py 2014-10-24 21:12:40 UTC (rev 14103)
@@ -53,7 +53,7 @@
from txdav.caldav.datastore.scheduling.imip.mailgateway import migrateTokensToStore
from txdav.caldav.datastore.scheduling.scheduler import DirectScheduler
from txdav.caldav.datastore.util import normalizationLookup
-from txdav.who.delegates import addDelegate
+from txdav.who.delegates import Delegates
from txdav.who.idirectory import RecordType as CalRecordType
from txdav.xml import element
@@ -1078,7 +1078,7 @@
continue
readWrite = (groupType == "calendar-proxy-write")
- yield addDelegate(txn, delegatorRecord, delegateRecord, readWrite)
+ yield Delegates.addDelegate(txn, delegatorRecord, delegateRecord, readWrite)
yield txn.commit()
Modified: CalendarServer/trunk/txdav/common/datastore/podding/directory.py
===================================================================
--- CalendarServer/trunk/txdav/common/datastore/podding/directory.py 2014-10-24 17:34:33 UTC (rev 14102)
+++ CalendarServer/trunk/txdav/common/datastore/podding/directory.py 2014-10-24 21:12:40 UTC (rev 14103)
@@ -17,7 +17,7 @@
from twisted.internet.defer import inlineCallbacks, returnValue
from twisted.python.reflect import namedClass
from txdav.common.datastore.podding.base import FailedCrossPodRequestError
-from txdav.who.delegates import _delegatesOfUIDs, _delegatedToUIDs, setDelegates
+from txdav.who.delegates import Delegates
class DirectoryPoddingConduitMixin(object):
@@ -81,7 +81,7 @@
raise FailedCrossPodRequestError("Cross-pod delegate missing on this server: {}".format(uid))
delegates.append(delegate)
- yield setDelegates(txn, delegator, delegates, request["read-write"])
+ yield Delegates.setDelegates(txn, delegator, delegates, request["read-write"])
except Exception as e:
returnValue({
"result": "exception",
@@ -140,7 +140,7 @@
if delegator is None or not delegator.thisServer():
raise FailedCrossPodRequestError("Cross-pod delegate not on this server: {}".format(delegator.uid))
- delegates = yield _delegatesOfUIDs(txn, delegator, request["read-write"], request["expanded"])
+ delegates = yield Delegates._delegatesOfUIDs(txn, delegator, request["read-write"], request["expanded"])
except Exception as e:
returnValue({
"result": "exception",
@@ -201,7 +201,7 @@
if delegate is None or delegate.thisServer():
raise FailedCrossPodRequestError("Cross-pod delegate missing or on this server: {}".format(delegate.uid))
- delegateors = yield _delegatedToUIDs(txn, delegate, request["read-write"], onlyThisServer=True)
+ delegateors = yield Delegates._delegatedToUIDs(txn, delegate, request["read-write"], onlyThisServer=True)
except Exception as e:
returnValue({
"result": "exception",
Modified: CalendarServer/trunk/txdav/common/datastore/sql.py
===================================================================
--- CalendarServer/trunk/txdav/common/datastore/sql.py 2014-10-24 17:34:33 UTC (rev 14102)
+++ CalendarServer/trunk/txdav/common/datastore/sql.py 2014-10-24 21:12:40 UTC (rev 14103)
@@ -80,6 +80,7 @@
from txdav.common.inotifications import INotificationCollection, \
INotificationObject
from txdav.idav import ChangeCategory
+from txdav.who.delegates import Delegates
from txdav.xml import element
from uuid import uuid4, UUID
@@ -1103,12 +1104,12 @@
if record is None:
returnValue(None)
- groupID = yield self._addGroupQuery.on(
+ groupID = (yield self._addGroupQuery.on(
self,
name=name.encode("utf-8"),
groupUID=groupUID.encode("utf-8"),
membershipHash=membershipHash
- )
+ ))[0][0]
yield self.refreshGroup(
groupUID, record, groupID, name.encode("utf-8"), membershipHash, True
@@ -1184,7 +1185,7 @@
bool(results[0][4]), # extant
))
else:
- raise
+ returnValue((None, None, None, None, None))
else:
yield savepoint.release(self)
results = (
@@ -1201,7 +1202,7 @@
bool(results[0][4]), # extant
))
else:
- raise
+ returnValue((None, None, None, None, None))
else:
returnValue((None, None, None, None, None))
@@ -1375,33 +1376,58 @@
)
if membershipChanged:
- newMemberUIDs = set()
- for memberUID in memberUIDs:
- newMemberUIDs.add(memberUID)
- yield self.synchronizeMembers(groupID, newMemberUIDs)
+ yield self.synchronizeMembers(groupID, set(memberUIDs))
returnValue(membershipChanged)
@inlineCallbacks
def synchronizeMembers(self, groupID, newMemberUIDs):
+ """
+ Update the group membership table in the database to match the new membership list. This
+ method will diff the existing set with the new set and apply the changes. It also calls out
+ to a groupChanged() method with the set of added and removed members so that other modules
+ that depend on groups can monitor the changes.
+
+ @param groupID: group id of group to update
+ @type groupID: L{str}
+ @param newMemberUIDs: set of new member UIDs in the group
+ @type newMemberUIDs: L{set} of L{str}
+ """
numRemoved = numAdded = 0
cachedMemberUIDs = (yield self.groupMemberUIDs(groupID))
- for memberUID in cachedMemberUIDs:
- if memberUID not in newMemberUIDs:
- numRemoved += 1
- yield self.removeMemberFromGroup(memberUID, groupID)
+ removed = cachedMemberUIDs - newMemberUIDs
+ for memberUID in removed:
+ numRemoved += 1
+ yield self.removeMemberFromGroup(memberUID, groupID)
- for memberUID in newMemberUIDs:
- if memberUID not in cachedMemberUIDs:
- numAdded += 1
- yield self.addMemberToGroup(memberUID, groupID)
+ added = newMemberUIDs - cachedMemberUIDs
+ for memberUID in added:
+ numAdded += 1
+ yield self.addMemberToGroup(memberUID, groupID)
+ yield self.groupChanged(groupID, added, removed)
+
returnValue((numAdded, numRemoved))
@inlineCallbacks
+ def groupChanged(self, groupID, addedUIDs, removedUIDs):
+ """
+ Called when membership of a group changes.
+
+ @param groupID: group id of group that changed
+ @type groupID: L{str}
+ @param addedUIDs: set of new member UIDs added to the group
+ @type addedUIDs: L{set} of L{str}
+ @param removedUIDs: set of old member UIDs removed from the group
+ @type removedUIDs: L{set} of L{str}
+ """
+ yield Delegates.groupChanged(self, groupID, addedUIDs, removedUIDs)
+
+
+ @inlineCallbacks
def groupMembers(self, groupID):
"""
The members of the given group as recorded in the db
@@ -1826,41 +1852,40 @@
@rtype: a Deferred resulting in a set
"""
delegates = set()
+ delegatorU = delegator.encode("utf-8")
# First get the direct delegates
results = (
yield self._selectDelegatesQuery.on(
self,
- delegator=delegator.encode("utf-8"),
+ delegator=delegatorU,
readWrite=1 if readWrite else 0
)
)
- for row in results:
- delegates.add(row[0].decode("utf-8"))
+ delegates.update([row[0].decode("utf-8") for row in results])
if expanded:
# Get those who are in groups which have been delegated to
results = (
yield self._selectIndirectDelegatesQuery.on(
self,
- delegator=delegator.encode("utf-8"),
+ delegator=delegatorU,
readWrite=1 if readWrite else 0
)
)
- for row in results:
- delegates.add(row[0].decode("utf-8"))
+ # Skip the delegator if they are in one of the groups
+ delegates.update([row[0].decode("utf-8") for row in results if row[0] != delegatorU])
else:
# Get the directly-delegated-to groups
results = (
yield self._selectDelegateGroupsQuery.on(
self,
- delegator=delegator.encode("utf-8"),
+ delegator=delegatorU,
readWrite=1 if readWrite else 0
)
)
- for row in results:
- delegates.add(row[0].decode("utf-8"))
+ delegates.update([row[0].decode("utf-8") for row in results])
returnValue(delegates)
@@ -1881,29 +1906,29 @@
@rtype: a Deferred resulting in a set
"""
delegators = set()
+ delegateU = delegate.encode("utf-8")
# First get the direct delegators
results = (
yield self._selectDirectDelegatorsQuery.on(
self,
- delegate=delegate.encode("utf-8"),
+ delegate=delegateU,
readWrite=1 if readWrite else 0
)
)
- for row in results:
- delegators.add(row[0].decode("utf-8"))
+ delegators.update([row[0].decode("utf-8") for row in results])
# Finally get those who have delegated to groups the delegate
# is a member of
results = (
yield self._selectIndirectDelegatorsQuery.on(
self,
- delegate=delegate.encode("utf-8"),
+ delegate=delegateU,
readWrite=1 if readWrite else 0
)
)
- for row in results:
- delegators.add(row[0].decode("utf-8"))
+ # Skip the delegator if they are in one of the groups
+ delegators.update([row[0].decode("utf-8") for row in results if row[0] != delegateU])
returnValue(delegators)
@@ -1924,7 +1949,6 @@
@rtype: a Deferred resulting in a set
"""
- delegators = set()
results = (
yield self._selectDelegatorsToGroupQuery.on(
self,
@@ -1932,8 +1956,7 @@
readWrite=1 if readWrite else 0
)
)
- for row in results:
- delegators.add(row[0].decode("utf-8"))
+ delegators = set([row[0].decode("utf-8") for row in results])
returnValue(delegators)
Modified: CalendarServer/trunk/txdav/dps/server.py
===================================================================
--- CalendarServer/trunk/txdav/dps/server.py 2014-10-24 17:34:33 UTC (rev 14102)
+++ CalendarServer/trunk/txdav/dps/server.py 2014-10-24 21:12:40 UTC (rev 14103)
@@ -29,6 +29,7 @@
from twisted.protocols import amp
from twisted.python.constants import Names, NamedConstant
from twisted.python.usage import Options, UsageError
+from twistedcaldav import memcachepool
from twistedcaldav.config import config
from twistedcaldav.stdconfig import DEFAULT_CONFIG, DEFAULT_CONFIG_FILE
from txdav.dps.commands import (
@@ -796,6 +797,14 @@
raise
+ #
+ # Configure Memcached Client Pool
+ #
+ memcachepool.installPools(
+ config.Memcached.Pools,
+ config.Memcached.MaxClients,
+ )
+
log.info("Created directory service")
return strPortsService(
Modified: CalendarServer/trunk/txdav/who/delegates.py
===================================================================
--- CalendarServer/trunk/txdav/who/delegates.py 2014-10-24 17:34:33 UTC (rev 14102)
+++ CalendarServer/trunk/txdav/who/delegates.py 2014-10-24 21:12:40 UTC (rev 14103)
@@ -24,6 +24,7 @@
DeferredList
from twistedcaldav.config import config
+from twistedcaldav.memcacher import Memcacher
from twext.python.log import Logger
from twext.who.idirectory import (
@@ -78,11 +79,11 @@
RecordType.readDelegateGroup, RecordType.writeDelegateGroup
): # Members are delegates of this record
readWrite = (self.recordType is RecordType.writeDelegateGroup)
- delegateUIDs = yield _delegatesOfUIDs(txn, parentRecord, readWrite, expanded=expanded)
+ delegateUIDs = yield Delegates._delegatesOfUIDs(txn, parentRecord, readWrite, expanded=expanded)
else: # Members have delegated to this record
readWrite = (self.recordType is RecordType.writeDelegatorGroup)
- delegateUIDs = yield _delegatedToUIDs(txn, parentRecord, readWrite)
+ delegateUIDs = yield Delegates._delegatedToUIDs(txn, parentRecord, readWrite)
returnValue(delegateUIDs)
delegateUIDs = yield self.service._store.inTransaction(
@@ -130,7 +131,7 @@
)
def _setMembers(txn):
- return setDelegates(txn, delegator, memberRecords, readWrite)
+ return Delegates.setDelegates(txn, delegator, memberRecords, readWrite)
yield self.service._store.inTransaction(
"DirectoryRecord.setMembers", _setMembers
@@ -227,249 +228,398 @@
-@inlineCallbacks
-def setDelegates(txn, delegator, delegates, readWrite):
+class CachingDelegates(object):
"""
- Sets the full set of delegates for a delegator.
+ Manages access to the store's delegates API, including caching of results.
+ """
- We need to take multiple pods into account by re-directing this request
- to the cross-pod conduit if the delegator is not local to this pod.
+ class DelegatesMemcacher(Memcacher):
- @param delegator: the delegator's directory record
- @type delegator: L{IDirectoryRecord}
- @param delegates: the delegates directory records
- @type delegates: L{list}} of L{IDirectoryRecord}
- @param readWrite: if True, read and write access is granted; read-only
- access otherwise
- """
- if delegator.thisServer():
- yield txn.removeDelegates(delegator.uid, readWrite)
- yield txn.removeDelegateGroups(delegator.uid, readWrite)
+ def __init__(self, namespace):
+ super(CachingDelegates.DelegatesMemcacher, self).__init__(namespace, key_normalization=True)
- for delegate in delegates:
- yield addDelegate(txn, delegator, delegate, readWrite)
- else:
- yield _podSetDelegates(txn, delegator, delegates, readWrite)
+ def _key(self, keyname, uid, readWrite, expanded):
+ return "{}{}:{}#{}".format(
+ keyname,
+ "-expanded" if expanded else "",
+ uid,
+ "write" if readWrite else "read",
+ )
+ def _membersKey(self, uid, readWrite, expanded):
+ return self._key("members", uid, readWrite, expanded)
+ def _membershipsKey(self, uid, readWrite):
+ return self._key("memberships", uid, readWrite, False)
-@inlineCallbacks
-def addDelegate(txn, delegator, delegate, readWrite):
- """
- Adds "delegate" as a delegate of "delegator". The type of access is
- specified by the "readWrite" parameter.
+ def setMembers(self, uid, readWrite, members, expanded):
+ return self.set(
+ self._membersKey(uid, readWrite, expanded),
+ ",".join(members),
+ )
- @param delegator: the delegator's directory record
- @type delegator: L{IDirectoryRecord}
- @param delegate: the delegate's directory record
- @type delegate: L{IDirectoryRecord}
- @param readWrite: if True, read and write access is granted; read-only
- access otherwise
- """
- if delegate.recordType == BaseRecordType.group:
- # find the groupID
- (
- groupID, _ignore_name, _ignore_membershipHash, _ignore_modified,
- _ignore_extant
- ) = yield txn.groupByUID(
- delegate.uid
- )
- yield txn.addDelegateGroup(delegator.uid, groupID, readWrite)
- else:
- yield txn.addDelegate(delegator.uid, delegate.uid, readWrite)
+ def setMemberships(self, uid, readWrite, memberships):
+ return self.set(
+ self._membershipsKey(uid, readWrite),
+ ",".join(memberships),
+ )
+ @staticmethod
+ def _value_decode(value):
+ if value:
+ return set(value.split(","))
+ elif value is None:
+ return None
+ else:
+ return set()
+ @inlineCallbacks
+ def getMembers(self, uid, readWrite, expanded):
+ value = yield self.get(self._membersKey(uid, readWrite, expanded))
+ returnValue(self._value_decode(value))
-@inlineCallbacks
-def removeDelegate(txn, delegator, delegate, readWrite):
- """
- Removes "delegate" as a delegate of "delegator". The type of access is
- specified by the "readWrite" parameter.
+ @inlineCallbacks
+ def getMemberships(self, uid, readWrite):
+ value = yield self.get(self._membershipsKey(uid, readWrite))
+ returnValue(self._value_decode(value))
- @param delegator: the delegator's directory record
- @type delegator: L{IDirectoryRecord}
- @param delegate: the delegate's directory record
- @type delegate: L{IDirectoryRecord}
- @param readWrite: if True, read and write access is revoked; read-only
- access otherwise
- """
- if delegate.recordType == BaseRecordType.group:
- # find the groupID
- (
- groupID, _ignore_name, _ignore_membershipHash, _ignore_modified,
- _ignore_extant
- ) = yield txn.groupByUID(
- delegate.uid
- )
- yield txn.removeDelegateGroup(delegator.uid, groupID, readWrite)
- else:
- yield txn.removeDelegate(delegator.uid, delegate.uid, readWrite)
+ @inlineCallbacks
+ def deleteMember(self, uid, readWrite):
+ """
+ Delete both the regular and expanded keys.
+ """
+ yield self.delete(self._membersKey(uid, readWrite, False))
+ yield self.delete(self._membersKey(uid, readWrite, True))
+ @inlineCallbacks
+ def deleteMembership(self, uid, readWrite):
+ """
+ Delete both the regular and expanded keys.
+ """
+ yield self.delete(self._membershipsKey(uid, readWrite))
-@inlineCallbacks
-def delegatesOf(txn, delegator, readWrite, expanded=False):
- """
- Return the records of the delegates of "delegator". The type of access
- is specified by the "readWrite" parameter.
+ def __init__(self):
+ self._memcacher = CachingDelegates.DelegatesMemcacher("DelegatesDB")
- @param delegator: the delegator's directory record
- @type delegator: L{IDirectoryRecord}
- @param readWrite: if True, read and write access delegates are returned;
- read-only access otherwise
- @return: the set of directory records
- @rtype: a Deferred which fires a set of L{IDirectoryRecord}
- """
- delegateUIDs = yield _delegatesOfUIDs(txn, delegator, readWrite, expanded)
- records = []
- directory = delegator.service
- for uid in delegateUIDs:
- if uid != delegator.uid:
- record = (yield directory.recordWithUID(uid))
- if record is not None:
- records.append(record)
- returnValue(records)
+ @inlineCallbacks
+ def setDelegates(self, txn, delegator, delegates, readWrite):
+ """
+ Sets the full set of delegates for a delegator.
+ We need to take multiple pods into account by re-directing this request
+ to the cross-pod conduit if the delegator is not local to this pod.
+ @param delegator: the delegator's directory record
+ @type delegator: L{IDirectoryRecord}
+ @param delegates: the delegates directory records
+ @type delegates: L{list} of L{IDirectoryRecord}
+ @param readWrite: if True, read and write access is granted; read-only
+ access otherwise
+ """
+ existingDelegates = yield self.delegatesOf(txn, delegator, readWrite)
-@inlineCallbacks
-def delegatedTo(txn, delegate, readWrite):
- """
- Return the records of those who have delegated to "delegate". The type of
- access is specified by the "readWrite" parameter.
+ if delegator.thisServer():
+ # Remove some
+ for delegate in set(existingDelegates) - set(delegates):
+ yield self.removeDelegate(txn, delegator, delegate, readWrite)
- @param delegate: the delegate's directory record
- @type delegate: L{IDirectoryRecord}
- @param readWrite: if True, read and write access delegators are returned;
- read-only access otherwise
- @return: the set of directory records
- @rtype: a Deferred which fires a set of L{IDirectoryRecord}
- """
- delegatorUIDs = yield _delegatedToUIDs(txn, delegate, readWrite)
+ for delegate in set(delegates) - set(existingDelegates):
+ yield self.addDelegate(txn, delegator, delegate, readWrite)
+ else:
+ yield self._podSetDelegates(txn, delegator, delegates, readWrite)
- records = []
- directory = delegate.service
- for uid in delegatorUIDs:
- if uid != delegate.uid:
- record = (yield directory.recordWithUID(uid))
- if record is not None:
- records.append(record)
- returnValue(records)
+ @inlineCallbacks
+ def addDelegate(self, txn, delegator, delegate, readWrite):
+ """
+ Adds "delegate" as a delegate of "delegator". The type of access is
+ specified by the "readWrite" parameter.
+ @param delegator: the delegator's directory record
+ @type delegator: L{IDirectoryRecord}
+ @param delegate: the delegate's directory record
+ @type delegate: L{IDirectoryRecord}
+ @param readWrite: if True, read and write access is granted; read-only
+ access otherwise
+ """
-@inlineCallbacks
-def _delegatesOfUIDs(txn, delegator, readWrite, expanded=False):
- """
- Return the UIDs of the delegates of "delegator". The type of access
- is specified by the "readWrite" parameter.
+ # Never add the delegator as a delegate
+ if delegator.uid == delegate.uid:
+ returnValue(None)
- We need to take multiple pods into account by re-directing this request
- to the cross-pod conduit if the delegator is not local to this pod.
+ existingDelegateUIDs = yield self._delegatesOfUIDs(txn, delegator, readWrite, expanded=True)
- @param delegator: the delegator's directory record
- @type delegator: L{IDirectoryRecord}
- @param readWrite: if True, read and write access delegates are returned;
- read-only access otherwise
- @return: the set of directory record uids
- @rtype: a Deferred which fires a set of L{str}
- """
+ if delegate.recordType == BaseRecordType.group:
+ # find the groupID
+ (
+ groupID, _ignore_name, _ignore_membershipHash, _ignore_modified,
+ _ignore_extant
+ ) = yield txn.groupByUID(
+ delegate.uid
+ )
+ yield txn.addDelegateGroup(delegator.uid, groupID, readWrite)
+ else:
+ yield txn.addDelegate(delegator.uid, delegate.uid, readWrite)
- log.debug("_delegatesOfUIDs for: {} and read-write = {} and expanded = {}".format(delegator.uid, readWrite, expanded,))
- if delegator.thisServer():
- delegateUIDs = yield txn.delegates(delegator.uid, readWrite, expanded=expanded)
- else:
- delegateUIDs = yield _podDelegates(txn, delegator, readWrite, expanded=expanded)
- returnValue(delegateUIDs)
+ # Update cache (remove the member cache entry first as we need to recalculate it for
+ # memberships removal)
+ yield self._memcacher.deleteMember(delegator.uid, readWrite)
+ newDelegateUIDs = yield self._delegatesOfUIDs(txn, delegator, readWrite, expanded=True)
+ for uid in set(newDelegateUIDs) - set(existingDelegateUIDs):
+ yield self._memcacher.deleteMembership(uid, readWrite)
+ @inlineCallbacks
+ def removeDelegate(self, txn, delegator, delegate, readWrite):
+ """
+ Removes "delegate" as a delegate of "delegator". The type of access is
+ specified by the "readWrite" parameter.
-@inlineCallbacks
-def _delegatedToUIDs(txn, delegate, readWrite, onlyThisServer=False):
- """
- Return the UIDs of those who have delegated to "delegate". The type of
- access is specified by the "readWrite" parameter.
+ @param delegator: the delegator's directory record
+ @type delegator: L{IDirectoryRecord}
+ @param delegate: the delegate's directory record
+ @type delegate: L{IDirectoryRecord}
+ @param readWrite: if True, read and write access is revoked; read-only
+ access otherwise
+ """
- We need to take multiple pods into account by re-directing this request
- to the cross-pod conduit if the delegate is not local to this pod.
+ # Never remove the delegator as a delegate
+ if delegator.uid == delegate.uid:
+ returnValue(None)
- @param delegate: the delegate's directory record
- @type delegate: L{IDirectoryRecord}
- @param readWrite: if True, read and write access delegators are returned;
- read-only access otherwise
- @param onlyThisServer: used when doing the query as part of a cross-pod request since that
- should only returns results for this server
- @type onlyThisServer: L{bool}
- @return: the set of directory record uids
- @rtype: a Deferred which fires a set of L{str}
- """
+ existingDelegateUIDs = yield self._delegatesOfUIDs(txn, delegator, readWrite, expanded=True)
+ if delegate.recordType == BaseRecordType.group:
+ # find the groupID
+ (
+ groupID, _ignore_name, _ignore_membershipHash, _ignore_modified,
+ _ignore_extant
+ ) = yield txn.groupByUID(
+ delegate.uid
+ )
+ yield txn.removeDelegateGroup(delegator.uid, groupID, readWrite)
+ else:
+ yield txn.removeDelegate(delegator.uid, delegate.uid, readWrite)
- log.debug("_delegatedToUIDs for: {} and read-write = {}".format(delegate.uid, readWrite,))
- delegatorUIDs = (yield txn.delegators(delegate.uid, readWrite))
- if not onlyThisServer and config.Servers.Enabled:
- delegatorUIDs.update((yield _podDelegators(txn, delegate, readWrite)))
- returnValue(delegatorUIDs)
+ # Update cache (remove the member cache entry first as we need to recalculate it for
+ # memberships removal)
+ yield self._memcacher.deleteMember(delegator.uid, readWrite)
+ newDelegateUIDs = yield self._delegatesOfUIDs(txn, delegator, readWrite, expanded=True)
+ for uid in set(existingDelegateUIDs) - set(newDelegateUIDs):
+ yield self._memcacher.deleteMembership(uid, readWrite)
+ @inlineCallbacks
+ def groupChanged(self, txn, groupID, addedUIDs, removedUIDs):
+ """
+ A group has changed. We need to see which delegators might be using this group
+ and invalidate caches.
-def _podSetDelegates(txn, delegator, delegates, readWrite):
- """
- Sets the full set of delegates for a delegator.
+ @param groupID: group id of group that changed
+ @type groupID: L{str}
+ @param addedUIDs: set of new member UIDs added to the group
+ @type addedUIDs: L{set} of L{str}
+ @param removedUIDs: set of old member UIDs removed from the group
+ @type removedUIDs: L{set} of L{str}
+ """
- We need to take multiple pods into account by re-directing this request
- to the cross-pod conduit if the delegator is not local to this pod.
+ # Remove member cache entry for delegators using the group
+ delegators = set()
+ for readWrite in (True, False):
+ delegators.update((yield txn.delegatorsToGroup(groupID, readWrite)))
- @param delegator: the delegator's directory record
- @type delegator: L{IDirectoryRecord}
- @param delegates: the delegates directory records
- @type delegates: L{list}} of L{IDirectoryRecord}
- @param readWrite: if True, read and write access is granted; read-only
- access otherwise
- """
- return txn.store().conduit.send_set_delegates(txn, delegator, delegates, readWrite)
+ for delegator in delegators:
+ yield self._memcacher.deleteMember(delegator, True)
+ yield self._memcacher.deleteMember(delegator, False)
+ # Remove membership cache entries for added/removed delegates
+ for delegate in (addedUIDs | removedUIDs):
+ yield self._memcacher.deleteMembership(delegate, True)
+ yield self._memcacher.deleteMembership(delegate, False)
-def _podDelegates(txn, delegator, readWrite, expanded=False):
- """
- Do a cross-pod request to get the delegates for this delegator.
+ @inlineCallbacks
+ def delegatesOf(self, txn, delegator, readWrite, expanded=False):
+ """
+ Return the records of the delegates of "delegator". The type of access
+ is specified by the "readWrite" parameter.
- @param delegator: the delegator's directory record
- @type delegator: L{IDirectoryRecord}
- @param readWrite: if True, read and write access delegates are returned;
- read-only access otherwise
- @return: the set of directory record uids
- @rtype: a Deferred which fires a set of L{str}
- """
+ @param delegator: the delegator's directory record
+ @type delegator: L{IDirectoryRecord}
+ @param readWrite: if True, read and write access delegates are returned;
+ read-only access otherwise
+ @return: the set of directory records
+ @rtype: a Deferred which fires a set of L{IDirectoryRecord}
+ """
+ delegateUIDs = yield self._delegatesOfUIDs(txn, delegator, readWrite, expanded)
- log.debug("_podDelegates for: {} and read-write = {} and expanded = {}".format(delegator.uid, readWrite, expanded,))
- return txn.store().conduit.send_get_delegates(txn, delegator, readWrite, expanded)
+ records = []
+ directory = delegator.service
+ for uid in delegateUIDs:
+ if uid != delegator.uid:
+ record = (yield directory.recordWithUID(uid))
+ if record is not None:
+ records.append(record)
+ returnValue(records)
+ @inlineCallbacks
+ def delegatedTo(self, txn, delegate, readWrite):
+ """
+ Return the records of those who have delegated to "delegate". The type of
+ access is specified by the "readWrite" parameter.
-@inlineCallbacks
-def _podDelegators(txn, delegate, readWrite):
- """
- Do a cross-pod request to get the delegators for this delegate. We need to iterate over all
- other pod servers to get results from each one.
+ @param delegate: the delegate's directory record
+ @type delegate: L{IDirectoryRecord}
+ @param readWrite: if True, read and write access delegators are returned;
+ read-only access otherwise
+ @return: the set of directory records
+ @rtype: a Deferred which fires a set of L{IDirectoryRecord}
+ """
+ delegatorUIDs = yield self._delegatedToUIDs(txn, delegate, readWrite)
- @param delegate: the delegate's directory record
- @type delegate: L{IDirectoryRecord}
- @param readWrite: if True, read and write access delegates are returned;
- read-only access otherwise
- @return: the set of directory record uids
- @rtype: a Deferred which fires a set of L{str}
- """
+ records = []
+ directory = delegate.service
+ for uid in delegatorUIDs:
+ if uid != delegate.uid:
+ record = (yield directory.recordWithUID(uid))
+ if record is not None:
+ records.append(record)
+ returnValue(records)
- log.debug("_podDelegators for: {} and read-write = {}".format(delegate.uid, readWrite,))
- results = yield DeferredList([
- txn.store().conduit.send_get_delegators(txn, server, delegate, readWrite) for
- server in txn.directoryService().serversDB.allServersExceptThis()
- ], consumeErrors=True)
- delegators = set()
- for result in results:
- if result and result[0]:
- delegators.update(result[1])
- returnValue(delegators)
+
+ @inlineCallbacks
+ def _delegatesOfUIDs(self, txn, delegator, readWrite, expanded=False):
+ """
+ Return the UIDs of the delegates of "delegator". The type of access
+ is specified by the "readWrite" parameter.
+
+ We need to take multiple pods into account by re-directing this request
+ to the cross-pod conduit if the delegator is not local to this pod.
+
+ @param delegator: the delegator's directory record
+ @type delegator: L{IDirectoryRecord}
+ @param readWrite: if True, read and write access delegates are returned;
+ read-only access otherwise
+ @return: the set of directory record uids
+ @rtype: a Deferred which fires a set of L{str}
+ """
+
+ # Try cache first
+ delegateUIDs = yield self._memcacher.getMembers(delegator.uid, readWrite, expanded)
+ if delegateUIDs is not None:
+ log.debug("_delegatesOfUIDs cached for: {} and read-write = {} and expanded = {}".format(delegator.uid, readWrite, expanded,))
+ returnValue(delegateUIDs)
+
+ # Get from the store
+ log.debug("_delegatesOfUIDs for: {} and read-write = {} and expanded = {}".format(delegator.uid, readWrite, expanded,))
+ if delegator.thisServer():
+ delegateUIDs = yield txn.delegates(delegator.uid, readWrite, expanded=expanded)
+
+ # Cache result - only need to do this on the host
+ yield self._memcacher.setMembers(delegator.uid, readWrite, delegateUIDs, expanded)
+ else:
+ delegateUIDs = yield self._podDelegates(txn, delegator, readWrite, expanded=expanded)
+
+ returnValue(delegateUIDs)
+
+
+ @inlineCallbacks
+ def _delegatedToUIDs(self, txn, delegate, readWrite, onlyThisServer=False):
+ """
+ Return the UIDs of those who have delegated to "delegate". The type of
+ access is specified by the "readWrite" parameter.
+
+ We need to take multiple pods into account by re-directing this request
+ to the cross-pod conduit if the delegate is not local to this pod.
+
+ @param delegate: the delegate's directory record
+ @type delegate: L{IDirectoryRecord}
+ @param readWrite: if True, read and write access delegators are returned;
+ read-only access otherwise
+ @param onlyThisServer: used when doing the query as part of a cross-pod request since that
+ should only return results for this server
+ @type onlyThisServer: L{bool}
+ @return: the set of directory record uids
+ @rtype: a Deferred which fires a set of L{str}
+ """
+
+ # Try cache first
+ delegatorUIDs = yield self._memcacher.getMemberships(delegate.uid, readWrite)
+ if delegatorUIDs is not None:
+ log.debug("_delegatedToUIDs cached for: {} and read-write = {}".format(delegate.uid, readWrite,))
+ returnValue(delegatorUIDs)
+
+ # Get from the store
+ log.debug("_delegatedToUIDs for: {} and read-write = {}".format(delegate.uid, readWrite,))
+ delegatorUIDs = (yield txn.delegators(delegate.uid, readWrite))
+ if not onlyThisServer and config.Servers.Enabled:
+ delegatorUIDs.update((yield self._podDelegators(txn, delegate, readWrite)))
+
+ # Cache result - only need to do this on the host
+ yield self._memcacher.setMemberships(delegate.uid, readWrite, delegatorUIDs)
+
+ returnValue(delegatorUIDs)
+
+
+ def _podSetDelegates(self, txn, delegator, delegates, readWrite):
+ """
+ Sets the full set of delegates for a delegator.
+
+ We need to take multiple pods into account by re-directing this request
+ to the cross-pod conduit if the delegator is not local to this pod.
+
+ @param delegator: the delegator's directory record
+ @type delegator: L{IDirectoryRecord}
+ @param delegates: the delegates directory records
+ @type delegates: L{list} of L{IDirectoryRecord}
+ @param readWrite: if True, read and write access is granted; read-only
+ access otherwise
+ """
+ return txn.store().conduit.send_set_delegates(txn, delegator, delegates, readWrite)
+
+
+ def _podDelegates(self, txn, delegator, readWrite, expanded=False):
+ """
+ Do a cross-pod request to get the delegates for this delegator.
+
+ @param delegator: the delegator's directory record
+ @type delegator: L{IDirectoryRecord}
+ @param readWrite: if True, read and write access delegates are returned;
+ read-only access otherwise
+ @return: the set of directory record uids
+ @rtype: a Deferred which fires a set of L{str}
+ """
+
+ log.debug("_podDelegates for: {} and read-write = {} and expanded = {}".format(delegator.uid, readWrite, expanded,))
+ return txn.store().conduit.send_get_delegates(txn, delegator, readWrite, expanded)
+
+
+ @inlineCallbacks
+ def _podDelegators(self, txn, delegate, readWrite):
+ """
+ Do a cross-pod request to get the delegators for this delegate. We need to iterate over all
+ other pod servers to get results from each one.
+
+ @param delegate: the delegate's directory record
+ @type delegate: L{IDirectoryRecord}
+ @param readWrite: if True, read and write access delegates are returned;
+ read-only access otherwise
+ @return: the set of directory record uids
+ @rtype: a Deferred which fires a set of L{str}
+ """
+
+ log.debug("_podDelegators for: {} and read-write = {}".format(delegate.uid, readWrite,))
+ results = yield DeferredList([
+ txn.store().conduit.send_get_delegators(txn, server, delegate, readWrite) for
+ server in txn.directoryService().serversDB.allServersExceptThis()
+ ], consumeErrors=True)
+ delegators = set()
+ for result in results:
+ if result and result[0]:
+ delegators.update(result[1])
+ returnValue(delegators)
+
+Delegates = CachingDelegates()
Modified: CalendarServer/trunk/txdav/who/test/test_delegates.py
===================================================================
--- CalendarServer/trunk/txdav/who/test/test_delegates.py 2014-10-24 17:34:33 UTC (rev 14102)
+++ CalendarServer/trunk/txdav/who/test/test_delegates.py 2014-10-24 21:12:40 UTC (rev 14103)
@@ -18,10 +18,8 @@
Delegates implementation tests
"""
-from txdav.who.delegates import (
- addDelegate, removeDelegate, delegatesOf, delegatedTo,
- RecordType as DelegateRecordType
-)
+from txdav.common.datastore.sql import CommonStoreTransaction
+from txdav.who.delegates import Delegates, RecordType as DelegateRecordType
from txdav.who.groups import GroupCacher
from twext.who.idirectory import RecordType
from twisted.internet.defer import inlineCallbacks
@@ -46,10 +44,10 @@
delegate2 = yield self.directory.recordWithUID(u"__cdaboo1__")
# Add 1 delegate
- yield addDelegate(txn, delegator, delegate1, True)
- delegates = (yield delegatesOf(txn, delegator, True))
+ yield Delegates.addDelegate(txn, delegator, delegate1, True)
+ delegates = (yield Delegates.delegatesOf(txn, delegator, True))
self.assertEquals([u"__sagen1__"], [d.uid for d in delegates])
- delegators = (yield delegatedTo(txn, delegate1, True))
+ delegators = (yield Delegates.delegatedTo(txn, delegate1, True))
self.assertEquals([u"__wsanchez1__"], [d.uid for d in delegators])
yield txn.commit() # So delegateService will see the changes
@@ -97,27 +95,27 @@
)
# Add another delegate
- yield addDelegate(txn, delegator, delegate2, True)
- delegates = (yield delegatesOf(txn, delegator, True))
+ yield Delegates.addDelegate(txn, delegator, delegate2, True)
+ delegates = (yield Delegates.delegatesOf(txn, delegator, True))
self.assertEquals(
set([u"__sagen1__", u"__cdaboo1__"]),
set([d.uid for d in delegates])
)
- delegators = (yield delegatedTo(txn, delegate2, True))
+ delegators = (yield Delegates.delegatedTo(txn, delegate2, True))
self.assertEquals([u"__wsanchez1__"], [d.uid for d in delegators])
# Remove 1 delegate
- yield removeDelegate(txn, delegator, delegate1, True)
- delegates = (yield delegatesOf(txn, delegator, True))
+ yield Delegates.removeDelegate(txn, delegator, delegate1, True)
+ delegates = (yield Delegates.delegatesOf(txn, delegator, True))
self.assertEquals([u"__cdaboo1__"], [d.uid for d in delegates])
- delegators = (yield delegatedTo(txn, delegate1, True))
+ delegators = (yield Delegates.delegatedTo(txn, delegate1, True))
self.assertEquals(0, len(delegators))
# Remove the other delegate
- yield removeDelegate(txn, delegator, delegate2, True)
- delegates = (yield delegatesOf(txn, delegator, True))
+ yield Delegates.removeDelegate(txn, delegator, delegate2, True)
+ delegates = (yield Delegates.delegatesOf(txn, delegator, True))
self.assertEquals(0, len(delegates))
- delegators = (yield delegatedTo(txn, delegate2, True))
+ delegators = (yield Delegates.delegatedTo(txn, delegate2, True))
self.assertEquals(0, len(delegators))
yield txn.commit() # So delegateService will see the changes
@@ -131,7 +129,7 @@
# Verify the assignments were made
txn = self.store.newTransaction(label="test_directDelegation")
- delegates = (yield delegatesOf(txn, delegator, True))
+ delegates = (yield Delegates.delegatesOf(txn, delegator, True))
self.assertEquals(
set([u"__sagen1__", u"__cdaboo1__"]),
set([d.uid for d in delegates])
@@ -143,7 +141,7 @@
# Verify the assignments were made
txn = self.store.newTransaction(label="test_directDelegation")
- delegates = (yield delegatesOf(txn, delegator, True))
+ delegates = (yield Delegates.delegatesOf(txn, delegator, True))
self.assertEquals(
set([u"__cdaboo1__"]),
set([d.uid for d in delegates])
@@ -161,31 +159,31 @@
group2 = yield self.directory.recordWithUID(u"__sub_group_1__")
# Add group delegate
- yield addDelegate(txn, delegator, group1, True)
+ yield Delegates.addDelegate(txn, delegator, group1, True)
# Passing expanded=False will return the group
- delegates = (yield delegatesOf(txn, delegator, True, expanded=False))
+ delegates = (yield Delegates.delegatesOf(txn, delegator, True, expanded=False))
self.assertEquals(1, len(delegates))
self.assertEquals(delegates[0].uid, u"__top_group_1__")
# Passing expanded=True will return not the group -- it only returns
# non-groups
- delegates = (yield delegatesOf(txn, delegator, True, expanded=True))
+ delegates = (yield Delegates.delegatesOf(txn, delegator, True, expanded=True))
self.assertEquals(
set([u"__sagen1__", u"__cdaboo1__", u"__glyph1__"]),
set([d.uid for d in delegates])
)
- delegators = (yield delegatedTo(txn, delegate1, True))
+ delegators = (yield Delegates.delegatedTo(txn, delegate1, True))
self.assertEquals([u"__wsanchez1__"], [d.uid for d in delegators])
# Verify we can ask for all delegated-to groups
- yield addDelegate(txn, delegator, group2, True)
+ yield Delegates.addDelegate(txn, delegator, group2, True)
groups = (yield txn.allGroupDelegates())
self.assertEquals(
set([u'__sub_group_1__', u'__top_group_1__']), set(groups)
)
# Delegate to a user who is already indirectly delegated-to
- yield addDelegate(txn, delegator, delegate1, True)
- delegates = (yield delegatesOf(txn, delegator, True, expanded=True))
+ yield Delegates.addDelegate(txn, delegator, delegate1, True)
+ delegates = (yield Delegates.delegatesOf(txn, delegator, True, expanded=True))
self.assertEquals(
set([u"__sagen1__", u"__cdaboo1__", u"__glyph1__"]),
set([d.uid for d in delegates])
@@ -205,23 +203,23 @@
_ignore_numAdded, _ignore_numRemoved = (
yield self.groupCacher.synchronizeMembers(txn, groupID, newSet)
)
- delegates = (yield delegatesOf(txn, delegator, True, expanded=True))
+ delegates = (yield Delegates.delegatesOf(txn, delegator, True, expanded=True))
self.assertEquals(
set([u"__sagen1__", u"__cdaboo1__", u"__glyph1__", u"__dre1__"]),
set([d.uid for d in delegates])
)
# Remove delegate access from the top group
- yield removeDelegate(txn, delegator, group1, True)
- delegates = (yield delegatesOf(txn, delegator, True, expanded=True))
+ yield Delegates.removeDelegate(txn, delegator, group1, True)
+ delegates = (yield Delegates.delegatesOf(txn, delegator, True, expanded=True))
self.assertEquals(
set([u"__sagen1__", u"__cdaboo1__"]),
set([d.uid for d in delegates])
)
# Remove delegate access from the sub group
- yield removeDelegate(txn, delegator, group2, True)
- delegates = (yield delegatesOf(txn, delegator, True, expanded=True))
+ yield Delegates.removeDelegate(txn, delegator, group2, True)
+ delegates = (yield Delegates.delegatesOf(txn, delegator, True, expanded=True))
self.assertEquals(
set([u"__sagen1__"]),
set([d.uid for d in delegates])
@@ -240,11 +238,11 @@
delegate1 = yield self.directory.recordWithUID(u"__sagen1__")
txn = self.store.newTransaction(label="test_noDuplication")
- yield addDelegate(txn, delegator, delegate1, True)
+ yield Delegates.addDelegate(txn, delegator, delegate1, True)
yield txn.commit()
txn = self.store.newTransaction(label="test_noDuplication")
- yield addDelegate(txn, delegator, delegate1, True)
+ yield Delegates.addDelegate(txn, delegator, delegate1, True)
yield txn.commit()
txn = self.store.newTransaction(label="test_noDuplication")
@@ -262,11 +260,11 @@
group1 = yield self.directory.recordWithUID(u"__top_group_1__")
txn = self.store.newTransaction(label="test_noDuplication")
- yield addDelegate(txn, delegator, group1, True)
+ yield Delegates.addDelegate(txn, delegator, group1, True)
yield txn.commit()
txn = self.store.newTransaction(label="test_noDuplication")
- yield addDelegate(txn, delegator, group1, True)
+ yield Delegates.addDelegate(txn, delegator, group1, True)
yield txn.commit()
txn = self.store.newTransaction(label="test_noDuplication")
@@ -279,3 +277,572 @@
)
yield txn.commit()
self.assertEquals([["__top_group_1__"]], results)
+
+
+
+class DelegationCachingTest(StoreTestCase):
+
+ @inlineCallbacks
+ def setUp(self):
+ yield super(DelegationCachingTest, self).setUp()
+ self.store = self.storeUnderTest()
+ self.groupCacher = GroupCacher(self.directory)
+
+
+ @inlineCallbacks
+ def _memcacherMemberResults(self, delegate, readWrite, expanded, results):
+ delegateUIDs = yield Delegates._memcacher.getMembers(delegate.uid, readWrite, expanded)
+ self.assertEqual(
+ set(delegateUIDs) if delegateUIDs is not None else None,
+ set([delegate.uid for delegate in results]) if results is not None else None,
+ msg="uid:{}, rw={}, expanded={}".format(delegate.uid, readWrite, expanded)
+ )
+
+
+ @inlineCallbacks
+ def _memcacherAllMemberResults(self, delegate, results1, results2, results3, results4):
+ for readWrite, expanded, results in (
+ (True, False, results1),
+ (True, True, results2),
+ (False, False, results3),
+ (False, True, results4),
+ ):
+ yield self._memcacherMemberResults(delegate, readWrite, expanded, results)
+
+
+ @inlineCallbacks
+ def _memcacherMembershipResults(self, delegate, readWrite, results):
+ delegatorUIDs = yield Delegates._memcacher.getMemberships(delegate.uid, readWrite)
+ self.assertEqual(
+ set(delegatorUIDs) if delegatorUIDs is not None else None,
+ set([delegator.uid for delegator in results]) if results is not None else None,
+ msg="uid:{}, rw={}".format(delegate.uid, readWrite)
+ )
+
+
+ @inlineCallbacks
+ def _memcacherAllMembershipResults(self, delegate, results1, results2):
+ for readWrite, results in (
+ (True, results1),
+ (False, results2),
+ ):
+ yield self._memcacherMembershipResults(delegate, readWrite, results)
+
+
+ @inlineCallbacks
+ def _delegatesOfResults(self, delegator, readWrite, expanded, results):
+ delegates = (yield Delegates.delegatesOf(self.transactionUnderTest(), delegator, readWrite, expanded))
+ self.assertEquals(
+ set([d.uid for d in delegates]),
+ set([delegate.uid for delegate in results]),
+ msg="uid:{}, rw={}, expanded={}".format(delegator.uid, readWrite, expanded)
+ )
+
+
+ @inlineCallbacks
+ def _delegatesOfAllResults(self, delegator, results1, results2, results3, results4):
+ for readWrite, expanded, results in (
+ (True, False, results1),
+ (True, True, results2),
+ (False, False, results3),
+ (False, True, results4),
+ ):
+ yield self._delegatesOfResults(delegator, readWrite, expanded, results)
+
+
+ @inlineCallbacks
+ def _delegatedToResults(self, delegate, readWrite, results):
+ delegators = (yield Delegates.delegatedTo(self.transactionUnderTest(), delegate, readWrite))
+ self.assertEquals(
+ set([d.uid for d in delegators]),
+ set([delegator.uid for delegator in results]),
+ msg="uid:{}, rw={}".format(delegate.uid, readWrite)
+ )
+
+
+ @inlineCallbacks
+ def _delegatedToAllResults(self, delegator, results1, results2):
+ for readWrite, results in (
+ (True, results1),
+ (False, results2),
+ ):
+ yield self._delegatedToResults(delegator, readWrite, results)
+
+
+ @inlineCallbacks
+ def test_cacheUsed(self):
+
+ yield Delegates._memcacher.flushAll()
+
+ delegator = yield self.directory.recordWithUID(u"__wsanchez1__")
+ delegate1 = yield self.directory.recordWithUID(u"__sagen1__")
+
+ # Patch transaction so we can monitor whether cache is being used
+ original_delegates = CommonStoreTransaction.delegates
+ delegates_query = [0]
+ def _delegates(self, delegator, readWrite, expanded=False):
+ delegates_query[0] += 1
+ return original_delegates(self, delegator, readWrite, expanded)
+ self.patch(CommonStoreTransaction, "delegates", _delegates)
+
+ original_delegators = CommonStoreTransaction.delegators
+ delegators_query = [0]
+ def _delegators(self, delegate, readWrite):
+ delegators_query[0] += 1
+ return original_delegators(self, delegate, readWrite)
+ self.patch(CommonStoreTransaction, "delegators", _delegators)
+
+ # Not used
+ yield Delegates.delegatesOf(self.transactionUnderTest(), delegator, True, False)
+ self.assertEqual(delegates_query[0], 1)
+
+ # Used
+ yield Delegates.delegatesOf(self.transactionUnderTest(), delegator, True, False)
+ self.assertEqual(delegates_query[0], 1)
+
+ # Not used
+ yield Delegates.delegatesOf(self.transactionUnderTest(), delegator, False, False)
+ self.assertEqual(delegates_query[0], 2)
+
+ # Used
+ yield Delegates.delegatesOf(self.transactionUnderTest(), delegator, False, False)
+ self.assertEqual(delegates_query[0], 2)
+
+ # Not used
+ yield Delegates.delegatedTo(self.transactionUnderTest(), delegate1, True)
+ self.assertEqual(delegators_query[0], 1)
+
+ # Used
+ yield Delegates.delegatedTo(self.transactionUnderTest(), delegate1, True)
+ self.assertEqual(delegators_query[0], 1)
+
+ # Not used
+ yield Delegates.delegatedTo(self.transactionUnderTest(), delegate1, False)
+ self.assertEqual(delegators_query[0], 2)
+
+ # Used
+ yield Delegates.delegatedTo(self.transactionUnderTest(), delegate1, False)
+ self.assertEqual(delegators_query[0], 2)
+
+
+ @inlineCallbacks
+ def test_addRemoveDelegation(self):
+
+ yield Delegates._memcacher.flushAll()
+
+ delegator = yield self.directory.recordWithUID(u"__wsanchez1__")
+ delegate1 = yield self.directory.recordWithUID(u"__sagen1__")
+ delegate2 = yield self.directory.recordWithUID(u"__cdaboo1__")
+
+ # Add delegate
+ yield Delegates.addDelegate(self.transactionUnderTest(), delegator, delegate1, True)
+ yield self.commit()
+
+ # Some cache entries invalid
+ yield self._memcacherAllMemberResults(delegator, None, [delegate1], None, None)
+ yield self._memcacherAllMemberResults(delegate1, None, None, None, None)
+ yield self._memcacherAllMemberResults(delegate2, None, None, None, None)
+ yield self._memcacherAllMembershipResults(delegator, None, None)
+ yield self._memcacherAllMembershipResults(delegate1, None, None)
+ yield self._memcacherAllMembershipResults(delegate2, None, None)
+
+ # Read the delegate information twice - first time should be without cache, second with
+ for _ignore in range(2):
+ yield self._delegatesOfAllResults(
+ delegator,
+ [delegate1], [delegate1], [], [],
+ )
+
+ yield self._delegatesOfAllResults(
+ delegate1,
+ [], [], [], [],
+ )
+
+ yield self._delegatesOfAllResults(
+ delegate2,
+ [], [], [], [],
+ )
+
+ yield self._delegatedToAllResults(
+ delegator,
+ [], [],
+ )
+
+ yield self._delegatedToAllResults(
+ delegate1,
+ [delegator], [],
+ )
+
+ yield self._delegatedToAllResults(
+ delegate2,
+ [], [],
+ )
+
+ # Check cache
+ yield self._memcacherAllMemberResults(delegator, [delegate1], [delegate1], [], [])
+ yield self._memcacherAllMemberResults(delegate1, [], [], [], [])
+ yield self._memcacherAllMemberResults(delegate2, [], [], [], [])
+ yield self._memcacherAllMembershipResults(delegator, [], [])
+ yield self._memcacherAllMembershipResults(delegate1, [delegator], [])
+ yield self._memcacherAllMembershipResults(delegate2, [], [])
+
+ # Remove delegate
+ yield Delegates.removeDelegate(self.transactionUnderTest(), delegator, delegate1, True)
+ yield self.commit()
+
+ # Some cache entries invalid
+ yield self._memcacherAllMemberResults(delegator, None, [], [], [])
+ yield self._memcacherAllMemberResults(delegate1, [], [], [], [])
+ yield self._memcacherAllMemberResults(delegate2, [], [], [], [])
+ yield self._memcacherAllMembershipResults(delegator, [], [])
+ yield self._memcacherAllMembershipResults(delegate1, None, [])
+ yield self._memcacherAllMembershipResults(delegate2, [], [])
+
+ # Read the delegate information twice - first time should be without cache, second with
+ for _ignore in range(2):
+ yield self._delegatesOfAllResults(
+ delegator,
+ [], [], [], [],
+ )
+
+ yield self._delegatesOfAllResults(
+ delegate1,
+ [], [], [], [],
+ )
+
+ yield self._delegatesOfAllResults(
+ delegate2,
+ [], [], [], [],
+ )
+
+ yield self._delegatedToAllResults(
+ delegator,
+ [], [],
+ )
+
+ yield self._delegatedToAllResults(
+ delegate1,
+ [], [],
+ )
+
+ yield self._delegatedToAllResults(
+ delegate2,
+ [], [],
+ )
+
+ # Check cache
+ yield self._memcacherAllMemberResults(delegator, [], [], [], [])
+ yield self._memcacherAllMemberResults(delegate1, [], [], [], [])
+ yield self._memcacherAllMemberResults(delegate2, [], [], [], [])
+ yield self._memcacherAllMembershipResults(delegator, [], [])
+ yield self._memcacherAllMembershipResults(delegate1, [], [])
+ yield self._memcacherAllMembershipResults(delegate2, [], [])
+
+
+ @inlineCallbacks
+ def test_setDelegation(self):
+
+ yield Delegates._memcacher.flushAll()
+
+ delegator = yield self.directory.recordWithUID(u"__wsanchez1__")
+ delegates = [
+ (yield self.directory.recordWithUID(u"__sagen1__")),
+ (yield self.directory.recordWithUID(u"__cdaboo1__")),
+ (yield self.directory.recordWithUID(u"__dre1__")),
+ ]
+
+ # Add delegates
+ yield Delegates.setDelegates(self.transactionUnderTest(), delegator, [delegates[0], delegates[1]], True)
+ yield self.commit()
+
+ # Some cache entries invalid
+ yield self._memcacherAllMemberResults(delegator, None, [delegates[0], delegates[1]], None, None)
+ yield self._memcacherAllMembershipResults(delegator, None, None)
+ for delegate in delegates:
+ yield self._memcacherAllMemberResults(delegate, None, None, None, None)
+ yield self._memcacherAllMembershipResults(delegate, None, None)
+
+ # Read the delegate information twice - first time should be without cache, second with
+ for _ignore in range(2):
+ yield self._delegatesOfAllResults(delegator, [delegates[0], delegates[1]], [delegates[0], delegates[1]], [], [])
+ for delegate in delegates:
+ yield self._delegatesOfAllResults(delegate, [], [], [], [])
+
+ yield self._delegatedToAllResults(delegator, [], [])
+ yield self._delegatedToAllResults(delegates[0], [delegator], [])
+ yield self._delegatedToAllResults(delegates[1], [delegator], [])
+ yield self._delegatedToAllResults(delegates[2], [], [])
+
+ # Check cache
+ yield self._memcacherAllMemberResults(delegator, [delegates[0], delegates[1]], [delegates[0], delegates[1]], [], [])
+ for delegate in delegates:
+ yield self._memcacherAllMemberResults(delegate, [], [], [], [])
+ yield self._memcacherAllMembershipResults(delegator, [], [])
+ yield self._memcacherAllMembershipResults(delegates[0], [delegator], [])
+ yield self._memcacherAllMembershipResults(delegates[1], [delegator], [])
+ yield self._memcacherAllMembershipResults(delegates[2], [], [])
+
+ # Remove delegate
+ yield Delegates.setDelegates(self.transactionUnderTest(), delegator, [delegates[1], delegates[2]], True)
+ yield self.commit()
+
+ # Some cache entries invalid
+ yield self._memcacherAllMemberResults(delegator, None, [delegates[1], delegates[2]], [], [])
+ for delegate in delegates:
+ yield self._memcacherAllMemberResults(delegate, [], [], [], [])
+ yield self._memcacherAllMembershipResults(delegator, [], [])
+ yield self._memcacherAllMembershipResults(delegates[0], None, [])
+ yield self._memcacherAllMembershipResults(delegates[1], [delegator], [])
+ yield self._memcacherAllMembershipResults(delegates[2], None, [])
+
+ # Read the delegate information twice - first time should be without cache, second with
+ for _ignore in range(2):
+ yield self._delegatesOfAllResults(delegator, [delegates[1], delegates[2]], [delegates[1], delegates[2]], [], [])
+ for delegate in delegates:
+ yield self._delegatesOfAllResults(delegate, [], [], [], [])
+
+ yield self._delegatedToAllResults(delegator, [], [])
+ yield self._delegatedToAllResults(delegates[0], [], [])
+ yield self._delegatedToAllResults(delegates[1], [delegator], [])
+ yield self._delegatedToAllResults(delegates[2], [delegator], [])
+
+ # Check cache
+ yield self._memcacherAllMemberResults(delegator, [delegates[1], delegates[2]], [delegates[1], delegates[2]], [], [])
+ for delegate in delegates:
+ yield self._memcacherAllMemberResults(delegate, [], [], [], [])
+ yield self._memcacherAllMembershipResults(delegator, [], [])
+ yield self._memcacherAllMembershipResults(delegates[0], [], [])
+ yield self._memcacherAllMembershipResults(delegates[1], [delegator], [])
+ yield self._memcacherAllMembershipResults(delegates[2], [delegator], [])
+
+ # Add delegate with other mode
+ yield Delegates.setDelegates(self.transactionUnderTest(), delegator, [delegates[0]], False)
+ yield self.commit()
+
+ # Some cache entries invalid
+ yield self._memcacherAllMemberResults(delegator, [delegates[1], delegates[2]], [delegates[1], delegates[2]], None, [delegates[0]])
+ for delegate in delegates:
+ yield self._memcacherAllMemberResults(delegate, [], [], [], [])
+ yield self._memcacherAllMembershipResults(delegator, [], [])
+ yield self._memcacherAllMembershipResults(delegates[0], [], None)
+ yield self._memcacherAllMembershipResults(delegates[1], [delegator], [])
+ yield self._memcacherAllMembershipResults(delegates[2], [delegator], [])
+
+ # Read the delegate information twice - first time should be without cache, second with
+ for _ignore in range(2):
+ yield self._delegatesOfAllResults(delegator, [delegates[1], delegates[2]], [delegates[1], delegates[2]], [delegates[0]], [delegates[0]])
+ for delegate in delegates:
+ yield self._delegatesOfAllResults(delegate, [], [], [], [])
+
+ yield self._delegatedToAllResults(delegator, [], [])
+ yield self._delegatedToAllResults(delegates[0], [], [delegator])
+ yield self._delegatedToAllResults(delegates[1], [delegator], [])
+ yield self._delegatedToAllResults(delegates[2], [delegator], [])
+
+ # Check cache
+ yield self._memcacherAllMemberResults(delegator, [delegates[1], delegates[2]], [delegates[1], delegates[2]], [delegates[0]], [delegates[0]])
+ for delegate in delegates:
+ yield self._memcacherAllMemberResults(delegate, [], [], [], [])
+ yield self._memcacherAllMembershipResults(delegator, [], [])
+ yield self._memcacherAllMembershipResults(delegates[0], [], [delegator])
+ yield self._memcacherAllMembershipResults(delegates[1], [delegator], [])
+ yield self._memcacherAllMembershipResults(delegates[2], [delegator], [])
+
+
+ @inlineCallbacks
+ def test_setGroupDelegation(self):
+
+ yield Delegates._memcacher.flushAll()
+
+ delegator = yield self.directory.recordWithUID(u"__wsanchez1__")
+ delegates = [
+ (yield self.directory.recordWithUID(u"__sagen1__")),
+ (yield self.directory.recordWithUID(u"__cdaboo1__")),
+ (yield self.directory.recordWithUID(u"__glyph1__")),
+ (yield self.directory.recordWithUID(u"__dre1__")),
+ ]
+ group1 = yield self.directory.recordWithUID(u"__top_group_1__")
+ group2 = yield self.directory.recordWithUID(u"__sub_group_1__")
+ yield self.transactionUnderTest().groupByUID(u"__top_group_1__")
+ yield self.transactionUnderTest().groupByUID(u"__sub_group_1__")
+ yield self.commit()
+
+ def delegateMatch(*args):
+ return [delegates[i] for i in args]
+
+ # Add group delegate
+ yield Delegates.setDelegates(self.transactionUnderTest(), delegator, [group1], True)
+ yield self.commit()
+
+ # Some cache entries invalid
+ yield self._memcacherAllMemberResults(delegator, None, delegateMatch(0, 1, 2), None, None)
+ yield self._memcacherAllMembershipResults(delegator, None, None)
+ for delegate in delegates:
+ yield self._memcacherAllMemberResults(delegate, None, None, None, None)
+ yield self._memcacherAllMembershipResults(delegate, None, None)
+
+ # Read the delegate information twice - first time should be without cache, second with
+ for _ignore in range(2):
+ yield self._delegatesOfAllResults(delegator, [group1], delegateMatch(0, 1, 2), [], [])
+ for delegate in delegates:
+ yield self._delegatesOfAllResults(delegate, [], [], [], [])
+
+ yield self._delegatedToAllResults(delegator, [], [])
+ yield self._delegatedToAllResults(delegates[0], [delegator], [])
+ yield self._delegatedToAllResults(delegates[1], [delegator], [])
+ yield self._delegatedToAllResults(delegates[2], [delegator], [])
+ yield self._delegatedToAllResults(delegates[3], [], [])
+
+ # Check cache
+ yield self._memcacherAllMemberResults(delegator, [group1], delegateMatch(0, 1, 2), [], [])
+ for delegate in delegates:
+ yield self._memcacherAllMemberResults(delegate, [], [], [], [])
+ yield self._memcacherAllMembershipResults(delegator, [], [])
+ yield self._memcacherAllMembershipResults(delegates[0], [delegator], [])
+ yield self._memcacherAllMembershipResults(delegates[1], [delegator], [])
+ yield self._memcacherAllMembershipResults(delegates[2], [delegator], [])
+ yield self._memcacherAllMembershipResults(delegates[3], [], [])
+
+ # Add individual delegate
+ yield Delegates.setDelegates(self.transactionUnderTest(), delegator, [group1, delegates[3]], True)
+ yield self.commit()
+
+ # Some cache entries invalid
+ yield self._memcacherAllMemberResults(delegator, None, delegateMatch(0, 1, 2, 3), [], [])
+ for delegate in delegates:
+ yield self._memcacherAllMemberResults(delegate, [], [], [], [])
+ yield self._memcacherAllMembershipResults(delegator, [], [])
+ yield self._memcacherAllMembershipResults(delegates[0], [delegator], [])
+ yield self._memcacherAllMembershipResults(delegates[1], [delegator], [])
+ yield self._memcacherAllMembershipResults(delegates[2], [delegator], [])
+ yield self._memcacherAllMembershipResults(delegates[3], None, [])
+
+ # Read the delegate information twice - first time should be without cache, second with
+ for _ignore in range(2):
+ yield self._delegatesOfAllResults(delegator, [group1, delegates[3]], delegateMatch(0, 1, 2, 3), [], [])
+ for delegate in delegates:
+ yield self._delegatesOfAllResults(delegate, [], [], [], [])
+
+ yield self._delegatedToAllResults(delegator, [], [])
+ yield self._delegatedToAllResults(delegates[0], [delegator], [])
+ yield self._delegatedToAllResults(delegates[1], [delegator], [])
+ yield self._delegatedToAllResults(delegates[2], [delegator], [])
+ yield self._delegatedToAllResults(delegates[3], [delegator], [])
+
+ # Check cache
+ yield self._memcacherAllMemberResults(delegator, [group1, delegates[3]], delegateMatch(0, 1, 2, 3), [], [])
+ for delegate in delegates:
+ yield self._memcacherAllMemberResults(delegate, [], [], [], [])
+ yield self._memcacherAllMembershipResults(delegator, [], [])
+ yield self._memcacherAllMembershipResults(delegates[0], [delegator], [])
+ yield self._memcacherAllMembershipResults(delegates[1], [delegator], [])
+ yield self._memcacherAllMembershipResults(delegates[2], [delegator], [])
+ yield self._memcacherAllMembershipResults(delegates[3], [delegator], [])
+
+ # Switch to sub-group
+ yield Delegates.setDelegates(self.transactionUnderTest(), delegator, [group2, delegates[3]], True)
+ yield self.commit()
+
+ # Some cache entries invalid
+ yield self._memcacherAllMemberResults(delegator, None, delegateMatch(0, 1, 3), [], [])
+ for delegate in delegates:
+ yield self._memcacherAllMemberResults(delegate, [], [], [], [])
+ yield self._memcacherAllMembershipResults(delegator, [], [])
+ yield self._memcacherAllMembershipResults(delegates[0], None, [])
+ yield self._memcacherAllMembershipResults(delegates[1], None, [])
+ yield self._memcacherAllMembershipResults(delegates[2], None, [])
+ yield self._memcacherAllMembershipResults(delegates[3], [delegator], [])
+
+ # Read the delegate information twice - first time should be without cache, second with
+ for _ignore in range(2):
+ yield self._delegatesOfAllResults(delegator, [group2, delegates[3]], delegateMatch(0, 1, 3), [], [])
+ for delegate in delegates:
+ yield self._delegatesOfAllResults(delegate, [], [], [], [])
+
+ yield self._delegatedToAllResults(delegator, [], [])
+ yield self._delegatedToAllResults(delegates[0], [delegator], [])
+ yield self._delegatedToAllResults(delegates[1], [delegator], [])
+ yield self._delegatedToAllResults(delegates[2], [], [])
+ yield self._delegatedToAllResults(delegates[3], [delegator], [])
+
+ # Check cache
+ yield self._memcacherAllMemberResults(delegator, [group2, delegates[3]], delegateMatch(0, 1, 3), [], [])
+ for delegate in delegates:
+ yield self._memcacherAllMemberResults(delegate, [], [], [], [])
+ yield self._memcacherAllMembershipResults(delegator, [], [])
+ yield self._memcacherAllMembershipResults(delegates[0], [delegator], [])
+ yield self._memcacherAllMembershipResults(delegates[1], [delegator], [])
+ yield self._memcacherAllMembershipResults(delegates[2], [], [])
+ yield self._memcacherAllMembershipResults(delegates[3], [delegator], [])
+
+ # Add member of existing group
+ yield Delegates.setDelegates(self.transactionUnderTest(), delegator, [group2, delegates[0], delegates[3]], True)
+ yield self.commit()
+
+ # Some cache entries invalid
+ yield self._memcacherAllMemberResults(delegator, None, delegateMatch(0, 1, 3), [], [])
+ for delegate in delegates:
+ yield self._memcacherAllMemberResults(delegate, [], [], [], [])
+ yield self._memcacherAllMembershipResults(delegator, [], [])
+ yield self._memcacherAllMembershipResults(delegates[0], [delegator], [])
+ yield self._memcacherAllMembershipResults(delegates[1], [delegator], [])
+ yield self._memcacherAllMembershipResults(delegates[2], [], [])
+ yield self._memcacherAllMembershipResults(delegates[3], [delegator], [])
+
+ # Read the delegate information twice - first time should be without cache, second with
+ for _ignore in range(2):
+ yield self._delegatesOfAllResults(delegator, [group2, delegates[0], delegates[3]], delegateMatch(0, 1, 3), [], [])
+ for delegate in delegates:
+ yield self._delegatesOfAllResults(delegate, [], [], [], [])
+
+ yield self._delegatedToAllResults(delegator, [], [])
+ yield self._delegatedToAllResults(delegates[0], [delegator], [])
+ yield self._delegatedToAllResults(delegates[1], [delegator], [])
+ yield self._delegatedToAllResults(delegates[2], [], [])
+ yield self._delegatedToAllResults(delegates[3], [delegator], [])
+
+ # Check cache
+ yield self._memcacherAllMemberResults(delegator, [group2, delegates[0], delegates[3]], delegateMatch(0, 1, 3), [], [])
+ for delegate in delegates:
+ yield self._memcacherAllMemberResults(delegate, [], [], [], [])
+ yield self._memcacherAllMembershipResults(delegator, [], [])
+ yield self._memcacherAllMembershipResults(delegates[0], [delegator], [])
+ yield self._memcacherAllMembershipResults(delegates[1], [delegator], [])
+ yield self._memcacherAllMembershipResults(delegates[2], [], [])
+ yield self._memcacherAllMembershipResults(delegates[3], [delegator], [])
+
+ # Remove group
+ yield Delegates.setDelegates(self.transactionUnderTest(), delegator, [delegates[0], delegates[3]], True)
+ yield self.commit()
+
+ # Some cache entries invalid
+ yield self._memcacherAllMemberResults(delegator, None, delegateMatch(0, 3), [], [])
+ for delegate in delegates:
+ yield self._memcacherAllMemberResults(delegate, [], [], [], [])
+ yield self._memcacherAllMembershipResults(delegator, [], [])
+ yield self._memcacherAllMembershipResults(delegates[0], [delegator], [])
+ yield self._memcacherAllMembershipResults(delegates[1], None, [])
+ yield self._memcacherAllMembershipResults(delegates[2], [], [])
+ yield self._memcacherAllMembershipResults(delegates[3], [delegator], [])
+
+ # Read the delegate information twice - first time should be without cache, second with
+ for _ignore in range(2):
+ yield self._delegatesOfAllResults(delegator, [delegates[0], delegates[3]], delegateMatch(0, 3), [], [])
+ for delegate in delegates:
+ yield self._delegatesOfAllResults(delegate, [], [], [], [])
+
+ yield self._delegatedToAllResults(delegator, [], [])
+ yield self._delegatedToAllResults(delegates[0], [delegator], [])
+ yield self._delegatedToAllResults(delegates[1], [], [])
+ yield self._delegatedToAllResults(delegates[2], [], [])
+ yield self._delegatedToAllResults(delegates[3], [delegator], [])
+
+ # Check cache
+ yield self._memcacherAllMemberResults(delegator, [delegates[0], delegates[3]], delegateMatch(0, 3), [], [])
+ for delegate in delegates:
+ yield self._memcacherAllMemberResults(delegate, [], [], [], [])
+ yield self._memcacherAllMembershipResults(delegator, [], [])
+ yield self._memcacherAllMembershipResults(delegates[0], [delegator], [])
+ yield self._memcacherAllMembershipResults(delegates[1], [], [])
+ yield self._memcacherAllMembershipResults(delegates[2], [], [])
+ yield self._memcacherAllMembershipResults(delegates[3], [delegator], [])
Modified: CalendarServer/trunk/txdav/who/test/test_groups.py
===================================================================
--- CalendarServer/trunk/txdav/who/test/test_groups.py 2014-10-24 17:34:33 UTC (rev 14102)
+++ CalendarServer/trunk/txdav/who/test/test_groups.py 2014-10-24 21:12:40 UTC (rev 14103)
@@ -277,7 +277,6 @@
delegates,
set(
[
- u"__wsanchez1__",
u"__sagen1__",
u"__cdaboo1__",
u"__glyph1__"
-------------- next part --------------
An HTML attachment was scrubbed...
URL: <https://lists.macosforge.org/pipermail/calendarserver-changes/attachments/20141024/7a3759be/attachment-0001.html>
More information about the calendarserver-changes
mailing list